41 Commits

Author SHA1 Message Date
mae
94a3ea9e5c cmd/irdump: formatted disassembly 2026-04-18 11:29:57 -05:00
mae
3a52d45378 cmd/irdump: basic disassembler 2026-04-18 11:29:57 -05:00
mae
b1e381fcff cmd/irdump: create cli 2026-04-18 11:29:57 -05:00
mae
9fad74b374 cmd/pkgserver: fix gitignore 2026-04-18 11:23:29 -05:00
mae
05cdf1135d cmd/pkgserver: better no results handling 2026-04-18 11:23:29 -05:00
mae
217e83276f cmd/pkgserver: better no results handling 2026-04-18 11:23:29 -05:00
mae
fbf1dd4c6c cmd/pkgserver: finish search implementation 2026-04-18 11:23:29 -05:00
mae
5a552993e5 cmd/pkgserver: remove get endpoint count field 2026-04-18 11:23:29 -05:00
mae
63b1f3fc4b cmd/pkgserver: search endpoint 2026-04-18 11:23:29 -05:00
mae
42bed68072 cmd/pkgserver: pagination bugfix 2026-04-18 11:23:29 -05:00
0066556d19 cmd/pkgserver: guard sass/ts behind build tag
Packaging nodejs and ruby is an immense burden for the Rosa OS base system, and these files diff poorly.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:23:29 -05:00
mae
fa6e0f1ea0 cmd/pkgserver: add size 2026-04-18 11:23:29 -05:00
bdbdced477 cmd/pkgserver: expose size and store pre-encoded ident
This change also handles SIGSEGV correctly in newStatusHandler, and makes serving status fully zero-copy.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:23:29 -05:00
8a80f6dbab cmd/pkgserver: look up status by name once
This has far less overhead.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:23:29 -05:00
f5fdfcb271 cmd/pkgserver: refer to preset in index
This enables referencing back to internal/rosa through an entry obtained via the index.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:23:29 -05:00
87a19d7ec1 cmd/pkgserver: handle unversioned value
This omits the field for an unversioned artifact, and only does so once on startup.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:23:29 -05:00
f81527eb07 cmd/pkgserver: determine disposition route in mux
This removes duplicate checks and uses the more sound check in mux.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:23:29 -05:00
0e20a7d86e cmd/pkgserver: format get error messages
This improves source code readability on smaller displays.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:23:29 -05:00
d881c059b1 cmd/pkgserver: constant string in pattern
This resolves patterns at compile time.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:23:29 -05:00
c17b6ee190 cmd/pkgserver: satisfy handler signature in method
This is somewhat cleaner.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:23:29 -05:00
65a0cf068d cmd/pkgserver: log instead of write encoding error
This message is unlikely to be useful to the user, and output may be partially written at this point, causing the error to be even less intelligible.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:23:29 -05:00
964e47fcec cmd/pkgserver: appropriately mark test helpers
This improves the usefulness of test log messages.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:23:29 -05:00
78bfc3acec cmd/pkgserver: do not omit report field
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:23:29 -05:00
de580952af cmd/pkgserver: gracefully shut down on signal
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:23:29 -05:00
dbb07b0631 cmd/pkgserver: specify full addr string in flag
This allows greater flexibility.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:23:29 -05:00
ed4692659e cmd/pkgserver: make report argument optional
This allows serving metadata only without a populated report. This also removes the out-of-bounds read on args when no arguments are passed.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:23:29 -05:00
f19b86fe94 cmd/pkgserver: embed internal/rosa metadata
This change also cleans up and reduces some unnecessary copies.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:23:29 -05:00
bc069cc83d cmd/pkgserver: do not assume default mux
This helps with testing.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:23:29 -05:00
def2b8fab7 cmd/pkgserver: create index without report
This is useful for testing, where report testdata is not available.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:23:29 -05:00
mae
23c957b22c cmd/pkgserver: add sort orders, change pagination rules 2026-04-18 11:23:29 -05:00
mae
dc13407386 cmd/pkgserver: add /status endpoint 2026-04-18 11:23:29 -05:00
mae
42a2918644 cmd/pkgserver: minimum viable frontend 2026-04-18 11:23:29 -05:00
mae
d8e720bcc9 cmd/pkgserver: api versioning 2026-04-18 11:23:29 -05:00
mae
459acd7dce cmd/pkgserver: add get endpoint 2026-04-18 11:23:29 -05:00
mae
e7c64bcd41 cmd/pkgserver: add count endpoint and restructure 2026-04-18 11:23:29 -05:00
mae
eee4e3be5e cmd/pkgserver: add status endpoint 2026-04-18 11:23:29 -05:00
mae
01ed763993 cmd/pkgserver: add createPackageIndex 2026-04-18 11:23:29 -05:00
mae
3930b2bf7f cmd/pkgserver: add command handler 2026-04-18 11:23:29 -05:00
mae
5158ecffd1 cmd/pkgserver: replace favicon 2026-04-18 11:23:29 -05:00
mae
d41dd227dd cmd/pkgserver: pagination 2026-04-18 11:23:29 -05:00
mae
0d7be9c287 cmd/pkgserver: basic web ui 2026-04-18 11:23:29 -05:00
128 changed files with 2479 additions and 6879 deletions

4
.gitignore vendored
View File

@@ -7,8 +7,8 @@
# go generate
/cmd/hakurei/LICENSE
/cmd/mbf/internal/pkgserver/ui/static
/internal/pkg/internal/testtool/testtool
/cmd/pkgserver/ui/static/*.js
/internal/pkg/testdata/testtool
/internal/rosa/hakurei_current.tar.gz
# cmd/dist default destination

5
all.sh
View File

@@ -1,3 +1,6 @@
#!/bin/sh -e
HAKUREI_DIST_MAKE='' exec "$(dirname -- "$0")/cmd/dist/dist.sh"
TOOLCHAIN_VERSION="$(go version)"
cd "$(dirname -- "$0")/"
echo "# Building cmd/dist using ${TOOLCHAIN_VERSION}."
go run -v --tags=dist ./cmd/dist

View File

@@ -4,23 +4,15 @@ import "strings"
const (
// SpecialOverlayEscape is the escape string for overlay mount options.
//
// Deprecated: This is no longer used and will be removed in 0.5.
SpecialOverlayEscape = `\`
// SpecialOverlayOption is the separator string between overlay mount options.
//
// Deprecated: This is no longer used and will be removed in 0.5.
SpecialOverlayOption = ","
// SpecialOverlayPath is the separator string between overlay paths.
//
// Deprecated: This is no longer used and will be removed in 0.5.
SpecialOverlayPath = ":"
)
// EscapeOverlayDataSegment escapes a string for formatting into the data
// argument of an overlay mount system call.
//
// Deprecated: This is no longer used and will be removed in 0.5.
func EscapeOverlayDataSegment(s string) string {
if s == "" {
return ""

10
cmd/dist/dist.sh vendored
View File

@@ -1,10 +0,0 @@
#!/bin/sh -e
TOOLCHAIN_VERSION="$(go version)"
cd "$(dirname -- "$0")/../.."
echo "Building cmd/dist using ${TOOLCHAIN_VERSION}."
FLAGS=''
if test -n "$VERBOSE"; then
FLAGS="$FLAGS -v"
fi
go run $FLAGS --tags=dist ./cmd/dist

40
cmd/dist/main.go vendored
View File

@@ -42,19 +42,14 @@ func mustRun(ctx context.Context, name string, arg ...string) {
var comp []byte
func main() {
fmt.Println()
log.SetFlags(0)
log.SetPrefix("")
log.SetPrefix("# ")
verbose := os.Getenv("VERBOSE") != ""
runTests := os.Getenv("HAKUREI_DIST_MAKE") == ""
version := getenv("HAKUREI_VERSION", "untagged")
prefix := getenv("PREFIX", "/usr")
destdir := getenv("DESTDIR", "dist")
if verbose {
log.Println()
}
if err := os.MkdirAll(destdir, 0755); err != nil {
log.Fatal(err)
}
@@ -81,17 +76,12 @@ func main() {
ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt)
defer cancel()
verboseFlag := "-v"
if !verbose {
verboseFlag = "-buildvcs=false"
}
log.Printf("Building hakurei for %s/%s.", runtime.GOOS, runtime.GOARCH)
log.Println("Building hakurei.")
mustRun(ctx, "go", "generate", "./...")
mustRun(
ctx, "go", "build",
"-trimpath",
verboseFlag, "-o", s,
"-v", "-o", s,
"-ldflags=-s -w "+
"-buildid= -linkmode external -extldflags=-static "+
"-X hakurei.app/internal/info.buildVersion="+version+" "+
@@ -100,19 +90,17 @@ func main() {
"-X main.hakureiPath="+prefix+"/bin/hakurei",
"./...",
)
log.Println()
fmt.Println()
if runTests {
log.Println("##### Testing Hakurei.")
mustRun(
ctx, "go", "test",
"-ldflags=-buildid= -linkmode external -extldflags=-static",
"./...",
)
log.Println()
}
log.Println("Testing Hakurei.")
mustRun(
ctx, "go", "test",
"-ldflags=-buildid= -linkmode external -extldflags=-static",
"./...",
)
fmt.Println()
log.Println("##### Creating distribution.")
log.Println("Creating distribution.")
const suffix = ".tar.gz"
distName := "hakurei-" + version + "-" + runtime.GOARCH
var f *os.File
@@ -133,7 +121,7 @@ func main() {
}()
h := sha512.New()
gw, _ := gzip.NewWriterLevel(io.MultiWriter(f, h), gzip.BestCompression)
gw := gzip.NewWriter(io.MultiWriter(f, h))
tw := tar.NewWriter(gw)
mustWriteHeader := func(name string, size int64, mode os.FileMode) {

76
cmd/irdump/main.go Normal file
View File

@@ -0,0 +1,76 @@
package main
import (
"errors"
"log"
"os"
"hakurei.app/command"
"hakurei.app/internal/pkg"
)
func main() {
log.SetFlags(0)
log.SetPrefix("irdump: ")
var (
flagOutput string
flagReal bool
flagHeader bool
flagForce bool
flagRaw bool
)
c := command.New(os.Stderr, log.Printf, "irdump", func(args []string) (err error) {
var input *os.File
if len(args) != 1 {
return errors.New("irdump requires 1 argument")
}
if input, err = os.Open(args[0]); err != nil {
return
}
defer input.Close()
var output *os.File
if flagOutput == "" {
output = os.Stdout
} else {
defer output.Close()
if output, err = os.Create(flagOutput); err != nil {
return
}
}
var out string
if out, err = pkg.Disassemble(input, flagReal, flagHeader, flagForce, flagRaw); err != nil {
return
}
if _, err = output.WriteString(out); err != nil {
return
}
return
}).Flag(
&flagOutput,
"o", command.StringFlag(""),
"Output file for asm (leave empty for stdout)",
).Flag(
&flagReal,
"r", command.BoolFlag(false),
"skip label generation; idents print real value",
).Flag(
&flagHeader,
"H", command.BoolFlag(false),
"display artifact headers",
).Flag(
&flagForce,
"f", command.BoolFlag(false),
"force display (skip validations)",
).Flag(
&flagRaw,
"R", command.BoolFlag(false),
"don't format output",
)
c.MustParse(os.Args[1:], func(err error) {
log.Fatal(err)
})
}

View File

@@ -7,7 +7,6 @@ import (
"testing"
"hakurei.app/check"
"hakurei.app/container"
"hakurei.app/internal/pkg"
"hakurei.app/message"
)
@@ -20,15 +19,8 @@ type cache struct {
// Should generally not be used directly.
c *pkg.Cache
cures, jobs int
// Primarily to work around missing landlock LSM.
hostAbstract bool
// Set SCHED_IDLE.
idle bool
// Unset [pkg.CSuppressInit].
verboseInit bool
// Loaded artifact of [rosa.QEMU].
qemu pkg.Artifact
cures, jobs int
hostAbstract, idle bool
base string
}
@@ -39,6 +31,9 @@ func (cache *cache) open() (err error) {
return os.ErrInvalid
}
if cache.base == "" {
cache.base = "cache"
}
var base *check.Absolute
if cache.base, err = filepath.Abs(cache.base); err != nil {
return
@@ -53,9 +48,6 @@ func (cache *cache) open() (err error) {
if cache.hostAbstract {
flags |= pkg.CHostAbstract
}
if !cache.verboseInit {
flags |= pkg.CSuppressInit
}
done := make(chan struct{})
defer close(done)
@@ -81,39 +73,6 @@ func (cache *cache) open() (err error) {
cache.jobs,
base,
)
if err != nil {
return
}
done <- struct{}{}
if cache.qemu != nil {
var pathname *check.Absolute
pathname, _, err = cache.c.Cure(cache.qemu)
if err != nil {
cache.c.Close()
return
}
pkg.RegisterArch("riscv64", container.BinfmtEntry{
Offset: 0,
Magic: "\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xf3\x00",
Mask: "\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff",
Interpreter: pathname.Append(
"system/bin",
"qemu-riscv64",
),
})
pkg.RegisterArch("arm64", container.BinfmtEntry{
Offset: 0,
Magic: "\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xb7\x00",
Mask: "\xff\xff\xff\xff\xff\xff\xff\xfc\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff",
Interpreter: pathname.Append(
"system/bin",
"qemu-aarch64",
),
})
}
return
}

View File

@@ -99,9 +99,10 @@ func cancelIdent(
var ident pkg.ID
if _, err := io.ReadFull(conn, ident[:]); err != nil {
return nil, false, errors.Join(err, conn.Close())
} else if err = conn.Close(); err != nil {
return nil, false, err
}
ok := cache.Cancel(unique.Make(ident))
return &ident, ok, conn.Close()
return &ident, cache.Cancel(unique.Make(ident)), nil
}
// serve services connections from a [net.UnixListener].
@@ -193,11 +194,11 @@ func serve(
}
case specialAbort:
log.Println("aborting all pending cures")
cm.c.Abort()
if _err := conn.Close(); _err != nil {
log.Println(_err)
}
log.Println("aborting all pending cures")
cm.c.Abort()
}
return
@@ -305,7 +306,6 @@ func cancelRemote(
ctx context.Context,
addr *net.UnixAddr,
a pkg.Artifact,
wait bool,
) error {
done, conn, err := dial(ctx, addr)
if err != nil {
@@ -324,19 +324,13 @@ func cancelRemote(
} else if n != len(id) {
return errors.Join(io.ErrShortWrite, conn.Close())
}
if wait {
if _, err = conn.Read(make([]byte, 1)); err == io.EOF {
err = nil
}
}
return errors.Join(err, conn.Close())
return conn.Close()
}
// abortRemote aborts all [pkg.Artifact] curing on a daemon.
func abortRemote(
ctx context.Context,
addr *net.UnixAddr,
wait bool,
) error {
done, conn, err := dial(ctx, addr)
if err != nil {
@@ -345,10 +339,5 @@ func abortRemote(
defer close(done)
err = writeSpecialHeader(conn, specialAbort)
if wait && err == nil {
if _, err = conn.Read(make([]byte, 1)); err == io.EOF {
err = nil
}
}
return errors.Join(err, conn.Close())
}

View File

@@ -106,11 +106,11 @@ func TestDaemon(t *testing.T) {
}
}()
if err = cancelRemote(ctx, &addr, pkg.NewFile("nonexistent", nil), true); err != nil {
if err = cancelRemote(ctx, &addr, pkg.NewFile("nonexistent", nil)); err != nil {
t.Fatalf("cancelRemote: error = %v", err)
}
if err = abortRemote(ctx, &addr, true); err != nil {
if err = abortRemote(ctx, &addr); err != nil {
t.Fatalf("abortRemote: error = %v", err)
}

View File

@@ -17,12 +17,25 @@ func commandInfo(
args []string,
w io.Writer,
writeStatus bool,
r *rosa.Report,
reportPath string,
) (err error) {
if len(args) == 0 {
return errors.New("info requires at least 1 argument")
}
var r *rosa.Report
if reportPath != "" {
if r, err = rosa.OpenReport(reportPath); err != nil {
return err
}
defer func() {
if closeErr := r.Close(); err == nil {
err = closeErr
}
}()
defer r.HandleAccess(&err)()
}
// recovered by HandleAccess
mustPrintln := func(a ...any) {
if _, _err := fmt.Fprintln(w, a...); _err != nil {

View File

@@ -95,7 +95,7 @@ status : not in report
var (
cm *cache
buf strings.Builder
r *rosa.Report
rp string
)
if tc.status != nil || tc.report != "" {
@@ -108,25 +108,14 @@ status : not in report
}
if tc.report != "" {
pathname := filepath.Join(t.TempDir(), "report")
err := os.WriteFile(
pathname,
rp = filepath.Join(t.TempDir(), "report")
if err := os.WriteFile(
rp,
unsafe.Slice(unsafe.StringData(tc.report), len(tc.report)),
0400,
)
if err != nil {
); err != nil {
t.Fatal(err)
}
r, err = rosa.OpenReport(pathname)
if err != nil {
t.Fatal(err)
}
defer func() {
if err = r.Close(); err != nil {
t.Fatal(err)
}
}()
}
if tc.status != nil {
@@ -168,7 +157,7 @@ status : not in report
tc.args,
&buf,
cm != nil,
r,
rp,
); !reflect.DeepEqual(err, wantErr) {
t.Fatalf("commandInfo: error = %v, want %v", err, wantErr)
}

View File

@@ -1,9 +0,0 @@
// Package ui holds the static web UI.
package ui
import "net/http"
// Register arranges for mux to serve the embedded frontend.
func Register(mux *http.ServeMux) {
mux.Handle("GET /", http.FileServer(http.FS(static)))
}

View File

@@ -1,21 +0,0 @@
//go:build frontend
package ui
import (
"embed"
"io/fs"
)
//go:generate tsc
//go:generate cp index.html style.css static
//go:embed static
var _static embed.FS
var static = func() fs.FS {
if f, err := fs.Sub(_static, "static"); err != nil {
panic(err)
} else {
return f
}
}()

View File

@@ -20,7 +20,6 @@ import (
"io"
"log"
"net"
"net/http"
"os"
"os/signal"
"path/filepath"
@@ -42,9 +41,6 @@ import (
"hakurei.app/internal/pkg"
"hakurei.app/internal/rosa"
"hakurei.app/message"
"hakurei.app/cmd/mbf/internal/pkgserver"
"hakurei.app/cmd/mbf/internal/pkgserver/ui"
)
func main() {
@@ -63,16 +59,17 @@ func main() {
defer stop()
var cm cache
defer func() { cm.Close() }()
defer func() {
cm.Close()
if r := recover(); r != nil {
fmt.Println(r)
log.Fatal("consider scrubbing the on-disk cache")
}
}()
var (
flagQuiet bool
flagQEMU bool
flagArch string
flagCheck bool
flagLTO bool
flagCrossOverride int
addr net.UnixAddr
)
@@ -80,38 +77,11 @@ func main() {
msg.SwapVerbose(!flagQuiet)
cm.ctx, cm.msg = ctx, msg
cm.base = os.ExpandEnv(cm.base)
if cm.base == "" {
cm.base = "cache"
}
addr.Net = "unix"
addr.Name = os.ExpandEnv(addr.Name)
if addr.Name == "" {
addr.Name = filepath.Join(cm.base, "daemon")
}
var flags int
if !flagCheck {
flags |= rosa.OptSkipCheck
}
if !flagLTO {
flags |= rosa.OptLLVMNoLTO
}
rosa.DropCaches("", flags)
cross := flagArch != "" && flagArch != runtime.GOARCH
if flagQEMU || cross {
cm.qemu = rosa.Std.Load(rosa.QEMU)
}
if cross {
if flagCrossOverride != -1 {
flags = flagCrossOverride
}
rosa.DropCaches(flagArch, flags)
if !rosa.HasStage0() {
return pkg.UnsupportedArchError(flagArch)
}
addr.Name = "daemon"
}
return nil
@@ -119,30 +89,6 @@ func main() {
&flagQuiet,
"q", command.BoolFlag(false),
"Do not print cure messages",
).Flag(
&flagQEMU,
"register", command.BoolFlag(false),
"Enable additional target architectures",
).Flag(
&flagArch,
"arch", command.StringFlag(runtime.GOARCH),
"Target architecture",
).Flag(
&flagLTO,
"lto", command.BoolFlag(false),
"Enable LTO in stage2 and stage3 LLVM toolchains",
).Flag(
&flagCheck,
"check", command.BoolFlag(true),
"Run test suites",
).Flag(
&flagCrossOverride,
"cross-flags", command.IntFlag(-1),
"Override non-native target preset flags",
).Flag(
&cm.verboseInit,
"v", command.BoolFlag(false),
"Do not suppress verbose output from init",
).Flag(
&cm.cures,
"cures", command.IntFlag(0),
@@ -175,17 +121,7 @@ func main() {
c.NewCommand(
"checksum", "Compute checksum of data read from standard input",
func([]string) error {
done := make(chan struct{})
defer close(done)
go func() {
select {
case <-ctx.Done():
os.Exit(1)
case <-done:
return
}
}()
go func() { <-ctx.Done(); os.Exit(1) }()
h := sha512.New384()
if _, err := io.Copy(h, os.Stdin); err != nil {
return err
@@ -219,7 +155,6 @@ func main() {
{
var (
flagBind string
flagStatus bool
flagReport string
)
@@ -227,52 +162,8 @@ func main() {
"info",
"Display out-of-band metadata of an artifact",
func(args []string) (err error) {
const shutdownTimeout = 15 * time.Second
var r *rosa.Report
if flagReport != "" {
if r, err = rosa.OpenReport(flagReport); err != nil {
return err
}
defer func() {
if closeErr := r.Close(); err == nil {
err = closeErr
}
}()
defer r.HandleAccess(&err)()
}
if flagBind == "" {
return commandInfo(&cm, args, os.Stdout, flagStatus, r)
}
var mux http.ServeMux
ui.Register(&mux)
if err = pkgserver.Register(ctx, &mux, r); err != nil {
return
}
server := http.Server{Addr: flagBind, Handler: &mux}
go func() {
<-ctx.Done()
cc, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
defer cancel()
if _err := server.Shutdown(cc); _err != nil {
log.Fatal(_err)
}
}()
msg.Verbosef("listening on %q", flagBind)
err = server.ListenAndServe()
if errors.Is(err, http.ErrServerClosed) {
err = nil
}
return
return commandInfo(&cm, args, os.Stdout, flagStatus, flagReport)
},
).Flag(
&flagBind,
"bind", command.StringFlag(""),
"TCP address for the server to listen on",
).Flag(
&flagStatus,
"status", command.BoolFlag(false),
@@ -431,7 +322,7 @@ func main() {
if err = cm.Do(func(cache *pkg.Cache) (err error) {
pathname, _, err = cache.Cure(
(t - 2).Load(rosa.LLVM),
(t - 2).Load(rosa.Clang),
)
return
}); err != nil {
@@ -441,7 +332,7 @@ func main() {
if err = cm.Do(func(cache *pkg.Cache) (err error) {
pathname, checksum[0], err = cache.Cure(
(t - 1).Load(rosa.LLVM),
(t - 1).Load(rosa.Clang),
)
return
}); err != nil {
@@ -450,7 +341,7 @@ func main() {
log.Println("stage2:", pathname)
if err = cm.Do(func(cache *pkg.Cache) (err error) {
pathname, checksum[1], err = cache.Cure(
t.Load(rosa.LLVM),
t.Load(rosa.Clang),
)
return
}); err != nil {
@@ -506,11 +397,6 @@ func main() {
flagExport string
flagRemote bool
flagNoReply bool
flagFaults bool
flagPop bool
flagBoot bool
flagStd bool
)
c.NewCommand(
"cure",
@@ -524,18 +410,11 @@ func main() {
return fmt.Errorf("unknown artifact %q", args[0])
}
t := rosa.Std
if flagBoot {
t -= 2
} else if flagStd {
t -= 1
}
switch {
default:
var pathname *check.Absolute
err := cm.Do(func(cache *pkg.Cache) (err error) {
pathname, _, err = cache.Cure(t.Load(p))
pathname, _, err = cache.Cure(rosa.Std.Load(p))
return
})
if err != nil {
@@ -588,7 +467,7 @@ func main() {
return cm.Do(func(cache *pkg.Cache) error {
return cache.EnterExec(
ctx,
t.Load(p),
rosa.Std.Load(p),
true, os.Stdin, os.Stdout, os.Stderr,
rosa.AbsSystem.Append("bin", "mksh"),
"sh",
@@ -600,7 +479,7 @@ func main() {
if flagNoReply {
flags |= remoteNoReply
}
a := t.Load(p)
a := rosa.Std.Load(p)
pathname, err := cureRemote(ctx, &addr, a, flags)
if !flagNoReply && err == nil {
log.Println(pathname)
@@ -610,55 +489,12 @@ func main() {
cc, cancel := context.WithDeadline(context.Background(), daemonDeadline())
defer cancel()
if _err := cancelRemote(cc, &addr, a, false); _err != nil {
if _err := cancelRemote(cc, &addr, a); _err != nil {
log.Println(err)
}
}
return err
case flagFaults:
var faults []pkg.Fault
if err := cm.Do(func(cache *pkg.Cache) (err error) {
faults, err = cache.ReadFaults(t.Load(p))
return
}); err != nil {
return err
}
for _, fault := range faults {
log.Printf("%s: %s ago", fault.String(), time.Since(fault.Time()))
}
return nil
case flagPop:
var faults []pkg.Fault
if err := cm.Do(func(cache *pkg.Cache) (err error) {
faults, err = cache.ReadFaults(t.Load(p))
return
}); err != nil {
return err
}
if len(faults) == 0 {
return errors.New("no fault entries found")
}
fault := faults[len(faults)-1]
r, err := fault.Open()
if err != nil {
return err
}
if _, err = io.Copy(os.Stdout, r); err != nil {
_ = r.Close()
return err
}
fmt.Println()
if err = r.Close(); err != nil {
return err
}
log.Printf("faulting cure terminated %s ago", time.Since(fault.Time()))
return fault.Destroy()
}
},
).Flag(
@@ -681,52 +517,13 @@ func main() {
&flagNoReply,
"no-reply", command.BoolFlag(false),
"Do not receive a reply from the daemon",
).Flag(
&flagBoot,
"boot", command.BoolFlag(false),
"Build on the stage0 toolchain",
).Flag(
&flagStd,
"std", command.BoolFlag(false),
"Build on the intermediate toolchain",
).Flag(
&flagFaults,
"faults", command.BoolFlag(false),
"Display fault entries of the specified artifact",
).Flag(
&flagPop,
"pop", command.BoolFlag(false),
"Display and destroy the most recent fault entry",
)
}
c.NewCommand(
"clear",
"Remove all fault entries from the cache",
func([]string) error {
return cm.Do(func(*pkg.Cache) error {
pathname := filepath.Join(cm.base, "fault")
dents, err := os.ReadDir(pathname)
if err != nil {
return err
}
for _, dent := range dents {
msg.Verbosef("destroying entry %s", dent.Name())
if err = os.Remove(filepath.Join(pathname, dent.Name())); err != nil {
return err
}
}
log.Printf("destroyed %d fault entries", len(dents))
return nil
})
},
)
c.NewCommand(
"abort",
"Abort all pending cures on the daemon",
func([]string) error { return abortRemote(ctx, &addr, false) },
func([]string) error { return abortRemote(ctx, &addr) },
)
{
@@ -749,7 +546,7 @@ func main() {
presets[i] = p
}
base := rosa.LLVM
base := rosa.Clang
if !flagWithToolchain {
base = rosa.Musl
}
@@ -821,7 +618,6 @@ func main() {
z.Hostname = "localhost"
z.Uid, z.Gid = (1<<10)-1, (1<<10)-1
z.Stdin, z.Stdout, z.Stderr = os.Stdin, os.Stdout, os.Stderr
z.Quiet = !cm.verboseInit
if s, ok := os.LookupEnv("TERM"); ok {
z.Env = append(z.Env, "TERM="+s)
}

View File

@@ -1,47 +0,0 @@
package main
import (
"net"
"os"
"testing"
"hakurei.app/internal/rosa"
)
func TestMain(m *testing.M) {
rosa.DropCaches("", rosa.OptLLVMNoLTO)
os.Exit(m.Run())
}
func TestCureAll(t *testing.T) {
t.Parallel()
const env = "ROSA_TEST_DAEMON"
if !testing.Verbose() {
t.Skip("verbose flag not set")
}
pathname, ok := os.LookupEnv(env)
if !ok {
t.Skip(env + " not set")
}
addr := net.UnixAddr{Net: "unix", Name: pathname}
t.Cleanup(func() {
if t.Failed() {
if err := abortRemote(t.Context(), &addr, false); err != nil {
t.Fatal(err)
}
}
})
for i := range rosa.PresetEnd {
p := rosa.PArtifact(i)
t.Run(rosa.GetMetadata(p).Name, func(t *testing.T) {
_, err := cureRemote(t.Context(), &addr, rosa.Std.Load(p), 0)
if err != nil {
t.Error(err)
}
})
}
}

View File

@@ -1,8 +1,6 @@
// Package pkgserver implements the package metadata service backend.
package pkgserver
package main
import (
"context"
"encoding/json"
"log"
"net/http"
@@ -10,7 +8,6 @@ import (
"path"
"strconv"
"sync"
"time"
"hakurei.app/internal/info"
"hakurei.app/internal/rosa"
@@ -161,29 +158,6 @@ func (index *packageIndex) registerAPI(mux *http.ServeMux) {
mux.HandleFunc("GET /status/", index.newStatusHandler(true))
}
// Register arranges for mux to service API requests.
func Register(ctx context.Context, mux *http.ServeMux, report *rosa.Report) error {
var index packageIndex
index.search = make(searchCache)
if err := index.populate(report); err != nil {
return err
}
ticker := time.NewTicker(1 * time.Minute)
go func() {
for {
select {
case <-ctx.Done():
ticker.Stop()
return
case <-ticker.C:
index.search.clean()
}
}
}()
index.registerAPI(mux)
return nil
}
// writeAPIPayload sets headers common to API responses and encodes payload as
// JSON for the response body.
func writeAPIPayload(w http.ResponseWriter, payload any) {

View File

@@ -1,4 +1,4 @@
package pkgserver
package main
import (
"net/http"
@@ -118,31 +118,33 @@ func TestAPIGet(t *testing.T) {
checkStatus(t, resp, http.StatusOK)
checkAPIHeader(t, w.Header())
checkPayloadFunc(t, resp, func(got *struct {
Count int `json:"count"`
Values []*metadata `json:"values"`
}) bool {
return slices.EqualFunc(got.Values, want, func(a, b *metadata) bool {
return (a.Version == b.Version ||
a.Version == rosa.Unversioned ||
b.Version == rosa.Unversioned) &&
a.HasReport == b.HasReport &&
a.Name == b.Name &&
a.Description == b.Description &&
a.Website == b.Website
})
return got.Count == len(want) &&
slices.EqualFunc(got.Values, want, func(a, b *metadata) bool {
return (a.Version == b.Version ||
a.Version == rosa.Unversioned ||
b.Version == rosa.Unversioned) &&
a.HasReport == b.HasReport &&
a.Name == b.Name &&
a.Description == b.Description &&
a.Website == b.Website
})
})
})
}
checkWithSuffix("declarationAscending", "?limit=2&index=1&sort=0", []*metadata{
checkWithSuffix("declarationAscending", "?limit=2&index=0&sort=0", []*metadata{
{
Metadata: rosa.GetMetadata(0),
Version: rosa.Std.Version(0),
},
{
Metadata: rosa.GetMetadata(1),
Version: rosa.Std.Version(1),
},
{
Metadata: rosa.GetMetadata(2),
Version: rosa.Std.Version(2),
},
})
checkWithSuffix("declarationAscending offset", "?limit=3&index=5&sort=0", []*metadata{
{

View File

@@ -1,4 +1,4 @@
package pkgserver
package main
import (
"cmp"
@@ -50,7 +50,7 @@ type metadata struct {
}
// populate deterministically populates packageIndex, optionally with a report.
func (index *packageIndex) populate(report *rosa.Report) (err error) {
func (index *packageIndex) populate(cache *pkg.Cache, report *rosa.Report) (err error) {
if report != nil {
defer report.HandleAccess(&err)()
index.handleAccess = report.HandleAccess
@@ -58,7 +58,6 @@ func (index *packageIndex) populate(report *rosa.Report) (err error) {
var work [rosa.PresetUnexportedStart]*metadata
index.names = make(map[string]*metadata)
ir := pkg.NewIR()
for p := range rosa.PresetUnexportedStart {
m := metadata{
p: p,
@@ -73,8 +72,8 @@ func (index *packageIndex) populate(report *rosa.Report) (err error) {
m.Version = ""
}
if report != nil {
id := ir.Ident(rosa.Std.Load(p))
if cache != nil && report != nil {
id := cache.Ident(rosa.Std.Load(p))
m.ids = pkg.Encode(id.Value())
m.status, m.Size = report.ArtifactOf(id)
m.HasReport = m.Size >= 0

114
cmd/pkgserver/main.go Normal file
View File

@@ -0,0 +1,114 @@
package main
import (
"context"
"errors"
"log"
"net/http"
"os"
"os/signal"
"syscall"
"time"
"hakurei.app/check"
"hakurei.app/command"
"hakurei.app/internal/pkg"
"hakurei.app/internal/rosa"
"hakurei.app/message"
)
const shutdownTimeout = 15 * time.Second
func main() {
log.SetFlags(0)
log.SetPrefix("pkgserver: ")
var (
flagBaseDir string
flagAddr string
)
ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
defer stop()
msg := message.New(log.Default())
c := command.New(os.Stderr, log.Printf, "pkgserver", func(args []string) error {
var (
cache *pkg.Cache
report *rosa.Report
)
switch len(args) {
case 0:
break
case 1:
baseDir, err := check.NewAbs(flagBaseDir)
if err != nil {
return err
}
cache, err = pkg.Open(ctx, msg, 0, 0, baseDir)
if err != nil {
return err
}
defer cache.Close()
report, err = rosa.OpenReport(args[0])
if err != nil {
return err
}
default:
return errors.New("pkgserver requires 1 argument")
}
var index packageIndex
index.search = make(searchCache)
if err := index.populate(cache, report); err != nil {
return err
}
ticker := time.NewTicker(1 * time.Minute)
go func() {
for {
select {
case <-ctx.Done():
ticker.Stop()
return
case <-ticker.C:
index.search.clean()
}
}
}()
var mux http.ServeMux
uiRoutes(&mux)
index.registerAPI(&mux)
server := http.Server{
Addr: flagAddr,
Handler: &mux,
}
go func() {
<-ctx.Done()
c, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
defer cancel()
if err := server.Shutdown(c); err != nil {
log.Fatal(err)
}
}()
return server.ListenAndServe()
}).Flag(
&flagBaseDir,
"b", command.StringFlag(""),
"base directory for cache",
).Flag(
&flagAddr,
"addr", command.StringFlag(":8067"),
"TCP network address to listen on",
)
c.MustParse(os.Args[1:], func(err error) {
if errors.Is(err, http.ErrServerClosed) {
os.Exit(0)
}
log.Fatal(err)
})
}

View File

@@ -1,4 +1,4 @@
package pkgserver
package main
import (
"bytes"
@@ -15,7 +15,7 @@ func newIndex(t *testing.T) *packageIndex {
t.Helper()
var index packageIndex
if err := index.populate(nil); err != nil {
if err := index.populate(nil, nil); err != nil {
t.Fatalf("populate: error = %v", err)
}
return &index

View File

@@ -1,4 +1,4 @@
package pkgserver
package main
import (
"cmp"

33
cmd/pkgserver/ui.go Normal file
View File

@@ -0,0 +1,33 @@
package main
import "net/http"
func serveWebUI(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
w.Header().Set("Pragma", "no-cache")
w.Header().Set("Expires", "0")
w.Header().Set("X-Content-Type-Options", "nosniff")
w.Header().Set("X-XSS-Protection", "1")
w.Header().Set("X-Frame-Options", "DENY")
http.ServeFileFS(w, r, content, "ui/index.html")
}
func serveStaticContent(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/static/style.css":
http.ServeFileFS(w, r, content, "ui/static/style.css")
case "/favicon.ico":
http.ServeFileFS(w, r, content, "ui/static/favicon.ico")
case "/static/index.js":
http.ServeFileFS(w, r, content, "ui/static/index.js")
default:
http.NotFound(w, r)
}
}
func uiRoutes(mux *http.ServeMux) {
mux.HandleFunc("GET /{$}", serveWebUI)
mux.HandleFunc("GET /favicon.ico", serveStaticContent)
mux.HandleFunc("GET /static/", serveStaticContent)
}

View File

@@ -3,9 +3,9 @@
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<link rel="stylesheet" href="style.css">
<link rel="stylesheet" href="static/style.css">
<title>Hakurei PkgServer</title>
<script src="index.js"></script>
<script src="static/index.js"></script>
</head>
<body>
<h1>Hakurei PkgServer</h1>

Binary file not shown.

After

Width:  |  Height:  |  Size: 17 KiB

9
cmd/pkgserver/ui_full.go Normal file
View File

@@ -0,0 +1,9 @@
//go:build frontend
package main
import "embed"
//go:generate tsc -p ui
//go:embed ui/*
var content embed.FS

View File

@@ -1,7 +1,7 @@
//go:build !frontend
package ui
package main
import "testing/fstest"
var static fstest.MapFS
var content fstest.MapFS

View File

@@ -20,14 +20,11 @@
};
virtualisation = {
# Hopefully reduces spurious test failures:
memorySize = if pkgs.stdenv.hostPlatform.is32bit then 2046 else 8192;
diskSize = 6 * 1024;
qemu.options = [
# Increase test performance:
"-smp 16"
"-smp 8"
];
};

View File

@@ -28,7 +28,7 @@ testers.nixosTest {
# For go tests:
(pkgs.writeShellScriptBin "sharefs-workload-hakurei-tests" ''
cp -r "${self.packages.${system}.hakurei.src}" "/sdcard/hakurei" && cd "/sdcard/hakurei"
${fhs}/bin/hakurei-fhs -c 'ROSA_SKIP_BINFMT=1 CC="clang -O3 -Werror" go test ./...'
${fhs}/bin/hakurei-fhs -c 'CC="clang -O3 -Werror" go test ./...'
'')
];

View File

@@ -1,46 +0,0 @@
package container
import (
"strings"
"unsafe"
"hakurei.app/check"
)
// escapeBinfmt escapes magic/mask sequences in a [BinfmtEntry].
func escapeBinfmt(buf *strings.Builder, s string) string {
const lowerhex = "0123456789abcdef"
buf.Reset()
for _, c := range unsafe.Slice(unsafe.StringData(s), len(s)) {
switch c {
case 0, '\\', ':':
buf.WriteString(`\x`)
buf.WriteByte(lowerhex[c>>4])
buf.WriteByte(lowerhex[c&0xf])
default:
buf.WriteByte(c)
}
}
return buf.String()
}
// BinfmtEntry is an entry to be registered by the init process.
type BinfmtEntry struct {
// The offset of the magic/mask in the file, counted in bytes.
Offset byte
// The byte sequence binfmt_misc is matching for.
Magic string
// An (optional, defaults to all 0xff) mask.
Mask string
// The program that should be invoked with the binary as first argument.
Interpreter *check.Absolute
}
// Valid returns whether e can be registered into the kernel.
func (e *BinfmtEntry) Valid() bool {
return e != nil &&
int(e.Offset)+max(len(e.Magic), len(e.Mask)) < 128 &&
e.Interpreter != nil && len(e.Interpreter.String()) < 128
}

View File

@@ -1,62 +0,0 @@
package container
import (
"strings"
"testing"
"hakurei.app/fhs"
)
func TestEscapeBinfmt(t *testing.T) {
t.Parallel()
testCases := []struct {
name string
magic string
want string
}{
{"packed DOS applications", "\x0eDEX", "\x0eDEX"},
{"riscv64 magic",
"\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xf3\x00",
"\x7fELF\x02\x01\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\x02\\x00\xf3\\x00"},
{"riscv64 mask",
"\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff",
"\xff\xff\xff\xff\xff\xff\xff\\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff"},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
got := escapeBinfmt(new(strings.Builder), tc.magic)
if got != tc.want {
t.Errorf("escapeBinfmt: %q, want %q", got, tc.want)
}
})
}
}
func TestBinfmtEntry(t *testing.T) {
t.Parallel()
testCases := []struct {
name string
e BinfmtEntry
valid bool
}{
{"zero", BinfmtEntry{}, false},
{"large offset", BinfmtEntry{Offset: 128}, false},
{"long magic", BinfmtEntry{Magic: strings.Repeat("\x00", 128)}, false},
{"long mask", BinfmtEntry{Mask: strings.Repeat("\x00", 128)}, false},
{"valid", BinfmtEntry{Interpreter: fhs.AbsRoot}, true},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
if tc.e.Valid() != tc.valid {
t.Errorf("Valid: %v", !tc.valid)
}
})
}
}

View File

@@ -18,7 +18,6 @@ const (
CAP_SETPCAP = 0x8
CAP_NET_ADMIN = 0xc
CAP_DAC_OVERRIDE = 0x1
CAP_SETFCAP = 0x1f
)
type (

View File

@@ -67,9 +67,6 @@ type (
// Copied to the underlying [exec.Cmd].
WaitDelay time.Duration
// Suppress verbose output of init.
Quiet bool
cmd *exec.Cmd
ctx context.Context
msg message.Msg
@@ -91,20 +88,12 @@ type (
// Time to wait for processes lingering after the initial process terminates.
AdoptWaitDelay time.Duration
// Map uid/gid 0 in the init process. Requires [FstypeProc] attached to
// [fhs.Proc] in the container filesystem.
InitAsRoot bool
// Mapped Uid in user namespace.
Uid int
// Mapped Gid in user namespace.
Gid int
// Hostname value in UTS namespace.
Hostname string
// Register binfmt_misc entries.
Binfmt []BinfmtEntry
// Alternative pathname to attach binfmt_misc filesystem. The zero value
// requires [FstypeProc] to be made available at [fhs.Proc].
BinfmtPath *check.Absolute
// Sequential container setup ops.
*Ops
@@ -224,9 +213,6 @@ func (p *Container) Start() error {
if p.cmd.Process != nil {
return errors.New("container: already started")
}
if !p.InitAsRoot && len(p.Binfmt) > 0 {
return errors.New("container: init as root required, but not enabled")
}
if err := ensureCloseOnExec(); err != nil {
return err
@@ -297,18 +283,6 @@ func (p *Container) Start() error {
if !p.HostNet {
p.cmd.SysProcAttr.Cloneflags |= CLONE_NEWNET
}
if p.InitAsRoot {
p.cmd.SysProcAttr.AmbientCaps = append(p.cmd.SysProcAttr.AmbientCaps,
// mappings during init as root
CAP_SETFCAP,
)
if !p.SeccompDisable &&
len(p.SeccompRules) == 0 &&
p.SeccompPresets&std.PresetDenyNS != 0 {
return errors.New("container: as root requires late namespace creation")
}
}
// place setup pipe before user supplied extra files, this is later restored by init
if r, w, err := os.Pipe(); err != nil {
@@ -368,6 +342,8 @@ func (p *Container) Start() error {
Err: ENOSYS,
Origin: true,
}
} else {
p.msg.Verbosef("landlock abi version %d", abi)
}
if rulesetFd, err := rulesetAttr.Create(0); err != nil {
@@ -377,6 +353,7 @@ func (p *Container) Start() error {
Err: err,
}
} else {
p.msg.Verbosef("enforcing landlock ruleset %s", rulesetAttr)
if err = landlock.RestrictSelf(rulesetFd, 0); err != nil {
_ = Close(rulesetFd)
return &StartError{
@@ -433,6 +410,7 @@ func (p *Container) Start() error {
}
}
p.msg.Verbose("starting container init")
if err := p.cmd.Start(); err != nil {
return &StartError{
Step: "start container init",
@@ -503,6 +481,7 @@ func (p *Container) Serve() (err error) {
}
case <-done:
p.msg.Verbose("setup payload took", time.Since(t))
return
}
}(p.setup[1])
@@ -512,7 +491,7 @@ func (p *Container) Serve() (err error) {
Getuid(),
Getgid(),
len(p.ExtraFiles),
p.msg.IsVerbose() && !p.Quiet,
p.msg.IsVerbose(),
})
}

View File

@@ -16,8 +16,6 @@ import (
"strings"
"syscall"
"testing"
"time"
"unsafe"
"hakurei.app/check"
"hakurei.app/command"
@@ -235,9 +233,6 @@ func earlyMnt(mnt ...*vfs.MountInfoEntry) func(*testing.T, context.Context) []*v
return func(*testing.T, context.Context) []*vfs.MountInfoEntry { return mnt }
}
//go:linkname toHost hakurei.app/container.toHost
func toHost(name string) string
var containerTestCases = []struct {
name string
filter bool
@@ -337,15 +332,13 @@ var containerTestCases = []struct {
func(t *testing.T, ctx context.Context) []*vfs.MountInfoEntry {
return []*vfs.MountInfoEntry{
ent("/", hst.PrivateTmp, "rw", "overlay", "overlay",
"rw"+
",lowerdir+="+
toHost(ctx.Value(testVal("lower0")).(*check.Absolute).String())+
",lowerdir+="+
toHost(ctx.Value(testVal("lower1")).(*check.Absolute).String())+
"rw,lowerdir="+
container.InternalToHostOvlEscape(ctx.Value(testVal("lower0")).(*check.Absolute).String())+":"+
container.InternalToHostOvlEscape(ctx.Value(testVal("lower1")).(*check.Absolute).String())+
",upperdir="+
toHost(ctx.Value(testVal("upper")).(*check.Absolute).String())+
container.InternalToHostOvlEscape(ctx.Value(testVal("upper")).(*check.Absolute).String())+
",workdir="+
toHost(ctx.Value(testVal("work")).(*check.Absolute).String())+
container.InternalToHostOvlEscape(ctx.Value(testVal("work")).(*check.Absolute).String())+
",redirect_dir=nofollow,uuid=on,userxattr"),
}
},
@@ -395,11 +388,9 @@ var containerTestCases = []struct {
func(t *testing.T, ctx context.Context) []*vfs.MountInfoEntry {
return []*vfs.MountInfoEntry{
ent("/", hst.PrivateTmp, "rw", "overlay", "overlay",
"ro"+
",lowerdir+="+
toHost(ctx.Value(testVal("lower0")).(*check.Absolute).String())+
",lowerdir+="+
toHost(ctx.Value(testVal("lower1")).(*check.Absolute).String())+
"ro,lowerdir="+
container.InternalToHostOvlEscape(ctx.Value(testVal("lower0")).(*check.Absolute).String())+":"+
container.InternalToHostOvlEscape(ctx.Value(testVal("lower1")).(*check.Absolute).String())+
",redirect_dir=nofollow,userxattr"),
}
},
@@ -409,11 +400,39 @@ var containerTestCases = []struct {
func TestContainer(t *testing.T) {
t.Parallel()
var suffix string
runTests:
t.Run("cancel", testContainerCancel(nil, func(t *testing.T, c *container.Container) {
wantErr := context.Canceled
wantExitCode := 0
if err := c.Wait(); !reflect.DeepEqual(err, wantErr) {
if m, ok := container.InternalMessageFromError(err); ok {
t.Error(m)
}
t.Errorf("Wait: error = %#v, want %#v", err, wantErr)
}
if ps := c.ProcessState(); ps == nil {
t.Errorf("ProcessState unexpectedly returned nil")
} else if code := ps.ExitCode(); code != wantExitCode {
t.Errorf("ExitCode: %d, want %d", code, wantExitCode)
}
}))
t.Run("forward", testContainerCancel(func(c *container.Container) {
c.ForwardCancel = true
}, func(t *testing.T, c *container.Container) {
var exitError *exec.ExitError
if err := c.Wait(); !errors.As(err, &exitError) {
if m, ok := container.InternalMessageFromError(err); ok {
t.Error(m)
}
t.Errorf("Wait: error = %v", err)
}
if code := exitError.ExitCode(); code != blockExitCodeInterrupt {
t.Errorf("ExitCode: %d, want %d", code, blockExitCodeInterrupt)
}
}))
for i, tc := range containerTestCases {
_suffix := suffix
t.Run(tc.name+_suffix, func(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
wantOps, wantOpsCtx := tc.ops(t)
@@ -437,8 +456,6 @@ runTests:
c.SeccompDisable = !tc.filter
c.RetainSession = tc.session
c.HostNet = tc.net
c.InitAsRoot = _suffix != ""
c.Env = append(c.Env, "HAKUREI_TEST_SUFFIX="+_suffix)
if info.CanDegrade {
if _, err := landlock.GetABI(); err != nil {
if !errors.Is(err, syscall.ENOSYS) {
@@ -448,9 +465,6 @@ runTests:
t.Log("Landlock LSM is unavailable, enabling HostAbstract")
}
}
if c.InitAsRoot {
c.SeccompPresets &= ^std.PresetDenyNS
}
c.
Readonly(check.MustAbs(pathReadonly), 0755).
@@ -519,11 +533,6 @@ runTests:
}
})
}
if suffix == "" {
suffix = " as root"
goto runTests
}
}
func ent(root, target, vfsOptstr, fsType, source, fsOptstr string) *vfs.MountInfoEntry {
@@ -546,118 +555,49 @@ func hostnameFromTestCase(name string) string {
}
func testContainerCancel(
t *testing.T,
containerExtra func(c *container.Container),
waitCheck func(ps *os.ProcessState, waitErr error),
) {
ctx, cancel := context.WithCancel(t.Context())
c := helperNewContainer(ctx, "block")
c.Stdout, c.Stderr = os.Stdout, os.Stderr
if containerExtra != nil {
containerExtra(c)
}
ready := make(chan struct{})
var waitErr error
r, w, err := os.Pipe()
if err != nil {
t.Fatalf("cannot pipe: %v", err)
}
c.ExtraFiles = append(c.ExtraFiles, w)
go func() {
defer close(ready)
if _, _err := r.Read(make([]byte, 1)); _err != nil {
panic(_err)
}
}()
if err = c.Start(); err != nil {
if m, ok := container.InternalMessageFromError(err); ok {
t.Fatal(m)
} else {
t.Fatalf("cannot start container: %v", err)
}
}
done := make(chan struct{})
go func() {
defer close(done)
waitErr = c.Wait()
_ = r.SetReadDeadline(time.Now())
}()
if err = c.Serve(); err != nil {
if m, ok := container.InternalMessageFromError(err); ok {
t.Error(m)
} else {
t.Errorf("cannot serve setup params: %v", err)
}
}
<-ready
cancel()
<-done
waitCheck(c.ProcessState(), waitErr)
}
func TestForward(t *testing.T) {
t.Parallel()
f := func(ps *os.ProcessState, waitErr error) {
var exitError *exec.ExitError
if !errors.As(waitErr, &exitError) {
if m, ok := container.InternalMessageFromError(waitErr); ok {
t.Error(m)
}
t.Errorf("Wait: error = %v", waitErr)
}
if code := exitError.ExitCode(); code != blockExitCodeInterrupt {
t.Errorf("ExitCode: %d, want %d", code, blockExitCodeInterrupt)
}
}
t.Run("direct", func(t *testing.T) {
waitCheck func(t *testing.T, c *container.Container),
) func(t *testing.T) {
return func(t *testing.T) {
t.Parallel()
testContainerCancel(t, func(c *container.Container) {
c.ForwardCancel = true
}, f)
})
t.Run("as root", func(t *testing.T) {
testContainerCancel(t, func(c *container.Container) {
c.ForwardCancel = true
c.InitAsRoot = true
c.Proc(fhs.AbsProc)
}, f)
})
}
ctx, cancel := context.WithCancel(t.Context())
func TestCancel(t *testing.T) {
t.Parallel()
c := helperNewContainer(ctx, "block")
c.Stdout, c.Stderr = os.Stdout, os.Stderr
if containerExtra != nil {
containerExtra(c)
}
f := func(ps *os.ProcessState, waitErr error) {
wantErr := context.Canceled
if !reflect.DeepEqual(waitErr, wantErr) {
if m, ok := container.InternalMessageFromError(waitErr); ok {
t.Error(m)
ready := make(chan struct{})
if r, w, err := os.Pipe(); err != nil {
t.Fatalf("cannot pipe: %v", err)
} else {
c.ExtraFiles = append(c.ExtraFiles, w)
go func() {
defer close(ready)
if _, err = r.Read(make([]byte, 1)); err != nil {
panic(err.Error())
}
}()
}
if err := c.Start(); err != nil {
if m, ok := container.InternalMessageFromError(err); ok {
t.Fatal(m)
} else {
t.Fatalf("cannot start container: %v", err)
}
} else if err = c.Serve(); err != nil {
if m, ok := container.InternalMessageFromError(err); ok {
t.Error(m)
} else {
t.Errorf("cannot serve setup params: %v", err)
}
t.Errorf("Wait: error = %#v, want %#v", waitErr, wantErr)
}
if ps == nil {
t.Errorf("ProcessState unexpectedly returned nil")
} else if code := ps.ExitCode(); code != 0 {
t.Errorf("ExitCode: %d, want %d", code, 0)
}
<-ready
cancel()
waitCheck(t, c)
}
t.Run("direct", func(t *testing.T) {
t.Parallel()
testContainerCancel(t, nil, f)
})
t.Run("as root", func(t *testing.T) {
testContainerCancel(t, func(c *container.Container) {
c.InitAsRoot = true
c.Proc(fhs.AbsProc)
}, f)
})
}
func TestContainerString(t *testing.T) {
@@ -693,8 +633,6 @@ func init() {
})
c.Command("container", command.UsageInternal, func(args []string) error {
asRoot := os.Getenv("HAKUREI_TEST_SUFFIX") == " as root"
if len(args) != 1 {
return syscall.EINVAL
}
@@ -712,66 +650,6 @@ func init() {
return fmt.Errorf("gid: %d, want %d", gid, tc.gid)
}
// no attack surface increase during as root due to no_new_privs
var wantBounding uintptr = 1
asRootNot := " not"
if !asRoot {
wantBounding = 0
asRootNot = ""
}
const (
PR_CAP_AMBIENT = 0x2f
PR_CAP_AMBIENT_IS_SET = 0x1
)
for i := range container.LastCap(nil) + 1 {
r, _, errno := syscall.Syscall(
syscall.SYS_PRCTL,
PR_CAP_AMBIENT,
PR_CAP_AMBIENT_IS_SET,
i,
)
if errno != 0 {
return os.NewSyscallError("prctl", errno)
}
if r != 0 {
return fmt.Errorf("capability %d in ambient set", i)
}
r, _, errno = syscall.Syscall(
syscall.SYS_PRCTL,
syscall.PR_CAPBSET_READ,
i,
0,
)
if errno != 0 {
return os.NewSyscallError("prctl", errno)
}
if r != wantBounding {
return fmt.Errorf("capability %d%s in bounding set", i, asRootNot)
}
}
const _LINUX_CAPABILITY_VERSION_3 = 0x20080522
var capData struct {
effective uint32
permitted uint32
inheritable uint32
}
if _, _, errno := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(&struct {
version uint32
pid int32
}{_LINUX_CAPABILITY_VERSION_3, 0})), uintptr(unsafe.Pointer(&capData)), 0); errno != 0 {
return os.NewSyscallError("capget", errno)
}
if max(capData.effective, capData.permitted, capData.inheritable) != 0 {
return fmt.Errorf(
"effective = %d, permitted = %d, inheritable = %d",
capData.effective, capData.permitted, capData.inheritable,
)
}
wantHost := hostnameFromTestCase(tc.name)
if host, err := os.Hostname(); err != nil {
return fmt.Errorf("cannot get hostname: %v", err)
@@ -889,7 +767,7 @@ func TestMain(m *testing.M) {
}
c.MustParse(os.Args[1:], func(err error) {
if err != nil {
log.Fatal(err)
log.Fatal(err.Error())
}
})
return

View File

@@ -65,8 +65,6 @@ type syscallDispatcher interface {
remount(msg message.Msg, target string, flags uintptr) error
// mountTmpfs provides mountTmpfs.
mountTmpfs(fsname, target string, flags uintptr, size int, perm os.FileMode) error
// mountOverlay provides mountOverlay.
mountOverlay(target string, options [][2]string) error
// ensureFile provides ensureFile.
ensureFile(name string, perm, pperm os.FileMode) error
// mustLoopback provides mustLoopback.
@@ -171,9 +169,6 @@ func (direct) remount(msg message.Msg, target string, flags uintptr) error {
func (k direct) mountTmpfs(fsname, target string, flags uintptr, size int, perm os.FileMode) error {
return mountTmpfs(k, fsname, target, flags, size, perm)
}
func (k direct) mountOverlay(target string, options [][2]string) error {
return mountOverlay(target, options)
}
func (direct) ensureFile(name string, perm, pperm os.FileMode) error {
return ensureFile(name, perm, pperm)
}

View File

@@ -468,14 +468,6 @@ func (k *kstub) mountTmpfs(fsname, target string, flags uintptr, size int, perm
stub.CheckArg(k.Stub, "perm", perm, 4))
}
func (k *kstub) mountOverlay(target string, options [][2]string) error {
k.Helper()
return k.Expects("mountOverlay").Error(
stub.CheckArg(k.Stub, "target", target, 0),
stub.CheckArgReflect(k.Stub, "options", options, 1),
)
}
func (k *kstub) ensureFile(name string, perm, pperm os.FileMode) error {
k.Helper()
return k.Expects("ensureFile").Error(

View File

@@ -118,10 +118,6 @@ func errnoFallback(op, path string, err error) (syscall.Errno, *os.PathError) {
// mount wraps syscall.Mount for error handling.
func mount(source, target, fstype string, flags uintptr, data string) error {
if max(len(source), len(target), len(data))+1 > os.Getpagesize() {
return &MountError{source, target, fstype, flags, data, syscall.ENOMEM}
}
err := syscall.Mount(source, target, fstype, flags, data)
if err == nil {
return nil

View File

@@ -11,13 +11,11 @@ import (
"path/filepath"
"slices"
"strconv"
"strings"
"sync"
"sync/atomic"
. "syscall"
"time"
"hakurei.app/check"
"hakurei.app/container/seccomp"
"hakurei.app/ext"
"hakurei.app/fhs"
@@ -184,33 +182,23 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
cancel()
}
uid, gid := param.Uid, param.Gid
if param.InitAsRoot {
uid, gid = 0, 0
}
// write uid/gid map here so parent does not need to set dumpable
if err := k.setDumpable(ext.SUID_DUMP_USER); err != nil {
k.fatalf(msg, "cannot set SUID_DUMP_USER: %v", err)
}
if err := k.writeFile(
fhs.Proc+"self/uid_map",
[]byte(strconv.Itoa(uid)+" "+strconv.Itoa(param.HostUid)+" 1\n"),
0,
); err != nil {
if err := k.writeFile(fhs.Proc+"self/uid_map",
append([]byte{}, strconv.Itoa(param.Uid)+" "+strconv.Itoa(param.HostUid)+" 1\n"...),
0); err != nil {
k.fatalf(msg, "%v", err)
}
if err := k.writeFile(
fhs.Proc+"self/setgroups",
if err := k.writeFile(fhs.Proc+"self/setgroups",
[]byte("deny\n"),
0,
); err != nil && !os.IsNotExist(err) {
0); err != nil && !os.IsNotExist(err) {
k.fatalf(msg, "%v", err)
}
if err := k.writeFile(fhs.Proc+"self/gid_map",
[]byte(strconv.Itoa(gid)+" "+strconv.Itoa(param.HostGid)+" 1\n"),
0,
); err != nil {
append([]byte{}, strconv.Itoa(param.Gid)+" "+strconv.Itoa(param.HostGid)+" 1\n"...),
0); err != nil {
k.fatalf(msg, "%v", err)
}
if err := k.setDumpable(ext.SUID_DUMP_DISABLE); err != nil {
@@ -235,23 +223,6 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
state := &setupState{process: make(map[int]WaitStatus), Params: &param.Params, Msg: msg, Context: ctx}
defer cancel()
if err := k.mount(SourceTmpfsRootfs, intermediateHostPath, FstypeTmpfs, MS_NODEV|MS_NOSUID, zeroString); err != nil {
k.fatalf(msg, "cannot mount intermediate root: %v", optionalErrorUnwrap(err))
}
if err := k.chdir(intermediateHostPath); err != nil {
k.fatalf(msg, "cannot enter intermediate host path: %v", err)
}
if len(param.Binfmt) > 0 {
for i, e := range param.Binfmt {
if pathname, err := k.evalSymlinks(e.Interpreter.String()); err != nil {
k.fatal(msg, err)
} else if param.Binfmt[i].Interpreter, err = check.NewAbs(pathname); err != nil {
k.fatal(msg, err)
}
}
}
/* early is called right before pivot_root into intermediate root;
this step is mostly for gathering information that would otherwise be
difficult to obtain via library functions after pivot_root, and
@@ -271,6 +242,13 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
}
}
if err := k.mount(SourceTmpfsRootfs, intermediateHostPath, FstypeTmpfs, MS_NODEV|MS_NOSUID, zeroString); err != nil {
k.fatalf(msg, "cannot mount intermediate root: %v", optionalErrorUnwrap(err))
}
if err := k.chdir(intermediateHostPath); err != nil {
k.fatalf(msg, "cannot enter intermediate host path: %v", err)
}
if err := k.mkdir(sysrootDir, 0755); err != nil {
k.fatalf(msg, "%v", err)
}
@@ -307,48 +285,6 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
}
}
if len(param.Binfmt) > 0 {
const interpreter = "/interpreter"
if param.BinfmtPath == nil {
param.BinfmtPath = fhs.AbsProcSys.Append("fs/binfmt_misc")
}
binfmt := sysrootPath + param.BinfmtPath.String()
if err := k.mkdirAll(binfmt, 0); err != nil {
k.fatal(msg, err)
}
if err := k.mount(
SourceBinfmtMisc,
binfmt,
FstypeBinfmtMisc,
MS_NOSUID|MS_NOEXEC|MS_NODEV,
zeroString,
); err != nil {
k.fatal(msg, err)
}
var buf strings.Builder
buf.Grow(1920)
register := binfmt + "/register"
for i, e := range param.Binfmt {
if err := k.symlink(hostPath+e.Interpreter.String(), interpreter); err != nil {
k.fatal(msg, err)
} else if err = k.writeFile(register, []byte(":"+
strconv.Itoa(i)+":"+
"M:"+
strconv.Itoa(int(e.Offset))+":"+
escapeBinfmt(&buf, e.Magic)+":"+
escapeBinfmt(&buf, e.Mask)+":"+
interpreter+":"+
"F"), 0); err != nil {
k.fatal(msg, err)
} else if err = k.remove(interpreter); err != nil {
k.fatal(msg, err)
}
}
}
// setup requiring host root complete at this point
if err := k.mount(hostDir, hostDir, zeroString, MS_SILENT|MS_REC|MS_PRIVATE, zeroString); err != nil {
k.fatalf(msg, "cannot make host root rprivate: %v", optionalErrorUnwrap(err))
@@ -387,19 +323,11 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
}
}
var keepCaps []uintptr
if param.Privileged {
keepCaps = append(keepCaps, CAP_SYS_ADMIN, CAP_SETPCAP)
}
if param.InitAsRoot {
keepCaps = append(keepCaps, CAP_SETFCAP)
}
if err := k.capAmbientClearAll(); err != nil {
k.fatalf(msg, "cannot clear the ambient capability set: %v", err)
}
for i := range lastcap + 1 {
if slices.Contains(keepCaps, i) {
for i := uintptr(0); i <= lastcap; i++ {
if param.Privileged && i == CAP_SYS_ADMIN {
continue
}
if err := k.capBoundingSetDrop(i); err != nil {
@@ -408,23 +336,20 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
}
var keep [2]uint32
for _, c := range keepCaps {
keep[capToIndex(c)] |= capToMask(c)
}
if param.Privileged {
keep[capToIndex(CAP_SYS_ADMIN)] |= capToMask(CAP_SYS_ADMIN)
if err := k.capAmbientRaise(CAP_SYS_ADMIN); err != nil {
k.fatalf(msg, "cannot raise CAP_SYS_ADMIN: %v", err)
}
}
if err := k.capset(
&capHeader{_LINUX_CAPABILITY_VERSION_3, 0},
&[2]capData{{keep[0], keep[0], keep[0]}, {keep[1], keep[1], keep[1]}},
&[2]capData{{0, keep[0], keep[0]}, {0, keep[1], keep[1]}},
); err != nil {
k.fatalf(msg, "cannot capset: %v", err)
}
for _, c := range keepCaps {
if err := k.capAmbientRaise(c); err != nil {
k.fatalf(msg, "cannot raise %#x: %v", c, err)
}
}
if !param.SeccompDisable {
rules := param.SeccompRules
if len(rules) == 0 { // non-empty rules slice always overrides presets
@@ -549,14 +474,6 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
cmd.ExtraFiles = extraFiles
cmd.Dir = param.Dir.String()
if param.InitAsRoot {
cmd.SysProcAttr = &SysProcAttr{
Cloneflags: CLONE_NEWUSER,
UidMappings: []SysProcIDMap{{ContainerID: param.Uid, HostID: 0, Size: 1}},
GidMappings: []SysProcIDMap{{ContainerID: param.Gid, HostID: 0, Size: 1}},
}
}
msg.Verbosef("starting initial process %s", param.Path)
if err := k.start(cmd); err != nil {
k.fatalf(msg, "%v", err)

View File

@@ -332,8 +332,6 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("fatalf", stub.ExpectArgs{"invalid op at index %d", []any{0}}, nil, nil),
/* end early */
@@ -372,8 +370,6 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("fatalf", stub.ExpectArgs{"invalid op at index %d", []any{0}}, nil, nil),
/* end early */
@@ -412,8 +408,6 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", stub.UniqueError(61)),
call("fatalf", stub.ExpectArgs{"cannot prepare op at index %d: %v", []any{0, stub.UniqueError(61)}}, nil, nil),
@@ -453,8 +447,6 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", &os.PathError{Op: "readlink", Path: "/", Err: stub.UniqueError(60)}),
call("fatal", stub.ExpectArgs{[]any{"cannot readlink /: unique error 60 injected by the test suite"}}, nil, nil),
@@ -494,6 +486,9 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, stub.UniqueError(58)),
call("fatalf", stub.ExpectArgs{"cannot mount intermediate root: %v", []any{stub.UniqueError(58)}}, nil, nil),
},
@@ -531,6 +526,9 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, stub.UniqueError(56)),
call("fatalf", stub.ExpectArgs{"cannot enter intermediate host path: %v", []any{stub.UniqueError(56)}}, nil, nil),
@@ -569,11 +567,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, stub.UniqueError(54)),
call("fatalf", stub.ExpectArgs{"%v", []any{stub.UniqueError(54)}}, nil, nil),
},
@@ -611,11 +609,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, stub.UniqueError(52)),
call("fatalf", stub.ExpectArgs{"cannot bind sysroot: %v", []any{stub.UniqueError(52)}}, nil, nil),
@@ -654,11 +652,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, stub.UniqueError(50)),
@@ -698,11 +696,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -743,11 +741,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -789,11 +787,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -844,11 +842,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -899,11 +897,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -955,11 +953,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1012,11 +1010,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1071,11 +1069,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1131,11 +1129,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1192,11 +1190,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1254,11 +1252,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1317,11 +1315,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1381,11 +1379,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1446,11 +1444,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1512,11 +1510,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1586,11 +1584,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1624,6 +1622,7 @@ func TestInitEntrypoint(t *testing.T) {
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x5)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x6)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x7)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x8)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x9)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xa)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xb)}, nil, nil),
@@ -1655,9 +1654,8 @@ func TestInitEntrypoint(t *testing.T) {
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x26)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x27)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x28)}, nil, nil),
call("capset", stub.ExpectArgs{&capHeader{_LINUX_CAPABILITY_VERSION_3, 0}, &[2]capData{{0x200100, 0x200100, 0x200100}, {0, 0, 0}}}, nil, nil),
call("capAmbientRaise", stub.ExpectArgs{uintptr(0x15)}, nil, stub.UniqueError(19)),
call("fatalf", stub.ExpectArgs{"cannot raise %#x: %v", []any{uintptr(0x15), stub.UniqueError(19)}}, nil, nil),
call("fatalf", stub.ExpectArgs{"cannot raise CAP_SYS_ADMIN: %v", []any{stub.UniqueError(19)}}, nil, nil),
},
}, nil},
@@ -1693,11 +1691,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1731,6 +1729,7 @@ func TestInitEntrypoint(t *testing.T) {
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x5)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x6)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x7)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x8)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x9)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xa)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xb)}, nil, nil),
@@ -1762,7 +1761,8 @@ func TestInitEntrypoint(t *testing.T) {
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x26)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x27)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x28)}, nil, nil),
call("capset", stub.ExpectArgs{&capHeader{_LINUX_CAPABILITY_VERSION_3, 0}, &[2]capData{{0x200100, 0x200100, 0x200100}, {0, 0, 0}}}, nil, stub.UniqueError(17)),
call("capAmbientRaise", stub.ExpectArgs{uintptr(0x15)}, nil, nil),
call("capset", stub.ExpectArgs{&capHeader{_LINUX_CAPABILITY_VERSION_3, 0}, &[2]capData{{0, 0x200000, 0x200000}, {0, 0, 0}}}, nil, stub.UniqueError(17)),
call("fatalf", stub.ExpectArgs{"cannot capset: %v", []any{stub.UniqueError(17)}}, nil, nil),
},
}, nil},
@@ -1799,11 +1799,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1837,6 +1837,7 @@ func TestInitEntrypoint(t *testing.T) {
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x5)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x6)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x7)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x8)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x9)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xa)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xb)}, nil, nil),
@@ -1868,9 +1869,8 @@ func TestInitEntrypoint(t *testing.T) {
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x26)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x27)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x28)}, nil, nil),
call("capset", stub.ExpectArgs{&capHeader{_LINUX_CAPABILITY_VERSION_3, 0}, &[2]capData{{0x200100, 0x200100, 0x200100}, {0, 0, 0}}}, nil, nil),
call("capAmbientRaise", stub.ExpectArgs{uintptr(0x15)}, nil, nil),
call("capAmbientRaise", stub.ExpectArgs{uintptr(0x8)}, nil, nil),
call("capset", stub.ExpectArgs{&capHeader{_LINUX_CAPABILITY_VERSION_3, 0}, &[2]capData{{0, 0x200000, 0x200000}, {0, 0, 0}}}, nil, nil),
call("verbosef", stub.ExpectArgs{"resolving presets %#x", []any{std.FilterPreset(0xf)}}, nil, nil),
call("seccompLoad", stub.ExpectArgs{seccomp.Preset(0xf, 0), seccomp.ExportFlag(0)}, nil, stub.UniqueError(15)),
call("fatalf", stub.ExpectArgs{"cannot load syscall filter: %v", []any{stub.UniqueError(15)}}, nil, nil),
@@ -1908,11 +1908,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -2032,11 +2032,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(4), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -2132,11 +2132,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(4), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -2232,11 +2232,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(4), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -2323,11 +2323,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(4), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -2418,11 +2418,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(4), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -2520,11 +2520,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -2659,11 +2659,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -2697,6 +2697,7 @@ func TestInitEntrypoint(t *testing.T) {
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x5)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x6)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x7)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x8)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x9)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xa)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xb)}, nil, nil),
@@ -2728,9 +2729,8 @@ func TestInitEntrypoint(t *testing.T) {
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x26)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x27)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x28)}, nil, nil),
call("capset", stub.ExpectArgs{&capHeader{_LINUX_CAPABILITY_VERSION_3, 0}, &[2]capData{{0x200100, 0x200100, 0x200100}, {0, 0, 0}}}, nil, nil),
call("capAmbientRaise", stub.ExpectArgs{uintptr(0x15)}, nil, nil),
call("capAmbientRaise", stub.ExpectArgs{uintptr(0x8)}, nil, nil),
call("capset", stub.ExpectArgs{&capHeader{_LINUX_CAPABILITY_VERSION_3, 0}, &[2]capData{{0, 0x200000, 0x200000}, {0, 0, 0}}}, nil, nil),
call("verbosef", stub.ExpectArgs{"resolving presets %#x", []any{std.FilterPreset(0xf)}}, nil, nil),
call("seccompLoad", stub.ExpectArgs{seccomp.Preset(0xf, 0), seccomp.ExportFlag(0)}, nil, nil),
call("verbosef", stub.ExpectArgs{"%d filter rules loaded", []any{73}}, nil, nil),

View File

@@ -4,9 +4,9 @@ import (
"encoding/gob"
"fmt"
"slices"
"strings"
"hakurei.app/check"
"hakurei.app/ext"
"hakurei.app/fhs"
)
@@ -150,7 +150,7 @@ func (o *MountOverlayOp) early(_ *setupState, k syscallDispatcher) error {
if v, err := k.evalSymlinks(o.Upper.String()); err != nil {
return err
} else {
o.upper = toHost(v)
o.upper = check.EscapeOverlayDataSegment(toHost(v))
}
}
@@ -158,7 +158,7 @@ func (o *MountOverlayOp) early(_ *setupState, k syscallDispatcher) error {
if v, err := k.evalSymlinks(o.Work.String()); err != nil {
return err
} else {
o.work = toHost(v)
o.work = check.EscapeOverlayDataSegment(toHost(v))
}
}
}
@@ -168,39 +168,12 @@ func (o *MountOverlayOp) early(_ *setupState, k syscallDispatcher) error {
if v, err := k.evalSymlinks(a.String()); err != nil {
return err
} else {
o.lower[i] = toHost(v)
o.lower[i] = check.EscapeOverlayDataSegment(toHost(v))
}
}
return nil
}
// mountOverlay sets up an overlay mount via [ext.FS].
func mountOverlay(target string, options [][2]string) error {
fs, err := ext.OpenFS(SourceOverlay, 0)
if err != nil {
return err
}
if err = fs.SetString("source", SourceOverlay); err != nil {
_ = fs.Close()
return err
}
for _, option := range options {
if err = fs.SetString(option[0], option[1]); err != nil {
_ = fs.Close()
return err
}
}
if err = fs.SetFlag(OptionOverlayUserxattr); err != nil {
_ = fs.Close()
return err
}
if err = fs.Mount(target, 0); err != nil {
_ = fs.Close()
return err
}
return fs.Close()
}
func (o *MountOverlayOp) apply(state *setupState, k syscallDispatcher) error {
target := o.Target.String()
if !o.noPrefix {
@@ -221,7 +194,7 @@ func (o *MountOverlayOp) apply(state *setupState, k syscallDispatcher) error {
}
}
options := make([][2]string, 0, 2+len(o.lower))
options := make([]string, 0, 4)
if o.upper == zeroString && o.work == zeroString { // readonly
if len(o.Lower) < 2 {
@@ -232,16 +205,15 @@ func (o *MountOverlayOp) apply(state *setupState, k syscallDispatcher) error {
if len(o.Lower) == 0 {
return &OverlayArgumentError{OverlayEmptyLower, zeroString}
}
options = append(options, [][2]string{
{OptionOverlayUpperdir, o.upper},
{OptionOverlayWorkdir, o.work},
}...)
}
for _, lower := range o.lower {
options = append(options, [2]string{OptionOverlayLowerdir + "+", lower})
options = append(options,
OptionOverlayUpperdir+"="+o.upper,
OptionOverlayWorkdir+"="+o.work)
}
options = append(options,
OptionOverlayLowerdir+"="+strings.Join(o.lower, check.SpecialOverlayPath),
OptionOverlayUserxattr)
return k.mountOverlay(target, options)
return k.mount(SourceOverlay, target, FstypeOverlay, 0, strings.Join(options, check.SpecialOverlayOption))
}
func (o *MountOverlayOp) late(*setupState, syscallDispatcher) error { return nil }

View File

@@ -97,12 +97,13 @@ func TestMountOverlayOp(t *testing.T) {
call("mkdirAll", stub.ExpectArgs{"/sysroot", os.FileMode(0705)}, nil, nil),
call("mkdirTemp", stub.ExpectArgs{"/", "overlay.upper.*"}, "overlay.upper.32768", nil),
call("mkdirTemp", stub.ExpectArgs{"/", "overlay.work.*"}, "overlay.work.32768", nil),
call("mountOverlay", stub.ExpectArgs{"/sysroot", [][2]string{
{"upperdir", "overlay.upper.32768"},
{"workdir", "overlay.work.32768"},
{"lowerdir+", `/host/var/lib/planterette/base/debian:f92c9052`},
{"lowerdir+", `/host/var/lib/planterette/app/org.chromium.Chromium@debian:f92c9052`},
}}, nil, nil),
call("mount", stub.ExpectArgs{"overlay", "/sysroot", "overlay", uintptr(0), "" +
"upperdir=overlay.upper.32768," +
"workdir=overlay.work.32768," +
"lowerdir=" +
`/host/var/lib/planterette/base/debian\:f92c9052:` +
`/host/var/lib/planterette/app/org.chromium.Chromium@debian\:f92c9052,` +
"userxattr"}, nil, nil),
}, nil},
{"short lower ro", &Params{ParentPerm: 0755}, &MountOverlayOp{
@@ -128,10 +129,11 @@ func TestMountOverlayOp(t *testing.T) {
call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store0"}, "/mnt-root/nix/.ro-store0", nil),
}, nil, []stub.Call{
call("mkdirAll", stub.ExpectArgs{"/nix/store", os.FileMode(0755)}, nil, nil),
call("mountOverlay", stub.ExpectArgs{"/nix/store", [][2]string{
{"lowerdir+", "/host/mnt-root/nix/.ro-store"},
{"lowerdir+", "/host/mnt-root/nix/.ro-store0"},
}}, nil, nil),
call("mount", stub.ExpectArgs{"overlay", "/nix/store", "overlay", uintptr(0), "" +
"lowerdir=" +
"/host/mnt-root/nix/.ro-store:" +
"/host/mnt-root/nix/.ro-store0," +
"userxattr"}, nil, nil),
}, nil},
{"success ro", &Params{ParentPerm: 0755}, &MountOverlayOp{
@@ -145,10 +147,11 @@ func TestMountOverlayOp(t *testing.T) {
call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store0"}, "/mnt-root/nix/.ro-store0", nil),
}, nil, []stub.Call{
call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0755)}, nil, nil),
call("mountOverlay", stub.ExpectArgs{"/sysroot/nix/store", [][2]string{
{"lowerdir+", "/host/mnt-root/nix/.ro-store"},
{"lowerdir+", "/host/mnt-root/nix/.ro-store0"},
}}, nil, nil),
call("mount", stub.ExpectArgs{"overlay", "/sysroot/nix/store", "overlay", uintptr(0), "" +
"lowerdir=" +
"/host/mnt-root/nix/.ro-store:" +
"/host/mnt-root/nix/.ro-store0," +
"userxattr"}, nil, nil),
}, nil},
{"nil lower", &Params{ParentPerm: 0700}, &MountOverlayOp{
@@ -216,11 +219,7 @@ func TestMountOverlayOp(t *testing.T) {
call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store"}, "/mnt-root/nix/ro-store", nil),
}, nil, []stub.Call{
call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0700)}, nil, nil),
call("mountOverlay", stub.ExpectArgs{"/sysroot/nix/store", [][2]string{
{"upperdir", "/host/mnt-root/nix/.rw-store/.upper"},
{"workdir", "/host/mnt-root/nix/.rw-store/.work"},
{"lowerdir+", "/host/mnt-root/nix/ro-store"},
}}, nil, stub.UniqueError(0)),
call("mount", stub.ExpectArgs{"overlay", "/sysroot/nix/store", "overlay", uintptr(0), "upperdir=/host/mnt-root/nix/.rw-store/.upper,workdir=/host/mnt-root/nix/.rw-store/.work,lowerdir=/host/mnt-root/nix/ro-store,userxattr"}, nil, stub.UniqueError(0)),
}, stub.UniqueError(0)},
{"success single layer", &Params{ParentPerm: 0700}, &MountOverlayOp{
@@ -234,11 +233,11 @@ func TestMountOverlayOp(t *testing.T) {
call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store"}, "/mnt-root/nix/ro-store", nil),
}, nil, []stub.Call{
call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0700)}, nil, nil),
call("mountOverlay", stub.ExpectArgs{"/sysroot/nix/store", [][2]string{
{"upperdir", "/host/mnt-root/nix/.rw-store/.upper"},
{"workdir", "/host/mnt-root/nix/.rw-store/.work"},
{"lowerdir+", "/host/mnt-root/nix/ro-store"},
}}, nil, nil),
call("mount", stub.ExpectArgs{"overlay", "/sysroot/nix/store", "overlay", uintptr(0), "" +
"upperdir=/host/mnt-root/nix/.rw-store/.upper," +
"workdir=/host/mnt-root/nix/.rw-store/.work," +
"lowerdir=/host/mnt-root/nix/ro-store," +
"userxattr"}, nil, nil),
}, nil},
{"success", &Params{ParentPerm: 0700}, &MountOverlayOp{
@@ -262,15 +261,16 @@ func TestMountOverlayOp(t *testing.T) {
call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store3"}, "/mnt-root/nix/ro-store3", nil),
}, nil, []stub.Call{
call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0700)}, nil, nil),
call("mountOverlay", stub.ExpectArgs{"/sysroot/nix/store", [][2]string{
{"upperdir", "/host/mnt-root/nix/.rw-store/.upper"},
{"workdir", "/host/mnt-root/nix/.rw-store/.work"},
{"lowerdir+", "/host/mnt-root/nix/ro-store"},
{"lowerdir+", "/host/mnt-root/nix/ro-store0"},
{"lowerdir+", "/host/mnt-root/nix/ro-store1"},
{"lowerdir+", "/host/mnt-root/nix/ro-store2"},
{"lowerdir+", "/host/mnt-root/nix/ro-store3"},
}}, nil, nil),
call("mount", stub.ExpectArgs{"overlay", "/sysroot/nix/store", "overlay", uintptr(0), "" +
"upperdir=/host/mnt-root/nix/.rw-store/.upper," +
"workdir=/host/mnt-root/nix/.rw-store/.work," +
"lowerdir=" +
"/host/mnt-root/nix/ro-store:" +
"/host/mnt-root/nix/ro-store0:" +
"/host/mnt-root/nix/ro-store1:" +
"/host/mnt-root/nix/ro-store2:" +
"/host/mnt-root/nix/ro-store3," +
"userxattr"}, nil, nil),
}, nil},
})

View File

@@ -40,9 +40,6 @@ const (
// SourceMqueue is used when mounting mqueue.
// Note that any source value is allowed when fstype is [FstypeMqueue].
SourceMqueue = "mqueue"
// SourceBinfmtMisc is used when mounting binfmt_misc.
// Note that any source value is allowed when fstype is [SourceBinfmtMisc].
SourceBinfmtMisc = "binfmt_misc"
// SourceOverlay is used when mounting overlay.
// Note that any source value is allowed when fstype is [FstypeOverlay].
SourceOverlay = "overlay"
@@ -73,9 +70,6 @@ const (
// FstypeMqueue represents the mqueue pseudo-filesystem.
// This filesystem type is usually mounted on /dev/mqueue.
FstypeMqueue = "mqueue"
// FstypeBinfmtMisc represents the binfmt_misc pseudo-filesystem.
// This filesystem type is usually mounted on /proc/sys/fs/binfmt_misc.
FstypeBinfmtMisc = "binfmt_misc"
// FstypeOverlay represents the overlay pseudo-filesystem.
// This filesystem type can be mounted anywhere in the container filesystem.
FstypeOverlay = "overlay"

View File

@@ -10,6 +10,7 @@ import (
"testing"
"unsafe"
"hakurei.app/check"
"hakurei.app/vfs"
)
@@ -49,6 +50,9 @@ func TestToHost(t *testing.T) {
}
}
// InternalToHostOvlEscape exports toHost passed to [check.EscapeOverlayDataSegment].
func InternalToHostOvlEscape(s string) string { return check.EscapeOverlayDataSegment(toHost(s)) }
func TestCreateFile(t *testing.T) {
t.Run("nonexistent", func(t *testing.T) {
t.Run("mkdir", func(t *testing.T) {

267
ext/fs.go
View File

@@ -1,267 +0,0 @@
package ext
import (
"os"
"runtime"
"syscall"
"unsafe"
)
// include/uapi/linux/mount.h
// Constant values below mirror the kernel uapi header verbatim; keep them in
// sync with include/uapi/linux/mount.h rather than editing independently.
/*
 * move_mount() flags.
 */
const (
	MOVE_MOUNT_F_SYMLINKS = 1 << iota /* Follow symlinks on from path */
	MOVE_MOUNT_F_AUTOMOUNTS           /* Follow automounts on from path */
	MOVE_MOUNT_F_EMPTY_PATH           /* Empty from path permitted */
	_                                 // bit 3 is reserved in the kernel header
	MOVE_MOUNT_T_SYMLINKS             /* Follow symlinks on to path */
	MOVE_MOUNT_T_AUTOMOUNTS           /* Follow automounts on to path */
	MOVE_MOUNT_T_EMPTY_PATH           /* Empty to path permitted */
	_                                 // bit 7 is reserved in the kernel header
	MOVE_MOUNT_SET_GROUP              /* Set sharing group instead */
	MOVE_MOUNT_BENEATH                /* Mount beneath top mount */
)

/*
 * fsopen() flags.
 */
const (
	FSOPEN_CLOEXEC = 1 << iota
)

/*
 * fspick() flags.
 */
const (
	FSPICK_CLOEXEC = 1 << iota
	FSPICK_SYMLINK_NOFOLLOW
	FSPICK_NO_AUTOMOUNT
	FSPICK_EMPTY_PATH
)

/*
 * The type of fsconfig() call made.
 */
const (
	FSCONFIG_SET_FLAG        = iota /* Set parameter, supplying no value */
	FSCONFIG_SET_STRING             /* Set parameter, supplying a string value */
	FSCONFIG_SET_BINARY             /* Set parameter, supplying a binary blob value */
	FSCONFIG_SET_PATH               /* Set parameter, supplying an object by path */
	FSCONFIG_SET_PATH_EMPTY         /* Set parameter, supplying an object by (empty) path */
	FSCONFIG_SET_FD                 /* Set parameter, supplying an object by fd */
	FSCONFIG_CMD_CREATE             /* Create new or reuse existing superblock */
	FSCONFIG_CMD_RECONFIGURE        /* Invoke superblock reconfiguration */
	FSCONFIG_CMD_CREATE_EXCL        /* Create new superblock, fail if reusing existing superblock */
)

/*
 * fsmount() flags.
 */
const (
	FSMOUNT_CLOEXEC = 1 << iota
)

/*
 * Mount attributes.
 */
const (
	MOUNT_ATTR_RDONLY      = 0x00000001 /* Mount read-only */
	MOUNT_ATTR_NOSUID      = 0x00000002 /* Ignore suid and sgid bits */
	MOUNT_ATTR_NODEV       = 0x00000004 /* Disallow access to device special files */
	MOUNT_ATTR_NOEXEC      = 0x00000008 /* Disallow program execution */
	MOUNT_ATTR__ATIME      = 0x00000070 /* Setting on how atime should be updated */
	MOUNT_ATTR_RELATIME    = 0x00000000 /* - Update atime relative to mtime/ctime. */
	MOUNT_ATTR_NOATIME     = 0x00000010 /* - Do not update access times. */
	MOUNT_ATTR_STRICTATIME = 0x00000020 /* - Always perform atime updates */
	MOUNT_ATTR_NODIRATIME  = 0x00000080 /* Do not update directory access times */
	MOUNT_ATTR_IDMAP       = 0x00100000 /* Idmap mount to @userns_fd in struct mount_attr. */
	MOUNT_ATTR_NOSYMFOLLOW = 0x00200000 /* Do not follow symlinks */
)
// FS provides low-level wrappers around the suite of file-descriptor-based
// mount facilities in Linux.
type FS struct {
	// fd is the filesystem context file descriptor obtained from
	// fsopen(2) or fspick(2).
	fd uintptr
	// c closes fd if the FS becomes unreachable without Close having been
	// called; Close stops it to prevent a double close.
	c runtime.Cleanup
}
// newFS wraps fd in a freshly allocated [FS] and registers a cleanup that
// closes the descriptor should the FS leak without an explicit Close.
func newFS(fd uintptr) *FS {
	f := &FS{fd: fd}
	// The cleanup receives the raw fd, not the FS, so it stays runnable
	// once the object itself is unreachable.
	f.c = runtime.AddCleanup(f, func(raw uintptr) {
		_ = syscall.Close(int(raw))
	}, fd)
	return f
}
// Close closes the underlying filesystem context.
// Calling Close on a nil FS returns EINVAL.
func (fs *FS) Close() error {
	if fs == nil {
		return syscall.EINVAL
	}
	// Close first, then cancel the leak-protection cleanup registered in
	// newFS so the descriptor is never closed twice.
	err := syscall.Close(int(fs.fd))
	fs.c.Stop()
	return err
}
// OpenFS creates a new filesystem context for the filesystem named fsname
// via fsopen(2). FSOPEN_CLOEXEC is always ORed into flags.
func OpenFS(fsname string, flags int) (fs *FS, err error) {
	var name *byte
	if name, err = syscall.BytePtrFromString(fsname); err != nil {
		return nil, err
	}
	fd, _, errno := syscall.Syscall(
		SYS_FSOPEN,
		uintptr(unsafe.Pointer(name)),
		uintptr(flags|FSOPEN_CLOEXEC),
		0,
	)
	if errno != 0 {
		return nil, os.NewSyscallError("fsopen", errno)
	}
	return newFS(fd), nil
}
// PickFS selects an existing mounted filesystem for reconfiguration via
// fspick(2), resolving pathname relative to dirfd. FSPICK_CLOEXEC is always
// ORed into flags.
func PickFS(dirfd int, pathname string, flags int) (fs *FS, err error) {
	var path *byte
	if path, err = syscall.BytePtrFromString(pathname); err != nil {
		return nil, err
	}
	fd, _, errno := syscall.Syscall(
		SYS_FSPICK,
		uintptr(dirfd),
		uintptr(unsafe.Pointer(path)),
		uintptr(flags|FSPICK_CLOEXEC),
	)
	if errno != 0 {
		return nil, os.NewSyscallError("fspick", errno)
	}
	return newFS(fd), nil
}
// config configures a new or existing filesystem context via fsconfig(2).
//
// cmd is one of the FSCONFIG_* values; key, value and aux are interpreted by
// the kernel according to cmd. The value pointer is converted to uintptr only
// inside the Syscall expression, keeping the conversion within the pattern
// the unsafe.Pointer rules permit for syscalls.
func (fs *FS) config(cmd uint, key *byte, value unsafe.Pointer, aux int) (err error) {
	_, _, errno := syscall.Syscall6(
		SYS_FSCONFIG,
		fs.fd,
		uintptr(cmd),
		uintptr(unsafe.Pointer(key)),
		uintptr(value),
		uintptr(aux),
		0,
	)
	if errno != 0 {
		err = os.NewSyscallError("fsconfig", errno)
	}
	return
}
// SetFlag sets the flag parameter named by key. ([FSCONFIG_SET_FLAG])
func (fs *FS) SetFlag(key string) (err error) {
	var k *byte
	if k, err = syscall.BytePtrFromString(key); err != nil {
		return
	}
	return fs.config(FSCONFIG_SET_FLAG, k, nil, 0)
}
// SetString sets the string parameter named by key to the value specified by
// value. ([FSCONFIG_SET_STRING])
func (fs *FS) SetString(key, value string) (err error) {
	var k, v *byte
	if k, err = syscall.BytePtrFromString(key); err != nil {
		return
	}
	if v, err = syscall.BytePtrFromString(value); err != nil {
		return
	}
	return fs.config(FSCONFIG_SET_STRING, k, unsafe.Pointer(v), 0)
}
// mount instantiates a mount object from the filesystem context via
// fsmount(2). FSMOUNT_CLOEXEC is always ORed into flags; attrFlags carries
// MOUNT_ATTR_* values. On failure fsfd holds the raw syscall return value
// and must not be used as a descriptor.
func (fs *FS) mount(flags, attrFlags int) (fsfd int, err error) {
	r, _, errno := syscall.Syscall(
		SYS_FSMOUNT,
		fs.fd,
		uintptr(flags|FSMOUNT_CLOEXEC),
		uintptr(attrFlags),
	)
	fsfd = int(r)
	if errno != 0 {
		err = os.NewSyscallError("fsmount", errno)
	}
	return
}
// MoveMount moves or attaches a mount object within the filesystem via
// move_mount(2), resolving fromPathname relative to fromDirfd and toPathname
// relative to toDirfd, subject to the MOVE_MOUNT_* flags.
func MoveMount(
	fromDirfd int,
	fromPathname string,
	toDirfd int,
	toPathname string,
	flags int,
) (err error) {
	var from, to *byte
	if from, err = syscall.BytePtrFromString(fromPathname); err != nil {
		return
	}
	if to, err = syscall.BytePtrFromString(toPathname); err != nil {
		return
	}
	if _, _, errno := syscall.Syscall6(
		SYS_MOVE_MOUNT,
		uintptr(fromDirfd),
		uintptr(unsafe.Pointer(from)),
		uintptr(toDirfd),
		uintptr(unsafe.Pointer(to)),
		uintptr(flags),
		0,
	); errno != 0 {
		err = os.NewSyscallError("move_mount", errno)
	}
	return
}
// Mount attaches the underlying filesystem context to the specified pathname.
//
// It commits the context with FSCONFIG_CMD_CREATE_EXCL (failing rather than
// reusing an existing superblock), instantiates a mount object, then moves
// that object onto pathname. The intermediate mount fd is closed in all
// cases; its close error is surfaced only when the move itself succeeded.
func (fs *FS) Mount(pathname string, attrFlags int) error {
	if err := fs.config(FSCONFIG_CMD_CREATE_EXCL, nil, nil, 0); err != nil {
		return err
	}
	fd, err := fs.mount(0, attrFlags)
	if err != nil {
		return err
	}
	// NOTE(review): toDirfd of -1 assumes pathname is absolute (dirfd is
	// ignored for absolute paths) — confirm callers never pass relative
	// paths here.
	err = MoveMount(
		fd, "",
		-1, pathname,
		MOVE_MOUNT_F_EMPTY_PATH,
	)
	closeErr := syscall.Close(fd)
	if err == nil {
		err = closeErr
	}
	return err
}

View File

@@ -42,8 +42,6 @@ var (
AbsDevShm = unsafeAbs(DevShm)
// AbsProc is [Proc] as [check.Absolute].
AbsProc = unsafeAbs(Proc)
// AbsProcSys is [ProcSys] as [check.Absolute].
AbsProcSys = unsafeAbs(ProcSys)
// AbsProcSelfExe is [ProcSelfExe] as [check.Absolute].
AbsProcSelfExe = unsafeAbs(ProcSelfExe)
// AbsSys is [Sys] as [check.Absolute].

216
internal/pkg/asm.go Normal file
View File

@@ -0,0 +1,216 @@
package pkg
import (
"encoding/binary"
"fmt"
"io"
"strconv"
"strings"
)
// asmOutLine is one buffered line of disassembly output; lines are collected
// during decoding and rendered in a second pass by Disassemble.
type asmOutLine struct {
	// pos is the byte offset of the record in the input stream;
	// -1 marks a spacing (blank) line.
	pos int
	// word is the record's word size in bytes (4 or 8) and selects which
	// hex-dump layout the formatter uses.
	word int
	// kindData is the numeric kind value as read from the stream.
	kindData int64
	// valueData holds the raw bytes of the record's value word, if any.
	valueData []byte
	// indent is 0 for artifact header lines and 1 for record lines; the
	// formatter in this file does not currently consult it.
	indent int
	// kind is the rendered mnemonic column.
	kind string
	// value is the rendered value column.
	value string
}

// spacingLine is the sentinel appended after each end record; its pos of -1
// makes the formatter emit a blank separator line.
var spacingLine = asmOutLine{
	pos:       -1,
	kindData:  -1,
	valueData: nil,
	indent:    0,
	kind:      "",
	value:     "",
}
// Disassemble reads serialized IR from r and renders it as formatted,
// assembly-style text.
//
// showHeader includes artifact header and dependency lines in the output;
// raw emits only the mnemonic and value columns, omitting offsets and hex
// dumps. The real and force parameters are accepted for interface stability
// but are not consulted in this function.
//
// Decoding stops at a clean io.EOF, which is not reported as an error; any
// other read error — including a truncated record, which io.ReadFull turns
// into io.ErrUnexpectedEOF — is returned to the caller.
func Disassemble(r io.Reader, real bool, showHeader bool, force bool, raw bool) (s string, err error) {
	var lines []asmOutLine
	sb := new(strings.Builder)
	header := true
	pos := new(int)
	for err == nil {
		if header {
			// Artifact header: kind word, dependency count, then one
			// (kind, identifier) pair per dependency.
			var kind uint64
			var size uint64
			var bsize []byte
			p := *pos
			if _, kind, err = nextUint64(r, pos); err != nil {
				break
			}
			if bsize, size, err = nextUint64(r, pos); err != nil {
				break
			}
			if showHeader {
				lines = append(lines, asmOutLine{p, 8, int64(kind), bsize, 0, "head " + intToKind(kind), ""})
			}
			for i := 0; uint64(i) < size; i++ {
				var did Checksum
				var dkind uint64
				p := *pos
				if _, dkind, err = nextUint64(r, pos); err != nil {
					break
				}
				if _, did, err = nextIdent(r, pos); err != nil {
					break
				}
				if showHeader {
					lines = append(lines, asmOutLine{p, 8, int64(dkind), nil, 1, intToKind(dkind), Encode(did)})
				}
			}
			header = false
		}

		var k uint32
		p := *pos
		if _, k, err = nextUint32(r, pos); err != nil {
			break
		}
		kind := IRValueKind(k)
		switch kind {
		case IRKindEnd:
			// End record: the low bit of the ancillary word signals a
			// trailing checksum identifier.
			var a uint32
			var ba []byte
			if ba, a, err = nextUint32(r, pos); err != nil {
				break
			}
			if a&1 != 0 {
				var sum Checksum
				if _, sum, err = nextIdent(r, pos); err != nil {
					break
				}
				lines = append(lines, asmOutLine{p, 4, int64(kind), ba, 1, "end ", Encode(sum)})
			} else {
				lines = append(lines, asmOutLine{p, 4, int64(kind), []byte{0, 0, 0, 0}, 1, "end ", ""})
			}
			lines = append(lines, spacingLine)
			header = true
			continue
		case IRKindIdent:
			var a []byte
			// The ancillary word is read to advance pos; its value is not
			// displayed.
			if a, _, err = nextUint32(r, pos); err != nil {
				break
			}
			var sum Checksum
			if _, sum, err = nextIdent(r, pos); err != nil {
				break
			}
			lines = append(lines, asmOutLine{p, 4, int64(kind), a, 1, "id ", Encode(sum)})
			continue
		case IRKindUint32:
			var i uint32
			var bi []byte
			if bi, i, err = nextUint32(r, pos); err != nil {
				break
			}
			lines = append(lines, asmOutLine{p, 4, int64(kind), bi, 1, "int ", strconv.FormatUint(uint64(i), 10)})
		case IRKindString:
			var l uint32
			var bl []byte
			if bl, l, err = nextUint32(r, pos); err != nil {
				break
			}
			// The payload is padded up to the word size. Use io.ReadFull so
			// a short read surfaces as an error instead of silently
			// truncating the string (r.Read may return fewer bytes).
			buf := make([]byte, l+(wordSize-l%wordSize)%wordSize)
			var n int
			if n, err = io.ReadFull(r, buf); err != nil {
				break
			}
			*pos = *pos + n
			lines = append(lines, asmOutLine{p, 4, int64(kind), bl, 1, "str ", strconv.Quote(string(buf[:l]))})
			continue
		default:
			// Unknown value kind: consume its ancillary word and keep going.
			var bi []byte
			if bi, _, err = nextUint32(r, pos); err != nil {
				break
			}
			lines = append(lines, asmOutLine{p, 4, int64(kind), bi, 1, "????", ""})
		}
	}
	if err != io.EOF {
		return
	}
	err = nil

	// Second pass: render the buffered lines.
	for _, line := range lines {
		if raw {
			if line.pos != -1 {
				sb.WriteString(fmt.Sprintf("%s\t%s\n", line.kind, line.value))
			}
		} else {
			if line.pos == -1 {
				sb.WriteString("\n")
			} else if line.word == 4 {
				sb.WriteString(fmt.Sprintf("%06x: %04x %04x%s %s %s\n", line.pos, binary.LittleEndian.AppendUint32(nil, uint32(line.kindData)), line.valueData, headerSpacing(showHeader), line.kind, line.value))
			} else {
				kind := binary.LittleEndian.AppendUint64(nil, uint64(line.kindData))
				value := line.valueData
				if len(value) == 8 {
					sb.WriteString(fmt.Sprintf("%06x: %04x %04x %04x %04x %s %s\n", line.pos, kind[:4], kind[4:], value[:4], value[4:], line.kind, line.value))
				} else {
					sb.WriteString(fmt.Sprintf("%06x: %04x %04x %s %s\n", line.pos, kind[:4], kind[4:], line.kind, line.value))
				}
			}
		}
	}
	return sb.String(), err
}
func nextUint32(r io.Reader, pos *int) ([]byte, uint32, error) {
i := make([]byte, 4)
_, err := r.Read(i)
if err != nil {
return i, 0, err
}
p := *pos + 4
*pos = p
return i, binary.LittleEndian.Uint32(i), nil
}
func nextUint64(r io.Reader, pos *int) ([]byte, uint64, error) {
i := make([]byte, 8)
_, err := r.Read(i)
if err != nil {
return i, 0, err
}
p := *pos + 8
*pos = p
return i, binary.LittleEndian.Uint64(i), nil
}
// nextIdent reads a 48-byte checksum identifier from r, advancing *pos by 48
// on success, and returns the raw bytes alongside the converted [Checksum].
//
// io.ReadFull rejects short reads, so a partially read identifier surfaces
// as io.ErrUnexpectedEOF instead of being zero-padded into a plausible but
// wrong Checksum.
func nextIdent(r io.Reader, pos *int) ([]byte, Checksum, error) {
	b := make([]byte, 48)
	if _, err := io.ReadFull(r, b); err != nil {
		return b, Checksum{}, err
	}
	*pos += 48
	return b, Checksum(b), nil
}
// intToKind maps a header kind word to its four-character mnemonic. Kinds
// outside the known set render as an ordinal relative to KindCustomOffset.
func intToKind(i uint64) string {
	mnemonics := map[Kind]string{
		KindHTTPGet: "http",
		KindTar:     "tar ",
		KindExec:    "exec",
		KindExecNet: "exen",
		KindFile:    "file",
	}
	if name, ok := mnemonics[Kind(i)]; ok {
		return name
	}
	return fmt.Sprintf("$%d ", i-KindCustomOffset)
}
// headerSpacing returns the padding inserted into record lines when header
// lines are being shown, and the empty string otherwise.
func headerSpacing(showHeader bool) string {
	if !showHeader {
		return ""
	}
	return " "
}

View File

@@ -64,6 +64,78 @@ func TestFlatten(t *testing.T) {
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C"), nil},
{"sample cache file", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX": {Mode: 0400, Data: []byte{0}},
"checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq": {Mode: 0400, Data: []byte{0, 0, 0, 0, 0xad, 0xb, 0, 4, 0xfe, 0xfe, 0, 0, 0xfe, 0xca, 0, 0}},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
"identifier/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
"identifier/cafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
"identifier/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: 0400, Path: "checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq", Data: []byte{0, 0, 0, 0, 0xad, 0xb, 0, 4, 0xfe, 0xfe, 0, 0, 0xfe, 0xca, 0, 0}},
{Mode: 0400, Path: "checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX", Data: []byte{0}},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq", Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/cafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe", Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX", Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("St9rlE-mGZ5gXwiv_hzQ_B8bZP-UUvSNmf4nHUZzCMOumb6hKnheZSe0dmnuc4Q2"), nil},
{"sample http get cure", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU": {Mode: 0400, Data: []byte("\x7f\xe1\x69\xa2\xdd\x63\x96\x26\x83\x79\x61\x8b\xf0\x3f\xd5\x16\x9a\x39\x3a\xdb\xcf\xb1\xbc\x8d\x33\xff\x75\xee\x62\x56\xa9\xf0\x27\xac\x13\x94\x69")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/oM-2pUlk-mOxK1t3aMWZer69UdOQlAXiAgMrpZ1476VoOqpYVP1aGFS9_HYy-D8_": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU")},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: 0400, Path: "checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU", Data: []byte("\x7f\xe1\x69\xa2\xdd\x63\x96\x26\x83\x79\x61\x8b\xf0\x3f\xd5\x16\x9a\x39\x3a\xdb\xcf\xb1\xbc\x8d\x33\xff\x75\xee\x62\x56\xa9\xf0\x27\xac\x13\x94\x69")},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/oM-2pUlk-mOxK1t3aMWZer69UdOQlAXiAgMrpZ1476VoOqpYVP1aGFS9_HYy-D8_", Data: []byte("../checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU")},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("L_0RFHpr9JUS4Zp14rz2dESSRvfLzpvqsLhR1-YjQt8hYlmEdVl7vI3_-v8UNPKs"), nil},
{"sample directory step simple", fstest.MapFS{
".": {Mode: fs.ModeDir | 0500},
"check": {Mode: 0400, Data: []byte{0, 0}},
"lib": {Mode: fs.ModeDir | 0700},
"lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
"lib/pkgconfig": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0500, Path: "."},
{Mode: 0400, Path: "check", Data: []byte{0, 0}},
{Mode: fs.ModeDir | 0700, Path: "lib"},
{Mode: fs.ModeSymlink | 0777, Path: "lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
{Mode: fs.ModeDir | 0700, Path: "lib/pkgconfig"},
}, pkg.MustDecode("qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b"), nil},
{"sample directory step garbage", fstest.MapFS{
".": {Mode: fs.ModeDir | 0500},
@@ -79,6 +151,421 @@ func TestFlatten(t *testing.T) {
{Mode: fs.ModeDir | 0500, Path: "lib/pkgconfig"},
}, pkg.MustDecode("CUx-3hSbTWPsbMfDhgalG4Ni_GmR9TnVX8F99tY_P5GtkYvczg9RrF5zO0jX9XYT"), nil},
{"sample directory", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b": {Mode: fs.ModeDir | 0500},
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/check": {Mode: 0400, Data: []byte{0, 0}},
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib": {Mode: fs.ModeDir | 0700},
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/pkgconfig": {Mode: fs.ModeDir | 0700},
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
"identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b"},
{Mode: 0400, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/check", Data: []byte{0, 0}},
{Mode: fs.ModeDir | 0700, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib"},
{Mode: fs.ModeSymlink | 0777, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
{Mode: fs.ModeDir | 0700, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/pkgconfig"},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY", Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa", Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("WVpvsVqVKg9Nsh744x57h51AuWUoUR2nnh8Md-EYBQpk6ziyTuUn6PLtF2e0Eu_d"), nil},
{"sample no assume checksum", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/Aubi5EG4_Y8DhL9bQ3Q4HFBhLRF7X5gt9D3CNCQfT-TeBtlRXc7Zi_JYZEMoCC7M": {Mode: fs.ModeDir | 0500},
"checksum/Aubi5EG4_Y8DhL9bQ3Q4HFBhLRF7X5gt9D3CNCQfT-TeBtlRXc7Zi_JYZEMoCC7M/check": {Mode: 0400, Data: []byte{}},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/_wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/Aubi5EG4_Y8DhL9bQ3Q4HFBhLRF7X5gt9D3CNCQfT-TeBtlRXc7Zi_JYZEMoCC7M")},
"identifier/_wEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/Aubi5EG4_Y8DhL9bQ3Q4HFBhLRF7X5gt9D3CNCQfT-TeBtlRXc7Zi_JYZEMoCC7M")},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/Aubi5EG4_Y8DhL9bQ3Q4HFBhLRF7X5gt9D3CNCQfT-TeBtlRXc7Zi_JYZEMoCC7M"},
{Mode: 0400, Path: "checksum/Aubi5EG4_Y8DhL9bQ3Q4HFBhLRF7X5gt9D3CNCQfT-TeBtlRXc7Zi_JYZEMoCC7M/check", Data: []byte{}},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/Aubi5EG4_Y8DhL9bQ3Q4HFBhLRF7X5gt9D3CNCQfT-TeBtlRXc7Zi_JYZEMoCC7M")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_wEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/Aubi5EG4_Y8DhL9bQ3Q4HFBhLRF7X5gt9D3CNCQfT-TeBtlRXc7Zi_JYZEMoCC7M")},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("OC290t23aimNo2Rp2pPwan5GI2KRLRdOwYxXQMD9jw0QROgHnNXWodoWdV0hwu2w"), nil},
{"sample tar step unpack", fstest.MapFS{
".": {Mode: fs.ModeDir | 0500},
"checksum": {Mode: fs.ModeDir | 0500},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP": {Mode: fs.ModeDir | 0500},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check": {Mode: 0400, Data: []byte{0, 0}},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib": {Mode: fs.ModeDir | 0500},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig": {Mode: fs.ModeDir | 0500},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
"identifier": {Mode: fs.ModeDir | 0500},
"identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"work": {Mode: fs.ModeDir | 0500},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0500, Path: "."},
{Mode: fs.ModeDir | 0500, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP"},
{Mode: 0400, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check", Data: []byte{0, 0}},
{Mode: fs.ModeDir | 0500, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib"},
{Mode: fs.ModeSymlink | 0777, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
{Mode: fs.ModeDir | 0500, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig"},
{Mode: fs.ModeDir | 0500, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
{Mode: fs.ModeDir | 0500, Path: "work"},
}, pkg.MustDecode("cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM"), nil},
{"sample tar", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM": {Mode: fs.ModeDir | 0500},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum": {Mode: fs.ModeDir | 0500},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP": {Mode: fs.ModeDir | 0500},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check": {Mode: 0400, Data: []byte{0, 0}},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib": {Mode: fs.ModeDir | 0500},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig": {Mode: fs.ModeDir | 0500},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier": {Mode: fs.ModeDir | 0500},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/work": {Mode: fs.ModeDir | 0500},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/W5S65DEhawz_WKaok5NjUKLmnD9dNl5RPauNJjcOVcB3VM4eGhSaLGmXbL8vZpiw": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
"identifier/rg7F1D5hwv6o4xctjD5zDq4i5MD0mArTsUIWfhUbik8xC6Bsyt3mjXXOm3goojTz": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM"},
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP"},
{Mode: 0400, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check", Data: []byte{0, 0}},
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib"},
{Mode: fs.ModeSymlink | 0777, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig"},
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
{Mode: fs.ModeSymlink | 0777, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/work"},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/W5S65DEhawz_WKaok5NjUKLmnD9dNl5RPauNJjcOVcB3VM4eGhSaLGmXbL8vZpiw", Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/rg7F1D5hwv6o4xctjD5zDq4i5MD0mArTsUIWfhUbik8xC6Bsyt3mjXXOm3goojTz", Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
{Mode: fs.ModeDir | 0700, Path: "temp"},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("NQTlc466JmSVLIyWklm_u8_g95jEEb98PxJU-kjwxLpfdjwMWJq0G8ze9R4Vo1Vu"), nil},
{"sample tar expand step unpack", fstest.MapFS{
".": {Mode: fs.ModeDir | 0500},
"libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0500, Path: "."},
{Mode: fs.ModeSymlink | 0777, Path: "libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
}, pkg.MustDecode("CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN"), nil},
{"sample tar expand", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN": {Mode: fs.ModeDir | 0500},
"checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/W5S65DEhawz_WKaok5NjUKLmnD9dNl5RPauNJjcOVcB3VM4eGhSaLGmXbL8vZpiw": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
"identifier/_v1blm2h-_KA-dVaawdpLas6MjHc6rbhhFS8JWwx8iJxZGUu8EBbRrhr5AaZ9PJL": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN"},
{Mode: fs.ModeSymlink | 0777, Path: "checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/W5S65DEhawz_WKaok5NjUKLmnD9dNl5RPauNJjcOVcB3VM4eGhSaLGmXbL8vZpiw", Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_v1blm2h-_KA-dVaawdpLas6MjHc6rbhhFS8JWwx8iJxZGUu8EBbRrhr5AaZ9PJL", Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
{Mode: fs.ModeDir | 0700, Path: "temp"},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("hSoSSgCYTNonX3Q8FjvjD1fBl-E-BQyA6OTXro2OadXqbST4tZ-akGXszdeqphRe"), nil},
{"testtool", fstest.MapFS{
".": {Mode: fs.ModeDir | 0500},
"check": {Mode: 0400, Data: []byte{0}},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0500, Path: "."},
{Mode: 0400, Path: "check", Data: []byte{0}},
}, pkg.MustDecode("GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"), nil},
{"sample exec container", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
"identifier/dztPS6jRjiZtCF4_p8AzfnxGp6obkhrgFVsxdodbKWUoAEVtDz3MykepJB4kI_ks": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
{Mode: 0400, Path: "checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb", Data: []byte{}},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/dztPS6jRjiZtCF4_p8AzfnxGp6obkhrgFVsxdodbKWUoAEVtDz3MykepJB4kI_ks", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
{Mode: fs.ModeDir | 0700, Path: "temp"},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("Q5DluWQCAeohLoiGRImurwFp3vdz9IfQCoj7Fuhh73s4KQPRHpEQEnHTdNHmB8Fx"), nil},
{"testtool net", fstest.MapFS{
".": {Mode: fs.ModeDir | 0500},
"check": {Mode: 0400, Data: []byte("net")},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0500, Path: "."},
{Mode: 0400, Path: "check", Data: []byte("net")},
}, pkg.MustDecode("a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W"), nil},
{"sample exec net container", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
"checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W": {Mode: fs.ModeDir | 0500},
"checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W/check": {Mode: 0400, Data: []byte("net")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/G8qPxD9puvvoOVV7lrT80eyDeIl3G_CCFoKw12c8mCjMdG1zF7NEPkwYpNubClK3": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W")},
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
{Mode: 0400, Path: "checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb", Data: []byte{}},
{Mode: fs.ModeDir | 0500, Path: "checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W"},
{Mode: 0400, Path: "checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W/check", Data: []byte("net")},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/G8qPxD9puvvoOVV7lrT80eyDeIl3G_CCFoKw12c8mCjMdG1zF7NEPkwYpNubClK3", Data: []byte("../checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
{Mode: fs.ModeDir | 0700, Path: "temp"},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("bPYvvqxpfV7xcC1EptqyKNK1klLJgYHMDkzBcoOyK6j_Aj5hb0mXNPwTwPSK5F6Z"), nil},
{"sample exec container overlay root", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/RdMA-mubnrHuu3Ky1wWyxauSYCO0ZH_zCPUj3uDHqkfwv5sGcByoF_g5PjlGiClb": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/RdMA-mubnrHuu3Ky1wWyxauSYCO0ZH_zCPUj3uDHqkfwv5sGcByoF_g5PjlGiClb", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
{Mode: fs.ModeDir | 0700, Path: "temp"},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("PO2DSSCa4yoSgEYRcCSZfQfwow1yRigL3Ry-hI0RDI4aGuFBha-EfXeSJnG_5_Rl"), nil},
{"sample exec container overlay work", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/5hlaukCirnXE4W_RSLJFOZN47Z5RiHnacXzdFp_70cLgiJUGR6cSb_HaFftkzi0-": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/5hlaukCirnXE4W_RSLJFOZN47Z5RiHnacXzdFp_70cLgiJUGR6cSb_HaFftkzi0-", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
{Mode: fs.ModeDir | 0700, Path: "temp"},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("iaRt6l_Wm2n-h5UsDewZxQkCmjZjyL8r7wv32QT2kyV55-Lx09Dq4gfg9BiwPnKs"), nil},
{"sample exec container multiple layers", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
"checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK": {Mode: fs.ModeDir | 0500},
"checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK/check": {Mode: 0400, Data: []byte("layers")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
"identifier/B-kc5iJMx8GtlCua4dz6BiJHnDAOUfPjgpbKq4e-QEn0_CZkSYs3fOA1ve06qMs2": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK")},
"identifier/p1t_drXr34i-jZNuxDMLaMOdL6tZvQqhavNafGynGqxOZoXAUTSn7kqNh3Ovv3DT": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
{Mode: 0400, Path: "checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb", Data: []byte{}},
{Mode: fs.ModeDir | 0500, Path: "checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK"},
{Mode: 0400, Path: "checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK/check", Data: []byte("layers")},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/B-kc5iJMx8GtlCua4dz6BiJHnDAOUfPjgpbKq4e-QEn0_CZkSYs3fOA1ve06qMs2", Data: []byte("../checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/p1t_drXr34i-jZNuxDMLaMOdL6tZvQqhavNafGynGqxOZoXAUTSn7kqNh3Ovv3DT", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
{Mode: fs.ModeDir | 0700, Path: "temp"},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("O2YzyR7IUGU5J2CADy0hUZ3A5NkP_Vwzs4UadEdn2oMZZVWRtH0xZGJ3HXiimTnZ"), nil},
{"sample exec container layer promotion", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/kvJIqZo5DKFOxC2ZQ-8_nPaQzEAz9cIm3p6guO-uLqm-xaiPu7oRkSnsu411jd_U": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"identifier/xXTIYcXmgJWNLC91c417RRrNM9cjELwEZHpGvf8Fk_GNP5agRJp_SicD0w9aMeLJ": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/kvJIqZo5DKFOxC2ZQ-8_nPaQzEAz9cIm3p6guO-uLqm-xaiPu7oRkSnsu411jd_U", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/xXTIYcXmgJWNLC91c417RRrNM9cjELwEZHpGvf8Fk_GNP5agRJp_SicD0w9aMeLJ", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
{Mode: fs.ModeDir | 0700, Path: "temp"},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("3EaW6WibLi9gl03_UieiFPaFcPy5p4x3JPxrnLJxGaTI-bh3HU9DK9IMx7c3rrNm"), nil},
{"sample file short", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX": {Mode: 0400, Data: []byte{0}},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/3376ALA7hIUm2LbzH2fDvRezgzod1eTK_G6XjyOgbM2u-6swvkFaF0BOwSl_juBi": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: 0400, Path: "checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX", Data: []byte{0}},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/3376ALA7hIUm2LbzH2fDvRezgzod1eTK_G6XjyOgbM2u-6swvkFaF0BOwSl_juBi", Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("iR6H5OIsyOW4EwEgtm9rGzGF6DVtyHLySEtwnFE8bnus9VJcoCbR4JIek7Lw-vwT"), nil},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {

View File

@@ -9,10 +9,8 @@ import (
"os"
"os/exec"
"path/filepath"
"runtime"
"slices"
"strconv"
"sync"
"syscall"
"time"
"unique"
@@ -96,32 +94,6 @@ func MustPath(pathname string, writable bool, a ...Artifact) ExecPath {
return ExecPath{check.MustAbs(pathname), a, writable}
}
var (
binfmt map[string]container.BinfmtEntry
binfmtMu sync.RWMutex
)
// RegisterArch arranges for [KindExec] and [KindExecNet] to support a new
// architecture via a binfmt_misc entry. Each architecture must be registered
// at most once.
func RegisterArch(arch string, e container.BinfmtEntry) {
if arch == "" {
panic(UnsupportedArchError(arch))
}
binfmtMu.Lock()
defer binfmtMu.Unlock()
if binfmt == nil {
binfmt = make(map[string]container.BinfmtEntry)
}
if _, ok := binfmt[arch]; ok {
panic("attempting to register " + strconv.Quote(arch) + " twice")
}
binfmt[arch] = e
}
const (
// ExecTimeoutDefault replaces out of range [NewExec] timeout values.
ExecTimeoutDefault = 15 * time.Minute
@@ -138,8 +110,6 @@ type execArtifact struct {
// Caller-supplied user-facing reporting name, guaranteed to be nonzero
// during initialisation.
name string
// Target architecture.
arch string
// Caller-supplied inner mount points.
paths []ExecPath
@@ -162,40 +132,28 @@ type execArtifact struct {
var _ fmt.Stringer = new(execArtifact)
// execMeasuredArtifact is like execArtifact but implements [KnownChecksum] and
// has its resulting container optionally keep the host net namespace.
type execMeasuredArtifact struct {
// execNetArtifact is like execArtifact but implements [KnownChecksum] and has
// its resulting container keep the host net namespace.
type execNetArtifact struct {
checksum Checksum
// Whether to keep host net namespace.
hostNet bool
execArtifact
}
var _ KnownChecksum = new(execMeasuredArtifact)
var _ KnownChecksum = new(execNetArtifact)
// Checksum returns the caller-supplied checksum.
func (a *execMeasuredArtifact) Checksum() Checksum { return a.checksum }
func (a *execNetArtifact) Checksum() Checksum { return a.checksum }
// Kind returns [KindExecNet], or [KindExec] if hostNet is false.
func (a *execMeasuredArtifact) Kind() Kind {
if a == nil || a.hostNet {
return KindExecNet
}
return KindExec
}
// Kind returns the hardcoded [Kind] constant.
func (*execNetArtifact) Kind() Kind { return KindExecNet }
// Cure cures the [Artifact] in the container described by the caller. The
// container optionally retains host networking.
func (a *execMeasuredArtifact) Cure(f *FContext) error {
return a.cure(f, a.hostNet)
// container retains host networking.
func (a *execNetArtifact) Cure(f *FContext) error {
return a.cure(f, true)
}
// ErrNetChecksum is panicked by [NewExec] if host net namespace is requested
// with a nil checksum.
var ErrNetChecksum = errors.New("attempting to keep net namespace without checksum")
// NewExec returns a new [Artifact] that executes the program path in a
// container with specified paths bind mounted read-only in order. A private
// instance of /proc and /dev is made available to the container.
@@ -209,7 +167,7 @@ var ErrNetChecksum = errors.New("attempting to keep net namespace without checks
// regular or symlink.
//
// If checksum is non-nil, the resulting [Artifact] implements [KnownChecksum]
// and its container optionally runs in the host net namespace.
// and its container runs in the host net namespace.
//
// The container is allowed to run for the specified duration before the initial
// process and all processes originating from it is terminated. A zero or
@@ -220,10 +178,10 @@ var ErrNetChecksum = errors.New("attempting to keep net namespace without checks
// container and does not affect curing outcome. Because of this, it is omitted
// from parameter data for computing identifier.
func NewExec(
name, arch string,
name string,
checksum *Checksum,
timeout time.Duration,
hostNet, exclusive bool,
exclusive bool,
dir *check.Absolute,
env []string,
@@ -235,23 +193,17 @@ func NewExec(
if name == "" {
name = "exec-" + filepath.Base(pathname.String())
}
if arch == "" {
arch = runtime.GOARCH
}
if timeout <= 0 {
timeout = ExecTimeoutDefault
}
if timeout > ExecTimeoutMax {
timeout = ExecTimeoutMax
}
a := execArtifact{name, arch, paths, dir, env, pathname, args, timeout, exclusive}
a := execArtifact{name, paths, dir, env, pathname, args, timeout, exclusive}
if checksum == nil {
if hostNet {
panic(ErrNetChecksum)
}
return &a
}
return &execMeasuredArtifact{*checksum, hostNet, a}
return &execNetArtifact{*checksum, a}
}
// Kind returns the hardcoded [Kind] constant.
@@ -259,7 +211,6 @@ func (*execArtifact) Kind() Kind { return KindExec }
// Params writes paths, executable pathname and args.
func (a *execArtifact) Params(ctx *IContext) {
ctx.WriteString(a.arch)
ctx.WriteString(a.name)
ctx.WriteUint32(uint32(len(a.paths)))
@@ -306,26 +257,11 @@ func (a *execArtifact) Params(ctx *IContext) {
}
}
// UnsupportedArchError describes an unsupported or invalid architecture.
type UnsupportedArchError string
func (e UnsupportedArchError) Error() string {
if e == "" {
return "invalid architecture name"
}
return "unsupported architecture " + string(e)
}
// readExecArtifact interprets IR values and returns the address of execArtifact
// or execNetArtifact.
func readExecArtifact(r *IRReader, net bool) Artifact {
r.DiscardAll()
arch := r.ReadString()
if arch == "" {
panic(UnsupportedArchError(arch))
}
name := r.ReadString()
sz := r.ReadUint32()
@@ -376,17 +312,22 @@ func readExecArtifact(r *IRReader, net bool) Artifact {
exclusive := r.ReadUint32() != 0
checksum, ok := r.Finalise()
var checksumP *Checksum
if ok {
checksumP = new(checksum.Value())
}
if net && !ok {
panic(ErrExpectedChecksum)
var checksumP *Checksum
if net {
if !ok {
panic(ErrExpectedChecksum)
}
checksumVal := checksum.Value()
checksumP = &checksumVal
} else {
if ok {
panic(ErrUnexpectedChecksum)
}
}
return NewExec(
name, arch, checksumP, timeout, net, exclusive, dir, env, pathname, args, paths...,
name, checksumP, timeout, exclusive, dir, env, pathname, args, paths...,
)
}
@@ -495,23 +436,11 @@ func (a *execArtifact) makeContainer(
if z.HostNet {
z.Hostname = "cure-net"
}
z.Quiet = flags&CSuppressInit != 0
z.Uid, z.Gid = (1<<10)-1, (1<<10)-1
z.Dir, z.Path, z.Args = a.dir, a.path, a.args
z.Env = slices.Concat(a.env, []string{EnvJobs + "=" + strconv.Itoa(jobs)})
z.Grow(len(a.paths) + 4)
if a.arch != runtime.GOARCH {
binfmtMu.RLock()
e, ok := binfmt[a.arch]
binfmtMu.RUnlock()
if !ok {
return nil, UnsupportedArchError(a.arch)
}
z.Binfmt = []container.BinfmtEntry{e}
z.InitAsRoot = true
}
for i, b := range a.paths {
if i == overlayWorkIndex {
if err = os.MkdirAll(work.String(), 0700); err != nil {
@@ -598,9 +527,9 @@ func (c *Cache) EnterExec(
case *execArtifact:
e = f
case *execMeasuredArtifact:
case *execNetArtifact:
e = &f.execArtifact
hostNet = f.hostNet
hostNet = true
default:
return ErrNotExec
@@ -701,6 +630,12 @@ func (a *execArtifact) cure(f *FContext, hostNet bool) (err error) {
_ = stdout.Close()
return
}
defer func() {
if err != nil && !errors.As(err, new(*exec.ExitError)) {
_ = stdout.Close()
_ = stderr.Close()
}
}()
brStdout, brStderr := f.cache.getReader(stdout), f.cache.getReader(stderr)
stdoutDone, stderrDone := make(chan struct{}), make(chan struct{})
@@ -715,11 +650,6 @@ func (a *execArtifact) cure(f *FContext, hostNet bool) (err error) {
io.TeeReader(brStderr, status),
)
defer func() {
if err != nil && !errors.As(err, new(*exec.ExitError)) {
_ = stdout.Close()
_ = stderr.Close()
}
<-stdoutDone
<-stderrDone
f.cache.putReader(brStdout)

View File

@@ -1,70 +1,44 @@
package pkg_test
//go:generate env CGO_ENABLED=0 go build -tags testtool -o testdata/testtool ./testdata
import (
"bytes"
_ "embed"
"encoding/gob"
"errors"
"io/fs"
"net"
"os"
"os/exec"
"path/filepath"
"slices"
"testing"
"unique"
"hakurei.app/check"
"hakurei.app/container"
"hakurei.app/hst"
"hakurei.app/internal/info"
"hakurei.app/internal/pkg"
"hakurei.app/internal/stub"
"hakurei.app/internal/pkg/internal/testtool/expected"
)
// testtoolBin is the container test tool binary made available to the
// execArtifact for testing its curing environment.
//
//go:generate env CGO_ENABLED=0 go build -tags testtool -o internal/testtool ./internal/testtool
//go:embed internal/testtool/testtool
//go:embed testdata/testtool
var testtoolBin []byte
func init() {
pathname, err := filepath.Abs("internal/testtool/testtool")
if err != nil {
panic(err)
}
pkg.RegisterArch("cafe", container.BinfmtEntry{
Magic: expected.Magic,
Interpreter: check.MustAbs(pathname),
})
}
func TestExec(t *testing.T) {
t.Parallel()
wantOffline := expectsFS{
".": {Mode: fs.ModeDir | 0500},
"check": {Mode: 0400, Data: []byte{0}},
}
wantOfflineEncode := pkg.Encode(wantOffline.hash())
failingArtifact := &stubArtifact{
kind: pkg.KindTar,
params: []byte("doomed artifact"),
cure: func(t *pkg.TContext) error {
return stub.UniqueError(0xcafe)
},
}
wantChecksumOffline := pkg.MustDecode(
"GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9",
)
checkWithCache(t, []cacheTestCase{
{"offline", pkg.CValidateKnown | checkDestroySubstitutes, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
{"offline", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
testtool, testtoolDestroy := newTesttool()
cureMany(t, c, []cureStep{
{"container", pkg.NewExec(
"exec-offline", "", new(wantOffline.hash()), 0, false, false,
"exec-offline", nil, 0, false,
pkg.AbsWork,
[]string{"HAKUREI_TEST=1"},
check.MustAbs("/opt/bin/testtool"),
@@ -84,128 +58,67 @@ func TestExec(t *testing.T) {
},
}),
pkg.MustPath("/opt", false, testtool),
), ignorePathname, wantOffline, nil},
{"substitution", pkg.NewExec(
"exec-offline", "", new(wantOffline.hash()), 0, false, false,
pkg.AbsWork,
[]string{"HAKUREI_TEST=1"},
check.MustAbs("/opt/bin/testtool"),
[]string{"testtool"},
pkg.MustPath("/file", false, newStubFile(
pkg.KindHTTPGet,
pkg.ID{0xfe, 0},
nil,
nil, nil,
)),
// substitution miss fails in testtool due to differing idents
pkg.MustPath("/.hakurei", false, &stubArtifact{
kind: pkg.KindTar,
params: []byte("empty directory (substituted)"),
cure: func(t *pkg.TContext) error {
return os.MkdirAll(t.GetWorkDir().String(), 0700)
},
}),
pkg.MustPath("/opt", false, testtool),
), ignorePathname, wantOffline, nil},
), ignorePathname, wantChecksumOffline, nil},
{"error passthrough", pkg.NewExec(
"", "", nil, 0, false, true,
"", nil, 0, true,
pkg.AbsWork,
[]string{"HAKUREI_TEST=1"},
check.MustAbs("/opt/bin/testtool"),
[]string{"testtool"},
pkg.MustPath("/proc/nonexistent", false, failingArtifact),
), nil, nil, &pkg.DependencyCureError{
pkg.MustPath("/proc/nonexistent", false, &stubArtifact{
kind: pkg.KindTar,
params: []byte("doomed artifact"),
cure: func(t *pkg.TContext) error {
return stub.UniqueError(0xcafe)
},
}),
), nil, pkg.Checksum{}, &pkg.DependencyCureError{
{
A: failingArtifact,
Ident: unique.Make(pkg.ID(pkg.MustDecode(
"Sowo6oZRmG6xVtUaxB6bDWZhVsqAJsIJWUp0OPKlE103cY0lodx7dem8J-qQF0Z1",
))),
Err: stub.UniqueError(0xcafe),
},
}},
{"invalid paths", pkg.NewExec(
"", "", nil, 0, false, false,
"", nil, 0, false,
pkg.AbsWork,
[]string{"HAKUREI_TEST=1"},
check.MustAbs("/opt/bin/testtool"),
[]string{"testtool"},
pkg.ExecPath{},
), nil, nil, pkg.ErrInvalidPaths},
), nil, pkg.Checksum{}, pkg.ErrInvalidPaths},
})
// check init failure passthrough
initFailureArtifact := pkg.NewExec(
"", "", nil, 0, false, false,
var exitError *exec.ExitError
if _, _, err := c.Cure(pkg.NewExec(
"", nil, 0, false,
pkg.AbsWork,
nil,
check.MustAbs("/opt/bin/testtool"),
[]string{"testtool"},
)
var exitError *exec.ExitError
if _, _, err := c.Cure(initFailureArtifact); !errors.As(err, &exitError) ||
)); !errors.As(err, &exitError) ||
exitError.ExitCode() != hst.ExitFailure {
t.Fatalf("Cure: error = %v, want init exit status 1", err)
}
var faultStatus []byte
if faults, err := c.ReadFaults(initFailureArtifact); err != nil {
t.Fatal(err)
} else if len(faults) != 1 {
t.Fatalf("ReadFaults: %v", faults)
} else if faultStatus, err = os.ReadFile(faults[0].String()); err != nil {
t.Fatal(err)
} else if err = faults[0].Destroy(); err != nil {
t.Fatal(err)
} else {
t.Logf("destroyed expected fault at %s", faults[0].Time().UTC())
}
if !bytes.HasPrefix(faultStatus, []byte(
"internal/pkg ",
)) || !bytes.Contains(faultStatus, []byte(
"\ninit: fork/exec /opt/bin/testtool: no such file or directory\n",
)) {
t.Errorf("unexpected status:\n%s", string(faultStatus))
}
destroyStatus(t, base, 2, 1)
testtoolDestroy(t, base, c)
}, expectsFS{
".": {Mode: fs.ModeDir | 0700},
}, pkg.MustDecode("Q5DluWQCAeohLoiGRImurwFp3vdz9IfQCoj7Fuhh73s4KQPRHpEQEnHTdNHmB8Fx")},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/" + wantOfflineEncode: {Mode: fs.ModeDir | 0500},
"checksum/" + wantOfflineEncode + "/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/IY91PCtOpCYy21AaIK0c9f8-Z6fb2_2ewoHWkt4dxoLf0GOrWqS8yAGFLV84b1Dw": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantOfflineEncode)},
"identifier/QwS7SmiatdqryQYgESdGw7Yw2PcpNf0vNfpvUA0t92BTlKiUjfCrXyMW17G2X77X": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
"identifier/" + expected.Offline: {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantOfflineEncode)},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
{"net", pkg.CValidateKnown | checkDestroySubstitutes, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
{"net", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
testtool, testtoolDestroy := newTesttool()
wantNet := expectsFS{
".": {Mode: fs.ModeDir | 0500},
"check": {Mode: 0400, Data: []byte("net")},
}
wantChecksum := pkg.MustDecode(
"a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W",
)
cureMany(t, c, []cureStep{
{"container", pkg.NewExec(
"exec-net", "", new(wantNet.hash()), 0, true, false,
"exec-net", &wantChecksum, 0, false,
pkg.AbsWork,
[]string{"HAKUREI_TEST=1"},
check.MustAbs("/opt/bin/testtool"),
@@ -225,37 +138,18 @@ func TestExec(t *testing.T) {
},
}),
pkg.MustPath("/opt", false, testtool),
), ignorePathname, wantNet, nil},
), ignorePathname, wantChecksum, nil},
})
destroyStatus(t, base, 2, 0)
testtoolDestroy(t, base, c)
}, expectsFS{
".": {Mode: fs.ModeDir | 0700},
}, pkg.MustDecode("bPYvvqxpfV7xcC1EptqyKNK1klLJgYHMDkzBcoOyK6j_Aj5hb0mXNPwTwPSK5F6Z")},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
"checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W": {Mode: fs.ModeDir | 0500},
"checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W/check": {Mode: 0400, Data: []byte("net")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/" + expected.Net: {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W")},
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
{"overlay root", pkg.CValidateKnown | checkDestroySubstitutes, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
{"overlay root", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
testtool, testtoolDestroy := newTesttool()
cureMany(t, c, []cureStep{
{"container", pkg.NewExec(
"exec-overlay-root", "", nil, 0, false, false,
"exec-overlay-root", nil, 0, false,
pkg.AbsWork,
[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
check.MustAbs("/opt/bin/testtool"),
@@ -269,35 +163,18 @@ func TestExec(t *testing.T) {
},
}),
pkg.MustPath("/opt", false, testtool),
), ignorePathname, wantOffline, nil},
), ignorePathname, wantChecksumOffline, nil},
})
destroyStatus(t, base, 2, 0)
testtoolDestroy(t, base, c)
}, expectsFS{
".": {Mode: fs.ModeDir | 0700},
}, pkg.MustDecode("PO2DSSCa4yoSgEYRcCSZfQfwow1yRigL3Ry-hI0RDI4aGuFBha-EfXeSJnG_5_Rl")},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/" + wantOfflineEncode: {Mode: fs.ModeDir | 0500},
"checksum/" + wantOfflineEncode + "/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/" + expected.OvlRoot: {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantOfflineEncode)},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
{"overlay work", pkg.CValidateKnown | checkDestroySubstitutes, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
{"overlay work", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
testtool, testtoolDestroy := newTesttool()
cureMany(t, c, []cureStep{
{"container", pkg.NewExec(
"exec-overlay-work", "", nil, 0, false, false,
"exec-overlay-work", nil, 0, false,
pkg.AbsWork,
[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
check.MustAbs("/work/bin/testtool"),
@@ -316,35 +193,18 @@ func TestExec(t *testing.T) {
return os.MkdirAll(t.GetWorkDir().String(), 0700)
},
}), pkg.Path(pkg.AbsWork, false /* ignored */, testtool),
), ignorePathname, wantOffline, nil},
), ignorePathname, wantChecksumOffline, nil},
})
destroyStatus(t, base, 2, 0)
testtoolDestroy(t, base, c)
}, expectsFS{
".": {Mode: fs.ModeDir | 0700},
}, pkg.MustDecode("iaRt6l_Wm2n-h5UsDewZxQkCmjZjyL8r7wv32QT2kyV55-Lx09Dq4gfg9BiwPnKs")},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/" + wantOfflineEncode: {Mode: fs.ModeDir | 0500},
"checksum/" + wantOfflineEncode + "/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/" + expected.Work: {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantOfflineEncode)},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
{"multiple layers", pkg.CValidateKnown | checkDestroySubstitutes, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
{"multiple layers", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
testtool, testtoolDestroy := newTesttool()
cureMany(t, c, []cureStep{
{"container", pkg.NewExec(
"exec-multiple-layers", "", nil, 0, false, false,
"exec-multiple-layers", nil, 0, false,
pkg.AbsWork,
[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
check.MustAbs("/opt/bin/testtool"),
@@ -385,40 +245,18 @@ func TestExec(t *testing.T) {
},
}),
pkg.MustPath("/opt", false, testtool),
), ignorePathname, wantOffline, nil},
), ignorePathname, wantChecksumOffline, nil},
})
destroyStatus(t, base, 2, 0)
testtoolDestroy(t, base, c)
}, expectsFS{
".": {Mode: fs.ModeDir | 0700},
}, pkg.MustDecode("O2YzyR7IUGU5J2CADy0hUZ3A5NkP_Vwzs4UadEdn2oMZZVWRtH0xZGJ3HXiimTnZ")},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/" + wantOfflineEncode: {Mode: fs.ModeDir | 0500},
"checksum/" + wantOfflineEncode + "/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
"checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK": {Mode: fs.ModeDir | 0500},
"checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK/check": {Mode: 0400, Data: []byte("layers")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
"identifier/B-kc5iJMx8GtlCua4dz6BiJHnDAOUfPjgpbKq4e-QEn0_CZkSYs3fOA1ve06qMs2": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK")},
"identifier/" + expected.Layers: {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantOfflineEncode)},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
{"overlay layer promotion", pkg.CValidateKnown | checkDestroySubstitutes, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
{"overlay layer promotion", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
testtool, testtoolDestroy := newTesttool()
cureMany(t, c, []cureStep{
{"container", pkg.NewExec(
"exec-layer-promotion", "", nil, 0, false, true,
"exec-layer-promotion", nil, 0, true,
pkg.AbsWork,
[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
check.MustAbs("/opt/bin/testtool"),
@@ -438,96 +276,11 @@ func TestExec(t *testing.T) {
},
}),
pkg.MustPath("/opt", false, testtool),
), ignorePathname, wantOffline, nil},
), ignorePathname, wantChecksumOffline, nil},
})
destroyStatus(t, base, 2, 0)
testtoolDestroy(t, base, c)
}, expectsFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/" + wantOfflineEncode: {Mode: fs.ModeDir | 0500},
"checksum/" + wantOfflineEncode + "/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/kvJIqZo5DKFOxC2ZQ-8_nPaQzEAz9cIm3p6guO-uLqm-xaiPu7oRkSnsu411jd_U": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"identifier/" + expected.Promote: {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantOfflineEncode)},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
{"binfmt", pkg.CValidateKnown | checkDestroySubstitutes, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
if info.CanDegrade && os.Getenv("ROSA_SKIP_BINFMT") != "" {
t.Skip("binfmt_misc test explicitly skipped")
}
cureMany(t, c, []cureStep{
{"container", pkg.NewExec(
"exec-binfmt", "cafe", nil, 0, false, true,
pkg.AbsWork,
[]string{"HAKUREI_TEST=1", "HAKUREI_BINFMT=1"},
check.MustAbs("/opt/bin/sample"),
[]string{"sample"},
pkg.MustPath("/", true, &stubArtifact{
kind: pkg.KindTar,
params: []byte("empty directory"),
cure: func(t *pkg.TContext) error {
return os.MkdirAll(t.GetWorkDir().String(), 0700)
},
}),
pkg.MustPath("/opt", false, overrideIdent{pkg.ID{0xfe, 0xff}, &stubArtifact{
kind: pkg.KindTar,
cure: func(t *pkg.TContext) error {
work := t.GetWorkDir()
if err := os.MkdirAll(
work.Append("bin").String(),
0700,
); err != nil {
return err
}
return os.WriteFile(t.GetWorkDir().Append(
"bin",
"sample",
).String(), []byte(expected.Full), 0500)
},
}}),
), ignorePathname, expectsFS{
".": {Mode: fs.ModeDir | 0500},
"check": {Mode: 0400, Data: []byte("binfmt")},
}, nil},
})
destroyStatus(t, base, 2, 0)
}, expectsFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/5aevg3YpDxjqQZ-pdvXK7YqgkL5JKqcoStYQxeD96kuYar6K2mRQWMHib6NQRnpV": {Mode: fs.ModeDir | 0500},
"checksum/5aevg3YpDxjqQZ-pdvXK7YqgkL5JKqcoStYQxeD96kuYar6K2mRQWMHib6NQRnpV/bin": {Mode: fs.ModeDir | 0700},
"checksum/5aevg3YpDxjqQZ-pdvXK7YqgkL5JKqcoStYQxeD96kuYar6K2mRQWMHib6NQRnpV/bin/sample": {Mode: 0500, Data: []byte("\xca\xfe\xba\xbe\xfd\xfd:3")},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"checksum/UnDo4B5KneEUY5b4vRUk_y9MWgkWuw2N8f8a2XayO686xXur-aZmX2-7n_8tKMe3": {Mode: fs.ModeDir | 0500},
"checksum/UnDo4B5KneEUY5b4vRUk_y9MWgkWuw2N8f8a2XayO686xXur-aZmX2-7n_8tKMe3/check": {Mode: 0400, Data: []byte("binfmt")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/6VQTJ1lI5BmVuI1YFYJ8ClO3MRORvTTrcWFDcUU-l5Ga8EofxCxGlSTYN-u8dKj_": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/UnDo4B5KneEUY5b4vRUk_y9MWgkWuw2N8f8a2XayO686xXur-aZmX2-7n_8tKMe3")},
"identifier/_v8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/5aevg3YpDxjqQZ-pdvXK7YqgkL5JKqcoStYQxeD96kuYar6K2mRQWMHib6NQRnpV")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
}, pkg.MustDecode("3EaW6WibLi9gl03_UieiFPaFcPy5p4x3JPxrnLJxGaTI-bh3HU9DK9IMx7c3rrNm")},
})
}

View File

@@ -1,7 +1,6 @@
package pkg_test
import (
"io/fs"
"testing"
"hakurei.app/check"
@@ -11,27 +10,18 @@ import (
func TestFile(t *testing.T) {
t.Parallel()
want := expectsFile{0}
checkWithCache(t, []cacheTestCase{
{"file", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
cureMany(t, c, []cureStep{
{"short", pkg.NewFile("null", []byte{0}), base.Append(
"identifier",
"3376ALA7hIUm2LbzH2fDvRezgzod1eTK_G6XjyOgbM2u-6swvkFaF0BOwSl_juBi",
), want, nil},
), pkg.MustDecode(
"vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX",
), nil},
})
}, expectsFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/" + pkg.Encode(want.hash()): {Mode: 0400, Data: []byte{0}},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/3376ALA7hIUm2LbzH2fDvRezgzod1eTK_G6XjyOgbM2u-6swvkFaF0BOwSl_juBi": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
"substitute": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
}, pkg.MustDecode(
"iR6H5OIsyOW4EwEgtm9rGzGF6DVtyHLySEtwnFE8bnus9VJcoCbR4JIek7Lw-vwT",
)},
})
}

View File

@@ -1,9 +0,0 @@
// Package expected contains data shared between test helper and test harness.
package expected
const (
// Magic are magic bytes in the binfmt test case.
Magic = "\xca\xfe\xba\xbe\xfd\xfd"
// Full is the full content of the binfmt test case executable.
Full = Magic + ":3"
)

View File

@@ -1,10 +0,0 @@
package expected
const (
Offline = "q5ktDTq0miP-VvB2blxqXQeaRXCUWgP_KbC18KNtUDtyoaI_h5mHmGuPMArVEBDs"
OvlRoot = "NacZGXwuRkTvcHaG08a22ujJ8qCWN0RSoFlRSR5FSt0ZcBbJ28FRvkYsHEtX7G8i"
Layers = "WBJDrATtX6rIE5yAu8ePX3WmDF0Tt9kFiue0m3cRnyRoVx1my8a67fh3CAW486oP"
Net = "CmYtj2sNB3LHtqiDuck_Lz3MjLLIiwyP8N4NDitQ1Icvv__LVP9p8tm-sHeQaKKp"
Promote = "TX3eCloaQFkV-SZIH6Jg6E5WKH--rcXY1P0jnZKmLFKWrNqnOzd4G9eIBh6i5ywN"
Work = "OuNiLSC68pZhAOr1YQ4WbV1tzASA0nxLEBcK7lO7MqxDY_j8dmP_C612RTuF23Lu"
)

View File

@@ -1,10 +0,0 @@
package expected
const (
Offline = "WapqyoPxbWSnq07dWHt71mHaJXq99pAjJfFlELlJljSiZMhTFqqlzU1_mN86shSj"
OvlRoot = "V9anFOiRvjGfAeBhLl14AL8TKdWZyD0WTPYe4fS9mOBw8iW5Lmarvt6TG6MV8uWm"
Layers = "tKx7JNRoSBdK_7MdzI-nwTNV2wmiPzwYdcd17oLmXKL_iLmUzUiA79qTqdrTasrv"
Net = "aXyDLzBCJ9XltXZIfetEVsEkrqHfcXuD5XE_FcUnYbN3emwL55N6P8LlHzNfGnM5"
Promote = "3k4V16n96Lq04gjFSKmm4sFjyQ883FFBNXgTy9s_DjeTwxT3pg_iacEh8yMb_S4m"
Work = "6Q49MhFWRE3Ne6MycwAotgl1GtoU5WCHqJNWG2byYZCY-zX-IxPrWiKk7bKkNzhE"
)

View File

@@ -1,10 +0,0 @@
package expected
const (
Offline = "Z6yXE5gOJScL3srmnVMWgCXccDiUNZ5snSrf6RkXuU1_U0rX_kGVwsfHUgNG_awd"
OvlRoot = "zYXJHFRLuxvUhuisZEXgGgVvdQd6piMfp5jmtT6jdVjvC2gICXquOq-UTwlrSD5I"
Layers = "_F8EDazHbcLeT0sVSQXRN_kn9IjduqJcDYgzXpsT-hpKU4EBcZ0PISN2zchpqMbm"
Net = "CA_FAaSIYJgapBEHV40doxpH23PdUEy_6s1TZc7wfSPN0XYqwGpMceXXDSabGveO"
Promote = "_3LPrLp--4h9k4GsNNApu9hHtAafq-GUhfU6d4hJKBDKT3bz_szOsvkXxc5sK53d"
Work = "FEgHeiCD_WT4wsfB-9kDH5n6cRWCEYtJmXdKZgmUUukAOoXumH_hLlosXREC-tqq"
)

View File

@@ -76,9 +76,6 @@ type IContext struct {
// Written to by various methods, should be zeroed after [Artifact.Params]
// returns and must not be exposed directly.
w io.Writer
// Optional [Artifact] to cureRes cache, replaces [IRKindIdent] with
// checksum values if non-nil.
inputs map[Artifact]cureRes
}
// irZero is a zero IR word.
@@ -166,15 +163,7 @@ func (i *IContext) WriteIdent(a Artifact) {
defer i.ic.putIdentBuf(buf)
IRKindIdent.encodeHeader(0).put(buf[:])
if i.inputs != nil {
res, ok := i.inputs[a]
if !ok {
panic(InvalidLookupError(i.ic.Ident(a).Value()))
}
*(*ID)(buf[wordSize:]) = res.checksum.Value()
} else {
*(*ID)(buf[wordSize:]) = i.ic.Ident(a).Value()
}
*(*ID)(buf[wordSize:]) = i.ic.Ident(a).Value()
i.mustWrite(buf[:])
}
@@ -218,44 +207,19 @@ func (i *IContext) WriteString(s string) {
// Encode writes a deterministic, efficient representation of a to w and returns
// the first non-nil error encountered while writing to w.
func (ic *irCache) Encode(w io.Writer, a Artifact) (err error) {
return ic.encode(w, a, nil)
}
// encode implements Encode but replaces identifiers with their cured checksums
// for a non-nil ident. Caller must acquire Cache.identMu.
func (ic *irCache) encode(
w io.Writer,
a Artifact,
inputs map[Artifact]cureRes,
) (err error) {
deps := a.Dependencies()
idents := make([]*extIdent, len(deps))
if inputs == nil {
for i, d := range deps {
dbuf, did := ic.unsafeIdent(d, true)
if dbuf == nil {
dbuf = ic.getIdentBuf()
binary.LittleEndian.PutUint64(dbuf[:], uint64(d.Kind()))
*(*ID)(dbuf[wordSize:]) = did.Value()
} else {
ic.storeIdent(d, dbuf)
}
defer ic.putIdentBuf(dbuf)
idents[i] = dbuf
}
} else {
for i, d := range deps {
res, ok := inputs[d]
if !ok {
return InvalidLookupError(ic.Ident(d).Value())
}
dbuf := ic.getIdentBuf()
for i, d := range deps {
dbuf, did := ic.unsafeIdent(d, true)
if dbuf == nil {
dbuf = ic.getIdentBuf()
binary.LittleEndian.PutUint64(dbuf[:], uint64(d.Kind()))
*(*ID)(dbuf[wordSize:]) = res.checksum.Value()
defer ic.putIdentBuf(dbuf)
idents[i] = dbuf
*(*ID)(dbuf[wordSize:]) = did.Value()
} else {
ic.storeIdent(d, dbuf)
}
defer ic.putIdentBuf(dbuf)
idents[i] = dbuf
}
slices.SortFunc(idents, func(a, b *extIdent) int {
return bytes.Compare(a[:], b[:])
@@ -280,7 +244,7 @@ func (ic *irCache) encode(
}
func() {
i := IContext{ic, w, inputs}
i := IContext{ic, w}
defer panicToError(&err)
defer func() { i.ic, i.w = nil, nil }()
@@ -468,12 +432,6 @@ func (e InvalidKindError) Error() string {
// register is not safe for concurrent use. register must not be called after
// the first instance of [Cache] has been opened.
func register(k Kind, f IRReadFunc) {
openMu.Lock()
defer openMu.Unlock()
if opened {
panic("attempting to register after open")
}
if _, ok := irArtifact[k]; ok {
panic("attempting to register " + strconv.Itoa(int(k)) + " twice")
}

View File

@@ -3,7 +3,6 @@ package pkg_test
import (
"bytes"
"io"
"io/fs"
"reflect"
"testing"
@@ -39,7 +38,7 @@ func TestIRRoundtrip(t *testing.T) {
)},
{"exec offline", pkg.NewExec(
"exec-offline", "", nil, 0, false, false,
"exec-offline", nil, 0, false,
pkg.AbsWork,
[]string{"HAKUREI_TEST=1"},
check.MustAbs("/opt/bin/testtool"),
@@ -59,9 +58,9 @@ func TestIRRoundtrip(t *testing.T) {
)},
{"exec net", pkg.NewExec(
"exec-net", "",
"exec-net",
(*pkg.Checksum)(bytes.Repeat([]byte{0xfc}, len(pkg.Checksum{}))),
0, false, false,
0, false,
pkg.AbsWork,
[]string{"HAKUREI_TEST=1"},
check.MustAbs("/opt/bin/testtool"),
@@ -80,28 +79,6 @@ func TestIRRoundtrip(t *testing.T) {
)),
)},
{"exec measured", pkg.NewExec(
"exec-measured", "",
(*pkg.Checksum)(bytes.Repeat([]byte{0xfd}, len(pkg.Checksum{}))),
0, false, false,
pkg.AbsWork,
[]string{"HAKUREI_TEST=1"},
check.MustAbs("/opt/bin/testtool"),
[]string{"testtool", "measured"},
pkg.MustPath("/file", false, pkg.NewFile("file", []byte(
"stub file",
))), pkg.MustPath("/.hakurei", false, pkg.NewHTTPGetTar(
nil, "file:///hakurei.tar",
pkg.Checksum(bytes.Repeat([]byte{0xfd}, len(pkg.Checksum{}))),
pkg.TarUncompressed,
)), pkg.MustPath("/opt", false, pkg.NewHTTPGetTar(
nil, "file:///testtool.tar.gz",
pkg.Checksum(bytes.Repeat([]byte{0xfd}, len(pkg.Checksum{}))),
pkg.TarGzip,
)),
)},
{"file anonymous", pkg.NewFile("", []byte{0})},
{"file", pkg.NewFile("stub", []byte("stub"))},
}
@@ -128,13 +105,9 @@ func TestIRRoundtrip(t *testing.T) {
if err := <-done; err != nil {
t.Fatalf("EncodeAll: error = %v", err)
}
}, expectsFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"identifier": {Mode: fs.ModeDir | 0700},
"substitute": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
},
}, pkg.MustDecode(
"E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C",
),
}
}
checkWithCache(t, testCasesCache)

View File

@@ -3,7 +3,6 @@ package pkg_test
import (
"crypto/sha512"
"io"
"io/fs"
"net/http"
"reflect"
"testing"
@@ -86,13 +85,7 @@ func TestHTTPGet(t *testing.T) {
if _, err := f.Cure(r); !reflect.DeepEqual(err, wantErrNotFound) {
t.Fatalf("Cure: error = %#v, want %#v", err, wantErrNotFound)
}
}, expectsFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"identifier": {Mode: fs.ModeDir | 0700},
"substitute": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C")},
{"cure", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
r := newRContext(t, c)
@@ -151,18 +144,6 @@ func TestHTTPGet(t *testing.T) {
if _, _, err := c.Cure(f); !reflect.DeepEqual(err, wantErrNotFound) {
t.Fatalf("Pathname: error = %#v, want %#v", err, wantErrNotFound)
}
}, expectsFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU": {Mode: 0400, Data: []byte("\x7f\xe1\x69\xa2\xdd\x63\x96\x26\x83\x79\x61\x8b\xf0\x3f\xd5\x16\x9a\x39\x3a\xdb\xcf\xb1\xbc\x8d\x33\xff\x75\xee\x62\x56\xa9\xf0\x27\xac\x13\x94\x69")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/oM-2pUlk-mOxK1t3aMWZer69UdOQlAXiAgMrpZ1476VoOqpYVP1aGFS9_HYy-D8_": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU")},
"substitute": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
}, pkg.MustDecode("L_0RFHpr9JUS4Zp14rz2dESSRvfLzpvqsLhR1-YjQt8hYlmEdVl7vI3_-v8UNPKs")},
})
}

View File

@@ -4,7 +4,6 @@ package pkg
import (
"bufio"
"bytes"
"cmp"
"context"
"crypto/sha512"
"encoding/base64"
@@ -16,17 +15,14 @@ import (
"io/fs"
"maps"
"os"
"os/signal"
"path/filepath"
"runtime"
"slices"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"testing"
"time"
"unique"
"unsafe"
@@ -74,64 +70,6 @@ func MustDecode(s string) (checksum Checksum) {
return
}
var (
// extension is a string uniquely identifying a set of custom [Artifact]
// implementations registered by calling [Register].
extension string
// openMu synchronises access to global state for initialisation.
openMu sync.Mutex
// opened is false if [Open] was never called.
opened bool
)
// Extension returns a string uniquely identifying the currently registered set
// of custom [Artifact], or the zero value if none was registered.
func Extension() string { return extension }
// ValidExtension returns whether s is valid for use in a call to SetExtension.
func ValidExtension(s string) bool {
if l := len(s); l == 0 || l > 128 {
return false
}
for _, v := range s {
if v < 'a' || v > 'z' {
return false
}
}
return true
}
// ErrInvalidExtension is returned for a variant identification string for which
// [ValidExtension] returns false.
var ErrInvalidExtension = errors.New("invalid extension variant identification string")
// SetExtension sets the extension variant identification string. SetExtension
// must be called before [Open] if custom [Artifact] implementations had been
// recorded by calling [Register].
//
// The variant identification string must be between 1 and 128 bytes long and
// consists of only bytes between 'a' and 'z'.
//
// SetExtension is not safe for concurrent use. SetExtension is called at most
// once and must not be called after the first instance of Cache has been opened.
func SetExtension(s string) {
openMu.Lock()
defer openMu.Unlock()
if opened {
panic("attempting to set extension after open")
}
if extension != "" {
panic("attempting to set extension twice")
}
if !ValidExtension(s) {
panic(ErrInvalidExtension)
}
extension = s
statusHeader = makeStatusHeader(s)
}
// common holds elements and receives methods shared between different contexts.
type common struct {
// Context specific to this [Artifact]. The toplevel context in [Cache] must
@@ -164,27 +102,19 @@ type TContext struct {
common
}
// makeStatusHeader creates the header written to every status file. This should
// not be called directly, its result is stored in statusHeader and will not
// change after the first [Cache] is opened.
func makeStatusHeader(extension string) string {
// statusHeader is the header written to all status files in dirStatus.
var statusHeader = func() string {
s := programName
if v := info.Version(); v != info.FallbackVersion {
s += " " + v
}
if extension != "" {
s += " with " + extension + " extensions"
}
s += " (" + runtime.GOARCH + ")"
if name, err := os.Hostname(); err == nil {
s += " on " + name
}
s += "\n\n"
return s
}
// statusHeader is the header written to all status files in dirStatus.
var statusHeader = makeStatusHeader("")
}()
// prepareStatus initialises the status file once.
func (t *TContext) prepareStatus() error {
@@ -250,14 +180,7 @@ func (t *TContext) destroy(errP *error) {
*errP = errors.Join(*errP, err)
}
if *errP != nil {
*errP = errors.Join(*errP, os.Rename(
t.statusPath.String(), t.cache.base.Append(
dirFault,
t.ids+"."+strconv.FormatUint(uint64(
time.Now().UnixNano(),
), 10),
).String(),
))
*errP = errors.Join(*errP, os.Remove(t.statusPath.String()))
}
t.status = nil
}
@@ -504,9 +427,6 @@ const (
// KindFile is the kind of [Artifact] returned by [NewFile].
KindFile
// _kindEnd is the total number of kinds and does not denote a kind.
_kindEnd
// KindCustomOffset is the first [Kind] value reserved for implementations
// not from this package.
KindCustomOffset = 1 << 31
@@ -518,34 +438,30 @@ const (
)
const (
// fileLock is the lock file for exclusive access to the cache directory.
// fileLock is the file name appended to Cache.base for guaranteeing
// exclusive access to the cache directory.
fileLock = "lock"
// fileVariant is a file holding the variant identification string set by a
// prior call to [SetExtension].
fileVariant = "variant"
// dirSubstitute holds symlinks to artifacts by checksum, named after their
// substitute identifier.
dirSubstitute = "substitute"
// dirIdentifier holds symlinks to artifacts by checksum, named after their
// IR-based identifier.
// dirIdentifier is the directory name appended to Cache.base for storing
// artifacts named after their [ID].
dirIdentifier = "identifier"
// dirChecksum holds artifacts named after their [Checksum].
// dirChecksum is the directory name appended to Cache.base for storing
// artifacts named after their [Checksum].
dirChecksum = "checksum"
// dirStatus holds artifact metadata and logs named after their IR-based
// identifier. For [FloodArtifact], the same file is also available under
// its substitute identifier.
// dirStatus is the directory name appended to Cache.base for storing
// artifact metadata and logs named after their [ID].
dirStatus = "status"
// dirFault holds status files of faulted cures.
dirFault = "fault"
// dirWork holds working pathnames set up during [Cache.Cure].
// dirWork is the directory name appended to Cache.base for working
// pathnames set up during [Cache.Cure].
dirWork = "work"
// dirTemp holds scratch space allocated during [Cache.Cure].
// dirTemp is the directory name appended to Cache.base for scratch space
// pathnames allocated during [Cache.Cure].
dirTemp = "temp"
// dirExecScratch is scratch space set up for the container started by
// [Cache.EnterExec]. Exclusivity via Cache.inExec.
// dirExecScratch is the directory name appended to Cache.base for scratch
// space setting up the container started by [Cache.EnterExec]. Exclusivity
// via Cache.inExec.
dirExecScratch = "scratch"
// checksumLinknamePrefix is prepended to the encoded [Checksum] value
@@ -624,17 +540,6 @@ const (
// impurity due to [KindExecNet] being [KnownChecksum]. This flag exists
// to support kernels without Landlock LSM enabled.
CHostAbstract
// CPromoteVariant allows [pkg.Open] to promote an unextended on-disk cache
// to the current extension variant. This is a one-way operation.
CPromoteVariant
// CSuppressInit arranges for verbose output of the container init to be
// suppressed regardless of [message.Msg] state.
CSuppressInit
// CIgnoreSubstitutes disables content-based dependency substitution.
CIgnoreSubstitutes
)
// toplevel holds [context.WithCancel] over caller-supplied context, where all
@@ -690,11 +595,6 @@ type Cache struct {
// Synchronises access to dirChecksum.
checksumMu sync.RWMutex
// Presence of an alternative in the cache. Keys are not valid identifiers
// and must not be used as such.
substitute map[unique.Handle[ID]]unique.Handle[Checksum]
// Synchronises access to substitute and corresponding filesystem entries.
substituteMu sync.RWMutex
// Identifier to content pair cache.
ident map[unique.Handle[ID]]unique.Handle[Checksum]
// Identifier to error pair for unrecoverably faulted [Artifact].
@@ -905,14 +805,11 @@ func (c *Cache) Scrub(checks int) error {
checks = runtime.NumCPU()
}
c.substituteMu.Lock()
defer c.substituteMu.Unlock()
c.identMu.Lock()
defer c.identMu.Unlock()
c.checksumMu.Lock()
defer c.checksumMu.Unlock()
c.substitute = make(map[unique.Handle[ID]]unique.Handle[Checksum])
c.ident = make(map[unique.Handle[ID]]unique.Handle[Checksum])
c.identErr = make(map[unique.Handle[ID]]error)
c.artifact.Clear()
@@ -1020,52 +917,47 @@ func (c *Cache) Scrub(checks int) error {
wg.Wait()
}
for _, suffix := range []string{
dirSubstitute,
dirIdentifier,
} {
dir = c.base.Append(suffix)
if entries, readdirErr := os.ReadDir(dir.String()); readdirErr != nil {
addErr(dir, readdirErr)
} else {
wg.Add(len(entries))
for _, ent := range entries {
w <- checkEntry{ent, func(ent os.DirEntry, want *Checksum) bool {
got := p.Get().(*Checksum)
defer p.Put(got)
dir = c.base.Append(dirIdentifier)
if entries, readdirErr := os.ReadDir(dir.String()); readdirErr != nil {
addErr(dir, readdirErr)
} else {
wg.Add(len(entries))
for _, ent := range entries {
w <- checkEntry{ent, func(ent os.DirEntry, want *Checksum) bool {
got := p.Get().(*Checksum)
defer p.Put(got)
pathname := dir.Append(ent.Name())
if linkname, err := os.Readlink(
pathname.String(),
); err != nil {
seMu.Lock()
se.Errs[pathname.Handle()] = append(se.Errs[pathname.Handle()], err)
se.DanglingIdentifiers = append(se.DanglingIdentifiers, *want)
seMu.Unlock()
return false
} else if err = Decode(got, filepath.Base(linkname)); err != nil {
seMu.Lock()
lnp := dir.Append(linkname)
se.Errs[lnp.Handle()] = append(se.Errs[lnp.Handle()], err)
se.DanglingIdentifiers = append(se.DanglingIdentifiers, *want)
seMu.Unlock()
return false
}
pathname := dir.Append(ent.Name())
if linkname, err := os.Readlink(
pathname.String(),
); err != nil {
seMu.Lock()
se.Errs[pathname.Handle()] = append(se.Errs[pathname.Handle()], err)
se.DanglingIdentifiers = append(se.DanglingIdentifiers, *want)
seMu.Unlock()
return false
} else if err = Decode(got, filepath.Base(linkname)); err != nil {
seMu.Lock()
lnp := dir.Append(linkname)
se.Errs[lnp.Handle()] = append(se.Errs[lnp.Handle()], err)
se.DanglingIdentifiers = append(se.DanglingIdentifiers, *want)
seMu.Unlock()
return false
}
if _, err := os.Stat(pathname.String()); err != nil {
if !errors.Is(err, os.ErrNotExist) {
addErr(pathname, err)
}
seMu.Lock()
se.DanglingIdentifiers = append(se.DanglingIdentifiers, *want)
seMu.Unlock()
return false
if _, err := os.Stat(pathname.String()); err != nil {
if !errors.Is(err, os.ErrNotExist) {
addErr(pathname, err)
}
return true
}}
}
wg.Wait()
seMu.Lock()
se.DanglingIdentifiers = append(se.DanglingIdentifiers, *want)
seMu.Unlock()
return false
}
return true
}}
}
wg.Wait()
}
dir = c.base.Append(dirStatus)
@@ -1213,52 +1105,6 @@ func (c *Cache) finaliseIdent(
close(done)
}
// zeroChecksum is a zero [Checksum] handle, used for comparison only.
var zeroChecksum unique.Handle[Checksum]
// loadSubstitute returns a checksum corresponding to a substitute identifier,
// or zeroChecksum if an alternative is not available.
func (c *Cache) loadSubstitute(
substitute unique.Handle[ID],
) (unique.Handle[Checksum], error) {
c.substituteMu.RLock()
if checksum, ok := c.substitute[substitute]; ok {
c.substituteMu.RUnlock()
return checksum, nil
}
linkname, err := os.Readlink(c.base.Append(
dirSubstitute,
Encode(substitute.Value()),
).String())
c.substituteMu.RUnlock()
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
return zeroChecksum, err
}
c.substituteMu.Lock()
c.substitute[substitute] = zeroChecksum
c.substituteMu.Unlock()
return zeroChecksum, nil
}
var checksum unique.Handle[Checksum]
buf := c.getIdentBuf()
err = Decode((*Checksum)(buf[:]), filepath.Base(linkname))
if err == nil {
checksum = unique.Make(Checksum(buf[:]))
c.substituteMu.Lock()
c.substitute[substitute] = checksum
c.substituteMu.Unlock()
}
c.putIdentBuf(buf)
return checksum, err
}
// Done returns a channel that is closed when the ongoing cure of an [Artifact]
// referred to by the specified identifier completes. Done may return nil if
// no ongoing cure of the specified identifier exists.
@@ -1493,8 +1339,8 @@ func (c *Cache) Cure(a Artifact) (
// CureError wraps a non-nil error returned attempting to cure an [Artifact].
type CureError struct {
A Artifact
Err error
Ident unique.Handle[ID]
Err error
}
// Unwrap returns the underlying error.
@@ -1507,63 +1353,40 @@ func (e *CureError) Error() string { return e.Err.Error() }
type DependencyCureError []*CureError
// unwrapM recursively expands underlying errors into a caller-supplied map.
func (e *DependencyCureError) unwrapM(
ctx context.Context,
ir *IRCache,
me map[unique.Handle[ID]]*CureError,
) {
func (e *DependencyCureError) unwrapM(me map[unique.Handle[ID]]*CureError) {
for _, err := range *e {
if ctx.Err() != nil {
break
}
id := ir.Ident(err.A)
if _, ok := me[id]; ok {
if _, ok := me[err.Ident]; ok {
continue
}
if _e, ok := err.Err.(*DependencyCureError); ok {
_e.unwrapM(ctx, ir, me)
_e.unwrapM(me)
continue
}
me[id] = err
me[err.Ident] = err
}
}
// unwrap recursively expands and deduplicates underlying errors.
func (e *DependencyCureError) unwrap(
ctx context.Context,
ir *IRCache,
) DependencyCureError {
func (e *DependencyCureError) unwrap() DependencyCureError {
me := make(map[unique.Handle[ID]]*CureError)
e.unwrapM(ctx, ir, me)
type ent struct {
id unique.Handle[ID]
err *CureError
}
errs := make([]*ent, 0, len(me))
for id, err := range me {
errs = append(errs, &ent{id, err})
}
e.unwrapM(me)
errs := slices.AppendSeq(
make(DependencyCureError, 0, len(me)),
maps.Values(me),
)
var identBuf [2]ID
slices.SortFunc(errs, func(a, b *ent) int {
identBuf[0], identBuf[1] = a.id.Value(), b.id.Value()
slices.SortFunc(errs, func(a, b *CureError) int {
identBuf[0], identBuf[1] = a.Ident.Value(), b.Ident.Value()
return slices.Compare(identBuf[0][:], identBuf[1][:])
})
_errs := make(DependencyCureError, len(errs))
for i, v := range errs {
_errs[i] = v.err
}
return _errs
return errs
}
// Unwrap returns a deduplicated slice of underlying errors.
func (e *DependencyCureError) Unwrap() []error {
ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt)
defer cancel()
errs := e.unwrap(ctx, NewIR())
errs := e.unwrap()
_errs := make([]error, len(errs))
for i, err := range errs {
_errs[i] = err
@@ -1573,23 +1396,14 @@ func (e *DependencyCureError) Unwrap() []error {
// Error returns a user-facing multiline error message.
func (e *DependencyCureError) Error() string {
ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt)
defer cancel()
ir := NewIR()
errs := e.unwrap(ctx, ir)
errs := e.unwrap()
if len(errs) == 0 {
return "invalid dependency cure outcome"
}
var buf strings.Builder
buf.WriteString("errors curing dependencies:")
for _, err := range errs {
buf.WriteString("\n\t" +
reportName(err.A, ir.Ident(err.A)) + ": " +
err.Error())
}
if ctx.Err() != nil {
buf.WriteString("\nerror resolution cancelled")
buf.WriteString("\n\t" + Encode(err.Ident.Value()) + ": " + err.Error())
}
return buf.String()
}
@@ -1759,44 +1573,16 @@ func (c *Cache) cure(a Artifact, curesExempt bool) (
return
}
var (
checksums string
substitute unique.Handle[ID]
alternative *check.Absolute
)
var checksums string
defer func() {
if err == nil && checksums != "" {
linkname := checksumLinknamePrefix + checksums
err = os.Symlink(
linkname,
checksumLinknamePrefix+checksums,
pathname.String(),
)
if err == nil {
err = zeroTimes(pathname.String())
}
if err == nil && alternative != nil {
c.substituteMu.Lock()
err = os.Symlink(
linkname,
alternative.String(),
)
if errors.Is(err, os.ErrExist) {
c.msg.Verbosef(
"creating alternative over %s for artifact %s",
Encode(substitute.Value()), ids,
)
err = nil
}
if err == nil {
err = zeroTimes(alternative.String())
}
if err == nil && checksum != zeroChecksum {
c.substitute[substitute] = checksum
}
c.substituteMu.Unlock()
}
}
}()
@@ -1993,64 +1779,11 @@ func (c *Cache) cure(a Artifact, curesExempt bool) (
f.deps[deps[i]] = p
}
sh := sha512.New384()
err = c.encode(sh, a, f.deps)
if err != nil {
return
}
buf := c.getIdentBuf()
sh.Sum(buf[wordSize:wordSize])
substitute = unique.Make(ID(buf[wordSize:]))
substitutes := Encode(substitute.Value())
c.putIdentBuf(buf)
alternative = c.base.Append(
dirSubstitute,
substitutes,
)
if c.flags&CIgnoreSubstitutes == 0 {
var substituteChecksum unique.Handle[Checksum]
substituteChecksum, err = c.loadSubstitute(substitute)
if err != nil {
return
}
if substituteChecksum != zeroChecksum {
checksum = substituteChecksum
checksums = Encode(checksum.Value())
checksumPathname = c.base.Append(
dirChecksum,
checksums,
)
if _, err = os.Lstat(c.base.Append(
dirStatus,
substitutes,
).String()); err == nil {
err = os.Symlink(substitutes, c.base.Append(
dirStatus,
ids,
).String())
} else if errors.Is(err, os.ErrNotExist) {
err = nil
}
return
}
}
defer f.destroy(&err)
if err = c.enterCure(a, curesExempt); err != nil {
return
}
err = ca.Cure(&f)
if err == nil && f.status != nil {
err = os.Link(c.base.Append(
dirStatus,
ids,
).String(), c.base.Append(
dirStatus,
substitutes,
).String())
}
c.exitCure(a, curesExempt)
if err != nil {
return
@@ -2143,7 +1876,7 @@ func (pending *pendingArtifactDep) cure(c *Cache) {
}
pending.errsMu.Lock()
*pending.errs = append(*pending.errs, &CureError{pending.a, err})
*pending.errs = append(*pending.errs, &CureError{c.Ident(pending.a), err})
pending.errsMu.Unlock()
}
@@ -2159,52 +1892,6 @@ func (c *Cache) OpenStatus(a Artifact) (r io.ReadSeekCloser, err error) {
return
}
// Fault holds the pathname and termination time of an [Artifact] fault entry.
type Fault struct {
*check.Absolute
t uint64
}
// Time returns the instant in time where the fault occurred.
func (f Fault) Time() time.Time { return time.Unix(0, int64(f.t)) }
// Open opens the underlying entry for reading.
func (f Fault) Open() (io.ReadCloser, error) { return os.Open(f.Absolute.String()) }
// Destroy removes the underlying fault entry.
func (f Fault) Destroy() error { return os.Remove(f.Absolute.String()) }
// ReadFaults returns fault entries for an [Artifact].
func (c *Cache) ReadFaults(a Artifact) (faults []Fault, err error) {
prefix := Encode(c.Ident(a).Value()) + "."
var dents []os.DirEntry
if dents, err = os.ReadDir(c.base.Append(dirFault).String()); err != nil {
return
}
for _, dent := range dents {
name := dent.Name()
if !strings.HasPrefix(name, prefix) {
continue
}
var t uint64
t, err = strconv.ParseUint(name[len(prefix):], 10, 64)
if err != nil {
return
}
faults = append(faults, Fault{c.base.Append(
dirFault,
name,
), t})
}
slices.SortFunc(faults, func(a, b Fault) int {
return cmp.Compare(a.t, b.t)
})
return
}
// Abort cancels all pending cures and waits for them to clean up, but does not
// close the cache.
func (c *Cache) Abort() {
@@ -2243,20 +1930,6 @@ func (c *Cache) Close() {
c.unlock()
}
// UnsupportedVariantError describes an on-disk cache with an extension variant
// identification string that differs from the value returned by [Extension].
type UnsupportedVariantError string
func (e UnsupportedVariantError) Error() string {
return "unsupported variant " + strconv.Quote(string(e))
}
var (
// ErrWouldPromote is returned by [Open] if the [CPromoteVariant] bit is not
// set and the on-disk cache requires variant promotion.
ErrWouldPromote = errors.New("operation would promote unextended cache")
)
// Open returns the address of a newly opened instance of [Cache].
//
// Concurrent cures of a [FloodArtifact] dependency graph is limited to the
@@ -2288,14 +1961,6 @@ func open(
base *check.Absolute,
lock bool,
) (*Cache, error) {
openMu.Lock()
defer openMu.Unlock()
opened = true
if extension == "" && len(irArtifact) != int(_kindEnd) {
panic("attempting to open cache with incomplete variant setup")
}
if cures < 1 {
cures = runtime.NumCPU()
}
@@ -2304,17 +1969,13 @@ func open(
}
for _, name := range []string{
dirSubstitute,
dirIdentifier,
dirChecksum,
dirStatus,
dirFault,
dirWork,
} {
if err := os.MkdirAll(
base.Append(name).String(),
0700,
); err != nil && !errors.Is(err, os.ErrExist) {
if err := os.MkdirAll(base.Append(name).String(), 0700); err != nil &&
!errors.Is(err, os.ErrExist) {
return nil, err
}
}
@@ -2331,7 +1992,6 @@ func open(
irCache: zeroIRCache(),
substitute: make(map[unique.Handle[ID]]unique.Handle[Checksum]),
ident: make(map[unique.Handle[ID]]unique.Handle[Checksum]),
identErr: make(map[unique.Handle[ID]]error),
identPending: make(map[unique.Handle[ID]]*pendingCure),
@@ -2353,45 +2013,6 @@ func open(
c.unlock = func() {}
}
variantPath := base.Append(fileVariant).String()
if p, err := os.ReadFile(variantPath); err != nil {
if !errors.Is(err, os.ErrNotExist) {
c.unlock()
return nil, err
}
// nonexistence implies newly created cache, or a cache predating
// variant identification strings, in which case it is silently promoted
if err = os.WriteFile(
variantPath,
[]byte(extension),
0400,
); err != nil {
c.unlock()
return nil, err
}
} else if s := string(p); s == "" {
if extension != "" {
if flags&CPromoteVariant == 0 {
c.unlock()
return nil, ErrWouldPromote
}
if err = os.WriteFile(
variantPath,
[]byte(extension),
0400,
); err != nil {
c.unlock()
return nil, err
}
}
} else if !ValidExtension(s) {
c.unlock()
return nil, ErrInvalidExtension
} else if s != extension {
c.unlock()
return nil, UnsupportedVariantError(s)
}
return &c, nil
}

File diff suppressed because it is too large Load Diff

View File

@@ -20,31 +20,6 @@ import (
func TestTar(t *testing.T) {
t.Parallel()
want := expectsFS{
".": {Mode: fs.ModeDir | 0500},
"checksum": {Mode: fs.ModeDir | 0500},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP": {Mode: fs.ModeDir | 0500},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check": {Mode: 0400, Data: []byte{0, 0}},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib": {Mode: fs.ModeDir | 0500},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig": {Mode: fs.ModeDir | 0500},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
"identifier": {Mode: fs.ModeDir | 0500},
"identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"work": {Mode: fs.ModeDir | 0500},
}
wantEncode := pkg.Encode(want.hash())
wantExpand := expectsFS{
".": {Mode: fs.ModeDir | 0500},
"libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
}
wantExpandEncode := pkg.Encode(wantExpand.hash())
checkWithCache(t, []cacheTestCase{
{"http", 0, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
checkTarHTTP(t, base, c, fstest.MapFS{
@@ -62,32 +37,10 @@ func TestTar(t *testing.T) {
"identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"work": {Mode: fs.ModeDir | 0700},
}, want)
}, expectsFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/" + wantEncode: {Mode: fs.ModeDir | 0500},
"checksum/" + wantEncode + "/checksum": {Mode: fs.ModeDir | 0500},
"checksum/" + wantEncode + "/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP": {Mode: fs.ModeDir | 0500},
"checksum/" + wantEncode + "/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check": {Mode: 0400, Data: []byte{0, 0}},
"checksum/" + wantEncode + "/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib": {Mode: fs.ModeDir | 0500},
"checksum/" + wantEncode + "/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
"checksum/" + wantEncode + "/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig": {Mode: fs.ModeDir | 0500},
"checksum/" + wantEncode + "/identifier": {Mode: fs.ModeDir | 0500},
"checksum/" + wantEncode + "/identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"checksum/" + wantEncode + "/identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"checksum/" + wantEncode + "/work": {Mode: fs.ModeDir | 0500},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/W5S65DEhawz_WKaok5NjUKLmnD9dNl5RPauNJjcOVcB3VM4eGhSaLGmXbL8vZpiw": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantEncode)},
"identifier/rg7F1D5hwv6o4xctjD5zDq4i5MD0mArTsUIWfhUbik8xC6Bsyt3mjXXOm3goojTz": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantEncode)},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
}, pkg.MustDecode(
"cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM",
))
}, pkg.MustDecode("NQTlc466JmSVLIyWklm_u8_g95jEEb98PxJU-kjwxLpfdjwMWJq0G8ze9R4Vo1Vu")},
{"http expand", 0, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
checkTarHTTP(t, base, c, fstest.MapFS{
@@ -95,23 +48,10 @@ func TestTar(t *testing.T) {
"lib": {Mode: fs.ModeDir | 0700},
"lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
}, wantExpand)
}, expectsFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/" + wantExpandEncode: {Mode: fs.ModeDir | 0500},
"checksum/" + wantExpandEncode + "/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/W5S65DEhawz_WKaok5NjUKLmnD9dNl5RPauNJjcOVcB3VM4eGhSaLGmXbL8vZpiw": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantExpandEncode)},
"identifier/_v1blm2h-_KA-dVaawdpLas6MjHc6rbhhFS8JWwx8iJxZGUu8EBbRrhr5AaZ9PJL": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantExpandEncode)},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
}, pkg.MustDecode(
"CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN",
))
}, pkg.MustDecode("hSoSSgCYTNonX3Q8FjvjD1fBl-E-BQyA6OTXro2OadXqbST4tZ-akGXszdeqphRe")},
})
}
@@ -120,7 +60,7 @@ func checkTarHTTP(
base *check.Absolute,
c *pkg.Cache,
testdataFsys fs.FS,
want expectsKnown,
wantChecksum pkg.Checksum,
) {
var testdata string
{
@@ -254,24 +194,24 @@ func checkTarHTTP(
{"file", a, base.Append(
"identifier",
pkg.Encode(wantIdent),
), want, nil},
), wantChecksum, nil},
{"directory", pkg.NewTar(
&tarDir,
pkg.TarGzip,
), ignorePathname, want, nil},
), ignorePathname, wantChecksum, nil},
{"multiple entries", pkg.NewTar(
&tarDirMulti,
pkg.TarGzip,
), nil, nil, errors.New(
), nil, pkg.Checksum{}, errors.New(
"input directory does not contain a single regular file",
)},
{"bad type", pkg.NewTar(
&tarDirType,
pkg.TarGzip,
), nil, nil, errors.New(
), nil, pkg.Checksum{}, errors.New(
"input directory does not contain a single regular file",
)},
@@ -281,6 +221,6 @@ func checkTarHTTP(
cure: func(t *pkg.TContext) error {
return stub.UniqueError(0xcafe)
},
}, pkg.TarGzip), nil, nil, stub.UniqueError(0xcafe)},
}, pkg.TarGzip), nil, pkg.Checksum{}, stub.UniqueError(0xcafe)},
})
}

View File

@@ -14,29 +14,15 @@ import (
"strconv"
"strings"
"hakurei.app/check"
"hakurei.app/fhs"
"hakurei.app/vfs"
"hakurei.app/internal/pkg/internal/testtool/expected"
)
func main() {
log.SetFlags(0)
log.SetPrefix("testtool: ")
if os.Getenv("HAKUREI_BINFMT") == "1" {
wantArgs := []string{"/interpreter", "/opt/bin/sample"}
if !slices.Equal(os.Args, wantArgs) {
log.Fatalf("Args: %q, want %q", os.Args, wantArgs)
}
if err := os.WriteFile("check", []byte("binfmt"), 0400); err != nil {
log.Fatal(err)
}
return
}
environ := slices.DeleteFunc(slices.Clone(os.Environ()), func(s string) bool {
return s == "CURE_JOBS="+strconv.Itoa(runtime.NumCPU())
})
@@ -162,40 +148,59 @@ func main() {
}
const checksumEmptyDir = "MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"
ident := expected.Offline
ident := "dztPS6jRjiZtCF4_p8AzfnxGp6obkhrgFVsxdodbKWUoAEVtDz3MykepJB4kI_ks"
log.Println(m)
next := func() { m = m.Next; log.Println(m) }
if overlayRoot {
ident = expected.OvlRoot
ident = "RdMA-mubnrHuu3Ky1wWyxauSYCO0ZH_zCPUj3uDHqkfwv5sGcByoF_g5PjlGiClb"
if m.Root != "/" || m.Target != "/" ||
m.Source != "overlay" || m.FsType != "overlay" {
log.Fatal("unexpected root mount entry")
}
var lowerdir []string
var lowerdir string
for _, o := range strings.Split(m.FsOptstr, ",") {
const lowerdirKey = "lowerdir+="
const lowerdirKey = "lowerdir="
if strings.HasPrefix(o, lowerdirKey) {
lowerdir = append(lowerdir, o[len(lowerdirKey):])
lowerdir = o[len(lowerdirKey):]
}
}
if !layers {
if len(lowerdir) != 1 || filepath.Base(lowerdir[0]) != checksumEmptyDir {
if filepath.Base(lowerdir) != checksumEmptyDir {
log.Fatal("unexpected artifact checksum")
}
} else {
ident = expected.Layers
ident = "p1t_drXr34i-jZNuxDMLaMOdL6tZvQqhavNafGynGqxOZoXAUTSn7kqNh3Ovv3DT"
if len(lowerdir) != 2 ||
filepath.Base(lowerdir[0]) != "MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU" ||
filepath.Base(lowerdir[1]) != "nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK" {
log.Fatalf("unexpected lowerdirs %s", strings.Join(lowerdir, ", "))
lowerdirsEscaped := strings.Split(lowerdir, ":")
lowerdirs := lowerdirsEscaped[:0]
// ignore the option separator since it does not appear in ident
for i, e := range lowerdirsEscaped {
if len(e) > 0 &&
e[len(e)-1] == check.SpecialOverlayEscape[0] &&
(len(e) == 1 || e[len(e)-2] != check.SpecialOverlayEscape[0]) {
// ignore escaped pathname separator since it does not
// appear in ident
e = e[:len(e)-1]
if len(lowerdirsEscaped) != i {
lowerdirsEscaped[i+1] = e + lowerdirsEscaped[i+1]
continue
}
}
lowerdirs = append(lowerdirs, e)
}
if len(lowerdirs) != 2 ||
filepath.Base(lowerdirs[0]) != "MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU" ||
filepath.Base(lowerdirs[1]) != "nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK" {
log.Fatalf("unexpected lowerdirs %s", strings.Join(lowerdirs, ", "))
}
}
} else {
if hostNet {
ident = expected.Net
ident = "G8qPxD9puvvoOVV7lrT80eyDeIl3G_CCFoKw12c8mCjMdG1zF7NEPkwYpNubClK3"
}
if m.Root != "/sysroot" || m.Target != "/" {
@@ -214,14 +219,14 @@ func main() {
}
if promote {
ident = expected.Promote
ident = "xXTIYcXmgJWNLC91c417RRrNM9cjELwEZHpGvf8Fk_GNP5agRJp_SicD0w9aMeLJ"
}
next() // testtool artifact
next()
if overlayWork {
ident = expected.Work
ident = "5hlaukCirnXE4W_RSLJFOZN47Z5RiHnacXzdFp_70cLgiJUGR6cSb_HaFftkzi0-"
if m.Root != "/" || m.Target != "/work" ||
m.Source != "overlay" || m.FsType != "overlay" {
log.Fatal("unexpected work mount entry")

View File

@@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"net/http"
"runtime"
"strconv"
"sync"
@@ -17,7 +16,9 @@ import (
type PArtifact int
const (
LLVM PArtifact = iota
CompilerRT PArtifact = iota
LLVMRuntimes
Clang
// EarlyInit is the Rosa OS init program.
EarlyInit
@@ -56,8 +57,6 @@ const (
Fakeroot
Findutils
Flex
FontUtil
Freetype
Fuse
GMP
GLib
@@ -73,63 +72,35 @@ const (
Gzip
Hakurei
HakureiDist
Hwdata
IPTables
Kmod
LIT
LibX11
LibXau
LibXdmcp
LibXext
LibXfixes
LibXfont2
LibXrandr
LibXrender
LibXxf86vm
Libarchive
Libbsd
Libcap
Libconfig
LibdisplayInfo
Libclc
Libdrm
Libepoxy
Libev
Libexpat
Libffi
Libfontenc
Libgd
Libglvnd
Libiconv
Libmd
Libmnl
Libnftnl
Libpciaccess
Libpng
Libnftnl
Libpsl
Libseccomp
Libtasn1
Libtirpc
Libtool
Libucontext
Libunistring
Libva
LibxcbRenderUtil
LibxcbUtil
LibxcbUtilImage
LibxcbUtilKeysyms
LibxcbUtilWM
Libxcvt
Libxkbfile
Libxml2
Libxshmfence
Libxslt
Libxtrans
LMSensors
M4
MPC
MPFR
Make
Mesa
Meson
Mksh
MuslFts
@@ -151,39 +122,28 @@ const (
PerlPodParser
PerlSGMLS
PerlTermReadKey
PerlTestCmd
PerlTextCharWidth
PerlTextWrapI18N
PerlUnicodeLineBreak
PerlYAMLTiny
Pixman
PkgConfig
Procps
Python
PythonFlitCore
PythonHatchling
PythonIniConfig
PythonMako
PythonMarkupSafe
PythonPackaging
PythonPathspec
PythonPluggy
PythonPyTest
PythonPyYAML
PythonPycparser
PythonPygments
PythonSetuptools
PythonSetuptoolsSCM
PythonTroveClassifiers
PythonVCSVersioning
PythonWheel
QEMU
Rdfind
Readline
Rsync
Sed
Setuptools
SPIRVHeaders
SPIRVLLVMTranslator
SPIRVTools
SquashfsTools
Strace
@@ -194,44 +154,25 @@ const (
toyboxEarly
Unzip
UtilLinux
VIM
Wayland
WaylandProtocols
XCB
XCBProto
XDGDBusProxy
XZ
Xkbcomp
XkeyboardConfig
XorgProto
Xserver
Xproto
Zlib
Zstd
// PresetUnexportedStart is the first unexported preset.
PresetUnexportedStart
stage0Dist = iota - 1
llvmSource
// earlyCompilerRT is an early, standalone compiler-rt installation for the
// standalone runtimes build.
//
// earlyCompilerRT must only be loaded by [LLVM].
earlyCompilerRT
// earlyRuntimes is an early, standalone installation of LLVM runtimes to
// work around the cmake build system leaking the system LLVM installation
// when invoking the newly built toolchain.
//
// earlyRuntimes must only be loaded by [LLVM].
earlyRuntimes
llvmSource = iota - 1
buildcatrust
utilMacros
// Musl is a standalone libc that does not depend on the toolchain.
Musl
// muslHeaders is a system installation of [Musl] headers.
muslHeaders
// gcc is a hacked-to-pieces GCC toolchain meant for use in intermediate
// stages only. This preset and its direct output must never be exposed.
@@ -376,40 +317,15 @@ var (
}
// artifactsOnce is for lazy initialisation of artifacts.
artifactsOnce [_toolchainEnd][len(artifactsM)]sync.Once
// arch is the target architecture.
arch = runtime.GOARCH
// presetOpts globally modifies behaviour of presets.
presetOpts int
)
const (
// OptSkipCheck skips running all test suites.
OptSkipCheck = 1 << iota
// OptLLVMNoLTO disables LTO in all [LLVM] stages.
OptLLVMNoLTO
)
// Arch returns the target architecture.
func Arch() string { return arch }
// Flags returns the current preset flags
func Flags() int { return presetOpts }
// zero zeros the value pointed to by p.
func zero[T any](p *T) { var v T; *p = v }
// DropCaches arranges for all cached [pkg.Artifact] to be freed some time after
// it returns. Must not be used concurrently with any other function from this
// package.
func DropCaches(targetArch string, flags int) {
if targetArch == "" {
targetArch = runtime.GOARCH
}
arch = targetArch
presetOpts = flags
func DropCaches() {
zero(&artifacts)
zero(&artifactsOnce)
}

View File

@@ -20,16 +20,13 @@ func TestLoad(t *testing.T) {
}
func BenchmarkAll(b *testing.B) {
arch, flags := rosa.Arch(), rosa.Flags()
b.Cleanup(func() { rosa.DropCaches(arch, flags) })
for b.Loop() {
for i := range rosa.PresetEnd {
rosa.Std.Load(rosa.PArtifact(i))
}
b.StopTimer()
rosa.DropCaches("", 0)
rosa.DropCaches()
b.StartTimer()
}
}

View File

@@ -1,386 +0,0 @@
// Package azalea implements a proof-of-concept, domain-specific language for
// Rosa OS software packaging.
package azalea
import (
"errors"
"io"
"strconv"
"text/scanner"
)
// idents are runes accepted in an identifier: ASCII digits, upper- and
// lower-case letters, '-' and '_'. The array is indexed directly by rune
// value; runes at or beyond len(idents) are rejected by the bounds check
// in the IsIdentRune callback installed by Parse.
var idents = [...]bool{
	'0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true,
	'7': true, '8': true, '9': true,
	'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true,
	'H': true, 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true,
	'O': true, 'P': true, 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true,
	'V': true, 'W': true, 'X': true, 'Y': true, 'Z': true,
	'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true,
	'h': true, 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true,
	'o': true, 'p': true, 'q': true, 'r': true, 's': true, 't': true, 'u': true,
	'v': true, 'w': true, 'x': true, 'y': true, 'z': true,
	'-': true, '_': true,
}
// TokenError describes an unexpected token.
type TokenError [2]rune
func (e TokenError) Error() string {
return "expected " + scanner.TokenString(e[0]) +
", found " + scanner.TokenString(e[1])
}
// ExprError is an unexpected token encountered while parsing an expression.
type ExprError rune
func (e ExprError) Error() string {
return "unexpected token " + scanner.TokenString(rune(e))
}
// must1 returns v, panicking if err is non-nil. It adapts fallible calls
// (strconv parsing, unquoting) to the panic-driven control flow of the
// parser; Parse recovers these panics into its error return.
func must1[T any](v T, err error) T {
	if err == nil {
		return v
	}
	panic(err)
}
// parser retains the current token alongside the underlying scanner, so
// parse methods can leave the token that terminated a sub-parse in tok
// for their caller to inspect.
type parser struct {
	// s is the underlying token scanner, configured by Parse.
	s scanner.Scanner
	// tok is the most recently scanned token.
	tok rune
}
// scan advances the underlying scanner to the next token, storing its result
// in p.tok and returning it.
func (p *parser) scan() rune { p.tok = p.s.Scan(); return p.tok }
// scanAs advances the scanner and panics with a TokenError unless the next
// token is exactly expects. The panic is recovered by Parse.
func (p *parser) scanAs(expects rune) {
	if found := p.scan(); found != expects {
		panic(TokenError{expects, found})
	}
}
// parseInt parses the current token as a base 10 representation of a 64-bit
// signed integer. Malformed or out-of-range input panics via must1; the
// panic is recovered by Parse.
func (p *parser) parseInt() int64 {
	return must1(strconv.ParseInt(p.s.TokenText(), 10, 64))
}
// A String represents an identifier or string literal.
type String struct {
	// Value is the text of the operand (unquoted for string literals).
	Value string
	// Ident reports whether Value was written as a bare identifier
	// rather than a quoted string.
	Ident bool
}

// A StringSpec describes a statement evaluating down to a string value:
// one or more String operands joined with the '+' operator.
type StringSpec []String
// parseString parses the current token as a string, unquoting it. An
// invalid literal panics via must1; the panic is recovered by Parse.
func (p *parser) parseString() string {
	return must1(strconv.Unquote(p.s.TokenText()))
}
// appendStringSpec parses from the next token until the end of the StringSpec.
// It always advances past the final token.
//
// Operands are appended to data. When op is true on entry, a '+' operator is
// required before the next operand and any other token terminates the spec
// (left in p.tok for the caller); when false, the first operand is consumed
// immediately. ok is false when the operand expected at the start of the spec
// (op == false) was missing.
func (p *parser) appendStringSpec(
	op bool,
	data StringSpec,
) (v StringSpec, ok bool) {
	ok = true
	v = data
	for {
		if op {
			// Concatenation continues only across '+'; anything else ends
			// the spec successfully with the terminator in p.tok.
			switch tok := p.scan(); tok {
			case '+':
				break
			default:
				return
			}
		}
		switch tok := p.scan(); tok {
		case scanner.String, scanner.RawString:
			v = append(v, String{p.parseString(), false})
		case scanner.Ident:
			v = append(v, String{p.s.TokenText(), true})
		default:
			// Failure (ok == false) is reported only when the missing
			// operand was the unconditional first one (op == false).
			// NOTE(review): after a consumed '+' (op == true) a missing
			// operand still returns ok == true — confirm this asymmetry
			// is intended for inputs like `"a"+`.
			ok = op
			return
		}
		op = true
	}
}
// A KV holds a key/value pair.
type KV struct {
	// K is the pair's key as written in source.
	K string
	// V is the parsed value; nil when the pair was terminated with ';'
	// without a value.
	V any
}

// An Arg represents an argument of [Func].
type Arg struct {
	// K holds one or more comma-separated argument keys.
	K []string
	// V is the parsed argument value.
	V any
	// R reports whether the argument was marked with '*' before '='.
	R bool
}

// Func is a function call or package declaration.
type Func struct {
	// Function or package identifier.
	Ident string
	// Whether this is a package declaration.
	Package bool
	// Key-value arguments.
	Args []Arg
}
// parseExpr scans and parses the current expression. A nil return indicates
// [scanner.EOF].
//
// The second return value reports whether the token following the expression
// has already been scanned into p.tok; when false the caller must scan (or
// scanAs the terminator) itself. Syntax errors panic with TokenError or
// ExprError; Parse recovers these into its error return.
func (p *parser) parseExpr() (any, bool) {
	switch p.tok {
	case scanner.EOF:
		return nil, false
	case scanner.Int:
		// Integer literal, e.g. `anitya = 6502;`.
		return p.parseInt(), false
	case scanner.String, scanner.RawString:
		// String literal, possibly the head of a '+' concatenation chain.
		v, ok := p.appendStringSpec(true, StringSpec{
			{p.parseString(), false},
		})
		if !ok {
			panic(TokenError{scanner.String, p.tok})
		}
		return v, true
	case scanner.Ident:
		var v Func
		v.Ident = p.s.TokenText()
		// `package NAME { ... }` is a Func with Package set; the real
		// identifier follows the keyword.
		if v.Package = v.Ident == "package"; v.Package {
			p.scanAs(scanner.Ident)
			v.Ident = p.s.TokenText()
		}
		p.scan()
		switch p.tok {
		case '{':
			// Function call / package body: `ident { k = expr; ... }`.
			for {
				p.scan()
				switch p.tok {
				case '}':
					return v, false
				case scanner.Ident:
					break
				default:
					panic(TokenError{scanner.Ident, p.tok})
				}
				var next bool
				arg := Arg{K: []string{p.s.TokenText()}}
				delim := true
			arg:
				// Collect additional comma-separated keys, e.g.
				// `amd64, arm64 = ...`. delim tracks whether a ','
				// separator is currently owed.
				for {
					p.scan()
					switch p.tok {
					case ',':
						if delim {
							delim = false
							continue
						}
						panic(ExprError(p.tok))
					case scanner.Ident:
						if delim {
							panic(TokenError{',', p.tok})
						}
						delim = true
						arg.K = append(arg.K, p.s.TokenText())
					default:
						break arg
					}
				}
				// A '*' before '=' marks the argument referenceable
				// (Arg.R), e.g. `version* = "16.1.0";`.
				switch p.tok {
				case '=':
					break
				case '*':
					arg.R = true
					p.scanAs('=')
				default:
					panic(TokenError{'=', p.tok})
				}
				p.scan()
				arg.V, next = p.parseExpr()
				v.Args = append(v.Args, arg)
				if !next {
					p.scanAs(';')
				}
			}
		case ';':
			// Bare identifier statement; the ';' is already consumed.
			return StringSpec{{v.Ident, true}}, true
		case '+':
			// Identifier heading a concatenation chain.
			s, ok := p.appendStringSpec(false, StringSpec{
				{v.Ident, true},
			})
			if !ok {
				panic(TokenError{scanner.String, p.tok})
			}
			return s, p.tok != scanner.EOF
		case scanner.EOF:
			return StringSpec{{v.Ident, true}}, false
		default:
			return StringSpec{{v.Ident, true}}, true
		}
	case '{':
		// Key/value block: `{ "k"; "k": expr; ... }`.
		var v []KV
		for {
			p.scan()
			switch p.tok {
			case '}':
				return v, false
			case scanner.String:
				pair := KV{K: p.parseString()}
				p.scan()
				switch p.tok {
				case ';':
					// Key without a value; pair.V stays nil.
					break
				case ':':
					var next bool
					p.scan()
					pair.V, next = p.parseExpr()
					if !next {
						p.scanAs(';')
					}
					break
				default:
					panic(ExprError(p.tok))
				}
				v = append(v, pair)
			default:
				panic(ExprError(p.tok))
			}
		}
	case '[':
		// Array: `[ expr, expr, ... ]`; a trailing ',' is permitted.
		var (
			v     []any
			e     any
			delim bool
			next  bool
		)
		for {
			// Skip scanning when the previous element's parse already
			// consumed the following token (next == true).
			if !next {
				p.scan()
			}
			switch p.tok {
			case ',':
				if delim {
					delim = false
					next = false
					continue
				}
				panic(ExprError(','))
			case ']':
				return v, false
			case scanner.EOF:
				panic(ExprError(scanner.EOF))
			default:
				if delim {
					panic(TokenError{',', p.tok})
				}
				delim = true
				break
			}
			e, next = p.parseExpr()
			v = append(v, e)
		}
	default:
		panic(ExprError(p.tok))
	}
}
// ScanError is the error count parsing all expressions.
type ScanError int

// Error returns a fixed message; the underlying count is carried in the
// value itself for callers that convert it back to int.
func (ScanError) Error() string {
	const message = "aborting due to scanning errors"
	return message
}
// Parse parses expressions from r until EOF, returning the parsed
// expressions in order.
//
// Parser panics carrying an error value (TokenError, ExprError, strconv
// failures via must1) are recovered into err; panics with non-error values
// are re-raised. If the scanner reported errors, err is (or is joined with)
// a ScanError holding the error count.
func Parse(r io.Reader) (e []any, err error) {
	var p parser
	p.s.Init(r)
	p.s.Mode = scanner.ScanIdents |
		scanner.ScanInts |
		scanner.ScanStrings |
		scanner.ScanRawStrings |
		scanner.ScanComments |
		scanner.SkipComments
	// Restrict identifiers to the runes in idents, disallowing a leading
	// digit (which would otherwise shadow integer literals).
	p.s.IsIdentRune = func(ch rune, i int) bool {
		if i == 0 && ch >= '0' && ch <= '9' {
			return false
		}
		return ch > 0 && ch < rune(len(idents)) && idents[ch]
	}
	// Convert parser panics into the error return; anything that is not
	// an error propagates as a genuine panic.
	defer func() {
		v := recover()
		if v == nil {
			return
		}
		_err, ok := v.(error)
		if !ok {
			panic(v)
		}
		if err == nil {
			err = _err
			return
		}
		err = errors.Join(err, _err)
	}()
	p.scan()
	for {
		expr, next := p.parseExpr()
		if expr == nil {
			break
		}
		e = append(e, expr)
		// parseExpr may have already consumed the token after the
		// expression; only scan when it has not.
		if !next {
			p.scan()
		}
	}
	if p.s.ErrorCount != 0 {
		err = ScanError(p.s.ErrorCount)
	}
	return
}

View File

@@ -1,149 +0,0 @@
package azalea_test
import (
_ "embed"
"reflect"
"strings"
"testing"
"text/scanner"
"hakurei.app/internal/rosa/azalea"
)
// sample is the contents of testdata/gcc.az, the end-to-end fixture for
// the "gcc" case in TestParse.
//
//go:embed testdata/gcc.az
var sample string
// TestParse exercises azalea.Parse against small inline programs covering
// each syntactic form and error path, plus the embedded gcc.az fixture as
// an end-to-end case. Expected values and errors are compared with
// reflect.DeepEqual.
func TestParse(t *testing.T) {
	t.Parallel()

	testCases := []struct {
		name string
		data string
		want []any
		err  error
	}{
		{"invalid", "}", nil, azalea.ExprError('}')},
		{"bad sep", "f{v?}", nil, azalea.TokenError{'=', '?'}},
		{"bad ident", "f{9}", nil, azalea.TokenError{scanner.Ident, scanner.Int}},
		{"share bad sep", "f { v,,v = v; }", nil, azalea.ExprError(',')},
		{"share missing sep", "f { v v }", nil, azalea.TokenError{',', scanner.Ident}},
		{"ident string", `v`, []any{azalea.StringSpec{
			{Value: "v", Ident: true},
		}}, nil},
		{"ident string concat", `v+"\xfd"`, []any{azalea.StringSpec{
			{Value: "v", Ident: true},
			{Value: "\xfd"},
		}}, nil},
		{"truncated string concat", `v+`, nil,
			azalea.TokenError{scanner.String, scanner.EOF}},
		{"unexpected string concat", `v+9`, nil,
			azalea.TokenError{scanner.String, scanner.Int}},
		{"empty pairs", `{}`, []any{[]azalea.KV(nil)}, nil},
		{"short kv", `{"\x00":v;}`, []any{[]azalea.KV{
			{K: "\x00", V: azalea.StringSpec{azalea.String{Value: "v", Ident: true}}},
		}}, nil},
		{"truncated kv", `{"\x00"`, nil, azalea.ExprError(scanner.EOF)},
		{"ident kv", `{v="";}`, nil, azalea.ExprError(scanner.Ident)},
		{"empty array", `[]`, []any{[]any(nil)}, nil},
		{"integer array", `[9]`, []any{[]any{int64(9)}}, nil},
		{"short array", `[ "\x00" ]`, []any{
			[]any{azalea.StringSpec{{Value: "\x00"}}},
		}, nil},
		{"short array delim", `[ "\x00", ]`, []any{
			[]any{azalea.StringSpec{{Value: "\x00"}}},
		}, nil},
		{"missing array value", `[ "\x00", , v ]`, nil, azalea.ExprError(',')},
		{"missing array delimiter", `[ v0 v1 ]`, nil, azalea.TokenError{',', scanner.Ident}},
		{"truncated array", `[ "\x00"`, nil,
			azalea.ExprError(scanner.EOF)},
		// End-to-end fixture: must mirror testdata/gcc.az exactly.
		{"gcc", sample, []any{azalea.Func{
			Ident:   "gcc",
			Package: true,
			Args: []azalea.Arg{
				{K: []string{"description"}, V: azalea.StringSpec{{Value: "The GNU Compiler Collection"}}},
				{K: []string{"website"}, V: azalea.StringSpec{{Value: "https://www.gnu.org/software/gcc"}}},
				{K: []string{"anitya"}, V: int64(6502)},
				{K: []string{"version"}, V: azalea.StringSpec{{Value: "16.1.0"}}, R: true},
				{K: []string{"source"}, V: azalea.Func{Ident: "remoteTar", Package: false, Args: []azalea.Arg{
					{K: []string{"url"}, V: azalea.StringSpec{
						{Value: "https://ftp.tsukuba.wide.ad.jp/software/gcc/releases/"},
						{Value: "gcc-"}, {Value: "version", Ident: true},
						{Value: "/gcc-"}, {Value: "version", Ident: true},
						{Value: ".tar.gz"},
					}},
					{K: []string{"checksum"}, V: azalea.StringSpec{
						{Value: "4ASoWbxaA2FW7PAB0zzHDPC5XnNhyaAyjtDPpGzceSLeYnEIXsNYZR3PA_Zu5P0K"},
					}},
					{K: []string{"compress"}, V: azalea.StringSpec{{Value: "gzip", Ident: true}}},
				}}},
				{K: []string{"patches"}, V: []any{
					azalea.StringSpec{{Value: "musl-off64_t-loff_t.patch"}},
					azalea.StringSpec{{Value: "musl-legacy-lfs.patch"}},
				}},
				{K: []string{"exclusive"}, V: azalea.StringSpec{{Value: "true", Ident: true}}},
				{K: []string{"exec"}, V: azalea.Func{
					Ident: "make",
					Args: []azalea.Arg{
						{K: []string{"configure"}, V: []azalea.KV{
							{K: "disable-multilib"},
							{K: "enable-default-pie"},
							{K: "disable-nls"},
							{K: "with-gnu-as"},
							{K: "with-gnu-ld"},
							{K: "with-system-zlib"},
							{K: "enable-languages", V: azalea.StringSpec{{Value: "c,c++,go"}}},
							{K: "with-native-system-header-dir", V: azalea.StringSpec{{Value: "/system/include"}}},
							{K: "with-multilib-list", V: azalea.Func{
								Ident: "arch",
								Args: []azalea.Arg{
									{K: []string{"amd64", "arm64"}, V: azalea.StringSpec{{Value: "''"}}},
									{K: []string{"default"}, V: azalea.StringSpec{{Value: "unset", Ident: true}}},
								},
							}},
						}},
						{K: []string{"make"}, V: []any{
							azalea.StringSpec{{Value: "BOOT_CFLAGS='-O2 -g'"}},
							azalea.Func{
								Ident: "noop",
								Args: []azalea.Arg{
									{K: []string{"key"}, V: azalea.StringSpec{
										{Value: "value", Ident: true},
									}},
								},
							},
							azalea.StringSpec{{Value: "bootstrap"}},
						}},
						{K: []string{"skip-check"}, V: azalea.StringSpec{{Value: "true", Ident: true}}},
					},
				}},
				{K: []string{"inputs"}, V: []any{
					azalea.StringSpec{{Value: "binutils", Ident: true}},
					azalea.StringSpec{{Value: "mpc", Ident: true}},
					azalea.StringSpec{{Value: "zlib", Ident: true}},
					azalea.StringSpec{{Value: "libucontext", Ident: true}},
					azalea.StringSpec{{Value: "kernel-headers", Ident: true}},
				}},
			},
		}}, nil},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			p, err := azalea.Parse(strings.NewReader(tc.data))
			if !reflect.DeepEqual(p, tc.want) {
				t.Errorf("Parse: %#v, want %#v", p, tc.want)
			}
			if !reflect.DeepEqual(err, tc.err) {
				t.Errorf("Parse: error = %v, want %v", err, tc.err)
			}
		})
	}
}

View File

@@ -1,57 +0,0 @@
package gcc {
description = "The GNU Compiler Collection";
website = "https://www.gnu.org/software/gcc";
anitya = 6502;
version* = "16.1.0";
source = remoteTar {
url = "https://ftp.tsukuba.wide.ad.jp/software/gcc/releases/"+
"gcc-"+version+"/gcc-"+version+".tar.gz";
checksum = "4ASoWbxaA2FW7PAB0zzHDPC5XnNhyaAyjtDPpGzceSLeYnEIXsNYZR3PA_Zu5P0K";
compress = gzip;
};
patches = [
"musl-off64_t-loff_t.patch",
"musl-legacy-lfs.patch",
];
// GCC spends most of its time in its many configure scripts, however
// it also saturates the CPU for a consequential amount of time.
exclusive = true;
exec = make {
configure = {
"disable-multilib";
"enable-default-pie";
"disable-nls";
"with-gnu-as";
"with-gnu-ld";
"with-system-zlib";
"enable-languages": "c,c++,go";
"with-native-system-header-dir": "/system/include";
"with-multilib-list": arch {
amd64, arm64 = "''";
default = unset;
};
};
make = [
"BOOT_CFLAGS='-O2 -g'",
noop { key = value; },
"bootstrap",
];
// This toolchain is hacked to pieces, it is not expected to ever work
// well in its current state. That does not matter as long as the
// toolchain it produces passes its own test suite.
skip-check = true;
};
inputs = [
binutils,
mpc,
zlib,
libucontext,
kernel-headers,
];
}

View File

@@ -5,6 +5,7 @@ import (
"io"
"net/http"
"os"
"runtime"
"time"
"hakurei.app/fhs"
@@ -87,7 +88,7 @@ func (a busyboxBin) Cure(t *pkg.TContext) (err error) {
// the https://busybox.net/downloads/binaries/ binary release.
func newBusyboxBin() pkg.Artifact {
var version, url, checksum string
switch arch {
switch runtime.GOARCH {
case "amd64":
version = "1.35.0"
url = "https://busybox.net/downloads/binaries/" +
@@ -100,11 +101,11 @@ func newBusyboxBin() pkg.Artifact {
checksum = "npJjBO7iwhjW6Kx2aXeSxf8kXhVgTCDChOZTTsI8ZfFfa3tbsklxRiidZQdrVERg"
default:
panic("unsupported target " + arch)
panic("unsupported target " + runtime.GOARCH)
}
return pkg.NewExec(
"busybox-bin-"+version, arch, nil, pkg.ExecTimeoutMax, false, false,
"busybox-bin-"+version, nil, pkg.ExecTimeoutMax, false,
fhs.AbsRoot, []string{
"PATH=/system/bin",
},

View File

@@ -10,8 +10,8 @@ import (
func (t Toolchain) newCMake() (pkg.Artifact, string) {
const (
version = "4.3.2"
checksum = "6QylwRVKletndTSkZTV2YBRwgd_9rUVgav_QW23HpjUgV21AVYZOUOal8tdBDmO7"
version = "4.3.1"
checksum = "RHpzZiM1kJ5bwLjo9CpXSeHJJg3hTtV9QxBYpQoYwKFtRh5YhGWpShrqZCSOzQN6"
)
return t.NewPackage("cmake", version, newFromGitHubRelease(
"Kitware/CMake",
@@ -122,18 +122,11 @@ type CMakeHelper struct {
// Path elements joined with source.
Append []string
// Value of CMAKE_BUILD_TYPE. The zero value is equivalent to "Release".
BuildType string
// CMake CACHE entries.
Cache []KV
// Runs after install.
Script string
// Replaces the default test command.
Test string
// Whether to skip running tests.
SkipTest bool
// Whether to generate Makefile instead.
Make bool
}
@@ -166,30 +159,20 @@ func (*CMakeHelper) wantsDir() string { return "/cure/" }
// script generates the cure script.
func (attr *CMakeHelper) script(name string) string {
if attr == nil {
attr = new(CMakeHelper)
attr = &CMakeHelper{
Cache: []KV{
{"CMAKE_BUILD_TYPE", "Release"},
},
}
}
if len(attr.Cache) == 0 {
panic("CACHE must be non-empty")
}
generate := "Ninja"
test := "ninja " + jobsFlagE + " test"
if attr.Make {
generate = "'Unix Makefiles'"
test = "make " + jobsFlagE + " test"
}
if attr.Test != "" {
test = attr.Test
}
script := attr.Script
if !attr.SkipTest && presetOpts&OptSkipCheck == 0 {
script += "\n" + test
}
cache := make([]KV, 1, 1+len(attr.Cache))
cache[0] = KV{"CMAKE_BUILD_TYPE", "Release"}
if attr.BuildType != "" {
cache[0][1] = attr.BuildType
}
cache = append(cache, attr.Cache...)
return `
cmake -G ` + generate + ` \
@@ -198,7 +181,7 @@ cmake -G ` + generate + ` \
-DCMAKE_ASM_COMPILER_TARGET="${ROSA_TRIPLE}" \
-DCMAKE_INSTALL_LIBDIR=lib \
` + strings.Join(slices.Collect(func(yield func(string) bool) {
for _, v := range cache {
for _, v := range attr.Cache {
if !yield("-D" + v[0] + "=" + v[1]) {
return
}
@@ -208,5 +191,5 @@ cmake -G ` + generate + ` \
'/usr/src/` + name + `/` + filepath.Join(attr.Append...) + `'
cmake --build . --parallel=` + jobsE + `
cmake --install . --prefix=/work/system
` + script
` + attr.Script
}

View File

@@ -4,8 +4,8 @@ import "hakurei.app/internal/pkg"
func (t Toolchain) newCurl() (pkg.Artifact, string) {
const (
version = "8.20.0"
checksum = "xyHXwrngIRGMasuzhn-I5MSCOhktwINbsWt1f_LuR-5jRVvyx_g6U1EQfDLEbr9r"
version = "8.19.0"
checksum = "YHuVLVVp8q_Y7-JWpID5ReNjq2Zk6t7ArHB6ngQXilp_R5l3cubdxu3UKo-xDByv"
)
return t.NewPackage("curl", version, newTar(
"https://curl.se/download/curl-"+version+".tar.bz2",

View File

@@ -1,27 +0,0 @@
package rosa
import "hakurei.app/internal/pkg"
// newFreetype returns the freetype package artifact and its version string.
// The source tarball is fetched from the GNU Savannah mirror and verified
// against the pinned checksum; the nil helper selects the default
// MakeHelper behaviour.
func (t Toolchain) newFreetype() (pkg.Artifact, string) {
	const (
		version  = "2.14.3"
		checksum = "-WfLv8fVJNyCHpP_lriiDzOcVbBL9ajdQ3tl8AzIIUa9-8sVpU9irxOmSMgRHWYz"
	)
	return t.NewPackage("freetype", version, newTar(
		"https://download.savannah.gnu.org/releases/freetype/"+
			"freetype-"+version+".tar.gz",
		checksum,
		pkg.TarGzip,
	), nil, (*MakeHelper)(nil)), version
}
// init registers freetype metadata in the global artifactsM table under the
// Freetype preset, wiring its constructor and release-monitoring ID.
func init() {
	artifactsM[Freetype] = Metadata{
		f:           Toolchain.newFreetype,
		Name:        "freetype",
		Description: "a freely available software library to render fonts",
		Website:     "http://www.freetype.org/",
		ID:          854,
	}
}

View File

@@ -9,8 +9,8 @@ import (
func (t Toolchain) newGit() (pkg.Artifact, string) {
const (
version = "2.54.0"
checksum = "7vGKtFOJGqY8DO4e8UMRax7dLgImXKQz5MMalec6MlgYrsarffSJjgOughwRFpSH"
version = "2.53.0"
checksum = "rlqSTeNgSeVKJA7nvzGqddFH8q3eFEPB4qRZft-4zth8wTHnbTbm7J90kp_obHGm"
)
return t.NewPackage("git", version, newTar(
"https://www.kernel.org/pub/software/scm/git/"+
@@ -20,9 +20,6 @@ func (t Toolchain) newGit() (pkg.Artifact, string) {
), &PackageAttr{
ScriptEarly: `
ln -s ../../system/bin/perl /usr/bin/ || true
# test suite assumes apache
rm -f /system/bin/httpd
`,
// uses source tree as scratch space
@@ -41,7 +38,6 @@ function disable_test {
fi
}
disable_test t1800-hook
disable_test t5319-multi-pack-index
disable_test t1305-config-include
disable_test t3900-i18n-commit
@@ -55,14 +51,6 @@ disable_test t9300-fast-import
disable_test t0211-trace2-perf
disable_test t1517-outside-repo
disable_test t2200-add-update
disable_test t0027-auto-crlf
disable_test t7513-interpret-trailers
disable_test t7703-repack-geometric
disable_test t7002-mv-sparse-checkout
disable_test t1451-fsck-buffer
disable_test t4104-apply-boundary
disable_test t4200-rerere
disable_test t5515-fetch-merge-logic
`,
Check: []string{
"-C t",
@@ -75,9 +63,6 @@ disable_test t5515-fetch-merge-logic
NO_INSTALL_HARDLINKS=1 \
install`,
},
// test suite hangs on mksh
Bash,
Diffutils,
Autoconf,
Gettext,
@@ -113,7 +98,7 @@ func (t Toolchain) NewViaGit(
return t.New(strings.TrimSuffix(
path.Base(url),
".git",
)+"-src-"+path.Base(rev), THostNet, t.AppendPresets(nil,
)+"-src-"+path.Base(rev), 0, t.AppendPresets(nil,
NSSCACert,
Git,
), &checksum, nil, `

View File

@@ -17,8 +17,9 @@ func (t Toolchain) newSPIRVHeaders() (pkg.Artifact, string) {
"vulkan-sdk-"+version,
checksum,
), nil, &CMakeHelper{
// upstream has no tests
SkipTest: true,
Cache: []KV{
{"CMAKE_BUILD_TYPE", "Release"},
},
}), version
}
func init() {
@@ -63,6 +64,7 @@ func (t Toolchain) newSPIRVTools() (pkg.Artifact, string) {
checksum,
), nil, &CMakeHelper{
Cache: []KV{
{"CMAKE_BUILD_TYPE", "Release"},
{"SPIRV-Headers_SOURCE_DIR", "/system"},
},
},
@@ -84,15 +86,13 @@ func init() {
},
ID: 14894,
latest: (*Versions).getStable,
}
}
func (t Toolchain) newGlslang() (pkg.Artifact, string) {
const (
version = "16.3.0"
checksum = "xyqDf8k3-D0_BXHGi0uLgMglnJ05Rf3j73QgbDs3sGtKNdBIQhY8JfqX1NcNoJQN"
version = "16.2.0"
checksum = "6_UuF9reLRDaVkgO-9IfB3kMwme3lQZM8LL8YsJwPdUFkrjzxJtf2A9X3w9nFxj2"
)
return t.NewPackage("glslang", version, newFromGitHub(
"KhronosGroup/glslang",
@@ -104,9 +104,11 @@ func (t Toolchain) newGlslang() (pkg.Artifact, string) {
Chmod: true,
}, &CMakeHelper{
Cache: []KV{
{"CMAKE_BUILD_TYPE", "Release"},
{"BUILD_SHARED_LIBS", "ON"},
{"ALLOW_EXTERNAL_SPIRV_TOOLS", "ON"},
},
Script: "ctest",
},
Python,
Bash,
@@ -126,123 +128,3 @@ func init() {
ID: 205796,
}
}
func (t Toolchain) newSPIRVLLVMTranslator() (pkg.Artifact, string) {
const (
version = "22.1.2"
checksum = "JZAaV5ewYcm-35YA_U2BM2IcsQouZtX1BLZR0zh2vSlfEXMsT5OCtY4Gh5RJkcGy"
)
skipChecks := []string{
// error: line 13: OpTypeCooperativeMatrixKHR Scope is limited to Workgroup and Subgroup
"cooperative_matrix_constant_null.spvasm",
}
switch arch {
case "arm64":
skipChecks = append(skipChecks,
// LLVM ERROR: unsupported calling convention
"DebugInfo/COFF/no-cus.ll",
"DebugInfo/Generic/2009-11-05-DeadGlobalVariable.ll",
"DebugInfo/Generic/2009-11-10-CurrentFn.ll",
"DebugInfo/Generic/2010-01-05-DbgScope.ll",
"DebugInfo/Generic/2010-03-12-llc-crash.ll",
"DebugInfo/Generic/2010-03-24-MemberFn.ll",
"DebugInfo/Generic/2010-04-19-FramePtr.ll",
"DebugInfo/Generic/2010-06-29-InlinedFnLocalVar.ll",
"DebugInfo/Generic/2010-10-01-crash.ll",
"DebugInfo/Generic/PR20038.ll",
"DebugInfo/Generic/constant-pointers.ll",
"DebugInfo/Generic/dead-argument-order.ll",
"DebugInfo/Generic/debug-info-eis-option.ll",
"DebugInfo/Generic/def-line.ll",
"DebugInfo/Generic/discriminator.ll",
"DebugInfo/Generic/dwarf-public-names.ll",
"DebugInfo/Generic/enum.ll",
"DebugInfo/Generic/func-using-decl.ll",
"DebugInfo/Generic/global.ll",
"DebugInfo/Generic/imported-name-inlined.ll",
"DebugInfo/Generic/incorrect-variable-debugloc1.ll",
"DebugInfo/Generic/inline-scopes.ll",
"DebugInfo/Generic/inlined-arguments.ll",
"DebugInfo/Generic/inlined-vars.ll",
"DebugInfo/Generic/linear-dbg-value.ll",
"DebugInfo/Generic/linkage-name-abstract.ll",
"DebugInfo/Generic/member-order.ll",
"DebugInfo/Generic/missing-abstract-variable.ll",
"DebugInfo/Generic/multiline.ll",
"DebugInfo/Generic/namespace_function_definition.ll",
"DebugInfo/Generic/namespace_inline_function_definition.ll",
"DebugInfo/Generic/noscopes.ll",
"DebugInfo/Generic/ptrsize.ll",
"DebugInfo/Generic/restrict.ll",
"DebugInfo/Generic/two-cus-from-same-file.ll",
"DebugInfo/Generic/version.ll",
"DebugInfo/LocalAddressSpace.ll",
"DebugInfo/UnknownBaseType.ll",
"DebugInfo/expr-opcode.ll",
)
}
return t.NewPackage("spirv-llvm-translator", version, newFromGitHub(
"KhronosGroup/SPIRV-LLVM-Translator",
"v"+version, checksum,
), &PackageAttr{
Patches: []KV{
{"remove-early-prefix", `diff --git a/CMakeLists.txt b/CMakeLists.txt
index c000a77e..f18f3fde 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -164,7 +164,7 @@ install(
${LLVM_SPIRV_INCLUDE_DIRS}/LLVMSPIRVOpts.h
${LLVM_SPIRV_INCLUDE_DIRS}/LLVMSPIRVExtensions.inc
DESTINATION
- ${CMAKE_INSTALL_PREFIX}/include/LLVMSPIRVLib
+ include/LLVMSPIRVLib
)
configure_file(LLVMSPIRVLib.pc.in ${CMAKE_BINARY_DIR}/LLVMSPIRVLib.pc @ONLY)
@@ -172,5 +172,5 @@ install(
FILES
${CMAKE_BINARY_DIR}/LLVMSPIRVLib.pc
DESTINATION
- ${CMAKE_INSTALL_PREFIX}/lib${LLVM_LIBDIR_SUFFIX}/pkgconfig
+ lib${LLVM_LIBDIR_SUFFIX}/pkgconfig
)
;`},
},
// litArgs emits shell syntax
ScriptEarly: `
export LIT_OPTS=` + litArgs(true, skipChecks...) + `
`,
}, &CMakeHelper{
Cache: []KV{
{"CMAKE_SKIP_BUILD_RPATH", "ON"},
{"BUILD_SHARED_LIBS", "ON"},
{"LLVM_SPIRV_ENABLE_LIBSPIRV_DIS", "ON"},
{"LLVM_EXTERNAL_SPIRV_HEADERS_SOURCE_DIR", "/system"},
{"LLVM_EXTERNAL_LIT", "/system/bin/lit"},
{"LLVM_INCLUDE_TESTS", "ON"},
},
},
Bash,
LIT,
SPIRVTools,
), version
}
func init() {
artifactsM[SPIRVLLVMTranslator] = Metadata{
f: Toolchain.newSPIRVLLVMTranslator,
Name: "spirv-llvm-translator",
Description: "bi-directional translation between SPIR-V and LLVM IR",
Website: "https://github.com/KhronosGroup/SPIRV-LLVM-Translator",
Dependencies: P{
SPIRVTools,
},
ID: 227273,
}
}

View File

@@ -1,47 +1,11 @@
package rosa
import (
"slices"
"strconv"
"strings"
"runtime"
"hakurei.app/internal/pkg"
)
// skipGNUTests generates a string for skipping specific tests by number in a
// GNU test suite. This is nontrivial because the test suite does not support
// excluding tests in any way, so ranges for all but the skipped tests have to
// be specified instead.
//
// For example, to skip test 764, ranges around the skipped test must be
// specified:
//
// 1-763 765-
//
// Tests are numbered starting from 1. The resulting string is unquoted.
func skipGNUTests(tests ...int) string {
tests = slices.Clone(tests)
slices.Sort(tests)
var buf strings.Builder
if tests[0] != 1 {
buf.WriteString("1-")
}
for i, n := range tests {
if n != 1 && (i == 0 || tests[i-1] != n-1) {
buf.WriteString(strconv.Itoa(n - 1))
buf.WriteString(" ")
}
if i == len(tests)-1 || tests[i+1] != n+1 {
buf.WriteString(strconv.Itoa(n + 1))
buf.WriteString("-")
}
}
return buf.String()
}
func (t Toolchain) newM4() (pkg.Artifact, string) {
const (
version = "1.4.21"
@@ -83,15 +47,7 @@ func (t Toolchain) newBison() (pkg.Artifact, string) {
"https://ftpmirror.gnu.org/gnu/bison/bison-"+version+".tar.gz",
checksum,
pkg.TarGzip,
), nil, &MakeHelper{
Check: []string{
"TESTSUITEFLAGS=" + jobsFlagE + "' " + skipGNUTests(
// clang miscompiles (SIGILL)
764,
) + "'",
"check",
},
},
), nil, (*MakeHelper)(nil),
M4,
Diffutils,
Sed,
@@ -111,8 +67,8 @@ func init() {
func (t Toolchain) newSed() (pkg.Artifact, string) {
const (
version = "4.10"
checksum = "TXTRFQJCyflb-bpBRI2S5Y1DpplwvT7-KfXtpqN4AdZgZ5OtI6yStn1-bkhDKx51"
version = "4.9"
checksum = "pe7HWH4PHNYrazOTlUoE1fXmhn2GOPFN_xE62i0llOr3kYGrH1g2_orDz0UtZ9Nt"
)
return t.NewPackage("sed", version, newTar(
"https://ftpmirror.gnu.org/gnu/sed/sed-"+version+".tar.gz",
@@ -120,8 +76,6 @@ func (t Toolchain) newSed() (pkg.Artifact, string) {
pkg.TarGzip,
), nil, (*MakeHelper)(nil),
Diffutils,
KernelHeaders,
), version
}
func init() {
@@ -230,9 +184,6 @@ func (t Toolchain) newLibtool() (pkg.Artifact, string) {
checksum,
pkg.TarGzip,
), nil, &MakeHelper{
// _Z2a2c: symbol not found
SkipCheck: t.isStage0(),
Check: []string{
"TESTSUITEFLAGS=" + jobsFlagE,
"check",
@@ -423,8 +374,8 @@ func init() {
func (t Toolchain) newCoreutils() (pkg.Artifact, string) {
const (
version = "9.11"
checksum = "t8UMed5wpFEoC56aa42_yidfOAaRGzOfj7MRtQkkqgGbpXiskNA8bd-EmVSQkZie"
version = "9.10"
checksum = "o-B9wssRnZySzJUI1ZJAgw-bZtj1RC67R9po2AcM2OjjS8FQIl16IRHpC6IwO30i"
)
return t.NewPackage("coreutils", version, newTar(
"https://ftpmirror.gnu.org/gnu/coreutils/coreutils-"+version+".tar.gz",
@@ -436,13 +387,106 @@ func (t Toolchain) newCoreutils() (pkg.Artifact, string) {
test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
test_disable '#!/bin/sh' gnulib-tests/test-c32ispunct.sh
test_disable '#!/bin/sh' tests/split/line-bytes.sh
test_disable '#!/bin/sh' tests/ls/hyperlink.sh
test_disable '#!/bin/sh' tests/misc/user.sh
test_disable 'int main(){return 0;}' gnulib-tests/test-chown.c
test_disable 'int main(){return 0;}' gnulib-tests/test-fchownat.c
test_disable 'int main(){return 0;}' gnulib-tests/test-lchown.c
`,
Patches: []KV{
{"tests-fix-job-control", `From 21d287324aa43aa3a31f39619ade0deac7fd6013 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A1draig=20Brady?= <P@draigBrady.com>
Date: Tue, 24 Feb 2026 15:44:41 +0000
Subject: [PATCH] tests: fix job control triggering test termination
This avoids the test harness being terminated like:
make[1]: *** [Makefile:24419: check-recursive] Hangup
make[3]: *** [Makefile:24668: check-TESTS] Hangup
make: *** [Makefile:24922: check] Hangup
make[2]: *** [Makefile:24920: check-am] Hangup
make[4]: *** [Makefile:24685: tests/misc/usage_vs_refs.log] Error 129
...
This happened sometimes when the tests were being run non interactively.
For example when run like:
setsid make TESTS="tests/timeout/timeout.sh \
tests/tail/overlay-headers.sh" SUBDIRS=. -j2 check
Note the race window can be made bigger by adding a sleep
after tail is stopped in overlay-headers.sh
The race can trigger the kernel to induce its job control
mechanism to prevent stuck processes.
I.e. where it sends SIGHUP + SIGCONT to a process group
when it determines that group may become orphaned,
and there are stopped processes in that group.
* tests/tail/overlay-headers.sh: Use setsid(1) to keep the stopped
tail process in a separate process group, thus avoiding any kernel
job control protection mechanism.
* tests/timeout/timeout.sh: Use setsid(1) to avoid the kernel
checking the main process group when sleep(1) is reparented.
Fixes https://bugs.gnu.org/80477
---
tests/tail/overlay-headers.sh | 8 +++++++-
tests/timeout/timeout.sh | 11 ++++++++---
2 files changed, 15 insertions(+), 4 deletions(-)
diff --git a/tests/tail/overlay-headers.sh b/tests/tail/overlay-headers.sh
index be9b6a7df..1e6da0a3f 100755
--- a/tests/tail/overlay-headers.sh
+++ b/tests/tail/overlay-headers.sh
@@ -20,6 +20,8 @@
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ tail sleep
+setsid true || skip_ 'setsid required to control groups'
+
# Function to count number of lines from tail
# while ignoring transient errors due to resource limits
countlines_ ()
@@ -54,7 +56,11 @@ echo start > file2 || framework_failure_
env sleep 60 & sleep=$!
# Note don't use timeout(1) here as it currently
-# does not propagate SIGCONT
+# does not propagate SIGCONT.
+# Note use setsid here to ensure we're in a separate process group
+# as we're going to STOP this tail process, and this can trigger
+# the kernel to send SIGHUP to a group if other tests have
+# processes that are reparented. (See tests/timeout/timeout.sh).
tail $fastpoll --pid=$sleep -f file1 file2 > out & pid=$!
# Ensure tail is running
diff --git a/tests/timeout/timeout.sh b/tests/timeout/timeout.sh
index 9a395416b..fbb043312 100755
--- a/tests/timeout/timeout.sh
+++ b/tests/timeout/timeout.sh
@@ -56,9 +56,14 @@ returns_ 124 timeout --foreground -s0 -k1 .1 sleep 10 && fail=1
) || fail=1
# Don't be confused when starting off with a child (Bug#9098).
-out=$(sleep .1 & exec timeout .5 sh -c 'sleep 2; echo foo')
-status=$?
-test "$out" = "" && test $status = 124 || fail=1
+# Use setsid to avoid sleep being in the test's process group, as
+# upon reparenting it can trigger an orphaned process group SIGHUP
+# (if there were stopped processes in other tests).
+if setsid true; then
+ out=$(setsid sleep .1 & exec timeout .5 sh -c 'sleep 2; echo foo')
+ status=$?
+ test "$out" = "" && test $status = 124 || fail=1
+fi
# Verify --verbose output
cat > exp <<\EOF
--
2.53.0
`},
},
Flag: TEarly,
}, &MakeHelper{
Configure: []KV{
@@ -713,20 +757,15 @@ func init() {
func (t Toolchain) newParallel() (pkg.Artifact, string) {
const (
version = "20260422"
checksum = "eTsepxgqhXpMEhPd55qh-W5y4vjKn0x9TD2mzbJCNZYtFf4lT4Wzoqr74HGJYBEH"
version = "20260322"
checksum = "gHoPmFkOO62ev4xW59HqyMlodhjp8LvTsBOwsVKHUUdfrt7KwB8koXmSVqQ4VOrB"
)
return t.NewPackage("parallel", version, newTar(
"https://ftpmirror.gnu.org/gnu/parallel/parallel-"+version+".tar.bz2",
checksum,
pkg.TarBzip2,
), &PackageAttr{
ScriptEarly: `
ln -s ../system/bin/bash /bin/
`,
}, (*MakeHelper)(nil),
), nil, (*MakeHelper)(nil),
Perl,
Bash,
), version
}
func init() {
@@ -842,7 +881,7 @@ func (t Toolchain) newGnuTLS() (pkg.Artifact, string) {
)
var configureExtra []KV
switch arch {
switch runtime.GOARCH {
case "arm64":
configureExtra = []KV{
{"disable-hardware-acceleration"},
@@ -1104,11 +1143,10 @@ func init() {
func (t Toolchain) newMPC() (pkg.Artifact, string) {
const (
version = "1.4.1"
checksum = "ZffaZyWkvIw0iPvRe5EJ7O-VvHtSkbbb3K_7SgPtK810NvGan7nbF0T5-6tozjQN"
checksum = "wdXAhplnS89FjVp20m2nC2CmLFQeyQqLpQAfViTy4vPxFdv2WYOTtfBKeIk5_Rec"
)
return t.NewPackage("mpc", version, newFromGitLab(
"gitlab.inria.fr",
"mpc/mpc",
return t.NewPackage("mpc", version, t.newTagRemote(
"https://gitlab.inria.fr/mpc/mpc.git",
version, checksum,
), &PackageAttr{
// does not find mpc-impl.h otherwise
@@ -1142,12 +1180,12 @@ func init() {
func (t Toolchain) newGCC() (pkg.Artifact, string) {
const (
version = "16.1.0"
checksum = "4ASoWbxaA2FW7PAB0zzHDPC5XnNhyaAyjtDPpGzceSLeYnEIXsNYZR3PA_Zu5P0K"
version = "15.2.0"
checksum = "TXJ5WrbXlGLzy1swghQTr4qxgDCyIZFgJry51XEPTBZ8QYbVmFeB4lZbSMtPJ-a1"
)
var configureExtra []KV
switch arch {
switch runtime.GOARCH {
case "amd64", "arm64":
configureExtra = append(configureExtra, KV{"with-multilib-list", "''"})
}

View File

@@ -1,35 +0,0 @@
package rosa
import (
"slices"
"strconv"
"strings"
"testing"
)
func TestSkipGNUTests(t *testing.T) {
t.Parallel()
testCases := []struct {
tests []int
want string
}{
{[]int{764}, "1-763 765-"},
{[]int{764, 0xcafe, 37, 9}, "1-8 10-36 38-763 765-51965 51967-"},
{[]int{1, 2, 0xbed}, "3-3052 3054-"},
{[]int{3, 4}, "1-2 5-"},
}
for _, tc := range testCases {
t.Run(strings.Join(slices.Collect(func(yield func(string) bool) {
for _, n := range tc.tests {
yield(strconv.Itoa(n))
}
}), ","), func(t *testing.T) {
t.Parallel()
if got := skipGNUTests(tc.tests...); got != tc.want {
t.Errorf("skipGNUTests: %q, want %q", got, tc.want)
}
})
}
}

View File

@@ -1,6 +1,7 @@
package rosa
import (
"runtime"
"slices"
"hakurei.app/internal/pkg"
@@ -9,9 +10,9 @@ import (
// newGoBootstrap returns the Go bootstrap toolchain.
func (t Toolchain) newGoBootstrap() pkg.Artifact {
const checksum = "8o9JL_ToiQKadCTb04nvBDkp8O1xiWOolAxVEqaTGodieNe4lOFEjlOxN3bwwe23"
return t.New("go1.4-bootstrap", 0, t.AppendPresets(nil,
Bash,
), nil, []string{
return t.New("go1.4-bootstrap", 0, []pkg.Artifact{
t.Load(Bash),
}, nil, []string{
"CGO_ENABLED=0",
}, `
mkdir -p /var/tmp/ /work/system/
@@ -34,13 +35,9 @@ func (t Toolchain) newGo(
script string,
extra ...pkg.Artifact,
) pkg.Artifact {
name := "all"
if presetOpts&OptSkipCheck != 0 {
name = "make"
}
return t.New("go"+version, 0, t.AppendPresets(extra,
Bash,
), nil, slices.Concat([]string{
return t.New("go"+version, 0, slices.Concat([]pkg.Artifact{
t.Load(Bash),
}, extra), nil, slices.Concat([]string{
"CC=cc",
"GOCACHE=/tmp/gocache",
"GOROOT_BOOTSTRAP=/system/go",
@@ -51,7 +48,7 @@ cp -r /usr/src/go /work/system
cd /work/system/go/src
chmod -R +w ..
`+script+`
./`+name+`.bash
./all.bash
mkdir /work/system/bin
ln -s \
@@ -72,7 +69,7 @@ func (t Toolchain) newGoLatest() (pkg.Artifact, string) {
finalEnv []string
)
switch arch {
switch runtime.GOARCH {
case "amd64":
bootstrapExtra = append(bootstrapExtra, t.newGoBootstrap())
@@ -82,7 +79,7 @@ func (t Toolchain) newGoLatest() (pkg.Artifact, string) {
finalEnv = append(finalEnv, "CGO_ENABLED=0")
default:
panic("unsupported target " + arch)
panic("unsupported target " + runtime.GOARCH)
}
go119 := t.newGo(
@@ -107,7 +104,7 @@ echo \
[]string{"CGO_ENABLED=0"}, `
sed -i \
's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \
cmd/link/internal/`+arch+`/obj.go
cmd/link/internal/`+runtime.GOARCH+`/obj.go
rm \
crypto/tls/handshake_client_test.go \
@@ -125,17 +122,17 @@ echo \
[]string{"CGO_ENABLED=0"}, `
sed -i \
's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \
cmd/link/internal/`+arch+`/obj.go
cmd/link/internal/`+runtime.GOARCH+`/obj.go
`, go121,
)
go125 := t.newGo(
"1.25.10",
"TwKwatkpwal-j9U2sDSRPEdM3YesI4Gm88YgGV59wtU-L85K9gA7UPy9SCxn6PMb",
"1.25.7",
"fyylHdBVRUobnBjYj3NKBaYPUw3kGmo2mEELiZonOYurPfbarNU1x77B99Fjut7Q",
[]string{"CGO_ENABLED=0"}, `
sed -i \
's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \
cmd/link/internal/`+arch+`/obj.go
cmd/link/internal/`+runtime.GOARCH+`/obj.go
rm \
os/root_unix_test.go \
@@ -144,8 +141,8 @@ rm \
)
const (
version = "1.26.3"
checksum = "lEiFocZFnN5fKvZzmwVdqc9pYUjAuhzqZGbuiOqxUP4XdcY8yECisKcqsQ_eNn1N"
version = "1.26.2"
checksum = "v-6BE89_1g3xYf-9oIYpJKFXlo3xKHYJj2_VGkaUq8ZVkIVQmLwrto-xGG03OISH"
)
return t.newGo(
version,
@@ -153,15 +150,10 @@ rm \
finalEnv, `
sed -i \
's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \
cmd/link/internal/`+arch+`/obj.go
sed -i \
's/cpu.X86.HasAVX512VBMI/& \&\& cpu.X86.HasPOPCNT/' \
internal/runtime/gc/scan/scan_amd64.go
cmd/link/internal/`+runtime.GOARCH+`/obj.go
rm \
os/root_unix_test.go \
cmd/cgo/internal/testsanitizers/tsan_test.go \
cmd/cgo/internal/testsanitizers/cshared_test.go
os/root_unix_test.go
`, go125,
), version
}

View File

@@ -7,8 +7,8 @@ import (
func (t Toolchain) newGLib() (pkg.Artifact, string) {
const (
version = "2.88.1"
checksum = "Rkszn6W4RHjyspyqfXdVAVawdwDJCuS0Zu0f7qot7tbJhnw2fUDoUUJB40m-1MCX"
version = "2.88.0"
checksum = "T79Cg4z6j-sDZ2yIwvbY4ccRv2-fbwbqgcw59F5NQ6qJT6z4v261vbYp3dHO6Ma3"
)
return t.NewPackage("glib", version, t.newTagRemote(
"https://gitlab.gnome.org/GNOME/glib.git",

View File

@@ -7,8 +7,9 @@ func (t Toolchain) newHakurei(
withHostname bool,
) pkg.Artifact {
hostname := `
echo 'Building test helper (hostname).'
go build -o /bin/hostname /usr/src/hostname/main.go
echo '# Building test helper (hostname).'
go build -v -o /bin/hostname /usr/src/hostname/main.go
echo
`
if !withHostname {
hostname = ""
@@ -63,9 +64,9 @@ func init() {
return t.newHakurei("", `
mkdir -p /work/system/libexec/hakurei/
echo "Building hakurei for $(go env GOOS)/$(go env GOARCH)."
go generate ./...
go build -trimpath -tags=rosa -o /work/system/libexec/hakurei -ldflags="-s -w
echo '# Building hakurei.'
go generate -v ./...
go build -trimpath -v -tags=rosa -o /work/system/libexec/hakurei -ldflags="-s -w
-buildid=
-linkmode external
-extldflags=-static
@@ -76,7 +77,7 @@ go build -trimpath -tags=rosa -o /work/system/libexec/hakurei -ldflags="-s -w
" ./...
echo
echo '##### Testing hakurei.'
echo '# Testing hakurei.'
go test -ldflags='-buildid= -linkmode external -extldflags=-static' ./...
echo
@@ -96,13 +97,9 @@ mkdir -p /work/system/bin/
}
artifactsM[HakureiDist] = Metadata{
f: func(t Toolchain) (pkg.Artifact, string) {
name := "all"
if presetOpts&OptSkipCheck != 0 {
name = "make"
}
return t.newHakurei("-dist", `
export HAKUREI_VERSION
DESTDIR=/work /usr/src/hakurei/`+name+`.sh
DESTDIR=/work /usr/src/hakurei/all.sh
`, true), hakureiVersion
},

View File

@@ -4,13 +4,13 @@ package rosa
import "hakurei.app/internal/pkg"
const hakureiVersion = "0.4.2"
const hakureiVersion = "0.4.0"
// hakureiSource is the source code of a hakurei release.
var hakureiSource = newTar(
"https://git.gensokyo.uk/rosa/hakurei/archive/"+
"v"+hakureiVersion+".tar.gz",
"jadgaOrxv5ABGvzQ_Rk0aPGz7U8K-427TbMhQNQ32scSizEnlR44Pu7NoWYWVZWq",
"wfQ9DqCW0Fw9o91wj-I55waoqzB-UqzzuC0_2h-P-1M78SgZ1WHSPCDJMth6EyC2",
pkg.TarGzip,
)

View File

@@ -1,34 +0,0 @@
package rosa
import "hakurei.app/internal/pkg"
func (t Toolchain) newHwdata() (pkg.Artifact, string) {
const (
version = "0.407"
checksum = "6p1XD0CRuzt6hLfjv4ShKBW934BexmoPkRrmwxD4J63fBVCzVBRHyF8pVJdW_Xjm"
)
return t.NewPackage("hwdata", version, newFromGitHub(
"vcrhonek/hwdata",
"v"+version, checksum,
), &PackageAttr{
Writable: true,
EnterSource: true,
}, &MakeHelper{
// awk: fatal: cannot open file `hwdata.spec' for reading: No such file or directory
InPlace: true,
// lspci: Unknown option 'A' (see "lspci --help")
SkipCheck: true,
}), version
}
func init() {
artifactsM[Hwdata] = Metadata{
f: Toolchain.newHwdata,
Name: "hwdata",
Description: "contains various hardware identification and configuration data",
Website: "https://github.com/vcrhonek/hwdata",
ID: 5387,
}
}

View File

@@ -2,12 +2,12 @@ package rosa
import "hakurei.app/internal/pkg"
const kernelVersion = "6.12.87"
const kernelVersion = "6.12.81"
var kernelSource = newTar(
"https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/"+
"snapshot/linux-"+kernelVersion+".tar.gz",
"QTl5teIy0K5KsOLYGHQ3FbnPCZNRH2bySXVzghiOoHDdM3zAcSPUkmdly85lMzHx",
"fBkNwf82DQXh74in6gaF2Jot7Vg-Vlcp9BUtCEipL9mvcM1EXLVFdV7FcrO20Eve",
pkg.TarGzip,
)
@@ -29,22 +29,8 @@ func init() {
}
func (t Toolchain) newKernelHeaders() (pkg.Artifact, string) {
const checksum = "lCmBNcMeUmXifg0vecKOPy3GAaFcJSmOPnf3wit9xYTDSTsFADPt1xxUFfmTn1fD"
return t.NewPackage("kernel-headers", kernelVersion, kernelSource, &PackageAttr{
Flag: TEarly,
KnownChecksum: new(mustDecode(checksum)),
Paths: []pkg.ExecPath{
// updated manually for API changes
pkg.Path(AbsUsrSrc.Append("version.h"), false, pkg.NewFile(
"version.h", []byte(`#define LINUX_VERSION_CODE 396372
#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c)))
#define LINUX_VERSION_MAJOR 6
#define LINUX_VERSION_PATCHLEVEL 12
#define LINUX_VERSION_SUBLEVEL 84
`),
)),
},
Flag: TEarly,
}, &MakeHelper{
SkipConfigure: true,
@@ -57,11 +43,7 @@ func (t Toolchain) newKernelHeaders() (pkg.Artifact, string) {
"INSTALL_HDR_PATH=/work/system",
"headers_install",
},
Install: `
cat \
/usr/src/version.h > \
/work/system/include/linux/version.h
`,
Install: "# headers installed during make",
},
Rsync,
), kernelVersion

View File

@@ -1,16 +1,16 @@
#
# Automatically generated file; DO NOT EDIT.
# Linux/x86 6.12.84 Kernel Configuration
# Linux/x86 6.12.80 Kernel Configuration
#
CONFIG_CC_VERSION_TEXT="clang version 22.1.4"
CONFIG_CC_VERSION_TEXT="clang version 22.1.2"
CONFIG_GCC_VERSION=0
CONFIG_CC_IS_CLANG=y
CONFIG_CLANG_VERSION=220104
CONFIG_CLANG_VERSION=220102
CONFIG_AS_IS_LLVM=y
CONFIG_AS_VERSION=220104
CONFIG_AS_VERSION=220102
CONFIG_LD_VERSION=0
CONFIG_LD_IS_LLD=y
CONFIG_LLD_VERSION=220104
CONFIG_LLD_VERSION=220102
CONFIG_RUSTC_VERSION=0
CONFIG_RUSTC_LLVM_VERSION=0
CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
@@ -3175,8 +3175,14 @@ CONFIG_PATA_ACPI=y
CONFIG_ATA_GENERIC=y
CONFIG_PATA_LEGACY=m
CONFIG_MD=y
# CONFIG_BLK_DEV_MD is not set
CONFIG_BLK_DEV_MD=m
CONFIG_MD_BITMAP_FILE=y
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_RAID10=m
CONFIG_MD_RAID456=m
CONFIG_MD_CLUSTER=m
CONFIG_BCACHE=m
# CONFIG_BCACHE_DEBUG is not set
# CONFIG_BCACHE_ASYNC_REGISTRATION is not set
@@ -3199,7 +3205,7 @@ CONFIG_DM_ERA=m
CONFIG_DM_CLONE=m
CONFIG_DM_MIRROR=m
CONFIG_DM_LOG_USERSPACE=m
# CONFIG_DM_RAID is not set
CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_MULTIPATH_QL=m
@@ -11630,7 +11636,10 @@ CONFIG_RANDSTRUCT_NONE=y
CONFIG_XOR_BLOCKS=m
CONFIG_ASYNC_CORE=m
CONFIG_ASYNC_MEMCPY=m
CONFIG_ASYNC_XOR=m
CONFIG_ASYNC_PQ=m
CONFIG_ASYNC_RAID6_RECOV=m
CONFIG_CRYPTO=y
#
@@ -11916,6 +11925,8 @@ CONFIG_BINARY_PRINTF=y
#
# Library routines
#
CONFIG_RAID6_PQ=m
CONFIG_RAID6_PQ_BENCHMARK=y
CONFIG_LINEAR_RANGES=y
CONFIG_PACKING=y
CONFIG_BITREVERSE=y
@@ -12460,6 +12471,7 @@ CONFIG_RUNTIME_TESTING_MENU=y
# CONFIG_INTERVAL_TREE_TEST is not set
# CONFIG_PERCPU_TEST is not set
# CONFIG_ATOMIC64_SELFTEST is not set
# CONFIG_ASYNC_RAID6_TEST is not set
# CONFIG_TEST_HEXDUMP is not set
# CONFIG_TEST_KSTRTOX is not set
# CONFIG_TEST_PRINTF is not set

View File

@@ -1,16 +1,16 @@
#
# Automatically generated file; DO NOT EDIT.
# Linux/arm64 6.12.83 Kernel Configuration
# Linux/arm64 6.12.80 Kernel Configuration
#
CONFIG_CC_VERSION_TEXT="clang version 22.1.4"
CONFIG_CC_VERSION_TEXT="clang version 21.1.8"
CONFIG_GCC_VERSION=0
CONFIG_CC_IS_CLANG=y
CONFIG_CLANG_VERSION=220104
CONFIG_CLANG_VERSION=210108
CONFIG_AS_IS_LLVM=y
CONFIG_AS_VERSION=220104
CONFIG_AS_VERSION=210108
CONFIG_LD_VERSION=0
CONFIG_LD_IS_LLD=y
CONFIG_LLD_VERSION=220104
CONFIG_LLD_VERSION=210108
CONFIG_RUSTC_VERSION=0
CONFIG_RUSTC_LLVM_VERSION=0
CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
@@ -3253,8 +3253,14 @@ CONFIG_PATA_ACPI=y
CONFIG_ATA_GENERIC=y
CONFIG_PATA_LEGACY=m
CONFIG_MD=y
# CONFIG_BLK_DEV_MD is not set
CONFIG_BLK_DEV_MD=m
CONFIG_MD_BITMAP_FILE=y
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_RAID10=m
CONFIG_MD_RAID456=m
CONFIG_MD_CLUSTER=m
CONFIG_BCACHE=m
# CONFIG_BCACHE_DEBUG is not set
# CONFIG_BCACHE_ASYNC_REGISTRATION is not set
@@ -3277,7 +3283,7 @@ CONFIG_DM_ERA=m
CONFIG_DM_CLONE=m
CONFIG_DM_MIRROR=m
CONFIG_DM_LOG_USERSPACE=m
# CONFIG_DM_RAID is not set
CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_MULTIPATH_QL=m
@@ -10294,6 +10300,7 @@ CONFIG_ALTERA_MSGDMA=m
# CONFIG_AMBA_PL08X is not set
CONFIG_APPLE_ADMAC=m
CONFIG_AXI_DMAC=m
CONFIG_BCM_SBA_RAID=m
CONFIG_DMA_BCM2835=m
CONFIG_DMA_SUN6I=m
CONFIG_DW_AXI_DMAC=m
@@ -13285,7 +13292,12 @@ CONFIG_RANDSTRUCT_NONE=y
CONFIG_XOR_BLOCKS=m
CONFIG_ASYNC_CORE=m
CONFIG_ASYNC_MEMCPY=m
CONFIG_ASYNC_XOR=m
CONFIG_ASYNC_PQ=m
CONFIG_ASYNC_RAID6_RECOV=m
CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA=y
CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA=y
CONFIG_CRYPTO=y
#
@@ -13628,6 +13640,8 @@ CONFIG_BINARY_PRINTF=y
#
# Library routines
#
CONFIG_RAID6_PQ=m
CONFIG_RAID6_PQ_BENCHMARK=y
CONFIG_LINEAR_RANGES=y
CONFIG_PACKING=y
CONFIG_BITREVERSE=y
@@ -14158,6 +14172,7 @@ CONFIG_RUNTIME_TESTING_MENU=y
# CONFIG_INTERVAL_TREE_TEST is not set
# CONFIG_PERCPU_TEST is not set
# CONFIG_ATOMIC64_SELFTEST is not set
# CONFIG_ASYNC_RAID6_TEST is not set
# CONFIG_TEST_HEXDUMP is not set
# CONFIG_TEST_KSTRTOX is not set
# CONFIG_TEST_PRINTF is not set

View File

@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
# Linux/riscv 6.12.80 Kernel Configuration
# Linux/riscv 6.12.77 Kernel Configuration
#
CONFIG_CC_VERSION_TEXT="clang version 22.1.2"
CONFIG_GCC_VERSION=0
@@ -37,6 +37,11 @@ CONFIG_BUILD_SALT=""
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_ZSTD=y
# CONFIG_KERNEL_GZIP is not set
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
# CONFIG_KERNEL_XZ is not set
# CONFIG_KERNEL_LZO is not set
# CONFIG_KERNEL_LZ4 is not set
CONFIG_KERNEL_ZSTD=y
CONFIG_DEFAULT_INIT=""
CONFIG_DEFAULT_HOSTNAME="rosa-early"
@@ -2843,8 +2848,14 @@ CONFIG_PATA_ACPI=y
CONFIG_ATA_GENERIC=y
CONFIG_PATA_LEGACY=m
CONFIG_MD=y
# CONFIG_BLK_DEV_MD is not set
CONFIG_BLK_DEV_MD=m
CONFIG_MD_BITMAP_FILE=y
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_RAID10=m
CONFIG_MD_RAID456=m
CONFIG_MD_CLUSTER=m
CONFIG_BCACHE=m
# CONFIG_BCACHE_DEBUG is not set
# CONFIG_BCACHE_ASYNC_REGISTRATION is not set
@@ -2867,7 +2878,7 @@ CONFIG_DM_ERA=m
CONFIG_DM_CLONE=m
CONFIG_DM_MIRROR=m
CONFIG_DM_LOG_USERSPACE=m
# CONFIG_DM_RAID is not set
CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_MULTIPATH_QL=m
@@ -10644,7 +10655,10 @@ CONFIG_RANDSTRUCT_NONE=y
CONFIG_XOR_BLOCKS=m
CONFIG_ASYNC_CORE=m
CONFIG_ASYNC_MEMCPY=m
CONFIG_ASYNC_XOR=m
CONFIG_ASYNC_PQ=m
CONFIG_ASYNC_RAID6_RECOV=m
CONFIG_CRYPTO=y
#
@@ -10904,6 +10918,8 @@ CONFIG_BINARY_PRINTF=y
#
# Library routines
#
CONFIG_RAID6_PQ=m
CONFIG_RAID6_PQ_BENCHMARK=y
CONFIG_LINEAR_RANGES=y
CONFIG_PACKING=y
CONFIG_BITREVERSE=y
@@ -11392,6 +11408,7 @@ CONFIG_RUNTIME_TESTING_MENU=y
# CONFIG_INTERVAL_TREE_TEST is not set
# CONFIG_PERCPU_TEST is not set
# CONFIG_ATOMIC64_SELFTEST is not set
# CONFIG_ASYNC_RAID6_TEST is not set
# CONFIG_TEST_HEXDUMP is not set
# CONFIG_TEST_KSTRTOX is not set
# CONFIG_TEST_PRINTF is not set

View File

@@ -1,100 +0,0 @@
package rosa
import "hakurei.app/internal/pkg"
func (t Toolchain) newLibarchive() (pkg.Artifact, string) {
const (
version = "3.8.7"
checksum = "CUJK4MDQmZmATClgQBH2Wt-7Ts4iiSUlg1J_TVb6-5IK3rVUgVLIMc5k-bnWB9w3"
)
return t.NewPackage("libarchive", version, newFromGitHub(
"libarchive/libarchive",
"v"+version, checksum,
), &PackageAttr{
Paths: []pkg.ExecPath{
pkg.Path(AbsUsrSrc.Append(
"CTestCustom.cmake",
), false, pkg.NewFile("CTestCustom.cmake", []byte(`
list(APPEND CTEST_CUSTOM_TESTS_IGNORE
"libarchive_test_archive_string_conversion_fail_c"
"libarchive_test_archive_string_conversion_fail_latin1"
"libarchive_test_archive_string_update_utf8_koi8"
"libarchive_test_gnutar_filename_encoding_KOI8R_UTF8"
"libarchive_test_gnutar_filename_encoding_KOI8R_CP866"
"libarchive_test_gnutar_filename_encoding_CP1251_UTF8"
"libarchive_test_gnutar_filename_encoding_Russian_Russia"
"libarchive_test_gnutar_filename_encoding_EUCJP_UTF8"
"libarchive_test_gnutar_filename_encoding_EUCJP_CP932"
"libarchive_test_gnutar_filename_encoding_CP932_UTF8"
"libarchive_test_pax_filename_encoding_KOI8R"
"libarchive_test_pax_filename_encoding_CP1251"
"libarchive_test_pax_filename_encoding_EUCJP"
"libarchive_test_pax_filename_encoding_CP932"
"libarchive_test_read_format_cpio_filename_UTF8_eucJP"
"libarchive_test_read_format_cpio_filename_CP866_KOI8R"
"libarchive_test_read_format_cpio_filename_KOI8R_CP866"
"libarchive_test_read_format_cpio_filename_UTF8_KOI8R"
"libarchive_test_read_format_cpio_filename_UTF8_CP866"
"libarchive_test_read_format_cpio_filename_eucJP_CP932"
"libarchive_test_read_format_cpio_filename_UTF8_CP932"
"libarchive_test_read_format_cpio_filename_CP866_CP1251"
"libarchive_test_read_format_cpio_filename_CP866_CP1251_win"
"libarchive_test_read_format_cpio_filename_KOI8R_CP1251"
"libarchive_test_read_format_cpio_filename_UTF8_CP1251"
"libarchive_test_read_format_gtar_filename_CP866_KOI8R"
"libarchive_test_read_format_gtar_filename_KOI8R_CP866"
"libarchive_test_read_format_gtar_filename_eucJP_CP932"
"libarchive_test_read_format_gtar_filename_CP866_CP1251"
"libarchive_test_read_format_gtar_filename_CP866_CP1251_win"
"libarchive_test_read_format_gtar_filename_KOI8R_CP1251"
"libarchive_test_read_format_rar_unicode_CP932"
"libarchive_test_read_format_zip_filename_CP932_eucJP"
"libarchive_test_read_format_zip_filename_UTF8_eucJP"
"libarchive_test_read_format_zip_filename_CP866_KOI8R"
"libarchive_test_read_format_zip_filename_KOI8R_CP866"
"libarchive_test_read_format_zip_filename_UTF8_KOI8R"
"libarchive_test_read_format_zip_filename_UTF8_CP866"
"libarchive_test_read_format_zip_filename_CP932_CP932"
"libarchive_test_read_format_zip_filename_UTF8_CP932"
"libarchive_test_read_format_zip_filename_CP866_CP1251"
"libarchive_test_read_format_zip_filename_CP866_CP1251_win"
"libarchive_test_read_format_zip_filename_KOI8R_CP1251"
"libarchive_test_read_format_zip_filename_UTF8_CP1251"
"libarchive_test_ustar_filename_encoding_KOI8R_UTF8"
"libarchive_test_ustar_filename_encoding_KOI8R_CP866"
"libarchive_test_ustar_filename_encoding_CP1251_UTF8"
"libarchive_test_ustar_filename_encoding_Russian_Russia"
"libarchive_test_ustar_filename_encoding_EUCJP_UTF8"
"libarchive_test_ustar_filename_encoding_EUCJP_CP932"
"libarchive_test_ustar_filename_encoding_CP932_UTF8"
"libarchive_test_zip_filename_encoding_KOI8R"
"libarchive_test_zip_filename_encoding_ru_RU_CP1251"
"libarchive_test_zip_filename_encoding_Russian_Russia"
"libarchive_test_zip_filename_encoding_EUCJP"
"libarchive_test_zip_filename_encoding_CP932"
"libarchive_test_read_format_cab_filename"
"libarchive_test_read_format_lha_filename"
"libarchive_test_read_format_tar_filename"
"libarchive_test_read_format_ustar_filename"
"libarchive_test_read_append_wrong_filter"
)
`))),
},
Writable: true,
ScriptEarly: `
install -Dv /usr/src/CTestCustom.cmake /cure/
`,
}, (*CMakeHelper)(nil)), version
}
func init() {
artifactsM[Libarchive] = Metadata{
f: Toolchain.newLibarchive,
Name: "libarchive",
Description: "multi-format archive and compression library",
Website: "https://www.libarchive.org/",
ID: 1558,
}
}

View File

@@ -4,8 +4,8 @@ import "hakurei.app/internal/pkg"
func (t Toolchain) newLibmd() (pkg.Artifact, string) {
const (
version = "1.2.0"
checksum = "1rJ6joAO0wwMZvSfnRNkc1MOhywyAq7SM8VmF92NvDtv7Qdl1LRbjm5fg_DFFtGj"
version = "1.1.0"
checksum = "9apYqPPZm0j5HQT8sCsVIhnVIqRD7XgN7kPIaTwTqnTuUq5waUAMq4M7ev8CODJ1"
)
return t.NewPackage("libmd", version, t.newTagRemote(
"https://git.hadrons.org/git/libmd.git",

View File

@@ -1,50 +0,0 @@
package rosa
import "hakurei.app/internal/pkg"
func (t Toolchain) newLibconfig() (pkg.Artifact, string) {
const (
version = "1.8.2"
checksum = "fD32hjeAZuTz98g6WYHRwsxphrgrEFqxi5Z1jlJemPckPBfxpS3i5HgshAuA6vmT"
)
return t.NewPackage("libconfig", version, newFromGitHub(
"hyperrealm/libconfig",
"v"+version,
checksum,
), &PackageAttr{
Patches: []KV{
{"disable-broken-tests", `diff --git a/tests/tests.c b/tests/tests.c
index eba7eae..f916d2e 100644
--- a/tests/tests.c
+++ b/tests/tests.c
@@ -753,7 +753,6 @@ int main(int argc, char **argv)
int failures;
TT_SUITE_START(LibConfigTests);
- TT_SUITE_TEST(LibConfigTests, ParsingAndFormatting);
TT_SUITE_TEST(LibConfigTests, ParseInvalidFiles);
TT_SUITE_TEST(LibConfigTests, ParseInvalidStrings);
TT_SUITE_TEST(LibConfigTests, BigInt1);
@@ -768,7 +767,6 @@ int main(int argc, char **argv)
TT_SUITE_TEST(LibConfigTests, OverrideSetting);
TT_SUITE_TEST(LibConfigTests, SettingLookups);
TT_SUITE_TEST(LibConfigTests, ReadStream);
- TT_SUITE_TEST(LibConfigTests, BinaryAndHex);
TT_SUITE_RUN(LibConfigTests);
failures = TT_SUITE_NUM_FAILURES(LibConfigTests);
TT_SUITE_END(LibConfigTests);
`},
},
}, (*CMakeHelper)(nil)), version
}
func init() {
artifactsM[Libconfig] = Metadata{
f: Toolchain.newLibconfig,
Name: "libconfig",
Description: "a simple library for processing structured configuration files",
Website: "https://hyperrealm.github.io/libconfig/",
ID: 1580,
}
}

View File

@@ -1,30 +0,0 @@
package rosa
import "hakurei.app/internal/pkg"
func (t Toolchain) newLibdisplayInfo() (pkg.Artifact, string) {
const (
version = "0.3.0"
checksum = "yjOqPUHHYgRtpqGw5RI1n2Q1_hO5j0LiFNMbjcRWV4Nf71XwwoC9fZMlKBDeLchT"
)
return t.NewPackage("libdisplay-info", version, newFromGitLab(
"gitlab.freedesktop.org",
"emersion/libdisplay-info",
version, checksum,
), nil, (*MesonHelper)(nil),
Diffutils,
Hwdata,
), version
}
func init() {
artifactsM[LibdisplayInfo] = Metadata{
f: Toolchain.newLibdisplayInfo,
Name: "libdisplay-info",
Description: "EDID and DisplayID library",
Website: "https://gitlab.freedesktop.org/emersion/libdisplay-info",
ID: 326668,
}
}

View File

@@ -1,33 +0,0 @@
package rosa
import "hakurei.app/internal/pkg"
func (t Toolchain) newLibepoxy() (pkg.Artifact, string) {
const (
version = "1.5.10"
checksum = "OHI8wshrlGw6BMGrmSyejJtwzM2gPhyFJrTsKxULyKMmYrfgcOe7Iw2ibVoUND_Q"
)
return t.NewPackage("libepoxy", version, newFromGitHub(
"anholt/libepoxy",
version,
checksum,
), nil, &MesonHelper{
Setup: []KV{
{"Dglx", "no"},
{"Degl", "no"},
},
},
LibX11,
), version
}
func init() {
artifactsM[Libepoxy] = Metadata{
f: Toolchain.newLibepoxy,
Name: "libepoxy",
Description: "a library for handling OpenGL function pointer management",
Website: "https://github.com/anholt/libepoxy",
ID: 6090,
}
}

View File

@@ -8,8 +8,8 @@ import (
func (t Toolchain) newLibexpat() (pkg.Artifact, string) {
const (
version = "2.8.1"
checksum = "iMEtbOJhQfGof2GxSlxffQSI1va_NDDQ9VIuqcPbNZ0291Dr8wttD5QecYyjIQap"
version = "2.7.5"
checksum = "vTRUjjg-qbHSXUBYKXgzVHkUO7UNyuhrkSYrE7ikApQm0g-OvQ8tspw4w55M-1Tp"
)
return t.NewPackage("libexpat", version, newFromGitHubRelease(
"libexpat/libexpat",

View File

@@ -1,38 +0,0 @@
package rosa
import (
"strings"
"hakurei.app/internal/pkg"
)
func (t Toolchain) newLibpng() (pkg.Artifact, string) {
const (
version = "1.6.58"
checksum = "m_a5lROJH7vmF3cMjqwTUqURuQLhV1JQx2ySPzcN3VPdgDB9pG3UINsIx_mtkr-t"
)
return t.NewPackage("libpng", version, newTar(
"https://downloads.sourceforge.net/project/libpng/libpng"+
strings.Join(strings.SplitN(version, ".", 3)[:2], "")+
"/"+version+"/libpng-"+version+".tar.gz",
checksum,
pkg.TarGzip,
), nil, (*MakeHelper)(nil),
Zlib,
), version
}
func init() {
artifactsM[Libpng] = Metadata{
f: Toolchain.newLibpng,
Name: "libpng",
Description: "the official PNG reference library",
Website: "https://www.libpng.org/pub/png/libpng.html",
Dependencies: P{
Zlib,
},
ID: 1705,
}
}

View File

@@ -1,44 +0,0 @@
package rosa
import (
"strings"
"hakurei.app/internal/pkg"
)
func (t Toolchain) newLibtirpc() (pkg.Artifact, string) {
const (
version = "1.3.7"
checksum = "nzFfu7LNvnSNiNAryD1vtnNWnU-Xqee8KqfXUKoBf5yjb5-dkeRkYuRijdCoYLof"
)
return t.NewPackage("libtirpc", version, t.newTagRemote(
"git://linux-nfs.org/~steved/libtirpc",
"libtirpc-"+
strings.Join(strings.SplitN(version, ".", 3), "-"),
checksum,
), nil, &MakeHelper{
Generate: "sh -e ./bootstrap",
Configure: []KV{
{"CFLAGS", `"$(pkg-config --cflags libbsd-overlay) ${CFLAGS:-}"`},
{"disable-gssapi"},
},
},
Automake,
Libtool,
PkgConfig,
Libbsd,
KernelHeaders,
), version
}
func init() {
artifactsM[Libtirpc] = Metadata{
f: Toolchain.newLibtirpc,
Name: "libtirpc",
Description: "a port of Suns Transport-Independent RPC library to Linux",
Website: "https://sourceforge.net/projects/libtirpc/",
ID: 1740,
}
}

View File

@@ -4,8 +4,8 @@ import "hakurei.app/internal/pkg"
func (t Toolchain) newLibucontext() (pkg.Artifact, string) {
const (
version = "1.5.1"
checksum = "mUgeyJknjMxT-5fORzz-rqhZfP3Y7EZGBhOwvhuX7MsF4Pk9wkuwtrLf5IML-jWu"
version = "1.5"
checksum = "Ggk7FMmDNBdCx1Z9PcNWWW6LSpjGYssn2vU0GK5BLXJYw7ZxZbA2m_eSgT9TFnIG"
)
return t.NewPackage("libucontext", version, newFromGitHub(
"kaniini/libucontext",

View File

@@ -5,11 +5,10 @@ import "hakurei.app/internal/pkg"
func (t Toolchain) newLibxml2() (pkg.Artifact, string) {
const (
version = "2.15.3"
checksum = "oJy74htGlEpf70KPvpW18fYJo0RQQkCXZRwqUz6NoXborS3HCq3Nm4gsyaSeNmUH"
checksum = "oWkNe53c3d4Lt4OzrXPHBcOLHJ3TWqpa0x7B7bh_DyZ-uIMiplpdZjQRgRWVal2h"
)
return t.NewPackage("libxml2", version, newFromGitLab(
"gitlab.gnome.org",
"GNOME/libxml2",
return t.NewPackage("libxml2", version, t.newTagRemote(
"https://gitlab.gnome.org/GNOME/libxml2.git",
"v"+version, checksum,
), &PackageAttr{
// can't create shell.out: Read-only file system

View File

@@ -5,11 +5,10 @@ import "hakurei.app/internal/pkg"
func (t Toolchain) newLibxslt() (pkg.Artifact, string) {
const (
version = "1.1.45"
checksum = "67ks7v8od2oWaEGf23Sst_Xbn_8brQyolQjqxPoO-lK35k_WJhi2Px5JJgbk-nfn"
checksum = "MZc_dyUWpHChkWDKa5iycrECxBsRd4ZMbYfL4VojTbung593mlH2tHGmxYB6NFYT"
)
return t.NewPackage("libxslt", version, newFromGitLab(
"gitlab.gnome.org",
"GNOME/libxslt",
return t.NewPackage("libxslt", version, t.newTagRemote(
"https://gitlab.gnome.org/GNOME/libxslt.git",
"v"+version, checksum,
), nil, &MakeHelper{
Generate: "NOCONFIGURE=1 ./autogen.sh",

View File

@@ -1,60 +1,50 @@
package rosa
import (
"regexp"
"slices"
"strings"
import "hakurei.app/internal/pkg"
"hakurei.app/internal/pkg"
)
func init() {
artifactsM[llvmSource] = Metadata{
f: func(t Toolchain) (pkg.Artifact, string) {
return t.NewPatchedSource("llvm", llvmVersion, newFromGitHub(
"llvm/llvm-project",
"llvmorg-"+llvmVersion,
llvmChecksum,
), true, llvmPatches...), llvmVersion
},
// litArgs returns [LIT] arguments for optional verbosity and check skipping.
func litArgs(verbose bool, skipChecks ...string) string {
args := []string{"-sv"}
if verbose {
args[0] = "--verbose"
Name: "llvm-project",
Description: "LLVM monorepo with Rosa OS patches",
ID: 1830,
}
if len(skipChecks) > 0 {
skipChecks = slices.Clone(skipChecks)
for i, s := range skipChecks {
s = regexp.QuoteMeta(s)
s = strings.ReplaceAll(s, "/", "\\/")
skipChecks[i] = s
}
args = append(args,
"--filter-out='\\''"+strings.Join(skipChecks, "|")+"'\\''")
}
return "'" + strings.Join(args, " ") + "'"
}
func (t Toolchain) newEarlyCompilerRT() (pkg.Artifact, string) {
version := t.Version(llvmSource)
major, _, _ := strings.Cut(version, ".")
return t.NewPackage("early-compiler-rt", version, t.Load(llvmSource), &PackageAttr{
func (t Toolchain) newCompilerRT() (pkg.Artifact, string) {
muslHeaders, _ := t.newMusl(true)
return t.NewPackage("compiler-rt", llvmVersion, t.Load(llvmSource), &PackageAttr{
NonStage0: []pkg.Artifact{
muslHeaders,
},
Env: stage0ExclConcat(t, []string{},
"LDFLAGS="+earlyLDFLAGS(false),
),
Flag: TExclusive,
}, &CMakeHelper{
Append: []string{"compiler-rt"},
Cache: []KV{
{"ENABLE_LINKER_BUILD_ID", "ON"},
{"CMAKE_BUILD_TYPE", "Release"},
{"LLVM_HOST_TRIPLE", `"${ROSA_TRIPLE}"`},
{"LLVM_DEFAULT_TARGET_TRIPLE", `"${ROSA_TRIPLE}"`},
// libc++ not yet available
{"CMAKE_CXX_COMPILER_TARGET", ""},
{"LLVM_HOST_TRIPLE", `"${ROSA_TRIPLE}"`},
{"LLVM_DEFAULT_TARGET_TRIPLE", `"${ROSA_TRIPLE}"`},
{"LLVM_ENABLE_RTTI", "ON"},
{"LLVM_BUILD_LLVM_DYLIB", "ON"},
{"LLVM_LINK_LLVM_DYLIB", "ON"},
{"LLVM_ENABLE_PER_TARGET_RUNTIME_DIR", "ON"},
{"COMPILER_RT_BUILD_BUILTINS", "ON"},
{"COMPILER_RT_DEFAULT_TARGET_ONLY", "ON"},
{"COMPILER_RT_USE_BUILTINS_LIBRARY", "ON"},
{"COMPILER_RT_DEFAULT_TARGET_ONLY", "OFF"},
{"COMPILER_RT_SANITIZERS_TO_BUILD", "asan"},
{"COMPILER_RT_BUILD_GWP_ASAN", "OFF"},
{"LLVM_ENABLE_PER_TARGET_RUNTIME_DIR", "ON"},
// does not work without libunwind
{"COMPILER_RT_BUILD_CTX_PROFILE", "OFF"},
@@ -63,12 +53,11 @@ func (t Toolchain) newEarlyCompilerRT() (pkg.Artifact, string) {
{"COMPILER_RT_BUILD_PROFILE", "OFF"},
{"COMPILER_RT_BUILD_XRAY", "OFF"},
},
SkipTest: true,
Script: `
mkdir -p "/work/system/lib/clang/` + major + `/lib/"
mkdir -p "/work/system/lib/clang/` + llvmVersionMajor + `/lib/"
ln -s \
"../../../${ROSA_TRIPLE}" \
"/work/system/lib/clang/` + major + `/lib/"
"/work/system/lib/clang/` + llvmVersionMajor + `/lib/"
ln -s \
"clang_rt.crtbegin-` + linuxArch() + `.o" \
@@ -80,16 +69,16 @@ ln -s \
},
Python,
muslHeaders,
KernelHeaders,
), version
), llvmVersion
}
func init() {
artifactsM[earlyCompilerRT] = Metadata{
f: Toolchain.newEarlyCompilerRT,
artifactsM[CompilerRT] = Metadata{
f: Toolchain.newCompilerRT,
Name: "early-compiler-rt",
Description: "early LLVM runtime: compiler-rt",
Name: "compiler-rt",
Description: "LLVM runtime: compiler-rt",
Website: "https://llvm.org/",
Dependencies: P{
Musl,
@@ -97,210 +86,110 @@ func init() {
}
}
func (t Toolchain) newEarlyRuntimes() (pkg.Artifact, string) {
version := t.Version(llvmSource)
return t.NewPackage("early-runtimes", version, t.Load(llvmSource), &PackageAttr{
func (t Toolchain) newLLVMRuntimes() (pkg.Artifact, string) {
return t.NewPackage("llvm-runtimes", llvmVersion, t.Load(llvmSource), &PackageAttr{
NonStage0: t.AppendPresets(nil, CompilerRT),
Env: stage0ExclConcat(t, []string{},
"LDFLAGS="+earlyLDFLAGS(false),
),
Flag: TExclusive,
}, &CMakeHelper{
Append: []string{"runtimes"},
Cache: []KV{
{"ENABLE_LINKER_BUILD_ID", "ON"},
{"CMAKE_BUILD_TYPE", "Release"},
{"LLVM_HOST_TRIPLE", `"${ROSA_TRIPLE}"`},
{"LLVM_DEFAULT_TARGET_TRIPLE", `"${ROSA_TRIPLE}"`},
{"LLVM_ENABLE_RUNTIMES", "'libunwind;libcxx;libcxxabi'"},
{"LIBUNWIND_USE_COMPILER_RT", "ON"},
{"LIBCXX_HAS_MUSL_LIBC", "ON"},
{"LIBCXX_USE_COMPILER_RT", "ON"},
{"LIBCXXABI_USE_COMPILER_RT", "ON"},
{"LIBCXXABI_USE_LLVM_UNWINDER", "ON"},
// libc++ not yet available
{"CMAKE_CXX_COMPILER_WORKS", "ON"},
{"LLVM_HOST_TRIPLE", `"${ROSA_TRIPLE}"`},
{"LLVM_DEFAULT_TARGET_TRIPLE", `"${ROSA_TRIPLE}"`},
{"LLVM_ENABLE_RTTI", "ON"},
{"LLVM_BUILD_LLVM_DYLIB", "ON"},
{"LLVM_LINK_LLVM_DYLIB", "ON"},
{"LLVM_ENABLE_RUNTIMES", "'libunwind;libcxx;libcxxabi'"},
{"LIBUNWIND_ENABLE_ASSERTIONS", "OFF"},
{"LIBUNWIND_USE_COMPILER_RT", "ON"},
{"LIBCXX_HAS_MUSL_LIBC", "ON"},
{"LIBCXX_USE_COMPILER_RT", "ON"},
{"LIBCXX_CXX_ABI", "libcxxabi"},
{"LIBCXX_ENABLE_STATIC_ABI_LIBRARY", "OFF"},
{"LIBCXX_HARDENING_MODE", "fast"},
{"LIBCXX_HAS_ATOMIC_LIB", "OFF"},
{"LIBCXXABI_USE_COMPILER_RT", "ON"},
{"LIBCXXABI_USE_LLVM_UNWINDER", "ON"},
{"LIBCXXABI_ENABLE_STATIC_UNWINDER", "OFF"},
{"LIBCXXABI_HAS_CXA_THREAD_ATEXIT_IMPL", "OFF"},
{"LLVM_ENABLE_ZLIB", "FORCE_ON"},
{"LLVM_ENABLE_ZSTD", "FORCE_ON"},
{"LLVM_ENABLE_ZLIB", "OFF"},
{"LLVM_ENABLE_ZSTD", "OFF"},
{"LLVM_ENABLE_LIBXML2", "OFF"},
},
SkipTest: true,
},
Python,
Zlib,
Zstd,
earlyCompilerRT,
KernelHeaders,
), version
), llvmVersion
}
func init() {
artifactsM[earlyRuntimes] = Metadata{
f: Toolchain.newEarlyRuntimes,
artifactsM[LLVMRuntimes] = Metadata{
f: Toolchain.newLLVMRuntimes,
Name: "early-runtimes",
Description: "early LLVM runtimes: libunwind, libcxx, libcxxabi",
Name: "llvm-runtimes",
Description: "LLVM runtimes: libunwind, libcxx, libcxxabi",
Website: "https://llvm.org/",
Dependencies: P{
earlyCompilerRT,
CompilerRT,
},
}
}
func (t Toolchain) newLLVM() (pkg.Artifact, string) {
var early PArtifact = muslHeaders
func (t Toolchain) newClang() (pkg.Artifact, string) {
target := "'AArch64;RISCV;X86'"
if t.isStage0() {
// The LLVM build system uses the system installation when building with
// LLVM_LINK_LLVM_DYLIB, since it builds runtimes after the fact, using
// the just-built toolchain. This is unacceptable in stage0 due to the
// potential version difference. Later stages bootstrap off of runtimes
// of its previous stage via 3-stage determinism.
early = earlyRuntimes
target = "Native"
}
cache := []KV{
{"ENABLE_LINKER_BUILD_ID", "ON"},
{"COMPILER_RT_USE_BUILTINS_LIBRARY", "ON"},
{"COMPILER_RT_DEFAULT_TARGET_ONLY", "ON"},
{"COMPILER_RT_BUILD_GWP_ASAN", "OFF"},
{"LIBCXX_CXX_ABI", "libcxxabi"},
{"LIBCXX_USE_COMPILER_RT", "ON"},
{"LIBCXX_ENABLE_STATIC_ABI_LIBRARY", "OFF"},
{"LIBCXX_HAS_MUSL_LIBC", "ON"},
{"LIBCXX_HARDENING_MODE", "fast"},
{"LIBCXXABI_USE_LLVM_UNWINDER", "ON"},
{"LIBCXXABI_ENABLE_STATIC_UNWINDER", "OFF"},
{"LIBCXXABI_USE_COMPILER_RT", "ON"},
{"LLVM_INSTALL_BINUTILS_SYMLINKS", "ON"},
{"LLVM_INSTALL_UTILS", "ON"},
{"LLVM_BUILD_LLVM_DYLIB", "ON"},
{"LLVM_LINK_LLVM_DYLIB", "ON"},
{"LLVM_APPEND_VC_REV", "OFF"},
{"LLVM_ENABLE_RTTI", "ON"},
{"LLVM_ENABLE_ZLIB", "FORCE_ON"},
{"LLVM_ENABLE_ZSTD", "FORCE_ON"},
{"LLVM_ENABLE_PER_TARGET_RUNTIME_DIR", "ON"},
{"LLVM_INCLUDE_BENCHMARKS", "OFF"},
{"CLANG_DEFAULT_RTLIB", "compiler-rt"},
{"CLANG_DEFAULT_UNWINDLIB", "libunwind"},
{"CLANG_DEFAULT_CXX_STDLIB", "libc++"},
{"CLANG_CONFIG_FILE_SYSTEM_DIR", "/system/etc/clang"},
{"LLVM_ENABLE_FFI", "OFF"},
{"LLVM_ENABLE_LIBXML2", "OFF"},
{"LLVM_ENABLE_LIBCXX", "ON"},
{"LLVM_ENABLE_LLD", "ON"},
{"LIBUNWIND_ENABLE_ASSERTIONS", "OFF"},
{"LIBUNWIND_USE_COMPILER_RT", "ON"},
{"LLVM_HOST_TRIPLE", `"${ROSA_TRIPLE}"`},
{"LLVM_DEFAULT_TARGET_TRIPLE", `"${ROSA_TRIPLE}"`},
{"LLVM_ENABLE_PROJECTS", "'" + strings.Join([]string{
"clang",
"lld",
}, ";") + "'"},
{"LLVM_ENABLE_RUNTIMES", "'" + strings.Join([]string{
"compiler-rt",
"libcxx",
"libcxxabi",
"libunwind",
"libclc",
}, ";") + "'"},
}
if !t.isStage0() {
skipChecks := []string{
// expensive, pointless to run here
"benchmarks",
// LLVM ERROR: Tried to execute an unknown external function: roundevenf
"ExecutionEngine/Interpreter/intrinsics.ll",
// clang: deadlocks with LLVM_BUILD_LLVM_DYLIB
"crash-recovery-modules",
// clang: fatal error: '__config_site' file not found
"CodeGen/PowerPC/ppc-xmmintrin.c",
"CodeGen/PowerPC/ppc-mmintrin.c",
"CodeGen/PowerPC/ppc-emmintrin.c",
"CodeGen/PowerPC/ppc-pmmintrin.c",
"CodeGen/PowerPC/ppc-tmmintrin.c",
"CodeGen/PowerPC/ppc-smmintrin.c",
"CodeGenCUDA/amdgpu-alias-undef-symbols.cu",
// cxx: fails on musl
"close.dont-get-rid-of-buffer",
"re/re.traits",
"std/time",
"localization/locales",
"localization/locale.categories",
"selftest/dsl/dsl.sh.py",
"input.output/iostream.format",
"locale-specific_form",
// cxx: deadlocks
"std/thread/thread.jthread",
// unwind: fails on musl
"eh_frame_fde_pc_range",
}
switch arch {
case "arm64":
skipChecks = append(skipChecks,
// LLVM: intermittently crashes
"ExecutionEngine/OrcLazy/multiple-compile-threads-basic.ll",
// unwind: unexpectedly passes
"unwind_leaffunction",
)
}
if presetOpts&OptLLVMNoLTO == 0 {
cache = append(cache, []KV{
// very expensive
{"LLVM_ENABLE_LTO", "Thin"},
}...)
}
cache = append(cache, []KV{
// symbols: clock_gettime, mallopt
{"COMPILER_RT_INCLUDE_TESTS", "OFF"},
{"LLVM_BUILD_TESTS", "ON"},
{"LLVM_LIT_ARGS", litArgs(true, skipChecks...)},
}...)
}
version := t.Version(llvmSource)
return t.NewPackage("llvm", version, t.Load(llvmSource), &PackageAttr{
return t.NewPackage("clang", llvmVersion, t.Load(llvmSource), &PackageAttr{
NonStage0: t.AppendPresets(nil, LLVMRuntimes),
Env: stage0ExclConcat(t, []string{},
"CFLAGS="+earlyCFLAGS,
"CXXFLAGS="+earlyCXXFLAGS(),
"LDFLAGS="+earlyLDFLAGS(false),
),
Flag: TExclusive,
}, &CMakeHelper{
Append: []string{"llvm"},
Cache: cache,
Cache: []KV{
{"CMAKE_BUILD_TYPE", "Release"},
{"LLVM_HOST_TRIPLE", `"${ROSA_TRIPLE}"`},
{"LLVM_DEFAULT_TARGET_TRIPLE", `"${ROSA_TRIPLE}"`},
{"LLVM_ENABLE_PROJECTS", "'clang;lld'"},
{"LLVM_ENABLE_LIBCXX", "ON"},
{"LLVM_USE_LINKER", "lld"},
{"LLVM_INSTALL_BINUTILS_SYMLINKS", "ON"},
{"LLVM_INSTALL_CCTOOLS_SYMLINKS", "ON"},
{"LLVM_LIT_ARGS", "'--verbose'"},
{"CLANG_DEFAULT_LINKER", "lld"},
{"CLANG_DEFAULT_CXX_STDLIB", "libc++"},
{"CLANG_DEFAULT_RTLIB", "compiler-rt"},
{"CLANG_DEFAULT_UNWINDLIB", "libunwind"},
{"LLVM_TARGETS_TO_BUILD", target},
{"CMAKE_CROSSCOMPILING", "OFF"},
{"CXX_SUPPORTS_CUSTOM_LINKER", "ON"},
{"LLVM_ENABLE_ZLIB", "OFF"},
{"LLVM_ENABLE_ZSTD", "OFF"},
{"LLVM_ENABLE_LIBXML2", "OFF"},
},
Script: `
ln -s ld.lld /work/system/bin/ld
ln -s clang /work/system/bin/cc
ln -s clang /work/system/bin/cpp
ln -s clang++ /work/system/bin/c++
`,
// LLVM_LINK_LLVM_DYLIB causes llvm test suite to leak system
// installation into test environment, and the tests end up testing the
// system installation instead. Tests are disabled on stage0 and relies
// on 3-stage determinism to test later stages.
SkipTest: t.isStage0(),
Test: `
chmod +w /bin && ln -s \
../system/bin/chmod \
../system/bin/mkdir \
../system/bin/rm \
../system/bin/tr \
../system/bin/awk \
/bin
ninja ` + jobsFlagE + ` check-all
`,
},
@@ -312,44 +201,44 @@ ninja ` + jobsFlagE + ` check-all
Coreutils,
Findutils,
Zlib,
Zstd,
early,
KernelHeaders,
), version
), llvmVersion
}
func init() {
const (
version = "22.1.5"
checksum = "32gOaLPHcUlo3hkdk5RbFumTE01XKeCAYZcpvn8IDHF95egXVfDFSl6eZL3ChMen"
)
artifactsM[Clang] = Metadata{
f: Toolchain.newClang,
artifactsM[llvmSource] = Metadata{
f: func(t Toolchain) (pkg.Artifact, string) {
return t.NewPatchedSource("llvm", version, newFromGitHub(
"llvm/llvm-project",
"llvmorg-"+version,
checksum,
), true, llvmPatches...), version
},
Name: "llvm-project",
Description: "LLVM monorepo with Rosa OS patches",
ID: 1830,
}
artifactsM[LLVM] = Metadata{
f: Toolchain.newLLVM,
Name: "llvm",
Description: "a collection of modular and reusable compiler and toolchain technologies",
Website: "https://llvm.org",
Name: "clang",
Description: `an "LLVM native" C/C++/Objective-C compiler`,
Website: "https://llvm.org/",
Dependencies: P{
Zlib,
Zstd,
Musl,
LLVMRuntimes,
},
}
}
func (t Toolchain) newLibclc() (pkg.Artifact, string) {
return t.NewPackage("libclc", llvmVersion, t.Load(llvmSource), nil, &CMakeHelper{
Append: []string{"libclc"},
Cache: []KV{
{"CMAKE_BUILD_TYPE", "Release"},
{"LLVM_HOST_TRIPLE", `"${ROSA_TRIPLE}"`},
{"LLVM_DEFAULT_TARGET_TRIPLE", `"${ROSA_TRIPLE}"`},
{"LIBCLC_TARGETS_TO_BUILD", "all"},
},
Script: "ninja " + jobsFlagE + " test",
}), llvmVersion
}
func init() {
artifactsM[Libclc] = Metadata{
f: Toolchain.newLibclc,
Name: "libclc",
Description: "an open source, BSD/MIT dual licensed implementation of the library requirements of the OpenCL C programming language",
Website: "https://libclc.llvm.org/",
}
}

View File

@@ -0,0 +1,9 @@
package rosa
// latest version of LLVM, conditional to temporarily avoid broken new releases
const (
llvmVersionMajor = "22"
llvmVersion = llvmVersionMajor + ".1.3"
llvmChecksum = "CUwnpzua_y28HZ9oI0NmcKL2wClsSjFpgY9do5-7cCZJHI5KNF64vfwGvY0TYyR3"
)

View File

@@ -90,26 +90,11 @@ index 8ac8d4eb9181..e46b04a898ca 100644
addSystemInclude(DriverArgs, CC1Args, ResourceDirInclude);
`},
{"path-system-libraries", `diff --git a/clang/lib/Driver/ToolChains/Gnu.cpp b/clang/lib/Driver/ToolChains/Gnu.cpp
index cb6a9b242421..b8d31690d1af 100644
--- a/clang/lib/Driver/ToolChains/Gnu.cpp
+++ b/clang/lib/Driver/ToolChains/Gnu.cpp
@@ -2314,6 +2314,10 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
Prefixes.push_back("/opt/rh/devtoolset-2/root/usr");
}
+ if (TargetTriple.getVendor() == llvm::Triple::Rosa) {
+ Prefixes.push_back(concat(SysRoot, "/system"));
+ }
+
// Fall back to /usr which is used by most non-Solaris systems.
Prefixes.push_back(concat(SysRoot, "/usr"));
}
diff --git a/clang/lib/Driver/ToolChains/Linux.cpp b/clang/lib/Driver/ToolChains/Linux.cpp
index d525b417b4ea..2b93f401733e 100644
{"path-system-libraries", `diff --git a/clang/lib/Driver/ToolChains/Linux.cpp b/clang/lib/Driver/ToolChains/Linux.cpp
index 8ac8d4eb9181..f4d1347ab64d 100644
--- a/clang/lib/Driver/ToolChains/Linux.cpp
+++ b/clang/lib/Driver/ToolChains/Linux.cpp
@@ -302,6 +302,7 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
@@ -282,6 +282,7 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
const bool IsHexagon = Arch == llvm::Triple::hexagon;
const bool IsRISCV = Triple.isRISCV();
const bool IsCSKY = Triple.isCSKY();
@@ -117,7 +102,18 @@ index d525b417b4ea..2b93f401733e 100644
if (IsCSKY && !SelectedMultilibs.empty())
SysRoot = SysRoot + SelectedMultilibs.back().osSuffix();
@@ -341,8 +342,12 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
@@ -318,12 +319,23 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
const std::string OSLibDir = std::string(getOSLibDir(Triple, Args));
const std::string MultiarchTriple = getMultiarchTriple(D, Triple, SysRoot);
+ if (IsRosa) {
+ ExtraOpts.push_back("-rpath");
+ ExtraOpts.push_back("/system/lib");
+ ExtraOpts.push_back("-rpath");
+ ExtraOpts.push_back(concat("/system/lib", MultiarchTriple));
+ }
+
// mips32: Debian multilib, we use /libo32, while in other case, /lib is
// used. We need add both libo32 and /lib.
if (Arch == llvm::Triple::mips || Arch == llvm::Triple::mipsel) {
Generic_GCC::AddMultilibPaths(D, SysRoot, "libo32", MultiarchTriple, Paths);
@@ -132,7 +128,7 @@ index d525b417b4ea..2b93f401733e 100644
}
Generic_GCC::AddMultilibPaths(D, SysRoot, OSLibDir, MultiarchTriple, Paths);
@@ -360,18 +365,30 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
@@ -341,18 +353,30 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
Paths);
}
@@ -168,7 +164,7 @@ index d525b417b4ea..2b93f401733e 100644
}
ToolChain::RuntimeLibType Linux::GetDefaultRuntimeLibType() const {
@@ -572,6 +589,9 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
@@ -457,6 +481,9 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
return Triple.isArch64Bit() ? "/system/bin/linker64" : "/system/bin/linker";
}
if (Triple.isMusl()) {
@@ -178,5 +174,18 @@ index d525b417b4ea..2b93f401733e 100644
std::string ArchName;
bool IsArm = false;
diff --git a/clang/tools/clang-installapi/Options.cpp b/clang/tools/clang-installapi/Options.cpp
index 64324a3f8b01..15ce70b68217 100644
--- a/clang/tools/clang-installapi/Options.cpp
+++ b/clang/tools/clang-installapi/Options.cpp
@@ -515,7 +515,7 @@ bool Options::processFrontendOptions(InputArgList &Args) {
FEOpts.FwkPaths = std::move(FrameworkPaths);
// Add default framework/library paths.
- PathSeq DefaultLibraryPaths = {"/usr/lib", "/usr/local/lib"};
+ PathSeq DefaultLibraryPaths = {"/usr/lib", "/system/lib", "/usr/local/lib"};
PathSeq DefaultFrameworkPaths = {"/Library/Frameworks",
"/System/Library/Frameworks"};
`},
}

View File

@@ -1,59 +0,0 @@
package rosa
import "hakurei.app/internal/pkg"
func (t Toolchain) newLMSensors() (pkg.Artifact, string) {
const (
version = "3-6-2"
checksum = "7JYNutrihe-FP6r3ftf96uFZJJWPfxnBHL0ALSMA-vovaXVRr-sAjlLitw7WWpCI"
)
return t.NewPackage("lm_sensors", version, newFromGitHub(
"lm-sensors/lm-sensors",
"V"+version,
checksum,
), &PackageAttr{
Writable: true,
Chmod: true,
EnterSource: true,
ScriptEarly: `
ln -s \
../../system/bin/perl \
/usr/bin/
`,
}, &MakeHelper{
InPlace: true,
SkipConfigure: true,
Make: []string{
"CC=cc",
"ETCDIR=/system/etc",
"PREFIX=/system",
},
Check: []string{
"CC=cc",
"check",
},
Install: "make DESTDIR=/work PREFIX=/system install",
},
Perl,
PerlTestCmd,
M4,
Bison,
Flex,
), version
}
func init() {
artifactsM[LMSensors] = Metadata{
f: Toolchain.newLMSensors,
Name: "lm_sensors",
Description: "user-space support for hardware monitoring drivers",
Website: "https://hwmon.wiki.kernel.org/lm_sensors",
ID: 1831,
}
}

Some files were not shown because too many files have changed in this diff Show More