Compare commits
189 Commits
| SHA1 |
|---|
| d61faa09eb |
| 50153788ef |
| c84fe63217 |
| eb67e5e0a8 |
| 948afe33e5 |
| 76c657177d |
| 4356f978aa |
| 4f17dad645 |
| 68b7d41c65 |
| e48f303e38 |
| f1fd406b82 |
| 53b1de3395 |
| 92dcadbf27 |
| 0bd6a18326 |
| 67d592c337 |
| fdc8a8419b |
| 122cfbf63a |
| 504f5d28fe |
| 3eadd5c580 |
| 4d29333807 |
| e1533fa4c6 |
| 9a74d5273d |
| 2abc8c454e |
| fecb963e85 |
| cd9da57f20 |
| c6a95f5a6a |
| 228489371d |
| 490471d22b |
| 763d2572fe |
| bb1b6beb87 |
| 3224a7da63 |
| 8a86cf74ee |
| e34a59e332 |
| 861801597d |
| 334578fdde |
| 20790af71e |
| 43b8a40fc0 |
| 87c3059214 |
| 6956dfc31a |
| d9ebaf20f8 |
| acee0b3632 |
| 5e55a796df |
| f6eaf76ec9 |
| 5c127a7035 |
| 8a26521f5b |
| 0fd4556e38 |
| 50b82dcf82 |
| 20a8d30821 |
| cdf2e4a2fb |
| dcb8a6ea06 |
| 094a62ba9d |
| 6420b6e6e8 |
| d7d058fdc5 |
| 84795b5d9f |
| f84d30deed |
| 77821feb8b |
| eb1060f395 |
| 0e08254595 |
| 349d8693bf |
| e88ae87e50 |
| 7cd4aa838c |
| 641942a4e3 |
| b6a66acfe4 |
| b72dc43bc3 |
| 8e59ff98b5 |
| f06d7fd387 |
| ba75587132 |
| 9a06ce2db0 |
| 3ec15bcdf1 |
| d933234784 |
| 1c49c75f95 |
| 6a01a55d7e |
| b14964a66d |
| ff98c9ded9 |
| 7f3d1d6375 |
| 3a4f20b759 |
| 21858ecfe4 |
| 574a64aa85 |
| 85d27229fd |
| 83fb80d710 |
| fe6dc62ebf |
| 823f9c76a7 |
| 2df913999b |
| 52c959bd6a |
| d258dea0bf |
| dc96302111 |
| 88e9a143d6 |
| 8d06c0235b |
| 4155adc16a |
| 2a9525c77a |
| efc90c3221 |
| 610ee13ab3 |
| 5936e6a4aa |
| 3499a82785 |
| 088d35e4e6 |
| 1667df9c43 |
| 156dd767ef |
| 5fe166a4a7 |
| 41a8d03dd2 |
| 610572d0e6 |
| 29951c5174 |
| 91c3594dee |
| 7ccc2fc5ec |
| 63e137856e |
| e1e46504a1 |
| ec9343ebd6 |
| 423808ac76 |
| 2494ede106 |
| da3848b92f |
| 34cb4ebd3b |
| f712466714 |
| f2430b5f5e |
| 863e6f5db6 |
| 23df2ab999 |
| 7bd4d7d0e6 |
| b3c30bcc51 |
| 38059db835 |
| 409fd3149e |
| 4eea136308 |
| c86ff02d8d |
| e8dda70c41 |
| 7ea4e8b643 |
| 5eefebcb48 |
| 8e08e8f518 |
| 54da6ce03d |
| 3a21ba1bca |
| 45301559bf |
| 0df87ab111 |
| aa0a949cef |
| ce0064384d |
| 53d80f4b66 |
| 156096ac98 |
| ceb75538cf |
| 0741a614ed |
| e7e9b4caea |
| f6d32e482a |
| 79adf217f4 |
| 8efffd72f4 |
| 86ad8b72aa |
| e91049c3c5 |
| 3d4d32932d |
| 0ab6c13c77 |
| 834cb0d40b |
| 7548a627e5 |
| b98d27f773 |
| f3aa31e401 |
| 4da26681b5 |
| 4897b0259e |
| d6e4f85864 |
| 3eb927823f |
| d76b9d04b8 |
| fa93476896 |
| bd0ef086b1 |
| 05202cf994 |
| 40081e7a06 |
| 863d3dcf9f |
| 8ad9909065 |
| deda16da38 |
| 55465c6e72 |
| ce249d23f1 |
| dd5d792d14 |
| d15d2ec2bd |
| 3078c41ce7 |
| e9de5d3aca |
| 993afde840 |
| c9cd16fd2a |
| e42ea32dbe |
| e7982b4ee9 |
| ef1ebf12d9 |
| 775a9f57c9 |
| 2f8ca83376 |
| 3d720ada92 |
| 2e5362e536 |
| 6d3bd27220 |
| a27305cb4a |
| 0e476c5e5b |
| 54712e0426 |
| b77c1ecfdb |
| dce5839a79 |
| d597592e1f |
| 056f5b12d4 |
| da2bb546ba |
| 7bfbd59810 |
| ea815a59e8 |
| 28a8dc67d2 |
| ec49c63c5f |
| 5a50bf80ee |
| ce06b7b663 |
| 08bdc68f3a |
```diff
@@ -72,6 +72,23 @@ jobs:
           path: result/*
           retention-days: 1
 
+  sharefs:
+    name: ShareFS
+    runs-on: nix
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Run NixOS test
+        run: nix build --out-link "result" --print-out-paths --print-build-logs .#checks.x86_64-linux.sharefs
+
+      - name: Upload test output
+        uses: actions/upload-artifact@v3
+        with:
+          name: "sharefs-vm-output"
+          path: result/*
+          retention-days: 1
+
   hpkg:
     name: Hpkg
     runs-on: nix
@@ -96,6 +113,7 @@ jobs:
       - race
       - sandbox
       - sandbox-race
+      - sharefs
       - hpkg
     runs-on: nix
     steps:
```
.gitignore (vendored, 1 line added)

```diff
@@ -27,6 +27,7 @@ go.work.sum
 
 # go generate
 /cmd/hakurei/LICENSE
+/internal/pkg/testdata/testtool
 
 # release
 /dist/hakurei-*
```
cmd/mbf/main.go (new file, 243 lines)

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"
	"os"
	"os/signal"
	"path/filepath"
	"runtime"
	"syscall"
	"unique"

	"hakurei.app/command"
	"hakurei.app/container"
	"hakurei.app/container/check"
	"hakurei.app/internal/pkg"
	"hakurei.app/internal/rosa"
	"hakurei.app/message"
)

func main() {
	container.TryArgv0(nil)

	log.SetFlags(0)
	log.SetPrefix("mbf: ")
	msg := message.New(log.Default())

	if os.Geteuid() == 0 {
		log.Fatal("this program must not run as root")
	}

	var cache *pkg.Cache
	ctx, stop := signal.NotifyContext(context.Background(),
		syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
	defer stop()
	defer func() {
		if cache != nil {
			cache.Close()
		}

		if r := recover(); r != nil {
			fmt.Println(r)
			log.Fatal("consider scrubbing the on-disk cache")
		}
	}()

	var (
		flagQuiet  bool
		flagCures  int
		flagBase   string
		flagTShift int
	)
	c := command.New(os.Stderr, log.Printf, "mbf", func([]string) (err error) {
		msg.SwapVerbose(!flagQuiet)

		var base *check.Absolute
		if flagBase, err = filepath.Abs(flagBase); err != nil {
			return
		} else if base, err = check.NewAbs(flagBase); err != nil {
			return
		}
		if cache, err = pkg.Open(ctx, msg, flagCures, base); err == nil {
			if flagTShift < 0 {
				cache.SetThreshold(0)
			} else if flagTShift > 31 {
				cache.SetThreshold(1 << 31)
			} else {
				cache.SetThreshold(1 << flagTShift)
			}
		}
		return
	}).Flag(
		&flagQuiet,
		"q", command.BoolFlag(false),
		"Do not print cure messages",
	).Flag(
		&flagCures,
		"cures", command.IntFlag(0),
		"Maximum number of dependencies to cure at any given time",
	).Flag(
		&flagBase,
		"d", command.StringFlag("cache"),
		"Directory to store cured artifacts",
	).Flag(
		&flagTShift,
		"tshift", command.IntFlag(-1),
		"Dependency graph size exponent, to the power of 2",
	)

	{
		var flagShifts int
		c.NewCommand(
			"scrub", "Examine the on-disk cache for errors",
			func(args []string) error {
				if len(args) > 0 {
					return errors.New("scrub expects no arguments")
				}
				if flagShifts < 0 || flagShifts > 31 {
					flagShifts = 12
				}
				return cache.Scrub(runtime.NumCPU() << flagShifts)
			},
		).Flag(
			&flagShifts,
			"shift", command.IntFlag(12),
			"Scrub parallelism size exponent, to the power of 2",
		)
	}

	c.NewCommand(
		"stage3",
		"Check for toolchain 3-stage non-determinism",
		func(args []string) (err error) {
			_, _, _, stage2 := (rosa.Std - 1).NewLLVM()
			_, _, _, stage3 := rosa.Std.NewLLVM()
			var (
				pathname *check.Absolute
				checksum [2]unique.Handle[pkg.Checksum]
			)

			if pathname, checksum[0], err = cache.Cure(stage2); err != nil {
				return err
			}
			log.Println("stage2:", pathname)
			if pathname, checksum[1], err = cache.Cure(stage3); err != nil {
				return err
			}
			log.Println("stage3:", pathname)

			if checksum[0] != checksum[1] {
				err = &pkg.ChecksumMismatchError{
					Got:  checksum[0].Value(),
					Want: checksum[1].Value(),
				}
			}
			return
		},
	)

	c.NewCommand(
		"cure",
		"Cure the named artifact and show its path",
		func(args []string) error {
			if len(args) != 1 {
				return errors.New("cure requires 1 argument")
			}
			var p rosa.PArtifact
			switch args[0] {
			case "acl":
				p = rosa.ACL
			case "attr":
				p = rosa.Attr
			case "autoconf":
				p = rosa.Autoconf
			case "bash":
				p = rosa.Bash
			case "busybox":
				p = rosa.Busybox
			case "cmake":
				p = rosa.CMake
			case "coreutils":
				p = rosa.Coreutils
			case "diffutils":
				p = rosa.Diffutils
			case "gettext":
				p = rosa.Gettext
			case "git":
				p = rosa.Git
			case "go":
				p = rosa.Go
			case "gperf":
				p = rosa.Gperf
			case "hakurei":
				p = rosa.Hakurei
			case "kernel-headers":
				p = rosa.KernelHeaders
			case "libXau":
				p = rosa.LibXau
			case "libexpat":
				p = rosa.Libexpat
			case "libseccomp":
				p = rosa.Libseccomp
			case "libxml2":
				p = rosa.Libxml2
			case "libffi":
				p = rosa.Libffi
			case "libgd":
				p = rosa.Libgd
			case "m4":
				p = rosa.M4
			case "make":
				p = rosa.Make
			case "meson":
				p = rosa.Meson
			case "ninja":
				p = rosa.Ninja
			case "patch":
				p = rosa.Patch
			case "perl":
				p = rosa.Perl
			case "pkg-config":
				p = rosa.PkgConfig
			case "python":
				p = rosa.Python
			case "rsync":
				p = rosa.Rsync
			case "setuptools":
				p = rosa.Setuptools
			case "wayland":
				p = rosa.Wayland
			case "wayland-protocols":
				p = rosa.WaylandProtocols
			case "xcb":
				p = rosa.XCB
			case "xcb-proto":
				p = rosa.XCBProto
			case "xproto":
				p = rosa.Xproto
			case "zlib":
				p = rosa.Zlib

			default:
				return fmt.Errorf("unsupported artifact %q", args[0])
			}

			pathname, _, err := cache.Cure(rosa.Std.Load(p))
			if err == nil {
				log.Println(pathname)
			}
			return err

		},
	)

	c.MustParse(os.Args[1:], func(err error) {
		if cache != nil {
			cache.Close()
		}
		log.Fatal(err)
	})
}
```
cmd/sharefs/fuse-operations.c (new file, 282 lines)

```c
#ifndef _GNU_SOURCE
#define _GNU_SOURCE /* O_DIRECT */
#endif

#include <dirent.h>
#include <errno.h>
#include <unistd.h>

/* TODO(ophestra): remove after 05ce67fea99ca09cd4b6625cff7aec9cc222dd5a reaches a release */
#include <sys/syscall.h>

#include "fuse-operations.h"

/* MUST_TRANSLATE_PATHNAME translates a userspace pathname to a relative pathname;
 * the resulting address points to a constant string or part of pathname, it is never heap allocated. */
#define MUST_TRANSLATE_PATHNAME(pathname) \
	do { \
		if (pathname == NULL) \
			return -EINVAL; \
		while (*pathname == '/') \
			pathname++; \
		if (*pathname == '\0') \
			pathname = "."; \
	} while (0)

/* GET_CONTEXT_PRIV obtains fuse context and private data for the calling thread. */
#define GET_CONTEXT_PRIV(ctx, priv) \
	do { \
		ctx = fuse_get_context(); \
		priv = ctx->private_data; \
	} while (0)

/* impl_getattr modifies a struct stat from the kernel to present to userspace;
 * impl_getattr returns a negative errno style error code. */
static int impl_getattr(struct fuse_context *ctx, struct stat *statbuf) {
	/* allowlist of permitted types */
	if (!S_ISDIR(statbuf->st_mode) && !S_ISREG(statbuf->st_mode) && !S_ISLNK(statbuf->st_mode)) {
		return -ENOTRECOVERABLE; /* returning an errno causes all operations on the file to return EIO */
	}

#define OVERRIDE_PERM(v) (statbuf->st_mode & ~0777) | (v & 0777)
	if (S_ISDIR(statbuf->st_mode))
		statbuf->st_mode = OVERRIDE_PERM(SHAREFS_PERM_DIR);
	else if (S_ISREG(statbuf->st_mode))
		statbuf->st_mode = OVERRIDE_PERM(SHAREFS_PERM_REG);
	else
		statbuf->st_mode = 0; /* should always be symlink in this case */

	statbuf->st_uid = ctx->uid;
	statbuf->st_gid = SHAREFS_MEDIA_RW_ID;
	statbuf->st_ctim = statbuf->st_mtim;
	statbuf->st_nlink = 1;
	return 0;
}

/* fuse_operations implementation */

int sharefs_getattr(const char *pathname, struct stat *statbuf, struct fuse_file_info *fi) {
	struct fuse_context *ctx;
	struct sharefs_private *priv;
	GET_CONTEXT_PRIV(ctx, priv);
	MUST_TRANSLATE_PATHNAME(pathname);

	(void)fi;

	if (fstatat(priv->dirfd, pathname, statbuf, AT_SYMLINK_NOFOLLOW) == -1)
		return -errno;
	return impl_getattr(ctx, statbuf);
}

int sharefs_readdir(const char *pathname, void *buf, fuse_fill_dir_t filler, off_t offset, struct fuse_file_info *fi, enum fuse_readdir_flags flags) {
	int fd;
	DIR *dp;
	struct stat st;
	int ret = 0;
	struct dirent *de;

	struct fuse_context *ctx;
	struct sharefs_private *priv;
	GET_CONTEXT_PRIV(ctx, priv);
	MUST_TRANSLATE_PATHNAME(pathname);

	(void)offset;
	(void)fi;

	if ((fd = openat(priv->dirfd, pathname, O_RDONLY | O_DIRECTORY | O_CLOEXEC)) == -1)
		return -errno;
	if ((dp = fdopendir(fd)) == NULL) {
		close(fd);
		return -errno;
	}

	errno = 0; /* for the next readdir call */
	while ((de = readdir(dp)) != NULL) {
		if (flags & FUSE_READDIR_PLUS) {
			if (fstatat(dirfd(dp), de->d_name, &st, AT_SYMLINK_NOFOLLOW) == -1) {
				ret = -errno;
				break;
			}

			if ((ret = impl_getattr(ctx, &st)) < 0)
				break;

			errno = 0;
			ret = filler(buf, de->d_name, &st, 0, FUSE_FILL_DIR_PLUS);
		} else
			ret = filler(buf, de->d_name, NULL, 0, 0);

		if (ret != 0) {
			ret = errno != 0 ? -errno : -EIO; /* filler */
			break;
		}

		errno = 0; /* for the next readdir call */
	}
	if (ret == 0 && errno != 0)
		ret = -errno; /* readdir */

	closedir(dp);
	return ret;
}

int sharefs_mkdir(const char *pathname, mode_t mode) {
	struct fuse_context *ctx;
	struct sharefs_private *priv;
	GET_CONTEXT_PRIV(ctx, priv);
	MUST_TRANSLATE_PATHNAME(pathname);

	(void)mode;

	if (mkdirat(priv->dirfd, pathname, SHAREFS_PERM_DIR) == -1)
		return -errno;
	return 0;
}

int sharefs_unlink(const char *pathname) {
	struct fuse_context *ctx;
	struct sharefs_private *priv;
	GET_CONTEXT_PRIV(ctx, priv);
	MUST_TRANSLATE_PATHNAME(pathname);

	if (unlinkat(priv->dirfd, pathname, 0) == -1)
		return -errno;
	return 0;
}

int sharefs_rmdir(const char *pathname) {
	struct fuse_context *ctx;
	struct sharefs_private *priv;
	GET_CONTEXT_PRIV(ctx, priv);
	MUST_TRANSLATE_PATHNAME(pathname);

	if (unlinkat(priv->dirfd, pathname, AT_REMOVEDIR) == -1)
		return -errno;
	return 0;
}

int sharefs_rename(const char *oldpath, const char *newpath, unsigned int flags) {
	struct fuse_context *ctx;
	struct sharefs_private *priv;
	GET_CONTEXT_PRIV(ctx, priv);
	MUST_TRANSLATE_PATHNAME(oldpath);
	MUST_TRANSLATE_PATHNAME(newpath);

	/* TODO(ophestra): replace with wrapper after 05ce67fea99ca09cd4b6625cff7aec9cc222dd5a reaches a release */
	if (syscall(__NR_renameat2, priv->dirfd, oldpath, priv->dirfd, newpath, flags) == -1)
		return -errno;
	return 0;
}

int sharefs_truncate(const char *pathname, off_t length, struct fuse_file_info *fi) {
	int fd;
	int ret;

	struct fuse_context *ctx;
	struct sharefs_private *priv;
	GET_CONTEXT_PRIV(ctx, priv);
	MUST_TRANSLATE_PATHNAME(pathname);

	(void)fi;

	if ((fd = openat(priv->dirfd, pathname, O_WRONLY | O_CLOEXEC)) == -1)
		return -errno;
	if ((ret = ftruncate(fd, length)) == -1)
		ret = -errno;
	close(fd);
	return ret;
}

int sharefs_utimens(const char *pathname, const struct timespec times[2], struct fuse_file_info *fi) {
	struct fuse_context *ctx;
	struct sharefs_private *priv;
	GET_CONTEXT_PRIV(ctx, priv);
	MUST_TRANSLATE_PATHNAME(pathname);

	(void)fi;

	if (utimensat(priv->dirfd, pathname, times, AT_SYMLINK_NOFOLLOW) == -1)
		return -errno;
	return 0;
}

int sharefs_create(const char *pathname, mode_t mode, struct fuse_file_info *fi) {
	int fd;

	struct fuse_context *ctx;
	struct sharefs_private *priv;
	GET_CONTEXT_PRIV(ctx, priv);
	MUST_TRANSLATE_PATHNAME(pathname);

	(void)mode;

	if ((fd = openat(priv->dirfd, pathname, fi->flags & ~SHAREFS_FORBIDDEN_FLAGS, SHAREFS_PERM_REG)) == -1)
		return -errno;
	fi->fh = fd;
	return 0;
}

int sharefs_open(const char *pathname, struct fuse_file_info *fi) {
	int fd;

	struct fuse_context *ctx;
	struct sharefs_private *priv;
	GET_CONTEXT_PRIV(ctx, priv);
	MUST_TRANSLATE_PATHNAME(pathname);

	if ((fd = openat(priv->dirfd, pathname, fi->flags & ~SHAREFS_FORBIDDEN_FLAGS)) == -1)
		return -errno;
	fi->fh = fd;
	return 0;
}

int sharefs_read(const char *pathname, char *buf, size_t count, off_t offset, struct fuse_file_info *fi) {
	int ret;

	(void)pathname;

	if ((ret = pread(fi->fh, buf, count, offset)) == -1)
		return -errno;
	return ret;
}

int sharefs_write(const char *pathname, const char *buf, size_t count, off_t offset, struct fuse_file_info *fi) {
	int ret;

	(void)pathname;

	if ((ret = pwrite(fi->fh, buf, count, offset)) == -1)
		return -errno;
	return ret;
}

int sharefs_statfs(const char *pathname, struct statvfs *statbuf) {
	int fd;
	int ret;

	struct fuse_context *ctx;
	struct sharefs_private *priv;
	GET_CONTEXT_PRIV(ctx, priv);
	MUST_TRANSLATE_PATHNAME(pathname);

	if ((fd = openat(priv->dirfd, pathname, O_RDONLY | O_CLOEXEC)) == -1)
		return -errno;
	if ((ret = fstatvfs(fd, statbuf)) == -1)
		ret = -errno;
	close(fd);
	return ret;
}

int sharefs_release(const char *pathname, struct fuse_file_info *fi) {
	(void)pathname;

	return close(fi->fh);
}

int sharefs_fsync(const char *pathname, int datasync, struct fuse_file_info *fi) {
	(void)pathname;

	if (datasync ? fdatasync(fi->fh) : fsync(fi->fh) == -1)
		return -errno;
	return 0;
}
```
cmd/sharefs/fuse-operations.h (new file, 34 lines)

```c
#define FUSE_USE_VERSION FUSE_MAKE_VERSION(3, 12)
#include <fuse.h>
#include <fuse_lowlevel.h> /* for fuse_cmdline_help */

#if (FUSE_VERSION < FUSE_MAKE_VERSION(3, 12))
#error This package requires libfuse >= v3.12
#endif

#define SHAREFS_MEDIA_RW_ID (1 << 10) - 1 /* owning gid presented to userspace */
#define SHAREFS_PERM_DIR 0700 /* permission bits for directories presented to userspace */
#define SHAREFS_PERM_REG 0600 /* permission bits for regular files presented to userspace */
#define SHAREFS_FORBIDDEN_FLAGS O_DIRECT /* these open flags are cleared unconditionally */

/* sharefs_private is populated by sharefs_init and contains process-wide context */
struct sharefs_private {
	int dirfd; /* source dirfd opened during sharefs_init */
	uintptr_t setup; /* cgo handle of opaque setup state */
};

int sharefs_getattr(const char *pathname, struct stat *statbuf, struct fuse_file_info *fi);
int sharefs_readdir(const char *pathname, void *buf, fuse_fill_dir_t filler, off_t offset, struct fuse_file_info *fi, enum fuse_readdir_flags flags);
int sharefs_mkdir(const char *pathname, mode_t mode);
int sharefs_unlink(const char *pathname);
int sharefs_rmdir(const char *pathname);
int sharefs_rename(const char *oldpath, const char *newpath, unsigned int flags);
int sharefs_truncate(const char *pathname, off_t length, struct fuse_file_info *fi);
int sharefs_utimens(const char *pathname, const struct timespec times[2], struct fuse_file_info *fi);
int sharefs_create(const char *pathname, mode_t mode, struct fuse_file_info *fi);
int sharefs_open(const char *pathname, struct fuse_file_info *fi);
int sharefs_read(const char *pathname, char *buf, size_t count, off_t offset, struct fuse_file_info *fi);
int sharefs_write(const char *pathname, const char *buf, size_t count, off_t offset, struct fuse_file_info *fi);
int sharefs_statfs(const char *pathname, struct statvfs *statbuf);
int sharefs_release(const char *pathname, struct fuse_file_info *fi);
int sharefs_fsync(const char *pathname, int datasync, struct fuse_file_info *fi);
```
cmd/sharefs/fuse.go (new file, 556 lines)

```go
package main

/*
#cgo pkg-config: --static fuse3

#include "fuse-operations.h"
#include <stdlib.h>
#include <string.h>

extern void *sharefs_init(struct fuse_conn_info *conn, struct fuse_config *cfg);
extern void sharefs_destroy(void *private_data);

typedef void (*closure)();
static inline struct fuse_opt _FUSE_OPT_END() { return (struct fuse_opt)FUSE_OPT_END; };
*/
import "C"
import (
	"context"
	"encoding/gob"
	"errors"
	"fmt"
	"io"
	"log"
	"os"
	"os/exec"
	"os/signal"
	"path"
	"runtime"
	"runtime/cgo"
	"strconv"
	"syscall"
	"unsafe"

	"hakurei.app/container"
	"hakurei.app/container/check"
	"hakurei.app/container/std"
	"hakurei.app/hst"
	"hakurei.app/internal/helper/proc"
	"hakurei.app/internal/info"
	"hakurei.app/message"
)

type (
	// closure represents a C function pointer.
	closure = C.closure

	// fuseArgs represents the fuse_args structure.
	fuseArgs = C.struct_fuse_args

	// setupState holds state used for setup. Its cgo handle is included in
	// sharefs_private and considered opaque to non-setup callbacks.
	setupState struct {
		// Whether sharefs_init failed.
		initFailed bool

		// Whether to create source directory as root.
		mkdir bool

		// Open file descriptor to fuse.
		Fuse int

		// Pathname to open for dirfd.
		Source *check.Absolute
		// New uid and gid to set by sharefs_init when starting as root.
		Setuid, Setgid int
	}
)

func init() { gob.Register(new(setupState)) }

// destroySetup invalidates the setup [cgo.Handle] in a sharefs_private structure.
func destroySetup(private_data unsafe.Pointer) (ok bool) {
	if private_data == nil {
		return false
	}
	priv := (*C.struct_sharefs_private)(private_data)

	if h := cgo.Handle(priv.setup); h != 0 {
		priv.setup = 0
		h.Delete()
		ok = true
	}
	return
}

//export sharefs_init
func sharefs_init(_ *C.struct_fuse_conn_info, cfg *C.struct_fuse_config) unsafe.Pointer {
	ctx := C.fuse_get_context()
	priv := (*C.struct_sharefs_private)(ctx.private_data)
	setup := cgo.Handle(priv.setup).Value().(*setupState)

	if os.Geteuid() == 0 {
		log.Println("filesystem daemon must not run as root")
		goto fail
	}

	cfg.use_ino = C.true
	cfg.direct_io = C.false
	// getattr is context-dependent
	cfg.attr_timeout = 0
	cfg.entry_timeout = 0
	cfg.negative_timeout = 0

	// all future filesystem operations happen through this dirfd
	if fd, err := syscall.Open(setup.Source.String(), syscall.O_DIRECTORY|syscall.O_RDONLY|syscall.O_CLOEXEC, 0); err != nil {
		log.Printf("cannot open %q: %v", setup.Source, err)
		goto fail
	} else if err = syscall.Fchdir(fd); err != nil {
		_ = syscall.Close(fd)
		log.Printf("cannot enter %q: %s", setup.Source, err)
		goto fail
	} else {
		priv.dirfd = C.int(fd)
	}

	return ctx.private_data

fail:
	setup.initFailed = true
	C.fuse_exit(ctx.fuse)
	return nil
}

//export sharefs_destroy
func sharefs_destroy(private_data unsafe.Pointer) {
	if private_data != nil {
		destroySetup(private_data)
		priv := (*C.struct_sharefs_private)(private_data)

		if err := syscall.Close(int(priv.dirfd)); err != nil {
			log.Printf("cannot close source directory: %v", err)
		}
	}
}

// showHelp prints the help message.
func showHelp(args *fuseArgs) {
	executableName := sharefsName
	if args.argc > 0 {
		executableName = path.Base(C.GoString(*args.argv))
	} else if name, err := os.Executable(); err == nil {
		executableName = path.Base(name)
	}

	fmt.Printf("usage: %s [options] <mountpoint>\n\n", executableName)

	fmt.Println("Filesystem options:")
	fmt.Println("  -o source=/data/media  source directory to be mounted")
	fmt.Println("  -o setuid=1023         uid to run as when starting as root")
	fmt.Println("  -o setgid=1023         gid to run as when starting as root")

	fmt.Println("\nFUSE options:")
	C.fuse_cmdline_help()
	C.fuse_lib_help(args)
}

// parseOpts parses fuse options via fuse_opt_parse.
func parseOpts(args *fuseArgs, setup *setupState, log *log.Logger) (ok bool) {
	var unsafeOpts struct {
		// Pathname to writable source directory.
		source *C.char

		// Whether to create source directory as root.
		mkdir C.int

		// Decimal string representation of uid to set when running as root.
		setuid *C.char
		// Decimal string representation of gid to set when running as root.
		setgid *C.char

		// Decimal string representation of open file descriptor to read setupState from.
		// This is an internal detail for containerisation and must not be specified directly.
		setup *C.char
	}

	if C.fuse_opt_parse(args, unsafe.Pointer(&unsafeOpts), &[]C.struct_fuse_opt{
		{templ: C.CString("source=%s"), offset: C.ulong(unsafe.Offsetof(unsafeOpts.source)), value: 0},
		{templ: C.CString("mkdir"), offset: C.ulong(unsafe.Offsetof(unsafeOpts.mkdir)), value: 1},
		{templ: C.CString("setuid=%s"), offset: C.ulong(unsafe.Offsetof(unsafeOpts.setuid)), value: 0},
		{templ: C.CString("setgid=%s"), offset: C.ulong(unsafe.Offsetof(unsafeOpts.setgid)), value: 0},

		{templ: C.CString("setup=%s"), offset: C.ulong(unsafe.Offsetof(unsafeOpts.setup)), value: 0},

		C._FUSE_OPT_END(),
	}[0], nil) == -1 {
		return false
	}

	if unsafeOpts.source != nil {
		defer C.free(unsafe.Pointer(unsafeOpts.source))
	}
	if unsafeOpts.setuid != nil {
		defer C.free(unsafe.Pointer(unsafeOpts.setuid))
	}
	if unsafeOpts.setgid != nil {
		defer C.free(unsafe.Pointer(unsafeOpts.setgid))
	}

	if unsafeOpts.setup != nil {
		defer C.free(unsafe.Pointer(unsafeOpts.setup))

		if v, err := strconv.Atoi(C.GoString(unsafeOpts.setup)); err != nil || v < 3 {
			log.Println("invalid value for option setup")
			return false
		} else {
			r := os.NewFile(uintptr(v), "setup")
			defer func() {
				if err = r.Close(); err != nil {
					log.Println(err)
				}
			}()
			if err = gob.NewDecoder(r).Decode(setup); err != nil {
				log.Println(err)
				return false
			}
		}
		if setup.Fuse < 3 {
			log.Println("invalid file descriptor", setup.Fuse)
			return false
		}
		return true
	}

	if unsafeOpts.source == nil {
		showHelp(args)
		return false
	} else if a, err := check.NewAbs(C.GoString(unsafeOpts.source)); err != nil {
		log.Println(err)
		return false
	} else {
		setup.Source = a
	}
	setup.mkdir = unsafeOpts.mkdir != 0

	if unsafeOpts.setuid == nil {
		setup.Setuid = -1
	} else if v, err := strconv.Atoi(C.GoString(unsafeOpts.setuid)); err != nil || v <= 0 {
		log.Println("invalid value for option setuid")
		return false
	} else {
		setup.Setuid = v
	}
	if unsafeOpts.setgid == nil {
		setup.Setgid = -1
	} else if v, err := strconv.Atoi(C.GoString(unsafeOpts.setgid)); err != nil || v <= 0 {
		log.Println("invalid value for option setgid")
		return false
	} else {
		setup.Setgid = v
	}

	return true
}

// copyArgs returns a heap allocated copy of an argument slice in fuse_args representation.
func copyArgs(s ...string) fuseArgs {
	if len(s) == 0 {
		return fuseArgs{argc: 0, argv: nil, allocated: 0}
	}
	args := unsafe.Slice((**C.char)(C.malloc(C.size_t(uintptr(len(s))*unsafe.Sizeof(s[0])))), len(s))
	for i, arg := range s {
		args[i] = C.CString(arg)
	}
	return fuseArgs{argc: C.int(len(s)), argv: &args[0], allocated: 1}
}

// freeArgs frees the contents of argument list.
func freeArgs(args *fuseArgs) { C.fuse_opt_free_args(args) }

// unsafeAddArgument adds an argument to fuseArgs via fuse_opt_add_arg.
// The last byte of arg must be 0.
func unsafeAddArgument(args *fuseArgs, arg string) {
	C.fuse_opt_add_arg(args, (*C.char)(unsafe.Pointer(unsafe.StringData(arg))))
}

func _main(s ...string) (exitCode int) {
	msg := message.New(log.Default())
	container.TryArgv0(msg)
	runtime.LockOSThread()

	// don't mask creation mode, kernel already did that
	syscall.Umask(0)

	var pinner runtime.Pinner
	defer pinner.Unpin()

	args := copyArgs(s...)
	defer freeArgs(&args)

	// this causes the kernel to enforce access control based on
	// struct stat populated by sharefs_getattr
	unsafeAddArgument(&args, "-odefault_permissions\x00")

	var priv C.struct_sharefs_private
	pinner.Pin(&priv)
	var setup setupState
	priv.setup = C.uintptr_t(cgo.NewHandle(&setup))
	defer destroySetup(unsafe.Pointer(&priv))

	var opts C.struct_fuse_cmdline_opts
	if C.fuse_parse_cmdline(&args, &opts) != 0 {
		return 1
	}
	if opts.mountpoint != nil {
		defer C.free(unsafe.Pointer(opts.mountpoint))
	}

	if opts.show_version != 0 {
		fmt.Println("hakurei version", info.Version())
		fmt.Println("FUSE library version", C.GoString(C.fuse_pkgversion()))
		C.fuse_lowlevel_version()
		return 0
	}

	if opts.show_help != 0 {
		showHelp(&args)
		return 0
	} else if opts.mountpoint == nil {
		log.Println("no mountpoint specified")
		return 2
	} else {
		// hack to keep fuse_parse_cmdline happy in the container
		mountpoint := C.GoString(opts.mountpoint)
		pathnameArg := -1
		for i, arg := range s {
			if arg == mountpoint {
				pathnameArg = i
				break
			}
		}
		if pathnameArg < 0 {
			log.Println("mountpoint must be absolute")
			return 2
		}
		s[pathnameArg] = container.Nonexistent
	}

	if !parseOpts(&args, &setup, msg.GetLogger()) {
		return 1
	}
	asRoot := os.Geteuid() == 0

	if asRoot {
		if setup.Setuid <= 0 || setup.Setgid <= 0 {
			log.Println("setuid and setgid must not be 0")
			return 1
		}

		if setup.Fuse >= 3 {
			log.Println("filesystem daemon must not run as root")
			return 1
		}

		if setup.mkdir {
			if err := os.MkdirAll(setup.Source.String(), 0700); err != nil {
				if !errors.Is(err, os.ErrExist) {
					log.Println(err)
					return 1
				}
				// skip setup for existing source directory
			} else if err = os.Chown(setup.Source.String(), setup.Setuid, setup.Setgid); err != nil {
				log.Println(err)
				return 1
			}
		}
	} else if setup.Fuse < 3 && (setup.Setuid > 0 || setup.Setgid > 0) {
		log.Println("setuid and setgid has no effect when not starting as root")
		return 1
	} else if setup.mkdir {
		log.Println("mkdir has no effect when not starting as root")
		return 1
	}

	op := C.struct_fuse_operations{
		init:    closure(C.sharefs_init),
		destroy: closure(C.sharefs_destroy),

		// implemented in fuse-helper.c
		getattr:  closure(C.sharefs_getattr),
		readdir:  closure(C.sharefs_readdir),
		mkdir:    closure(C.sharefs_mkdir),
		unlink:   closure(C.sharefs_unlink),
		rmdir:    closure(C.sharefs_rmdir),
		rename:   closure(C.sharefs_rename),
		truncate: closure(C.sharefs_truncate),
		utimens:  closure(C.sharefs_utimens),
		create:   closure(C.sharefs_create),
		open:     closure(C.sharefs_open),
		read:     closure(C.sharefs_read),
		write:    closure(C.sharefs_write),
		statfs:   closure(C.sharefs_statfs),
		release:  closure(C.sharefs_release),
		fsync:    closure(C.sharefs_fsync),
	}

	fuse := C.fuse_new_fn(&args, &op, C.size_t(unsafe.Sizeof(op)), unsafe.Pointer(&priv))
	if fuse == nil {
		return 3
	}
	defer C.fuse_destroy(fuse)
	se := C.fuse_get_session(fuse)

	if setup.Fuse < 3 {
		// unconfined, set up mount point and container
		if C.fuse_mount(fuse, opts.mountpoint) != 0 {
			return 4
		}
		// unmounted by initial process
		defer func() {
			if exitCode == 5 {
				C.fuse_unmount(fuse)
			}
		}()

		if asRoot {
			if err := syscall.Setresgid(setup.Setgid, setup.Setgid, setup.Setgid); err != nil {
				log.Printf("cannot set gid: %v", err)
				return 5
			}
			if err := syscall.Setgroups(nil); err != nil {
				log.Printf("cannot set supplementary groups: %v", err)
				return 5
			}
			if err := syscall.Setresuid(setup.Setuid, setup.Setuid, setup.Setuid); err != nil {
				log.Printf("cannot set uid: %v", err)
				return 5
			}
		}

		msg.SwapVerbose(opts.debug != 0)
		ctx := context.Background()
		if opts.foreground != 0 {
			c, cancel := signal.NotifyContext(ctx, syscall.SIGINT, syscall.SIGTERM)
			defer cancel()
			ctx = c
		}
		z := container.New(ctx, msg)
		z.AllowOrphan = opts.foreground == 0
		z.Env = os.Environ()

		// keep fuse_parse_cmdline happy in the container
		z.Tmpfs(check.MustAbs(container.Nonexistent), 1<<10, 0755)

		if a, err := check.NewAbs(container.MustExecutable(msg)); err != nil {
			log.Println(err)
			return 5
		} else {
			z.Path = a
		}
		z.Args = s
		z.ForwardCancel = true
		z.SeccompPresets |= std.PresetStrict
		z.ParentPerm = 0700
		z.Bind(setup.Source, setup.Source, std.BindWritable)
		if !z.AllowOrphan {
			z.WaitDelay = hst.WaitDelayMax
			z.Stdin, z.Stdout, z.Stderr = os.Stdin, os.Stdout, os.Stderr
		}
		z.Bind(z.Path, z.Path, 0)
		setup.Fuse = int(proc.ExtraFileSlice(&z.ExtraFiles, os.NewFile(uintptr(C.fuse_session_fd(se)), "fuse")))

		var setupWriter io.WriteCloser
		if fd, w, err := container.Setup(&z.ExtraFiles); err != nil {
			log.Println(err)
			return 5
		} else {
			z.Args = append(z.Args, "-osetup="+strconv.Itoa(fd))
			setupWriter = w
		}

		if err := z.Start(); err != nil {
			if m, ok := message.GetMessage(err); ok {
				log.Println(m)
			} else {
				log.Println(err)
			}
			return 5
		}
		if err := z.Serve(); err != nil {
			if m, ok := message.GetMessage(err); ok {
				log.Println(m)
			} else {
				log.Println(err)
			}
			return 5
		}

		if err := gob.NewEncoder(setupWriter).Encode(&setup); err != nil {
			log.Println(err)
			return 5
		} else if err = setupWriter.Close(); err != nil {
			log.Println(err)
		}

		if !z.AllowOrphan {
			if err := z.Wait(); err != nil {
				var exitError *exec.ExitError
				if !errors.As(err, &exitError) || exitError == nil {
					log.Println(err)
					return 5
				}
				switch code := exitError.ExitCode(); syscall.Signal(code & 0x7f) {
				case syscall.SIGINT:
				case syscall.SIGTERM:

				default:
					return code
				}
			}
		}
		return 0
	} else { // confined
		C.free(unsafe.Pointer(opts.mountpoint))
		// must be heap allocated
		opts.mountpoint = C.CString("/dev/fd/" + strconv.Itoa(setup.Fuse))

		if err := os.Chdir("/"); err != nil {
			log.Println(err)
		}
	}

	if C.fuse_mount(fuse, opts.mountpoint) != 0 {
		return 4
	}
	defer C.fuse_unmount(fuse)

	if C.fuse_set_signal_handlers(se) != 0 {
		return 6
	}
	defer C.fuse_remove_signal_handlers(se)

	if opts.singlethread != 0 {
		if C.fuse_loop(fuse) != 0 {
			return 8
		}
	} else {
		loopConfig := C.fuse_loop_cfg_create()
		if loopConfig == nil {
			return 7
		}
		defer C.fuse_loop_cfg_destroy(loopConfig)

		C.fuse_loop_cfg_set_clone_fd(loopConfig, C.uint(opts.clone_fd))

		C.fuse_loop_cfg_set_idle_threads(loopConfig, opts.max_idle_threads)
		C.fuse_loop_cfg_set_max_threads(loopConfig, opts.max_threads)
		if C.fuse_loop_mt(fuse, loopConfig) != 0 {
			return 8
		}
	}

	if setup.initFailed {
		return 1
	}
	return 0
}
```
cmd/sharefs/fuse_test.go (new file, 113 lines)

```go
package main

import (
	"bytes"
	"log"
	"reflect"
	"testing"

	"hakurei.app/container/check"
)

func TestParseOpts(t *testing.T) {
	t.Parallel()

	testCases := []struct {
		name    string
		args    []string
		want    setupState
		wantLog string
		wantOk  bool
	}{
		{"zero length", []string{}, setupState{}, "", false},

		{"not absolute", []string{"sharefs",
			"-o", "source=nonexistent",
			"-o", "setuid=1023",
			"-o", "setgid=1023",
		}, setupState{}, "sharefs: path \"nonexistent\" is not absolute\n", false},

		{"not specified", []string{"sharefs",
			"-o", "setuid=1023",
			"-o", "setgid=1023",
		}, setupState{}, "", false},

		{"invalid setuid", []string{"sharefs",
			"-o", "source=/proc/nonexistent",
			"-o", "setuid=ff",
			"-o", "setgid=1023",
		}, setupState{
			Source: check.MustAbs("/proc/nonexistent"),
		}, "sharefs: invalid value for option setuid\n", false},

		{"invalid setgid", []string{"sharefs",
			"-o", "source=/proc/nonexistent",
			"-o", "setuid=1023",
			"-o", "setgid=ff",
		}, setupState{
			Source: check.MustAbs("/proc/nonexistent"),
			Setuid: 1023,
		}, "sharefs: invalid value for option setgid\n", false},

		{"simple", []string{"sharefs",
			"-o", "source=/proc/nonexistent",
		}, setupState{
			Source: check.MustAbs("/proc/nonexistent"),
			Setuid: -1,
			Setgid: -1,
		}, "", true},

		{"root", []string{"sharefs",
			"-o", "source=/proc/nonexistent",
			"-o", "setuid=1023",
			"-o", "setgid=1023",
		}, setupState{
			Source: check.MustAbs("/proc/nonexistent"),
			Setuid: 1023,
			Setgid: 1023,
		}, "", true},

		{"setuid", []string{"sharefs",
			"-o", "source=/proc/nonexistent",
			"-o", "setuid=1023",
		}, setupState{
			Source: check.MustAbs("/proc/nonexistent"),
			Setuid: 1023,
			Setgid: -1,
		}, "", true},

		{"setgid", []string{"sharefs",
			"-o", "source=/proc/nonexistent",
			"-o", "setgid=1023",
		}, setupState{
			Source: check.MustAbs("/proc/nonexistent"),
			Setuid: -1,
			Setgid: 1023,
		}, "", true},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()

			var (
				got setupState
				buf bytes.Buffer
			)
			args := copyArgs(tc.args...)
			defer freeArgs(&args)
			unsafeAddArgument(&args, "-odefault_permissions\x00")

			if ok := parseOpts(&args, &got, log.New(&buf, "sharefs: ", 0)); ok != tc.wantOk {
				t.Errorf("parseOpts: ok = %v, want %v", ok, tc.wantOk)
			}

			if !reflect.DeepEqual(&got, &tc.want) {
				t.Errorf("parseOpts: setup = %#v, want %#v", got, tc.want)
			}

			if buf.String() != tc.wantLog {
				t.Errorf("parseOpts: log =\n%s\nwant\n%s", buf.String(), tc.wantLog)
			}
		})
	}
}
```
cmd/sharefs/main.go (new file, 31 lines)

```go
package main

import (
	"log"
	"os"
	"slices"
)

// sharefsName is the prefix used by log.std in the sharefs process.
const sharefsName = "sharefs"

// handleMountArgs returns an alternative, libfuse-compatible args slice for
// args passed by mount -t fuse.sharefs [options] sharefs <mountpoint>.
//
// In this case, args always has a length of 5 with index 0 being what comes
// after "fuse." in the filesystem type, 1 is the uninterpreted string passed
// to mount (sharefsName is used as the magic string to enable this hack),
// 2 is passed through to libfuse as mountpoint, and 3 is always "-o".
func handleMountArgs(args []string) []string {
	if len(args) == 5 && args[1] == sharefsName && args[3] == "-o" {
		return []string{sharefsName, args[2], "-o", args[4]}
	}
	return slices.Clone(args)
}

func main() {
	log.SetFlags(0)
	log.SetPrefix(sharefsName + ": ")

	os.Exit(_main(handleMountArgs(os.Args)...))
}
```
cmd/sharefs/main_test.go (new file, 29 lines)

```go
package main

import (
	"slices"
	"testing"
)

func TestHandleMountArgs(t *testing.T) {
	t.Parallel()

	testCases := []struct {
		name string
		args []string
		want []string
	}{
		{"nil", nil, nil},
		{"passthrough", []string{"sharefs", "-V"}, []string{"sharefs", "-V"}},
		{"replace", []string{"/sbin/sharefs", "sharefs", "/sdcard", "-o", "rw"}, []string{"sharefs", "/sdcard", "-o", "rw"}},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()

			if got := handleMountArgs(tc.args); !slices.Equal(got, tc.want) {
				t.Errorf("handleMountArgs: %q, want %q", got, tc.want)
			}
		})
	}
}
```
cmd/sharefs/test/configuration.nix (new file, 41 lines)

```nix
{ pkgs, ... }:
{
  users.users = {
    alice = {
      isNormalUser = true;
      description = "Alice Foobar";
      password = "foobar";
      uid = 1000;
    };
  };

  home-manager.users.alice.home.stateVersion = "24.11";

  # Automatically login on tty1 as a normal user:
  services.getty.autologinUser = "alice";

  environment = {
    # For benchmarking sharefs:
    systemPackages = [ pkgs.fsmark ];
  };

  virtualisation = {
    diskSize = 6 * 1024;

    qemu.options = [
      # Increase test performance:
      "-smp 8"
    ];
  };

  environment.hakurei = rec {
    enable = true;
    stateDir = "/var/lib/hakurei";
    sharefs.source = "${stateDir}/sdcard";
    users.alice = 0;

    extraHomeConfig = {
      home.stateVersion = "23.05";
    };
  };
}
```
cmd/sharefs/test/default.nix (new file, 44 lines)

```nix
{
  testers,

  system,
  self,
}:
testers.nixosTest {
  name = "sharefs";
  nodes.machine =
    { options, pkgs, ... }:
    let
      fhs =
        let
          hakurei = options.environment.hakurei.package.default;
        in
        pkgs.buildFHSEnv {
          pname = "hakurei-fhs";
          inherit (hakurei) version;
          targetPkgs = _: hakurei.targetPkgs;
          extraOutputsToInstall = [ "dev" ];
          profile = ''
            export PKG_CONFIG_PATH="/usr/share/pkgconfig:$PKG_CONFIG_PATH"
          '';
        };
    in
    {
      environment.systemPackages = [
        # For go tests:
        (pkgs.writeShellScriptBin "sharefs-workload-hakurei-tests" ''
          cp -r "${self.packages.${system}.hakurei.src}" "/sdcard/hakurei" && cd "/sdcard/hakurei"
          ${fhs}/bin/hakurei-fhs -c 'CC="clang -O3 -Werror" go test ./...'
        '')
      ];

      imports = [
        ./configuration.nix

        self.nixosModules.hakurei
        self.inputs.home-manager.nixosModules.home-manager
      ];
    };

  testScript = builtins.readFile ./test.py;
}
```
cmd/sharefs/test/test.py (new file, 60 lines)

```python
start_all()
machine.wait_for_unit("multi-user.target")

# To check sharefs version:
print(machine.succeed("sharefs -V"))

# Make sure sharefs started:
machine.wait_for_unit("sdcard.mount")

machine.succeed("mkdir /mnt")
def check_bad_opts_output(opts, want, source="/etc", privileged=False):
    output = machine.fail(("" if privileged else "sudo -u alice -i ") + f"sharefs -f -o source={source},{opts} /mnt 2>&1")
    if output != want:
        raise Exception(f"unexpected output: {output}")

# Malformed setuid/setgid representation:
check_bad_opts_output("setuid=ff", "sharefs: invalid value for option setuid\n")
check_bad_opts_output("setgid=ff", "sharefs: invalid value for option setgid\n")

# Bounds check for setuid/setgid:
check_bad_opts_output("setuid=0", "sharefs: invalid value for option setuid\n")
check_bad_opts_output("setgid=0", "sharefs: invalid value for option setgid\n")
check_bad_opts_output("setuid=-1", "sharefs: invalid value for option setuid\n")
check_bad_opts_output("setgid=-1", "sharefs: invalid value for option setgid\n")

# Non-root setuid/setgid:
check_bad_opts_output("setuid=1023", "sharefs: setuid and setgid has no effect when not starting as root\n")
check_bad_opts_output("setgid=1023", "sharefs: setuid and setgid has no effect when not starting as root\n")
check_bad_opts_output("setuid=1023,setgid=1023", "sharefs: setuid and setgid has no effect when not starting as root\n")
check_bad_opts_output("mkdir", "sharefs: mkdir has no effect when not starting as root\n")

# Starting as root without setuid/setgid:
check_bad_opts_output("allow_other", "sharefs: setuid and setgid must not be 0\n", privileged=True)
check_bad_opts_output("setuid=1023", "sharefs: setuid and setgid must not be 0\n", privileged=True)
check_bad_opts_output("setgid=1023", "sharefs: setuid and setgid must not be 0\n", privileged=True)

# Make sure nothing actually got mounted:
machine.fail("umount /mnt")
machine.succeed("rmdir /mnt")

# Unprivileged mount/unmount:
machine.succeed("sudo -u alice -i mkdir /home/alice/{sdcard,persistent}")
machine.succeed("sudo -u alice -i sharefs -o source=/home/alice/persistent /home/alice/sdcard")
machine.succeed("sudo -u alice -i touch /home/alice/sdcard/check")
machine.succeed("sudo -u alice -i umount /home/alice/sdcard")
machine.succeed("sudo -u alice -i rm /home/alice/persistent/check")
machine.succeed("sudo -u alice -i rmdir /home/alice/{sdcard,persistent}")

# Benchmark sharefs:
machine.succeed("fs_mark -v -d /sdcard/fs_mark -l /tmp/fs_log.txt")
machine.copy_from_vm("/tmp/fs_log.txt", "")

# Check permissions:
machine.succeed("sudo -u sharefs touch /var/lib/hakurei/sdcard/fs_mark/.check")
machine.succeed("sudo -u sharefs rm /var/lib/hakurei/sdcard/fs_mark/.check")
machine.succeed("sudo -u alice rm -rf /sdcard/fs_mark")
machine.fail("ls /var/lib/hakurei/sdcard/fs_mark")

# Run hakurei tests on sharefs:
machine.succeed("sudo -u alice -i sharefs-workload-hakurei-tests")
```
```diff
@@ -14,6 +14,7 @@ const (
 
 	CAP_SYS_ADMIN = 0x15
 	CAP_SETPCAP = 0x8
 	CAP_NET_ADMIN = 0xc
+	CAP_DAC_OVERRIDE = 0x1
 )
 
```
```diff
@@ -9,46 +9,60 @@ import (
 	"slices"
 	"strings"
 	"syscall"
+	"unique"
 )
 
 // AbsoluteError is returned by [NewAbs] and holds the invalid pathname.
-type AbsoluteError struct{ Pathname string }
+type AbsoluteError string
 
-func (e *AbsoluteError) Error() string { return fmt.Sprintf("path %q is not absolute", e.Pathname) }
-func (e *AbsoluteError) Is(target error) bool {
-	var ce *AbsoluteError
+func (e AbsoluteError) Error() string {
+	return fmt.Sprintf("path %q is not absolute", string(e))
+}
+
+func (e AbsoluteError) Is(target error) bool {
+	var ce AbsoluteError
 	if !errors.As(target, &ce) {
 		return errors.Is(target, syscall.EINVAL)
 	}
-	return *e == *ce
+	return e == ce
 }
 
 // Absolute holds a pathname checked to be absolute.
-type Absolute struct{ pathname string }
+type Absolute struct{ pathname unique.Handle[string] }
 
+// ok returns whether [Absolute] is not the zero value.
+func (a *Absolute) ok() bool { return a != nil && *a != (Absolute{}) }
+
 // unsafeAbs returns [check.Absolute] on any string value.
-func unsafeAbs(pathname string) *Absolute { return &Absolute{pathname} }
+func unsafeAbs(pathname string) *Absolute {
+	return &Absolute{unique.Make(pathname)}
+}
 
 // String returns the checked pathname.
 func (a *Absolute) String() string {
-	if a.pathname == "" {
+	if !a.ok() {
 		panic("attempted use of zero Absolute")
 	}
-	return a.pathname
+	return a.pathname.Value()
 }
 
+// Handle returns the underlying [unique.Handle].
+func (a *Absolute) Handle() unique.Handle[string] {
+	return a.pathname
+}
+
 // Is efficiently compares the underlying pathname.
 func (a *Absolute) Is(v *Absolute) bool {
 	if a == nil && v == nil {
 		return true
 	}
-	return a != nil && v != nil &&
-		a.pathname != "" && v.pathname != "" &&
-		a.pathname == v.pathname
+	return a.ok() && v.ok() && a.pathname == v.pathname
 }
 
 // NewAbs checks pathname and returns a new [Absolute] if pathname is absolute.
 func NewAbs(pathname string) (*Absolute, error) {
 	if !path.IsAbs(pathname) {
-		return nil, &AbsoluteError{pathname}
+		return nil, AbsoluteError(pathname)
 	}
 	return unsafeAbs(pathname), nil
 }
@@ -70,35 +84,49 @@ func (a *Absolute) Append(elem ...string) *Absolute {
 // Dir calls [path.Dir] with [Absolute] as its argument.
 func (a *Absolute) Dir() *Absolute { return unsafeAbs(path.Dir(a.String())) }
 
-func (a *Absolute) GobEncode() ([]byte, error) { return []byte(a.String()), nil }
+// GobEncode returns the checked pathname.
+func (a *Absolute) GobEncode() ([]byte, error) {
+	return []byte(a.String()), nil
+}
 
 // GobDecode stores data if it represents an absolute pathname.
 func (a *Absolute) GobDecode(data []byte) error {
 	pathname := string(data)
 	if !path.IsAbs(pathname) {
-		return &AbsoluteError{pathname}
+		return AbsoluteError(pathname)
 	}
-	a.pathname = pathname
+	a.pathname = unique.Make(pathname)
 	return nil
 }
 
-func (a *Absolute) MarshalJSON() ([]byte, error) { return json.Marshal(a.String()) }
+// MarshalJSON returns a JSON representation of the checked pathname.
+func (a *Absolute) MarshalJSON() ([]byte, error) {
+	return json.Marshal(a.String())
+}
 
 // UnmarshalJSON stores data if it represents an absolute pathname.
 func (a *Absolute) UnmarshalJSON(data []byte) error {
 	var pathname string
 	if err := json.Unmarshal(data, &pathname); err != nil {
 		return err
 	}
 	if !path.IsAbs(pathname) {
-		return &AbsoluteError{pathname}
+		return AbsoluteError(pathname)
 	}
-	a.pathname = pathname
+	a.pathname = unique.Make(pathname)
 	return nil
 }
 
 // SortAbs calls [slices.SortFunc] for a slice of [Absolute].
 func SortAbs(x []*Absolute) {
-	slices.SortFunc(x, func(a, b *Absolute) int { return strings.Compare(a.String(), b.String()) })
+	slices.SortFunc(x, func(a, b *Absolute) int {
+		return strings.Compare(a.String(), b.String())
+	})
 }
 
 // CompactAbs calls [slices.CompactFunc] for a slice of [Absolute].
 func CompactAbs(s []*Absolute) []*Absolute {
-	return slices.CompactFunc(s, func(a *Absolute, b *Absolute) bool { return a.String() == b.String() })
+	return slices.CompactFunc(s, func(a *Absolute, b *Absolute) bool {
+		return a.Is(b)
+	})
 }
```
```diff
@@ -31,8 +31,8 @@ func TestAbsoluteError(t *testing.T) {
 	}{
 		{"EINVAL", new(AbsoluteError), syscall.EINVAL, true},
 		{"not EINVAL", new(AbsoluteError), syscall.EBADE, false},
-		{"ne val", new(AbsoluteError), &AbsoluteError{Pathname: "etc"}, false},
-		{"equals", &AbsoluteError{Pathname: "etc"}, &AbsoluteError{Pathname: "etc"}, true},
+		{"ne val", new(AbsoluteError), AbsoluteError("etc"), false},
+		{"equals", AbsoluteError("etc"), AbsoluteError("etc"), true},
 	}
 
 	for _, tc := range testCases {
@@ -45,7 +45,7 @@ func TestAbsoluteError(t *testing.T) {
 		t.Parallel()
 
 		want := `path "etc" is not absolute`
-		if got := (&AbsoluteError{Pathname: "etc"}).Error(); got != want {
+		if got := (AbsoluteError("etc")).Error(); got != want {
 			t.Errorf("Error: %q, want %q", got, want)
 		}
 	})
@@ -62,8 +62,8 @@ func TestNewAbs(t *testing.T) {
 		wantErr error
 	}{
 		{"good", "/etc", MustAbs("/etc"), nil},
-		{"not absolute", "etc", nil, &AbsoluteError{Pathname: "etc"}},
-		{"zero", "", nil, &AbsoluteError{Pathname: ""}},
+		{"not absolute", "etc", nil, AbsoluteError("etc")},
+		{"zero", "", nil, AbsoluteError("")},
 	}
 
 	for _, tc := range testCases {
@@ -84,7 +84,7 @@ func TestNewAbs(t *testing.T) {
 		t.Parallel()
 
 		defer func() {
-			wantPanic := &AbsoluteError{Pathname: "etc"}
+			wantPanic := AbsoluteError("etc")
 
 			if r := recover(); !reflect.DeepEqual(r, wantPanic) {
 				t.Errorf("MustAbs: panic = %v; want %v", r, wantPanic)
@@ -175,7 +175,7 @@ func TestCodecAbsolute(t *testing.T) {
 
 			`"/etc"`, `{"val":"/etc","magic":3236757504}`},
 		{"not absolute", nil,
-			&AbsoluteError{Pathname: "etc"},
+			AbsoluteError("etc"),
 			"\t\x7f\x05\x01\x02\xff\x82\x00\x00\x00\a\xff\x80\x00\x03etc",
 			",\xff\x83\x03\x01\x01\x06sCheck\x01\xff\x84\x00\x01\x02\x01\bPathname\x01\xff\x80\x00\x01\x05Magic\x01\x06\x00\x00\x00\t\x7f\x05\x01\x02\xff\x82\x00\x00\x00\x0f\xff\x84\x01\x03etc\x01\xfb\x01\x81\xda\x00\x00\x00",
 
```
@@ -35,6 +35,8 @@ type (
    // Container represents a container environment being prepared or run.
    // None of [Container] methods are safe for concurrent use.
    Container struct {
        // Whether the container init should stay alive after its parent terminates.
        AllowOrphan bool
        // Cgroup fd, nil to disable.
        Cgroup *int
        // ExtraFiles passed through to initial process in the container,

@@ -253,7 +255,6 @@ func (p *Container) Start() error {
    p.cmd.Dir = fhs.Root
    p.cmd.SysProcAttr = &SysProcAttr{
        Setsid:     !p.RetainSession,
        Pdeathsig:  SIGKILL,
        Cloneflags: CLONE_NEWUSER | CLONE_NEWPID | CLONE_NEWNS |
            CLONE_NEWIPC | CLONE_NEWUTS | CLONE_NEWCGROUP,

@@ -262,12 +263,17 @@ func (p *Container) Start() error {
            CAP_SYS_ADMIN,
            // drop capabilities
            CAP_SETPCAP,
            // bring up loopback interface
            CAP_NET_ADMIN,
            // overlay access to upperdir and workdir
            CAP_DAC_OVERRIDE,
        },

        UseCgroupFD: p.Cgroup != nil,
    }
    if !p.AllowOrphan {
        p.cmd.SysProcAttr.Pdeathsig = SIGKILL
    }
    if p.cmd.SysProcAttr.UseCgroupFD {
        p.cmd.SysProcAttr.CgroupFD = *p.Cgroup
    }
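The Start changes above gate Pdeathsig behind the new AllowOrphan field and wire the optional cgroup fd into SysProcAttr. A minimal standalone sketch of the same plumbing over os/exec, assuming a Linux target; the helper name, cgroup path and parameters are illustrative, not taken from the repository:

package main

import (
    "os/exec"
    "syscall"
)

// startInCgroup mirrors the logic above: the child is moved into the cgroup
// referred to by cgroupFD (if any) at clone time, and is killed when its
// parent dies unless allowOrphan is set.
func startInCgroup(cmd *exec.Cmd, cgroupFD *int, allowOrphan bool) error {
    attr := &syscall.SysProcAttr{
        Setsid:      true,
        UseCgroupFD: cgroupFD != nil,
    }
    if !allowOrphan {
        attr.Pdeathsig = syscall.SIGKILL
    }
    if attr.UseCgroupFD {
        attr.CgroupFD = *cgroupFD
    }
    cmd.SysProcAttr = attr
    return cmd.Start()
}

func main() {
    // Hypothetical cgroup directory; a read-only fd on it is what
    // UseCgroupFD/CgroupFD expect (CLONE_INTO_CGROUP).
    fd, err := syscall.Open("/sys/fs/cgroup/mygroup", syscall.O_RDONLY|syscall.O_CLOEXEC, 0)
    if err != nil {
        panic(err)
    }
    defer syscall.Close(fd)

    cmd := exec.Command("true")
    if err := startInCgroup(cmd, &fd, false); err != nil {
        panic(err)
    }
    _ = cmd.Wait()
}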
@@ -61,6 +61,8 @@ type syscallDispatcher interface {
    mountTmpfs(fsname, target string, flags uintptr, size int, perm os.FileMode) error
    // ensureFile provides ensureFile.
    ensureFile(name string, perm, pperm os.FileMode) error
    // mustLoopback provides mustLoopback.
    mustLoopback(msg message.Msg)

    // seccompLoad provides [seccomp.Load].
    seccompLoad(rules []std.NativeRule, flags seccomp.ExportFlag) error

@@ -164,6 +166,7 @@ func (k direct) mountTmpfs(fsname, target string, flags uintptr, size int, perm
func (direct) ensureFile(name string, perm, pperm os.FileMode) error {
    return ensureFile(name, perm, pperm)
}
func (direct) mustLoopback(msg message.Msg) { mustLoopback(msg) }

func (direct) seccompLoad(rules []std.NativeRule, flags seccomp.ExportFlag) error {
    return seccomp.Load(rules, flags)

@@ -465,6 +465,8 @@ func (k *kstub) ensureFile(name string, perm, pperm os.FileMode) error {
        stub.CheckArg(k.Stub, "pperm", pperm, 2))
}

func (*kstub) mustLoopback(message.Msg) { /* noop */ }

func (k *kstub) seccompLoad(rules []std.NativeRule, flags seccomp.ExportFlag) error {
    k.Helper()
    return k.Expects("seccompLoad").Error(

@@ -18,7 +18,7 @@ func messageFromError(err error) (m string, ok bool) {
    if m, ok = messagePrefixP[os.PathError]("cannot ", err); ok {
        return
    }
    if m, ok = messagePrefixP[check.AbsoluteError](zeroString, err); ok {
    if m, ok = messagePrefix[check.AbsoluteError](zeroString, err); ok {
        return
    }
    if m, ok = messagePrefix[OpRepeatError](zeroString, err); ok {

@@ -37,7 +37,7 @@ func TestMessageFromError(t *testing.T) {
            Err: stub.UniqueError(0xdeadbeef),
        }, "cannot mount /sysroot: unique error 3735928559 injected by the test suite", true},

        {"absolute", &check.AbsoluteError{Pathname: "etc/mtab"},
        {"absolute", check.AbsoluteError("etc/mtab"),
            `path "etc/mtab" is not absolute`, true},

        {"repeat", OpRepeatError("autoetc"),

@@ -26,6 +26,8 @@ var (
    // AbsRunUser is [RunUser] as [check.Absolute].
    AbsRunUser = unsafeAbs(RunUser)

    // AbsUsr is [Usr] as [check.Absolute].
    AbsUsr = unsafeAbs(Usr)
    // AbsUsrBin is [UsrBin] as [check.Absolute].
    AbsUsrBin = unsafeAbs(UsrBin)

@@ -170,6 +170,10 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
        offsetSetup = int(setupFd + 1)
    }

    if !params.HostNet {
        k.mustLoopback(msg)
    }

    // write uid/gid map here so parent does not need to set dumpable
    if err := k.setDumpable(SUID_DUMP_USER); err != nil {
        k.fatalf(msg, "cannot set SUID_DUMP_USER: %v", err)

@@ -312,7 +312,10 @@ func TestMountOverlayOp(t *testing.T) {
            },
        }},

        {"ephemeral", new(Ops).OverlayEphemeral(check.MustAbs("/nix/store"), check.MustAbs("/mnt-root/nix/.ro-store")), Ops{
        {"ephemeral", new(Ops).OverlayEphemeral(
            check.MustAbs("/nix/store"),
            check.MustAbs("/mnt-root/nix/.ro-store"),
        ), Ops{
            &MountOverlayOp{
                Target: check.MustAbs("/nix/store"),
                Lower:  []*check.Absolute{check.MustAbs("/mnt-root/nix/.ro-store")},

@@ -320,7 +323,10 @@ func TestMountOverlayOp(t *testing.T) {
            },
        }},

        {"readonly", new(Ops).OverlayReadonly(check.MustAbs("/nix/store"), check.MustAbs("/mnt-root/nix/.ro-store")), Ops{
        {"readonly", new(Ops).OverlayReadonly(
            check.MustAbs("/nix/store"),
            check.MustAbs("/mnt-root/nix/.ro-store"),
        ), Ops{
            &MountOverlayOp{
                Target: check.MustAbs("/nix/store"),
                Lower:  []*check.Absolute{check.MustAbs("/mnt-root/nix/.ro-store")},

@@ -31,7 +31,7 @@ func (l *SymlinkOp) Valid() bool { return l != nil && l.Target != nil && l.LinkN
func (l *SymlinkOp) early(_ *setupState, k syscallDispatcher) error {
    if l.Dereference {
        if !path.IsAbs(l.LinkName) {
            return &check.AbsoluteError{Pathname: l.LinkName}
            return check.AbsoluteError(l.LinkName)
        }
        if name, err := k.readlink(l.LinkName); err != nil {
            return err

@@ -23,7 +23,7 @@ func TestSymlinkOp(t *testing.T) {
        Target:      check.MustAbs("/etc/mtab"),
        LinkName:    "etc/mtab",
        Dereference: true,
    }, nil, &check.AbsoluteError{Pathname: "etc/mtab"}, nil, nil},
    }, nil, check.AbsoluteError("etc/mtab"), nil, nil},

    {"readlink", &Params{ParentPerm: 0755}, &SymlinkOp{
        Target: check.MustAbs("/etc/mtab"),
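The syscallDispatcher hunks above add mustLoopback to the package's existing test seam: every privileged operation is declared on an interface, forwarded by a direct implementation in production, and replaced by a kstub double in tests. A generic sketch of that pattern, with illustrative names only (it does not reuse the repository's stub helpers):

package main

import "fmt"

// dispatcher is the seam: code under test depends on this interface only.
type dispatcher interface {
    mustLoopback()
}

// direct forwards to the real implementation in production builds.
type direct struct{}

func (direct) mustLoopback() { fmt.Println("configuring loopback via rtnetlink") }

// stubDispatcher records calls instead of touching the kernel.
type stubDispatcher struct{ loopbackCalls int }

func (s *stubDispatcher) mustLoopback() { s.loopbackCalls++ }

// initNetwork is the code under test: loopback is only brought up
// when the host network is not shared, as in initEntrypoint above.
func initNetwork(k dispatcher, hostNet bool) {
    if !hostNet {
        k.mustLoopback()
    }
}

func main() {
    s := &stubDispatcher{}
    initNetwork(s, false)
    initNetwork(s, true)
    fmt.Println("loopback configured", s.loopbackCalls, "time(s)") // 1
}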
269  container/netlink.go  Normal file
@@ -0,0 +1,269 @@
package container

import (
    "encoding/binary"
    "errors"
    "net"
    "os"
    . "syscall"
    "unsafe"

    "hakurei.app/container/std"
    "hakurei.app/message"
)

// rtnetlink represents a NETLINK_ROUTE socket.
type rtnetlink struct {
    // Sent as part of rtnetlink messages.
    pid uint32
    // AF_NETLINK socket.
    fd int
    // Whether the socket is open.
    ok bool
    // Message sequence number.
    seq uint32
}

// open creates the underlying NETLINK_ROUTE socket.
func (s *rtnetlink) open() (err error) {
    if s.ok || s.fd < 0 {
        return os.ErrInvalid
    }

    s.pid = uint32(Getpid())
    if s.fd, err = Socket(
        AF_NETLINK,
        SOCK_RAW|SOCK_CLOEXEC,
        NETLINK_ROUTE,
    ); err != nil {
        return os.NewSyscallError("socket", err)
    } else if err = Bind(s.fd, &SockaddrNetlink{
        Family: AF_NETLINK,
        Pid:    s.pid,
    }); err != nil {
        _ = s.close()
        return os.NewSyscallError("bind", err)
    } else {
        s.ok = true
        return nil
    }
}

// close closes the underlying NETLINK_ROUTE socket.
func (s *rtnetlink) close() error {
    if !s.ok {
        return os.ErrInvalid
    }

    s.ok = false
    err := Close(s.fd)
    s.fd = -1
    return err
}

// roundtrip sends a netlink message and handles the reply.
func (s *rtnetlink) roundtrip(data []byte) error {
    if !s.ok {
        return os.ErrInvalid
    }

    defer func() { s.seq++ }()

    if err := Sendto(s.fd, data, 0, &SockaddrNetlink{
        Family: AF_NETLINK,
    }); err != nil {
        return os.NewSyscallError("sendto", err)
    }
    buf := make([]byte, Getpagesize())

done:
    for {
        p := buf
        if n, _, err := Recvfrom(s.fd, p, 0); err != nil {
            return os.NewSyscallError("recvfrom", err)
        } else if n < NLMSG_HDRLEN {
            return errors.ErrUnsupported
        } else {
            p = p[:n]
        }

        if msgs, err := ParseNetlinkMessage(p); err != nil {
            return err
        } else {
            for _, m := range msgs {
                if m.Header.Seq != s.seq || m.Header.Pid != s.pid {
                    return errors.ErrUnsupported
                }
                if m.Header.Type == NLMSG_DONE {
                    break done
                }
                if m.Header.Type == NLMSG_ERROR {
                    if len(m.Data) >= 4 {
                        errno := Errno(-std.ScmpInt(binary.NativeEndian.Uint32(m.Data)))
                        if errno == 0 {
                            return nil
                        }
                        return errno
                    }
                    return errors.ErrUnsupported
                }
            }
        }
    }

    return nil
}

// mustRoundtrip calls roundtrip and terminates via msg for a non-nil error.
func (s *rtnetlink) mustRoundtrip(msg message.Msg, data []byte) {
    err := s.roundtrip(data)
    if err == nil {
        return
    }
    if closeErr := Close(s.fd); closeErr != nil {
        msg.Verbosef("cannot close: %v", err)
    }

    switch err.(type) {
    case *os.SyscallError:
        msg.GetLogger().Fatalf("cannot %v", err)

    case Errno:
        msg.GetLogger().Fatalf("RTNETLINK answers: %v", err)

    default:
        msg.GetLogger().Fatalln("RTNETLINK answers with unexpected message")
    }
}

// newaddrLo represents a RTM_NEWADDR message with two addresses.
type newaddrLo struct {
    header NlMsghdr
    data   IfAddrmsg

    r0 RtAttr
    a0 [4]byte // in_addr
    r1 RtAttr
    a1 [4]byte // in_addr
}

// sizeofNewaddrLo is the expected size of newaddrLo.
const sizeofNewaddrLo = NLMSG_HDRLEN + SizeofIfAddrmsg + (SizeofRtAttr+4)*2

// newaddrLo returns the address of a populated newaddrLo.
func (s *rtnetlink) newaddrLo(lo int) *newaddrLo {
    return &newaddrLo{NlMsghdr{
        Len:   sizeofNewaddrLo,
        Type:  RTM_NEWADDR,
        Flags: NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE | NLM_F_EXCL,
        Seq:   s.seq,
        Pid:   s.pid,
    }, IfAddrmsg{
        Family:    AF_INET,
        Prefixlen: 8,
        Flags:     IFA_F_PERMANENT,
        Scope:     RT_SCOPE_HOST,
        Index:     uint32(lo),
    }, RtAttr{
        Len:  uint16(SizeofRtAttr + len(newaddrLo{}.a0)),
        Type: IFA_LOCAL,
    }, [4]byte{127, 0, 0, 1}, RtAttr{
        Len:  uint16(SizeofRtAttr + len(newaddrLo{}.a1)),
        Type: IFA_ADDRESS,
    }, [4]byte{127, 0, 0, 1}}
}

func (msg *newaddrLo) toWireFormat() []byte {
    var buf [sizeofNewaddrLo]byte

    *(*uint32)(unsafe.Pointer(&buf[0:4][0])) = msg.header.Len
    *(*uint16)(unsafe.Pointer(&buf[4:6][0])) = msg.header.Type
    *(*uint16)(unsafe.Pointer(&buf[6:8][0])) = msg.header.Flags
    *(*uint32)(unsafe.Pointer(&buf[8:12][0])) = msg.header.Seq
    *(*uint32)(unsafe.Pointer(&buf[12:16][0])) = msg.header.Pid

    buf[16] = msg.data.Family
    buf[17] = msg.data.Prefixlen
    buf[18] = msg.data.Flags
    buf[19] = msg.data.Scope
    *(*uint32)(unsafe.Pointer(&buf[20:24][0])) = msg.data.Index

    *(*uint16)(unsafe.Pointer(&buf[24:26][0])) = msg.r0.Len
    *(*uint16)(unsafe.Pointer(&buf[26:28][0])) = msg.r0.Type
    copy(buf[28:32], msg.a0[:])
    *(*uint16)(unsafe.Pointer(&buf[32:34][0])) = msg.r1.Len
    *(*uint16)(unsafe.Pointer(&buf[34:36][0])) = msg.r1.Type
    copy(buf[36:40], msg.a1[:])

    return buf[:]
}

// newlinkLo represents a RTM_NEWLINK message.
type newlinkLo struct {
    header NlMsghdr
    data   IfInfomsg
}

// sizeofNewlinkLo is the expected size of newlinkLo.
const sizeofNewlinkLo = NLMSG_HDRLEN + SizeofIfInfomsg

// newlinkLo returns the address of a populated newlinkLo.
func (s *rtnetlink) newlinkLo(lo int) *newlinkLo {
    return &newlinkLo{NlMsghdr{
        Len:   sizeofNewlinkLo,
        Type:  RTM_NEWLINK,
        Flags: NLM_F_REQUEST | NLM_F_ACK,
        Seq:   s.seq,
        Pid:   s.pid,
    }, IfInfomsg{
        Family: AF_UNSPEC,
        Index:  int32(lo),
        Flags:  IFF_UP,
        Change: IFF_UP,
    }}
}

func (msg *newlinkLo) toWireFormat() []byte {
    var buf [sizeofNewlinkLo]byte

    *(*uint32)(unsafe.Pointer(&buf[0:4][0])) = msg.header.Len
    *(*uint16)(unsafe.Pointer(&buf[4:6][0])) = msg.header.Type
    *(*uint16)(unsafe.Pointer(&buf[6:8][0])) = msg.header.Flags
    *(*uint32)(unsafe.Pointer(&buf[8:12][0])) = msg.header.Seq
    *(*uint32)(unsafe.Pointer(&buf[12:16][0])) = msg.header.Pid

    buf[16] = msg.data.Family
    *(*uint16)(unsafe.Pointer(&buf[18:20][0])) = msg.data.Type
    *(*int32)(unsafe.Pointer(&buf[20:24][0])) = msg.data.Index
    *(*uint32)(unsafe.Pointer(&buf[24:28][0])) = msg.data.Flags
    *(*uint32)(unsafe.Pointer(&buf[28:32][0])) = msg.data.Change

    return buf[:]
}

// mustLoopback creates the loopback address and brings the lo interface up.
// mustLoopback calls a fatal method of the underlying [log.Logger] of m with a
// user-facing error message if RTNETLINK behaves unexpectedly.
func mustLoopback(msg message.Msg) {
    log := msg.GetLogger()

    var lo int
    if ifi, err := net.InterfaceByName("lo"); err != nil {
        log.Fatalln(err)
    } else {
        lo = ifi.Index
    }

    var s rtnetlink
    if err := s.open(); err != nil {
        log.Fatalln(err)
    }
    defer func() {
        if err := s.close(); err != nil {
            msg.Verbosef("cannot close netlink: %v", err)
        }
    }()

    s.mustRoundtrip(msg, s.newaddrLo(lo).toWireFormat())
    s.mustRoundtrip(msg, s.newlinkLo(lo).toWireFormat())
}
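For reference, a hedged sketch (outside the repository) of the RTM_NEWLINK request from the file above assembled with encoding/binary instead of unsafe pointer writes. It uses only syscall constants and types that already appear in the file, encodes in host byte order as the kernel expects, and trades the copy-free unsafe encode for bounds-checked, explicit writes:

package main

import (
    "encoding/binary"
    "syscall"
)

// putNlMsghdr encodes a netlink message header at the start of buf
// in host byte order.
func putNlMsghdr(buf []byte, h syscall.NlMsghdr) {
    binary.NativeEndian.PutUint32(buf[0:4], h.Len)
    binary.NativeEndian.PutUint16(buf[4:6], h.Type)
    binary.NativeEndian.PutUint16(buf[6:8], h.Flags)
    binary.NativeEndian.PutUint32(buf[8:12], h.Seq)
    binary.NativeEndian.PutUint32(buf[12:16], h.Pid)
}

// newlinkUp builds an RTM_NEWLINK request that sets IFF_UP on ifindex,
// mirroring the layout produced by newlinkLo.toWireFormat above.
func newlinkUp(ifindex int32, seq, pid uint32) []byte {
    const sz = syscall.NLMSG_HDRLEN + syscall.SizeofIfInfomsg
    buf := make([]byte, sz)
    putNlMsghdr(buf, syscall.NlMsghdr{
        Len:   sz,
        Type:  syscall.RTM_NEWLINK,
        Flags: syscall.NLM_F_REQUEST | syscall.NLM_F_ACK,
        Seq:   seq,
        Pid:   pid,
    })
    buf[16] = syscall.AF_UNSPEC                                // ifi_family; buf[17] is padding
    binary.NativeEndian.PutUint32(buf[20:24], uint32(ifindex)) // ifi_index
    binary.NativeEndian.PutUint32(buf[24:28], syscall.IFF_UP)  // ifi_flags
    binary.NativeEndian.PutUint32(buf[28:32], syscall.IFF_UP)  // ifi_change
    return buf
}

func main() { _ = newlinkUp(1, 0, uint32(syscall.Getpid())) }

The test file that follows pins the exact byte layout of both messages, so either encoding strategy can be checked against the same expected slices.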
72  container/netlink_test.go  Normal file
@@ -0,0 +1,72 @@
package container

import (
    "testing"
    "unsafe"
)

func TestSizeof(t *testing.T) {
    if got := unsafe.Sizeof(newaddrLo{}); got != sizeofNewaddrLo {
        t.Fatalf("newaddrLo: sizeof = %#x, want %#x", got, sizeofNewaddrLo)
    }

    if got := unsafe.Sizeof(newlinkLo{}); got != sizeofNewlinkLo {
        t.Fatalf("newlinkLo: sizeof = %#x, want %#x", got, sizeofNewlinkLo)
    }
}

func TestRtnetlinkMessage(t *testing.T) {
    t.Parallel()

    testCases := []struct {
        name string
        msg  interface{ toWireFormat() []byte }
        want []byte
    }{
        {"newaddrLo", (&rtnetlink{pid: 1, seq: 0}).newaddrLo(1), []byte{
            /* Len */ 0x28, 0, 0, 0,
            /* Type */ 0x14, 0,
            /* Flags */ 5, 6,
            /* Seq */ 0, 0, 0, 0,
            /* Pid */ 1, 0, 0, 0,

            /* Family */ 2,
            /* Prefixlen */ 8,
            /* Flags */ 0x80,
            /* Scope */ 0xfe,
            /* Index */ 1, 0, 0, 0,

            /* Len */ 8, 0,
            /* Type */ 2, 0,
            /* in_addr */ 127, 0, 0, 1,

            /* Len */ 8, 0,
            /* Type */ 1, 0,
            /* in_addr */ 127, 0, 0, 1,
        }},

        {"newlinkLo", (&rtnetlink{pid: 1, seq: 1}).newlinkLo(1), []byte{
            /* Len */ 0x20, 0, 0, 0,
            /* Type */ 0x10, 0,
            /* Flags */ 5, 0,
            /* Seq */ 1, 0, 0, 0,
            /* Pid */ 1, 0, 0, 0,

            /* Family */ 0,
            /* pad */ 0,
            /* Type */ 0, 0,
            /* Index */ 1, 0, 0, 0,
            /* Flags */ 1, 0, 0, 0,
            /* Change */ 1, 0, 0, 0,
        }},
    }
    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            t.Parallel()

            if got := tc.msg.toWireFormat(); string(got) != string(tc.want) {
                t.Fatalf("toWireFormat: %#v, want %#v", got, tc.want)
            }
        })
    }
}
12  dist/install.sh  vendored
@@ -1,12 +1,12 @@
#!/bin/sh
cd "$(dirname -- "$0")" || exit 1

install -vDm0755 "bin/hakurei" "${HAKUREI_INSTALL_PREFIX}/usr/bin/hakurei"
install -vDm0755 "bin/hpkg" "${HAKUREI_INSTALL_PREFIX}/usr/bin/hpkg"
install -vDm0755 "bin/hakurei" "${DESTDIR}/usr/bin/hakurei"
install -vDm0755 "bin/sharefs" "${DESTDIR}/usr/bin/sharefs"

install -vDm4511 "bin/hsu" "${HAKUREI_INSTALL_PREFIX}/usr/bin/hsu"
if [ ! -f "${HAKUREI_INSTALL_PREFIX}/etc/hsurc" ]; then
    install -vDm0400 "hsurc.default" "${HAKUREI_INSTALL_PREFIX}/etc/hsurc"
install -vDm4511 "bin/hsu" "${DESTDIR}/usr/bin/hsu"
if [ ! -f "${DESTDIR}/etc/hsurc" ]; then
    install -vDm0400 "hsurc.default" "${DESTDIR}/etc/hsurc"
fi

install -vDm0644 "comp/_hakurei" "${HAKUREI_INSTALL_PREFIX}/usr/share/zsh/site-functions/_hakurei"
install -vDm0644 "comp/_hakurei" "${DESTDIR}/usr/share/zsh/site-functions/_hakurei"
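The install script now follows the common DESTDIR staging convention (and ships sharefs in place of hpkg). If this reading is right, a packager would stage into a temporary root with something like `DESTDIR=/tmp/stage ./install.sh` from the extracted dist directory, while leaving DESTDIR unset keeps installing directly under /.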
11  flake.nix
@@ -69,6 +69,8 @@
    withRace = true;
};

sharefs = callPackage ./cmd/sharefs/test { inherit system self; };

hpkg = callPackage ./cmd/hpkg/test { inherit system self; };

formatting = runCommandLocal "check-formatting" { nativeBuildInputs = [ nixfmt-rfc-style ]; } ''

@@ -136,6 +138,10 @@
    ;
};
hsu = pkgs.callPackage ./cmd/hsu/package.nix { inherit (self.packages.${system}) hakurei; };
sharefs = pkgs.linkFarm "sharefs" {
    "bin/sharefs" = "${hakurei}/libexec/sharefs";
    "bin/mount.fuse.sharefs" = "${hakurei}/libexec/sharefs";
};

dist = pkgs.runCommand "${hakurei.name}-dist" { buildInputs = hakurei.targetPkgs ++ [ pkgs.pkgsStatic.musl ]; } ''
    # go requires XDG_CACHE_HOME for the build cache

@@ -160,7 +166,10 @@
    pkgs = nixpkgsFor.${system};
in
{
    default = pkgs.mkShell { buildInputs = hakurei.targetPkgs; };
    default = pkgs.mkShell {
        buildInputs = hakurei.targetPkgs;
        hardeningDisable = [ "fortify" ];
    };
    withPackage = pkgs.mkShell { buildInputs = [ hakurei ] ++ hakurei.targetPkgs; };

    vm =
@@ -25,8 +25,7 @@ var (

func TestUpdate(t *testing.T) {
    if os.Getenv("GO_TEST_SKIP_ACL") == "1" {
        t.Log("acl test skipped")
        t.SkipNow()
        t.Skip("acl test skipped")
    }

    testFilePath := path.Join(t.TempDir(), testFileName)

@@ -143,6 +142,7 @@ func (c *getFAclInvocation) run(name string) error {
    }

    c.cmd = exec.Command("getfacl", "--omit-header", "--absolute-names", "--numeric", name)
    c.cmd.Stderr = os.Stderr

    scanErr := make(chan error, 1)
    if p, err := c.cmd.StdoutPipe(); err != nil {

@@ -254,7 +254,7 @@ func getfacl(t *testing.T, name string) []*getFAclResp {
        t.Fatalf("getfacl: error = %v", err)
    }
    if len(c.pe) != 0 {
        t.Errorf("errors encountered parsing getfacl output\n%s", errors.Join(c.pe...).Error())
        t.Errorf("errors encountered parsing getfacl output\n%s", errors.Join(c.pe...))
    }
    return c.val
}
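The first hunk above folds t.Log followed by t.SkipNow into the equivalent single t.Skip call, which the testing package documents as exactly that combination. A minimal illustration (hypothetical test, not from the repository):

package example

import (
    "os"
    "testing"
)

func TestGuarded(t *testing.T) {
    // t.Skip logs the message and then calls SkipNow, so the two-line
    // form collapses into one call with identical behaviour.
    if os.Getenv("GO_TEST_SKIP_ACL") == "1" {
        t.Skip("acl test skipped")
    }
}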
@@ -108,7 +108,7 @@ func TestSpPulseOp(t *testing.T) {
        call("lookupEnv", stub.ExpectArgs{"PULSE_COOKIE"}, "proc/nonexistent/cookie", nil),
    }, nil, nil, &hst.AppError{
        Step: "locate PulseAudio cookie",
        Err:  &check.AbsoluteError{Pathname: "proc/nonexistent/cookie"},
        Err:  check.AbsoluteError("proc/nonexistent/cookie"),
    }, nil, nil, nil, nil, nil},

    {"cookie loadFile", func(bool, bool) outcomeOp {

@@ -272,7 +272,7 @@ func TestDiscoverPulseCookie(t *testing.T) {
        call("verbose", stub.ExpectArgs{[]any{(*check.Absolute)(nil)}}, nil, nil),
    }}, &hst.AppError{
        Step: "locate PulseAudio cookie",
        Err:  &check.AbsoluteError{Pathname: "proc/nonexistent/pulse-cookie"},
        Err:  check.AbsoluteError("proc/nonexistent/pulse-cookie"),
    }},

    {"success override", fCheckPathname, stub.Expect{Calls: []stub.Call{

@@ -286,7 +286,7 @@ func TestDiscoverPulseCookie(t *testing.T) {
        call("verbose", stub.ExpectArgs{[]any{(*check.Absolute)(nil)}}, nil, nil),
    }}, &hst.AppError{
        Step: "locate PulseAudio cookie",
        Err:  &check.AbsoluteError{Pathname: "proc/nonexistent/home"},
        Err:  check.AbsoluteError("proc/nonexistent/home"),
    }},

    {"home stat", fCheckPathname, stub.Expect{Calls: []stub.Call{

@@ -321,7 +321,7 @@ func TestDiscoverPulseCookie(t *testing.T) {
        call("verbose", stub.ExpectArgs{[]any{(*check.Absolute)(nil)}}, nil, nil),
    }}, &hst.AppError{
        Step: "locate PulseAudio cookie",
        Err:  &check.AbsoluteError{Pathname: "proc/nonexistent/xdg"},
        Err:  check.AbsoluteError("proc/nonexistent/xdg"),
    }},

    {"xdg stat", fCheckPathname, stub.Expect{Calls: []stub.Call{
@@ -514,7 +514,7 @@ var ErrNotDone = errors.New("did not receive a Core::Done event targeting previo
const (
    // syncTimeout is the maximum duration [Core.Sync] is allowed to take before
    // receiving [CoreDone] or failing.
    syncTimeout = 5 * time.Second
    syncTimeout = 10 * time.Second
)

// Sync queues a [CoreSync] message for the PipeWire server and initiates a Roundtrip.
@@ -19,7 +19,6 @@ import (
    "errors"
    "fmt"
    "io"
    "net"
    "os"
    "path"
    "runtime"

@@ -27,10 +26,16 @@ import (
    "strconv"
    "strings"
    "syscall"
    "time"
)

// Conn is a low level unix socket interface used by [Context].
type Conn interface {
    // MightBlock informs the implementation that the next call to
    // Recvmsg or Sendmsg might block. A zero or negative timeout
    // cancels this behaviour.
    MightBlock(timeout time.Duration)

    // Recvmsg calls syscall.Recvmsg on the underlying socket.
    Recvmsg(p, oob []byte, flags int) (n, oobn, recvflags int, err error)

@@ -138,45 +143,142 @@ func New(conn Conn, props SPADict) (*Context, error) {
    return &ctx, nil
}

// A SyscallConnCloser is a [syscall.Conn] that implements [io.Closer].
type SyscallConnCloser interface {
    syscall.Conn
    io.Closer
// unixConn is an implementation of the [Conn] interface for connections
// to Unix domain sockets.
type unixConn struct {
    fd int

    // Whether creation of a new epoll instance was attempted.
    epoll bool
    // File descriptor referring to the new epoll instance.
    // Valid if epoll is true and epollErr is nil.
    epollFd int
    // Error returned by syscall.EpollCreate1.
    epollErr error
    // Stores epoll events from the kernel.
    epollBuf [32]syscall.EpollEvent

    // If non-zero, next call is treated as a blocking call.
    timeout time.Duration
}

// A SyscallConn is a [Conn] adapter for [syscall.Conn].
type SyscallConn struct{ SyscallConnCloser }
// Dial connects to a Unix domain socket described by name.
func Dial(name string) (Conn, error) {
    if fd, err := syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC|syscall.SOCK_NONBLOCK, 0); err != nil {
        return nil, os.NewSyscallError("socket", err)
    } else if err = syscall.Connect(fd, &syscall.SockaddrUnix{Name: name}); err != nil {
        _ = syscall.Close(fd)
        return nil, os.NewSyscallError("connect", err)
    } else {
        return &unixConn{fd: fd}, nil
    }
}

// Recvmsg implements [Conn.Recvmsg] via [syscall.Conn.SyscallConn].
func (conn SyscallConn) Recvmsg(p, oob []byte, flags int) (n, oobn, recvflags int, err error) {
    var rc syscall.RawConn
    if rc, err = conn.SyscallConn(); err != nil {
// MightBlock informs the implementation that the next call
// might block for a non-zero timeout.
func (conn *unixConn) MightBlock(timeout time.Duration) {
    if timeout < 0 {
        timeout = 0
    }
    conn.timeout = timeout
}

// wantsEpoll is called at the beginning of any method that might use epoll.
func (conn *unixConn) wantsEpoll() error {
    if !conn.epoll {
        conn.epoll = true
        conn.epollFd, conn.epollErr = syscall.EpollCreate1(syscall.EPOLL_CLOEXEC)
        if conn.epollErr == nil {
            if conn.epollErr = syscall.EpollCtl(conn.epollFd, syscall.EPOLL_CTL_ADD, conn.fd, &syscall.EpollEvent{
                Events: syscall.EPOLLERR | syscall.EPOLLHUP,
                Fd:     int32(conn.fd),
            }); conn.epollErr != nil {
                _ = syscall.Close(conn.epollFd)
            }
        }
    }
    return conn.epollErr
}

// wait waits for a specific I/O event on fd. Caller must arrange for wantsEpoll
// to be called somewhere before wait is called.
func (conn *unixConn) wait(event uint32) (err error) {
    if conn.timeout == 0 {
        return nil
    }
    deadline := time.Now().Add(conn.timeout)
    conn.timeout = 0

    if err = syscall.EpollCtl(conn.epollFd, syscall.EPOLL_CTL_MOD, conn.fd, &syscall.EpollEvent{
        Events: event | syscall.EPOLLERR | syscall.EPOLLHUP,
        Fd:     int32(conn.fd),
    }); err != nil {
        return
    }

    if controlErr := rc.Control(func(fd uintptr) {
        n, oobn, recvflags, _, err = syscall.Recvmsg(int(fd), p, oob, flags)
    }); controlErr != nil && err == nil {
        err = controlErr
    for timeout := deadline.Sub(time.Now()); timeout > 0; timeout = deadline.Sub(time.Now()) {
        var n int
        if n, err = syscall.EpollWait(conn.epollFd, conn.epollBuf[:], int(timeout/time.Millisecond)); err != nil {
            return
        }

        switch n {
        case 1: // only the socket fd is ever added
            if conn.epollBuf[0].Fd != int32(conn.fd) { // unreachable
                return syscall.ENOTRECOVERABLE
            }
            if conn.epollBuf[0].Events&event == event ||
                conn.epollBuf[0].Events&syscall.EPOLLERR|syscall.EPOLLHUP != 0 {
                return nil
            }
            err = syscall.ETIME
            continue

        case 0: // timeout
            return syscall.ETIMEDOUT

        default: // unreachable
            return syscall.ENOTRECOVERABLE
        }
    }
    return
}

// Sendmsg implements [Conn.Sendmsg] via [syscall.Conn.SyscallConn].
func (conn SyscallConn) Sendmsg(p, oob []byte, flags int) (n int, err error) {
    var rc syscall.RawConn
    if rc, err = conn.SyscallConn(); err != nil {
// Recvmsg calls syscall.Recvmsg on the underlying socket.
func (conn *unixConn) Recvmsg(p, oob []byte, flags int) (n, oobn, recvflags int, err error) {
    if err = conn.wantsEpoll(); err != nil {
        return
    } else if err = conn.wait(syscall.EPOLLIN); err != nil {
        return
    }

    if controlErr := rc.Control(func(fd uintptr) {
        n, err = syscall.SendmsgN(int(fd), p, oob, nil, flags)
    }); controlErr != nil && err == nil {
        err = controlErr
    }
    n, oobn, recvflags, _, err = syscall.Recvmsg(conn.fd, p, oob, flags)
    return
}

// Sendmsg calls syscall.Sendmsg on the underlying socket.
func (conn *unixConn) Sendmsg(p, oob []byte, flags int) (n int, err error) {
    if err = conn.wantsEpoll(); err != nil {
        return
    } else if err = conn.wait(syscall.EPOLLOUT); err != nil {
        return
    }

    n, err = syscall.SendmsgN(conn.fd, p, oob, nil, flags)
    return
}

// Close closes the underlying socket and the epoll fd if populated.
func (conn *unixConn) Close() (err error) {
    if conn.epoll && conn.epollErr == nil {
        conn.epollErr = syscall.Close(conn.epollFd)
    }
    if err = syscall.Close(conn.fd); err != nil {
        return
    }
    return conn.epollErr
}

// MustNew calls [New](conn, props) and panics on error.
// It is intended for use in tests with hard-coded strings.
func MustNew(conn Conn, props SPADict) *Context {

@@ -310,7 +412,7 @@ func (ctx *Context) recvmsg(remaining []byte) (payload []byte, err error) {
    }
    if err != syscall.EAGAIN && err != syscall.EWOULDBLOCK {
        ctx.closeReceivedFiles()
        return nil, os.NewSyscallError("recvmsg", err)
        return nil, &ProxyFatalError{Err: os.NewSyscallError("recvmsg", err), ProxyErrs: ctx.cloneAsProxyErrors()}
    }
}

@@ -347,7 +449,7 @@ func (ctx *Context) sendmsg(p []byte, fds ...int) error {
    }

    if err != nil && err != syscall.EAGAIN && err != syscall.EWOULDBLOCK {
        return os.NewSyscallError("sendmsg", err)
        return &ProxyFatalError{Err: os.NewSyscallError("sendmsg", err), ProxyErrs: ctx.cloneAsProxyErrors()}
    }
    return err
}

@@ -598,8 +700,15 @@ func (ctx *Context) Roundtrip() (err error) {
    return
}

const (
    // roundtripTimeout is the maximum duration socket operations during
    // Context.roundtrip is allowed to block for.
    roundtripTimeout = 5 * time.Second
)

// roundtrip implements the Roundtrip method without checking proxyErrors.
func (ctx *Context) roundtrip() (err error) {
    ctx.conn.MightBlock(roundtripTimeout)
    if err = ctx.sendmsg(ctx.buf, ctx.pendingFiles...); err != nil {
        return
    }

@@ -633,6 +742,7 @@ func (ctx *Context) roundtrip() (err error) {
    }()

    var remaining []byte
    ctx.conn.MightBlock(roundtripTimeout)
    for {
        remaining, err = ctx.consume(remaining)
        if err == nil {

@@ -857,14 +967,14 @@ const Remote = "PIPEWIRE_REMOTE"

const DEFAULT_SYSTEM_RUNTIME_DIR = "/run/pipewire"

// connectName connects to a PipeWire remote by name and returns the [net.UnixConn].
func connectName(name string, manager bool) (conn *net.UnixConn, err error) {
// connectName connects to a PipeWire remote by name and returns the resulting [Conn].
func connectName(name string, manager bool) (conn Conn, err error) {
    if manager && !strings.HasSuffix(name, "-manager") {
        return connectName(name+"-manager", false)
    }

    if path.IsAbs(name) || (len(name) > 0 && name[0] == '@') {
        return net.DialUnix("unix", nil, &net.UnixAddr{Name: name, Net: "unix"})
        return Dial(name)
    } else {
        runtimeDir, ok := os.LookupEnv("PIPEWIRE_RUNTIME_DIR")
        if !ok || !path.IsAbs(runtimeDir) {

@@ -879,7 +989,7 @@ func connectName(name string, manager bool) (conn *net.UnixConn, err error) {
    if !ok || !path.IsAbs(runtimeDir) {
        runtimeDir = DEFAULT_SYSTEM_RUNTIME_DIR
    }
    return net.DialUnix("unix", nil, &net.UnixAddr{Name: path.Join(runtimeDir, name), Net: "unix"})
    return Dial(path.Join(runtimeDir, name))
}
}

@@ -897,12 +1007,11 @@ func ConnectName(name string, manager bool, props SPADict) (ctx *Context, err er
    }
}

var conn *net.UnixConn
var conn Conn
if conn, err = connectName(name, manager); err != nil {
    return
}

if ctx, err = New(SyscallConn{conn}, props); err != nil {
if ctx, err = New(conn, props); err != nil {
    ctx = nil
    _ = conn.Close()
}
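The unixConn.wait implementation above bounds a potentially blocking recvmsg/sendmsg on a nonblocking socket with an epoll deadline. A minimal standalone sketch of the same idea, assuming a nonblocking fd that has already been registered with EPOLL_CTL_ADD; the names and the socketpair demo are illustrative and not part of the repository:

package main

import (
    "fmt"
    "syscall"
    "time"
)

// waitReadable blocks until fd is readable or deadline passes, using a
// dedicated epoll instance; errors and hangups also end the wait so the
// following recvmsg can surface them.
func waitReadable(epfd, fd int, deadline time.Time) error {
    ev := syscall.EpollEvent{Events: syscall.EPOLLIN | syscall.EPOLLERR | syscall.EPOLLHUP, Fd: int32(fd)}
    if err := syscall.EpollCtl(epfd, syscall.EPOLL_CTL_MOD, fd, &ev); err != nil {
        return err
    }
    var events [1]syscall.EpollEvent
    for {
        timeout := time.Until(deadline)
        if timeout <= 0 {
            return syscall.ETIMEDOUT
        }
        n, err := syscall.EpollWait(epfd, events[:], int(timeout/time.Millisecond))
        if err != nil {
            return err
        }
        if n > 0 {
            return nil
        }
    }
}

func main() {
    fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_NONBLOCK, 0)
    if err != nil {
        panic(err)
    }
    epfd, err := syscall.EpollCreate1(syscall.EPOLL_CLOEXEC)
    if err != nil {
        panic(err)
    }
    if err := syscall.EpollCtl(epfd, syscall.EPOLL_CTL_ADD, fds[0], &syscall.EpollEvent{Events: syscall.EPOLLERR, Fd: int32(fds[0])}); err != nil {
        panic(err)
    }
    _, _ = syscall.Write(fds[1], []byte("ping"))
    if err := waitReadable(epfd, fds[0], time.Now().Add(time.Second)); err != nil {
        panic(err)
    }
    fmt.Println("readable within deadline")
}

The design choice in the diff is to keep the fd nonblocking and only pay for epoll when MightBlock has armed a timeout, so short roundtrips stay on the fast path.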
@@ -6,6 +6,7 @@ import (
    "strconv"
    . "syscall"
    "testing"
    "time"

    "hakurei.app/container/stub"
    "hakurei.app/internal/pipewire"

@@ -715,6 +716,18 @@ type stubUnixConn struct {
    current int
}

func (conn *stubUnixConn) MightBlock(timeout time.Duration) {
    if timeout != 5*time.Second {
        panic("unexpected timeout " + timeout.String())
    }
    if conn.current == 0 ||
        (conn.samples[conn.current-1].nr == SYS_RECVMSG && conn.samples[conn.current-1].errno == EAGAIN && conn.samples[conn.current].nr == SYS_SENDMSG) ||
        (conn.samples[conn.current-1].nr == SYS_SENDMSG && conn.samples[conn.current].nr == SYS_RECVMSG) {
        return
    }
    panic("unexpected blocking hint before sample " + strconv.Itoa(conn.current))
}

// nextSample returns the current sample and increments the counter.
func (conn *stubUnixConn) nextSample(nr uintptr) (sample *stubUnixConnSample, wantOOB []byte, err error) {
    sample = &conn.samples[conn.current]
211  internal/pkg/dir.go  Normal file
@@ -0,0 +1,211 @@
package pkg

import (
    "crypto/sha512"
    "encoding/binary"
    "errors"
    "io"
    "io/fs"
    "math"
    "os"
    "path/filepath"
    "syscall"

    "hakurei.app/container/check"
)

// FlatEntry is a directory entry to be encoded for [Flatten].
type FlatEntry struct {
    Mode fs.FileMode // file mode bits
    Path string      // pathname of the file
    Data []byte      // file content or symlink destination
}

/*
    | mode uint32 | path_sz uint32 |
    | data_sz uint64               |
    | path string                  |
    | data []byte                  |
*/

// wordSize is the boundary which binary segments are always aligned to.
const wordSize = 8

// alignSize returns the padded size for aligning sz.
func alignSize(sz int) int {
    return sz + (wordSize-(sz)%wordSize)%wordSize
}

// Encode encodes the entry for transmission or hashing.
func (ent *FlatEntry) Encode(w io.Writer) (n int, err error) {
    pPathSize := alignSize(len(ent.Path))
    if pPathSize > math.MaxUint32 {
        return 0, syscall.E2BIG
    }
    pDataSize := alignSize(len(ent.Data))

    payload := make([]byte, wordSize*2+pPathSize+pDataSize)
    binary.LittleEndian.PutUint32(payload, uint32(ent.Mode))
    binary.LittleEndian.PutUint32(payload[wordSize/2:], uint32(len(ent.Path)))
    binary.LittleEndian.PutUint64(payload[wordSize:], uint64(len(ent.Data)))
    copy(payload[wordSize*2:], ent.Path)
    copy(payload[wordSize*2+pPathSize:], ent.Data)
    return w.Write(payload)
}

// ErrInsecurePath is returned by [FlatEntry.Decode] if validation is requested
// and a nonlocal path is encountered in the stream.
var ErrInsecurePath = errors.New("insecure file path")

// Decode decodes the entry from its representation produced by Encode.
func (ent *FlatEntry) Decode(r io.Reader, validate bool) (n int, err error) {
    var nr int

    header := make([]byte, wordSize*2)
    nr, err = r.Read(header)
    n += nr
    if err != nil {
        if errors.Is(err, io.EOF) && n != 0 {
            err = io.ErrUnexpectedEOF
        }
        return
    }

    ent.Mode = fs.FileMode(binary.LittleEndian.Uint32(header))
    pathSize := int(binary.LittleEndian.Uint32(header[wordSize/2:]))
    pPathSize := alignSize(pathSize)
    dataSize := int(binary.LittleEndian.Uint64(header[wordSize:]))
    pDataSize := alignSize(dataSize)

    buf := make([]byte, pPathSize+pDataSize)
    nr, err = r.Read(buf)
    n += nr
    if err != nil {
        if errors.Is(err, io.EOF) {
            if nr != len(buf) {
                err = io.ErrUnexpectedEOF
                return
            }
        } else {
            return
        }
    }

    ent.Path = string(buf[:pathSize])
    if ent.Mode.IsDir() {
        ent.Data = nil
    } else {
        ent.Data = buf[pPathSize : pPathSize+dataSize]
    }

    if validate && !filepath.IsLocal(ent.Path) {
        err = ErrInsecurePath
    }

    return
}

// DirScanner provides an efficient interface for reading a stream of encoded
// [FlatEntry]. Successive calls to the Scan method will step through the
// entries in the stream.
type DirScanner struct {
    // Underlying reader to scan [FlatEntry] representations from.
    r io.Reader

    // First non-EOF I/O error, returned by the Err method.
    err error

    // Entry to store results in. Its address is returned by the Entry method
    // and is updated on every call to Scan.
    ent FlatEntry

    // Validate pathnames during decoding.
    validate bool
}

// NewDirScanner returns the address of a new instance of [DirScanner] reading
// from r. The caller must no longer read from r after this function returns.
func NewDirScanner(r io.Reader, validate bool) *DirScanner {
    return &DirScanner{r: r, validate: validate}
}

// Err returns the first non-EOF I/O error.
func (s *DirScanner) Err() error {
    if errors.Is(s.err, io.EOF) {
        return nil
    }
    return s.err
}

// Entry returns the address to the [FlatEntry] value storing the last result.
func (s *DirScanner) Entry() *FlatEntry { return &s.ent }

// Scan advances to the next [FlatEntry].
func (s *DirScanner) Scan() bool {
    if s.err != nil {
        return false
    }

    var n int
    n, s.err = s.ent.Decode(s.r, s.validate)
    if errors.Is(s.err, io.EOF) {
        return n != 0
    }
    return s.err == nil
}

// Flatten writes a deterministic representation of the contents of fsys to w.
// The resulting data can be hashed to produce a deterministic checksum for the
// directory.
func Flatten(fsys fs.FS, root string, w io.Writer) (n int, err error) {
    var nr int
    err = fs.WalkDir(fsys, root, func(path string, d fs.DirEntry, err error) error {
        if err != nil {
            return err
        }

        var fi fs.FileInfo
        fi, err = d.Info()
        if err != nil {
            return err
        }

        ent := FlatEntry{
            Path: path,
            Mode: fi.Mode(),
        }
        if ent.Mode.IsRegular() {
            if ent.Data, err = fs.ReadFile(fsys, path); err != nil {
                return err
            }
        } else if ent.Mode&fs.ModeSymlink != 0 {
            var newpath string
            if newpath, err = fs.ReadLink(fsys, path); err != nil {
                return err
            }
            ent.Data = []byte(newpath)
        } else if !ent.Mode.IsDir() {
            return InvalidFileModeError(ent.Mode)
        }

        nr, err = ent.Encode(w)
        n += nr
        return err
    })
    return
}

// HashFS returns a checksum produced by hashing the result of [Flatten].
func HashFS(buf *Checksum, fsys fs.FS, root string) error {
    h := sha512.New384()
    if _, err := Flatten(fsys, root, h); err != nil {
        return err
    }
    h.Sum(buf[:0])
    return nil
}

// HashDir returns a checksum produced by hashing the result of [Flatten].
func HashDir(buf *Checksum, pathname *check.Absolute) error {
    return HashFS(buf, os.DirFS(pathname.String()), ".")
}
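A short usage sketch of the streaming format above, written outside the package; it assumes only the exported names that appear in this diff and its tests (Flatten, NewDirScanner, HashFS, Checksum). The 8-byte word alignment plays out like this: the 5-byte path "check" pads to one 8-byte word and its 2-byte payload pads to another, so that entry encodes to 16+8+8 = 32 bytes, while the data-less "." entry takes 24.

package main

import (
    "bytes"
    "fmt"
    "io/fs"
    "testing/fstest"

    "hakurei.app/internal/pkg"
)

func main() {
    fsys := fstest.MapFS{
        ".":     {Mode: fs.ModeDir | 0700},
        "check": {Mode: 0400, Data: []byte{0, 0}},
    }

    // Flatten walks the tree deterministically and emits one encoded
    // FlatEntry per file: a 16-byte header plus path and data, each
    // padded to the 8-byte word size.
    var buf bytes.Buffer
    if _, err := pkg.Flatten(fsys, ".", &buf); err != nil {
        panic(err)
    }
    fmt.Println(buf.Len()) // 56: 24 bytes for "." plus 32 for "check"

    // DirScanner walks the stream back; validate=true rejects nonlocal paths.
    s := pkg.NewDirScanner(bytes.NewReader(buf.Bytes()), true)
    for s.Scan() {
        fmt.Println(s.Entry().Mode, s.Entry().Path, len(s.Entry().Data))
    }
    if err := s.Err(); err != nil {
        panic(err)
    }

    // Hashing the same stream yields the directory checksum used by HashDir.
    var sum pkg.Checksum
    if err := pkg.HashFS(&sum, fsys, "."); err != nil {
        panic(err)
    }
    fmt.Printf("%x\n", sum)
}

The test file that follows exercises exactly this pairing of fstest.MapFS trees with expected FlatEntry sequences and checksums.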
570  internal/pkg/dir_test.go  Normal file
@@ -0,0 +1,570 @@
|
||||
package pkg_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/fs"
|
||||
"reflect"
|
||||
"testing"
|
||||
"testing/fstest"
|
||||
|
||||
"hakurei.app/internal/pkg"
|
||||
)
|
||||
|
||||
func TestFlatten(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
fsys fs.FS
|
||||
entries []pkg.FlatEntry
|
||||
sum pkg.Checksum
|
||||
err error
|
||||
}{
|
||||
{"bad type", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
"invalid": {Mode: fs.ModeCharDevice | 0400},
|
||||
}, nil, pkg.Checksum{}, pkg.InvalidFileModeError(
|
||||
fs.ModeCharDevice | 0400,
|
||||
)},
|
||||
|
||||
{"empty", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||
}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C"), nil},
|
||||
|
||||
{"sample cache file", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX": {Mode: 0400, Data: []byte{0}},
|
||||
"checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq": {Mode: 0400, Data: []byte{0, 0, 0, 0, 0xad, 0xb, 0, 4, 0xfe, 0xfe, 0, 0, 0xfe, 0xca, 0, 0}},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"identifier/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
|
||||
"identifier/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
|
||||
"identifier/cafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
|
||||
"identifier/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
|
||||
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||
{Mode: 0400, Path: "checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq", Data: []byte{0, 0, 0, 0, 0xad, 0xb, 0, 4, 0xfe, 0xfe, 0, 0, 0xfe, 0xca, 0, 0}},
|
||||
{Mode: 0400, Path: "checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX", Data: []byte{0}},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq", Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/cafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe", Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX", Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||
}, pkg.MustDecode("St9rlE-mGZ5gXwiv_hzQ_B8bZP-UUvSNmf4nHUZzCMOumb6hKnheZSe0dmnuc4Q2"), nil},
|
||||
|
||||
{"sample http get cure", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU": {Mode: 0400, Data: []byte("\x7f\xe1\x69\xa2\xdd\x63\x96\x26\x83\x79\x61\x8b\xf0\x3f\xd5\x16\x9a\x39\x3a\xdb\xcf\xb1\xbc\x8d\x33\xff\x75\xee\x62\x56\xa9\xf0\x27\xac\x13\x94\x69")},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"identifier/NqVORkT6L9HX6Za7kT2zcibY10qFqBaxEjPiYFrBQX-ZFr3yxCzJxbKOP0zVjeWb": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU")},
|
||||
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||
{Mode: 0400, Path: "checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU", Data: []byte("\x7f\xe1\x69\xa2\xdd\x63\x96\x26\x83\x79\x61\x8b\xf0\x3f\xd5\x16\x9a\x39\x3a\xdb\xcf\xb1\xbc\x8d\x33\xff\x75\xee\x62\x56\xa9\xf0\x27\xac\x13\x94\x69")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/NqVORkT6L9HX6Za7kT2zcibY10qFqBaxEjPiYFrBQX-ZFr3yxCzJxbKOP0zVjeWb", Data: []byte("../checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||
}, pkg.MustDecode("bqtn69RkV5E7V7GhhgCFjcvbxmaqrO8DywamM4Tyjf10F6EJBHjXiIa_tFRtF4iN"), nil},
|
||||
|
||||
{"sample directory step simple", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0500},
|
||||
|
||||
"check": {Mode: 0400, Data: []byte{0, 0}},
|
||||
|
||||
"lib": {Mode: fs.ModeDir | 0700},
|
||||
"lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
|
||||
"lib/pkgconfig": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0500, Path: "."},
|
||||
|
||||
{Mode: 0400, Path: "check", Data: []byte{0, 0}},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "lib"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "lib/pkgconfig"},
|
||||
}, pkg.MustDecode("qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b"), nil},
|
||||
|
||||
{"sample directory step garbage", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0500},
|
||||
|
||||
"lib": {Mode: fs.ModeDir | 0500},
|
||||
"lib/check": {Mode: 0400, Data: []byte{}},
|
||||
|
||||
"lib/pkgconfig": {Mode: fs.ModeDir | 0500},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0500, Path: "."},
|
||||
|
||||
{Mode: fs.ModeDir | 0500, Path: "lib"},
|
||||
{Mode: 0400, Path: "lib/check", Data: []byte{}},
|
||||
|
||||
{Mode: fs.ModeDir | 0500, Path: "lib/pkgconfig"},
|
||||
}, pkg.MustDecode("CUx-3hSbTWPsbMfDhgalG4Ni_GmR9TnVX8F99tY_P5GtkYvczg9RrF5zO0jX9XYT"), nil},
|
||||
|
||||
{"sample directory", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/check": {Mode: 0400, Data: []byte{0, 0}},
|
||||
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/pkgconfig": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
|
||||
"identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
|
||||
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b"},
|
||||
{Mode: 0400, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/check", Data: []byte{0, 0}},
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/pkgconfig"},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY", Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa", Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||
}, pkg.MustDecode("WVpvsVqVKg9Nsh744x57h51AuWUoUR2nnh8Md-EYBQpk6ziyTuUn6PLtF2e0Eu_d"), nil},
|
||||
|
||||
{"sample tar step unpack", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0500},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check": {Mode: 0400, Data: []byte{0, 0}},
|
||||
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0500},
|
||||
"identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||
"identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||
|
||||
"work": {Mode: fs.ModeDir | 0500},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0500, Path: "."},
|
||||
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum"},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP"},
|
||||
{Mode: 0400, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check", Data: []byte{0, 0}},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig"},
|
||||
|
||||
{Mode: fs.ModeDir | 0500, Path: "identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||
|
||||
{Mode: fs.ModeDir | 0500, Path: "work"},
|
||||
}, pkg.MustDecode("cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM"), nil},
|
||||
|
||||
{"sample tar", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check": {Mode: 0400, Data: []byte{0, 0}},
|
||||
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/work": {Mode: fs.ModeDir | 0500},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"identifier/-P_1iw6yVq_letMHncqcExSE0bYcDhYI5OdY6b1wKASf-Corufvj__XTBUq2Qd2a": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
|
||||
"identifier/0_rRxIqbX9LK9L_KDbuafotFz6HFkonNgO9gXhK1asM_Y1Pxn0amg756vRTo6m74": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
|
||||
|
||||
"temp": {Mode: fs.ModeDir | 0700},
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM"},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum"},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP"},
|
||||
{Mode: 0400, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check", Data: []byte{0, 0}},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig"},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/work"},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/-P_1iw6yVq_letMHncqcExSE0bYcDhYI5OdY6b1wKASf-Corufvj__XTBUq2Qd2a", Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/0_rRxIqbX9LK9L_KDbuafotFz6HFkonNgO9gXhK1asM_Y1Pxn0amg756vRTo6m74", Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||
}, pkg.MustDecode("sxbgyX-bPoezbha214n2lbQhiVfTUBkhZ0EX6zI7mmkMdrCdwuMwhMBJphLQsy94"), nil},
|
||||
|
||||
{"sample tar expand step unpack", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0500},
|
||||
|
||||
"libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0500, Path: "."},
|
||||
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
}, pkg.MustDecode("CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN"), nil},
|
||||
|
||||
{"sample tar expand", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"identifier/-P_1iw6yVq_letMHncqcExSE0bYcDhYI5OdY6b1wKASf-Corufvj__XTBUq2Qd2a": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
||||
"identifier/0_rRxIqbX9LK9L_KDbuafotFz6HFkonNgO9gXhK1asM_Y1Pxn0amg756vRTo6m74": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
||||
|
||||
"temp": {Mode: fs.ModeDir | 0700},
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/-P_1iw6yVq_letMHncqcExSE0bYcDhYI5OdY6b1wKASf-Corufvj__XTBUq2Qd2a", Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/0_rRxIqbX9LK9L_KDbuafotFz6HFkonNgO9gXhK1asM_Y1Pxn0amg756vRTo6m74", Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||
}, pkg.MustDecode("4I8wx_h7NSJTlG5lbuz-GGEXrOg0GYC3M_503LYEBhv5XGWXfNIdIY9Q3eVSYldX"), nil},
|
||||
|
||||
{"testtool", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0500},
|
||||
|
||||
"check": {Mode: 0400, Data: []byte{0}},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0500, Path: "."},
|
||||
|
||||
{Mode: 0400, Path: "check", Data: []byte{0}},
|
||||
}, pkg.MustDecode("GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"), nil},
|
||||
|
||||
{"sample exec container", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
|
||||
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"identifier/U2cbgVgEtjfRuvHfE1cQnZ3t8yoexULQyo_VLgvxAVJSsobMcNaFIsuDWtmt7kzK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||
"identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||
|
||||
"temp": {Mode: fs.ModeDir | 0700},
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
|
||||
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
||||
{Mode: 0400, Path: "checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb", Data: []byte{}},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/U2cbgVgEtjfRuvHfE1cQnZ3t8yoexULQyo_VLgvxAVJSsobMcNaFIsuDWtmt7kzK", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||
}, pkg.MustDecode("UiV6kMz7KrTsc_yphiyQzFLqjRanHxUOwrBMtkKuWo4mOO6WgPFAcoUEeSp7eVIW"), nil},
|
||||
|
||||
{"testtool net", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0500},
|
||||
|
||||
"check": {Mode: 0400, Data: []byte("net")},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0500, Path: "."},
|
||||
|
||||
{Mode: 0400, Path: "check", Data: []byte("net")},
|
||||
}, pkg.MustDecode("a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W"), nil},
|
||||
|
||||
{"sample exec net container", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
|
||||
"checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W/check": {Mode: 0400, Data: []byte("net")},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||
"identifier/QdsJhGgnk5N2xdUNGcndXQxFKifxf1V_2t9X8CQ-pDcg24x6mGJC_BiLfGbs6Qml": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W")},
|
||||
"identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||
|
||||
"temp": {Mode: fs.ModeDir | 0700},
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
||||
{Mode: 0400, Path: "checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb", Data: []byte{}},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W"},
|
||||
{Mode: 0400, Path: "checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W/check", Data: []byte("net")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/QdsJhGgnk5N2xdUNGcndXQxFKifxf1V_2t9X8CQ-pDcg24x6mGJC_BiLfGbs6Qml", Data: []byte("../checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||
}, pkg.MustDecode("ek4K-0d4iRSArkY2TCs3WK34DbiYeOmhE_4vsJTSu_6roY4ZF3YG6eKRooal-i1o"), nil},
|
||||
|
||||
{"sample exec container overlay root", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
|
||||
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"identifier/5ey2wpmMpj483YYa7ZZQciYLA2cx3_l167JCqWW4Pd-5DVp81dj9EsBtVTwYptF6": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||
"identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||
|
||||
"temp": {Mode: fs.ModeDir | 0700},
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
|
||||
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/5ey2wpmMpj483YYa7ZZQciYLA2cx3_l167JCqWW4Pd-5DVp81dj9EsBtVTwYptF6", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||
}, pkg.MustDecode("VIqqpf0ip9jcyw63i6E8lCMGUcLivQBe4Bevt3WusNac-1MSy5bzB647qGUBzl-W"), nil},
|
||||
|
||||
{"sample exec container overlay work", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
|
||||
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"identifier/acaDzHZv40dZaz4cGAXayqbRMgbEOuiuiUijZL8IgDQvyeCNMFE3onBMYfny-kXA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||
"identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||
|
||||
"temp": {Mode: fs.ModeDir | 0700},
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
|
||||
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/acaDzHZv40dZaz4cGAXayqbRMgbEOuiuiUijZL8IgDQvyeCNMFE3onBMYfny-kXA", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||
}, pkg.MustDecode("q8x2zQg4YZbKpPqKlEBj_uxXD9vOBaZ852qOuIsl9QdO73I_UMNpuUoPLtunxUYl"), nil},
|
||||
|
||||
{"sample exec container multiple layers", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
|
||||
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
|
||||
"checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK/check": {Mode: 0400, Data: []byte("layers")},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||
"identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||
"identifier/rXLKjjYfGSyoWmuvEJooHkvGJIZaC0IAWnKGvtPZkM15gBxAgW7mIXcxRVNOXAr4": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK")},
|
||||
"identifier/tfjrsVuBuFgzWgwz-yPppFtylYuC1VFWnKhyBiHbWTGkyz8lt7Ee9QXWaIHPXs4x": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||
|
||||
"temp": {Mode: fs.ModeDir | 0700},
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
|
||||
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
||||
{Mode: 0400, Path: "checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb", Data: []byte{}},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK"},
|
||||
{Mode: 0400, Path: "checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK/check", Data: []byte("layers")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/rXLKjjYfGSyoWmuvEJooHkvGJIZaC0IAWnKGvtPZkM15gBxAgW7mIXcxRVNOXAr4", Data: []byte("../checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/tfjrsVuBuFgzWgwz-yPppFtylYuC1VFWnKhyBiHbWTGkyz8lt7Ee9QXWaIHPXs4x", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||
}, pkg.MustDecode("SITnQ6PTV12PAQQjIuLUxkvsXQiC9Gq_HJQlcb4BPL5YnRHnx8lsW7PRM9YMLBsx"), nil},
|
||||
|
||||
{"sample exec container layer promotion", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
|
||||
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"identifier/1tQZOGmVk_JkpyiG84AKW_BXmlK_MvHUbh5WtMuthGbHUq7i7nL1bvdF-LoJbqNh": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||
"identifier/O-6VjlIUxc4PYLf5v35uhIeL8kkYCbHYklqlmDjFPXe0m4j6GkUDg5qwTzBRESnf": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||
"identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||
|
||||
"temp": {Mode: fs.ModeDir | 0700},
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
|
||||
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/1tQZOGmVk_JkpyiG84AKW_BXmlK_MvHUbh5WtMuthGbHUq7i7nL1bvdF-LoJbqNh", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/O-6VjlIUxc4PYLf5v35uhIeL8kkYCbHYklqlmDjFPXe0m4j6GkUDg5qwTzBRESnf", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||
}, pkg.MustDecode("fuC20BhMKr86TYzNPP2A-9P7mGLvdcOiG10exlhRvZm8ySI7csf0LhW3im_26l1N"), nil},
|
||||
|
||||
{"sample file short", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX": {Mode: 0400, Data: []byte{0}},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"identifier/lIx_W4M7tVOcQ8jh08EJOfXf4brRmkEEjvUa7c17vVUzlmtUxlhhrgqmc9aZhjbn": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
|
||||
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||
{Mode: 0400, Path: "checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX", Data: []byte{0}},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/lIx_W4M7tVOcQ8jh08EJOfXf4brRmkEEjvUa7c17vVUzlmtUxlhhrgqmc9aZhjbn", Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||
}, pkg.MustDecode("hnrfmJtivNKcgtETsKnU9gP_OwPgpNY3DSUJnmxnmeOODSO-YBvEBiTgieY4AAd7"), nil},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("roundtrip", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var buf bytes.Buffer
|
||||
if _, err := pkg.Flatten(
|
||||
tc.fsys,
|
||||
".",
|
||||
&buf,
|
||||
); !reflect.DeepEqual(err, tc.err) {
|
||||
t.Fatalf("Flatten: error = %v, want %v", err, tc.err)
|
||||
} else if tc.err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
s := pkg.NewDirScanner(bytes.NewReader(buf.Bytes()), true)
|
||||
var got []pkg.FlatEntry
|
||||
for s.Scan() {
|
||||
got = append(got, *s.Entry())
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
t.Fatalf("Err: error = %v", err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(got, tc.entries) {
|
||||
t.Fatalf("Scan: %#v, want %#v", got, tc.entries)
|
||||
}
|
||||
})
|
||||
|
||||
if tc.err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
t.Run("hash", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var got pkg.Checksum
|
||||
if err := pkg.HashFS(&got, tc.fsys, "."); err != nil {
|
||||
t.Fatalf("HashFS: error = %v", err)
|
||||
} else if got != tc.sum {
|
||||
t.Fatalf("HashFS: %v", &pkg.ChecksumMismatchError{
|
||||
Got: got,
|
||||
Want: tc.sum,
|
||||
})
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
411
internal/pkg/exec.go
Normal file
@@ -0,0 +1,411 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"slices"
|
||||
"strconv"
|
||||
"syscall"
|
||||
"time"
|
||||
"unique"
|
||||
|
||||
"hakurei.app/container"
|
||||
"hakurei.app/container/check"
|
||||
"hakurei.app/container/fhs"
|
||||
"hakurei.app/container/std"
|
||||
"hakurei.app/message"
|
||||
)
|
||||
|
||||
// AbsWork is the container pathname [CureContext.GetWorkDir] is mounted on.
|
||||
var AbsWork = fhs.AbsRoot.Append("work/")
|
||||
|
||||
// ExecPath is a slice of [Artifact] and the [check.Absolute] pathname they are
// made available under in the container.
|
||||
type ExecPath struct {
|
||||
// Pathname in the container mount namespace.
|
||||
P *check.Absolute
|
||||
// Artifacts to mount on the pathname; must contain at least one [Artifact].
// If there are multiple entries or W is true, P is set up as an overlay
// mount, and entries of A must not implement [FileArtifact].
|
||||
A []Artifact
|
||||
// Whether to make the mount point writable via the temp directory.
|
||||
W bool
|
||||
}
|
||||
|
||||
// layers returns pathnames collected from A deduplicated by checksum.
|
||||
func (p *ExecPath) layers(f *FContext) []*check.Absolute {
|
||||
msg := f.GetMessage()
|
||||
|
||||
layers := make([]*check.Absolute, 0, len(p.A))
|
||||
checksums := make(map[unique.Handle[Checksum]]struct{}, len(p.A))
|
||||
for i := range p.A {
|
||||
d := p.A[len(p.A)-1-i]
|
||||
pathname, checksum := f.GetArtifact(d)
|
||||
if _, ok := checksums[checksum]; ok {
|
||||
if msg.IsVerbose() {
|
||||
msg.Verbosef(
|
||||
"promoted layer %d as %s",
|
||||
len(p.A)-1-i, reportName(d, f.cache.Ident(d)),
|
||||
)
|
||||
}
|
||||
continue
|
||||
}
|
||||
checksums[checksum] = struct{}{}
|
||||
layers = append(layers, pathname)
|
||||
}
|
||||
slices.Reverse(layers)
|
||||
return layers
|
||||
}
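
// Editorial note, not part of the original source: a worked illustration of
// the deduplication above, assuming hypothetical artifacts base and extra
// where both base entries resolve to the same checksum.
//
//	p.A = []Artifact{base, extra, base}
//	// The reverse scan keeps p.A[2] and p.A[1] and skips p.A[0], logging
//	// "promoted layer 0"; after slices.Reverse the overlay layers are
//	// [extra, base].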
|
||||
|
||||
// Path returns a populated [ExecPath].
|
||||
func Path(pathname *check.Absolute, writable bool, a ...Artifact) ExecPath {
|
||||
return ExecPath{pathname, a, writable}
|
||||
}
|
||||
|
||||
// MustPath is like [Path], but takes a string pathname via [check.MustAbs].
|
||||
func MustPath(pathname string, writable bool, a ...Artifact) ExecPath {
|
||||
return ExecPath{check.MustAbs(pathname), a, writable}
|
||||
}
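
// Editorial usage sketch, not part of the original source: describing mount
// points with [Path] and [MustPath]. The rootfs and tools variables are
// hypothetical placeholder artifacts.
//
//	func examplePaths(rootfs, tools Artifact) []ExecPath {
//		return []ExecPath{
//			// writable overlay on the container root, layered from rootfs
//			MustPath("/", true, rootfs),
//			// read-only bind mount of a single artifact on /opt
//			MustPath("/opt", false, tools),
//		}
//	}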
|
||||
|
||||
const (
|
||||
// ExecTimeoutDefault replaces out of range [NewExec] timeout values.
|
||||
ExecTimeoutDefault = 15 * time.Minute
|
||||
// ExecTimeoutMax is the arbitrary upper bound of [NewExec] timeout.
|
||||
ExecTimeoutMax = 48 * time.Hour
|
||||
)
|
||||
|
||||
// An execArtifact is an [Artifact] that produces its output by running a
// program that is part of another [Artifact] in a [container].
//
// Methods of execArtifact do not modify any struct field or underlying arrays
// referred to by slices.
|
||||
type execArtifact struct {
|
||||
// Caller-supplied user-facing reporting name, guaranteed to be nonzero
|
||||
// during initialisation.
|
||||
name string
|
||||
// Caller-supplied inner mount points.
|
||||
paths []ExecPath
|
||||
|
||||
// Passed through to [container.Params].
|
||||
dir *check.Absolute
|
||||
// Passed through to [container.Params].
|
||||
env []string
|
||||
// Passed through to [container.Params].
|
||||
path *check.Absolute
|
||||
// Passed through to [container.Params].
|
||||
args []string
|
||||
|
||||
// Duration the initial process is allowed to run. The zero value is
// equivalent to [ExecTimeoutDefault]. This value is never encoded in Params
// because it cannot affect outcome.
|
||||
timeout time.Duration
|
||||
|
||||
// Caller-supplied exclusivity value, returned as is by IsExclusive.
|
||||
exclusive bool
|
||||
}
|
||||
|
||||
var _ fmt.Stringer = new(execArtifact)
|
||||
|
||||
// execNetArtifact is like execArtifact but implements [KnownChecksum] and has
|
||||
// its resulting container keep the host net namespace.
|
||||
type execNetArtifact struct {
|
||||
checksum Checksum
|
||||
|
||||
execArtifact
|
||||
}
|
||||
|
||||
var _ KnownChecksum = new(execNetArtifact)
|
||||
|
||||
// Checksum returns the caller-supplied checksum.
|
||||
func (a *execNetArtifact) Checksum() Checksum { return a.checksum }
|
||||
|
||||
// Kind returns the hardcoded [Kind] constant.
|
||||
func (*execNetArtifact) Kind() Kind { return KindExecNet }
|
||||
|
||||
// Params is [Checksum] concatenated with [KindExec] params.
|
||||
func (a *execNetArtifact) Params(ctx *IContext) {
|
||||
ctx.GetHash().Write(a.checksum[:])
|
||||
a.execArtifact.Params(ctx)
|
||||
}
|
||||
|
||||
// Cure cures the [Artifact] in the container described by the caller. The
|
||||
// container retains host networking.
|
||||
func (a *execNetArtifact) Cure(f *FContext) error {
|
||||
return a.cure(f, true)
|
||||
}
|
||||
|
||||
// NewExec returns a new [Artifact] that executes the program path in a
|
||||
// container with specified paths bind mounted read-only in order. A private
|
||||
// instance of /proc and /dev is made available to the container.
|
||||
//
|
||||
// The working and temporary directories are both created and mounted writable
|
||||
// on [AbsWork] and [fhs.AbsTmp] respectively. If one or more paths target
|
||||
// [AbsWork], the final entry is set up as a writable overlay mount on /work for
|
||||
// which the upperdir is the host side work directory. In this configuration,
|
||||
// the W field is ignored, and the program must avoid causing whiteout files to
|
||||
// be created. Cure fails if upperdir ends up with entries other than
// directories, regular files, or symlinks.
|
||||
//
|
||||
// If checksum is non-nil, the resulting [Artifact] implements [KnownChecksum]
|
||||
// and its container runs in the host net namespace.
|
||||
//
|
||||
// The container is allowed to run for the specified duration before the initial
// process and all processes originating from it are terminated. A zero or
// negative timeout value is equivalent to [ExecTimeoutDefault]; a timeout value
// greater than [ExecTimeoutMax] is equivalent to [ExecTimeoutMax].
//
// The user-facing name and exclusivity value are not accessible from the
// container and do not affect curing outcome. Because of this, they are omitted
// from the parameter data for computing the identifier.
|
||||
func NewExec(
|
||||
name string,
|
||||
checksum *Checksum,
|
||||
timeout time.Duration,
|
||||
exclusive bool,
|
||||
|
||||
dir *check.Absolute,
|
||||
env []string,
|
||||
pathname *check.Absolute,
|
||||
args []string,
|
||||
|
||||
paths ...ExecPath,
|
||||
) Artifact {
|
||||
if name == "" {
|
||||
name = "exec-" + path.Base(pathname.String())
|
||||
}
|
||||
if timeout <= 0 {
|
||||
timeout = ExecTimeoutDefault
|
||||
}
|
||||
if timeout > ExecTimeoutMax {
|
||||
timeout = ExecTimeoutMax
|
||||
}
|
||||
a := execArtifact{name, paths, dir, env, pathname, args, timeout, exclusive}
|
||||
if checksum == nil {
|
||||
return &a
|
||||
}
|
||||
return &execNetArtifact{*checksum, a}
|
||||
}
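
// Editorial usage sketch, not part of the original source: passing the
// arguments of [NewExec] in order. The rootfs and tools artifacts and the
// /opt/bin/build pathname are hypothetical placeholders.
//
//	a := NewExec(
//		"build-example", // user-facing reporting name
//		nil,             // nil checksum: no host networking
//		0,               // zero timeout: clamped to ExecTimeoutDefault
//		false,           // not exclusive
//
//		AbsWork,                         // working directory in the container
//		[]string{"PATH=/opt/bin"},       // environment
//		check.MustAbs("/opt/bin/build"), // program pathname
//		[]string{"build"},               // argv
//
//		MustPath("/", true, rootfs),
//		MustPath("/opt", false, tools),
//	)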
|
||||
|
||||
// Kind returns the hardcoded [Kind] constant.
|
||||
func (*execArtifact) Kind() Kind { return KindExec }
|
||||
|
||||
// Params writes paths, working directory, environment, executable pathname,
// and args.
|
||||
func (a *execArtifact) Params(ctx *IContext) {
|
||||
h := ctx.GetHash()
|
||||
|
||||
_0, _1 := []byte{0}, []byte{1}
|
||||
for _, p := range a.paths {
|
||||
if p.W {
|
||||
h.Write(_1)
|
||||
} else {
|
||||
h.Write(_0)
|
||||
}
|
||||
if p.P != nil {
|
||||
h.Write([]byte(p.P.String()))
|
||||
} else {
|
||||
h.Write([]byte("invalid P\x00"))
|
||||
}
|
||||
h.Write(_0)
|
||||
for _, d := range p.A {
|
||||
ctx.WriteIdent(d)
|
||||
}
|
||||
h.Write(_0)
|
||||
}
|
||||
h.Write(_0)
|
||||
h.Write([]byte(a.dir.String()))
|
||||
h.Write(_0)
|
||||
for _, e := range a.env {
|
||||
h.Write([]byte(e))
|
||||
}
|
||||
h.Write(_0)
|
||||
h.Write([]byte(a.path.String()))
|
||||
h.Write(_0)
|
||||
for _, arg := range a.args {
|
||||
h.Write([]byte(arg))
|
||||
}
|
||||
}
|
||||
|
||||
// Dependencies returns a slice of all artifacts collected from caller-supplied
|
||||
// [ExecPath].
|
||||
func (a *execArtifact) Dependencies() []Artifact {
|
||||
artifacts := make([][]Artifact, 0, len(a.paths))
|
||||
for _, p := range a.paths {
|
||||
artifacts = append(artifacts, p.A)
|
||||
}
|
||||
return slices.Concat(artifacts...)
|
||||
}
|
||||
|
||||
// IsExclusive returns the caller-supplied exclusivity value.
|
||||
func (a *execArtifact) IsExclusive() bool { return a.exclusive }
|
||||
|
||||
// String returns the caller-supplied reporting name.
|
||||
func (a *execArtifact) String() string { return a.name }
|
||||
|
||||
// Cure cures the [Artifact] in the container described by the caller.
|
||||
func (a *execArtifact) Cure(f *FContext) (err error) {
|
||||
return a.cure(f, false)
|
||||
}
|
||||
|
||||
const (
|
||||
// execWaitDelay is passed through to [container.Params].
|
||||
execWaitDelay = time.Nanosecond
|
||||
)
|
||||
|
||||
// scanVerbose prefixes program output for a verbose [message.Msg].
|
||||
func scanVerbose(
|
||||
msg message.Msg,
|
||||
done chan<- struct{},
|
||||
prefix string,
|
||||
r io.Reader,
|
||||
) {
|
||||
defer close(done)
|
||||
s := bufio.NewScanner(r)
|
||||
s.Buffer(
|
||||
make([]byte, bufio.MaxScanTokenSize),
|
||||
bufio.MaxScanTokenSize<<12,
|
||||
)
|
||||
for s.Scan() {
|
||||
msg.Verbose(prefix, s.Text())
|
||||
}
|
||||
if err := s.Err(); err != nil && !errors.Is(err, os.ErrClosed) {
|
||||
msg.Verbose("*"+prefix, err)
|
||||
}
|
||||
}
|
||||
|
||||
// cure is like Cure but optionally retains the host net namespace. This is
// used for the [KnownChecksum] variant where networking is allowed.
|
||||
func (a *execArtifact) cure(f *FContext, hostNet bool) (err error) {
|
||||
overlayWorkIndex := -1
|
||||
for i, p := range a.paths {
|
||||
if p.P == nil || len(p.A) == 0 {
|
||||
return os.ErrInvalid
|
||||
}
|
||||
if p.P.Is(AbsWork) {
|
||||
overlayWorkIndex = i
|
||||
}
|
||||
}
|
||||
|
||||
var artifactCount int
|
||||
for _, p := range a.paths {
|
||||
artifactCount += len(p.A)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(f.Unwrap(), a.timeout)
|
||||
defer cancel()
|
||||
|
||||
z := container.New(ctx, f.GetMessage())
|
||||
z.WaitDelay = execWaitDelay
|
||||
z.SeccompPresets |= std.PresetStrict & ^std.PresetDenyNS
|
||||
z.ParentPerm = 0700
|
||||
z.HostNet = hostNet
|
||||
z.Hostname = "cure"
|
||||
if z.HostNet {
|
||||
z.Hostname = "cure-net"
|
||||
}
|
||||
z.Uid, z.Gid = (1<<10)-1, (1<<10)-1
|
||||
if msg := f.GetMessage(); msg.IsVerbose() {
|
||||
var stdout, stderr io.ReadCloser
|
||||
if stdout, err = z.StdoutPipe(); err != nil {
|
||||
return
|
||||
}
|
||||
if stderr, err = z.StderrPipe(); err != nil {
|
||||
_ = stdout.Close()
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
if err != nil && !errors.As(err, new(*exec.ExitError)) {
|
||||
_ = stdout.Close()
|
||||
_ = stderr.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
stdoutDone, stderrDone := make(chan struct{}), make(chan struct{})
|
||||
go scanVerbose(msg, stdoutDone, "("+a.name+":1)", stdout)
|
||||
go scanVerbose(msg, stderrDone, "("+a.name+":2)", stderr)
|
||||
defer func() { <-stdoutDone; <-stderrDone }()
|
||||
}
|
||||
|
||||
z.Dir, z.Env, z.Path, z.Args = a.dir, a.env, a.path, a.args
|
||||
z.Grow(len(a.paths) + 4)
|
||||
|
||||
temp, work := f.GetTempDir(), f.GetWorkDir()
|
||||
for i, b := range a.paths {
|
||||
if i == overlayWorkIndex {
|
||||
if err = os.MkdirAll(work.String(), 0700); err != nil {
|
||||
return
|
||||
}
|
||||
tempWork := temp.Append(".work")
|
||||
if err = os.MkdirAll(tempWork.String(), 0700); err != nil {
|
||||
return
|
||||
}
|
||||
z.Overlay(
|
||||
AbsWork,
|
||||
work,
|
||||
tempWork,
|
||||
b.layers(f)...,
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
if a.paths[i].W {
|
||||
tempUpper, tempWork := temp.Append(
|
||||
".upper", strconv.Itoa(i),
|
||||
), temp.Append(
|
||||
".work", strconv.Itoa(i),
|
||||
)
|
||||
if err = os.MkdirAll(tempUpper.String(), 0700); err != nil {
|
||||
return
|
||||
}
|
||||
if err = os.MkdirAll(tempWork.String(), 0700); err != nil {
|
||||
return
|
||||
}
|
||||
z.Overlay(b.P, tempUpper, tempWork, b.layers(f)...)
|
||||
} else if len(b.A) == 1 {
|
||||
pathname, _ := f.GetArtifact(b.A[0])
|
||||
z.Bind(pathname, b.P, 0)
|
||||
} else {
|
||||
z.OverlayReadonly(b.P, b.layers(f)...)
|
||||
}
|
||||
}
|
||||
if overlayWorkIndex < 0 {
|
||||
z.Bind(
|
||||
work,
|
||||
AbsWork,
|
||||
std.BindWritable|std.BindEnsure,
|
||||
)
|
||||
}
|
||||
z.Bind(
|
||||
f.GetTempDir(),
|
||||
fhs.AbsTmp,
|
||||
std.BindWritable|std.BindEnsure,
|
||||
)
|
||||
z.Proc(fhs.AbsProc).Dev(fhs.AbsDev, true)
|
||||
|
||||
if err = z.Start(); err != nil {
|
||||
return
|
||||
}
|
||||
if err = z.Serve(); err != nil {
|
||||
return
|
||||
}
|
||||
if err = z.Wait(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// do not allow empty directories to succeed
|
||||
for {
|
||||
err = syscall.Rmdir(work.String())
|
||||
if err != syscall.EINTR {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err != nil && errors.Is(err, syscall.ENOTEMPTY) {
|
||||
err = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
339
internal/pkg/exec_test.go
Normal file
@@ -0,0 +1,339 @@
|
||||
package pkg_test
|
||||
|
||||
//go:generate env CGO_ENABLED=0 go build -tags testtool -o testdata/testtool ./testdata
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"encoding/gob"
|
||||
"errors"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"slices"
|
||||
"testing"
|
||||
"unique"
|
||||
|
||||
"hakurei.app/container/check"
|
||||
"hakurei.app/container/stub"
|
||||
"hakurei.app/hst"
|
||||
"hakurei.app/internal/pkg"
|
||||
)
|
||||
|
||||
// testtoolBin is the container test tool binary made available to the
|
||||
// execArtifact for testing its curing environment.
|
||||
//
|
||||
//go:embed testdata/testtool
|
||||
var testtoolBin []byte
|
||||
|
||||
func TestExec(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
wantChecksumOffline := pkg.MustDecode(
|
||||
"GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9",
|
||||
)
|
||||
|
||||
checkWithCache(t, []cacheTestCase{
|
||||
{"offline", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||
c.SetStrict(true)
|
||||
testtool, testtoolDestroy := newTesttool()
|
||||
|
||||
cureMany(t, c, []cureStep{
|
||||
{"container", pkg.NewExec(
|
||||
"exec-offline", nil, 0, false,
|
||||
pkg.AbsWork,
|
||||
[]string{"HAKUREI_TEST=1"},
|
||||
check.MustAbs("/opt/bin/testtool"),
|
||||
[]string{"testtool"},
|
||||
|
||||
pkg.MustPath("/file", false, newStubFile(
|
||||
pkg.KindHTTPGet,
|
||||
pkg.ID{0xfe, 0},
|
||||
nil,
|
||||
nil, nil,
|
||||
)),
|
||||
pkg.MustPath("/.hakurei", false, &stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
params: []byte("empty directory"),
|
||||
cure: func(t *pkg.TContext) error {
|
||||
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||
},
|
||||
}),
|
||||
pkg.MustPath("/opt", false, testtool),
|
||||
), ignorePathname, wantChecksumOffline, nil},
|
||||
|
||||
{"error passthrough", pkg.NewExec(
|
||||
"", nil, 0, true,
|
||||
pkg.AbsWork,
|
||||
[]string{"HAKUREI_TEST=1"},
|
||||
check.MustAbs("/opt/bin/testtool"),
|
||||
[]string{"testtool"},
|
||||
|
||||
pkg.MustPath("/proc/nonexistent", false, &stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
params: []byte("doomed artifact"),
|
||||
cure: func(t *pkg.TContext) error {
|
||||
return stub.UniqueError(0xcafe)
|
||||
},
|
||||
}),
|
||||
), nil, pkg.Checksum{}, &pkg.DependencyCureError{
|
||||
{
|
||||
Ident: unique.Make(pkg.ID(pkg.MustDecode(
|
||||
"CWEoJqnSBpWf8uryC2qnIe3O1a_FZWUWZGbiVPsQFGW7pvDHiSwoK3QCU9-uxN87",
|
||||
))),
|
||||
Err: stub.UniqueError(0xcafe),
|
||||
},
|
||||
}},
|
||||
|
||||
{"invalid paths", pkg.NewExec(
|
||||
"", nil, 0, false,
|
||||
pkg.AbsWork,
|
||||
[]string{"HAKUREI_TEST=1"},
|
||||
check.MustAbs("/opt/bin/testtool"),
|
||||
[]string{"testtool"},
|
||||
|
||||
pkg.ExecPath{},
|
||||
), nil, pkg.Checksum{}, os.ErrInvalid},
|
||||
})
|
||||
|
||||
// check init failure passthrough
|
||||
var exitError *exec.ExitError
|
||||
if _, _, err := c.Cure(pkg.NewExec(
|
||||
"", nil, 0, false,
|
||||
pkg.AbsWork,
|
||||
nil,
|
||||
check.MustAbs("/opt/bin/testtool"),
|
||||
[]string{"testtool"},
|
||||
)); !errors.As(err, &exitError) ||
|
||||
exitError.ExitCode() != hst.ExitFailure {
|
||||
t.Fatalf("Cure: error = %v, want init exit status 1", err)
|
||||
}
|
||||
|
||||
testtoolDestroy(t, base, c)
|
||||
}, pkg.MustDecode("UiV6kMz7KrTsc_yphiyQzFLqjRanHxUOwrBMtkKuWo4mOO6WgPFAcoUEeSp7eVIW")},
|
||||
|
||||
{"net", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||
c.SetStrict(true)
|
||||
testtool, testtoolDestroy := newTesttool()
|
||||
|
||||
wantChecksum := pkg.MustDecode(
|
||||
"a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W",
|
||||
)
|
||||
cureMany(t, c, []cureStep{
|
||||
{"container", pkg.NewExec(
|
||||
"exec-net", &wantChecksum, 0, false,
|
||||
pkg.AbsWork,
|
||||
[]string{"HAKUREI_TEST=1"},
|
||||
check.MustAbs("/opt/bin/testtool"),
|
||||
[]string{"testtool", "net"},
|
||||
|
||||
pkg.MustPath("/file", false, newStubFile(
|
||||
pkg.KindHTTPGet,
|
||||
pkg.ID{0xfe, 0},
|
||||
nil,
|
||||
nil, nil,
|
||||
)),
|
||||
pkg.MustPath("/.hakurei", false, &stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
params: []byte("empty directory"),
|
||||
cure: func(t *pkg.TContext) error {
|
||||
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||
},
|
||||
}),
|
||||
pkg.MustPath("/opt", false, testtool),
|
||||
), ignorePathname, wantChecksum, nil},
|
||||
})
|
||||
|
||||
testtoolDestroy(t, base, c)
|
||||
}, pkg.MustDecode("ek4K-0d4iRSArkY2TCs3WK34DbiYeOmhE_4vsJTSu_6roY4ZF3YG6eKRooal-i1o")},
|
||||
|
||||
{"overlay root", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||
c.SetStrict(true)
|
||||
testtool, testtoolDestroy := newTesttool()
|
||||
|
||||
cureMany(t, c, []cureStep{
|
||||
{"container", pkg.NewExec(
|
||||
"exec-overlay-root", nil, 0, false,
|
||||
pkg.AbsWork,
|
||||
[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
|
||||
check.MustAbs("/opt/bin/testtool"),
|
||||
[]string{"testtool"},
|
||||
|
||||
pkg.MustPath("/", true, &stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
params: []byte("empty directory"),
|
||||
cure: func(t *pkg.TContext) error {
|
||||
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||
},
|
||||
}),
|
||||
pkg.MustPath("/opt", false, testtool),
|
||||
), ignorePathname, wantChecksumOffline, nil},
|
||||
})
|
||||
|
||||
testtoolDestroy(t, base, c)
|
||||
}, pkg.MustDecode("VIqqpf0ip9jcyw63i6E8lCMGUcLivQBe4Bevt3WusNac-1MSy5bzB647qGUBzl-W")},
|
||||
|
||||
{"overlay work", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||
c.SetStrict(true)
|
||||
testtool, testtoolDestroy := newTesttool()
|
||||
|
||||
cureMany(t, c, []cureStep{
|
||||
{"container", pkg.NewExec(
|
||||
"exec-overlay-work", nil, 0, false,
|
||||
pkg.AbsWork,
|
||||
[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
|
||||
check.MustAbs("/work/bin/testtool"),
|
||||
[]string{"testtool"},
|
||||
|
||||
pkg.MustPath("/", true, &stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
params: []byte("empty directory"),
|
||||
cure: func(t *pkg.TContext) error {
|
||||
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||
},
|
||||
}), pkg.MustPath("/work/", false, &stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
params: []byte("empty directory"),
|
||||
cure: func(t *pkg.TContext) error {
|
||||
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||
},
|
||||
}), pkg.Path(pkg.AbsWork, false /* ignored */, testtool),
|
||||
), ignorePathname, wantChecksumOffline, nil},
|
||||
})
|
||||
|
||||
testtoolDestroy(t, base, c)
|
||||
}, pkg.MustDecode("q8x2zQg4YZbKpPqKlEBj_uxXD9vOBaZ852qOuIsl9QdO73I_UMNpuUoPLtunxUYl")},
|
||||
|
||||
{"multiple layers", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||
c.SetStrict(true)
|
||||
testtool, testtoolDestroy := newTesttool()
|
||||
|
||||
cureMany(t, c, []cureStep{
|
||||
{"container", pkg.NewExec(
|
||||
"exec-multiple-layers", nil, 0, false,
|
||||
pkg.AbsWork,
|
||||
[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
|
||||
check.MustAbs("/opt/bin/testtool"),
|
||||
[]string{"testtool", "layers"},
|
||||
|
||||
pkg.MustPath("/", true, &stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
params: []byte("empty directory"),
|
||||
cure: func(t *pkg.TContext) error {
|
||||
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||
},
|
||||
}, &stubArtifactF{
|
||||
kind: pkg.KindExec,
|
||||
params: []byte("test sample with dependencies"),
|
||||
|
||||
deps: slices.Repeat([]pkg.Artifact{newStubFile(
|
||||
pkg.KindHTTPGet,
|
||||
pkg.ID{0xfe, 0},
|
||||
nil,
|
||||
nil, nil,
|
||||
), &stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
params: []byte("empty directory"),
|
||||
|
||||
// this is queued and might run instead of the other
|
||||
// one so do not leave it as nil
|
||||
cure: func(t *pkg.TContext) error {
|
||||
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||
},
|
||||
}}, 1<<5 /* concurrent cache hits */),
|
||||
|
||||
cure: func(f *pkg.FContext) error {
|
||||
work := f.GetWorkDir()
|
||||
if err := os.MkdirAll(work.String(), 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(work.Append("check").String(), []byte("layers"), 0400)
|
||||
},
|
||||
}),
|
||||
pkg.MustPath("/opt", false, testtool),
|
||||
), ignorePathname, wantChecksumOffline, nil},
|
||||
})
|
||||
|
||||
testtoolDestroy(t, base, c)
|
||||
}, pkg.MustDecode("SITnQ6PTV12PAQQjIuLUxkvsXQiC9Gq_HJQlcb4BPL5YnRHnx8lsW7PRM9YMLBsx")},
|
||||
|
||||
{"overlay layer promotion", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||
c.SetStrict(true)
|
||||
testtool, testtoolDestroy := newTesttool()
|
||||
|
||||
cureMany(t, c, []cureStep{
|
||||
{"container", pkg.NewExec(
|
||||
"exec-layer-promotion", nil, 0, true,
|
||||
pkg.AbsWork,
|
||||
[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
|
||||
check.MustAbs("/opt/bin/testtool"),
|
||||
[]string{"testtool", "promote"},
|
||||
|
||||
pkg.MustPath("/", true, &stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
params: []byte("another empty directory"),
|
||||
cure: func(t *pkg.TContext) error {
|
||||
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||
},
|
||||
}, &stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
params: []byte("empty directory"),
|
||||
cure: func(t *pkg.TContext) error {
|
||||
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||
},
|
||||
}),
|
||||
pkg.MustPath("/opt", false, testtool),
|
||||
), ignorePathname, wantChecksumOffline, nil},
|
||||
})
|
||||
|
||||
testtoolDestroy(t, base, c)
|
||||
}, pkg.MustDecode("fuC20BhMKr86TYzNPP2A-9P7mGLvdcOiG10exlhRvZm8ySI7csf0LhW3im_26l1N")},
|
||||
})
|
||||
}
|
||||
|
||||
// newTesttool returns an [Artifact] that cures into testtoolBin. The returned
|
||||
// function must be called at the end of the test but not deferred.
|
||||
func newTesttool() (
|
||||
testtool pkg.Artifact,
|
||||
testtoolDestroy func(t *testing.T, base *check.Absolute, c *pkg.Cache),
|
||||
) {
|
||||
// testtoolBin is built during go:generate and is not deterministic
|
||||
testtool = overrideIdent{pkg.ID{0xfe, 0xff}, &stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
cure: func(t *pkg.TContext) error {
|
||||
work := t.GetWorkDir()
|
||||
if err := os.MkdirAll(
|
||||
work.Append("bin").String(),
|
||||
0700,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ift, err := net.Interfaces(); err != nil {
|
||||
return err
|
||||
} else {
|
||||
var f *os.File
|
||||
if f, err = os.Create(t.GetWorkDir().Append(
|
||||
"ift",
|
||||
).String()); err != nil {
|
||||
return err
|
||||
} else {
|
||||
err = gob.NewEncoder(f).Encode(ift)
|
||||
closeErr := f.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if closeErr != nil {
|
||||
return closeErr
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return os.WriteFile(t.GetWorkDir().Append(
|
||||
"bin",
|
||||
"testtool",
|
||||
).String(), testtoolBin, 0500)
|
||||
},
|
||||
}}
|
||||
testtoolDestroy = newDestroyArtifactFunc(testtool)
|
||||
return
|
||||
}
|
||||
61
internal/pkg/file.go
Normal file
@@ -0,0 +1,61 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha512"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// A fileArtifact is an [Artifact] that cures into data known ahead of time.
|
||||
type fileArtifact []byte
|
||||
|
||||
var _ KnownChecksum = new(fileArtifact)
|
||||
|
||||
// fileArtifactNamed embeds fileArtifact alongside a caller-supplied name.
|
||||
type fileArtifactNamed struct {
|
||||
fileArtifact
|
||||
// Caller-supplied user-facing reporting name.
|
||||
name string
|
||||
}
|
||||
|
||||
var _ fmt.Stringer = new(fileArtifactNamed)
|
||||
var _ KnownChecksum = new(fileArtifactNamed)
|
||||
|
||||
// String returns the caller-supplied reporting name.
|
||||
func (a *fileArtifactNamed) String() string { return a.name }
|
||||
|
||||
// NewFile returns a [FileArtifact] that cures into a caller-supplied byte slice.
|
||||
//
|
||||
// Caller must not modify data after NewFile returns.
|
||||
func NewFile(name string, data []byte) FileArtifact {
|
||||
f := fileArtifact(data)
|
||||
if name != "" {
|
||||
return &fileArtifactNamed{f, name}
|
||||
}
|
||||
return &f
|
||||
}
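
// Editorial usage sketch, not part of the original source: a [FileArtifact]
// carrying a small configuration file known ahead of time. The name and
// contents are hypothetical placeholders.
//
//	conf := NewFile("example.conf", []byte("# hypothetical contents\n"))
//	// fileArtifact implements KnownChecksum, so the checksum is available
//	// without curing.
//	sum := conf.(KnownChecksum).Checksum()
//	_ = sum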
|
||||
|
||||
// Kind returns the hardcoded [Kind] constant.
|
||||
func (*fileArtifact) Kind() Kind { return KindFile }
|
||||
|
||||
// Params writes the result of Cure.
|
||||
func (a *fileArtifact) Params(ctx *IContext) { ctx.GetHash().Write(*a) }
|
||||
|
||||
// Dependencies returns a nil slice.
|
||||
func (*fileArtifact) Dependencies() []Artifact { return nil }
|
||||
|
||||
// IsExclusive returns false: Cure returns a prepopulated buffer.
|
||||
func (*fileArtifact) IsExclusive() bool { return false }
|
||||
|
||||
// Checksum computes and returns the checksum of caller-supplied data.
|
||||
func (a *fileArtifact) Checksum() Checksum {
|
||||
h := sha512.New384()
|
||||
h.Write(*a)
|
||||
return Checksum(h.Sum(nil))
|
||||
}
|
||||
|
||||
// Cure returns the caller-supplied data.
|
||||
func (a *fileArtifact) Cure(*RContext) (io.ReadCloser, error) {
|
||||
return io.NopCloser(bytes.NewReader(*a)), nil
|
||||
}
|
||||
29
internal/pkg/file_test.go
Normal file
@@ -0,0 +1,29 @@
|
||||
package pkg_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"hakurei.app/container/check"
|
||||
"hakurei.app/internal/pkg"
|
||||
)
|
||||
|
||||
func TestFile(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
checkWithCache(t, []cacheTestCase{
|
||||
{"file", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||
c.SetStrict(true)
|
||||
|
||||
cureMany(t, c, []cureStep{
|
||||
{"short", pkg.NewFile("null", []byte{0}), base.Append(
|
||||
"identifier",
|
||||
"lIx_W4M7tVOcQ8jh08EJOfXf4brRmkEEjvUa7c17vVUzlmtUxlhhrgqmc9aZhjbn",
|
||||
), pkg.MustDecode(
|
||||
"vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX",
|
||||
), nil},
|
||||
})
|
||||
}, pkg.MustDecode(
|
||||
"hnrfmJtivNKcgtETsKnU9gP_OwPgpNY3DSUJnmxnmeOODSO-YBvEBiTgieY4AAd7",
|
||||
)},
|
||||
})
|
||||
}
|
||||
94
internal/pkg/net.go
Normal file
@@ -0,0 +1,94 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"path"
|
||||
"unique"
|
||||
)
|
||||
|
||||
// An httpArtifact is an [Artifact] backed by an [http] URL string. The method
// is hardcoded as [http.MethodGet]. A request body is not allowed because it
// cannot be deterministically represented by Params.
|
||||
type httpArtifact struct {
|
||||
// Caller-supplied url string.
|
||||
url string
|
||||
|
||||
// Caller-supplied checksum of the response body. This is validated when
|
||||
// closing the [io.ReadCloser] returned by Cure.
|
||||
checksum unique.Handle[Checksum]
|
||||
|
||||
// doFunc is the Do method of [http.Client] supplied by the caller.
|
||||
doFunc func(req *http.Request) (*http.Response, error)
|
||||
}
|
||||
|
||||
var _ KnownChecksum = new(httpArtifact)
|
||||
var _ fmt.Stringer = new(httpArtifact)
|
||||
|
||||
// NewHTTPGet returns a new [FileArtifact] backed by the supplied client. A GET
|
||||
// request is set up for url. If c is nil, [http.DefaultClient] is used instead.
|
||||
func NewHTTPGet(
|
||||
c *http.Client,
|
||||
url string,
|
||||
checksum Checksum,
|
||||
) FileArtifact {
|
||||
if c == nil {
|
||||
c = http.DefaultClient
|
||||
}
|
||||
return &httpArtifact{url: url, checksum: unique.Make(checksum), doFunc: c.Do}
|
||||
}
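
// Editorial usage sketch, not part of the original source: fetching a file
// with the default client. The URL is a hypothetical placeholder and want
// stands in for the known checksum of the expected response body.
//
//	var want Checksum // hypothetical known checksum of the response body
//	f := NewHTTPGet(nil, "https://example.org/source.tar.gz", want)
//	_ = f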
|
||||
|
||||
// Kind returns the hardcoded [Kind] constant.
|
||||
func (*httpArtifact) Kind() Kind { return KindHTTPGet }
|
||||
|
||||
// Params writes the backing url string. Client is not represented as it does
|
||||
// not affect [Cache.Cure] outcome.
|
||||
func (a *httpArtifact) Params(ctx *IContext) {
|
||||
ctx.GetHash().Write([]byte(a.url))
|
||||
}
|
||||
|
||||
// Dependencies returns a nil slice.
|
||||
func (*httpArtifact) Dependencies() []Artifact { return nil }
|
||||
|
||||
// IsExclusive returns false: Cure returns as soon as a response is received.
|
||||
func (*httpArtifact) IsExclusive() bool { return false }
|
||||
|
||||
// Checksum returns the caller-supplied checksum.
|
||||
func (a *httpArtifact) Checksum() Checksum { return a.checksum.Value() }
|
||||
|
||||
// String returns [path.Base] over the backing url.
|
||||
func (a *httpArtifact) String() string { return path.Base(a.url) }
|
||||
|
||||
// ResponseStatusError is returned when a response from an [http.Client] has a
// status code other than [http.StatusOK].
|
||||
type ResponseStatusError int
|
||||
|
||||
func (e ResponseStatusError) Error() string {
|
||||
return "the requested URL returned non-OK status: " + http.StatusText(int(e))
|
||||
}
|
||||
|
||||
// Cure sends the http request and returns the resulting response body reader
|
||||
// wrapped to perform checksum validation. It is valid but not encouraged to
|
||||
// close the resulting [io.ReadCloser] before it is read to EOF, as that causes
|
||||
// Close to block until all remaining data is consumed and validated.
|
||||
func (a *httpArtifact) Cure(r *RContext) (rc io.ReadCloser, err error) {
|
||||
var req *http.Request
|
||||
req, err = http.NewRequestWithContext(r.Unwrap(), http.MethodGet, a.url, nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var resp *http.Response
|
||||
if resp, err = a.doFunc(req); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
_ = resp.Body.Close()
|
||||
return nil, ResponseStatusError(resp.StatusCode)
|
||||
}
|
||||
|
||||
rc = r.NewMeasuredReader(resp.Body, a.checksum)
|
||||
return
|
||||
}
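
// Editorial usage sketch, not part of the original source: the read-then-close
// pattern described by the comment above. Here a is an httpArtifact and rctx
// stands in for an [RContext] obtained elsewhere; both are assumptions.
//
//	rc, err := a.Cure(rctx)
//	if err != nil {
//		return err
//	}
//	data, err := io.ReadAll(rc)
//	if err != nil {
//		_ = rc.Close()
//		return err
//	}
//	// Close validates the checksum of everything read from the response body.
//	if err := rc.Close(); err != nil {
//		return err // e.g. *ChecksumMismatchError
//	}
//	_ = data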
|
||||
161
internal/pkg/net_test.go
Normal file
@@ -0,0 +1,161 @@
|
||||
package pkg_test
|
||||
|
||||
import (
|
||||
"crypto/sha512"
|
||||
"io"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"testing"
|
||||
"testing/fstest"
|
||||
"unique"
|
||||
"unsafe"
|
||||
|
||||
"hakurei.app/container/check"
|
||||
"hakurei.app/internal/pkg"
|
||||
)
|
||||
|
||||
func TestHTTPGet(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const testdata = "\x7f\xe1\x69\xa2\xdd\x63\x96\x26\x83\x79\x61\x8b\xf0\x3f\xd5\x16\x9a\x39\x3a\xdb\xcf\xb1\xbc\x8d\x33\xff\x75\xee\x62\x56\xa9\xf0\x27\xac\x13\x94\x69"
|
||||
|
||||
testdataChecksum := func() unique.Handle[pkg.Checksum] {
|
||||
h := sha512.New384()
|
||||
h.Write([]byte(testdata))
|
||||
return unique.Make(pkg.Checksum(h.Sum(nil)))
|
||||
}()
|
||||
|
||||
var transport http.Transport
|
||||
client := http.Client{Transport: &transport}
|
||||
transport.RegisterProtocol("file", http.NewFileTransportFS(fstest.MapFS{
|
||||
"testdata": {Data: []byte(testdata), Mode: 0400},
|
||||
}))
|
||||
|
||||
checkWithCache(t, []cacheTestCase{
|
||||
{"direct", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||
var r pkg.RContext
|
||||
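// The cache field of RContext is unexported, so the test injects the
// pkg.Cache under test via reflect and unsafe in order to exercise the
// artifact's Cure method directly, outside of Cache.Cure.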
rCacheVal := reflect.ValueOf(&r).Elem().FieldByName("cache")
|
||||
reflect.NewAt(
|
||||
rCacheVal.Type(),
|
||||
unsafe.Pointer(rCacheVal.UnsafeAddr()),
|
||||
).Elem().Set(reflect.ValueOf(c))
|
||||
|
||||
f := pkg.NewHTTPGet(
|
||||
&client,
|
||||
"file:///testdata",
|
||||
testdataChecksum.Value(),
|
||||
)
|
||||
var got []byte
|
||||
if rc, err := f.Cure(&r); err != nil {
|
||||
t.Fatalf("Cure: error = %v", err)
|
||||
} else if got, err = io.ReadAll(rc); err != nil {
|
||||
t.Fatalf("ReadAll: error = %v", err)
|
||||
} else if string(got) != testdata {
|
||||
t.Fatalf("Cure: %x, want %x", got, testdata)
|
||||
} else if err = rc.Close(); err != nil {
|
||||
t.Fatalf("Close: error = %v", err)
|
||||
}
|
||||
|
||||
// check direct validation
|
||||
f = pkg.NewHTTPGet(
|
||||
&client,
|
||||
"file:///testdata",
|
||||
pkg.Checksum{},
|
||||
)
|
||||
wantErrMismatch := &pkg.ChecksumMismatchError{
|
||||
Got: testdataChecksum.Value(),
|
||||
}
|
||||
if rc, err := f.Cure(&r); err != nil {
|
||||
t.Fatalf("Cure: error = %v", err)
|
||||
} else if got, err = io.ReadAll(rc); err != nil {
|
||||
t.Fatalf("ReadAll: error = %v", err)
|
||||
} else if string(got) != testdata {
|
||||
t.Fatalf("Cure: %x, want %x", got, testdata)
|
||||
} else if err = rc.Close(); !reflect.DeepEqual(err, wantErrMismatch) {
|
||||
t.Fatalf("Close: error = %#v, want %#v", err, wantErrMismatch)
|
||||
}
|
||||
|
||||
// check fallback validation performed by Close when the body is not read to EOF
|
||||
if rc, err := f.Cure(&r); err != nil {
|
||||
t.Fatalf("Cure: error = %v", err)
|
||||
} else if err = rc.Close(); !reflect.DeepEqual(err, wantErrMismatch) {
|
||||
t.Fatalf("Close: error = %#v, want %#v", err, wantErrMismatch)
|
||||
}
|
||||
|
||||
// check direct response error
|
||||
f = pkg.NewHTTPGet(
|
||||
&client,
|
||||
"file:///nonexistent",
|
||||
pkg.Checksum{},
|
||||
)
|
||||
wantErrNotFound := pkg.ResponseStatusError(http.StatusNotFound)
|
||||
if _, err := f.Cure(&r); !reflect.DeepEqual(err, wantErrNotFound) {
|
||||
t.Fatalf("Cure: error = %#v, want %#v", err, wantErrNotFound)
|
||||
}
|
||||
}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C")},
|
||||
|
||||
{"cure", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||
var r pkg.RContext
|
||||
rCacheVal := reflect.ValueOf(&r).Elem().FieldByName("cache")
|
||||
reflect.NewAt(
|
||||
rCacheVal.Type(),
|
||||
unsafe.Pointer(rCacheVal.UnsafeAddr()),
|
||||
).Elem().Set(reflect.ValueOf(c))
|
||||
|
||||
f := pkg.NewHTTPGet(
|
||||
&client,
|
||||
"file:///testdata",
|
||||
testdataChecksum.Value(),
|
||||
)
|
||||
wantPathname := base.Append(
|
||||
"identifier",
|
||||
"NqVORkT6L9HX6Za7kT2zcibY10qFqBaxEjPiYFrBQX-ZFr3yxCzJxbKOP0zVjeWb",
|
||||
)
|
||||
if pathname, checksum, err := c.Cure(f); err != nil {
|
||||
t.Fatalf("Cure: error = %v", err)
|
||||
} else if !pathname.Is(wantPathname) {
|
||||
t.Fatalf("Cure: %q, want %q", pathname, wantPathname)
|
||||
} else if checksum != testdataChecksum {
|
||||
t.Fatalf("Cure: %x, want %x", checksum.Value(), testdataChecksum.Value())
|
||||
}
|
||||
|
||||
var got []byte
|
||||
if rc, err := f.Cure(&r); err != nil {
|
||||
t.Fatalf("Cure: error = %v", err)
|
||||
} else if got, err = io.ReadAll(rc); err != nil {
|
||||
t.Fatalf("ReadAll: error = %v", err)
|
||||
} else if string(got) != testdata {
|
||||
t.Fatalf("Cure: %x, want %x", got, testdata)
|
||||
} else if err = rc.Close(); err != nil {
|
||||
t.Fatalf("Close: error = %v", err)
|
||||
}
|
||||
|
||||
// check load from cache
|
||||
f = pkg.NewHTTPGet(
|
||||
&client,
|
||||
"file:///testdata",
|
||||
testdataChecksum.Value(),
|
||||
)
|
||||
if rc, err := f.Cure(&r); err != nil {
|
||||
t.Fatalf("Cure: error = %v", err)
|
||||
} else if got, err = io.ReadAll(rc); err != nil {
|
||||
t.Fatalf("ReadAll: error = %v", err)
|
||||
} else if string(got) != testdata {
|
||||
t.Fatalf("Cure: %x, want %x", got, testdata)
|
||||
} else if err = rc.Close(); err != nil {
|
||||
t.Fatalf("Close: error = %v", err)
|
||||
}
|
||||
|
||||
// check error passthrough
|
||||
f = pkg.NewHTTPGet(
|
||||
&client,
|
||||
"file:///nonexistent",
|
||||
pkg.Checksum{},
|
||||
)
|
||||
wantErrNotFound := pkg.ResponseStatusError(http.StatusNotFound)
|
||||
if _, _, err := c.Cure(f); !reflect.DeepEqual(err, wantErrNotFound) {
|
||||
t.Fatalf("Pathname: error = %#v, want %#v", err, wantErrNotFound)
|
||||
}
|
||||
}, pkg.MustDecode("bqtn69RkV5E7V7GhhgCFjcvbxmaqrO8DywamM4Tyjf10F6EJBHjXiIa_tFRtF4iN")},
|
||||
})
|
||||
}
|
||||
1790
internal/pkg/pkg.go
Normal file
File diff suppressed because it is too large
1187
internal/pkg/pkg_test.go
Normal file
File diff suppressed because it is too large
243
internal/pkg/tar.go
Normal file
@@ -0,0 +1,243 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"compress/bzip2"
|
||||
"compress/gzip"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"hakurei.app/container/check"
|
||||
)
|
||||
|
||||
const (
|
||||
// TarUncompressed denotes an uncompressed tarball.
|
||||
TarUncompressed = iota
|
||||
// TarGzip denotes a tarball compressed via [gzip].
|
||||
TarGzip
|
||||
// TarBzip2 denotes a tarball compressed via [bzip2].
|
||||
TarBzip2
|
||||
)
|
||||
|
||||
// A tarArtifact is an [Artifact] unpacking a tarball backed by a [FileArtifact].
|
||||
type tarArtifact struct {
|
||||
// Caller-supplied backing tarball.
|
||||
f Artifact
|
||||
// Compression on top of the tarball.
|
||||
compression uint64
|
||||
}
|
||||
|
||||
// tarArtifactNamed embeds tarArtifact for a [fmt.Stringer] tarball.
|
||||
type tarArtifactNamed struct {
|
||||
tarArtifact
|
||||
// Copied from tarArtifact.f.
|
||||
name string
|
||||
}
|
||||
|
||||
var _ fmt.Stringer = new(tarArtifactNamed)
|
||||
|
||||
// String returns the name of the underlying [Artifact] suffixed with "-unpack".
|
||||
func (a *tarArtifactNamed) String() string { return a.name + "-unpack" }
|
||||
|
||||
// NewTar returns a new [Artifact] backed by the supplied [Artifact] and
|
||||
// compression method. The source [Artifact] must be compatible with
|
||||
// [TContext.Open].
|
||||
func NewTar(a Artifact, compression uint64) Artifact {
|
||||
ta := tarArtifact{a, compression}
|
||||
if s, ok := a.(fmt.Stringer); ok {
|
||||
if name := s.String(); name != "" {
|
||||
return &tarArtifactNamed{ta, name}
|
||||
}
|
||||
}
|
||||
return &ta
|
||||
}
|
||||
|
||||
// NewHTTPGetTar is shorthand for passing the result of NewHTTPGet to NewTar.
|
||||
func NewHTTPGetTar(
|
||||
hc *http.Client,
|
||||
url string,
|
||||
checksum Checksum,
|
||||
compression uint64,
|
||||
) Artifact {
|
||||
return NewTar(NewHTTPGet(hc, url, checksum), compression)
|
||||
}
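// Construction sketch (illustrative only; the URL and checksum are
// placeholders): the two forms below produce equivalent artifacts for a
// gzip-compressed tarball fetched over HTTP.
//
//	a0 := NewTar(NewHTTPGet(nil, "https://example.org/src.tar.gz", sum), TarGzip)
//	a1 := NewHTTPGetTar(nil, "https://example.org/src.tar.gz", sum, TarGzip)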
|
||||
|
||||
// Kind returns the hardcoded [Kind] constant.
|
||||
func (a *tarArtifact) Kind() Kind { return KindTar }
|
||||
|
||||
// Params writes the compression method encoded as a little-endian uint64.
|
||||
func (a *tarArtifact) Params(ctx *IContext) {
|
||||
ctx.GetHash().Write(binary.LittleEndian.AppendUint64(nil, a.compression))
|
||||
}
|
||||
|
||||
// Dependencies returns a slice containing the backing file.
|
||||
func (a *tarArtifact) Dependencies() []Artifact {
|
||||
return []Artifact{a.f}
|
||||
}
|
||||
|
||||
// IsExclusive returns false: decompressor and tar reader are fully sequential.
|
||||
func (a *tarArtifact) IsExclusive() bool { return false }
|
||||
|
||||
// A DisallowedTypeflagError describes a disallowed typeflag encountered while
|
||||
// unpacking a tarball.
|
||||
type DisallowedTypeflagError byte
|
||||
|
||||
func (e DisallowedTypeflagError) Error() string {
|
||||
return "disallowed typeflag '" + string(e) + "'"
|
||||
}
|
||||
|
||||
// Cure cures the [Artifact], producing a directory at the work directory
// returned by [TContext.GetWorkDir].
|
||||
func (a *tarArtifact) Cure(t *TContext) (err error) {
|
||||
temp := t.GetTempDir()
|
||||
var tr io.ReadCloser
|
||||
if tr, err = t.Open(a.f); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
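// f captures the reader returned by t.Open; tr may be reassigned to a
// decompressor below, so the deferred function closes the possibly wrapped
// tr first and then the underlying reader exactly once. Wrapping tr in
// io.NopCloser ensures only the explicit f.Close call releases the
// underlying reader.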
defer func(f io.ReadCloser) {
|
||||
if err == nil {
|
||||
err = tr.Close()
|
||||
}
|
||||
|
||||
closeErr := f.Close()
|
||||
if err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
}(tr)
|
||||
tr = io.NopCloser(tr)
|
||||
|
||||
switch a.compression {
|
||||
case TarUncompressed:
|
||||
break
|
||||
|
||||
case TarGzip:
|
||||
if tr, err = gzip.NewReader(tr); err != nil {
|
||||
return
|
||||
}
|
||||
break
|
||||
|
||||
case TarBzip2:
|
||||
tr = io.NopCloser(bzip2.NewReader(tr))
|
||||
break
|
||||
|
||||
default:
|
||||
return os.ErrInvalid
|
||||
}
|
||||
|
||||
type dirTargetPerm struct {
|
||||
path *check.Absolute
|
||||
mode fs.FileMode
|
||||
}
|
||||
var madeDirectories []dirTargetPerm
|
||||
|
||||
if err = os.MkdirAll(temp.String(), 0700); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var header *tar.Header
|
||||
r := tar.NewReader(tr)
|
||||
for header, err = r.Next(); err == nil; header, err = r.Next() {
|
||||
typeflag := header.Typeflag
|
||||
if typeflag == 0 {
|
||||
if len(header.Name) > 0 && header.Name[len(header.Name)-1] == '/' {
|
||||
typeflag = tar.TypeDir
|
||||
} else {
|
||||
typeflag = tar.TypeReg
|
||||
}
|
||||
}
|
||||
|
||||
pathname := temp.Append(header.Name)
|
||||
if typeflag >= '0' && typeflag <= '9' && typeflag != tar.TypeDir {
|
||||
if err = os.MkdirAll(pathname.Dir().String(), 0700); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
switch typeflag {
|
||||
case tar.TypeReg:
|
||||
var f *os.File
|
||||
if f, err = os.OpenFile(
|
||||
pathname.String(),
|
||||
os.O_CREATE|os.O_EXCL|os.O_WRONLY,
|
||||
header.FileInfo().Mode()&0500,
|
||||
); err != nil {
|
||||
return
|
||||
}
|
||||
if _, err = io.Copy(f, r); err != nil {
|
||||
_ = f.Close()
|
||||
return
|
||||
} else if err = f.Close(); err != nil {
|
||||
return
|
||||
}
|
||||
break
|
||||
|
||||
case tar.TypeLink:
|
||||
if err = os.Link(
|
||||
temp.Append(header.Linkname).String(),
|
||||
pathname.String(),
|
||||
); err != nil {
|
||||
return
|
||||
}
|
||||
break
|
||||
|
||||
case tar.TypeSymlink:
|
||||
if err = os.Symlink(header.Linkname, pathname.String()); err != nil {
|
||||
return
|
||||
}
|
||||
break
|
||||
|
||||
case tar.TypeDir:
|
||||
madeDirectories = append(madeDirectories, dirTargetPerm{
|
||||
path: pathname,
|
||||
mode: header.FileInfo().Mode(),
|
||||
})
|
||||
if err = os.MkdirAll(pathname.String(), 0700); err != nil {
|
||||
return
|
||||
}
|
||||
break
|
||||
|
||||
case tar.TypeXGlobalHeader:
|
||||
continue // ignore
|
||||
|
||||
default:
|
||||
return DisallowedTypeflagError(typeflag)
|
||||
}
|
||||
}
|
||||
if errors.Is(err, io.EOF) {
|
||||
err = nil
|
||||
}
|
||||
if err == nil {
|
||||
for _, e := range madeDirectories {
|
||||
if err = os.Chmod(e.path.String(), e.mode&0500); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return
|
||||
}
|
||||
|
||||
if err = os.Chmod(temp.String(), 0700); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var entries []os.DirEntry
|
||||
if entries, err = os.ReadDir(temp.String()); err != nil {
|
||||
return
|
||||
}
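// If the unpacked tree consists of exactly one top-level directory, that
// directory is promoted to the work directory; otherwise the unpack root
// itself is renamed into place.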
|
||||
|
||||
if len(entries) == 1 && entries[0].IsDir() {
|
||||
p := temp.Append(entries[0].Name())
|
||||
if err = os.Chmod(p.String(), 0700); err != nil {
|
||||
return
|
||||
}
|
||||
err = os.Rename(p.String(), t.GetWorkDir().String())
|
||||
} else {
|
||||
err = os.Rename(temp.String(), t.GetWorkDir().String())
|
||||
}
|
||||
return
|
||||
}
|
||||
203
internal/pkg/tar_test.go
Normal file
@@ -0,0 +1,203 @@
|
||||
package pkg_test
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/sha512"
|
||||
"errors"
|
||||
"io/fs"
|
||||
"net/http"
|
||||
"os"
|
||||
"testing"
|
||||
"testing/fstest"
|
||||
|
||||
"hakurei.app/container/check"
|
||||
"hakurei.app/container/stub"
|
||||
"hakurei.app/internal/pkg"
|
||||
)
|
||||
|
||||
func TestTar(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
checkWithCache(t, []cacheTestCase{
|
||||
{"http", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||
checkTarHTTP(t, base, c, fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check": {Mode: 0400, Data: []byte{0, 0}},
|
||||
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||
"identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, pkg.MustDecode(
|
||||
"cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM",
|
||||
))
|
||||
}, pkg.MustDecode("sxbgyX-bPoezbha214n2lbQhiVfTUBkhZ0EX6zI7mmkMdrCdwuMwhMBJphLQsy94")},
|
||||
|
||||
{"http expand", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||
checkTarHTTP(t, base, c, fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"lib": {Mode: fs.ModeDir | 0700},
|
||||
"lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
}, pkg.MustDecode(
|
||||
"CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN",
|
||||
))
|
||||
}, pkg.MustDecode("4I8wx_h7NSJTlG5lbuz-GGEXrOg0GYC3M_503LYEBhv5XGWXfNIdIY9Q3eVSYldX")},
|
||||
})
|
||||
}
|
||||
|
||||
func checkTarHTTP(
|
||||
t *testing.T,
|
||||
base *check.Absolute,
|
||||
c *pkg.Cache,
|
||||
testdataFsys fs.FS,
|
||||
wantChecksum pkg.Checksum,
|
||||
) {
|
||||
var testdata string
|
||||
{
|
||||
var buf bytes.Buffer
|
||||
w := tar.NewWriter(&buf)
|
||||
if err := w.AddFS(testdataFsys); err != nil {
|
||||
t.Fatalf("AddFS: error = %v", err)
|
||||
}
|
||||
if err := w.Close(); err != nil {
|
||||
t.Fatalf("Close: error = %v", err)
|
||||
}
|
||||
|
||||
var zbuf bytes.Buffer
|
||||
gw := gzip.NewWriter(&zbuf)
|
||||
if _, err := gw.Write(buf.Bytes()); err != nil {
|
||||
t.Fatalf("Write: error = %v", err)
|
||||
}
|
||||
if err := gw.Close(); err != nil {
|
||||
t.Fatalf("Close: error = %v", err)
|
||||
}
|
||||
testdata = zbuf.String()
|
||||
}
|
||||
|
||||
testdataChecksum := func() pkg.Checksum {
|
||||
h := sha512.New384()
|
||||
h.Write([]byte(testdata))
|
||||
return (pkg.Checksum)(h.Sum(nil))
|
||||
}()
|
||||
|
||||
var transport http.Transport
|
||||
client := http.Client{Transport: &transport}
|
||||
transport.RegisterProtocol("file", http.NewFileTransportFS(fstest.MapFS{
|
||||
"testdata": {Data: []byte(testdata), Mode: 0400},
|
||||
}))
|
||||
|
||||
wantIdent := func() pkg.ID {
|
||||
h := sha512.New384()
|
||||
h.Write([]byte{byte(pkg.KindTar), 0, 0, 0, 0, 0, 0, 0})
|
||||
h.Write([]byte{pkg.TarGzip, 0, 0, 0, 0, 0, 0, 0})
|
||||
h.Write([]byte{byte(pkg.KindHTTPGet), 0, 0, 0, 0, 0, 0, 0})
|
||||
|
||||
h0 := sha512.New384()
|
||||
h0.Write([]byte{byte(pkg.KindHTTPGet), 0, 0, 0, 0, 0, 0, 0})
|
||||
h0.Write([]byte("file:///testdata"))
|
||||
h.Write(h0.Sum(nil))
|
||||
return pkg.ID(h.Sum(nil))
|
||||
}()
|
||||
|
||||
a := pkg.NewHTTPGetTar(
|
||||
&client,
|
||||
"file:///testdata",
|
||||
testdataChecksum,
|
||||
pkg.TarGzip,
|
||||
)
|
||||
|
||||
tarDir := stubArtifact{
|
||||
kind: pkg.KindExec,
|
||||
params: []byte("directory containing a single regular file"),
|
||||
cure: func(t *pkg.TContext) error {
|
||||
work := t.GetWorkDir()
|
||||
if err := os.MkdirAll(work.String(), 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(
|
||||
work.Append("sample.tar.gz").String(),
|
||||
[]byte(testdata),
|
||||
0400,
|
||||
)
|
||||
},
|
||||
}
|
||||
tarDirMulti := stubArtifact{
|
||||
kind: pkg.KindExec,
|
||||
params: []byte("directory containing a multiple entries"),
|
||||
cure: func(t *pkg.TContext) error {
|
||||
work := t.GetWorkDir()
|
||||
if err := os.MkdirAll(work.Append(
|
||||
"garbage",
|
||||
).String(), 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(
|
||||
work.Append("sample.tar.gz").String(),
|
||||
[]byte(testdata),
|
||||
0400,
|
||||
)
|
||||
},
|
||||
}
|
||||
tarDirType := stubArtifact{
|
||||
kind: pkg.KindExec,
|
||||
params: []byte("directory containing a symbolic link"),
|
||||
cure: func(t *pkg.TContext) error {
|
||||
work := t.GetWorkDir()
|
||||
if err := os.MkdirAll(work.String(), 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Symlink(
|
||||
work.String(),
|
||||
work.Append("sample.tar.gz").String(),
|
||||
)
|
||||
},
|
||||
}
|
||||
// destroy these to avoid including them in the flatten test case
|
||||
defer newDestroyArtifactFunc(&tarDir)(t, base, c)
|
||||
defer newDestroyArtifactFunc(&tarDirMulti)(t, base, c)
|
||||
defer newDestroyArtifactFunc(&tarDirType)(t, base, c)
|
||||
|
||||
cureMany(t, c, []cureStep{
|
||||
{"file", a, base.Append(
|
||||
"identifier",
|
||||
pkg.Encode(wantIdent),
|
||||
), wantChecksum, nil},
|
||||
|
||||
{"directory", pkg.NewTar(
|
||||
&tarDir,
|
||||
pkg.TarGzip,
|
||||
), ignorePathname, wantChecksum, nil},
|
||||
|
||||
{"multiple entries", pkg.NewTar(
|
||||
&tarDirMulti,
|
||||
pkg.TarGzip,
|
||||
), nil, pkg.Checksum{}, errors.New(
|
||||
"input directory does not contain a single regular file",
|
||||
)},
|
||||
|
||||
{"bad type", pkg.NewTar(
|
||||
&tarDirType,
|
||||
pkg.TarGzip,
|
||||
), nil, pkg.Checksum{}, errors.New(
|
||||
"input directory does not contain a single regular file",
|
||||
)},
|
||||
|
||||
{"error passthrough", pkg.NewTar(&stubArtifact{
|
||||
kind: pkg.KindExec,
|
||||
params: []byte("doomed artifact"),
|
||||
cure: func(t *pkg.TContext) error {
|
||||
return stub.UniqueError(0xcafe)
|
||||
},
|
||||
}, pkg.TarGzip), nil, pkg.Checksum{}, stub.UniqueError(0xcafe)},
|
||||
})
|
||||
}
|
||||
268
internal/pkg/testdata/main.go
vendored
Normal file
@@ -0,0 +1,268 @@
|
||||
//go:build testtool
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/gob"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"hakurei.app/container/check"
|
||||
"hakurei.app/container/fhs"
|
||||
"hakurei.app/container/vfs"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.SetFlags(0)
|
||||
log.SetPrefix("testtool: ")
|
||||
|
||||
var hostNet, layers, promote bool
|
||||
if len(os.Args) == 2 && os.Args[0] == "testtool" {
|
||||
switch os.Args[1] {
|
||||
case "net":
|
||||
hostNet = true
|
||||
log.SetPrefix("testtool(net): ")
|
||||
break
|
||||
|
||||
case "layers":
|
||||
layers = true
|
||||
log.SetPrefix("testtool(layers): ")
|
||||
break
|
||||
|
||||
case "promote":
|
||||
promote = true
|
||||
log.SetPrefix("testtool(promote): ")
|
||||
|
||||
default:
|
||||
log.Fatalf("Args: %q", os.Args)
|
||||
return
|
||||
}
|
||||
} else if wantArgs := []string{"testtool"}; !slices.Equal(os.Args, wantArgs) {
|
||||
log.Fatalf("Args: %q, want %q", os.Args, wantArgs)
|
||||
}
|
||||
|
||||
var overlayRoot bool
|
||||
wantEnv := []string{"HAKUREI_TEST=1"}
|
||||
if len(os.Environ()) == 2 {
|
||||
overlayRoot = true
|
||||
if !layers && !promote {
|
||||
log.SetPrefix("testtool(overlay root): ")
|
||||
}
|
||||
wantEnv = []string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"}
|
||||
}
|
||||
if !slices.Equal(wantEnv, os.Environ()) {
|
||||
log.Fatalf("Environ: %q, want %q", os.Environ(), wantEnv)
|
||||
}
|
||||
|
||||
var overlayWork bool
|
||||
const (
|
||||
wantExec = "/opt/bin/testtool"
|
||||
wantExecWork = "/work/bin/testtool"
|
||||
)
|
||||
var iftPath string
|
||||
if got, err := os.Executable(); err != nil {
|
||||
log.Fatalf("Executable: error = %v", err)
|
||||
} else {
|
||||
iftPath = path.Join(path.Dir(path.Dir(got)), "ift")
|
||||
|
||||
if got != wantExec {
|
||||
switch got {
|
||||
case wantExecWork:
|
||||
overlayWork = true
|
||||
log.SetPrefix("testtool(overlay work): ")
|
||||
|
||||
default:
|
||||
log.Fatalf("Executable: %q, want %q", got, wantExec)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
wantHostname := "cure"
|
||||
if hostNet {
|
||||
wantHostname += "-net"
|
||||
}
|
||||
|
||||
if hostname, err := os.Hostname(); err != nil {
|
||||
log.Fatalf("Hostname: error = %v", err)
|
||||
} else if hostname != wantHostname {
|
||||
log.Fatalf("Hostname: %q, want %q", hostname, wantHostname)
|
||||
}
|
||||
|
||||
var m *vfs.MountInfo
|
||||
if f, err := os.Open(fhs.Proc + "self/mountinfo"); err != nil {
|
||||
log.Fatalf("Open: error = %v", err)
|
||||
} else {
|
||||
err = vfs.NewMountInfoDecoder(f).Decode(&m)
|
||||
closeErr := f.Close()
|
||||
if err != nil {
|
||||
log.Fatalf("Decode: error = %v", err)
|
||||
}
|
||||
if closeErr != nil {
|
||||
log.Fatalf("Close: error = %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if ift, err := net.Interfaces(); err != nil {
|
||||
log.Fatal(err)
|
||||
} else if !hostNet {
|
||||
if len(ift) != 1 || ift[0].Name != "lo" {
|
||||
log.Fatalln("got interfaces", strings.Join(slices.Collect(func(yield func(ifn string) bool) {
|
||||
for _, ifi := range ift {
|
||||
if !yield(ifi.Name) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}), ", "))
|
||||
}
|
||||
} else {
|
||||
var iftParent []net.Interface
|
||||
|
||||
var r *os.File
|
||||
if r, err = os.Open(iftPath); err != nil {
|
||||
log.Fatal(err)
|
||||
} else {
|
||||
err = gob.NewDecoder(r).Decode(&iftParent)
|
||||
closeErr := r.Close()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if closeErr != nil {
|
||||
log.Fatal(closeErr)
|
||||
}
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(ift, iftParent) {
|
||||
log.Fatalf("Interfaces: %#v, want %#v", ift, iftParent)
|
||||
}
|
||||
}
|
||||
|
||||
const checksumEmptyDir = "MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"
|
||||
ident := "U2cbgVgEtjfRuvHfE1cQnZ3t8yoexULQyo_VLgvxAVJSsobMcNaFIsuDWtmt7kzK"
|
||||
log.Println(m)
|
||||
next := func() { m = m.Next; log.Println(m) }
|
||||
|
||||
if overlayRoot {
|
||||
ident = "5ey2wpmMpj483YYa7ZZQciYLA2cx3_l167JCqWW4Pd-5DVp81dj9EsBtVTwYptF6"
|
||||
|
||||
if m.Root != "/" || m.Target != "/" ||
|
||||
m.Source != "overlay" || m.FsType != "overlay" {
|
||||
log.Fatal("unexpected root mount entry")
|
||||
}
|
||||
var lowerdir string
|
||||
for _, o := range strings.Split(m.FsOptstr, ",") {
|
||||
const lowerdirKey = "lowerdir="
|
||||
if strings.HasPrefix(o, lowerdirKey) {
|
||||
lowerdir = o[len(lowerdirKey):]
|
||||
}
|
||||
}
|
||||
if !layers {
|
||||
if path.Base(lowerdir) != checksumEmptyDir {
|
||||
log.Fatal("unexpected artifact checksum")
|
||||
}
|
||||
} else {
|
||||
ident = "tfjrsVuBuFgzWgwz-yPppFtylYuC1VFWnKhyBiHbWTGkyz8lt7Ee9QXWaIHPXs4x"
|
||||
|
||||
lowerdirsEscaped := strings.Split(lowerdir, ":")
|
||||
lowerdirs := lowerdirsEscaped[:0]
|
||||
// ignore the option separator since it does not appear in ident
|
||||
for i, e := range lowerdirsEscaped {
|
||||
if len(e) > 0 &&
|
||||
e[len(e)-1] == check.SpecialOverlayEscape[0] &&
|
||||
(len(e) == 1 || e[len(e)-2] != check.SpecialOverlayEscape[0]) {
|
||||
// ignore escaped pathname separator since it does not
|
||||
// appear in ident
|
||||
|
||||
e = e[:len(e)-1]
|
||||
if i+1 < len(lowerdirsEscaped) {
|
||||
lowerdirsEscaped[i+1] = e + lowerdirsEscaped[i+1]
|
||||
continue
|
||||
}
|
||||
}
|
||||
lowerdirs = append(lowerdirs, e)
|
||||
}
|
||||
|
||||
if len(lowerdirs) != 2 ||
|
||||
path.Base(lowerdirs[0]) != "MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU" ||
|
||||
path.Base(lowerdirs[1]) != "nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK" {
|
||||
log.Fatalf("unexpected lowerdirs %s", strings.Join(lowerdirs, ", "))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if hostNet {
|
||||
ident = "QdsJhGgnk5N2xdUNGcndXQxFKifxf1V_2t9X8CQ-pDcg24x6mGJC_BiLfGbs6Qml"
|
||||
}
|
||||
|
||||
if m.Root != "/sysroot" || m.Target != "/" {
|
||||
log.Fatal("unexpected root mount entry")
|
||||
}
|
||||
|
||||
next()
|
||||
if path.Base(m.Root) != "OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb" {
|
||||
log.Fatal("unexpected file artifact checksum")
|
||||
}
|
||||
|
||||
next()
|
||||
if path.Base(m.Root) != checksumEmptyDir {
|
||||
log.Fatal("unexpected artifact checksum")
|
||||
}
|
||||
}
|
||||
|
||||
if promote {
|
||||
ident = "O-6VjlIUxc4PYLf5v35uhIeL8kkYCbHYklqlmDjFPXe0m4j6GkUDg5qwTzBRESnf"
|
||||
}
|
||||
|
||||
next() // testtool artifact
|
||||
|
||||
next()
|
||||
if overlayWork {
|
||||
ident = "acaDzHZv40dZaz4cGAXayqbRMgbEOuiuiUijZL8IgDQvyeCNMFE3onBMYfny-kXA"
|
||||
if m.Root != "/" || m.Target != "/work" ||
|
||||
m.Source != "overlay" || m.FsType != "overlay" {
|
||||
log.Fatal("unexpected work mount entry")
|
||||
}
|
||||
} else {
|
||||
if path.Base(m.Root) != ident || m.Target != "/work" {
|
||||
log.Fatal("unexpected work mount entry")
|
||||
}
|
||||
}
|
||||
|
||||
next()
|
||||
if path.Base(m.Root) != ident || m.Target != "/tmp" {
|
||||
log.Fatal("unexpected temp mount entry")
|
||||
}
|
||||
|
||||
next()
|
||||
if m.Root != "/" || m.Target != "/proc" || m.Source != "proc" || m.FsType != "proc" {
|
||||
log.Fatal("unexpected proc mount entry")
|
||||
}
|
||||
|
||||
next()
|
||||
if m.Root != "/" || m.Target != "/dev" || m.Source != "devtmpfs" || m.FsType != "tmpfs" {
|
||||
log.Fatal("unexpected dev mount entry")
|
||||
}
|
||||
|
||||
for i := 0; i < 9; i++ { // private /dev entries
|
||||
next()
|
||||
}
|
||||
|
||||
if m.Next != nil {
|
||||
log.Println("unexpected extra mount entries")
|
||||
for m.Next != nil {
|
||||
next()
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
checkData := []byte{0}
|
||||
if hostNet {
|
||||
checkData = []byte("net")
|
||||
}
|
||||
if err := os.WriteFile("check", checkData, 0400); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
98
internal/rosa/acl.go
Normal file
@@ -0,0 +1,98 @@
|
||||
package rosa
|
||||
|
||||
import "hakurei.app/internal/pkg"
|
||||
|
||||
func (t Toolchain) newAttr() pkg.Artifact {
|
||||
const (
|
||||
version = "2.5.2"
|
||||
checksum = "YWEphrz6vg1sUMmHHVr1CRo53pFXRhq_pjN-AlG8UgwZK1y6m7zuDhxqJhD0SV0l"
|
||||
)
|
||||
return t.New("attr-"+version, false, []pkg.Artifact{
|
||||
t.Load(Make),
|
||||
t.Load(Perl),
|
||||
}, nil, nil, `
|
||||
ln -s ../../system/bin/perl /usr/bin
|
||||
|
||||
cd "$(mktemp -d)"
|
||||
/usr/src/attr/configure \
|
||||
--prefix=/system \
|
||||
--build="${ROSA_TRIPLE}" \
|
||||
--enable-static
|
||||
make "-j$(nproc)" check
|
||||
make DESTDIR=/work install
|
||||
`, pkg.Path(AbsUsrSrc.Append("attr"), true, t.NewPatchedSource(
|
||||
"attr", version, pkg.NewHTTPGetTar(
|
||||
nil,
|
||||
"https://download.savannah.nongnu.org/releases/attr/"+
|
||||
"attr-"+version+".tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
), true, [2]string{"libgen-basename", `From 8a80d895dfd779373363c3a4b62ecce5a549efb2 Mon Sep 17 00:00:00 2001
|
||||
From: "Haelwenn (lanodan) Monnier" <contact@hacktivis.me>
|
||||
Date: Sat, 30 Mar 2024 10:17:10 +0100
|
||||
Subject: tools/attr.c: Add missing libgen.h include for basename(3)
|
||||
|
||||
Fixes compilation issue with musl and modern C99 compilers.
|
||||
|
||||
See: https://bugs.gentoo.org/926294
|
||||
---
|
||||
tools/attr.c | 1 +
|
||||
1 file changed, 1 insertion(+)
|
||||
|
||||
diff --git a/tools/attr.c b/tools/attr.c
|
||||
index f12e4af..6a3c1e9 100644
|
||||
--- a/tools/attr.c
|
||||
+++ b/tools/attr.c
|
||||
@@ -28,6 +28,7 @@
|
||||
#include <errno.h>
|
||||
#include <string.h>
|
||||
#include <locale.h>
|
||||
+#include <libgen.h>
|
||||
|
||||
#include <attr/attributes.h>
|
||||
|
||||
--
|
||||
cgit v1.1`}, [2]string{"musl-errno", `diff --git a/test/attr.test b/test/attr.test
|
||||
index 6ce2f9b..e9bde92 100644
|
||||
--- a/test/attr.test
|
||||
+++ b/test/attr.test
|
||||
@@ -11,7 +11,7 @@ Try various valid and invalid names
|
||||
|
||||
$ touch f
|
||||
$ setfattr -n user -v value f
|
||||
- > setfattr: f: Operation not supported
|
||||
+ > setfattr: f: Not supported
|
||||
|
||||
$ setfattr -n user. -v value f
|
||||
> setfattr: f: Invalid argument
|
||||
`},
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[Attr] = Toolchain.newAttr }
|
||||
|
||||
func (t Toolchain) newACL() pkg.Artifact {
|
||||
const (
|
||||
version = "2.3.2"
|
||||
checksum = "-fY5nwH4K8ZHBCRXrzLdguPkqjKI6WIiGu4dBtrZ1o0t6AIU73w8wwJz_UyjIS0P"
|
||||
)
|
||||
return t.New("acl-"+version, false, []pkg.Artifact{
|
||||
t.Load(Make),
|
||||
|
||||
t.Load(Attr),
|
||||
}, nil, nil, `
|
||||
cd "$(mktemp -d)"
|
||||
/usr/src/acl/configure \
|
||||
--prefix=/system \
|
||||
--build="${ROSA_TRIPLE}" \
|
||||
--enable-static
|
||||
make "-j$(nproc)"
|
||||
make DESTDIR=/work install
|
||||
`, pkg.Path(AbsUsrSrc.Append("acl"), true, pkg.NewHTTPGetTar(
|
||||
nil,
|
||||
"https://download.savannah.nongnu.org/releases/acl/"+
|
||||
"acl-"+version+".tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[ACL] = Toolchain.newACL }
|
||||
70
internal/rosa/all.go
Normal file
@@ -0,0 +1,70 @@
|
||||
package rosa
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"hakurei.app/internal/pkg"
|
||||
)
|
||||
|
||||
// PArtifact is a lazily-initialised [pkg.Artifact] preset.
|
||||
type PArtifact int
|
||||
|
||||
const (
|
||||
ACL PArtifact = iota
|
||||
Attr
|
||||
Autoconf
|
||||
Bash
|
||||
Busybox
|
||||
CMake
|
||||
Coreutils
|
||||
Diffutils
|
||||
Gettext
|
||||
Git
|
||||
Go
|
||||
Gperf
|
||||
Hakurei
|
||||
KernelHeaders
|
||||
LibXau
|
||||
Libexpat
|
||||
Libffi
|
||||
Libgd
|
||||
Libseccomp
|
||||
Libxml2
|
||||
M4
|
||||
Make
|
||||
Meson
|
||||
Ninja
|
||||
Patch
|
||||
Perl
|
||||
PkgConfig
|
||||
Python
|
||||
Rsync
|
||||
Setuptools
|
||||
Wayland
|
||||
WaylandProtocols
|
||||
XCB
|
||||
XCBProto
|
||||
Xproto
|
||||
Zlib
|
||||
|
||||
// _presetEnd is the total number of presets and does not denote a preset.
|
||||
_presetEnd
|
||||
)
|
||||
|
||||
var (
|
||||
// artifactsF is an array of constructor functions indexed by [PArtifact].
|
||||
artifactsF [_presetEnd]func(t Toolchain) pkg.Artifact
|
||||
|
||||
// artifacts stores the result of artifactsF.
|
||||
artifacts [_toolchainEnd][len(artifactsF)]pkg.Artifact
|
||||
// artifactsOnce is for lazy initialisation of artifacts.
|
||||
artifactsOnce [_toolchainEnd][len(artifactsF)]sync.Once
|
||||
)
|
||||
|
||||
// Load returns the [pkg.Artifact] for preset p, constructing it at most once
// per [Toolchain].
|
||||
func (t Toolchain) Load(p PArtifact) pkg.Artifact {
|
||||
artifactsOnce[t][p].Do(func() {
|
||||
artifacts[t][p] = artifactsF[p](t)
|
||||
})
|
||||
return artifacts[t][p]
|
||||
}
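// Registration and lookup sketch (illustrative only): each preset constructor
// registers itself in an init function and is resolved lazily through Load,
// which runs the constructor at most once per (Toolchain, PArtifact) pair.
//
//	func init() { artifactsF[Make] = Toolchain.newMake } // as in gnu.go
//
//	a0 := t.Load(Make)
//	a1 := t.Load(Make) // same pkg.Artifact value as a0; newMake ran once for t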
|
||||
359
internal/rosa/busybox.go
Normal file
@@ -0,0 +1,359 @@
|
||||
package rosa
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"hakurei.app/container/fhs"
|
||||
"hakurei.app/internal/pkg"
|
||||
)
|
||||
|
||||
// busyboxBin is a busybox binary distribution installed under bin/busybox.
|
||||
type busyboxBin struct {
|
||||
// Underlying busybox binary.
|
||||
bin pkg.FileArtifact
|
||||
}
|
||||
|
||||
// Kind returns the hardcoded [pkg.Kind] value.
|
||||
func (a busyboxBin) Kind() pkg.Kind { return kindBusyboxBin }
|
||||
|
||||
// Params is a noop.
|
||||
func (a busyboxBin) Params(*pkg.IContext) {}
|
||||
|
||||
// IsExclusive returns false: Cure performs a trivial filesystem write.
|
||||
func (busyboxBin) IsExclusive() bool { return false }
|
||||
|
||||
// Dependencies returns a slice containing the underlying busybox [pkg.FileArtifact].
|
||||
func (a busyboxBin) Dependencies() []pkg.Artifact {
|
||||
return []pkg.Artifact{a.bin}
|
||||
}
|
||||
|
||||
// String returns the reporting name of the underlying file prefixed with "expand-".
|
||||
func (a busyboxBin) String() string {
|
||||
return "expand-" + a.bin.(fmt.Stringer).String()
|
||||
}
|
||||
|
||||
// Cure installs the underlying busybox [pkg.FileArtifact] to bin/busybox.
|
||||
func (a busyboxBin) Cure(t *pkg.TContext) (err error) {
|
||||
var r io.ReadCloser
|
||||
if r, err = t.Open(a.bin); err != nil {
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
closeErr := r.Close()
|
||||
if err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
}()
|
||||
|
||||
binDir := t.GetWorkDir().Append("bin")
|
||||
if err = os.MkdirAll(binDir.String(), 0700); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var w *os.File
|
||||
if w, err = os.OpenFile(
|
||||
binDir.Append("busybox").String(),
|
||||
os.O_WRONLY|os.O_CREATE|os.O_EXCL,
|
||||
0500,
|
||||
); err != nil {
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
closeErr := w.Close()
|
||||
if err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
}()
|
||||
|
||||
_, err = io.Copy(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// newBusyboxBin returns a [pkg.Artifact] containing a busybox installation from
|
||||
// the https://busybox.net/downloads/binaries/ binary release.
|
||||
func newBusyboxBin() pkg.Artifact {
|
||||
const (
|
||||
version = "1.35.0"
|
||||
checksum = "L7OBIsPu9enNHn7FqpBT1kOg_mCLNmetSeNMA3i4Y60Z5jTgnlX3qX3zcQtLx5AB"
|
||||
)
|
||||
return pkg.NewExec(
|
||||
"busybox-bin-"+version, nil, pkg.ExecTimeoutMax, false,
|
||||
fhs.AbsRoot, []string{
|
||||
"PATH=/system/bin",
|
||||
},
|
||||
AbsSystem.Append("bin", "busybox"),
|
||||
[]string{"hush", "-c", "" +
|
||||
"busybox mkdir -p /work/system/bin/ && " +
|
||||
"busybox cp /system/bin/busybox /work/system/bin/ && " +
|
||||
"busybox --install -s /work/system/bin/"},
|
||||
pkg.Path(AbsSystem, true, busyboxBin{pkg.NewHTTPGet(
|
||||
&http.Client{Transport: &http.Transport{
|
||||
// busybox website is really slow to respond
|
||||
TLSHandshakeTimeout: 2 * time.Minute,
|
||||
}},
|
||||
"https://busybox.net/downloads/binaries/"+
|
||||
version+"-"+linuxArch()+"-linux-musl/busybox",
|
||||
mustDecode(checksum),
|
||||
)}),
|
||||
)
|
||||
}
|
||||
|
||||
func (t Toolchain) newBusybox() pkg.Artifact {
|
||||
const (
|
||||
version = "1.37.0"
|
||||
checksum = "Ial94Tnt7esJ_YEeb0AxunVL6MGYFyOw7Rtu2o87CXCi1TLrc6rlznVsN1rZk7it"
|
||||
)
|
||||
|
||||
var env []string
|
||||
if t == toolchainStage3 {
|
||||
env = append(env, "EXTRA_LDFLAGS=-static")
|
||||
}
|
||||
|
||||
return t.New("busybox-"+version, false, stage3Concat(t, []pkg.Artifact{},
|
||||
t.Load(Make),
|
||||
t.Load(KernelHeaders),
|
||||
), nil, slices.Concat([]string{
|
||||
"ROSA_BUSYBOX_ENABLE=" + strings.Join([]string{
|
||||
"STATIC",
|
||||
"PIE",
|
||||
}, " "),
|
||||
"ROSA_BUSYBOX_DISABLE=" + strings.Join([]string{
|
||||
"FEATURE_IPV6",
|
||||
"FEATURE_PREFER_IPV4_ADDRESS",
|
||||
"FEATURE_HWIB",
|
||||
"ARP",
|
||||
"ARPING",
|
||||
"BRCTL",
|
||||
"FEATURE_BRCTL_FANCY",
|
||||
"FEATURE_BRCTL_SHOW",
|
||||
"DNSD",
|
||||
"ETHER_WAKE",
|
||||
"FTPD",
|
||||
"FEATURE_FTPD_WRITE",
|
||||
"FEATURE_FTPD_ACCEPT_BROKEN_LIST",
|
||||
"FEATURE_FTPD_AUTHENTICATION",
|
||||
"FTPGET",
|
||||
"FTPPUT",
|
||||
"FEATURE_FTPGETPUT_LONG_OPTIONS",
|
||||
"HOSTNAME",
|
||||
"DNSDOMAINNAME",
|
||||
"HTTPD",
|
||||
"FEATURE_HTTPD_PORT_DEFAULT",
|
||||
"FEATURE_HTTPD_RANGES",
|
||||
"FEATURE_HTTPD_SETUID",
|
||||
"FEATURE_HTTPD_BASIC_AUTH",
|
||||
"FEATURE_HTTPD_AUTH_MD5",
|
||||
"FEATURE_HTTPD_CGI",
|
||||
"FEATURE_HTTPD_CONFIG_WITH_SCRIPT_INTERPR",
|
||||
"FEATURE_HTTPD_SET_REMOTE_PORT_TO_ENV",
|
||||
"FEATURE_HTTPD_ENCODE_URL_STR",
|
||||
"FEATURE_HTTPD_ERROR_PAGES",
|
||||
"FEATURE_HTTPD_PROXY",
|
||||
"FEATURE_HTTPD_GZIP",
|
||||
"FEATURE_HTTPD_ETAG",
|
||||
"FEATURE_HTTPD_LAST_MODIFIED",
|
||||
"FEATURE_HTTPD_DATE",
|
||||
"FEATURE_HTTPD_ACL_IP",
|
||||
"IFCONFIG",
|
||||
"FEATURE_IFCONFIG_STATUS",
|
||||
"FEATURE_IFCONFIG_SLIP",
|
||||
"FEATURE_IFCONFIG_MEMSTART_IOADDR_IRQ",
|
||||
"FEATURE_IFCONFIG_HW",
|
||||
"FEATURE_IFCONFIG_BROADCAST_PLUS",
|
||||
"IFENSLAVE",
|
||||
"IFPLUGD",
|
||||
"IFUP",
|
||||
"IFDOWN",
|
||||
"IFUPDOWN_IFSTATE_PATH",
|
||||
"FEATURE_IFUPDOWN_IP",
|
||||
"FEATURE_IFUPDOWN_IPV4",
|
||||
"FEATURE_IFUPDOWN_IPV6",
|
||||
"FEATURE_IFUPDOWN_MAPPING",
|
||||
"INETD",
|
||||
"FEATURE_INETD_SUPPORT_BUILTIN_ECHO",
|
||||
"FEATURE_INETD_SUPPORT_BUILTIN_DISCARD",
|
||||
"FEATURE_INETD_SUPPORT_BUILTIN_TIME",
|
||||
"FEATURE_INETD_SUPPORT_BUILTIN_DAYTIME",
|
||||
"FEATURE_INETD_SUPPORT_BUILTIN_CHARGEN",
|
||||
"IP",
|
||||
"IPADDR",
|
||||
"IPLINK",
|
||||
"IPROUTE",
|
||||
"IPTUNNEL",
|
||||
"IPRULE",
|
||||
"IPNEIGH",
|
||||
"FEATURE_IP_ADDRESS",
|
||||
"FEATURE_IP_LINK",
|
||||
"FEATURE_IP_LINK_CAN",
|
||||
"FEATURE_IP_ROUTE",
|
||||
"FEATURE_IP_ROUTE_DIR",
|
||||
"FEATURE_IP_TUNNEL",
|
||||
"FEATURE_IP_RULE",
|
||||
"FEATURE_IP_NEIGH",
|
||||
"IPCALC",
|
||||
"FEATURE_IPCALC_LONG_OPTIONS",
|
||||
"FEATURE_IPCALC_FANCY",
|
||||
"FAKEIDENTD",
|
||||
"NAMEIF",
|
||||
"FEATURE_NAMEIF_EXTENDED",
|
||||
"NBDCLIENT",
|
||||
"NC",
|
||||
"NC_SERVER",
|
||||
"NC_EXTRA",
|
||||
"NC_110_COMPAT",
|
||||
"NETSTAT",
|
||||
"FEATURE_NETSTAT_WIDE",
|
||||
"FEATURE_NETSTAT_PRG",
|
||||
"NSLOOKUP",
|
||||
"FEATURE_NSLOOKUP_BIG",
|
||||
"FEATURE_NSLOOKUP_LONG_OPTIONS",
|
||||
"NTPD",
|
||||
"FEATURE_NTPD_SERVER",
|
||||
"FEATURE_NTPD_CONF",
|
||||
"FEATURE_NTP_AUTH",
|
||||
"PING",
|
||||
"PING6",
|
||||
"FEATURE_FANCY_PING",
|
||||
"PSCAN",
|
||||
"ROUTE",
|
||||
"SLATTACH",
|
||||
"SSL_CLIENT",
|
||||
"TC",
|
||||
"FEATURE_TC_INGRESS",
|
||||
"TCPSVD",
|
||||
"UDPSVD",
|
||||
"TELNET",
|
||||
"FEATURE_TELNET_TTYPE",
|
||||
"FEATURE_TELNET_AUTOLOGIN",
|
||||
"FEATURE_TELNET_WIDTH",
|
||||
"TELNETD",
|
||||
"FEATURE_TELNETD_STANDALONE",
|
||||
"FEATURE_TELNETD_PORT_DEFAULT",
|
||||
"FEATURE_TELNETD_INETD_WAIT",
|
||||
"TFTP",
|
||||
"FEATURE_TFTP_PROGRESS_BAR",
|
||||
"FEATURE_TFTP_HPA_COMPAT",
|
||||
"TFTPD",
|
||||
"FEATURE_TFTP_GET",
|
||||
"FEATURE_TFTP_PUT",
|
||||
"FEATURE_TFTP_BLOCKSIZE",
|
||||
"TLS",
|
||||
"TRACEROUTE",
|
||||
"TRACEROUTE6",
|
||||
"FEATURE_TRACEROUTE_VERBOSE",
|
||||
"FEATURE_TRACEROUTE_USE_ICMP",
|
||||
"TUNCTL",
|
||||
"FEATURE_TUNCTL_UG",
|
||||
"VCONFIG",
|
||||
"WGET",
|
||||
"FEATURE_WGET_LONG_OPTIONS",
|
||||
"FEATURE_WGET_STATUSBAR",
|
||||
"FEATURE_WGET_FTP",
|
||||
"FEATURE_WGET_AUTHENTICATION",
|
||||
"FEATURE_WGET_TIMEOUT",
|
||||
"FEATURE_WGET_HTTPS",
|
||||
"FEATURE_WGET_OPENSSL",
|
||||
"WHOIS",
|
||||
"ZCIP",
|
||||
"UDHCPD",
|
||||
"FEATURE_UDHCPD_BOOTP",
|
||||
"FEATURE_UDHCPD_WRITE_LEASES_EARLY",
|
||||
"DHCPD_LEASES_FILE",
|
||||
"DUMPLEASES",
|
||||
"DHCPRELAY",
|
||||
"UDHCPC",
|
||||
"FEATURE_UDHCPC_ARPING",
|
||||
"FEATURE_UDHCPC_SANITIZEOPT",
|
||||
"UDHCPC_DEFAULT_SCRIPT",
|
||||
"UDHCPC6_DEFAULT_SCRIPT",
|
||||
"UDHCPC6",
|
||||
"FEATURE_UDHCPC6_RFC3646",
|
||||
"FEATURE_UDHCPC6_RFC4704",
|
||||
"FEATURE_UDHCPC6_RFC4833",
|
||||
"FEATURE_UDHCPC6_RFC5970",
|
||||
}, " "),
|
||||
}, env), `
|
||||
config_enable() {
|
||||
for ent in "$@"; do
|
||||
sed "s/^# CONFIG_${ent}.*/CONFIG_${ent}=y/" -i .config
|
||||
shift
|
||||
done
|
||||
}
|
||||
|
||||
config_disable() {
|
||||
for ent in "$@"; do
|
||||
sed "s/^CONFIG_${ent}=y/# CONFIG_${ent} is not set/" -i .config
|
||||
shift
|
||||
done
|
||||
}
|
||||
|
||||
cat > /bin/gcc << EOF
|
||||
exec clang \
|
||||
-Wno-ignored-optimization-argument \
|
||||
${LDFLAGS} \
|
||||
\$@
|
||||
EOF
|
||||
chmod +x /bin/gcc
|
||||
|
||||
cd /usr/src/busybox
|
||||
chmod +w editors editors/awk.c
|
||||
patch -p 1 < /usr/src/patches/awk-fix-literal-backslash.patch
|
||||
|
||||
cd "$(mktemp -d)"
|
||||
make \
|
||||
KBUILD_SRC=/usr/src/busybox \
|
||||
-f /usr/src/busybox/Makefile \
|
||||
defconfig
|
||||
|
||||
config_enable $ROSA_BUSYBOX_ENABLE
|
||||
config_disable $ROSA_BUSYBOX_DISABLE
|
||||
ln -s ../system/bin/pwd /bin/pwd || true
|
||||
make CFLAGS_busybox="${LDFLAGS} ${EXTRA_LDFLAGS}" "-j$(nproc)"
|
||||
|
||||
mkdir -p /system/bin/ /work/bin/
|
||||
cp busybox /system/bin/
|
||||
|
||||
mkdir -pv /work/system/bin/
|
||||
busybox --install -s /work/system/bin/
|
||||
cp -v busybox /work/system/bin/
|
||||
ln -vs ../system/bin/hush /work/bin/sh
|
||||
mkdir -vp /work/usr/bin/
|
||||
ln -vs ../../system/bin/busybox /work/usr/bin/env
|
||||
`, pkg.Path(AbsUsrSrc.Append("busybox"), true, pkg.NewHTTPGetTar(
|
||||
&http.Client{Transport: &http.Transport{
|
||||
// busybox website is really slow to respond
|
||||
TLSHandshakeTimeout: 2 * time.Minute,
|
||||
}},
|
||||
"https://busybox.net/downloads/busybox-"+version+".tar.bz2",
|
||||
mustDecode(checksum),
|
||||
pkg.TarBzip2,
|
||||
)), pkg.Path(
|
||||
AbsUsrSrc.Append("patches", "awk-fix-literal-backslash.patch"), false,
|
||||
pkg.NewFile("awk-fix-literal-backslash.patch", []byte(`diff --git a/editors/awk.c b/editors/awk.c
|
||||
index 64e752f4b..40f5ba7f7 100644
|
||||
--- a/editors/awk.c
|
||||
+++ b/editors/awk.c
|
||||
@@ -2636,8 +2636,13 @@ static int awk_sub(node *rn, const char *repl, int nm, var *src, var *dest /*,in
|
||||
resbuf = qrealloc(resbuf, residx + replen + n, &resbufsize);
|
||||
memcpy(resbuf + residx, sp + pmatch[j].rm_so - start_ofs, n);
|
||||
residx += n;
|
||||
- } else
|
||||
+ } else {
|
||||
+/* '\\' and '&' following a backslash keep its original meaning, any other
|
||||
+ * occurrence of a '\\' should be treated as literal */
|
||||
+ if (bslash && c != '\\' && c != '&')
|
||||
+ resbuf[residx++] = '\\';
|
||||
resbuf[residx++] = c;
|
||||
+ }
|
||||
bslash = 0;
|
||||
}
|
||||
}`)),
|
||||
))
|
||||
}
|
||||
func init() { artifactsF[Busybox] = Toolchain.newBusybox }
|
||||
125
internal/rosa/cmake.go
Normal file
@@ -0,0 +1,125 @@
|
||||
package rosa
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"hakurei.app/container/check"
|
||||
"hakurei.app/internal/pkg"
|
||||
)
|
||||
|
||||
func (t Toolchain) newCMake() pkg.Artifact {
|
||||
const (
|
||||
version = "4.2.1"
|
||||
checksum = "Y3OdbMsob6Xk2y1DCME6z4Fryb5_TkFD7knRT8dTNIRtSqbiCJyyDN9AxggN_I75"
|
||||
)
|
||||
return t.New("cmake-"+version, false, []pkg.Artifact{
|
||||
t.Load(Make),
|
||||
t.Load(KernelHeaders),
|
||||
}, nil, nil, `
|
||||
# expected to be writable in the copy made during bootstrap
|
||||
chmod -R +w /usr/src/cmake/Tests
|
||||
|
||||
cd "$(mktemp -d)"
|
||||
/usr/src/cmake/bootstrap \
|
||||
--prefix=/system \
|
||||
--parallel="$(nproc)" \
|
||||
-- \
|
||||
-DCMAKE_USE_OPENSSL=OFF
|
||||
make "-j$(nproc)"
|
||||
make DESTDIR=/work install
|
||||
`, pkg.Path(AbsUsrSrc.Append("cmake"), true,
|
||||
pkg.NewHTTPGetTar(
|
||||
nil, "https://github.com/Kitware/CMake/releases/download/"+
|
||||
"v"+version+"/cmake-"+version+".tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[CMake] = Toolchain.newCMake }
|
||||
|
||||
// CMakeAttr holds the project-specific attributes that will be applied to a new
|
||||
// [pkg.Artifact] compiled via CMake.
|
||||
type CMakeAttr struct {
|
||||
// Path elements joined with source.
|
||||
Append []string
|
||||
// Use source tree as scratch space.
|
||||
Writable bool
|
||||
// Dependencies concatenated with the build system itself.
|
||||
Extra []pkg.Artifact
|
||||
|
||||
// CMake CACHE entries.
|
||||
Cache [][2]string
|
||||
// Additional environment variables.
|
||||
Env []string
|
||||
// Runs before cmake.
|
||||
ScriptEarly string
|
||||
// Runs after cmake.
|
||||
Script string
|
||||
|
||||
// Override the default installation prefix [AbsSystem].
|
||||
Prefix *check.Absolute
|
||||
|
||||
// Return an exclusive artifact.
|
||||
Exclusive bool
|
||||
}
|
||||
|
||||
// NewViaCMake returns a [pkg.Artifact] for compiling and installing via CMake.
|
||||
func (t Toolchain) NewViaCMake(
|
||||
name, version, variant string,
|
||||
source pkg.Artifact,
|
||||
attr *CMakeAttr,
|
||||
) pkg.Artifact {
|
||||
if name == "" || version == "" || variant == "" {
|
||||
panic("names must be non-empty")
|
||||
}
|
||||
if attr == nil {
|
||||
attr = &CMakeAttr{
|
||||
Cache: [][2]string{
|
||||
{"CMAKE_BUILD_TYPE", "Release"},
|
||||
},
|
||||
}
|
||||
}
|
||||
if len(attr.Cache) == 0 {
|
||||
panic("CACHE must be non-empty")
|
||||
}
|
||||
|
||||
scriptEarly := attr.ScriptEarly
|
||||
if attr.Writable {
|
||||
scriptEarly = `
|
||||
chmod -R +w "${ROSA_SOURCE}"
|
||||
` + scriptEarly
|
||||
}
|
||||
|
||||
prefix := attr.Prefix
|
||||
if prefix == nil {
|
||||
prefix = AbsSystem
|
||||
}
|
||||
|
||||
sourcePath := AbsUsrSrc.Append(name)
|
||||
return t.New(name+"-"+variant+"-"+version, attr.Exclusive, stage3Concat(t, attr.Extra,
|
||||
t.Load(CMake),
|
||||
t.Load(Ninja),
|
||||
), nil, slices.Concat([]string{
|
||||
"ROSA_SOURCE=" + sourcePath.String(),
|
||||
"ROSA_CMAKE_SOURCE=" + sourcePath.Append(attr.Append...).String(),
|
||||
"ROSA_INSTALL_PREFIX=/work" + prefix.String(),
|
||||
}, attr.Env), scriptEarly+`
|
||||
mkdir /cure && cd /cure
|
||||
cmake -G Ninja \
|
||||
-DCMAKE_C_COMPILER_TARGET="${ROSA_TRIPLE}" \
|
||||
-DCMAKE_CXX_COMPILER_TARGET="${ROSA_TRIPLE}" \
|
||||
-DCMAKE_ASM_COMPILER_TARGET="${ROSA_TRIPLE}" \
|
||||
`+strings.Join(slices.Collect(func(yield func(string) bool) {
|
||||
for _, v := range attr.Cache {
|
||||
if !yield("-D" + v[0] + "=" + v[1]) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}), " \\\n\t")+` \
|
||||
-DCMAKE_INSTALL_PREFIX="${ROSA_INSTALL_PREFIX}" \
|
||||
"${ROSA_CMAKE_SOURCE}"
|
||||
cmake --build .
|
||||
cmake --install .
|
||||
`+attr.Script, pkg.Path(sourcePath, attr.Writable, source))
|
||||
}
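// Usage sketch (illustrative only; the project name, version, URL and checksum
// are placeholders): a Release build from a gzip tarball with a writable
// source tree.
//
//	a := t.NewViaCMake("example", "1.0.0", "native",
//		pkg.NewHTTPGetTar(nil,
//			"https://example.org/example-1.0.0.tar.gz",
//			mustDecode(exampleChecksum), pkg.TarGzip),
//		&CMakeAttr{
//			Writable: true,
//			Cache:    [][2]string{{"CMAKE_BUILD_TYPE", "Release"}},
//		})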
|
||||
123
internal/rosa/etc.go
Normal file
@@ -0,0 +1,123 @@
|
||||
package rosa
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"hakurei.app/internal/pkg"
|
||||
)
|
||||
|
||||
// cureEtc contains deterministic elements of /etc, made available as part of
|
||||
// [Toolchain]. This silences test suites expecting certain standard files to be
|
||||
// available in /etc.
|
||||
type cureEtc struct {
|
||||
// Optional via newIANAEtc.
|
||||
iana pkg.Artifact
|
||||
}
|
||||
|
||||
// Cure writes hardcoded configuration to files under etc.
|
||||
func (a cureEtc) Cure(t *pkg.FContext) (err error) {
|
||||
etc := t.GetWorkDir().Append("etc")
|
||||
if err = os.MkdirAll(etc.String(), 0700); err != nil {
|
||||
return
|
||||
}
|
||||
for _, f := range [][2]string{
|
||||
{"hosts", "127.0.0.1 localhost cure cure-net\n"},
|
||||
{"passwd", `root:x:0:0:System administrator:/proc/nonexistent:/bin/sh
|
||||
cure:x:1023:1023:Cure:/usr/src:/bin/sh
|
||||
nobody:x:65534:65534:Overflow user:/proc/nonexistent:/system/bin/false
|
||||
`},
|
||||
{"group", `root:x:0:
|
||||
cure:x:1023:
|
||||
nobody:x:65534:
|
||||
`},
|
||||
} {
|
||||
if err = os.WriteFile(
|
||||
etc.Append(f[0]).String(),
|
||||
[]byte(f[1]),
|
||||
0400,
|
||||
); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if a.iana != nil {
|
||||
iana, _ := t.GetArtifact(a.iana)
|
||||
|
||||
buf := make([]byte, syscall.Getpagesize()<<3)
|
||||
for _, name := range []string{
|
||||
"protocols",
|
||||
"services",
|
||||
} {
|
||||
var dst, src *os.File
|
||||
if dst, err = os.OpenFile(
|
||||
etc.Append(name).String(),
|
||||
syscall.O_CREAT|syscall.O_EXCL|syscall.O_WRONLY,
|
||||
0400,
|
||||
); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if src, err = os.Open(
|
||||
iana.Append(name).String(),
|
||||
); err != nil {
|
||||
_ = dst.Close()
|
||||
return
|
||||
}
|
||||
|
||||
_, err = io.CopyBuffer(dst, src, buf)
|
||||
closeErrs := [...]error{
|
||||
dst.Close(),
|
||||
src.Close(),
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
} else if err = errors.Join(closeErrs[:]...); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return os.Chmod(etc.String(), 0500)
|
||||
}
|
||||
|
||||
// Kind returns the hardcoded [pkg.Kind] value.
|
||||
func (cureEtc) Kind() pkg.Kind { return kindEtc }
|
||||
|
||||
// Params is a noop.
|
||||
func (cureEtc) Params(*pkg.IContext) {}
|
||||
|
||||
// IsExclusive returns false: Cure performs a few trivial filesystem writes.
|
||||
func (cureEtc) IsExclusive() bool { return false }
|
||||
|
||||
// Dependencies returns a slice containing the backing iana-etc release.
|
||||
func (a cureEtc) Dependencies() []pkg.Artifact {
|
||||
if a.iana != nil {
|
||||
return []pkg.Artifact{a.iana}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// String returns a hardcoded reporting name.
|
||||
func (a cureEtc) String() string {
|
||||
if a.iana == nil {
|
||||
return "cure-etc-minimal"
|
||||
}
|
||||
return "cure-etc"
|
||||
}
|
||||
|
||||
// newIANAEtc returns an unpacked iana-etc release.
|
||||
func newIANAEtc() pkg.Artifact {
|
||||
const (
|
||||
version = "20251215"
|
||||
checksum = "kvKz0gW_rGG5QaNK9ZWmWu1IEgYAdmhj_wR7DYrh3axDfIql_clGRHmelP7525NJ"
|
||||
)
|
||||
return pkg.NewHTTPGetTar(
|
||||
nil, "https://github.com/Mic92/iana-etc/releases/download/"+
|
||||
version+"/iana-etc-"+version+".tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
)
|
||||
}
|
||||
33
internal/rosa/git.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package rosa
|
||||
|
||||
import (
|
||||
"hakurei.app/internal/pkg"
|
||||
)
|
||||
|
||||
func (t Toolchain) newGit() pkg.Artifact {
|
||||
const (
|
||||
version = "2.52.0"
|
||||
checksum = "uH3J1HAN_c6PfGNJd2OBwW4zo36n71wmkdvityYnrh8Ak0D1IifiAvEWz9Vi9DmS"
|
||||
)
|
||||
return t.New("git-"+version, false, stage3Concat(t, []pkg.Artifact{},
|
||||
t.Load(Make),
|
||||
t.Load(Perl),
|
||||
t.Load(M4),
|
||||
t.Load(Autoconf),
|
||||
t.Load(Gettext),
|
||||
|
||||
t.Load(Zlib),
|
||||
), nil, nil, `
|
||||
chmod -R +w /usr/src/git && cd /usr/src/git
|
||||
make configure
|
||||
./configure --prefix=/system
|
||||
make "-j$(nproc)" all
|
||||
make DESTDIR=/work install
|
||||
`, pkg.Path(AbsUsrSrc.Append("git"), true, pkg.NewHTTPGetTar(
|
||||
nil, "https://www.kernel.org/pub/software/scm/git/"+
|
||||
"git-"+version+".tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[Git] = Toolchain.newGit }
|
||||
250
internal/rosa/gnu.go
Normal file
@@ -0,0 +1,250 @@
|
||||
package rosa
|
||||
|
||||
import "hakurei.app/internal/pkg"
|
||||
|
||||
func (t Toolchain) newMake() pkg.Artifact {
|
||||
const (
|
||||
version = "4.4.1"
|
||||
checksum = "YS_B07ZcAy9PbaK5_vKGj64SrxO2VMpnMKfc9I0Q9IC1rn0RwOH7802pJoj2Mq4a"
|
||||
)
|
||||
return t.New("make-"+version, false, nil, nil, nil, `
|
||||
cd "$(mktemp -d)"
|
||||
/usr/src/make/configure \
|
||||
--prefix=/system \
|
||||
--build="${ROSA_TRIPLE}" \
|
||||
--disable-dependency-tracking
|
||||
./build.sh
|
||||
./make DESTDIR=/work install check
|
||||
`, pkg.Path(AbsUsrSrc.Append("make"), false, pkg.NewHTTPGetTar(
|
||||
nil,
|
||||
"https://ftp.gnu.org/gnu/make/make-"+version+".tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[Make] = Toolchain.newMake }
|
||||
|
||||
func (t Toolchain) newM4() pkg.Artifact {
|
||||
const (
|
||||
version = "1.4.20"
|
||||
checksum = "RT0_L3m4Co86bVBY3lCFAEs040yI1WdeNmRylFpah8IZovTm6O4wI7qiHJN3qsW9"
|
||||
)
|
||||
return t.New("m4-"+version, false, []pkg.Artifact{
|
||||
t.Load(Make),
|
||||
}, nil, nil, `
|
||||
cd /usr/src/m4
|
||||
chmod +w tests/test-c32ispunct.sh && echo '#!/bin/sh' > tests/test-c32ispunct.sh
|
||||
|
||||
cd "$(mktemp -d)"
|
||||
/usr/src/m4/configure \
|
||||
--prefix=/system \
|
||||
--build="${ROSA_TRIPLE}"
|
||||
make "-j$(nproc)" check
|
||||
make DESTDIR=/work install
|
||||
`, pkg.Path(AbsUsrSrc.Append("m4"), true, pkg.NewHTTPGetTar(
|
||||
nil,
|
||||
"https://ftp.gnu.org/gnu/m4/m4-"+version+".tar.bz2",
|
||||
mustDecode(checksum),
|
||||
pkg.TarBzip2,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[M4] = Toolchain.newM4 }
|
||||
|
||||
func (t Toolchain) newAutoconf() pkg.Artifact {
|
||||
const (
|
||||
version = "2.72"
|
||||
checksum = "-c5blYkC-xLDer3TWEqJTyh1RLbOd1c5dnRLKsDnIrg_wWNOLBpaqMY8FvmUFJ33"
|
||||
)
|
||||
return t.New("autoconf-"+version, false, []pkg.Artifact{
|
||||
t.Load(Make),
|
||||
t.Load(M4),
|
||||
t.Load(Perl),
|
||||
}, nil, nil, `
|
||||
cd "$(mktemp -d)"
|
||||
/usr/src/autoconf/configure \
|
||||
--prefix=/system \
|
||||
--build="${ROSA_TRIPLE}"
|
||||
make \
|
||||
"-j$(nproc)" \
|
||||
TESTSUITEFLAGS="-j$(nproc)" \
|
||||
check
|
||||
make DESTDIR=/work install
|
||||
`, pkg.Path(AbsUsrSrc.Append("autoconf"), false, pkg.NewHTTPGetTar(
|
||||
nil,
|
||||
"https://ftp.gnu.org/gnu/autoconf/autoconf-"+version+".tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[Autoconf] = Toolchain.newAutoconf }
|
||||
|
||||
func (t Toolchain) newGettext() pkg.Artifact {
|
||||
const (
|
||||
version = "0.26"
|
||||
checksum = "IMu7yDZX7xL5UO1ZxXc-iBMbY9LLEUlOroyuSlHMZwg9MKtxG7HIm8F2LheDua0y"
|
||||
)
|
||||
return t.New("gettext-"+version, false, []pkg.Artifact{
|
||||
t.Load(Make),
|
||||
}, nil, nil, `
|
||||
cd /usr/src/gettext
|
||||
test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
|
||||
|
||||
test_disable '#!/bin/sh' gettext-tools/tests/msgcat-22
|
||||
test_disable '#!/bin/sh' gettext-tools/tests/msgconv-2
|
||||
test_disable '#!/bin/sh' gettext-tools/tests/msgconv-8
|
||||
test_disable '#!/bin/sh' gettext-tools/tests/xgettext-python-3
|
||||
test_disable '#!/bin/sh' gettext-tools/tests/msgmerge-compendium-6
|
||||
test_disable '#!/bin/sh' gettext-tools/tests/gettextpo-1
|
||||
test_disable '#!/bin/sh' gettext-tools/tests/format-c-5
|
||||
test_disable '#!/bin/sh' gettext-tools/gnulib-tests/test-c32ispunct.sh
|
||||
test_disable 'int main(){return 0;}' gettext-tools/gnulib-tests/test-stdcountof-h.c
|
||||
|
||||
cd "$(mktemp -d)"
|
||||
/usr/src/gettext/configure \
|
||||
--prefix=/system \
|
||||
--build="${ROSA_TRIPLE}"
|
||||
make "-j$(nproc)" check
|
||||
make DESTDIR=/work install
|
||||
`, pkg.Path(AbsUsrSrc.Append("gettext"), true, pkg.NewHTTPGetTar(
|
||||
nil,
|
||||
"https://ftp.gnu.org/pub/gnu/gettext/gettext-"+version+".tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[Gettext] = Toolchain.newGettext }
|
||||
|
||||
func (t Toolchain) newDiffutils() pkg.Artifact {
|
||||
const (
|
||||
version = "3.12"
|
||||
checksum = "9J5VAq5oA7eqwzS1Yvw-l3G5o-TccUrNQR3PvyB_lgdryOFAfxtvQfKfhdpquE44"
|
||||
)
|
||||
return t.New("diffutils-"+version, false, []pkg.Artifact{
|
||||
t.Load(Make),
|
||||
}, nil, nil, `
|
||||
cd /usr/src/diffutils
|
||||
test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
|
||||
|
||||
test_disable '#!/bin/sh' gnulib-tests/test-c32ispunct.sh
|
||||
test_disable 'int main(){return 0;}' gnulib-tests/test-c32ispunct.c
|
||||
|
||||
cd "$(mktemp -d)"
|
||||
/usr/src/diffutils/configure \
|
||||
--prefix=/system \
|
||||
--build="${ROSA_TRIPLE}"
|
||||
make "-j$(nproc)" check
|
||||
make DESTDIR=/work install
|
||||
`, pkg.Path(AbsUsrSrc.Append("diffutils"), true, pkg.NewHTTPGetTar(
|
||||
nil,
|
||||
"https://ftp.gnu.org/gnu/diffutils/diffutils-"+version+".tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[Diffutils] = Toolchain.newDiffutils }
|
||||
|
||||
func (t Toolchain) newPatch() pkg.Artifact {
|
||||
const (
|
||||
version = "2.8"
|
||||
checksum = "MA0BQc662i8QYBD-DdGgyyfTwaeALZ1K0yusV9rAmNiIsQdX-69YC4t9JEGXZkeR"
|
||||
)
|
||||
return t.New("patch-"+version, false, []pkg.Artifact{
|
||||
t.Load(Make),
|
||||
}, nil, nil, `
|
||||
cd /usr/src/patch
|
||||
test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
|
||||
|
||||
test_disable '#!/bin/sh' tests/ed-style
|
||||
test_disable '#!/bin/sh' tests/need-filename
|
||||
|
||||
cd "$(mktemp -d)"
|
||||
/usr/src/patch/configure \
|
||||
--prefix=/system \
|
||||
--build="${ROSA_TRIPLE}"
|
||||
make "-j$(nproc)" check
|
||||
make DESTDIR=/work install
|
||||
`, pkg.Path(AbsUsrSrc.Append("patch"), true, pkg.NewHTTPGetTar(
|
||||
nil,
|
||||
"https://ftp.gnu.org/gnu/patch/patch-"+version+".tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[Patch] = Toolchain.newPatch }
|
||||
|
||||
func (t Toolchain) newBash() pkg.Artifact {
|
||||
const (
|
||||
version = "5.3"
|
||||
checksum = "4LQ_GRoB_ko-Ih8QPf_xRKA02xAm_TOxQgcJLmFDT6udUPxTAWrsj-ZNeuTusyDq"
|
||||
)
|
||||
return t.New("bash-"+version, false, []pkg.Artifact{
|
||||
t.Load(Make),
|
||||
}, nil, nil, `
|
||||
cd "$(mktemp -d)"
|
||||
/usr/src/bash/configure \
|
||||
--prefix=/system \
|
||||
--build="${ROSA_TRIPLE}" \
|
||||
--without-bash-malloc
|
||||
make "-j$(nproc)" check
|
||||
make DESTDIR=/work install
|
||||
`, pkg.Path(AbsUsrSrc.Append("bash"), true, pkg.NewHTTPGetTar(
|
||||
nil,
|
||||
"https://ftp.gnu.org/gnu/bash/bash-"+version+".tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[Bash] = Toolchain.newBash }
|
||||
|
||||
func (t Toolchain) newCoreutils() pkg.Artifact {
|
||||
const (
|
||||
version = "9.9"
|
||||
checksum = "B1_TaXj1j5aiVIcazLWu8Ix03wDV54uo2_iBry4qHG6Y-9bjDpUPlkNLmU_3Nvw6"
|
||||
)
|
||||
return t.New("coreutils-"+version, false, []pkg.Artifact{
|
||||
t.Load(Make),
|
||||
t.Load(Perl),
|
||||
|
||||
t.Load(KernelHeaders),
|
||||
}, nil, nil, `
|
||||
cd /usr/src/coreutils
|
||||
test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
|
||||
|
||||
test_disable '#!/bin/sh' gnulib-tests/test-c32ispunct.sh
|
||||
|
||||
cd "$(mktemp -d)"
|
||||
/usr/src/coreutils/configure \
|
||||
--prefix=/system \
|
||||
--build="${ROSA_TRIPLE}"
|
||||
make "-j$(nproc)" check
|
||||
make DESTDIR=/work install
|
||||
`, pkg.Path(AbsUsrSrc.Append("coreutils"), true, pkg.NewHTTPGetTar(
|
||||
nil,
|
||||
"https://ftp.gnu.org/gnu/coreutils/coreutils-"+version+".tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[Coreutils] = Toolchain.newCoreutils }
|
||||
|
||||
func (t Toolchain) newGperf() pkg.Artifact {
|
||||
const (
|
||||
version = "3.3"
|
||||
checksum = "RtIy9pPb_Bb8-31J2Nw-rRGso2JlS-lDlVhuNYhqR7Nt4xM_nObznxAlBMnarJv7"
|
||||
)
|
||||
return t.New("gperf-"+version, false, []pkg.Artifact{
|
||||
t.Load(Make),
|
||||
}, nil, nil, `
|
||||
cd "$(mktemp -d)"
|
||||
/usr/src/gperf/configure \
|
||||
--prefix=/system \
|
||||
--build="${ROSA_TRIPLE}"
|
||||
make "-j$(nproc)" check
|
||||
make DESTDIR=/work install
|
||||
`, pkg.Path(AbsUsrSrc.Append("gperf"), true, pkg.NewHTTPGetTar(
|
||||
nil, "https://ftp.gnu.org/pub/gnu/gperf/gperf-"+version+".tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[Gperf] = Toolchain.newGperf }
|
||||
internal/rosa/go.go (Normal file, 126 lines)
@@ -0,0 +1,126 @@
|
||||
package rosa
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"slices"
|
||||
|
||||
"hakurei.app/internal/pkg"
|
||||
)
|
||||
|
||||
// newGoBootstrap returns the Go bootstrap toolchain.
|
||||
func (t Toolchain) newGoBootstrap() pkg.Artifact {
|
||||
const checksum = "8o9JL_ToiQKadCTb04nvBDkp8O1xiWOolAxVEqaTGodieNe4lOFEjlOxN3bwwe23"
|
||||
return t.New("go1.4-bootstrap", false, []pkg.Artifact{
|
||||
t.Load(Bash),
|
||||
}, nil, []string{
|
||||
"CGO_ENABLED=0",
|
||||
}, `
|
||||
mkdir -p /var/tmp
|
||||
cp -r /usr/src/go /work
|
||||
cd /work/go/src
|
||||
chmod -R +w ..
|
||||
|
||||
ln -s ../system/bin/busybox /bin/pwd
|
||||
cat << EOF > /bin/hostname
|
||||
#!/bin/sh
|
||||
echo cure
|
||||
EOF
|
||||
chmod +x /bin/hostname
|
||||
|
||||
rm \
|
||||
cmd/objdump/objdump_test.go \
|
||||
syscall/creds_test.go \
|
||||
net/multicast_test.go
|
||||
|
||||
./all.bash
|
||||
cd /work/
|
||||
mkdir system/
|
||||
mv go/ system/
|
||||
`, pkg.Path(AbsUsrSrc.Append("go"), false, pkg.NewHTTPGetTar(
|
||||
nil, "https://dl.google.com/go/go1.4-bootstrap-20171003.tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
)))
|
||||
}
|
||||
|
||||
// newGo returns a specific version of the Go toolchain.
|
||||
func (t Toolchain) newGo(
|
||||
version, checksum string,
|
||||
env []string,
|
||||
script string,
|
||||
extra ...pkg.Artifact,
|
||||
) pkg.Artifact {
|
||||
return t.New("go"+version, false, slices.Concat([]pkg.Artifact{
|
||||
t.Load(Bash),
|
||||
}, extra), nil, slices.Concat([]string{
|
||||
"CC=cc",
|
||||
"GOCACHE=/tmp/gocache",
|
||||
"GOROOT_BOOTSTRAP=/system/go",
|
||||
"TMPDIR=/dev/shm/go",
|
||||
}, env), `
|
||||
mkdir /work/system "${TMPDIR}"
|
||||
cp -r /usr/src/go /work/system
|
||||
cd /work/system/go/src
|
||||
chmod -R +w ..
|
||||
`+script+`
|
||||
./all.bash
|
||||
|
||||
mkdir /work/system/bin
|
||||
ln -s \
|
||||
../go/bin/go \
|
||||
../go/bin/gofmt \
|
||||
/work/system/bin
|
||||
`, pkg.Path(AbsUsrSrc.Append("go"), false, pkg.NewHTTPGetTar(
|
||||
nil, "https://go.dev/dl/go"+version+".src.tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
)))
|
||||
}
|
||||
|
||||
func (t Toolchain) newGoLatest() pkg.Artifact {
|
||||
go119 := t.newGo(
|
||||
"1.19",
|
||||
"9_e0aFHsIkVxWVGsp9T2RvvjOc3p4n9o9S8tkNe9Cvgzk_zI2FhRQB7ioQkeAAro",
|
||||
[]string{"CGO_ENABLED=0"}, `
|
||||
rm \
|
||||
crypto/tls/handshake_client_test.go
|
||||
`, t.newGoBootstrap(),
|
||||
)
|
||||
|
||||
go121 := t.newGo(
|
||||
"1.21.13",
|
||||
"YtrDka402BOAEwywx03Vz4QlVwoBiguJHzG7PuythMCPHXS8CVMLvzmvgEbu4Tzu",
|
||||
[]string{"CGO_ENABLED=0"}, `
|
||||
sed -i \
|
||||
's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \
|
||||
cmd/link/internal/`+runtime.GOARCH+`/obj.go
|
||||
|
||||
rm \
|
||||
crypto/tls/handshake_client_test.go \
|
||||
crypto/tls/handshake_server_test.go
|
||||
`, go119,
|
||||
)
|
||||
|
||||
go123 := t.newGo(
|
||||
"1.23.12",
|
||||
"wcI32bl1tkqbgcelGtGWPI4RtlEddd-PTd76Eb-k7nXA5LbE9yTNdIL9QSOOxMOs",
|
||||
nil, `
|
||||
sed -i \
|
||||
's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \
|
||||
cmd/link/internal/`+runtime.GOARCH+`/obj.go
|
||||
`, go121,
|
||||
)
|
||||
|
||||
go125 := t.newGo(
|
||||
"1.25.6",
|
||||
"x0z430qoDvQbbw_fftjW0rh_GSoh0VJhPzttWk_0hj9yz9AKOjuwRMupF_Q0dbt7",
|
||||
nil, `
|
||||
sed -i \
|
||||
's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \
|
||||
cmd/link/internal/`+runtime.GOARCH+`/obj.go
|
||||
`, go123,
|
||||
)
|
||||
|
||||
return go125
|
||||
}
|
||||
func init() { artifactsF[Go] = Toolchain.newGoLatest }
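
// Illustrative sketch only, not part of this change: a hypothetical future Go
// release would slot into the chain above the same way, building on the
// previous artifact. The version string, empty checksum and the newGoNext name
// are placeholders, not real values.
func (t Toolchain) newGoNext(prev pkg.Artifact) pkg.Artifact {
	return t.newGo(
		"1.x.y", // hypothetical version
		"",      // unpopulated checksum; mustDecode falls back with a warning
		nil, `
	sed -i \
		's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \
		cmd/link/internal/`+runtime.GOARCH+`/obj.go
`, prev,
	)
}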
|
||||
internal/rosa/hakurei.go (Normal file, 83 lines)
@@ -0,0 +1,83 @@
|
||||
package rosa
|
||||
|
||||
import (
|
||||
"hakurei.app/internal/pkg"
|
||||
)
|
||||
|
||||
func (t Toolchain) newHakurei() pkg.Artifact {
|
||||
const (
|
||||
version = "0.3.3"
|
||||
checksum = "iMN9qzDB000noZ6dOHh_aSdrhRZPopjyWHd0KFVjxjQLQstAOvLYZEZ74btlL0pu"
|
||||
)
|
||||
return t.New("hakurei-"+version, false, []pkg.Artifact{
|
||||
t.Load(Go),
|
||||
t.Load(PkgConfig),
|
||||
|
||||
t.Load(KernelHeaders),
|
||||
t.Load(Libseccomp),
|
||||
t.Load(ACL),
|
||||
t.Load(Attr),
|
||||
|
||||
t.Load(Xproto),
|
||||
t.Load(LibXau),
|
||||
t.Load(XCBProto),
|
||||
t.Load(XCB),
|
||||
|
||||
t.Load(Libffi),
|
||||
t.Load(Libexpat),
|
||||
t.Load(Libxml2),
|
||||
t.Load(Wayland),
|
||||
t.Load(WaylandProtocols),
|
||||
}, nil, []string{
|
||||
"GOCACHE=/tmp/gocache",
|
||||
"CC=clang -O3 -Werror",
|
||||
}, `
|
||||
echo '# Building test helper (hostname).'
|
||||
go build -v -o /bin/hostname /usr/src/hostname/main.go
|
||||
echo
|
||||
|
||||
chmod -R +w /usr/src/hakurei
|
||||
cd /usr/src/hakurei
|
||||
mkdir -p /work/system/{bin,libexec/hakurei}
|
||||
|
||||
echo '# Building hakurei.'
|
||||
go generate -v ./...
|
||||
go build -trimpath -v -o /work/system/libexec/hakurei -ldflags="-s -w
|
||||
-buildid=
|
||||
-extldflags=-static
|
||||
-X hakurei.app/internal/info.buildVersion='v`+version+`'
|
||||
-X hakurei.app/internal/info.hakureiPath=/system/bin/hakurei
|
||||
-X hakurei.app/internal/info.hsuPath=/system/bin/hsu
|
||||
-X main.hakureiPath=/system/bin/hakurei" ./...
|
||||
echo
|
||||
|
||||
echo '# Testing hakurei.'
|
||||
go test -ldflags='-buildid= -extldflags=-static' ./...
|
||||
echo
|
||||
|
||||
mv \
|
||||
/work/system/libexec/hakurei/{hakurei,hpkg} \
|
||||
/work/system/bin
|
||||
`, pkg.Path(AbsUsrSrc.Append("hakurei"), true, pkg.NewHTTPGetTar(
|
||||
nil, "https://git.gensokyo.uk/security/hakurei/archive/"+
|
||||
"v"+version+".tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
)), pkg.Path(AbsUsrSrc.Append("hostname", "main.go"), false, pkg.NewFile(
|
||||
"hostname.go",
|
||||
[]byte(`
|
||||
package main
|
||||
|
||||
import "os"
|
||||
|
||||
func main() {
|
||||
if name, err := os.Hostname(); err != nil {
|
||||
panic(err)
|
||||
} else {
|
||||
os.Stdout.WriteString(name)
|
||||
}
|
||||
}
|
||||
`),
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[Hakurei] = Toolchain.newHakurei }
|
||||
internal/rosa/kernel.go (Normal file, 44 lines)
@@ -0,0 +1,44 @@
|
||||
package rosa
|
||||
|
||||
import (
|
||||
"slices"
|
||||
|
||||
"hakurei.app/internal/pkg"
|
||||
)
|
||||
|
||||
// newKernel is a helper for interacting with Kbuild.
|
||||
func (t Toolchain) newKernel(
|
||||
exclusive bool,
|
||||
patches [][2]string,
|
||||
script string,
|
||||
extra ...pkg.Artifact,
|
||||
) pkg.Artifact {
|
||||
const (
|
||||
version = "6.18.5"
|
||||
checksum = "-V1e1WWl7HuePkmm84sSKF7nLuHfUs494uNMzMqXEyxcNE_PUE0FICL0oGWn44mM"
|
||||
)
|
||||
return t.New("kernel-"+version, exclusive, slices.Concat([]pkg.Artifact{
|
||||
t.Load(Make),
|
||||
}, extra), nil, nil, `
|
||||
export LLVM=1
|
||||
export HOSTLDFLAGS="${LDFLAGS}"
|
||||
cd /usr/src/linux
|
||||
`+script, pkg.Path(AbsUsrSrc.Append("linux"), true, t.NewPatchedSource(
|
||||
"kernel", version, pkg.NewHTTPGetTar(
|
||||
nil,
|
||||
"https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/"+
|
||||
"snapshot/linux-"+version+".tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
), false, patches...,
|
||||
)))
|
||||
}
|
||||
|
||||
func (t Toolchain) newKernelHeaders() pkg.Artifact {
|
||||
return t.newKernel(false, nil, `
|
||||
make "-j$(nproc)" \
|
||||
INSTALL_HDR_PATH=/work/system \
|
||||
headers_install
|
||||
`, t.Load(Rsync))
|
||||
}
|
||||
func init() { artifactsF[KernelHeaders] = Toolchain.newKernelHeaders }
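
// Sketch, not part of this change: newKernel is parameterised so other kernel
// artifacts can reuse the same Kbuild setup; a full kernel image build might
// look roughly like this. The newKernelImage name and the defconfig recipe are
// hypothetical.
func (t Toolchain) newKernelImage() pkg.Artifact {
	return t.newKernel(true, nil, `
	make "-j$(nproc)" defconfig
	make "-j$(nproc)"
`, t.Load(Bash))
}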
|
||||
internal/rosa/libexpat.go (Normal file, 33 lines)
@@ -0,0 +1,33 @@
|
||||
package rosa
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"hakurei.app/internal/pkg"
|
||||
)
|
||||
|
||||
func (t Toolchain) newLibexpat() pkg.Artifact {
|
||||
const (
|
||||
version = "2.7.3"
|
||||
checksum = "GmkoD23nRi9cMT0cgG1XRMrZWD82UcOMzkkvP1gkwSFWCBgeSXMuoLpa8-v8kxW-"
|
||||
)
|
||||
return t.New("libexpat-"+version, false, []pkg.Artifact{
|
||||
t.Load(Make),
|
||||
t.Load(Bash),
|
||||
}, nil, nil, `
|
||||
cd "$(mktemp -d)"
|
||||
/usr/src/libexpat/configure \
|
||||
--prefix=/system \
|
||||
--build="${ROSA_TRIPLE}" \
|
||||
--enable-static
|
||||
make "-j$(nproc)" check
|
||||
make DESTDIR=/work install
|
||||
`, pkg.Path(AbsUsrSrc.Append("libexpat"), false, pkg.NewHTTPGetTar(
|
||||
nil, "https://github.com/libexpat/libexpat/releases/download/"+
|
||||
"R_"+strings.ReplaceAll(version, ".", "_")+"/"+
|
||||
"expat-"+version+".tar.bz2",
|
||||
mustDecode(checksum),
|
||||
pkg.TarBzip2,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[Libexpat] = Toolchain.newLibexpat }
|
||||
internal/rosa/libffi.go (Normal file, 29 lines)
@@ -0,0 +1,29 @@
|
||||
package rosa
|
||||
|
||||
import "hakurei.app/internal/pkg"
|
||||
|
||||
func (t Toolchain) newLibffi() pkg.Artifact {
|
||||
const (
|
||||
version = "3.4.5"
|
||||
checksum = "apIJzypF4rDudeRoI_n3K7N-zCeBLTbQlHRn9NSAZqdLAWA80mR0gXPTpHsL7oMl"
|
||||
)
|
||||
return t.New("libffi-"+version, false, []pkg.Artifact{
|
||||
t.Load(Make),
|
||||
t.Load(KernelHeaders),
|
||||
}, nil, nil, `
|
||||
cd "$(mktemp -d)"
|
||||
/usr/src/libffi/configure \
|
||||
--prefix=/system \
|
||||
--build="${ROSA_TRIPLE}" \
|
||||
--enable-static
|
||||
make "-j$(nproc)" check
|
||||
make DESTDIR=/work install
|
||||
`, pkg.Path(AbsUsrSrc.Append("libffi"), false, pkg.NewHTTPGetTar(
|
||||
nil,
|
||||
"https://github.com/libffi/libffi/releases/download/"+
|
||||
"v"+version+"/libffi-"+version+".tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[Libffi] = Toolchain.newLibffi }
|
||||
internal/rosa/libgd.go (Normal file, 33 lines)
@@ -0,0 +1,33 @@
|
||||
package rosa
|
||||
|
||||
import "hakurei.app/internal/pkg"
|
||||
|
||||
func (t Toolchain) newLibgd() pkg.Artifact {
|
||||
const (
|
||||
version = "2.3.3"
|
||||
checksum = "8T-sh1_FJT9K9aajgxzh8ot6vWIF-xxjcKAHvTak9MgGUcsFfzP8cAvvv44u2r36"
|
||||
)
|
||||
return t.New("libgd-"+version, false, []pkg.Artifact{
|
||||
t.Load(Make),
|
||||
|
||||
t.Load(Zlib),
|
||||
}, nil, []string{
|
||||
"TMPDIR=/dev/shm/gd",
|
||||
}, `
|
||||
mkdir /dev/shm/gd
|
||||
|
||||
cd "$(mktemp -d)"
|
||||
/usr/src/libgd/configure \
|
||||
--prefix=/system \
|
||||
--build="${ROSA_TRIPLE}" \
|
||||
--enable-static
|
||||
make "-j$(nproc)" check
|
||||
make DESTDIR=/work install
|
||||
`, pkg.Path(AbsUsrSrc.Append("libgd"), true, pkg.NewHTTPGetTar(
|
||||
nil, "https://github.com/libgd/libgd/releases/download/"+
|
||||
"gd-"+version+"/libgd-"+version+".tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[Libgd] = Toolchain.newLibgd }
|
||||
internal/rosa/libseccomp.go (Normal file, 36 lines)
@@ -0,0 +1,36 @@
|
||||
package rosa
|
||||
|
||||
import (
|
||||
"hakurei.app/internal/pkg"
|
||||
)
|
||||
|
||||
func (t Toolchain) newLibseccomp() pkg.Artifact {
|
||||
const (
|
||||
version = "2.6.0"
|
||||
checksum = "mMu-iR71guPjFbb31u-YexBaanKE_nYPjPux-vuBiPfS_0kbwJdfCGlkofaUm-EY"
|
||||
)
|
||||
return t.New("libseccomp-"+version, false, []pkg.Artifact{
|
||||
t.Load(Make),
|
||||
t.Load(Bash),
|
||||
t.Load(Gperf),
|
||||
|
||||
t.Load(KernelHeaders),
|
||||
}, nil, nil, `
|
||||
ln -s ../system/bin/bash /bin/bash
|
||||
|
||||
cd "$(mktemp -d)"
|
||||
/usr/src/libseccomp/configure \
|
||||
--prefix=/system \
|
||||
--build="${ROSA_TRIPLE}" \
|
||||
--enable-static
|
||||
make "-j$(nproc)" check
|
||||
make DESTDIR=/work install
|
||||
`, pkg.Path(AbsUsrSrc.Append("libseccomp"), false, pkg.NewHTTPGetTar(
|
||||
nil,
|
||||
"https://github.com/seccomp/libseccomp/releases/download/"+
|
||||
"v"+version+"/libseccomp-"+version+".tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[Libseccomp] = Toolchain.newLibseccomp }
|
||||
internal/rosa/libxml2.go (Normal file, 35 lines)
@@ -0,0 +1,35 @@
|
||||
package rosa
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"hakurei.app/internal/pkg"
|
||||
)
|
||||
|
||||
func (t Toolchain) newLibxml2() pkg.Artifact {
|
||||
const (
|
||||
version = "2.15.1"
|
||||
checksum = "pYzAR3cNrEHezhEMirgiq7jbboLzwMj5GD7SQp0jhSIMdgoU4G9oU9Gxun3zzUIU"
|
||||
)
|
||||
return t.New("libxml2-"+version, false, []pkg.Artifact{
|
||||
t.Load(Make),
|
||||
}, nil, nil, `
|
||||
cd /usr/src/
|
||||
tar xf libxml2.tar.xz
|
||||
mv libxml2-`+version+` libxml2
|
||||
|
||||
cd "$(mktemp -d)"
|
||||
/usr/src/libxml2/configure \
|
||||
--prefix=/system \
|
||||
--build="${ROSA_TRIPLE}" \
|
||||
--enable-static
|
||||
make "-j$(nproc)" check
|
||||
make DESTDIR=/work install
|
||||
`, pkg.Path(AbsUsrSrc.Append("libxml2.tar.xz"), false, pkg.NewHTTPGet(
|
||||
nil, "https://download.gnome.org/sources/libxml2/"+
|
||||
strings.Join(strings.Split(version, ".")[:2], ".")+
|
||||
"/libxml2-"+version+".tar.xz",
|
||||
mustDecode(checksum),
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[Libxml2] = Toolchain.newLibxml2 }
|
||||
internal/rosa/llvm.go (Normal file, 459 lines)
@@ -0,0 +1,459 @@
|
||||
package rosa
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"hakurei.app/container/check"
|
||||
"hakurei.app/internal/pkg"
|
||||
)
|
||||
|
||||
// llvmAttr holds the attributes that will be applied to a new [pkg.Artifact]
|
||||
// containing an LLVM variant.
|
||||
type llvmAttr struct {
|
||||
flags int
|
||||
|
||||
// Concatenated with default environment for CMakeAttr.Env.
|
||||
env []string
|
||||
// Concatenated with generated entries for CMakeAttr.Cache.
|
||||
cmake [][2]string
|
||||
// Override CMakeAttr.Append.
|
||||
append []string
|
||||
// Concatenated with default dependencies for CMakeAttr.Extra.
|
||||
extra []pkg.Artifact
|
||||
// Concatenated with default fixup for CMakeAttr.Script.
|
||||
script string
|
||||
// Passed through to CMakeAttr.Prefix.
|
||||
prefix *check.Absolute
|
||||
|
||||
// Patch name and body pairs.
|
||||
patches [][2]string
|
||||
}
|
||||
|
||||
const (
|
||||
llvmProjectClang = 1 << iota
|
||||
llvmProjectLld
|
||||
|
||||
llvmProjectAll = 1<<iota - 1
|
||||
|
||||
llvmRuntimeCompilerRT = 1 << iota
|
||||
llvmRuntimeLibunwind
|
||||
llvmRuntimeLibc
|
||||
llvmRuntimeLibcxx
|
||||
llvmRuntimeLibcxxABI
|
||||
|
||||
llvmAll = 1<<iota - 1
|
||||
llvmRuntimeAll = llvmAll - (2 * llvmProjectAll) - 1
|
||||
)
|
||||
|
||||
// llvmFlagName resolves an llvmAttr.flags project or runtime flag to its name.
|
||||
func llvmFlagName(flag int) string {
|
||||
switch flag {
|
||||
case llvmProjectClang:
|
||||
return "clang"
|
||||
case llvmProjectLld:
|
||||
return "lld"
|
||||
|
||||
case llvmRuntimeCompilerRT:
|
||||
return "compiler-rt"
|
||||
case llvmRuntimeLibunwind:
|
||||
return "libunwind"
|
||||
case llvmRuntimeLibc:
|
||||
return "libc"
|
||||
case llvmRuntimeLibcxx:
|
||||
return "libcxx"
|
||||
case llvmRuntimeLibcxxABI:
|
||||
return "libcxxabi"
|
||||
|
||||
default:
|
||||
panic("invalid flag " + strconv.Itoa(flag))
|
||||
}
|
||||
}
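
// Reference sketch, not part of this change: the masks above evaluate to
// clang=1, lld=2, llvmProjectAll=0b011, then compiler-rt=8 through
// libcxxabi=128, llvmAll=0b11111111 and llvmRuntimeAll=0b11111000, so the two
// loops in newLLVMVariant below walk exactly the project bits and then the
// runtime bits. These zero-length arrays fail to compile if that relationship
// ever changes.
var (
	_ [llvmRuntimeAll - 0b11111000]struct{}
	_ [0b11111000 - llvmRuntimeAll]struct{}
)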
|
||||
|
||||
// newLLVMVariant returns a [pkg.Artifact] containing an LLVM variant.
|
||||
func (t Toolchain) newLLVMVariant(variant string, attr *llvmAttr) pkg.Artifact {
|
||||
const (
|
||||
version = "21.1.8"
|
||||
checksum = "8SUpqDkcgwOPsqHVtmf9kXfFeVmjVxl4LMn-qSE1AI_Xoeju-9HaoPNGtidyxyka"
|
||||
)
|
||||
if attr == nil {
|
||||
panic("LLVM attr must be non-nil")
|
||||
}
|
||||
|
||||
var projects, runtimes []string
|
||||
for i := 1; i < llvmProjectAll; i <<= 1 {
|
||||
if attr.flags&i != 0 {
|
||||
projects = append(projects, llvmFlagName(i))
|
||||
}
|
||||
}
|
||||
for i := (llvmProjectAll + 1) << 1; i < llvmRuntimeAll; i <<= 1 {
|
||||
if attr.flags&i != 0 {
|
||||
runtimes = append(runtimes, llvmFlagName(i))
|
||||
}
|
||||
}
|
||||
|
||||
var script, scriptEarly string
|
||||
|
||||
cache := [][2]string{
|
||||
{"CMAKE_BUILD_TYPE", "Release"},
|
||||
|
||||
{"LLVM_HOST_TRIPLE", `"${ROSA_TRIPLE}"`},
|
||||
{"LLVM_DEFAULT_TARGET_TRIPLE", `"${ROSA_TRIPLE}"`},
|
||||
}
|
||||
if len(projects) > 0 {
|
||||
cache = append(cache,
|
||||
[2]string{"LLVM_ENABLE_PROJECTS", `"${ROSA_LLVM_PROJECTS}"`})
|
||||
}
|
||||
if len(runtimes) > 0 {
|
||||
cache = append(cache,
|
||||
[2]string{"LLVM_ENABLE_RUNTIMES", `"${ROSA_LLVM_RUNTIMES}"`})
|
||||
}
|
||||
|
||||
cmakeAppend := []string{"llvm"}
|
||||
if attr.append != nil {
|
||||
cmakeAppend = attr.append
|
||||
} else {
|
||||
cache = append(cache,
|
||||
[2]string{"LLVM_ENABLE_LIBCXX", "ON"},
|
||||
[2]string{"LLVM_USE_LINKER", "lld"},
|
||||
|
||||
[2]string{"LLVM_INSTALL_BINUTILS_SYMLINKS", "ON"},
|
||||
[2]string{"LLVM_INSTALL_CCTOOLS_SYMLINKS", "ON"},
|
||||
)
|
||||
}
|
||||
|
||||
if attr.flags&llvmProjectClang != 0 {
|
||||
cache = append(cache,
|
||||
[2]string{"CLANG_DEFAULT_LINKER", "lld"},
|
||||
[2]string{"CLANG_DEFAULT_CXX_STDLIB", "libc++"},
|
||||
[2]string{"CLANG_DEFAULT_RTLIB", "compiler-rt"},
|
||||
[2]string{"CLANG_DEFAULT_UNWINDLIB", "libunwind"},
|
||||
)
|
||||
}
|
||||
if attr.flags&llvmProjectLld != 0 {
|
||||
script += `
|
||||
ln -s ld.lld /work/system/bin/ld
|
||||
`
|
||||
}
|
||||
if attr.flags&llvmRuntimeCompilerRT != 0 {
|
||||
if attr.append == nil {
|
||||
cache = append(cache,
|
||||
[2]string{"COMPILER_RT_USE_LLVM_UNWINDER", "ON"})
|
||||
}
|
||||
}
|
||||
if attr.flags&llvmRuntimeLibunwind != 0 {
|
||||
cache = append(cache,
|
||||
[2]string{"LIBUNWIND_USE_COMPILER_RT", "ON"})
|
||||
}
|
||||
if attr.flags&llvmRuntimeLibcxx != 0 {
|
||||
cache = append(cache,
|
||||
[2]string{"LIBCXX_HAS_MUSL_LIBC", "ON"},
|
||||
[2]string{"LIBCXX_USE_COMPILER_RT", "ON"},
|
||||
)
|
||||
|
||||
if t > toolchainStage3 {
|
||||
// libcxxabi fails to compile if the compiler finds C++ headers outside
// the /usr prefix, and doing this is easier than overriding CXXFLAGS;
// not using mv here to avoid chown failures
|
||||
scriptEarly += `
|
||||
cp -r /system/include /usr/include && rm -rf /system/include
|
||||
`
|
||||
}
|
||||
}
|
||||
if attr.flags&llvmRuntimeLibcxxABI != 0 {
|
||||
cache = append(cache,
|
||||
[2]string{"LIBCXXABI_USE_COMPILER_RT", "ON"},
|
||||
[2]string{"LIBCXXABI_USE_LLVM_UNWINDER", "ON"},
|
||||
)
|
||||
}
|
||||
|
||||
return t.NewViaCMake("llvm", version, variant, t.NewPatchedSource(
|
||||
"llvmorg", version, pkg.NewHTTPGetTar(
|
||||
nil, "https://github.com/llvm/llvm-project/archive/refs/tags/"+
|
||||
"llvmorg-"+version+".tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
), true, attr.patches...,
|
||||
), &CMakeAttr{
|
||||
Cache: slices.Concat(cache, attr.cmake),
|
||||
Append: cmakeAppend,
|
||||
Extra: stage3Concat(t, attr.extra,
|
||||
t.Load(Libffi),
|
||||
t.Load(Python),
|
||||
t.Load(Perl),
|
||||
t.Load(Diffutils),
|
||||
t.Load(Bash),
|
||||
t.Load(Coreutils),
|
||||
|
||||
t.Load(KernelHeaders),
|
||||
),
|
||||
Prefix: attr.prefix,
|
||||
|
||||
Env: slices.Concat([]string{
|
||||
"ROSA_LLVM_PROJECTS=" + strings.Join(projects, ";"),
|
||||
"ROSA_LLVM_RUNTIMES=" + strings.Join(runtimes, ";"),
|
||||
}, attr.env),
|
||||
ScriptEarly: scriptEarly, Script: script + attr.script,
|
||||
|
||||
Exclusive: true,
|
||||
})
|
||||
}
|
||||
|
||||
// newLLVM returns the LLVM toolchain split across multiple [pkg.Artifact] values.
|
||||
func (t Toolchain) newLLVM() (musl, compilerRT, runtimes, clang pkg.Artifact) {
|
||||
var target string
|
||||
switch runtime.GOARCH {
|
||||
case "386", "amd64":
|
||||
target = "X86"
|
||||
|
||||
default:
|
||||
panic("unsupported target " + runtime.GOARCH)
|
||||
}
|
||||
|
||||
minimalDeps := [][2]string{
|
||||
{"LLVM_ENABLE_ZLIB", "OFF"},
|
||||
{"LLVM_ENABLE_ZSTD", "OFF"},
|
||||
{"LLVM_ENABLE_LIBXML2", "OFF"},
|
||||
}
|
||||
|
||||
compilerRT = t.newLLVMVariant("compiler-rt", &llvmAttr{
|
||||
env: stage3ExclConcat(t, []string{},
|
||||
"LDFLAGS="+earlyLDFLAGS(false),
|
||||
),
|
||||
cmake: [][2]string{
|
||||
// libc++ not yet available
|
||||
{"CMAKE_CXX_COMPILER_TARGET", ""},
|
||||
|
||||
{"COMPILER_RT_BUILD_BUILTINS", "ON"},
|
||||
{"COMPILER_RT_DEFAULT_TARGET_ONLY", "ON"},
|
||||
{"LLVM_ENABLE_PER_TARGET_RUNTIME_DIR", "ON"},
|
||||
|
||||
// does not work without libunwind
|
||||
{"COMPILER_RT_BUILD_CTX_PROFILE", "OFF"},
|
||||
{"COMPILER_RT_BUILD_LIBFUZZER", "OFF"},
|
||||
{"COMPILER_RT_BUILD_MEMPROF", "OFF"},
|
||||
{"COMPILER_RT_BUILD_PROFILE", "OFF"},
|
||||
{"COMPILER_RT_BUILD_SANITIZERS", "OFF"},
|
||||
{"COMPILER_RT_BUILD_XRAY", "OFF"},
|
||||
},
|
||||
append: []string{"compiler-rt"},
|
||||
extra: []pkg.Artifact{t.NewMusl(&MuslAttr{
|
||||
Headers: true,
|
||||
Env: []string{
|
||||
"CC=clang",
|
||||
},
|
||||
})},
|
||||
script: `
|
||||
mkdir -p "${ROSA_INSTALL_PREFIX}/lib/clang/21/lib/"
|
||||
ln -s \
|
||||
"../../../${ROSA_TRIPLE}" \
|
||||
"${ROSA_INSTALL_PREFIX}/lib/clang/21/lib/"
|
||||
|
||||
ln -s \
|
||||
"clang_rt.crtbegin-$(uname -m).o" \
|
||||
"${ROSA_INSTALL_PREFIX}/lib/${ROSA_TRIPLE}/crtbeginS.o"
|
||||
ln -s \
|
||||
"clang_rt.crtend-$(uname -m).o" \
|
||||
"${ROSA_INSTALL_PREFIX}/lib/${ROSA_TRIPLE}/crtendS.o"
|
||||
`,
|
||||
})
|
||||
|
||||
musl = t.NewMusl(&MuslAttr{
|
||||
Extra: []pkg.Artifact{compilerRT},
|
||||
Env: stage3ExclConcat(t, []string{
|
||||
"CC=clang",
|
||||
"LIBCC=/system/lib/clang/21/lib/" +
|
||||
triplet() + "/libclang_rt.builtins.a",
|
||||
"AR=ar",
|
||||
"RANLIB=ranlib",
|
||||
},
|
||||
"LDFLAGS="+earlyLDFLAGS(false),
|
||||
),
|
||||
})
|
||||
|
||||
runtimes = t.newLLVMVariant("runtimes", &llvmAttr{
|
||||
env: stage3ExclConcat(t, []string{},
|
||||
"LDFLAGS="+earlyLDFLAGS(false),
|
||||
),
|
||||
flags: llvmRuntimeLibunwind | llvmRuntimeLibcxx | llvmRuntimeLibcxxABI,
|
||||
cmake: slices.Concat([][2]string{
|
||||
// libc++ not yet available
|
||||
{"CMAKE_CXX_COMPILER_WORKS", "ON"},
|
||||
|
||||
{"LIBCXX_HAS_ATOMIC_LIB", "OFF"},
|
||||
{"LIBCXXABI_HAS_CXA_THREAD_ATEXIT_IMPL", "OFF"},
|
||||
}, minimalDeps),
|
||||
append: []string{"runtimes"},
|
||||
extra: []pkg.Artifact{
|
||||
compilerRT,
|
||||
musl,
|
||||
},
|
||||
})
|
||||
|
||||
clang = t.newLLVMVariant("clang", &llvmAttr{
|
||||
flags: llvmProjectClang | llvmProjectLld,
|
||||
env: stage3ExclConcat(t, []string{},
|
||||
"CFLAGS="+earlyCFLAGS,
|
||||
"CXXFLAGS="+earlyCXXFLAGS(),
|
||||
"LDFLAGS="+earlyLDFLAGS(false),
|
||||
),
|
||||
cmake: slices.Concat([][2]string{
|
||||
{"LLVM_TARGETS_TO_BUILD", target},
|
||||
{"CMAKE_CROSSCOMPILING", "OFF"},
|
||||
{"CXX_SUPPORTS_CUSTOM_LINKER", "ON"},
|
||||
}, minimalDeps),
|
||||
extra: []pkg.Artifact{
|
||||
musl,
|
||||
compilerRT,
|
||||
runtimes,
|
||||
t.Load(Git),
|
||||
},
|
||||
script: `
|
||||
ln -s clang /work/system/bin/cc
|
||||
ln -s clang++ /work/system/bin/c++
|
||||
|
||||
ninja check-all
|
||||
`,
|
||||
|
||||
patches: [][2]string{
|
||||
{"xfail-broken-tests", `diff --git a/clang/test/Modules/timestamps.c b/clang/test/Modules/timestamps.c
|
||||
index 50fdce630255..4b4465a75617 100644
|
||||
--- a/clang/test/Modules/timestamps.c
|
||||
+++ b/clang/test/Modules/timestamps.c
|
||||
@@ -1,3 +1,5 @@
|
||||
+// XFAIL: target={{.*-rosa-linux-musl}}
|
||||
+
|
||||
/// Verify timestamps that gets embedded in the module
|
||||
#include <c-header.h>
|
||||
|
||||
`},
|
||||
|
||||
{"path-system-include", `diff --git a/clang/lib/Driver/ToolChains/Linux.cpp b/clang/lib/Driver/ToolChains/Linux.cpp
|
||||
index cdbf21fb9026..dd052858700d 100644
|
||||
--- a/clang/lib/Driver/ToolChains/Linux.cpp
|
||||
+++ b/clang/lib/Driver/ToolChains/Linux.cpp
|
||||
@@ -773,6 +773,12 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
|
||||
addExternCSystemInclude(
|
||||
DriverArgs, CC1Args,
|
||||
concat(SysRoot, "/usr/include", MultiarchIncludeDir));
|
||||
+ if (!MultiarchIncludeDir.empty() &&
|
||||
+ D.getVFS().exists(concat(SysRoot, "/system/include", MultiarchIncludeDir)))
|
||||
+ addExternCSystemInclude(
|
||||
+ DriverArgs, CC1Args,
|
||||
+ concat(SysRoot, "/system/include", MultiarchIncludeDir));
|
||||
+
|
||||
|
||||
if (getTriple().getOS() == llvm::Triple::RTEMS)
|
||||
return;
|
||||
@@ -783,6 +789,7 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
|
||||
addExternCSystemInclude(DriverArgs, CC1Args, concat(SysRoot, "/include"));
|
||||
|
||||
addExternCSystemInclude(DriverArgs, CC1Args, concat(SysRoot, "/usr/include"));
|
||||
+ addExternCSystemInclude(DriverArgs, CC1Args, concat(SysRoot, "/system/include"));
|
||||
|
||||
if (!DriverArgs.hasArg(options::OPT_nobuiltininc) && getTriple().isMusl())
|
||||
addSystemInclude(DriverArgs, CC1Args, ResourceDirInclude);
|
||||
`},
|
||||
|
||||
{"path-system-libraries", `diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp
|
||||
index 8d3775de9be5..1e126e2d6f24 100644
|
||||
--- a/clang/lib/Driver/ToolChains/CommonArgs.cpp
|
||||
+++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp
|
||||
@@ -463,6 +463,15 @@ void tools::AddLinkerInputs(const ToolChain &TC, const InputInfoList &Inputs,
|
||||
if (!TC.isCrossCompiling())
|
||||
addDirectoryList(Args, CmdArgs, "-L", "LIBRARY_PATH");
|
||||
|
||||
+ const std::string RosaSuffix = "-rosa-linux-musl";
|
||||
+ if (TC.getTripleString().size() > RosaSuffix.size() &&
|
||||
+ std::equal(RosaSuffix.rbegin(), RosaSuffix.rend(), TC.getTripleString().rbegin())) {
|
||||
+ CmdArgs.push_back("-rpath");
|
||||
+ CmdArgs.push_back("/system/lib");
|
||||
+ CmdArgs.push_back("-rpath");
|
||||
+ CmdArgs.push_back(("/system/lib/" + TC.getTripleString()).c_str());
|
||||
+ }
|
||||
+
|
||||
for (const auto &II : Inputs) {
|
||||
// If the current tool chain refers to an OpenMP offloading host, we
|
||||
// should ignore inputs that refer to OpenMP offloading devices -
|
||||
diff --git a/clang/lib/Driver/ToolChains/Linux.cpp b/clang/lib/Driver/ToolChains/Linux.cpp
|
||||
index 8ac8d4eb9181..795995bb53cb 100644
|
||||
--- a/clang/lib/Driver/ToolChains/Linux.cpp
|
||||
+++ b/clang/lib/Driver/ToolChains/Linux.cpp
|
||||
@@ -324,6 +324,7 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
|
||||
Generic_GCC::AddMultilibPaths(D, SysRoot, "libo32", MultiarchTriple, Paths);
|
||||
addPathIfExists(D, concat(SysRoot, "/libo32"), Paths);
|
||||
addPathIfExists(D, concat(SysRoot, "/usr/libo32"), Paths);
|
||||
+ addPathIfExists(D, concat(SysRoot, "/system/libo32"), Paths);
|
||||
}
|
||||
Generic_GCC::AddMultilibPaths(D, SysRoot, OSLibDir, MultiarchTriple, Paths);
|
||||
|
||||
@@ -343,16 +344,20 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
|
||||
|
||||
addPathIfExists(D, concat(SysRoot, "/usr/lib", MultiarchTriple), Paths);
|
||||
addPathIfExists(D, concat(SysRoot, "/usr", OSLibDir), Paths);
|
||||
+ addPathIfExists(D, concat(SysRoot, "/system/lib", MultiarchTriple), Paths);
|
||||
+ addPathIfExists(D, concat(SysRoot, "/system", OSLibDir), Paths);
|
||||
if (IsRISCV) {
|
||||
StringRef ABIName = tools::riscv::getRISCVABI(Args, Triple);
|
||||
addPathIfExists(D, concat(SysRoot, "/", OSLibDir, ABIName), Paths);
|
||||
addPathIfExists(D, concat(SysRoot, "/usr", OSLibDir, ABIName), Paths);
|
||||
+ addPathIfExists(D, concat(SysRoot, "/system", OSLibDir, ABIName), Paths);
|
||||
}
|
||||
|
||||
Generic_GCC::AddMultiarchPaths(D, SysRoot, OSLibDir, Paths);
|
||||
|
||||
addPathIfExists(D, concat(SysRoot, "/lib"), Paths);
|
||||
addPathIfExists(D, concat(SysRoot, "/usr/lib"), Paths);
|
||||
+ addPathIfExists(D, concat(SysRoot, "/system/lib"), Paths);
|
||||
}
|
||||
|
||||
ToolChain::RuntimeLibType Linux::GetDefaultRuntimeLibType() const {
|
||||
@@ -457,6 +462,11 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
|
||||
return Triple.isArch64Bit() ? "/system/bin/linker64" : "/system/bin/linker";
|
||||
}
|
||||
if (Triple.isMusl()) {
|
||||
+ const std::string RosaSuffix = "-rosa-linux-musl";
|
||||
+ if (Triple.str().size() > RosaSuffix.size() &&
|
||||
+ std::equal(RosaSuffix.rbegin(), RosaSuffix.rend(), Triple.str().rbegin()))
|
||||
+ return "/system/bin/linker";
|
||||
+
|
||||
std::string ArchName;
|
||||
bool IsArm = false;
|
||||
|
||||
diff --git a/clang/tools/clang-installapi/Options.cpp b/clang/tools/clang-installapi/Options.cpp
|
||||
index 64324a3f8b01..15ce70b68217 100644
|
||||
--- a/clang/tools/clang-installapi/Options.cpp
|
||||
+++ b/clang/tools/clang-installapi/Options.cpp
|
||||
@@ -515,7 +515,7 @@ bool Options::processFrontendOptions(InputArgList &Args) {
|
||||
FEOpts.FwkPaths = std::move(FrameworkPaths);
|
||||
|
||||
// Add default framework/library paths.
|
||||
- PathSeq DefaultLibraryPaths = {"/usr/lib", "/usr/local/lib"};
|
||||
+ PathSeq DefaultLibraryPaths = {"/usr/lib", "/system/lib", "/usr/local/lib"};
|
||||
PathSeq DefaultFrameworkPaths = {"/Library/Frameworks",
|
||||
"/System/Library/Frameworks"};
|
||||
|
||||
`},
|
||||
},
|
||||
})
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
// llvm stores the result of Toolchain.newLLVM.
|
||||
llvm [_toolchainEnd][4]pkg.Artifact
|
||||
// llvmOnce is for lazy initialisation of llvm.
|
||||
llvmOnce [_toolchainEnd]sync.Once
|
||||
)
|
||||
|
||||
// NewLLVM returns the LLVM toolchain split across multiple [pkg.Artifact] values.
|
||||
func (t Toolchain) NewLLVM() (musl, compilerRT, runtimes, clang pkg.Artifact) {
|
||||
llvmOnce[t].Do(func() {
|
||||
llvm[t][0], llvm[t][1], llvm[t][2], llvm[t][3] = t.newLLVM()
|
||||
})
|
||||
return llvm[t][0], llvm[t][1], llvm[t][2], llvm[t][3]
|
||||
}
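
// Usage sketch, not part of this change: Toolchain.New in rosa.go consumes the
// four artifacts in this order when assembling the intermediate and Std
// toolchains; repeated calls for the same Toolchain return the cached results
// via llvmOnce. The exampleNewLLVM name is hypothetical.
func exampleNewLLVM(t Toolchain) []pkg.Artifact {
	musl, compilerRT, runtimes, clang := t.NewLLVM()
	return []pkg.Artifact{musl, compilerRT, runtimes, clang}
}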
|
||||
internal/rosa/meson.go (Normal file, 27 lines)
@@ -0,0 +1,27 @@
|
||||
package rosa
|
||||
|
||||
import "hakurei.app/internal/pkg"
|
||||
|
||||
func (t Toolchain) newMeson() pkg.Artifact {
|
||||
const (
|
||||
version = "1.10.1"
|
||||
checksum = "w895BXF_icncnXatT_OLCFe2PYEtg4KrKooMgUYdN-nQVvbFX3PvYWHGEpogsHtd"
|
||||
)
|
||||
return t.New("meson-"+version, false, []pkg.Artifact{
|
||||
t.Load(Python),
|
||||
t.Load(Setuptools),
|
||||
}, nil, nil, `
|
||||
cd /usr/src/meson
|
||||
chmod -R +w meson.egg-info
|
||||
python3 setup.py \
|
||||
install \
|
||||
--prefix=/system \
|
||||
--root=/work
|
||||
`, pkg.Path(AbsUsrSrc.Append("meson"), true, pkg.NewHTTPGetTar(
|
||||
nil, "https://github.com/mesonbuild/meson/releases/download/"+
|
||||
version+"/meson-"+version+".tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[Meson] = Toolchain.newMeson }
|
||||
internal/rosa/musl.go (Normal file, 64 lines)
@@ -0,0 +1,64 @@
|
||||
package rosa
|
||||
|
||||
import (
|
||||
"slices"
|
||||
|
||||
"hakurei.app/internal/pkg"
|
||||
)
|
||||
|
||||
// MuslAttr holds the attributes that will be applied to musl.
|
||||
type MuslAttr struct {
|
||||
// Install headers only.
|
||||
Headers bool
|
||||
// Environment variables concatenated with defaults.
|
||||
Env []string
|
||||
// Dependencies concatenated with defaults.
|
||||
Extra []pkg.Artifact
|
||||
}
|
||||
|
||||
// NewMusl returns a [pkg.Artifact] containing an installation of musl libc.
|
||||
func (t Toolchain) NewMusl(attr *MuslAttr) pkg.Artifact {
|
||||
const (
|
||||
version = "1.2.5"
|
||||
checksum = "y6USdIeSdHER_Fw2eT2CNjqShEye85oEg2jnOur96D073ukmIpIqDOLmECQroyDb"
|
||||
)
|
||||
|
||||
if attr == nil {
|
||||
attr = new(MuslAttr)
|
||||
}
|
||||
|
||||
target := "install"
|
||||
script := `
|
||||
mkdir -p /work/system/bin
|
||||
COMPAT_LINKER_NAME="ld-musl-$(uname -m).so.1"
|
||||
ln -vs ../lib/libc.so /work/system/bin/linker
|
||||
ln -vs ../lib/libc.so /work/system/bin/ldd
|
||||
ln -vs libc.so "/work/system/lib/${COMPAT_LINKER_NAME}"
|
||||
rm -v "/work/lib/${COMPAT_LINKER_NAME}"
|
||||
rmdir -v /work/lib
|
||||
`
|
||||
if attr.Headers {
|
||||
target = "install-headers"
|
||||
script = ""
|
||||
}
|
||||
|
||||
return t.New("musl-"+version, false, stage3Concat(t, attr.Extra,
|
||||
t.Load(Make),
|
||||
), nil, slices.Concat([]string{
|
||||
"ROSA_MUSL_TARGET=" + target,
|
||||
}, attr.Env), `
|
||||
# expected to be writable in copies
|
||||
chmod -R +w /usr/src/musl/
|
||||
|
||||
cd "$(mktemp -d)"
|
||||
/usr/src/musl/configure \
|
||||
--prefix=/system \
|
||||
--target="${ROSA_TRIPLE}"
|
||||
make "-j$(nproc)" DESTDIR=/work "${ROSA_MUSL_TARGET}"
|
||||
`+script, pkg.Path(AbsUsrSrc.Append("musl"), true, pkg.NewHTTPGetTar(
|
||||
nil,
|
||||
"https://musl.libc.org/releases/musl-"+version+".tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
)))
|
||||
}
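
// Usage sketch, not part of this change: a headers-only musl artifact (as used
// for the compiler-rt bootstrap in llvm.go) next to a full installation. The
// exampleMusl name is hypothetical.
func exampleMusl(t Toolchain) (headersOnly, full pkg.Artifact) {
	headersOnly = t.NewMusl(&MuslAttr{Headers: true, Env: []string{"CC=clang"}})
	full = t.NewMusl(nil) // a nil attr keeps the default full install target
	return
}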
|
||||
internal/rosa/ninja.go (Normal file, 35 lines)
@@ -0,0 +1,35 @@
|
||||
package rosa
|
||||
|
||||
import "hakurei.app/internal/pkg"
|
||||
|
||||
func (t Toolchain) newNinja() pkg.Artifact {
|
||||
const (
|
||||
version = "1.13.2"
|
||||
checksum = "ygKWMa0YV2lWKiFro5hnL-vcKbc_-RACZuPu0Io8qDvgQlZ0dxv7hPNSFkt4214v"
|
||||
)
|
||||
return t.New("ninja-"+version, false, []pkg.Artifact{
|
||||
t.Load(CMake),
|
||||
t.Load(Python),
|
||||
}, nil, nil, `
|
||||
chmod -R +w /usr/src/ninja/
|
||||
mkdir -p /work/system/bin/ && cd /work/system/bin/
|
||||
python3 /usr/src/ninja/configure.py \
|
||||
--bootstrap \
|
||||
--gtest-source-dir=/usr/src/googletest
|
||||
./ninja all
|
||||
./ninja_test
|
||||
`, pkg.Path(AbsUsrSrc.Append("googletest"), false,
|
||||
pkg.NewHTTPGetTar(
|
||||
nil, "https://github.com/google/googletest/releases/download/"+
|
||||
"v1.16.0/googletest-1.16.0.tar.gz",
|
||||
mustDecode("NjLGvSbgPy_B-y-o1hdanlzEzaYeStFcvFGxpYV3KYlhrWWFRcugYhM3ZMzOA9B_"),
|
||||
pkg.TarGzip,
|
||||
)), pkg.Path(AbsUsrSrc.Append("ninja"), true,
|
||||
pkg.NewHTTPGetTar(
|
||||
nil, "https://github.com/ninja-build/ninja/archive/refs/tags/"+
|
||||
"v"+version+".tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[Ninja] = Toolchain.newNinja }
|
||||
internal/rosa/perl.go (Normal file, 37 lines)
@@ -0,0 +1,37 @@
|
||||
package rosa
|
||||
|
||||
import "hakurei.app/internal/pkg"
|
||||
|
||||
func (t Toolchain) newPerl() pkg.Artifact {
|
||||
const (
|
||||
version = "5.42.0"
|
||||
checksum = "2KR7Jbpk-ZVn1a30LQRwbgUvg2AXlPQZfzrqCr31qD5-yEsTwVQ_W76eZH-EdxM9"
|
||||
)
|
||||
return t.New("perl-"+version, false, []pkg.Artifact{
|
||||
t.Load(Make),
|
||||
}, nil, nil, `
|
||||
chmod -R +w /usr/src/perl && cd /usr/src/perl
|
||||
|
||||
echo 'print STDOUT "1..0 # Skip broken test\n";' > ext/Pod-Html/t/htmldir3.t
|
||||
|
||||
./Configure \
|
||||
-des \
|
||||
-Dprefix=/system \
|
||||
-Dcc="clang" \
|
||||
-Dcflags='--std=gnu99' \
|
||||
-Dldflags="${LDFLAGS}" \
|
||||
-Doptimize='-O2 -fno-strict-aliasing' \
|
||||
-Duseithreads
|
||||
make \
|
||||
"-j$(nproc)" \
|
||||
TEST_JOBS=256 \
|
||||
test_harness
|
||||
make DESTDIR=/work install
|
||||
`, pkg.Path(AbsUsrSrc.Append("perl"), true, pkg.NewHTTPGetTar(
|
||||
nil,
|
||||
"https://www.cpan.org/src/5.0/perl-"+version+".tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[Perl] = Toolchain.newPerl }
|
||||
internal/rosa/pkg-config.go (Normal file, 29 lines)
@@ -0,0 +1,29 @@
|
||||
package rosa
|
||||
|
||||
import "hakurei.app/internal/pkg"
|
||||
|
||||
func (t Toolchain) newPkgConfig() pkg.Artifact {
|
||||
const (
|
||||
version = "0.29.2"
|
||||
checksum = "gi7yAvkwo20Inys1tHbeYZ3Wjdm5VPkrnO0Q6_QZPCAwa1zrA8F4a63cdZDd-717"
|
||||
)
|
||||
return t.New("pkg-config-"+version, false, []pkg.Artifact{
|
||||
t.Load(Make),
|
||||
}, nil, nil, `
|
||||
cd "$(mktemp -d)"
|
||||
/usr/src/pkg-config/configure \
|
||||
--prefix=/system \
|
||||
--build="${ROSA_TRIPLE}" \
|
||||
CFLAGS='-Wno-int-conversion' \
|
||||
--with-internal-glib
|
||||
make "-j$(nproc)" check
|
||||
make DESTDIR=/work install
|
||||
`, pkg.Path(AbsUsrSrc.Append("pkg-config"), true, pkg.NewHTTPGetTar(
|
||||
nil,
|
||||
"https://pkgconfig.freedesktop.org/releases/"+
|
||||
"pkg-config-"+version+".tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[PkgConfig] = Toolchain.newPkgConfig }
|
||||
internal/rosa/python.go (Normal file, 87 lines)
@@ -0,0 +1,87 @@
|
||||
package rosa
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"hakurei.app/internal/pkg"
|
||||
)
|
||||
|
||||
func (t Toolchain) newPython() pkg.Artifact {
|
||||
const (
|
||||
version = "3.14.2"
|
||||
checksum = "7nZunVMGj0viB-CnxpcRego2C90X5wFsMTgsoewd5z-KSZY2zLuqaBwG-14zmKys"
|
||||
)
|
||||
skipTests := []string{
|
||||
// requires internet access (http://www.pythontest.net/)
|
||||
"test_asyncio",
|
||||
"test_socket",
|
||||
"test_urllib2",
|
||||
"test_urllibnet",
|
||||
"test_urllib2net",
|
||||
|
||||
// makes assumptions about uid_map/gid_map
|
||||
"test_os",
|
||||
"test_subprocess",
|
||||
|
||||
// somehow picks up mtime of source code
|
||||
"test_zipfile",
|
||||
|
||||
// requires gcc
|
||||
"test_ctypes",
|
||||
|
||||
// breaks on llvm
|
||||
"test_dbm_gnu",
|
||||
}
|
||||
return t.New("python-"+version, false, []pkg.Artifact{
|
||||
t.Load(Make),
|
||||
t.Load(Zlib),
|
||||
t.Load(Libffi),
|
||||
}, nil, []string{
|
||||
"EXTRATESTOPTS=-j0 -x " + strings.Join(skipTests, " -x "),
|
||||
|
||||
// _ctypes appears to infer something from the linker name
|
||||
"LDFLAGS=-Wl,--dynamic-linker=/system/lib/" +
|
||||
"ld-musl-" + linuxArch() + ".so.1",
|
||||
}, `
|
||||
# test_synopsis_sourceless assumes this is writable and checks __pycache__
|
||||
chmod -R +w /usr/src/python/
|
||||
|
||||
export HOME="$(mktemp -d)"
|
||||
cd "$(mktemp -d)"
|
||||
/usr/src/python/configure \
|
||||
--prefix=/system \
|
||||
--build="${ROSA_TRIPLE}"
|
||||
make "-j$(nproc)"
|
||||
make test
|
||||
make DESTDIR=/work install
|
||||
`, pkg.Path(AbsUsrSrc.Append("python"), true,
|
||||
pkg.NewHTTPGetTar(
|
||||
nil, "https://www.python.org/ftp/python/"+version+
|
||||
"/Python-"+version+".tgz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[Python] = Toolchain.newPython }
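
// Sketch, not part of this change: the EXTRATESTOPTS entry above turns each
// skipTests entry into an -x exclusion for the CPython test runner, e.g.
// "EXTRATESTOPTS=-j0 -x test_asyncio -x test_socket ... -x test_dbm_gnu".
// The exampleExtraTestOpts name is hypothetical.
func exampleExtraTestOpts(skipTests []string) string {
	return "EXTRATESTOPTS=-j0 -x " + strings.Join(skipTests, " -x ")
}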
|
||||
|
||||
func (t Toolchain) newSetuptools() pkg.Artifact {
|
||||
const (
|
||||
version = "80.10.1"
|
||||
checksum = "p3rlwEmy1krcUH1KabprQz1TCYjJ8ZUjOQknQsWh3q-XEqLGEd3P4VrCc7ouHGXU"
|
||||
)
|
||||
return t.New("setuptools-"+version, false, []pkg.Artifact{
|
||||
t.Load(Python),
|
||||
}, nil, nil, `
|
||||
pip3 install \
|
||||
--no-index \
|
||||
--prefix=/system \
|
||||
--root=/work \
|
||||
/usr/src/setuptools
|
||||
`, pkg.Path(AbsUsrSrc.Append("setuptools"), true, pkg.NewHTTPGetTar(
|
||||
nil, "https://github.com/pypa/setuptools/archive/refs/tags/"+
|
||||
"v"+version+".tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[Setuptools] = Toolchain.newSetuptools }
|
||||
internal/rosa/rosa.go (Normal file, 314 lines)
@@ -0,0 +1,314 @@
|
||||
// Package rosa provides Rosa OS toolchain artifacts and miscellaneous software.
|
||||
package rosa
|
||||
|
||||
import (
|
||||
"log"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"hakurei.app/container/fhs"
|
||||
"hakurei.app/internal/pkg"
|
||||
)
|
||||
|
||||
const (
|
||||
// kindEtc is the kind of [pkg.Artifact] of cureEtc.
|
||||
kindEtc = iota + pkg.KindCustomOffset
|
||||
|
||||
// kindBusyboxBin is the kind of [pkg.Artifact] of busyboxBin.
|
||||
kindBusyboxBin
|
||||
)
|
||||
|
||||
// mustDecode is like [pkg.MustDecode], but substitutes the zero [pkg.Checksum]
// for an empty checksum string and prints a warning.
|
||||
func mustDecode(s string) pkg.Checksum {
|
||||
var fallback = pkg.Checksum{}
|
||||
if s == "" {
|
||||
log.Println(
|
||||
"falling back to",
|
||||
pkg.Encode(fallback),
|
||||
"for unpopulated checksum",
|
||||
)
|
||||
return fallback
|
||||
}
|
||||
return pkg.MustDecode(s)
|
||||
}
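
// Usage sketch, not part of this change: an empty checksum string logs a
// warning and yields the zero [pkg.Checksum], which is useful while a new
// artifact's hash is not yet known; a populated string behaves exactly like
// [pkg.MustDecode]. The checksum shown is the musl one reused for
// illustration; the exampleMustDecode name is hypothetical.
func exampleMustDecode() (placeholder, populated pkg.Checksum) {
	placeholder = mustDecode("") // zero checksum, with a warning
	populated = mustDecode("y6USdIeSdHER_Fw2eT2CNjqShEye85oEg2jnOur96D073ukmIpIqDOLmECQroyDb")
	return
}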
|
||||
|
||||
var (
|
||||
// AbsUsrSrc is the conventional directory to place source code under.
|
||||
AbsUsrSrc = fhs.AbsUsr.Append("src")
|
||||
|
||||
// AbsSystem is the Rosa OS installation prefix.
|
||||
AbsSystem = fhs.AbsRoot.Append("system")
|
||||
)
|
||||
|
||||
// linuxArch returns the architecture name used by linux corresponding to
|
||||
// [runtime.GOARCH].
|
||||
func linuxArch() string {
|
||||
switch runtime.GOARCH {
|
||||
case "amd64":
|
||||
return "x86_64"
|
||||
|
||||
default:
|
||||
panic("unsupported target " + runtime.GOARCH)
|
||||
}
|
||||
}
|
||||
|
||||
// triplet returns the Rosa OS host triple corresponding to [runtime.GOARCH].
|
||||
func triplet() string {
|
||||
return linuxArch() + "-rosa-linux-musl"
|
||||
}
|
||||
|
||||
const (
|
||||
// EnvTriplet holds the return value of triplet.
|
||||
EnvTriplet = "ROSA_TRIPLE"
|
||||
)
|
||||
|
||||
// earlyLDFLAGS returns LDFLAGS corresponding to triplet.
|
||||
func earlyLDFLAGS(static bool) string {
|
||||
s := "-fuse-ld=lld " +
|
||||
"-L/system/lib -Wl,-rpath=/system/lib " +
|
||||
"-L/system/lib/" + triplet() + " " +
|
||||
"-Wl,-rpath=/system/lib/" + triplet() + " " +
|
||||
"-rtlib=compiler-rt " +
|
||||
"-unwindlib=libunwind " +
|
||||
"-Wl,--as-needed"
|
||||
if !static {
|
||||
s += " -Wl,--dynamic-linker=/system/bin/linker"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// earlyCFLAGS is reference CFLAGS for the stage3 toolchain.
|
||||
const earlyCFLAGS = "-Qunused-arguments " +
|
||||
"-isystem/system/include"
|
||||
|
||||
// earlyCXXFLAGS returns reference CXXFLAGS for the stage3 toolchain
|
||||
// corresponding to [runtime.GOARCH].
|
||||
func earlyCXXFLAGS() string {
|
||||
return "--start-no-unused-arguments " +
|
||||
"-stdlib=libc++ " +
|
||||
"--end-no-unused-arguments " +
|
||||
"-isystem/system/include/c++/v1 " +
|
||||
"-isystem/system/include/" + triplet() + "/c++/v1 " +
|
||||
"-isystem/system/include "
|
||||
}
|
||||
|
||||
// Toolchain denotes the infrastructure to compile a [pkg.Artifact] on.
|
||||
type Toolchain uintptr
|
||||
|
||||
const (
|
||||
// toolchainBusybox denotes a busybox installation from the busyboxBin
|
||||
// binary distribution. This is for decompressing unsupported formats.
|
||||
toolchainBusybox Toolchain = iota
|
||||
|
||||
// toolchainStage3 denotes the Gentoo stage3 toolchain. Special care must be
|
||||
// taken to compile correctly against this toolchain.
|
||||
toolchainStage3
|
||||
|
||||
// toolchainIntermediate denotes the intermediate toolchain compiled against
|
||||
// toolchainStage3. This toolchain should be functionally identical to [Std]
|
||||
// and is used to bootstrap [Std].
|
||||
toolchainIntermediate
|
||||
|
||||
// Std denotes the standard Rosa OS toolchain.
|
||||
Std
|
||||
|
||||
// _toolchainEnd is the total number of toolchains available and does not
|
||||
// denote a valid toolchain.
|
||||
_toolchainEnd
|
||||
)
|
||||
|
||||
// stage3Concat concatenates s and values. If the current toolchain is
|
||||
// toolchainStage3, stage3Concat returns s as is.
|
||||
func stage3Concat[S ~[]E, E any](t Toolchain, s S, values ...E) S {
|
||||
if t == toolchainStage3 {
|
||||
return s
|
||||
}
|
||||
return slices.Concat(s, values)
|
||||
}
|
||||
|
||||
// stage3ExclConcat concatenates s and values. If the current toolchain is not
|
||||
// toolchainStage3, stage3ExclConcat returns s as is.
|
||||
func stage3ExclConcat[S ~[]E, E any](t Toolchain, s S, values ...E) S {
|
||||
if t == toolchainStage3 {
|
||||
return slices.Concat(s, values)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// lastIndexFunc is like [strings.LastIndexFunc] but for [slices].
|
||||
func lastIndexFunc[S ~[]E, E any](s S, f func(E) bool) (i int) {
|
||||
if i = slices.IndexFunc(s, f); i < 0 {
|
||||
return
|
||||
}
|
||||
if i0 := lastIndexFunc[S](s[i+1:], f); i0 >= 0 {
|
||||
i += 1 + i0 // translate the result back to an index into s
|
||||
}
|
||||
return
|
||||
}
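
// Worked example, not part of this change: the recursive result is translated
// back to an index into the original slice, so the last matching element is
// reported. The exampleLastIndexFunc name is hypothetical.
func exampleLastIndexFunc() int {
	env := []string{"PATH=/bin", "HOME=/root", "PATH=/usr/bin"}
	return lastIndexFunc(env, func(s string) bool {
		return strings.HasPrefix(s, "PATH=")
	}) // 2, the index of the second PATH entry
}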
|
||||
|
||||
// fixupEnviron fixes up PATH, prepends extras and returns the resulting slice.
|
||||
func fixupEnviron(env, extras []string, paths ...string) []string {
|
||||
const pathPrefix = "PATH="
|
||||
pathVal := strings.Join(paths, ":")
|
||||
|
||||
if i := lastIndexFunc(env, func(s string) bool {
|
||||
return strings.HasPrefix(s, pathPrefix)
|
||||
}); i < 0 {
|
||||
env = append(env, pathPrefix+pathVal)
|
||||
} else {
|
||||
if len(env[i]) == len(pathPrefix) {
|
||||
env[i] = pathPrefix + pathVal
|
||||
} else {
|
||||
env[i] += ":" + pathVal
|
||||
}
|
||||
}
|
||||
|
||||
return append(extras, env...)
|
||||
}
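
// Usage sketch, not part of this change: an existing PATH entry is extended
// with the given paths and extras are prepended. The exampleFixupEnviron name
// is hypothetical.
func exampleFixupEnviron() []string {
	return fixupEnviron(
		[]string{"PATH=/usr/lib/llvm/21/bin"},
		[]string{EnvTriplet + "=" + triplet()},
		"/system/bin", "/bin",
	)
	// ["ROSA_TRIPLE=...", "PATH=/usr/lib/llvm/21/bin:/system/bin:/bin"]
}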
|
||||
|
||||
// absCureScript is the absolute pathname [Toolchain.New] places the fixed-up
|
||||
// build script under.
|
||||
var absCureScript = fhs.AbsUsrBin.Append(".cure-script")
|
||||
|
||||
// New returns a [pkg.Artifact] compiled on this toolchain.
|
||||
func (t Toolchain) New(
|
||||
name string,
|
||||
exclusive bool,
|
||||
extra []pkg.Artifact,
|
||||
checksum *pkg.Checksum,
|
||||
env []string,
|
||||
script string,
|
||||
|
||||
paths ...pkg.ExecPath,
|
||||
) pkg.Artifact {
|
||||
const lcMessages = "LC_MESSAGES=C.UTF-8"
|
||||
|
||||
var (
|
||||
path = AbsSystem.Append("bin", "busybox")
|
||||
args = []string{"hush", absCureScript.String()}
|
||||
support []pkg.Artifact
|
||||
)
|
||||
switch t {
|
||||
case toolchainBusybox:
|
||||
name += "-early"
|
||||
support = slices.Concat([]pkg.Artifact{newBusyboxBin()}, extra)
|
||||
env = fixupEnviron(env, nil, "/system/bin")
|
||||
|
||||
case toolchainStage3:
|
||||
name += "-boot"
|
||||
const (
|
||||
version = "20260111T160052Z"
|
||||
checksum = "c5_FwMnRN8RZpTdBLGYkL4RR8ampdaZN2JbkgrFLe8-QHQAVQy08APVvIL6eT7KW"
|
||||
)
|
||||
path = fhs.AbsRoot.Append("bin", "bash")
|
||||
args[0] = "bash"
|
||||
support = slices.Concat([]pkg.Artifact{
|
||||
cureEtc{},
|
||||
toolchainBusybox.New("stage3-"+version, false, nil, nil, nil, `
|
||||
tar -C /work -xf /usr/src/stage3.tar.xz
|
||||
rm -rf /work/dev/ /work/proc/
|
||||
ln -vs ../usr/bin /work/bin
|
||||
`, pkg.Path(AbsUsrSrc.Append("stage3.tar.xz"), false,
|
||||
pkg.NewHTTPGet(
|
||||
nil, "https://distfiles.gentoo.org/releases/"+
|
||||
runtime.GOARCH+"/autobuilds/"+version+
|
||||
"/stage3-"+runtime.GOARCH+"-musl-llvm-"+version+".tar.xz",
|
||||
mustDecode(checksum),
|
||||
),
|
||||
)),
|
||||
}, extra)
|
||||
env = fixupEnviron(env, []string{
|
||||
EnvTriplet + "=" + triplet(),
|
||||
lcMessages,
|
||||
"LDFLAGS=" + earlyLDFLAGS(true),
|
||||
}, "/system/bin",
|
||||
"/usr/bin",
|
||||
"/usr/lib/llvm/21/bin",
|
||||
)
|
||||
|
||||
case toolchainIntermediate, Std:
|
||||
if t < Std {
|
||||
name += "-std"
|
||||
}
|
||||
|
||||
boot := t - 1
|
||||
musl, compilerRT, runtimes, clang := boot.NewLLVM()
|
||||
support = slices.Concat(extra, []pkg.Artifact{
|
||||
cureEtc{newIANAEtc()},
|
||||
musl,
|
||||
compilerRT,
|
||||
runtimes,
|
||||
clang,
|
||||
boot.Load(Busybox),
|
||||
})
|
||||
env = fixupEnviron(env, []string{
|
||||
EnvTriplet + "=" + triplet(),
|
||||
lcMessages,
|
||||
|
||||
"AR=ar",
|
||||
"RANLIB=ranlib",
|
||||
"LIBCC=/system/lib/clang/21/lib/" + triplet() +
|
||||
"/libclang_rt.builtins.a",
|
||||
}, "/system/bin", "/bin")
|
||||
|
||||
default:
|
||||
panic("unsupported toolchain " + strconv.Itoa(int(t)))
|
||||
}
|
||||
|
||||
return pkg.NewExec(
|
||||
name, checksum, pkg.ExecTimeoutMax, exclusive,
|
||||
fhs.AbsRoot, env,
|
||||
path, args,
|
||||
|
||||
slices.Concat([]pkg.ExecPath{pkg.Path(
|
||||
fhs.AbsRoot, true,
|
||||
support...,
|
||||
), pkg.Path(
|
||||
absCureScript, false,
|
||||
pkg.NewFile(".cure-script", []byte("set -e\n"+script)),
|
||||
)}, paths)...,
|
||||
)
|
||||
}
|
||||
|
||||
// NewPatchedSource returns a [pkg.Artifact] of source with patches applied. If
// passthrough is true and no patches are given, source is returned as is.
|
||||
func (t Toolchain) NewPatchedSource(
|
||||
name, version string,
|
||||
source pkg.Artifact,
|
||||
passthrough bool,
|
||||
patches ...[2]string,
|
||||
) pkg.Artifact {
|
||||
if passthrough && len(patches) == 0 {
|
||||
return source
|
||||
}
|
||||
|
||||
paths := make([]pkg.ExecPath, len(patches)+1)
|
||||
for i, p := range patches {
|
||||
paths[i+1] = pkg.Path(
|
||||
AbsUsrSrc.Append(name+"-patches", p[0]+".patch"), false,
|
||||
pkg.NewFile(p[0]+".patch", []byte(p[1])),
|
||||
)
|
||||
}
|
||||
paths[0] = pkg.Path(AbsUsrSrc.Append(name), false, source)
|
||||
|
||||
aname := name + "-" + version + "-src"
|
||||
script := `
|
||||
cp -r /usr/src/` + name + `/. /work/.
|
||||
chmod -R +w /work && cd /work
|
||||
`
|
||||
if len(paths) > 1 {
|
||||
script += `
|
||||
cat /usr/src/` + name + `-patches/* | \
|
||||
patch \
|
||||
-p 1 \
|
||||
--ignore-whitespace
|
||||
`
|
||||
aname += "-patched"
|
||||
}
|
||||
return t.New(aname, false, stage3Concat(t, []pkg.Artifact{},
|
||||
t.Load(Patch),
|
||||
), nil, nil, script, paths...)
|
||||
}
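
// Usage sketch, not part of this change: wrapping a source artifact with a
// single inline patch; with passthrough set and no patches given, source is
// returned unchanged. The example name, version and patch body are
// placeholders.
func examplePatchedSource(t Toolchain, source pkg.Artifact) pkg.Artifact {
	return t.NewPatchedSource("example", "1.0", source, true,
		[2]string{"fix-build", "diff --git a/Makefile b/Makefile\n"},
	)
}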
|
||||
internal/rosa/rsync.go (Normal file, 29 lines)
@@ -0,0 +1,29 @@
|
||||
package rosa
|
||||
|
||||
import "hakurei.app/internal/pkg"
|
||||
|
||||
func (t Toolchain) newRsync() pkg.Artifact {
|
||||
const (
|
||||
version = "3.4.1"
|
||||
checksum = "VBlTsBWd9z3r2-ex7GkWeWxkUc5OrlgDzikAC0pK7ufTjAJ0MbmC_N04oSVTGPiv"
|
||||
)
|
||||
return t.New("rsync-"+version, false, []pkg.Artifact{
|
||||
t.Load(Make),
|
||||
}, nil, nil, `
|
||||
cd "$(mktemp -d)"
|
||||
/usr/src/rsync/configure --prefix=/system \
|
||||
--build="${ROSA_TRIPLE}" \
|
||||
--disable-openssl \
|
||||
--disable-xxhash \
|
||||
--disable-zstd \
|
||||
--disable-lz4
|
||||
make "-j${nproc}"
|
||||
make DESTDIR=/work install
|
||||
`, pkg.Path(AbsUsrSrc.Append("rsync"), false, pkg.NewHTTPGetTar(
|
||||
nil,
|
||||
"https://download.samba.org/pub/rsync/src/rsync-"+version+".tar.gz",
|
||||
mustDecode(checksum),
|
||||
pkg.TarGzip,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[Rsync] = Toolchain.newRsync }
|
||||
internal/rosa/wayland.go (Normal file, 82 lines)
@@ -0,0 +1,82 @@
|
||||
package rosa
|
||||
|
||||
import "hakurei.app/internal/pkg"
|
||||
|
||||
func (t Toolchain) newWayland() pkg.Artifact {
|
||||
const (
|
||||
version = "1.24.0"
|
||||
checksum = "JxgLiFRRGw2D3uhVw8ZeDbs3V7K_d4z_ypDog2LBqiA_5y2vVbUAk5NT6D5ozm0m"
|
||||
)
|
||||
return t.New("wayland-"+version, false, []pkg.Artifact{
|
||||
t.Load(Python),
|
||||
t.Load(Meson),
|
||||
t.Load(PkgConfig),
|
||||
t.Load(CMake),
|
||||
t.Load(Ninja),
|
||||
|
||||
t.Load(Libffi),
|
||||
t.Load(Libexpat),
|
||||
t.Load(Libxml2),
|
||||
}, nil, nil, `
|
||||
cd /usr/src/wayland
|
||||
chmod +w tests tests/sanity-test.c
|
||||
echo 'int main(){}' > tests/sanity-test.c
|
||||
|
||||
cd "$(mktemp -d)"
|
||||
meson setup \
|
||||
--reconfigure \
|
||||
--buildtype=release \
|
||||
--prefix=/system \
|
||||
--prefer-static \
|
||||
-Ddocumentation=false \
|
||||
-Dtests=true \
|
||||
-Ddefault_library=both \
|
||||
. /usr/src/wayland
|
||||
meson compile
|
||||
meson test
|
||||
meson install \
|
||||
--destdir=/work
|
||||
`, pkg.Path(AbsUsrSrc.Append("wayland"), true, pkg.NewHTTPGetTar(
|
||||
nil, "https://gitlab.freedesktop.org/wayland/wayland/"+
|
||||
"-/archive/"+version+"/wayland-"+version+".tar.bz2",
|
||||
mustDecode(checksum),
|
||||
pkg.TarBzip2,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[Wayland] = Toolchain.newWayland }
|
||||
|
||||
func (t Toolchain) newWaylandProtocols() pkg.Artifact {
|
||||
const (
|
||||
version = "1.47"
|
||||
checksum = "B_NodZ7AQfCstcx7kgbaVjpkYOzbAQq0a4NOk-SA8bQixAE20FY3p1-6gsbPgHn9"
|
||||
)
|
||||
return t.New("wayland-protocols-"+version, false, []pkg.Artifact{
|
||||
t.Load(Python),
|
||||
t.Load(Meson),
|
||||
t.Load(PkgConfig),
|
||||
t.Load(CMake),
|
||||
t.Load(Ninja),
|
||||
|
||||
t.Load(Wayland),
|
||||
t.Load(Libffi),
|
||||
t.Load(Libexpat),
|
||||
t.Load(Libxml2),
|
||||
}, nil, nil, `
|
||||
cd "$(mktemp -d)"
|
||||
meson setup \
|
||||
--reconfigure \
|
||||
--buildtype=release \
|
||||
--prefix=/system \
|
||||
--prefer-static \
|
||||
. /usr/src/wayland-protocols
|
||||
meson compile
|
||||
meson install \
|
||||
--destdir=/work
|
||||
`, pkg.Path(AbsUsrSrc.Append("wayland-protocols"), false, pkg.NewHTTPGetTar(
|
||||
nil, "https://gitlab.freedesktop.org/wayland/wayland-protocols/"+
|
||||
"-/archive/"+version+"/wayland-protocols-"+version+".tar.bz2",
|
||||
mustDecode(checksum),
|
||||
pkg.TarBzip2,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[WaylandProtocols] = Toolchain.newWaylandProtocols }
|
||||
internal/rosa/x.go (Normal file, 52 lines)
@@ -0,0 +1,52 @@
|
||||
package rosa
|
||||
|
||||
import "hakurei.app/internal/pkg"
|
||||
|
||||
func (t Toolchain) newXproto() pkg.Artifact {
|
||||
const (
|
||||
version = "7.0.23"
|
||||
checksum = "goxwWxV0jZ_3pNczXFltZWHAhq92x-aEreUGyp5Ns8dBOoOmgbpeNIu1nv0Zx07z"
|
||||
)
|
||||
return t.New("xproto-"+version, false, []pkg.Artifact{
|
||||
t.Load(Make),
|
||||
t.Load(PkgConfig),
|
||||
}, nil, nil, `
|
||||
cd "$(mktemp -d)"
|
||||
/usr/src/xproto/configure \
|
||||
--prefix=/system \
|
||||
--enable-static
|
||||
make "-j$(nproc)" check
|
||||
make DESTDIR=/work install
|
||||
`, pkg.Path(AbsUsrSrc.Append("xproto"), true, pkg.NewHTTPGetTar(
|
||||
nil, "https://www.x.org/releases/X11R7.7/src/proto/xproto-"+version+".tar.bz2",
|
||||
mustDecode(checksum),
|
||||
pkg.TarBzip2,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[Xproto] = Toolchain.newXproto }
|
||||
|
||||
func (t Toolchain) newLibXau() pkg.Artifact {
|
||||
const (
|
||||
version = "1.0.7"
|
||||
checksum = "bm768RoZZnHRe9VjNU1Dw3BhfE60DyS9D_bgSR-JLkEEyUWT_Hb_lQripxrXto8j"
|
||||
)
|
||||
return t.New("libXau-"+version, false, []pkg.Artifact{
|
||||
t.Load(Make),
|
||||
t.Load(PkgConfig),
|
||||
|
||||
t.Load(Xproto),
|
||||
}, nil, nil, `
|
||||
cd "$(mktemp -d)"
|
||||
/usr/src/libXau/configure \
|
||||
--prefix=/system \
|
||||
--enable-static
|
||||
make "-j$(nproc)" check
|
||||
make DESTDIR=/work install
|
||||
`, pkg.Path(AbsUsrSrc.Append("libXau"), true, pkg.NewHTTPGetTar(
|
||||
nil, "https://www.x.org/releases/X11R7.7/src/lib/"+
|
||||
"libXau-"+version+".tar.bz2",
|
||||
mustDecode(checksum),
|
||||
pkg.TarBzip2,
|
||||
)))
|
||||
}
|
||||
func init() { artifactsF[LibXau] = Toolchain.newLibXau }
|
||||
56	internal/rosa/xcb.go	Normal file
@@ -0,0 +1,56 @@
package rosa

import "hakurei.app/internal/pkg"

func (t Toolchain) newXCBProto() pkg.Artifact {
	const (
		version  = "1.17.0"
		checksum = "_NtbKaJ_iyT7XiJz25mXQ7y-niTzE8sHPvLXZPcqtNoV_-vTzqkezJ8Hp2U1enCv"
	)
	return t.New("xcb-proto-"+version, false, []pkg.Artifact{
		t.Load(Make),
		t.Load(Python),
	}, nil, nil, `
cd "$(mktemp -d)"
/usr/src/xcb-proto/configure \
	--prefix=/system \
	--build="${ROSA_TRIPLE}" \
	--enable-static
make "-j$(nproc)" check
make DESTDIR=/work install
`, pkg.Path(AbsUsrSrc.Append("xcb-proto"), true, pkg.NewHTTPGetTar(
		nil, "https://xcb.freedesktop.org/dist/xcb-proto-"+version+".tar.gz",
		mustDecode(checksum),
		pkg.TarGzip,
	)))
}
func init() { artifactsF[XCBProto] = Toolchain.newXCBProto }

func (t Toolchain) newXCB() pkg.Artifact {
	const (
		version  = "1.17.0"
		checksum = "hjjsc79LpWM_hZjNWbDDS6qRQUXREjjekS6UbUsDq-RR1_AjgNDxhRvZf-1_kzDd"
	)
	return t.New("xcb-"+version, false, []pkg.Artifact{
		t.Load(Make),
		t.Load(Python),
		t.Load(PkgConfig),

		t.Load(XCBProto),
		t.Load(Xproto),
		t.Load(LibXau),
	}, nil, nil, `
cd "$(mktemp -d)"
/usr/src/xcb/configure \
	--prefix=/system \
	--build="${ROSA_TRIPLE}" \
	--enable-static
make "-j$(nproc)" check
make DESTDIR=/work install
`, pkg.Path(AbsUsrSrc.Append("xcb"), true, pkg.NewHTTPGetTar(
		nil, "https://xcb.freedesktop.org/dist/libxcb-"+version+".tar.gz",
		mustDecode(checksum),
		pkg.TarGzip,
	)))
}
func init() { artifactsF[XCB] = Toolchain.newXCB }
25	internal/rosa/zlib.go	Normal file
@@ -0,0 +1,25 @@
package rosa

import "hakurei.app/internal/pkg"

func (t Toolchain) newZlib() pkg.Artifact {
	const (
		version  = "1.3.1"
		checksum = "E-eIpNzE8oJ5DsqH4UuA_0GDKuQF5csqI8ooDx2w7Vx-woJ2mb-YtSbEyIMN44mH"
	)
	return t.New("zlib-"+version, false, []pkg.Artifact{
		t.Load(Make),
	}, nil, nil, `
cd "$(mktemp -d)"
CC="clang -fPIC" /usr/src/zlib/configure \
	--prefix /system
make "-j$(nproc)" test
make DESTDIR=/work install
`, pkg.Path(AbsUsrSrc.Append("zlib"), true,
		pkg.NewHTTPGetTar(
			nil, "https://zlib.net/zlib-"+version+".tar.gz",
			mustDecode(checksum),
			pkg.TarGzip,
		)))
}
func init() { artifactsF[Zlib] = Toolchain.newZlib }
@@ -6,6 +6,7 @@ import (
	"path"
	"syscall"
	"testing"
	"time"

	"hakurei.app/container/stub"
	"hakurei.app/internal/acl"
@@ -497,6 +498,12 @@ type stubPipeWireConn struct {
	curSendmsg int
}

func (conn *stubPipeWireConn) MightBlock(timeout time.Duration) {
	if timeout != 5*time.Second {
		panic("unexpected timeout " + timeout.String())
	}
}

// Recvmsg marshals and copies a stubMessage prepared ahead of time.
func (conn *stubPipeWireConn) Recvmsg(p, _ []byte, _ int) (n, _, recvflags int, err error) {
	defer func() { conn.curRecvmsg++ }()

@@ -36,7 +36,7 @@ libzstd.so.1 = /usr/lib/libzstd.so.1 (0x7ff71bfd2000)

		{"path not absolute", `
libzstd.so.1 => usr/lib/libzstd.so.1 (0x7ff71bfd2000)
`, &check.AbsoluteError{Pathname: "usr/lib/libzstd.so.1"}},
`, check.AbsoluteError("usr/lib/libzstd.so.1")},

		{"unexpected segments", `
meow libzstd.so.1 => /usr/lib/libzstd.so.1 (0x7ff71bfd2000)

71	nixos.nix
@@ -24,11 +24,38 @@ let
  getsubuid = userid: appid: userid * 100000 + 10000 + appid;
  getsubname = userid: appid: "u${toString userid}_a${toString appid}";
  getsubhome = userid: appid: "${cfg.stateDir}/u${toString userid}/a${toString appid}";

  mountpoints = {
    ${cfg.sharefs.name} = mkIf (cfg.sharefs.source != null) {
      depends = [ cfg.sharefs.source ];
      device = "sharefs";
      fsType = "fuse.sharefs";
      noCheck = true;
      options = [
        "rw"
        "noexec"
        "nosuid"
        "nodev"
        "noatime"
        "allow_other"
        "mkdir"
        "source=${cfg.sharefs.source}"
        "setuid=${toString config.users.users.${cfg.sharefs.user}.uid}"
        "setgid=${toString config.users.groups.${cfg.sharefs.group}.gid}"
      ];
    };
  };
in

{
  imports = [ (import ./options.nix packages) ];

  options = {
    # Forward declare a dummy option for VM filesystems since the real one won't exist
    # unless the VM module is actually imported.
    virtualisation.fileSystems = lib.mkOption { };
  };

  config = mkIf cfg.enable {
    assertions = [
      (
@@ -66,6 +93,10 @@ in
      ) "" cfg.users;
    };

    environment.systemPackages = optional (cfg.sharefs.source != null) cfg.sharefs.package;
    fileSystems = mountpoints;
    virtualisation.fileSystems = mountpoints;

    home-manager =
      let
        privPackages = mapAttrs (_: userid: {
@@ -322,25 +353,57 @@ in
      in
      {
        users = mkMerge (
          foldlAttrs (
          foldlAttrs
            (
              acc: _: fid:
              acc
              ++ foldlAttrs (
                acc': _: app:
                acc' ++ [ { ${getsubname fid app.identity} = getuser fid app.identity; } ]
              ) [ { ${getsubname fid 0} = getuser fid 0; } ] cfg.apps
          ) [ ] cfg.users
            )
            (
              if (cfg.sharefs.source != null) then
                [
                  {
                    ${cfg.sharefs.user} = {
                      uid = lib.mkDefault 1023;
                      inherit (cfg.sharefs) group;
                      isSystemUser = true;
                      home = cfg.sharefs.source;
                    };

                  }
                ]
              else
                [ ]
            )
            cfg.users
        );

        groups = mkMerge (
          foldlAttrs (
          foldlAttrs
            (
              acc: _: fid:
              acc
              ++ foldlAttrs (
                acc': _: app:
                acc' ++ [ { ${getsubname fid app.identity} = getgroup fid app.identity; } ]
              ) [ { ${getsubname fid 0} = getgroup fid 0; } ] cfg.apps
          ) [ ] cfg.users
            )
            (
              if (cfg.sharefs.source != null) then
                [
                  {
                    ${cfg.sharefs.group} = {
                      gid = lib.mkDefault 1023;
                    };
                  }
                ]
              else
                [ ]
            )
            cfg.users
        );
      };
    };
94	options.md
@@ -35,7 +35,7 @@ package

*Default:*
` <derivation hakurei-static-x86_64-unknown-linux-musl-0.3.3> `
` <derivation hakurei-static-x86_64-unknown-linux-musl-0.3.4> `

@@ -805,7 +805,97 @@ package

*Default:*
` <derivation hakurei-hsu-0.3.3> `
` <derivation hakurei-hsu-0.3.4> `

## environment\.hakurei\.sharefs\.package

The sharefs package to use\.

*Type:*
package

*Default:*
` <derivation sharefs> `

## environment\.hakurei\.sharefs\.group

Name of the group to run the sharefs daemon as\.

*Type:*
string

*Default:*
` "sharefs" `

## environment\.hakurei\.sharefs\.name

Host path to mount sharefs on\.

*Type:*
string

*Default:*
` "/sdcard" `

## environment\.hakurei\.sharefs\.source

Writable backing directory\. Setting this to null disables sharefs\.

*Type:*
null or string

*Default:*
` null `

## environment\.hakurei\.sharefs\.user

Name of the user to run the sharefs daemon as\.

*Type:*
string

*Default:*
` "sharefs" `
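Taken together, the sharefs options above only take effect when `source` is non-null. As an illustrative sketch (not part of this changeset; it mirrors the defaults documented above and the test configuration further down, with a made-up backing path), a host could enable sharefs like this:

{
  environment.hakurei = {
    enable = true;
    stateDir = "/var/lib/hakurei";
    sharefs = {
      # a non-null writable backing directory enables sharefs (hypothetical path)
      source = "/var/lib/hakurei/sdcard";
      # the remaining values repeat the documented defaults
      name = "/sdcard";
      user = "sharefs";
      group = "sharefs";
    };
  };
}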
52	options.nix
@@ -1,8 +1,15 @@
packages:
{ lib, pkgs, ... }:
{
  lib,
  pkgs,
  config,
  ...
}:

let
  inherit (lib) types mkOption mkEnableOption;

  cfg = config.environment.hakurei;
in

{
@@ -40,6 +47,49 @@ in
      '';
    };

    sharefs = {
      package = mkOption {
        type = types.package;
        default = pkgs.linkFarm "sharefs" {
          "bin/sharefs" = "${cfg.package}/libexec/sharefs";
          "bin/mount.fuse.sharefs" = "${cfg.package}/libexec/sharefs";
        };
        description = "The sharefs package to use.";
      };

      user = mkOption {
        type = types.str;
        default = "sharefs";
        description = ''
          Name of the user to run the sharefs daemon as.
        '';
      };

      group = mkOption {
        type = types.str;
        default = "sharefs";
        description = ''
          Name of the group to run the sharefs daemon as.
        '';
      };

      name = mkOption {
        type = types.str;
        default = "/sdcard";
        description = ''
          Host path to mount sharefs on.
        '';
      };

      source = mkOption {
        type = types.nullOr types.str;
        default = null;
        description = ''
          Writable backing directory. Setting this to null disables sharefs.
        '';
      };
    };

    apps = mkOption {
      type =
        let

@@ -13,6 +13,9 @@
  wayland-scanner,
  xorg,

  # for sharefs
  fuse3,

  # for hpkg
  zstd,
  gnutar,
@@ -32,7 +35,7 @@

buildGoModule rec {
  pname = "hakurei";
  version = "0.3.3";
  version = "0.3.4";

  srcFiltered = builtins.path {
    name = "${pname}-src";
@@ -92,6 +95,7 @@ buildGoModule rec {
  buildInputs = [
    libffi
    libseccomp
    fuse3
    acl
    wayland
  ]

@@ -1,8 +1,9 @@
{ pkgs, ... }:
{
  environment.hakurei = {
  environment.hakurei = rec {
    enable = true;
    stateDir = "/var/lib/hakurei";
    sharefs.source = "${stateDir}/sdcard";
    users.alice = 0;
    apps = {
      "cat.gensokyo.extern.foot.noEnablements" = {