Compare commits

95 Commits: 8cb0b433b2...wip-bootst

088d35e4e6 1667df9c43 156dd767ef 5fe166a4a7 41a8d03dd2 610572d0e6 29951c5174 91c3594dee
7ccc2fc5ec 63e137856e e1e46504a1 ec9343ebd6 423808ac76 2494ede106 da3848b92f 34cb4ebd3b
f712466714 f2430b5f5e 863e6f5db6 23df2ab999 7bd4d7d0e6 b3c30bcc51 38059db835 409fd3149e
4eea136308 c86ff02d8d e8dda70c41 7ea4e8b643 5eefebcb48 8e08e8f518 54da6ce03d 3a21ba1bca
45301559bf 0df87ab111 aa0a949cef ce0064384d 53d80f4b66 156096ac98 ceb75538cf 0741a614ed
e7e9b4caea f6d32e482a 79adf217f4 8efffd72f4 86ad8b72aa e91049c3c5 3d4d32932d 0ab6c13c77
834cb0d40b 7548a627e5 b98d27f773 f3aa31e401 4da26681b5 4897b0259e d6e4f85864 3eb927823f
d76b9d04b8 fa93476896 bd0ef086b1 05202cf994 40081e7a06 863d3dcf9f 8ad9909065 deda16da38
55465c6e72 ce249d23f1 dd5d792d14 d15d2ec2bd 3078c41ce7 e9de5d3aca 993afde840 c9cd16fd2a
e42ea32dbe e7982b4ee9 ef1ebf12d9 775a9f57c9 2f8ca83376 3d720ada92 2e5362e536 6d3bd27220
a27305cb4a 0e476c5e5b 54712e0426 b77c1ecfdb dce5839a79 d597592e1f 056f5b12d4 da2bb546ba
7bfbd59810 ea815a59e8 28a8dc67d2 ec49c63c5f 5a50bf80ee ce06b7b663 08bdc68f3a
@@ -72,6 +72,23 @@ jobs:
          path: result/*
          retention-days: 1

  sharefs:
    name: ShareFS
    runs-on: nix
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Run NixOS test
        run: nix build --out-link "result" --print-out-paths --print-build-logs .#checks.x86_64-linux.sharefs

      - name: Upload test output
        uses: actions/upload-artifact@v3
        with:
          name: "sharefs-vm-output"
          path: result/*
          retention-days: 1

  hpkg:
    name: Hpkg
    runs-on: nix

@@ -96,6 +113,7 @@ jobs:
      - race
      - sandbox
      - sandbox-race
      - sharefs
      - hpkg
    runs-on: nix
    steps:

1  .gitignore  vendored
@@ -27,6 +27,7 @@ go.work.sum

# go generate
/cmd/hakurei/LICENSE
/internal/pkg/testdata/testtool

# release
/dist/hakurei-*

282  cmd/sharefs/fuse-operations.c  Normal file
@@ -0,0 +1,282 @@
#ifndef _GNU_SOURCE
#define _GNU_SOURCE /* O_DIRECT */
#endif

#include <dirent.h>
#include <errno.h>
#include <unistd.h>

/* TODO(ophestra): remove after 05ce67fea99ca09cd4b6625cff7aec9cc222dd5a reaches a release */
#include <sys/syscall.h>

#include "fuse-operations.h"

/* MUST_TRANSLATE_PATHNAME translates a userspace pathname to a relative pathname;
 * the resulting address points to a constant string or part of pathname, it is never heap allocated. */
#define MUST_TRANSLATE_PATHNAME(pathname) \
    do { \
        if (pathname == NULL) \
            return -EINVAL; \
        while (*pathname == '/') \
            pathname++; \
        if (*pathname == '\0') \
            pathname = "."; \
    } while (0)

/* GET_CONTEXT_PRIV obtains fuse context and private data for the calling thread. */
#define GET_CONTEXT_PRIV(ctx, priv) \
    do { \
        ctx = fuse_get_context(); \
        priv = ctx->private_data; \
    } while (0)

/* impl_getattr modifies a struct stat from the kernel to present to userspace;
 * impl_getattr returns a negative errno style error code. */
static int impl_getattr(struct fuse_context *ctx, struct stat *statbuf) {
    /* allowlist of permitted types */
    if (!S_ISDIR(statbuf->st_mode) && !S_ISREG(statbuf->st_mode) && !S_ISLNK(statbuf->st_mode)) {
        return -ENOTRECOVERABLE; /* returning an errno causes all operations on the file to return EIO */
    }

#define OVERRIDE_PERM(v) (statbuf->st_mode & ~0777) | (v & 0777)
    if (S_ISDIR(statbuf->st_mode))
        statbuf->st_mode = OVERRIDE_PERM(SHAREFS_PERM_DIR);
    else if (S_ISREG(statbuf->st_mode))
        statbuf->st_mode = OVERRIDE_PERM(SHAREFS_PERM_REG);
    else
        statbuf->st_mode = 0; /* should always be symlink in this case */

    statbuf->st_uid = ctx->uid;
    statbuf->st_gid = SHAREFS_MEDIA_RW_ID;
    statbuf->st_ctim = statbuf->st_mtim;
    statbuf->st_nlink = 1;
    return 0;
}

/* fuse_operations implementation */

int sharefs_getattr(const char *pathname, struct stat *statbuf, struct fuse_file_info *fi) {
    struct fuse_context *ctx;
    struct sharefs_private *priv;
    GET_CONTEXT_PRIV(ctx, priv);
    MUST_TRANSLATE_PATHNAME(pathname);

    (void)fi;

    if (fstatat(priv->dirfd, pathname, statbuf, AT_SYMLINK_NOFOLLOW) == -1)
        return -errno;
    return impl_getattr(ctx, statbuf);
}

int sharefs_readdir(const char *pathname, void *buf, fuse_fill_dir_t filler, off_t offset, struct fuse_file_info *fi, enum fuse_readdir_flags flags) {
    int fd;
    DIR *dp;
    struct stat st;
    int ret = 0;
    struct dirent *de;

    struct fuse_context *ctx;
    struct sharefs_private *priv;
    GET_CONTEXT_PRIV(ctx, priv);
    MUST_TRANSLATE_PATHNAME(pathname);

    (void)offset;
    (void)fi;

    if ((fd = openat(priv->dirfd, pathname, O_RDONLY | O_DIRECTORY | O_CLOEXEC)) == -1)
        return -errno;
    if ((dp = fdopendir(fd)) == NULL) {
        close(fd);
        return -errno;
    }

    errno = 0; /* for the next readdir call */
    while ((de = readdir(dp)) != NULL) {
        if (flags & FUSE_READDIR_PLUS) {
            if (fstatat(dirfd(dp), de->d_name, &st, AT_SYMLINK_NOFOLLOW) == -1) {
                ret = -errno;
                break;
            }

            if ((ret = impl_getattr(ctx, &st)) < 0)
                break;

            errno = 0;
            ret = filler(buf, de->d_name, &st, 0, FUSE_FILL_DIR_PLUS);
        } else
            ret = filler(buf, de->d_name, NULL, 0, 0);

        if (ret != 0) {
            ret = errno != 0 ? -errno : -EIO; /* filler */
            break;
        }

        errno = 0; /* for the next readdir call */
    }
    if (ret == 0 && errno != 0)
        ret = -errno; /* readdir */

    closedir(dp);
    return ret;
}

int sharefs_mkdir(const char *pathname, mode_t mode) {
    struct fuse_context *ctx;
    struct sharefs_private *priv;
    GET_CONTEXT_PRIV(ctx, priv);
    MUST_TRANSLATE_PATHNAME(pathname);

    (void)mode;

    if (mkdirat(priv->dirfd, pathname, SHAREFS_PERM_DIR) == -1)
        return -errno;
    return 0;
}

int sharefs_unlink(const char *pathname) {
    struct fuse_context *ctx;
    struct sharefs_private *priv;
    GET_CONTEXT_PRIV(ctx, priv);
    MUST_TRANSLATE_PATHNAME(pathname);

    if (unlinkat(priv->dirfd, pathname, 0) == -1)
        return -errno;
    return 0;
}

int sharefs_rmdir(const char *pathname) {
    struct fuse_context *ctx;
    struct sharefs_private *priv;
    GET_CONTEXT_PRIV(ctx, priv);
    MUST_TRANSLATE_PATHNAME(pathname);

    if (unlinkat(priv->dirfd, pathname, AT_REMOVEDIR) == -1)
        return -errno;
    return 0;
}

int sharefs_rename(const char *oldpath, const char *newpath, unsigned int flags) {
    struct fuse_context *ctx;
    struct sharefs_private *priv;
    GET_CONTEXT_PRIV(ctx, priv);
    MUST_TRANSLATE_PATHNAME(oldpath);
    MUST_TRANSLATE_PATHNAME(newpath);

    /* TODO(ophestra): replace with wrapper after 05ce67fea99ca09cd4b6625cff7aec9cc222dd5a reaches a release */
    if (syscall(__NR_renameat2, priv->dirfd, oldpath, priv->dirfd, newpath, flags) == -1)
        return -errno;
    return 0;
}

int sharefs_truncate(const char *pathname, off_t length, struct fuse_file_info *fi) {
    int fd;
    int ret;

    struct fuse_context *ctx;
    struct sharefs_private *priv;
    GET_CONTEXT_PRIV(ctx, priv);
    MUST_TRANSLATE_PATHNAME(pathname);

    (void)fi;

    if ((fd = openat(priv->dirfd, pathname, O_WRONLY | O_CLOEXEC)) == -1)
        return -errno;
    if ((ret = ftruncate(fd, length)) == -1)
        ret = -errno;
    close(fd);
    return ret;
}

int sharefs_utimens(const char *pathname, const struct timespec times[2], struct fuse_file_info *fi) {
    struct fuse_context *ctx;
    struct sharefs_private *priv;
    GET_CONTEXT_PRIV(ctx, priv);
    MUST_TRANSLATE_PATHNAME(pathname);

    (void)fi;

    if (utimensat(priv->dirfd, pathname, times, AT_SYMLINK_NOFOLLOW) == -1)
        return -errno;
    return 0;
}

int sharefs_create(const char *pathname, mode_t mode, struct fuse_file_info *fi) {
    int fd;

    struct fuse_context *ctx;
    struct sharefs_private *priv;
    GET_CONTEXT_PRIV(ctx, priv);
    MUST_TRANSLATE_PATHNAME(pathname);

    (void)mode;

    if ((fd = openat(priv->dirfd, pathname, fi->flags & ~SHAREFS_FORBIDDEN_FLAGS, SHAREFS_PERM_REG)) == -1)
        return -errno;
    fi->fh = fd;
    return 0;
}

int sharefs_open(const char *pathname, struct fuse_file_info *fi) {
    int fd;

    struct fuse_context *ctx;
    struct sharefs_private *priv;
    GET_CONTEXT_PRIV(ctx, priv);
    MUST_TRANSLATE_PATHNAME(pathname);

    if ((fd = openat(priv->dirfd, pathname, fi->flags & ~SHAREFS_FORBIDDEN_FLAGS)) == -1)
        return -errno;
    fi->fh = fd;
    return 0;
}

int sharefs_read(const char *pathname, char *buf, size_t count, off_t offset, struct fuse_file_info *fi) {
    int ret;

    (void)pathname;

    if ((ret = pread(fi->fh, buf, count, offset)) == -1)
        return -errno;
    return ret;
}

int sharefs_write(const char *pathname, const char *buf, size_t count, off_t offset, struct fuse_file_info *fi) {
    int ret;

    (void)pathname;

    if ((ret = pwrite(fi->fh, buf, count, offset)) == -1)
        return -errno;
    return ret;
}

int sharefs_statfs(const char *pathname, struct statvfs *statbuf) {
    int fd;
    int ret;

    struct fuse_context *ctx;
    struct sharefs_private *priv;
    GET_CONTEXT_PRIV(ctx, priv);
    MUST_TRANSLATE_PATHNAME(pathname);

    if ((fd = openat(priv->dirfd, pathname, O_RDONLY | O_CLOEXEC)) == -1)
        return -errno;
    if ((ret = fstatvfs(fd, statbuf)) == -1)
        ret = -errno;
    close(fd);
    return ret;
}

int sharefs_release(const char *pathname, struct fuse_file_info *fi) {
    (void)pathname;

    return close(fi->fh);
}

int sharefs_fsync(const char *pathname, int datasync, struct fuse_file_info *fi) {
    (void)pathname;

    /* parenthesised so the -1 comparison applies to whichever sync call ran,
     * not only to fsync */
    if ((datasync ? fdatasync(fi->fh) : fsync(fi->fh)) == -1)
        return -errno;
    return 0;
}
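Every operation above rebases the pathname it receives from libfuse onto the source dirfd before calling the *at() syscalls. For readers more comfortable with Go than with the C macro, a minimal sketch of the same translation follows; translatePathname is a hypothetical illustrative helper, not part of this change, and it omits the NULL check (which the macro answers with -EINVAL).

package main

import "strings"

// translatePathname mirrors MUST_TRANSLATE_PATHNAME: libfuse hands callbacks a
// pathname rooted at the mountpoint, and sharefs rebases it for openat/fstatat
// on the source dirfd by stripping leading slashes; the mountpoint root becomes ".".
func translatePathname(pathname string) string {
	rel := strings.TrimLeft(pathname, "/")
	if rel == "" {
		return "."
	}
	return rel
}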
34  cmd/sharefs/fuse-operations.h  Normal file
@@ -0,0 +1,34 @@
#define FUSE_USE_VERSION FUSE_MAKE_VERSION(3, 12)
#include <fuse.h>
#include <fuse_lowlevel.h> /* for fuse_cmdline_help */

#if (FUSE_VERSION < FUSE_MAKE_VERSION(3, 12))
#error This package requires libfuse >= v3.12
#endif

#define SHAREFS_MEDIA_RW_ID ((1 << 10) - 1) /* owning gid presented to userspace */
#define SHAREFS_PERM_DIR 0700 /* permission bits for directories presented to userspace */
#define SHAREFS_PERM_REG 0600 /* permission bits for regular files presented to userspace */
#define SHAREFS_FORBIDDEN_FLAGS O_DIRECT /* these open flags are cleared unconditionally */

/* sharefs_private is populated by sharefs_init and contains process-wide context */
struct sharefs_private {
    int dirfd; /* source dirfd opened during sharefs_init */
    uintptr_t setup; /* cgo handle of opaque setup state */
};

int sharefs_getattr(const char *pathname, struct stat *statbuf, struct fuse_file_info *fi);
int sharefs_readdir(const char *pathname, void *buf, fuse_fill_dir_t filler, off_t offset, struct fuse_file_info *fi, enum fuse_readdir_flags flags);
int sharefs_mkdir(const char *pathname, mode_t mode);
int sharefs_unlink(const char *pathname);
int sharefs_rmdir(const char *pathname);
int sharefs_rename(const char *oldpath, const char *newpath, unsigned int flags);
int sharefs_truncate(const char *pathname, off_t length, struct fuse_file_info *fi);
int sharefs_utimens(const char *pathname, const struct timespec times[2], struct fuse_file_info *fi);
int sharefs_create(const char *pathname, mode_t mode, struct fuse_file_info *fi);
int sharefs_open(const char *pathname, struct fuse_file_info *fi);
int sharefs_read(const char *pathname, char *buf, size_t count, off_t offset, struct fuse_file_info *fi);
int sharefs_write(const char *pathname, const char *buf, size_t count, off_t offset, struct fuse_file_info *fi);
int sharefs_statfs(const char *pathname, struct statvfs *statbuf);
int sharefs_release(const char *pathname, struct fuse_file_info *fi);
int sharefs_fsync(const char *pathname, int datasync, struct fuse_file_info *fi);
556  cmd/sharefs/fuse.go  Normal file
@@ -0,0 +1,556 @@
package main

/*
#cgo pkg-config: --static fuse3

#include "fuse-operations.h"
#include <stdlib.h>
#include <string.h>

extern void *sharefs_init(struct fuse_conn_info *conn, struct fuse_config *cfg);
extern void sharefs_destroy(void *private_data);

typedef void (*closure)();
static inline struct fuse_opt _FUSE_OPT_END() { return (struct fuse_opt)FUSE_OPT_END; };
*/
import "C"
import (
    "context"
    "encoding/gob"
    "errors"
    "fmt"
    "io"
    "log"
    "os"
    "os/exec"
    "os/signal"
    "path"
    "runtime"
    "runtime/cgo"
    "strconv"
    "syscall"
    "unsafe"

    "hakurei.app/container"
    "hakurei.app/container/check"
    "hakurei.app/container/std"
    "hakurei.app/hst"
    "hakurei.app/internal/helper/proc"
    "hakurei.app/internal/info"
    "hakurei.app/message"
)

type (
    // closure represents a C function pointer.
    closure = C.closure

    // fuseArgs represents the fuse_args structure.
    fuseArgs = C.struct_fuse_args

    // setupState holds state used for setup. Its cgo handle is included in
    // sharefs_private and considered opaque to non-setup callbacks.
    setupState struct {
        // Whether sharefs_init failed.
        initFailed bool

        // Whether to create source directory as root.
        mkdir bool

        // Open file descriptor to fuse.
        Fuse int

        // Pathname to open for dirfd.
        Source *check.Absolute
        // New uid and gid to set by sharefs_init when starting as root.
        Setuid, Setgid int
    }
)

func init() { gob.Register(new(setupState)) }

// destroySetup invalidates the setup [cgo.Handle] in a sharefs_private structure.
func destroySetup(private_data unsafe.Pointer) (ok bool) {
    if private_data == nil {
        return false
    }
    priv := (*C.struct_sharefs_private)(private_data)

    if h := cgo.Handle(priv.setup); h != 0 {
        priv.setup = 0
        h.Delete()
        ok = true
    }
    return
}

//export sharefs_init
func sharefs_init(_ *C.struct_fuse_conn_info, cfg *C.struct_fuse_config) unsafe.Pointer {
    ctx := C.fuse_get_context()
    priv := (*C.struct_sharefs_private)(ctx.private_data)
    setup := cgo.Handle(priv.setup).Value().(*setupState)

    if os.Geteuid() == 0 {
        log.Println("filesystem daemon must not run as root")
        goto fail
    }

    cfg.use_ino = C.true
    cfg.direct_io = C.false
    // getattr is context-dependent
    cfg.attr_timeout = 0
    cfg.entry_timeout = 0
    cfg.negative_timeout = 0

    // all future filesystem operations happen through this dirfd
    if fd, err := syscall.Open(setup.Source.String(), syscall.O_DIRECTORY|syscall.O_RDONLY|syscall.O_CLOEXEC, 0); err != nil {
        log.Printf("cannot open %q: %v", setup.Source, err)
        goto fail
    } else if err = syscall.Fchdir(fd); err != nil {
        _ = syscall.Close(fd)
        log.Printf("cannot enter %q: %s", setup.Source, err)
        goto fail
    } else {
        priv.dirfd = C.int(fd)
    }

    return ctx.private_data

fail:
    setup.initFailed = true
    C.fuse_exit(ctx.fuse)
    return nil
}

//export sharefs_destroy
func sharefs_destroy(private_data unsafe.Pointer) {
    if private_data != nil {
        destroySetup(private_data)
        priv := (*C.struct_sharefs_private)(private_data)

        if err := syscall.Close(int(priv.dirfd)); err != nil {
            log.Printf("cannot close source directory: %v", err)
        }
    }
}

// showHelp prints the help message.
func showHelp(args *fuseArgs) {
    executableName := sharefsName
    if args.argc > 0 {
        executableName = path.Base(C.GoString(*args.argv))
    } else if name, err := os.Executable(); err == nil {
        executableName = path.Base(name)
    }

    fmt.Printf("usage: %s [options] <mountpoint>\n\n", executableName)

    fmt.Println("Filesystem options:")
    fmt.Println("    -o source=/data/media   source directory to be mounted")
    fmt.Println("    -o setuid=1023          uid to run as when starting as root")
    fmt.Println("    -o setgid=1023          gid to run as when starting as root")

    fmt.Println("\nFUSE options:")
    C.fuse_cmdline_help()
    C.fuse_lib_help(args)
}

// parseOpts parses fuse options via fuse_opt_parse.
func parseOpts(args *fuseArgs, setup *setupState, log *log.Logger) (ok bool) {
    var unsafeOpts struct {
        // Pathname to writable source directory.
        source *C.char

        // Whether to create source directory as root.
        mkdir C.int

        // Decimal string representation of uid to set when running as root.
        setuid *C.char
        // Decimal string representation of gid to set when running as root.
        setgid *C.char

        // Decimal string representation of open file descriptor to read setupState from.
        // This is an internal detail for containerisation and must not be specified directly.
        setup *C.char
    }

    if C.fuse_opt_parse(args, unsafe.Pointer(&unsafeOpts), &[]C.struct_fuse_opt{
        {templ: C.CString("source=%s"), offset: C.ulong(unsafe.Offsetof(unsafeOpts.source)), value: 0},
        {templ: C.CString("mkdir"), offset: C.ulong(unsafe.Offsetof(unsafeOpts.mkdir)), value: 1},
        {templ: C.CString("setuid=%s"), offset: C.ulong(unsafe.Offsetof(unsafeOpts.setuid)), value: 0},
        {templ: C.CString("setgid=%s"), offset: C.ulong(unsafe.Offsetof(unsafeOpts.setgid)), value: 0},

        {templ: C.CString("setup=%s"), offset: C.ulong(unsafe.Offsetof(unsafeOpts.setup)), value: 0},

        C._FUSE_OPT_END(),
    }[0], nil) == -1 {
        return false
    }

    if unsafeOpts.source != nil {
        defer C.free(unsafe.Pointer(unsafeOpts.source))
    }
    if unsafeOpts.setuid != nil {
        defer C.free(unsafe.Pointer(unsafeOpts.setuid))
    }
    if unsafeOpts.setgid != nil {
        defer C.free(unsafe.Pointer(unsafeOpts.setgid))
    }

    if unsafeOpts.setup != nil {
        defer C.free(unsafe.Pointer(unsafeOpts.setup))

        if v, err := strconv.Atoi(C.GoString(unsafeOpts.setup)); err != nil || v < 3 {
            log.Println("invalid value for option setup")
            return false
        } else {
            r := os.NewFile(uintptr(v), "setup")
            defer func() {
                if err = r.Close(); err != nil {
                    log.Println(err)
                }
            }()
            if err = gob.NewDecoder(r).Decode(setup); err != nil {
                log.Println(err)
                return false
            }
        }
        if setup.Fuse < 3 {
            log.Println("invalid file descriptor", setup.Fuse)
            return false
        }
        return true
    }

    if unsafeOpts.source == nil {
        showHelp(args)
        return false
    } else if a, err := check.NewAbs(C.GoString(unsafeOpts.source)); err != nil {
        log.Println(err)
        return false
    } else {
        setup.Source = a
    }
    setup.mkdir = unsafeOpts.mkdir != 0

    if unsafeOpts.setuid == nil {
        setup.Setuid = -1
    } else if v, err := strconv.Atoi(C.GoString(unsafeOpts.setuid)); err != nil || v <= 0 {
        log.Println("invalid value for option setuid")
        return false
    } else {
        setup.Setuid = v
    }
    if unsafeOpts.setgid == nil {
        setup.Setgid = -1
    } else if v, err := strconv.Atoi(C.GoString(unsafeOpts.setgid)); err != nil || v <= 0 {
        log.Println("invalid value for option setgid")
        return false
    } else {
        setup.Setgid = v
    }

    return true
}

// copyArgs returns a heap allocated copy of an argument slice in fuse_args representation.
func copyArgs(s ...string) fuseArgs {
    if len(s) == 0 {
        return fuseArgs{argc: 0, argv: nil, allocated: 0}
    }
    args := unsafe.Slice((**C.char)(C.malloc(C.size_t(uintptr(len(s))*unsafe.Sizeof(s[0])))), len(s))
    for i, arg := range s {
        args[i] = C.CString(arg)
    }
    return fuseArgs{argc: C.int(len(s)), argv: &args[0], allocated: 1}
}

// freeArgs frees the contents of argument list.
func freeArgs(args *fuseArgs) { C.fuse_opt_free_args(args) }

// unsafeAddArgument adds an argument to fuseArgs via fuse_opt_add_arg.
// The last byte of arg must be 0.
func unsafeAddArgument(args *fuseArgs, arg string) {
    C.fuse_opt_add_arg(args, (*C.char)(unsafe.Pointer(unsafe.StringData(arg))))
}

func _main(s ...string) (exitCode int) {
    msg := message.New(log.Default())
    container.TryArgv0(msg)
    runtime.LockOSThread()

    // don't mask creation mode, kernel already did that
    syscall.Umask(0)

    var pinner runtime.Pinner
    defer pinner.Unpin()

    args := copyArgs(s...)
    defer freeArgs(&args)

    // this causes the kernel to enforce access control based on
    // struct stat populated by sharefs_getattr
    unsafeAddArgument(&args, "-odefault_permissions\x00")

    var priv C.struct_sharefs_private
    pinner.Pin(&priv)
    var setup setupState
    priv.setup = C.uintptr_t(cgo.NewHandle(&setup))
    defer destroySetup(unsafe.Pointer(&priv))

    var opts C.struct_fuse_cmdline_opts
    if C.fuse_parse_cmdline(&args, &opts) != 0 {
        return 1
    }
    if opts.mountpoint != nil {
        defer C.free(unsafe.Pointer(opts.mountpoint))
    }

    if opts.show_version != 0 {
        fmt.Println("hakurei version", info.Version())
        fmt.Println("FUSE library version", C.GoString(C.fuse_pkgversion()))
        C.fuse_lowlevel_version()
        return 0
    }

    if opts.show_help != 0 {
        showHelp(&args)
        return 0
    } else if opts.mountpoint == nil {
        log.Println("no mountpoint specified")
        return 2
    } else {
        // hack to keep fuse_parse_cmdline happy in the container
        mountpoint := C.GoString(opts.mountpoint)
        pathnameArg := -1
        for i, arg := range s {
            if arg == mountpoint {
                pathnameArg = i
                break
            }
        }
        if pathnameArg < 0 {
            log.Println("mountpoint must be absolute")
            return 2
        }
        s[pathnameArg] = container.Nonexistent
    }

    if !parseOpts(&args, &setup, msg.GetLogger()) {
        return 1
    }
    asRoot := os.Geteuid() == 0

    if asRoot {
        if setup.Setuid <= 0 || setup.Setgid <= 0 {
            log.Println("setuid and setgid must not be 0")
            return 1
        }

        if setup.Fuse >= 3 {
            log.Println("filesystem daemon must not run as root")
            return 1
        }

        if setup.mkdir {
            if err := os.MkdirAll(setup.Source.String(), 0700); err != nil {
                if !errors.Is(err, os.ErrExist) {
                    log.Println(err)
                    return 1
                }
                // skip setup for existing source directory
            } else if err = os.Chown(setup.Source.String(), setup.Setuid, setup.Setgid); err != nil {
                log.Println(err)
                return 1
            }
        }
    } else if setup.Fuse < 3 && (setup.Setuid > 0 || setup.Setgid > 0) {
        log.Println("setuid and setgid has no effect when not starting as root")
        return 1
    } else if setup.mkdir {
        log.Println("mkdir has no effect when not starting as root")
        return 1
    }

    op := C.struct_fuse_operations{
        init:    closure(C.sharefs_init),
        destroy: closure(C.sharefs_destroy),

        // implemented in fuse-helper.c
        getattr:  closure(C.sharefs_getattr),
        readdir:  closure(C.sharefs_readdir),
        mkdir:    closure(C.sharefs_mkdir),
        unlink:   closure(C.sharefs_unlink),
        rmdir:    closure(C.sharefs_rmdir),
        rename:   closure(C.sharefs_rename),
        truncate: closure(C.sharefs_truncate),
        utimens:  closure(C.sharefs_utimens),
        create:   closure(C.sharefs_create),
        open:     closure(C.sharefs_open),
        read:     closure(C.sharefs_read),
        write:    closure(C.sharefs_write),
        statfs:   closure(C.sharefs_statfs),
        release:  closure(C.sharefs_release),
        fsync:    closure(C.sharefs_fsync),
    }

    fuse := C.fuse_new_fn(&args, &op, C.size_t(unsafe.Sizeof(op)), unsafe.Pointer(&priv))
    if fuse == nil {
        return 3
    }
    defer C.fuse_destroy(fuse)
    se := C.fuse_get_session(fuse)

    if setup.Fuse < 3 {
        // unconfined, set up mount point and container
        if C.fuse_mount(fuse, opts.mountpoint) != 0 {
            return 4
        }
        // unmounted by initial process
        defer func() {
            if exitCode == 5 {
                C.fuse_unmount(fuse)
            }
        }()

        if asRoot {
            if err := syscall.Setresgid(setup.Setgid, setup.Setgid, setup.Setgid); err != nil {
                log.Printf("cannot set gid: %v", err)
                return 5
            }
            if err := syscall.Setgroups(nil); err != nil {
                log.Printf("cannot set supplementary groups: %v", err)
                return 5
            }
            if err := syscall.Setresuid(setup.Setuid, setup.Setuid, setup.Setuid); err != nil {
                log.Printf("cannot set uid: %v", err)
                return 5
            }
        }

        msg.SwapVerbose(opts.debug != 0)
        ctx := context.Background()
        if opts.foreground != 0 {
            c, cancel := signal.NotifyContext(ctx, syscall.SIGINT, syscall.SIGTERM)
            defer cancel()
            ctx = c
        }
        z := container.New(ctx, msg)
        z.AllowOrphan = opts.foreground == 0
        z.Env = os.Environ()

        // keep fuse_parse_cmdline happy in the container
        z.Tmpfs(check.MustAbs(container.Nonexistent), 1<<10, 0755)

        if a, err := check.NewAbs(container.MustExecutable(msg)); err != nil {
            log.Println(err)
            return 5
        } else {
            z.Path = a
        }
        z.Args = s
        z.ForwardCancel = true
        z.SeccompPresets |= std.PresetStrict
        z.ParentPerm = 0700
        z.Bind(setup.Source, setup.Source, std.BindWritable)
        if !z.AllowOrphan {
            z.WaitDelay = hst.WaitDelayMax
            z.Stdin, z.Stdout, z.Stderr = os.Stdin, os.Stdout, os.Stderr
        }
        z.Bind(z.Path, z.Path, 0)
        setup.Fuse = int(proc.ExtraFileSlice(&z.ExtraFiles, os.NewFile(uintptr(C.fuse_session_fd(se)), "fuse")))

        var setupWriter io.WriteCloser
        if fd, w, err := container.Setup(&z.ExtraFiles); err != nil {
            log.Println(err)
            return 5
        } else {
            z.Args = append(z.Args, "-osetup="+strconv.Itoa(fd))
            setupWriter = w
        }

        if err := z.Start(); err != nil {
            if m, ok := message.GetMessage(err); ok {
                log.Println(m)
            } else {
                log.Println(err)
            }
            return 5
        }
        if err := z.Serve(); err != nil {
            if m, ok := message.GetMessage(err); ok {
                log.Println(m)
            } else {
                log.Println(err)
            }
            return 5
        }

        if err := gob.NewEncoder(setupWriter).Encode(&setup); err != nil {
            log.Println(err)
            return 5
        } else if err = setupWriter.Close(); err != nil {
            log.Println(err)
        }

        if !z.AllowOrphan {
            if err := z.Wait(); err != nil {
                var exitError *exec.ExitError
                if !errors.As(err, &exitError) || exitError == nil {
                    log.Println(err)
                    return 5
                }
                switch code := exitError.ExitCode(); syscall.Signal(code & 0x7f) {
                case syscall.SIGINT:
                case syscall.SIGTERM:

                default:
                    return code
                }
            }
        }
        return 0
    } else { // confined
        C.free(unsafe.Pointer(opts.mountpoint))
        // must be heap allocated
        opts.mountpoint = C.CString("/dev/fd/" + strconv.Itoa(setup.Fuse))

        if err := os.Chdir("/"); err != nil {
            log.Println(err)
        }
    }

    if C.fuse_mount(fuse, opts.mountpoint) != 0 {
        return 4
    }
    defer C.fuse_unmount(fuse)

    if C.fuse_set_signal_handlers(se) != 0 {
        return 6
    }
    defer C.fuse_remove_signal_handlers(se)

    if opts.singlethread != 0 {
        if C.fuse_loop(fuse) != 0 {
            return 8
        }
    } else {
        loopConfig := C.fuse_loop_cfg_create()
        if loopConfig == nil {
            return 7
        }
        defer C.fuse_loop_cfg_destroy(loopConfig)

        C.fuse_loop_cfg_set_clone_fd(loopConfig, C.uint(opts.clone_fd))

        C.fuse_loop_cfg_set_idle_threads(loopConfig, opts.max_idle_threads)
        C.fuse_loop_cfg_set_max_threads(loopConfig, opts.max_threads)
        if C.fuse_loop_mt(fuse, loopConfig) != 0 {
            return 8
        }
    }

    if setup.initFailed {
        return 1
    }
    return 0
}
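The unconfined parent in _main hands two things to its re-executed, confined child: the FUSE session fd (through ExtraFiles) and the gob-encoded setupState on a pipe whose fd number is appended as -osetup=N, which parseOpts then decodes. The stripped-down sketch below shows only that handoff with plain os.Pipe instead of the container.Setup helper; spawnConfined and exampleSetup are illustrative names, not part of this change.

package main

import (
	"encoding/gob"
	"log"
	"os"
	"os/exec"
	"strconv"
)

// exampleSetup stands in for setupState; only exported fields cross the pipe.
type exampleSetup struct {
	Fuse   int
	Setuid int
	Setgid int
}

// spawnConfined re-executes the daemon and passes it the fuse fd plus the
// serialized setup state, mirroring the unconfined branch of _main.
func spawnConfined(self string, args []string, fuse *os.File, setup exampleSetup) error {
	r, w, err := os.Pipe()
	if err != nil {
		return err
	}
	defer r.Close()

	cmd := exec.Command(self, args...)
	// ExtraFiles start at fd 3 in the child: fuse becomes 3, the pipe read end 4.
	cmd.ExtraFiles = []*os.File{fuse, r}
	setup.Fuse = 3
	cmd.Args = append(cmd.Args, "-osetup="+strconv.Itoa(4))

	if err := cmd.Start(); err != nil {
		w.Close()
		return err
	}
	// The child decodes the same structure from its end of the pipe.
	if err := gob.NewEncoder(w).Encode(&setup); err != nil {
		log.Println(err)
	}
	return w.Close()
}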
113  cmd/sharefs/fuse_test.go  Normal file
@@ -0,0 +1,113 @@
package main

import (
    "bytes"
    "log"
    "reflect"
    "testing"

    "hakurei.app/container/check"
)

func TestParseOpts(t *testing.T) {
    t.Parallel()

    testCases := []struct {
        name    string
        args    []string
        want    setupState
        wantLog string
        wantOk  bool
    }{
        {"zero length", []string{}, setupState{}, "", false},

        {"not absolute", []string{"sharefs",
            "-o", "source=nonexistent",
            "-o", "setuid=1023",
            "-o", "setgid=1023",
        }, setupState{}, "sharefs: path \"nonexistent\" is not absolute\n", false},

        {"not specified", []string{"sharefs",
            "-o", "setuid=1023",
            "-o", "setgid=1023",
        }, setupState{}, "", false},

        {"invalid setuid", []string{"sharefs",
            "-o", "source=/proc/nonexistent",
            "-o", "setuid=ff",
            "-o", "setgid=1023",
        }, setupState{
            Source: check.MustAbs("/proc/nonexistent"),
        }, "sharefs: invalid value for option setuid\n", false},

        {"invalid setgid", []string{"sharefs",
            "-o", "source=/proc/nonexistent",
            "-o", "setuid=1023",
            "-o", "setgid=ff",
        }, setupState{
            Source: check.MustAbs("/proc/nonexistent"),
            Setuid: 1023,
        }, "sharefs: invalid value for option setgid\n", false},

        {"simple", []string{"sharefs",
            "-o", "source=/proc/nonexistent",
        }, setupState{
            Source: check.MustAbs("/proc/nonexistent"),
            Setuid: -1,
            Setgid: -1,
        }, "", true},

        {"root", []string{"sharefs",
            "-o", "source=/proc/nonexistent",
            "-o", "setuid=1023",
            "-o", "setgid=1023",
        }, setupState{
            Source: check.MustAbs("/proc/nonexistent"),
            Setuid: 1023,
            Setgid: 1023,
        }, "", true},

        {"setuid", []string{"sharefs",
            "-o", "source=/proc/nonexistent",
            "-o", "setuid=1023",
        }, setupState{
            Source: check.MustAbs("/proc/nonexistent"),
            Setuid: 1023,
            Setgid: -1,
        }, "", true},

        {"setgid", []string{"sharefs",
            "-o", "source=/proc/nonexistent",
            "-o", "setgid=1023",
        }, setupState{
            Source: check.MustAbs("/proc/nonexistent"),
            Setuid: -1,
            Setgid: 1023,
        }, "", true},
    }
    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            t.Parallel()

            var (
                got setupState
                buf bytes.Buffer
            )
            args := copyArgs(tc.args...)
            defer freeArgs(&args)
            unsafeAddArgument(&args, "-odefault_permissions\x00")

            if ok := parseOpts(&args, &got, log.New(&buf, "sharefs: ", 0)); ok != tc.wantOk {
                t.Errorf("parseOpts: ok = %v, want %v", ok, tc.wantOk)
            }

            if !reflect.DeepEqual(&got, &tc.want) {
                t.Errorf("parseOpts: setup = %#v, want %#v", got, tc.want)
            }

            if buf.String() != tc.wantLog {
                t.Errorf("parseOpts: log =\n%s\nwant\n%s", buf.String(), tc.wantLog)
            }
        })
    }
}
31  cmd/sharefs/main.go  Normal file
@@ -0,0 +1,31 @@
package main

import (
    "log"
    "os"
    "slices"
)

// sharefsName is the prefix used by log.std in the sharefs process.
const sharefsName = "sharefs"

// handleMountArgs returns an alternative, libfuse-compatible args slice for
// args passed by mount -t fuse.sharefs [options] sharefs <mountpoint>.
//
// In this case, args always has a length of 5 with index 0 being what comes
// after "fuse." in the filesystem type, 1 is the uninterpreted string passed
// to mount (sharefsName is used as the magic string to enable this hack),
// 2 is passed through to libfuse as mountpoint, and 3 is always "-o".
func handleMountArgs(args []string) []string {
    if len(args) == 5 && args[1] == sharefsName && args[3] == "-o" {
        return []string{sharefsName, args[2], "-o", args[4]}
    }
    return slices.Clone(args)
}

func main() {
    log.SetFlags(0)
    log.SetPrefix(sharefsName + ": ")

    os.Exit(_main(handleMountArgs(os.Args)...))
}
29  cmd/sharefs/main_test.go  Normal file
@@ -0,0 +1,29 @@
package main

import (
    "slices"
    "testing"
)

func TestHandleMountArgs(t *testing.T) {
    t.Parallel()

    testCases := []struct {
        name string
        args []string
        want []string
    }{
        {"nil", nil, nil},
        {"passthrough", []string{"sharefs", "-V"}, []string{"sharefs", "-V"}},
        {"replace", []string{"/sbin/sharefs", "sharefs", "/sdcard", "-o", "rw"}, []string{"sharefs", "/sdcard", "-o", "rw"}},
    }
    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            t.Parallel()

            if got := handleMountArgs(tc.args); !slices.Equal(got, tc.want) {
                t.Errorf("handleMountArgs: %q, want %q", got, tc.want)
            }
        })
    }
}
41  cmd/sharefs/test/configuration.nix  Normal file
@@ -0,0 +1,41 @@
{ pkgs, ... }:
{
  users.users = {
    alice = {
      isNormalUser = true;
      description = "Alice Foobar";
      password = "foobar";
      uid = 1000;
    };
  };

  home-manager.users.alice.home.stateVersion = "24.11";

  # Automatically login on tty1 as a normal user:
  services.getty.autologinUser = "alice";

  environment = {
    # For benchmarking sharefs:
    systemPackages = [ pkgs.fsmark ];
  };

  virtualisation = {
    diskSize = 6 * 1024;

    qemu.options = [
      # Increase test performance:
      "-smp 8"
    ];
  };

  environment.hakurei = rec {
    enable = true;
    stateDir = "/var/lib/hakurei";
    sharefs.source = "${stateDir}/sdcard";
    users.alice = 0;

    extraHomeConfig = {
      home.stateVersion = "23.05";
    };
  };
}
44  cmd/sharefs/test/default.nix  Normal file
@@ -0,0 +1,44 @@
{
  testers,

  system,
  self,
}:
testers.nixosTest {
  name = "sharefs";
  nodes.machine =
    { options, pkgs, ... }:
    let
      fhs =
        let
          hakurei = options.environment.hakurei.package.default;
        in
        pkgs.buildFHSEnv {
          pname = "hakurei-fhs";
          inherit (hakurei) version;
          targetPkgs = _: hakurei.targetPkgs;
          extraOutputsToInstall = [ "dev" ];
          profile = ''
            export PKG_CONFIG_PATH="/usr/share/pkgconfig:$PKG_CONFIG_PATH"
          '';
        };
    in
    {
      environment.systemPackages = [
        # For go tests:
        (pkgs.writeShellScriptBin "sharefs-workload-hakurei-tests" ''
          cp -r "${self.packages.${system}.hakurei.src}" "/sdcard/hakurei" && cd "/sdcard/hakurei"
          ${fhs}/bin/hakurei-fhs -c 'CC="clang -O3 -Werror" go test ./...'
        '')
      ];

      imports = [
        ./configuration.nix

        self.nixosModules.hakurei
        self.inputs.home-manager.nixosModules.home-manager
      ];
    };

  testScript = builtins.readFile ./test.py;
}
60  cmd/sharefs/test/test.py  Normal file
@@ -0,0 +1,60 @@
start_all()
machine.wait_for_unit("multi-user.target")

# To check sharefs version:
print(machine.succeed("sharefs -V"))

# Make sure sharefs started:
machine.wait_for_unit("sdcard.mount")

machine.succeed("mkdir /mnt")
def check_bad_opts_output(opts, want, source="/etc", privileged=False):
    output = machine.fail(("" if privileged else "sudo -u alice -i ") + f"sharefs -f -o source={source},{opts} /mnt 2>&1")
    if output != want:
        raise Exception(f"unexpected output: {output}")

# Malformed setuid/setgid representation:
check_bad_opts_output("setuid=ff", "sharefs: invalid value for option setuid\n")
check_bad_opts_output("setgid=ff", "sharefs: invalid value for option setgid\n")

# Bounds check for setuid/setgid:
check_bad_opts_output("setuid=0", "sharefs: invalid value for option setuid\n")
check_bad_opts_output("setgid=0", "sharefs: invalid value for option setgid\n")
check_bad_opts_output("setuid=-1", "sharefs: invalid value for option setuid\n")
check_bad_opts_output("setgid=-1", "sharefs: invalid value for option setgid\n")

# Non-root setuid/setgid:
check_bad_opts_output("setuid=1023", "sharefs: setuid and setgid has no effect when not starting as root\n")
check_bad_opts_output("setgid=1023", "sharefs: setuid and setgid has no effect when not starting as root\n")
check_bad_opts_output("setuid=1023,setgid=1023", "sharefs: setuid and setgid has no effect when not starting as root\n")
check_bad_opts_output("mkdir", "sharefs: mkdir has no effect when not starting as root\n")

# Starting as root without setuid/setgid:
check_bad_opts_output("allow_other", "sharefs: setuid and setgid must not be 0\n", privileged=True)
check_bad_opts_output("setuid=1023", "sharefs: setuid and setgid must not be 0\n", privileged=True)
check_bad_opts_output("setgid=1023", "sharefs: setuid and setgid must not be 0\n", privileged=True)

# Make sure nothing actually got mounted:
machine.fail("umount /mnt")
machine.succeed("rmdir /mnt")

# Unprivileged mount/unmount:
machine.succeed("sudo -u alice -i mkdir /home/alice/{sdcard,persistent}")
machine.succeed("sudo -u alice -i sharefs -o source=/home/alice/persistent /home/alice/sdcard")
machine.succeed("sudo -u alice -i touch /home/alice/sdcard/check")
machine.succeed("sudo -u alice -i umount /home/alice/sdcard")
machine.succeed("sudo -u alice -i rm /home/alice/persistent/check")
machine.succeed("sudo -u alice -i rmdir /home/alice/{sdcard,persistent}")

# Benchmark sharefs:
machine.succeed("fs_mark -v -d /sdcard/fs_mark -l /tmp/fs_log.txt")
machine.copy_from_vm("/tmp/fs_log.txt", "")

# Check permissions:
machine.succeed("sudo -u sharefs touch /var/lib/hakurei/sdcard/fs_mark/.check")
machine.succeed("sudo -u sharefs rm /var/lib/hakurei/sdcard/fs_mark/.check")
machine.succeed("sudo -u alice rm -rf /sdcard/fs_mark")
machine.fail("ls /var/lib/hakurei/sdcard/fs_mark")

# Run hakurei tests on sharefs:
machine.succeed("sudo -u alice -i sharefs-workload-hakurei-tests")
@@ -14,6 +14,7 @@ const (

    CAP_SYS_ADMIN    = 0x15
    CAP_SETPCAP      = 0x8
    CAP_NET_ADMIN    = 0xc
    CAP_DAC_OVERRIDE = 0x1
)

@@ -9,46 +9,60 @@ import (
    "slices"
    "strings"
    "syscall"
    "unique"
)

// AbsoluteError is returned by [NewAbs] and holds the invalid pathname.
type AbsoluteError struct{ Pathname string }
type AbsoluteError string

func (e *AbsoluteError) Error() string { return fmt.Sprintf("path %q is not absolute", e.Pathname) }
func (e *AbsoluteError) Is(target error) bool {
    var ce *AbsoluteError
func (e AbsoluteError) Error() string {
    return fmt.Sprintf("path %q is not absolute", string(e))
}

func (e AbsoluteError) Is(target error) bool {
    var ce AbsoluteError
    if !errors.As(target, &ce) {
        return errors.Is(target, syscall.EINVAL)
    }
    return *e == *ce
    return e == ce
}

// Absolute holds a pathname checked to be absolute.
type Absolute struct{ pathname string }
type Absolute struct{ pathname unique.Handle[string] }

// ok returns whether [Absolute] is not the zero value.
func (a *Absolute) ok() bool { return a != nil && *a != (Absolute{}) }

// unsafeAbs returns [check.Absolute] on any string value.
func unsafeAbs(pathname string) *Absolute { return &Absolute{pathname} }
func unsafeAbs(pathname string) *Absolute {
    return &Absolute{unique.Make(pathname)}
}

// String returns the checked pathname.
func (a *Absolute) String() string {
    if a.pathname == "" {
    if !a.ok() {
        panic("attempted use of zero Absolute")
    }
    return a.pathname.Value()
}

// Handle returns the underlying [unique.Handle].
func (a *Absolute) Handle() unique.Handle[string] {
    return a.pathname
}

// Is efficiently compares the underlying pathname.
func (a *Absolute) Is(v *Absolute) bool {
    if a == nil && v == nil {
        return true
    }
    return a != nil && v != nil &&
        a.pathname != "" && v.pathname != "" &&
        a.pathname == v.pathname
    return a.ok() && v.ok() && a.pathname == v.pathname
}

// NewAbs checks pathname and returns a new [Absolute] if pathname is absolute.
func NewAbs(pathname string) (*Absolute, error) {
    if !path.IsAbs(pathname) {
        return nil, &AbsoluteError{pathname}
        return nil, AbsoluteError(pathname)
    }
    return unsafeAbs(pathname), nil
}
@@ -70,35 +84,49 @@ func (a *Absolute) Append(elem ...string) *Absolute {
// Dir calls [path.Dir] with [Absolute] as its argument.
func (a *Absolute) Dir() *Absolute { return unsafeAbs(path.Dir(a.String())) }

func (a *Absolute) GobEncode() ([]byte, error) { return []byte(a.String()), nil }
// GobEncode returns the checked pathname.
func (a *Absolute) GobEncode() ([]byte, error) {
    return []byte(a.String()), nil
}

// GobDecode stores data if it represents an absolute pathname.
func (a *Absolute) GobDecode(data []byte) error {
    pathname := string(data)
    if !path.IsAbs(pathname) {
        return &AbsoluteError{pathname}
        return AbsoluteError(pathname)
    }
    a.pathname = pathname
    a.pathname = unique.Make(pathname)
    return nil
}

func (a *Absolute) MarshalJSON() ([]byte, error) { return json.Marshal(a.String()) }
// MarshalJSON returns a JSON representation of the checked pathname.
func (a *Absolute) MarshalJSON() ([]byte, error) {
    return json.Marshal(a.String())
}

// UnmarshalJSON stores data if it represents an absolute pathname.
func (a *Absolute) UnmarshalJSON(data []byte) error {
    var pathname string
    if err := json.Unmarshal(data, &pathname); err != nil {
        return err
    }
    if !path.IsAbs(pathname) {
        return &AbsoluteError{pathname}
        return AbsoluteError(pathname)
    }
    a.pathname = pathname
    a.pathname = unique.Make(pathname)
    return nil
}

// SortAbs calls [slices.SortFunc] for a slice of [Absolute].
func SortAbs(x []*Absolute) {
    slices.SortFunc(x, func(a, b *Absolute) int { return strings.Compare(a.String(), b.String()) })
    slices.SortFunc(x, func(a, b *Absolute) int {
        return strings.Compare(a.String(), b.String())
    })
}

// CompactAbs calls [slices.CompactFunc] for a slice of [Absolute].
func CompactAbs(s []*Absolute) []*Absolute {
    return slices.CompactFunc(s, func(a *Absolute, b *Absolute) bool { return a.String() == b.String() })
    return slices.CompactFunc(s, func(a *Absolute, b *Absolute) bool {
        return a.Is(b)
    })
}

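This hunk changes AbsoluteError from a pointer-to-struct to a plain string type, so values compare with ==, while the custom Is method keeps errors.Is(err, syscall.EINVAL) matching; Absolute now interns its pathname with unique.Make, which makes Absolute.Is a cheap handle comparison. A small hedged sketch of how callers observe the new behavior, based only on the code shown above (standalone program, not part of the change):

package main

import (
	"errors"
	"fmt"
	"syscall"

	"hakurei.app/container/check"
)

func main() {
	_, err := check.NewAbs("etc/mtab")

	// The value-typed error compares directly and still answers EINVAL.
	fmt.Println(errors.Is(err, check.AbsoluteError("etc/mtab"))) // true
	fmt.Println(errors.Is(err, syscall.EINVAL))                  // true

	// Interned pathnames make Is a handle comparison rather than a string compare.
	a := check.MustAbs("/etc")
	b := check.MustAbs("/etc")
	fmt.Println(a.Is(b)) // true
}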
@@ -31,8 +31,8 @@ func TestAbsoluteError(t *testing.T) {
    }{
        {"EINVAL", new(AbsoluteError), syscall.EINVAL, true},
        {"not EINVAL", new(AbsoluteError), syscall.EBADE, false},
        {"ne val", new(AbsoluteError), &AbsoluteError{Pathname: "etc"}, false},
        {"equals", &AbsoluteError{Pathname: "etc"}, &AbsoluteError{Pathname: "etc"}, true},
        {"ne val", new(AbsoluteError), AbsoluteError("etc"), false},
        {"equals", AbsoluteError("etc"), AbsoluteError("etc"), true},
    }

    for _, tc := range testCases {
@@ -45,7 +45,7 @@ func TestAbsoluteError(t *testing.T) {
        t.Parallel()

        want := `path "etc" is not absolute`
        if got := (&AbsoluteError{Pathname: "etc"}).Error(); got != want {
        if got := (AbsoluteError("etc")).Error(); got != want {
            t.Errorf("Error: %q, want %q", got, want)
        }
    })
@@ -62,8 +62,8 @@ func TestNewAbs(t *testing.T) {
        wantErr error
    }{
        {"good", "/etc", MustAbs("/etc"), nil},
        {"not absolute", "etc", nil, &AbsoluteError{Pathname: "etc"}},
        {"zero", "", nil, &AbsoluteError{Pathname: ""}},
        {"not absolute", "etc", nil, AbsoluteError("etc")},
        {"zero", "", nil, AbsoluteError("")},
    }

    for _, tc := range testCases {
@@ -84,7 +84,7 @@ func TestNewAbs(t *testing.T) {
        t.Parallel()

        defer func() {
            wantPanic := &AbsoluteError{Pathname: "etc"}
            wantPanic := AbsoluteError("etc")

            if r := recover(); !reflect.DeepEqual(r, wantPanic) {
                t.Errorf("MustAbs: panic = %v; want %v", r, wantPanic)
@@ -175,7 +175,7 @@ func TestCodecAbsolute(t *testing.T) {

    `"/etc"`, `{"val":"/etc","magic":3236757504}`},
    {"not absolute", nil,
        &AbsoluteError{Pathname: "etc"},
        AbsoluteError("etc"),
        "\t\x7f\x05\x01\x02\xff\x82\x00\x00\x00\a\xff\x80\x00\x03etc",
        ",\xff\x83\x03\x01\x01\x06sCheck\x01\xff\x84\x00\x01\x02\x01\bPathname\x01\xff\x80\x00\x01\x05Magic\x01\x06\x00\x00\x00\t\x7f\x05\x01\x02\xff\x82\x00\x00\x00\x0f\xff\x84\x01\x03etc\x01\xfb\x01\x81\xda\x00\x00\x00",

@@ -35,6 +35,8 @@ type (
    // Container represents a container environment being prepared or run.
    // None of [Container] methods are safe for concurrent use.
    Container struct {
        // Whether the container init should stay alive after its parent terminates.
        AllowOrphan bool
        // Cgroup fd, nil to disable.
        Cgroup *int
        // ExtraFiles passed through to initial process in the container,
@@ -253,7 +255,6 @@ func (p *Container) Start() error {
    p.cmd.Dir = fhs.Root
    p.cmd.SysProcAttr = &SysProcAttr{
        Setsid:    !p.RetainSession,
        Pdeathsig: SIGKILL,
        Cloneflags: CLONE_NEWUSER | CLONE_NEWPID | CLONE_NEWNS |
            CLONE_NEWIPC | CLONE_NEWUTS | CLONE_NEWCGROUP,

@@ -262,12 +263,17 @@ func (p *Container) Start() error {
            CAP_SYS_ADMIN,
            // drop capabilities
            CAP_SETPCAP,
            // bring up loopback interface
            CAP_NET_ADMIN,
            // overlay access to upperdir and workdir
            CAP_DAC_OVERRIDE,
        },

        UseCgroupFD: p.Cgroup != nil,
    }
    if !p.AllowOrphan {
        p.cmd.SysProcAttr.Pdeathsig = SIGKILL
    }
    if p.cmd.SysProcAttr.UseCgroupFD {
        p.cmd.SysProcAttr.CgroupFD = *p.Cgroup
    }

@@ -61,6 +61,8 @@ type syscallDispatcher interface {
    mountTmpfs(fsname, target string, flags uintptr, size int, perm os.FileMode) error
    // ensureFile provides ensureFile.
    ensureFile(name string, perm, pperm os.FileMode) error
    // mustLoopback provides mustLoopback.
    mustLoopback(msg message.Msg)

    // seccompLoad provides [seccomp.Load].
    seccompLoad(rules []std.NativeRule, flags seccomp.ExportFlag) error
@@ -164,6 +166,7 @@ func (k direct) mountTmpfs(fsname, target string, flags uintptr, size int, perm
func (direct) ensureFile(name string, perm, pperm os.FileMode) error {
    return ensureFile(name, perm, pperm)
}
func (direct) mustLoopback(msg message.Msg) { mustLoopback(msg) }

func (direct) seccompLoad(rules []std.NativeRule, flags seccomp.ExportFlag) error {
    return seccomp.Load(rules, flags)

@@ -465,6 +465,8 @@ func (k *kstub) ensureFile(name string, perm, pperm os.FileMode) error {
        stub.CheckArg(k.Stub, "pperm", pperm, 2))
}

func (*kstub) mustLoopback(message.Msg) { /* noop */ }

func (k *kstub) seccompLoad(rules []std.NativeRule, flags seccomp.ExportFlag) error {
    k.Helper()
    return k.Expects("seccompLoad").Error(

@@ -18,7 +18,7 @@ func messageFromError(err error) (m string, ok bool) {
    if m, ok = messagePrefixP[os.PathError]("cannot ", err); ok {
        return
    }
    if m, ok = messagePrefixP[check.AbsoluteError](zeroString, err); ok {
    if m, ok = messagePrefix[check.AbsoluteError](zeroString, err); ok {
        return
    }
    if m, ok = messagePrefix[OpRepeatError](zeroString, err); ok {

@@ -37,7 +37,7 @@ func TestMessageFromError(t *testing.T) {
        Err: stub.UniqueError(0xdeadbeef),
    }, "cannot mount /sysroot: unique error 3735928559 injected by the test suite", true},

    {"absolute", &check.AbsoluteError{Pathname: "etc/mtab"},
    {"absolute", check.AbsoluteError("etc/mtab"),
        `path "etc/mtab" is not absolute`, true},

    {"repeat", OpRepeatError("autoetc"),

@@ -170,6 +170,10 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
        offsetSetup = int(setupFd + 1)
    }

    if !params.HostNet {
        k.mustLoopback(msg)
    }

    // write uid/gid map here so parent does not need to set dumpable
    if err := k.setDumpable(SUID_DUMP_USER); err != nil {
        k.fatalf(msg, "cannot set SUID_DUMP_USER: %v", err)

@@ -312,7 +312,10 @@ func TestMountOverlayOp(t *testing.T) {
        },
    }},

    {"ephemeral", new(Ops).OverlayEphemeral(check.MustAbs("/nix/store"), check.MustAbs("/mnt-root/nix/.ro-store")), Ops{
    {"ephemeral", new(Ops).OverlayEphemeral(
        check.MustAbs("/nix/store"),
        check.MustAbs("/mnt-root/nix/.ro-store"),
    ), Ops{
        &MountOverlayOp{
            Target: check.MustAbs("/nix/store"),
            Lower:  []*check.Absolute{check.MustAbs("/mnt-root/nix/.ro-store")},
@@ -320,7 +323,10 @@ func TestMountOverlayOp(t *testing.T) {
        },
    }},

    {"readonly", new(Ops).OverlayReadonly(check.MustAbs("/nix/store"), check.MustAbs("/mnt-root/nix/.ro-store")), Ops{
    {"readonly", new(Ops).OverlayReadonly(
        check.MustAbs("/nix/store"),
        check.MustAbs("/mnt-root/nix/.ro-store"),
    ), Ops{
        &MountOverlayOp{
            Target: check.MustAbs("/nix/store"),
            Lower:  []*check.Absolute{check.MustAbs("/mnt-root/nix/.ro-store")},

@@ -31,7 +31,7 @@ func (l *SymlinkOp) Valid() bool { return l != nil && l.Target != nil && l.LinkN
func (l *SymlinkOp) early(_ *setupState, k syscallDispatcher) error {
    if l.Dereference {
        if !path.IsAbs(l.LinkName) {
            return &check.AbsoluteError{Pathname: l.LinkName}
            return check.AbsoluteError(l.LinkName)
        }
        if name, err := k.readlink(l.LinkName); err != nil {
            return err

@@ -23,7 +23,7 @@ func TestSymlinkOp(t *testing.T) {
        Target:      check.MustAbs("/etc/mtab"),
        LinkName:    "etc/mtab",
        Dereference: true,
    }, nil, &check.AbsoluteError{Pathname: "etc/mtab"}, nil, nil},
    }, nil, check.AbsoluteError("etc/mtab"), nil, nil},

    {"readlink", &Params{ParentPerm: 0755}, &SymlinkOp{
        Target: check.MustAbs("/etc/mtab"),

269
container/netlink.go
Normal file
269
container/netlink.go
Normal file
@@ -0,0 +1,269 @@
|
||||
package container
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"net"
|
||||
"os"
|
||||
. "syscall"
|
||||
"unsafe"
|
||||
|
||||
"hakurei.app/container/std"
|
||||
"hakurei.app/message"
|
||||
)
|
||||
|
||||
// rtnetlink represents a NETLINK_ROUTE socket.
|
||||
type rtnetlink struct {
|
||||
// Sent as part of rtnetlink messages.
|
||||
pid uint32
|
||||
// AF_NETLINK socket.
|
||||
fd int
|
||||
// Whether the socket is open.
|
||||
ok bool
|
||||
// Message sequence number.
|
||||
seq uint32
|
||||
}
|
||||
|
||||
// open creates the underlying NETLINK_ROUTE socket.
|
||||
func (s *rtnetlink) open() (err error) {
|
||||
if s.ok || s.fd < 0 {
|
||||
return os.ErrInvalid
|
||||
}
|
||||
|
||||
s.pid = uint32(Getpid())
|
||||
if s.fd, err = Socket(
|
||||
AF_NETLINK,
|
||||
SOCK_RAW|SOCK_CLOEXEC,
|
||||
NETLINK_ROUTE,
|
||||
); err != nil {
|
||||
return os.NewSyscallError("socket", err)
|
||||
} else if err = Bind(s.fd, &SockaddrNetlink{
|
||||
Family: AF_NETLINK,
|
||||
Pid: s.pid,
|
||||
}); err != nil {
|
||||
_ = s.close()
|
||||
return os.NewSyscallError("bind", err)
|
||||
} else {
|
||||
s.ok = true
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// close closes the underlying NETLINK_ROUTE socket.
|
||||
func (s *rtnetlink) close() error {
|
||||
if !s.ok {
|
||||
return os.ErrInvalid
|
||||
}
|
||||
|
||||
s.ok = false
|
||||
err := Close(s.fd)
|
||||
s.fd = -1
|
||||
return err
|
||||
}
|
||||
|
||||
// roundtrip sends a netlink message and handles the reply.
|
||||
func (s *rtnetlink) roundtrip(data []byte) error {
|
||||
if !s.ok {
|
||||
return os.ErrInvalid
|
||||
}
|
||||
|
||||
defer func() { s.seq++ }()
|
||||
|
||||
if err := Sendto(s.fd, data, 0, &SockaddrNetlink{
|
||||
Family: AF_NETLINK,
|
||||
}); err != nil {
|
||||
return os.NewSyscallError("sendto", err)
|
||||
}
|
||||
buf := make([]byte, Getpagesize())
|
||||
|
||||
done:
|
||||
for {
|
||||
p := buf
|
||||
if n, _, err := Recvfrom(s.fd, p, 0); err != nil {
|
||||
return os.NewSyscallError("recvfrom", err)
|
||||
} else if n < NLMSG_HDRLEN {
|
||||
return errors.ErrUnsupported
|
||||
} else {
|
||||
p = p[:n]
|
||||
}
|
||||
|
||||
if msgs, err := ParseNetlinkMessage(p); err != nil {
|
||||
return err
|
||||
} else {
|
||||
for _, m := range msgs {
|
||||
if m.Header.Seq != s.seq || m.Header.Pid != s.pid {
|
||||
return errors.ErrUnsupported
|
||||
}
|
||||
if m.Header.Type == NLMSG_DONE {
|
||||
break done
|
||||
}
|
||||
if m.Header.Type == NLMSG_ERROR {
|
||||
if len(m.Data) >= 4 {
|
||||
errno := Errno(-std.ScmpInt(binary.NativeEndian.Uint32(m.Data)))
|
||||
if errno == 0 {
|
||||
return nil
|
||||
}
|
||||
return errno
|
||||
}
|
||||
return errors.ErrUnsupported
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// mustRoundtrip calls roundtrip and terminates via msg for a non-nil error.
|
||||
func (s *rtnetlink) mustRoundtrip(msg message.Msg, data []byte) {
|
||||
err := s.roundtrip(data)
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
if closeErr := Close(s.fd); closeErr != nil {
msg.Verbosef("cannot close: %v", closeErr)
}
|
||||
|
||||
switch err.(type) {
|
||||
case *os.SyscallError:
|
||||
msg.GetLogger().Fatalf("cannot %v", err)
|
||||
|
||||
case Errno:
|
||||
msg.GetLogger().Fatalf("RTNETLINK answers: %v", err)
|
||||
|
||||
default:
|
||||
msg.GetLogger().Fatalln("RTNETLINK answers with unexpected message")
|
||||
}
|
||||
}
|
||||
|
||||
// newaddrLo represents a RTM_NEWADDR message with two addresses.
|
||||
type newaddrLo struct {
|
||||
header NlMsghdr
|
||||
data IfAddrmsg
|
||||
|
||||
r0 RtAttr
|
||||
a0 [4]byte // in_addr
|
||||
r1 RtAttr
|
||||
a1 [4]byte // in_addr
|
||||
}
|
||||
|
||||
// sizeofNewaddrLo is the expected size of newaddrLo.
|
||||
const sizeofNewaddrLo = NLMSG_HDRLEN + SizeofIfAddrmsg + (SizeofRtAttr+4)*2
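For reference, with the usual Linux values for these constants (NLMSG_HDRLEN = 16, SizeofIfAddrmsg = 8, SizeofRtAttr = 4) this works out to 16 + 8 + (4+4)*2 = 40 bytes, i.e. the 0x28 Len value expected by TestRtnetlinkMessage below.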
|
||||
|
||||
// newaddrLo returns the address of a populated newaddrLo.
|
||||
func (s *rtnetlink) newaddrLo(lo int) *newaddrLo {
|
||||
return &newaddrLo{NlMsghdr{
|
||||
Len: sizeofNewaddrLo,
|
||||
Type: RTM_NEWADDR,
|
||||
Flags: NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE | NLM_F_EXCL,
|
||||
Seq: s.seq,
|
||||
Pid: s.pid,
|
||||
}, IfAddrmsg{
|
||||
Family: AF_INET,
|
||||
Prefixlen: 8,
|
||||
Flags: IFA_F_PERMANENT,
|
||||
Scope: RT_SCOPE_HOST,
|
||||
Index: uint32(lo),
|
||||
}, RtAttr{
|
||||
Len: uint16(SizeofRtAttr + len(newaddrLo{}.a0)),
|
||||
Type: IFA_LOCAL,
|
||||
}, [4]byte{127, 0, 0, 1}, RtAttr{
|
||||
Len: uint16(SizeofRtAttr + len(newaddrLo{}.a1)),
|
||||
Type: IFA_ADDRESS,
|
||||
}, [4]byte{127, 0, 0, 1}}
|
||||
}
|
||||
|
||||
func (msg *newaddrLo) toWireFormat() []byte {
|
||||
var buf [sizeofNewaddrLo]byte
|
||||
|
||||
*(*uint32)(unsafe.Pointer(&buf[0:4][0])) = msg.header.Len
|
||||
*(*uint16)(unsafe.Pointer(&buf[4:6][0])) = msg.header.Type
|
||||
*(*uint16)(unsafe.Pointer(&buf[6:8][0])) = msg.header.Flags
|
||||
*(*uint32)(unsafe.Pointer(&buf[8:12][0])) = msg.header.Seq
|
||||
*(*uint32)(unsafe.Pointer(&buf[12:16][0])) = msg.header.Pid
|
||||
|
||||
buf[16] = msg.data.Family
|
||||
buf[17] = msg.data.Prefixlen
|
||||
buf[18] = msg.data.Flags
|
||||
buf[19] = msg.data.Scope
|
||||
*(*uint32)(unsafe.Pointer(&buf[20:24][0])) = msg.data.Index
|
||||
|
||||
*(*uint16)(unsafe.Pointer(&buf[24:26][0])) = msg.r0.Len
|
||||
*(*uint16)(unsafe.Pointer(&buf[26:28][0])) = msg.r0.Type
|
||||
copy(buf[28:32], msg.a0[:])
|
||||
*(*uint16)(unsafe.Pointer(&buf[32:34][0])) = msg.r1.Len
|
||||
*(*uint16)(unsafe.Pointer(&buf[34:36][0])) = msg.r1.Type
|
||||
copy(buf[36:40], msg.a1[:])
|
||||
|
||||
return buf[:]
|
||||
}
|
||||
|
||||
// newlinkLo represents a RTM_NEWLINK message.
|
||||
type newlinkLo struct {
|
||||
header NlMsghdr
|
||||
data IfInfomsg
|
||||
}
|
||||
|
||||
// sizeofNewlinkLo is the expected size of newlinkLo.
|
||||
const sizeofNewlinkLo = NLMSG_HDRLEN + SizeofIfInfomsg
|
||||
|
||||
// newlinkLo returns the address of a populated newlinkLo.
|
||||
func (s *rtnetlink) newlinkLo(lo int) *newlinkLo {
|
||||
return &newlinkLo{NlMsghdr{
|
||||
Len: sizeofNewlinkLo,
|
||||
Type: RTM_NEWLINK,
|
||||
Flags: NLM_F_REQUEST | NLM_F_ACK,
|
||||
Seq: s.seq,
|
||||
Pid: s.pid,
|
||||
}, IfInfomsg{
|
||||
Family: AF_UNSPEC,
|
||||
Index: int32(lo),
|
||||
Flags: IFF_UP,
|
||||
Change: IFF_UP,
|
||||
}}
|
||||
}
|
||||
|
||||
func (msg *newlinkLo) toWireFormat() []byte {
|
||||
var buf [sizeofNewlinkLo]byte
|
||||
|
||||
*(*uint32)(unsafe.Pointer(&buf[0:4][0])) = msg.header.Len
|
||||
*(*uint16)(unsafe.Pointer(&buf[4:6][0])) = msg.header.Type
|
||||
*(*uint16)(unsafe.Pointer(&buf[6:8][0])) = msg.header.Flags
|
||||
*(*uint32)(unsafe.Pointer(&buf[8:12][0])) = msg.header.Seq
|
||||
*(*uint32)(unsafe.Pointer(&buf[12:16][0])) = msg.header.Pid
|
||||
|
||||
buf[16] = msg.data.Family
|
||||
*(*uint16)(unsafe.Pointer(&buf[18:20][0])) = msg.data.Type
|
||||
*(*int32)(unsafe.Pointer(&buf[20:24][0])) = msg.data.Index
|
||||
*(*uint32)(unsafe.Pointer(&buf[24:28][0])) = msg.data.Flags
|
||||
*(*uint32)(unsafe.Pointer(&buf[28:32][0])) = msg.data.Change
|
||||
|
||||
return buf[:]
|
||||
}
|
||||
|
||||
// mustLoopback creates the loopback address and brings the lo interface up.
// mustLoopback calls a fatal method of the underlying [log.Logger] of msg with
// a user-facing error message if RTNETLINK behaves unexpectedly.
|
||||
func mustLoopback(msg message.Msg) {
|
||||
log := msg.GetLogger()
|
||||
|
||||
var lo int
|
||||
if ifi, err := net.InterfaceByName("lo"); err != nil {
|
||||
log.Fatalln(err)
|
||||
} else {
|
||||
lo = ifi.Index
|
||||
}
|
||||
|
||||
var s rtnetlink
|
||||
if err := s.open(); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
defer func() {
|
||||
if err := s.close(); err != nil {
|
||||
msg.Verbosef("cannot close netlink: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
s.mustRoundtrip(msg, s.newaddrLo(lo).toWireFormat())
|
||||
s.mustRoundtrip(msg, s.newlinkLo(lo).toWireFormat())
|
||||
}
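The toWireFormat methods above amount to host-endian serialization of the fixed-size structs. Purely as an illustrative sketch (not part of the change; it requires Go 1.21 for binary.NativeEndian), the same RTM_NEWADDR request can be assembled with encoding/binary using only constants from package syscall:

package main

import (
	"encoding/binary"
	"fmt"
	"syscall"
)

func main() {
	// Assemble the 40-byte RTM_NEWADDR request in native byte order,
	// following the field order of newaddrLo above (Seq 0, Pid 1, lo index 1).
	buf := make([]byte, 0, 40)
	buf = binary.NativeEndian.AppendUint32(buf, 40)                  // nlmsghdr.Len
	buf = binary.NativeEndian.AppendUint16(buf, syscall.RTM_NEWADDR) // nlmsghdr.Type
	buf = binary.NativeEndian.AppendUint16(buf,
		syscall.NLM_F_REQUEST|syscall.NLM_F_ACK|syscall.NLM_F_CREATE|syscall.NLM_F_EXCL)
	buf = binary.NativeEndian.AppendUint32(buf, 0) // nlmsghdr.Seq
	buf = binary.NativeEndian.AppendUint32(buf, 1) // nlmsghdr.Pid

	// ifaddrmsg: family, prefixlen, flags, scope, then the interface index
	buf = append(buf, syscall.AF_INET, 8, syscall.IFA_F_PERMANENT, syscall.RT_SCOPE_HOST)
	buf = binary.NativeEndian.AppendUint32(buf, 1)

	// two rtattr records, each carrying the same 127.0.0.1 in_addr payload
	for _, typ := range []uint16{syscall.IFA_LOCAL, syscall.IFA_ADDRESS} {
		buf = binary.NativeEndian.AppendUint16(buf, 8) // rtattr.Len
		buf = binary.NativeEndian.AppendUint16(buf, typ)
		buf = append(buf, 127, 0, 0, 1)
	}
	fmt.Printf("% x\n", buf)
}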
|
||||
72
container/netlink_test.go
Normal file
@@ -0,0 +1,72 @@
|
||||
package container
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func TestSizeof(t *testing.T) {
|
||||
if got := unsafe.Sizeof(newaddrLo{}); got != sizeofNewaddrLo {
|
||||
t.Fatalf("newaddrLo: sizeof = %#x, want %#x", got, sizeofNewaddrLo)
|
||||
}
|
||||
|
||||
if got := unsafe.Sizeof(newlinkLo{}); got != sizeofNewlinkLo {
|
||||
t.Fatalf("newlinkLo: sizeof = %#x, want %#x", got, sizeofNewlinkLo)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRtnetlinkMessage(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
msg interface{ toWireFormat() []byte }
|
||||
want []byte
|
||||
}{
|
||||
{"newaddrLo", (&rtnetlink{pid: 1, seq: 0}).newaddrLo(1), []byte{
|
||||
/* Len */ 0x28, 0, 0, 0,
|
||||
/* Type */ 0x14, 0,
|
||||
/* Flags */ 5, 6,
|
||||
/* Seq */ 0, 0, 0, 0,
|
||||
/* Pid */ 1, 0, 0, 0,
|
||||
|
||||
/* Family */ 2,
|
||||
/* Prefixlen */ 8,
|
||||
/* Flags */ 0x80,
|
||||
/* Scope */ 0xfe,
|
||||
/* Index */ 1, 0, 0, 0,
|
||||
|
||||
/* Len */ 8, 0,
|
||||
/* Type */ 2, 0,
|
||||
/* in_addr */ 127, 0, 0, 1,
|
||||
|
||||
/* Len */ 8, 0,
|
||||
/* Type */ 1, 0,
|
||||
/* in_addr */ 127, 0, 0, 1,
|
||||
}},
|
||||
|
||||
{"newlinkLo", (&rtnetlink{pid: 1, seq: 1}).newlinkLo(1), []byte{
|
||||
/* Len */ 0x20, 0, 0, 0,
|
||||
/* Type */ 0x10, 0,
|
||||
/* Flags */ 5, 0,
|
||||
/* Seq */ 1, 0, 0, 0,
|
||||
/* Pid */ 1, 0, 0, 0,
|
||||
|
||||
/* Family */ 0,
|
||||
/* pad */ 0,
|
||||
/* Type */ 0, 0,
|
||||
/* Index */ 1, 0, 0, 0,
|
||||
/* Flags */ 1, 0, 0, 0,
|
||||
/* Change */ 1, 0, 0, 0,
|
||||
}},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if got := tc.msg.toWireFormat(); string(got) != string(tc.want) {
|
||||
t.Fatalf("toWireFormat: %#v, want %#v", got, tc.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
2
dist/install.sh
vendored
@@ -2,7 +2,7 @@
cd "$(dirname -- "$0")" || exit 1

install -vDm0755 "bin/hakurei" "${HAKUREI_INSTALL_PREFIX}/usr/bin/hakurei"
install -vDm0755 "bin/hpkg" "${HAKUREI_INSTALL_PREFIX}/usr/bin/hpkg"
install -vDm0755 "bin/sharefs" "${HAKUREI_INSTALL_PREFIX}/usr/bin/sharefs"

install -vDm4511 "bin/hsu" "${HAKUREI_INSTALL_PREFIX}/usr/bin/hsu"
if [ ! -f "${HAKUREI_INSTALL_PREFIX}/etc/hsurc" ]; then

11
flake.nix
@@ -69,6 +69,8 @@
withRace = true;
};

sharefs = callPackage ./cmd/sharefs/test { inherit system self; };

hpkg = callPackage ./cmd/hpkg/test { inherit system self; };

formatting = runCommandLocal "check-formatting" { nativeBuildInputs = [ nixfmt-rfc-style ]; } ''
@@ -136,6 +138,10 @@
;
};
hsu = pkgs.callPackage ./cmd/hsu/package.nix { inherit (self.packages.${system}) hakurei; };
sharefs = pkgs.linkFarm "sharefs" {
"bin/sharefs" = "${hakurei}/libexec/sharefs";
"bin/mount.fuse.sharefs" = "${hakurei}/libexec/sharefs";
};

dist = pkgs.runCommand "${hakurei.name}-dist" { buildInputs = hakurei.targetPkgs ++ [ pkgs.pkgsStatic.musl ]; } ''
# go requires XDG_CACHE_HOME for the build cache
@@ -160,7 +166,10 @@
pkgs = nixpkgsFor.${system};
in
{
default = pkgs.mkShell { buildInputs = hakurei.targetPkgs; };
default = pkgs.mkShell {
buildInputs = hakurei.targetPkgs;
hardeningDisable = [ "fortify" ];
};
withPackage = pkgs.mkShell { buildInputs = [ hakurei ] ++ hakurei.targetPkgs; };

vm =

@@ -108,7 +108,7 @@ func TestSpPulseOp(t *testing.T) {
|
||||
call("lookupEnv", stub.ExpectArgs{"PULSE_COOKIE"}, "proc/nonexistent/cookie", nil),
|
||||
}, nil, nil, &hst.AppError{
|
||||
Step: "locate PulseAudio cookie",
|
||||
Err: &check.AbsoluteError{Pathname: "proc/nonexistent/cookie"},
|
||||
Err: check.AbsoluteError("proc/nonexistent/cookie"),
|
||||
}, nil, nil, nil, nil, nil},
|
||||
|
||||
{"cookie loadFile", func(bool, bool) outcomeOp {
|
||||
@@ -272,7 +272,7 @@ func TestDiscoverPulseCookie(t *testing.T) {
|
||||
call("verbose", stub.ExpectArgs{[]any{(*check.Absolute)(nil)}}, nil, nil),
|
||||
}}, &hst.AppError{
|
||||
Step: "locate PulseAudio cookie",
|
||||
Err: &check.AbsoluteError{Pathname: "proc/nonexistent/pulse-cookie"},
|
||||
Err: check.AbsoluteError("proc/nonexistent/pulse-cookie"),
|
||||
}},
|
||||
|
||||
{"success override", fCheckPathname, stub.Expect{Calls: []stub.Call{
|
||||
@@ -286,7 +286,7 @@ func TestDiscoverPulseCookie(t *testing.T) {
|
||||
call("verbose", stub.ExpectArgs{[]any{(*check.Absolute)(nil)}}, nil, nil),
|
||||
}}, &hst.AppError{
|
||||
Step: "locate PulseAudio cookie",
|
||||
Err: &check.AbsoluteError{Pathname: "proc/nonexistent/home"},
|
||||
Err: check.AbsoluteError("proc/nonexistent/home"),
|
||||
}},
|
||||
|
||||
{"home stat", fCheckPathname, stub.Expect{Calls: []stub.Call{
|
||||
@@ -321,7 +321,7 @@ func TestDiscoverPulseCookie(t *testing.T) {
|
||||
call("verbose", stub.ExpectArgs{[]any{(*check.Absolute)(nil)}}, nil, nil),
|
||||
}}, &hst.AppError{
|
||||
Step: "locate PulseAudio cookie",
|
||||
Err: &check.AbsoluteError{Pathname: "proc/nonexistent/xdg"},
|
||||
Err: check.AbsoluteError("proc/nonexistent/xdg"),
|
||||
}},
|
||||
|
||||
{"xdg stat", fCheckPathname, stub.Expect{Calls: []stub.Call{
|
||||
|
||||
@@ -514,7 +514,7 @@ var ErrNotDone = errors.New("did not receive a Core::Done event targeting previo
|
||||
const (
|
||||
// syncTimeout is the maximum duration [Core.Sync] is allowed to take before
|
||||
// receiving [CoreDone] or failing.
|
||||
syncTimeout = 5 * time.Second
|
||||
syncTimeout = 10 * time.Second
|
||||
)
|
||||
|
||||
// Sync queues a [CoreSync] message for the PipeWire server and initiates a Roundtrip.
|
||||
|
||||
@@ -19,7 +19,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
@@ -27,10 +26,16 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Conn is a low level unix socket interface used by [Context].
|
||||
type Conn interface {
|
||||
// MightBlock informs the implementation that the next call to
|
||||
// Recvmsg or Sendmsg might block. A zero or negative timeout
|
||||
// cancels this behaviour.
|
||||
MightBlock(timeout time.Duration)
|
||||
|
||||
// Recvmsg calls syscall.Recvmsg on the underlying socket.
|
||||
Recvmsg(p, oob []byte, flags int) (n, oobn, recvflags int, err error)
|
||||
|
||||
@@ -138,45 +143,142 @@ func New(conn Conn, props SPADict) (*Context, error) {
|
||||
return &ctx, nil
|
||||
}
|
||||
|
||||
// A SyscallConnCloser is a [syscall.Conn] that implements [io.Closer].
|
||||
type SyscallConnCloser interface {
|
||||
syscall.Conn
|
||||
io.Closer
|
||||
// unixConn is an implementation of the [Conn] interface for connections
|
||||
// to Unix domain sockets.
|
||||
type unixConn struct {
|
||||
fd int
|
||||
|
||||
// Whether creation of a new epoll instance was attempted.
|
||||
epoll bool
|
||||
// File descriptor referring to the new epoll instance.
|
||||
// Valid if epoll is true and epollErr is nil.
|
||||
epollFd int
|
||||
// Error returned by syscall.EpollCreate1.
|
||||
epollErr error
|
||||
// Stores epoll events from the kernel.
|
||||
epollBuf [32]syscall.EpollEvent
|
||||
|
||||
// If non-zero, next call is treated as a blocking call.
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
// A SyscallConn is a [Conn] adapter for [syscall.Conn].
|
||||
type SyscallConn struct{ SyscallConnCloser }
|
||||
// Dial connects to a Unix domain socket described by name.
|
||||
func Dial(name string) (Conn, error) {
|
||||
if fd, err := syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC|syscall.SOCK_NONBLOCK, 0); err != nil {
|
||||
return nil, os.NewSyscallError("socket", err)
|
||||
} else if err = syscall.Connect(fd, &syscall.SockaddrUnix{Name: name}); err != nil {
|
||||
_ = syscall.Close(fd)
|
||||
return nil, os.NewSyscallError("connect", err)
|
||||
} else {
|
||||
return &unixConn{fd: fd}, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Recvmsg implements [Conn.Recvmsg] via [syscall.Conn.SyscallConn].
|
||||
func (conn SyscallConn) Recvmsg(p, oob []byte, flags int) (n, oobn, recvflags int, err error) {
|
||||
var rc syscall.RawConn
|
||||
if rc, err = conn.SyscallConn(); err != nil {
|
||||
// MightBlock informs the implementation that the next call
|
||||
// might block for a non-zero timeout.
|
||||
func (conn *unixConn) MightBlock(timeout time.Duration) {
|
||||
if timeout < 0 {
|
||||
timeout = 0
|
||||
}
|
||||
conn.timeout = timeout
|
||||
}
|
||||
|
||||
// wantsEpoll is called at the beginning of any method that might use epoll.
|
||||
func (conn *unixConn) wantsEpoll() error {
|
||||
if !conn.epoll {
|
||||
conn.epoll = true
|
||||
conn.epollFd, conn.epollErr = syscall.EpollCreate1(syscall.EPOLL_CLOEXEC)
|
||||
if conn.epollErr == nil {
|
||||
if conn.epollErr = syscall.EpollCtl(conn.epollFd, syscall.EPOLL_CTL_ADD, conn.fd, &syscall.EpollEvent{
|
||||
Events: syscall.EPOLLERR | syscall.EPOLLHUP,
|
||||
Fd: int32(conn.fd),
|
||||
}); conn.epollErr != nil {
|
||||
_ = syscall.Close(conn.epollFd)
|
||||
}
|
||||
}
|
||||
}
|
||||
return conn.epollErr
|
||||
}
|
||||
|
||||
// wait waits for a specific I/O event on fd. The caller must ensure wantsEpoll
// has been called before wait is called.
|
||||
func (conn *unixConn) wait(event uint32) (err error) {
|
||||
if conn.timeout == 0 {
|
||||
return nil
|
||||
}
|
||||
deadline := time.Now().Add(conn.timeout)
|
||||
conn.timeout = 0
|
||||
|
||||
if err = syscall.EpollCtl(conn.epollFd, syscall.EPOLL_CTL_MOD, conn.fd, &syscall.EpollEvent{
|
||||
Events: event | syscall.EPOLLERR | syscall.EPOLLHUP,
|
||||
Fd: int32(conn.fd),
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if controlErr := rc.Control(func(fd uintptr) {
|
||||
n, oobn, recvflags, _, err = syscall.Recvmsg(int(fd), p, oob, flags)
|
||||
}); controlErr != nil && err == nil {
|
||||
err = controlErr
|
||||
for timeout := deadline.Sub(time.Now()); timeout > 0; timeout = deadline.Sub(time.Now()) {
|
||||
var n int
|
||||
if n, err = syscall.EpollWait(conn.epollFd, conn.epollBuf[:], int(timeout/time.Millisecond)); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
switch n {
|
||||
case 1: // only the socket fd is ever added
|
||||
if conn.epollBuf[0].Fd != int32(conn.fd) { // unreachable
|
||||
return syscall.ENOTRECOVERABLE
|
||||
}
|
||||
if conn.epollBuf[0].Events&event == event ||
|
||||
conn.epollBuf[0].Events&syscall.EPOLLERR|syscall.EPOLLHUP != 0 {
|
||||
return nil
|
||||
}
|
||||
err = syscall.ETIME
|
||||
continue
|
||||
|
||||
case 0: // timeout
|
||||
return syscall.ETIMEDOUT
|
||||
|
||||
default: // unreachable
|
||||
return syscall.ENOTRECOVERABLE
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Sendmsg implements [Conn.Sendmsg] via [syscall.Conn.SyscallConn].
|
||||
func (conn SyscallConn) Sendmsg(p, oob []byte, flags int) (n int, err error) {
|
||||
var rc syscall.RawConn
|
||||
if rc, err = conn.SyscallConn(); err != nil {
|
||||
// Recvmsg calls syscall.Recvmsg on the underlying socket.
|
||||
func (conn *unixConn) Recvmsg(p, oob []byte, flags int) (n, oobn, recvflags int, err error) {
|
||||
if err = conn.wantsEpoll(); err != nil {
|
||||
return
|
||||
} else if err = conn.wait(syscall.EPOLLIN); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if controlErr := rc.Control(func(fd uintptr) {
|
||||
n, err = syscall.SendmsgN(int(fd), p, oob, nil, flags)
|
||||
}); controlErr != nil && err == nil {
|
||||
err = controlErr
|
||||
}
|
||||
n, oobn, recvflags, _, err = syscall.Recvmsg(conn.fd, p, oob, flags)
|
||||
return
|
||||
}
|
||||
|
||||
// Sendmsg calls syscall.Sendmsg on the underlying socket.
|
||||
func (conn *unixConn) Sendmsg(p, oob []byte, flags int) (n int, err error) {
|
||||
if err = conn.wantsEpoll(); err != nil {
|
||||
return
|
||||
} else if err = conn.wait(syscall.EPOLLOUT); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
n, err = syscall.SendmsgN(conn.fd, p, oob, nil, flags)
|
||||
return
|
||||
}
|
||||
|
||||
// Close closes the underlying socket and the epoll fd if populated.
|
||||
func (conn *unixConn) Close() (err error) {
|
||||
if conn.epoll && conn.epollErr == nil {
|
||||
conn.epollErr = syscall.Close(conn.epollFd)
|
||||
}
|
||||
if err = syscall.Close(conn.fd); err != nil {
|
||||
return
|
||||
}
|
||||
return conn.epollErr
|
||||
}
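For orientation, a minimal sketch of driving the Conn above directly from within this module; the socket path and payload below are placeholders, and the real consumer remains Context.roundtrip:

package main

import (
	"log"
	"time"

	"hakurei.app/internal/pipewire"
)

func main() {
	// Placeholder path; the package normally resolves this via connectName.
	conn, err := pipewire.Dial("/run/user/1000/pipewire-0")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	conn.MightBlock(5 * time.Second) // the next Sendmsg may block for up to 5s
	if _, err = conn.Sendmsg([]byte("placeholder payload"), nil, 0); err != nil {
		log.Fatal(err)
	}

	conn.MightBlock(5 * time.Second)
	buf, oob := make([]byte, 4096), make([]byte, 1024)
	if n, oobn, _, err := conn.Recvmsg(buf, oob, 0); err != nil {
		log.Fatal(err)
	} else {
		log.Printf("received %d bytes, %d oob bytes", n, oobn)
	}
}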
|
||||
|
||||
// MustNew calls [New](conn, props) and panics on error.
|
||||
// It is intended for use in tests with hard-coded strings.
|
||||
func MustNew(conn Conn, props SPADict) *Context {
|
||||
@@ -310,7 +412,7 @@ func (ctx *Context) recvmsg(remaining []byte) (payload []byte, err error) {
|
||||
}
|
||||
if err != syscall.EAGAIN && err != syscall.EWOULDBLOCK {
|
||||
ctx.closeReceivedFiles()
|
||||
return nil, os.NewSyscallError("recvmsg", err)
|
||||
return nil, &ProxyFatalError{Err: os.NewSyscallError("recvmsg", err), ProxyErrs: ctx.cloneAsProxyErrors()}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -347,7 +449,7 @@ func (ctx *Context) sendmsg(p []byte, fds ...int) error {
|
||||
}
|
||||
|
||||
if err != nil && err != syscall.EAGAIN && err != syscall.EWOULDBLOCK {
|
||||
return os.NewSyscallError("sendmsg", err)
|
||||
return &ProxyFatalError{Err: os.NewSyscallError("sendmsg", err), ProxyErrs: ctx.cloneAsProxyErrors()}
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -598,8 +700,15 @@ func (ctx *Context) Roundtrip() (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
const (
|
||||
// roundtripTimeout is the maximum duration socket operations during
// Context.roundtrip are allowed to block for.
|
||||
roundtripTimeout = 5 * time.Second
|
||||
)
|
||||
|
||||
// roundtrip implements the Roundtrip method without checking proxyErrors.
|
||||
func (ctx *Context) roundtrip() (err error) {
|
||||
ctx.conn.MightBlock(roundtripTimeout)
|
||||
if err = ctx.sendmsg(ctx.buf, ctx.pendingFiles...); err != nil {
|
||||
return
|
||||
}
|
||||
@@ -633,6 +742,7 @@ func (ctx *Context) roundtrip() (err error) {
|
||||
}()
|
||||
|
||||
var remaining []byte
|
||||
ctx.conn.MightBlock(roundtripTimeout)
|
||||
for {
|
||||
remaining, err = ctx.consume(remaining)
|
||||
if err == nil {
|
||||
@@ -857,14 +967,14 @@ const Remote = "PIPEWIRE_REMOTE"
|
||||
|
||||
const DEFAULT_SYSTEM_RUNTIME_DIR = "/run/pipewire"
|
||||
|
||||
// connectName connects to a PipeWire remote by name and returns the [net.UnixConn].
|
||||
func connectName(name string, manager bool) (conn *net.UnixConn, err error) {
|
||||
// connectName connects to a PipeWire remote by name and returns the resulting [Conn].
|
||||
func connectName(name string, manager bool) (conn Conn, err error) {
|
||||
if manager && !strings.HasSuffix(name, "-manager") {
|
||||
return connectName(name+"-manager", false)
|
||||
}
|
||||
|
||||
if path.IsAbs(name) || (len(name) > 0 && name[0] == '@') {
|
||||
return net.DialUnix("unix", nil, &net.UnixAddr{Name: name, Net: "unix"})
|
||||
return Dial(name)
|
||||
} else {
|
||||
runtimeDir, ok := os.LookupEnv("PIPEWIRE_RUNTIME_DIR")
|
||||
if !ok || !path.IsAbs(runtimeDir) {
|
||||
@@ -879,7 +989,7 @@ func connectName(name string, manager bool) (conn *net.UnixConn, err error) {
|
||||
if !ok || !path.IsAbs(runtimeDir) {
|
||||
runtimeDir = DEFAULT_SYSTEM_RUNTIME_DIR
|
||||
}
|
||||
return net.DialUnix("unix", nil, &net.UnixAddr{Name: path.Join(runtimeDir, name), Net: "unix"})
|
||||
return Dial(path.Join(runtimeDir, name))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -897,12 +1007,11 @@ func ConnectName(name string, manager bool, props SPADict) (ctx *Context, err er
|
||||
}
|
||||
}
|
||||
|
||||
var conn *net.UnixConn
|
||||
var conn Conn
|
||||
if conn, err = connectName(name, manager); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if ctx, err = New(SyscallConn{conn}, props); err != nil {
|
||||
if ctx, err = New(conn, props); err != nil {
|
||||
ctx = nil
|
||||
_ = conn.Close()
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"strconv"
|
||||
. "syscall"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"hakurei.app/container/stub"
|
||||
"hakurei.app/internal/pipewire"
|
||||
@@ -715,6 +716,18 @@ type stubUnixConn struct {
|
||||
current int
|
||||
}
|
||||
|
||||
func (conn *stubUnixConn) MightBlock(timeout time.Duration) {
|
||||
if timeout != 5*time.Second {
|
||||
panic("unexpected timeout " + timeout.String())
|
||||
}
|
||||
if conn.current == 0 ||
|
||||
(conn.samples[conn.current-1].nr == SYS_RECVMSG && conn.samples[conn.current-1].errno == EAGAIN && conn.samples[conn.current].nr == SYS_SENDMSG) ||
|
||||
(conn.samples[conn.current-1].nr == SYS_SENDMSG && conn.samples[conn.current].nr == SYS_RECVMSG) {
|
||||
return
|
||||
}
|
||||
panic("unexpected blocking hint before sample " + strconv.Itoa(conn.current))
|
||||
}
|
||||
|
||||
// nextSample returns the current sample and increments the counter.
|
||||
func (conn *stubUnixConn) nextSample(nr uintptr) (sample *stubUnixConnSample, wantOOB []byte, err error) {
|
||||
sample = &conn.samples[conn.current]
|
||||
|
||||
210
internal/pkg/dir.go
Normal file
@@ -0,0 +1,210 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"crypto/sha512"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
"io/fs"
|
||||
"math"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
|
||||
"hakurei.app/container/check"
|
||||
)
|
||||
|
||||
// FlatEntry is a directory entry to be encoded for [Flatten].
|
||||
type FlatEntry struct {
|
||||
Mode fs.FileMode // file mode bits
|
||||
Path string // pathname of the file
|
||||
Data []byte // file content or symlink destination
|
||||
}
|
||||
|
||||
/*
|
||||
| mode uint32 | path_sz uint32 |
|
||||
| data_sz uint64 |
|
||||
| path string |
|
||||
| data []byte |
|
||||
*/
|
||||
|
||||
// wordSize is the boundary to which binary segments are always aligned.
|
||||
const wordSize = 8
|
||||
|
||||
// alignSize returns the padded size for aligning sz.
|
||||
func alignSize(sz int) int {
|
||||
return sz + (wordSize-(sz)%wordSize)%wordSize
|
||||
}
|
||||
|
||||
// Encode encodes the entry for transmission or hashing.
|
||||
func (ent *FlatEntry) Encode(w io.Writer) (n int, err error) {
|
||||
pPathSize := alignSize(len(ent.Path))
|
||||
if pPathSize > math.MaxUint32 {
|
||||
return 0, syscall.E2BIG
|
||||
}
|
||||
pDataSize := alignSize(len(ent.Data))
|
||||
|
||||
payload := make([]byte, wordSize*2+pPathSize+pDataSize)
|
||||
binary.LittleEndian.PutUint32(payload, uint32(ent.Mode))
|
||||
binary.LittleEndian.PutUint32(payload[wordSize/2:], uint32(len(ent.Path)))
|
||||
binary.LittleEndian.PutUint64(payload[wordSize:], uint64(len(ent.Data)))
|
||||
copy(payload[wordSize*2:], ent.Path)
|
||||
copy(payload[wordSize*2+pPathSize:], ent.Data)
|
||||
return w.Write(payload)
|
||||
}
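As a worked example of the layout above: a record with a 5-byte path and a 1-byte payload occupies 16 + alignSize(5) + alignSize(1) = 16 + 8 + 8 = 32 bytes. A minimal sketch (encodeSketch is hypothetical; imports assumed: bytes, fmt, hakurei.app/internal/pkg):

// encodeSketch only illustrates the record layout; it is not part of the
// change. Both the path and the data are padded to the 8-byte word size.
func encodeSketch() {
	var b bytes.Buffer
	n, err := (&pkg.FlatEntry{Mode: 0400, Path: "check", Data: []byte{0}}).Encode(&b)
	fmt.Println(n, err) // 32 <nil>
}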
|
||||
|
||||
// ErrInsecurePath is returned by [FlatEntry.Decode] if validation is requested
|
||||
// and a nonlocal path is encountered in the stream.
|
||||
var ErrInsecurePath = errors.New("insecure file path")
|
||||
|
||||
// Decode decodes the entry from its representation produced by Encode.
|
||||
func (ent *FlatEntry) Decode(r io.Reader, validate bool) (n int, err error) {
|
||||
var nr int
|
||||
|
||||
header := make([]byte, wordSize*2)
|
||||
nr, err = r.Read(header)
|
||||
n += nr
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) && n != 0 {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
ent.Mode = fs.FileMode(binary.LittleEndian.Uint32(header))
|
||||
pathSize := int(binary.LittleEndian.Uint32(header[wordSize/2:]))
|
||||
pPathSize := alignSize(pathSize)
|
||||
dataSize := int(binary.LittleEndian.Uint64(header[wordSize:]))
|
||||
pDataSize := alignSize(dataSize)
|
||||
|
||||
buf := make([]byte, pPathSize+pDataSize)
|
||||
nr, err = r.Read(buf)
|
||||
n += nr
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
if nr != len(buf) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
} else {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
ent.Path = string(buf[:pathSize])
|
||||
if ent.Mode.IsDir() {
|
||||
ent.Data = nil
|
||||
} else {
|
||||
ent.Data = buf[pPathSize : pPathSize+dataSize]
|
||||
}
|
||||
|
||||
if validate && !filepath.IsLocal(ent.Path) {
|
||||
err = ErrInsecurePath
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// DirScanner provides an efficient interface for reading a stream of encoded
|
||||
// [FlatEntry]. Successive calls to the Scan method will step through the
|
||||
// entries in the stream.
|
||||
type DirScanner struct {
|
||||
// Underlying reader to scan [FlatEntry] representations from.
|
||||
r io.Reader
|
||||
|
||||
// First non-EOF I/O error, returned by the Err method.
|
||||
err error
|
||||
|
||||
// Entry to store results in. Its address is returned by the Entry method
|
||||
// and is updated on every call to Scan.
|
||||
ent FlatEntry
|
||||
|
||||
// Validate pathnames during decoding.
|
||||
validate bool
|
||||
}
|
||||
|
||||
// NewDirScanner returns the address of a new instance of [DirScanner] reading
|
||||
// from r. The caller must no longer read from r after this function returns.
|
||||
func NewDirScanner(r io.Reader, validate bool) *DirScanner {
|
||||
return &DirScanner{r: r, validate: validate}
|
||||
}
|
||||
|
||||
// Err returns the first non-EOF I/O error.
|
||||
func (s *DirScanner) Err() error {
|
||||
if errors.Is(s.err, io.EOF) {
|
||||
return nil
|
||||
}
|
||||
return s.err
|
||||
}
|
||||
|
||||
// Entry returns the address to the [FlatEntry] value storing the last result.
|
||||
func (s *DirScanner) Entry() *FlatEntry { return &s.ent }
|
||||
|
||||
// Scan advances to the next [FlatEntry].
|
||||
func (s *DirScanner) Scan() bool {
|
||||
if s.err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
var n int
|
||||
n, s.err = s.ent.Decode(s.r, s.validate)
|
||||
if errors.Is(s.err, io.EOF) {
|
||||
return n != 0
|
||||
}
|
||||
return s.err == nil
|
||||
}
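A minimal sketch of consuming an encoded stream with the scanner above; scanSketch is a hypothetical helper and r is any reader carrying output produced by Flatten (imports assumed: fmt, io, hakurei.app/internal/pkg):

// scanSketch prints every entry in the stream and reports the first error.
func scanSketch(r io.Reader) error {
	s := pkg.NewDirScanner(r, true) // validate pathnames while decoding
	for s.Scan() {
		ent := s.Entry()
		fmt.Printf("%v %s (%d bytes)\n", ent.Mode, ent.Path, len(ent.Data))
	}
	return s.Err() // first non-EOF error, including ErrInsecurePath when validating
}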
|
||||
|
||||
// Flatten writes a deterministic representation of the contents of fsys to w.
|
||||
// The resulting data can be hashed to produce a deterministic checksum for the
|
||||
// directory.
|
||||
func Flatten(fsys fs.FS, root string, w io.Writer) (n int, err error) {
|
||||
var nr int
|
||||
err = fs.WalkDir(fsys, root, func(path string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var fi fs.FileInfo
|
||||
fi, err = d.Info()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ent := FlatEntry{
|
||||
Path: path,
|
||||
Mode: fi.Mode(),
|
||||
}
|
||||
if ent.Mode.IsRegular() {
|
||||
if ent.Data, err = fs.ReadFile(fsys, path); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if ent.Mode&fs.ModeSymlink != 0 {
|
||||
var newpath string
|
||||
if newpath, err = fs.ReadLink(fsys, path); err != nil {
|
||||
return err
|
||||
}
|
||||
ent.Data = []byte(newpath)
|
||||
} else if !ent.Mode.IsDir() {
|
||||
return InvalidFileModeError(ent.Mode)
|
||||
}
|
||||
|
||||
nr, err = ent.Encode(w)
|
||||
n += nr
|
||||
return err
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// HashFS returns a checksum produced by hashing the result of [Flatten].
|
||||
func HashFS(fsys fs.FS, root string) (Checksum, error) {
|
||||
h := sha512.New384()
|
||||
if _, err := Flatten(fsys, root, h); err != nil {
|
||||
return Checksum{}, err
|
||||
}
|
||||
return (Checksum)(h.Sum(nil)), nil
|
||||
}
|
||||
|
||||
// HashDir returns a checksum produced by hashing the result of [Flatten].
|
||||
func HashDir(pathname *check.Absolute) (Checksum, error) {
|
||||
return HashFS(os.DirFS(pathname.String()), ".")
|
||||
}
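A minimal usage sketch of the hashing helpers above; sameTree is a hypothetical helper and the two pathnames are supplied by the caller (imports assumed: hakurei.app/container/check, hakurei.app/internal/pkg):

// sameTree reports whether two directories flatten to the same checksum.
// Checksum is a fixed-size array, so == compares the full digest.
func sameTree(p, q *check.Absolute) (bool, error) {
	a, err := pkg.HashDir(p)
	if err != nil {
		return false, err
	}
	b, err := pkg.HashDir(q)
	if err != nil {
		return false, err
	}
	return a == b, nil
}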
|
||||
537
internal/pkg/dir_test.go
Normal file
@@ -0,0 +1,537 @@
|
||||
package pkg_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/fs"
|
||||
"reflect"
|
||||
"testing"
|
||||
"testing/fstest"
|
||||
|
||||
"hakurei.app/internal/pkg"
|
||||
)
|
||||
|
||||
func TestFlatten(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
fsys fs.FS
|
||||
entries []pkg.FlatEntry
|
||||
sum pkg.Checksum
|
||||
err error
|
||||
}{
|
||||
{"bad type", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
"invalid": {Mode: fs.ModeCharDevice | 0400},
|
||||
}, nil, pkg.Checksum{}, pkg.InvalidFileModeError(
|
||||
fs.ModeCharDevice | 0400,
|
||||
)},
|
||||
|
||||
{"empty", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||
}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C"), nil},
|
||||
|
||||
{"sample cache file", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX": {Mode: 0400, Data: []byte{0}},
|
||||
"checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq": {Mode: 0400, Data: []byte{0, 0, 0, 0, 0xad, 0xb, 0, 4, 0xfe, 0xfe, 0, 0, 0xfe, 0xca, 0, 0}},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"identifier/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
|
||||
"identifier/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
|
||||
"identifier/cafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
|
||||
"identifier/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
|
||||
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||
{Mode: 0400, Path: "checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq", Data: []byte{0, 0, 0, 0, 0xad, 0xb, 0, 4, 0xfe, 0xfe, 0, 0, 0xfe, 0xca, 0, 0}},
|
||||
{Mode: 0400, Path: "checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX", Data: []byte{0}},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq", Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/cafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe", Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX", Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||
}, pkg.MustDecode("St9rlE-mGZ5gXwiv_hzQ_B8bZP-UUvSNmf4nHUZzCMOumb6hKnheZSe0dmnuc4Q2"), nil},
|
||||
|
||||
{"sample http get cure", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU": {Mode: 0400, Data: []byte("\x7f\xe1\x69\xa2\xdd\x63\x96\x26\x83\x79\x61\x8b\xf0\x3f\xd5\x16\x9a\x39\x3a\xdb\xcf\xb1\xbc\x8d\x33\xff\x75\xee\x62\x56\xa9\xf0\x27\xac\x13\x94\x69")},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"identifier/NqVORkT6L9HX6Za7kT2zcibY10qFqBaxEjPiYFrBQX-ZFr3yxCzJxbKOP0zVjeWb": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU")},
|
||||
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||
{Mode: 0400, Path: "checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU", Data: []byte("\x7f\xe1\x69\xa2\xdd\x63\x96\x26\x83\x79\x61\x8b\xf0\x3f\xd5\x16\x9a\x39\x3a\xdb\xcf\xb1\xbc\x8d\x33\xff\x75\xee\x62\x56\xa9\xf0\x27\xac\x13\x94\x69")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/NqVORkT6L9HX6Za7kT2zcibY10qFqBaxEjPiYFrBQX-ZFr3yxCzJxbKOP0zVjeWb", Data: []byte("../checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||
}, pkg.MustDecode("bqtn69RkV5E7V7GhhgCFjcvbxmaqrO8DywamM4Tyjf10F6EJBHjXiIa_tFRtF4iN"), nil},
|
||||
|
||||
{"sample directory step simple", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0500},
|
||||
|
||||
"check": {Mode: 0400, Data: []byte{0, 0}},
|
||||
|
||||
"lib": {Mode: fs.ModeDir | 0700},
|
||||
"lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
|
||||
"lib/pkgconfig": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0500, Path: "."},
|
||||
|
||||
{Mode: 0400, Path: "check", Data: []byte{0, 0}},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "lib"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "lib/pkgconfig"},
|
||||
}, pkg.MustDecode("qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b"), nil},
|
||||
|
||||
{"sample directory step garbage", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0500},
|
||||
|
||||
"lib": {Mode: fs.ModeDir | 0500},
|
||||
"lib/check": {Mode: 0400, Data: []byte{}},
|
||||
|
||||
"lib/pkgconfig": {Mode: fs.ModeDir | 0500},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0500, Path: "."},
|
||||
|
||||
{Mode: fs.ModeDir | 0500, Path: "lib"},
|
||||
{Mode: 0400, Path: "lib/check", Data: []byte{}},
|
||||
|
||||
{Mode: fs.ModeDir | 0500, Path: "lib/pkgconfig"},
|
||||
}, pkg.MustDecode("CUx-3hSbTWPsbMfDhgalG4Ni_GmR9TnVX8F99tY_P5GtkYvczg9RrF5zO0jX9XYT"), nil},
|
||||
|
||||
{"sample directory", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/check": {Mode: 0400, Data: []byte{0, 0}},
|
||||
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/pkgconfig": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
|
||||
"identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
|
||||
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b"},
|
||||
{Mode: 0400, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/check", Data: []byte{0, 0}},
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/pkgconfig"},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY", Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa", Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||
}, pkg.MustDecode("WVpvsVqVKg9Nsh744x57h51AuWUoUR2nnh8Md-EYBQpk6ziyTuUn6PLtF2e0Eu_d"), nil},
|
||||
|
||||
{"sample tar step unpack", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0500},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check": {Mode: 0400, Data: []byte{0, 0}},
|
||||
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0500},
|
||||
"identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||
"identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||
|
||||
"work": {Mode: fs.ModeDir | 0500},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0500, Path: "."},
|
||||
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum"},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP"},
|
||||
{Mode: 0400, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check", Data: []byte{0, 0}},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig"},
|
||||
|
||||
{Mode: fs.ModeDir | 0500, Path: "identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||
|
||||
{Mode: fs.ModeDir | 0500, Path: "work"},
|
||||
}, pkg.MustDecode("cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM"), nil},
|
||||
|
||||
{"sample tar", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check": {Mode: 0400, Data: []byte{0, 0}},
|
||||
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/work": {Mode: fs.ModeDir | 0500},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"identifier/-P_1iw6yVq_letMHncqcExSE0bYcDhYI5OdY6b1wKASf-Corufvj__XTBUq2Qd2a": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
|
||||
"identifier/0_rRxIqbX9LK9L_KDbuafotFz6HFkonNgO9gXhK1asM_Y1Pxn0amg756vRTo6m74": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
|
||||
|
||||
"temp": {Mode: fs.ModeDir | 0700},
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM"},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum"},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP"},
|
||||
{Mode: 0400, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check", Data: []byte{0, 0}},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig"},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/work"},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/-P_1iw6yVq_letMHncqcExSE0bYcDhYI5OdY6b1wKASf-Corufvj__XTBUq2Qd2a", Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/0_rRxIqbX9LK9L_KDbuafotFz6HFkonNgO9gXhK1asM_Y1Pxn0amg756vRTo6m74", Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||
}, pkg.MustDecode("sxbgyX-bPoezbha214n2lbQhiVfTUBkhZ0EX6zI7mmkMdrCdwuMwhMBJphLQsy94"), nil},
|
||||
|
||||
{"sample tar expand step unpack", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0500},
|
||||
|
||||
"libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0500, Path: "."},
|
||||
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
}, pkg.MustDecode("CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN"), nil},
|
||||
|
||||
{"sample tar expand", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"identifier/-P_1iw6yVq_letMHncqcExSE0bYcDhYI5OdY6b1wKASf-Corufvj__XTBUq2Qd2a": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
||||
"identifier/0_rRxIqbX9LK9L_KDbuafotFz6HFkonNgO9gXhK1asM_Y1Pxn0amg756vRTo6m74": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
||||
|
||||
"temp": {Mode: fs.ModeDir | 0700},
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/-P_1iw6yVq_letMHncqcExSE0bYcDhYI5OdY6b1wKASf-Corufvj__XTBUq2Qd2a", Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/0_rRxIqbX9LK9L_KDbuafotFz6HFkonNgO9gXhK1asM_Y1Pxn0amg756vRTo6m74", Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||
}, pkg.MustDecode("4I8wx_h7NSJTlG5lbuz-GGEXrOg0GYC3M_503LYEBhv5XGWXfNIdIY9Q3eVSYldX"), nil},
|
||||
|
||||
{"testtool", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0500},
|
||||
|
||||
"check": {Mode: 0400, Data: []byte{0}},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0500, Path: "."},
|
||||
|
||||
{Mode: 0400, Path: "check", Data: []byte{0}},
|
||||
}, pkg.MustDecode("GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"), nil},
|
||||
|
||||
{"sample exec container", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
|
||||
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"identifier/U2cbgVgEtjfRuvHfE1cQnZ3t8yoexULQyo_VLgvxAVJSsobMcNaFIsuDWtmt7kzK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||
"identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||
|
||||
"temp": {Mode: fs.ModeDir | 0700},
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
|
||||
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
||||
{Mode: 0400, Path: "checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb", Data: []byte{}},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/U2cbgVgEtjfRuvHfE1cQnZ3t8yoexULQyo_VLgvxAVJSsobMcNaFIsuDWtmt7kzK", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||
}, pkg.MustDecode("UiV6kMz7KrTsc_yphiyQzFLqjRanHxUOwrBMtkKuWo4mOO6WgPFAcoUEeSp7eVIW"), nil},
|
||||
|
||||
{"testtool net", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0500},
|
||||
|
||||
"check": {Mode: 0400, Data: []byte("net")},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0500, Path: "."},
|
||||
|
||||
{Mode: 0400, Path: "check", Data: []byte("net")},
|
||||
}, pkg.MustDecode("a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W"), nil},
|
||||
|
||||
{"sample exec net container", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
|
||||
"checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W/check": {Mode: 0400, Data: []byte("net")},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||
"identifier/QdsJhGgnk5N2xdUNGcndXQxFKifxf1V_2t9X8CQ-pDcg24x6mGJC_BiLfGbs6Qml": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W")},
|
||||
"identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||
|
||||
"temp": {Mode: fs.ModeDir | 0700},
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
||||
{Mode: 0400, Path: "checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb", Data: []byte{}},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W"},
|
||||
{Mode: 0400, Path: "checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W/check", Data: []byte("net")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/QdsJhGgnk5N2xdUNGcndXQxFKifxf1V_2t9X8CQ-pDcg24x6mGJC_BiLfGbs6Qml", Data: []byte("../checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||
}, pkg.MustDecode("ek4K-0d4iRSArkY2TCs3WK34DbiYeOmhE_4vsJTSu_6roY4ZF3YG6eKRooal-i1o"), nil},
|
||||
|
||||
{"sample exec container overlay root", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
|
||||
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"identifier/5ey2wpmMpj483YYa7ZZQciYLA2cx3_l167JCqWW4Pd-5DVp81dj9EsBtVTwYptF6": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||
"identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||
|
||||
"temp": {Mode: fs.ModeDir | 0700},
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
|
||||
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/5ey2wpmMpj483YYa7ZZQciYLA2cx3_l167JCqWW4Pd-5DVp81dj9EsBtVTwYptF6", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||
}, pkg.MustDecode("VIqqpf0ip9jcyw63i6E8lCMGUcLivQBe4Bevt3WusNac-1MSy5bzB647qGUBzl-W"), nil},
|
||||
|
||||
{"sample exec container overlay work", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
|
||||
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"identifier/acaDzHZv40dZaz4cGAXayqbRMgbEOuiuiUijZL8IgDQvyeCNMFE3onBMYfny-kXA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||
"identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||
|
||||
"temp": {Mode: fs.ModeDir | 0700},
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
|
||||
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/acaDzHZv40dZaz4cGAXayqbRMgbEOuiuiUijZL8IgDQvyeCNMFE3onBMYfny-kXA", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||
}, pkg.MustDecode("q8x2zQg4YZbKpPqKlEBj_uxXD9vOBaZ852qOuIsl9QdO73I_UMNpuUoPLtunxUYl"), nil},
|
||||
|
||||
{"sample exec container multiple layers", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
|
||||
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
|
||||
"checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK": {Mode: fs.ModeDir | 0500},
|
||||
"checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK/check": {Mode: 0400, Data: []byte("layers")},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||
"identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||
"identifier/rXLKjjYfGSyoWmuvEJooHkvGJIZaC0IAWnKGvtPZkM15gBxAgW7mIXcxRVNOXAr4": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK")},
|
||||
"identifier/tfjrsVuBuFgzWgwz-yPppFtylYuC1VFWnKhyBiHbWTGkyz8lt7Ee9QXWaIHPXs4x": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||
|
||||
"temp": {Mode: fs.ModeDir | 0700},
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
|
||||
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
||||
{Mode: 0400, Path: "checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb", Data: []byte{}},
|
||||
{Mode: fs.ModeDir | 0500, Path: "checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK"},
|
||||
{Mode: 0400, Path: "checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK/check", Data: []byte("layers")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/rXLKjjYfGSyoWmuvEJooHkvGJIZaC0IAWnKGvtPZkM15gBxAgW7mIXcxRVNOXAr4", Data: []byte("../checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK")},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/tfjrsVuBuFgzWgwz-yPppFtylYuC1VFWnKhyBiHbWTGkyz8lt7Ee9QXWaIHPXs4x", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||
}, pkg.MustDecode("SITnQ6PTV12PAQQjIuLUxkvsXQiC9Gq_HJQlcb4BPL5YnRHnx8lsW7PRM9YMLBsx"), nil},
|
||||
|
||||
{"sample file short", fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX": {Mode: 0400, Data: []byte{0}},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"identifier/lIx_W4M7tVOcQ8jh08EJOfXf4brRmkEEjvUa7c17vVUzlmtUxlhhrgqmc9aZhjbn": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
|
||||
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, []pkg.FlatEntry{
|
||||
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||
{Mode: 0400, Path: "checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX", Data: []byte{0}},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/lIx_W4M7tVOcQ8jh08EJOfXf4brRmkEEjvUa7c17vVUzlmtUxlhhrgqmc9aZhjbn", Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
|
||||
|
||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||
}, pkg.MustDecode("hnrfmJtivNKcgtETsKnU9gP_OwPgpNY3DSUJnmxnmeOODSO-YBvEBiTgieY4AAd7"), nil},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("roundtrip", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var buf bytes.Buffer
|
||||
if _, err := pkg.Flatten(
|
||||
tc.fsys,
|
||||
".",
|
||||
&buf,
|
||||
); !reflect.DeepEqual(err, tc.err) {
|
||||
t.Fatalf("Flatten: error = %v, want %v", err, tc.err)
|
||||
} else if tc.err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
s := pkg.NewDirScanner(bytes.NewReader(buf.Bytes()), true)
|
||||
var got []pkg.FlatEntry
|
||||
for s.Scan() {
|
||||
got = append(got, *s.Entry())
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
t.Fatalf("Err: error = %v", err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(got, tc.entries) {
|
||||
t.Fatalf("Scan: %#v, want %#v", got, tc.entries)
|
||||
}
|
||||
})
|
||||
|
||||
if tc.err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
t.Run("hash", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if got, err := pkg.HashFS(tc.fsys, "."); err != nil {
|
||||
t.Fatalf("HashFS: error = %v", err)
|
||||
} else if got != tc.sum {
|
||||
t.Fatalf("HashFS: %v", &pkg.ChecksumMismatchError{
|
||||
Got: got,
|
||||
Want: tc.sum,
|
||||
})
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
378
internal/pkg/exec.go
Normal file
@@ -0,0 +1,378 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"slices"
|
||||
"strconv"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"hakurei.app/container"
|
||||
"hakurei.app/container/check"
|
||||
"hakurei.app/container/fhs"
|
||||
"hakurei.app/container/std"
|
||||
"hakurei.app/message"
|
||||
)
|
||||
|
||||
// AbsWork is the container pathname [CureContext.GetWorkDir] is mounted on.
|
||||
var AbsWork = fhs.AbsRoot.Append("work/")
|
||||
|
||||
// ExecPath pairs a slice of [Artifact] with the [check.Absolute] pathname under
|
||||
// which it is made available in the container.
|
||||
type ExecPath struct {
|
||||
// Pathname in the container mount namespace.
|
||||
P *check.Absolute
|
||||
// Artifacts to mount on the pathname; must contain at least one [Artifact].
|
||||
// If there are multiple entries or W is true, P is set up as an overlay
|
||||
// mount, and entries of A must not implement [File].
|
||||
A []Artifact
|
||||
// Whether to make the mount point writable via the temp directory.
|
||||
W bool
|
||||
}
|
||||
|
||||
// Path returns a populated [ExecPath].
|
||||
func Path(pathname *check.Absolute, writable bool, a ...Artifact) ExecPath {
|
||||
return ExecPath{pathname, a, writable}
|
||||
}
|
||||
|
||||
// MustPath is like [Path], but takes a string pathname via [check.MustAbs].
|
||||
func MustPath(pathname string, writable bool, a ...Artifact) ExecPath {
|
||||
return ExecPath{check.MustAbs(pathname), a, writable}
|
||||
}
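// An illustrative sketch of constructing [ExecPath] values from a caller's
// point of view; the artifact variables are hypothetical:
//
//	pkg.MustPath("/opt", false, toolchain)   // single artifact, read-only bind mount
//	pkg.MustPath("/", true, rootfs)          // writable overlay backed by the temp directory
//	pkg.Path(pkg.AbsWork, false, workLayer)  // targets [AbsWork], so it becomes the /work overlay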
|
||||
|
||||
const (
|
||||
// ExecTimeoutDefault replaces out of range [NewExec] timeout values.
|
||||
ExecTimeoutDefault = 15 * time.Minute
|
||||
// ExecTimeoutMax is the arbitrary upper bound of [NewExec] timeout.
|
||||
ExecTimeoutMax = 48 * time.Hour
|
||||
)
|
||||
|
||||
// An execArtifact is an [Artifact] that produces its output by running a program
|
||||
// that is part of another [Artifact] in a [container].
|
||||
//
|
||||
// Methods of execArtifact do not modify any struct field or underlying arrays
|
||||
// referred to by slices.
|
||||
type execArtifact struct {
|
||||
// Caller-supplied user-facing reporting name, guaranteed to be nonzero
|
||||
// during initialisation.
|
||||
name string
|
||||
// Caller-supplied inner mount points.
|
||||
paths []ExecPath
|
||||
|
||||
// Passed through to [container.Params].
|
||||
dir *check.Absolute
|
||||
// Passed through to [container.Params].
|
||||
env []string
|
||||
// Passed through to [container.Params].
|
||||
path *check.Absolute
|
||||
// Passed through to [container.Params].
|
||||
args []string
|
||||
|
||||
// Duration the initial process is allowed to run. The zero value is
|
||||
// equivalent to [ExecTimeoutDefault]. This value is never encoded in Params
|
||||
// because it cannot affect outcome.
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
var _ fmt.Stringer = new(execArtifact)
|
||||
|
||||
// execNetArtifact is like execArtifact but implements [KnownChecksum] and has
|
||||
// its resulting container keep the host net namespace.
|
||||
type execNetArtifact struct {
|
||||
checksum Checksum
|
||||
|
||||
execArtifact
|
||||
}
|
||||
|
||||
var _ KnownChecksum = new(execNetArtifact)
|
||||
|
||||
// Checksum returns the caller-supplied checksum.
|
||||
func (a *execNetArtifact) Checksum() Checksum { return a.checksum }
|
||||
|
||||
// Kind returns the hardcoded [Kind] constant.
|
||||
func (a *execNetArtifact) Kind() Kind { return KindExecNet }
|
||||
|
||||
// Params returns [Checksum] concatenated with the [KindExec] params.
|
||||
func (a *execNetArtifact) Params() []byte {
|
||||
return slices.Concat(a.checksum[:], a.execArtifact.Params())
|
||||
}
|
||||
|
||||
// Cure cures the [Artifact] in the container described by the caller. The
|
||||
// container retains host networking.
|
||||
func (a *execNetArtifact) Cure(f *FContext) error {
|
||||
return a.cure(f, true)
|
||||
}
|
||||
|
||||
// NewExec returns a new [Artifact] that executes the program at pathname in a
|
||||
// container with specified paths bind mounted read-only in order. A private
|
||||
// instance of /proc and /dev is made available to the container.
|
||||
//
|
||||
// The working and temporary directories are both created and mounted writable
|
||||
// on [AbsWork] and [fhs.AbsTmp] respectively. If one or more paths target
|
||||
// [AbsWork], the final entry is set up as a writable overlay mount on /work for
|
||||
// which the upperdir is the host side work directory. In this configuration,
|
||||
// the W field is ignored, and the program must avoid causing whiteout files to
|
||||
// be created. Cure fails if upperdir ends up with entries other than directories,
|
||||
// regular files or symlinks.
|
||||
//
|
||||
// If checksum is non-nil, the resulting [Artifact] implements [KnownChecksum]
|
||||
// and its container runs in the host net namespace.
|
||||
//
|
||||
// The container is allowed to run for the specified duration before the initial
|
||||
// process and all processes originating from it are terminated. A zero or
|
||||
// negative timeout value is equivalent to [ExecTimeoutDefault]; a timeout value
|
||||
// greater than [ExecTimeoutMax] is equivalent to [ExecTimeoutMax].
|
||||
//
|
||||
// The user-facing name is not accessible from the container and does not
|
||||
// affect curing outcome. Because of this, it is omitted from parameter data
|
||||
// for computing the identifier.
|
||||
func NewExec(
|
||||
name string,
|
||||
checksum *Checksum,
|
||||
timeout time.Duration,
|
||||
|
||||
dir *check.Absolute,
|
||||
env []string,
|
||||
pathname *check.Absolute,
|
||||
args []string,
|
||||
|
||||
paths ...ExecPath,
|
||||
) Artifact {
|
||||
if name == "" {
|
||||
name = "exec-" + path.Base(pathname.String())
|
||||
}
|
||||
if timeout <= 0 {
|
||||
timeout = ExecTimeoutDefault
|
||||
}
|
||||
if timeout > ExecTimeoutMax {
|
||||
timeout = ExecTimeoutMax
|
||||
}
|
||||
a := execArtifact{name, paths, dir, env, pathname, args, timeout}
|
||||
if checksum == nil {
|
||||
return &a
|
||||
}
|
||||
return &execNetArtifact{*checksum, a}
|
||||
}
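// A minimal illustrative invocation from a caller's point of view; the
// "builder" artifact and pathnames are hypothetical, and passing a nil
// checksum with a zero timeout selects no host networking and
// [ExecTimeoutDefault]:
//
//	a := pkg.NewExec(
//		"build-sample", nil, 0,
//		pkg.AbsWork,
//		[]string{"PATH=/opt/bin"},
//		check.MustAbs("/opt/bin/build"),
//		[]string{"build"},
//		pkg.MustPath("/opt", false, builder),
//	)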
|
||||
|
||||
// Kind returns the hardcoded [Kind] constant.
|
||||
func (a *execArtifact) Kind() Kind { return KindExec }
|
||||
|
||||
// Params returns paths, dir, env, executable pathname and args concatenated together.
|
||||
func (a *execArtifact) Params() []byte {
|
||||
var buf bytes.Buffer
|
||||
for _, p := range a.paths {
|
||||
if p.W {
|
||||
buf.WriteByte(1)
|
||||
} else {
|
||||
buf.WriteByte(0)
|
||||
}
|
||||
if p.P != nil {
|
||||
buf.WriteString(p.P.String())
|
||||
} else {
|
||||
buf.WriteString("invalid P\x00")
|
||||
}
|
||||
buf.WriteByte(0)
|
||||
for _, d := range p.A {
|
||||
id := Ident(d)
|
||||
buf.Write(id[:])
|
||||
}
|
||||
buf.WriteByte(0)
|
||||
}
|
||||
buf.WriteByte(0)
|
||||
buf.WriteString(a.dir.String())
|
||||
buf.WriteByte(0)
|
||||
for _, e := range a.env {
|
||||
buf.WriteString(e)
|
||||
}
|
||||
buf.WriteByte(0)
|
||||
buf.WriteString(a.path.String())
|
||||
buf.WriteByte(0)
|
||||
for _, arg := range a.args {
|
||||
buf.WriteString(arg)
|
||||
}
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// Dependencies returns a slice of all artifacts collected from caller-supplied
|
||||
// [ExecPath].
|
||||
func (a *execArtifact) Dependencies() []Artifact {
|
||||
artifacts := make([][]Artifact, 0, len(a.paths))
|
||||
for _, p := range a.paths {
|
||||
artifacts = append(artifacts, p.A)
|
||||
}
|
||||
return slices.Concat(artifacts...)
|
||||
}
|
||||
|
||||
// String returns the caller-supplied reporting name.
|
||||
func (a *execArtifact) String() string { return a.name }
|
||||
|
||||
// Cure cures the [Artifact] in the container described by the caller.
|
||||
func (a *execArtifact) Cure(f *FContext) (err error) {
|
||||
return a.cure(f, false)
|
||||
}
|
||||
|
||||
const (
|
||||
// execWaitDelay is passed through to [container.Params].
|
||||
execWaitDelay = time.Nanosecond
|
||||
)
|
||||
|
||||
// scanVerbose prefixes each line of program output for a verbose [message.Msg].
|
||||
func scanVerbose(
|
||||
msg message.Msg,
|
||||
done chan<- struct{},
|
||||
prefix string,
|
||||
r io.Reader,
|
||||
) {
|
||||
defer close(done)
|
||||
s := bufio.NewScanner(r)
|
||||
for s.Scan() {
|
||||
msg.Verbose(prefix, s.Text())
|
||||
}
|
||||
if err := s.Err(); err != nil && !errors.Is(err, os.ErrClosed) {
|
||||
msg.Verbose("*"+prefix, err)
|
||||
}
|
||||
}
|
||||
|
||||
// cure is like Cure but optionally keeps the host net namespace. This is used for
|
||||
// the [KnownChecksum] variant where networking is allowed.
|
||||
func (a *execArtifact) cure(f *FContext, hostNet bool) (err error) {
|
||||
overlayWorkIndex := -1
|
||||
for i, p := range a.paths {
|
||||
if p.P == nil || len(p.A) == 0 {
|
||||
return os.ErrInvalid
|
||||
}
|
||||
if p.P.Is(AbsWork) {
|
||||
overlayWorkIndex = i
|
||||
}
|
||||
}
|
||||
|
||||
var artifactCount int
|
||||
for _, p := range a.paths {
|
||||
artifactCount += len(p.A)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(f.Unwrap(), a.timeout)
|
||||
defer cancel()
|
||||
|
||||
z := container.New(ctx, f.GetMessage())
|
||||
z.WaitDelay = execWaitDelay
|
||||
z.SeccompPresets |= std.PresetStrict
|
||||
z.ParentPerm = 0700
|
||||
z.HostNet = hostNet
|
||||
z.Hostname = "cure"
|
||||
if z.HostNet {
|
||||
z.Hostname = "cure-net"
|
||||
}
|
||||
z.Uid, z.Gid = (1<<10)-1, (1<<10)-1
|
||||
if msg := f.GetMessage(); msg.IsVerbose() {
|
||||
var stdout, stderr io.ReadCloser
|
||||
if stdout, err = z.StdoutPipe(); err != nil {
|
||||
return
|
||||
}
|
||||
if stderr, err = z.StderrPipe(); err != nil {
|
||||
_ = stdout.Close()
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
if err != nil && !errors.As(err, new(*exec.ExitError)) {
|
||||
_ = stdout.Close()
|
||||
_ = stderr.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
stdoutDone, stderrDone := make(chan struct{}), make(chan struct{})
|
||||
go scanVerbose(msg, stdoutDone, "("+a.name+":1)", stdout)
|
||||
go scanVerbose(msg, stderrDone, "("+a.name+":2)", stderr)
|
||||
defer func() { <-stdoutDone; <-stderrDone }()
|
||||
}
|
||||
|
||||
z.Dir, z.Env, z.Path, z.Args = a.dir, a.env, a.path, a.args
|
||||
z.Grow(len(a.paths) + 4)
|
||||
|
||||
temp, work := f.GetTempDir(), f.GetWorkDir()
|
||||
for i, b := range a.paths {
|
||||
layers := make([]*check.Absolute, len(b.A))
|
||||
for j, d := range b.A {
|
||||
layers[j] = f.Pathname(d)
|
||||
}
|
||||
|
||||
if i == overlayWorkIndex {
|
||||
if err = os.MkdirAll(work.String(), 0700); err != nil {
|
||||
return
|
||||
}
|
||||
tempWork := temp.Append(".work")
|
||||
if err = os.MkdirAll(tempWork.String(), 0700); err != nil {
|
||||
return
|
||||
}
|
||||
z.Overlay(
|
||||
AbsWork,
|
||||
work,
|
||||
tempWork,
|
||||
layers...,
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
if a.paths[i].W {
|
||||
tempUpper, tempWork := temp.Append(
|
||||
".upper", strconv.Itoa(i),
|
||||
), temp.Append(
|
||||
".work", strconv.Itoa(i),
|
||||
)
|
||||
if err = os.MkdirAll(tempUpper.String(), 0700); err != nil {
|
||||
return
|
||||
}
|
||||
if err = os.MkdirAll(tempWork.String(), 0700); err != nil {
|
||||
return
|
||||
}
|
||||
z.Overlay(b.P, tempUpper, tempWork, layers...)
|
||||
} else if len(layers) == 1 {
|
||||
z.Bind(layers[0], b.P, 0)
|
||||
} else {
|
||||
z.OverlayReadonly(b.P, layers...)
|
||||
}
|
||||
}
|
||||
if overlayWorkIndex < 0 {
|
||||
z.Bind(
|
||||
work,
|
||||
AbsWork,
|
||||
std.BindWritable|std.BindEnsure,
|
||||
)
|
||||
}
|
||||
z.Bind(
|
||||
f.GetTempDir(),
|
||||
fhs.AbsTmp,
|
||||
std.BindWritable|std.BindEnsure,
|
||||
)
|
||||
z.Proc(fhs.AbsProc).Dev(fhs.AbsDev, true)
|
||||
|
||||
if err = z.Start(); err != nil {
|
||||
return
|
||||
}
|
||||
if err = z.Serve(); err != nil {
|
||||
return
|
||||
}
|
||||
if err = z.Wait(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// do not allow empty directories to succeed
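// Rmdir is retried on EINTR. ENOTEMPTY is the expected outcome when the
// program wrote output and is mapped to nil; if the work directory is empty,
// Rmdir removes it so an empty result cannot be stored silently, and any
// other error is returned unchanged.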
|
||||
for {
|
||||
err = syscall.Rmdir(work.String())
|
||||
if err != syscall.EINTR {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err != nil && errors.Is(err, syscall.ENOTEMPTY) {
|
||||
err = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
297
internal/pkg/exec_test.go
Normal file
@@ -0,0 +1,297 @@
|
||||
package pkg_test
|
||||
|
||||
//go:generate env CGO_ENABLED=0 go build -tags testtool -o testdata/testtool ./testdata
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"encoding/gob"
|
||||
"errors"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
"hakurei.app/container/check"
|
||||
"hakurei.app/container/stub"
|
||||
"hakurei.app/hst"
|
||||
"hakurei.app/internal/pkg"
|
||||
)
|
||||
|
||||
// testtoolBin is the container test tool binary made available to the
|
||||
// execArtifact for testing its curing environment.
|
||||
//
|
||||
//go:embed testdata/testtool
|
||||
var testtoolBin []byte
|
||||
|
||||
func TestExec(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
wantChecksumOffline := pkg.MustDecode(
|
||||
"GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9",
|
||||
)
|
||||
|
||||
checkWithCache(t, []cacheTestCase{
|
||||
{"offline", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||
c.SetStrict(true)
|
||||
testtool, testtoolDestroy := newTesttool()
|
||||
|
||||
cureMany(t, c, []cureStep{
|
||||
{"container", pkg.NewExec(
|
||||
"", nil, 0,
|
||||
pkg.AbsWork,
|
||||
[]string{"HAKUREI_TEST=1"},
|
||||
check.MustAbs("/opt/bin/testtool"),
|
||||
[]string{"testtool"},
|
||||
|
||||
pkg.MustPath("/file", false, newStubFile(
|
||||
pkg.KindHTTPGet,
|
||||
pkg.ID{0xfe, 0},
|
||||
nil,
|
||||
nil, nil,
|
||||
)),
|
||||
pkg.MustPath("/.hakurei", false, stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
params: []byte("empty directory"),
|
||||
cure: func(t *pkg.TContext) error {
|
||||
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||
},
|
||||
}),
|
||||
pkg.MustPath("/opt", false, testtool),
|
||||
), ignorePathname, wantChecksumOffline, nil},
|
||||
|
||||
{"error passthrough", pkg.NewExec(
|
||||
"", nil, 0,
|
||||
pkg.AbsWork,
|
||||
[]string{"HAKUREI_TEST=1"},
|
||||
check.MustAbs("/opt/bin/testtool"),
|
||||
[]string{"testtool"},
|
||||
|
||||
pkg.MustPath("/proc/nonexistent", false, stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
params: []byte("doomed artifact"),
|
||||
cure: func(t *pkg.TContext) error {
|
||||
return stub.UniqueError(0xcafe)
|
||||
},
|
||||
}),
|
||||
), nil, pkg.Checksum{}, errors.Join(stub.UniqueError(0xcafe))},
|
||||
|
||||
{"invalid paths", pkg.NewExec(
|
||||
"", nil, 0,
|
||||
pkg.AbsWork,
|
||||
[]string{"HAKUREI_TEST=1"},
|
||||
check.MustAbs("/opt/bin/testtool"),
|
||||
[]string{"testtool"},
|
||||
|
||||
pkg.ExecPath{},
|
||||
), nil, pkg.Checksum{}, os.ErrInvalid},
|
||||
})
|
||||
|
||||
// check init failure passthrough
|
||||
var exitError *exec.ExitError
|
||||
if _, _, err := c.Cure(pkg.NewExec(
|
||||
"", nil, 0,
|
||||
pkg.AbsWork,
|
||||
nil,
|
||||
check.MustAbs("/opt/bin/testtool"),
|
||||
[]string{"testtool"},
|
||||
)); !errors.As(err, &exitError) ||
|
||||
exitError.ExitCode() != hst.ExitFailure {
|
||||
t.Fatalf("Cure: error = %v, want init exit status 1", err)
|
||||
}
|
||||
|
||||
testtoolDestroy(t, base, c)
|
||||
}, pkg.MustDecode("UiV6kMz7KrTsc_yphiyQzFLqjRanHxUOwrBMtkKuWo4mOO6WgPFAcoUEeSp7eVIW")},
|
||||
|
||||
{"net", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||
c.SetStrict(true)
|
||||
testtool, testtoolDestroy := newTesttool()
|
||||
|
||||
wantChecksum := pkg.MustDecode(
|
||||
"a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W",
|
||||
)
|
||||
cureMany(t, c, []cureStep{
|
||||
{"container", pkg.NewExec(
|
||||
"", &wantChecksum, 0,
|
||||
pkg.AbsWork,
|
||||
[]string{"HAKUREI_TEST=1"},
|
||||
check.MustAbs("/opt/bin/testtool"),
|
||||
[]string{"testtool", "net"},
|
||||
|
||||
pkg.MustPath("/file", false, newStubFile(
|
||||
pkg.KindHTTPGet,
|
||||
pkg.ID{0xfe, 0},
|
||||
nil,
|
||||
nil, nil,
|
||||
)),
|
||||
pkg.MustPath("/.hakurei", false, stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
params: []byte("empty directory"),
|
||||
cure: func(t *pkg.TContext) error {
|
||||
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||
},
|
||||
}),
|
||||
pkg.MustPath("/opt", false, testtool),
|
||||
), ignorePathname, wantChecksum, nil},
|
||||
})
|
||||
|
||||
testtoolDestroy(t, base, c)
|
||||
}, pkg.MustDecode("ek4K-0d4iRSArkY2TCs3WK34DbiYeOmhE_4vsJTSu_6roY4ZF3YG6eKRooal-i1o")},
|
||||
|
||||
{"overlay root", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||
c.SetStrict(true)
|
||||
testtool, testtoolDestroy := newTesttool()
|
||||
|
||||
cureMany(t, c, []cureStep{
|
||||
{"container", pkg.NewExec(
|
||||
"", nil, 0,
|
||||
pkg.AbsWork,
|
||||
[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
|
||||
check.MustAbs("/opt/bin/testtool"),
|
||||
[]string{"testtool"},
|
||||
|
||||
pkg.MustPath("/", true, stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
params: []byte("empty directory"),
|
||||
cure: func(t *pkg.TContext) error {
|
||||
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||
},
|
||||
}),
|
||||
pkg.MustPath("/opt", false, testtool),
|
||||
), ignorePathname, wantChecksumOffline, nil},
|
||||
})
|
||||
|
||||
testtoolDestroy(t, base, c)
|
||||
}, pkg.MustDecode("VIqqpf0ip9jcyw63i6E8lCMGUcLivQBe4Bevt3WusNac-1MSy5bzB647qGUBzl-W")},
|
||||
|
||||
{"overlay work", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||
c.SetStrict(true)
|
||||
testtool, testtoolDestroy := newTesttool()
|
||||
|
||||
cureMany(t, c, []cureStep{
|
||||
{"container", pkg.NewExec(
|
||||
"", nil, 0,
|
||||
pkg.AbsWork,
|
||||
[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
|
||||
check.MustAbs("/work/bin/testtool"),
|
||||
[]string{"testtool"},
|
||||
|
||||
pkg.MustPath("/", true, stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
params: []byte("empty directory"),
|
||||
cure: func(t *pkg.TContext) error {
|
||||
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||
},
|
||||
}), pkg.MustPath("/work/", false, stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
params: []byte("empty directory"),
|
||||
cure: func(t *pkg.TContext) error {
|
||||
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||
},
|
||||
}), pkg.Path(pkg.AbsWork, false /* ignored */, testtool),
|
||||
), ignorePathname, wantChecksumOffline, nil},
|
||||
})
|
||||
|
||||
testtoolDestroy(t, base, c)
|
||||
}, pkg.MustDecode("q8x2zQg4YZbKpPqKlEBj_uxXD9vOBaZ852qOuIsl9QdO73I_UMNpuUoPLtunxUYl")},
|
||||
|
||||
{"multiple layers", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||
c.SetStrict(true)
|
||||
testtool, testtoolDestroy := newTesttool()
|
||||
|
||||
cureMany(t, c, []cureStep{
|
||||
{"container", pkg.NewExec(
|
||||
"", nil, 0,
|
||||
pkg.AbsWork,
|
||||
[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
|
||||
check.MustAbs("/opt/bin/testtool"),
|
||||
[]string{"testtool", "layers"},
|
||||
|
||||
pkg.MustPath("/", true, stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
params: []byte("empty directory"),
|
||||
cure: func(t *pkg.TContext) error {
|
||||
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||
},
|
||||
}, stubArtifactF{
|
||||
kind: pkg.KindExec,
|
||||
params: []byte("test sample with dependencies"),
|
||||
|
||||
deps: slices.Repeat([]pkg.Artifact{newStubFile(
|
||||
pkg.KindHTTPGet,
|
||||
pkg.ID{0xfe, 0},
|
||||
nil,
|
||||
nil, nil,
|
||||
), stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
params: []byte("empty directory"),
|
||||
|
||||
// this is queued and might run instead of the other
|
||||
// one, so do not leave it as nil
|
||||
cure: func(t *pkg.TContext) error {
|
||||
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||
},
|
||||
}}, 1<<5 /* concurrent cache hits */), cure: func(f *pkg.FContext) error {
|
||||
work := f.GetWorkDir()
|
||||
if err := os.MkdirAll(work.String(), 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(work.Append("check").String(), []byte("layers"), 0400)
|
||||
},
|
||||
}),
|
||||
pkg.MustPath("/opt", false, testtool),
|
||||
), ignorePathname, wantChecksumOffline, nil},
|
||||
})
|
||||
|
||||
testtoolDestroy(t, base, c)
|
||||
}, pkg.MustDecode("SITnQ6PTV12PAQQjIuLUxkvsXQiC9Gq_HJQlcb4BPL5YnRHnx8lsW7PRM9YMLBsx")},
|
||||
})
|
||||
}
|
||||
|
||||
// newTesttool returns an [Artifact] that cures into testtoolBin. The returned
|
||||
// function must be called at the end of the test but not deferred.
|
||||
func newTesttool() (
|
||||
testtool pkg.Artifact,
|
||||
testtoolDestroy func(t *testing.T, base *check.Absolute, c *pkg.Cache),
|
||||
) {
|
||||
// testtoolBin is built during go:generate and is not deterministic
|
||||
testtool = overrideIdent{pkg.ID{0xfe, 0xff}, stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
cure: func(t *pkg.TContext) error {
|
||||
work := t.GetWorkDir()
|
||||
if err := os.MkdirAll(
|
||||
work.Append("bin").String(),
|
||||
0700,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ift, err := net.Interfaces(); err != nil {
|
||||
return err
|
||||
} else {
|
||||
var f *os.File
|
||||
if f, err = os.Create(t.GetWorkDir().Append(
|
||||
"ift",
|
||||
).String()); err != nil {
|
||||
return err
|
||||
} else {
|
||||
err = gob.NewEncoder(f).Encode(ift)
|
||||
closeErr := f.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if closeErr != nil {
|
||||
return closeErr
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return os.WriteFile(t.GetWorkDir().Append(
|
||||
"bin",
|
||||
"testtool",
|
||||
).String(), testtoolBin, 0500)
|
||||
},
|
||||
}}
|
||||
testtoolDestroy = newDestroyArtifactFunc(testtool)
|
||||
return
|
||||
}
|
||||
54
internal/pkg/file.go
Normal file
@@ -0,0 +1,54 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha512"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// A fileArtifact is an [Artifact] that cures into data known ahead of time.
|
||||
type fileArtifact []byte
|
||||
|
||||
var _ KnownChecksum = fileArtifact{}
|
||||
|
||||
// fileArtifactNamed embeds fileArtifact alongside a caller-supplied name.
|
||||
type fileArtifactNamed struct {
|
||||
fileArtifact
|
||||
// Caller-supplied user-facing reporting name.
|
||||
name string
|
||||
}
|
||||
|
||||
var _ fmt.Stringer = fileArtifactNamed{}
|
||||
|
||||
// String returns the caller-supplied reporting name.
|
||||
func (a fileArtifactNamed) String() string { return a.name }
|
||||
|
||||
// NewFile returns a [File] that cures into a caller-supplied byte slice.
|
||||
//
|
||||
// Caller must not modify data after NewFile returns.
|
||||
func NewFile(name string, data []byte) File {
|
||||
f := fileArtifact(data)
|
||||
if name != "" {
|
||||
return fileArtifactNamed{f, name}
|
||||
}
|
||||
return f
|
||||
}
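// An illustrative use from a caller's point of view:
//
//	f := pkg.NewFile("motd", []byte("hello\n"))
//	// f.Checksum() is the SHA-384 digest of the data, and f.Cure(ctx)
//	// returns the same bytes without touching the filesystem.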
|
||||
|
||||
// Kind returns the hardcoded [Kind] constant.
|
||||
func (a fileArtifact) Kind() Kind { return KindFile }
|
||||
|
||||
// Params returns the caller-supplied data.
|
||||
func (a fileArtifact) Params() []byte { return a }
|
||||
|
||||
// Dependencies returns a nil slice.
|
||||
func (a fileArtifact) Dependencies() []Artifact { return nil }
|
||||
|
||||
// Checksum computes and returns the checksum of caller-supplied data.
|
||||
func (a fileArtifact) Checksum() Checksum {
|
||||
h := sha512.New384()
|
||||
h.Write(a)
|
||||
return Checksum(h.Sum(nil))
|
||||
}
|
||||
|
||||
// Cure returns the caller-supplied data.
|
||||
func (a fileArtifact) Cure(context.Context) ([]byte, error) { return a, nil }
|
||||
29
internal/pkg/file_test.go
Normal file
@@ -0,0 +1,29 @@
|
||||
package pkg_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"hakurei.app/container/check"
|
||||
"hakurei.app/internal/pkg"
|
||||
)
|
||||
|
||||
func TestFile(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
checkWithCache(t, []cacheTestCase{
|
||||
{"file", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||
c.SetStrict(true)
|
||||
|
||||
cureMany(t, c, []cureStep{
|
||||
{"short", pkg.NewFile("null", []byte{0}), base.Append(
|
||||
"identifier",
|
||||
"lIx_W4M7tVOcQ8jh08EJOfXf4brRmkEEjvUa7c17vVUzlmtUxlhhrgqmc9aZhjbn",
|
||||
), pkg.MustDecode(
|
||||
"vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX",
|
||||
), nil},
|
||||
})
|
||||
}, pkg.MustDecode(
|
||||
"hnrfmJtivNKcgtETsKnU9gP_OwPgpNY3DSUJnmxnmeOODSO-YBvEBiTgieY4AAd7",
|
||||
)},
|
||||
})
|
||||
}
|
||||
124
internal/pkg/net.go
Normal file
@@ -0,0 +1,124 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha512"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"path"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// An httpArtifact is an [Artifact] backed by an [http] url string. The method is
|
||||
// hardcoded as [http.MethodGet]. Request body is not allowed because it cannot
|
||||
// be deterministically represented by Params.
|
||||
type httpArtifact struct {
|
||||
// Caller-supplied url string.
|
||||
url string
|
||||
|
||||
// Caller-supplied checksum of the response body. This is validated during
|
||||
// curing and the first call to Cure.
|
||||
checksum Checksum
|
||||
|
||||
// doFunc is the Do method of [http.Client] supplied by the caller.
|
||||
doFunc func(req *http.Request) (*http.Response, error)
|
||||
|
||||
// Response body read to EOF.
|
||||
data []byte
|
||||
|
||||
// Synchronises access to data.
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
var _ KnownChecksum = new(httpArtifact)
|
||||
var _ fmt.Stringer = new(httpArtifact)
|
||||
|
||||
// NewHTTPGet returns a new [File] backed by the supplied client. A GET request
|
||||
// is set up for url. If c is nil, [http.DefaultClient] is used instead.
|
||||
func NewHTTPGet(
|
||||
c *http.Client,
|
||||
url string,
|
||||
checksum Checksum,
|
||||
) File {
|
||||
if c == nil {
|
||||
c = http.DefaultClient
|
||||
}
|
||||
return &httpArtifact{url: url, checksum: checksum, doFunc: c.Do}
|
||||
}
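// An illustrative use from a caller's point of view; the URL is hypothetical
// and sum must hold the SHA-384 digest of the expected response body:
//
//	f := pkg.NewHTTPGet(nil, "https://example.org/sample.tar.gz", sum)
//	data, err := f.Cure(ctx) // fetched once, then served from memory after validation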
|
||||
|
||||
// Kind returns the hardcoded [Kind] constant.
|
||||
func (a *httpArtifact) Kind() Kind { return KindHTTPGet }
|
||||
|
||||
// Params returns the backing url string. Context is not represented as it does
|
||||
// not affect [Cache.Cure] outcome.
|
||||
func (a *httpArtifact) Params() []byte { return []byte(a.url) }
|
||||
|
||||
// Dependencies returns a nil slice.
|
||||
func (a *httpArtifact) Dependencies() []Artifact { return nil }
|
||||
|
||||
// Checksum returns the caller-supplied checksum.
|
||||
func (a *httpArtifact) Checksum() Checksum { return a.checksum }
|
||||
|
||||
// String returns [path.Base] over the backing url.
|
||||
func (a *httpArtifact) String() string { return path.Base(a.url) }
|
||||
|
||||
// ResponseStatusError is returned for a response from an [http.Client]
|
||||
// with a status code other than [http.StatusOK].
|
||||
type ResponseStatusError int
|
||||
|
||||
func (e ResponseStatusError) Error() string {
|
||||
return "the requested URL returned non-OK status: " + http.StatusText(int(e))
|
||||
}
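// For example, a 404 response produces:
//
//	pkg.ResponseStatusError(http.StatusNotFound).Error()
//	// "the requested URL returned non-OK status: Not Found"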
|
||||
|
||||
// do sends a GET request for the caller-supplied url via the caller-supplied [http.Client],
|
||||
// reads its response body to EOF, and returns the resulting bytes.
|
||||
func (a *httpArtifact) do(ctx context.Context) (data []byte, err error) {
|
||||
var req *http.Request
|
||||
req, err = http.NewRequestWithContext(ctx, http.MethodGet, a.url, nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var resp *http.Response
|
||||
if resp, err = a.doFunc(req); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
_ = resp.Body.Close()
|
||||
return nil, ResponseStatusError(resp.StatusCode)
|
||||
}
|
||||
|
||||
if data, err = io.ReadAll(resp.Body); err != nil {
|
||||
_ = resp.Body.Close()
|
||||
return
|
||||
}
|
||||
|
||||
err = resp.Body.Close()
|
||||
return
|
||||
}
|
||||
|
||||
// Cure completes the http request and returns the resulting response body read
|
||||
// to EOF. Cure does not interact with the filesystem.
|
||||
func (a *httpArtifact) Cure(ctx context.Context) (data []byte, err error) {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
|
||||
if a.data != nil {
|
||||
// validated by cache or a previous call to Cure
|
||||
return a.data, nil
|
||||
}
|
||||
|
||||
if data, err = a.do(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
h := sha512.New384()
|
||||
h.Write(data)
|
||||
if got := (Checksum)(h.Sum(nil)); got != a.checksum {
|
||||
return nil, &ChecksumMismatchError{got, a.checksum}
|
||||
}
|
||||
a.data = data
|
||||
return
|
||||
}
|
||||
133
internal/pkg/net_test.go
Normal file
@@ -0,0 +1,133 @@
|
||||
package pkg_test
|
||||
|
||||
import (
|
||||
"crypto/sha512"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"testing"
|
||||
"testing/fstest"
|
||||
|
||||
"hakurei.app/container/check"
|
||||
"hakurei.app/internal/pkg"
|
||||
)
|
||||
|
||||
func TestHTTPGet(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const testdata = "\x7f\xe1\x69\xa2\xdd\x63\x96\x26\x83\x79\x61\x8b\xf0\x3f\xd5\x16\x9a\x39\x3a\xdb\xcf\xb1\xbc\x8d\x33\xff\x75\xee\x62\x56\xa9\xf0\x27\xac\x13\x94\x69"
|
||||
|
||||
testdataChecksum := func() pkg.Checksum {
|
||||
h := sha512.New384()
|
||||
h.Write([]byte(testdata))
|
||||
return (pkg.Checksum)(h.Sum(nil))
|
||||
}()
|
||||
|
||||
var transport http.Transport
|
||||
client := http.Client{Transport: &transport}
|
||||
transport.RegisterProtocol("file", http.NewFileTransportFS(fstest.MapFS{
|
||||
"testdata": {Data: []byte(testdata), Mode: 0400},
|
||||
}))
|
||||
|
||||
checkWithCache(t, []cacheTestCase{
|
||||
{"direct", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||
f := pkg.NewHTTPGet(
|
||||
&client,
|
||||
"file:///testdata",
|
||||
testdataChecksum,
|
||||
)
|
||||
wantIdent := pkg.KindHTTPGet.Ident([]byte("file:///testdata"))
|
||||
if got, err := f.Cure(t.Context()); err != nil {
|
||||
t.Fatalf("Cure: error = %v", err)
|
||||
} else if string(got) != testdata {
|
||||
t.Fatalf("Cure: %x, want %x", got, testdata)
|
||||
} else if gotIdent := pkg.Ident(f); gotIdent != wantIdent {
|
||||
t.Fatalf("Ident: %s, want %s", pkg.Encode(gotIdent), pkg.Encode(wantIdent))
|
||||
}
|
||||
|
||||
// check direct validation
|
||||
f = pkg.NewHTTPGet(
|
||||
&client,
|
||||
"file:///testdata",
|
||||
pkg.Checksum{},
|
||||
)
|
||||
wantErrMismatch := &pkg.ChecksumMismatchError{
|
||||
Got: testdataChecksum,
|
||||
}
|
||||
if _, err := f.Cure(t.Context()); !reflect.DeepEqual(err, wantErrMismatch) {
|
||||
t.Fatalf("Cure: error = %#v, want %#v", err, wantErrMismatch)
|
||||
} else if gotIdent := pkg.Ident(f); gotIdent != wantIdent {
|
||||
t.Fatalf("Ident: %s, want %s", pkg.Encode(gotIdent), pkg.Encode(wantIdent))
|
||||
}
|
||||
|
||||
// check direct response error
|
||||
f = pkg.NewHTTPGet(
|
||||
&client,
|
||||
"file:///nonexistent",
|
||||
pkg.Checksum{},
|
||||
)
|
||||
wantIdentNonexistent := pkg.KindHTTPGet.Ident([]byte("file:///nonexistent"))
|
||||
wantErrNotFound := pkg.ResponseStatusError(http.StatusNotFound)
|
||||
if _, err := f.Cure(t.Context()); !reflect.DeepEqual(err, wantErrNotFound) {
|
||||
t.Fatalf("Cure: error = %#v, want %#v", err, wantErrNotFound)
|
||||
} else if gotIdent := pkg.Ident(f); gotIdent != wantIdentNonexistent {
|
||||
t.Fatalf("Ident: %s, want %s", pkg.Encode(gotIdent), pkg.Encode(wantIdentNonexistent))
|
||||
}
|
||||
}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C")},
|
||||
|
||||
{"cure", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||
f := pkg.NewHTTPGet(
|
||||
&client,
|
||||
"file:///testdata",
|
||||
testdataChecksum,
|
||||
)
|
||||
wantIdent := pkg.KindHTTPGet.Ident([]byte("file:///testdata"))
|
||||
wantPathname := base.Append(
|
||||
"identifier",
|
||||
pkg.Encode(wantIdent),
|
||||
)
|
||||
if pathname, checksum, err := c.Cure(f); err != nil {
|
||||
t.Fatalf("Cure: error = %v", err)
|
||||
} else if !pathname.Is(wantPathname) {
|
||||
t.Fatalf("Cure: %q, want %q", pathname, wantPathname)
|
||||
} else if checksum != testdataChecksum {
|
||||
t.Fatalf("Cure: %x, want %x", checksum, testdataChecksum)
|
||||
}
|
||||
|
||||
if got, err := f.Cure(t.Context()); err != nil {
|
||||
t.Fatalf("Cure: error = %v", err)
|
||||
} else if string(got) != testdata {
|
||||
t.Fatalf("Cure: %x, want %x", got, testdata)
|
||||
} else if gotIdent := pkg.Ident(f); gotIdent != wantIdent {
|
||||
t.Fatalf("Ident: %s, want %s", pkg.Encode(gotIdent), pkg.Encode(wantIdent))
|
||||
}
|
||||
|
||||
// check load from cache
|
||||
f = pkg.NewHTTPGet(
|
||||
&client,
|
||||
"file:///testdata",
|
||||
testdataChecksum,
|
||||
)
|
||||
if got, err := f.Cure(t.Context()); err != nil {
|
||||
t.Fatalf("Cure: error = %v", err)
|
||||
} else if string(got) != testdata {
|
||||
t.Fatalf("Cure: %x, want %x", got, testdata)
|
||||
} else if gotIdent := pkg.Ident(f); gotIdent != wantIdent {
|
||||
t.Fatalf("Ident: %s, want %s", pkg.Encode(gotIdent), pkg.Encode(wantIdent))
|
||||
}
|
||||
|
||||
// check error passthrough
|
||||
f = pkg.NewHTTPGet(
|
||||
&client,
|
||||
"file:///nonexistent",
|
||||
pkg.Checksum{},
|
||||
)
|
||||
wantIdentNonexistent := pkg.KindHTTPGet.Ident([]byte("file:///nonexistent"))
|
||||
wantErrNotFound := pkg.ResponseStatusError(http.StatusNotFound)
|
||||
if _, _, err := c.Cure(f); !reflect.DeepEqual(err, wantErrNotFound) {
|
||||
t.Fatalf("Pathname: error = %#v, want %#v", err, wantErrNotFound)
|
||||
} else if gotIdent := pkg.Ident(f); gotIdent != wantIdentNonexistent {
|
||||
t.Fatalf("Ident: %s, want %s", pkg.Encode(gotIdent), pkg.Encode(wantIdentNonexistent))
|
||||
}
|
||||
}, pkg.MustDecode("bqtn69RkV5E7V7GhhgCFjcvbxmaqrO8DywamM4Tyjf10F6EJBHjXiIa_tFRtF4iN")},
|
||||
})
|
||||
}
|
||||
1315
internal/pkg/pkg.go
Normal file
File diff suppressed because it is too large
977
internal/pkg/pkg_test.go
Normal file
@@ -0,0 +1,977 @@
|
||||
package pkg_test
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha512"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"syscall"
|
||||
"testing"
|
||||
"unsafe"
|
||||
|
||||
"hakurei.app/container"
|
||||
"hakurei.app/container/check"
|
||||
"hakurei.app/container/stub"
|
||||
"hakurei.app/internal/pkg"
|
||||
"hakurei.app/message"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) { container.TryArgv0(nil); os.Exit(m.Run()) }
|
||||
|
||||
// overrideIdent overrides the ID method of [Artifact].
|
||||
type overrideIdent struct {
|
||||
id pkg.ID
|
||||
pkg.TrivialArtifact
|
||||
}
|
||||
|
||||
func (a overrideIdent) ID() pkg.ID { return a.id }
|
||||
|
||||
// overrideIdentFile overrides the ID method of [File].
|
||||
type overrideIdentFile struct {
|
||||
id pkg.ID
|
||||
pkg.File
|
||||
}
|
||||
|
||||
func (a overrideIdentFile) ID() pkg.ID { return a.id }
|
||||
|
||||
// A knownIdentArtifact implements [pkg.KnownIdent] and [Artifact]
|
||||
type knownIdentArtifact interface {
|
||||
pkg.KnownIdent
|
||||
pkg.TrivialArtifact
|
||||
}
|
||||
|
||||
// A knownIdentFile implements [pkg.KnownIdent] and [File]
|
||||
type knownIdentFile interface {
|
||||
pkg.KnownIdent
|
||||
pkg.File
|
||||
}
|
||||
|
||||
// overrideChecksum overrides the Checksum method of [Artifact].
|
||||
type overrideChecksum struct {
|
||||
checksum pkg.Checksum
|
||||
knownIdentArtifact
|
||||
}
|
||||
|
||||
func (a overrideChecksum) Checksum() pkg.Checksum { return a.checksum }
|
||||
|
||||
// overrideChecksumFile overrides the Checksum method of [File].
|
||||
type overrideChecksumFile struct {
|
||||
checksum pkg.Checksum
|
||||
knownIdentFile
|
||||
}
|
||||
|
||||
func (a overrideChecksumFile) Checksum() pkg.Checksum { return a.checksum }
|
||||
|
||||
// A stubArtifact implements [TrivialArtifact] with hardcoded behaviour.
|
||||
type stubArtifact struct {
|
||||
kind pkg.Kind
|
||||
params []byte
|
||||
deps []pkg.Artifact
|
||||
|
||||
cure func(t *pkg.TContext) error
|
||||
}
|
||||
|
||||
func (a stubArtifact) Kind() pkg.Kind { return a.kind }
|
||||
func (a stubArtifact) Params() []byte { return a.params }
|
||||
func (a stubArtifact) Dependencies() []pkg.Artifact { return a.deps }
|
||||
func (a stubArtifact) Cure(t *pkg.TContext) error { return a.cure(t) }
|
||||
|
||||
// A stubArtifactF implements [FloodArtifact] with hardcoded behaviour.
|
||||
type stubArtifactF struct {
|
||||
kind pkg.Kind
|
||||
params []byte
|
||||
deps []pkg.Artifact
|
||||
|
||||
cure func(f *pkg.FContext) error
|
||||
}
|
||||
|
||||
func (a stubArtifactF) Kind() pkg.Kind { return a.kind }
|
||||
func (a stubArtifactF) Params() []byte { return a.params }
|
||||
func (a stubArtifactF) Dependencies() []pkg.Artifact { return a.deps }
|
||||
func (a stubArtifactF) Cure(f *pkg.FContext) error { return a.cure(f) }
|
||||
|
||||
// A stubFile implements [File] with hardcoded behaviour.
|
||||
type stubFile struct {
|
||||
data []byte
|
||||
err error
|
||||
|
||||
stubArtifact
|
||||
}
|
||||
|
||||
func (a stubFile) Cure(context.Context) ([]byte, error) { return a.data, a.err }
|
||||
|
||||
// newStubFile returns an implementation of [pkg.File] with hardcoded behaviour.
|
||||
func newStubFile(
|
||||
kind pkg.Kind,
|
||||
id pkg.ID,
|
||||
sum *pkg.Checksum,
|
||||
data []byte,
|
||||
err error,
|
||||
) pkg.File {
|
||||
f := overrideIdentFile{id, stubFile{data, err, stubArtifact{
|
||||
kind,
|
||||
nil,
|
||||
nil,
|
||||
func(*pkg.TContext) error {
|
||||
panic("unreachable")
|
||||
},
|
||||
}}}
|
||||
if sum == nil {
|
||||
return f
|
||||
} else {
|
||||
return overrideChecksumFile{*sum, f}
|
||||
}
|
||||
}
|
||||
|
||||
// destroyArtifact removes all traces of an [Artifact] from the on-disk cache.
|
||||
// Do not use this in a test case without a very good reason to do so.
|
||||
func destroyArtifact(
|
||||
t *testing.T,
|
||||
base *check.Absolute,
|
||||
c *pkg.Cache,
|
||||
a pkg.Artifact,
|
||||
) {
|
||||
if pathname, checksum, err := c.Cure(a); err != nil {
|
||||
t.Fatalf("Cure: error = %v", err)
|
||||
} else if err = os.Remove(pathname.String()); err != nil {
|
||||
t.Fatal(err)
|
||||
} else {
|
||||
p := base.Append(
|
||||
"checksum",
|
||||
pkg.Encode(checksum),
|
||||
)
|
||||
if err = filepath.WalkDir(p.String(), func(
|
||||
path string,
|
||||
d fs.DirEntry,
|
||||
err error,
|
||||
) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if d.IsDir() {
|
||||
return os.Chmod(path, 0700)
|
||||
}
|
||||
return nil
|
||||
}); err != nil && !errors.Is(err, os.ErrNotExist) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err = os.RemoveAll(p.String()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// newDestroyArtifactFunc returns a function that calls destroyArtifact.
|
||||
func newDestroyArtifactFunc(a pkg.Artifact) func(
|
||||
t *testing.T,
|
||||
base *check.Absolute,
|
||||
c *pkg.Cache,
|
||||
) {
|
||||
return func(
|
||||
t *testing.T,
|
||||
base *check.Absolute,
|
||||
c *pkg.Cache,
|
||||
) {
|
||||
destroyArtifact(t, base, c, a)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIdent(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
a pkg.Artifact
|
||||
want pkg.ID
|
||||
}{
|
||||
{"tar", stubArtifact{
|
||||
pkg.KindTar,
|
||||
[]byte{pkg.TarGzip, 0, 0, 0, 0, 0, 0, 0},
|
||||
[]pkg.Artifact{
|
||||
overrideIdent{pkg.ID{}, stubArtifact{}},
|
||||
},
|
||||
nil,
|
||||
}, pkg.MustDecode(
|
||||
"HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY",
|
||||
)},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if got := pkg.Ident(tc.a); got != tc.want {
|
||||
t.Errorf("Ident: %s, want %s",
|
||||
pkg.Encode(got),
|
||||
pkg.Encode(tc.want),
|
||||
)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// cacheTestCase is a test case passed to checkWithCache where a new instance
|
||||
// of [pkg.Cache] is prepared for the test case, and is validated and removed
|
||||
// on test completion.
|
||||
type cacheTestCase struct {
|
||||
name string
|
||||
early func(t *testing.T, base *check.Absolute)
|
||||
f func(t *testing.T, base *check.Absolute, c *pkg.Cache)
|
||||
want pkg.Checksum
|
||||
}
|
||||
|
||||
// checkWithCache runs a slice of cacheTestCase.
|
||||
func checkWithCache(t *testing.T, testCases []cacheTestCase) {
|
||||
t.Helper()
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Helper()
|
||||
t.Parallel()
|
||||
|
||||
base := check.MustAbs(t.TempDir())
|
||||
if err := os.Chmod(base.String(), 0700); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Cleanup(func() {
|
||||
if err := filepath.WalkDir(base.String(), func(path string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return nil
|
||||
}
|
||||
if !d.IsDir() {
|
||||
return nil
|
||||
}
|
||||
return os.Chmod(path, 0700)
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
})
|
||||
|
||||
msg := message.New(log.New(os.Stderr, "cache: ", 0))
|
||||
msg.SwapVerbose(testing.Verbose())
|
||||
|
||||
var scrubFunc func() error // scrub after hashing
|
||||
if c, err := pkg.New(t.Context(), msg, 0, base); err != nil {
|
||||
t.Fatalf("New: error = %v", err)
|
||||
} else {
|
||||
t.Cleanup(c.Close)
|
||||
if tc.early != nil {
|
||||
tc.early(t, base)
|
||||
}
|
||||
tc.f(t, base, c)
|
||||
scrubFunc = c.Scrub
|
||||
}
|
||||
|
||||
var restoreTemp bool
|
||||
if _, err := os.Lstat(base.Append("temp").String()); err != nil {
|
||||
if !errors.Is(err, os.ErrNotExist) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
restoreTemp = true
|
||||
}
|
||||
|
||||
if checksum, err := pkg.HashDir(base); err != nil {
|
||||
t.Fatalf("HashDir: error = %v", err)
|
||||
} else if checksum != tc.want {
|
||||
t.Fatalf("HashDir: %v", &pkg.ChecksumMismatchError{
|
||||
Got: checksum,
|
||||
Want: tc.want,
|
||||
})
|
||||
}
|
||||
|
||||
if err := scrubFunc(); err != nil {
|
||||
t.Fatal("cache contains inconsistencies\n\n" + err.Error())
|
||||
}
|
||||
|
||||
if restoreTemp {
|
||||
if err := os.Mkdir(
|
||||
base.Append("temp").String(),
|
||||
0700,
|
||||
); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// validate again to make sure scrub did not condemn anything
|
||||
if checksum, err := pkg.HashDir(base); err != nil {
|
||||
t.Fatalf("HashDir: error = %v", err)
|
||||
} else if checksum != tc.want {
|
||||
t.Fatalf("(scrubbed) HashDir: %v", &pkg.ChecksumMismatchError{
|
||||
Got: checksum,
|
||||
Want: tc.want,
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// A cureStep contains an [Artifact] to be cured, and the expected outcome.
|
||||
type cureStep struct {
|
||||
name string
|
||||
|
||||
a pkg.Artifact
|
||||
|
||||
pathname *check.Absolute
|
||||
checksum pkg.Checksum
|
||||
err error
|
||||
}
|
||||
|
||||
// ignorePathname is passed to cureMany to skip the pathname check.
|
||||
var ignorePathname = check.MustAbs("/\x00")
|
||||
|
||||
// cureMany cures many artifacts against a [Cache] and checks their outcomes.
|
||||
func cureMany(t *testing.T, c *pkg.Cache, steps []cureStep) {
|
||||
t.Helper()
|
||||
|
||||
for _, step := range steps {
|
||||
t.Log("cure step:", step.name)
|
||||
if pathname, checksum, err := c.Cure(step.a); !reflect.DeepEqual(err, step.err) {
|
||||
t.Fatalf("Cure: error = %v, want %v", err, step.err)
|
||||
} else if step.pathname != ignorePathname && !pathname.Is(step.pathname) {
|
||||
t.Fatalf("Cure: pathname = %q, want %q", pathname, step.pathname)
|
||||
} else if checksum != step.checksum {
|
||||
t.Fatalf("Cure: checksum = %s, want %s", pkg.Encode(checksum), pkg.Encode(step.checksum))
|
||||
} else {
|
||||
v := any(err)
|
||||
if err == nil {
|
||||
v = pathname
|
||||
}
|
||||
t.Log(pkg.Encode(checksum)+":", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCache(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const testdata = "" +
|
||||
"\x00\x00\x00\x00" +
|
||||
"\xad\x0b\x00" +
|
||||
"\x04" +
|
||||
"\xfe\xfe\x00\x00" +
|
||||
"\xfe\xca\x00\x00"
|
||||
|
||||
testdataChecksum := func() pkg.Checksum {
|
||||
h := sha512.New384()
|
||||
h.Write([]byte(testdata))
|
||||
return (pkg.Checksum)(h.Sum(nil))
|
||||
}()
|
||||
|
||||
testCases := []cacheTestCase{
|
||||
{"file", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||
c.SetStrict(true)
|
||||
|
||||
identifier := (pkg.ID)(bytes.Repeat([]byte{
|
||||
0x75, 0xe6, 0x9d, 0x6d, 0xe7, 0x9f,
|
||||
}, 8))
|
||||
wantPathname := base.Append(
|
||||
"identifier",
|
||||
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef",
|
||||
)
|
||||
identifier0 := (pkg.ID)(bytes.Repeat([]byte{
|
||||
0x71, 0xa7, 0xde, 0x6d, 0xa6, 0xde,
|
||||
}, 8))
|
||||
wantPathname0 := base.Append(
|
||||
"identifier",
|
||||
"cafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe",
|
||||
)
|
||||
|
||||
cureMany(t, c, []cureStep{
|
||||
{"initial file", newStubFile(
|
||||
pkg.KindHTTPGet,
|
||||
identifier,
|
||||
&testdataChecksum,
|
||||
[]byte(testdata), nil,
|
||||
), wantPathname, testdataChecksum, nil},
|
||||
|
||||
{"identical content", newStubFile(
|
||||
pkg.KindHTTPGet,
|
||||
identifier0,
|
||||
&testdataChecksum,
|
||||
[]byte(testdata), nil,
|
||||
), wantPathname0, testdataChecksum, nil},
|
||||
|
||||
{"existing entry", newStubFile(
|
||||
pkg.KindHTTPGet,
|
||||
identifier,
|
||||
&testdataChecksum,
|
||||
[]byte(testdata), nil,
|
||||
), wantPathname, testdataChecksum, nil},
|
||||
|
||||
{"checksum mismatch", newStubFile(
|
||||
pkg.KindHTTPGet,
|
||||
pkg.ID{0xff, 0},
|
||||
new(pkg.Checksum),
|
||||
[]byte(testdata), nil,
|
||||
), nil, pkg.Checksum{}, &pkg.ChecksumMismatchError{
|
||||
Got: testdataChecksum,
|
||||
}},
|
||||
|
||||
{"store without validation", newStubFile(
|
||||
pkg.KindHTTPGet,
|
||||
pkg.MustDecode("vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX"),
|
||||
nil,
|
||||
[]byte{0}, nil,
|
||||
), base.Append(
|
||||
"identifier",
|
||||
"vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX",
|
||||
), pkg.Checksum{
|
||||
0xbe, 0xc0, 0x21, 0xb4, 0xf3, 0x68,
|
||||
0xe3, 0x06, 0x91, 0x34, 0xe0, 0x12,
|
||||
0xc2, 0xb4, 0x30, 0x70, 0x83, 0xd3,
|
||||
0xa9, 0xbd, 0xd2, 0x06, 0xe2, 0x4e,
|
||||
0x5f, 0x0d, 0x86, 0xe1, 0x3d, 0x66,
|
||||
0x36, 0x65, 0x59, 0x33, 0xec, 0x2b,
|
||||
0x41, 0x34, 0x65, 0x96, 0x68, 0x17,
|
||||
0xa9, 0xc2, 0x08, 0xa1, 0x17, 0x17,
|
||||
}, nil},
|
||||
|
||||
{"incomplete implementation", struct{ pkg.Artifact }{stubArtifact{
|
||||
kind: pkg.KindExec,
|
||||
params: []byte("artifact overridden to be incomplete"),
|
||||
}}, nil, pkg.Checksum{}, pkg.InvalidArtifactError(pkg.MustDecode(
|
||||
"da4kLKa94g1wN2M0qcKflqgf2-Y2UL36iehhczqsIIW8G0LGvM7S8jjtnBc0ftB0",
|
||||
))},
|
||||
|
||||
{"error passthrough", newStubFile(
|
||||
pkg.KindHTTPGet,
|
||||
pkg.ID{0xff, 1},
|
||||
nil,
|
||||
nil, stub.UniqueError(0xcafe),
|
||||
), nil, pkg.Checksum{}, stub.UniqueError(0xcafe)},
|
||||
|
||||
{"error caching", newStubFile(
|
||||
pkg.KindHTTPGet,
|
||||
pkg.ID{0xff, 1},
|
||||
nil,
|
||||
nil, nil,
|
||||
), nil, pkg.Checksum{}, stub.UniqueError(0xcafe)},
|
||||
|
||||
{"cache hit bad type", overrideChecksum{testdataChecksum, overrideIdent{pkg.ID{0xff, 2}, stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
}}}, nil, pkg.Checksum{}, pkg.InvalidFileModeError(
|
||||
0400,
|
||||
)},
|
||||
})
|
||||
|
||||
if c0, err := pkg.New(
|
||||
t.Context(),
|
||||
message.New(nil),
|
||||
0, base,
|
||||
); err != nil {
|
||||
t.Fatalf("New: error = %v", err)
|
||||
} else {
|
||||
t.Cleanup(c.Close) // check doubled cancel
|
||||
cureMany(t, c0, []cureStep{
|
||||
{"cache hit ident", overrideIdent{
|
||||
id: identifier,
|
||||
}, wantPathname, testdataChecksum, nil},
|
||||
|
||||
{"cache miss checksum match", newStubFile(
|
||||
pkg.KindHTTPGet,
|
||||
testdataChecksum,
|
||||
nil,
|
||||
[]byte(testdata),
|
||||
nil,
|
||||
), base.Append(
|
||||
"identifier",
|
||||
pkg.Encode(testdataChecksum),
|
||||
), testdataChecksum, nil},
|
||||
})
|
||||
|
||||
// cure after close
|
||||
c.Close()
|
||||
if _, _, err = c.Cure(stubArtifactF{
|
||||
kind: pkg.KindExec,
|
||||
params: []byte("unreachable artifact cured after cancel"),
|
||||
deps: []pkg.Artifact{pkg.NewFile("", []byte("unreachable dependency"))},
|
||||
}); !reflect.DeepEqual(err, context.Canceled) {
|
||||
t.Fatalf("(closed) Cure: error = %v", err)
|
||||
}
|
||||
}
|
||||
}, pkg.MustDecode("St9rlE-mGZ5gXwiv_hzQ_B8bZP-UUvSNmf4nHUZzCMOumb6hKnheZSe0dmnuc4Q2")},
|
||||
|
||||
{"directory", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||
id := pkg.KindTar.Ident(
|
||||
binary.LittleEndian.AppendUint64(nil, pkg.TarGzip),
|
||||
overrideIdent{testdataChecksum, stubArtifact{}},
|
||||
)
|
||||
makeSample := func(t *pkg.TContext) error {
|
||||
work := t.GetWorkDir()
|
||||
if err := os.Mkdir(work.String(), 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := os.WriteFile(
|
||||
work.Append("check").String(),
|
||||
[]byte{0, 0},
|
||||
0400,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(work.Append(
|
||||
"lib",
|
||||
"pkgconfig",
|
||||
).String(), 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return os.Symlink(
|
||||
"/proc/nonexistent/libedac.so",
|
||||
work.Append(
|
||||
"lib",
|
||||
"libedac.so",
|
||||
).String(),
|
||||
)
|
||||
}
|
||||
wantChecksum := pkg.MustDecode(
|
||||
"qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b",
|
||||
)
|
||||
wantPathname := base.Append(
|
||||
"identifier",
|
||||
pkg.Encode(id),
|
||||
)
|
||||
|
||||
id0 := pkg.KindTar.Ident(
|
||||
binary.LittleEndian.AppendUint64(nil, pkg.TarGzip),
|
||||
overrideIdent{pkg.ID{}, stubArtifact{}},
|
||||
)
|
||||
wantPathname0 := base.Append(
|
||||
"identifier",
|
||||
pkg.Encode(id0),
|
||||
)
|
||||
|
||||
makeGarbage := func(work *check.Absolute, wantErr error) error {
|
||||
if err := os.Mkdir(work.String(), 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mode := fs.FileMode(0)
|
||||
if wantErr == nil {
|
||||
mode = 0500
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(work.Append(
|
||||
"lib",
|
||||
"pkgconfig",
|
||||
).String(), 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := os.WriteFile(work.Append(
|
||||
"lib",
|
||||
"check",
|
||||
).String(), nil, 0400&mode); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := os.Chmod(work.Append(
|
||||
"lib",
|
||||
"pkgconfig",
|
||||
).String(), 0500&mode); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.Chmod(work.Append(
|
||||
"lib",
|
||||
).String(), 0500&mode); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return wantErr
|
||||
}
|
||||
|
||||
cureMany(t, c, []cureStep{
|
||||
{"initial directory", overrideChecksum{wantChecksum, overrideIdent{id, stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
cure: makeSample,
|
||||
}}}, wantPathname, wantChecksum, nil},
|
||||
|
||||
{"identical identifier", overrideChecksum{wantChecksum, overrideIdent{id, stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
}}}, wantPathname, wantChecksum, nil},
|
||||
|
||||
{"identical checksum", overrideIdent{id0, stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
cure: makeSample,
|
||||
}}, wantPathname0, wantChecksum, nil},
|
||||
|
||||
{"cure fault", overrideIdent{pkg.ID{0xff, 0}, stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
cure: func(t *pkg.TContext) error {
|
||||
return makeGarbage(t.GetWorkDir(), stub.UniqueError(0xcafe))
|
||||
},
|
||||
}}, nil, pkg.Checksum{}, stub.UniqueError(0xcafe)},
|
||||
|
||||
{"checksum mismatch", overrideChecksum{pkg.Checksum{}, overrideIdent{pkg.ID{0xff, 1}, stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
cure: func(t *pkg.TContext) error {
|
||||
return makeGarbage(t.GetWorkDir(), nil)
|
||||
},
|
||||
}}}, nil, pkg.Checksum{}, &pkg.ChecksumMismatchError{
|
||||
Got: pkg.MustDecode(
|
||||
"CUx-3hSbTWPsbMfDhgalG4Ni_GmR9TnVX8F99tY_P5GtkYvczg9RrF5zO0jX9XYT",
|
||||
),
|
||||
}},
|
||||
|
||||
{"cache hit bad type", newStubFile(
|
||||
pkg.KindHTTPGet,
|
||||
pkg.ID{0xff, 2},
|
||||
&wantChecksum,
|
||||
[]byte(testdata), nil,
|
||||
), nil, pkg.Checksum{}, pkg.InvalidFileModeError(
|
||||
fs.ModeDir | 0500,
|
||||
)},
|
||||
|
||||
{"openFile directory", overrideIdent{pkg.ID{0xff, 3}, stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
cure: func(t *pkg.TContext) error {
|
||||
r, err := t.Open(overrideChecksumFile{checksum: wantChecksum})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
_, err = io.ReadAll(r)
|
||||
return err
|
||||
},
|
||||
}}, nil, pkg.Checksum{}, &os.PathError{
|
||||
Op: "read",
|
||||
Path: base.Append(
|
||||
"checksum",
|
||||
pkg.Encode(wantChecksum),
|
||||
).String(),
|
||||
Err: syscall.EISDIR,
|
||||
}},
|
||||
|
||||
{"no output", overrideIdent{pkg.ID{0xff, 4}, stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
cure: func(t *pkg.TContext) error {
|
||||
return nil
|
||||
},
|
||||
}}, nil, pkg.Checksum{}, pkg.NoOutputError{}},
|
||||
|
||||
{"file output", overrideIdent{pkg.ID{0xff, 5}, stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
cure: func(t *pkg.TContext) error {
|
||||
return os.WriteFile(t.GetWorkDir().String(), []byte{0}, 0400)
|
||||
},
|
||||
}}, nil, pkg.Checksum{}, errors.New("non-file artifact produced regular file")},
|
||||
|
||||
{"symlink output", overrideIdent{pkg.ID{0xff, 6}, stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
cure: func(t *pkg.TContext) error {
|
||||
return os.Symlink(
|
||||
t.GetWorkDir().String(),
|
||||
t.GetWorkDir().String(),
|
||||
)
|
||||
},
|
||||
}}, nil, pkg.Checksum{}, pkg.InvalidFileModeError(
|
||||
fs.ModeSymlink | 0777,
|
||||
)},
|
||||
})
|
||||
}, pkg.MustDecode("WVpvsVqVKg9Nsh744x57h51AuWUoUR2nnh8Md-EYBQpk6ziyTuUn6PLtF2e0Eu_d")},
|
||||
|
||||
{"pending", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||
c.SetStrict(true)
|
||||
|
||||
wantErr := stub.UniqueError(0xcafe)
|
||||
n, ready := make(chan struct{}), make(chan struct{})
|
||||
go func() {
|
||||
if _, _, err := c.Cure(overrideIdent{pkg.ID{0xff}, stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
cure: func(t *pkg.TContext) error {
|
||||
close(ready)
|
||||
<-n
|
||||
return wantErr
|
||||
},
|
||||
}}); !reflect.DeepEqual(err, wantErr) {
|
||||
panic(fmt.Sprintf("Cure: error = %v, want %v", err, wantErr))
|
||||
}
|
||||
}()
|
||||
|
||||
<-ready
|
||||
wCureDone := make(chan struct{})
|
||||
go func() {
|
||||
if _, _, err := c.Cure(overrideIdent{pkg.ID{0xff}, stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
}}); !reflect.DeepEqual(err, wantErr) {
|
||||
panic(fmt.Sprintf("Cure: error = %v, want %v", err, wantErr))
|
||||
}
|
||||
close(wCureDone)
|
||||
}()
|
||||
|
||||
// check cache activity while a cure is blocking
|
||||
cureMany(t, c, []cureStep{
|
||||
{"error passthrough", newStubFile(
|
||||
pkg.KindHTTPGet,
|
||||
pkg.ID{0xff, 1},
|
||||
nil,
|
||||
nil, stub.UniqueError(0xbad),
|
||||
), nil, pkg.Checksum{}, stub.UniqueError(0xbad)},
|
||||
|
||||
{"file output", overrideIdent{pkg.ID{0xff, 2}, stubArtifact{
|
||||
kind: pkg.KindTar,
|
||||
cure: func(t *pkg.TContext) error {
|
||||
return os.WriteFile(
|
||||
t.GetWorkDir().String(),
|
||||
[]byte{0},
|
||||
0400,
|
||||
)
|
||||
},
|
||||
}}, nil, pkg.Checksum{}, errors.New(
|
||||
"non-file artifact produced regular file",
|
||||
)},
|
||||
})
|
||||
|
||||
wantErrScrub := &pkg.ScrubError{
|
||||
Errs: []error{errors.New("scrub began with pending artifacts")},
|
||||
}
|
||||
if err := c.Scrub(); !reflect.DeepEqual(err, wantErrScrub) {
|
||||
t.Fatalf("Scrub: error = %#v, want %#v", err, wantErrScrub)
|
||||
}
|
||||
|
||||
identPendingVal := reflect.ValueOf(c).Elem().FieldByName("identPending")
|
||||
identPending := reflect.NewAt(
|
||||
identPendingVal.Type(),
|
||||
unsafe.Pointer(identPendingVal.UnsafeAddr()),
|
||||
).Elem().Interface().(map[pkg.ID]<-chan struct{})
|
||||
notify := identPending[pkg.ID{0xff}]
|
||||
go close(n)
|
||||
<-notify
|
||||
<-wCureDone
|
||||
}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C")},
|
||||
|
||||
{"scrub", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||
cureMany(t, c, []cureStep{
|
||||
{"bad measured file", newStubFile(
|
||||
pkg.KindHTTPGet,
|
||||
pkg.Checksum{0xfe, 0},
|
||||
&pkg.Checksum{0xff, 0},
|
||||
[]byte{0}, nil,
|
||||
), base.Append(
|
||||
"identifier",
|
||||
pkg.Encode(pkg.Checksum{0xfe, 0}),
|
||||
), pkg.Checksum{0xff, 0}, nil},
|
||||
})
|
||||
|
||||
for _, p := range [][]string{
|
||||
{"identifier", "invalid"},
|
||||
{"identifier", pkg.Encode(pkg.ID{0xfe, 0xff})},
|
||||
{"checksum", "invalid"},
|
||||
} {
|
||||
if err := os.WriteFile(
|
||||
base.Append(p...).String(),
|
||||
nil,
|
||||
0400,
|
||||
); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, p := range [][]string{
|
||||
{"../nonexistent", "checksum", pkg.Encode(pkg.Checksum{0xff, 0xff})},
|
||||
{"../nonexistent", "identifier", pkg.Encode(pkg.Checksum{0xfe, 0xfe})},
|
||||
} {
|
||||
if err := os.Symlink(
|
||||
p[0],
|
||||
base.Append(p[1:]...).String(),
|
||||
); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
wantErr := &pkg.ScrubError{
|
||||
ChecksumMismatches: []pkg.ChecksumMismatchError{
|
||||
{Got: pkg.MustDecode(
|
||||
"vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX",
|
||||
), Want: pkg.Checksum{0xff, 0}},
|
||||
},
|
||||
DanglingIdentifiers: []pkg.ID{
|
||||
{0xfe, 0},
|
||||
{0xfe, 0xfe},
|
||||
{0xfe, 0xff},
|
||||
},
|
||||
Errs: []error{
|
||||
pkg.InvalidFileModeError(fs.ModeSymlink),
|
||||
base64.CorruptInputError(4),
|
||||
base64.CorruptInputError(8),
|
||||
&os.PathError{
|
||||
Op: "readlink",
|
||||
Path: base.Append("identifier", pkg.Encode(pkg.ID{0xfe, 0xff})).String(),
|
||||
Err: syscall.EINVAL,
|
||||
},
|
||||
base64.CorruptInputError(4),
|
||||
},
|
||||
}
|
||||
if err := c.Scrub(); !reflect.DeepEqual(err, wantErr) {
|
||||
t.Fatalf("Scrub: error =\n%s\nwant\n%s", err, wantErr)
|
||||
}
|
||||
}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C")},
|
||||
}
|
||||
checkWithCache(t, testCases)
|
||||
}
|
||||
|
||||
func TestErrors(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
err error
|
||||
want string
|
||||
}{
|
||||
{"InvalidLookupError", pkg.InvalidLookupError{
|
||||
0xff, 0xf0,
|
||||
}, "attempting to look up non-dependency artifact __AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"},
|
||||
|
||||
{"InvalidArtifactError", pkg.InvalidArtifactError{
|
||||
0xff, 0xfd,
|
||||
}, "artifact __0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA cannot be cured"},
|
||||
|
||||
{"ChecksumMismatchError", &pkg.ChecksumMismatchError{
|
||||
Want: (pkg.Checksum)(bytes.Repeat([]byte{
|
||||
0x75, 0xe6, 0x9d, 0x6d, 0xe7, 0x9f,
|
||||
}, 8)),
|
||||
}, "got AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" +
|
||||
" instead of deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"},
|
||||
|
||||
{"ResponseStatusError", pkg.ResponseStatusError(
|
||||
http.StatusNotAcceptable,
|
||||
), "the requested URL returned non-OK status: Not Acceptable"},
|
||||
|
||||
{"DisallowedTypeflagError", pkg.DisallowedTypeflagError(
|
||||
tar.TypeChar,
|
||||
), "disallowed typeflag '3'"},
|
||||
|
||||
{"InvalidFileModeError", pkg.InvalidFileModeError(
|
||||
fs.ModeSymlink | 0777,
|
||||
), "artifact did not produce a regular file or directory"},
|
||||
|
||||
{"NoOutputError", pkg.NoOutputError{
|
||||
// empty struct
|
||||
}, "artifact cured successfully but did not produce any output"},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if got := tc.err.Error(); got != tc.want {
|
||||
t.Errorf("Error: %q, want %q", got, tc.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestScrubError(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
err pkg.ScrubError
|
||||
want string
|
||||
unwrap []error
|
||||
}{
|
||||
{"full", pkg.ScrubError{
|
||||
ChecksumMismatches: []pkg.ChecksumMismatchError{
|
||||
{Want: pkg.MustDecode("CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
||||
},
|
||||
DanglingIdentifiers: []pkg.ID{
|
||||
(pkg.ID)(bytes.Repeat([]byte{0x75, 0xe6, 0x9d, 0x6d, 0xe7, 0x9f}, 8)),
|
||||
(pkg.ID)(bytes.Repeat([]byte{0x71, 0xa7, 0xde, 0x6d, 0xa6, 0xde}, 8)),
|
||||
},
|
||||
Errs: []error{
|
||||
stub.UniqueError(0xcafe),
|
||||
stub.UniqueError(0xbad),
|
||||
stub.UniqueError(0xff),
|
||||
},
|
||||
}, `checksum mismatches:
|
||||
got AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA instead of CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN
|
||||
|
||||
dangling identifiers:
|
||||
deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef
|
||||
cafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe
|
||||
|
||||
errors during scrub:
|
||||
unique error 51966 injected by the test suite
|
||||
unique error 2989 injected by the test suite
|
||||
unique error 255 injected by the test suite
|
||||
`, []error{
|
||||
&pkg.ChecksumMismatchError{Want: pkg.MustDecode("CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
||||
stub.UniqueError(0xcafe),
|
||||
stub.UniqueError(0xbad),
|
||||
stub.UniqueError(0xff),
|
||||
}},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if got := tc.err.Error(); got != tc.want {
|
||||
t.Errorf("Error:\n\n%s\n\nwant\n\n%s", got, tc.want)
|
||||
}
|
||||
|
||||
if unwrap := tc.err.Unwrap(); !reflect.DeepEqual(unwrap, tc.unwrap) {
|
||||
t.Errorf("Unwrap: %#v, want %#v", unwrap, tc.unwrap)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("nonexistent", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
wantErr := &os.PathError{
|
||||
Op: "mkdir",
|
||||
Path: container.Nonexistent,
|
||||
Err: syscall.ENOENT,
|
||||
}
|
||||
if _, err := pkg.New(
|
||||
t.Context(),
|
||||
message.New(nil),
|
||||
0, check.MustAbs(container.Nonexistent),
|
||||
); !reflect.DeepEqual(err, wantErr) {
|
||||
t.Errorf("New: error = %#v, want %#v", err, wantErr)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("permission", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tempDir := check.MustAbs(t.TempDir())
|
||||
if err := os.Chmod(tempDir.String(), 0); err != nil {
|
||||
t.Fatal(err)
|
||||
} else {
|
||||
t.Cleanup(func() {
|
||||
if err = os.Chmod(tempDir.String(), 0700); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
wantErr := &os.PathError{
|
||||
Op: "mkdir",
|
||||
Path: tempDir.Append("cache").String(),
|
||||
Err: syscall.EACCES,
|
||||
}
|
||||
if _, err := pkg.New(
|
||||
t.Context(),
|
||||
message.New(nil),
|
||||
0, tempDir.Append("cache"),
|
||||
); !reflect.DeepEqual(err, wantErr) {
|
||||
t.Errorf("New: error = %#v, want %#v", err, wantErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
238
internal/pkg/tar.go
Normal file
@@ -0,0 +1,238 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"compress/bzip2"
|
||||
"compress/gzip"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"hakurei.app/container/check"
|
||||
)
|
||||
|
||||
const (
|
||||
// TarUncompressed denotes an uncompressed tarball.
|
||||
TarUncompressed = iota
|
||||
// TarGzip denotes a tarball compressed via [gzip].
|
||||
TarGzip
|
||||
// TarBzip2 denotes a tarball compressed via [bzip2].
|
||||
TarBzip2
|
||||
)
|
||||
|
||||
// A tarArtifact is an [Artifact] unpacking a tarball backed by a [File].
|
||||
type tarArtifact struct {
|
||||
// Caller-supplied backing tarball.
|
||||
f Artifact
|
||||
// Compression on top of the tarball.
|
||||
compression uint64
|
||||
}
|
||||
|
||||
// tarArtifactNamed embeds tarArtifact for a [fmt.Stringer] tarball.
|
||||
type tarArtifactNamed struct {
|
||||
tarArtifact
|
||||
// Copied from tarArtifact.f.
|
||||
name string
|
||||
}
|
||||
|
||||
var _ fmt.Stringer = new(tarArtifactNamed)
|
||||
|
||||
// String returns the name of the underlying [Artifact] suffixed with unpack.
|
||||
func (a *tarArtifactNamed) String() string { return a.name + "-unpack" }
|
||||
|
||||
// NewTar returns a new [Artifact] backed by the supplied [Artifact] and
|
||||
// compression method. The source [Artifact] must be compatible with
|
||||
// [TContext.Open].
|
||||
func NewTar(a Artifact, compression uint64) Artifact {
|
||||
ta := tarArtifact{a, compression}
|
||||
if s, ok := a.(fmt.Stringer); ok {
|
||||
if name := s.String(); name != "" {
|
||||
return &tarArtifactNamed{ta, name}
|
||||
}
|
||||
}
|
||||
return &ta
|
||||
}
|
||||
|
||||
// NewHTTPGetTar is an abbreviation for NewHTTPGet passed to NewTar.
|
||||
func NewHTTPGetTar(
|
||||
hc *http.Client,
|
||||
url string,
|
||||
checksum Checksum,
|
||||
compression uint64,
|
||||
) Artifact {
|
||||
return NewTar(NewHTTPGet(hc, url, checksum), compression)
|
||||
}
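// A minimal usage sketch of the two constructors above (assumptions: an
// *http.Client named hc and a known Checksum named sum already exist; the
// URL is illustrative only):
//
//	a := NewHTTPGetTar(hc, "https://example.org/src.tar.gz", sum, TarGzip)
//	// equivalent to:
//	a = NewTar(NewHTTPGet(hc, "https://example.org/src.tar.gz", sum), TarGzip)
//	// a is then handed to a Cache (e.g. Cache.Cure) to fetch and unpack the tarball.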
|
||||
|
||||
// Kind returns the hardcoded [Kind] constant.
|
||||
func (a *tarArtifact) Kind() Kind { return KindTar }
|
||||
|
||||
// Params returns compression encoded in little endian.
|
||||
func (a *tarArtifact) Params() []byte {
|
||||
return binary.LittleEndian.AppendUint64(nil, a.compression)
|
||||
}
|
||||
|
||||
// Dependencies returns a slice containing the backing file.
|
||||
func (a *tarArtifact) Dependencies() []Artifact {
|
||||
return []Artifact{a.f}
|
||||
}
|
||||
|
||||
// A DisallowedTypeflagError describes a disallowed typeflag encountered while
|
||||
// unpacking a tarball.
|
||||
type DisallowedTypeflagError byte
|
||||
|
||||
func (e DisallowedTypeflagError) Error() string {
|
||||
return "disallowed typeflag '" + string(e) + "'"
|
||||
}
|
||||
|
||||
// Cure cures the [Artifact], producing a directory located at work.
|
||||
func (a *tarArtifact) Cure(t *TContext) (err error) {
|
||||
temp := t.GetTempDir()
|
||||
var tr io.ReadCloser
|
||||
if tr, err = t.Open(a.f); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
defer func(f io.ReadCloser) {
|
||||
closeErr := tr.Close()
|
||||
if err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
|
||||
closeErr = f.Close()
|
||||
if err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
}(tr)
|
||||
tr = io.NopCloser(tr)
|
||||
|
||||
switch a.compression {
|
||||
case TarUncompressed:
|
||||
break
|
||||
|
||||
case TarGzip:
|
||||
if tr, err = gzip.NewReader(tr); err != nil {
|
||||
return
|
||||
}
|
||||
break
|
||||
|
||||
case TarBzip2:
|
||||
tr = io.NopCloser(bzip2.NewReader(tr))
|
||||
break
|
||||
|
||||
default:
|
||||
return os.ErrInvalid
|
||||
}
|
||||
|
||||
type dirTargetPerm struct {
|
||||
path *check.Absolute
|
||||
mode fs.FileMode
|
||||
}
|
||||
var madeDirectories []dirTargetPerm
|
||||
|
||||
if err = os.MkdirAll(temp.String(), 0700); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var header *tar.Header
|
||||
r := tar.NewReader(tr)
|
||||
for header, err = r.Next(); err == nil; header, err = r.Next() {
|
||||
typeflag := header.Typeflag
|
||||
if typeflag == 0 {
|
||||
if len(header.Name) > 0 && header.Name[len(header.Name)-1] == '/' {
|
||||
typeflag = tar.TypeDir
|
||||
} else {
|
||||
typeflag = tar.TypeReg
|
||||
}
|
||||
}
|
||||
|
||||
pathname := temp.Append(header.Name)
|
||||
if typeflag >= '0' && typeflag <= '9' && typeflag != tar.TypeDir {
|
||||
if err = os.MkdirAll(pathname.Dir().String(), 0700); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
switch typeflag {
|
||||
case tar.TypeReg:
|
||||
var f *os.File
|
||||
if f, err = os.OpenFile(
|
||||
pathname.String(),
|
||||
os.O_CREATE|os.O_EXCL|os.O_WRONLY,
|
||||
header.FileInfo().Mode()&0500,
|
||||
); err != nil {
|
||||
return
|
||||
}
|
||||
if _, err = io.Copy(f, r); err != nil {
|
||||
_ = f.Close()
|
||||
return
|
||||
} else if err = f.Close(); err != nil {
|
||||
return
|
||||
}
|
||||
break
|
||||
|
||||
case tar.TypeLink:
|
||||
if err = os.Link(header.Linkname, pathname.String()); err != nil {
|
||||
return
|
||||
}
|
||||
break
|
||||
|
||||
case tar.TypeSymlink:
|
||||
if err = os.Symlink(header.Linkname, pathname.String()); err != nil {
|
||||
return
|
||||
}
|
||||
break
|
||||
|
||||
case tar.TypeDir:
|
||||
madeDirectories = append(madeDirectories, dirTargetPerm{
|
||||
path: pathname,
|
||||
mode: header.FileInfo().Mode(),
|
||||
})
|
||||
if err = os.MkdirAll(pathname.String(), 0700); err != nil {
|
||||
return
|
||||
}
|
||||
break
|
||||
|
||||
case tar.TypeXGlobalHeader:
|
||||
continue // ignore
|
||||
|
||||
default:
|
||||
return DisallowedTypeflagError(typeflag)
|
||||
}
|
||||
}
|
||||
if errors.Is(err, io.EOF) {
|
||||
err = nil
|
||||
}
|
||||
if err == nil {
|
||||
for _, e := range madeDirectories {
|
||||
if err = os.Chmod(e.path.String(), e.mode&0500); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return
|
||||
}
|
||||
|
||||
if err = os.Chmod(temp.String(), 0700); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var entries []os.DirEntry
|
||||
if entries, err = os.ReadDir(temp.String()); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if len(entries) == 1 && entries[0].IsDir() {
|
||||
p := temp.Append(entries[0].Name())
|
||||
if err = os.Chmod(p.String(), 0700); err != nil {
|
||||
return
|
||||
}
|
||||
err = os.Rename(p.String(), t.GetWorkDir().String())
|
||||
} else {
|
||||
err = os.Rename(temp.String(), t.GetWorkDir().String())
|
||||
}
|
||||
return
|
||||
}
|
||||
204
internal/pkg/tar_test.go
Normal file
@@ -0,0 +1,204 @@
|
||||
package pkg_test
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/sha512"
|
||||
"errors"
|
||||
"io/fs"
|
||||
"net/http"
|
||||
"os"
|
||||
"testing"
|
||||
"testing/fstest"
|
||||
|
||||
"hakurei.app/container/check"
|
||||
"hakurei.app/container/stub"
|
||||
"hakurei.app/internal/pkg"
|
||||
)
|
||||
|
||||
func TestTar(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
checkWithCache(t, []cacheTestCase{
|
||||
{"http", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||
checkTarHTTP(t, base, c, fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"checksum": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check": {Mode: 0400, Data: []byte{0, 0}},
|
||||
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig": {Mode: fs.ModeDir | 0700},
|
||||
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
|
||||
"identifier": {Mode: fs.ModeDir | 0700},
|
||||
"identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||
"identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||
|
||||
"work": {Mode: fs.ModeDir | 0700},
|
||||
}, pkg.MustDecode(
|
||||
"cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM",
|
||||
))
|
||||
}, pkg.MustDecode("sxbgyX-bPoezbha214n2lbQhiVfTUBkhZ0EX6zI7mmkMdrCdwuMwhMBJphLQsy94")},
|
||||
|
||||
{"http expand", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||
checkTarHTTP(t, base, c, fstest.MapFS{
|
||||
".": {Mode: fs.ModeDir | 0700},
|
||||
|
||||
"lib": {Mode: fs.ModeDir | 0700},
|
||||
"lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||
}, pkg.MustDecode(
|
||||
"CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN",
|
||||
))
|
||||
}, pkg.MustDecode("4I8wx_h7NSJTlG5lbuz-GGEXrOg0GYC3M_503LYEBhv5XGWXfNIdIY9Q3eVSYldX")},
|
||||
})
|
||||
}
|
||||
|
||||
func checkTarHTTP(
|
||||
t *testing.T,
|
||||
base *check.Absolute,
|
||||
c *pkg.Cache,
|
||||
testdataFsys fs.FS,
|
||||
wantChecksum pkg.Checksum,
|
||||
) {
|
||||
var testdata string
|
||||
{
|
||||
var buf bytes.Buffer
|
||||
w := tar.NewWriter(&buf)
|
||||
if err := w.AddFS(testdataFsys); err != nil {
|
||||
t.Fatalf("AddFS: error = %v", err)
|
||||
}
|
||||
if err := w.Close(); err != nil {
|
||||
t.Fatalf("Close: error = %v", err)
|
||||
}
|
||||
|
||||
var zbuf bytes.Buffer
|
||||
gw := gzip.NewWriter(&zbuf)
|
||||
if _, err := gw.Write(buf.Bytes()); err != nil {
|
||||
t.Fatalf("Write: error = %v", err)
|
||||
}
|
||||
if err := gw.Close(); err != nil {
|
||||
t.Fatalf("Close: error = %v", err)
|
||||
}
|
||||
testdata = zbuf.String()
|
||||
}
|
||||
|
||||
testdataChecksum := func() pkg.Checksum {
|
||||
h := sha512.New384()
|
||||
h.Write([]byte(testdata))
|
||||
return (pkg.Checksum)(h.Sum(nil))
|
||||
}()
|
||||
|
||||
var transport http.Transport
|
||||
client := http.Client{Transport: &transport}
|
||||
transport.RegisterProtocol("file", http.NewFileTransportFS(fstest.MapFS{
|
||||
"testdata": {Data: []byte(testdata), Mode: 0400},
|
||||
}))
|
||||
|
||||
wantIdent := func() pkg.ID {
|
||||
h := sha512.New384()
|
||||
h.Write([]byte{byte(pkg.KindTar), 0, 0, 0, 0, 0, 0, 0})
|
||||
h.Write([]byte{pkg.TarGzip, 0, 0, 0, 0, 0, 0, 0})
|
||||
h.Write([]byte{byte(pkg.KindHTTPGet), 0, 0, 0, 0, 0, 0, 0})
|
||||
httpIdent := pkg.KindHTTPGet.Ident([]byte("file:///testdata"))
|
||||
h.Write(httpIdent[:])
|
||||
return pkg.ID(h.Sum(nil))
|
||||
}()
|
||||
|
||||
a := pkg.NewHTTPGetTar(
|
||||
&client,
|
||||
"file:///testdata",
|
||||
testdataChecksum,
|
||||
pkg.TarGzip,
|
||||
)
|
||||
|
||||
if id := pkg.Ident(a); id != wantIdent {
|
||||
t.Fatalf("Ident: %s, want %s", pkg.Encode(id), pkg.Encode(wantIdent))
|
||||
}
|
||||
|
||||
tarDir := stubArtifact{
|
||||
kind: pkg.KindExec,
|
||||
params: []byte("directory containing a single regular file"),
|
||||
cure: func(t *pkg.TContext) error {
|
||||
work := t.GetWorkDir()
|
||||
if err := os.MkdirAll(work.String(), 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(
|
||||
work.Append("sample.tar.gz").String(),
|
||||
[]byte(testdata),
|
||||
0400,
|
||||
)
|
||||
},
|
||||
}
|
||||
tarDirMulti := stubArtifact{
|
||||
kind: pkg.KindExec,
|
||||
params: []byte("directory containing a multiple entries"),
|
||||
cure: func(t *pkg.TContext) error {
|
||||
work := t.GetWorkDir()
|
||||
if err := os.MkdirAll(work.Append(
|
||||
"garbage",
|
||||
).String(), 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(
|
||||
work.Append("sample.tar.gz").String(),
|
||||
[]byte(testdata),
|
||||
0400,
|
||||
)
|
||||
},
|
||||
}
|
||||
tarDirType := stubArtifact{
|
||||
kind: pkg.KindExec,
|
||||
params: []byte("directory containing a symbolic link"),
|
||||
cure: func(t *pkg.TContext) error {
|
||||
work := t.GetWorkDir()
|
||||
if err := os.MkdirAll(work.String(), 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Symlink(
|
||||
work.String(),
|
||||
work.Append("sample.tar.gz").String(),
|
||||
)
|
||||
},
|
||||
}
|
||||
// destroy these to avoid including them in the flatten test case
|
||||
defer newDestroyArtifactFunc(tarDir)(t, base, c)
|
||||
defer newDestroyArtifactFunc(tarDirMulti)(t, base, c)
|
||||
defer newDestroyArtifactFunc(tarDirType)(t, base, c)
|
||||
|
||||
cureMany(t, c, []cureStep{
|
||||
{"file", a, base.Append(
|
||||
"identifier",
|
||||
pkg.Encode(wantIdent),
|
||||
), wantChecksum, nil},
|
||||
|
||||
{"directory", pkg.NewTar(
|
||||
tarDir,
|
||||
pkg.TarGzip,
|
||||
), ignorePathname, wantChecksum, nil},
|
||||
|
||||
{"multiple entries", pkg.NewTar(
|
||||
tarDirMulti,
|
||||
pkg.TarGzip,
|
||||
), nil, pkg.Checksum{}, errors.New(
|
||||
"input directory does not contain a single regular file",
|
||||
)},
|
||||
|
||||
{"bad type", pkg.NewTar(
|
||||
tarDirType,
|
||||
pkg.TarGzip,
|
||||
), nil, pkg.Checksum{}, errors.New(
|
||||
"input directory does not contain a single regular file",
|
||||
)},
|
||||
|
||||
{"error passthrough", pkg.NewTar(stubArtifact{
|
||||
kind: pkg.KindExec,
|
||||
params: []byte("doomed artifact"),
|
||||
cure: func(t *pkg.TContext) error {
|
||||
return stub.UniqueError(0xcafe)
|
||||
},
|
||||
}, pkg.TarGzip), nil, pkg.Checksum{}, stub.UniqueError(0xcafe)},
|
||||
})
|
||||
}
|
||||
260
internal/pkg/testdata/main.go
vendored
Normal file
@@ -0,0 +1,260 @@
|
||||
//go:build testtool
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/gob"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"hakurei.app/container/check"
|
||||
"hakurei.app/container/fhs"
|
||||
"hakurei.app/container/vfs"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.SetFlags(0)
|
||||
log.SetPrefix("testtool: ")
|
||||
|
||||
var hostNet, layers bool
|
||||
if len(os.Args) == 2 && os.Args[0] == "testtool" {
|
||||
switch os.Args[1] {
|
||||
case "net":
|
||||
hostNet = true
|
||||
log.SetPrefix("testtool(net): ")
|
||||
break
|
||||
|
||||
case "layers":
|
||||
layers = true
|
||||
log.SetPrefix("testtool(layers): ")
|
||||
break
|
||||
|
||||
default:
|
||||
log.Fatalf("Args: %q", os.Args)
|
||||
return
|
||||
}
|
||||
} else if wantArgs := []string{"testtool"}; !slices.Equal(os.Args, wantArgs) {
|
||||
log.Fatalf("Args: %q, want %q", os.Args, wantArgs)
|
||||
}
|
||||
|
||||
var overlayRoot bool
|
||||
wantEnv := []string{"HAKUREI_TEST=1"}
|
||||
if len(os.Environ()) == 2 {
|
||||
overlayRoot = true
|
||||
if !layers {
|
||||
log.SetPrefix("testtool(overlay root): ")
|
||||
}
|
||||
wantEnv = []string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"}
|
||||
}
|
||||
if !slices.Equal(wantEnv, os.Environ()) {
|
||||
log.Fatalf("Environ: %q, want %q", os.Environ(), wantEnv)
|
||||
}
|
||||
|
||||
var overlayWork bool
|
||||
const (
|
||||
wantExec = "/opt/bin/testtool"
|
||||
wantExecWork = "/work/bin/testtool"
|
||||
)
|
||||
var iftPath string
|
||||
if got, err := os.Executable(); err != nil {
|
||||
log.Fatalf("Executable: error = %v", err)
|
||||
} else {
|
||||
iftPath = path.Join(path.Dir(path.Dir(got)), "ift")
|
||||
|
||||
if got != wantExec {
|
||||
switch got {
|
||||
case wantExecWork:
|
||||
overlayWork = true
|
||||
log.SetPrefix("testtool(overlay work): ")
|
||||
|
||||
default:
|
||||
log.Fatalf("Executable: %q, want %q", got, wantExec)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
wantHostname := "cure"
|
||||
if hostNet {
|
||||
wantHostname += "-net"
|
||||
}
|
||||
|
||||
if hostname, err := os.Hostname(); err != nil {
|
||||
log.Fatalf("Hostname: error = %v", err)
|
||||
} else if hostname != wantHostname {
|
||||
log.Fatalf("Hostname: %q, want %q", hostname, wantHostname)
|
||||
}
|
||||
|
||||
var m *vfs.MountInfo
|
||||
if f, err := os.Open(fhs.Proc + "self/mountinfo"); err != nil {
|
||||
log.Fatalf("Open: error = %v", err)
|
||||
} else {
|
||||
err = vfs.NewMountInfoDecoder(f).Decode(&m)
|
||||
closeErr := f.Close()
|
||||
if err != nil {
|
||||
log.Fatalf("Decode: error = %v", err)
|
||||
}
|
||||
if closeErr != nil {
|
||||
log.Fatalf("Close: error = %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if ift, err := net.Interfaces(); err != nil {
|
||||
log.Fatal(err)
|
||||
} else if !hostNet {
|
||||
if len(ift) != 1 || ift[0].Name != "lo" {
|
||||
log.Fatalln("got interfaces", strings.Join(slices.Collect(func(yield func(ifn string) bool) {
|
||||
for _, ifi := range ift {
|
||||
if !yield(ifi.Name) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}), ", "))
|
||||
}
|
||||
} else {
|
||||
var iftParent []net.Interface
|
||||
|
||||
var r *os.File
|
||||
if r, err = os.Open(iftPath); err != nil {
|
||||
log.Fatal(err)
|
||||
} else {
|
||||
err = gob.NewDecoder(r).Decode(&iftParent)
|
||||
closeErr := r.Close()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if closeErr != nil {
|
||||
log.Fatal(closeErr)
|
||||
}
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(ift, iftParent) {
|
||||
log.Fatalf("Interfaces: %#v, want %#v", ift, iftParent)
|
||||
}
|
||||
}
|
||||
|
||||
const checksumEmptyDir = "MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"
|
||||
ident := "U2cbgVgEtjfRuvHfE1cQnZ3t8yoexULQyo_VLgvxAVJSsobMcNaFIsuDWtmt7kzK"
|
||||
log.Println(m)
|
||||
next := func() { m = m.Next; log.Println(m) }
|
||||
|
||||
if overlayRoot {
|
||||
ident = "5ey2wpmMpj483YYa7ZZQciYLA2cx3_l167JCqWW4Pd-5DVp81dj9EsBtVTwYptF6"
|
||||
|
||||
if m.Root != "/" || m.Target != "/" ||
|
||||
m.Source != "overlay" || m.FsType != "overlay" {
|
||||
log.Fatal("unexpected root mount entry")
|
||||
}
|
||||
var lowerdir string
|
||||
for _, o := range strings.Split(m.FsOptstr, ",") {
|
||||
const lowerdirKey = "lowerdir="
|
||||
if strings.HasPrefix(o, lowerdirKey) {
|
||||
lowerdir = o[len(lowerdirKey):]
|
||||
}
|
||||
}
|
||||
if !layers {
|
||||
if path.Base(lowerdir) != checksumEmptyDir {
|
||||
log.Fatal("unexpected artifact checksum")
|
||||
}
|
||||
} else {
|
||||
ident = "tfjrsVuBuFgzWgwz-yPppFtylYuC1VFWnKhyBiHbWTGkyz8lt7Ee9QXWaIHPXs4x"
|
||||
|
||||
lowerdirsEscaped := strings.Split(lowerdir, ":")
|
||||
lowerdirs := lowerdirsEscaped[:0]
|
||||
// ignore the option separator since it does not appear in ident
|
||||
for i, e := range lowerdirsEscaped {
|
||||
if len(e) > 0 &&
|
||||
e[len(e)-1] == check.SpecialOverlayEscape[0] &&
|
||||
(len(e) == 1 || e[len(e)-2] != check.SpecialOverlayEscape[0]) {
|
||||
// ignore escaped pathname separator since it does not
|
||||
// appear in ident
|
||||
|
||||
e = e[:len(e)-1]
|
||||
if len(lowerdirsEscaped) != i {
|
||||
lowerdirsEscaped[i+1] = e + lowerdirsEscaped[i+1]
|
||||
continue
|
||||
}
|
||||
}
|
||||
lowerdirs = append(lowerdirs, e)
|
||||
}
|
||||
|
||||
if len(lowerdirs) != 2 ||
|
||||
path.Base(lowerdirs[0]) != "MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU" ||
|
||||
path.Base(lowerdirs[1]) != "nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK" {
|
||||
log.Fatalf("unexpected lowerdirs %s", strings.Join(lowerdirs, ", "))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if hostNet {
|
||||
ident = "QdsJhGgnk5N2xdUNGcndXQxFKifxf1V_2t9X8CQ-pDcg24x6mGJC_BiLfGbs6Qml"
|
||||
}
|
||||
|
||||
if m.Root != "/sysroot" || m.Target != "/" {
|
||||
log.Fatal("unexpected root mount entry")
|
||||
}
|
||||
|
||||
next()
|
||||
if path.Base(m.Root) != "OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb" {
|
||||
log.Fatal("unexpected file artifact checksum")
|
||||
}
|
||||
|
||||
next()
|
||||
if path.Base(m.Root) != checksumEmptyDir {
|
||||
log.Fatal("unexpected artifact checksum")
|
||||
}
|
||||
}
|
||||
|
||||
next() // testtool artifact
|
||||
|
||||
next()
|
||||
if overlayWork {
|
||||
ident = "acaDzHZv40dZaz4cGAXayqbRMgbEOuiuiUijZL8IgDQvyeCNMFE3onBMYfny-kXA"
|
||||
if m.Root != "/" || m.Target != "/work" ||
|
||||
m.Source != "overlay" || m.FsType != "overlay" {
|
||||
log.Fatal("unexpected work mount entry")
|
||||
}
|
||||
} else {
|
||||
if path.Base(m.Root) != ident || m.Target != "/work" {
|
||||
log.Fatal("unexpected work mount entry")
|
||||
}
|
||||
}
|
||||
|
||||
next()
|
||||
if path.Base(m.Root) != ident || m.Target != "/tmp" {
|
||||
log.Fatal("unexpected temp mount entry")
|
||||
}
|
||||
|
||||
next()
|
||||
if m.Root != "/" || m.Target != "/proc" || m.Source != "proc" || m.FsType != "proc" {
|
||||
log.Fatal("unexpected proc mount entry")
|
||||
}
|
||||
|
||||
next()
|
||||
if m.Root != "/" || m.Target != "/dev" || m.Source != "devtmpfs" || m.FsType != "tmpfs" {
|
||||
log.Fatal("unexpected dev mount entry")
|
||||
}
|
||||
|
||||
for i := 0; i < 9; i++ { // private /dev entries
|
||||
next()
|
||||
}
|
||||
|
||||
if m.Next != nil {
|
||||
log.Println("unexpected extra mount entries")
|
||||
for m.Next != nil {
|
||||
next()
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
checkData := []byte{0}
|
||||
if hostNet {
|
||||
checkData = []byte("net")
|
||||
}
|
||||
if err := os.WriteFile("check", checkData, 0400); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"path"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"hakurei.app/container/stub"
|
||||
"hakurei.app/internal/acl"
|
||||
@@ -497,6 +498,12 @@ type stubPipeWireConn struct {
|
||||
curSendmsg int
|
||||
}
|
||||
|
||||
func (conn *stubPipeWireConn) MightBlock(timeout time.Duration) {
|
||||
if timeout != 5*time.Second {
|
||||
panic("unexpected timeout " + timeout.String())
|
||||
}
|
||||
}
|
||||
|
||||
// Recvmsg marshals and copies a stubMessage prepared ahead of time.
|
||||
func (conn *stubPipeWireConn) Recvmsg(p, _ []byte, _ int) (n, _, recvflags int, err error) {
|
||||
defer func() { conn.curRecvmsg++ }()
|
||||
|
||||
@@ -36,7 +36,7 @@ libzstd.so.1 = /usr/lib/libzstd.so.1 (0x7ff71bfd2000)
|
||||
|
||||
{"path not absolute", `
|
||||
libzstd.so.1 => usr/lib/libzstd.so.1 (0x7ff71bfd2000)
|
||||
`, &check.AbsoluteError{Pathname: "usr/lib/libzstd.so.1"}},
|
||||
`, check.AbsoluteError("usr/lib/libzstd.so.1")},
|
||||
|
||||
{"unexpected segments", `
|
||||
meow libzstd.so.1 => /usr/lib/libzstd.so.1 (0x7ff71bfd2000)
|
||||
|
||||
71
nixos.nix
@@ -24,11 +24,38 @@ let
|
||||
getsubuid = userid: appid: userid * 100000 + 10000 + appid;
|
||||
getsubname = userid: appid: "u${toString userid}_a${toString appid}";
|
||||
getsubhome = userid: appid: "${cfg.stateDir}/u${toString userid}/a${toString appid}";
|
||||
|
||||
mountpoints = {
|
||||
${cfg.sharefs.name} = mkIf (cfg.sharefs.source != null) {
|
||||
depends = [ cfg.sharefs.source ];
|
||||
device = "sharefs";
|
||||
fsType = "fuse.sharefs";
|
||||
noCheck = true;
|
||||
options = [
|
||||
"rw"
|
||||
"noexec"
|
||||
"nosuid"
|
||||
"nodev"
|
||||
"noatime"
|
||||
"allow_other"
|
||||
"mkdir"
|
||||
"source=${cfg.sharefs.source}"
|
||||
"setuid=${toString config.users.users.${cfg.sharefs.user}.uid}"
|
||||
"setgid=${toString config.users.groups.${cfg.sharefs.group}.gid}"
|
||||
];
|
||||
};
|
||||
};
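# Roughly the mount(8) invocation the generated fstab entry corresponds to (a sketch
# using the default mount name and the uid/gid defaults declared further down; the
# source path is whatever cfg.sharefs.source is set to):
#   mount -t fuse.sharefs sharefs /sdcard \
#     -o rw,noexec,nosuid,nodev,noatime,allow_other,mkdir,source=<source>,setuid=1023,setgid=1023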
|
||||
in
|
||||
|
||||
{
|
||||
imports = [ (import ./options.nix packages) ];
|
||||
|
||||
options = {
|
||||
# Forward declare a dummy option for VM filesystems since the real one won't exist
|
||||
# unless the VM module is actually imported.
|
||||
virtualisation.fileSystems = lib.mkOption { };
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
assertions = [
|
||||
(
|
||||
@@ -66,6 +93,10 @@ in
|
||||
) "" cfg.users;
|
||||
};
|
||||
|
||||
environment.systemPackages = optional (cfg.sharefs.source != null) cfg.sharefs.package;
|
||||
fileSystems = mountpoints;
|
||||
virtualisation.fileSystems = mountpoints;
|
||||
|
||||
home-manager =
|
||||
let
|
||||
privPackages = mapAttrs (_: userid: {
|
||||
@@ -322,25 +353,57 @@ in
|
||||
in
|
||||
{
|
||||
users = mkMerge (
|
||||
foldlAttrs (
|
||||
foldlAttrs
|
||||
(
|
||||
acc: _: fid:
|
||||
acc
|
||||
++ foldlAttrs (
|
||||
acc': _: app:
|
||||
acc' ++ [ { ${getsubname fid app.identity} = getuser fid app.identity; } ]
|
||||
) [ { ${getsubname fid 0} = getuser fid 0; } ] cfg.apps
|
||||
) [ ] cfg.users
|
||||
)
|
||||
(
|
||||
if (cfg.sharefs.source != null) then
|
||||
[
|
||||
{
|
||||
${cfg.sharefs.user} = {
|
||||
uid = lib.mkDefault 1023;
|
||||
inherit (cfg.sharefs) group;
|
||||
isSystemUser = true;
|
||||
home = cfg.sharefs.source;
|
||||
};
|
||||
|
||||
}
|
||||
]
|
||||
else
|
||||
[ ]
|
||||
)
|
||||
cfg.users
|
||||
);
|
||||
|
||||
groups = mkMerge (
|
||||
foldlAttrs (
|
||||
foldlAttrs
|
||||
(
|
||||
acc: _: fid:
|
||||
acc
|
||||
++ foldlAttrs (
|
||||
acc': _: app:
|
||||
acc' ++ [ { ${getsubname fid app.identity} = getgroup fid app.identity; } ]
|
||||
) [ { ${getsubname fid 0} = getgroup fid 0; } ] cfg.apps
|
||||
) [ ] cfg.users
|
||||
)
|
||||
(
|
||||
if (cfg.sharefs.source != null) then
|
||||
[
|
||||
{
|
||||
${cfg.sharefs.group} = {
|
||||
gid = lib.mkDefault 1023;
|
||||
};
|
||||
}
|
||||
]
|
||||
else
|
||||
[ ]
|
||||
)
|
||||
cfg.users
|
||||
);
|
||||
};
|
||||
};
|
||||
|
||||
90
options.md
@@ -809,6 +809,96 @@ package
|
||||
|
||||
|
||||
|
||||
## environment\.hakurei\.sharefs\.package
|
||||
|
||||
|
||||
|
||||
The sharefs package to use\.
|
||||
|
||||
|
||||
|
||||
*Type:*
|
||||
package
|
||||
|
||||
|
||||
|
||||
*Default:*
|
||||
` <derivation sharefs> `
|
||||
|
||||
|
||||
|
||||
## environment\.hakurei\.sharefs\.group
|
||||
|
||||
|
||||
|
||||
Name of the group to run the sharefs daemon as\.
|
||||
|
||||
|
||||
|
||||
*Type:*
|
||||
string
|
||||
|
||||
|
||||
|
||||
*Default:*
|
||||
` "sharefs" `
|
||||
|
||||
|
||||
|
||||
## environment\.hakurei\.sharefs\.name
|
||||
|
||||
|
||||
|
||||
Host path to mount sharefs on\.
|
||||
|
||||
|
||||
|
||||
*Type:*
|
||||
string
|
||||
|
||||
|
||||
|
||||
*Default:*
|
||||
` "/sdcard" `
|
||||
|
||||
|
||||
|
||||
## environment\.hakurei\.sharefs\.source
|
||||
|
||||
|
||||
|
||||
Writable backing directory\. Setting this to null disables sharefs\.
|
||||
|
||||
|
||||
|
||||
*Type:*
|
||||
null or string
|
||||
|
||||
|
||||
|
||||
*Default:*
|
||||
` null `
|
||||
|
||||
|
||||
|
||||
## environment\.hakurei\.sharefs\.user
|
||||
|
||||
|
||||
|
||||
Name of the user to run the sharefs daemon as\.
|
||||
|
||||
|
||||
|
||||
*Type:*
|
||||
string
|
||||
|
||||
|
||||
|
||||
*Default:*
|
||||
` "sharefs" `
|
||||
|
||||
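Taken together, a minimal sketch of a configuration enabling sharefs might look like the following (the `stateDir` value and the backing path are illustrative assumptions, mirroring the VM test configuration later in this change; everything else uses the defaults documented above):

```nix
environment.hakurei = rec {
  enable = true;
  stateDir = "/var/lib/hakurei";
  # a writable backing directory; sharefs is mounted on the default name "/sdcard"
  sharefs.source = "${stateDir}/sdcard";
};
```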
|
||||
|
||||
## environment\.hakurei\.shell
|
||||
|
||||
|
||||
|
||||
52
options.nix
@@ -1,8 +1,15 @@
|
||||
packages:
|
||||
{ lib, pkgs, ... }:
|
||||
{
|
||||
lib,
|
||||
pkgs,
|
||||
config,
|
||||
...
|
||||
}:
|
||||
|
||||
let
|
||||
inherit (lib) types mkOption mkEnableOption;
|
||||
|
||||
cfg = config.environment.hakurei;
|
||||
in
|
||||
|
||||
{
|
||||
@@ -40,6 +47,49 @@ in
|
||||
'';
|
||||
};
|
||||
|
||||
sharefs = {
|
||||
package = mkOption {
|
||||
type = types.package;
|
||||
default = pkgs.linkFarm "sharefs" {
|
||||
"bin/sharefs" = "${cfg.package}/libexec/sharefs";
|
||||
"bin/mount.fuse.sharefs" = "${cfg.package}/libexec/sharefs";
|
||||
};
|
||||
description = "The sharefs package to use.";
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
default = "sharefs";
|
||||
description = ''
|
||||
Name of the user to run the sharefs daemon as.
|
||||
'';
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = types.str;
|
||||
default = "sharefs";
|
||||
description = ''
|
||||
Name of the group to run the sharefs daemon as.
|
||||
'';
|
||||
};
|
||||
|
||||
name = mkOption {
|
||||
type = types.str;
|
||||
default = "/sdcard";
|
||||
description = ''
|
||||
Host path to mount sharefs on.
|
||||
'';
|
||||
};
|
||||
|
||||
source = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
description = ''
|
||||
Writable backing directory. Setting this to null disables sharefs.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
apps = mkOption {
|
||||
type =
|
||||
let
|
||||
|
||||
@@ -13,6 +13,9 @@
|
||||
wayland-scanner,
|
||||
xorg,
|
||||
|
||||
# for sharefs
|
||||
fuse3,
|
||||
|
||||
# for hpkg
|
||||
zstd,
|
||||
gnutar,
|
||||
@@ -92,6 +95,7 @@ buildGoModule rec {
|
||||
buildInputs = [
|
||||
libffi
|
||||
libseccomp
|
||||
fuse3
|
||||
acl
|
||||
wayland
|
||||
]
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
environment.hakurei = {
|
||||
environment.hakurei = rec {
|
||||
enable = true;
|
||||
stateDir = "/var/lib/hakurei";
|
||||
sharefs.source = "${stateDir}/sdcard";
|
||||
users.alice = 0;
|
||||
apps = {
|
||||
"cat.gensokyo.extern.foot.noEnablements" = {
|
||||
|
||||