forked from rosa/hakurei
Compare commits
41 Commits
pkgserver-
...
9fff868a09
| Author | SHA1 | Date | |
|---|---|---|---|
|
9fff868a09
|
|||
|
a8265d6be6
|
|||
|
5a5c67fa53
|
|||
|
58b6f6c07c
|
|||
|
f2fded0620
|
|||
|
8d759f654d
|
|||
|
82060ac154
|
|||
| c7e195fe64 | |||
| d5db9add98 | |||
| ab8abdc82b | |||
| 770fd46510 | |||
| 99f1c6aab4 | |||
| 9ee629d402 | |||
| f475dde8b9 | |||
| c43a0c41b6 | |||
| 55827f1a85 | |||
| 721bdddfa1 | |||
| fb18e599dd | |||
| ec9005c794 | |||
| c6d35b4003 | |||
| 6401533cc2 | |||
| 5d6c401beb | |||
| 0a2d6aec14 | |||
| 67b11335d6 | |||
| ef3bd1b60a | |||
| beae7c89db | |||
| ed26d1a1c2 | |||
| faa0006d47 | |||
| 796ddbc977 | |||
| 98ab020160 | |||
| 26a346036d | |||
| 4ac9c72132 | |||
| c39c07d440 | |||
| b3fa0fe271 | |||
| 92a90582bb | |||
| 2e5ac56bdf | |||
| 75133e0234 | |||
| c120d4de4f | |||
| d6af8edb4a | |||
| da25d609d5 | |||
| 95ceed0de0 |
39
.gitignore
vendored
39
.gitignore
vendored
@@ -1,16 +1,45 @@
|
|||||||
# produced by tools and text editors
|
# Binaries for programs and plugins
|
||||||
*.qcow2
|
*.exe
|
||||||
|
*.exe~
|
||||||
|
*.dll
|
||||||
|
*.so
|
||||||
|
*.dylib
|
||||||
|
*.pkg
|
||||||
|
/hakurei
|
||||||
|
|
||||||
|
# Test binary, built with `go test -c`
|
||||||
*.test
|
*.test
|
||||||
|
|
||||||
|
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||||
*.out
|
*.out
|
||||||
|
|
||||||
|
# Dependency directories (remove the comment below to include it)
|
||||||
|
# vendor/
|
||||||
|
|
||||||
|
# Go workspace file
|
||||||
|
go.work
|
||||||
|
go.work.sum
|
||||||
|
|
||||||
|
# env file
|
||||||
|
.env
|
||||||
.idea
|
.idea
|
||||||
.vscode
|
.vscode
|
||||||
|
|
||||||
# go generate
|
# go generate
|
||||||
/cmd/hakurei/LICENSE
|
/cmd/hakurei/LICENSE
|
||||||
|
/cmd/pkgserver/.sass-cache
|
||||||
/cmd/pkgserver/ui/static/*.js
|
/cmd/pkgserver/ui/static/*.js
|
||||||
/cmd/pkgserver/ui_test/static
|
/cmd/pkgserver/ui/static/*.css*
|
||||||
|
/cmd/pkgserver/ui/static/*.css.map
|
||||||
|
/cmd/pkgserver/ui_test/*.js
|
||||||
|
/cmd/pkgserver/ui_test/lib/*.js
|
||||||
|
/cmd/pkgserver/ui_test/lib/*.css*
|
||||||
|
/cmd/pkgserver/ui_test/lib/*.css.map
|
||||||
/internal/pkg/testdata/testtool
|
/internal/pkg/testdata/testtool
|
||||||
/internal/rosa/hakurei_current.tar.gz
|
/internal/rosa/hakurei_current.tar.gz
|
||||||
|
|
||||||
# cmd/dist default destination
|
# release
|
||||||
/dist
|
/dist/hakurei-*
|
||||||
|
|
||||||
|
# interactive nixos vm
|
||||||
|
nixos.qcow2
|
||||||
@@ -1,5 +1,5 @@
|
|||||||
<p align="center">
|
<p align="center">
|
||||||
<a href="https://git.gensokyo.uk/rosa/hakurei">
|
<a href="https://git.gensokyo.uk/security/hakurei">
|
||||||
<picture>
|
<picture>
|
||||||
<img src="https://basement.gensokyo.uk/images/yukari1.png" width="200px" alt="Yukari">
|
<img src="https://basement.gensokyo.uk/images/yukari1.png" width="200px" alt="Yukari">
|
||||||
</picture>
|
</picture>
|
||||||
@@ -8,16 +8,16 @@
|
|||||||
|
|
||||||
<p align="center">
|
<p align="center">
|
||||||
<a href="https://pkg.go.dev/hakurei.app"><img src="https://pkg.go.dev/badge/hakurei.app.svg" alt="Go Reference" /></a>
|
<a href="https://pkg.go.dev/hakurei.app"><img src="https://pkg.go.dev/badge/hakurei.app.svg" alt="Go Reference" /></a>
|
||||||
<a href="https://git.gensokyo.uk/rosa/hakurei/actions"><img src="https://git.gensokyo.uk/rosa/hakurei/actions/workflows/test.yml/badge.svg?branch=staging&style=flat-square" alt="Gitea Workflow Status" /></a>
|
<a href="https://git.gensokyo.uk/security/hakurei/actions"><img src="https://git.gensokyo.uk/security/hakurei/actions/workflows/test.yml/badge.svg?branch=staging&style=flat-square" alt="Gitea Workflow Status" /></a>
|
||||||
<br/>
|
<br/>
|
||||||
<a href="https://git.gensokyo.uk/rosa/hakurei/releases"><img src="https://img.shields.io/gitea/v/release/rosa/hakurei?gitea_url=https%3A%2F%2Fgit.gensokyo.uk&color=purple" alt="Release" /></a>
|
<a href="https://git.gensokyo.uk/security/hakurei/releases"><img src="https://img.shields.io/gitea/v/release/security/hakurei?gitea_url=https%3A%2F%2Fgit.gensokyo.uk&color=purple" alt="Release" /></a>
|
||||||
<a href="https://goreportcard.com/report/hakurei.app"><img src="https://goreportcard.com/badge/hakurei.app" alt="Go Report Card" /></a>
|
<a href="https://goreportcard.com/report/hakurei.app"><img src="https://goreportcard.com/badge/hakurei.app" alt="Go Report Card" /></a>
|
||||||
<a href="https://hakurei.app"><img src="https://img.shields.io/website?url=https%3A%2F%2Fhakurei.app" alt="Website" /></a>
|
<a href="https://hakurei.app"><img src="https://img.shields.io/website?url=https%3A%2F%2Fhakurei.app" alt="Website" /></a>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
Hakurei is a tool for running sandboxed desktop applications as dedicated
|
Hakurei is a tool for running sandboxed desktop applications as dedicated
|
||||||
subordinate users on the Linux kernel. It implements the application container
|
subordinate users on the Linux kernel. It implements the application container
|
||||||
of [planterette (WIP)](https://git.gensokyo.uk/rosa/planterette), a
|
of [planterette (WIP)](https://git.gensokyo.uk/security/planterette), a
|
||||||
self-contained Android-like package manager with modern security features.
|
self-contained Android-like package manager with modern security features.
|
||||||
|
|
||||||
Interaction with hakurei happens entirely through structures described by
|
Interaction with hakurei happens entirely through structures described by
|
||||||
|
|||||||
6
all.sh
6
all.sh
@@ -1,6 +0,0 @@
|
|||||||
#!/bin/sh -e
|
|
||||||
|
|
||||||
TOOLCHAIN_VERSION="$(go version)"
|
|
||||||
cd "$(dirname -- "$0")/"
|
|
||||||
echo "# Building cmd/dist using ${TOOLCHAIN_VERSION}."
|
|
||||||
go run -v --tags=dist ./cmd/dist
|
|
||||||
237
cmd/dist/main.go
vendored
237
cmd/dist/main.go
vendored
@@ -1,237 +0,0 @@
|
|||||||
//go:build dist
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"archive/tar"
|
|
||||||
"compress/gzip"
|
|
||||||
"context"
|
|
||||||
"crypto/sha512"
|
|
||||||
_ "embed"
|
|
||||||
"encoding/hex"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/fs"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"os/signal"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
)
|
|
||||||
|
|
||||||
// getenv looks up an environment variable, and returns fallback if it is unset.
|
|
||||||
func getenv(key, fallback string) string {
|
|
||||||
if v, ok := os.LookupEnv(key); ok {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
return fallback
|
|
||||||
}
|
|
||||||
|
|
||||||
// mustRun runs a command with the current process's environment and panics
|
|
||||||
// on error or non-zero exit code.
|
|
||||||
func mustRun(ctx context.Context, name string, arg ...string) {
|
|
||||||
cmd := exec.CommandContext(ctx, name, arg...)
|
|
||||||
cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
//go:embed comp/_hakurei
|
|
||||||
var comp []byte
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
fmt.Println()
|
|
||||||
log.SetFlags(0)
|
|
||||||
log.SetPrefix("# ")
|
|
||||||
|
|
||||||
version := getenv("HAKUREI_VERSION", "untagged")
|
|
||||||
prefix := getenv("PREFIX", "/usr")
|
|
||||||
destdir := getenv("DESTDIR", "dist")
|
|
||||||
|
|
||||||
if err := os.MkdirAll(destdir, 0755); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
s, err := os.MkdirTemp(destdir, ".dist.*")
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
var code int
|
|
||||||
|
|
||||||
if err = os.RemoveAll(s); err != nil {
|
|
||||||
code = 1
|
|
||||||
log.Println(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if r := recover(); r != nil {
|
|
||||||
code = 1
|
|
||||||
log.Println(r)
|
|
||||||
}
|
|
||||||
|
|
||||||
os.Exit(code)
|
|
||||||
}()
|
|
||||||
|
|
||||||
ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
log.Println("Building hakurei.")
|
|
||||||
mustRun(ctx, "go", "generate", "./...")
|
|
||||||
mustRun(
|
|
||||||
ctx, "go", "build",
|
|
||||||
"-trimpath",
|
|
||||||
"-v", "-o", s,
|
|
||||||
"-ldflags=-s -w "+
|
|
||||||
"-buildid= -linkmode external -extldflags=-static "+
|
|
||||||
"-X hakurei.app/internal/info.buildVersion="+version+" "+
|
|
||||||
"-X hakurei.app/internal/info.hakureiPath="+prefix+"/bin/hakurei "+
|
|
||||||
"-X hakurei.app/internal/info.hsuPath="+prefix+"/bin/hsu "+
|
|
||||||
"-X main.hakureiPath="+prefix+"/bin/hakurei",
|
|
||||||
"./...",
|
|
||||||
)
|
|
||||||
fmt.Println()
|
|
||||||
|
|
||||||
log.Println("Testing Hakurei.")
|
|
||||||
mustRun(
|
|
||||||
ctx, "go", "test",
|
|
||||||
"-ldflags=-buildid= -linkmode external -extldflags=-static",
|
|
||||||
"./...",
|
|
||||||
)
|
|
||||||
fmt.Println()
|
|
||||||
|
|
||||||
log.Println("Creating distribution.")
|
|
||||||
const suffix = ".tar.gz"
|
|
||||||
distName := "hakurei-" + version + "-" + runtime.GOARCH
|
|
||||||
var f *os.File
|
|
||||||
if f, err = os.OpenFile(
|
|
||||||
filepath.Join(s, distName+suffix),
|
|
||||||
os.O_CREATE|os.O_EXCL|os.O_WRONLY,
|
|
||||||
0644,
|
|
||||||
); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if f == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if err = f.Close(); err != nil {
|
|
||||||
log.Println(err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
h := sha512.New()
|
|
||||||
gw := gzip.NewWriter(io.MultiWriter(f, h))
|
|
||||||
tw := tar.NewWriter(gw)
|
|
||||||
|
|
||||||
mustWriteHeader := func(name string, size int64, mode os.FileMode) {
|
|
||||||
header := tar.Header{
|
|
||||||
Name: filepath.Join(distName, name),
|
|
||||||
Size: size,
|
|
||||||
Mode: int64(mode),
|
|
||||||
Uname: "root",
|
|
||||||
Gname: "root",
|
|
||||||
}
|
|
||||||
|
|
||||||
if mode&os.ModeDir != 0 {
|
|
||||||
header.Typeflag = tar.TypeDir
|
|
||||||
fmt.Printf("%s %s\n", mode, name)
|
|
||||||
} else {
|
|
||||||
header.Typeflag = tar.TypeReg
|
|
||||||
fmt.Printf("%s %s (%d bytes)\n", mode, name, size)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = tw.WriteHeader(&header); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
mustWriteFile := func(name string, data []byte, mode os.FileMode) {
|
|
||||||
mustWriteHeader(name, int64(len(data)), mode)
|
|
||||||
if mode&os.ModeDir != 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if _, err = tw.Write(data); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
mustWriteFromPath := func(dst, src string, mode os.FileMode) {
|
|
||||||
var r *os.File
|
|
||||||
if r, err = os.Open(src); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var fi os.FileInfo
|
|
||||||
if fi, err = r.Stat(); err != nil {
|
|
||||||
_ = r.Close()
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if mode == 0 {
|
|
||||||
mode = fi.Mode()
|
|
||||||
}
|
|
||||||
|
|
||||||
mustWriteHeader(dst, fi.Size(), mode)
|
|
||||||
if _, err = io.Copy(tw, r); err != nil {
|
|
||||||
_ = r.Close()
|
|
||||||
panic(err)
|
|
||||||
} else if err = r.Close(); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
mustWriteFile(".", nil, fs.ModeDir|0755)
|
|
||||||
mustWriteFile("comp/", nil, os.ModeDir|0755)
|
|
||||||
mustWriteFile("comp/_hakurei", comp, 0644)
|
|
||||||
mustWriteFile("install.sh", []byte(`#!/bin/sh -e
|
|
||||||
cd "$(dirname -- "$0")" || exit 1
|
|
||||||
|
|
||||||
install -vDm0755 "bin/hakurei" "${DESTDIR}`+prefix+`/bin/hakurei"
|
|
||||||
install -vDm0755 "bin/sharefs" "${DESTDIR}`+prefix+`/bin/sharefs"
|
|
||||||
|
|
||||||
install -vDm4511 "bin/hsu" "${DESTDIR}`+prefix+`/bin/hsu"
|
|
||||||
if [ ! -f "${DESTDIR}/etc/hsurc" ]; then
|
|
||||||
install -vDm0400 "hsurc.default" "${DESTDIR}/etc/hsurc"
|
|
||||||
fi
|
|
||||||
|
|
||||||
install -vDm0644 "comp/_hakurei" "${DESTDIR}`+prefix+`/share/zsh/site-functions/_hakurei"
|
|
||||||
`), 0755)
|
|
||||||
|
|
||||||
mustWriteFromPath("README.md", "README.md", 0)
|
|
||||||
mustWriteFile("hsurc.default", []byte("1000 0"), 0400)
|
|
||||||
mustWriteFromPath("bin/hsu", filepath.Join(s, "hsu"), 04511)
|
|
||||||
for _, name := range []string{
|
|
||||||
"hakurei",
|
|
||||||
"sharefs",
|
|
||||||
} {
|
|
||||||
mustWriteFromPath(
|
|
||||||
filepath.Join("bin", name),
|
|
||||||
filepath.Join(s, name),
|
|
||||||
0,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = tw.Close(); err != nil {
|
|
||||||
panic(err)
|
|
||||||
} else if err = gw.Close(); err != nil {
|
|
||||||
panic(err)
|
|
||||||
} else if err = f.Close(); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
f = nil
|
|
||||||
|
|
||||||
if err = os.WriteFile(
|
|
||||||
filepath.Join(destdir, distName+suffix+".sha512"),
|
|
||||||
append(hex.AppendEncode(nil, h.Sum(nil)), " "+distName+suffix+"\n"...),
|
|
||||||
0644,
|
|
||||||
); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
if err = os.Rename(
|
|
||||||
filepath.Join(s, distName+suffix),
|
|
||||||
filepath.Join(destdir, distName+suffix),
|
|
||||||
); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,14 +1,9 @@
|
|||||||
// The earlyinit is part of the Rosa OS initramfs and serves as the system init.
|
|
||||||
//
|
|
||||||
// This program is an internal detail of Rosa OS and is not usable on its own.
|
|
||||||
// It is not covered by the compatibility promise.
|
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strings"
|
|
||||||
. "syscall"
|
. "syscall"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -17,22 +12,6 @@ func main() {
|
|||||||
log.SetFlags(0)
|
log.SetFlags(0)
|
||||||
log.SetPrefix("earlyinit: ")
|
log.SetPrefix("earlyinit: ")
|
||||||
|
|
||||||
var (
|
|
||||||
option map[string]string
|
|
||||||
flags []string
|
|
||||||
)
|
|
||||||
if len(os.Args) > 1 {
|
|
||||||
option = make(map[string]string)
|
|
||||||
for _, s := range os.Args[1:] {
|
|
||||||
key, value, ok := strings.Cut(s, "=")
|
|
||||||
if !ok {
|
|
||||||
flags = append(flags, s)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
option[key] = value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := Mount(
|
if err := Mount(
|
||||||
"devtmpfs",
|
"devtmpfs",
|
||||||
"/dev/",
|
"/dev/",
|
||||||
@@ -76,56 +55,4 @@ func main() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// staying in rootfs, these are no longer used
|
|
||||||
must(os.Remove("/root"))
|
|
||||||
must(os.Remove("/init"))
|
|
||||||
|
|
||||||
must(os.Mkdir("/proc", 0))
|
|
||||||
mustSyscall("mount proc", Mount(
|
|
||||||
"proc",
|
|
||||||
"/proc",
|
|
||||||
"proc",
|
|
||||||
MS_NOSUID|MS_NOEXEC|MS_NODEV,
|
|
||||||
"hidepid=1",
|
|
||||||
))
|
|
||||||
|
|
||||||
must(os.Mkdir("/sys", 0))
|
|
||||||
mustSyscall("mount sysfs", Mount(
|
|
||||||
"sysfs",
|
|
||||||
"/sys",
|
|
||||||
"sysfs",
|
|
||||||
0,
|
|
||||||
"",
|
|
||||||
))
|
|
||||||
|
|
||||||
// after top level has been set up
|
|
||||||
mustSyscall("remount root", Mount(
|
|
||||||
"",
|
|
||||||
"/",
|
|
||||||
"",
|
|
||||||
MS_REMOUNT|MS_BIND|
|
|
||||||
MS_RDONLY|MS_NODEV|MS_NOSUID|MS_NOEXEC,
|
|
||||||
"",
|
|
||||||
))
|
|
||||||
|
|
||||||
must(os.WriteFile(
|
|
||||||
"/sys/module/firmware_class/parameters/path",
|
|
||||||
[]byte("/system/lib/firmware"),
|
|
||||||
0,
|
|
||||||
))
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// mustSyscall calls [log.Fatalln] if err is non-nil.
|
|
||||||
func mustSyscall(action string, err error) {
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalln("cannot "+action+":", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// must calls [log.Fatal] with err if it is non-nil.
|
|
||||||
func must(err error) {
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,7 +2,6 @@ package main
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"log"
|
"log"
|
||||||
@@ -12,11 +11,12 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
_ "unsafe" // for go:linkname
|
||||||
|
|
||||||
"hakurei.app/check"
|
|
||||||
"hakurei.app/command"
|
"hakurei.app/command"
|
||||||
"hakurei.app/ext"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/fhs"
|
"hakurei.app/container/fhs"
|
||||||
|
"hakurei.app/container/std"
|
||||||
"hakurei.app/hst"
|
"hakurei.app/hst"
|
||||||
"hakurei.app/internal/dbus"
|
"hakurei.app/internal/dbus"
|
||||||
"hakurei.app/internal/env"
|
"hakurei.app/internal/env"
|
||||||
@@ -27,19 +27,13 @@ import (
|
|||||||
|
|
||||||
// optionalErrorUnwrap calls [errors.Unwrap] and returns the resulting value
|
// optionalErrorUnwrap calls [errors.Unwrap] and returns the resulting value
|
||||||
// if it is not nil, or the original value if it is.
|
// if it is not nil, or the original value if it is.
|
||||||
func optionalErrorUnwrap(err error) error {
|
//
|
||||||
if underlyingErr := errors.Unwrap(err); underlyingErr != nil {
|
//go:linkname optionalErrorUnwrap hakurei.app/container.optionalErrorUnwrap
|
||||||
return underlyingErr
|
func optionalErrorUnwrap(err error) error
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
var errSuccess = errors.New("success")
|
|
||||||
|
|
||||||
func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErrs, out io.Writer) command.Command {
|
func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErrs, out io.Writer) command.Command {
|
||||||
var (
|
var (
|
||||||
flagVerbose bool
|
flagVerbose bool
|
||||||
flagInsecure bool
|
|
||||||
flagJSON bool
|
flagJSON bool
|
||||||
)
|
)
|
||||||
c := command.New(out, log.Printf, "hakurei", func([]string) error {
|
c := command.New(out, log.Printf, "hakurei", func([]string) error {
|
||||||
@@ -58,7 +52,6 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
|
|||||||
return nil
|
return nil
|
||||||
}).
|
}).
|
||||||
Flag(&flagVerbose, "v", command.BoolFlag(false), "Increase log verbosity").
|
Flag(&flagVerbose, "v", command.BoolFlag(false), "Increase log verbosity").
|
||||||
Flag(&flagInsecure, "insecure", command.BoolFlag(false), "Allow use of insecure compatibility options").
|
|
||||||
Flag(&flagJSON, "json", command.BoolFlag(false), "Serialise output in JSON when applicable")
|
Flag(&flagJSON, "json", command.BoolFlag(false), "Serialise output in JSON when applicable")
|
||||||
|
|
||||||
c.Command("shim", command.UsageInternal, func([]string) error { outcome.Shim(msg); return errSuccess })
|
c.Command("shim", command.UsageInternal, func([]string) error { outcome.Shim(msg); return errSuccess })
|
||||||
@@ -67,9 +60,9 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
|
|||||||
var (
|
var (
|
||||||
flagIdentifierFile int
|
flagIdentifierFile int
|
||||||
)
|
)
|
||||||
c.NewCommand("run", "Load and start container from configuration file", func(args []string) error {
|
c.NewCommand("app", "Load and start container from configuration file", func(args []string) error {
|
||||||
if len(args) < 1 {
|
if len(args) < 1 {
|
||||||
log.Fatal("run requires at least 1 argument")
|
log.Fatal("app requires at least 1 argument")
|
||||||
}
|
}
|
||||||
|
|
||||||
config := tryPath(msg, args[0])
|
config := tryPath(msg, args[0])
|
||||||
@@ -77,12 +70,7 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
|
|||||||
config.Container.Args = append(config.Container.Args, args[1:]...)
|
config.Container.Args = append(config.Container.Args, args[1:]...)
|
||||||
}
|
}
|
||||||
|
|
||||||
var flags int
|
outcome.Main(ctx, msg, config, flagIdentifierFile)
|
||||||
if flagInsecure {
|
|
||||||
flags |= hst.VAllowInsecure
|
|
||||||
}
|
|
||||||
|
|
||||||
outcome.Main(ctx, msg, config, flags, flagIdentifierFile)
|
|
||||||
panic("unreachable")
|
panic("unreachable")
|
||||||
}).
|
}).
|
||||||
Flag(&flagIdentifierFile, "identifier-fd", command.IntFlag(-1),
|
Flag(&flagIdentifierFile, "identifier-fd", command.IntFlag(-1),
|
||||||
@@ -110,7 +98,7 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
|
|||||||
flagWayland, flagX11, flagDBus, flagPipeWire, flagPulse bool
|
flagWayland, flagX11, flagDBus, flagPipeWire, flagPulse bool
|
||||||
)
|
)
|
||||||
|
|
||||||
c.NewCommand("exec", "Configure and start a permissive container", func(args []string) error {
|
c.NewCommand("run", "Configure and start a permissive container", func(args []string) error {
|
||||||
if flagIdentity < hst.IdentityStart || flagIdentity > hst.IdentityEnd {
|
if flagIdentity < hst.IdentityStart || flagIdentity > hst.IdentityEnd {
|
||||||
log.Fatalf("identity %d out of range", flagIdentity)
|
log.Fatalf("identity %d out of range", flagIdentity)
|
||||||
}
|
}
|
||||||
@@ -152,7 +140,7 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var et hst.Enablements
|
var et hst.Enablement
|
||||||
if flagWayland {
|
if flagWayland {
|
||||||
et |= hst.EWayland
|
et |= hst.EWayland
|
||||||
}
|
}
|
||||||
@@ -170,7 +158,7 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
|
|||||||
ID: flagID,
|
ID: flagID,
|
||||||
Identity: flagIdentity,
|
Identity: flagIdentity,
|
||||||
Groups: flagGroups,
|
Groups: flagGroups,
|
||||||
Enablements: &et,
|
Enablements: hst.NewEnablements(et),
|
||||||
|
|
||||||
Container: &hst.ContainerConfig{
|
Container: &hst.ContainerConfig{
|
||||||
Filesystem: []hst.FilesystemConfigJSON{
|
Filesystem: []hst.FilesystemConfigJSON{
|
||||||
@@ -198,7 +186,7 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
|
|||||||
); err != nil {
|
); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
config.SchedPriority = ext.Int(flagSchedPriority)
|
config.SchedPriority = std.Int(flagSchedPriority)
|
||||||
|
|
||||||
// bind GPU stuff
|
// bind GPU stuff
|
||||||
if et&(hst.EX11|hst.EWayland) != 0 {
|
if et&(hst.EX11|hst.EWayland) != 0 {
|
||||||
@@ -289,7 +277,7 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
outcome.Main(ctx, msg, &config, 0, -1)
|
outcome.Main(ctx, msg, &config, -1)
|
||||||
panic("unreachable")
|
panic("unreachable")
|
||||||
}).
|
}).
|
||||||
Flag(&flagDBusConfigSession, "dbus-config", command.StringFlag("builtin"),
|
Flag(&flagDBusConfigSession, "dbus-config", command.StringFlag("builtin"),
|
||||||
@@ -335,7 +323,7 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
|
|||||||
flagShort bool
|
flagShort bool
|
||||||
flagNoStore bool
|
flagNoStore bool
|
||||||
)
|
)
|
||||||
c.NewCommand("show", "Show live or local instance configuration", func(args []string) error {
|
c.NewCommand("show", "Show live or local app configuration", func(args []string) error {
|
||||||
switch len(args) {
|
switch len(args) {
|
||||||
case 0: // system
|
case 0: // system
|
||||||
printShowSystem(os.Stdout, flagShort, flagJSON)
|
printShowSystem(os.Stdout, flagShort, flagJSON)
|
||||||
|
|||||||
@@ -20,12 +20,12 @@ func TestHelp(t *testing.T) {
|
|||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
"main", []string{}, `
|
"main", []string{}, `
|
||||||
Usage: hakurei [-h | --help] [-v] [--insecure] [--json] COMMAND [OPTIONS]
|
Usage: hakurei [-h | --help] [-v] [--json] COMMAND [OPTIONS]
|
||||||
|
|
||||||
Commands:
|
Commands:
|
||||||
run Load and start container from configuration file
|
app Load and start container from configuration file
|
||||||
exec Configure and start a permissive container
|
run Configure and start a permissive container
|
||||||
show Show live or local instance configuration
|
show Show live or local app configuration
|
||||||
ps List active instances
|
ps List active instances
|
||||||
version Display version information
|
version Display version information
|
||||||
license Show full license text
|
license Show full license text
|
||||||
@@ -35,8 +35,8 @@ Commands:
|
|||||||
`,
|
`,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"exec", []string{"exec", "-h"}, `
|
"run", []string{"run", "-h"}, `
|
||||||
Usage: hakurei exec [-h | --help] [--dbus-config <value>] [--dbus-system <value>] [--mpris] [--dbus-log] [--id <value>] [-a <int>] [-g <value>] [-d <value>] [-u <value>] [--policy <value>] [--priority <int>] [--private-runtime] [--private-tmpdir] [--wayland] [-X] [--dbus] [--pipewire] [--pulse] COMMAND [OPTIONS]
|
Usage: hakurei run [-h | --help] [--dbus-config <value>] [--dbus-system <value>] [--mpris] [--dbus-log] [--id <value>] [-a <int>] [-g <value>] [-d <value>] [-u <value>] [--policy <value>] [--priority <int>] [--private-runtime] [--private-tmpdir] [--wayland] [-X] [--dbus] [--pipewire] [--pulse] COMMAND [OPTIONS]
|
||||||
|
|
||||||
Flags:
|
Flags:
|
||||||
-X Enable direct connection to X11
|
-X Enable direct connection to X11
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"hakurei.app/internal/stub"
|
"hakurei.app/container/stub"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestDecodeJSON(t *testing.T) {
|
func TestDecodeJSON(t *testing.T) {
|
||||||
|
|||||||
@@ -1,42 +1,8 @@
|
|||||||
// Hakurei runs user-specified containers as subordinate users.
|
|
||||||
//
|
|
||||||
// This program is generally invoked by another, higher level program, which
|
|
||||||
// creates container configuration via package [hst] or an implementation of it.
|
|
||||||
//
|
|
||||||
// The parent may leave files open and specify their file descriptor for various
|
|
||||||
// uses. In these cases, standard streams and netpoll files are treated as
|
|
||||||
// invalid file descriptors and rejected. All string representations must be in
|
|
||||||
// decimal.
|
|
||||||
//
|
|
||||||
// When specifying a [hst.Config] JSON stream or file to the run subcommand, the
|
|
||||||
// argument "-" is equivalent to stdin. Otherwise, file descriptor rules
|
|
||||||
// described above applies. Invalid file descriptors are treated as file names
|
|
||||||
// in their string representation, with the exception that if a netpoll file
|
|
||||||
// descriptor is attempted, the program fails.
|
|
||||||
//
|
|
||||||
// The flag --identifier-fd can be optionally specified to the run subcommand to
|
|
||||||
// receive the identifier of the newly started instance. File descriptor rules
|
|
||||||
// described above applies, and the file must be writable. This is sent after
|
|
||||||
// its state is made available, so the client must not attempt to poll for it.
|
|
||||||
// This uses the internal binary format of [hst.ID].
|
|
||||||
//
|
|
||||||
// For the show and ps subcommands, the flag --json can be applied to the main
|
|
||||||
// hakurei command to serialise output in JSON when applicable. Additionally,
|
|
||||||
// the flag --short targeting each subcommand is used to omit some information
|
|
||||||
// in both JSON and user-facing output. Only JSON-encoded output is covered
|
|
||||||
// under the compatibility promise.
|
|
||||||
//
|
|
||||||
// A template for [hst.Config] demonstrating all available configuration fields
|
|
||||||
// is returned by [hst.Template]. The JSON-encoded equivalent of this can be
|
|
||||||
// obtained via the template subcommand. Fields left unpopulated in the template
|
|
||||||
// (the direct_* family of fields, which are insecure under any configuration if
|
|
||||||
// enabled) are unsupported.
|
|
||||||
//
|
|
||||||
// For simple (but insecure) testing scenarios, the exec subcommand can be used
|
|
||||||
// to generate a simple, permissive configuration in-memory. See its help
|
|
||||||
// message for all available options.
|
|
||||||
package main
|
package main
|
||||||
|
|
||||||
|
// this works around go:embed '..' limitation
|
||||||
|
//go:generate cp ../../LICENSE .
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
_ "embed"
|
_ "embed"
|
||||||
@@ -47,13 +13,15 @@ import (
|
|||||||
"syscall"
|
"syscall"
|
||||||
|
|
||||||
"hakurei.app/container"
|
"hakurei.app/container"
|
||||||
"hakurei.app/ext"
|
|
||||||
"hakurei.app/message"
|
"hakurei.app/message"
|
||||||
)
|
)
|
||||||
|
|
||||||
//go:generate cp ../../LICENSE .
|
var (
|
||||||
|
errSuccess = errors.New("success")
|
||||||
|
|
||||||
//go:embed LICENSE
|
//go:embed LICENSE
|
||||||
var license string
|
license string
|
||||||
|
)
|
||||||
|
|
||||||
// earlyHardeningErrs are errors collected while setting up early hardening feature.
|
// earlyHardeningErrs are errors collected while setting up early hardening feature.
|
||||||
type earlyHardeningErrs struct{ yamaLSM, dumpable error }
|
type earlyHardeningErrs struct{ yamaLSM, dumpable error }
|
||||||
@@ -62,13 +30,13 @@ func main() {
|
|||||||
// early init path, skips root check and duplicate PR_SET_DUMPABLE
|
// early init path, skips root check and duplicate PR_SET_DUMPABLE
|
||||||
container.TryArgv0(nil)
|
container.TryArgv0(nil)
|
||||||
|
|
||||||
log.SetFlags(0)
|
|
||||||
log.SetPrefix("hakurei: ")
|
log.SetPrefix("hakurei: ")
|
||||||
|
log.SetFlags(0)
|
||||||
msg := message.New(log.Default())
|
msg := message.New(log.Default())
|
||||||
|
|
||||||
early := earlyHardeningErrs{
|
early := earlyHardeningErrs{
|
||||||
yamaLSM: ext.SetPtracer(0),
|
yamaLSM: container.SetPtracer(0),
|
||||||
dumpable: ext.SetDumpable(ext.SUID_DUMP_DISABLE),
|
dumpable: container.SetDumpable(container.SUID_DUMP_DISABLE),
|
||||||
}
|
}
|
||||||
|
|
||||||
if os.Geteuid() == 0 {
|
if os.Geteuid() == 0 {
|
||||||
|
|||||||
@@ -17,9 +17,8 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// tryPath attempts to read [hst.Config] from multiple sources.
|
// tryPath attempts to read [hst.Config] from multiple sources.
|
||||||
//
|
// tryPath reads from [os.Stdin] if name has value "-".
|
||||||
// tryPath reads from [os.Stdin] if name has value "-". Otherwise, name is
|
// Otherwise, name is passed to tryFd, and if that returns nil, name is passed to [os.Open].
|
||||||
// passed to tryFd, and if that returns nil, name is passed to [os.Open].
|
|
||||||
func tryPath(msg message.Msg, name string) (config *hst.Config) {
|
func tryPath(msg message.Msg, name string) (config *hst.Config) {
|
||||||
var r io.ReadCloser
|
var r io.ReadCloser
|
||||||
config = new(hst.Config)
|
config = new(hst.Config)
|
||||||
@@ -47,8 +46,7 @@ func tryPath(msg message.Msg, name string) (config *hst.Config) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// tryFd returns a [io.ReadCloser] if name represents an integer corresponding
|
// tryFd returns a [io.ReadCloser] if name represents an integer corresponding to a valid file descriptor.
|
||||||
// to a valid file descriptor.
|
|
||||||
func tryFd(msg message.Msg, name string) io.ReadCloser {
|
func tryFd(msg message.Msg, name string) io.ReadCloser {
|
||||||
if v, err := strconv.Atoi(name); err != nil {
|
if v, err := strconv.Atoi(name); err != nil {
|
||||||
if !errors.Is(err, strconv.ErrSyntax) {
|
if !errors.Is(err, strconv.ErrSyntax) {
|
||||||
@@ -62,12 +60,7 @@ func tryFd(msg message.Msg, name string) io.ReadCloser {
|
|||||||
|
|
||||||
msg.Verbosef("trying config stream from %d", v)
|
msg.Verbosef("trying config stream from %d", v)
|
||||||
fd := uintptr(v)
|
fd := uintptr(v)
|
||||||
if _, _, errno := syscall.Syscall(
|
if _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, fd, syscall.F_GETFD, 0); errno != 0 {
|
||||||
syscall.SYS_FCNTL,
|
|
||||||
fd,
|
|
||||||
syscall.F_GETFD,
|
|
||||||
0,
|
|
||||||
); errno != 0 {
|
|
||||||
if errors.Is(errno, syscall.EBADF) { // reject bad fd
|
if errors.Is(errno, syscall.EBADF) { // reject bad fd
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -82,12 +75,10 @@ func tryFd(msg message.Msg, name string) io.ReadCloser {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// shortLengthMin is the minimum length a short form identifier can have and
|
// shortLengthMin is the minimum length a short form identifier can have and still be interpreted as an identifier.
|
||||||
// still be interpreted as an identifier.
|
|
||||||
const shortLengthMin = 1 << 3
|
const shortLengthMin = 1 << 3
|
||||||
|
|
||||||
// shortIdentifier returns an eight character short representation of [hst.ID]
|
// shortIdentifier returns an eight character short representation of [hst.ID] from its random bytes.
|
||||||
// from its random bytes.
|
|
||||||
func shortIdentifier(id *hst.ID) string {
|
func shortIdentifier(id *hst.ID) string {
|
||||||
return shortIdentifierString(id.String())
|
return shortIdentifierString(id.String())
|
||||||
}
|
}
|
||||||
@@ -97,8 +88,7 @@ func shortIdentifierString(s string) string {
|
|||||||
return s[len(hst.ID{}) : len(hst.ID{})+shortLengthMin]
|
return s[len(hst.ID{}) : len(hst.ID{})+shortLengthMin]
|
||||||
}
|
}
|
||||||
|
|
||||||
// tryIdentifier attempts to match [hst.State] from a [hex] representation of
|
// tryIdentifier attempts to match [hst.State] from a [hex] representation of [hst.ID] or a prefix of its lower half.
|
||||||
// [hst.ID] or a prefix of its lower half.
|
|
||||||
func tryIdentifier(msg message.Msg, name string, s *store.Store) *hst.State {
|
func tryIdentifier(msg message.Msg, name string, s *store.Store) *hst.State {
|
||||||
const (
|
const (
|
||||||
likeShort = 1 << iota
|
likeShort = 1 << iota
|
||||||
@@ -106,8 +96,7 @@ func tryIdentifier(msg message.Msg, name string, s *store.Store) *hst.State {
|
|||||||
)
|
)
|
||||||
|
|
||||||
var likely uintptr
|
var likely uintptr
|
||||||
// half the hex representation
|
if len(name) >= shortLengthMin && len(name) <= len(hst.ID{}) { // half the hex representation
|
||||||
if len(name) >= shortLengthMin && len(name) <= len(hst.ID{}) {
|
|
||||||
// cannot safely decode here due to unknown alignment
|
// cannot safely decode here due to unknown alignment
|
||||||
for _, c := range name {
|
for _, c := range name {
|
||||||
if c >= '0' && c <= '9' {
|
if c >= '0' && c <= '9' {
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/hst"
|
"hakurei.app/hst"
|
||||||
"hakurei.app/internal/store"
|
"hakurei.app/internal/store"
|
||||||
"hakurei.app/message"
|
"hakurei.app/message"
|
||||||
|
|||||||
@@ -56,7 +56,7 @@ func printShowInstance(
|
|||||||
t := newPrinter(output)
|
t := newPrinter(output)
|
||||||
defer t.MustFlush()
|
defer t.MustFlush()
|
||||||
|
|
||||||
if err := config.Validate(hst.VAllowInsecure); err != nil {
|
if err := config.Validate(); err != nil {
|
||||||
valid = false
|
valid = false
|
||||||
if m, ok := message.GetMessage(err); ok {
|
if m, ok := message.GetMessage(err); ok {
|
||||||
mustPrint(output, "Error: "+m+"!\n\n")
|
mustPrint(output, "Error: "+m+"!\n\n")
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/hst"
|
"hakurei.app/hst"
|
||||||
"hakurei.app/internal/store"
|
"hakurei.app/internal/store"
|
||||||
"hakurei.app/message"
|
"hakurei.app/message"
|
||||||
@@ -32,7 +32,7 @@ var (
|
|||||||
PID: 0xbeef,
|
PID: 0xbeef,
|
||||||
ShimPID: 0xcafe,
|
ShimPID: 0xcafe,
|
||||||
Config: &hst.Config{
|
Config: &hst.Config{
|
||||||
Enablements: new(hst.EWayland | hst.EPipeWire),
|
Enablements: hst.NewEnablements(hst.EWayland | hst.EPipeWire),
|
||||||
Identity: 1,
|
Identity: 1,
|
||||||
Container: &hst.ContainerConfig{
|
Container: &hst.ContainerConfig{
|
||||||
Shell: check.MustAbs("/bin/sh"),
|
Shell: check.MustAbs("/bin/sh"),
|
||||||
|
|||||||
@@ -1,7 +0,0 @@
|
|||||||
//go:build !rosa
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
// hsuConfPath is an absolute pathname to the hsu configuration file. Its
|
|
||||||
// contents are interpreted by parseConfig.
|
|
||||||
const hsuConfPath = "/etc/hsurc"
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
//go:build rosa
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
// hsuConfPath is the pathname to the hsu configuration file, specific to
|
|
||||||
// Rosa OS. Its contents are interpreted by parseConfig.
|
|
||||||
const hsuConfPath = "/system/etc/hsurc"
|
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
/* keep in sync with hst */
|
/* copied from hst and must never be changed */
|
||||||
|
|
||||||
const (
|
const (
|
||||||
userOffset = 100000
|
userOffset = 100000
|
||||||
|
|||||||
@@ -1,64 +1,13 @@
|
|||||||
// hsu starts the hakurei shim as the target subordinate user.
|
|
||||||
//
|
|
||||||
// The hsu program must be installed with the setuid and setgid bit set, and
|
|
||||||
// owned by root. A configuration file must be installed at /etc/hsurc with
|
|
||||||
// permission bits 0400, and owned by root. Each line of the file specifies a
|
|
||||||
// hakurei userid to kernel uid mapping. A line consists of the decimal string
|
|
||||||
// representation of the uid of the user wishing to start hakurei containers,
|
|
||||||
// followed by a space, followed by the decimal string representation of its
|
|
||||||
// userid. Duplicate uid entries are ignored, with the first occurrence taking
|
|
||||||
// effect.
|
|
||||||
//
|
|
||||||
// For example, to map the kernel uid 1000 to the hakurei user id 0:
|
|
||||||
//
|
|
||||||
// 1000 0
|
|
||||||
//
|
|
||||||
// # Internals
|
|
||||||
//
|
|
||||||
// Hakurei and hsu holds pathnames pointing to each other set at link time. For
|
|
||||||
// this reason, a distribution of hakurei has fixed installation prefix. Since
|
|
||||||
// this program is never invoked by the user, behaviour described in the
|
|
||||||
// following paragraphs are considered an internal detail and not covered by the
|
|
||||||
// compatibility promise.
|
|
||||||
//
|
|
||||||
// After checking credentials, hsu checks via /proc/ the absolute pathname of
|
|
||||||
// its parent process, and fails if it does not match the hakurei pathname set
|
|
||||||
// at link time. This is not a security feature: the priv-side is considered
|
|
||||||
// trusted, and this feature makes no attempt to address the racy nature of
|
|
||||||
// querying /proc/, or debuggers attached to the parent process. Instead, this
|
|
||||||
// aims to discourage misuse and reduce confusion if the user accidentally
|
|
||||||
// stumbles upon this program. It also prevents accidental use of the incorrect
|
|
||||||
// installation of hsu in some environments.
|
|
||||||
//
|
|
||||||
// Since target container environment variables are set up in shim via the
|
|
||||||
// [container] infrastructure, the environment is used for parameters from the
|
|
||||||
// parent process.
|
|
||||||
//
|
|
||||||
// HAKUREI_SHIM specifies a single byte between '3' and '9' representing the
|
|
||||||
// setup pipe file descriptor. It is passed as is to the shim process and is the
|
|
||||||
// only value in the environment of the shim process. Since hsurc is not
|
|
||||||
// accessible to the parent process, leaving this unset causes hsu to print the
|
|
||||||
// corresponding hakurei user id of the parent and terminate.
|
|
||||||
//
|
|
||||||
// HAKUREI_IDENTITY specifies the identity of the instance being started and is
|
|
||||||
// used to produce the kernel uid alongside hakurei user id looked up from hsurc.
|
|
||||||
//
|
|
||||||
// HAKUREI_GROUPS specifies supplementary groups to inherit from the credentials
|
|
||||||
// of the parent process in a ' ' separated list of decimal string
|
|
||||||
// representations of gid. This has the unfortunate consequence of allowing
|
|
||||||
// users mapped via hsurc to effectively drop group membership, so special care
|
|
||||||
// must be taken to ensure this does not lead to an increase in access. This is
|
|
||||||
// not applicable to Rosa OS since unsigned code execution is not permitted
|
|
||||||
// outside hakurei containers, and is generally nonapplicable to the security
|
|
||||||
// model of hakurei, where all untrusted code runs within containers.
|
|
||||||
package main
|
package main
|
||||||
|
|
||||||
|
// minimise imports to avoid inadvertently calling init or global variable functions
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path"
|
||||||
"runtime"
|
"runtime"
|
||||||
"slices"
|
"slices"
|
||||||
"strconv"
|
"strconv"
|
||||||
@@ -67,13 +16,10 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// envShim is the name of the environment variable holding a single byte
|
// envIdentity is the name of the environment variable holding a
|
||||||
// representing the shim setup pipe file descriptor.
|
// single byte representing the shim setup pipe file descriptor.
|
||||||
envShim = "HAKUREI_SHIM"
|
envShim = "HAKUREI_SHIM"
|
||||||
// envIdentity is the name of the environment variable holding a decimal
|
// envGroups holds a ' ' separated list of string representations of
|
||||||
// string representation of the current application identity.
|
|
||||||
envIdentity = "HAKUREI_IDENTITY"
|
|
||||||
// envGroups holds a ' ' separated list of decimal string representations of
|
|
||||||
// supplementary group gid. Membership requirements are enforced.
|
// supplementary group gid. Membership requirements are enforced.
|
||||||
envGroups = "HAKUREI_GROUPS"
|
envGroups = "HAKUREI_GROUPS"
|
||||||
)
|
)
|
||||||
@@ -89,6 +35,7 @@ func main() {
|
|||||||
|
|
||||||
log.SetFlags(0)
|
log.SetFlags(0)
|
||||||
log.SetPrefix("hsu: ")
|
log.SetPrefix("hsu: ")
|
||||||
|
log.SetOutput(os.Stderr)
|
||||||
|
|
||||||
if os.Geteuid() != 0 {
|
if os.Geteuid() != 0 {
|
||||||
log.Fatal("this program must be owned by uid 0 and have the setuid bit set")
|
log.Fatal("this program must be owned by uid 0 and have the setuid bit set")
|
||||||
@@ -102,13 +49,13 @@ func main() {
|
|||||||
log.Fatal("this program must not be started by root")
|
log.Fatal("this program must not be started by root")
|
||||||
}
|
}
|
||||||
|
|
||||||
if !filepath.IsAbs(hakureiPath) {
|
if !path.IsAbs(hakureiPath) {
|
||||||
log.Fatal("this program is compiled incorrectly")
|
log.Fatal("this program is compiled incorrectly")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
var toolPath string
|
var toolPath string
|
||||||
pexe := filepath.Join("/proc", strconv.Itoa(os.Getppid()), "exe")
|
pexe := path.Join("/proc", strconv.Itoa(os.Getppid()), "exe")
|
||||||
if p, err := os.Readlink(pexe); err != nil {
|
if p, err := os.Readlink(pexe); err != nil {
|
||||||
log.Fatalf("cannot read parent executable path: %v", err)
|
log.Fatalf("cannot read parent executable path: %v", err)
|
||||||
} else if strings.HasSuffix(p, " (deleted)") {
|
} else if strings.HasSuffix(p, " (deleted)") {
|
||||||
@@ -152,6 +99,8 @@ func main() {
|
|||||||
// last possible uid outcome
|
// last possible uid outcome
|
||||||
uidEnd = 999919999
|
uidEnd = 999919999
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// cast to int for use with library functions
|
||||||
uid := int(toUser(userid, identity))
|
uid := int(toUser(userid, identity))
|
||||||
|
|
||||||
// final bounds check to catch any bugs
|
// final bounds check to catch any bugs
|
||||||
@@ -187,6 +136,7 @@ func main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// careful! users in the allowlist is effectively allowed to drop groups via hsu
|
// careful! users in the allowlist is effectively allowed to drop groups via hsu
|
||||||
|
|
||||||
if err := syscall.Setresgid(uid, uid, uid); err != nil {
|
if err := syscall.Setresgid(uid, uid, uid); err != nil {
|
||||||
log.Fatalf("cannot set gid: %v", err)
|
log.Fatalf("cannot set gid: %v", err)
|
||||||
}
|
}
|
||||||
@@ -196,21 +146,10 @@ func main() {
|
|||||||
if err := syscall.Setresuid(uid, uid, uid); err != nil {
|
if err := syscall.Setresuid(uid, uid, uid); err != nil {
|
||||||
log.Fatalf("cannot set uid: %v", err)
|
log.Fatalf("cannot set uid: %v", err)
|
||||||
}
|
}
|
||||||
|
if _, _, errno := syscall.AllThreadsSyscall(syscall.SYS_PRCTL, PR_SET_NO_NEW_PRIVS, 1, 0); errno != 0 {
|
||||||
if _, _, errno := syscall.AllThreadsSyscall(
|
|
||||||
syscall.SYS_PRCTL,
|
|
||||||
PR_SET_NO_NEW_PRIVS, 1,
|
|
||||||
0,
|
|
||||||
); errno != 0 {
|
|
||||||
log.Fatalf("cannot set no_new_privs flag: %s", errno.Error())
|
log.Fatalf("cannot set no_new_privs flag: %s", errno.Error())
|
||||||
}
|
}
|
||||||
|
if err := syscall.Exec(toolPath, []string{"hakurei", "shim"}, []string{envShim + "=" + shimSetupFd}); err != nil {
|
||||||
if err := syscall.Exec(toolPath, []string{
|
|
||||||
"hakurei",
|
|
||||||
"shim",
|
|
||||||
}, []string{
|
|
||||||
envShim + "=" + shimSetupFd,
|
|
||||||
}); err != nil {
|
|
||||||
log.Fatalf("cannot start shim: %v", err)
|
log.Fatalf("cannot start shim: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -18,9 +18,8 @@ const (
|
|||||||
useridEnd = useridStart + rangeSize - 1
|
useridEnd = useridStart + rangeSize - 1
|
||||||
)
|
)
|
||||||
|
|
||||||
// parseUint32Fast parses a string representation of an unsigned 32-bit integer
|
// parseUint32Fast parses a string representation of an unsigned 32-bit integer value
|
||||||
// value using the fast path only. This limits the range of values it is defined
|
// using the fast path only. This limits the range of values it is defined in.
|
||||||
// in but is perfectly adequate for this use case.
|
|
||||||
func parseUint32Fast(s string) (uint32, error) {
|
func parseUint32Fast(s string) (uint32, error) {
|
||||||
sLen := len(s)
|
sLen := len(s)
|
||||||
if sLen < 1 {
|
if sLen < 1 {
|
||||||
@@ -41,14 +40,12 @@ func parseUint32Fast(s string) (uint32, error) {
|
|||||||
return n, nil
|
return n, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// parseConfig reads a list of allowed users from r until it encounters puid or
|
// parseConfig reads a list of allowed users from r until it encounters puid or [io.EOF].
|
||||||
// [io.EOF].
|
|
||||||
//
|
//
|
||||||
// Each line of the file specifies a hakurei userid to kernel uid mapping. A
|
// Each line of the file specifies a hakurei userid to kernel uid mapping. A line consists
|
||||||
// line consists of the string representation of the uid of the user wishing to
|
// of the string representation of the uid of the user wishing to start hakurei containers,
|
||||||
// start hakurei containers, followed by a space, followed by the string
|
// followed by a space, followed by the string representation of its userid. Duplicate uid
|
||||||
// representation of its userid. Duplicate uid entries are ignored, with the
|
// entries are ignored, with the first occurrence taking effect.
|
||||||
// first occurrence taking effect.
|
|
||||||
//
|
//
|
||||||
// All string representations are parsed by calling parseUint32Fast.
|
// All string representations are parsed by calling parseUint32Fast.
|
||||||
func parseConfig(r io.Reader, puid uint32) (userid uint32, ok bool, err error) {
|
func parseConfig(r io.Reader, puid uint32) (userid uint32, ok bool, err error) {
|
||||||
@@ -84,6 +81,10 @@ func parseConfig(r io.Reader, puid uint32) (userid uint32, ok bool, err error) {
|
|||||||
return useridEnd + 1, false, s.Err()
|
return useridEnd + 1, false, s.Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// hsuConfPath is an absolute pathname to the hsu configuration file.
|
||||||
|
// Its contents are interpreted by parseConfig.
|
||||||
|
const hsuConfPath = "/etc/hsurc"
|
||||||
|
|
||||||
// mustParseConfig calls parseConfig to interpret the contents of hsuConfPath,
|
// mustParseConfig calls parseConfig to interpret the contents of hsuConfPath,
|
||||||
// terminating the program if an error is encountered, the syntax is incorrect,
|
// terminating the program if an error is encountered, the syntax is incorrect,
|
||||||
// or the current user is not authorised to use hsu because its uid is missing.
|
// or the current user is not authorised to use hsu because its uid is missing.
|
||||||
@@ -111,6 +112,10 @@ func mustParseConfig(puid int) (userid uint32) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// envIdentity is the name of the environment variable holding a
|
||||||
|
// string representation of the current application identity.
|
||||||
|
var envIdentity = "HAKUREI_IDENTITY"
|
||||||
|
|
||||||
// mustReadIdentity calls parseUint32Fast to interpret the value stored in envIdentity,
|
// mustReadIdentity calls parseUint32Fast to interpret the value stored in envIdentity,
|
||||||
// terminating the program if the value is not set, malformed, or out of bounds.
|
// terminating the program if the value is not set, malformed, or out of bounds.
|
||||||
func mustReadIdentity() uint32 {
|
func mustReadIdentity() uint32 {
|
||||||
|
|||||||
@@ -1,94 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"hakurei.app/check"
|
|
||||||
"hakurei.app/internal/pkg"
|
|
||||||
"hakurei.app/message"
|
|
||||||
)
|
|
||||||
|
|
||||||
// cache refers to an instance of [pkg.Cache] that might be open.
|
|
||||||
type cache struct {
|
|
||||||
ctx context.Context
|
|
||||||
msg message.Msg
|
|
||||||
|
|
||||||
// Should generally not be used directly.
|
|
||||||
c *pkg.Cache
|
|
||||||
|
|
||||||
cures, jobs int
|
|
||||||
hostAbstract, idle bool
|
|
||||||
|
|
||||||
base string
|
|
||||||
}
|
|
||||||
|
|
||||||
// open opens the underlying [pkg.Cache].
|
|
||||||
func (cache *cache) open() (err error) {
|
|
||||||
if cache.c != nil {
|
|
||||||
return os.ErrInvalid
|
|
||||||
}
|
|
||||||
|
|
||||||
if cache.base == "" {
|
|
||||||
cache.base = "cache"
|
|
||||||
}
|
|
||||||
var base *check.Absolute
|
|
||||||
if cache.base, err = filepath.Abs(cache.base); err != nil {
|
|
||||||
return
|
|
||||||
} else if base, err = check.NewAbs(cache.base); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var flags int
|
|
||||||
if cache.idle {
|
|
||||||
flags |= pkg.CSchedIdle
|
|
||||||
}
|
|
||||||
if cache.hostAbstract {
|
|
||||||
flags |= pkg.CHostAbstract
|
|
||||||
}
|
|
||||||
|
|
||||||
done := make(chan struct{})
|
|
||||||
defer close(done)
|
|
||||||
go func() {
|
|
||||||
select {
|
|
||||||
case <-cache.ctx.Done():
|
|
||||||
if testing.Testing() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
os.Exit(2)
|
|
||||||
|
|
||||||
case <-done:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
cache.msg.Verbosef("opening cache at %s", base)
|
|
||||||
cache.c, err = pkg.Open(
|
|
||||||
cache.ctx,
|
|
||||||
cache.msg,
|
|
||||||
flags,
|
|
||||||
cache.cures,
|
|
||||||
cache.jobs,
|
|
||||||
base,
|
|
||||||
)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the underlying [pkg.Cache] if it is open.
|
|
||||||
func (cache *cache) Close() {
|
|
||||||
if cache.c != nil {
|
|
||||||
cache.c.Close()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Do calls f on the underlying cache and returns its error value.
|
|
||||||
func (cache *cache) Do(f func(cache *pkg.Cache) error) error {
|
|
||||||
if cache.c == nil {
|
|
||||||
if err := cache.open(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return f(cache.c)
|
|
||||||
}
|
|
||||||
@@ -1,37 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"hakurei.app/internal/pkg"
|
|
||||||
"hakurei.app/message"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestCache(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
cm := cache{
|
|
||||||
ctx: t.Context(),
|
|
||||||
msg: message.New(log.New(os.Stderr, "check: ", 0)),
|
|
||||||
base: t.TempDir(),
|
|
||||||
|
|
||||||
hostAbstract: true, idle: true,
|
|
||||||
}
|
|
||||||
defer cm.Close()
|
|
||||||
cm.Close()
|
|
||||||
|
|
||||||
if err := cm.open(); err != nil {
|
|
||||||
t.Fatalf("open: error = %v", err)
|
|
||||||
}
|
|
||||||
if err := cm.open(); err != os.ErrInvalid {
|
|
||||||
t.Errorf("(duplicate) open: error = %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := cm.Do(func(cache *pkg.Cache) error {
|
|
||||||
return cache.Scrub(0)
|
|
||||||
}); err != nil {
|
|
||||||
t.Errorf("Scrub: error = %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,343 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/binary"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"math"
|
|
||||||
"net"
|
|
||||||
"os"
|
|
||||||
"sync"
|
|
||||||
"syscall"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
"unique"
|
|
||||||
|
|
||||||
"hakurei.app/check"
|
|
||||||
"hakurei.app/internal/pkg"
|
|
||||||
)
|
|
||||||
|
|
||||||
// daemonTimeout is the maximum amount of time cureFromIR will wait on I/O.
|
|
||||||
const daemonTimeout = 30 * time.Second
|
|
||||||
|
|
||||||
// daemonDeadline returns the deadline corresponding to daemonTimeout, or the
|
|
||||||
// zero value when running in a test.
|
|
||||||
func daemonDeadline() time.Time {
|
|
||||||
if testing.Testing() {
|
|
||||||
return time.Time{}
|
|
||||||
}
|
|
||||||
return time.Now().Add(daemonTimeout)
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
// remoteNoReply notifies that the client will not receive a cure reply.
|
|
||||||
remoteNoReply = 1 << iota
|
|
||||||
)
|
|
||||||
|
|
||||||
// cureFromIR services an IR curing request.
|
|
||||||
func cureFromIR(
|
|
||||||
cache *pkg.Cache,
|
|
||||||
conn net.Conn,
|
|
||||||
flags uint64,
|
|
||||||
) (pkg.Artifact, error) {
|
|
||||||
a, decodeErr := cache.NewDecoder(conn).Decode()
|
|
||||||
if decodeErr != nil {
|
|
||||||
_, err := conn.Write([]byte("\x00" + decodeErr.Error()))
|
|
||||||
return nil, errors.Join(decodeErr, err, conn.Close())
|
|
||||||
}
|
|
||||||
|
|
||||||
pathname, _, cureErr := cache.Cure(a)
|
|
||||||
if flags&remoteNoReply != 0 {
|
|
||||||
return a, errors.Join(cureErr, conn.Close())
|
|
||||||
}
|
|
||||||
if err := conn.SetWriteDeadline(daemonDeadline()); err != nil {
|
|
||||||
return a, errors.Join(cureErr, err, conn.Close())
|
|
||||||
}
|
|
||||||
if cureErr != nil {
|
|
||||||
_, err := conn.Write([]byte("\x00" + cureErr.Error()))
|
|
||||||
return a, errors.Join(cureErr, err, conn.Close())
|
|
||||||
}
|
|
||||||
_, err := conn.Write([]byte(pathname.String()))
|
|
||||||
if testing.Testing() && errors.Is(err, io.ErrClosedPipe) {
|
|
||||||
return a, nil
|
|
||||||
}
|
|
||||||
return a, errors.Join(err, conn.Close())
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
// specialCancel is a message consisting of a single identifier referring
|
|
||||||
// to a curing artifact to be cancelled.
|
|
||||||
specialCancel = iota
|
|
||||||
// specialAbort requests for all pending cures to be aborted. It has no
|
|
||||||
// message body.
|
|
||||||
specialAbort
|
|
||||||
|
|
||||||
// remoteSpecial denotes a special message with custom layout.
|
|
||||||
remoteSpecial = math.MaxUint64
|
|
||||||
)
|
|
||||||
|
|
||||||
// writeSpecialHeader writes the header of a remoteSpecial message.
|
|
||||||
func writeSpecialHeader(conn net.Conn, kind uint64) error {
|
|
||||||
var sh [16]byte
|
|
||||||
binary.LittleEndian.PutUint64(sh[:], remoteSpecial)
|
|
||||||
binary.LittleEndian.PutUint64(sh[8:], kind)
|
|
||||||
if n, err := conn.Write(sh[:]); err != nil {
|
|
||||||
return err
|
|
||||||
} else if n != len(sh) {
|
|
||||||
return io.ErrShortWrite
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// cancelIdent reads an identifier from conn and cancels the corresponding cure.
|
|
||||||
func cancelIdent(
|
|
||||||
cache *pkg.Cache,
|
|
||||||
conn net.Conn,
|
|
||||||
) (*pkg.ID, bool, error) {
|
|
||||||
var ident pkg.ID
|
|
||||||
if _, err := io.ReadFull(conn, ident[:]); err != nil {
|
|
||||||
return nil, false, errors.Join(err, conn.Close())
|
|
||||||
} else if err = conn.Close(); err != nil {
|
|
||||||
return nil, false, err
|
|
||||||
}
|
|
||||||
return &ident, cache.Cancel(unique.Make(ident)), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// serve services connections from a [net.UnixListener].
|
|
||||||
func serve(
|
|
||||||
ctx context.Context,
|
|
||||||
log *log.Logger,
|
|
||||||
cm *cache,
|
|
||||||
ul *net.UnixListener,
|
|
||||||
) error {
|
|
||||||
ul.SetUnlinkOnClose(true)
|
|
||||||
if cm.c == nil {
|
|
||||||
if err := cm.open(); err != nil {
|
|
||||||
return errors.Join(err, ul.Close())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
defer wg.Wait()
|
|
||||||
|
|
||||||
wg.Go(func() {
|
|
||||||
for {
|
|
||||||
if ctx.Err() != nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
conn, err := ul.AcceptUnix()
|
|
||||||
if err != nil {
|
|
||||||
if !errors.Is(err, os.ErrDeadlineExceeded) {
|
|
||||||
log.Println(err)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
wg.Go(func() {
|
|
||||||
done := make(chan struct{})
|
|
||||||
defer close(done)
|
|
||||||
go func() {
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
_ = conn.SetDeadline(time.Now())
|
|
||||||
|
|
||||||
case <-done:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
if _err := conn.SetReadDeadline(daemonDeadline()); _err != nil {
|
|
||||||
log.Println(_err)
|
|
||||||
if _err = conn.Close(); _err != nil {
|
|
||||||
log.Println(_err)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var word [8]byte
|
|
||||||
if _, _err := io.ReadFull(conn, word[:]); _err != nil {
|
|
||||||
log.Println(_err)
|
|
||||||
if _err = conn.Close(); _err != nil {
|
|
||||||
log.Println(_err)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
flags := binary.LittleEndian.Uint64(word[:])
|
|
||||||
|
|
||||||
if flags == remoteSpecial {
|
|
||||||
if _, _err := io.ReadFull(conn, word[:]); _err != nil {
|
|
||||||
log.Println(_err)
|
|
||||||
if _err = conn.Close(); _err != nil {
|
|
||||||
log.Println(_err)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch special := binary.LittleEndian.Uint64(word[:]); special {
|
|
||||||
default:
|
|
||||||
log.Printf("invalid special %d", special)
|
|
||||||
|
|
||||||
case specialCancel:
|
|
||||||
if id, ok, _err := cancelIdent(cm.c, conn); _err != nil {
|
|
||||||
log.Println(_err)
|
|
||||||
} else if !ok {
|
|
||||||
log.Println(
|
|
||||||
"attempting to cancel invalid artifact",
|
|
||||||
pkg.Encode(*id),
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
log.Println(
|
|
||||||
"cancelled artifact",
|
|
||||||
pkg.Encode(*id),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
case specialAbort:
|
|
||||||
if _err := conn.Close(); _err != nil {
|
|
||||||
log.Println(_err)
|
|
||||||
}
|
|
||||||
log.Println("aborting all pending cures")
|
|
||||||
cm.c.Abort()
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if a, _err := cureFromIR(cm.c, conn, flags); _err != nil {
|
|
||||||
log.Println(_err)
|
|
||||||
} else {
|
|
||||||
log.Printf(
|
|
||||||
"fulfilled artifact %s",
|
|
||||||
pkg.Encode(cm.c.Ident(a).Value()),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
<-ctx.Done()
|
|
||||||
if err := ul.SetDeadline(time.Now()); err != nil {
|
|
||||||
return errors.Join(err, ul.Close())
|
|
||||||
}
|
|
||||||
wg.Wait()
|
|
||||||
return ul.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// dial wraps [net.DialUnix] with a context.
|
|
||||||
func dial(ctx context.Context, addr *net.UnixAddr) (
|
|
||||||
done chan<- struct{},
|
|
||||||
conn *net.UnixConn,
|
|
||||||
err error,
|
|
||||||
) {
|
|
||||||
conn, err = net.DialUnix("unix", nil, addr)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
d := make(chan struct{})
|
|
||||||
done = d
|
|
||||||
go func() {
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
_ = conn.SetDeadline(time.Now())
|
|
||||||
|
|
||||||
case <-d:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// cureRemote cures a [pkg.Artifact] on a daemon.
|
|
||||||
func cureRemote(
|
|
||||||
ctx context.Context,
|
|
||||||
addr *net.UnixAddr,
|
|
||||||
a pkg.Artifact,
|
|
||||||
flags uint64,
|
|
||||||
) (*check.Absolute, error) {
|
|
||||||
if flags == remoteSpecial {
|
|
||||||
return nil, syscall.EINVAL
|
|
||||||
}
|
|
||||||
|
|
||||||
done, conn, err := dial(ctx, addr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer close(done)
|
|
||||||
|
|
||||||
if n, flagErr := conn.Write(binary.LittleEndian.AppendUint64(nil, flags)); flagErr != nil {
|
|
||||||
return nil, errors.Join(flagErr, conn.Close())
|
|
||||||
} else if n != 8 {
|
|
||||||
return nil, errors.Join(io.ErrShortWrite, conn.Close())
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = pkg.NewIR().EncodeAll(conn, a); err != nil {
|
|
||||||
return nil, errors.Join(err, conn.Close())
|
|
||||||
} else if err = conn.CloseWrite(); err != nil {
|
|
||||||
return nil, errors.Join(err, conn.Close())
|
|
||||||
}
|
|
||||||
|
|
||||||
if flags&remoteNoReply != 0 {
|
|
||||||
return nil, conn.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
payload, recvErr := io.ReadAll(conn)
|
|
||||||
if err = errors.Join(recvErr, conn.Close()); err != nil {
|
|
||||||
if errors.Is(err, os.ErrDeadlineExceeded) {
|
|
||||||
if cancelErr := ctx.Err(); cancelErr != nil {
|
|
||||||
err = cancelErr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(payload) > 0 && payload[0] == 0 {
|
|
||||||
return nil, errors.New(string(payload[1:]))
|
|
||||||
}
|
|
||||||
|
|
||||||
var p *check.Absolute
|
|
||||||
p, err = check.NewAbs(string(payload))
|
|
||||||
return p, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// cancelRemote cancels a [pkg.Artifact] curing on a daemon.
|
|
||||||
func cancelRemote(
|
|
||||||
ctx context.Context,
|
|
||||||
addr *net.UnixAddr,
|
|
||||||
a pkg.Artifact,
|
|
||||||
) error {
|
|
||||||
done, conn, err := dial(ctx, addr)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer close(done)
|
|
||||||
|
|
||||||
if err = writeSpecialHeader(conn, specialCancel); err != nil {
|
|
||||||
return errors.Join(err, conn.Close())
|
|
||||||
}
|
|
||||||
|
|
||||||
var n int
|
|
||||||
id := pkg.NewIR().Ident(a).Value()
|
|
||||||
if n, err = conn.Write(id[:]); err != nil {
|
|
||||||
return errors.Join(err, conn.Close())
|
|
||||||
} else if n != len(id) {
|
|
||||||
return errors.Join(io.ErrShortWrite, conn.Close())
|
|
||||||
}
|
|
||||||
return conn.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// abortRemote aborts all [pkg.Artifact] curing on a daemon.
|
|
||||||
func abortRemote(
|
|
||||||
ctx context.Context,
|
|
||||||
addr *net.UnixAddr,
|
|
||||||
) error {
|
|
||||||
done, conn, err := dial(ctx, addr)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer close(done)
|
|
||||||
|
|
||||||
err = writeSpecialHeader(conn, specialAbort)
|
|
||||||
return errors.Join(err, conn.Close())
|
|
||||||
}
|
|
||||||
@@ -1,146 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"net"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"slices"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"hakurei.app/check"
|
|
||||||
"hakurei.app/internal/pkg"
|
|
||||||
"hakurei.app/message"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestNoReply(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
if !daemonDeadline().IsZero() {
|
|
||||||
t.Fatal("daemonDeadline did not return the zero value")
|
|
||||||
}
|
|
||||||
|
|
||||||
c, err := pkg.Open(
|
|
||||||
t.Context(),
|
|
||||||
message.New(log.New(os.Stderr, "cir: ", 0)),
|
|
||||||
0, 0, 0,
|
|
||||||
check.MustAbs(t.TempDir()),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Open: error = %v", err)
|
|
||||||
}
|
|
||||||
defer c.Close()
|
|
||||||
|
|
||||||
client, server := net.Pipe()
|
|
||||||
done := make(chan struct{})
|
|
||||||
go func() {
|
|
||||||
defer close(done)
|
|
||||||
go func() {
|
|
||||||
<-t.Context().Done()
|
|
||||||
if _err := client.SetDeadline(time.Now()); _err != nil && !errors.Is(_err, io.ErrClosedPipe) {
|
|
||||||
panic(_err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
if _err := c.EncodeAll(
|
|
||||||
client,
|
|
||||||
pkg.NewFile("check", []byte{0}),
|
|
||||||
); _err != nil {
|
|
||||||
panic(_err)
|
|
||||||
} else if _err = client.Close(); _err != nil {
|
|
||||||
panic(_err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
a, cureErr := cureFromIR(c, server, remoteNoReply)
|
|
||||||
if cureErr != nil {
|
|
||||||
t.Fatalf("cureFromIR: error = %v", cureErr)
|
|
||||||
}
|
|
||||||
|
|
||||||
<-done
|
|
||||||
wantIdent := pkg.MustDecode("fiZf-ZY_Yq6qxJNrHbMiIPYCsGkUiKCRsZrcSELXTqZWtCnESlHmzV5ThhWWGGYG")
|
|
||||||
if gotIdent := c.Ident(a).Value(); gotIdent != wantIdent {
|
|
||||||
t.Errorf(
|
|
||||||
"cureFromIR: %s, want %s",
|
|
||||||
pkg.Encode(gotIdent), pkg.Encode(wantIdent),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDaemon(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
logger := log.New(&buf, "daemon: ", 0)
|
|
||||||
|
|
||||||
addr := net.UnixAddr{
|
|
||||||
Name: filepath.Join(t.TempDir(), "daemon"),
|
|
||||||
Net: "unix",
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(t.Context())
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
cm := cache{
|
|
||||||
ctx: ctx,
|
|
||||||
msg: message.New(logger),
|
|
||||||
base: t.TempDir(),
|
|
||||||
}
|
|
||||||
defer cm.Close()
|
|
||||||
|
|
||||||
ul, err := net.ListenUnix("unix", &addr)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("ListenUnix: error = %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
done := make(chan struct{})
|
|
||||||
go func() {
|
|
||||||
defer close(done)
|
|
||||||
if _err := serve(ctx, logger, &cm, ul); _err != nil {
|
|
||||||
panic(_err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
if err = cancelRemote(ctx, &addr, pkg.NewFile("nonexistent", nil)); err != nil {
|
|
||||||
t.Fatalf("cancelRemote: error = %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = abortRemote(ctx, &addr); err != nil {
|
|
||||||
t.Fatalf("abortRemote: error = %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// keep this last for synchronisation
|
|
||||||
var p *check.Absolute
|
|
||||||
p, err = cureRemote(ctx, &addr, pkg.NewFile("check", []byte{0}), 0)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("cureRemote: error = %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
cancel()
|
|
||||||
<-done
|
|
||||||
|
|
||||||
const want = "fiZf-ZY_Yq6qxJNrHbMiIPYCsGkUiKCRsZrcSELXTqZWtCnESlHmzV5ThhWWGGYG"
|
|
||||||
if got := filepath.Base(p.String()); got != want {
|
|
||||||
t.Errorf("cureRemote: %s, want %s", got, want)
|
|
||||||
}
|
|
||||||
|
|
||||||
wantLog := []string{
|
|
||||||
"",
|
|
||||||
"daemon: aborting all pending cures",
|
|
||||||
"daemon: attempting to cancel invalid artifact kQm9fmnCmXST1-MMmxzcau2oKZCXXrlZydo4PkeV5hO_2PKfeC8t98hrbV_ZZx_j",
|
|
||||||
"daemon: fulfilled artifact fiZf-ZY_Yq6qxJNrHbMiIPYCsGkUiKCRsZrcSELXTqZWtCnESlHmzV5ThhWWGGYG",
|
|
||||||
}
|
|
||||||
gotLog := strings.Split(buf.String(), "\n")
|
|
||||||
slices.Sort(gotLog)
|
|
||||||
if !slices.Equal(gotLog, wantLog) {
|
|
||||||
t.Errorf(
|
|
||||||
"serve: logged\n%s\nwant\n%s",
|
|
||||||
strings.Join(gotLog, "\n"), strings.Join(wantLog, "\n"),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
127
cmd/mbf/info.go
127
cmd/mbf/info.go
@@ -1,127 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"hakurei.app/internal/pkg"
|
|
||||||
"hakurei.app/internal/rosa"
|
|
||||||
)
|
|
||||||
|
|
||||||
// commandInfo implements the info subcommand.
|
|
||||||
func commandInfo(
|
|
||||||
cm *cache,
|
|
||||||
args []string,
|
|
||||||
w io.Writer,
|
|
||||||
writeStatus bool,
|
|
||||||
reportPath string,
|
|
||||||
) (err error) {
|
|
||||||
if len(args) == 0 {
|
|
||||||
return errors.New("info requires at least 1 argument")
|
|
||||||
}
|
|
||||||
|
|
||||||
var r *rosa.Report
|
|
||||||
if reportPath != "" {
|
|
||||||
if r, err = rosa.OpenReport(reportPath); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if closeErr := r.Close(); err == nil {
|
|
||||||
err = closeErr
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
defer r.HandleAccess(&err)()
|
|
||||||
}
|
|
||||||
|
|
||||||
// recovered by HandleAccess
|
|
||||||
mustPrintln := func(a ...any) {
|
|
||||||
if _, _err := fmt.Fprintln(w, a...); _err != nil {
|
|
||||||
panic(_err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
mustPrint := func(a ...any) {
|
|
||||||
if _, _err := fmt.Fprint(w, a...); _err != nil {
|
|
||||||
panic(_err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, name := range args {
|
|
||||||
if p, ok := rosa.ResolveName(name); !ok {
|
|
||||||
return fmt.Errorf("unknown artifact %q", name)
|
|
||||||
} else {
|
|
||||||
var suffix string
|
|
||||||
if version := rosa.Std.Version(p); version != rosa.Unversioned {
|
|
||||||
suffix += "-" + version
|
|
||||||
}
|
|
||||||
mustPrintln("name : " + name + suffix)
|
|
||||||
|
|
||||||
meta := rosa.GetMetadata(p)
|
|
||||||
mustPrintln("description : " + meta.Description)
|
|
||||||
if meta.Website != "" {
|
|
||||||
mustPrintln("website : " +
|
|
||||||
strings.TrimSuffix(meta.Website, "/"))
|
|
||||||
}
|
|
||||||
if len(meta.Dependencies) > 0 {
|
|
||||||
mustPrint("depends on :")
|
|
||||||
for _, d := range meta.Dependencies {
|
|
||||||
s := rosa.GetMetadata(d).Name
|
|
||||||
if version := rosa.Std.Version(d); version != rosa.Unversioned {
|
|
||||||
s += "-" + version
|
|
||||||
}
|
|
||||||
mustPrint(" " + s)
|
|
||||||
}
|
|
||||||
mustPrintln()
|
|
||||||
}
|
|
||||||
|
|
||||||
const statusPrefix = "status : "
|
|
||||||
if writeStatus {
|
|
||||||
if r == nil {
|
|
||||||
var f io.ReadSeekCloser
|
|
||||||
err = cm.Do(func(cache *pkg.Cache) (err error) {
|
|
||||||
f, err = cache.OpenStatus(rosa.Std.Load(p))
|
|
||||||
return
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
if errors.Is(err, os.ErrNotExist) {
|
|
||||||
mustPrintln(
|
|
||||||
statusPrefix + "not yet cured",
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
mustPrint(statusPrefix)
|
|
||||||
_, err = io.Copy(w, f)
|
|
||||||
if err = errors.Join(err, f.Close()); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if err = cm.Do(func(cache *pkg.Cache) (err error) {
|
|
||||||
status, n := r.ArtifactOf(cache.Ident(rosa.Std.Load(p)))
|
|
||||||
if status == nil {
|
|
||||||
mustPrintln(
|
|
||||||
statusPrefix + "not in report",
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
mustPrintln("size :", n)
|
|
||||||
mustPrint(statusPrefix)
|
|
||||||
if _, err = w.Write(status); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if i != len(args)-1 {
|
|
||||||
mustPrintln()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,170 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
"syscall"
|
|
||||||
"testing"
|
|
||||||
"unsafe"
|
|
||||||
|
|
||||||
"hakurei.app/internal/pkg"
|
|
||||||
"hakurei.app/internal/rosa"
|
|
||||||
"hakurei.app/message"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestInfo(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
testCases := []struct {
|
|
||||||
name string
|
|
||||||
args []string
|
|
||||||
status map[string]string
|
|
||||||
report string
|
|
||||||
want string
|
|
||||||
wantErr any
|
|
||||||
}{
|
|
||||||
{"qemu", []string{"qemu"}, nil, "", `
|
|
||||||
name : qemu-` + rosa.Std.Version(rosa.QEMU) + `
|
|
||||||
description : a generic and open source machine emulator and virtualizer
|
|
||||||
website : https://www.qemu.org
|
|
||||||
depends on : glib-` + rosa.Std.Version(rosa.GLib) + ` zstd-` + rosa.Std.Version(rosa.Zstd) + `
|
|
||||||
`, nil},
|
|
||||||
|
|
||||||
{"multi", []string{"hakurei", "hakurei-dist"}, nil, "", `
|
|
||||||
name : hakurei-` + rosa.Std.Version(rosa.Hakurei) + `
|
|
||||||
description : low-level userspace tooling for Rosa OS
|
|
||||||
website : https://hakurei.app
|
|
||||||
|
|
||||||
name : hakurei-dist-` + rosa.Std.Version(rosa.HakureiDist) + `
|
|
||||||
description : low-level userspace tooling for Rosa OS (distribution tarball)
|
|
||||||
website : https://hakurei.app
|
|
||||||
`, nil},
|
|
||||||
|
|
||||||
{"nonexistent", []string{"zlib", "\x00"}, nil, "", `
|
|
||||||
name : zlib-` + rosa.Std.Version(rosa.Zlib) + `
|
|
||||||
description : lossless data-compression library
|
|
||||||
website : https://zlib.net
|
|
||||||
|
|
||||||
`, fmt.Errorf("unknown artifact %q", "\x00")},
|
|
||||||
|
|
||||||
{"status cache", []string{"zlib", "zstd"}, map[string]string{
|
|
||||||
"zstd": "internal/pkg (amd64) on satori\n",
|
|
||||||
"hakurei": "internal/pkg (amd64) on satori\n\n",
|
|
||||||
}, "", `
|
|
||||||
name : zlib-` + rosa.Std.Version(rosa.Zlib) + `
|
|
||||||
description : lossless data-compression library
|
|
||||||
website : https://zlib.net
|
|
||||||
status : not yet cured
|
|
||||||
|
|
||||||
name : zstd-` + rosa.Std.Version(rosa.Zstd) + `
|
|
||||||
description : a fast compression algorithm
|
|
||||||
website : https://facebook.github.io/zstd
|
|
||||||
status : internal/pkg (amd64) on satori
|
|
||||||
`, nil},
|
|
||||||
|
|
||||||
{"status cache perm", []string{"zlib"}, map[string]string{
|
|
||||||
"zlib": "\x00",
|
|
||||||
}, "", `
|
|
||||||
name : zlib-` + rosa.Std.Version(rosa.Zlib) + `
|
|
||||||
description : lossless data-compression library
|
|
||||||
website : https://zlib.net
|
|
||||||
`, func(cm *cache) error {
|
|
||||||
return &os.PathError{
|
|
||||||
Op: "open",
|
|
||||||
Path: filepath.Join(cm.base, "status", pkg.Encode(cm.c.Ident(rosa.Std.Load(rosa.Zlib)).Value())),
|
|
||||||
Err: syscall.EACCES,
|
|
||||||
}
|
|
||||||
}},
|
|
||||||
|
|
||||||
{"status report", []string{"zlib"}, nil, strings.Repeat("\x00", len(pkg.Checksum{})+8), `
|
|
||||||
name : zlib-` + rosa.Std.Version(rosa.Zlib) + `
|
|
||||||
description : lossless data-compression library
|
|
||||||
website : https://zlib.net
|
|
||||||
status : not in report
|
|
||||||
`, nil},
|
|
||||||
}
|
|
||||||
for _, tc := range testCases {
|
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
var (
|
|
||||||
cm *cache
|
|
||||||
buf strings.Builder
|
|
||||||
rp string
|
|
||||||
)
|
|
||||||
|
|
||||||
if tc.status != nil || tc.report != "" {
|
|
||||||
cm = &cache{
|
|
||||||
ctx: context.Background(),
|
|
||||||
msg: message.New(log.New(os.Stderr, "info: ", 0)),
|
|
||||||
base: t.TempDir(),
|
|
||||||
}
|
|
||||||
defer cm.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
if tc.report != "" {
|
|
||||||
rp = filepath.Join(t.TempDir(), "report")
|
|
||||||
if err := os.WriteFile(
|
|
||||||
rp,
|
|
||||||
unsafe.Slice(unsafe.StringData(tc.report), len(tc.report)),
|
|
||||||
0400,
|
|
||||||
); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if tc.status != nil {
|
|
||||||
for name, status := range tc.status {
|
|
||||||
p, ok := rosa.ResolveName(name)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("invalid name %q", name)
|
|
||||||
}
|
|
||||||
perm := os.FileMode(0400)
|
|
||||||
if status == "\x00" {
|
|
||||||
perm = 0
|
|
||||||
}
|
|
||||||
if err := cm.Do(func(cache *pkg.Cache) error {
|
|
||||||
return os.WriteFile(filepath.Join(
|
|
||||||
cm.base,
|
|
||||||
"status",
|
|
||||||
pkg.Encode(cache.Ident(rosa.Std.Load(p)).Value()),
|
|
||||||
), unsafe.Slice(unsafe.StringData(status), len(status)), perm)
|
|
||||||
}); err != nil {
|
|
||||||
t.Fatalf("Do: error = %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var wantErr error
|
|
||||||
switch c := tc.wantErr.(type) {
|
|
||||||
case error:
|
|
||||||
wantErr = c
|
|
||||||
case func(cm *cache) error:
|
|
||||||
wantErr = c(cm)
|
|
||||||
default:
|
|
||||||
if tc.wantErr != nil {
|
|
||||||
t.Fatalf("invalid wantErr %#v", tc.wantErr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := commandInfo(
|
|
||||||
cm,
|
|
||||||
tc.args,
|
|
||||||
&buf,
|
|
||||||
cm != nil,
|
|
||||||
rp,
|
|
||||||
); !reflect.DeepEqual(err, wantErr) {
|
|
||||||
t.Fatalf("commandInfo: error = %v, want %v", err, wantErr)
|
|
||||||
}
|
|
||||||
|
|
||||||
if got := buf.String(); got != strings.TrimPrefix(tc.want, "\n") {
|
|
||||||
t.Errorf("commandInfo:\n%s\nwant\n%s", got, tc.want)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
419
cmd/mbf/main.go
419
cmd/mbf/main.go
@@ -1,43 +1,29 @@
|
|||||||
// The mbf program is a frontend for [hakurei.app/internal/rosa].
|
|
||||||
//
|
|
||||||
// This program is not covered by the compatibility promise. The command line
|
|
||||||
// interface, available packages and their behaviour, and even the on-disk
|
|
||||||
// format, may change at any time.
|
|
||||||
//
|
|
||||||
// # Name
|
|
||||||
//
|
|
||||||
// The name mbf stands for maiden's best friend, as a tribute to the DOOM source
|
|
||||||
// port of [the same name]. This name is a placeholder and is subject to change.
|
|
||||||
//
|
|
||||||
// [the same name]: https://www.doomwiki.org/wiki/MBF
|
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"crypto/sha512"
|
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"log"
|
"log"
|
||||||
"net"
|
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
"unique"
|
"unique"
|
||||||
|
|
||||||
"hakurei.app/check"
|
|
||||||
"hakurei.app/command"
|
"hakurei.app/command"
|
||||||
"hakurei.app/container"
|
"hakurei.app/container"
|
||||||
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/container/fhs"
|
||||||
"hakurei.app/container/seccomp"
|
"hakurei.app/container/seccomp"
|
||||||
"hakurei.app/container/std"
|
"hakurei.app/container/std"
|
||||||
"hakurei.app/ext"
|
|
||||||
"hakurei.app/fhs"
|
|
||||||
"hakurei.app/internal/pkg"
|
"hakurei.app/internal/pkg"
|
||||||
"hakurei.app/internal/rosa"
|
"hakurei.app/internal/rosa"
|
||||||
"hakurei.app/message"
|
"hakurei.app/message"
|
||||||
@@ -54,13 +40,14 @@ func main() {
|
|||||||
log.Fatal("this program must not run as root")
|
log.Fatal("this program must not run as root")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var cache *pkg.Cache
|
||||||
ctx, stop := signal.NotifyContext(context.Background(),
|
ctx, stop := signal.NotifyContext(context.Background(),
|
||||||
syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
|
syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
|
||||||
defer stop()
|
defer stop()
|
||||||
|
|
||||||
var cm cache
|
|
||||||
defer func() {
|
defer func() {
|
||||||
cm.Close()
|
if cache != nil {
|
||||||
|
cache.Close()
|
||||||
|
}
|
||||||
|
|
||||||
if r := recover(); r != nil {
|
if r := recover(); r != nil {
|
||||||
fmt.Println(r)
|
fmt.Println(r)
|
||||||
@@ -70,65 +57,60 @@ func main() {
|
|||||||
|
|
||||||
var (
|
var (
|
||||||
flagQuiet bool
|
flagQuiet bool
|
||||||
|
flagCures int
|
||||||
addr net.UnixAddr
|
flagBase string
|
||||||
|
flagTShift int
|
||||||
|
flagIdle bool
|
||||||
)
|
)
|
||||||
c := command.New(os.Stderr, log.Printf, "mbf", func([]string) error {
|
c := command.New(os.Stderr, log.Printf, "mbf", func([]string) (err error) {
|
||||||
msg.SwapVerbose(!flagQuiet)
|
msg.SwapVerbose(!flagQuiet)
|
||||||
cm.ctx, cm.msg = ctx, msg
|
|
||||||
cm.base = os.ExpandEnv(cm.base)
|
|
||||||
|
|
||||||
addr.Net = "unix"
|
flagBase = os.ExpandEnv(flagBase)
|
||||||
addr.Name = os.ExpandEnv(addr.Name)
|
if flagBase == "" {
|
||||||
if addr.Name == "" {
|
flagBase = "cache"
|
||||||
addr.Name = "daemon"
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
var base *check.Absolute
|
||||||
|
if flagBase, err = filepath.Abs(flagBase); err != nil {
|
||||||
|
return
|
||||||
|
} else if base, err = check.NewAbs(flagBase); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if cache, err = pkg.Open(ctx, msg, flagCures, base); err == nil {
|
||||||
|
if flagTShift < 0 {
|
||||||
|
cache.SetThreshold(0)
|
||||||
|
} else if flagTShift > 31 {
|
||||||
|
cache.SetThreshold(1 << 31)
|
||||||
|
} else {
|
||||||
|
cache.SetThreshold(1 << flagTShift)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if flagIdle {
|
||||||
|
pkg.SetSchedIdle = true
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
}).Flag(
|
}).Flag(
|
||||||
&flagQuiet,
|
&flagQuiet,
|
||||||
"q", command.BoolFlag(false),
|
"q", command.BoolFlag(false),
|
||||||
"Do not print cure messages",
|
"Do not print cure messages",
|
||||||
).Flag(
|
).Flag(
|
||||||
&cm.cures,
|
&flagCures,
|
||||||
"cures", command.IntFlag(0),
|
"cures", command.IntFlag(0),
|
||||||
"Maximum number of dependencies to cure at any given time",
|
"Maximum number of dependencies to cure at any given time",
|
||||||
).Flag(
|
).Flag(
|
||||||
&cm.jobs,
|
&flagBase,
|
||||||
"jobs", command.IntFlag(0),
|
|
||||||
"Preferred number of jobs to run, when applicable",
|
|
||||||
).Flag(
|
|
||||||
&cm.base,
|
|
||||||
"d", command.StringFlag("$MBF_CACHE_DIR"),
|
"d", command.StringFlag("$MBF_CACHE_DIR"),
|
||||||
"Directory to store cured artifacts",
|
"Directory to store cured artifacts",
|
||||||
).Flag(
|
).Flag(
|
||||||
&cm.idle,
|
&flagTShift,
|
||||||
|
"tshift", command.IntFlag(-1),
|
||||||
|
"Dependency graph size exponent, to the power of 2",
|
||||||
|
).Flag(
|
||||||
|
&flagIdle,
|
||||||
"sched-idle", command.BoolFlag(false),
|
"sched-idle", command.BoolFlag(false),
|
||||||
"Set SCHED_IDLE scheduling policy",
|
"Set SCHED_IDLE scheduling policy",
|
||||||
).Flag(
|
|
||||||
&cm.hostAbstract,
|
|
||||||
"host-abstract", command.BoolFlag(
|
|
||||||
os.Getenv("MBF_HOST_ABSTRACT") != "",
|
|
||||||
),
|
|
||||||
"Do not restrict networked cure containers from connecting to host "+
|
|
||||||
"abstract UNIX sockets",
|
|
||||||
).Flag(
|
|
||||||
&addr.Name,
|
|
||||||
"socket", command.StringFlag("$MBF_DAEMON_SOCKET"),
|
|
||||||
"Pathname of socket to bind to",
|
|
||||||
)
|
|
||||||
|
|
||||||
c.NewCommand(
|
|
||||||
"checksum", "Compute checksum of data read from standard input",
|
|
||||||
func([]string) error {
|
|
||||||
go func() { <-ctx.Done(); os.Exit(1) }()
|
|
||||||
h := sha512.New384()
|
|
||||||
if _, err := io.Copy(h, os.Stdin); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
log.Println(pkg.Encode(pkg.Checksum(h.Sum(nil))))
|
|
||||||
return nil
|
|
||||||
},
|
|
||||||
)
|
)
|
||||||
|
|
||||||
{
|
{
|
||||||
@@ -142,9 +124,7 @@ func main() {
|
|||||||
if flagShifts < 0 || flagShifts > 31 {
|
if flagShifts < 0 || flagShifts > 31 {
|
||||||
flagShifts = 12
|
flagShifts = 12
|
||||||
}
|
}
|
||||||
return cm.Do(func(cache *pkg.Cache) error {
|
|
||||||
return cache.Scrub(runtime.NumCPU() << flagShifts)
|
return cache.Scrub(runtime.NumCPU() << flagShifts)
|
||||||
})
|
|
||||||
},
|
},
|
||||||
).Flag(
|
).Flag(
|
||||||
&flagShifts,
|
&flagShifts,
|
||||||
@@ -162,13 +142,101 @@ func main() {
|
|||||||
"info",
|
"info",
|
||||||
"Display out-of-band metadata of an artifact",
|
"Display out-of-band metadata of an artifact",
|
||||||
func(args []string) (err error) {
|
func(args []string) (err error) {
|
||||||
return commandInfo(&cm, args, os.Stdout, flagStatus, flagReport)
|
if len(args) == 0 {
|
||||||
|
return errors.New("info requires at least 1 argument")
|
||||||
|
}
|
||||||
|
|
||||||
|
var r *rosa.Report
|
||||||
|
if flagReport != "" {
|
||||||
|
if r, err = rosa.OpenReport(flagReport); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if closeErr := r.Close(); err == nil {
|
||||||
|
err = closeErr
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
defer r.HandleAccess(&err)()
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, name := range args {
|
||||||
|
if p, ok := rosa.ResolveName(name); !ok {
|
||||||
|
return fmt.Errorf("unknown artifact %q", name)
|
||||||
|
} else {
|
||||||
|
var suffix string
|
||||||
|
if version := rosa.Std.Version(p); version != rosa.Unversioned {
|
||||||
|
suffix += "-" + version
|
||||||
|
}
|
||||||
|
fmt.Println("name : " + name + suffix)
|
||||||
|
|
||||||
|
meta := rosa.GetMetadata(p)
|
||||||
|
fmt.Println("description : " + meta.Description)
|
||||||
|
if meta.Website != "" {
|
||||||
|
fmt.Println("website : " +
|
||||||
|
strings.TrimSuffix(meta.Website, "/"))
|
||||||
|
}
|
||||||
|
if len(meta.Dependencies) > 0 {
|
||||||
|
fmt.Print("depends on :")
|
||||||
|
for _, d := range meta.Dependencies {
|
||||||
|
s := rosa.GetMetadata(d).Name
|
||||||
|
if version := rosa.Std.Version(d); version != rosa.Unversioned {
|
||||||
|
s += "-" + version
|
||||||
|
}
|
||||||
|
fmt.Print(" " + s)
|
||||||
|
}
|
||||||
|
fmt.Println()
|
||||||
|
}
|
||||||
|
|
||||||
|
const statusPrefix = "status : "
|
||||||
|
if flagStatus {
|
||||||
|
if r == nil {
|
||||||
|
var f io.ReadSeekCloser
|
||||||
|
f, err = cache.OpenStatus(rosa.Std.Load(p))
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, os.ErrNotExist) {
|
||||||
|
fmt.Println(
|
||||||
|
statusPrefix + "not yet cured",
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
fmt.Print(statusPrefix)
|
||||||
|
_, err = io.Copy(os.Stdout, f)
|
||||||
|
if err = errors.Join(err, f.Close()); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
status, n := r.ArtifactOf(cache.Ident(rosa.Std.Load(p)))
|
||||||
|
if status == nil {
|
||||||
|
fmt.Println(
|
||||||
|
statusPrefix + "not in report",
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
fmt.Println("size :", n)
|
||||||
|
fmt.Print(statusPrefix)
|
||||||
|
if _, err = os.Stdout.Write(status); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if i != len(args)-1 {
|
||||||
|
fmt.Println()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
},
|
},
|
||||||
).Flag(
|
).
|
||||||
|
Flag(
|
||||||
&flagStatus,
|
&flagStatus,
|
||||||
"status", command.BoolFlag(false),
|
"status", command.BoolFlag(false),
|
||||||
"Display cure status if available",
|
"Display cure status if available",
|
||||||
).Flag(
|
).
|
||||||
|
Flag(
|
||||||
&flagReport,
|
&flagReport,
|
||||||
"report", command.StringFlag(""),
|
"report", command.StringFlag(""),
|
||||||
"Load cure status from this report file instead of cache",
|
"Load cure status from this report file instead of cache",
|
||||||
@@ -203,12 +271,10 @@ func main() {
|
|||||||
return errors.New("report requires 1 argument")
|
return errors.New("report requires 1 argument")
|
||||||
}
|
}
|
||||||
|
|
||||||
if ext.Isatty(int(w.Fd())) {
|
if container.Isatty(int(w.Fd())) {
|
||||||
return errors.New("output appears to be a terminal")
|
return errors.New("output appears to be a terminal")
|
||||||
}
|
}
|
||||||
return cm.Do(func(cache *pkg.Cache) error {
|
|
||||||
return rosa.WriteReport(msg, w, cache)
|
return rosa.WriteReport(msg, w, cache)
|
||||||
})
|
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -271,26 +337,14 @@ func main() {
|
|||||||
" package(s) are out of date"))
|
" package(s) are out of date"))
|
||||||
}
|
}
|
||||||
return errors.Join(errs...)
|
return errors.Join(errs...)
|
||||||
}).Flag(
|
}).
|
||||||
|
Flag(
|
||||||
&flagJobs,
|
&flagJobs,
|
||||||
"j", command.IntFlag(32),
|
"j", command.IntFlag(32),
|
||||||
"Maximum number of simultaneous connections",
|
"Maximum number of simultaneous connections",
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
c.NewCommand(
|
|
||||||
"daemon",
|
|
||||||
"Service artifact IR with Rosa OS extensions",
|
|
||||||
func(args []string) error {
|
|
||||||
ul, err := net.ListenUnix("unix", &addr)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
log.Printf("listening on pathname socket at %s", addr.Name)
|
|
||||||
return serve(ctx, log.Default(), &cm, ul)
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
{
|
{
|
||||||
var (
|
var (
|
||||||
flagGentoo string
|
flagGentoo string
|
||||||
@@ -315,37 +369,25 @@ func main() {
|
|||||||
rosa.SetGentooStage3(flagGentoo, checksum)
|
rosa.SetGentooStage3(flagGentoo, checksum)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
_, _, _, stage1 := (t - 2).NewLLVM()
|
||||||
|
_, _, _, stage2 := (t - 1).NewLLVM()
|
||||||
|
_, _, _, stage3 := t.NewLLVM()
|
||||||
var (
|
var (
|
||||||
pathname *check.Absolute
|
pathname *check.Absolute
|
||||||
checksum [2]unique.Handle[pkg.Checksum]
|
checksum [2]unique.Handle[pkg.Checksum]
|
||||||
)
|
)
|
||||||
|
|
||||||
if err = cm.Do(func(cache *pkg.Cache) (err error) {
|
if pathname, _, err = cache.Cure(stage1); err != nil {
|
||||||
pathname, _, err = cache.Cure(
|
return err
|
||||||
(t - 2).Load(rosa.Clang),
|
|
||||||
)
|
|
||||||
return
|
|
||||||
}); err != nil {
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
log.Println("stage1:", pathname)
|
log.Println("stage1:", pathname)
|
||||||
|
|
||||||
if err = cm.Do(func(cache *pkg.Cache) (err error) {
|
if pathname, checksum[0], err = cache.Cure(stage2); err != nil {
|
||||||
pathname, checksum[0], err = cache.Cure(
|
return err
|
||||||
(t - 1).Load(rosa.Clang),
|
|
||||||
)
|
|
||||||
return
|
|
||||||
}); err != nil {
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
log.Println("stage2:", pathname)
|
log.Println("stage2:", pathname)
|
||||||
if err = cm.Do(func(cache *pkg.Cache) (err error) {
|
if pathname, checksum[1], err = cache.Cure(stage3); err != nil {
|
||||||
pathname, checksum[1], err = cache.Cure(
|
return err
|
||||||
t.Load(rosa.Clang),
|
|
||||||
)
|
|
||||||
return
|
|
||||||
}); err != nil {
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
log.Println("stage3:", pathname)
|
log.Println("stage3:", pathname)
|
||||||
|
|
||||||
@@ -362,28 +404,28 @@ func main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if flagStage0 {
|
if flagStage0 {
|
||||||
if err = cm.Do(func(cache *pkg.Cache) (err error) {
|
if pathname, _, err = cache.Cure(
|
||||||
pathname, _, err = cache.Cure(
|
|
||||||
t.Load(rosa.Stage0),
|
t.Load(rosa.Stage0),
|
||||||
)
|
); err != nil {
|
||||||
return
|
return err
|
||||||
}); err != nil {
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
log.Println(pathname)
|
log.Println(pathname)
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
},
|
},
|
||||||
).Flag(
|
).
|
||||||
|
Flag(
|
||||||
&flagGentoo,
|
&flagGentoo,
|
||||||
"gentoo", command.StringFlag(""),
|
"gentoo", command.StringFlag(""),
|
||||||
"Bootstrap from a Gentoo stage3 tarball",
|
"Bootstrap from a Gentoo stage3 tarball",
|
||||||
).Flag(
|
).
|
||||||
|
Flag(
|
||||||
&flagChecksum,
|
&flagChecksum,
|
||||||
"checksum", command.StringFlag(""),
|
"checksum", command.StringFlag(""),
|
||||||
"Checksum of Gentoo stage3 tarball",
|
"Checksum of Gentoo stage3 tarball",
|
||||||
).Flag(
|
).
|
||||||
|
Flag(
|
||||||
&flagStage0,
|
&flagStage0,
|
||||||
"stage0", command.BoolFlag(false),
|
"stage0", command.BoolFlag(false),
|
||||||
"Create bootstrap stage0 tarball",
|
"Create bootstrap stage0 tarball",
|
||||||
@@ -393,10 +435,7 @@ func main() {
|
|||||||
{
|
{
|
||||||
var (
|
var (
|
||||||
flagDump string
|
flagDump string
|
||||||
flagEnter bool
|
|
||||||
flagExport string
|
flagExport string
|
||||||
flagRemote bool
|
|
||||||
flagNoReply bool
|
|
||||||
)
|
)
|
||||||
c.NewCommand(
|
c.NewCommand(
|
||||||
"cure",
|
"cure",
|
||||||
@@ -405,18 +444,10 @@ func main() {
|
|||||||
if len(args) != 1 {
|
if len(args) != 1 {
|
||||||
return errors.New("cure requires 1 argument")
|
return errors.New("cure requires 1 argument")
|
||||||
}
|
}
|
||||||
p, ok := rosa.ResolveName(args[0])
|
if p, ok := rosa.ResolveName(args[0]); !ok {
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("unknown artifact %q", args[0])
|
return fmt.Errorf("unknown artifact %q", args[0])
|
||||||
}
|
} else if flagDump == "" {
|
||||||
|
pathname, _, err := cache.Cure(rosa.Std.Load(p))
|
||||||
switch {
|
|
||||||
default:
|
|
||||||
var pathname *check.Absolute
|
|
||||||
err := cm.Do(func(cache *pkg.Cache) (err error) {
|
|
||||||
pathname, _, err = cache.Cure(rosa.Std.Load(p))
|
|
||||||
return
|
|
||||||
})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -445,8 +476,7 @@ func main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
} else {
|
||||||
case flagDump != "":
|
|
||||||
f, err := os.OpenFile(
|
f, err := os.OpenFile(
|
||||||
flagDump,
|
flagDump,
|
||||||
os.O_WRONLY|os.O_CREATE|os.O_EXCL,
|
os.O_WRONLY|os.O_CREATE|os.O_EXCL,
|
||||||
@@ -456,76 +486,27 @@ func main() {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = pkg.NewIR().EncodeAll(f, rosa.Std.Load(p)); err != nil {
|
if err = cache.EncodeAll(f, rosa.Std.Load(p)); err != nil {
|
||||||
_ = f.Close()
|
_ = f.Close()
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return f.Close()
|
return f.Close()
|
||||||
|
|
||||||
case flagEnter:
|
|
||||||
return cm.Do(func(cache *pkg.Cache) error {
|
|
||||||
return cache.EnterExec(
|
|
||||||
ctx,
|
|
||||||
rosa.Std.Load(p),
|
|
||||||
true, os.Stdin, os.Stdout, os.Stderr,
|
|
||||||
rosa.AbsSystem.Append("bin", "mksh"),
|
|
||||||
"sh",
|
|
||||||
)
|
|
||||||
})
|
|
||||||
|
|
||||||
case flagRemote:
|
|
||||||
var flags uint64
|
|
||||||
if flagNoReply {
|
|
||||||
flags |= remoteNoReply
|
|
||||||
}
|
|
||||||
a := rosa.Std.Load(p)
|
|
||||||
pathname, err := cureRemote(ctx, &addr, a, flags)
|
|
||||||
if !flagNoReply && err == nil {
|
|
||||||
log.Println(pathname)
|
|
||||||
}
|
|
||||||
|
|
||||||
if errors.Is(err, context.Canceled) {
|
|
||||||
cc, cancel := context.WithDeadline(context.Background(), daemonDeadline())
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
if _err := cancelRemote(cc, &addr, a); _err != nil {
|
|
||||||
log.Println(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
).Flag(
|
).
|
||||||
|
Flag(
|
||||||
&flagDump,
|
&flagDump,
|
||||||
"dump", command.StringFlag(""),
|
"dump", command.StringFlag(""),
|
||||||
"Write IR to specified pathname and terminate",
|
"Write IR to specified pathname and terminate",
|
||||||
).Flag(
|
).
|
||||||
|
Flag(
|
||||||
&flagExport,
|
&flagExport,
|
||||||
"export", command.StringFlag(""),
|
"export", command.StringFlag(""),
|
||||||
"Export cured artifact to specified pathname",
|
"Export cured artifact to specified pathname",
|
||||||
).Flag(
|
|
||||||
&flagEnter,
|
|
||||||
"enter", command.BoolFlag(false),
|
|
||||||
"Enter cure container with an interactive shell",
|
|
||||||
).Flag(
|
|
||||||
&flagRemote,
|
|
||||||
"daemon", command.BoolFlag(false),
|
|
||||||
"Cure artifact on the daemon",
|
|
||||||
).Flag(
|
|
||||||
&flagNoReply,
|
|
||||||
"no-reply", command.BoolFlag(false),
|
|
||||||
"Do not receive a reply from the daemon",
|
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
c.NewCommand(
|
|
||||||
"abort",
|
|
||||||
"Abort all pending cures on the daemon",
|
|
||||||
func([]string) error { return abortRemote(ctx, &addr) },
|
|
||||||
)
|
|
||||||
|
|
||||||
{
|
{
|
||||||
var (
|
var (
|
||||||
flagNet bool
|
flagNet bool
|
||||||
@@ -537,7 +518,7 @@ func main() {
|
|||||||
"shell",
|
"shell",
|
||||||
"Interactive shell in the specified Rosa OS environment",
|
"Interactive shell in the specified Rosa OS environment",
|
||||||
func(args []string) error {
|
func(args []string) error {
|
||||||
presets := make([]rosa.PArtifact, len(args)+3)
|
presets := make([]rosa.PArtifact, len(args))
|
||||||
for i, arg := range args {
|
for i, arg := range args {
|
||||||
p, ok := rosa.ResolveName(arg)
|
p, ok := rosa.ResolveName(arg)
|
||||||
if !ok {
|
if !ok {
|
||||||
@@ -545,26 +526,23 @@ func main() {
|
|||||||
}
|
}
|
||||||
presets[i] = p
|
presets[i] = p
|
||||||
}
|
}
|
||||||
|
root := make(rosa.Collect, 0, 6+len(args))
|
||||||
base := rosa.Clang
|
|
||||||
if !flagWithToolchain {
|
|
||||||
base = rosa.Musl
|
|
||||||
}
|
|
||||||
presets = append(presets,
|
|
||||||
base,
|
|
||||||
rosa.Mksh,
|
|
||||||
rosa.Toybox,
|
|
||||||
)
|
|
||||||
|
|
||||||
root := make(pkg.Collect, 0, 6+len(args))
|
|
||||||
root = rosa.Std.AppendPresets(root, presets...)
|
root = rosa.Std.AppendPresets(root, presets...)
|
||||||
|
|
||||||
if err := cm.Do(func(cache *pkg.Cache) error {
|
if flagWithToolchain {
|
||||||
_, _, err := cache.Cure(&root)
|
musl, compilerRT, runtimes, clang := (rosa.Std - 1).NewLLVM()
|
||||||
return err
|
root = append(root, musl, compilerRT, runtimes, clang)
|
||||||
}); err == nil {
|
} else {
|
||||||
|
root = append(root, rosa.Std.Load(rosa.Musl))
|
||||||
|
}
|
||||||
|
root = append(root,
|
||||||
|
rosa.Std.Load(rosa.Mksh),
|
||||||
|
rosa.Std.Load(rosa.Toybox),
|
||||||
|
)
|
||||||
|
|
||||||
|
if _, _, err := cache.Cure(&root); err == nil {
|
||||||
return errors.New("unreachable")
|
return errors.New("unreachable")
|
||||||
} else if !pkg.IsCollected(err) {
|
} else if !errors.Is(err, rosa.Collected{}) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -574,23 +552,12 @@ func main() {
|
|||||||
}
|
}
|
||||||
cured := make(map[pkg.Artifact]cureRes)
|
cured := make(map[pkg.Artifact]cureRes)
|
||||||
for _, a := range root {
|
for _, a := range root {
|
||||||
if err := cm.Do(func(cache *pkg.Cache) error {
|
|
||||||
pathname, checksum, err := cache.Cure(a)
|
pathname, checksum, err := cache.Cure(a)
|
||||||
if err == nil {
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
cured[a] = cureRes{pathname, checksum}
|
cured[a] = cureRes{pathname, checksum}
|
||||||
}
|
}
|
||||||
return err
|
|
||||||
}); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// explicitly open for direct error-free use from this point
|
|
||||||
if cm.c == nil {
|
|
||||||
if err := cm.open(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
layers := pkg.PromoteLayers(root, func(a pkg.Artifact) (
|
layers := pkg.PromoteLayers(root, func(a pkg.Artifact) (
|
||||||
*check.Absolute,
|
*check.Absolute,
|
||||||
@@ -599,7 +566,7 @@ func main() {
|
|||||||
res := cured[a]
|
res := cured[a]
|
||||||
return res.pathname, res.checksum
|
return res.pathname, res.checksum
|
||||||
}, func(i int, d pkg.Artifact) {
|
}, func(i int, d pkg.Artifact) {
|
||||||
r := pkg.Encode(cm.c.Ident(d).Value())
|
r := pkg.Encode(cache.Ident(d).Value())
|
||||||
if s, ok := d.(fmt.Stringer); ok {
|
if s, ok := d.(fmt.Stringer); ok {
|
||||||
if name := s.String(); name != "" {
|
if name := s.String(); name != "" {
|
||||||
r += "-" + name
|
r += "-" + name
|
||||||
@@ -618,9 +585,6 @@ func main() {
|
|||||||
z.Hostname = "localhost"
|
z.Hostname = "localhost"
|
||||||
z.Uid, z.Gid = (1<<10)-1, (1<<10)-1
|
z.Uid, z.Gid = (1<<10)-1, (1<<10)-1
|
||||||
z.Stdin, z.Stdout, z.Stderr = os.Stdin, os.Stdout, os.Stderr
|
z.Stdin, z.Stdout, z.Stderr = os.Stdin, os.Stdout, os.Stderr
|
||||||
if s, ok := os.LookupEnv("TERM"); ok {
|
|
||||||
z.Env = append(z.Env, "TERM="+s)
|
|
||||||
}
|
|
||||||
|
|
||||||
var tempdir *check.Absolute
|
var tempdir *check.Absolute
|
||||||
if s, err := filepath.Abs(os.TempDir()); err != nil {
|
if s, err := filepath.Abs(os.TempDir()); err != nil {
|
||||||
@@ -663,18 +627,21 @@ func main() {
|
|||||||
}
|
}
|
||||||
return z.Wait()
|
return z.Wait()
|
||||||
},
|
},
|
||||||
).Flag(
|
).
|
||||||
|
Flag(
|
||||||
&flagNet,
|
&flagNet,
|
||||||
"net", command.BoolFlag(false),
|
"net", command.BoolFlag(false),
|
||||||
"Share host net namespace",
|
"Share host net namespace",
|
||||||
).Flag(
|
).
|
||||||
|
Flag(
|
||||||
&flagSession,
|
&flagSession,
|
||||||
"session", command.BoolFlag(true),
|
"session", command.BoolFlag(false),
|
||||||
"Retain session",
|
"Retain session",
|
||||||
).Flag(
|
).
|
||||||
|
Flag(
|
||||||
&flagWithToolchain,
|
&flagWithToolchain,
|
||||||
"with-toolchain", command.BoolFlag(false),
|
"with-toolchain", command.BoolFlag(false),
|
||||||
"Include the stage2 LLVM toolchain",
|
"Include the stage3 LLVM toolchain",
|
||||||
)
|
)
|
||||||
|
|
||||||
}
|
}
|
||||||
@@ -686,7 +653,9 @@ func main() {
|
|||||||
)
|
)
|
||||||
|
|
||||||
c.MustParse(os.Args[1:], func(err error) {
|
c.MustParse(os.Args[1:], func(err error) {
|
||||||
cm.Close()
|
if cache != nil {
|
||||||
|
cache.Close()
|
||||||
|
}
|
||||||
if w, ok := err.(interface{ Unwrap() []error }); !ok {
|
if w, ok := err.(interface{ Unwrap() []error }); !ok {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
} else {
|
} else {
|
||||||
|
|||||||
@@ -127,7 +127,7 @@ func (index *packageIndex) handleSearch(w http.ResponseWriter, r *http.Request)
|
|||||||
)
|
)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
search, err := url.QueryUnescape(q.Get("search"))
|
search, err := url.PathUnescape(q.Get("search"))
|
||||||
if len(search) > 100 || err != nil {
|
if len(search) > 100 || err != nil {
|
||||||
http.Error(
|
http.Error(
|
||||||
w, "search must be a string between 0 and 100 characters long",
|
w, "search must be a string between 0 and 100 characters long",
|
||||||
@@ -142,7 +142,7 @@ func (index *packageIndex) handleSearch(w http.ResponseWriter, r *http.Request)
|
|||||||
}
|
}
|
||||||
writeAPIPayload(w, &struct {
|
writeAPIPayload(w, &struct {
|
||||||
Count int `json:"count"`
|
Count int `json:"count"`
|
||||||
Values []searchResult `json:"values"`
|
Results []searchResult `json:"results"`
|
||||||
}{n, res})
|
}{n, res})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -10,8 +10,8 @@ import (
|
|||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"hakurei.app/check"
|
|
||||||
"hakurei.app/command"
|
"hakurei.app/command"
|
||||||
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/internal/pkg"
|
"hakurei.app/internal/pkg"
|
||||||
"hakurei.app/internal/rosa"
|
"hakurei.app/internal/rosa"
|
||||||
"hakurei.app/message"
|
"hakurei.app/message"
|
||||||
@@ -47,7 +47,7 @@ func main() {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
cache, err = pkg.Open(ctx, msg, 0, 0, 0, baseDir)
|
cache, err = pkg.Open(ctx, msg, 0, baseDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -82,7 +82,7 @@ func main() {
|
|||||||
}()
|
}()
|
||||||
var mux http.ServeMux
|
var mux http.ServeMux
|
||||||
uiRoutes(&mux)
|
uiRoutes(&mux)
|
||||||
testUIRoutes(&mux)
|
testUiRoutes(&mux)
|
||||||
index.registerAPI(&mux)
|
index.registerAPI(&mux)
|
||||||
server := http.Server{
|
server := http.Server{
|
||||||
Addr: flagAddr,
|
Addr: flagAddr,
|
||||||
|
|||||||
@@ -22,13 +22,9 @@ type searchCacheEntry struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (index *packageIndex) performSearchQuery(limit int, i int, search string, desc bool) (int, []searchResult, error) {
|
func (index *packageIndex) performSearchQuery(limit int, i int, search string, desc bool) (int, []searchResult, error) {
|
||||||
query := search
|
entry, ok := index.search[search]
|
||||||
if desc {
|
if ok {
|
||||||
query += ";withDesc"
|
return len(entry.results), entry.results[i:min(i+limit, len(entry.results))], nil
|
||||||
}
|
|
||||||
entry, ok := index.search[query]
|
|
||||||
if ok && len(entry.results) > 0 {
|
|
||||||
return len(entry.results), entry.results[min(i, len(entry.results)-1):min(i+limit, len(entry.results))], nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
regex, err := regexp.Compile(search)
|
regex, err := regexp.Compile(search)
|
||||||
@@ -63,7 +59,7 @@ func (index *packageIndex) performSearchQuery(limit int, i int, search string, d
|
|||||||
results: res,
|
results: res,
|
||||||
expiry: expiry,
|
expiry: expiry,
|
||||||
}
|
}
|
||||||
index.search[query] = entry
|
index.search[search] = entry
|
||||||
|
|
||||||
return len(res), res[i:min(i+limit, len(entry.results))], nil
|
return len(res), res[i:min(i+limit, len(entry.results))], nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,32 +4,94 @@ package main
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"embed"
|
"embed"
|
||||||
"io/fs"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Always remove ui_test/ui; if the previous tsc run failed, the rm never
|
|
||||||
// executes.
|
|
||||||
|
|
||||||
//go:generate sh -c "rm -r ui_test/ui/ 2>/dev/null || true"
|
|
||||||
//go:generate mkdir ui_test/ui
|
|
||||||
//go:generate sh -c "cp ui/static/*.ts ui_test/ui/"
|
|
||||||
//go:generate tsc -p ui_test
|
//go:generate tsc -p ui_test
|
||||||
//go:generate rm -r ui_test/ui/
|
//go:generate sass ui_test/lib/ui.scss ui_test/lib/ui.css
|
||||||
//go:generate cp ui_test/lib/ui.css ui_test/static/style.css
|
//go:embed ui_test/*
|
||||||
//go:generate cp ui_test/lib/ui.html ui_test/static/index.html
|
var test_content embed.FS
|
||||||
//go:generate sh -c "cd ui_test/lib && cp *.svg ../static/"
|
|
||||||
//go:embed ui_test/static
|
|
||||||
var _staticTest embed.FS
|
|
||||||
|
|
||||||
var staticTest = func() fs.FS {
|
func serveTestWebUI(w http.ResponseWriter, r *http.Request) {
|
||||||
if f, err := fs.Sub(_staticTest, "ui_test/static"); err != nil {
|
w.Header().Set("X-Content-Type-Options", "nosniff")
|
||||||
panic(err)
|
w.Header().Set("X-XSS-Protection", "1")
|
||||||
} else {
|
w.Header().Set("X-Frame-Options", "DENY")
|
||||||
return f
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
func testUIRoutes(mux *http.ServeMux) {
|
http.ServeFileFS(w, r, test_content, "ui_test/lib/ui.html")
|
||||||
mux.Handle("GET /test/", http.StripPrefix("/test", http.FileServer(http.FS(staticTest))))
|
}
|
||||||
|
|
||||||
|
func serveTestWebUIStaticContent(w http.ResponseWriter, r *http.Request) {
|
||||||
|
switch r.URL.Path {
|
||||||
|
case "/testui/style.css":
|
||||||
|
http.ServeFileFS(w, r, test_content, "ui_test/lib/ui.css")
|
||||||
|
case "/testui/skip-closed.svg":
|
||||||
|
http.ServeFileFS(w, r, test_content, "ui_test/lib/skip-closed.svg")
|
||||||
|
case "/testui/skip-open.svg":
|
||||||
|
http.ServeFileFS(w, r, test_content, "ui_test/lib/skip-open.svg")
|
||||||
|
case "/testui/success-closed.svg":
|
||||||
|
http.ServeFileFS(w, r, test_content, "ui_test/lib/success-closed.svg")
|
||||||
|
case "/testui/success-open.svg":
|
||||||
|
http.ServeFileFS(w, r, test_content, "ui_test/lib/success-open.svg")
|
||||||
|
case "/testui/failure-closed.svg":
|
||||||
|
http.ServeFileFS(w, r, test_content, "ui_test/lib/failure-closed.svg")
|
||||||
|
case "/testui/failure-open.svg":
|
||||||
|
http.ServeFileFS(w, r, test_content, "ui_test/lib/failure-open.svg")
|
||||||
|
default:
|
||||||
|
http.NotFound(w, r)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func serveTestLibrary(w http.ResponseWriter, r *http.Request) {
|
||||||
|
switch r.URL.Path {
|
||||||
|
case "/test/lib/test.js":
|
||||||
|
http.ServeFileFS(w, r, test_content, "ui_test/lib/test.js")
|
||||||
|
default:
|
||||||
|
http.NotFound(w, r)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func serveTests(w http.ResponseWriter, r *http.Request) {
|
||||||
|
if r.URL.Path == "/test/" {
|
||||||
|
http.Redirect(w, r, "/test.html", http.StatusMovedPermanently)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
testPath := strings.TrimPrefix(r.URL.Path, "/test/")
|
||||||
|
|
||||||
|
if path.Ext(testPath) != ".js" {
|
||||||
|
http.Error(w, "403 forbidden", http.StatusForbidden)
|
||||||
|
}
|
||||||
|
|
||||||
|
w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
|
||||||
|
w.Header().Set("Pragma", "no-cache")
|
||||||
|
w.Header().Set("Expires", "0")
|
||||||
|
|
||||||
|
http.ServeFileFS(w, r, test_content, "ui_test/"+testPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
func redirectUI(w http.ResponseWriter, r *http.Request) {
|
||||||
|
// The base path should not redirect to the root.
|
||||||
|
if r.URL.Path == "/ui/" {
|
||||||
|
http.NotFound(w, r)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if path.Ext(r.URL.Path) != ".js" {
|
||||||
|
http.Error(w, "403 forbidden", http.StatusForbidden)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
|
||||||
|
w.Header().Set("Pragma", "no-cache")
|
||||||
|
w.Header().Set("Expires", "0")
|
||||||
|
|
||||||
|
http.Redirect(w, r, strings.TrimPrefix(r.URL.Path, "/ui"), http.StatusFound)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testUiRoutes(mux *http.ServeMux) {
|
||||||
|
mux.HandleFunc("GET /test.html", serveTestWebUI)
|
||||||
|
mux.HandleFunc("GET /testui/", serveTestWebUIStaticContent)
|
||||||
|
mux.HandleFunc("GET /test/lib/", serveTestLibrary)
|
||||||
|
mux.HandleFunc("GET /test/", serveTests)
|
||||||
|
mux.HandleFunc("GET /ui/", redirectUI)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,4 +4,4 @@ package main
|
|||||||
|
|
||||||
import "net/http"
|
import "net/http"
|
||||||
|
|
||||||
func testUIRoutes(mux *http.ServeMux) {}
|
func testUiRoutes(mux *http.ServeMux) {}
|
||||||
|
|||||||
@@ -15,7 +15,12 @@ func serveWebUI(w http.ResponseWriter, r *http.Request) {
|
|||||||
func serveStaticContent(w http.ResponseWriter, r *http.Request) {
|
func serveStaticContent(w http.ResponseWriter, r *http.Request) {
|
||||||
switch r.URL.Path {
|
switch r.URL.Path {
|
||||||
case "/static/style.css":
|
case "/static/style.css":
|
||||||
http.ServeFileFS(w, r, content, "ui/static/style.css")
|
darkTheme := r.CookiesNamed("dark_theme")
|
||||||
|
if len(darkTheme) > 0 && darkTheme[0].Value == "true" {
|
||||||
|
http.ServeFileFS(w, r, content, "ui/static/dark.css")
|
||||||
|
} else {
|
||||||
|
http.ServeFileFS(w, r, content, "ui/static/light.css")
|
||||||
|
}
|
||||||
case "/favicon.ico":
|
case "/favicon.ico":
|
||||||
http.ServeFileFS(w, r, content, "ui/static/favicon.ico")
|
http.ServeFileFS(w, r, content, "ui/static/favicon.ico")
|
||||||
case "/static/index.js":
|
case "/static/index.js":
|
||||||
|
|||||||
@@ -2,56 +2,34 @@
|
|||||||
<html lang="en">
|
<html lang="en">
|
||||||
<head>
|
<head>
|
||||||
<meta charset="UTF-8">
|
<meta charset="UTF-8">
|
||||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
|
||||||
<link rel="stylesheet" href="static/style.css">
|
<link rel="stylesheet" href="static/style.css">
|
||||||
<title>Hakurei PkgServer</title>
|
<title>Hakurei PkgServer</title>
|
||||||
<script src="static/index.js"></script>
|
<script src="static/index.js"></script>
|
||||||
</head>
|
</head>
|
||||||
<body>
|
<body>
|
||||||
<h1>Hakurei PkgServer</h1>
|
<h1>Hakurei PkgServer</h1>
|
||||||
<div class="top-controls" id="top-controls-regular">
|
|
||||||
|
<table id="pkg-list">
|
||||||
|
<tr><td>Loading...</td></tr>
|
||||||
|
</table>
|
||||||
<p>Showing entries <span id="entry-counter"></span>.</p>
|
<p>Showing entries <span id="entry-counter"></span>.</p>
|
||||||
<span id="search-bar">
|
<span class="bottom-nav"><a href="javascript:prevPage()">« Previous</a> <span id="page-number">1</span> <a href="javascript:nextPage()">Next »</a></span>
|
||||||
<label for="search">Search: </label>
|
<span><label for="count">Entries per page: </label><select name="count" id="count">
|
||||||
<input type="text" name="search" id="search"/>
|
|
||||||
<button onclick="doSearch()">Find</button>
|
|
||||||
<label for="include-desc">Include descriptions: </label>
|
|
||||||
<input type="checkbox" name="include-desc" id="include-desc" checked/>
|
|
||||||
</span>
|
|
||||||
<div><label for="count">Entries per page: </label><select name="count" id="count">
|
|
||||||
<option value="10">10</option>
|
<option value="10">10</option>
|
||||||
<option value="20">20</option>
|
<option value="20">20</option>
|
||||||
<option value="30">30</option>
|
<option value="30">30</option>
|
||||||
<option value="50">50</option>
|
<option value="50">50</option>
|
||||||
</select></div>
|
</select></span>
|
||||||
<div><label for="sort">Sort by: </label><select name="sort" id="sort">
|
<span><label for="sort">Sort by: </label><select name="sort" id="sort">
|
||||||
<option value="0">Definition (ascending)</option>
|
<option value="0">Definition (ascending)</option>
|
||||||
<option value="1">Definition (descending)</option>
|
<option value="1">Definition (descending)</option>
|
||||||
<option value="2">Name (ascending)</option>
|
<option value="2">Name (ascending)</option>
|
||||||
<option value="3">Name (descending)</option>
|
<option value="3">Name (descending)</option>
|
||||||
<option value="4">Size (ascending)</option>
|
<option value="4">Size (ascending)</option>
|
||||||
<option value="5">Size (descending)</option>
|
<option value="5">Size (descending)</option>
|
||||||
</select></div>
|
</select></span>
|
||||||
</div>
|
</body>
|
||||||
<div class="top-controls" id="search-top-controls" hidden>
|
|
||||||
<p>Showing search results <span id="search-entry-counter"></span> for query "<span id="search-query"></span>".</p>
|
|
||||||
<button onclick="exitSearch()">Back</button>
|
|
||||||
<div><label for="search-count">Entries per page: </label><select name="search-count" id="search-count">
|
|
||||||
<option value="10">10</option>
|
|
||||||
<option value="20">20</option>
|
|
||||||
<option value="30">30</option>
|
|
||||||
<option value="50">50</option>
|
|
||||||
</select></div>
|
|
||||||
<p>Sorted by best match</p>
|
|
||||||
</div>
|
|
||||||
<div class="page-controls"><a href="javascript:prevPage()">« Previous</a> <input type="text" class="page-number" value="1"/> <a href="javascript:nextPage()">Next »</a></div>
|
|
||||||
<table id="pkg-list">
|
|
||||||
<tr><td>Loading...</td></tr>
|
|
||||||
</table>
|
|
||||||
<div class="page-controls"><a href="javascript:prevPage()">« Previous</a> <input type="text" class="page-number" value="1"/> <a href="javascript:nextPage()">Next »</a></div>
|
|
||||||
<footer>
|
<footer>
|
||||||
<p>©<a href="https://hakurei.app/">Hakurei</a> (<span id="hakurei-version">unknown</span>). Licensed under the MIT license.</p>
|
<p>©<a href="https://hakurei.app/">Hakurei</a> (<span id="hakurei-version">unknown</span>). Licensed under the MIT license.</p>
|
||||||
</footer>
|
</footer>
|
||||||
<script>main();</script>
|
|
||||||
</body>
|
|
||||||
</html>
|
</html>
|
||||||
@@ -1,331 +0,0 @@
|
|||||||
interface PackageIndexEntry {
|
|
||||||
name: string
|
|
||||||
size?: number
|
|
||||||
description?: string
|
|
||||||
website?: string
|
|
||||||
version?: string
|
|
||||||
report?: boolean
|
|
||||||
}
|
|
||||||
|
|
||||||
function entryToHTML(entry: PackageIndexEntry | SearchResult): HTMLTableRowElement {
|
|
||||||
let v = entry.version != null ? `<span>${escapeHtml(entry.version)}</span>` : ""
|
|
||||||
let s = entry.size != null && entry.size > 0 ? `<p>Size: ${toByteSizeString(entry.size)} (${entry.size})</p>` : ""
|
|
||||||
let n: string
|
|
||||||
let d: string
|
|
||||||
if ('name_matches' in entry) {
|
|
||||||
n = `<h2>${nameMatches(entry as SearchResult)} ${v}</h2>`
|
|
||||||
} else {
|
|
||||||
n = `<h2>${escapeHtml(entry.name)} ${v}</h2>`
|
|
||||||
}
|
|
||||||
if ('desc_matches' in entry && STATE.getIncludeDescriptions()) {
|
|
||||||
d = descMatches(entry as SearchResult)
|
|
||||||
} else {
|
|
||||||
d = (entry as PackageIndexEntry).description != null ? `<p>${escapeHtml((entry as PackageIndexEntry).description)}</p>` : ""
|
|
||||||
}
|
|
||||||
let w = entry.website != null ? `<a href="${encodeURI(entry.website)}">Website</a>` : ""
|
|
||||||
let r = entry.report ? `Log (<a href=\"${encodeURI('/api/v1/status/' + entry.name)}\">View</a> | <a href=\"${encodeURI('/status/' + entry.name)}\">Download</a>)` : ""
|
|
||||||
let row = <HTMLTableRowElement>(document.createElement('tr'))
|
|
||||||
row.innerHTML = `<td>
|
|
||||||
${n}
|
|
||||||
${d}
|
|
||||||
${s}
|
|
||||||
${w}
|
|
||||||
${r}
|
|
||||||
</td>`
|
|
||||||
return row
|
|
||||||
}
|
|
||||||
|
|
||||||
function nameMatches(sr: SearchResult): string {
|
|
||||||
return markMatches(sr.name, sr.name_matches)
|
|
||||||
}
|
|
||||||
|
|
||||||
function descMatches(sr: SearchResult): string {
|
|
||||||
return markMatches(sr.description!, sr.desc_matches)
|
|
||||||
}
|
|
||||||
|
|
||||||
function markMatches(str: string, indices: [number, number][]): string {
|
|
||||||
if (indices == null) {
|
|
||||||
return str
|
|
||||||
}
|
|
||||||
let out: string = ""
|
|
||||||
let j = 0
|
|
||||||
for (let i = 0; i < str.length; i++) {
|
|
||||||
if (j < indices.length) {
|
|
||||||
if (i === indices[j][0]) {
|
|
||||||
out += `<mark>${escapeHtmlChar(str[i])}`
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if (i === indices[j][1]) {
|
|
||||||
out += `</mark>${escapeHtmlChar(str[i])}`
|
|
||||||
j++
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
out += escapeHtmlChar(str[i])
|
|
||||||
}
|
|
||||||
if (indices[j] !== undefined) {
|
|
||||||
out += "</mark>"
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
function toByteSizeString(bytes: number): string {
|
|
||||||
if (bytes == null) return `unspecified`
|
|
||||||
if (bytes < 1024) return `${bytes}B`
|
|
||||||
if (bytes < Math.pow(1024, 2)) return `${(bytes / 1024).toFixed(2)}kiB`
|
|
||||||
if (bytes < Math.pow(1024, 3)) return `${(bytes / Math.pow(1024, 2)).toFixed(2)}MiB`
|
|
||||||
if (bytes < Math.pow(1024, 4)) return `${(bytes / Math.pow(1024, 3)).toFixed(2)}GiB`
|
|
||||||
if (bytes < Math.pow(1024, 5)) return `${(bytes / Math.pow(1024, 4)).toFixed(2)}TiB`
|
|
||||||
return "not only is it big, it's large"
|
|
||||||
}
|
|
||||||
|
|
||||||
const API_VERSION = 1
|
|
||||||
const ENDPOINT = `/api/v${API_VERSION}`
|
|
||||||
|
|
||||||
interface InfoPayload {
|
|
||||||
count?: number
|
|
||||||
hakurei_version?: string
|
|
||||||
}
|
|
||||||
|
|
||||||
async function infoRequest(): Promise<InfoPayload> {
|
|
||||||
const res = await fetch(`${ENDPOINT}/info`)
|
|
||||||
const payload = await res.json()
|
|
||||||
return payload as InfoPayload
|
|
||||||
}
|
|
||||||
|
|
||||||
interface GetPayload {
|
|
||||||
values?: PackageIndexEntry[]
|
|
||||||
}
|
|
||||||
|
|
||||||
enum SortOrders {
|
|
||||||
DeclarationAscending,
|
|
||||||
DeclarationDescending,
|
|
||||||
NameAscending,
|
|
||||||
NameDescending
|
|
||||||
}
|
|
||||||
|
|
||||||
async function getRequest(limit: number, index: number, sort: SortOrders): Promise<GetPayload> {
|
|
||||||
const res = await fetch(`${ENDPOINT}/get?limit=${limit}&index=${index}&sort=${sort.valueOf()}`)
|
|
||||||
const payload = await res.json()
|
|
||||||
return payload as GetPayload
|
|
||||||
}
|
|
||||||
|
|
||||||
interface SearchResult extends PackageIndexEntry {
|
|
||||||
name_matches: [number, number][]
|
|
||||||
desc_matches: [number, number][]
|
|
||||||
score: number
|
|
||||||
}
|
|
||||||
|
|
||||||
interface SearchPayload {
|
|
||||||
count?: number
|
|
||||||
values?: SearchResult[]
|
|
||||||
}
|
|
||||||
|
|
||||||
async function searchRequest(limit: number, index: number, search: string, desc: boolean): Promise<SearchPayload> {
|
|
||||||
const res = await fetch(`${ENDPOINT}/search?limit=${limit}&index=${index}&search=${encodeURIComponent(search)}&desc=${desc}`)
|
|
||||||
if (!res.ok) {
|
|
||||||
exitSearch()
|
|
||||||
alert("invalid search query!")
|
|
||||||
return Promise.reject(res.statusText)
|
|
||||||
}
|
|
||||||
const payload = await res.json()
|
|
||||||
return payload as SearchPayload
|
|
||||||
}
|
|
||||||
|
|
||||||
class State {
|
|
||||||
entriesPerPage: number = 10
|
|
||||||
entryIndex: number = 0
|
|
||||||
maxTotal: number = 0
|
|
||||||
maxEntries: number = 0
|
|
||||||
sort: SortOrders = SortOrders.DeclarationAscending
|
|
||||||
search: boolean = false
|
|
||||||
|
|
||||||
getEntriesPerPage(): number {
|
|
||||||
return this.entriesPerPage
|
|
||||||
}
|
|
||||||
|
|
||||||
setEntriesPerPage(entriesPerPage: number) {
|
|
||||||
this.entriesPerPage = entriesPerPage
|
|
||||||
this.setEntryIndex(Math.floor(this.getEntryIndex() / entriesPerPage) * entriesPerPage)
|
|
||||||
}
|
|
||||||
|
|
||||||
getEntryIndex(): number {
|
|
||||||
return this.entryIndex
|
|
||||||
}
|
|
||||||
|
|
||||||
setEntryIndex(entryIndex: number) {
|
|
||||||
this.entryIndex = entryIndex
|
|
||||||
this.updatePage()
|
|
||||||
this.updateRange()
|
|
||||||
this.updateListings()
|
|
||||||
}
|
|
||||||
|
|
||||||
getMaxTotal(): number {
|
|
||||||
return this.maxTotal
|
|
||||||
}
|
|
||||||
|
|
||||||
setMaxTotal(max: number) {
|
|
||||||
this.maxTotal = max
|
|
||||||
}
|
|
||||||
|
|
||||||
getSortOrder(): SortOrders {
|
|
||||||
return this.sort
|
|
||||||
}
|
|
||||||
|
|
||||||
setSortOrder(sortOrder: SortOrders) {
|
|
||||||
this.sort = sortOrder
|
|
||||||
this.setEntryIndex(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
updatePage() {
|
|
||||||
let page = Math.ceil(((this.getEntryIndex() + this.getEntriesPerPage()) - 1) / this.getEntriesPerPage())
|
|
||||||
for (let e of document.getElementsByClassName("page-number")) {
|
|
||||||
(e as HTMLInputElement).value = String(page)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
updateRange() {
|
|
||||||
let max = Math.min(this.getEntryIndex() + this.getEntriesPerPage(), this.getMaxTotal())
|
|
||||||
document.getElementById("entry-counter")!.textContent = `${this.getEntryIndex() + 1}-${max} of ${this.getMaxTotal()}`
|
|
||||||
if (this.search) {
|
|
||||||
document.getElementById("search-entry-counter")!.textContent = `${this.getEntryIndex() + 1}-${max} of ${this.maxTotal}/${this.maxEntries}`
|
|
||||||
document.getElementById("search-query")!.innerHTML = `<code>${escapeHtml(this.getSearchQuery())}</code>`
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
getSearchQuery(): string {
|
|
||||||
let queryString = document.getElementById("search")!;
|
|
||||||
return (queryString as HTMLInputElement).value
|
|
||||||
}
|
|
||||||
|
|
||||||
getIncludeDescriptions(): boolean {
|
|
||||||
let includeDesc = document.getElementById("include-desc")!;
|
|
||||||
return (includeDesc as HTMLInputElement).checked
|
|
||||||
}
|
|
||||||
|
|
||||||
updateListings() {
|
|
||||||
if (this.search) {
|
|
||||||
searchRequest(this.getEntriesPerPage(), this.getEntryIndex(), this.getSearchQuery(), this.getIncludeDescriptions())
|
|
||||||
.then(res => {
|
|
||||||
let table = document.getElementById("pkg-list")!
|
|
||||||
table.innerHTML = ''
|
|
||||||
for (let row of res.values!) {
|
|
||||||
table.appendChild(entryToHTML(row))
|
|
||||||
}
|
|
||||||
STATE.maxTotal = res.count!
|
|
||||||
STATE.updateRange()
|
|
||||||
if(res.count! < 1) {
|
|
||||||
exitSearch()
|
|
||||||
alert("no results found!")
|
|
||||||
}
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
getRequest(this.getEntriesPerPage(), this.getEntryIndex(), this.getSortOrder())
|
|
||||||
.then(res => {
|
|
||||||
let table = document.getElementById("pkg-list")!
|
|
||||||
table.innerHTML = ''
|
|
||||||
for (let row of res.values!) {
|
|
||||||
table.appendChild(entryToHTML(row))
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let STATE: State
|
|
||||||
|
|
||||||
|
|
||||||
function lastPageIndex(): number {
|
|
||||||
return Math.floor(STATE.getMaxTotal() / STATE.getEntriesPerPage()) * STATE.getEntriesPerPage()
|
|
||||||
}
|
|
||||||
|
|
||||||
function setPage(page: number) {
|
|
||||||
STATE.setEntryIndex(Math.max(0, Math.min(STATE.getEntriesPerPage() * (page - 1), lastPageIndex())))
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
function escapeHtml(str?: string): string {
|
|
||||||
let out: string = ''
|
|
||||||
if (str == undefined) return ""
|
|
||||||
for (let i = 0; i < str.length; i++) {
|
|
||||||
out += escapeHtmlChar(str[i])
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
function escapeHtmlChar(char: string): string {
|
|
||||||
if (char.length != 1) return char
|
|
||||||
switch (char[0]) {
|
|
||||||
case '&':
|
|
||||||
return "&"
|
|
||||||
case '<':
|
|
||||||
return "<"
|
|
||||||
case '>':
|
|
||||||
return ">"
|
|
||||||
case '"':
|
|
||||||
return """
|
|
||||||
case "'":
|
|
||||||
return "'"
|
|
||||||
default:
|
|
||||||
return char
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
function firstPage() {
|
|
||||||
STATE.setEntryIndex(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
function prevPage() {
|
|
||||||
let index = STATE.getEntryIndex()
|
|
||||||
STATE.setEntryIndex(Math.max(0, index - STATE.getEntriesPerPage()))
|
|
||||||
}
|
|
||||||
|
|
||||||
function lastPage() {
|
|
||||||
STATE.setEntryIndex(lastPageIndex())
|
|
||||||
}
|
|
||||||
|
|
||||||
function nextPage() {
|
|
||||||
let index = STATE.getEntryIndex()
|
|
||||||
STATE.setEntryIndex(Math.min(lastPageIndex(), index + STATE.getEntriesPerPage()))
|
|
||||||
}
|
|
||||||
|
|
||||||
function doSearch() {
|
|
||||||
document.getElementById("top-controls-regular")!.toggleAttribute("hidden");
|
|
||||||
document.getElementById("search-top-controls")!.toggleAttribute("hidden");
|
|
||||||
STATE.search = true;
|
|
||||||
STATE.setEntryIndex(0);
|
|
||||||
}
|
|
||||||
|
|
||||||
function exitSearch() {
|
|
||||||
document.getElementById("top-controls-regular")!.toggleAttribute("hidden");
|
|
||||||
document.getElementById("search-top-controls")!.toggleAttribute("hidden");
|
|
||||||
STATE.search = false;
|
|
||||||
STATE.setMaxTotal(STATE.maxEntries)
|
|
||||||
STATE.setEntryIndex(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
function main() {
|
|
||||||
STATE = new State()
|
|
||||||
infoRequest()
|
|
||||||
.then(res => {
|
|
||||||
STATE.maxEntries = res.count!
|
|
||||||
STATE.setMaxTotal(STATE.maxEntries)
|
|
||||||
document.getElementById("hakurei-version")!.textContent = res.hakurei_version!
|
|
||||||
STATE.updateRange()
|
|
||||||
STATE.updateListings()
|
|
||||||
})
|
|
||||||
for (let e of document.getElementsByClassName("page-number")) {
|
|
||||||
e.addEventListener("change", (_) => {
|
|
||||||
setPage(parseInt((e as HTMLInputElement).value))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
document.getElementById("count")?.addEventListener("change", (event) => {
|
|
||||||
STATE.setEntriesPerPage(parseInt((event.target as HTMLSelectElement).value))
|
|
||||||
})
|
|
||||||
document.getElementById("sort")?.addEventListener("change", (event) => {
|
|
||||||
STATE.setSortOrder(parseInt((event.target as HTMLSelectElement).value))
|
|
||||||
})
|
|
||||||
document.getElementById("search")?.addEventListener("keyup", (event) => {
|
|
||||||
if (event.key === 'Enter') doSearch()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
0
cmd/pkgserver/ui/static/_common.scss
Normal file
0
cmd/pkgserver/ui/static/_common.scss
Normal file
6
cmd/pkgserver/ui/static/dark.scss
Normal file
6
cmd/pkgserver/ui/static/dark.scss
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
@use 'common';
|
||||||
|
|
||||||
|
html {
|
||||||
|
background-color: #2c2c2c;
|
||||||
|
color: ghostwhite;
|
||||||
|
}
|
||||||
161
cmd/pkgserver/ui/static/index.ts
Normal file
161
cmd/pkgserver/ui/static/index.ts
Normal file
@@ -0,0 +1,161 @@
|
|||||||
|
function assertGetElementById(id: string): HTMLElement {
|
||||||
|
let elem = document.getElementById(id)
|
||||||
|
if(elem == null) throw new ReferenceError(`element with ID '${id}' missing from DOM`)
|
||||||
|
return elem
|
||||||
|
}
|
||||||
|
|
||||||
|
interface PackageIndexEntry {
|
||||||
|
name: string
|
||||||
|
size: number | null
|
||||||
|
description: string | null
|
||||||
|
website: string | null
|
||||||
|
version: string | null
|
||||||
|
report: boolean
|
||||||
|
}
|
||||||
|
function toHTML(entry: PackageIndexEntry): HTMLTableRowElement {
|
||||||
|
let v = entry.version != null ? `<span>${escapeHtml(entry.version)}</span>` : ""
|
||||||
|
let s = entry.size != null ? `<p>Size: ${toByteSizeString(entry.size)} (${entry.size})</p>` : ""
|
||||||
|
let d = entry.description != null ? `<p>${escapeHtml(entry.description)}</p>` : ""
|
||||||
|
let w = entry.website != null ? `<a href="${encodeURI(entry.website)}">Website</a>` : ""
|
||||||
|
let r = entry.report ? `Log (<a href=\"${encodeURI('/api/v1/status/' + entry.name)}\">View</a> | <a href=\"${encodeURI('/status/' + entry.name)}\">Download</a>)` : ""
|
||||||
|
let row = <HTMLTableRowElement>(document.createElement('tr'))
|
||||||
|
row.innerHTML = `<td>
|
||||||
|
<h2>${escapeHtml(entry.name)} ${v}</h2>
|
||||||
|
${d}
|
||||||
|
${s}
|
||||||
|
${w}
|
||||||
|
${r}
|
||||||
|
</td>`
|
||||||
|
return row
|
||||||
|
}
|
||||||
|
|
||||||
|
function toByteSizeString(bytes: number): string {
|
||||||
|
if(bytes == null || bytes < 1024) return `${bytes}B`
|
||||||
|
if(bytes < Math.pow(1024, 2)) return `${(bytes/1024).toFixed(2)}kiB`
|
||||||
|
if(bytes < Math.pow(1024, 3)) return `${(bytes/Math.pow(1024, 2)).toFixed(2)}MiB`
|
||||||
|
if(bytes < Math.pow(1024, 4)) return `${(bytes/Math.pow(1024, 3)).toFixed(2)}GiB`
|
||||||
|
if(bytes < Math.pow(1024, 5)) return `${(bytes/Math.pow(1024, 4)).toFixed(2)}TiB`
|
||||||
|
return "not only is it big, it's large"
|
||||||
|
}
|
||||||
|
|
||||||
|
const API_VERSION = 1
|
||||||
|
const ENDPOINT = `/api/v${API_VERSION}`
|
||||||
|
interface InfoPayload {
|
||||||
|
count: number
|
||||||
|
hakurei_version: string
|
||||||
|
}
|
||||||
|
|
||||||
|
async function infoRequest(): Promise<InfoPayload> {
|
||||||
|
const res = await fetch(`${ENDPOINT}/info`)
|
||||||
|
const payload = await res.json()
|
||||||
|
return payload
|
||||||
|
}
|
||||||
|
interface GetPayload {
|
||||||
|
values: PackageIndexEntry[]
|
||||||
|
}
|
||||||
|
|
||||||
|
enum SortOrders {
|
||||||
|
DeclarationAscending,
|
||||||
|
DeclarationDescending,
|
||||||
|
NameAscending,
|
||||||
|
NameDescending
|
||||||
|
}
|
||||||
|
async function getRequest(limit: number, index: number, sort: SortOrders): Promise<GetPayload> {
|
||||||
|
const res = await fetch(`${ENDPOINT}/get?limit=${limit}&index=${index}&sort=${sort.valueOf()}`)
|
||||||
|
const payload = await res.json()
|
||||||
|
return payload
|
||||||
|
}
|
||||||
|
class State {
|
||||||
|
entriesPerPage: number = 10
|
||||||
|
entryIndex: number = 0
|
||||||
|
maxEntries: number = 0
|
||||||
|
sort: SortOrders = SortOrders.DeclarationAscending
|
||||||
|
|
||||||
|
getEntriesPerPage(): number {
|
||||||
|
return this.entriesPerPage
|
||||||
|
}
|
||||||
|
setEntriesPerPage(entriesPerPage: number) {
|
||||||
|
this.entriesPerPage = entriesPerPage
|
||||||
|
this.setEntryIndex(Math.floor(this.getEntryIndex() / entriesPerPage) * entriesPerPage)
|
||||||
|
}
|
||||||
|
getEntryIndex(): number {
|
||||||
|
return this.entryIndex
|
||||||
|
}
|
||||||
|
setEntryIndex(entryIndex: number) {
|
||||||
|
this.entryIndex = entryIndex
|
||||||
|
this.updatePage()
|
||||||
|
this.updateRange()
|
||||||
|
this.updateListings()
|
||||||
|
}
|
||||||
|
getMaxEntries(): number {
|
||||||
|
return this.maxEntries
|
||||||
|
}
|
||||||
|
setMaxEntries(max: number) {
|
||||||
|
this.maxEntries = max
|
||||||
|
}
|
||||||
|
getSortOrder(): SortOrders {
|
||||||
|
return this.sort
|
||||||
|
}
|
||||||
|
setSortOrder(sortOrder: SortOrders) {
|
||||||
|
this.sort = sortOrder
|
||||||
|
this.setEntryIndex(0)
|
||||||
|
}
|
||||||
|
updatePage() {
|
||||||
|
let page = Math.ceil(((this.getEntryIndex() + this.getEntriesPerPage()) - 1) / this.getEntriesPerPage())
|
||||||
|
assertGetElementById("page-number").innerText = String(page)
|
||||||
|
}
|
||||||
|
updateRange() {
|
||||||
|
let max = Math.min(this.getEntryIndex() + this.getEntriesPerPage(), this.getMaxEntries())
|
||||||
|
assertGetElementById("entry-counter").innerText = `${this.getEntryIndex() + 1}-${max} of ${this.getMaxEntries()}`
|
||||||
|
}
|
||||||
|
updateListings() {
|
||||||
|
getRequest(this.getEntriesPerPage(), this.getEntryIndex(), this.getSortOrder())
|
||||||
|
.then(res => {
|
||||||
|
let table = assertGetElementById("pkg-list")
|
||||||
|
table.innerHTML = ''
|
||||||
|
res.values.forEach((row) => {
|
||||||
|
table.appendChild(toHTML(row))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
let STATE: State
|
||||||
|
|
||||||
|
function prevPage() {
|
||||||
|
let index = STATE.getEntryIndex()
|
||||||
|
STATE.setEntryIndex(Math.max(0, index - STATE.getEntriesPerPage()))
|
||||||
|
}
|
||||||
|
function nextPage() {
|
||||||
|
let index = STATE.getEntryIndex()
|
||||||
|
STATE.setEntryIndex(Math.min((Math.ceil(STATE.getMaxEntries() / STATE.getEntriesPerPage()) * STATE.getEntriesPerPage()) - STATE.getEntriesPerPage(), index + STATE.getEntriesPerPage()))
|
||||||
|
}
|
||||||
|
|
||||||
|
function escapeHtml(str: string): string {
|
||||||
|
if(str === undefined) return ""
|
||||||
|
return str
|
||||||
|
.replace(/&/g, '&')
|
||||||
|
.replace(/</g, '<')
|
||||||
|
.replace(/>/g, '>')
|
||||||
|
.replace(/"/g, '"')
|
||||||
|
.replace(/'/g, ''')
|
||||||
|
}
|
||||||
|
|
||||||
|
document.addEventListener("DOMContentLoaded", () => {
|
||||||
|
STATE = new State()
|
||||||
|
infoRequest()
|
||||||
|
.then(res => {
|
||||||
|
STATE.setMaxEntries(res.count)
|
||||||
|
assertGetElementById("hakurei-version").innerText = res.hakurei_version
|
||||||
|
STATE.updateRange()
|
||||||
|
STATE.updateListings()
|
||||||
|
})
|
||||||
|
|
||||||
|
assertGetElementById("count").addEventListener("change", (event) => {
|
||||||
|
STATE.setEntriesPerPage(parseInt((event.target as HTMLSelectElement).value))
|
||||||
|
})
|
||||||
|
assertGetElementById("sort").addEventListener("change", (event) => {
|
||||||
|
STATE.setSortOrder(parseInt((event.target as HTMLSelectElement).value))
|
||||||
|
})
|
||||||
|
})
|
||||||
6
cmd/pkgserver/ui/static/light.scss
Normal file
6
cmd/pkgserver/ui/static/light.scss
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
@use 'common';
|
||||||
|
|
||||||
|
html {
|
||||||
|
background-color: #d3d3d3;
|
||||||
|
color: black;
|
||||||
|
}
|
||||||
@@ -1,21 +0,0 @@
|
|||||||
.page-number {
|
|
||||||
width: 2em;
|
|
||||||
text-align: center;
|
|
||||||
}
|
|
||||||
.page-number {
|
|
||||||
width: 2em;
|
|
||||||
text-align: center;
|
|
||||||
}
|
|
||||||
|
|
||||||
@media (prefers-color-scheme: dark) {
|
|
||||||
html {
|
|
||||||
background-color: #2c2c2c;
|
|
||||||
color: ghostwhite;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@media (prefers-color-scheme: light) {
|
|
||||||
html {
|
|
||||||
background-color: #d3d3d3;
|
|
||||||
color: black;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
6
cmd/pkgserver/ui/static/tsconfig.json
Normal file
6
cmd/pkgserver/ui/static/tsconfig.json
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
{
|
||||||
|
"compilerOptions": {
|
||||||
|
"strict": true,
|
||||||
|
"target": "ES2024"
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,8 +0,0 @@
|
|||||||
{
|
|
||||||
"compilerOptions": {
|
|
||||||
"target": "ES2024",
|
|
||||||
"strict": true,
|
|
||||||
"alwaysStrict": true,
|
|
||||||
"outDir": "static"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -4,6 +4,6 @@ package main
|
|||||||
|
|
||||||
import "embed"
|
import "embed"
|
||||||
|
|
||||||
//go:generate tsc -p ui
|
//go:generate sh -c "sass ui/static/dark.scss ui/static/dark.css && sass ui/static/light.scss ui/static/light.css && tsc -p ui/static"
|
||||||
//go:embed ui/*
|
//go:embed ui/*
|
||||||
var content embed.FS
|
var content embed.FS
|
||||||
|
|||||||
@@ -1,2 +1,2 @@
|
|||||||
// Import all test files to register their test suites.
|
// Import all test files to register their test suites.
|
||||||
import "./index_test.js";
|
import "./sample_tests.js";
|
||||||
|
|||||||
@@ -1,2 +0,0 @@
|
|||||||
import { suite, test } from "./lib/test.js";
|
|
||||||
import "./ui/index.js";
|
|
||||||
@@ -2,8 +2,8 @@
|
|||||||
// DSL
|
// DSL
|
||||||
|
|
||||||
type TestTree = TestGroup | Test;
|
type TestTree = TestGroup | Test;
|
||||||
type TestGroup = { name: string; children: TestTree[] };
|
type TestGroup = { name: string, children: TestTree[] };
|
||||||
type Test = { name: string; test: (t: TestController) => void };
|
type Test = { name: string, test: (t: TestController) => void };
|
||||||
|
|
||||||
export class TestRegistrar {
|
export class TestRegistrar {
|
||||||
#suites: TestGroup[];
|
#suites: TestGroup[];
|
||||||
@@ -13,7 +13,7 @@ export class TestRegistrar {
|
|||||||
}
|
}
|
||||||
|
|
||||||
suite(name: string, children: TestTree[]) {
|
suite(name: string, children: TestTree[]) {
|
||||||
checkDuplicates(name, children);
|
checkDuplicates(name, children)
|
||||||
this.#suites.push({ name, children });
|
this.#suites.push({ name, children });
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -230,8 +230,8 @@ export class StreamReporter implements Reporter {
|
|||||||
this.#displaySection("skips", this.#skips);
|
this.#displaySection("skips", this.#skips);
|
||||||
this.stream.writeln("");
|
this.stream.writeln("");
|
||||||
this.stream.writeln(
|
this.stream.writeln(
|
||||||
`${this.#successes.length} succeeded, ${this.#failures.length} failed` +
|
`${this.#successes.length} succeeded, ${this.#failures.length} failed`
|
||||||
(this.#skips.length ? `, ${this.#skips.length} skipped` : ""),
|
+ (this.#skips.length ? `, ${this.#skips.length} skipped` : "")
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,87 +0,0 @@
|
|||||||
/*
|
|
||||||
* When updating the theme colors, also update them in success-closed.svg and
|
|
||||||
* success-open.svg!
|
|
||||||
*/
|
|
||||||
|
|
||||||
:root {
|
|
||||||
--bg: #d3d3d3;
|
|
||||||
--fg: black;
|
|
||||||
}
|
|
||||||
|
|
||||||
@media (prefers-color-scheme: dark) {
|
|
||||||
:root {
|
|
||||||
--bg: #2c2c2c;
|
|
||||||
--fg: ghostwhite;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
html {
|
|
||||||
background-color: var(--bg);
|
|
||||||
color: var(--fg);
|
|
||||||
}
|
|
||||||
|
|
||||||
h1, p, summary, noscript {
|
|
||||||
font-family: sans-serif;
|
|
||||||
}
|
|
||||||
|
|
||||||
noscript {
|
|
||||||
font-size: 16pt;
|
|
||||||
}
|
|
||||||
|
|
||||||
.root {
|
|
||||||
margin: 1rem 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
details.test-node {
|
|
||||||
margin-left: 1rem;
|
|
||||||
padding: 0.2rem 0.5rem;
|
|
||||||
border-left: 2px dashed var(--fg);
|
|
||||||
> summary {
|
|
||||||
cursor: pointer;
|
|
||||||
}
|
|
||||||
&.success > summary::marker {
|
|
||||||
/*
|
|
||||||
* WebKit only supports color and font-size properties in ::marker [1], and
|
|
||||||
* its ::-webkit-details-marker only supports hiding the marker entirely
|
|
||||||
* [2], contrary to mdn's example [3]; thus, set a color as a fallback:
|
|
||||||
* while it may not be accessible for colorblind individuals, it's better
|
|
||||||
* than no indication of a test's state for anyone, as that there's no other
|
|
||||||
* way to include an indication in the marker on WebKit.
|
|
||||||
*
|
|
||||||
* [1]: https://developer.mozilla.org/en-US/docs/Web/CSS/Reference/Selectors/::marker#browser_compatibility
|
|
||||||
* [2]: https://developer.mozilla.org/en-US/docs/Web/HTML/Reference/Elements/summary#default_style
|
|
||||||
* [3]: https://developer.mozilla.org/en-US/docs/Web/HTML/Reference/Elements/summary#changing_the_summarys_icon
|
|
||||||
*/
|
|
||||||
color: var(--fg);
|
|
||||||
content: url("/test/success-closed.svg") / "success";
|
|
||||||
}
|
|
||||||
&.success[open] > summary::marker {
|
|
||||||
content: url("/test/success-open.svg") / "success";
|
|
||||||
}
|
|
||||||
&.failure > summary::marker {
|
|
||||||
color: red;
|
|
||||||
content: url("/test/failure-closed.svg") / "failure";
|
|
||||||
}
|
|
||||||
&.failure[open] > summary::marker {
|
|
||||||
content: url("/test/failure-open.svg") / "failure";
|
|
||||||
}
|
|
||||||
&.skip > summary::marker {
|
|
||||||
color: blue;
|
|
||||||
content: url("/test/skip-closed.svg") / "skip";
|
|
||||||
}
|
|
||||||
&.skip[open] > summary::marker {
|
|
||||||
content: url("/test/skip-open.svg") / "skip";
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
p.test-desc {
|
|
||||||
margin: 0 0 0 1rem;
|
|
||||||
padding: 2px 0;
|
|
||||||
> pre {
|
|
||||||
margin: 0;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
.italic {
|
|
||||||
font-style: italic;
|
|
||||||
}
|
|
||||||
@@ -3,7 +3,7 @@
|
|||||||
<head>
|
<head>
|
||||||
<meta charset="UTF-8">
|
<meta charset="UTF-8">
|
||||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||||
<link rel="stylesheet" href="/test/style.css">
|
<link rel="stylesheet" href="/testui/style.css">
|
||||||
<title>PkgServer Tests</title>
|
<title>PkgServer Tests</title>
|
||||||
</head>
|
</head>
|
||||||
<body>
|
<body>
|
||||||
|
|||||||
88
cmd/pkgserver/ui_test/lib/ui.scss
Normal file
88
cmd/pkgserver/ui_test/lib/ui.scss
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
/*
|
||||||
|
* If updating the theme colors, also update them in success-closed.svg and
|
||||||
|
* success-open.svg!
|
||||||
|
*/
|
||||||
|
|
||||||
|
:root {
|
||||||
|
--bg: #d3d3d3;
|
||||||
|
--fg: black;
|
||||||
|
}
|
||||||
|
|
||||||
|
@media (prefers-color-scheme: dark) {
|
||||||
|
:root {
|
||||||
|
--bg: #2c2c2c;
|
||||||
|
--fg: ghostwhite;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
html {
|
||||||
|
background-color: var(--bg);
|
||||||
|
color: var(--fg);
|
||||||
|
}
|
||||||
|
|
||||||
|
h1, p, summary, noscript {
|
||||||
|
font-family: sans-serif;
|
||||||
|
}
|
||||||
|
|
||||||
|
noscript {
|
||||||
|
font-size: 16pt;
|
||||||
|
}
|
||||||
|
|
||||||
|
.root {
|
||||||
|
margin: 1rem 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
details.test-node {
|
||||||
|
margin-left: 1rem;
|
||||||
|
padding: 0.2rem 0.5rem;
|
||||||
|
border-left: 2px dashed var(--fg);
|
||||||
|
> summary {
|
||||||
|
cursor: pointer;
|
||||||
|
}
|
||||||
|
&.success > summary::marker {
|
||||||
|
/*
|
||||||
|
* WebKit only supports color and font-size properties in ::marker [1],
|
||||||
|
* and its ::-webkit-details-marker only supports hiding the marker
|
||||||
|
* entirely [2], contrary to mdn's example [3]; thus, set a color as
|
||||||
|
* a fallback: while it may not be accessible for colorblind
|
||||||
|
* individuals, it's better than no indication of a test's state for
|
||||||
|
* anyone, as that there's no other way to include an indication in the
|
||||||
|
* marker on WebKit.
|
||||||
|
*
|
||||||
|
* [1]: https://developer.mozilla.org/en-US/docs/Web/CSS/Reference/Selectors/::marker#browser_compatibility
|
||||||
|
* [2]: https://developer.mozilla.org/en-US/docs/Web/HTML/Reference/Elements/summary#default_style
|
||||||
|
* [3]: https://developer.mozilla.org/en-US/docs/Web/HTML/Reference/Elements/summary#changing_the_summarys_icon
|
||||||
|
*/
|
||||||
|
color: var(--fg);
|
||||||
|
content: url("/testui/success-closed.svg") / "success";
|
||||||
|
}
|
||||||
|
&.success[open] > summary::marker {
|
||||||
|
content: url("/testui/success-open.svg") / "success";
|
||||||
|
}
|
||||||
|
&.failure > summary::marker {
|
||||||
|
color: red;
|
||||||
|
content: url("/testui/failure-closed.svg") / "failure";
|
||||||
|
}
|
||||||
|
&.failure[open] > summary::marker {
|
||||||
|
content: url("/testui/failure-open.svg") / "failure";
|
||||||
|
}
|
||||||
|
&.skip > summary::marker {
|
||||||
|
color: blue;
|
||||||
|
content: url("/testui/skip-closed.svg") / "skip";
|
||||||
|
}
|
||||||
|
&.skip[open] > summary::marker {
|
||||||
|
content: url("/testui/skip-open.svg") / "skip";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
p.test-desc {
|
||||||
|
margin: 0 0 0 1rem;
|
||||||
|
padding: 2px 0;
|
||||||
|
> pre {
|
||||||
|
margin: 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
.italic {
|
||||||
|
font-style: italic;
|
||||||
|
}
|
||||||
85
cmd/pkgserver/ui_test/sample_tests.ts
Normal file
85
cmd/pkgserver/ui_test/sample_tests.ts
Normal file
@@ -0,0 +1,85 @@
|
|||||||
|
import { NoOpReporter, TestRegistrar, context, group, suite, test } from "./lib/test.js";
|
||||||
|
|
||||||
|
suite("dog", [
|
||||||
|
group("tail", [
|
||||||
|
test("wags when happy", (t) => {
|
||||||
|
if (0 / 0 !== Infinity / Infinity) {
|
||||||
|
t.fatal("undefined must not be defined");
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
test("idle when down", (t) => {
|
||||||
|
t.log("test test");
|
||||||
|
t.error("dog whining noises go here");
|
||||||
|
}),
|
||||||
|
]),
|
||||||
|
test("likes headpats", (t) => {
|
||||||
|
if (2 !== 2) {
|
||||||
|
t.error("IEEE 754 violated: 2 is NaN");
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
context("near cat", [
|
||||||
|
test("is ecstatic", (t) => {
|
||||||
|
if (("b" + "a" + + "a" + "a").toLowerCase() === "banana") {
|
||||||
|
t.error("🍌🍌🍌");
|
||||||
|
t.error("🍌🍌🍌");
|
||||||
|
t.error("🍌🍌🍌");
|
||||||
|
t.failNow();
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
test("playfully bites cats' tails", (t) => {
|
||||||
|
t.log("arf!");
|
||||||
|
throw new Error("nom");
|
||||||
|
}),
|
||||||
|
]),
|
||||||
|
]);
|
||||||
|
|
||||||
|
suite("cat", [
|
||||||
|
test("likes headpats", (t) => {
|
||||||
|
t.log("meow");
|
||||||
|
}),
|
||||||
|
test("owns skipping rope", (t) => {
|
||||||
|
t.skip("this cat is stuck in your machine!");
|
||||||
|
t.log("never logged");
|
||||||
|
}),
|
||||||
|
test("tester tester", (t) => {
|
||||||
|
const r = new TestRegistrar();
|
||||||
|
r.suite("explod", [
|
||||||
|
test("with yarn", (t) => {
|
||||||
|
t.log("YAY");
|
||||||
|
}),
|
||||||
|
]);
|
||||||
|
const reporter = new NoOpReporter();
|
||||||
|
r.run(reporter);
|
||||||
|
if (reporter.suites.length !== 1) {
|
||||||
|
t.fatal(`incorrect number of suites registered got=${reporter.suites.length} want=1`);
|
||||||
|
}
|
||||||
|
const suite = reporter.suites[0];
|
||||||
|
if (suite.name !== "explod") {
|
||||||
|
t.error(`suite name incorrect got='${suite.name}' want='explod'`);
|
||||||
|
}
|
||||||
|
if (suite.children.length !== 1) {
|
||||||
|
t.fatal(`incorrect number of suite children got=${suite.children.length} want=1`);
|
||||||
|
}
|
||||||
|
const test_ = suite.children[0];
|
||||||
|
if (test_.name !== "with yarn") {
|
||||||
|
t.error(`incorrect test name got='${test_.name}' want='with yarn'`);
|
||||||
|
}
|
||||||
|
if ("children" in test_) {
|
||||||
|
t.error(`expected leaf node, got group of ${test_.children.length} children`);
|
||||||
|
}
|
||||||
|
if (!reporter.finalized) t.error(`expected reporter to have been finalized`);
|
||||||
|
if (reporter.results.length !== 1) {
|
||||||
|
t.fatal(`incorrect result count got=${reporter.results.length} want=1`);
|
||||||
|
}
|
||||||
|
const result = reporter.results[0];
|
||||||
|
if (!(result.path.length === 2 &&
|
||||||
|
result.path[0] === "explod" &&
|
||||||
|
result.path[1] === "with yarn")) {
|
||||||
|
t.error(`incorrect result path got=${result.path} want=["explod", "with yarn"]`);
|
||||||
|
}
|
||||||
|
if (result.state !== "success") t.error(`expected test to succeed`);
|
||||||
|
if (!(result.logs.length === 1 && result.logs[0] === "YAY")) {
|
||||||
|
t.error(`incorrect result logs got=${result.logs} want=["YAY"]`);
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
]);
|
||||||
@@ -1,8 +1,6 @@
|
|||||||
{
|
{
|
||||||
"compilerOptions": {
|
"compilerOptions": {
|
||||||
"target": "ES2024",
|
|
||||||
"strict": true,
|
"strict": true,
|
||||||
"alwaysStrict": true,
|
"target": "ES2024"
|
||||||
"outDir": "static"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,8 +7,8 @@
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
#define SHAREFS_MEDIA_RW_ID (1 << 10) - 1 /* owning gid presented to userspace */
|
#define SHAREFS_MEDIA_RW_ID (1 << 10) - 1 /* owning gid presented to userspace */
|
||||||
#define SHAREFS_PERM_DIR 0770 /* permission bits for directories presented to userspace */
|
#define SHAREFS_PERM_DIR 0700 /* permission bits for directories presented to userspace */
|
||||||
#define SHAREFS_PERM_REG 0660 /* permission bits for regular files presented to userspace */
|
#define SHAREFS_PERM_REG 0600 /* permission bits for regular files presented to userspace */
|
||||||
#define SHAREFS_FORBIDDEN_FLAGS O_DIRECT /* these open flags are cleared unconditionally */
|
#define SHAREFS_FORBIDDEN_FLAGS O_DIRECT /* these open flags are cleared unconditionally */
|
||||||
|
|
||||||
/* sharefs_private is populated by sharefs_init and contains process-wide context */
|
/* sharefs_private is populated by sharefs_init and contains process-wide context */
|
||||||
|
|||||||
@@ -19,21 +19,22 @@ import (
|
|||||||
"encoding/gob"
|
"encoding/gob"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"path/filepath"
|
"path"
|
||||||
"runtime"
|
"runtime"
|
||||||
"runtime/cgo"
|
"runtime/cgo"
|
||||||
"strconv"
|
"strconv"
|
||||||
"syscall"
|
"syscall"
|
||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
"hakurei.app/check"
|
|
||||||
"hakurei.app/container"
|
"hakurei.app/container"
|
||||||
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/container/fhs"
|
||||||
"hakurei.app/container/std"
|
"hakurei.app/container/std"
|
||||||
"hakurei.app/fhs"
|
|
||||||
"hakurei.app/hst"
|
"hakurei.app/hst"
|
||||||
"hakurei.app/internal/helper/proc"
|
"hakurei.app/internal/helper/proc"
|
||||||
"hakurei.app/internal/info"
|
"hakurei.app/internal/info"
|
||||||
@@ -84,10 +85,7 @@ func destroySetup(private_data unsafe.Pointer) (ok bool) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
//export sharefs_init
|
//export sharefs_init
|
||||||
func sharefs_init(
|
func sharefs_init(_ *C.struct_fuse_conn_info, cfg *C.struct_fuse_config) unsafe.Pointer {
|
||||||
_ *C.struct_fuse_conn_info,
|
|
||||||
cfg *C.struct_fuse_config,
|
|
||||||
) unsafe.Pointer {
|
|
||||||
ctx := C.fuse_get_context()
|
ctx := C.fuse_get_context()
|
||||||
priv := (*C.struct_sharefs_private)(ctx.private_data)
|
priv := (*C.struct_sharefs_private)(ctx.private_data)
|
||||||
setup := cgo.Handle(priv.setup).Value().(*setupState)
|
setup := cgo.Handle(priv.setup).Value().(*setupState)
|
||||||
@@ -105,11 +103,7 @@ func sharefs_init(
|
|||||||
cfg.negative_timeout = 0
|
cfg.negative_timeout = 0
|
||||||
|
|
||||||
// all future filesystem operations happen through this dirfd
|
// all future filesystem operations happen through this dirfd
|
||||||
if fd, err := syscall.Open(
|
if fd, err := syscall.Open(setup.Source.String(), syscall.O_DIRECTORY|syscall.O_RDONLY|syscall.O_CLOEXEC, 0); err != nil {
|
||||||
setup.Source.String(),
|
|
||||||
syscall.O_DIRECTORY|syscall.O_RDONLY|syscall.O_CLOEXEC,
|
|
||||||
0,
|
|
||||||
); err != nil {
|
|
||||||
log.Printf("cannot open %q: %v", setup.Source, err)
|
log.Printf("cannot open %q: %v", setup.Source, err)
|
||||||
goto fail
|
goto fail
|
||||||
} else if err = syscall.Fchdir(fd); err != nil {
|
} else if err = syscall.Fchdir(fd); err != nil {
|
||||||
@@ -144,9 +138,9 @@ func sharefs_destroy(private_data unsafe.Pointer) {
|
|||||||
func showHelp(args *fuseArgs) {
|
func showHelp(args *fuseArgs) {
|
||||||
executableName := sharefsName
|
executableName := sharefsName
|
||||||
if args.argc > 0 {
|
if args.argc > 0 {
|
||||||
executableName = filepath.Base(C.GoString(*args.argv))
|
executableName = path.Base(C.GoString(*args.argv))
|
||||||
} else if name, err := os.Executable(); err == nil {
|
} else if name, err := os.Executable(); err == nil {
|
||||||
executableName = filepath.Base(name)
|
executableName = path.Base(name)
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("usage: %s [options] <mountpoint>\n\n", executableName)
|
fmt.Printf("usage: %s [options] <mountpoint>\n\n", executableName)
|
||||||
@@ -175,11 +169,8 @@ func parseOpts(args *fuseArgs, setup *setupState, log *log.Logger) (ok bool) {
|
|||||||
// Decimal string representation of gid to set when running as root.
|
// Decimal string representation of gid to set when running as root.
|
||||||
setgid *C.char
|
setgid *C.char
|
||||||
|
|
||||||
// Decimal string representation of open file descriptor to read
|
// Decimal string representation of open file descriptor to read setupState from.
|
||||||
// setupState from.
|
// This is an internal detail for containerisation and must not be specified directly.
|
||||||
//
|
|
||||||
// This is an internal detail for containerisation and must not be
|
|
||||||
// specified directly.
|
|
||||||
setup *C.char
|
setup *C.char
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -262,8 +253,7 @@ func parseOpts(args *fuseArgs, setup *setupState, log *log.Logger) (ok bool) {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// copyArgs returns a heap allocated copy of an argument slice in fuse_args
|
// copyArgs returns a heap allocated copy of an argument slice in fuse_args representation.
|
||||||
// representation.
|
|
||||||
func copyArgs(s ...string) fuseArgs {
|
func copyArgs(s ...string) fuseArgs {
|
||||||
if len(s) == 0 {
|
if len(s) == 0 {
|
||||||
return fuseArgs{argc: 0, argv: nil, allocated: 0}
|
return fuseArgs{argc: 0, argv: nil, allocated: 0}
|
||||||
@@ -279,7 +269,6 @@ func copyArgs(s ...string) fuseArgs {
|
|||||||
func freeArgs(args *fuseArgs) { C.fuse_opt_free_args(args) }
|
func freeArgs(args *fuseArgs) { C.fuse_opt_free_args(args) }
|
||||||
|
|
||||||
// unsafeAddArgument adds an argument to fuseArgs via fuse_opt_add_arg.
|
// unsafeAddArgument adds an argument to fuseArgs via fuse_opt_add_arg.
|
||||||
//
|
|
||||||
// The last byte of arg must be 0.
|
// The last byte of arg must be 0.
|
||||||
func unsafeAddArgument(args *fuseArgs, arg string) {
|
func unsafeAddArgument(args *fuseArgs, arg string) {
|
||||||
C.fuse_opt_add_arg(args, (*C.char)(unsafe.Pointer(unsafe.StringData(arg))))
|
C.fuse_opt_add_arg(args, (*C.char)(unsafe.Pointer(unsafe.StringData(arg))))
|
||||||
@@ -299,8 +288,8 @@ func _main(s ...string) (exitCode int) {
|
|||||||
args := copyArgs(s...)
|
args := copyArgs(s...)
|
||||||
defer freeArgs(&args)
|
defer freeArgs(&args)
|
||||||
|
|
||||||
// this causes the kernel to enforce access control based on struct stat
|
// this causes the kernel to enforce access control based on
|
||||||
// populated by sharefs_getattr
|
// struct stat populated by sharefs_getattr
|
||||||
unsafeAddArgument(&args, "-odefault_permissions\x00")
|
unsafeAddArgument(&args, "-odefault_permissions\x00")
|
||||||
|
|
||||||
var priv C.struct_sharefs_private
|
var priv C.struct_sharefs_private
|
||||||
@@ -464,19 +453,15 @@ func _main(s ...string) (exitCode int) {
|
|||||||
z.Stdin, z.Stdout, z.Stderr = os.Stdin, os.Stdout, os.Stderr
|
z.Stdin, z.Stdout, z.Stderr = os.Stdin, os.Stdout, os.Stderr
|
||||||
}
|
}
|
||||||
z.Bind(z.Path, z.Path, 0)
|
z.Bind(z.Path, z.Path, 0)
|
||||||
setup.Fuse = int(proc.ExtraFileSlice(
|
setup.Fuse = int(proc.ExtraFileSlice(&z.ExtraFiles, os.NewFile(uintptr(C.fuse_session_fd(se)), "fuse")))
|
||||||
&z.ExtraFiles,
|
|
||||||
os.NewFile(uintptr(C.fuse_session_fd(se)), "fuse"),
|
|
||||||
))
|
|
||||||
|
|
||||||
var setupPipe [2]*os.File
|
var setupWriter io.WriteCloser
|
||||||
if r, w, err := os.Pipe(); err != nil {
|
if fd, w, err := container.Setup(&z.ExtraFiles); err != nil {
|
||||||
log.Println(err)
|
log.Println(err)
|
||||||
return 5
|
return 5
|
||||||
} else {
|
} else {
|
||||||
z.Args = append(z.Args, "-osetup="+strconv.Itoa(3+len(z.ExtraFiles)))
|
z.Args = append(z.Args, "-osetup="+strconv.Itoa(fd))
|
||||||
z.ExtraFiles = append(z.ExtraFiles, r)
|
setupWriter = w
|
||||||
setupPipe[0], setupPipe[1] = r, w
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := z.Start(); err != nil {
|
if err := z.Start(); err != nil {
|
||||||
@@ -487,9 +472,6 @@ func _main(s ...string) (exitCode int) {
|
|||||||
}
|
}
|
||||||
return 5
|
return 5
|
||||||
}
|
}
|
||||||
if err := setupPipe[0].Close(); err != nil {
|
|
||||||
log.Println(err)
|
|
||||||
}
|
|
||||||
if err := z.Serve(); err != nil {
|
if err := z.Serve(); err != nil {
|
||||||
if m, ok := message.GetMessage(err); ok {
|
if m, ok := message.GetMessage(err); ok {
|
||||||
log.Println(m)
|
log.Println(m)
|
||||||
@@ -499,10 +481,10 @@ func _main(s ...string) (exitCode int) {
|
|||||||
return 5
|
return 5
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := gob.NewEncoder(setupPipe[1]).Encode(&setup); err != nil {
|
if err := gob.NewEncoder(setupWriter).Encode(&setup); err != nil {
|
||||||
log.Println(err)
|
log.Println(err)
|
||||||
return 5
|
return 5
|
||||||
} else if err = setupPipe[1].Close(); err != nil {
|
} else if err = setupWriter.Close(); err != nil {
|
||||||
log.Println(err)
|
log.Println(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ import (
|
|||||||
"reflect"
|
"reflect"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestParseOpts(t *testing.T) {
|
func TestParseOpts(t *testing.T) {
|
||||||
|
|||||||
@@ -1,10 +1,3 @@
|
|||||||
// The sharefs FUSE filesystem is a permissionless shared filesystem.
|
|
||||||
//
|
|
||||||
// This filesystem is the primary means of file sharing between hakurei
|
|
||||||
// application containers. It serves the same purpose in Rosa OS as /sdcard
|
|
||||||
// does in AOSP.
|
|
||||||
//
|
|
||||||
// See help message for all available options.
|
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|||||||
@@ -1,122 +0,0 @@
|
|||||||
//go:build raceattr
|
|
||||||
|
|
||||||
// The raceattr program reproduces vfs inode file attribute race.
|
|
||||||
//
|
|
||||||
// Even though libfuse high-level API presents the address of a struct stat
|
|
||||||
// alongside struct fuse_context, file attributes are actually inherent to the
|
|
||||||
// inode, instead of the specific call from userspace. The kernel implementation
|
|
||||||
// in fs/fuse/xattr.c appears to make stale data in the inode (set by a previous
|
|
||||||
// call) impossible or very unlikely to reach userspace via the stat family of
|
|
||||||
// syscalls. However, when using default_permissions to have the VFS check
|
|
||||||
// permissions, this race still happens, despite the resulting struct stat being
|
|
||||||
// correct when overriding the check via capabilities otherwise.
|
|
||||||
//
|
|
||||||
// This program reproduces the failure, but because of its continuous nature, it
|
|
||||||
// is provided independent of the vm integration test suite.
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"flag"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"os/signal"
|
|
||||||
"runtime"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"syscall"
|
|
||||||
)
|
|
||||||
|
|
||||||
// newStatAs returns a worker function intended to run on its own goroutine.
// The worker locks its OS thread, switches that single thread to uid via a
// raw setuid(2) syscall, then repeatedly lstat(2)s pathname until ctx is
// cancelled.
//
// A failed lstat or a mismatched stat uid indicates the vfs inode attribute
// race described in the package comment; when observed, ok is set and,
// unless continuous is true, cancel is called to stop all sibling workers.
// n is incremented once per completed loop iteration.
func newStatAs(
	ctx context.Context, cancel context.CancelFunc,
	n *atomic.Uint64, ok *atomic.Bool,
	uid uint32, pathname string,
	continuous bool,
) func() {
	return func() {
		// The raw SYS_SETUID below acts on the calling thread only
		// (unlike syscall.Setuid, which the Go runtime applies to all
		// threads), so this goroutine must stay wired to its OS thread
		// for the credentials to hold across the loop.
		runtime.LockOSThread()
		defer cancel()

		if _, _, errno := syscall.Syscall(
			syscall.SYS_SETUID, uintptr(uid),
			0, 0,
		); errno != 0 {
			// Cancel so all workers wind down; the loop below exits on
			// its first ctx check rather than returning here.
			cancel()
			log.Printf("cannot set uid to %d: %s", uid, errno)
		}

		var stat syscall.Stat_t
		for {
			// Cancellation is also how a sibling worker signals that
			// the race has already been reproduced.
			if ctx.Err() != nil {
				return
			}

			if err := syscall.Lstat(pathname, &stat); err != nil {
				// SHAREFS_PERM_DIR not world executable, or
				// SHAREFS_PERM_REG not world readable
				if !continuous {
					cancel()
				}
				ok.Store(true)
				log.Printf("uid %d: %v", uid, err)
			} else if stat.Uid != uid {
				// appears to be unreachable
				if !continuous {
					cancel()
				}
				ok.Store(true)
				log.Printf("got uid %d instead of %d", stat.Uid, uid)
			}
			n.Add(1)
		}
	}
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
log.SetFlags(0)
|
|
||||||
log.SetPrefix("raceattr: ")
|
|
||||||
|
|
||||||
p := flag.String("target", "/sdcard/raceattr", "pathname of test file")
|
|
||||||
u0 := flag.Int("uid0", 1<<10-1, "first uid")
|
|
||||||
u1 := flag.Int("uid1", 1<<10-2, "second uid")
|
|
||||||
count := flag.Int("count", 1, "threads per uid")
|
|
||||||
continuous := flag.Bool("continuous", false, "keep running even after reproduce")
|
|
||||||
flag.Parse()
|
|
||||||
|
|
||||||
if os.Geteuid() != 0 {
|
|
||||||
log.Fatal("this program must run as root")
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx, cancel := signal.NotifyContext(
|
|
||||||
context.Background(),
|
|
||||||
syscall.SIGINT,
|
|
||||||
syscall.SIGTERM,
|
|
||||||
syscall.SIGHUP,
|
|
||||||
)
|
|
||||||
|
|
||||||
if err := os.WriteFile(*p, nil, 0); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
wg sync.WaitGroup
|
|
||||||
|
|
||||||
n atomic.Uint64
|
|
||||||
ok atomic.Bool
|
|
||||||
)
|
|
||||||
|
|
||||||
if *count < 1 {
|
|
||||||
*count = 1
|
|
||||||
}
|
|
||||||
for range *count {
|
|
||||||
wg.Go(newStatAs(ctx, cancel, &n, &ok, uint32(*u0), *p, *continuous))
|
|
||||||
if *u1 >= 0 {
|
|
||||||
wg.Go(newStatAs(ctx, cancel, &n, &ok, uint32(*u1), *p, *continuous))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Wait()
|
|
||||||
if !*continuous && ok.Load() {
|
|
||||||
log.Printf("reproduced after %d calls", n.Load())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -4,8 +4,8 @@ import (
|
|||||||
"encoding/gob"
|
"encoding/gob"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/fhs"
|
"hakurei.app/container/fhs"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() { gob.Register(new(AutoEtcOp)) }
|
func init() { gob.Register(new(AutoEtcOp)) }
|
||||||
|
|||||||
@@ -5,8 +5,8 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/internal/stub"
|
"hakurei.app/container/stub"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAutoEtcOp(t *testing.T) {
|
func TestAutoEtcOp(t *testing.T) {
|
||||||
|
|||||||
@@ -4,8 +4,8 @@ import (
|
|||||||
"encoding/gob"
|
"encoding/gob"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/fhs"
|
"hakurei.app/container/fhs"
|
||||||
"hakurei.app/message"
|
"hakurei.app/message"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -5,9 +5,9 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/container/std"
|
"hakurei.app/container/std"
|
||||||
"hakurei.app/internal/stub"
|
"hakurei.app/container/stub"
|
||||||
"hakurei.app/message"
|
"hakurei.app/message"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -3,8 +3,6 @@ package container
|
|||||||
import (
|
import (
|
||||||
"syscall"
|
"syscall"
|
||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
"hakurei.app/ext"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -53,15 +51,15 @@ func capset(hdrp *capHeader, datap *[2]capData) error {
|
|||||||
|
|
||||||
// capBoundingSetDrop drops a capability from the calling thread's capability bounding set.
|
// capBoundingSetDrop drops a capability from the calling thread's capability bounding set.
|
||||||
func capBoundingSetDrop(cap uintptr) error {
|
func capBoundingSetDrop(cap uintptr) error {
|
||||||
return ext.Prctl(syscall.PR_CAPBSET_DROP, cap, 0)
|
return Prctl(syscall.PR_CAPBSET_DROP, cap, 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
// capAmbientClearAll clears the ambient capability set of the calling thread.
|
// capAmbientClearAll clears the ambient capability set of the calling thread.
|
||||||
func capAmbientClearAll() error {
|
func capAmbientClearAll() error {
|
||||||
return ext.Prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_CLEAR_ALL, 0)
|
return Prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_CLEAR_ALL, 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
// capAmbientRaise adds to the ambient capability set of the calling thread.
|
// capAmbientRaise adds to the ambient capability set of the calling thread.
|
||||||
func capAmbientRaise(cap uintptr) error {
|
func capAmbientRaise(cap uintptr) error {
|
||||||
return ext.Prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, cap)
|
return Prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, cap)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,10 +2,10 @@
|
|||||||
package check
|
package check
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"path/filepath"
|
"path"
|
||||||
"slices"
|
"slices"
|
||||||
"strings"
|
"strings"
|
||||||
"syscall"
|
"syscall"
|
||||||
@@ -30,16 +30,6 @@ func (e AbsoluteError) Is(target error) bool {
|
|||||||
// Absolute holds a pathname checked to be absolute.
|
// Absolute holds a pathname checked to be absolute.
|
||||||
type Absolute struct{ pathname unique.Handle[string] }
|
type Absolute struct{ pathname unique.Handle[string] }
|
||||||
|
|
||||||
var (
|
|
||||||
_ encoding.TextAppender = new(Absolute)
|
|
||||||
_ encoding.TextMarshaler = new(Absolute)
|
|
||||||
_ encoding.TextUnmarshaler = new(Absolute)
|
|
||||||
|
|
||||||
_ encoding.BinaryAppender = new(Absolute)
|
|
||||||
_ encoding.BinaryMarshaler = new(Absolute)
|
|
||||||
_ encoding.BinaryUnmarshaler = new(Absolute)
|
|
||||||
)
|
|
||||||
|
|
||||||
// ok returns whether [Absolute] is not the zero value.
|
// ok returns whether [Absolute] is not the zero value.
|
||||||
func (a *Absolute) ok() bool { return a != nil && *a != (Absolute{}) }
|
func (a *Absolute) ok() bool { return a != nil && *a != (Absolute{}) }
|
||||||
|
|
||||||
@@ -71,7 +61,7 @@ func (a *Absolute) Is(v *Absolute) bool {
|
|||||||
|
|
||||||
// NewAbs checks pathname and returns a new [Absolute] if pathname is absolute.
|
// NewAbs checks pathname and returns a new [Absolute] if pathname is absolute.
|
||||||
func NewAbs(pathname string) (*Absolute, error) {
|
func NewAbs(pathname string) (*Absolute, error) {
|
||||||
if !filepath.IsAbs(pathname) {
|
if !path.IsAbs(pathname) {
|
||||||
return nil, AbsoluteError(pathname)
|
return nil, AbsoluteError(pathname)
|
||||||
}
|
}
|
||||||
return unsafeAbs(pathname), nil
|
return unsafeAbs(pathname), nil
|
||||||
@@ -86,35 +76,46 @@ func MustAbs(pathname string) *Absolute {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Append calls [filepath.Join] with [Absolute] as the first element.
|
// Append calls [path.Join] with [Absolute] as the first element.
|
||||||
func (a *Absolute) Append(elem ...string) *Absolute {
|
func (a *Absolute) Append(elem ...string) *Absolute {
|
||||||
return unsafeAbs(filepath.Join(append([]string{a.String()}, elem...)...))
|
return unsafeAbs(path.Join(append([]string{a.String()}, elem...)...))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Dir calls [filepath.Dir] with [Absolute] as its argument.
|
// Dir calls [path.Dir] with [Absolute] as its argument.
|
||||||
func (a *Absolute) Dir() *Absolute { return unsafeAbs(filepath.Dir(a.String())) }
|
func (a *Absolute) Dir() *Absolute { return unsafeAbs(path.Dir(a.String())) }
|
||||||
|
|
||||||
// AppendText appends the checked pathname.
|
// GobEncode returns the checked pathname.
|
||||||
func (a *Absolute) AppendText(data []byte) ([]byte, error) {
|
func (a *Absolute) GobEncode() ([]byte, error) {
|
||||||
return append(data, a.String()...), nil
|
return []byte(a.String()), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarshalText returns the checked pathname.
|
// GobDecode stores data if it represents an absolute pathname.
|
||||||
func (a *Absolute) MarshalText() ([]byte, error) { return a.AppendText(nil) }
|
func (a *Absolute) GobDecode(data []byte) error {
|
||||||
|
|
||||||
// UnmarshalText stores data if it represents an absolute pathname.
|
|
||||||
func (a *Absolute) UnmarshalText(data []byte) error {
|
|
||||||
pathname := string(data)
|
pathname := string(data)
|
||||||
if !filepath.IsAbs(pathname) {
|
if !path.IsAbs(pathname) {
|
||||||
return AbsoluteError(pathname)
|
return AbsoluteError(pathname)
|
||||||
}
|
}
|
||||||
a.pathname = unique.Make(pathname)
|
a.pathname = unique.Make(pathname)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *Absolute) AppendBinary(data []byte) ([]byte, error) { return a.AppendText(data) }
|
// MarshalJSON returns a JSON representation of the checked pathname.
|
||||||
func (a *Absolute) MarshalBinary() ([]byte, error) { return a.MarshalText() }
|
func (a *Absolute) MarshalJSON() ([]byte, error) {
|
||||||
func (a *Absolute) UnmarshalBinary(data []byte) error { return a.UnmarshalText(data) }
|
return json.Marshal(a.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON stores data if it represents an absolute pathname.
|
||||||
|
func (a *Absolute) UnmarshalJSON(data []byte) error {
|
||||||
|
var pathname string
|
||||||
|
if err := json.Unmarshal(data, &pathname); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !path.IsAbs(pathname) {
|
||||||
|
return AbsoluteError(pathname)
|
||||||
|
}
|
||||||
|
a.pathname = unique.Make(pathname)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// SortAbs calls [slices.SortFunc] for a slice of [Absolute].
|
// SortAbs calls [slices.SortFunc] for a slice of [Absolute].
|
||||||
func SortAbs(x []*Absolute) {
|
func SortAbs(x []*Absolute) {
|
||||||
@@ -11,12 +11,12 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
_ "unsafe" // for go:linkname
|
_ "unsafe" // for go:linkname
|
||||||
|
|
||||||
. "hakurei.app/check"
|
. "hakurei.app/container/check"
|
||||||
)
|
)
|
||||||
|
|
||||||
// unsafeAbs returns check.Absolute on any string value.
|
// unsafeAbs returns check.Absolute on any string value.
|
||||||
//
|
//
|
||||||
//go:linkname unsafeAbs hakurei.app/check.unsafeAbs
|
//go:linkname unsafeAbs hakurei.app/container/check.unsafeAbs
|
||||||
func unsafeAbs(pathname string) *Absolute
|
func unsafeAbs(pathname string) *Absolute
|
||||||
|
|
||||||
func TestAbsoluteError(t *testing.T) {
|
func TestAbsoluteError(t *testing.T) {
|
||||||
@@ -170,20 +170,20 @@ func TestCodecAbsolute(t *testing.T) {
|
|||||||
|
|
||||||
{"good", MustAbs("/etc"),
|
{"good", MustAbs("/etc"),
|
||||||
nil,
|
nil,
|
||||||
"\t\x7f\x06\x01\x02\xff\x82\x00\x00\x00\b\xff\x80\x00\x04/etc",
|
"\t\x7f\x05\x01\x02\xff\x82\x00\x00\x00\b\xff\x80\x00\x04/etc",
|
||||||
",\xff\x83\x03\x01\x01\x06sCheck\x01\xff\x84\x00\x01\x02\x01\bPathname\x01\xff\x80\x00\x01\x05Magic\x01\x06\x00\x00\x00\t\x7f\x06\x01\x02\xff\x82\x00\x00\x00\x0f\xff\x84\x01\x04/etc\x01\xfc\xc0\xed\x00\x00\x00",
|
",\xff\x83\x03\x01\x01\x06sCheck\x01\xff\x84\x00\x01\x02\x01\bPathname\x01\xff\x80\x00\x01\x05Magic\x01\x06\x00\x00\x00\t\x7f\x05\x01\x02\xff\x82\x00\x00\x00\x0f\xff\x84\x01\x04/etc\x01\xfc\xc0\xed\x00\x00\x00",
|
||||||
|
|
||||||
`"/etc"`, `{"val":"/etc","magic":3236757504}`},
|
`"/etc"`, `{"val":"/etc","magic":3236757504}`},
|
||||||
{"not absolute", nil,
|
{"not absolute", nil,
|
||||||
AbsoluteError("etc"),
|
AbsoluteError("etc"),
|
||||||
"\t\x7f\x06\x01\x02\xff\x82\x00\x00\x00\a\xff\x80\x00\x03etc",
|
"\t\x7f\x05\x01\x02\xff\x82\x00\x00\x00\a\xff\x80\x00\x03etc",
|
||||||
",\xff\x83\x03\x01\x01\x06sCheck\x01\xff\x84\x00\x01\x02\x01\bPathname\x01\xff\x80\x00\x01\x05Magic\x01\x06\x00\x00\x00\t\x7f\x06\x01\x02\xff\x82\x00\x00\x00\x0f\xff\x84\x01\x03etc\x01\xfb\x01\x81\xda\x00\x00\x00",
|
",\xff\x83\x03\x01\x01\x06sCheck\x01\xff\x84\x00\x01\x02\x01\bPathname\x01\xff\x80\x00\x01\x05Magic\x01\x06\x00\x00\x00\t\x7f\x05\x01\x02\xff\x82\x00\x00\x00\x0f\xff\x84\x01\x03etc\x01\xfb\x01\x81\xda\x00\x00\x00",
|
||||||
|
|
||||||
`"etc"`, `{"val":"etc","magic":3236757504}`},
|
`"etc"`, `{"val":"etc","magic":3236757504}`},
|
||||||
{"zero", nil,
|
{"zero", nil,
|
||||||
new(AbsoluteError),
|
new(AbsoluteError),
|
||||||
"\t\x7f\x06\x01\x02\xff\x82\x00\x00\x00\x04\xff\x80\x00\x00",
|
"\t\x7f\x05\x01\x02\xff\x82\x00\x00\x00\x04\xff\x80\x00\x00",
|
||||||
",\xff\x83\x03\x01\x01\x06sCheck\x01\xff\x84\x00\x01\x02\x01\bPathname\x01\xff\x80\x00\x01\x05Magic\x01\x06\x00\x00\x00\t\x7f\x06\x01\x02\xff\x82\x00\x00\x00\f\xff\x84\x01\x00\x01\xfb\x01\x81\xda\x00\x00\x00",
|
",\xff\x83\x03\x01\x01\x06sCheck\x01\xff\x84\x00\x01\x02\x01\bPathname\x01\xff\x80\x00\x01\x05Magic\x01\x06\x00\x00\x00\t\x7f\x05\x01\x02\xff\x82\x00\x00\x00\f\xff\x84\x01\x00\x01\xfb\x01\x81\xda\x00\x00\x00",
|
||||||
`""`, `{"val":"","magic":3236757504}`},
|
`""`, `{"val":"","magic":3236757504}`},
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -347,6 +347,15 @@ func TestCodecAbsolute(t *testing.T) {
|
|||||||
})
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
t.Run("json passthrough", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
wantErr := "invalid character ':' looking for beginning of value"
|
||||||
|
if err := new(Absolute).UnmarshalJSON([]byte(":3")); err == nil || err.Error() != wantErr {
|
||||||
|
t.Errorf("UnmarshalJSON: error = %v, want %s", err, wantErr)
|
||||||
|
}
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAbsoluteWrap(t *testing.T) {
|
func TestAbsoluteWrap(t *testing.T) {
|
||||||
@@ -3,7 +3,7 @@ package check_test
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestEscapeOverlayDataSegment(t *testing.T) {
|
func TestEscapeOverlayDataSegment(t *testing.T) {
|
||||||
@@ -16,12 +16,10 @@ import (
|
|||||||
. "syscall"
|
. "syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/container/fhs"
|
||||||
"hakurei.app/container/seccomp"
|
"hakurei.app/container/seccomp"
|
||||||
"hakurei.app/container/std"
|
"hakurei.app/container/std"
|
||||||
"hakurei.app/ext"
|
|
||||||
"hakurei.app/fhs"
|
|
||||||
"hakurei.app/internal/landlock"
|
|
||||||
"hakurei.app/message"
|
"hakurei.app/message"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -29,6 +27,9 @@ const (
|
|||||||
// CancelSignal is the signal expected by container init on context cancel.
|
// CancelSignal is the signal expected by container init on context cancel.
|
||||||
// A custom [Container.Cancel] function must eventually deliver this signal.
|
// A custom [Container.Cancel] function must eventually deliver this signal.
|
||||||
CancelSignal = SIGUSR2
|
CancelSignal = SIGUSR2
|
||||||
|
|
||||||
|
// Timeout for writing initParams to Container.setup.
|
||||||
|
initSetupTimeout = 5 * time.Second
|
||||||
)
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
@@ -40,10 +41,10 @@ type (
|
|||||||
// Whether to set SchedPolicy and SchedPriority via sched_setscheduler(2).
|
// Whether to set SchedPolicy and SchedPriority via sched_setscheduler(2).
|
||||||
SetScheduler bool
|
SetScheduler bool
|
||||||
// Scheduling policy to set via sched_setscheduler(2).
|
// Scheduling policy to set via sched_setscheduler(2).
|
||||||
SchedPolicy ext.SchedPolicy
|
SchedPolicy std.SchedPolicy
|
||||||
// Scheduling priority to set via sched_setscheduler(2). The zero value
|
// Scheduling priority to set via sched_setscheduler(2). The zero value
|
||||||
// implies the minimum value supported by the current SchedPolicy.
|
// implies the minimum value supported by the current SchedPolicy.
|
||||||
SchedPriority ext.Int
|
SchedPriority std.Int
|
||||||
// Cgroup fd, nil to disable.
|
// Cgroup fd, nil to disable.
|
||||||
Cgroup *int
|
Cgroup *int
|
||||||
// ExtraFiles passed through to initial process in the container, with
|
// ExtraFiles passed through to initial process in the container, with
|
||||||
@@ -51,7 +52,7 @@ type (
|
|||||||
ExtraFiles []*os.File
|
ExtraFiles []*os.File
|
||||||
|
|
||||||
// Write end of a pipe connected to the init to deliver [Params].
|
// Write end of a pipe connected to the init to deliver [Params].
|
||||||
setup [2]*os.File
|
setup *os.File
|
||||||
// Cancels the context passed to the underlying cmd.
|
// Cancels the context passed to the underlying cmd.
|
||||||
cancel context.CancelFunc
|
cancel context.CancelFunc
|
||||||
// Closed after Wait returns. Keeps the spawning thread alive.
|
// Closed after Wait returns. Keeps the spawning thread alive.
|
||||||
@@ -184,24 +185,31 @@ var (
|
|||||||
closeOnExecErr error
|
closeOnExecErr error
|
||||||
)
|
)
|
||||||
|
|
||||||
// ensureCloseOnExec ensures all currently open file descriptors have the
|
// ensureCloseOnExec ensures all currently open file descriptors have the syscall.FD_CLOEXEC flag set.
|
||||||
// syscall.FD_CLOEXEC flag set.
|
// This is only ran once as it is intended to handle files left open by the parent, and any file opened
|
||||||
//
|
// on this side should already have syscall.FD_CLOEXEC set.
|
||||||
// This is only ran once as it is intended to handle files left open by the
|
|
||||||
// parent, and any file opened on this side should already have
|
|
||||||
// syscall.FD_CLOEXEC set.
|
|
||||||
func ensureCloseOnExec() error {
|
func ensureCloseOnExec() error {
|
||||||
closeOnExecOnce.Do(func() { closeOnExecErr = doCloseOnExec() })
|
closeOnExecOnce.Do(func() {
|
||||||
|
const fdPrefixPath = "/proc/self/fd/"
|
||||||
|
|
||||||
|
var entries []os.DirEntry
|
||||||
|
if entries, closeOnExecErr = os.ReadDir(fdPrefixPath); closeOnExecErr != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var fd int
|
||||||
|
for _, ent := range entries {
|
||||||
|
if fd, closeOnExecErr = strconv.Atoi(ent.Name()); closeOnExecErr != nil {
|
||||||
|
break // not reached
|
||||||
|
}
|
||||||
|
CloseOnExec(fd)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
if closeOnExecErr == nil {
|
if closeOnExecErr == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return &StartError{
|
return &StartError{Fatal: true, Step: "set FD_CLOEXEC on all open files", Err: closeOnExecErr, Passthrough: true}
|
||||||
Fatal: true,
|
|
||||||
Step: "set FD_CLOEXEC on all open files",
|
|
||||||
Err: closeOnExecErr,
|
|
||||||
Passthrough: true,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Start starts the container init. The init process blocks until Serve is called.
|
// Start starts the container init. The init process blocks until Serve is called.
|
||||||
@@ -285,16 +293,14 @@ func (p *Container) Start() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// place setup pipe before user supplied extra files, this is later restored by init
|
// place setup pipe before user supplied extra files, this is later restored by init
|
||||||
if r, w, err := os.Pipe(); err != nil {
|
if fd, f, err := Setup(&p.cmd.ExtraFiles); err != nil {
|
||||||
return &StartError{
|
return &StartError{
|
||||||
Fatal: true,
|
Fatal: true,
|
||||||
Step: "set up params stream",
|
Step: "set up params stream",
|
||||||
Err: err,
|
Err: err,
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
fd := 3 + len(p.cmd.ExtraFiles)
|
p.setup = f
|
||||||
p.cmd.ExtraFiles = append(p.cmd.ExtraFiles, r)
|
|
||||||
p.setup[0], p.setup[1] = r, w
|
|
||||||
p.cmd.Env = []string{setupEnv + "=" + strconv.Itoa(fd)}
|
p.cmd.Env = []string{setupEnv + "=" + strconv.Itoa(fd)}
|
||||||
}
|
}
|
||||||
p.cmd.ExtraFiles = append(p.cmd.ExtraFiles, p.ExtraFiles...)
|
p.cmd.ExtraFiles = append(p.cmd.ExtraFiles, p.ExtraFiles...)
|
||||||
@@ -308,7 +314,7 @@ func (p *Container) Start() error {
|
|||||||
done <- func() error {
|
done <- func() error {
|
||||||
// PR_SET_NO_NEW_PRIVS: thread-directed but acts on all processes
|
// PR_SET_NO_NEW_PRIVS: thread-directed but acts on all processes
|
||||||
// created from the calling thread
|
// created from the calling thread
|
||||||
if err := setNoNewPrivs(); err != nil {
|
if err := SetNoNewPrivs(); err != nil {
|
||||||
return &StartError{
|
return &StartError{
|
||||||
Fatal: true,
|
Fatal: true,
|
||||||
Step: "prctl(PR_SET_NO_NEW_PRIVS)",
|
Step: "prctl(PR_SET_NO_NEW_PRIVS)",
|
||||||
@@ -318,17 +324,15 @@ func (p *Container) Start() error {
|
|||||||
|
|
||||||
// landlock: depends on per-thread state but acts on a process group
|
// landlock: depends on per-thread state but acts on a process group
|
||||||
{
|
{
|
||||||
rulesetAttr := &landlock.RulesetAttr{
|
rulesetAttr := &RulesetAttr{Scoped: LANDLOCK_SCOPE_SIGNAL}
|
||||||
Scoped: landlock.LANDLOCK_SCOPE_SIGNAL,
|
|
||||||
}
|
|
||||||
if !p.HostAbstract {
|
if !p.HostAbstract {
|
||||||
rulesetAttr.Scoped |= landlock.LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET
|
rulesetAttr.Scoped |= LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET
|
||||||
}
|
}
|
||||||
|
|
||||||
if abi, err := landlock.GetABI(); err != nil {
|
if abi, err := LandlockGetABI(); err != nil {
|
||||||
if p.HostAbstract || !p.HostNet {
|
if p.HostAbstract {
|
||||||
// landlock can be skipped here as it restricts access
|
// landlock can be skipped here as it restricts access
|
||||||
// to resources already covered by namespaces (pid, net)
|
// to resources already covered by namespaces (pid)
|
||||||
goto landlockOut
|
goto landlockOut
|
||||||
}
|
}
|
||||||
return &StartError{Step: "get landlock ABI", Err: err}
|
return &StartError{Step: "get landlock ABI", Err: err}
|
||||||
@@ -354,7 +358,7 @@ func (p *Container) Start() error {
|
|||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
p.msg.Verbosef("enforcing landlock ruleset %s", rulesetAttr)
|
p.msg.Verbosef("enforcing landlock ruleset %s", rulesetAttr)
|
||||||
if err = landlock.RestrictSelf(rulesetFd, 0); err != nil {
|
if err = LandlockRestrictSelf(rulesetFd, 0); err != nil {
|
||||||
_ = Close(rulesetFd)
|
_ = Close(rulesetFd)
|
||||||
return &StartError{
|
return &StartError{
|
||||||
Fatal: true,
|
Fatal: true,
|
||||||
@@ -374,7 +378,7 @@ func (p *Container) Start() error {
|
|||||||
// sched_setscheduler: thread-directed but acts on all processes
|
// sched_setscheduler: thread-directed but acts on all processes
|
||||||
// created from the calling thread
|
// created from the calling thread
|
||||||
if p.SetScheduler {
|
if p.SetScheduler {
|
||||||
if p.SchedPolicy < 0 || p.SchedPolicy > ext.SCHED_LAST {
|
if p.SchedPolicy < 0 || p.SchedPolicy > std.SCHED_LAST {
|
||||||
return &StartError{
|
return &StartError{
|
||||||
Fatal: false,
|
Fatal: false,
|
||||||
Step: "set scheduling policy",
|
Step: "set scheduling policy",
|
||||||
@@ -430,33 +434,24 @@ func (p *Container) Start() error {
|
|||||||
// Serve serves [Container.Params] to the container init.
|
// Serve serves [Container.Params] to the container init.
|
||||||
//
|
//
|
||||||
// Serve must only be called once.
|
// Serve must only be called once.
|
||||||
func (p *Container) Serve() (err error) {
|
func (p *Container) Serve() error {
|
||||||
if p.setup[0] == nil || p.setup[1] == nil {
|
if p.setup == nil {
|
||||||
panic("invalid serve")
|
panic("invalid serve")
|
||||||
}
|
}
|
||||||
|
|
||||||
done := make(chan struct{})
|
setup := p.setup
|
||||||
defer func() {
|
p.setup = nil
|
||||||
if closeErr := p.setup[1].Close(); err == nil {
|
if err := setup.SetDeadline(time.Now().Add(initSetupTimeout)); err != nil {
|
||||||
err = closeErr
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
p.cancel()
|
|
||||||
}
|
|
||||||
close(done)
|
|
||||||
p.setup[0], p.setup[1] = nil, nil
|
|
||||||
}()
|
|
||||||
if err = p.setup[0].Close(); err != nil {
|
|
||||||
return &StartError{
|
return &StartError{
|
||||||
Fatal: true,
|
Fatal: true,
|
||||||
Step: "close read end of init pipe",
|
Step: "set init pipe deadline",
|
||||||
Err: err,
|
Err: err,
|
||||||
Passthrough: true,
|
Passthrough: true,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if p.Path == nil {
|
if p.Path == nil {
|
||||||
|
p.cancel()
|
||||||
return &StartError{
|
return &StartError{
|
||||||
Step: "invalid executable pathname",
|
Step: "invalid executable pathname",
|
||||||
Err: EINVAL,
|
Err: EINVAL,
|
||||||
@@ -472,27 +467,18 @@ func (p *Container) Serve() (err error) {
|
|||||||
p.SeccompRules = make([]std.NativeRule, 0)
|
p.SeccompRules = make([]std.NativeRule, 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
t := time.Now().UTC()
|
err := gob.NewEncoder(setup).Encode(&initParams{
|
||||||
go func(f *os.File) {
|
|
||||||
select {
|
|
||||||
case <-p.ctx.Done():
|
|
||||||
if cancelErr := f.SetWriteDeadline(t); cancelErr != nil {
|
|
||||||
p.msg.Verbose(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
case <-done:
|
|
||||||
p.msg.Verbose("setup payload took", time.Since(t))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}(p.setup[1])
|
|
||||||
|
|
||||||
return gob.NewEncoder(p.setup[1]).Encode(&initParams{
|
|
||||||
p.Params,
|
p.Params,
|
||||||
Getuid(),
|
Getuid(),
|
||||||
Getgid(),
|
Getgid(),
|
||||||
len(p.ExtraFiles),
|
len(p.ExtraFiles),
|
||||||
p.msg.IsVerbose(),
|
p.msg.IsVerbose(),
|
||||||
})
|
})
|
||||||
|
_ = setup.Close()
|
||||||
|
if err != nil {
|
||||||
|
p.cancel()
|
||||||
|
}
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wait blocks until the container init process to exit and releases any
|
// Wait blocks until the container init process to exit and releases any
|
||||||
|
|||||||
@@ -16,21 +16,18 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"syscall"
|
"syscall"
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
"hakurei.app/check"
|
|
||||||
"hakurei.app/command"
|
"hakurei.app/command"
|
||||||
"hakurei.app/container"
|
"hakurei.app/container"
|
||||||
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/container/fhs"
|
||||||
"hakurei.app/container/seccomp"
|
"hakurei.app/container/seccomp"
|
||||||
"hakurei.app/container/std"
|
"hakurei.app/container/std"
|
||||||
"hakurei.app/ext"
|
"hakurei.app/container/vfs"
|
||||||
"hakurei.app/fhs"
|
|
||||||
"hakurei.app/hst"
|
"hakurei.app/hst"
|
||||||
"hakurei.app/internal/info"
|
|
||||||
"hakurei.app/internal/landlock"
|
|
||||||
"hakurei.app/internal/params"
|
|
||||||
"hakurei.app/ldd"
|
"hakurei.app/ldd"
|
||||||
"hakurei.app/message"
|
"hakurei.app/message"
|
||||||
"hakurei.app/vfs"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Note: this package requires cgo, which is unavailable in the Go playground.
|
// Note: this package requires cgo, which is unavailable in the Go playground.
|
||||||
@@ -86,9 +83,9 @@ func TestStartError(t *testing.T) {
|
|||||||
{"params env", &container.StartError{
|
{"params env", &container.StartError{
|
||||||
Fatal: true,
|
Fatal: true,
|
||||||
Step: "set up params stream",
|
Step: "set up params stream",
|
||||||
Err: params.ErrReceiveEnv,
|
Err: container.ErrReceiveEnv,
|
||||||
}, "set up params stream: environment variable not set",
|
}, "set up params stream: environment variable not set",
|
||||||
params.ErrReceiveEnv, syscall.EBADF,
|
container.ErrReceiveEnv, syscall.EBADF,
|
||||||
"cannot set up params stream: environment variable not set"},
|
"cannot set up params stream: environment variable not set"},
|
||||||
|
|
||||||
{"params", &container.StartError{
|
{"params", &container.StartError{
|
||||||
@@ -261,7 +258,7 @@ var containerTestCases = []struct {
|
|||||||
1000, 100, nil, 0, std.PresetExt},
|
1000, 100, nil, 0, std.PresetExt},
|
||||||
{"custom rules", true, true, true, false,
|
{"custom rules", true, true, true, false,
|
||||||
emptyOps, emptyMnt,
|
emptyOps, emptyMnt,
|
||||||
1, 31, []std.NativeRule{{Syscall: ext.SyscallNum(syscall.SYS_SETUID), Errno: std.ScmpErrno(syscall.EPERM)}}, 0, std.PresetExt},
|
1, 31, []std.NativeRule{{Syscall: std.ScmpSyscall(syscall.SYS_SETUID), Errno: std.ScmpErrno(syscall.EPERM)}}, 0, std.PresetExt},
|
||||||
|
|
||||||
{"tmpfs", true, false, false, true,
|
{"tmpfs", true, false, false, true,
|
||||||
earlyOps(new(container.Ops).
|
earlyOps(new(container.Ops).
|
||||||
@@ -438,8 +435,11 @@ func TestContainer(t *testing.T) {
|
|||||||
wantOps, wantOpsCtx := tc.ops(t)
|
wantOps, wantOpsCtx := tc.ops(t)
|
||||||
wantMnt := tc.mnt(t, wantOpsCtx)
|
wantMnt := tc.mnt(t, wantOpsCtx)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(t.Context(), helperDefaultTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
var libPaths []*check.Absolute
|
var libPaths []*check.Absolute
|
||||||
c := helperNewContainerLibPaths(t.Context(), &libPaths, "container", strconv.Itoa(i))
|
c := helperNewContainerLibPaths(ctx, &libPaths, "container", strconv.Itoa(i))
|
||||||
c.Uid = tc.uid
|
c.Uid = tc.uid
|
||||||
c.Gid = tc.gid
|
c.Gid = tc.gid
|
||||||
c.Hostname = hostnameFromTestCase(tc.name)
|
c.Hostname = hostnameFromTestCase(tc.name)
|
||||||
@@ -449,6 +449,7 @@ func TestContainer(t *testing.T) {
|
|||||||
} else {
|
} else {
|
||||||
c.Stdout, c.Stderr = os.Stdout, os.Stderr
|
c.Stdout, c.Stderr = os.Stdout, os.Stderr
|
||||||
}
|
}
|
||||||
|
c.WaitDelay = helperDefaultTimeout
|
||||||
*c.Ops = append(*c.Ops, *wantOps...)
|
*c.Ops = append(*c.Ops, *wantOps...)
|
||||||
c.SeccompRules = tc.rules
|
c.SeccompRules = tc.rules
|
||||||
c.SeccompFlags = tc.flags | seccomp.AllowMultiarch
|
c.SeccompFlags = tc.flags | seccomp.AllowMultiarch
|
||||||
@@ -456,15 +457,6 @@ func TestContainer(t *testing.T) {
|
|||||||
c.SeccompDisable = !tc.filter
|
c.SeccompDisable = !tc.filter
|
||||||
c.RetainSession = tc.session
|
c.RetainSession = tc.session
|
||||||
c.HostNet = tc.net
|
c.HostNet = tc.net
|
||||||
if info.CanDegrade {
|
|
||||||
if _, err := landlock.GetABI(); err != nil {
|
|
||||||
if !errors.Is(err, syscall.ENOSYS) {
|
|
||||||
t.Fatalf("LandlockGetABI: error = %v", err)
|
|
||||||
}
|
|
||||||
c.HostAbstract = true
|
|
||||||
t.Log("Landlock LSM is unavailable, enabling HostAbstract")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
c.
|
c.
|
||||||
Readonly(check.MustAbs(pathReadonly), 0755).
|
Readonly(check.MustAbs(pathReadonly), 0755).
|
||||||
@@ -560,10 +552,11 @@ func testContainerCancel(
|
|||||||
) func(t *testing.T) {
|
) func(t *testing.T) {
|
||||||
return func(t *testing.T) {
|
return func(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
ctx, cancel := context.WithCancel(t.Context())
|
ctx, cancel := context.WithTimeout(t.Context(), helperDefaultTimeout)
|
||||||
|
|
||||||
c := helperNewContainer(ctx, "block")
|
c := helperNewContainer(ctx, "block")
|
||||||
c.Stdout, c.Stderr = os.Stdout, os.Stderr
|
c.Stdout, c.Stderr = os.Stdout, os.Stderr
|
||||||
|
c.WaitDelay = helperDefaultTimeout
|
||||||
if containerExtra != nil {
|
if containerExtra != nil {
|
||||||
containerExtra(c)
|
containerExtra(c)
|
||||||
}
|
}
|
||||||
@@ -744,6 +737,7 @@ func init() {
|
|||||||
const (
|
const (
|
||||||
envDoCheck = "HAKUREI_TEST_DO_CHECK"
|
envDoCheck = "HAKUREI_TEST_DO_CHECK"
|
||||||
|
|
||||||
|
helperDefaultTimeout = 5 * time.Second
|
||||||
helperInnerPath = "/usr/bin/helper"
|
helperInnerPath = "/usr/bin/helper"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -1,10 +1,8 @@
|
|||||||
package container
|
package container
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"io"
|
"io"
|
||||||
"io/fs"
|
"io/fs"
|
||||||
"net"
|
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
@@ -14,9 +12,6 @@ import (
|
|||||||
|
|
||||||
"hakurei.app/container/seccomp"
|
"hakurei.app/container/seccomp"
|
||||||
"hakurei.app/container/std"
|
"hakurei.app/container/std"
|
||||||
"hakurei.app/ext"
|
|
||||||
"hakurei.app/internal/netlink"
|
|
||||||
"hakurei.app/internal/params"
|
|
||||||
"hakurei.app/message"
|
"hakurei.app/message"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -57,7 +52,7 @@ type syscallDispatcher interface {
|
|||||||
// isatty provides [Isatty].
|
// isatty provides [Isatty].
|
||||||
isatty(fd int) bool
|
isatty(fd int) bool
|
||||||
// receive provides [Receive].
|
// receive provides [Receive].
|
||||||
receive(key string, e any, fdp *int) (closeFunc func() error, err error)
|
receive(key string, e any, fdp *uintptr) (closeFunc func() error, err error)
|
||||||
|
|
||||||
// bindMount provides procPaths.bindMount.
|
// bindMount provides procPaths.bindMount.
|
||||||
bindMount(msg message.Msg, source, target string, flags uintptr) error
|
bindMount(msg message.Msg, source, target string, flags uintptr) error
|
||||||
@@ -68,7 +63,7 @@ type syscallDispatcher interface {
|
|||||||
// ensureFile provides ensureFile.
|
// ensureFile provides ensureFile.
|
||||||
ensureFile(name string, perm, pperm os.FileMode) error
|
ensureFile(name string, perm, pperm os.FileMode) error
|
||||||
// mustLoopback provides mustLoopback.
|
// mustLoopback provides mustLoopback.
|
||||||
mustLoopback(ctx context.Context, msg message.Msg)
|
mustLoopback(msg message.Msg)
|
||||||
|
|
||||||
// seccompLoad provides [seccomp.Load].
|
// seccompLoad provides [seccomp.Load].
|
||||||
seccompLoad(rules []std.NativeRule, flags seccomp.ExportFlag) error
|
seccompLoad(rules []std.NativeRule, flags seccomp.ExportFlag) error
|
||||||
@@ -146,18 +141,18 @@ func (k direct) new(f func(k syscallDispatcher)) { go f(k) }
|
|||||||
|
|
||||||
func (direct) lockOSThread() { runtime.LockOSThread() }
|
func (direct) lockOSThread() { runtime.LockOSThread() }
|
||||||
|
|
||||||
func (direct) setPtracer(pid uintptr) error { return ext.SetPtracer(pid) }
|
func (direct) setPtracer(pid uintptr) error { return SetPtracer(pid) }
|
||||||
func (direct) setDumpable(dumpable uintptr) error { return ext.SetDumpable(dumpable) }
|
func (direct) setDumpable(dumpable uintptr) error { return SetDumpable(dumpable) }
|
||||||
func (direct) setNoNewPrivs() error { return setNoNewPrivs() }
|
func (direct) setNoNewPrivs() error { return SetNoNewPrivs() }
|
||||||
|
|
||||||
func (direct) lastcap(msg message.Msg) uintptr { return LastCap(msg) }
|
func (direct) lastcap(msg message.Msg) uintptr { return LastCap(msg) }
|
||||||
func (direct) capset(hdrp *capHeader, datap *[2]capData) error { return capset(hdrp, datap) }
|
func (direct) capset(hdrp *capHeader, datap *[2]capData) error { return capset(hdrp, datap) }
|
||||||
func (direct) capBoundingSetDrop(cap uintptr) error { return capBoundingSetDrop(cap) }
|
func (direct) capBoundingSetDrop(cap uintptr) error { return capBoundingSetDrop(cap) }
|
||||||
func (direct) capAmbientClearAll() error { return capAmbientClearAll() }
|
func (direct) capAmbientClearAll() error { return capAmbientClearAll() }
|
||||||
func (direct) capAmbientRaise(cap uintptr) error { return capAmbientRaise(cap) }
|
func (direct) capAmbientRaise(cap uintptr) error { return capAmbientRaise(cap) }
|
||||||
func (direct) isatty(fd int) bool { return ext.Isatty(fd) }
|
func (direct) isatty(fd int) bool { return Isatty(fd) }
|
||||||
func (direct) receive(key string, e any, fdp *int) (func() error, error) {
|
func (direct) receive(key string, e any, fdp *uintptr) (func() error, error) {
|
||||||
return params.Receive(key, e, fdp)
|
return Receive(key, e, fdp)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (direct) bindMount(msg message.Msg, source, target string, flags uintptr) error {
|
func (direct) bindMount(msg message.Msg, source, target string, flags uintptr) error {
|
||||||
@@ -172,50 +167,7 @@ func (k direct) mountTmpfs(fsname, target string, flags uintptr, size int, perm
|
|||||||
func (direct) ensureFile(name string, perm, pperm os.FileMode) error {
|
func (direct) ensureFile(name string, perm, pperm os.FileMode) error {
|
||||||
return ensureFile(name, perm, pperm)
|
return ensureFile(name, perm, pperm)
|
||||||
}
|
}
|
||||||
func (direct) mustLoopback(ctx context.Context, msg message.Msg) {
|
func (direct) mustLoopback(msg message.Msg) { mustLoopback(msg) }
|
||||||
var lo int
|
|
||||||
if ifi, err := net.InterfaceByName("lo"); err != nil {
|
|
||||||
msg.GetLogger().Fatalln(err)
|
|
||||||
} else {
|
|
||||||
lo = ifi.Index
|
|
||||||
}
|
|
||||||
|
|
||||||
c, err := netlink.DialRoute(0)
|
|
||||||
if err != nil {
|
|
||||||
msg.GetLogger().Fatalln(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
must := func(err error) {
|
|
||||||
if err == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if closeErr := c.Close(); closeErr != nil {
|
|
||||||
msg.Verbosef("cannot close RTNETLINK: %v", closeErr)
|
|
||||||
}
|
|
||||||
|
|
||||||
switch err.(type) {
|
|
||||||
case *os.SyscallError:
|
|
||||||
msg.GetLogger().Fatalf("cannot %v", err)
|
|
||||||
|
|
||||||
case syscall.Errno:
|
|
||||||
msg.GetLogger().Fatalf("RTNETLINK answers: %v", err)
|
|
||||||
|
|
||||||
default:
|
|
||||||
if err == context.DeadlineExceeded || err == context.Canceled {
|
|
||||||
msg.GetLogger().Fatalf("interrupted RTNETLINK operation")
|
|
||||||
}
|
|
||||||
msg.GetLogger().Fatal("RTNETLINK answers with malformed message")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
must(c.SendNewaddrLo(ctx, uint32(lo)))
|
|
||||||
must(c.SendIfInfomsg(ctx, syscall.RTM_NEWLINK, 0, &syscall.IfInfomsg{
|
|
||||||
Family: syscall.AF_UNSPEC,
|
|
||||||
Index: int32(lo),
|
|
||||||
Flags: syscall.IFF_UP,
|
|
||||||
Change: syscall.IFF_UP,
|
|
||||||
}))
|
|
||||||
must(c.Close())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (direct) seccompLoad(rules []std.NativeRule, flags seccomp.ExportFlag) error {
|
func (direct) seccompLoad(rules []std.NativeRule, flags seccomp.ExportFlag) error {
|
||||||
return seccomp.Load(rules, flags)
|
return seccomp.Load(rules, flags)
|
||||||
|
|||||||
@@ -2,7 +2,6 @@ package container
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/fs"
|
"io/fs"
|
||||||
@@ -19,7 +18,7 @@ import (
|
|||||||
|
|
||||||
"hakurei.app/container/seccomp"
|
"hakurei.app/container/seccomp"
|
||||||
"hakurei.app/container/std"
|
"hakurei.app/container/std"
|
||||||
"hakurei.app/internal/stub"
|
"hakurei.app/container/stub"
|
||||||
"hakurei.app/message"
|
"hakurei.app/message"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -390,7 +389,7 @@ func (k *kstub) isatty(fd int) bool {
|
|||||||
return expect.Ret.(bool)
|
return expect.Ret.(bool)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (k *kstub) receive(key string, e any, fdp *int) (closeFunc func() error, err error) {
|
func (k *kstub) receive(key string, e any, fdp *uintptr) (closeFunc func() error, err error) {
|
||||||
k.Helper()
|
k.Helper()
|
||||||
expect := k.Expects("receive")
|
expect := k.Expects("receive")
|
||||||
|
|
||||||
@@ -408,17 +407,10 @@ func (k *kstub) receive(key string, e any, fdp *int) (closeFunc func() error, er
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// avoid changing test cases
|
|
||||||
var fdpComp *uintptr
|
|
||||||
if fdp != nil {
|
|
||||||
fdpComp = new(uintptr(*fdp))
|
|
||||||
}
|
|
||||||
|
|
||||||
err = expect.Error(
|
err = expect.Error(
|
||||||
stub.CheckArg(k.Stub, "key", key, 0),
|
stub.CheckArg(k.Stub, "key", key, 0),
|
||||||
stub.CheckArgReflect(k.Stub, "e", e, 1),
|
stub.CheckArgReflect(k.Stub, "e", e, 1),
|
||||||
stub.CheckArgReflect(k.Stub, "fdp", fdpComp, 2))
|
stub.CheckArgReflect(k.Stub, "fdp", fdp, 2))
|
||||||
|
|
||||||
// 3 is unused so stores params
|
// 3 is unused so stores params
|
||||||
if expect.Args[3] != nil {
|
if expect.Args[3] != nil {
|
||||||
@@ -433,7 +425,7 @@ func (k *kstub) receive(key string, e any, fdp *int) (closeFunc func() error, er
|
|||||||
if expect.Args[4] != nil {
|
if expect.Args[4] != nil {
|
||||||
if v, ok := expect.Args[4].(uintptr); ok && v >= 3 {
|
if v, ok := expect.Args[4].(uintptr); ok && v >= 3 {
|
||||||
if fdp != nil {
|
if fdp != nil {
|
||||||
*fdp = int(v)
|
*fdp = v
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -476,7 +468,7 @@ func (k *kstub) ensureFile(name string, perm, pperm os.FileMode) error {
|
|||||||
stub.CheckArg(k.Stub, "pperm", pperm, 2))
|
stub.CheckArg(k.Stub, "pperm", pperm, 2))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (*kstub) mustLoopback(context.Context, message.Msg) { /* noop */ }
|
func (*kstub) mustLoopback(message.Msg) { /* noop */ }
|
||||||
|
|
||||||
func (k *kstub) seccompLoad(rules []std.NativeRule, flags seccomp.ExportFlag) error {
|
func (k *kstub) seccompLoad(rules []std.NativeRule, flags seccomp.ExportFlag) error {
|
||||||
k.Helper()
|
k.Helper()
|
||||||
|
|||||||
@@ -5,9 +5,9 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"syscall"
|
"syscall"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/container/vfs"
|
||||||
"hakurei.app/message"
|
"hakurei.app/message"
|
||||||
"hakurei.app/vfs"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// messageFromError returns a printable error message for a supported concrete type.
|
// messageFromError returns a printable error message for a supported concrete type.
|
||||||
|
|||||||
@@ -8,9 +8,9 @@ import (
|
|||||||
"syscall"
|
"syscall"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/internal/stub"
|
"hakurei.app/container/stub"
|
||||||
"hakurei.app/vfs"
|
"hakurei.app/container/vfs"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestMessageFromError(t *testing.T) {
|
func TestMessageFromError(t *testing.T) {
|
||||||
|
|||||||
37
container/executable.go
Normal file
37
container/executable.go
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
package container
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"hakurei.app/message"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
executable string
|
||||||
|
executableOnce sync.Once
|
||||||
|
)
|
||||||
|
|
||||||
|
func copyExecutable(msg message.Msg) {
|
||||||
|
if name, err := os.Executable(); err != nil {
|
||||||
|
m := fmt.Sprintf("cannot read executable path: %v", err)
|
||||||
|
if msg != nil {
|
||||||
|
msg.BeforeExit()
|
||||||
|
msg.GetLogger().Fatal(m)
|
||||||
|
} else {
|
||||||
|
log.Fatal(m)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
executable = name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustExecutable calls [os.Executable] and terminates the process on error.
|
||||||
|
//
|
||||||
|
// Deprecated: This is no longer used and will be removed in 0.4.
|
||||||
|
func MustExecutable(msg message.Msg) string {
|
||||||
|
executableOnce.Do(func() { copyExecutable(msg) })
|
||||||
|
return executable
|
||||||
|
}
|
||||||
18
container/executable_test.go
Normal file
18
container/executable_test.go
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
package container_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"hakurei.app/container"
|
||||||
|
"hakurei.app/message"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestExecutable(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
for i := 0; i < 16; i++ {
|
||||||
|
if got := container.MustExecutable(message.New(nil)); got != os.Args[0] {
|
||||||
|
t.Errorf("MustExecutable: %q, want %q", got, os.Args[0])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -3,14 +3,14 @@ package fhs
|
|||||||
import (
|
import (
|
||||||
_ "unsafe" // for go:linkname
|
_ "unsafe" // for go:linkname
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
)
|
)
|
||||||
|
|
||||||
/* constants in this file bypass abs check, be extremely careful when changing them! */
|
/* constants in this file bypass abs check, be extremely careful when changing them! */
|
||||||
|
|
||||||
// unsafeAbs returns check.Absolute on any string value.
|
// unsafeAbs returns check.Absolute on any string value.
|
||||||
//
|
//
|
||||||
//go:linkname unsafeAbs hakurei.app/check.unsafeAbs
|
//go:linkname unsafeAbs hakurei.app/container/check.unsafeAbs
|
||||||
func unsafeAbs(pathname string) *check.Absolute
|
func unsafeAbs(pathname string) *check.Absolute
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -7,8 +7,7 @@ import (
|
|||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"os/signal"
|
"path"
|
||||||
"path/filepath"
|
|
||||||
"slices"
|
"slices"
|
||||||
"strconv"
|
"strconv"
|
||||||
"sync"
|
"sync"
|
||||||
@@ -16,10 +15,8 @@ import (
|
|||||||
. "syscall"
|
. "syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"hakurei.app/container/fhs"
|
||||||
"hakurei.app/container/seccomp"
|
"hakurei.app/container/seccomp"
|
||||||
"hakurei.app/ext"
|
|
||||||
"hakurei.app/fhs"
|
|
||||||
"hakurei.app/internal/params"
|
|
||||||
"hakurei.app/message"
|
"hakurei.app/message"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -148,46 +145,44 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
param initParams
|
params initParams
|
||||||
closeSetup func() error
|
closeSetup func() error
|
||||||
setupFd int
|
setupFd uintptr
|
||||||
|
offsetSetup int
|
||||||
)
|
)
|
||||||
if f, err := k.receive(setupEnv, ¶m, &setupFd); err != nil {
|
if f, err := k.receive(setupEnv, ¶ms, &setupFd); err != nil {
|
||||||
if errors.Is(err, EBADF) {
|
if errors.Is(err, EBADF) {
|
||||||
k.fatal(msg, "invalid setup descriptor")
|
k.fatal(msg, "invalid setup descriptor")
|
||||||
}
|
}
|
||||||
if errors.Is(err, params.ErrReceiveEnv) {
|
if errors.Is(err, ErrReceiveEnv) {
|
||||||
k.fatal(msg, setupEnv+" not set")
|
k.fatal(msg, setupEnv+" not set")
|
||||||
}
|
}
|
||||||
|
|
||||||
k.fatalf(msg, "cannot decode init setup payload: %v", err)
|
k.fatalf(msg, "cannot decode init setup payload: %v", err)
|
||||||
} else {
|
} else {
|
||||||
if param.Ops == nil {
|
if params.Ops == nil {
|
||||||
k.fatal(msg, "invalid setup parameters")
|
k.fatal(msg, "invalid setup parameters")
|
||||||
}
|
}
|
||||||
if param.ParentPerm == 0 {
|
if params.ParentPerm == 0 {
|
||||||
param.ParentPerm = 0755
|
params.ParentPerm = 0755
|
||||||
}
|
}
|
||||||
|
|
||||||
msg.SwapVerbose(param.Verbose)
|
msg.SwapVerbose(params.Verbose)
|
||||||
msg.Verbose("received setup parameters")
|
msg.Verbose("received setup parameters")
|
||||||
closeSetup = f
|
closeSetup = f
|
||||||
|
offsetSetup = int(setupFd + 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !param.HostNet {
|
if !params.HostNet {
|
||||||
ctx, cancel := signal.NotifyContext(context.Background(), CancelSignal,
|
k.mustLoopback(msg)
|
||||||
os.Interrupt, SIGTERM, SIGQUIT)
|
|
||||||
defer cancel() // for panics
|
|
||||||
k.mustLoopback(ctx, msg)
|
|
||||||
cancel()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// write uid/gid map here so parent does not need to set dumpable
|
// write uid/gid map here so parent does not need to set dumpable
|
||||||
if err := k.setDumpable(ext.SUID_DUMP_USER); err != nil {
|
if err := k.setDumpable(SUID_DUMP_USER); err != nil {
|
||||||
k.fatalf(msg, "cannot set SUID_DUMP_USER: %v", err)
|
k.fatalf(msg, "cannot set SUID_DUMP_USER: %v", err)
|
||||||
}
|
}
|
||||||
if err := k.writeFile(fhs.Proc+"self/uid_map",
|
if err := k.writeFile(fhs.Proc+"self/uid_map",
|
||||||
append([]byte{}, strconv.Itoa(param.Uid)+" "+strconv.Itoa(param.HostUid)+" 1\n"...),
|
append([]byte{}, strconv.Itoa(params.Uid)+" "+strconv.Itoa(params.HostUid)+" 1\n"...),
|
||||||
0); err != nil {
|
0); err != nil {
|
||||||
k.fatalf(msg, "%v", err)
|
k.fatalf(msg, "%v", err)
|
||||||
}
|
}
|
||||||
@@ -197,17 +192,17 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
|
|||||||
k.fatalf(msg, "%v", err)
|
k.fatalf(msg, "%v", err)
|
||||||
}
|
}
|
||||||
if err := k.writeFile(fhs.Proc+"self/gid_map",
|
if err := k.writeFile(fhs.Proc+"self/gid_map",
|
||||||
append([]byte{}, strconv.Itoa(param.Gid)+" "+strconv.Itoa(param.HostGid)+" 1\n"...),
|
append([]byte{}, strconv.Itoa(params.Gid)+" "+strconv.Itoa(params.HostGid)+" 1\n"...),
|
||||||
0); err != nil {
|
0); err != nil {
|
||||||
k.fatalf(msg, "%v", err)
|
k.fatalf(msg, "%v", err)
|
||||||
}
|
}
|
||||||
if err := k.setDumpable(ext.SUID_DUMP_DISABLE); err != nil {
|
if err := k.setDumpable(SUID_DUMP_DISABLE); err != nil {
|
||||||
k.fatalf(msg, "cannot set SUID_DUMP_DISABLE: %v", err)
|
k.fatalf(msg, "cannot set SUID_DUMP_DISABLE: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
oldmask := k.umask(0)
|
oldmask := k.umask(0)
|
||||||
if param.Hostname != "" {
|
if params.Hostname != "" {
|
||||||
if err := k.sethostname([]byte(param.Hostname)); err != nil {
|
if err := k.sethostname([]byte(params.Hostname)); err != nil {
|
||||||
k.fatalf(msg, "cannot set hostname: %v", err)
|
k.fatalf(msg, "cannot set hostname: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -220,7 +215,7 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
state := &setupState{process: make(map[int]WaitStatus), Params: ¶m.Params, Msg: msg, Context: ctx}
|
state := &setupState{process: make(map[int]WaitStatus), Params: ¶ms.Params, Msg: msg, Context: ctx}
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
/* early is called right before pivot_root into intermediate root;
|
/* early is called right before pivot_root into intermediate root;
|
||||||
@@ -228,7 +223,7 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
|
|||||||
difficult to obtain via library functions after pivot_root, and
|
difficult to obtain via library functions after pivot_root, and
|
||||||
implementations are expected to avoid changing the state of the mount
|
implementations are expected to avoid changing the state of the mount
|
||||||
namespace */
|
namespace */
|
||||||
for i, op := range *param.Ops {
|
for i, op := range *params.Ops {
|
||||||
if op == nil || !op.Valid() {
|
if op == nil || !op.Valid() {
|
||||||
k.fatalf(msg, "invalid op at index %d", i)
|
k.fatalf(msg, "invalid op at index %d", i)
|
||||||
}
|
}
|
||||||
@@ -271,7 +266,7 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
|
|||||||
step sets up the container filesystem, and implementations are expected to
|
step sets up the container filesystem, and implementations are expected to
|
||||||
keep the host root and sysroot mount points intact but otherwise can do
|
keep the host root and sysroot mount points intact but otherwise can do
|
||||||
whatever they need to. Calling chdir is allowed but discouraged. */
|
whatever they need to. Calling chdir is allowed but discouraged. */
|
||||||
for i, op := range *param.Ops {
|
for i, op := range *params.Ops {
|
||||||
// ops already checked during early setup
|
// ops already checked during early setup
|
||||||
if prefix, ok := op.prefix(); ok {
|
if prefix, ok := op.prefix(); ok {
|
||||||
msg.Verbosef("%s %s", prefix, op)
|
msg.Verbosef("%s %s", prefix, op)
|
||||||
@@ -295,7 +290,7 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
|
|||||||
|
|
||||||
{
|
{
|
||||||
var fd int
|
var fd int
|
||||||
if err := ext.IgnoringEINTR(func() (err error) {
|
if err := IgnoringEINTR(func() (err error) {
|
||||||
fd, err = k.open(fhs.Root, O_DIRECTORY|O_RDONLY, 0)
|
fd, err = k.open(fhs.Root, O_DIRECTORY|O_RDONLY, 0)
|
||||||
return
|
return
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
@@ -327,7 +322,7 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
|
|||||||
k.fatalf(msg, "cannot clear the ambient capability set: %v", err)
|
k.fatalf(msg, "cannot clear the ambient capability set: %v", err)
|
||||||
}
|
}
|
||||||
for i := uintptr(0); i <= lastcap; i++ {
|
for i := uintptr(0); i <= lastcap; i++ {
|
||||||
if param.Privileged && i == CAP_SYS_ADMIN {
|
if params.Privileged && i == CAP_SYS_ADMIN {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if err := k.capBoundingSetDrop(i); err != nil {
|
if err := k.capBoundingSetDrop(i); err != nil {
|
||||||
@@ -336,7 +331,7 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var keep [2]uint32
|
var keep [2]uint32
|
||||||
if param.Privileged {
|
if params.Privileged {
|
||||||
keep[capToIndex(CAP_SYS_ADMIN)] |= capToMask(CAP_SYS_ADMIN)
|
keep[capToIndex(CAP_SYS_ADMIN)] |= capToMask(CAP_SYS_ADMIN)
|
||||||
|
|
||||||
if err := k.capAmbientRaise(CAP_SYS_ADMIN); err != nil {
|
if err := k.capAmbientRaise(CAP_SYS_ADMIN); err != nil {
|
||||||
@@ -350,13 +345,13 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
|
|||||||
k.fatalf(msg, "cannot capset: %v", err)
|
k.fatalf(msg, "cannot capset: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !param.SeccompDisable {
|
if !params.SeccompDisable {
|
||||||
rules := param.SeccompRules
|
rules := params.SeccompRules
|
||||||
if len(rules) == 0 { // non-empty rules slice always overrides presets
|
if len(rules) == 0 { // non-empty rules slice always overrides presets
|
||||||
msg.Verbosef("resolving presets %#x", param.SeccompPresets)
|
msg.Verbosef("resolving presets %#x", params.SeccompPresets)
|
||||||
rules = seccomp.Preset(param.SeccompPresets, param.SeccompFlags)
|
rules = seccomp.Preset(params.SeccompPresets, params.SeccompFlags)
|
||||||
}
|
}
|
||||||
if err := k.seccompLoad(rules, param.SeccompFlags); err != nil {
|
if err := k.seccompLoad(rules, params.SeccompFlags); err != nil {
|
||||||
// this also indirectly asserts PR_SET_NO_NEW_PRIVS
|
// this also indirectly asserts PR_SET_NO_NEW_PRIVS
|
||||||
k.fatalf(msg, "cannot load syscall filter: %v", err)
|
k.fatalf(msg, "cannot load syscall filter: %v", err)
|
||||||
}
|
}
|
||||||
@@ -365,10 +360,10 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
|
|||||||
msg.Verbose("syscall filter not configured")
|
msg.Verbose("syscall filter not configured")
|
||||||
}
|
}
|
||||||
|
|
||||||
extraFiles := make([]*os.File, param.Count)
|
extraFiles := make([]*os.File, params.Count)
|
||||||
for i := range extraFiles {
|
for i := range extraFiles {
|
||||||
// setup fd is placed before all extra files
|
// setup fd is placed before all extra files
|
||||||
extraFiles[i] = k.newFile(uintptr(setupFd+1+i), "extra file "+strconv.Itoa(i))
|
extraFiles[i] = k.newFile(uintptr(offsetSetup+i), "extra file "+strconv.Itoa(i))
|
||||||
}
|
}
|
||||||
k.umask(oldmask)
|
k.umask(oldmask)
|
||||||
|
|
||||||
@@ -446,7 +441,7 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
|
|||||||
|
|
||||||
// called right before startup of initial process, all state changes to the
|
// called right before startup of initial process, all state changes to the
|
||||||
// current process is prohibited during late
|
// current process is prohibited during late
|
||||||
for i, op := range *param.Ops {
|
for i, op := range *params.Ops {
|
||||||
// ops already checked during early setup
|
// ops already checked during early setup
|
||||||
if err := op.late(state, k); err != nil {
|
if err := op.late(state, k); err != nil {
|
||||||
if m, ok := messageFromError(err); ok {
|
if m, ok := messageFromError(err); ok {
|
||||||
@@ -467,14 +462,14 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
|
|||||||
k.fatalf(msg, "cannot close setup pipe: %v", err)
|
k.fatalf(msg, "cannot close setup pipe: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
cmd := exec.Command(param.Path.String())
|
cmd := exec.Command(params.Path.String())
|
||||||
cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
|
cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
|
||||||
cmd.Args = param.Args
|
cmd.Args = params.Args
|
||||||
cmd.Env = param.Env
|
cmd.Env = params.Env
|
||||||
cmd.ExtraFiles = extraFiles
|
cmd.ExtraFiles = extraFiles
|
||||||
cmd.Dir = param.Dir.String()
|
cmd.Dir = params.Dir.String()
|
||||||
|
|
||||||
msg.Verbosef("starting initial process %s", param.Path)
|
msg.Verbosef("starting initial process %s", params.Path)
|
||||||
if err := k.start(cmd); err != nil {
|
if err := k.start(cmd); err != nil {
|
||||||
k.fatalf(msg, "%v", err)
|
k.fatalf(msg, "%v", err)
|
||||||
}
|
}
|
||||||
@@ -492,9 +487,9 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
|
|||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case s := <-sig:
|
case s := <-sig:
|
||||||
if s == CancelSignal && param.ForwardCancel && cmd.Process != nil {
|
if s == CancelSignal && params.ForwardCancel && cmd.Process != nil {
|
||||||
msg.Verbose("forwarding context cancellation")
|
msg.Verbose("forwarding context cancellation")
|
||||||
if err := k.signal(cmd, os.Interrupt); err != nil && !errors.Is(err, os.ErrProcessDone) {
|
if err := k.signal(cmd, os.Interrupt); err != nil {
|
||||||
k.printf(msg, "cannot forward cancellation: %v", err)
|
k.printf(msg, "cannot forward cancellation: %v", err)
|
||||||
}
|
}
|
||||||
continue
|
continue
|
||||||
@@ -524,7 +519,7 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
|
|||||||
cancel()
|
cancel()
|
||||||
|
|
||||||
// start timeout early
|
// start timeout early
|
||||||
go func() { time.Sleep(param.AdoptWaitDelay); close(timeout) }()
|
go func() { time.Sleep(params.AdoptWaitDelay); close(timeout) }()
|
||||||
|
|
||||||
// close initial process files; this also keeps them alive
|
// close initial process files; this also keeps them alive
|
||||||
for _, f := range extraFiles {
|
for _, f := range extraFiles {
|
||||||
@@ -568,7 +563,7 @@ func TryArgv0(msg message.Msg) {
|
|||||||
msg = message.New(log.Default())
|
msg = message.New(log.Default())
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(os.Args) > 0 && filepath.Base(os.Args[0]) == initName {
|
if len(os.Args) > 0 && path.Base(os.Args[0]) == initName {
|
||||||
Init(msg)
|
Init(msg)
|
||||||
msg.BeforeExit()
|
msg.BeforeExit()
|
||||||
os.Exit(0)
|
os.Exit(0)
|
||||||
|
|||||||
@@ -7,11 +7,10 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/container/seccomp"
|
"hakurei.app/container/seccomp"
|
||||||
"hakurei.app/container/std"
|
"hakurei.app/container/std"
|
||||||
"hakurei.app/internal/params"
|
"hakurei.app/container/stub"
|
||||||
"hakurei.app/internal/stub"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestInitEntrypoint(t *testing.T) {
|
func TestInitEntrypoint(t *testing.T) {
|
||||||
@@ -41,7 +40,7 @@ func TestInitEntrypoint(t *testing.T) {
|
|||||||
call("lockOSThread", stub.ExpectArgs{}, nil, nil),
|
call("lockOSThread", stub.ExpectArgs{}, nil, nil),
|
||||||
call("getpid", stub.ExpectArgs{}, 1, nil),
|
call("getpid", stub.ExpectArgs{}, 1, nil),
|
||||||
call("setPtracer", stub.ExpectArgs{uintptr(0)}, nil, nil),
|
call("setPtracer", stub.ExpectArgs{uintptr(0)}, nil, nil),
|
||||||
call("receive", stub.ExpectArgs{"HAKUREI_SETUP", new(initParams), new(uintptr)}, nil, params.ErrReceiveEnv),
|
call("receive", stub.ExpectArgs{"HAKUREI_SETUP", new(initParams), new(uintptr)}, nil, ErrReceiveEnv),
|
||||||
call("fatal", stub.ExpectArgs{[]any{"HAKUREI_SETUP not set"}}, nil, nil),
|
call("fatal", stub.ExpectArgs{[]any{"HAKUREI_SETUP not set"}}, nil, nil),
|
||||||
},
|
},
|
||||||
}, nil},
|
}, nil},
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"syscall"
|
"syscall"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/container/std"
|
"hakurei.app/container/std"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -6,9 +6,9 @@ import (
|
|||||||
"syscall"
|
"syscall"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/container/std"
|
"hakurei.app/container/std"
|
||||||
"hakurei.app/internal/stub"
|
"hakurei.app/container/stub"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestBindMountOp(t *testing.T) {
|
func TestBindMountOp(t *testing.T) {
|
||||||
|
|||||||
@@ -12,8 +12,8 @@ import (
|
|||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/fhs"
|
"hakurei.app/container/fhs"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() { gob.Register(new(DaemonOp)) }
|
func init() { gob.Register(new(DaemonOp)) }
|
||||||
|
|||||||
@@ -4,8 +4,8 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/internal/stub"
|
"hakurei.app/container/stub"
|
||||||
"hakurei.app/message"
|
"hakurei.app/message"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -3,11 +3,11 @@ package container
|
|||||||
import (
|
import (
|
||||||
"encoding/gob"
|
"encoding/gob"
|
||||||
"fmt"
|
"fmt"
|
||||||
"path/filepath"
|
"path"
|
||||||
. "syscall"
|
. "syscall"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/fhs"
|
"hakurei.app/container/fhs"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() { gob.Register(new(MountDevOp)) }
|
func init() { gob.Register(new(MountDevOp)) }
|
||||||
@@ -46,7 +46,7 @@ func (d *MountDevOp) apply(state *setupState, k syscallDispatcher) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, name := range []string{"null", "zero", "full", "random", "urandom", "tty"} {
|
for _, name := range []string{"null", "zero", "full", "random", "urandom", "tty"} {
|
||||||
targetPath := filepath.Join(target, name)
|
targetPath := path.Join(target, name)
|
||||||
if err := k.ensureFile(targetPath, 0444, state.ParentPerm); err != nil {
|
if err := k.ensureFile(targetPath, 0444, state.ParentPerm); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -62,7 +62,7 @@ func (d *MountDevOp) apply(state *setupState, k syscallDispatcher) error {
|
|||||||
for i, name := range []string{"stdin", "stdout", "stderr"} {
|
for i, name := range []string{"stdin", "stdout", "stderr"} {
|
||||||
if err := k.symlink(
|
if err := k.symlink(
|
||||||
fhs.Proc+"self/fd/"+string(rune(i+'0')),
|
fhs.Proc+"self/fd/"+string(rune(i+'0')),
|
||||||
filepath.Join(target, name),
|
path.Join(target, name),
|
||||||
); err != nil {
|
); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -72,13 +72,13 @@ func (d *MountDevOp) apply(state *setupState, k syscallDispatcher) error {
|
|||||||
{fhs.Proc + "kcore", "core"},
|
{fhs.Proc + "kcore", "core"},
|
||||||
{"pts/ptmx", "ptmx"},
|
{"pts/ptmx", "ptmx"},
|
||||||
} {
|
} {
|
||||||
if err := k.symlink(pair[0], filepath.Join(target, pair[1])); err != nil {
|
if err := k.symlink(pair[0], path.Join(target, pair[1])); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
devShmPath := filepath.Join(target, "shm")
|
devShmPath := path.Join(target, "shm")
|
||||||
devPtsPath := filepath.Join(target, "pts")
|
devPtsPath := path.Join(target, "pts")
|
||||||
for _, name := range []string{devShmPath, devPtsPath} {
|
for _, name := range []string{devShmPath, devPtsPath} {
|
||||||
if err := k.mkdir(name, state.ParentPerm); err != nil {
|
if err := k.mkdir(name, state.ParentPerm); err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -92,7 +92,7 @@ func (d *MountDevOp) apply(state *setupState, k syscallDispatcher) error {
|
|||||||
|
|
||||||
if state.RetainSession {
|
if state.RetainSession {
|
||||||
if k.isatty(Stdout) {
|
if k.isatty(Stdout) {
|
||||||
consolePath := filepath.Join(target, "console")
|
consolePath := path.Join(target, "console")
|
||||||
if err := k.ensureFile(consolePath, 0444, state.ParentPerm); err != nil {
|
if err := k.ensureFile(consolePath, 0444, state.ParentPerm); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -110,7 +110,7 @@ func (d *MountDevOp) apply(state *setupState, k syscallDispatcher) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if d.Mqueue {
|
if d.Mqueue {
|
||||||
mqueueTarget := filepath.Join(target, "mqueue")
|
mqueueTarget := path.Join(target, "mqueue")
|
||||||
if err := k.mkdir(mqueueTarget, state.ParentPerm); err != nil {
|
if err := k.mkdir(mqueueTarget, state.ParentPerm); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,8 +4,8 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/internal/stub"
|
"hakurei.app/container/stub"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestMountDevOp(t *testing.T) {
|
func TestMountDevOp(t *testing.T) {
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() { gob.Register(new(MkdirOp)) }
|
func init() { gob.Register(new(MkdirOp)) }
|
||||||
|
|||||||
@@ -4,8 +4,8 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/internal/stub"
|
"hakurei.app/container/stub"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestMkdirOp(t *testing.T) {
|
func TestMkdirOp(t *testing.T) {
|
||||||
|
|||||||
@@ -6,8 +6,8 @@ import (
|
|||||||
"slices"
|
"slices"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/fhs"
|
"hakurei.app/container/fhs"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|||||||
@@ -5,8 +5,8 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/internal/stub"
|
"hakurei.app/container/stub"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestMountOverlayOp(t *testing.T) {
|
func TestMountOverlayOp(t *testing.T) {
|
||||||
|
|||||||
@@ -5,8 +5,8 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"syscall"
|
"syscall"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/fhs"
|
"hakurei.app/container/fhs"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|||||||
@@ -4,8 +4,8 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/internal/stub"
|
"hakurei.app/container/stub"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestTmpfileOp(t *testing.T) {
|
func TestTmpfileOp(t *testing.T) {
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
. "syscall"
|
. "syscall"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() { gob.Register(new(MountProcOp)) }
|
func init() { gob.Register(new(MountProcOp)) }
|
||||||
|
|||||||
@@ -4,8 +4,8 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/internal/stub"
|
"hakurei.app/container/stub"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestMountProcOp(t *testing.T) {
|
func TestMountProcOp(t *testing.T) {
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ import (
|
|||||||
"encoding/gob"
|
"encoding/gob"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() { gob.Register(new(RemountOp)) }
|
func init() { gob.Register(new(RemountOp)) }
|
||||||
|
|||||||
@@ -4,8 +4,8 @@ import (
|
|||||||
"syscall"
|
"syscall"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/internal/stub"
|
"hakurei.app/container/stub"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestRemountOp(t *testing.T) {
|
func TestRemountOp(t *testing.T) {
|
||||||
|
|||||||
@@ -3,9 +3,9 @@ package container
|
|||||||
import (
|
import (
|
||||||
"encoding/gob"
|
"encoding/gob"
|
||||||
"fmt"
|
"fmt"
|
||||||
"path/filepath"
|
"path"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() { gob.Register(new(SymlinkOp)) }
|
func init() { gob.Register(new(SymlinkOp)) }
|
||||||
@@ -30,7 +30,7 @@ func (l *SymlinkOp) Valid() bool { return l != nil && l.Target != nil && l.LinkN
|
|||||||
|
|
||||||
func (l *SymlinkOp) early(_ *setupState, k syscallDispatcher) error {
|
func (l *SymlinkOp) early(_ *setupState, k syscallDispatcher) error {
|
||||||
if l.Dereference {
|
if l.Dereference {
|
||||||
if !filepath.IsAbs(l.LinkName) {
|
if !path.IsAbs(l.LinkName) {
|
||||||
return check.AbsoluteError(l.LinkName)
|
return check.AbsoluteError(l.LinkName)
|
||||||
}
|
}
|
||||||
if name, err := k.readlink(l.LinkName); err != nil {
|
if name, err := k.readlink(l.LinkName); err != nil {
|
||||||
@@ -44,7 +44,7 @@ func (l *SymlinkOp) early(_ *setupState, k syscallDispatcher) error {
|
|||||||
|
|
||||||
func (l *SymlinkOp) apply(state *setupState, k syscallDispatcher) error {
|
func (l *SymlinkOp) apply(state *setupState, k syscallDispatcher) error {
|
||||||
target := toSysroot(l.Target.String())
|
target := toSysroot(l.Target.String())
|
||||||
if err := k.mkdirAll(filepath.Dir(target), state.ParentPerm); err != nil {
|
if err := k.mkdirAll(path.Dir(target), state.ParentPerm); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return k.symlink(l.LinkName, target)
|
return k.symlink(l.LinkName, target)
|
||||||
|
|||||||
@@ -4,8 +4,8 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/internal/stub"
|
"hakurei.app/container/stub"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestSymlinkOp(t *testing.T) {
|
func TestSymlinkOp(t *testing.T) {
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
. "syscall"
|
. "syscall"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() { gob.Register(new(MountTmpfsOp)) }
|
func init() { gob.Register(new(MountTmpfsOp)) }
|
||||||
|
|||||||
@@ -5,8 +5,8 @@ import (
|
|||||||
"syscall"
|
"syscall"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/internal/stub"
|
"hakurei.app/container/stub"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestMountTmpfsOp(t *testing.T) {
|
func TestMountTmpfsOp(t *testing.T) {
|
||||||
|
|||||||
@@ -1,11 +1,11 @@
|
|||||||
package landlock
|
package container
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"strings"
|
"strings"
|
||||||
"syscall"
|
"syscall"
|
||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
"hakurei.app/ext"
|
"hakurei.app/container/std"
|
||||||
)
|
)
|
||||||
|
|
||||||
// include/uapi/linux/landlock.h
|
// include/uapi/linux/landlock.h
|
||||||
@@ -14,11 +14,11 @@ const (
|
|||||||
LANDLOCK_CREATE_RULESET_VERSION = 1 << iota
|
LANDLOCK_CREATE_RULESET_VERSION = 1 << iota
|
||||||
)
|
)
|
||||||
|
|
||||||
// AccessFS is bitmask of handled filesystem actions.
|
// LandlockAccessFS is bitmask of handled filesystem actions.
|
||||||
type AccessFS uint64
|
type LandlockAccessFS uint64
|
||||||
|
|
||||||
const (
|
const (
|
||||||
LANDLOCK_ACCESS_FS_EXECUTE AccessFS = 1 << iota
|
LANDLOCK_ACCESS_FS_EXECUTE LandlockAccessFS = 1 << iota
|
||||||
LANDLOCK_ACCESS_FS_WRITE_FILE
|
LANDLOCK_ACCESS_FS_WRITE_FILE
|
||||||
LANDLOCK_ACCESS_FS_READ_FILE
|
LANDLOCK_ACCESS_FS_READ_FILE
|
||||||
LANDLOCK_ACCESS_FS_READ_DIR
|
LANDLOCK_ACCESS_FS_READ_DIR
|
||||||
@@ -38,8 +38,8 @@ const (
|
|||||||
_LANDLOCK_ACCESS_FS_DELIM
|
_LANDLOCK_ACCESS_FS_DELIM
|
||||||
)
|
)
|
||||||
|
|
||||||
// String returns a space-separated string of [AccessFS] flags.
|
// String returns a space-separated string of [LandlockAccessFS] flags.
|
||||||
func (f AccessFS) String() string {
|
func (f LandlockAccessFS) String() string {
|
||||||
switch f {
|
switch f {
|
||||||
case LANDLOCK_ACCESS_FS_EXECUTE:
|
case LANDLOCK_ACCESS_FS_EXECUTE:
|
||||||
return "execute"
|
return "execute"
|
||||||
@@ -90,8 +90,8 @@ func (f AccessFS) String() string {
|
|||||||
return "fs_ioctl_dev"
|
return "fs_ioctl_dev"
|
||||||
|
|
||||||
default:
|
default:
|
||||||
var c []AccessFS
|
var c []LandlockAccessFS
|
||||||
for i := AccessFS(1); i < _LANDLOCK_ACCESS_FS_DELIM; i <<= 1 {
|
for i := LandlockAccessFS(1); i < _LANDLOCK_ACCESS_FS_DELIM; i <<= 1 {
|
||||||
if f&i != 0 {
|
if f&i != 0 {
|
||||||
c = append(c, i)
|
c = append(c, i)
|
||||||
}
|
}
|
||||||
@@ -107,18 +107,18 @@ func (f AccessFS) String() string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// AccessNet is bitmask of handled network actions.
|
// LandlockAccessNet is bitmask of handled network actions.
|
||||||
type AccessNet uint64
|
type LandlockAccessNet uint64
|
||||||
|
|
||||||
const (
|
const (
|
||||||
LANDLOCK_ACCESS_NET_BIND_TCP AccessNet = 1 << iota
|
LANDLOCK_ACCESS_NET_BIND_TCP LandlockAccessNet = 1 << iota
|
||||||
LANDLOCK_ACCESS_NET_CONNECT_TCP
|
LANDLOCK_ACCESS_NET_CONNECT_TCP
|
||||||
|
|
||||||
_LANDLOCK_ACCESS_NET_DELIM
|
_LANDLOCK_ACCESS_NET_DELIM
|
||||||
)
|
)
|
||||||
|
|
||||||
// String returns a space-separated string of [AccessNet] flags.
|
// String returns a space-separated string of [LandlockAccessNet] flags.
|
||||||
func (f AccessNet) String() string {
|
func (f LandlockAccessNet) String() string {
|
||||||
switch f {
|
switch f {
|
||||||
case LANDLOCK_ACCESS_NET_BIND_TCP:
|
case LANDLOCK_ACCESS_NET_BIND_TCP:
|
||||||
return "bind_tcp"
|
return "bind_tcp"
|
||||||
@@ -127,8 +127,8 @@ func (f AccessNet) String() string {
|
|||||||
return "connect_tcp"
|
return "connect_tcp"
|
||||||
|
|
||||||
default:
|
default:
|
||||||
var c []AccessNet
|
var c []LandlockAccessNet
|
||||||
for i := AccessNet(1); i < _LANDLOCK_ACCESS_NET_DELIM; i <<= 1 {
|
for i := LandlockAccessNet(1); i < _LANDLOCK_ACCESS_NET_DELIM; i <<= 1 {
|
||||||
if f&i != 0 {
|
if f&i != 0 {
|
||||||
c = append(c, i)
|
c = append(c, i)
|
||||||
}
|
}
|
||||||
@@ -144,18 +144,18 @@ func (f AccessNet) String() string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Scope is bitmask of scopes restricting a Landlock domain from accessing outside resources.
|
// LandlockScope is bitmask of scopes restricting a Landlock domain from accessing outside resources.
|
||||||
type Scope uint64
|
type LandlockScope uint64
|
||||||
|
|
||||||
const (
|
const (
|
||||||
LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET Scope = 1 << iota
|
LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET LandlockScope = 1 << iota
|
||||||
LANDLOCK_SCOPE_SIGNAL
|
LANDLOCK_SCOPE_SIGNAL
|
||||||
|
|
||||||
_LANDLOCK_SCOPE_DELIM
|
_LANDLOCK_SCOPE_DELIM
|
||||||
)
|
)
|
||||||
|
|
||||||
// String returns a space-separated string of [Scope] flags.
|
// String returns a space-separated string of [LandlockScope] flags.
|
||||||
func (f Scope) String() string {
|
func (f LandlockScope) String() string {
|
||||||
switch f {
|
switch f {
|
||||||
case LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET:
|
case LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET:
|
||||||
return "abstract_unix_socket"
|
return "abstract_unix_socket"
|
||||||
@@ -164,8 +164,8 @@ func (f Scope) String() string {
|
|||||||
return "signal"
|
return "signal"
|
||||||
|
|
||||||
default:
|
default:
|
||||||
var c []Scope
|
var c []LandlockScope
|
||||||
for i := Scope(1); i < _LANDLOCK_SCOPE_DELIM; i <<= 1 {
|
for i := LandlockScope(1); i < _LANDLOCK_SCOPE_DELIM; i <<= 1 {
|
||||||
if f&i != 0 {
|
if f&i != 0 {
|
||||||
c = append(c, i)
|
c = append(c, i)
|
||||||
}
|
}
|
||||||
@@ -184,12 +184,12 @@ func (f Scope) String() string {
|
|||||||
// RulesetAttr is equivalent to struct landlock_ruleset_attr.
|
// RulesetAttr is equivalent to struct landlock_ruleset_attr.
|
||||||
type RulesetAttr struct {
|
type RulesetAttr struct {
|
||||||
// Bitmask of handled filesystem actions.
|
// Bitmask of handled filesystem actions.
|
||||||
HandledAccessFS AccessFS
|
HandledAccessFS LandlockAccessFS
|
||||||
// Bitmask of handled network actions.
|
// Bitmask of handled network actions.
|
||||||
HandledAccessNet AccessNet
|
HandledAccessNet LandlockAccessNet
|
||||||
// Bitmask of scopes restricting a Landlock domain from accessing outside
|
// Bitmask of scopes restricting a Landlock domain from accessing outside
|
||||||
// resources (e.g. IPCs).
|
// resources (e.g. IPCs).
|
||||||
Scoped Scope
|
Scoped LandlockScope
|
||||||
}
|
}
|
||||||
|
|
||||||
// String returns a user-facing description of [RulesetAttr].
|
// String returns a user-facing description of [RulesetAttr].
|
||||||
@@ -223,7 +223,7 @@ func (rulesetAttr *RulesetAttr) Create(flags uintptr) (fd int, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
rulesetFd, _, errno := syscall.Syscall(
|
rulesetFd, _, errno := syscall.Syscall(
|
||||||
ext.SYS_LANDLOCK_CREATE_RULESET,
|
std.SYS_LANDLOCK_CREATE_RULESET,
|
||||||
pointer, size,
|
pointer, size,
|
||||||
flags,
|
flags,
|
||||||
)
|
)
|
||||||
@@ -239,15 +239,15 @@ func (rulesetAttr *RulesetAttr) Create(flags uintptr) (fd int, err error) {
|
|||||||
return fd, nil
|
return fd, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetABI returns the ABI version supported by the kernel.
|
// LandlockGetABI returns the ABI version supported by the kernel.
|
||||||
func GetABI() (int, error) {
|
func LandlockGetABI() (int, error) {
|
||||||
return (*RulesetAttr)(nil).Create(LANDLOCK_CREATE_RULESET_VERSION)
|
return (*RulesetAttr)(nil).Create(LANDLOCK_CREATE_RULESET_VERSION)
|
||||||
}
|
}
|
||||||
|
|
||||||
// RestrictSelf applies a loaded ruleset to the calling thread.
|
// LandlockRestrictSelf applies a loaded ruleset to the calling thread.
|
||||||
func RestrictSelf(rulesetFd int, flags uintptr) error {
|
func LandlockRestrictSelf(rulesetFd int, flags uintptr) error {
|
||||||
r, _, errno := syscall.Syscall(
|
r, _, errno := syscall.Syscall(
|
||||||
ext.SYS_LANDLOCK_RESTRICT_SELF,
|
std.SYS_LANDLOCK_RESTRICT_SELF,
|
||||||
uintptr(rulesetFd),
|
uintptr(rulesetFd),
|
||||||
flags,
|
flags,
|
||||||
0,
|
0,
|
||||||
65
container/landlock_test.go
Normal file
65
container/landlock_test.go
Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
package container_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"hakurei.app/container"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestLandlockString(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
testCases := []struct {
|
||||||
|
name string
|
||||||
|
rulesetAttr *container.RulesetAttr
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{"nil", nil, "NULL"},
|
||||||
|
{"zero", new(container.RulesetAttr), "0"},
|
||||||
|
{"some", &container.RulesetAttr{Scoped: container.LANDLOCK_SCOPE_SIGNAL}, "scoped: signal"},
|
||||||
|
{"set", &container.RulesetAttr{
|
||||||
|
HandledAccessFS: container.LANDLOCK_ACCESS_FS_MAKE_SYM | container.LANDLOCK_ACCESS_FS_IOCTL_DEV | container.LANDLOCK_ACCESS_FS_WRITE_FILE,
|
||||||
|
HandledAccessNet: container.LANDLOCK_ACCESS_NET_BIND_TCP,
|
||||||
|
Scoped: container.LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET | container.LANDLOCK_SCOPE_SIGNAL,
|
||||||
|
}, "fs: write_file make_sym fs_ioctl_dev, net: bind_tcp, scoped: abstract_unix_socket signal"},
|
||||||
|
{"all", &container.RulesetAttr{
|
||||||
|
HandledAccessFS: container.LANDLOCK_ACCESS_FS_EXECUTE |
|
||||||
|
container.LANDLOCK_ACCESS_FS_WRITE_FILE |
|
||||||
|
container.LANDLOCK_ACCESS_FS_READ_FILE |
|
||||||
|
container.LANDLOCK_ACCESS_FS_READ_DIR |
|
||||||
|
container.LANDLOCK_ACCESS_FS_REMOVE_DIR |
|
||||||
|
container.LANDLOCK_ACCESS_FS_REMOVE_FILE |
|
||||||
|
container.LANDLOCK_ACCESS_FS_MAKE_CHAR |
|
||||||
|
container.LANDLOCK_ACCESS_FS_MAKE_DIR |
|
||||||
|
container.LANDLOCK_ACCESS_FS_MAKE_REG |
|
||||||
|
container.LANDLOCK_ACCESS_FS_MAKE_SOCK |
|
||||||
|
container.LANDLOCK_ACCESS_FS_MAKE_FIFO |
|
||||||
|
container.LANDLOCK_ACCESS_FS_MAKE_BLOCK |
|
||||||
|
container.LANDLOCK_ACCESS_FS_MAKE_SYM |
|
||||||
|
container.LANDLOCK_ACCESS_FS_REFER |
|
||||||
|
container.LANDLOCK_ACCESS_FS_TRUNCATE |
|
||||||
|
container.LANDLOCK_ACCESS_FS_IOCTL_DEV,
|
||||||
|
HandledAccessNet: container.LANDLOCK_ACCESS_NET_BIND_TCP |
|
||||||
|
container.LANDLOCK_ACCESS_NET_CONNECT_TCP,
|
||||||
|
Scoped: container.LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET |
|
||||||
|
container.LANDLOCK_SCOPE_SIGNAL,
|
||||||
|
}, "fs: execute write_file read_file read_dir remove_dir remove_file make_char make_dir make_reg make_sock make_fifo make_block make_sym fs_refer fs_truncate fs_ioctl_dev, net: bind_tcp connect_tcp, scoped: abstract_unix_socket signal"},
|
||||||
|
}
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
if got := tc.rulesetAttr.String(); got != tc.want {
|
||||||
|
t.Errorf("String: %s, want %s", got, tc.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLandlockAttrSize(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
want := 24
|
||||||
|
if got := unsafe.Sizeof(container.RulesetAttr{}); got != uintptr(want) {
|
||||||
|
t.Errorf("Sizeof: %d, want %d", got, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -6,9 +6,8 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
. "syscall"
|
. "syscall"
|
||||||
|
|
||||||
"hakurei.app/ext"
|
"hakurei.app/container/vfs"
|
||||||
"hakurei.app/message"
|
"hakurei.app/message"
|
||||||
"hakurei.app/vfs"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -116,7 +115,7 @@ func (p *procPaths) remount(msg message.Msg, target string, flags uintptr) error
|
|||||||
var targetKFinal string
|
var targetKFinal string
|
||||||
{
|
{
|
||||||
var destFd int
|
var destFd int
|
||||||
if err := ext.IgnoringEINTR(func() (err error) {
|
if err := IgnoringEINTR(func() (err error) {
|
||||||
destFd, err = p.k.open(targetFinal, O_PATH|O_CLOEXEC, 0)
|
destFd, err = p.k.open(targetFinal, O_PATH|O_CLOEXEC, 0)
|
||||||
return
|
return
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
|
|||||||
@@ -5,8 +5,8 @@ import (
|
|||||||
"syscall"
|
"syscall"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"hakurei.app/internal/stub"
|
"hakurei.app/container/stub"
|
||||||
"hakurei.app/vfs"
|
"hakurei.app/container/vfs"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestBindMount(t *testing.T) {
|
func TestBindMount(t *testing.T) {
|
||||||
|
|||||||
269
container/netlink.go
Normal file
269
container/netlink.go
Normal file
@@ -0,0 +1,269 @@
|
|||||||
|
package container
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
. "syscall"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"hakurei.app/container/std"
|
||||||
|
"hakurei.app/message"
|
||||||
|
)
|
||||||
|
|
||||||
|
// rtnetlink represents a NETLINK_ROUTE socket.
|
||||||
|
type rtnetlink struct {
|
||||||
|
// Sent as part of rtnetlink messages.
|
||||||
|
pid uint32
|
||||||
|
// AF_NETLINK socket.
|
||||||
|
fd int
|
||||||
|
// Whether the socket is open.
|
||||||
|
ok bool
|
||||||
|
// Message sequence number.
|
||||||
|
seq uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
// open creates the underlying NETLINK_ROUTE socket.
|
||||||
|
func (s *rtnetlink) open() (err error) {
|
||||||
|
if s.ok || s.fd < 0 {
|
||||||
|
return os.ErrInvalid
|
||||||
|
}
|
||||||
|
|
||||||
|
s.pid = uint32(Getpid())
|
||||||
|
if s.fd, err = Socket(
|
||||||
|
AF_NETLINK,
|
||||||
|
SOCK_RAW|SOCK_CLOEXEC,
|
||||||
|
NETLINK_ROUTE,
|
||||||
|
); err != nil {
|
||||||
|
return os.NewSyscallError("socket", err)
|
||||||
|
} else if err = Bind(s.fd, &SockaddrNetlink{
|
||||||
|
Family: AF_NETLINK,
|
||||||
|
Pid: s.pid,
|
||||||
|
}); err != nil {
|
||||||
|
_ = s.close()
|
||||||
|
return os.NewSyscallError("bind", err)
|
||||||
|
} else {
|
||||||
|
s.ok = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// close closes the underlying NETLINK_ROUTE socket.
|
||||||
|
func (s *rtnetlink) close() error {
|
||||||
|
if !s.ok {
|
||||||
|
return os.ErrInvalid
|
||||||
|
}
|
||||||
|
|
||||||
|
s.ok = false
|
||||||
|
err := Close(s.fd)
|
||||||
|
s.fd = -1
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// roundtrip sends a netlink message and handles the reply.
|
||||||
|
func (s *rtnetlink) roundtrip(data []byte) error {
|
||||||
|
if !s.ok {
|
||||||
|
return os.ErrInvalid
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() { s.seq++ }()
|
||||||
|
|
||||||
|
if err := Sendto(s.fd, data, 0, &SockaddrNetlink{
|
||||||
|
Family: AF_NETLINK,
|
||||||
|
}); err != nil {
|
||||||
|
return os.NewSyscallError("sendto", err)
|
||||||
|
}
|
||||||
|
buf := make([]byte, Getpagesize())
|
||||||
|
|
||||||
|
done:
|
||||||
|
for {
|
||||||
|
p := buf
|
||||||
|
if n, _, err := Recvfrom(s.fd, p, 0); err != nil {
|
||||||
|
return os.NewSyscallError("recvfrom", err)
|
||||||
|
} else if n < NLMSG_HDRLEN {
|
||||||
|
return errors.ErrUnsupported
|
||||||
|
} else {
|
||||||
|
p = p[:n]
|
||||||
|
}
|
||||||
|
|
||||||
|
if msgs, err := ParseNetlinkMessage(p); err != nil {
|
||||||
|
return err
|
||||||
|
} else {
|
||||||
|
for _, m := range msgs {
|
||||||
|
if m.Header.Seq != s.seq || m.Header.Pid != s.pid {
|
||||||
|
return errors.ErrUnsupported
|
||||||
|
}
|
||||||
|
if m.Header.Type == NLMSG_DONE {
|
||||||
|
break done
|
||||||
|
}
|
||||||
|
if m.Header.Type == NLMSG_ERROR {
|
||||||
|
if len(m.Data) >= 4 {
|
||||||
|
errno := Errno(-std.Int(binary.NativeEndian.Uint32(m.Data)))
|
||||||
|
if errno == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return errno
|
||||||
|
}
|
||||||
|
return errors.ErrUnsupported
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// mustRoundtrip calls roundtrip and terminates via msg for a non-nil error.
|
||||||
|
func (s *rtnetlink) mustRoundtrip(msg message.Msg, data []byte) {
|
||||||
|
err := s.roundtrip(data)
|
||||||
|
if err == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if closeErr := Close(s.fd); closeErr != nil {
|
||||||
|
msg.Verbosef("cannot close: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch err.(type) {
|
||||||
|
case *os.SyscallError:
|
||||||
|
msg.GetLogger().Fatalf("cannot %v", err)
|
||||||
|
|
||||||
|
case Errno:
|
||||||
|
msg.GetLogger().Fatalf("RTNETLINK answers: %v", err)
|
||||||
|
|
||||||
|
default:
|
||||||
|
msg.GetLogger().Fatalln("RTNETLINK answers with unexpected message")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// newaddrLo represents a RTM_NEWADDR message with two addresses.
|
||||||
|
type newaddrLo struct {
|
||||||
|
header NlMsghdr
|
||||||
|
data IfAddrmsg
|
||||||
|
|
||||||
|
r0 RtAttr
|
||||||
|
a0 [4]byte // in_addr
|
||||||
|
r1 RtAttr
|
||||||
|
a1 [4]byte // in_addr
|
||||||
|
}
|
||||||
|
|
||||||
|
// sizeofNewaddrLo is the expected size of newaddrLo.
|
||||||
|
const sizeofNewaddrLo = NLMSG_HDRLEN + SizeofIfAddrmsg + (SizeofRtAttr+4)*2
|
||||||
|
|
||||||
|
// newaddrLo returns the address of a populated newaddrLo.
|
||||||
|
func (s *rtnetlink) newaddrLo(lo int) *newaddrLo {
|
||||||
|
return &newaddrLo{NlMsghdr{
|
||||||
|
Len: sizeofNewaddrLo,
|
||||||
|
Type: RTM_NEWADDR,
|
||||||
|
Flags: NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE | NLM_F_EXCL,
|
||||||
|
Seq: s.seq,
|
||||||
|
Pid: s.pid,
|
||||||
|
}, IfAddrmsg{
|
||||||
|
Family: AF_INET,
|
||||||
|
Prefixlen: 8,
|
||||||
|
Flags: IFA_F_PERMANENT,
|
||||||
|
Scope: RT_SCOPE_HOST,
|
||||||
|
Index: uint32(lo),
|
||||||
|
}, RtAttr{
|
||||||
|
Len: uint16(SizeofRtAttr + len(newaddrLo{}.a0)),
|
||||||
|
Type: IFA_LOCAL,
|
||||||
|
}, [4]byte{127, 0, 0, 1}, RtAttr{
|
||||||
|
Len: uint16(SizeofRtAttr + len(newaddrLo{}.a1)),
|
||||||
|
Type: IFA_ADDRESS,
|
||||||
|
}, [4]byte{127, 0, 0, 1}}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (msg *newaddrLo) toWireFormat() []byte {
|
||||||
|
var buf [sizeofNewaddrLo]byte
|
||||||
|
|
||||||
|
*(*uint32)(unsafe.Pointer(&buf[0:4][0])) = msg.header.Len
|
||||||
|
*(*uint16)(unsafe.Pointer(&buf[4:6][0])) = msg.header.Type
|
||||||
|
*(*uint16)(unsafe.Pointer(&buf[6:8][0])) = msg.header.Flags
|
||||||
|
*(*uint32)(unsafe.Pointer(&buf[8:12][0])) = msg.header.Seq
|
||||||
|
*(*uint32)(unsafe.Pointer(&buf[12:16][0])) = msg.header.Pid
|
||||||
|
|
||||||
|
buf[16] = msg.data.Family
|
||||||
|
buf[17] = msg.data.Prefixlen
|
||||||
|
buf[18] = msg.data.Flags
|
||||||
|
buf[19] = msg.data.Scope
|
||||||
|
*(*uint32)(unsafe.Pointer(&buf[20:24][0])) = msg.data.Index
|
||||||
|
|
||||||
|
*(*uint16)(unsafe.Pointer(&buf[24:26][0])) = msg.r0.Len
|
||||||
|
*(*uint16)(unsafe.Pointer(&buf[26:28][0])) = msg.r0.Type
|
||||||
|
copy(buf[28:32], msg.a0[:])
|
||||||
|
*(*uint16)(unsafe.Pointer(&buf[32:34][0])) = msg.r1.Len
|
||||||
|
*(*uint16)(unsafe.Pointer(&buf[34:36][0])) = msg.r1.Type
|
||||||
|
copy(buf[36:40], msg.a1[:])
|
||||||
|
|
||||||
|
return buf[:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// newlinkLo represents a RTM_NEWLINK message.
|
||||||
|
type newlinkLo struct {
|
||||||
|
header NlMsghdr
|
||||||
|
data IfInfomsg
|
||||||
|
}
|
||||||
|
|
||||||
|
// sizeofNewlinkLo is the expected size of newlinkLo.
|
||||||
|
const sizeofNewlinkLo = NLMSG_HDRLEN + SizeofIfInfomsg
|
||||||
|
|
||||||
|
// newlinkLo returns the address of a populated newlinkLo.
|
||||||
|
func (s *rtnetlink) newlinkLo(lo int) *newlinkLo {
|
||||||
|
return &newlinkLo{NlMsghdr{
|
||||||
|
Len: sizeofNewlinkLo,
|
||||||
|
Type: RTM_NEWLINK,
|
||||||
|
Flags: NLM_F_REQUEST | NLM_F_ACK,
|
||||||
|
Seq: s.seq,
|
||||||
|
Pid: s.pid,
|
||||||
|
}, IfInfomsg{
|
||||||
|
Family: AF_UNSPEC,
|
||||||
|
Index: int32(lo),
|
||||||
|
Flags: IFF_UP,
|
||||||
|
Change: IFF_UP,
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (msg *newlinkLo) toWireFormat() []byte {
|
||||||
|
var buf [sizeofNewlinkLo]byte
|
||||||
|
|
||||||
|
*(*uint32)(unsafe.Pointer(&buf[0:4][0])) = msg.header.Len
|
||||||
|
*(*uint16)(unsafe.Pointer(&buf[4:6][0])) = msg.header.Type
|
||||||
|
*(*uint16)(unsafe.Pointer(&buf[6:8][0])) = msg.header.Flags
|
||||||
|
*(*uint32)(unsafe.Pointer(&buf[8:12][0])) = msg.header.Seq
|
||||||
|
*(*uint32)(unsafe.Pointer(&buf[12:16][0])) = msg.header.Pid
|
||||||
|
|
||||||
|
buf[16] = msg.data.Family
|
||||||
|
*(*uint16)(unsafe.Pointer(&buf[18:20][0])) = msg.data.Type
|
||||||
|
*(*int32)(unsafe.Pointer(&buf[20:24][0])) = msg.data.Index
|
||||||
|
*(*uint32)(unsafe.Pointer(&buf[24:28][0])) = msg.data.Flags
|
||||||
|
*(*uint32)(unsafe.Pointer(&buf[28:32][0])) = msg.data.Change
|
||||||
|
|
||||||
|
return buf[:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// mustLoopback creates the loopback address and brings the lo interface up.
|
||||||
|
// mustLoopback calls a fatal method of the underlying [log.Logger] of m with a
|
||||||
|
// user-facing error message if RTNETLINK behaves unexpectedly.
|
||||||
|
func mustLoopback(msg message.Msg) {
|
||||||
|
log := msg.GetLogger()
|
||||||
|
|
||||||
|
var lo int
|
||||||
|
if ifi, err := net.InterfaceByName("lo"); err != nil {
|
||||||
|
log.Fatalln(err)
|
||||||
|
} else {
|
||||||
|
lo = ifi.Index
|
||||||
|
}
|
||||||
|
|
||||||
|
var s rtnetlink
|
||||||
|
if err := s.open(); err != nil {
|
||||||
|
log.Fatalln(err)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if err := s.close(); err != nil {
|
||||||
|
msg.Verbosef("cannot close netlink: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
s.mustRoundtrip(msg, s.newaddrLo(lo).toWireFormat())
|
||||||
|
s.mustRoundtrip(msg, s.newlinkLo(lo).toWireFormat())
|
||||||
|
}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user