internal/outcome: rename from app

This is less ambiguous and more accurately describes the purpose of the package.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2025-10-29 04:32:43 +09:00
parent a52f7038e5
commit a0b4e47acc
45 changed files with 65 additions and 64 deletions


@@ -1,28 +0,0 @@
// Package app implements high-level hakurei container behaviour.
package app
import (
"context"
"log"
"os"
"hakurei.app/hst"
"hakurei.app/message"
)
// Main runs an app according to [hst.Config] and terminates. Main does not return.
func Main(ctx context.Context, msg message.Msg, config *hst.Config) {
var id hst.ID
if err := hst.NewInstanceID(&id); err != nil {
log.Fatal(err.Error())
}
seal := outcome{syscallDispatcher: direct{msg}}
if err := seal.finalise(ctx, msg, &id, config); err != nil {
printMessageError("cannot seal app:", err)
os.Exit(1)
}
seal.main(msg)
panic("unreachable")
}
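
For context, a hedged sketch of how a caller might drive Main, based only on the signature and behaviour documented above; the package import path is not visible in this diff, so the import alias below is hypothetical. message.NewMsg(nil) and hst.Template() are used exactly as in the tests further down.

package main

import (
	"context"

	app "hakurei.app/internal/app" // hypothetical import path, not shown in this diff
	"hakurei.app/hst"
	"hakurei.app/message"
)

func main() {
	// Construct a default message sink and the sample configuration,
	// mirroring how the tests below obtain them.
	msg := message.NewMsg(nil)
	app.Main(context.Background(), msg, hst.Template())
	// Main does not return; it terminates the process.
}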


@@ -1,940 +0,0 @@
package app
import (
"bytes"
"encoding/gob"
"encoding/json"
"errors"
"fmt"
"io"
"io/fs"
"log"
"os/exec"
"os/user"
"reflect"
"syscall"
"testing"
"time"
"hakurei.app/container"
"hakurei.app/container/check"
"hakurei.app/container/comp"
"hakurei.app/container/fhs"
"hakurei.app/container/seccomp"
"hakurei.app/hst"
"hakurei.app/message"
"hakurei.app/system"
"hakurei.app/system/acl"
"hakurei.app/system/dbus"
)
func TestApp(t *testing.T) {
t.Parallel()
msg := message.NewMsg(nil)
msg.SwapVerbose(testing.Verbose())
testCases := []struct {
name string
k syscallDispatcher
config *hst.Config
id hst.ID
wantSys *system.I
wantParams *container.Params
}{
{"template", new(stubNixOS), hst.Template(), checkExpectInstanceId, system.New(panicMsgContext{}, message.NewMsg(nil), 1000009).
// spParamsOp
Ensure(m("/tmp/hakurei.0"), 0711).
// spRuntimeOp
Ensure(m("/tmp/hakurei.0/runtime"), 0700).
UpdatePermType(system.User, m("/tmp/hakurei.0/runtime"), acl.Execute).
Ensure(m("/tmp/hakurei.0/runtime/9"), 0700).
UpdatePermType(system.User, m("/tmp/hakurei.0/runtime/9"), acl.Read, acl.Write, acl.Execute).
// spTmpdirOp
Ensure(m("/tmp/hakurei.0/tmpdir"), 0700).
UpdatePermType(system.User, m("/tmp/hakurei.0/tmpdir"), acl.Execute).
Ensure(m("/tmp/hakurei.0/tmpdir/9"), 01700).
UpdatePermType(system.User, m("/tmp/hakurei.0/tmpdir/9"), acl.Read, acl.Write, acl.Execute).
// instance
Ephemeral(system.Process, m("/tmp/hakurei.0/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), 0711).
// spWaylandOp
Wayland(
m("/tmp/hakurei.0/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/wayland"),
m("/run/user/1971/wayland-0"),
"org.chromium.Chromium",
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
).
// ensureRuntimeDir
Ensure(m("/run/user/1971/hakurei"), 0700).
UpdatePermType(system.User, m("/run/user/1971/hakurei"), acl.Execute).
Ensure(m("/run/user/1971"), 0700).
UpdatePermType(system.User, m("/run/user/1971"), acl.Execute).
// runtime
Ephemeral(system.Process, m("/run/user/1971/hakurei/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), 0700).
UpdatePerm(m("/run/user/1971/hakurei/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), acl.Execute).
// spPulseOp
Link(m("/run/user/1971/pulse/native"), m("/run/user/1971/hakurei/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/pulse")).
// spDBusOp
MustProxyDBus(
hst.Template().SessionBus,
hst.Template().SystemBus, dbus.ProxyPair{
"unix:path=/run/user/1971/bus",
"/tmp/hakurei.0/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bus",
}, dbus.ProxyPair{
"unix:path=/var/run/dbus/system_bus_socket",
"/tmp/hakurei.0/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/system_bus_socket",
},
).UpdatePerm(m("/tmp/hakurei.0/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bus"), acl.Read, acl.Write).
UpdatePerm(m("/tmp/hakurei.0/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/system_bus_socket"), acl.Read, acl.Write).
// spFilesystemOp
Ensure(m("/var/lib/hakurei/u0"), 0700).
UpdatePermType(system.User, m("/var/lib/hakurei/u0"), acl.Execute).
UpdatePermType(system.User, m("/var/lib/hakurei/u0/org.chromium.Chromium"), acl.Read, acl.Write, acl.Execute), &container.Params{
Dir: m("/data/data/org.chromium.Chromium"),
Env: []string{
"DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/1971/bus",
"DBUS_SYSTEM_BUS_ADDRESS=unix:path=/var/run/dbus/system_bus_socket",
"GOOGLE_API_KEY=AIzaSyBHDrl33hwRp4rMQY0ziRbj8K9LPA6vUCY",
"GOOGLE_DEFAULT_CLIENT_ID=77185425430.apps.googleusercontent.com",
"GOOGLE_DEFAULT_CLIENT_SECRET=OTJgUOQcT7lO7GsGZq2G4IlT",
"HOME=/data/data/org.chromium.Chromium",
"PULSE_COOKIE=/.hakurei/pulse-cookie",
"PULSE_SERVER=unix:/run/user/1971/pulse/native",
"SHELL=/run/current-system/sw/bin/zsh",
"TERM=xterm-256color",
"USER=chronos",
"WAYLAND_DISPLAY=wayland-0",
"XDG_RUNTIME_DIR=/run/user/1971",
"XDG_SESSION_CLASS=user",
"XDG_SESSION_TYPE=wayland",
},
// spParamsOp
Hostname: "localhost",
RetainSession: true,
HostNet: true,
HostAbstract: true,
Path: m("/run/current-system/sw/bin/chromium"),
Args: []string{
"chromium",
"--ignore-gpu-blocklist",
"--disable-smooth-scrolling",
"--enable-features=UseOzonePlatform",
"--ozone-platform=wayland",
},
SeccompFlags: seccomp.AllowMultiarch,
Uid: 1971,
Gid: 100,
Ops: new(container.Ops).
// resolveRoot
Root(m("/var/lib/hakurei/base/org.debian"), comp.BindWritable).
// spParamsOp
Proc(fhs.AbsProc).
Tmpfs(hst.AbsPrivateTmp, 1<<12, 0755).
Bind(fhs.AbsDev, fhs.AbsDev, comp.BindWritable|comp.BindDevice).
Tmpfs(fhs.AbsDev.Append("shm"), 0, 01777).
// spRuntimeOp
Tmpfs(fhs.AbsRunUser, 1<<12, 0755).
Bind(m("/tmp/hakurei.0/runtime/9"), m("/run/user/1971"), comp.BindWritable).
// spTmpdirOp
Bind(m("/tmp/hakurei.0/tmpdir/9"), fhs.AbsTmp, comp.BindWritable).
// spAccountOp
Place(m("/etc/passwd"), []byte("chronos:x:1971:100:Hakurei:/data/data/org.chromium.Chromium:/run/current-system/sw/bin/zsh\n")).
Place(m("/etc/group"), []byte("hakurei:x:100:\n")).
// spWaylandOp
Bind(m("/tmp/hakurei.0/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/wayland"), m("/run/user/1971/wayland-0"), 0).
// spPulseOp
Bind(m("/run/user/1971/hakurei/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/pulse"), m("/run/user/1971/pulse/native"), 0).
Place(m("/.hakurei/pulse-cookie"), bytes.Repeat([]byte{0}, pulseCookieSizeMax)).
// spDBusOp
Bind(m("/tmp/hakurei.0/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bus"), m("/run/user/1971/bus"), 0).
Bind(m("/tmp/hakurei.0/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/system_bus_socket"), m("/var/run/dbus/system_bus_socket"), 0).
// spFilesystemOp
Etc(fhs.AbsEtc, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").
Tmpfs(fhs.AbsTmp, 0, 0755).
Overlay(m("/nix/store"),
fhs.AbsVarLib.Append("hakurei/nix/u0/org.chromium.Chromium/rw-store/upper"),
fhs.AbsVarLib.Append("hakurei/nix/u0/org.chromium.Chromium/rw-store/work"),
fhs.AbsVarLib.Append("hakurei/base/org.nixos/ro-store")).
Link(m("/run/current-system"), "/run/current-system", true).
Link(m("/run/opengl-driver"), "/run/opengl-driver", true).
Bind(fhs.AbsVarLib.Append("hakurei/u0/org.chromium.Chromium"),
m("/data/data/org.chromium.Chromium"),
comp.BindWritable|comp.BindEnsure).
Bind(fhs.AbsDev.Append("dri"), fhs.AbsDev.Append("dri"),
comp.BindOptional|comp.BindWritable|comp.BindDevice).
Remount(fhs.AbsRoot, syscall.MS_RDONLY),
}},
{"nixos permissive defaults no enablements", new(stubNixOS), &hst.Config{Container: &hst.ContainerConfig{
Filesystem: []hst.FilesystemConfigJSON{
{FilesystemConfig: &hst.FSBind{
Target: fhs.AbsRoot,
Source: fhs.AbsRoot,
Write: true,
Special: true,
}},
{FilesystemConfig: &hst.FSBind{
Source: fhs.AbsDev.Append("kvm"),
Device: true,
Optional: true,
}},
{FilesystemConfig: &hst.FSBind{
Target: fhs.AbsEtc,
Source: fhs.AbsEtc,
Special: true,
}},
},
Username: "chronos",
Shell: m("/run/current-system/sw/bin/zsh"),
Home: m("/home/chronos"),
Path: m("/run/current-system/sw/bin/zsh"),
Args: []string{"/run/current-system/sw/bin/zsh"},
Flags: hst.FUserns | hst.FHostNet | hst.FHostAbstract | hst.FTty | hst.FShareRuntime | hst.FShareTmpdir,
}}, hst.ID{
0x4a, 0x45, 0x0b, 0x65,
0x96, 0xd7, 0xbc, 0x15,
0xbd, 0x01, 0x78, 0x0e,
0xb9, 0xa6, 0x07, 0xac,
}, system.New(t.Context(), msg, 1000000).
Ensure(m("/tmp/hakurei.0"), 0711).
Ensure(m("/tmp/hakurei.0/runtime"), 0700).
UpdatePermType(system.User, m("/tmp/hakurei.0/runtime"), acl.Execute).
Ensure(m("/tmp/hakurei.0/runtime/0"), 0700).
UpdatePermType(system.User, m("/tmp/hakurei.0/runtime/0"), acl.Read, acl.Write, acl.Execute).
Ensure(m("/tmp/hakurei.0/tmpdir"), 0700).
UpdatePermType(system.User, m("/tmp/hakurei.0/tmpdir"), acl.Execute).
Ensure(m("/tmp/hakurei.0/tmpdir/0"), 01700).
UpdatePermType(system.User, m("/tmp/hakurei.0/tmpdir/0"), acl.Read, acl.Write, acl.Execute), &container.Params{
Dir: m("/home/chronos"),
Path: m("/run/current-system/sw/bin/zsh"),
Args: []string{"/run/current-system/sw/bin/zsh"},
Env: []string{
"HOME=/home/chronos",
"SHELL=/run/current-system/sw/bin/zsh",
"TERM=xterm-256color",
"USER=chronos",
"XDG_RUNTIME_DIR=/run/user/65534",
"XDG_SESSION_CLASS=user",
"XDG_SESSION_TYPE=tty",
},
Ops: new(container.Ops).
Root(m("/"), comp.BindWritable).
Proc(m("/proc/")).
Tmpfs(hst.AbsPrivateTmp, 4096, 0755).
DevWritable(m("/dev/"), true).
Tmpfs(m("/dev/shm"), 0, 01777).
Tmpfs(m("/run/user/"), 4096, 0755).
Bind(m("/tmp/hakurei.0/runtime/0"), m("/run/user/65534"), comp.BindWritable).
Bind(m("/tmp/hakurei.0/tmpdir/0"), m("/tmp/"), comp.BindWritable).
Place(m("/etc/passwd"), []byte("chronos:x:65534:65534:Hakurei:/home/chronos:/run/current-system/sw/bin/zsh\n")).
Place(m("/etc/group"), []byte("hakurei:x:65534:\n")).
Bind(m("/dev/kvm"), m("/dev/kvm"), comp.BindWritable|comp.BindDevice|comp.BindOptional).
Etc(m("/etc/"), "4a450b6596d7bc15bd01780eb9a607ac").
Tmpfs(m("/run/user/1971"), 8192, 0755).
Tmpfs(m("/run/nscd"), 8192, 0755).
Tmpfs(m("/run/dbus"), 8192, 0755).
Remount(m("/dev/"), syscall.MS_RDONLY).
Remount(m("/"), syscall.MS_RDONLY),
SeccompPresets: comp.PresetExt | comp.PresetDenyDevel,
HostNet: true,
HostAbstract: true,
RetainSession: true,
ForwardCancel: true,
}},
{"nixos permissive defaults chromium", new(stubNixOS), &hst.Config{
ID: "org.chromium.Chromium",
Identity: 9,
Groups: []string{"video"},
SessionBus: &hst.BusConfig{
Talk: []string{
"org.freedesktop.Notifications",
"org.freedesktop.FileManager1",
"org.freedesktop.ScreenSaver",
"org.freedesktop.secrets",
"org.kde.kwalletd5",
"org.kde.kwalletd6",
"org.gnome.SessionManager",
},
Own: []string{
"org.chromium.Chromium.*",
"org.mpris.MediaPlayer2.org.chromium.Chromium.*",
"org.mpris.MediaPlayer2.chromium.*",
},
Call: map[string]string{
"org.freedesktop.portal.*": "*",
},
Broadcast: map[string]string{
"org.freedesktop.portal.*": "@/org/freedesktop/portal/*",
},
Filter: true,
},
SystemBus: &hst.BusConfig{
Talk: []string{
"org.bluez",
"org.freedesktop.Avahi",
"org.freedesktop.UPower",
},
Filter: true,
},
Enablements: hst.NewEnablements(hst.EWayland | hst.EDBus | hst.EPulse),
Container: &hst.ContainerConfig{
Filesystem: []hst.FilesystemConfigJSON{
{FilesystemConfig: &hst.FSBind{
Target: fhs.AbsRoot,
Source: fhs.AbsRoot,
Write: true,
Special: true,
}},
{FilesystemConfig: &hst.FSBind{
Source: fhs.AbsDev.Append("dri"),
Device: true,
Optional: true,
}},
{FilesystemConfig: &hst.FSBind{
Source: fhs.AbsDev.Append("kvm"),
Device: true,
Optional: true,
}},
{FilesystemConfig: &hst.FSBind{
Target: fhs.AbsEtc,
Source: fhs.AbsEtc,
Special: true,
}},
},
Username: "chronos",
Shell: m("/run/current-system/sw/bin/zsh"),
Home: m("/home/chronos"),
Path: m("/run/current-system/sw/bin/zsh"),
Args: []string{"zsh", "-c", "exec chromium "},
Flags: hst.FUserns | hst.FHostNet | hst.FHostAbstract | hst.FTty | hst.FShareRuntime | hst.FShareTmpdir,
},
}, hst.ID{
0xeb, 0xf0, 0x83, 0xd1,
0xb1, 0x75, 0x91, 0x17,
0x82, 0xd4, 0x13, 0x36,
0x9b, 0x64, 0xce, 0x7c,
}, system.New(t.Context(), msg, 1000009).
Ensure(m("/tmp/hakurei.0"), 0711).
Ensure(m("/tmp/hakurei.0/runtime"), 0700).UpdatePermType(system.User, m("/tmp/hakurei.0/runtime"), acl.Execute).
Ensure(m("/tmp/hakurei.0/runtime/9"), 0700).UpdatePermType(system.User, m("/tmp/hakurei.0/runtime/9"), acl.Read, acl.Write, acl.Execute).
Ensure(m("/tmp/hakurei.0/tmpdir"), 0700).UpdatePermType(system.User, m("/tmp/hakurei.0/tmpdir"), acl.Execute).
Ensure(m("/tmp/hakurei.0/tmpdir/9"), 01700).UpdatePermType(system.User, m("/tmp/hakurei.0/tmpdir/9"), acl.Read, acl.Write, acl.Execute).
Ephemeral(system.Process, m("/tmp/hakurei.0/ebf083d1b175911782d413369b64ce7c"), 0711).
Wayland(m("/tmp/hakurei.0/ebf083d1b175911782d413369b64ce7c/wayland"), m("/run/user/1971/wayland-0"), "org.chromium.Chromium", "ebf083d1b175911782d413369b64ce7c").
Ensure(m("/run/user/1971/hakurei"), 0700).UpdatePermType(system.User, m("/run/user/1971/hakurei"), acl.Execute).
Ensure(m("/run/user/1971"), 0700).UpdatePermType(system.User, m("/run/user/1971"), acl.Execute). // this is ordered as is because the previous Ensure only calls mkdir if XDG_RUNTIME_DIR is unset
Ephemeral(system.Process, m("/run/user/1971/hakurei/ebf083d1b175911782d413369b64ce7c"), 0700).UpdatePermType(system.Process, m("/run/user/1971/hakurei/ebf083d1b175911782d413369b64ce7c"), acl.Execute).
Link(m("/run/user/1971/pulse/native"), m("/run/user/1971/hakurei/ebf083d1b175911782d413369b64ce7c/pulse")).
MustProxyDBus(&hst.BusConfig{
Talk: []string{
"org.freedesktop.Notifications",
"org.freedesktop.FileManager1",
"org.freedesktop.ScreenSaver",
"org.freedesktop.secrets",
"org.kde.kwalletd5",
"org.kde.kwalletd6",
"org.gnome.SessionManager",
},
Own: []string{
"org.chromium.Chromium.*",
"org.mpris.MediaPlayer2.org.chromium.Chromium.*",
"org.mpris.MediaPlayer2.chromium.*",
},
Call: map[string]string{
"org.freedesktop.portal.*": "*",
},
Broadcast: map[string]string{
"org.freedesktop.portal.*": "@/org/freedesktop/portal/*",
},
Filter: true,
}, &hst.BusConfig{
Talk: []string{
"org.bluez",
"org.freedesktop.Avahi",
"org.freedesktop.UPower",
},
Filter: true,
}, dbus.ProxyPair{
"unix:path=/run/user/1971/bus",
"/tmp/hakurei.0/ebf083d1b175911782d413369b64ce7c/bus",
}, dbus.ProxyPair{
"unix:path=/var/run/dbus/system_bus_socket",
"/tmp/hakurei.0/ebf083d1b175911782d413369b64ce7c/system_bus_socket",
}).
UpdatePerm(m("/tmp/hakurei.0/ebf083d1b175911782d413369b64ce7c/bus"), acl.Read, acl.Write).
UpdatePerm(m("/tmp/hakurei.0/ebf083d1b175911782d413369b64ce7c/system_bus_socket"), acl.Read, acl.Write), &container.Params{
Dir: m("/home/chronos"),
Path: m("/run/current-system/sw/bin/zsh"),
Args: []string{"zsh", "-c", "exec chromium "},
Env: []string{
"DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/65534/bus",
"DBUS_SYSTEM_BUS_ADDRESS=unix:path=/var/run/dbus/system_bus_socket",
"HOME=/home/chronos",
"PULSE_COOKIE=" + hst.PrivateTmp + "/pulse-cookie",
"PULSE_SERVER=unix:/run/user/65534/pulse/native",
"SHELL=/run/current-system/sw/bin/zsh",
"TERM=xterm-256color",
"USER=chronos",
"WAYLAND_DISPLAY=wayland-0",
"XDG_RUNTIME_DIR=/run/user/65534",
"XDG_SESSION_CLASS=user",
"XDG_SESSION_TYPE=wayland",
},
Ops: new(container.Ops).
Root(m("/"), comp.BindWritable).
Proc(m("/proc/")).
Tmpfs(hst.AbsPrivateTmp, 4096, 0755).
DevWritable(m("/dev/"), true).
Tmpfs(m("/dev/shm"), 0, 01777).
Tmpfs(m("/run/user/"), 4096, 0755).
Bind(m("/tmp/hakurei.0/runtime/9"), m("/run/user/65534"), comp.BindWritable).
Bind(m("/tmp/hakurei.0/tmpdir/9"), m("/tmp/"), comp.BindWritable).
Place(m("/etc/passwd"), []byte("chronos:x:65534:65534:Hakurei:/home/chronos:/run/current-system/sw/bin/zsh\n")).
Place(m("/etc/group"), []byte("hakurei:x:65534:\n")).
Bind(m("/tmp/hakurei.0/ebf083d1b175911782d413369b64ce7c/wayland"), m("/run/user/65534/wayland-0"), 0).
Bind(m("/run/user/1971/hakurei/ebf083d1b175911782d413369b64ce7c/pulse"), m("/run/user/65534/pulse/native"), 0).
Place(m(hst.PrivateTmp+"/pulse-cookie"), bytes.Repeat([]byte{0}, pulseCookieSizeMax)).
Bind(m("/tmp/hakurei.0/ebf083d1b175911782d413369b64ce7c/bus"), m("/run/user/65534/bus"), 0).
Bind(m("/tmp/hakurei.0/ebf083d1b175911782d413369b64ce7c/system_bus_socket"), m("/var/run/dbus/system_bus_socket"), 0).
Bind(m("/dev/dri"), m("/dev/dri"), comp.BindWritable|comp.BindDevice|comp.BindOptional).
Bind(m("/dev/kvm"), m("/dev/kvm"), comp.BindWritable|comp.BindDevice|comp.BindOptional).
Etc(m("/etc/"), "ebf083d1b175911782d413369b64ce7c").
Tmpfs(m("/run/user/1971"), 8192, 0755).
Tmpfs(m("/run/nscd"), 8192, 0755).
Tmpfs(m("/run/dbus"), 8192, 0755).
Remount(m("/dev/"), syscall.MS_RDONLY).
Remount(m("/"), syscall.MS_RDONLY),
SeccompPresets: comp.PresetExt | comp.PresetDenyDevel,
HostNet: true,
HostAbstract: true,
RetainSession: true,
ForwardCancel: true,
}},
{"nixos chromium direct wayland", new(stubNixOS), &hst.Config{
ID: "org.chromium.Chromium",
Enablements: hst.NewEnablements(hst.EWayland | hst.EDBus | hst.EPulse),
Container: &hst.ContainerConfig{
Env: nil,
Filesystem: []hst.FilesystemConfigJSON{
f(&hst.FSBind{Source: m("/bin")}),
f(&hst.FSBind{Source: m("/usr/bin/")}),
f(&hst.FSBind{Source: m("/nix/store")}),
f(&hst.FSBind{Source: m("/run/current-system")}),
f(&hst.FSBind{Source: m("/sys/block"), Optional: true}),
f(&hst.FSBind{Source: m("/sys/bus"), Optional: true}),
f(&hst.FSBind{Source: m("/sys/class"), Optional: true}),
f(&hst.FSBind{Source: m("/sys/dev"), Optional: true}),
f(&hst.FSBind{Source: m("/sys/devices"), Optional: true}),
f(&hst.FSBind{Source: m("/run/opengl-driver")}),
f(&hst.FSBind{Source: m("/dev/dri"), Device: true, Optional: true}),
f(&hst.FSBind{Source: m("/etc/"), Target: m("/etc/"), Special: true}),
f(&hst.FSBind{Source: m("/var/lib/persist/module/hakurei/0/1"), Write: true, Ensure: true}),
},
Username: "u0_a1",
Shell: m("/run/current-system/sw/bin/zsh"),
Home: m("/var/lib/persist/module/hakurei/0/1"),
Path: m("/nix/store/yqivzpzzn7z5x0lq9hmbzygh45d8rhqd-chromium-start"),
Flags: hst.FUserns | hst.FHostNet | hst.FMapRealUID | hst.FShareRuntime | hst.FShareTmpdir,
},
SystemBus: &hst.BusConfig{
Talk: []string{"org.bluez", "org.freedesktop.Avahi", "org.freedesktop.UPower"},
Filter: true,
},
SessionBus: &hst.BusConfig{
Talk: []string{
"org.freedesktop.FileManager1", "org.freedesktop.Notifications",
"org.freedesktop.ScreenSaver", "org.freedesktop.secrets",
"org.kde.kwalletd5", "org.kde.kwalletd6",
},
Own: []string{
"org.chromium.Chromium.*",
"org.mpris.MediaPlayer2.org.chromium.Chromium.*",
"org.mpris.MediaPlayer2.chromium.*",
},
Call: map[string]string{}, Broadcast: map[string]string{},
Filter: true,
},
DirectWayland: true,
Identity: 1, Groups: []string{},
}, hst.ID{
0x8e, 0x2c, 0x76, 0xb0,
0x66, 0xda, 0xbe, 0x57,
0x4c, 0xf0, 0x73, 0xbd,
0xb4, 0x6e, 0xb5, 0xc1,
}, system.New(t.Context(), msg, 1000001).
Ensure(m("/tmp/hakurei.0"), 0711).
Ensure(m("/tmp/hakurei.0/runtime"), 0700).UpdatePermType(system.User, m("/tmp/hakurei.0/runtime"), acl.Execute).
Ensure(m("/tmp/hakurei.0/runtime/1"), 0700).UpdatePermType(system.User, m("/tmp/hakurei.0/runtime/1"), acl.Read, acl.Write, acl.Execute).
Ensure(m("/tmp/hakurei.0/tmpdir"), 0700).UpdatePermType(system.User, m("/tmp/hakurei.0/tmpdir"), acl.Execute).
Ensure(m("/tmp/hakurei.0/tmpdir/1"), 01700).UpdatePermType(system.User, m("/tmp/hakurei.0/tmpdir/1"), acl.Read, acl.Write, acl.Execute).
Ensure(m("/run/user/1971/hakurei"), 0700).UpdatePermType(system.User, m("/run/user/1971/hakurei"), acl.Execute).
Ensure(m("/run/user/1971"), 0700).UpdatePermType(system.User, m("/run/user/1971"), acl.Execute). // this is ordered as is because the previous Ensure only calls mkdir if XDG_RUNTIME_DIR is unset
UpdatePermType(hst.EWayland, m("/run/user/1971/wayland-0"), acl.Read, acl.Write, acl.Execute).
Ephemeral(system.Process, m("/run/user/1971/hakurei/8e2c76b066dabe574cf073bdb46eb5c1"), 0700).UpdatePermType(system.Process, m("/run/user/1971/hakurei/8e2c76b066dabe574cf073bdb46eb5c1"), acl.Execute).
Link(m("/run/user/1971/pulse/native"), m("/run/user/1971/hakurei/8e2c76b066dabe574cf073bdb46eb5c1/pulse")).
Ephemeral(system.Process, m("/tmp/hakurei.0/8e2c76b066dabe574cf073bdb46eb5c1"), 0711).
MustProxyDBus(&hst.BusConfig{
Talk: []string{
"org.freedesktop.FileManager1", "org.freedesktop.Notifications",
"org.freedesktop.ScreenSaver", "org.freedesktop.secrets",
"org.kde.kwalletd5", "org.kde.kwalletd6",
},
Own: []string{
"org.chromium.Chromium.*",
"org.mpris.MediaPlayer2.org.chromium.Chromium.*",
"org.mpris.MediaPlayer2.chromium.*",
},
Call: map[string]string{}, Broadcast: map[string]string{},
Filter: true,
}, &hst.BusConfig{
Talk: []string{
"org.bluez",
"org.freedesktop.Avahi",
"org.freedesktop.UPower",
},
Filter: true,
}, dbus.ProxyPair{
"unix:path=/run/user/1971/bus",
"/tmp/hakurei.0/8e2c76b066dabe574cf073bdb46eb5c1/bus",
}, dbus.ProxyPair{
"unix:path=/var/run/dbus/system_bus_socket",
"/tmp/hakurei.0/8e2c76b066dabe574cf073bdb46eb5c1/system_bus_socket",
}).
UpdatePerm(m("/tmp/hakurei.0/8e2c76b066dabe574cf073bdb46eb5c1/bus"), acl.Read, acl.Write).
UpdatePerm(m("/tmp/hakurei.0/8e2c76b066dabe574cf073bdb46eb5c1/system_bus_socket"), acl.Read, acl.Write), &container.Params{
Uid: 1971,
Gid: 100,
Dir: m("/var/lib/persist/module/hakurei/0/1"),
Path: m("/nix/store/yqivzpzzn7z5x0lq9hmbzygh45d8rhqd-chromium-start"),
Args: []string{"/nix/store/yqivzpzzn7z5x0lq9hmbzygh45d8rhqd-chromium-start"},
Env: []string{
"DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/1971/bus",
"DBUS_SYSTEM_BUS_ADDRESS=unix:path=/var/run/dbus/system_bus_socket",
"HOME=/var/lib/persist/module/hakurei/0/1",
"PULSE_COOKIE=" + hst.PrivateTmp + "/pulse-cookie",
"PULSE_SERVER=unix:/run/user/1971/pulse/native",
"SHELL=/run/current-system/sw/bin/zsh",
"TERM=xterm-256color",
"USER=u0_a1",
"WAYLAND_DISPLAY=wayland-0",
"XDG_RUNTIME_DIR=/run/user/1971",
"XDG_SESSION_CLASS=user",
"XDG_SESSION_TYPE=wayland",
},
Ops: new(container.Ops).
Proc(m("/proc/")).
Tmpfs(hst.AbsPrivateTmp, 4096, 0755).
DevWritable(m("/dev/"), true).
Tmpfs(m("/dev/shm"), 0, 01777).
Tmpfs(m("/run/user/"), 4096, 0755).
Bind(m("/tmp/hakurei.0/runtime/1"), m("/run/user/1971"), comp.BindWritable).
Bind(m("/tmp/hakurei.0/tmpdir/1"), m("/tmp/"), comp.BindWritable).
Place(m("/etc/passwd"), []byte("u0_a1:x:1971:100:Hakurei:/var/lib/persist/module/hakurei/0/1:/run/current-system/sw/bin/zsh\n")).
Place(m("/etc/group"), []byte("hakurei:x:100:\n")).
Bind(m("/run/user/1971/wayland-0"), m("/run/user/1971/wayland-0"), 0).
Bind(m("/run/user/1971/hakurei/8e2c76b066dabe574cf073bdb46eb5c1/pulse"), m("/run/user/1971/pulse/native"), 0).
Place(m(hst.PrivateTmp+"/pulse-cookie"), bytes.Repeat([]byte{0}, pulseCookieSizeMax)).
Bind(m("/tmp/hakurei.0/8e2c76b066dabe574cf073bdb46eb5c1/bus"), m("/run/user/1971/bus"), 0).
Bind(m("/tmp/hakurei.0/8e2c76b066dabe574cf073bdb46eb5c1/system_bus_socket"), m("/var/run/dbus/system_bus_socket"), 0).
Bind(m("/bin"), m("/bin"), 0).
Bind(m("/usr/bin/"), m("/usr/bin/"), 0).
Bind(m("/nix/store"), m("/nix/store"), 0).
Bind(m("/run/current-system"), m("/run/current-system"), 0).
Bind(m("/sys/block"), m("/sys/block"), comp.BindOptional).
Bind(m("/sys/bus"), m("/sys/bus"), comp.BindOptional).
Bind(m("/sys/class"), m("/sys/class"), comp.BindOptional).
Bind(m("/sys/dev"), m("/sys/dev"), comp.BindOptional).
Bind(m("/sys/devices"), m("/sys/devices"), comp.BindOptional).
Bind(m("/run/opengl-driver"), m("/run/opengl-driver"), 0).
Bind(m("/dev/dri"), m("/dev/dri"), comp.BindDevice|comp.BindWritable|comp.BindOptional).
Etc(m("/etc/"), "8e2c76b066dabe574cf073bdb46eb5c1").
Bind(m("/var/lib/persist/module/hakurei/0/1"), m("/var/lib/persist/module/hakurei/0/1"), comp.BindWritable|comp.BindEnsure).
Remount(m("/dev/"), syscall.MS_RDONLY).
Remount(m("/"), syscall.MS_RDONLY),
SeccompPresets: comp.PresetExt | comp.PresetDenyTTY | comp.PresetDenyDevel,
HostNet: true,
ForwardCancel: true,
}},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
gr, gw := io.Pipe()
var gotSys *system.I
{
sPriv := newOutcomeState(tc.k, msg, &tc.id, tc.config, &Hsu{k: tc.k})
if err := sPriv.populateLocal(tc.k, msg); err != nil {
t.Fatalf("populateLocal: error = %#v", err)
}
gotSys = system.New(t.Context(), msg, sPriv.uid.unwrap())
if err := sPriv.newSys(tc.config, gotSys).toSystem(); err != nil {
t.Fatalf("toSystem: error = %#v", err)
}
go func() {
e := gob.NewEncoder(gw)
if err := errors.Join(e.Encode(&sPriv)); err != nil {
t.Errorf("Encode: error = %v", err)
panic("unexpected encode fault")
}
}()
}
var gotParams *container.Params
{
var sShim outcomeState
d := gob.NewDecoder(gr)
if err := errors.Join(d.Decode(&sShim)); err != nil {
t.Fatalf("Decode: error = %v", err)
}
if err := sShim.populateLocal(tc.k, msg); err != nil {
t.Fatalf("populateLocal: error = %#v", err)
}
stateParams := sShim.newParams()
for _, op := range sShim.Shim.Ops {
if err := op.toContainer(stateParams); err != nil {
t.Fatalf("toContainer: error = %#v", err)
}
}
gotParams = stateParams.params
}
t.Run("sys", func(t *testing.T) {
if !gotSys.Equal(tc.wantSys) {
t.Errorf("toSystem: sys = %#v, want %#v", gotSys, tc.wantSys)
}
})
t.Run("params", func(t *testing.T) {
if !reflect.DeepEqual(gotParams, tc.wantParams) {
t.Errorf("toContainer: params =\n%s\n, want\n%s", mustMarshal(gotParams), mustMarshal(tc.wantParams))
}
})
})
}
}
func mustMarshal(v any) string {
if b, err := json.Marshal(v); err != nil {
panic(err.Error())
} else {
return string(b)
}
}
func stubDirEntries(names ...string) (e []fs.DirEntry, err error) {
e = make([]fs.DirEntry, len(names))
for i, name := range names {
e[i] = stubDirEntryPath(name)
}
return
}
type stubDirEntryPath string
func (p stubDirEntryPath) Name() string { return string(p) }
func (p stubDirEntryPath) IsDir() bool { panic("attempted to call IsDir") }
func (p stubDirEntryPath) Type() fs.FileMode { panic("attempted to call Type") }
func (p stubDirEntryPath) Info() (fs.FileInfo, error) { panic("attempted to call Info") }
type stubFileInfoMode fs.FileMode
func (s stubFileInfoMode) Name() string { panic("attempted to call Name") }
func (s stubFileInfoMode) Size() int64 { panic("attempted to call Size") }
func (s stubFileInfoMode) Mode() fs.FileMode { return fs.FileMode(s) }
func (s stubFileInfoMode) ModTime() time.Time { panic("attempted to call ModTime") }
func (s stubFileInfoMode) IsDir() bool { panic("attempted to call IsDir") }
func (s stubFileInfoMode) Sys() any { panic("attempted to call Sys") }
type stubFileInfoIsDir bool
func (s stubFileInfoIsDir) Name() string { panic("attempted to call Name") }
func (s stubFileInfoIsDir) Size() int64 { panic("attempted to call Size") }
func (s stubFileInfoIsDir) Mode() fs.FileMode { panic("attempted to call Mode") }
func (s stubFileInfoIsDir) ModTime() time.Time { panic("attempted to call ModTime") }
func (s stubFileInfoIsDir) IsDir() bool { return bool(s) }
func (s stubFileInfoIsDir) Sys() any { panic("attempted to call Sys") }
type stubFileInfoPulseCookie struct{ stubFileInfoIsDir }
func (s stubFileInfoPulseCookie) Size() int64 { return pulseCookieSizeMax }
type stubOsFileReadCloser struct{ io.ReadCloser }
func (s stubOsFileReadCloser) Name() string { panic("attempting to call Name") }
func (s stubOsFileReadCloser) Write([]byte) (int, error) { panic("attempting to call Write") }
func (s stubOsFileReadCloser) Stat() (fs.FileInfo, error) { panic("attempting to call Stat") }
type stubNixOS struct {
usernameErr map[string]error
panicDispatcher
}
func (k *stubNixOS) getpid() int { return 0xdeadbeef }
func (k *stubNixOS) getuid() int { return 1971 }
func (k *stubNixOS) getgid() int { return 100 }
func (k *stubNixOS) lookupEnv(key string) (string, bool) {
switch key {
case "SHELL":
return "/run/current-system/sw/bin/zsh", true
case "TERM":
return "xterm-256color", true
case "WAYLAND_DISPLAY":
return "wayland-0", true
case "PULSE_COOKIE":
return "", false
case "HOME":
return "/home/ophestra", true
case "XDG_RUNTIME_DIR":
return "/run/user/1971", true
case "XDG_CONFIG_HOME":
return "/home/ophestra/xdg/config", true
case "DBUS_SYSTEM_BUS_ADDRESS":
return "", false
default:
panic(fmt.Sprintf("attempted to access unexpected environment variable %q", key))
}
}
func (k *stubNixOS) stat(name string) (fs.FileInfo, error) {
switch name {
case "/var/run/nscd":
return nil, nil
case "/run/user/1971/pulse":
return nil, nil
case "/run/user/1971/pulse/native":
return stubFileInfoMode(0666), nil
case "/home/ophestra/.pulse-cookie":
return stubFileInfoIsDir(true), nil
case "/home/ophestra/xdg/config/pulse/cookie":
return stubFileInfoPulseCookie{false}, nil
default:
panic(fmt.Sprintf("attempted to stat unexpected path %q", name))
}
}
func (k *stubNixOS) open(name string) (osFile, error) {
switch name {
case "/home/ophestra/xdg/config/pulse/cookie":
return stubOsFileReadCloser{io.NopCloser(bytes.NewReader(bytes.Repeat([]byte{0}, pulseCookieSizeMax)))}, nil
default:
panic(fmt.Sprintf("attempted to open unexpected path %q", name))
}
}
func (k *stubNixOS) readdir(name string) ([]fs.DirEntry, error) {
switch name {
case "/":
return stubDirEntries("bin", "boot", "dev", "etc", "home", "lib",
"lib64", "nix", "proc", "root", "run", "srv", "sys", "tmp", "usr", "var")
case "/run":
return stubDirEntries("agetty.reload", "binfmt", "booted-system",
"credentials", "cryptsetup", "current-system", "dbus", "host", "keys",
"libvirt", "libvirtd.pid", "lock", "log", "lvm", "mount", "NetworkManager",
"nginx", "nixos", "nscd", "opengl-driver", "pppd", "resolvconf", "sddm",
"store", "syncoid", "system", "systemd", "tmpfiles.d", "udev", "udisks2",
"user", "utmp", "virtlogd.pid", "wrappers", "zed.pid", "zed.state")
case "/etc":
return stubDirEntries("alsa", "bashrc", "binfmt.d", "dbus-1", "default",
"ethertypes", "fonts", "fstab", "fuse.conf", "group", "host.conf", "hostid",
"hostname", "hostname.CHECKSUM", "hosts", "inputrc", "ipsec.d", "issue", "kbd",
"libblockdev", "locale.conf", "localtime", "login.defs", "lsb-release", "lvm",
"machine-id", "man_db.conf", "modprobe.d", "modules-load.d", "mtab", "nanorc",
"netgroup", "NetworkManager", "nix", "nixos", "NIXOS", "nscd.conf", "nsswitch.conf",
"opensnitchd", "os-release", "pam", "pam.d", "passwd", "pipewire", "pki", "polkit-1",
"profile", "protocols", "qemu", "resolv.conf", "resolvconf.conf", "rpc", "samba",
"sddm.conf", "secureboot", "services", "set-environment", "shadow", "shells", "ssh",
"ssl", "static", "subgid", "subuid", "sudoers", "sysctl.d", "systemd", "terminfo",
"tmpfiles.d", "udev", "udisks2", "UPower", "vconsole.conf", "X11", "zfs", "zinputrc",
"zoneinfo", "zprofile", "zshenv", "zshrc")
case "/var/lib/hakurei/base/org.debian":
return stubDirEntries("bin", "dev", "etc", "home", "lib64", "lost+found",
"mnt", "nix", "proc", "root", "run", "srv", "sys", "tmp", "usr", "var")
default:
panic(fmt.Sprintf("attempted to read unexpected directory %q", name))
}
}
func (k *stubNixOS) tempdir() string { return "/tmp/" }
func (k *stubNixOS) evalSymlinks(path string) (string, error) {
switch path {
case "/var/run/nscd":
return "/run/nscd", nil
case "/run/user/1971":
return "/run/user/1971", nil
case "/tmp/hakurei.0":
return "/tmp/hakurei.0", nil
case "/var/run/dbus":
return "/run/dbus", nil
case "/dev/kvm":
return "/dev/kvm", nil
case "/etc/":
return "/etc/", nil
case "/bin":
return "/bin", nil
case "/boot":
return "/boot", nil
case "/home":
return "/home", nil
case "/lib":
return "/lib", nil
case "/lib64":
return "/lib64", nil
case "/nix":
return "/nix", nil
case "/root":
return "/root", nil
case "/run":
return "/run", nil
case "/srv":
return "/srv", nil
case "/sys":
return "/sys", nil
case "/usr":
return "/usr", nil
case "/var":
return "/var", nil
case "/dev/dri":
return "/dev/dri", nil
case "/usr/bin/":
return "/usr/bin/", nil
case "/nix/store":
return "/nix/store", nil
case "/run/current-system":
return "/nix/store/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-nixos-system-satori-25.05.99999999.aaaaaaa", nil
case "/sys/block":
return "/sys/block", nil
case "/sys/bus":
return "/sys/bus", nil
case "/sys/class":
return "/sys/class", nil
case "/sys/dev":
return "/sys/dev", nil
case "/sys/devices":
return "/sys/devices", nil
case "/run/opengl-driver":
return "/nix/store/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-graphics-drivers", nil
case "/var/lib/persist/module/hakurei/0/1":
return "/var/lib/persist/module/hakurei/0/1", nil
case "/var/lib/hakurei/nix/u0/org.chromium.Chromium/rw-store/upper":
return "/var/lib/hakurei/nix/u0/org.chromium.Chromium/rw-store/upper", nil
case "/var/lib/hakurei/nix/u0/org.chromium.Chromium/rw-store/work":
return "/var/lib/hakurei/nix/u0/org.chromium.Chromium/rw-store/work", nil
case "/var/lib/hakurei/base/org.nixos/ro-store":
return "/var/lib/hakurei/base/org.nixos/ro-store", nil
case "/var/lib/hakurei/u0/org.chromium.Chromium":
return "/var/lib/hakurei/u0/org.chromium.Chromium", nil
case "/var/lib/hakurei/base/org.debian/bin":
return "/var/lib/hakurei/base/org.debian/bin", nil
case "/var/lib/hakurei/base/org.debian/home":
return "/var/lib/hakurei/base/org.debian/home", nil
case "/var/lib/hakurei/base/org.debian/lib64":
return "/var/lib/hakurei/base/org.debian/lib64", nil
case "/var/lib/hakurei/base/org.debian/lost+found":
return "/var/lib/hakurei/base/org.debian/lost+found", nil
case "/var/lib/hakurei/base/org.debian/nix":
return "/var/lib/hakurei/base/org.debian/nix", nil
case "/var/lib/hakurei/base/org.debian/root":
return "/var/lib/hakurei/base/org.debian/root", nil
case "/var/lib/hakurei/base/org.debian/run":
return "/var/lib/hakurei/base/org.debian/run", nil
case "/var/lib/hakurei/base/org.debian/srv":
return "/var/lib/hakurei/base/org.debian/srv", nil
case "/var/lib/hakurei/base/org.debian/sys":
return "/var/lib/hakurei/base/org.debian/sys", nil
case "/var/lib/hakurei/base/org.debian/usr":
return "/var/lib/hakurei/base/org.debian/usr", nil
case "/var/lib/hakurei/base/org.debian/var":
return "/var/lib/hakurei/base/org.debian/var", nil
default:
panic(fmt.Sprintf("attempted to evaluate unexpected path %q", path))
}
}
func (k *stubNixOS) lookupGroupId(name string) (string, error) {
switch name {
case "video":
return "26", nil
default:
return "", user.UnknownGroupError(name)
}
}
func (k *stubNixOS) cmdOutput(cmd *exec.Cmd) ([]byte, error) {
switch cmd.Path {
case "/proc/nonexistent/hsu":
return []byte{'0'}, nil
default:
panic(fmt.Sprintf("unexpected cmd %#v", cmd))
}
}
func (k *stubNixOS) overflowUid(message.Msg) int { return 65534 }
func (k *stubNixOS) overflowGid(message.Msg) int { return 65534 }
func (k *stubNixOS) mustHsuPath() *check.Absolute { return m("/proc/nonexistent/hsu") }
func (k *stubNixOS) dbusAddress() (string, string) {
return "unix:path=/run/user/1971/bus", "unix:path=/var/run/dbus/system_bus_socket"
}
func (k *stubNixOS) fatalf(format string, v ...any) { panic(fmt.Sprintf(format, v...)) }
func (k *stubNixOS) isVerbose() bool { return true }
func (k *stubNixOS) verbose(v ...any) { log.Print(v...) }
func (k *stubNixOS) verbosef(format string, v ...any) { log.Printf(format, v...) }
func m(pathname string) *check.Absolute {
return check.MustAbs(pathname)
}
func f(c hst.FilesystemConfig) hst.FilesystemConfigJSON {
return hst.FilesystemConfigJSON{FilesystemConfig: c}
}


@@ -1,163 +0,0 @@
package app
import (
"context"
"io"
"io/fs"
"os"
"os/exec"
"os/signal"
"os/user"
"path/filepath"
"hakurei.app/container"
"hakurei.app/container/check"
"hakurei.app/container/seccomp"
"hakurei.app/internal"
"hakurei.app/message"
"hakurei.app/system/dbus"
)
// osFile represents [os.File].
type osFile interface {
Name() string
io.Writer
fs.File
}
// syscallDispatcher provides methods that make state-dependent system calls as part of their behaviour.
type syscallDispatcher interface {
// new starts a goroutine with a new instance of syscallDispatcher.
// A syscallDispatcher must never be used in any goroutine other than the one owning it;
// synchronising access alone is not enough, as this is for test instrumentation.
new(f func(k syscallDispatcher, msg message.Msg))
// getpid provides [os.Getpid].
getpid() int
// getuid provides [os.Getuid].
getuid() int
// getgid provides [os.Getgid].
getgid() int
// lookupEnv provides [os.LookupEnv].
lookupEnv(key string) (string, bool)
// pipe provides os.Pipe.
pipe() (r, w *os.File, err error)
// stat provides [os.Stat].
stat(name string) (os.FileInfo, error)
// open provides [os.Open].
open(name string) (osFile, error)
// readdir provides [os.ReadDir].
readdir(name string) ([]os.DirEntry, error)
// tempdir provides [os.TempDir].
tempdir() string
// exit provides [os.Exit].
exit(code int)
// evalSymlinks provides [filepath.EvalSymlinks].
evalSymlinks(path string) (string, error)
// lookupGroupId calls [user.LookupGroup] and returns the Gid field of the resulting [user.Group] struct.
lookupGroupId(name string) (string, error)
// cmdOutput provides the Output method of [exec.Cmd].
cmdOutput(cmd *exec.Cmd) ([]byte, error)
// notifyContext provides [signal.NotifyContext].
notifyContext(parent context.Context, signals ...os.Signal) (ctx context.Context, stop context.CancelFunc)
// prctl provides [container.Prctl].
prctl(op, arg2, arg3 uintptr) error
// overflowUid provides [container.OverflowUid].
overflowUid(msg message.Msg) int
// overflowGid provides [container.OverflowGid].
overflowGid(msg message.Msg) int
// setDumpable provides [container.SetDumpable].
setDumpable(dumpable uintptr) error
// receive provides [container.Receive].
receive(key string, e any, fdp *uintptr) (closeFunc func() error, err error)
// containerStart provides the Start method of [container.Container].
containerStart(z *container.Container) error
// containerServe provides the Serve method of [container.Container].
containerServe(z *container.Container) error
// containerWait provides the Wait method of [container.Container].
containerWait(z *container.Container) error
// seccompLoad provides [seccomp.Load].
seccompLoad(rules []seccomp.NativeRule, flags seccomp.ExportFlag) error
// mustHsuPath provides [internal.MustHsuPath].
mustHsuPath() *check.Absolute
// dbusAddress provides [dbus.Address].
dbusAddress() (session, system string)
// setupContSignal provides setupContSignal.
setupContSignal(pid int) (io.ReadCloser, func(), error)
// getMsg returns the [message.Msg] held by syscallDispatcher.
getMsg() message.Msg
// fatal provides [log.Fatal].
fatal(v ...any)
// fatalf provides [log.Fatalf].
fatalf(format string, v ...any)
}
// direct implements syscallDispatcher on the current kernel.
type direct struct{ msg message.Msg }
func (k direct) new(f func(k syscallDispatcher, msg message.Msg)) { go f(k, k.msg) }
func (direct) getpid() int { return os.Getpid() }
func (direct) getuid() int { return os.Getuid() }
func (direct) getgid() int { return os.Getgid() }
func (direct) lookupEnv(key string) (string, bool) { return os.LookupEnv(key) }
func (direct) pipe() (r, w *os.File, err error) { return os.Pipe() }
func (direct) stat(name string) (os.FileInfo, error) { return os.Stat(name) }
func (direct) open(name string) (osFile, error) { return os.Open(name) }
func (direct) readdir(name string) ([]os.DirEntry, error) { return os.ReadDir(name) }
func (direct) tempdir() string { return os.TempDir() }
func (direct) exit(code int) { os.Exit(code) }
func (direct) evalSymlinks(path string) (string, error) { return filepath.EvalSymlinks(path) }
func (direct) lookupGroupId(name string) (gid string, err error) {
var group *user.Group
group, err = user.LookupGroup(name)
if group != nil {
gid = group.Gid
}
return
}
func (direct) cmdOutput(cmd *exec.Cmd) ([]byte, error) { return cmd.Output() }
func (direct) notifyContext(parent context.Context, signals ...os.Signal) (ctx context.Context, stop context.CancelFunc) {
return signal.NotifyContext(parent, signals...)
}
func (direct) prctl(op, arg2, arg3 uintptr) error { return container.Prctl(op, arg2, arg3) }
func (direct) overflowUid(msg message.Msg) int { return container.OverflowUid(msg) }
func (direct) overflowGid(msg message.Msg) int { return container.OverflowGid(msg) }
func (direct) setDumpable(dumpable uintptr) error { return container.SetDumpable(dumpable) }
func (direct) receive(key string, e any, fdp *uintptr) (func() error, error) {
return container.Receive(key, e, fdp)
}
func (direct) containerStart(z *container.Container) error { return z.Start() }
func (direct) containerServe(z *container.Container) error { return z.Serve() }
func (direct) containerWait(z *container.Container) error { return z.Wait() }
func (direct) seccompLoad(rules []seccomp.NativeRule, flags seccomp.ExportFlag) error {
return seccomp.Load(rules, flags)
}
func (direct) mustHsuPath() *check.Absolute { return internal.MustHsuPath() }
func (direct) dbusAddress() (session, system string) { return dbus.Address() }
func (direct) setupContSignal(pid int) (io.ReadCloser, func(), error) { return setupContSignal(pid) }
func (k direct) getMsg() message.Msg { return k.msg }
func (k direct) fatal(v ...any) { k.msg.GetLogger().Fatal(v...) }
func (k direct) fatalf(format string, v ...any) { k.msg.GetLogger().Fatalf(format, v...) }
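
The interface above exists so that every state-dependent system call can be swapped out during testing: direct forwards to the kernel, while kstub and stubNixOS in the test files return canned values. A minimal, self-contained sketch of the same pattern follows, with hypothetical names (clock, sysClock, fixedClock, report) that are not part of this codebase.

package main

import (
	"fmt"
	"time"
)

// clock stands in for syscallDispatcher: a narrow interface the code under test depends on.
type clock interface{ now() time.Time }

// sysClock plays the role of direct: it forwards to the real system.
type sysClock struct{}

func (sysClock) now() time.Time { return time.Now() }

// fixedClock plays the role of a test stub: it returns a canned value.
type fixedClock time.Time

func (c fixedClock) now() time.Time { return time.Time(c) }

// report is code under test; it never touches the system directly.
func report(k clock) string { return "now: " + k.now().Format(time.RFC3339) }

func main() {
	fmt.Println(report(sysClock{}))                        // production path
	fmt.Println(report(fixedClock(time.Unix(0, 0).UTC()))) // deterministic test path
}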


@@ -1,644 +0,0 @@
package app
import (
"bytes"
"context"
"io"
"io/fs"
"log"
"maps"
"os"
"os/exec"
"reflect"
"slices"
"sync"
"testing"
"time"
"unsafe"
"hakurei.app/container"
"hakurei.app/container/check"
"hakurei.app/container/seccomp"
"hakurei.app/container/stub"
"hakurei.app/hst"
"hakurei.app/message"
"hakurei.app/system"
)
// call initialises a [stub.Call].
// This keeps composites analysis happy without making the test cases too bloated.
func call(name string, args stub.ExpectArgs, ret any, err error) stub.Call {
return stub.Call{Name: name, Args: args, Ret: ret, Err: err}
}
const (
// checkExpectUid is the uid value used by checkOpBehaviour to initialise [system.I].
checkExpectUid = 0xcafebabe
// wantAutoEtcPrefix is the autoetc prefix corresponding to checkExpectInstanceId.
wantAutoEtcPrefix = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
// wantInstancePrefix is the SharePath corresponding to checkExpectInstanceId.
wantInstancePrefix = container.Nonexistent + "/tmp/hakurei.0/" + wantAutoEtcPrefix
// wantRuntimePath is the XDG_RUNTIME_DIR value returned during testing.
wantRuntimePath = "/proc/nonexistent/xdg_runtime_dir"
// wantRunDirPath is the RunDirPath value resolved during testing.
wantRunDirPath = wantRuntimePath + "/hakurei"
// wantRuntimeSharePath is the runtimeSharePath value resolved during testing.
wantRuntimeSharePath = wantRunDirPath + "/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
)
// checkExpectInstanceId is the [hst.ID] value used by checkOpBehaviour to initialise outcomeState.
var checkExpectInstanceId = *(*hst.ID)(bytes.Repeat([]byte{0xaa}, len(hst.ID{})))
type (
// pStateSysFunc is called before each test case is run to prepare outcomeStateSys.
pStateSysFunc = func(state *outcomeStateSys)
// pStateContainerFunc is called before each test case is run to prepare outcomeStateParams.
pStateContainerFunc = func(state *outcomeStateParams)
// extraCheckSysFunc is called to check outcomeStateSys and must not have side effects.
extraCheckSysFunc = func(t *testing.T, state *outcomeStateSys)
// extraCheckParamsFunc is called to check outcomeStateParams and must not have side effects.
extraCheckParamsFunc = func(t *testing.T, state *outcomeStateParams)
)
// insertsOps prepares outcomeStateParams to allow [container.Op] to be inserted.
func insertsOps(next pStateContainerFunc) pStateContainerFunc {
return func(state *outcomeStateParams) {
state.params.Ops = new(container.Ops)
if next != nil {
next(state)
}
}
}
// afterSpRuntimeOp prepares outcomeStateParams for an outcomeOp meant to run after spRuntimeOp.
func afterSpRuntimeOp(next pStateContainerFunc) pStateContainerFunc {
return func(state *outcomeStateParams) {
// emulates spRuntimeOp
state.runtimeDir = m("/run/user/1000")
if next != nil {
next(state)
}
}
}
// sysUsesInstance checks for use of the outcomeStateSys.instance method.
func sysUsesInstance(next extraCheckSysFunc) extraCheckSysFunc {
return func(t *testing.T, state *outcomeStateSys) {
if want := m(wantInstancePrefix); !reflect.DeepEqual(state.sharePath, want) {
t.Errorf("outcomeStateSys: sharePath = %v, want %v", state.sharePath, want)
}
if next != nil {
next(t, state)
}
}
}
// sysUsesRuntime checks for use of the outcomeStateSys.runtime method.
func sysUsesRuntime(next extraCheckSysFunc) extraCheckSysFunc {
return func(t *testing.T, state *outcomeStateSys) {
if want := m(wantRuntimeSharePath); !reflect.DeepEqual(state.runtimeSharePath, want) {
t.Errorf("outcomeStateSys: runtimeSharePath = %v, want %v", state.runtimeSharePath, want)
}
if next != nil {
next(t, state)
}
}
}
// paramsWantEnv checks outcomeStateParams.env for inserted entries on top of [hst.Config].
func paramsWantEnv(config *hst.Config, wantEnv map[string]string, next extraCheckParamsFunc) extraCheckParamsFunc {
want := make(map[string]string, len(wantEnv)+len(config.Container.Env))
maps.Copy(want, wantEnv)
maps.Copy(want, config.Container.Env)
return func(t *testing.T, state *outcomeStateParams) {
if !maps.Equal(state.env, want) {
t.Errorf("toContainer: env = %#v, want %#v", state.env, want)
}
if next != nil {
next(t, state)
}
}
}
// opBehaviourTestCase checks outcomeOp behaviour against outcomeStateSys and outcomeStateParams.
type opBehaviourTestCase struct {
name string
// newOp returns a new instance of outcomeOp under testing that is safe to clobber.
newOp func(isShim, clearUnexported bool) outcomeOp
// newConfig returns a new instance of [hst.Config] that is checked not to be clobbered by outcomeOp.
newConfig func() *hst.Config
// pStateSys is called before outcomeOp.toSystem to prepare outcomeStateSys.
pStateSys pStateSysFunc
// toSystem are expected syscallDispatcher calls during outcomeOp.toSystem.
toSystem []stub.Call
// wantSys is the expected [system.I] state after outcomeOp.toSystem.
wantSys *system.I
// extraCheckSys is called after outcomeOp.toSystem to check the state of outcomeStateSys.
extraCheckSys extraCheckSysFunc
// wantErrSystem is the expected error value returned by outcomeOp.toSystem.
// Further testing is skipped if not nil.
wantErrSystem error
// pStateContainer is called before outcomeOp.toContainer to prepare outcomeStateParams.
pStateContainer pStateContainerFunc
// toContainer are expected syscallDispatcher calls during outcomeOp.toContainer.
toContainer []stub.Call
// wantParams is the expected [container.Params] after outcomeOp.toContainer.
wantParams *container.Params
// extraCheckParams is called after outcomeOp.toContainer to check the state of outcomeStateParams.
extraCheckParams extraCheckParamsFunc
// wantErrContainer is the expected error value returned by outcomeOp.toContainer.
wantErrContainer error
}
// checkOpBehaviour runs a slice of opBehaviourTestCase.
func checkOpBehaviour(t *testing.T, testCases []opBehaviourTestCase) {
t.Helper()
wantNewState := []stub.Call{
// newOutcomeState
call("getpid", stub.ExpectArgs{}, 0xdead, nil),
call("isVerbose", stub.ExpectArgs{}, true, nil),
call("mustHsuPath", stub.ExpectArgs{}, m(container.Nonexistent), nil),
call("cmdOutput", stub.ExpectArgs{container.Nonexistent, os.Stderr, []string{}, "/"}, []byte("0"), nil),
call("tempdir", stub.ExpectArgs{}, container.Nonexistent+"/tmp", nil),
call("lookupEnv", stub.ExpectArgs{"XDG_RUNTIME_DIR"}, wantRuntimePath, nil),
call("getuid", stub.ExpectArgs{}, 1000, nil),
call("getgid", stub.ExpectArgs{}, 100, nil),
// populateLocal
call("verbosef", stub.ExpectArgs{"process share directory at %q, runtime directory at %q", []any{
m(container.Nonexistent + "/tmp/hakurei.0"),
m(container.Nonexistent + "/xdg_runtime_dir/hakurei"),
}}, nil, nil),
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
t.Helper()
t.Parallel()
wantCallsFull := slices.Concat(wantNewState, tc.toSystem, []stub.Call{{Name: stub.CallSeparator}})
if tc.wantErrSystem == nil {
wantCallsFull = append(wantCallsFull, slices.Concat(wantNewState, tc.toContainer)...)
}
wantConfig := tc.newConfig()
k := &kstub{panicDispatcher{}, stub.New(t,
func(s *stub.Stub[syscallDispatcher]) syscallDispatcher { return &kstub{panicDispatcher{}, s} },
stub.Expect{Calls: wantCallsFull},
)}
defer stub.HandleExit(t)
{
config := tc.newConfig()
s := newOutcomeState(k, k, &checkExpectInstanceId, config, &Hsu{k: k})
if err := s.populateLocal(k, k); err != nil {
t.Fatalf("populateLocal: error = %v", err)
}
stateSys := s.newSys(config, system.New(panicMsgContext{}, k, checkExpectUid))
if tc.pStateSys != nil {
tc.pStateSys(stateSys)
}
op := tc.newOp(false, true)
if err := op.toSystem(stateSys); !reflect.DeepEqual(err, tc.wantErrSystem) {
t.Fatalf("toSystem: error = %#v, want %#v", err, tc.wantErrSystem)
}
k.Expects(stub.CallSeparator)
if !reflect.DeepEqual(config, wantConfig) {
t.Errorf("toSystem clobbered config: %#v, want %#v", config, wantConfig)
}
if tc.wantErrSystem != nil {
goto out
}
if !stateSys.sys.Equal(tc.wantSys) {
t.Errorf("toSystem: %#v, want %#v", stateSys.sys, tc.wantSys)
}
if tc.extraCheckSys != nil {
tc.extraCheckSys(t, stateSys)
}
if wantOpSys := tc.newOp(true, false); !reflect.DeepEqual(op, wantOpSys) {
t.Errorf("toSystem: op = %#v, want %#v", op, wantOpSys)
}
}
{
config := tc.newConfig()
s := newOutcomeState(k, k, &checkExpectInstanceId, config, &Hsu{k: k})
stateParams := s.newParams()
if err := s.populateLocal(k, k); err != nil {
t.Fatalf("populateLocal: error = %v", err)
}
if tc.pStateContainer != nil {
tc.pStateContainer(stateParams)
}
op := tc.newOp(true, true)
if err := op.toContainer(stateParams); !reflect.DeepEqual(err, tc.wantErrContainer) {
t.Fatalf("toContainer: error = %#v, want %#v", err, tc.wantErrContainer)
}
if tc.wantErrContainer != nil {
goto out
}
if !reflect.DeepEqual(stateParams.params, tc.wantParams) {
t.Errorf("toContainer:\n%s\nwant\n%s", mustMarshal(stateParams.params), mustMarshal(tc.wantParams))
}
if tc.extraCheckParams != nil {
tc.extraCheckParams(t, stateParams)
}
}
out:
k.VisitIncomplete(func(s *stub.Stub[syscallDispatcher]) {
count := k.Pos() - 1 // separator
if count-len(wantNewState) < len(tc.toSystem) {
t.Errorf("toSystem: %d calls, want %d", count-len(wantNewState), len(tc.toSystem))
} else {
t.Errorf("toContainer: %d calls, want %d", count-len(tc.toSystem)-2*len(wantNewState), len(tc.toContainer))
}
})
})
}
}
func newI() *system.I { return system.New(panicMsgContext{}, panicMsgContext{}, checkExpectUid) }
// simpleTestCase is a simple freeform test case utilising kstub.
type simpleTestCase struct {
name string
f func(k *kstub) error
// want are expected syscallDispatcher calls during f.
want stub.Expect
// wantErr is the expected error value returned by f.
wantErr error
}
// checkSimple runs a slice of simpleTestCase.
func checkSimple(t *testing.T, fname string, testCases []simpleTestCase) {
t.Helper()
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
t.Helper()
t.Parallel()
defer stub.HandleExit(t)
k := &kstub{panicDispatcher{}, stub.New(t, func(s *stub.Stub[syscallDispatcher]) syscallDispatcher { return &kstub{panicDispatcher{}, s} }, tc.want)}
if err := tc.f(k); !reflect.DeepEqual(err, tc.wantErr) {
t.Errorf("%s: error = %#v, want %#v", fname, err, tc.wantErr)
}
k.VisitIncomplete(func(s *stub.Stub[syscallDispatcher]) {
t.Helper()
t.Errorf("%s: %d calls, want %d", fname, s.Pos(), s.Len())
})
})
}
}
// kstub partially implements syscallDispatcher via [stub.Stub].
type kstub struct {
panicDispatcher
*stub.Stub[syscallDispatcher]
}
func (k *kstub) new(f func(k syscallDispatcher, msg message.Msg)) {
k.New(func(k syscallDispatcher) { f(k, k.(*kstub)) })
}
func (k *kstub) getpid() int { k.Helper(); return k.Expects("getpid").Ret.(int) }
func (k *kstub) getuid() int { k.Helper(); return k.Expects("getuid").Ret.(int) }
func (k *kstub) getgid() int { k.Helper(); return k.Expects("getgid").Ret.(int) }
func (k *kstub) lookupEnv(key string) (string, bool) {
k.Helper()
expect := k.Expects("lookupEnv")
if expect.Error(
stub.CheckArg(k.Stub, "key", key, 0)) != nil {
k.FailNow()
}
if expect.Ret == nil {
return "\x00", false
}
return expect.Ret.(string), true
}
func (k *kstub) stat(name string) (os.FileInfo, error) {
k.Helper()
expect := k.Expects("stat")
return expect.Ret.(os.FileInfo), expect.Error(
stub.CheckArg(k.Stub, "name", name, 0))
}
func (k *kstub) open(name string) (osFile, error) {
k.Helper()
expect := k.Expects("open")
return expect.Ret.(osFile), expect.Error(
stub.CheckArg(k.Stub, "name", name, 0))
}
func (k *kstub) readdir(name string) ([]os.DirEntry, error) {
k.Helper()
expect := k.Expects("readdir")
return expect.Ret.([]os.DirEntry), expect.Error(
stub.CheckArg(k.Stub, "name", name, 0))
}
func (k *kstub) tempdir() string { k.Helper(); return k.Expects("tempdir").Ret.(string) }
func (k *kstub) evalSymlinks(path string) (string, error) {
k.Helper()
expect := k.Expects("evalSymlinks")
return expect.Ret.(string), expect.Error(
stub.CheckArg(k.Stub, "path", path, 0))
}
func (k *kstub) prctl(op, arg2, arg3 uintptr) error {
k.Helper()
return k.Expects("prctl").Error(
stub.CheckArg(k.Stub, "op", op, 0),
stub.CheckArg(k.Stub, "arg2", arg2, 1),
stub.CheckArg(k.Stub, "arg3", arg3, 2))
}
func (k *kstub) setDumpable(dumpable uintptr) error {
k.Helper()
return k.Expects("setDumpable").Error(
stub.CheckArg(k.Stub, "dumpable", dumpable, 0))
}
func (k *kstub) receive(key string, e any, fdp *uintptr) (closeFunc func() error, err error) {
k.Helper()
expect := k.Expects("receive")
reflect.ValueOf(e).Elem().Set(reflect.ValueOf(expect.Args[1]))
if expect.Args[2] != nil {
*fdp = expect.Args[2].(uintptr)
}
return func() error { return k.Expects("closeReceive").Err }, expect.Error(
stub.CheckArg(k.Stub, "key", key, 0))
}
func (k *kstub) expectCheckContainer(expect *stub.Call, z *container.Container) error {
k.Helper()
err := expect.Error(
stub.CheckArgReflect(k.Stub, "params", &z.Params, 0))
if err != nil {
k.Errorf("params:\n%s\n%s", mustMarshal(&z.Params), mustMarshal(expect.Args[0]))
}
return err
}
func (k *kstub) containerStart(z *container.Container) error {
k.Helper()
return k.expectCheckContainer(k.Expects("containerStart"), z)
}
func (k *kstub) containerServe(z *container.Container) error {
k.Helper()
return k.expectCheckContainer(k.Expects("containerServe"), z)
}
func (k *kstub) containerWait(z *container.Container) error {
k.Helper()
return k.expectCheckContainer(k.Expects("containerWait"), z)
}
func (k *kstub) seccompLoad(rules []seccomp.NativeRule, flags seccomp.ExportFlag) error {
k.Helper()
return k.Expects("seccompLoad").Error(
stub.CheckArgReflect(k.Stub, "rules", rules, 0),
stub.CheckArg(k.Stub, "flags", flags, 1))
}
func (k *kstub) cmdOutput(cmd *exec.Cmd) ([]byte, error) {
k.Helper()
expect := k.Expects("cmdOutput")
return expect.Ret.([]byte), expect.Error(
stub.CheckArg(k.Stub, "cmd.Path", cmd.Path, 0),
stub.CheckArgReflect(k.Stub, "cmd.Stderr", cmd.Stderr, 1),
stub.CheckArgReflect(k.Stub, "cmd.Env", cmd.Env, 2),
stub.CheckArg(k.Stub, "cmd.Dir", cmd.Dir, 3))
}
func (k *kstub) notifyContext(parent context.Context, signals ...os.Signal) (ctx context.Context, stop context.CancelFunc) {
k.Helper()
if k.Expects("notifyContext").Error(
stub.CheckArgReflect(k.Stub, "parent", parent, 0),
stub.CheckArgReflect(k.Stub, "signals", signals, 1)) != nil {
k.FailNow()
}
return k.Context(), func() { k.Helper(); k.Expects("notifyContextStop") }
}
func (k *kstub) mustHsuPath() *check.Absolute {
k.Helper()
return k.Expects("mustHsuPath").Ret.(*check.Absolute)
}
func (k *kstub) dbusAddress() (session, system string) {
k.Helper()
ret := k.Expects("dbusAddress").Ret.([2]string)
return ret[0], ret[1]
}
// stubTrackReader embeds kstub but switches the underlying [stub.Stub] index to sub on its first Read.
// The resulting kstub does not share any state with the instance passed to the instrumented goroutine.
// Therefore, any method making use of such must not be called.
type stubTrackReader struct {
sub int
subOnce sync.Once
*kstub
}
func (r *stubTrackReader) Read(p []byte) (n int, err error) {
r.subOnce.Do(func() {
subVal := reflect.ValueOf(r.kstub.Stub).Elem().FieldByName("sub")
r.kstub = &kstub{panicDispatcher{}, reflect.
NewAt(subVal.Type(), unsafe.Pointer(subVal.UnsafeAddr())).Elem().
Interface().([]*stub.Stub[syscallDispatcher])[r.sub]}
})
return r.kstub.Read(p)
}
func (k *kstub) setupContSignal(pid int) (io.ReadCloser, func(), error) {
k.Helper()
expect := k.Expects("setupContSignal")
return &stubTrackReader{sub: expect.Ret.(int), kstub: k}, func() { k.Expects("wKeepAlive") }, expect.Error(
stub.CheckArg(k.Stub, "pid", pid, 0))
}
func (k *kstub) getMsg() message.Msg { k.Helper(); k.Expects("getMsg"); return k }
func (k *kstub) Close() error { k.Helper(); return k.Expects("rcClose").Err }
func (k *kstub) Read(p []byte) (n int, err error) {
k.Helper()
expect := k.Expects("rcRead")
// special case to terminate exit outcomes goroutine
// to proceed with further testing of the entrypoint
if expect.Ret == nil {
panic(stub.PanicExit)
}
return copy(p, expect.Ret.([]byte)), expect.Err
}
func (k *kstub) GetLogger() *log.Logger { k.Helper(); return k.Expects("getLogger").Ret.(*log.Logger) }
func (k *kstub) IsVerbose() bool { k.Helper(); return k.Expects("isVerbose").Ret.(bool) }
func (k *kstub) SwapVerbose(verbose bool) bool {
k.Helper()
expect := k.Expects("swapVerbose")
if expect.Error(
stub.CheckArg(k.Stub, "verbose", verbose, 0)) != nil {
k.FailNow()
}
return expect.Ret.(bool)
}
// ignoreValue marks a value to be ignored by the test suite.
type ignoreValue struct{}
func (k *kstub) Verbose(v ...any) {
k.Helper()
expect := k.Expects("verbose")
// translate ignores in v
if want, ok := expect.Args[0].([]any); ok && len(v) == len(want) {
for i, a := range want {
if _, ok = a.(ignoreValue); ok {
v[i] = ignoreValue{}
}
}
}
if expect.Error(
stub.CheckArgReflect(k.Stub, "v", v, 0)) != nil {
k.FailNow()
}
}
func (k *kstub) Verbosef(format string, v ...any) {
k.Helper()
if k.Expects("verbosef").Error(
stub.CheckArg(k.Stub, "format", format, 0),
stub.CheckArgReflect(k.Stub, "v", v, 1)) != nil {
k.FailNow()
}
}
func (k *kstub) Suspend() bool { k.Helper(); return k.Expects("suspend").Ret.(bool) }
func (k *kstub) Resume() bool { k.Helper(); return k.Expects("resume").Ret.(bool) }
func (k *kstub) BeforeExit() { k.Helper(); k.Expects("beforeExit") }
// stubOsFile partially implements osFile.
type stubOsFile struct {
closeErr error
io.Reader
io.Writer
}
func (f *stubOsFile) Close() error { return f.closeErr }
func (f *stubOsFile) Name() string { panic("unreachable") }
func (f *stubOsFile) Stat() (fs.FileInfo, error) { panic("unreachable") }
// stubFi partially implements [os.FileInfo]. It can be passed as nil to assert that all methods are unreachable.
type stubFi struct {
size int64
mode os.FileMode
isDir bool
}
func (fi *stubFi) Name() string { panic("unreachable") }
func (fi *stubFi) ModTime() time.Time { panic("unreachable") }
func (fi *stubFi) Sys() any { panic("unreachable") }
func (fi *stubFi) Size() int64 { return fi.size }
func (fi *stubFi) Mode() os.FileMode { return fi.mode }
func (fi *stubFi) IsDir() bool { return fi.isDir }
// stubDir returns a slice of [os.DirEntry] with only their Name method implemented.
func stubDir(names ...string) []os.DirEntry {
d := make([]os.DirEntry, len(names))
for i, name := range names {
d[i] = nameDentry(name)
}
return d
}
// nameDentry implements the Name method on [os.DirEntry].
type nameDentry string
func (e nameDentry) Name() string { return string(e) }
func (nameDentry) IsDir() bool { panic("unreachable") }
func (nameDentry) Type() fs.FileMode { panic("unreachable") }
func (nameDentry) Info() (fs.FileInfo, error) { panic("unreachable") }
// errorReader implements [io.Reader] that unconditionally returns -1, val.
type errorReader struct{ val error }
func (r errorReader) Read([]byte) (int, error) { return -1, r.val }
// panicMsgContext implements [message.Msg] and [context.Context] with methods wrapping panic.
// This should be assigned to test case expectations to be checked against.
type panicMsgContext struct{}
func (panicMsgContext) GetLogger() *log.Logger { panic("unreachable") }
func (panicMsgContext) IsVerbose() bool { panic("unreachable") }
func (panicMsgContext) SwapVerbose(bool) bool { panic("unreachable") }
func (panicMsgContext) Verbose(...any) { panic("unreachable") }
func (panicMsgContext) Verbosef(string, ...any) { panic("unreachable") }
func (panicMsgContext) Suspend() bool { panic("unreachable") }
func (panicMsgContext) Resume() bool { panic("unreachable") }
func (panicMsgContext) BeforeExit() { panic("unreachable") }
func (panicMsgContext) Deadline() (time.Time, bool) { panic("unreachable") }
func (panicMsgContext) Done() <-chan struct{} { panic("unreachable") }
func (panicMsgContext) Err() error { panic("unreachable") }
func (panicMsgContext) Value(any) any { panic("unreachable") }
// panicDispatcher implements syscallDispatcher with methods wrapping panic.
// This type is meant to be embedded in partial syscallDispatcher implementations.
type panicDispatcher struct{}
func (panicDispatcher) new(func(k syscallDispatcher, msg message.Msg)) { panic("unreachable") }
func (panicDispatcher) getpid() int { panic("unreachable") }
func (panicDispatcher) getuid() int { panic("unreachable") }
func (panicDispatcher) getgid() int { panic("unreachable") }
func (panicDispatcher) lookupEnv(string) (string, bool) { panic("unreachable") }
func (panicDispatcher) pipe() (*os.File, *os.File, error) { panic("unreachable") }
func (panicDispatcher) stat(string) (os.FileInfo, error) { panic("unreachable") }
func (panicDispatcher) open(string) (osFile, error) { panic("unreachable") }
func (panicDispatcher) readdir(string) ([]os.DirEntry, error) { panic("unreachable") }
func (panicDispatcher) tempdir() string { panic("unreachable") }
func (panicDispatcher) exit(int) { panic("unreachable") }
func (panicDispatcher) evalSymlinks(string) (string, error) { panic("unreachable") }
func (panicDispatcher) prctl(uintptr, uintptr, uintptr) error { panic("unreachable") }
func (panicDispatcher) lookupGroupId(string) (string, error) { panic("unreachable") }
func (panicDispatcher) cmdOutput(*exec.Cmd) ([]byte, error) { panic("unreachable") }
func (panicDispatcher) overflowUid(message.Msg) int { panic("unreachable") }
func (panicDispatcher) overflowGid(message.Msg) int { panic("unreachable") }
func (panicDispatcher) setDumpable(uintptr) error { panic("unreachable") }
func (panicDispatcher) receive(string, any, *uintptr) (func() error, error) { panic("unreachable") }
func (panicDispatcher) containerStart(*container.Container) error { panic("unreachable") }
func (panicDispatcher) containerServe(*container.Container) error { panic("unreachable") }
func (panicDispatcher) containerWait(*container.Container) error { panic("unreachable") }
func (panicDispatcher) mustHsuPath() *check.Absolute { panic("unreachable") }
func (panicDispatcher) dbusAddress() (string, string) { panic("unreachable") }
func (panicDispatcher) setupContSignal(int) (io.ReadCloser, func(), error) { panic("unreachable") }
func (panicDispatcher) getMsg() message.Msg { panic("unreachable") }
func (panicDispatcher) fatal(...any) { panic("unreachable") }
func (panicDispatcher) fatalf(string, ...any) { panic("unreachable") }
func (panicDispatcher) notifyContext(context.Context, ...os.Signal) (context.Context, context.CancelFunc) {
panic("unreachable")
}
func (panicDispatcher) seccompLoad([]seccomp.NativeRule, seccomp.ExportFlag) error {
panic("unreachable")
}

View File

@@ -1,85 +0,0 @@
package app
import (
"context"
"errors"
"fmt"
"os"
"os/user"
"sync/atomic"
"hakurei.app/hst"
"hakurei.app/message"
"hakurei.app/system"
)
func newWithMessage(msg string) error { return newWithMessageError(msg, os.ErrInvalid) }
func newWithMessageError(msg string, err error) error {
return &hst.AppError{Step: "finalise", Err: err, Msg: msg}
}
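// For example, spAccountOp returns newWithMessage(fmt.Sprintf("invalid user name %q", ...)),
// which surfaces as an *hst.AppError with Step "finalise", Err os.ErrInvalid and the formatted message.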
// An outcome is the runnable state of a hakurei container via [hst.Config].
type outcome struct {
// Supplementary group ids. Populated during finalise.
supp []string
// Resolved priv side operating system interactions. Populated during finalise.
sys *system.I
// Transmitted to shim. Populated during finalise.
state *outcomeState
// Kept for saving to [state].
config *hst.Config
// Whether the current process is in outcome.main.
active atomic.Bool
ctx context.Context
syscallDispatcher
}
func (k *outcome) finalise(ctx context.Context, msg message.Msg, id *hst.ID, config *hst.Config) error {
if ctx == nil || id == nil {
// unreachable
panic("invalid call to finalise")
}
if k.ctx != nil || k.sys != nil || k.state != nil {
// unreachable
panic("attempting to finalise twice")
}
k.ctx = ctx
if err := config.Validate(); err != nil {
return err
}
// hsu expects numerical group ids
supp := make([]string, len(config.Groups))
for i, name := range config.Groups {
if gid, err := k.lookupGroupId(name); err != nil {
var unknownGroupError user.UnknownGroupError
if errors.As(err, &unknownGroupError) {
return newWithMessageError(fmt.Sprintf("unknown group %q", name), unknownGroupError)
} else {
return &hst.AppError{Step: "look up group by name", Err: err}
}
} else {
supp[i] = gid
}
}
// early validation complete at this point
s := newOutcomeState(k.syscallDispatcher, msg, id, config, &Hsu{k: k})
if err := s.populateLocal(k.syscallDispatcher, msg); err != nil {
return err
}
sys := system.New(k.ctx, msg, s.uid.unwrap())
if err := s.newSys(config, sys).toSystem(); err != nil {
return err
}
k.sys = sys
k.supp = supp
k.state = s
k.config = config
return nil
}

View File

@@ -1,101 +0,0 @@
package app
import (
"errors"
"fmt"
"log"
"os"
"os/exec"
"strconv"
"sync"
"hakurei.app/container/fhs"
"hakurei.app/hst"
"hakurei.app/message"
)
// Hsu caches responses from cmd/hsu.
type Hsu struct {
idOnce sync.Once
idErr error
id int
kOnce sync.Once
// msg is not populated
k syscallDispatcher
}
var ErrHsuAccess = errors.New("current user is not in the hsurc file")
// ensureDispatcher ensures Hsu.k is not nil.
func (h *Hsu) ensureDispatcher() {
h.kOnce.Do(func() {
if h.k == nil {
h.k = direct{}
}
})
}
// ID returns the current user hsurc identifier.
// [ErrHsuAccess] is returned if the current user is not in hsurc.
func (h *Hsu) ID() (int, error) {
h.ensureDispatcher()
h.idOnce.Do(func() {
h.id = -1
hsuPath := h.k.mustHsuPath().String()
cmd := exec.Command(hsuPath)
cmd.Path = hsuPath
cmd.Stderr = os.Stderr // pass through fatal messages
cmd.Env = make([]string, 0)
cmd.Dir = fhs.Root
var (
p []byte
exitError *exec.ExitError
)
const step = "obtain uid from hsu"
if p, h.idErr = h.k.cmdOutput(cmd); h.idErr == nil {
h.id, h.idErr = strconv.Atoi(string(p))
if h.idErr != nil {
h.idErr = &hst.AppError{Step: step, Err: h.idErr, Msg: "invalid uid string from hsu"}
}
} else if errors.As(h.idErr, &exitError) && exitError != nil && exitError.ExitCode() == 1 {
// hsu prints an error message in this case
h.idErr = &hst.AppError{Step: step, Err: ErrHsuAccess}
} else if errors.Is(h.idErr, os.ErrNotExist) {
h.idErr = &hst.AppError{Step: step, Err: h.idErr,
Msg: fmt.Sprintf("the setuid helper is missing: %s", hsuPath)}
}
})
return h.id, h.idErr
}
// MustID calls [Hsu.ID] and terminates on error.
func (h *Hsu) MustID(msg message.Msg) int {
id, err := h.ID()
if err == nil {
return id
}
const fallback = "cannot retrieve user id from setuid wrapper:"
if errors.Is(err, ErrHsuAccess) {
if msg != nil {
msg.Verbose("*"+fallback, err)
}
os.Exit(1)
return -0xdeadbeef // not reached
} else if m, ok := message.GetMessage(err); ok {
log.Fatal(m)
return -0xdeadbeef // not reached
} else {
log.Fatalln(fallback, err)
return -0xdeadbeef // not reached
}
}
// HsuUid returns the target uid for the stable hsu uid format.
// No bounds check is performed; a value retrieved by [Hsu] is expected.
func HsuUid(id, identity int) int { return 1000000 + id*10000 + identity }
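// For illustration: HsuUid(0, 9) yields 1000009, and HsuUid(10, 9999) yields 1109999.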

View File

@@ -1,84 +0,0 @@
package app
import (
"os"
"os/exec"
"reflect"
"strconv"
"syscall"
"testing"
"unsafe"
"hakurei.app/container/stub"
"hakurei.app/hst"
)
func TestHsu(t *testing.T) {
t.Parallel()
t.Run("ensure dispatcher", func(t *testing.T) {
hsu := new(Hsu)
hsu.ensureDispatcher()
k := direct{}
if !reflect.DeepEqual(hsu.k, k) {
t.Errorf("ensureDispatcher: k = %#v, want %#v", hsu.k, k)
}
})
fCheckID := func(k *kstub) error {
hsu := &Hsu{k: k}
id, err := hsu.ID()
k.Verbose(id)
if id0, err0 := hsu.ID(); id0 != id || !reflect.DeepEqual(err0, err) {
t.Fatalf("ID: id0 = %d, err0 = %#v, id = %d, err = %#v", id0, err0, id, err)
}
return err
}
checkSimple(t, "Hsu.ID", []simpleTestCase{
{"hsu nonexistent", fCheckID, stub.Expect{Calls: []stub.Call{
call("mustHsuPath", stub.ExpectArgs{}, m("/run/wrappers/bin/hsu"), nil),
call("cmdOutput", stub.ExpectArgs{"/run/wrappers/bin/hsu", os.Stderr, []string{}, "/"}, ([]byte)(nil), os.ErrNotExist),
call("verbose", stub.ExpectArgs{[]any{-1}}, nil, nil),
}}, &hst.AppError{
Step: "obtain uid from hsu",
Err: os.ErrNotExist,
Msg: "the setuid helper is missing: /run/wrappers/bin/hsu",
}},
{"access", fCheckID, stub.Expect{Calls: []stub.Call{
call("mustHsuPath", stub.ExpectArgs{}, m("/run/wrappers/bin/hsu"), nil),
call("cmdOutput", stub.ExpectArgs{"/run/wrappers/bin/hsu", os.Stderr, []string{}, "/"}, ([]byte)(nil), makeExitError(1<<8)),
call("verbose", stub.ExpectArgs{[]any{-1}}, nil, nil),
}}, &hst.AppError{
Step: "obtain uid from hsu",
Err: ErrHsuAccess,
}},
{"invalid output", fCheckID, stub.Expect{Calls: []stub.Call{
call("mustHsuPath", stub.ExpectArgs{}, m("/run/wrappers/bin/hsu"), nil),
call("cmdOutput", stub.ExpectArgs{"/run/wrappers/bin/hsu", os.Stderr, []string{}, "/"}, []byte{0}, nil),
call("verbose", stub.ExpectArgs{[]any{0}}, nil, nil),
}}, &hst.AppError{
Step: "obtain uid from hsu",
Err: &strconv.NumError{Func: "Atoi", Num: "\x00", Err: strconv.ErrSyntax},
Msg: "invalid uid string from hsu",
}},
{"success", fCheckID, stub.Expect{Calls: []stub.Call{
call("mustHsuPath", stub.ExpectArgs{}, m("/run/wrappers/bin/hsu"), nil),
call("cmdOutput", stub.ExpectArgs{"/run/wrappers/bin/hsu", os.Stderr, []string{}, "/"}, []byte{'0'}, nil),
call("verbose", stub.ExpectArgs{[]any{0}}, nil, nil),
}}, nil},
})
}
// makeExitError populates syscall.WaitStatus in an [exec.ExitError].
// Do not reuse this function in a cross-platform package.
func makeExitError(status syscall.WaitStatus) error {
ps := new(os.ProcessState)
statusV := reflect.ValueOf(ps).Elem().FieldByName("status")
*reflect.NewAt(statusV.Type(), unsafe.Pointer(statusV.UnsafeAddr())).Interface().(*syscall.WaitStatus) = status
return &exec.ExitError{ProcessState: ps}
}

View File

@@ -1,302 +0,0 @@
package app
import (
"errors"
"maps"
"strconv"
"hakurei.app/container"
"hakurei.app/container/check"
"hakurei.app/hst"
"hakurei.app/internal/env"
"hakurei.app/message"
"hakurei.app/system"
"hakurei.app/system/acl"
)
// envAllocSize is the initial size of the env map pre-allocated when the configured env map is nil.
// It should be large enough to fit all insertions by outcomeOp.toContainer.
const envAllocSize = 1 << 6
func newInt(v int) *stringPair[int] { return &stringPair[int]{v, strconv.Itoa(v)} }
// stringPair stores a value and its string representation.
type stringPair[T comparable] struct {
v T
s string
}
func (s *stringPair[T]) unwrap() T { return s.v }
func (s *stringPair[T]) String() string { return s.s }
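// For example, newInt(1000) stores the value alongside its formatted form,
// so unwrap() returns 1000 and String() returns "1000".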
// outcomeState is copied to the shim process and available while applying outcomeOp.
// This is transmitted from the priv side to the shim, so exported fields should be kept to a minimum.
type outcomeState struct {
// Params only used by the shim process. Populated by populateEarly.
Shim *shimParams
// Generated and accounted for by the caller.
ID *hst.ID
// Copied from ID.
id *stringPair[hst.ID]
// Copied from the [hst.Config] field of the same name.
Identity int
// Copied from Identity.
identity *stringPair[int]
// Returned by [Hsu.MustID].
UserID int
// Target init namespace uid resolved from UserID and identity.
uid *stringPair[int]
// Included as part of [hst.Config], transmitted as-is unless permissive defaults are in use.
Container *hst.ContainerConfig
// Mapped credentials within container user namespace.
Mapuid, Mapgid int
// Copied from their respective exported values.
mapuid, mapgid *stringPair[int]
// Copied from [EnvPaths] per-process.
sc hst.Paths
*env.Paths
// Copied via populateLocal.
k syscallDispatcher
// Copied via populateLocal.
msg message.Msg
}
// valid checks that outcomeState is safe for use with outcomeOp.
func (s *outcomeState) valid() bool {
return s != nil &&
s.Shim.valid() &&
s.ID != nil &&
s.Container != nil &&
s.Paths != nil
}
// newOutcomeState returns the address of a new outcomeState with its exported fields populated via syscallDispatcher.
func newOutcomeState(k syscallDispatcher, msg message.Msg, id *hst.ID, config *hst.Config, hsu *Hsu) *outcomeState {
s := outcomeState{
Shim: &shimParams{PrivPID: k.getpid(), Verbose: msg.IsVerbose()},
ID: id,
Identity: config.Identity,
UserID: hsu.MustID(msg),
Paths: env.CopyPathsFunc(k.fatalf, k.tempdir, func(key string) string { v, _ := k.lookupEnv(key); return v }),
Container: config.Container,
}
// enforce bounds and default early
if s.Container.WaitDelay < 0 {
s.Shim.WaitDelay = 0
} else if s.Container.WaitDelay == 0 {
s.Shim.WaitDelay = hst.WaitDelayDefault
} else if s.Container.WaitDelay > hst.WaitDelayMax {
s.Shim.WaitDelay = hst.WaitDelayMax
} else {
s.Shim.WaitDelay = s.Container.WaitDelay
}
if s.Container.Flags&hst.FMapRealUID != 0 {
s.Mapuid, s.Mapgid = k.getuid(), k.getgid()
} else {
s.Mapuid, s.Mapgid = k.overflowUid(msg), k.overflowGid(msg)
}
return &s
}
// populateLocal populates unexported fields from transmitted exported fields.
// These fields are cheaper to recompute per-process.
func (s *outcomeState) populateLocal(k syscallDispatcher, msg message.Msg) error {
if !s.valid() || k == nil || msg == nil {
return newWithMessage("impossible outcome state reached")
}
if s.k != nil || s.msg != nil {
panic("attempting to call populateLocal twice")
}
s.k = k
s.msg = msg
s.id = &stringPair[hst.ID]{*s.ID, s.ID.String()}
s.Copy(&s.sc, s.UserID)
msg.Verbosef("process share directory at %q, runtime directory at %q", s.sc.SharePath, s.sc.RunDirPath)
s.identity = newInt(s.Identity)
s.mapuid, s.mapgid = newInt(s.Mapuid), newInt(s.Mapgid)
s.uid = newInt(HsuUid(s.UserID, s.identity.unwrap()))
return nil
}
// instancePath returns a path formatted for outcomeStateSys.instance.
// This method must only be called from outcomeOp.toContainer if
// outcomeOp.toSystem has already called outcomeStateSys.instance.
func (s *outcomeState) instancePath() *check.Absolute { return s.sc.SharePath.Append(s.id.String()) }
// runtimePath returns a path formatted for outcomeStateSys.runtime.
// This method must only be called from outcomeOp.toContainer if
// outcomeOp.toSystem has already called outcomeStateSys.runtime.
func (s *outcomeState) runtimePath() *check.Absolute { return s.sc.RunDirPath.Append(s.id.String()) }
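// Illustrative example based on the shim test below: with the template instance id,
// instancePath resolves to /tmp/hakurei.10/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa and
// runtimePath to /run/user/1000/hakurei/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.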
// outcomeStateSys wraps outcomeState and [system.I]. Used on the priv side only.
// Implementations of outcomeOp must not access fields other than sys unless explicitly stated.
type outcomeStateSys struct {
// Whether XDG_RUNTIME_DIR is used post hsu.
useRuntimeDir bool
// Process-specific directory in TMPDIR, nil if unused.
sharePath *check.Absolute
// Process-specific directory in XDG_RUNTIME_DIR, nil if unused.
runtimeSharePath *check.Absolute
// Copied from [hst.Config]. Safe for read by outcomeOp.toSystem.
appId string
// Copied from [hst.Config]. Safe for read by outcomeOp.toSystem.
et hst.Enablement
// Copied from [hst.Config]. Safe for read by spWaylandOp.toSystem only.
directWayland bool
// Copied header from [hst.Config]. Safe for read by spFilesystemOp.toSystem only.
extraPerms []hst.ExtraPermConfig
// Copied address from [hst.Config]. Safe for read by spDBusOp.toSystem only.
sessionBus, systemBus *hst.BusConfig
sys *system.I
*outcomeState
}
// newSys returns the address of a new outcomeStateSys embedding the current outcomeState.
func (s *outcomeState) newSys(config *hst.Config, sys *system.I) *outcomeStateSys {
return &outcomeStateSys{
appId: config.ID, et: config.Enablements.Unwrap(),
directWayland: config.DirectWayland, extraPerms: config.ExtraPerms,
sessionBus: config.SessionBus, systemBus: config.SystemBus,
sys: sys, outcomeState: s,
}
}
// newParams returns the address of a new outcomeStateParams embedding the current outcomeState.
func (s *outcomeState) newParams() *outcomeStateParams {
stateParams := outcomeStateParams{params: new(container.Params), outcomeState: s}
if s.Container.Env == nil {
stateParams.env = make(map[string]string, envAllocSize)
} else {
stateParams.env = maps.Clone(s.Container.Env)
}
return &stateParams
}
// ensureRuntimeDir must be called if access to paths within XDG_RUNTIME_DIR is required.
func (state *outcomeStateSys) ensureRuntimeDir() {
if state.useRuntimeDir {
return
}
state.useRuntimeDir = true
state.sys.Ensure(state.sc.RunDirPath, 0700)
state.sys.UpdatePermType(system.User, state.sc.RunDirPath, acl.Execute)
state.sys.Ensure(state.sc.RuntimePath, 0700) // ensure this dir in case XDG_RUNTIME_DIR is unset
state.sys.UpdatePermType(system.User, state.sc.RuntimePath, acl.Execute)
}
// instance returns the pathname to a process-specific directory within TMPDIR.
// This directory must only hold entries bound to [system.Process].
func (state *outcomeStateSys) instance() *check.Absolute {
if state.sharePath != nil {
return state.sharePath
}
state.sharePath = state.instancePath()
state.sys.Ephemeral(system.Process, state.sharePath, 0711)
return state.sharePath
}
// runtime returns the pathname to a process-specific directory within XDG_RUNTIME_DIR.
// This directory must only hold entries bound to [system.Process].
func (state *outcomeStateSys) runtime() *check.Absolute {
if state.runtimeSharePath != nil {
return state.runtimeSharePath
}
state.ensureRuntimeDir()
state.runtimeSharePath = state.runtimePath()
state.sys.Ephemeral(system.Process, state.runtimeSharePath, 0700)
state.sys.UpdatePerm(state.runtimeSharePath, acl.Execute)
return state.runtimeSharePath
}
// outcomeStateParams wraps outcomeState and [container.Params]. Used on the shim side only.
type outcomeStateParams struct {
// Overrides the embedded [container.Params] in [container.Container]. The Env field must not be used.
params *container.Params
// Collapsed into the Env slice in [container.Params] by the final outcomeOp.
env map[string]string
// Filesystems with the optional root sliced off if present. Populated by spParamsOp.
// Safe for use by spFilesystemOp.
filesystem []hst.FilesystemConfigJSON
// Inner XDG_RUNTIME_DIR, formatted from the default `/run/user/%d` via the mapped uid.
// Populated by spRuntimeOp.
runtimeDir *check.Absolute
as hst.ApplyState
*outcomeState
}
// errNotEnabled is returned by outcomeOp.toSystem and used internally to exclude an outcomeOp from transmission.
var errNotEnabled = errors.New("op not enabled in the configuration")
// An outcomeOp inflicts an outcome on [system.I] and contains enough information to
// inflict it on [container.Params] in a separate process.
// An implementation of outcomeOp must store cross-process state in exported fields only.
type outcomeOp interface {
// toSystem inflicts the current outcome on [system.I] in the priv side process.
toSystem(state *outcomeStateSys) error
// toContainer inflicts the current outcome on [container.Params] in the shim process.
// The implementation must not write to the Env field of [container.Params] as it will be overwritten
// by flattened env map.
toContainer(state *outcomeStateParams) error
}
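// Illustrative sketch only (not part of the original source): a minimal outcomeOp that
// satisfies the interface and opts out of transmission when disabled. The name spNoopOp
// and its Enabled field are hypothetical; real implementations also register their
// concrete type with gob in an init function.
type spNoopOp struct{ Enabled bool }
func (s spNoopOp) toSystem(state *outcomeStateSys) error {
if !s.Enabled {
// excluded from shimParams.Ops by the toSystem loop below
return errNotEnabled
}
return nil
}
func (s spNoopOp) toContainer(state *outcomeStateParams) error { return nil }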
// toSystem calls the outcomeOp.toSystem method on all outcomeOp implementations and populates shimParams.Ops.
// This function assumes the caller has already called the Validate method on [hst.Config]
// and checked that it returns nil.
func (state *outcomeStateSys) toSystem() error {
if state.Shim == nil || state.Shim.Ops != nil {
return newWithMessage("invalid ops state reached")
}
ops := [...]outcomeOp{
// must run first
&spParamsOp{},
&spRuntimeOp{},
spTmpdirOp{},
spAccountOp{},
// optional via enablements
&spWaylandOp{},
&spX11Op{},
&spPulseOp{},
&spDBusOp{},
// must run last
&spFilesystemOp{},
}
state.Shim.Ops = make([]outcomeOp, 0, len(ops))
for _, op := range ops {
if err := op.toSystem(state); err != nil {
// this error is used internally to exclude this outcomeOp from transmission
if errors.Is(err, errNotEnabled) {
continue
}
return err
}
state.Shim.Ops = append(state.Shim.Ops, op)
}
return nil
}

View File

@@ -1,34 +0,0 @@
package app
import (
"testing"
"hakurei.app/hst"
"hakurei.app/internal/env"
)
func TestOutcomeStateValid(t *testing.T) {
t.Parallel()
testCases := []struct {
name string
s *outcomeState
want bool
}{
{"nil", nil, false},
{"zero", new(outcomeState), false},
{"shim", &outcomeState{Shim: &shimParams{PrivPID: -1, Ops: []outcomeOp{}}, Container: new(hst.ContainerConfig), Paths: new(env.Paths)}, false},
{"id", &outcomeState{Shim: &shimParams{PrivPID: 1, Ops: []outcomeOp{}}, Container: new(hst.ContainerConfig), Paths: new(env.Paths)}, false},
{"container", &outcomeState{Shim: &shimParams{PrivPID: 1, Ops: []outcomeOp{}}, ID: new(hst.ID), Paths: new(env.Paths)}, false},
{"envpaths", &outcomeState{Shim: &shimParams{PrivPID: 1, Ops: []outcomeOp{}}, ID: new(hst.ID), Container: new(hst.ContainerConfig)}, false},
{"valid", &outcomeState{Shim: &shimParams{PrivPID: 1, Ops: []outcomeOp{}}, ID: new(hst.ID), Container: new(hst.ContainerConfig), Paths: new(env.Paths)}, true},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
if got := tc.s.valid(); got != tc.want {
t.Errorf("valid: %v, want %v", got, tc.want)
}
})
}
}

View File

@@ -1,321 +0,0 @@
package app
import (
"context"
"encoding/gob"
"errors"
"log"
"os"
"os/exec"
"strconv"
"strings"
"syscall"
"time"
"hakurei.app/container"
"hakurei.app/container/fhs"
"hakurei.app/hst"
"hakurei.app/internal"
"hakurei.app/internal/app/state"
"hakurei.app/message"
"hakurei.app/system"
)
// Duration to wait for shim to exit on top of container WaitDelay.
const shimWaitTimeout = 5 * time.Second
// mainState holds persistent state bound to outcome.main.
type mainState struct {
// done is whether beforeExit has been called already.
done bool
// Time is the exact point in time at which the process was created.
// Location must be set to UTC.
//
// Time is nil if no process was ever created.
Time *time.Time
store state.Store
cancel context.CancelFunc
cmd *exec.Cmd
cmdWait chan error
k *outcome
message.Msg
uintptr
}
const (
// mainNeedsRevert indicates the call to Commit has succeeded.
mainNeedsRevert uintptr = 1 << iota
// mainNeedsDestroy indicates the instance state entry is present in the store.
mainNeedsDestroy
)
// beforeExit must be called immediately before a call to [os.Exit].
func (ms mainState) beforeExit(isFault bool) {
if ms.done {
panic("attempting to call beforeExit twice")
}
ms.done = true
defer ms.BeforeExit()
if isFault && ms.cancel != nil {
ms.cancel()
}
var hasErr bool
// updates hasErr but does not terminate
perror := func(err error, message string) {
hasErr = true
printMessageError("cannot "+message+":", err)
}
exitCode := 1
defer func() {
if hasErr {
os.Exit(exitCode)
}
}()
// this also handles waiting for a non-fault termination
if ms.cmd != nil && ms.cmdWait != nil {
waitDone := make(chan struct{})
// this ties waitDone to ctx, with the compensating timeout duration added
go func() { <-ms.k.ctx.Done(); time.Sleep(ms.k.state.Shim.WaitDelay + shimWaitTimeout); close(waitDone) }()
select {
case err := <-ms.cmdWait:
wstatus, ok := ms.cmd.ProcessState.Sys().(syscall.WaitStatus)
if ok {
if v := wstatus.ExitStatus(); v != 0 {
hasErr = true
exitCode = v
}
}
if ms.IsVerbose() {
if !ok {
if err != nil {
ms.Verbosef("wait: %v", err)
}
} else {
switch {
case wstatus.Exited():
ms.Verbosef("process %d exited with code %d", ms.cmd.Process.Pid, wstatus.ExitStatus())
case wstatus.CoreDump():
ms.Verbosef("process %d dumped core", ms.cmd.Process.Pid)
case wstatus.Signaled():
ms.Verbosef("process %d got %s", ms.cmd.Process.Pid, wstatus.Signal())
default:
ms.Verbosef("process %d exited with status %#x", ms.cmd.Process.Pid, wstatus)
}
}
}
case <-waitDone:
ms.Resume()
// this is only reachable when the shim did not exit within shimWaitTimeout after its WaitDelay has elapsed.
// This is different from the container failing to terminate within its timeout period, as that is enforced
// by the shim. This path is instead reached when a lockup in the shim prevents it from completing.
log.Printf("process %d did not terminate", ms.cmd.Process.Pid)
}
ms.Resume()
}
if ms.uintptr&mainNeedsRevert != 0 {
if ok, err := ms.store.Do(ms.k.state.identity.unwrap(), func(c state.Cursor) {
if ms.uintptr&mainNeedsDestroy != 0 {
if err := c.Destroy(ms.k.state.id.unwrap()); err != nil {
perror(err, "destroy state entry")
}
}
var rt hst.Enablement
if states, err := c.Load(); err != nil {
// it is impossible to continue from this point;
// revert per-process state here to limit damage
ec := system.Process
if revertErr := ms.k.sys.Revert((*system.Criteria)(&ec)); revertErr != nil {
var joinError interface {
Unwrap() []error
error
}
if !errors.As(revertErr, &joinError) || joinError == nil {
perror(revertErr, "revert system setup")
} else {
for _, v := range joinError.Unwrap() {
perror(v, "revert system setup step")
}
}
}
perror(err, "load instance states")
} else {
ec := system.Process
if l := len(states); l == 0 {
ec |= system.User
} else {
ms.Verbosef("found %d instances, cleaning up without user-scoped operations", l)
}
// accumulate enablements of remaining launchers
for i, s := range states {
if s.Config != nil {
rt |= s.Config.Enablements.Unwrap()
} else {
log.Printf("state entry %d does not contain config", i)
}
}
ec |= rt ^ (hst.EWayland | hst.EX11 | hst.EDBus | hst.EPulse)
if ms.IsVerbose() {
if ec > 0 {
ms.Verbose("reverting operations scope", system.TypeString(ec))
}
}
if err = ms.k.sys.Revert((*system.Criteria)(&ec)); err != nil {
perror(err, "revert system setup")
}
}
}); err != nil {
if ok {
perror(err, "unlock state store")
} else {
perror(err, "open state store")
}
}
} else if ms.uintptr&mainNeedsDestroy != 0 {
panic("unreachable")
}
}
// fatal calls printMessageError, performs necessary cleanup, then calls [os.Exit](1).
func (ms mainState) fatal(fallback string, ferr error) {
printMessageError(fallback, ferr)
ms.beforeExit(true)
os.Exit(1)
}
// main carries out the outcome and terminates. main does not return.
func (k *outcome) main(msg message.Msg) {
if !k.active.CompareAndSwap(false, true) {
panic("outcome: attempted to run twice")
}
if k.ctx == nil || k.sys == nil || k.state == nil {
panic("outcome: did not finalise")
}
// read this value early so failure happens early
hsuPath := internal.MustHsuPath()
// ms.beforeExit required beyond this point
ms := &mainState{Msg: msg, k: k}
if err := k.sys.Commit(); err != nil {
ms.fatal("cannot commit system setup:", err)
}
ms.uintptr |= mainNeedsRevert
ms.store = state.NewMulti(msg, k.state.sc.RunDirPath)
ctx, cancel := context.WithCancel(k.ctx)
defer cancel()
ms.cancel = cancel
ms.cmd = exec.CommandContext(ctx, hsuPath.String())
ms.cmd.Stdin, ms.cmd.Stdout, ms.cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
ms.cmd.Dir = fhs.Root // container init enters final working directory
// shim runs in the same session as monitor; see shim.go for behaviour
ms.cmd.Cancel = func() error { return ms.cmd.Process.Signal(syscall.SIGCONT) }
var e *gob.Encoder
if fd, encoder, err := container.Setup(&ms.cmd.ExtraFiles); err != nil {
ms.fatal("cannot create shim setup pipe:", err)
} else {
e = encoder
ms.cmd.Env = []string{
// passed through to shim by hsu
shimEnv + "=" + strconv.Itoa(fd),
// interpreted by hsu
"HAKUREI_IDENTITY=" + k.state.identity.String(),
}
}
if len(k.supp) > 0 {
msg.Verbosef("attaching supplementary group ids %s", k.supp)
// interpreted by hsu
ms.cmd.Env = append(ms.cmd.Env, "HAKUREI_GROUPS="+strings.Join(k.supp, " "))
}
msg.Verbosef("setuid helper at %s", hsuPath)
msg.Suspend()
if err := ms.cmd.Start(); err != nil {
ms.fatal("cannot start setuid wrapper:", err)
}
startTime := time.Now().UTC()
ms.cmdWait = make(chan error, 1)
// this ties context back to the life of the process
go func() { ms.cmdWait <- ms.cmd.Wait(); cancel() }()
ms.Time = &startTime
// unfortunately the I/O here cannot be directly canceled;
// the cancellation path leads to fatal in this case so that is fine
select {
case err := <-func() (setupErr chan error) {
setupErr = make(chan error, 1)
go func() { setupErr <- e.Encode(k.state) }()
return
}():
if err != nil {
msg.Resume()
ms.fatal("cannot transmit shim config:", err)
}
case <-ctx.Done():
msg.Resume()
ms.fatal("shim context canceled:", newWithMessageError("shim setup canceled", ctx.Err()))
}
// shim accepted setup payload, create process state
if ok, err := ms.store.Do(k.state.identity.unwrap(), func(c state.Cursor) {
if err := c.Save(&hst.State{
ID: k.state.id.unwrap(),
PID: os.Getpid(),
ShimPID: ms.cmd.Process.Pid,
Config: k.config,
Time: *ms.Time,
}); err != nil {
ms.fatal("cannot save state entry:", err)
}
}); err != nil {
if ok {
ms.uintptr |= mainNeedsDestroy
ms.fatal("cannot unlock state store:", err)
} else {
ms.fatal("cannot open state store:", err)
}
}
// state in store at this point, destroy defunct state entry on termination
ms.uintptr |= mainNeedsDestroy
// beforeExit ties shim process to context
ms.beforeExit(false)
os.Exit(0)
}
// printMessageError prints the error message according to [message.GetMessage],
// or fallback prepended to err if an error message is not available.
func printMessageError(fallback string, err error) {
m, ok := message.GetMessage(err)
if !ok {
log.Println(fallback, err)
return
}
log.Print(m)
}

View File

@@ -1,65 +0,0 @@
#include "shim-signal.h"
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
static pid_t hakurei_shim_param_ppid = -1;
static int hakurei_shim_fd = -1;
static ssize_t hakurei_shim_write(const void *buf, size_t count) {
int savedErrno = errno;
ssize_t ret = write(hakurei_shim_fd, buf, count);
if (ret == -1 && errno != EAGAIN)
exit(EXIT_FAILURE);
errno = savedErrno;
return ret;
}
/* see shim_linux.go for handling of the value */
static void hakurei_shim_sigaction(int sig, siginfo_t *si, void *ucontext) {
if (sig != SIGCONT || si == NULL) {
/* unreachable */
hakurei_shim_write("\2", 1);
return;
}
if (si->si_pid == hakurei_shim_param_ppid) {
/* monitor requests shim exit */
hakurei_shim_write("\0", 1);
return;
}
/* unexpected si_pid */
hakurei_shim_write("\3", 1);
if (getppid() != hakurei_shim_param_ppid)
/* shim orphaned before monitor delivers a signal */
hakurei_shim_write("\1", 1);
}
void hakurei_shim_setup_cont_signal(pid_t ppid, int fd) {
if (hakurei_shim_param_ppid != -1 || hakurei_shim_fd != -1)
*(int *)NULL = 0; /* unreachable */
struct sigaction new_action = {0}, old_action = {0};
if (sigaction(SIGCONT, NULL, &old_action) != 0)
return;
if (old_action.sa_handler != SIG_DFL) {
errno = ENOTRECOVERABLE;
return;
}
new_action.sa_sigaction = hakurei_shim_sigaction;
if (sigemptyset(&new_action.sa_mask) != 0)
return;
new_action.sa_flags = SA_ONSTACK | SA_SIGINFO;
if (sigaction(SIGCONT, &new_action, NULL) != 0)
return;
errno = 0;
hakurei_shim_param_ppid = ppid;
hakurei_shim_fd = fd;
}

View File

@@ -1,3 +0,0 @@
#include <signal.h>
void hakurei_shim_setup_cont_signal(pid_t ppid, int fd);

View File

@@ -1,235 +0,0 @@
package app
import (
"context"
"errors"
"io"
"log"
"os"
"os/exec"
"runtime"
"sync/atomic"
"syscall"
"time"
"hakurei.app/container"
"hakurei.app/container/comp"
"hakurei.app/container/seccomp"
"hakurei.app/hst"
"hakurei.app/message"
)
//#include "shim-signal.h"
import "C"
// setupContSignal sets up the SIGCONT signal handler for the cross-uid shim exit hack.
// The signal handler is implemented in C; signals can be processed by reading from the returned reader.
// The returned function must be called after all signal processing concludes.
func setupContSignal(pid int) (io.ReadCloser, func(), error) {
if r, w, err := os.Pipe(); err != nil {
return nil, nil, err
} else if _, err = C.hakurei_shim_setup_cont_signal(C.pid_t(pid), C.int(w.Fd())); err != nil {
_, _ = r.Close(), w.Close()
return nil, nil, err
} else {
return r, func() { runtime.KeepAlive(w) }, nil
}
}
// shimEnv is the name of the environment variable storing the decimal representation of
// the setup pipe fd for [container.Receive].
const shimEnv = "HAKUREI_SHIM"
// shimParams is embedded in outcomeState and transmitted from priv side to shim.
type shimParams struct {
// Priv side pid, checked against ppid in signal handler for the syscall.SIGCONT hack.
PrivPID int
// Duration to wait after the initial process receives os.Interrupt before the container is killed.
// Limits are enforced on the priv side.
WaitDelay time.Duration
// Verbosity passed through from [message.Msg].
Verbose bool
// Outcome setup ops containing setup state. Populated by outcome.finalise.
Ops []outcomeOp
}
// valid checks that shimParams is safe for use.
func (p *shimParams) valid() bool { return p != nil && p.PrivPID > 0 }
// shimName is the prefix used by log.std in the shim process.
const shimName = "shim"
// Shim is called by the main function of the shim process and runs as the unconstrained target user.
// Shim does not return.
func Shim(msg message.Msg) {
if msg == nil {
msg = message.NewMsg(log.Default())
}
shimEntrypoint(direct{msg})
}
func shimEntrypoint(k syscallDispatcher) {
msg := k.getMsg()
if msg == nil {
panic("attempting to call shimEntrypoint with nil msg")
} else if logger := msg.GetLogger(); logger != nil {
logger.SetPrefix(shimName + ": ")
logger.SetFlags(0)
}
if err := k.setDumpable(container.SUID_DUMP_DISABLE); err != nil {
k.fatalf("cannot set SUID_DUMP_DISABLE: %s", err)
}
var (
state outcomeState
closeSetup func() error
)
if f, err := k.receive(shimEnv, &state, nil); err != nil {
if errors.Is(err, syscall.EBADF) {
k.fatal("invalid config descriptor")
}
if errors.Is(err, container.ErrReceiveEnv) {
k.fatal(shimEnv + " not set")
}
k.fatalf("cannot receive shim setup params: %v", err)
} else {
msg.SwapVerbose(state.Shim.Verbose)
closeSetup = f
if err = state.populateLocal(k, msg); err != nil {
if m, ok := message.GetMessage(err); ok {
k.fatal(m)
} else {
k.fatalf("cannot populate local state: %v", err)
}
}
}
// the Go runtime does not expose siginfo_t so SIGCONT is handled in C to check si_pid
var signalPipe io.ReadCloser
if r, wKeepAlive, err := k.setupContSignal(state.Shim.PrivPID); err != nil {
switch {
case errors.As(err, new(*os.SyscallError)): // returned by os.Pipe
k.fatal(err.Error())
return
case errors.As(err, new(syscall.Errno)): // returned by hakurei_shim_setup_cont_signal
k.fatalf("cannot install SIGCONT handler: %v", err)
return
default: // unreachable
k.fatalf("cannot set up exit request: %v", err)
return
}
} else {
defer wKeepAlive()
signalPipe = r
}
// pdeath_signal delivery is checked as if the dying process called kill(2), see kernel/exit.c
if err := k.prctl(syscall.PR_SET_PDEATHSIG, uintptr(syscall.SIGCONT), 0); err != nil {
k.fatalf("cannot set parent-death signal: %v", err)
}
stateParams := state.newParams()
for _, op := range state.Shim.Ops {
if err := op.toContainer(stateParams); err != nil {
if m, ok := message.GetMessage(err); ok {
k.fatal(m)
} else {
k.fatalf("cannot create container state: %v", err)
}
}
}
// shim exit outcomes
var cancelContainer atomic.Pointer[context.CancelFunc]
k.new(func(k syscallDispatcher, msg message.Msg) {
buf := make([]byte, 1)
for {
if _, err := signalPipe.Read(buf); err != nil {
k.fatalf("cannot read from signal pipe: %v", err)
}
switch buf[0] {
case 0: // got SIGCONT from monitor: shim exit requested
if fp := cancelContainer.Load(); stateParams.params.ForwardCancel && fp != nil && *fp != nil {
(*fp)()
// shim now bound by WaitDelay, applied below
continue
}
// setup has not completed, terminate immediately
msg.Resume()
k.exit(hst.ExitRequest)
return
case 1: // got SIGCONT after adoption: monitor died before delivering signal
msg.BeforeExit()
k.exit(hst.ExitOrphan)
return
case 2: // unreachable
msg.Verbose("sa_sigaction got invalid siginfo")
case 3: // got SIGCONT from unexpected process: hopefully the terminal driver
msg.Verbose("got SIGCONT from unexpected process")
default: // unreachable
k.fatalf("got invalid message %d from signal handler", buf[0])
}
}
})
if stateParams.params.Ops == nil {
k.fatal("invalid container params")
}
// close setup socket
if err := closeSetup(); err != nil {
msg.Verbosef("cannot close setup pipe: %v", err)
// not fatal
}
ctx, stop := k.notifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
cancelContainer.Store(&stop)
z := container.New(ctx, msg)
z.Params = *stateParams.params
z.Stdin, z.Stdout, z.Stderr = os.Stdin, os.Stdout, os.Stderr
// bounds and default enforced in finalise.go
z.WaitDelay = state.Shim.WaitDelay
if err := k.containerStart(z); err != nil {
printMessageError("cannot start container:", err)
k.exit(hst.ExitFailure)
}
if err := k.containerServe(z); err != nil {
printMessageError("cannot configure container:", err)
}
if err := k.seccompLoad(
seccomp.Preset(comp.PresetStrict, seccomp.AllowMultiarch),
seccomp.AllowMultiarch,
); err != nil {
k.fatalf("cannot load syscall filter: %v", err)
}
if err := k.containerWait(z); err != nil {
var exitError *exec.ExitError
if !errors.As(err, &exitError) {
if errors.Is(err, context.Canceled) {
k.exit(hst.ExitCancel)
}
msg.Verbosef("cannot wait: %v", err)
k.exit(127)
}
k.exit(exitError.ExitCode())
}
}

View File

@@ -1,156 +0,0 @@
package app
import (
"bytes"
"context"
"log"
"os"
"syscall"
"testing"
"hakurei.app/container"
"hakurei.app/container/comp"
"hakurei.app/container/fhs"
"hakurei.app/container/seccomp"
"hakurei.app/container/stub"
"hakurei.app/hst"
"hakurei.app/internal/env"
)
func TestShimEntrypoint(t *testing.T) {
t.Parallel()
shimPreset := seccomp.Preset(comp.PresetStrict, seccomp.AllowMultiarch)
templateParams := &container.Params{
Dir: m("/data/data/org.chromium.Chromium"),
Env: []string{
"DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/1000/bus",
"DBUS_SYSTEM_BUS_ADDRESS=unix:path=/var/run/dbus/system_bus_socket",
"GOOGLE_API_KEY=AIzaSyBHDrl33hwRp4rMQY0ziRbj8K9LPA6vUCY",
"GOOGLE_DEFAULT_CLIENT_ID=77185425430.apps.googleusercontent.com",
"GOOGLE_DEFAULT_CLIENT_SECRET=OTJgUOQcT7lO7GsGZq2G4IlT",
"HOME=/data/data/org.chromium.Chromium",
"PULSE_COOKIE=/.hakurei/pulse-cookie",
"PULSE_SERVER=unix:/run/user/1000/pulse/native",
"SHELL=/run/current-system/sw/bin/zsh",
"TERM=xterm-256color",
"USER=chronos",
"WAYLAND_DISPLAY=wayland-0",
"XDG_RUNTIME_DIR=/run/user/1000",
"XDG_SESSION_CLASS=user",
"XDG_SESSION_TYPE=wayland",
},
// spParamsOp
Hostname: "localhost",
RetainSession: true,
HostNet: true,
HostAbstract: true,
ForwardCancel: true,
Path: m("/run/current-system/sw/bin/chromium"),
Args: []string{
"chromium",
"--ignore-gpu-blocklist",
"--disable-smooth-scrolling",
"--enable-features=UseOzonePlatform",
"--ozone-platform=wayland",
},
SeccompFlags: seccomp.AllowMultiarch,
Uid: 1000,
Gid: 100,
Ops: new(container.Ops).
// resolveRoot
Root(m("/var/lib/hakurei/base/org.debian"), comp.BindWritable).
// spParamsOp
Proc(fhs.AbsProc).
Tmpfs(hst.AbsPrivateTmp, 1<<12, 0755).
Bind(fhs.AbsDev, fhs.AbsDev, comp.BindWritable|comp.BindDevice).
Tmpfs(fhs.AbsDev.Append("shm"), 0, 01777).
// spRuntimeOp
Tmpfs(fhs.AbsRunUser, 1<<12, 0755).
Bind(m("/tmp/hakurei.10/runtime/9999"), m("/run/user/1000"), comp.BindWritable).
// spTmpdirOp
Bind(m("/tmp/hakurei.10/tmpdir/9999"), fhs.AbsTmp, comp.BindWritable).
// spAccountOp
Place(m("/etc/passwd"), []byte("chronos:x:1000:100:Hakurei:/data/data/org.chromium.Chromium:/run/current-system/sw/bin/zsh\n")).
Place(m("/etc/group"), []byte("hakurei:x:100:\n")).
// spWaylandOp
Bind(m("/tmp/hakurei.10/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/wayland"), m("/run/user/1000/wayland-0"), 0).
// spPulseOp
Bind(m("/run/user/1000/hakurei/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/pulse"), m("/run/user/1000/pulse/native"), 0).
Place(m("/.hakurei/pulse-cookie"), bytes.Repeat([]byte{0}, pulseCookieSizeMax)).
// spDBusOp
Bind(m("/tmp/hakurei.10/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bus"), m("/run/user/1000/bus"), 0).
Bind(m("/tmp/hakurei.10/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/system_bus_socket"), m("/var/run/dbus/system_bus_socket"), 0).
// spFilesystemOp
Etc(fhs.AbsEtc, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").
Tmpfs(fhs.AbsTmp, 0, 0755).
Overlay(m("/nix/store"),
fhs.AbsVarLib.Append("hakurei/nix/u0/org.chromium.Chromium/rw-store/upper"),
fhs.AbsVarLib.Append("hakurei/nix/u0/org.chromium.Chromium/rw-store/work"),
fhs.AbsVarLib.Append("hakurei/base/org.nixos/ro-store")).
Link(m("/run/current-system"), "/run/current-system", true).
Link(m("/run/opengl-driver"), "/run/opengl-driver", true).
Bind(fhs.AbsVarLib.Append("hakurei/u0/org.chromium.Chromium"),
m("/data/data/org.chromium.Chromium"),
comp.BindWritable|comp.BindEnsure).
Bind(fhs.AbsDev.Append("dri"), fhs.AbsDev.Append("dri"),
comp.BindOptional|comp.BindWritable|comp.BindDevice).
Remount(fhs.AbsRoot, syscall.MS_RDONLY),
}
checkSimple(t, "shimEntrypoint", []simpleTestCase{
{"success", func(k *kstub) error { shimEntrypoint(k); return nil }, stub.Expect{Calls: []stub.Call{
call("getMsg", stub.ExpectArgs{}, nil, nil),
call("getLogger", stub.ExpectArgs{}, (*log.Logger)(nil), nil),
call("setDumpable", stub.ExpectArgs{uintptr(container.SUID_DUMP_DISABLE)}, nil, nil),
call("receive", stub.ExpectArgs{"HAKUREI_SHIM", outcomeState{
Shim: &shimParams{PrivPID: 0xbad, WaitDelay: 0xf, Verbose: true, Ops: []outcomeOp{
&spParamsOp{"xterm-256color", true},
&spRuntimeOp{sessionTypeWayland},
spTmpdirOp{},
spAccountOp{},
&spWaylandOp{},
&spPulseOp{(*[256]byte)(bytes.Repeat([]byte{0}, pulseCookieSizeMax)), pulseCookieSizeMax},
&spDBusOp{true},
&spFilesystemOp{},
}},
ID: &checkExpectInstanceId,
Identity: hst.IdentityMax,
UserID: 10,
Container: hst.Template().Container,
Mapuid: 1000,
Mapgid: 100,
Paths: &env.Paths{TempDir: fhs.AbsTmp, RuntimePath: fhs.AbsRunUser.Append("1000")},
}, nil}, nil, nil),
call("swapVerbose", stub.ExpectArgs{true}, false, nil),
call("verbosef", stub.ExpectArgs{"process share directory at %q, runtime directory at %q", []any{m("/tmp/hakurei.10"), m("/run/user/1000/hakurei")}}, nil, nil),
call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
call("prctl", stub.ExpectArgs{uintptr(syscall.PR_SET_PDEATHSIG), uintptr(syscall.SIGCONT), uintptr(0)}, nil, nil),
call("New", stub.ExpectArgs{}, nil, nil),
call("closeReceive", stub.ExpectArgs{}, nil, nil),
call("notifyContext", stub.ExpectArgs{context.Background(), []os.Signal{os.Interrupt, syscall.SIGTERM}}, nil, nil),
call("containerStart", stub.ExpectArgs{templateParams}, nil, nil),
call("containerServe", stub.ExpectArgs{templateParams}, nil, nil),
call("seccompLoad", stub.ExpectArgs{shimPreset, seccomp.AllowMultiarch}, nil, nil),
call("containerWait", stub.ExpectArgs{templateParams}, nil, nil),
// deferred
call("wKeepAlive", stub.ExpectArgs{}, nil, nil),
}, Tracks: []stub.Expect{{Calls: []stub.Call{
call("rcRead", stub.ExpectArgs{}, []byte{2}, nil),
call("verbose", stub.ExpectArgs{[]any{"sa_sigaction got invalid siginfo"}}, nil, nil),
call("rcRead", stub.ExpectArgs{}, []byte{3}, nil),
call("verbose", stub.ExpectArgs{[]any{"got SIGCONT from unexpected process"}}, nil, nil),
call("rcRead", stub.ExpectArgs{}, nil, nil), // stub terminates this goroutine
}}}}, nil},
})
}

View File

@@ -1,56 +0,0 @@
package app
import (
"encoding/gob"
"fmt"
"syscall"
"hakurei.app/container/fhs"
"hakurei.app/internal/validate"
)
func init() { gob.Register(spAccountOp{}) }
// spAccountOp sets up user account emulation inside the container.
type spAccountOp struct{}
func (s spAccountOp) toSystem(state *outcomeStateSys) error {
// do checks here to fail before fork/exec
if state.Container == nil || state.Container.Home == nil || state.Container.Shell == nil {
// unreachable
return syscall.ENOTRECOVERABLE
}
// default is applied in toContainer
if state.Container.Username != "" && !validate.IsValidUsername(state.Container.Username) {
return newWithMessage(fmt.Sprintf("invalid user name %q", state.Container.Username))
}
return nil
}
func (s spAccountOp) toContainer(state *outcomeStateParams) error {
const fallbackUsername = "chronos"
username := state.Container.Username
if username == "" {
username = fallbackUsername
}
state.params.Dir = state.Container.Home
state.env["HOME"] = state.Container.Home.String()
state.env["USER"] = username
state.env["SHELL"] = state.Container.Shell.String()
state.params.
Place(fhs.AbsEtc.Append("passwd"),
[]byte(username+":x:"+
state.mapuid.String()+":"+
state.mapgid.String()+
":Hakurei:"+
state.Container.Home.String()+":"+
state.Container.Shell.String()+"\n")).
Place(fhs.AbsEtc.Append("group"),
[]byte("hakurei:x:"+state.mapgid.String()+":\n"))
return nil
}
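// With the template configuration exercised in the test below this produces, for example:
// /etc/passwd: chronos:x:1000:100:Hakurei:/data/data/org.chromium.Chromium:/run/current-system/sw/bin/zsh
// /etc/group: hakurei:x:100: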

View File

@@ -1,72 +0,0 @@
package app
import (
"os"
"syscall"
"testing"
"hakurei.app/container"
"hakurei.app/container/stub"
"hakurei.app/hst"
)
func TestSpAccountOp(t *testing.T) {
t.Parallel()
config := hst.Template()
checkOpBehaviour(t, []opBehaviourTestCase{
{"invalid state", func(bool, bool) outcomeOp { return spAccountOp{} }, func() *hst.Config {
c := hst.Template()
c.Container.Shell = nil
return c
}, nil, []stub.Call{
// this op performs basic validation and does not make calls during toSystem
}, nil, nil, syscall.ENOTRECOVERABLE, nil, nil, nil, nil, nil},
{"invalid user name", func(bool, bool) outcomeOp { return spAccountOp{} }, func() *hst.Config {
c := hst.Template()
c.Container.Username = "9"
return c
}, nil, []stub.Call{
// this op performs basic validation and does not make calls during toSystem
}, nil, nil, &hst.AppError{
Step: "finalise",
Err: os.ErrInvalid,
Msg: `invalid user name "9"`,
}, nil, nil, nil, nil, nil},
{"success fallback username", func(bool, bool) outcomeOp { return spAccountOp{} }, func() *hst.Config {
c := hst.Template()
c.Container.Username = ""
return c
}, nil, []stub.Call{
// this op performs basic validation and does not make calls during toSystem
}, newI(), nil, nil, insertsOps(nil), []stub.Call{
// this op configures the container state and does not make calls during toContainer
}, &container.Params{
Dir: config.Container.Home,
Ops: new(container.Ops).
Place(m("/etc/passwd"), []byte("chronos:x:1000:100:Hakurei:/data/data/org.chromium.Chromium:/run/current-system/sw/bin/zsh\n")).
Place(m("/etc/group"), []byte("hakurei:x:100:\n")),
}, paramsWantEnv(config, map[string]string{
"HOME": config.Container.Home.String(),
"USER": config.Container.Username,
"SHELL": config.Container.Shell.String(),
}, nil), nil},
{"success", func(bool, bool) outcomeOp { return spAccountOp{} }, hst.Template, nil, []stub.Call{
// this op performs basic validation and does not make calls during toSystem
}, newI(), nil, nil, insertsOps(nil), []stub.Call{
// this op configures the container state and does not make calls during toContainer
}, &container.Params{
Dir: config.Container.Home,
Ops: new(container.Ops).
Place(m("/etc/passwd"), []byte("chronos:x:1000:100:Hakurei:/data/data/org.chromium.Chromium:/run/current-system/sw/bin/zsh\n")).
Place(m("/etc/group"), []byte("hakurei:x:100:\n")),
}, paramsWantEnv(config, map[string]string{
"HOME": config.Container.Home.String(),
"USER": config.Container.Username,
"SHELL": config.Container.Shell.String(),
}, nil), nil},
})
}

View File

@@ -1,391 +0,0 @@
package app
import (
"encoding/gob"
"errors"
"io/fs"
"os"
"path"
"slices"
"strconv"
"syscall"
"hakurei.app/container"
"hakurei.app/container/check"
"hakurei.app/container/comp"
"hakurei.app/container/fhs"
"hakurei.app/container/seccomp"
"hakurei.app/hst"
"hakurei.app/internal/validate"
"hakurei.app/message"
"hakurei.app/system"
"hakurei.app/system/acl"
"hakurei.app/system/dbus"
)
const varRunNscd = fhs.Var + "run/nscd"
func init() { gob.Register(new(spParamsOp)) }
// spParamsOp initialises unordered fields of [container.Params] and the optional root filesystem.
// This outcomeOp is hardcoded to always run first.
type spParamsOp struct {
// Value of $TERM, stored during toSystem.
Term string
// Whether $TERM is set, stored during toSystem.
TermSet bool
}
func (s *spParamsOp) toSystem(state *outcomeStateSys) error {
s.Term, s.TermSet = state.k.lookupEnv("TERM")
state.sys.Ensure(state.sc.SharePath, 0711)
return nil
}
func (s *spParamsOp) toContainer(state *outcomeStateParams) error {
// pass $TERM for proper terminal I/O in initial process
if s.TermSet {
state.env["TERM"] = s.Term
}
// in practice there should be fewer than 30 system mount points
const preallocateOpsCount = 1 << 5
state.params.Hostname = state.Container.Hostname
state.params.RetainSession = state.Container.Flags&hst.FTty != 0
state.params.HostNet = state.Container.Flags&hst.FHostNet != 0
state.params.HostAbstract = state.Container.Flags&hst.FHostAbstract != 0
if state.Container.Path == nil {
return newWithMessage("invalid program path")
}
state.params.Path = state.Container.Path
if len(state.Container.Args) == 0 {
state.params.Args = []string{state.Container.Path.String()}
} else {
state.params.Args = state.Container.Args
}
// the container is canceled when shim is requested to exit or receives an interrupt or termination signal;
// this behaviour is implemented in the shim
state.params.ForwardCancel = state.Shim.WaitDelay > 0
if state.Container.Flags&hst.FMultiarch != 0 {
state.params.SeccompFlags |= seccomp.AllowMultiarch
}
if state.Container.Flags&hst.FSeccompCompat == 0 {
state.params.SeccompPresets |= comp.PresetExt
}
if state.Container.Flags&hst.FDevel == 0 {
state.params.SeccompPresets |= comp.PresetDenyDevel
}
if state.Container.Flags&hst.FUserns == 0 {
state.params.SeccompPresets |= comp.PresetDenyNS
}
if state.Container.Flags&hst.FTty == 0 {
state.params.SeccompPresets |= comp.PresetDenyTTY
}
if state.Container.Flags&hst.FMapRealUID != 0 {
state.params.Uid = state.Mapuid
state.params.Gid = state.Mapgid
}
{
state.as.AutoEtcPrefix = state.id.String()
ops := make(container.Ops, 0, preallocateOpsCount+len(state.Container.Filesystem))
state.params.Ops = &ops
state.as.Ops = opsAdapter{&ops}
}
rootfs, filesystem, _ := resolveRoot(state.Container)
state.filesystem = filesystem
if rootfs != nil {
rootfs.Apply(&state.as)
}
// early mount points
state.params.
Proc(fhs.AbsProc).
Tmpfs(hst.AbsPrivateTmp, 1<<12, 0755)
if state.Container.Flags&hst.FDevice == 0 {
state.params.DevWritable(fhs.AbsDev, true)
} else {
state.params.Bind(fhs.AbsDev, fhs.AbsDev, comp.BindWritable|comp.BindDevice)
}
// /dev is mounted readonly later on; this prevents /dev/shm from going readonly with it
state.params.Tmpfs(fhs.AbsDev.Append("shm"), 0, 01777)
return nil
}
func init() { gob.Register(new(spFilesystemOp)) }
// spFilesystemOp applies configured filesystems to [container.Params], excluding the optional root filesystem.
// This outcomeOp is hardcoded to always run last.
type spFilesystemOp struct {
// Matched paths to cover. Stored during toSystem.
HidePaths []*check.Absolute
}
func (s *spFilesystemOp) toSystem(state *outcomeStateSys) error {
/* retrieve paths and hide them if they're made available in the sandbox;
this feature tries to improve user experience of permissive defaults, and
to warn about issues in custom configuration; it is NOT a security feature
and should not be treated as such, ALWAYS be careful with what you bind */
hidePaths := []string{
state.sc.RuntimePath.String(),
state.sc.SharePath.String(),
// this causes the emulated passwd database to be bypassed on some /etc/ setups
varRunNscd,
}
// dbus.Address does not go through syscallDispatcher
systemBusAddr := dbus.FallbackSystemBusAddress
if addr, ok := state.k.lookupEnv(dbus.SystemBusAddress); ok {
systemBusAddr = addr
}
if entries, err := dbus.Parse([]byte(systemBusAddr)); err != nil {
return &hst.AppError{Step: "parse dbus address", Err: err}
} else {
// there is usually only one, do not preallocate
for _, entry := range entries {
if entry.Method != "unix" {
continue
}
for _, pair := range entry.Values {
if pair[0] == "path" {
if path.IsAbs(pair[1]) {
// get parent dir of socket
dir := path.Dir(pair[1])
if dir == "." || dir == fhs.Root {
state.msg.Verbosef("dbus socket %q is in an unusual location", pair[1])
}
hidePaths = append(hidePaths, dir)
} else {
state.msg.Verbosef("dbus socket %q is not absolute", pair[1])
}
}
}
}
}
hidePathMatch := make([]bool, len(hidePaths))
for i := range hidePaths {
if err := evalSymlinks(state.msg, state.k, &hidePaths[i]); err != nil {
return &hst.AppError{Step: "evaluate path hiding target", Err: err}
}
}
_, filesystem, autoroot := resolveRoot(state.Container)
var hidePathSourceCount int
for i, c := range filesystem {
if !c.Valid() {
return newWithMessage("invalid filesystem at index " + strconv.Itoa(i))
}
// fs counter
hidePathSourceCount += len(c.Host())
}
// AutoRootOp is a collection of many BindMountOp internally
var autoRootEntries []fs.DirEntry
if autoroot != nil {
if d, err := state.k.readdir(autoroot.Source.String()); err != nil {
return &hst.AppError{Step: "access autoroot source", Err: err}
} else {
// autoroot counter
hidePathSourceCount += len(d)
autoRootEntries = d
}
}
hidePathSource := make([]*check.Absolute, 0, hidePathSourceCount)
// fs append
for _, c := range filesystem {
// all entries already checked above
hidePathSource = append(hidePathSource, c.Host()...)
}
// autoroot append
if autoroot != nil {
for _, ent := range autoRootEntries {
name := ent.Name()
if container.IsAutoRootBindable(state.msg, name) {
hidePathSource = append(hidePathSource, autoroot.Source.Append(name))
}
}
}
// evaluated path, input path
hidePathSourceEval := make([][2]string, len(hidePathSource))
for i, a := range hidePathSource {
if a == nil {
// unreachable
return newWithMessage("impossible path hiding state reached")
}
hidePathSourceEval[i] = [2]string{a.String(), a.String()}
if err := evalSymlinks(state.msg, state.k, &hidePathSourceEval[i][0]); err != nil {
return &hst.AppError{Step: "evaluate path hiding source", Err: err}
}
}
for _, p := range hidePathSourceEval {
for i := range hidePaths {
// skip matched entries
if hidePathMatch[i] {
continue
}
if ok, err := validate.DeepContainsH(p[0], hidePaths[i]); err != nil {
return &hst.AppError{Step: "determine path hiding outcome", Err: err}
} else if ok {
hidePathMatch[i] = true
state.msg.Verbosef("hiding path %q from %q", hidePaths[i], p[1])
}
}
}
// copy matched paths for shim
for i, ok := range hidePathMatch {
if ok {
if a, err := check.NewAbs(hidePaths[i]); err != nil {
return newWithMessage("invalid path hiding candidate " + strconv.Quote(hidePaths[i]))
} else {
s.HidePaths = append(s.HidePaths, a)
}
}
}
// append ExtraPerms last
flattenExtraPerms(state.sys, state.extraPerms)
return nil
}
func (s *spFilesystemOp) toContainer(state *outcomeStateParams) error {
for i, c := range state.filesystem {
if !c.Valid() {
return newWithMessage("invalid filesystem at index " + strconv.Itoa(i))
}
c.Apply(&state.as)
}
for _, a := range s.HidePaths {
state.params.Tmpfs(a, 1<<13, 0755)
}
// no more configured paths beyond this point
if state.Container.Flags&hst.FDevice == 0 {
state.params.Remount(fhs.AbsDev, syscall.MS_RDONLY)
}
state.params.Remount(fhs.AbsRoot, syscall.MS_RDONLY)
state.params.Env = make([]string, 0, len(state.env))
for key, value := range state.env {
// key validated early via hst
state.params.Env = append(state.params.Env, key+"="+value)
}
slices.Sort(state.params.Env)
return nil
}
// resolveRoot handles the root filesystem special case of [hst.FilesystemConfig], and additionally resolves autoroot,
// which requires special handling during path hiding.
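// For example, when the first Filesystem entry is valid, targets /, and is an autoroot bind, it becomes rootfs,
// autoroot is set, and the entry is excluded from the returned filesystem slice.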
func resolveRoot(c *hst.ContainerConfig) (rootfs hst.FilesystemConfig, filesystem []hst.FilesystemConfigJSON, autoroot *hst.FSBind) {
// root filesystem special case
filesystem = c.Filesystem
// validation normally happens later, so the root entry is validated here
if len(filesystem) > 0 && filesystem[0].Valid() && filesystem[0].Path().String() == fhs.Root {
// if the first element targets /, it is inserted early and excluded from path hiding
rootfs = filesystem[0].FilesystemConfig
filesystem = filesystem[1:]
// autoroot requires special handling during path hiding
if b, ok := rootfs.(*hst.FSBind); ok && b.IsAutoRoot() {
autoroot = b
}
}
return
}
// evalSymlinks calls syscallDispatcher.evalSymlinks but discards errors that unwrap to [fs.ErrNotExist].
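// A path that does not yet exist is logged verbosely and left unchanged rather than treated as an error.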
func evalSymlinks(msg message.Msg, k syscallDispatcher, v *string) error {
if p, err := k.evalSymlinks(*v); err != nil {
if !errors.Is(err, fs.ErrNotExist) {
return err
}
msg.Verbosef("path %q does not yet exist", *v)
} else {
*v = p
}
return nil
}
// flattenExtraPerms expands a slice of [hst.ExtraPermConfig] into [system.I].
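// Entries with a nil Path are skipped; Ensure schedules creation of the directory with mode 0700 before the ACL update is queued.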
func flattenExtraPerms(sys *system.I, extraPerms []hst.ExtraPermConfig) {
for i := range extraPerms {
p := &extraPerms[i]
if p.Path == nil {
continue
}
if p.Ensure {
sys.Ensure(p.Path, 0700)
}
perms := make(acl.Perms, 0, 3)
if p.Read {
perms = append(perms, acl.Read)
}
if p.Write {
perms = append(perms, acl.Write)
}
if p.Execute {
perms = append(perms, acl.Execute)
}
sys.UpdatePermType(system.User, p.Path, perms...)
}
}
// opsAdapter implements [hst.Ops] on [container.Ops].
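// Each method wraps the corresponding [container.Ops] method and returns the adapter, so calls can be chained through the [hst.Ops] interface.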
type opsAdapter struct{ *container.Ops }
func (p opsAdapter) Tmpfs(target *check.Absolute, size int, perm os.FileMode) hst.Ops {
return opsAdapter{p.Ops.Tmpfs(target, size, perm)}
}
func (p opsAdapter) Readonly(target *check.Absolute, perm os.FileMode) hst.Ops {
return opsAdapter{p.Ops.Readonly(target, perm)}
}
func (p opsAdapter) Bind(source, target *check.Absolute, flags int) hst.Ops {
return opsAdapter{p.Ops.Bind(source, target, flags)}
}
func (p opsAdapter) Overlay(target, state, work *check.Absolute, layers ...*check.Absolute) hst.Ops {
return opsAdapter{p.Ops.Overlay(target, state, work, layers...)}
}
func (p opsAdapter) OverlayReadonly(target *check.Absolute, layers ...*check.Absolute) hst.Ops {
return opsAdapter{p.Ops.OverlayReadonly(target, layers...)}
}
func (p opsAdapter) Link(target *check.Absolute, linkName string, dereference bool) hst.Ops {
return opsAdapter{p.Ops.Link(target, linkName, dereference)}
}
func (p opsAdapter) Root(host *check.Absolute, flags int) hst.Ops {
return opsAdapter{p.Ops.Root(host, flags)}
}
func (p opsAdapter) Etc(host *check.Absolute, prefix string) hst.Ops {
return opsAdapter{p.Ops.Etc(host, prefix)}
}

View File

@@ -1,477 +0,0 @@
package app
import (
"errors"
"os"
"reflect"
"syscall"
"testing"
"hakurei.app/container"
"hakurei.app/container/check"
"hakurei.app/container/comp"
"hakurei.app/container/fhs"
"hakurei.app/container/seccomp"
"hakurei.app/container/stub"
"hakurei.app/hst"
"hakurei.app/system"
"hakurei.app/system/acl"
"hakurei.app/system/dbus"
)
func TestSpParamsOp(t *testing.T) {
t.Parallel()
config := hst.Template()
checkOpBehaviour(t, []opBehaviourTestCase{
{"invalid program path", func(isShim, _ bool) outcomeOp {
if !isShim {
return new(spParamsOp)
}
return &spParamsOp{Term: "xterm", TermSet: true}
}, func() *hst.Config {
c := hst.Template()
c.Container.Path = nil
return c
}, nil, []stub.Call{
call("lookupEnv", stub.ExpectArgs{"TERM"}, "xterm", nil),
}, newI().
Ensure(m(container.Nonexistent+"/tmp/hakurei.0"), 0711), nil, nil, nil, []stub.Call{
// this op configures the container state and does not make calls during toContainer
}, nil, nil, &hst.AppError{
Step: "finalise",
Err: os.ErrInvalid,
Msg: "invalid program path",
}},
{"success defaultargs secure", func(isShim, _ bool) outcomeOp {
if !isShim {
return new(spParamsOp)
}
return &spParamsOp{Term: "xterm", TermSet: true}
}, func() *hst.Config {
c := hst.Template()
c.Container.Args = nil
c.Container.Flags = hst.FHostNet | hst.FHostAbstract | hst.FMapRealUID
return c
}, nil, []stub.Call{
call("lookupEnv", stub.ExpectArgs{"TERM"}, "xterm", nil),
}, newI().
Ensure(m(container.Nonexistent+"/tmp/hakurei.0"), 0711), nil, nil, nil, []stub.Call{
// this op configures the container state and does not make calls during toContainer
}, &container.Params{
Hostname: config.Container.Hostname,
HostNet: true,
HostAbstract: true,
Path: config.Container.Path,
Args: []string{config.Container.Path.String()},
SeccompPresets: comp.PresetExt | comp.PresetDenyDevel | comp.PresetDenyNS | comp.PresetDenyTTY,
Uid: 1000,
Gid: 100,
Ops: new(container.Ops).
Root(m("/var/lib/hakurei/base/org.debian"), comp.BindWritable).
Proc(fhs.AbsProc).Tmpfs(hst.AbsPrivateTmp, 1<<12, 0755).
DevWritable(fhs.AbsDev, true).
Tmpfs(fhs.AbsDev.Append("shm"), 0, 01777),
}, paramsWantEnv(config, map[string]string{
"TERM": "xterm",
}, func(t *testing.T, state *outcomeStateParams) {
if state.as.AutoEtcPrefix != wantAutoEtcPrefix {
t.Errorf("toContainer: as.AutoEtcPrefix = %q, want %q", state.as.AutoEtcPrefix, wantAutoEtcPrefix)
}
wantFilesystems := config.Container.Filesystem[1:]
if !reflect.DeepEqual(state.filesystem, wantFilesystems) {
t.Errorf("toContainer: filesystem = %#v, want %#v", state.filesystem, wantFilesystems)
}
}), nil},
{"success", func(isShim, _ bool) outcomeOp {
if !isShim {
return new(spParamsOp)
}
return &spParamsOp{Term: "xterm", TermSet: true}
}, hst.Template, nil, []stub.Call{
call("lookupEnv", stub.ExpectArgs{"TERM"}, "xterm", nil),
}, newI().
Ensure(m(container.Nonexistent+"/tmp/hakurei.0"), 0711), nil, nil, nil, []stub.Call{
// this op configures the container state and does not make calls during toContainer
}, &container.Params{
Hostname: config.Container.Hostname,
RetainSession: true,
HostNet: true,
HostAbstract: true,
Path: config.Container.Path,
Args: config.Container.Args,
SeccompFlags: seccomp.AllowMultiarch,
Uid: 1000,
Gid: 100,
Ops: new(container.Ops).
Root(m("/var/lib/hakurei/base/org.debian"), comp.BindWritable).
Proc(fhs.AbsProc).Tmpfs(hst.AbsPrivateTmp, 1<<12, 0755).
Bind(fhs.AbsDev, fhs.AbsDev, comp.BindWritable|comp.BindDevice).
Tmpfs(fhs.AbsDev.Append("shm"), 0, 01777),
}, paramsWantEnv(config, map[string]string{
"TERM": "xterm",
}, func(t *testing.T, state *outcomeStateParams) {
if state.as.AutoEtcPrefix != wantAutoEtcPrefix {
t.Errorf("toContainer: as.AutoEtcPrefix = %q, want %q", state.as.AutoEtcPrefix, wantAutoEtcPrefix)
}
wantFilesystems := config.Container.Filesystem[1:]
if !reflect.DeepEqual(state.filesystem, wantFilesystems) {
t.Errorf("toContainer: filesystem = %#v, want %#v", state.filesystem, wantFilesystems)
}
}), nil},
})
}
func TestSpFilesystemOp(t *testing.T) {
const nePrefix = container.Nonexistent + "/eval"
var stubDebianRoot = stubDir("bin", "dev", "etc", "home", "lib64", "lost+found",
"mnt", "nix", "proc", "root", "run", "srv", "sys", "tmp", "usr", "var")
config := hst.Template()
newConfigSmall := func() *hst.Config {
c := hst.Template()
c.Container.Filesystem = []hst.FilesystemConfigJSON{
{FilesystemConfig: &hst.FSBind{Target: fhs.AbsEtc, Source: fhs.AbsEtc, Special: true}},
{FilesystemConfig: &hst.FSOverlay{Target: m("/nix/store"), Lower: []*check.Absolute{
fhs.AbsVarLib.Append("hakurei/base/org.nixos/.ro-store"),
fhs.AbsVarLib.Append("hakurei/base/org.nixos/org.chromium.Chromium"),
}}},
{FilesystemConfig: &hst.FSEphemeral{Target: hst.AbsPrivateTmp}},
}
c.Container.Flags &= ^hst.FDevice
return c
}
configSmall := newConfigSmall()
needsApplyState := func(next pStateContainerFunc) pStateContainerFunc {
return func(state *outcomeStateParams) {
state.as = hst.ApplyState{AutoEtcPrefix: wantAutoEtcPrefix, Ops: opsAdapter{state.params.Ops}}
if next != nil {
next(state)
}
}
}
checkOpBehaviour(t, []opBehaviourTestCase{
{"readdir", func(bool, bool) outcomeOp {
return new(spFilesystemOp)
}, hst.Template, nil, []stub.Call{
call("lookupEnv", stub.ExpectArgs{dbus.SystemBusAddress}, nil, nil),
call("evalSymlinks", stub.ExpectArgs{container.Nonexistent + "/xdg_runtime_dir"}, nePrefix+"/xdg_runtime_dir", nil),
call("evalSymlinks", stub.ExpectArgs{container.Nonexistent + "/tmp/hakurei.0"}, nePrefix+"/tmp/hakurei.0", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/run/nscd"}, "", &os.PathError{Op: "lstat", Path: "/var/run/nscd", Err: os.ErrNotExist}),
call("verbosef", stub.ExpectArgs{"path %q does not yet exist", []any{"/var/run/nscd"}}, nil, nil),
call("evalSymlinks", stub.ExpectArgs{"/var/run/dbus"}, nePrefix+"/run/dbus", nil),
call("readdir", stub.ExpectArgs{"/var/lib/hakurei/base/org.debian"}, []os.DirEntry{}, stub.UniqueError(2)),
}, nil, nil, &hst.AppError{
Step: "access autoroot source",
Err: stub.UniqueError(2),
}, nil, nil, nil, nil, nil},
{"invalid dbus address", func(bool, bool) outcomeOp { return new(spFilesystemOp) }, func() *hst.Config {
c := newConfigSmall()
c.Container.Filesystem = append(c.Container.Filesystem, hst.FilesystemConfigJSON{FilesystemConfig: invalidFSHost(false)})
return c
}, nil, []stub.Call{
call("lookupEnv", stub.ExpectArgs{dbus.SystemBusAddress}, "invalid", nil),
}, nil, nil, &hst.AppError{
Step: "parse dbus address",
Err: &dbus.BadAddressError{
Type: dbus.ErrNoColon,
EntryVal: []byte("invalid"),
PairPos: -1,
},
}, nil, nil, nil, nil, nil},
{"invalid fs early", func(bool, bool) outcomeOp { return new(spFilesystemOp) }, func() *hst.Config {
c := newConfigSmall()
c.Container.Filesystem = append(c.Container.Filesystem, hst.FilesystemConfigJSON{FilesystemConfig: invalidFSHost(false)})
return c
}, nil, []stub.Call{
call("lookupEnv", stub.ExpectArgs{dbus.SystemBusAddress}, "invalid:meow=0;unix:path=/system_bus_socket;unix:path=system_bus_socket", nil),
call("verbosef", stub.ExpectArgs{"dbus socket %q is in an unusual location", []any{"/system_bus_socket"}}, nil, nil),
call("verbosef", stub.ExpectArgs{"dbus socket %q is not absolute", []any{"system_bus_socket"}}, nil, nil),
call("evalSymlinks", stub.ExpectArgs{container.Nonexistent + "/xdg_runtime_dir"}, nePrefix+"/xdg_runtime_dir", nil),
call("evalSymlinks", stub.ExpectArgs{container.Nonexistent + "/tmp/hakurei.0"}, nePrefix+"/tmp/hakurei.0", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/run/nscd"}, "", &os.PathError{Op: "lstat", Path: "/var/run/nscd", Err: os.ErrNotExist}),
call("verbosef", stub.ExpectArgs{"path %q does not yet exist", []any{"/var/run/nscd"}}, nil, nil),
call("evalSymlinks", stub.ExpectArgs{"/"}, nePrefix+"/etc/dbus", nil), // to match hidePaths
}, nil, nil, &hst.AppError{
Step: "finalise",
Err: os.ErrInvalid,
Msg: "invalid filesystem at index 3",
}, nil, nil, nil, nil, nil},
{"evalSymlinks early", func(bool, bool) outcomeOp { return new(spFilesystemOp) }, newConfigSmall, nil, []stub.Call{
call("lookupEnv", stub.ExpectArgs{dbus.SystemBusAddress}, "invalid:meow=0;unix:path=/system_bus_socket;unix:path=system_bus_socket", nil),
call("verbosef", stub.ExpectArgs{"dbus socket %q is in an unusual location", []any{"/system_bus_socket"}}, nil, nil),
call("verbosef", stub.ExpectArgs{"dbus socket %q is not absolute", []any{"system_bus_socket"}}, nil, nil),
call("evalSymlinks", stub.ExpectArgs{container.Nonexistent + "/xdg_runtime_dir"}, "", stub.UniqueError(0)),
}, nil, nil, &hst.AppError{
Step: "evaluate path hiding target",
Err: stub.UniqueError(0),
}, nil, nil, nil, nil, nil},
{"host nil abs", func(bool, bool) outcomeOp { return new(spFilesystemOp) }, func() *hst.Config {
c := newConfigSmall()
c.Container.Filesystem = append(c.Container.Filesystem, hst.FilesystemConfigJSON{FilesystemConfig: invalidFSHost(true)})
return c
}, nil, []stub.Call{
call("lookupEnv", stub.ExpectArgs{dbus.SystemBusAddress}, "invalid:meow=0;unix:path=/system_bus_socket;unix:path=system_bus_socket", nil),
call("verbosef", stub.ExpectArgs{"dbus socket %q is in an unusual location", []any{"/system_bus_socket"}}, nil, nil),
call("verbosef", stub.ExpectArgs{"dbus socket %q is not absolute", []any{"system_bus_socket"}}, nil, nil),
call("evalSymlinks", stub.ExpectArgs{container.Nonexistent + "/xdg_runtime_dir"}, nePrefix+"/xdg_runtime_dir", nil),
call("evalSymlinks", stub.ExpectArgs{container.Nonexistent + "/tmp/hakurei.0"}, nePrefix+"/tmp/hakurei.0", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/run/nscd"}, "", &os.PathError{Op: "lstat", Path: "/var/run/nscd", Err: os.ErrNotExist}),
call("verbosef", stub.ExpectArgs{"path %q does not yet exist", []any{"/var/run/nscd"}}, nil, nil),
call("evalSymlinks", stub.ExpectArgs{"/"}, nePrefix+"/etc/dbus", nil), // to match hidePaths
call("evalSymlinks", stub.ExpectArgs{"/etc/"}, nePrefix+"/etc", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/lib/hakurei/base/org.nixos/.ro-store"}, nePrefix+"/var/lib/hakurei/base/org.nixos/.ro-store", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/lib/hakurei/base/org.nixos/org.chromium.Chromium"}, "var/lib/hakurei/base/org.nixos/org.chromium.Chromium", nil),
}, nil, nil, &hst.AppError{
Step: "finalise",
Err: os.ErrInvalid,
Msg: "impossible path hiding state reached",
}, nil, nil, nil, nil, nil},
{"evalSymlinks late", func(bool, bool) outcomeOp { return new(spFilesystemOp) }, newConfigSmall, nil, []stub.Call{
call("lookupEnv", stub.ExpectArgs{dbus.SystemBusAddress}, "invalid:meow=0;unix:path=/system_bus_socket;unix:path=system_bus_socket", nil),
call("verbosef", stub.ExpectArgs{"dbus socket %q is in an unusual location", []any{"/system_bus_socket"}}, nil, nil),
call("verbosef", stub.ExpectArgs{"dbus socket %q is not absolute", []any{"system_bus_socket"}}, nil, nil),
call("evalSymlinks", stub.ExpectArgs{container.Nonexistent + "/xdg_runtime_dir"}, nePrefix+"/xdg_runtime_dir", nil),
call("evalSymlinks", stub.ExpectArgs{container.Nonexistent + "/tmp/hakurei.0"}, nePrefix+"/tmp/hakurei.0", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/run/nscd"}, "", &os.PathError{Op: "lstat", Path: "/var/run/nscd", Err: os.ErrNotExist}),
call("verbosef", stub.ExpectArgs{"path %q does not yet exist", []any{"/var/run/nscd"}}, nil, nil),
call("evalSymlinks", stub.ExpectArgs{"/"}, nePrefix+"/etc/dbus", nil), // to match hidePaths
call("evalSymlinks", stub.ExpectArgs{"/etc/"}, nePrefix+"/etc", stub.UniqueError(1)),
}, nil, nil, &hst.AppError{
Step: "evaluate path hiding source",
Err: stub.UniqueError(1),
}, nil, nil, nil, nil, nil},
{"invalid contains", func(bool, bool) outcomeOp { return new(spFilesystemOp) }, newConfigSmall, nil, []stub.Call{
call("lookupEnv", stub.ExpectArgs{dbus.SystemBusAddress}, "invalid:meow=0;unix:path=/system_bus_socket;unix:path=system_bus_socket", nil),
call("verbosef", stub.ExpectArgs{"dbus socket %q is in an unusual location", []any{"/system_bus_socket"}}, nil, nil),
call("verbosef", stub.ExpectArgs{"dbus socket %q is not absolute", []any{"system_bus_socket"}}, nil, nil),
call("evalSymlinks", stub.ExpectArgs{container.Nonexistent + "/xdg_runtime_dir"}, nePrefix+"/xdg_runtime_dir", nil),
call("evalSymlinks", stub.ExpectArgs{container.Nonexistent + "/tmp/hakurei.0"}, nePrefix+"/tmp/hakurei.0", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/run/nscd"}, "", &os.PathError{Op: "lstat", Path: "/var/run/nscd", Err: os.ErrNotExist}),
call("verbosef", stub.ExpectArgs{"path %q does not yet exist", []any{"/var/run/nscd"}}, nil, nil),
call("evalSymlinks", stub.ExpectArgs{"/"}, nePrefix+"/etc/dbus", nil), // to match hidePaths
call("evalSymlinks", stub.ExpectArgs{"/etc/"}, nePrefix+"/etc", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/lib/hakurei/base/org.nixos/.ro-store"}, nePrefix+"/var/lib/hakurei/base/org.nixos/.ro-store", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/lib/hakurei/base/org.nixos/org.chromium.Chromium"}, "var/lib/hakurei/base/org.nixos/org.chromium.Chromium", nil),
call("verbosef", stub.ExpectArgs{"hiding path %q from %q", []any{"/proc/nonexistent/eval/etc/dbus", "/etc/"}}, nil, nil),
}, nil, nil, &hst.AppError{
Step: "determine path hiding outcome",
Err: errors.New("Rel: can't make /proc/nonexistent/eval/xdg_runtime_dir relative to var/lib/hakurei/base/org.nixos/org.chromium.Chromium"),
}, nil, nil, nil, nil, nil},
{"invalid hide", func(bool, bool) outcomeOp { return new(spFilesystemOp) }, newConfigSmall, nil, []stub.Call{
call("lookupEnv", stub.ExpectArgs{dbus.SystemBusAddress}, "invalid:meow=0;unix:path=/system_bus_socket;unix:path=system_bus_socket", nil),
call("verbosef", stub.ExpectArgs{"dbus socket %q is in an unusual location", []any{"/system_bus_socket"}}, nil, nil),
call("verbosef", stub.ExpectArgs{"dbus socket %q is not absolute", []any{"system_bus_socket"}}, nil, nil),
call("evalSymlinks", stub.ExpectArgs{container.Nonexistent + "/xdg_runtime_dir"}, "xdg_runtime_dir", nil),
call("evalSymlinks", stub.ExpectArgs{container.Nonexistent + "/tmp/hakurei.0"}, "tmp/hakurei.0", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/run/nscd"}, "nscd", nil),
call("evalSymlinks", stub.ExpectArgs{"/"}, "nonexistent/dbus", nil),
call("evalSymlinks", stub.ExpectArgs{"/etc/"}, "nonexistent", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/lib/hakurei/base/org.nixos/.ro-store"}, ".ro-store", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/lib/hakurei/base/org.nixos/org.chromium.Chromium"}, "org.chromium.Chromium", nil),
call("verbosef", stub.ExpectArgs{"hiding path %q from %q", []any{"nonexistent/dbus", "/etc/"}}, nil, nil),
}, nil, nil, &hst.AppError{
Step: "finalise",
Err: os.ErrInvalid,
Msg: `invalid path hiding candidate "nonexistent/dbus"`,
}, nil, nil, nil, nil, nil},
{"invalid fs", func(isShim, clearUnexported bool) outcomeOp {
if !isShim {
return new(spFilesystemOp)
}
return &spFilesystemOp{HidePaths: []*check.Absolute{m("/proc/nonexistent/eval/etc/dbus")}}
}, newConfigSmall, nil, []stub.Call{
call("lookupEnv", stub.ExpectArgs{dbus.SystemBusAddress}, "invalid:meow=0;unix:path=/system_bus_socket;unix:path=system_bus_socket", nil),
call("verbosef", stub.ExpectArgs{"dbus socket %q is in an unusual location", []any{"/system_bus_socket"}}, nil, nil),
call("verbosef", stub.ExpectArgs{"dbus socket %q is not absolute", []any{"system_bus_socket"}}, nil, nil),
call("evalSymlinks", stub.ExpectArgs{container.Nonexistent + "/xdg_runtime_dir"}, nePrefix+"/xdg_runtime_dir", nil),
call("evalSymlinks", stub.ExpectArgs{container.Nonexistent + "/tmp/hakurei.0"}, nePrefix+"/tmp/hakurei.0", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/run/nscd"}, "", &os.PathError{Op: "lstat", Path: "/var/run/nscd", Err: os.ErrNotExist}),
call("verbosef", stub.ExpectArgs{"path %q does not yet exist", []any{"/var/run/nscd"}}, nil, nil),
call("evalSymlinks", stub.ExpectArgs{"/"}, nePrefix+"/etc/dbus", nil), // to match hidePaths
call("evalSymlinks", stub.ExpectArgs{"/etc/"}, nePrefix+"/etc", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/lib/hakurei/base/org.nixos/.ro-store"}, nePrefix+"/var/lib/hakurei/base/org.nixos/.ro-store", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/lib/hakurei/base/org.nixos/org.chromium.Chromium"}, nePrefix+"/var/lib/hakurei/base/org.nixos/org.chromium.Chromium", nil),
call("verbosef", stub.ExpectArgs{"hiding path %q from %q", []any{"/proc/nonexistent/eval/etc/dbus", "/etc/"}}, nil, nil),
}, newI().
Ensure(m("/var/lib/hakurei/u0"), 0700).
UpdatePermType(system.User, m("/var/lib/hakurei/u0"),
acl.Execute).
UpdatePermType(system.User, m("/var/lib/hakurei/u0/org.chromium.Chromium"),
acl.Read, acl.Write, acl.Execute), nil, nil, insertsOps(needsApplyState(func(state *outcomeStateParams) {
state.filesystem = append(configSmall.Container.Filesystem, hst.FilesystemConfigJSON{})
})), []stub.Call{
// this op configures the container state and does not make calls during toContainer
}, nil, nil, &hst.AppError{
Step: "finalise",
Err: os.ErrInvalid,
Msg: "invalid filesystem at index 3",
}},
{"success noroot nodev envdbus strangedbus dbusnotabs hide", func(isShim, clearUnexported bool) outcomeOp {
if !isShim {
return new(spFilesystemOp)
}
return &spFilesystemOp{HidePaths: []*check.Absolute{m("/proc/nonexistent/eval/etc/dbus")}}
}, newConfigSmall, nil, []stub.Call{
call("lookupEnv", stub.ExpectArgs{dbus.SystemBusAddress}, "invalid:meow=0;unix:path=/system_bus_socket;unix:path=system_bus_socket", nil),
call("verbosef", stub.ExpectArgs{"dbus socket %q is in an unusual location", []any{"/system_bus_socket"}}, nil, nil),
call("verbosef", stub.ExpectArgs{"dbus socket %q is not absolute", []any{"system_bus_socket"}}, nil, nil),
call("evalSymlinks", stub.ExpectArgs{container.Nonexistent + "/xdg_runtime_dir"}, nePrefix+"/xdg_runtime_dir", nil),
call("evalSymlinks", stub.ExpectArgs{container.Nonexistent + "/tmp/hakurei.0"}, nePrefix+"/tmp/hakurei.0", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/run/nscd"}, "", &os.PathError{Op: "lstat", Path: "/var/run/nscd", Err: os.ErrNotExist}),
call("verbosef", stub.ExpectArgs{"path %q does not yet exist", []any{"/var/run/nscd"}}, nil, nil),
call("evalSymlinks", stub.ExpectArgs{"/"}, nePrefix+"/etc/dbus", nil), // to match hidePaths
call("evalSymlinks", stub.ExpectArgs{"/etc/"}, nePrefix+"/etc", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/lib/hakurei/base/org.nixos/.ro-store"}, nePrefix+"/var/lib/hakurei/base/org.nixos/.ro-store", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/lib/hakurei/base/org.nixos/org.chromium.Chromium"}, nePrefix+"/var/lib/hakurei/base/org.nixos/org.chromium.Chromium", nil),
call("verbosef", stub.ExpectArgs{"hiding path %q from %q", []any{"/proc/nonexistent/eval/etc/dbus", "/etc/"}}, nil, nil),
}, newI().
Ensure(m("/var/lib/hakurei/u0"), 0700).
UpdatePermType(system.User, m("/var/lib/hakurei/u0"),
acl.Execute).
UpdatePermType(system.User, m("/var/lib/hakurei/u0/org.chromium.Chromium"),
acl.Read, acl.Write, acl.Execute), nil, nil, insertsOps(needsApplyState(func(state *outcomeStateParams) {
state.filesystem = configSmall.Container.Filesystem
})), []stub.Call{
// this op configures the container state and does not make calls during toContainer
}, &container.Params{
Env: []string{
"GOOGLE_API_KEY=AIzaSyBHDrl33hwRp4rMQY0ziRbj8K9LPA6vUCY",
"GOOGLE_DEFAULT_CLIENT_ID=77185425430.apps.googleusercontent.com",
"GOOGLE_DEFAULT_CLIENT_SECRET=OTJgUOQcT7lO7GsGZq2G4IlT",
},
Ops: new(container.Ops).
Etc(fhs.AbsEtc, wantAutoEtcPrefix).
OverlayReadonly(
check.MustAbs("/nix/store"),
fhs.AbsVarLib.Append("hakurei/base/org.nixos/.ro-store"),
fhs.AbsVarLib.Append("hakurei/base/org.nixos/org.chromium.Chromium")).
Readonly(hst.AbsPrivateTmp, 0755).
Tmpfs(m("/proc/nonexistent/eval/etc/dbus"), 1<<13, 0755).
Remount(fhs.AbsDev, syscall.MS_RDONLY).
Remount(fhs.AbsRoot, syscall.MS_RDONLY),
}, nil, nil},
{"success", func(bool, bool) outcomeOp {
return new(spFilesystemOp)
}, hst.Template, nil, []stub.Call{
call("lookupEnv", stub.ExpectArgs{dbus.SystemBusAddress}, nil, nil),
call("evalSymlinks", stub.ExpectArgs{container.Nonexistent + "/xdg_runtime_dir"}, nePrefix+"/xdg_runtime_dir", nil),
call("evalSymlinks", stub.ExpectArgs{container.Nonexistent + "/tmp/hakurei.0"}, nePrefix+"/tmp/hakurei.0", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/run/nscd"}, "", &os.PathError{Op: "lstat", Path: "/var/run/nscd", Err: os.ErrNotExist}),
call("verbosef", stub.ExpectArgs{"path %q does not yet exist", []any{"/var/run/nscd"}}, nil, nil),
call("evalSymlinks", stub.ExpectArgs{"/var/run/dbus"}, nePrefix+"/run/dbus", nil),
call("readdir", stub.ExpectArgs{"/var/lib/hakurei/base/org.debian"}, stubDebianRoot, nil),
call("evalSymlinks", stub.ExpectArgs{"/etc/"}, nePrefix+"/etc", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/lib/hakurei/nix/u0/org.chromium.Chromium/rw-store/upper"}, nePrefix+"/var/lib/hakurei/nix/u0/org.chromium.Chromium/rw-store/upper", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/lib/hakurei/nix/u0/org.chromium.Chromium/rw-store/work"}, nePrefix+"/var/lib/hakurei/nix/u0/org.chromium.Chromium/rw-store/work", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/lib/hakurei/base/org.nixos/ro-store"}, nePrefix+"/var/lib/hakurei/base/org.nixos/ro-store", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/lib/hakurei/u0/org.chromium.Chromium"}, nePrefix+"/var/lib/hakurei/u0/org.chromium.Chromium", nil),
call("evalSymlinks", stub.ExpectArgs{"/dev/dri"}, nePrefix+"/dev/dri", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/lib/hakurei/base/org.debian/bin"}, nePrefix+"/var/lib/hakurei/base/org.debian/bin", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/lib/hakurei/base/org.debian/home"}, nePrefix+"/var/lib/hakurei/base/org.debian/home", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/lib/hakurei/base/org.debian/lib64"}, nePrefix+"/var/lib/hakurei/base/org.debian/lib64", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/lib/hakurei/base/org.debian/lost+found"}, nePrefix+"/var/lib/hakurei/base/org.debian/lost+found", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/lib/hakurei/base/org.debian/nix"}, nePrefix+"/var/lib/hakurei/base/org.debian/nix", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/lib/hakurei/base/org.debian/root"}, nePrefix+"/var/lib/hakurei/base/org.debian/root", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/lib/hakurei/base/org.debian/run"}, nePrefix+"/var/lib/hakurei/base/org.debian/run", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/lib/hakurei/base/org.debian/srv"}, nePrefix+"/var/lib/hakurei/base/org.debian/srv", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/lib/hakurei/base/org.debian/sys"}, nePrefix+"/var/lib/hakurei/base/org.debian/sys", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/lib/hakurei/base/org.debian/usr"}, nePrefix+"/var/lib/hakurei/base/org.debian/usr", nil),
call("evalSymlinks", stub.ExpectArgs{"/var/lib/hakurei/base/org.debian/var"}, nePrefix+"/var/lib/hakurei/base/org.debian/var", nil),
}, newI().
Ensure(m("/var/lib/hakurei/u0"), 0700).
UpdatePermType(system.User, m("/var/lib/hakurei/u0"),
acl.Execute).
UpdatePermType(system.User, m("/var/lib/hakurei/u0/org.chromium.Chromium"),
acl.Read, acl.Write, acl.Execute), nil, nil, insertsOps(needsApplyState(func(state *outcomeStateParams) {
state.filesystem = config.Container.Filesystem[1:]
})), []stub.Call{
// this op configures the container state and does not make calls during toContainer
}, &container.Params{
Env: []string{
"GOOGLE_API_KEY=AIzaSyBHDrl33hwRp4rMQY0ziRbj8K9LPA6vUCY",
"GOOGLE_DEFAULT_CLIENT_ID=77185425430.apps.googleusercontent.com",
"GOOGLE_DEFAULT_CLIENT_SECRET=OTJgUOQcT7lO7GsGZq2G4IlT",
},
Ops: new(container.Ops).
Etc(fhs.AbsEtc, wantAutoEtcPrefix).
Tmpfs(fhs.AbsTmp, 0, 0755).
Overlay(
check.MustAbs("/nix/store"),
fhs.AbsVarLib.Append("hakurei/nix/u0/org.chromium.Chromium/rw-store/upper"),
fhs.AbsVarLib.Append("hakurei/nix/u0/org.chromium.Chromium/rw-store/work"),
fhs.AbsVarLib.Append("hakurei/base/org.nixos/ro-store")).
Link(fhs.AbsRun.Append("current-system"), "/run/current-system", true).
Link(fhs.AbsRun.Append("opengl-driver"), "/run/opengl-driver", true).
Bind(
fhs.AbsVarLib.Append("hakurei/u0/org.chromium.Chromium"),
check.MustAbs("/data/data/org.chromium.Chromium"),
comp.BindWritable|comp.BindEnsure).
Bind(fhs.AbsDev.Append("dri"), fhs.AbsDev.Append("dri"), comp.BindDevice|comp.BindWritable|comp.BindOptional).
Remount(fhs.AbsRoot, syscall.MS_RDONLY),
}, nil, nil},
})
}
func TestFlattenExtraPerms(t *testing.T) {
t.Parallel()
testCases := []struct {
name string
perms []hst.ExtraPermConfig
want *system.I
}{
{"path nil check", append(hst.Template().ExtraPerms, hst.ExtraPermConfig{}), newI().
Ensure(m("/var/lib/hakurei/u0"), 0700).
UpdatePermType(system.User, m("/var/lib/hakurei/u0"),
acl.Execute).
UpdatePermType(system.User, m("/var/lib/hakurei/u0/org.chromium.Chromium"),
acl.Read, acl.Write, acl.Execute)},
{"template", hst.Template().ExtraPerms, newI().
Ensure(m("/var/lib/hakurei/u0"), 0700).
UpdatePermType(system.User, m("/var/lib/hakurei/u0"),
acl.Execute).
UpdatePermType(system.User, m("/var/lib/hakurei/u0/org.chromium.Chromium"),
acl.Read, acl.Write, acl.Execute)},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
got := newI()
flattenExtraPerms(got, tc.perms)
if !reflect.DeepEqual(got, tc.want) {
t.Errorf("flattenExtraPerms: sys = %#v, want %#v", got, tc.want)
}
})
}
}
// invalidFSHost implements the Host method of [hst.FilesystemConfig] with an invalid response.
type invalidFSHost bool
func (f invalidFSHost) Valid() bool { return bool(f) }
func (invalidFSHost) Path() *check.Absolute { panic("unreachable") }
func (invalidFSHost) Host() []*check.Absolute { return []*check.Absolute{nil} }
func (invalidFSHost) Apply(*hst.ApplyState) { panic("unreachable") }
func (invalidFSHost) String() string { panic("unreachable") }

View File

@@ -1,58 +0,0 @@
package app
import (
"encoding/gob"
"hakurei.app/container/fhs"
"hakurei.app/hst"
"hakurei.app/system/acl"
"hakurei.app/system/dbus"
)
func init() { gob.Register(new(spDBusOp)) }
// spDBusOp maintains an xdg-dbus-proxy instance for the container.
// Runs after spRuntimeOp.
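// When no session bus configuration is provided, toSystem falls back to a default created via dbus.NewConfig.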
type spDBusOp struct {
// Whether to bind the system bus socket. Populated during toSystem.
ProxySystem bool
}
func (s *spDBusOp) toSystem(state *outcomeStateSys) error {
if state.et&hst.EDBus == 0 {
return errNotEnabled
}
if state.sessionBus == nil {
state.sessionBus = dbus.NewConfig(state.appId, true, true)
}
// downstream socket paths
sessionPath, systemPath := state.instance().Append("bus"), state.instance().Append("system_bus_socket")
var sessionBus, systemBus dbus.ProxyPair
sessionBus[0], systemBus[0] = state.k.dbusAddress()
sessionBus[1], systemBus[1] = sessionPath.String(), systemPath.String()
if err := state.sys.ProxyDBus(state.sessionBus, state.systemBus, sessionBus, systemBus); err != nil {
return err
}
state.sys.UpdatePerm(sessionPath, acl.Read, acl.Write)
if state.systemBus != nil {
s.ProxySystem = true
state.sys.UpdatePerm(systemPath, acl.Read, acl.Write)
}
return nil
}
func (s *spDBusOp) toContainer(state *outcomeStateParams) error {
sessionInner := state.runtimeDir.Append("bus")
state.env["DBUS_SESSION_BUS_ADDRESS"] = "unix:path=" + sessionInner.String()
state.params.Bind(state.instancePath().Append("bus"), sessionInner, 0)
if s.ProxySystem {
systemInner := fhs.AbsVar.Append("run/dbus/system_bus_socket")
state.env["DBUS_SYSTEM_BUS_ADDRESS"] = "unix:path=" + systemInner.String()
state.params.Bind(state.instancePath().Append("system_bus_socket"), systemInner, 0)
}
return nil
}

View File

@@ -1,190 +0,0 @@
package app
import (
"syscall"
"testing"
"hakurei.app/container"
"hakurei.app/container/stub"
"hakurei.app/helper"
"hakurei.app/hst"
"hakurei.app/message"
"hakurei.app/system"
"hakurei.app/system/acl"
"hakurei.app/system/dbus"
)
func TestSpDBusOp(t *testing.T) {
config := hst.Template()
checkOpBehaviour(t, []opBehaviourTestCase{
{"not enabled", func(bool, bool) outcomeOp {
return new(spDBusOp)
}, func() *hst.Config {
c := hst.Template()
*c.Enablements = 0
return c
}, nil, nil, nil, nil, errNotEnabled, nil, nil, nil, nil, nil},
{"invalid", func(bool, bool) outcomeOp {
return new(spDBusOp)
}, func() *hst.Config {
c := hst.Template()
c.SessionBus.Talk[0] += "\x00"
c.SystemBus = nil
return c
}, nil, []stub.Call{
call("dbusAddress", stub.ExpectArgs{}, [2]string{
"unix:path=/run/user/1000/bus",
"unix:path=/var/run/dbus/system_bus_socket",
}, nil),
}, nil, sysUsesInstance(nil), &system.OpError{
Op: "dbus",
Err: syscall.EINVAL,
Msg: "message bus proxy configuration contains NUL byte",
Revert: false,
}, nil, nil, nil, nil, nil},
{"success default", func(bool, bool) outcomeOp {
return new(spDBusOp)
}, func() *hst.Config {
c := hst.Template()
c.SessionBus, c.SystemBus = nil, nil
return c
}, nil, []stub.Call{
call("dbusAddress", stub.ExpectArgs{}, [2]string{
"unix:path=/run/user/1000/bus",
"unix:path=/var/run/dbus/system_bus_socket",
}, nil),
call("isVerbose", stub.ExpectArgs{}, true, nil),
call("verbose", stub.ExpectArgs{[]any{"session bus proxy:", []string{
"unix:path=/run/user/1000/bus",
wantInstancePrefix + "/bus",
"--filter",
"--talk=org.freedesktop.DBus",
"--talk=org.freedesktop.Notifications",
"--own=org.chromium.Chromium.*",
"--own=org.mpris.MediaPlayer2.org.chromium.Chromium.*",
"--call=org.freedesktop.portal.*=*",
"--broadcast=org.freedesktop.portal.*=@/org/freedesktop/portal/*",
}}}, nil, nil),
call("verbose", stub.ExpectArgs{[]any{"message bus proxy final args:", helper.MustNewCheckedArgs(
"unix:path=/run/user/1000/bus",
wantInstancePrefix+"/bus",
"--filter",
"--talk=org.freedesktop.DBus",
"--talk=org.freedesktop.Notifications",
"--own=org.chromium.Chromium.*",
"--own=org.mpris.MediaPlayer2.org.chromium.Chromium.*",
"--call=org.freedesktop.portal.*=*",
"--broadcast=org.freedesktop.portal.*=@/org/freedesktop/portal/*",
)}}, nil, nil),
}, func() *system.I {
sys := system.New(panicMsgContext{}, message.NewMsg(nil), checkExpectUid)
sys.Ephemeral(system.Process, m(wantInstancePrefix), 0711)
if err := sys.ProxyDBus(
dbus.NewConfig(config.ID, true, true), nil,
dbus.ProxyPair{"unix:path=/run/user/1000/bus", wantInstancePrefix + "/bus"},
dbus.ProxyPair{"unix:path=/var/run/dbus/system_bus_socket", wantInstancePrefix + "/system_bus_socket"},
); err != nil {
t.Fatalf("cannot prepare sys: %v", err)
}
sys.UpdatePerm(m(wantInstancePrefix+"/bus"), acl.Read, acl.Write)
return sys
}(), sysUsesInstance(nil), nil, insertsOps(afterSpRuntimeOp(nil)), []stub.Call{
// this op configures the container state and does not make calls during toContainer
}, &container.Params{
Ops: new(container.Ops).
Bind(m(wantInstancePrefix+"/bus"),
m("/run/user/1000/bus"), 0),
}, paramsWantEnv(config, map[string]string{
"DBUS_SESSION_BUS_ADDRESS": "unix:path=/run/user/1000/bus",
}, nil), nil},
{"success", func(isShim, _ bool) outcomeOp {
if !isShim {
return new(spDBusOp)
}
return &spDBusOp{ProxySystem: true}
}, hst.Template, nil, []stub.Call{
call("dbusAddress", stub.ExpectArgs{}, [2]string{
"unix:path=/run/user/1000/bus",
"unix:path=/var/run/dbus/system_bus_socket",
}, nil),
call("isVerbose", stub.ExpectArgs{}, true, nil),
call("verbose", stub.ExpectArgs{[]any{"session bus proxy:", []string{
"unix:path=/run/user/1000/bus",
wantInstancePrefix + "/bus",
"--filter",
"--talk=org.freedesktop.Notifications",
"--talk=org.freedesktop.FileManager1",
"--talk=org.freedesktop.ScreenSaver",
"--talk=org.freedesktop.secrets",
"--talk=org.kde.kwalletd5",
"--talk=org.kde.kwalletd6",
"--talk=org.gnome.SessionManager",
"--own=org.chromium.Chromium.*",
"--own=org.mpris.MediaPlayer2.org.chromium.Chromium.*",
"--own=org.mpris.MediaPlayer2.chromium.*",
"--call=org.freedesktop.portal.*=*",
"--broadcast=org.freedesktop.portal.*=@/org/freedesktop/portal/*",
}}}, nil, nil),
call("verbose", stub.ExpectArgs{[]any{"system bus proxy:", []string{
"unix:path=/var/run/dbus/system_bus_socket",
wantInstancePrefix + "/system_bus_socket",
"--filter",
"--talk=org.bluez",
"--talk=org.freedesktop.Avahi",
"--talk=org.freedesktop.UPower",
}}}, nil, nil),
call("verbose", stub.ExpectArgs{[]any{"message bus proxy final args:", helper.MustNewCheckedArgs(
"unix:path=/run/user/1000/bus",
wantInstancePrefix+"/bus",
"--filter",
"--talk=org.freedesktop.Notifications",
"--talk=org.freedesktop.FileManager1",
"--talk=org.freedesktop.ScreenSaver",
"--talk=org.freedesktop.secrets",
"--talk=org.kde.kwalletd5",
"--talk=org.kde.kwalletd6",
"--talk=org.gnome.SessionManager",
"--own=org.chromium.Chromium.*",
"--own=org.mpris.MediaPlayer2.org.chromium.Chromium.*",
"--own=org.mpris.MediaPlayer2.chromium.*",
"--call=org.freedesktop.portal.*=*",
"--broadcast=org.freedesktop.portal.*=@/org/freedesktop/portal/*",
"unix:path=/var/run/dbus/system_bus_socket",
wantInstancePrefix+"/system_bus_socket",
"--filter",
"--talk=org.bluez",
"--talk=org.freedesktop.Avahi",
"--talk=org.freedesktop.UPower",
)}}, nil, nil),
}, func() *system.I {
sys := system.New(panicMsgContext{}, message.NewMsg(nil), checkExpectUid)
sys.Ephemeral(system.Process, m(wantInstancePrefix), 0711)
if err := sys.ProxyDBus(
config.SessionBus, config.SystemBus,
dbus.ProxyPair{"unix:path=/run/user/1000/bus", wantInstancePrefix + "/bus"},
dbus.ProxyPair{"unix:path=/var/run/dbus/system_bus_socket", wantInstancePrefix + "/system_bus_socket"},
); err != nil {
t.Fatalf("cannot prepare sys: %v", err)
}
sys.UpdatePerm(m(wantInstancePrefix+"/bus"), acl.Read, acl.Write).
UpdatePerm(m(wantInstancePrefix+"/system_bus_socket"), acl.Read, acl.Write)
return sys
}(), sysUsesInstance(nil), nil, insertsOps(afterSpRuntimeOp(nil)), []stub.Call{
// this op configures the container state and does not make calls during toContainer
}, &container.Params{
Ops: new(container.Ops).
Bind(m(wantInstancePrefix+"/bus"),
m("/run/user/1000/bus"), 0).
Bind(m(wantInstancePrefix+"/system_bus_socket"),
m("/var/run/dbus/system_bus_socket"), 0),
}, paramsWantEnv(config, map[string]string{
"DBUS_SESSION_BUS_ADDRESS": "unix:path=/run/user/1000/bus",
"DBUS_SYSTEM_BUS_ADDRESS": "unix:path=/var/run/dbus/system_bus_socket",
}, nil), nil},
})
}

View File

@@ -1,210 +0,0 @@
package app
import (
"encoding/gob"
"errors"
"fmt"
"io"
"io/fs"
"os"
"strconv"
"syscall"
"hakurei.app/container/check"
"hakurei.app/hst"
"hakurei.app/message"
)
const pulseCookieSizeMax = 1 << 8
func init() { gob.Register(new(spPulseOp)) }
// spPulseOp exports the PulseAudio server to the container.
// Runs after spRuntimeOp.
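// The socket is hard linked into the runtime share directory during toSystem; the cookie, if any, is loaded there
// and placed under [hst.AbsPrivateTmp] during toContainer.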
type spPulseOp struct {
// PulseAudio cookie data, populated during toSystem if a cookie is present.
Cookie *[pulseCookieSizeMax]byte
// PulseAudio cookie size, populated during toSystem if a cookie is present.
CookieSize int
}
func (s *spPulseOp) toSystem(state *outcomeStateSys) error {
if state.et&hst.EPulse == 0 {
return errNotEnabled
}
pulseRuntimeDir, pulseSocket := s.commonPaths(state.outcomeState)
if _, err := state.k.stat(pulseRuntimeDir.String()); err != nil {
if !errors.Is(err, fs.ErrNotExist) {
return &hst.AppError{Step: fmt.Sprintf("access PulseAudio directory %q", pulseRuntimeDir), Err: err}
}
return newWithMessageError(fmt.Sprintf("PulseAudio directory %q not found", pulseRuntimeDir), err)
}
if fi, err := state.k.stat(pulseSocket.String()); err != nil {
if !errors.Is(err, fs.ErrNotExist) {
return &hst.AppError{Step: fmt.Sprintf("access PulseAudio socket %q", pulseSocket), Err: err}
}
return newWithMessageError(fmt.Sprintf("PulseAudio directory %q found but socket does not exist", pulseRuntimeDir), err)
} else {
if m := fi.Mode(); m&0o006 != 0o006 {
return newWithMessage(fmt.Sprintf("unexpected permissions on %q: %s", pulseSocket, m))
}
}
// the pulse socket is world-writable, but DAC permissions on its parent directory prevent access;
// hard link it into the target-executable share directory to grant access
state.sys.Link(pulseSocket, state.runtime().Append("pulse"))
// load up to pulseCookieSizeMax bytes of the pulse cookie for transmission to the shim
if a, err := discoverPulseCookie(state.k); err != nil {
return err
} else if a != nil {
s.Cookie = new([pulseCookieSizeMax]byte)
if s.CookieSize, err = loadFile(state.msg, state.k, "PulseAudio cookie", a.String(), s.Cookie[:]); err != nil {
return err
}
} else {
state.msg.Verbose("cannot locate PulseAudio cookie (tried " +
"$PULSE_COOKIE, " +
"$XDG_CONFIG_HOME/pulse/cookie, " +
"$HOME/.pulse-cookie)")
}
return nil
}
func (s *spPulseOp) toContainer(state *outcomeStateParams) error {
innerPulseSocket := state.runtimeDir.Append("pulse", "native")
state.params.Bind(state.runtimePath().Append("pulse"), innerPulseSocket, 0)
state.env["PULSE_SERVER"] = "unix:" + innerPulseSocket.String()
if s.Cookie != nil {
innerDst := hst.AbsPrivateTmp.Append("/pulse-cookie")
if s.CookieSize < 0 || s.CookieSize > pulseCookieSizeMax {
return newWithMessage("unexpected PulseAudio cookie size")
}
state.env["PULSE_COOKIE"] = innerDst.String()
state.params.Place(innerDst, s.Cookie[:s.CookieSize])
}
return nil
}
func (s *spPulseOp) commonPaths(state *outcomeState) (pulseRuntimeDir, pulseSocket *check.Absolute) {
// PulseAudio runtime directory (usually `/run/user/%d/pulse`)
pulseRuntimeDir = state.sc.RuntimePath.Append("pulse")
// PulseAudio socket (usually `/run/user/%d/pulse/native`)
pulseSocket = pulseRuntimeDir.Append("native")
return
}
// discoverPulseCookie attempts to discover the pathname of the PulseAudio cookie of the current user.
// If both returned pathname and error are nil, the cookie is likely unavailable and can be silently skipped.
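// Sources are tried in order: $PULSE_COOKIE (used without verification), $HOME/.pulse-cookie, then $XDG_CONFIG_HOME/pulse/cookie.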
func discoverPulseCookie(k syscallDispatcher) (*check.Absolute, error) {
const paLocateStep = "locate PulseAudio cookie"
// from environment
if p, ok := k.lookupEnv("PULSE_COOKIE"); ok {
if a, err := check.NewAbs(p); err != nil {
return nil, &hst.AppError{Step: paLocateStep, Err: err}
} else {
// this takes precedence, do not verify whether the file is accessible
return a, nil
}
}
// $HOME/.pulse-cookie
if p, ok := k.lookupEnv("HOME"); ok {
var pulseCookiePath *check.Absolute
if a, err := check.NewAbs(p); err != nil {
return nil, &hst.AppError{Step: paLocateStep, Err: err}
} else {
pulseCookiePath = a.Append(".pulse-cookie")
}
if fi, err := k.stat(pulseCookiePath.String()); err != nil {
if !errors.Is(err, fs.ErrNotExist) {
return nil, &hst.AppError{Step: "access PulseAudio cookie", Err: err}
}
// fallthrough
} else if fi.IsDir() {
// fallthrough
} else {
return pulseCookiePath, nil
}
}
// $XDG_CONFIG_HOME/pulse/cookie
if p, ok := k.lookupEnv("XDG_CONFIG_HOME"); ok {
var pulseCookiePath *check.Absolute
if a, err := check.NewAbs(p); err != nil {
return nil, &hst.AppError{Step: paLocateStep, Err: err}
} else {
pulseCookiePath = a.Append("pulse", "cookie")
}
if fi, err := k.stat(pulseCookiePath.String()); err != nil {
if !errors.Is(err, fs.ErrNotExist) {
return nil, &hst.AppError{Step: "access PulseAudio cookie", Err: err}
}
// fallthrough
} else if fi.IsDir() {
// fallthrough
} else {
return pulseCookiePath, nil
}
}
// cookie not present
// not fatal: authentication is disabled
return nil, nil
}
// loadFile reads up to len(buf) bytes from the file at pathname.
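// It fails if the file is a directory or larger than buf; a shorter file is noted verbosely and read as-is.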
func loadFile(
msg message.Msg, k syscallDispatcher,
description, pathname string, buf []byte,
) (int, error) {
n := len(buf)
if n == 0 {
return -1, errors.New("invalid buffer")
}
if fi, err := k.stat(pathname); err != nil {
return -1, &hst.AppError{Step: "access " + description, Err: err}
} else {
if fi.IsDir() {
return -1, &hst.AppError{Step: "read " + description,
Err: &os.PathError{Op: "stat", Path: pathname, Err: syscall.EISDIR}}
}
if s := fi.Size(); s > int64(n) {
return -1, newWithMessageError(
description+" at "+strconv.Quote(pathname)+" exceeds expected size",
&os.PathError{Op: "stat", Path: pathname, Err: syscall.ENOMEM},
)
} else if s < int64(n) {
msg.Verbosef("%s at %q is %d bytes shorter than expected", description, pathname, int64(n)-s)
} else {
msg.Verbosef("loading %d bytes from %q", n, pathname)
}
}
if f, err := k.open(pathname); err != nil {
return -1, &hst.AppError{Step: "open " + description, Err: err}
} else {
if n, err = f.Read(buf); err != nil {
if !errors.Is(err, io.EOF) {
_ = f.Close()
return n, &hst.AppError{Step: "read " + description, Err: err}
}
}
if err = f.Close(); err != nil {
return n, &hst.AppError{Step: "close " + description, Err: err}
}
return n, nil
}
}

View File

@@ -1,460 +0,0 @@
package app
import (
"bytes"
"errors"
"os"
"syscall"
"testing"
"hakurei.app/container"
"hakurei.app/container/check"
"hakurei.app/container/stub"
"hakurei.app/hst"
"hakurei.app/system"
"hakurei.app/system/acl"
)
func TestSpPulseOp(t *testing.T) {
t.Parallel()
config := hst.Template()
sampleCookie := bytes.Repeat([]byte{0xfc}, pulseCookieSizeMax)
checkOpBehaviour(t, []opBehaviourTestCase{
{"not enabled", func(bool, bool) outcomeOp {
return new(spPulseOp)
}, func() *hst.Config {
c := hst.Template()
*c.Enablements = 0
return c
}, nil, nil, nil, nil, errNotEnabled, nil, nil, nil, nil, nil},
{"socketDir stat", func(isShim, _ bool) outcomeOp {
if !isShim {
return new(spPulseOp)
}
return &spPulseOp{Cookie: (*[256]byte)(sampleCookie)}
}, hst.Template, nil, []stub.Call{
call("stat", stub.ExpectArgs{wantRuntimePath + "/pulse"}, (*stubFi)(nil), stub.UniqueError(2)),
}, nil, nil, &hst.AppError{
Step: `access PulseAudio directory "/proc/nonexistent/xdg_runtime_dir/pulse"`,
Err: stub.UniqueError(2),
}, nil, nil, nil, nil, nil},
{"socketDir nonexistent", func(bool, bool) outcomeOp {
return new(spPulseOp)
}, hst.Template, nil, []stub.Call{
call("stat", stub.ExpectArgs{wantRuntimePath + "/pulse"}, (*stubFi)(nil), os.ErrNotExist),
}, nil, nil, &hst.AppError{
Step: "finalise",
Err: os.ErrNotExist,
Msg: `PulseAudio directory "/proc/nonexistent/xdg_runtime_dir/pulse" not found`,
}, nil, nil, nil, nil, nil},
{"socket stat", func(bool, bool) outcomeOp {
return new(spPulseOp)
}, hst.Template, nil, []stub.Call{
call("stat", stub.ExpectArgs{wantRuntimePath + "/pulse"}, (*stubFi)(nil), nil),
call("stat", stub.ExpectArgs{wantRuntimePath + "/pulse/native"}, (*stubFi)(nil), stub.UniqueError(1)),
}, nil, nil, &hst.AppError{
Step: `access PulseAudio socket "/proc/nonexistent/xdg_runtime_dir/pulse/native"`,
Err: stub.UniqueError(1),
}, nil, nil, nil, nil, nil},
{"socket nonexistent", func(bool, bool) outcomeOp {
return new(spPulseOp)
}, hst.Template, nil, []stub.Call{
call("stat", stub.ExpectArgs{wantRuntimePath + "/pulse"}, (*stubFi)(nil), nil),
call("stat", stub.ExpectArgs{wantRuntimePath + "/pulse/native"}, (*stubFi)(nil), os.ErrNotExist),
}, nil, nil, &hst.AppError{
Step: "finalise",
Err: os.ErrNotExist,
Msg: `PulseAudio directory "/proc/nonexistent/xdg_runtime_dir/pulse" found but socket does not exist`,
}, nil, nil, nil, nil, nil},
{"socket mode", func(bool, bool) outcomeOp {
return new(spPulseOp)
}, hst.Template, nil, []stub.Call{
call("stat", stub.ExpectArgs{wantRuntimePath + "/pulse"}, (*stubFi)(nil), nil),
call("stat", stub.ExpectArgs{wantRuntimePath + "/pulse/native"}, &stubFi{mode: 0660}, nil),
}, nil, nil, &hst.AppError{
Step: "finalise",
Err: os.ErrInvalid,
Msg: `unexpected permissions on "/proc/nonexistent/xdg_runtime_dir/pulse/native": -rw-rw----`,
}, nil, nil, nil, nil, nil},
{"cookie notAbs", func(bool, bool) outcomeOp {
return new(spPulseOp)
}, hst.Template, nil, []stub.Call{
call("stat", stub.ExpectArgs{wantRuntimePath + "/pulse"}, (*stubFi)(nil), nil),
call("stat", stub.ExpectArgs{wantRuntimePath + "/pulse/native"}, &stubFi{mode: 0666}, nil),
call("lookupEnv", stub.ExpectArgs{"PULSE_COOKIE"}, "proc/nonexistent/cookie", nil),
}, nil, nil, &hst.AppError{
Step: "locate PulseAudio cookie",
Err: &check.AbsoluteError{Pathname: "proc/nonexistent/cookie"},
}, nil, nil, nil, nil, nil},
{"cookie loadFile", func(bool, bool) outcomeOp {
return new(spPulseOp)
}, hst.Template, nil, []stub.Call{
call("stat", stub.ExpectArgs{wantRuntimePath + "/pulse"}, (*stubFi)(nil), nil),
call("stat", stub.ExpectArgs{wantRuntimePath + "/pulse/native"}, &stubFi{mode: 0666}, nil),
call("lookupEnv", stub.ExpectArgs{"PULSE_COOKIE"}, "/proc/nonexistent/cookie", nil),
call("stat", stub.ExpectArgs{"/proc/nonexistent/cookie"}, &stubFi{isDir: false, size: 1 << 8}, nil),
call("verbosef", stub.ExpectArgs{"loading %d bytes from %q", []any{1 << 8, "/proc/nonexistent/cookie"}}, nil, nil),
call("open", stub.ExpectArgs{"/proc/nonexistent/cookie"}, (*stubOsFile)(nil), stub.UniqueError(0)),
}, nil, nil, &hst.AppError{
Step: "open PulseAudio cookie",
Err: stub.UniqueError(0),
}, nil, nil, nil, nil, nil},
{"cookie bad shim size", func(isShim, clearUnexported bool) outcomeOp {
if !isShim {
return new(spPulseOp)
}
op := &spPulseOp{Cookie: (*[pulseCookieSizeMax]byte)(sampleCookie), CookieSize: pulseCookieSizeMax}
if clearUnexported {
op.CookieSize += 0xfd
}
return op
}, hst.Template, nil, []stub.Call{
call("stat", stub.ExpectArgs{wantRuntimePath + "/pulse"}, (*stubFi)(nil), nil),
call("stat", stub.ExpectArgs{wantRuntimePath + "/pulse/native"}, &stubFi{mode: 0666}, nil),
call("lookupEnv", stub.ExpectArgs{"PULSE_COOKIE"}, "/proc/nonexistent/cookie", nil),
call("stat", stub.ExpectArgs{"/proc/nonexistent/cookie"}, &stubFi{isDir: false, size: 1 << 8}, nil),
call("verbosef", stub.ExpectArgs{"loading %d bytes from %q", []any{1 << 8, "/proc/nonexistent/cookie"}}, nil, nil),
call("open", stub.ExpectArgs{"/proc/nonexistent/cookie"}, &stubOsFile{Reader: bytes.NewReader(sampleCookie)}, nil),
}, newI().
// state.ensureRuntimeDir
Ensure(m(wantRunDirPath), 0700).
UpdatePermType(system.User, m(wantRunDirPath), acl.Execute).
Ensure(m(wantRuntimePath), 0700).
UpdatePermType(system.User, m(wantRuntimePath), acl.Execute).
// state.runtime
Ephemeral(system.Process, m(wantRuntimeSharePath), 0700).
UpdatePerm(m(wantRuntimeSharePath), acl.Execute).
// toSystem
Link(m(wantRuntimePath+"/pulse/native"), m(wantRuntimeSharePath+"/pulse")), sysUsesRuntime(nil), nil, insertsOps(afterSpRuntimeOp(nil)), []stub.Call{
// this op configures the container state and does not make calls during toContainer
}, nil, nil, &hst.AppError{
Step: "finalise",
Err: os.ErrInvalid,
Msg: "unexpected PulseAudio cookie size",
}},
{"success cookie short", func(isShim, _ bool) outcomeOp {
if !isShim {
return new(spPulseOp)
}
sampleCookieTrunc := make([]byte, pulseCookieSizeMax)
copy(sampleCookieTrunc, sampleCookie[:len(sampleCookie)-0xe])
return &spPulseOp{Cookie: (*[pulseCookieSizeMax]byte)(sampleCookieTrunc), CookieSize: pulseCookieSizeMax - 0xe}
}, hst.Template, nil, []stub.Call{
call("stat", stub.ExpectArgs{wantRuntimePath + "/pulse"}, (*stubFi)(nil), nil),
call("stat", stub.ExpectArgs{wantRuntimePath + "/pulse/native"}, &stubFi{mode: 0666}, nil),
call("lookupEnv", stub.ExpectArgs{"PULSE_COOKIE"}, "/proc/nonexistent/cookie", nil),
call("stat", stub.ExpectArgs{"/proc/nonexistent/cookie"}, &stubFi{isDir: false, size: pulseCookieSizeMax - 0xe}, nil),
call("verbosef", stub.ExpectArgs{"%s at %q is %d bytes shorter than expected", []any{"PulseAudio cookie", "/proc/nonexistent/cookie", int64(0xe)}}, nil, nil),
call("open", stub.ExpectArgs{"/proc/nonexistent/cookie"}, &stubOsFile{Reader: bytes.NewReader(sampleCookie[:len(sampleCookie)-0xe])}, nil),
}, newI().
// state.ensureRuntimeDir
Ensure(m(wantRunDirPath), 0700).
UpdatePermType(system.User, m(wantRunDirPath), acl.Execute).
Ensure(m(wantRuntimePath), 0700).
UpdatePermType(system.User, m(wantRuntimePath), acl.Execute).
// state.runtime
Ephemeral(system.Process, m(wantRuntimeSharePath), 0700).
UpdatePerm(m(wantRuntimeSharePath), acl.Execute).
// toSystem
Link(m(wantRuntimePath+"/pulse/native"), m(wantRuntimeSharePath+"/pulse")), sysUsesRuntime(nil), nil, insertsOps(afterSpRuntimeOp(nil)), []stub.Call{
// this op configures the container state and does not make calls during toContainer
}, &container.Params{
Ops: new(container.Ops).
Bind(m(wantRuntimeSharePath+"/pulse"), m("/run/user/1000/pulse/native"), 0).
Place(m("/.hakurei/pulse-cookie"), sampleCookie[:len(sampleCookie)-0xe]),
}, paramsWantEnv(config, map[string]string{
"PULSE_SERVER": "unix:/run/user/1000/pulse/native",
"PULSE_COOKIE": "/.hakurei/pulse-cookie",
}, nil), nil},
{"success cookie", func(isShim, _ bool) outcomeOp {
if !isShim {
return new(spPulseOp)
}
return &spPulseOp{Cookie: (*[pulseCookieSizeMax]byte)(sampleCookie), CookieSize: pulseCookieSizeMax}
}, hst.Template, nil, []stub.Call{
call("stat", stub.ExpectArgs{wantRuntimePath + "/pulse"}, (*stubFi)(nil), nil),
call("stat", stub.ExpectArgs{wantRuntimePath + "/pulse/native"}, &stubFi{mode: 0666}, nil),
call("lookupEnv", stub.ExpectArgs{"PULSE_COOKIE"}, "/proc/nonexistent/cookie", nil),
call("stat", stub.ExpectArgs{"/proc/nonexistent/cookie"}, &stubFi{isDir: false, size: 1 << 8}, nil),
call("verbosef", stub.ExpectArgs{"loading %d bytes from %q", []any{1 << 8, "/proc/nonexistent/cookie"}}, nil, nil),
call("open", stub.ExpectArgs{"/proc/nonexistent/cookie"}, &stubOsFile{Reader: bytes.NewReader(sampleCookie)}, nil),
}, newI().
// state.ensureRuntimeDir
Ensure(m(wantRunDirPath), 0700).
UpdatePermType(system.User, m(wantRunDirPath), acl.Execute).
Ensure(m(wantRuntimePath), 0700).
UpdatePermType(system.User, m(wantRuntimePath), acl.Execute).
// state.runtime
Ephemeral(system.Process, m(wantRuntimeSharePath), 0700).
UpdatePerm(m(wantRuntimeSharePath), acl.Execute).
// toSystem
Link(m(wantRuntimePath+"/pulse/native"), m(wantRuntimeSharePath+"/pulse")), sysUsesRuntime(nil), nil, insertsOps(afterSpRuntimeOp(nil)), []stub.Call{
// this op configures the container state and does not make calls during toContainer
}, &container.Params{
Ops: new(container.Ops).
Bind(m(wantRuntimeSharePath+"/pulse"), m("/run/user/1000/pulse/native"), 0).
Place(m("/.hakurei/pulse-cookie"), sampleCookie),
}, paramsWantEnv(config, map[string]string{
"PULSE_SERVER": "unix:/run/user/1000/pulse/native",
"PULSE_COOKIE": "/.hakurei/pulse-cookie",
}, nil), nil},
{"success", func(bool, bool) outcomeOp {
return new(spPulseOp)
}, hst.Template, nil, []stub.Call{
call("stat", stub.ExpectArgs{wantRuntimePath + "/pulse"}, (*stubFi)(nil), nil),
call("stat", stub.ExpectArgs{wantRuntimePath + "/pulse/native"}, &stubFi{mode: 0666}, nil),
call("lookupEnv", stub.ExpectArgs{"PULSE_COOKIE"}, nil, nil),
call("lookupEnv", stub.ExpectArgs{"HOME"}, nil, nil),
call("lookupEnv", stub.ExpectArgs{"XDG_CONFIG_HOME"}, nil, nil),
call("verbose", stub.ExpectArgs{[]any{"cannot locate PulseAudio cookie (tried $PULSE_COOKIE, $XDG_CONFIG_HOME/pulse/cookie, $HOME/.pulse-cookie)"}}, nil, nil),
}, newI().
// state.ensureRuntimeDir
Ensure(m(wantRunDirPath), 0700).
UpdatePermType(system.User, m(wantRunDirPath), acl.Execute).
Ensure(m(wantRuntimePath), 0700).
UpdatePermType(system.User, m(wantRuntimePath), acl.Execute).
// state.runtime
Ephemeral(system.Process, m(wantRuntimeSharePath), 0700).
UpdatePerm(m(wantRuntimeSharePath), acl.Execute).
// toSystem
Link(m(wantRuntimePath+"/pulse/native"), m(wantRuntimeSharePath+"/pulse")), sysUsesRuntime(nil), nil, insertsOps(afterSpRuntimeOp(nil)), []stub.Call{
// this op configures the container state and does not make calls during toContainer
}, &container.Params{
Ops: new(container.Ops).
Bind(m(wantRuntimeSharePath+"/pulse"), m("/run/user/1000/pulse/native"), 0),
}, paramsWantEnv(config, map[string]string{
"PULSE_SERVER": "unix:/run/user/1000/pulse/native",
}, nil), nil},
})
}
func TestDiscoverPulseCookie(t *testing.T) {
t.Parallel()
fCheckPathname := func(k *kstub) error {
a, err := discoverPulseCookie(k)
k.Verbose(a)
return err
}
checkSimple(t, "discoverPulseCookie", []simpleTestCase{
{"override notAbs", fCheckPathname, stub.Expect{Calls: []stub.Call{
call("lookupEnv", stub.ExpectArgs{"PULSE_COOKIE"}, "proc/nonexistent/pulse-cookie", nil),
call("verbose", stub.ExpectArgs{[]any{(*check.Absolute)(nil)}}, nil, nil),
}}, &hst.AppError{
Step: "locate PulseAudio cookie",
Err: &check.AbsoluteError{Pathname: "proc/nonexistent/pulse-cookie"},
}},
{"success override", fCheckPathname, stub.Expect{Calls: []stub.Call{
call("lookupEnv", stub.ExpectArgs{"PULSE_COOKIE"}, "/proc/nonexistent/pulse-cookie", nil),
call("verbose", stub.ExpectArgs{[]any{m("/proc/nonexistent/pulse-cookie")}}, nil, nil),
}}, nil},
{"home notAbs", fCheckPathname, stub.Expect{Calls: []stub.Call{
call("lookupEnv", stub.ExpectArgs{"PULSE_COOKIE"}, nil, nil),
call("lookupEnv", stub.ExpectArgs{"HOME"}, "proc/nonexistent/home", nil),
call("verbose", stub.ExpectArgs{[]any{(*check.Absolute)(nil)}}, nil, nil),
}}, &hst.AppError{
Step: "locate PulseAudio cookie",
Err: &check.AbsoluteError{Pathname: "proc/nonexistent/home"},
}},
{"home stat", fCheckPathname, stub.Expect{Calls: []stub.Call{
call("lookupEnv", stub.ExpectArgs{"PULSE_COOKIE"}, nil, nil),
call("lookupEnv", stub.ExpectArgs{"HOME"}, "/proc/nonexistent/home", nil),
call("stat", stub.ExpectArgs{"/proc/nonexistent/home/.pulse-cookie"}, (*stubFi)(nil), stub.UniqueError(1)),
call("verbose", stub.ExpectArgs{[]any{(*check.Absolute)(nil)}}, nil, nil),
}}, &hst.AppError{
Step: "access PulseAudio cookie",
Err: stub.UniqueError(1),
}},
{"home nonexistent", fCheckPathname, stub.Expect{Calls: []stub.Call{
call("lookupEnv", stub.ExpectArgs{"PULSE_COOKIE"}, nil, nil),
call("lookupEnv", stub.ExpectArgs{"HOME"}, "/proc/nonexistent/home", nil),
call("stat", stub.ExpectArgs{"/proc/nonexistent/home/.pulse-cookie"}, (*stubFi)(nil), os.ErrNotExist),
call("lookupEnv", stub.ExpectArgs{"XDG_CONFIG_HOME"}, nil, nil),
call("verbose", stub.ExpectArgs{[]any{(*check.Absolute)(nil)}}, nil, nil),
}}, nil},
{"success home", fCheckPathname, stub.Expect{Calls: []stub.Call{
call("lookupEnv", stub.ExpectArgs{"PULSE_COOKIE"}, nil, nil),
call("lookupEnv", stub.ExpectArgs{"HOME"}, "/proc/nonexistent/home", nil),
call("stat", stub.ExpectArgs{"/proc/nonexistent/home/.pulse-cookie"}, &stubFi{}, nil),
call("verbose", stub.ExpectArgs{[]any{m("/proc/nonexistent/home/.pulse-cookie")}}, nil, nil),
}}, nil},
{"xdg notAbs", fCheckPathname, stub.Expect{Calls: []stub.Call{
call("lookupEnv", stub.ExpectArgs{"PULSE_COOKIE"}, nil, nil),
call("lookupEnv", stub.ExpectArgs{"HOME"}, nil, nil),
call("lookupEnv", stub.ExpectArgs{"XDG_CONFIG_HOME"}, "proc/nonexistent/xdg", nil),
call("verbose", stub.ExpectArgs{[]any{(*check.Absolute)(nil)}}, nil, nil),
}}, &hst.AppError{
Step: "locate PulseAudio cookie",
Err: &check.AbsoluteError{Pathname: "proc/nonexistent/xdg"},
}},
{"xdg stat", fCheckPathname, stub.Expect{Calls: []stub.Call{
call("lookupEnv", stub.ExpectArgs{"PULSE_COOKIE"}, nil, nil),
call("lookupEnv", stub.ExpectArgs{"HOME"}, nil, nil),
call("lookupEnv", stub.ExpectArgs{"XDG_CONFIG_HOME"}, "/proc/nonexistent/xdg", nil),
call("stat", stub.ExpectArgs{"/proc/nonexistent/xdg/pulse/cookie"}, (*stubFi)(nil), stub.UniqueError(0)),
call("verbose", stub.ExpectArgs{[]any{(*check.Absolute)(nil)}}, nil, nil),
}}, &hst.AppError{
Step: "access PulseAudio cookie",
Err: stub.UniqueError(0),
}},
{"xdg dir", fCheckPathname, stub.Expect{Calls: []stub.Call{
call("lookupEnv", stub.ExpectArgs{"PULSE_COOKIE"}, nil, nil),
call("lookupEnv", stub.ExpectArgs{"HOME"}, nil, nil),
call("lookupEnv", stub.ExpectArgs{"XDG_CONFIG_HOME"}, "/proc/nonexistent/xdg", nil),
call("stat", stub.ExpectArgs{"/proc/nonexistent/xdg/pulse/cookie"}, &stubFi{isDir: true}, nil),
call("verbose", stub.ExpectArgs{[]any{(*check.Absolute)(nil)}}, nil, nil),
}}, nil},
{"success home dir xdg nonexistent", fCheckPathname, stub.Expect{Calls: []stub.Call{
call("lookupEnv", stub.ExpectArgs{"PULSE_COOKIE"}, nil, nil),
call("lookupEnv", stub.ExpectArgs{"HOME"}, "/proc/nonexistent/home", nil),
call("stat", stub.ExpectArgs{"/proc/nonexistent/home/.pulse-cookie"}, &stubFi{isDir: true}, nil),
call("lookupEnv", stub.ExpectArgs{"XDG_CONFIG_HOME"}, "/proc/nonexistent/xdg", nil),
call("stat", stub.ExpectArgs{"/proc/nonexistent/xdg/pulse/cookie"}, (*stubFi)(nil), os.ErrNotExist),
call("verbose", stub.ExpectArgs{[]any{(*check.Absolute)(nil)}}, nil, nil),
}}, nil},
{"success home nonexistent xdg", fCheckPathname, stub.Expect{Calls: []stub.Call{
call("lookupEnv", stub.ExpectArgs{"PULSE_COOKIE"}, nil, nil),
call("lookupEnv", stub.ExpectArgs{"HOME"}, "/proc/nonexistent/home", nil),
call("stat", stub.ExpectArgs{"/proc/nonexistent/home/.pulse-cookie"}, (*stubFi)(nil), os.ErrNotExist),
call("lookupEnv", stub.ExpectArgs{"XDG_CONFIG_HOME"}, "/proc/nonexistent/xdg", nil),
call("stat", stub.ExpectArgs{"/proc/nonexistent/xdg/pulse/cookie"}, &stubFi{}, nil),
call("verbose", stub.ExpectArgs{[]any{m("/proc/nonexistent/xdg/pulse/cookie")}}, nil, nil),
}}, nil},
{"success empty environ", fCheckPathname, stub.Expect{Calls: []stub.Call{
call("lookupEnv", stub.ExpectArgs{"PULSE_COOKIE"}, nil, nil),
call("lookupEnv", stub.ExpectArgs{"HOME"}, nil, nil),
call("lookupEnv", stub.ExpectArgs{"XDG_CONFIG_HOME"}, nil, nil),
call("verbose", stub.ExpectArgs{[]any{(*check.Absolute)(nil)}}, nil, nil),
}}, nil},
})
}
func TestLoadFile(t *testing.T) {
t.Parallel()
fAfterWriteExact := func(k *kstub) error {
buf := make([]byte, 1<<8)
n, err := loadFile(k, k,
"simulated PulseAudio cookie",
"/home/ophestra/xdg/config/pulse/cookie",
buf)
k.Verbose(buf[:n])
return err
}
fAfterWrite := func(k *kstub) error {
buf := make([]byte, 1<<8+0xfd)
n, err := loadFile(k, k,
"simulated PulseAudio cookie",
"/home/ophestra/xdg/config/pulse/cookie",
buf)
k.Verbose(buf[:n])
return err
}
fBeforeWrite := func(k *kstub) error {
buf := make([]byte, 1<<8+0xfd)
n, err := loadFile(k, k,
"simulated PulseAudio cookie",
"/home/ophestra/xdg/config/pulse/cookie",
buf)
k.Verbose(n)
if !bytes.Equal(buf, make([]byte, len(buf))) {
t.Errorf("loadFile: buf = %#v", buf)
}
return err
}
sampleCookie := bytes.Repeat([]byte{0xfc}, pulseCookieSizeMax)
checkSimple(t, "loadFile", []simpleTestCase{
{"buf", func(k *kstub) error {
n, err := loadFile(k, k,
"simulated PulseAudio cookie",
"/home/ophestra/xdg/config/pulse/cookie",
nil)
k.Verbose(n)
return err
}, stub.Expect{Calls: []stub.Call{
call("verbose", stub.ExpectArgs{[]any{-1}}, nil, nil),
}}, errors.New("invalid buffer")},
{"stat", fBeforeWrite, stub.Expect{Calls: []stub.Call{
call("stat", stub.ExpectArgs{"/home/ophestra/xdg/config/pulse/cookie"}, (*stubFi)(nil), stub.UniqueError(3)),
call("verbose", stub.ExpectArgs{[]any{-1}}, nil, nil),
}}, &hst.AppError{
Step: "access simulated PulseAudio cookie",
Err: stub.UniqueError(3),
}},
{"dir", fBeforeWrite, stub.Expect{Calls: []stub.Call{
call("stat", stub.ExpectArgs{"/home/ophestra/xdg/config/pulse/cookie"}, &stubFi{isDir: true}, nil),
call("verbose", stub.ExpectArgs{[]any{-1}}, nil, nil),
}}, &hst.AppError{
Step: "read simulated PulseAudio cookie",
Err: &os.PathError{Op: "stat", Path: "/home/ophestra/xdg/config/pulse/cookie", Err: syscall.EISDIR},
}},
{"oob", fBeforeWrite, stub.Expect{Calls: []stub.Call{
call("stat", stub.ExpectArgs{"/home/ophestra/xdg/config/pulse/cookie"}, &stubFi{size: 1<<8 + 0xff}, nil),
call("verbose", stub.ExpectArgs{[]any{-1}}, nil, nil),
}}, &hst.AppError{
Step: "finalise",
Err: &os.PathError{Op: "stat", Path: "/home/ophestra/xdg/config/pulse/cookie", Err: syscall.ENOMEM},
Msg: `simulated PulseAudio cookie at "/home/ophestra/xdg/config/pulse/cookie" exceeds expected size`,
}},
{"open", fBeforeWrite, stub.Expect{Calls: []stub.Call{
call("stat", stub.ExpectArgs{"/home/ophestra/xdg/config/pulse/cookie"}, &stubFi{size: 1 << 8}, nil),
call("verbosef", stub.ExpectArgs{"%s at %q is %d bytes shorter than expected", []any{"simulated PulseAudio cookie", "/home/ophestra/xdg/config/pulse/cookie", int64(0xfd)}}, nil, nil),
call("open", stub.ExpectArgs{"/home/ophestra/xdg/config/pulse/cookie"}, (*stubOsFile)(nil), stub.UniqueError(2)),
call("verbose", stub.ExpectArgs{[]any{-1}}, nil, nil),
}}, &hst.AppError{Step: "open simulated PulseAudio cookie", Err: stub.UniqueError(2)}},
{"read", fBeforeWrite, stub.Expect{Calls: []stub.Call{
call("stat", stub.ExpectArgs{"/home/ophestra/xdg/config/pulse/cookie"}, &stubFi{size: 1 << 8}, nil),
call("verbosef", stub.ExpectArgs{"%s at %q is %d bytes shorter than expected", []any{"simulated PulseAudio cookie", "/home/ophestra/xdg/config/pulse/cookie", int64(0xfd)}}, nil, nil),
call("open", stub.ExpectArgs{"/home/ophestra/xdg/config/pulse/cookie"}, &stubOsFile{Reader: errorReader{stub.UniqueError(1)}}, nil),
call("verbose", stub.ExpectArgs{[]any{-1}}, nil, nil),
}}, &hst.AppError{Step: "read simulated PulseAudio cookie", Err: stub.UniqueError(1)}},
{"short close", fAfterWrite, stub.Expect{Calls: []stub.Call{
call("stat", stub.ExpectArgs{"/home/ophestra/xdg/config/pulse/cookie"}, &stubFi{size: 1 << 8}, nil),
call("verbosef", stub.ExpectArgs{"%s at %q is %d bytes shorter than expected", []any{"simulated PulseAudio cookie", "/home/ophestra/xdg/config/pulse/cookie", int64(0xfd)}}, nil, nil),
call("open", stub.ExpectArgs{"/home/ophestra/xdg/config/pulse/cookie"}, &stubOsFile{closeErr: stub.UniqueError(0), Reader: bytes.NewReader(sampleCookie)}, nil),
call("verbose", stub.ExpectArgs{[]any{sampleCookie}}, nil, nil),
}}, &hst.AppError{Step: "close simulated PulseAudio cookie", Err: stub.UniqueError(0)}},
{"success", fAfterWriteExact, stub.Expect{Calls: []stub.Call{
call("stat", stub.ExpectArgs{"/home/ophestra/xdg/config/pulse/cookie"}, &stubFi{size: 1 << 8}, nil),
call("verbosef", stub.ExpectArgs{"loading %d bytes from %q", []any{1 << 8, "/home/ophestra/xdg/config/pulse/cookie"}}, nil, nil),
call("open", stub.ExpectArgs{"/home/ophestra/xdg/config/pulse/cookie"}, &stubOsFile{Reader: bytes.NewReader(sampleCookie)}, nil),
call("verbose", stub.ExpectArgs{[]any{sampleCookie}}, nil, nil),
}}, nil},
})
}
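The cases above walk discoverPulseCookie through its fallback chain: an explicit $PULSE_COOKIE wins outright, then $HOME/.pulse-cookie, then $XDG_CONFIG_HOME/pulse/cookie, and coming up empty is not an error. A standalone sketch of that order follows; the helper name and plain-string return are illustrative, since the real function also stats each candidate and skips directories and missing files.
// pulseCookieCandidates is an illustrative reduction of the lookup order the
// tests above exercise; it is not part of the package.
func pulseCookieCandidates(lookupEnv func(string) (string, bool)) []string {
	if p, ok := lookupEnv("PULSE_COOKIE"); ok {
		return []string{p} // explicit override, used as-is
	}
	var out []string
	if home, ok := lookupEnv("HOME"); ok {
		out = append(out, home+"/.pulse-cookie")
	}
	if xdg, ok := lookupEnv("XDG_CONFIG_HOME"); ok {
		out = append(out, xdg+"/pulse/cookie")
	}
	return out // empty means no cookie was found; the caller treats that as non-fatal
}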

View File

@@ -1,125 +0,0 @@
package app
import (
"encoding/gob"
"hakurei.app/container/check"
"hakurei.app/container/comp"
"hakurei.app/container/fhs"
"hakurei.app/hst"
"hakurei.app/system"
"hakurei.app/system/acl"
)
const (
/*
Path to a user-private user-writable directory that is bound
to the user login time on the machine. It is automatically
created the first time a user logs in and removed on the
user's final logout. If a user logs in twice at the same time,
both sessions will see the same $XDG_RUNTIME_DIR and the same
contents. If a user logs in once, then logs out again, and
logs in again, the directory contents will have been lost in
between, but applications should not rely on this behavior and
must be able to deal with stale files. To store
session-private data in this directory, the user should
include the value of $XDG_SESSION_ID in the filename. This
directory shall be used for runtime file system objects such
as AF_UNIX sockets, FIFOs, PID files and similar. It is
guaranteed that this directory is local and offers the
greatest possible file system feature set the operating system
provides. For further details, see the XDG Base Directory
Specification[3]. $XDG_RUNTIME_DIR is not set if the current
user is not the original user of the session.
*/
envXDGRuntimeDir = "XDG_RUNTIME_DIR"
/*
The session class. This may be used instead of class= on the
module parameter line, and is usually preferred.
*/
envXDGSessionClass = "XDG_SESSION_CLASS"
/*
A regular interactive user session. This is the default class
for sessions for which a TTY or X display is known at session
registration time.
*/
xdgSessionClassUser = "user"
/*
The session type. This may be used instead of type= on the
module parameter line, and is usually preferred.
One of "unspecified", "tty", "x11", "wayland", "mir", or "web".
*/
envXDGSessionType = "XDG_SESSION_TYPE"
)
func init() { gob.Register(new(spRuntimeOp)) }
const (
sessionTypeUnspec = iota
sessionTypeTTY
sessionTypeX11
sessionTypeWayland
)
// spRuntimeOp sets up XDG_RUNTIME_DIR inside the container.
type spRuntimeOp struct {
// SessionType determines the value of envXDGSessionType. Populated during toSystem.
SessionType uintptr
}
func (s *spRuntimeOp) toSystem(state *outcomeStateSys) error {
if state.Container.Flags&hst.FShareRuntime != 0 {
runtimeDir, runtimeDirInst := s.commonPaths(state.outcomeState)
state.sys.Ensure(runtimeDir, 0700)
state.sys.UpdatePermType(system.User, runtimeDir, acl.Execute)
state.sys.Ensure(runtimeDirInst, 0700)
state.sys.UpdatePermType(system.User, runtimeDirInst, acl.Read, acl.Write, acl.Execute)
}
if state.et&hst.EWayland != 0 {
s.SessionType = sessionTypeWayland
} else if state.et&hst.EX11 != 0 {
s.SessionType = sessionTypeX11
} else {
s.SessionType = sessionTypeTTY
}
return nil
}
func (s *spRuntimeOp) toContainer(state *outcomeStateParams) error {
state.runtimeDir = fhs.AbsRunUser.Append(state.mapuid.String())
state.env[envXDGRuntimeDir] = state.runtimeDir.String()
state.env[envXDGSessionClass] = xdgSessionClassUser
switch s.SessionType {
case sessionTypeUnspec:
state.env[envXDGSessionType] = "unspecified"
case sessionTypeTTY:
state.env[envXDGSessionType] = "tty"
case sessionTypeX11:
state.env[envXDGSessionType] = "x11"
case sessionTypeWayland:
state.env[envXDGSessionType] = "wayland"
}
state.params.Tmpfs(fhs.AbsRunUser, 1<<12, 0755)
if state.Container.Flags&hst.FShareRuntime != 0 {
_, runtimeDirInst := s.commonPaths(state.outcomeState)
state.params.Bind(runtimeDirInst, state.runtimeDir, comp.BindWritable)
} else {
state.params.Mkdir(state.runtimeDir, 0700)
}
return nil
}
func (s *spRuntimeOp) commonPaths(state *outcomeState) (runtimeDir, runtimeDirInst *check.Absolute) {
runtimeDir = state.sc.SharePath.Append("runtime")
runtimeDirInst = runtimeDir.Append(state.identity.String())
return
}
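spRuntimeOp follows the same two-phase shape as the other sp*Op types: toSystem runs in the privileged process, records whatever the shim will need in exported fields (here SessionType) and queues system-side setup, while toContainer runs in the shim after a gob round trip and turns that state into container params and environment. A rough reduction of that contract, assuming the interface declares nothing beyond these two methods:
// outcomeOp as inferred from the methods above; the real interface may carry
// additional methods or constraints.
type outcomeOp interface {
	// toSystem runs with host privileges; exported fields set here survive
	// gob serialisation and become visible to toContainer in the shim.
	toSystem(state *outcomeStateSys) error
	// toContainer runs in the shim and emits container params and env.
	toContainer(state *outcomeStateParams) error
}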

View File

@@ -1,128 +0,0 @@
package app
import (
"testing"
"hakurei.app/container"
"hakurei.app/container/comp"
"hakurei.app/container/fhs"
"hakurei.app/container/stub"
"hakurei.app/hst"
"hakurei.app/system"
"hakurei.app/system/acl"
)
func TestSpRuntimeOp(t *testing.T) {
t.Parallel()
config := hst.Template()
checkOpBehaviour(t, []opBehaviourTestCase{
{"success zero", func(isShim bool, clearUnexported bool) outcomeOp {
if !isShim {
return new(spRuntimeOp)
}
op := &spRuntimeOp{sessionTypeTTY}
if clearUnexported {
op.SessionType = sessionTypeUnspec
}
return op
}, func() *hst.Config {
c := hst.Template()
*c.Enablements = 0
return c
}, nil, []stub.Call{
// this op configures the system state and does not make calls during toSystem
}, newI().
Ensure(m("/proc/nonexistent/tmp/hakurei.0/runtime"), 0700).
UpdatePermType(system.User, m("/proc/nonexistent/tmp/hakurei.0/runtime"), acl.Execute).
Ensure(m("/proc/nonexistent/tmp/hakurei.0/runtime/9"), 0700).
UpdatePermType(system.User, m("/proc/nonexistent/tmp/hakurei.0/runtime/9"), acl.Read, acl.Write, acl.Execute), nil, nil, insertsOps(nil), []stub.Call{
// this op configures the container state and does not make calls during toContainer
}, &container.Params{
Ops: new(container.Ops).
Tmpfs(fhs.AbsRunUser, 1<<12, 0755).
Bind(m("/proc/nonexistent/tmp/hakurei.0/runtime/9"), m("/run/user/1000"), comp.BindWritable),
}, paramsWantEnv(config, map[string]string{
"XDG_RUNTIME_DIR": "/run/user/1000",
"XDG_SESSION_CLASS": "user",
"XDG_SESSION_TYPE": "unspecified",
}, nil), nil},
{"success tty", func(isShim, _ bool) outcomeOp {
if !isShim {
return new(spRuntimeOp)
}
return &spRuntimeOp{sessionTypeTTY}
}, func() *hst.Config {
c := hst.Template()
*c.Enablements = 0
return c
}, nil, []stub.Call{
// this op configures the system state and does not make calls during toSystem
}, newI().
Ensure(m("/proc/nonexistent/tmp/hakurei.0/runtime"), 0700).
UpdatePermType(system.User, m("/proc/nonexistent/tmp/hakurei.0/runtime"), acl.Execute).
Ensure(m("/proc/nonexistent/tmp/hakurei.0/runtime/9"), 0700).
UpdatePermType(system.User, m("/proc/nonexistent/tmp/hakurei.0/runtime/9"), acl.Read, acl.Write, acl.Execute), nil, nil, insertsOps(nil), []stub.Call{
// this op configures the container state and does not make calls during toContainer
}, &container.Params{
Ops: new(container.Ops).
Tmpfs(fhs.AbsRunUser, 1<<12, 0755).
Bind(m("/proc/nonexistent/tmp/hakurei.0/runtime/9"), m("/run/user/1000"), comp.BindWritable),
}, paramsWantEnv(config, map[string]string{
"XDG_RUNTIME_DIR": "/run/user/1000",
"XDG_SESSION_CLASS": "user",
"XDG_SESSION_TYPE": "tty",
}, nil), nil},
{"success x11", func(isShim, _ bool) outcomeOp {
if !isShim {
return new(spRuntimeOp)
}
return &spRuntimeOp{sessionTypeX11}
}, func() *hst.Config {
c := hst.Template()
*c.Enablements = hst.Enablements(hst.EX11)
return c
}, nil, []stub.Call{
// this op configures the system state and does not make calls during toSystem
}, newI().
Ensure(m("/proc/nonexistent/tmp/hakurei.0/runtime"), 0700).
UpdatePermType(system.User, m("/proc/nonexistent/tmp/hakurei.0/runtime"), acl.Execute).
Ensure(m("/proc/nonexistent/tmp/hakurei.0/runtime/9"), 0700).
UpdatePermType(system.User, m("/proc/nonexistent/tmp/hakurei.0/runtime/9"), acl.Read, acl.Write, acl.Execute), nil, nil, insertsOps(nil), []stub.Call{
// this op configures the container state and does not make calls during toContainer
}, &container.Params{
Ops: new(container.Ops).
Tmpfs(fhs.AbsRunUser, 1<<12, 0755).
Bind(m("/proc/nonexistent/tmp/hakurei.0/runtime/9"), m("/run/user/1000"), comp.BindWritable),
}, paramsWantEnv(config, map[string]string{
"XDG_RUNTIME_DIR": "/run/user/1000",
"XDG_SESSION_CLASS": "user",
"XDG_SESSION_TYPE": "x11",
}, nil), nil},
{"success", func(isShim, _ bool) outcomeOp {
if !isShim {
return new(spRuntimeOp)
}
return &spRuntimeOp{sessionTypeWayland}
}, hst.Template, nil, []stub.Call{
// this op configures the system state and does not make calls during toSystem
}, newI().
Ensure(m("/proc/nonexistent/tmp/hakurei.0/runtime"), 0700).
UpdatePermType(system.User, m("/proc/nonexistent/tmp/hakurei.0/runtime"), acl.Execute).
Ensure(m("/proc/nonexistent/tmp/hakurei.0/runtime/9"), 0700).
UpdatePermType(system.User, m("/proc/nonexistent/tmp/hakurei.0/runtime/9"), acl.Read, acl.Write, acl.Execute), nil, nil, insertsOps(nil), []stub.Call{
// this op configures the container state and does not make calls during toContainer
}, &container.Params{
Ops: new(container.Ops).
Tmpfs(fhs.AbsRunUser, 1<<12, 0755).
Bind(m("/proc/nonexistent/tmp/hakurei.0/runtime/9"), m("/run/user/1000"), comp.BindWritable),
}, paramsWantEnv(config, map[string]string{
"XDG_RUNTIME_DIR": "/run/user/1000",
"XDG_SESSION_CLASS": "user",
"XDG_SESSION_TYPE": "wayland",
}, nil), nil},
})
}

View File

@@ -1,44 +0,0 @@
package app
import (
"encoding/gob"
"hakurei.app/container/check"
"hakurei.app/container/comp"
"hakurei.app/container/fhs"
"hakurei.app/hst"
"hakurei.app/system"
"hakurei.app/system/acl"
)
func init() { gob.Register(spTmpdirOp{}) }
// spTmpdirOp sets up TMPDIR inside the container.
type spTmpdirOp struct{}
func (s spTmpdirOp) toSystem(state *outcomeStateSys) error {
if state.Container.Flags&hst.FShareTmpdir != 0 {
tmpdir, tmpdirInst := s.commonPaths(state.outcomeState)
state.sys.Ensure(tmpdir, 0700)
state.sys.UpdatePermType(system.User, tmpdir, acl.Execute)
state.sys.Ensure(tmpdirInst, 01700)
state.sys.UpdatePermType(system.User, tmpdirInst, acl.Read, acl.Write, acl.Execute)
}
return nil
}
func (s spTmpdirOp) toContainer(state *outcomeStateParams) error {
if state.Container.Flags&hst.FShareTmpdir != 0 {
_, tmpdirInst := s.commonPaths(state.outcomeState)
state.params.Bind(tmpdirInst, fhs.AbsTmp, comp.BindWritable)
} else {
state.params.Tmpfs(fhs.AbsTmp, 0, 01777)
}
return nil
}
func (s spTmpdirOp) commonPaths(state *outcomeState) (tmpdir, tmpdirInst *check.Absolute) {
tmpdir = state.sc.SharePath.Append("tmpdir")
tmpdirInst = tmpdir.Append(state.identity.String())
return
}

View File

@@ -1,34 +0,0 @@
package app
import (
"testing"
"hakurei.app/container"
"hakurei.app/container/comp"
"hakurei.app/container/fhs"
"hakurei.app/container/stub"
"hakurei.app/hst"
"hakurei.app/system"
"hakurei.app/system/acl"
)
func TestSpTmpdirOp(t *testing.T) {
t.Parallel()
checkOpBehaviour(t, []opBehaviourTestCase{
{"success", func(bool, bool) outcomeOp {
return spTmpdirOp{}
}, hst.Template, nil, []stub.Call{
// this op configures the system state and does not make calls during toSystem
}, newI().
Ensure(m("/proc/nonexistent/tmp/hakurei.0/tmpdir"), 0700).
UpdatePermType(system.User, m("/proc/nonexistent/tmp/hakurei.0/tmpdir"), acl.Execute).
Ensure(m("/proc/nonexistent/tmp/hakurei.0/tmpdir/9"), 01700).
UpdatePermType(system.User, m("/proc/nonexistent/tmp/hakurei.0/tmpdir/9"), acl.Read, acl.Write, acl.Execute), nil, nil, insertsOps(nil), []stub.Call{
// this op configures the container state and does not make calls during toContainer
}, &container.Params{
Ops: new(container.Ops).
Bind(m("/proc/nonexistent/tmp/hakurei.0/tmpdir/9"), fhs.AbsTmp, comp.BindWritable),
}, nil, nil},
})
}

View File

@@ -1,63 +0,0 @@
package app
import (
"encoding/gob"
"hakurei.app/container/check"
"hakurei.app/hst"
"hakurei.app/system/acl"
"hakurei.app/system/wayland"
)
func init() { gob.Register(new(spWaylandOp)) }
// spWaylandOp exports the Wayland display server to the container.
// Runs after spRuntimeOp.
type spWaylandOp struct {
// Path to host wayland socket. Populated during toSystem if DirectWayland is true.
SocketPath *check.Absolute
}
func (s *spWaylandOp) toSystem(state *outcomeStateSys) error {
if state.et&hst.EWayland == 0 {
return errNotEnabled
}
// outer wayland socket (usually `/run/user/%d/wayland-%d`)
var socketPath *check.Absolute
if name, ok := state.k.lookupEnv(wayland.WaylandDisplay); !ok {
state.msg.Verbose(wayland.WaylandDisplay + " is not set, assuming " + wayland.FallbackName)
socketPath = state.sc.RuntimePath.Append(wayland.FallbackName)
} else if a, err := check.NewAbs(name); err != nil {
socketPath = state.sc.RuntimePath.Append(name)
} else {
socketPath = a
}
if !state.directWayland { // set up security-context-v1
appId := state.appId
if appId == "" {
// use instance ID in case app id is not set
appId = "app.hakurei." + state.id.String()
}
// downstream socket paths
state.sys.Wayland(state.instance().Append("wayland"), socketPath, appId, state.id.String())
} else { // bind mount wayland socket (insecure)
state.msg.Verbose("direct wayland access, PROCEED WITH CAUTION")
state.ensureRuntimeDir()
s.SocketPath = socketPath
state.sys.UpdatePermType(hst.EWayland, socketPath, acl.Read, acl.Write, acl.Execute)
}
return nil
}
func (s *spWaylandOp) toContainer(state *outcomeStateParams) error {
innerPath := state.runtimeDir.Append(wayland.FallbackName)
state.env[wayland.WaylandDisplay] = wayland.FallbackName
if s.SocketPath == nil {
state.params.Bind(state.instancePath().Append("wayland"), innerPath, 0)
} else {
state.params.Bind(s.SocketPath, innerPath, 0)
}
return nil
}
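toSystem accepts WAYLAND_DISPLAY either as a bare socket name relative to the host runtime directory or as an absolute path, and falls back to wayland-0 when it is unset. A minimal sketch of that resolution; the function name and string-based signature are illustrative, as the real code works with check.Absolute values.
// resolveWaylandSocket reduces the outer-socket selection above to plain
// strings for illustration.
func resolveWaylandSocket(display, runtimePath string) string {
	switch {
	case display == "":
		return runtimePath + "/wayland-0" // wayland.FallbackName
	case display[0] == '/':
		return display // already an absolute socket path
	default:
		return runtimePath + "/" + display // name relative to the runtime dir
	}
}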

View File

@@ -1,104 +0,0 @@
package app
import (
"testing"
"hakurei.app/container"
"hakurei.app/container/stub"
"hakurei.app/hst"
"hakurei.app/system"
"hakurei.app/system/acl"
"hakurei.app/system/wayland"
)
func TestSpWaylandOp(t *testing.T) {
t.Parallel()
config := hst.Template()
checkOpBehaviour(t, []opBehaviourTestCase{
{"not enabled", func(bool, bool) outcomeOp {
return new(spWaylandOp)
}, func() *hst.Config {
c := hst.Template()
*c.Enablements = 0
return c
}, nil, nil, nil, nil, errNotEnabled, nil, nil, nil, nil, nil},
{"success notAbs defaultAppId", func(bool, bool) outcomeOp {
return new(spWaylandOp)
}, func() *hst.Config {
c := hst.Template()
c.ID = ""
return c
}, nil, []stub.Call{
call("lookupEnv", stub.ExpectArgs{"WAYLAND_DISPLAY"}, "wayland-1", nil),
}, newI().
// state.instance
Ephemeral(system.Process, m(wantInstancePrefix), 0711).
// toSystem
Wayland(
m(wantInstancePrefix+"/wayland"),
m(wantRuntimePath+"/wayland-1"),
"app.hakurei."+wantAutoEtcPrefix,
wantAutoEtcPrefix,
), sysUsesInstance(nil), nil, insertsOps(afterSpRuntimeOp(nil)), []stub.Call{
// this op configures the container state and does not make calls during toContainer
}, &container.Params{
Ops: new(container.Ops).
Bind(m(wantInstancePrefix+"/wayland"), m("/run/user/1000/wayland-0"), 0),
}, paramsWantEnv(config, map[string]string{
wayland.WaylandDisplay: wayland.FallbackName,
}, nil), nil},
{"success direct", func(isShim, _ bool) outcomeOp {
if !isShim {
return new(spWaylandOp)
}
return &spWaylandOp{SocketPath: m("/proc/nonexistent/wayland")}
}, func() *hst.Config {
c := hst.Template()
c.DirectWayland = true
return c
}, nil, []stub.Call{
call("lookupEnv", stub.ExpectArgs{"WAYLAND_DISPLAY"}, "/proc/nonexistent/wayland", nil),
call("verbose", stub.ExpectArgs{[]any{"direct wayland access, PROCEED WITH CAUTION"}}, nil, nil),
}, newI().
// state.ensureRuntimeDir
Ensure(m(wantRunDirPath), 0700).
UpdatePermType(system.User, m(wantRunDirPath), acl.Execute).
Ensure(m(wantRuntimePath), 0700).
UpdatePermType(system.User, m(wantRuntimePath), acl.Execute).
// toSystem
UpdatePermType(hst.EWayland, m("/proc/nonexistent/wayland"), acl.Read, acl.Write, acl.Execute), nil, nil, insertsOps(afterSpRuntimeOp(nil)), []stub.Call{
// this op configures the container state and does not make calls during toContainer
}, &container.Params{
Ops: new(container.Ops).
Bind(m("/proc/nonexistent/wayland"), m("/run/user/1000/wayland-0"), 0),
}, paramsWantEnv(config, map[string]string{
wayland.WaylandDisplay: wayland.FallbackName,
}, nil), nil},
{"success", func(bool, bool) outcomeOp {
return new(spWaylandOp)
}, hst.Template, nil, []stub.Call{
call("lookupEnv", stub.ExpectArgs{"WAYLAND_DISPLAY"}, nil, nil),
call("verbose", stub.ExpectArgs{[]any{"WAYLAND_DISPLAY is not set, assuming wayland-0"}}, nil, nil),
}, newI().
// state.instance
Ephemeral(system.Process, m(wantInstancePrefix), 0711).
// toSystem
Wayland(
m(wantInstancePrefix+"/wayland"),
m(wantRuntimePath+"/"+wayland.FallbackName),
"org.chromium.Chromium",
wantAutoEtcPrefix,
), sysUsesInstance(nil), nil, insertsOps(afterSpRuntimeOp(nil)), []stub.Call{
// this op configures the container state and does not make calls during toContainer
}, &container.Params{
Ops: new(container.Ops).
Bind(m(wantInstancePrefix+"/wayland"), m("/run/user/1000/wayland-0"), 0),
}, paramsWantEnv(config, map[string]string{
wayland.WaylandDisplay: wayland.FallbackName,
}, nil), nil},
})
}

View File

@@ -1,71 +0,0 @@
package app
import (
"encoding/gob"
"errors"
"fmt"
"io/fs"
"strconv"
"strings"
"hakurei.app/container/check"
"hakurei.app/container/fhs"
"hakurei.app/hst"
"hakurei.app/system/acl"
)
var absX11SocketDir = fhs.AbsTmp.Append(".X11-unix")
func init() { gob.Register(new(spX11Op)) }
// spX11Op exports the X11 display server to the container.
type spX11Op struct {
// Value of $DISPLAY, stored during toSystem
Display string
}
func (s *spX11Op) toSystem(state *outcomeStateSys) error {
if state.et&hst.EX11 == 0 {
return errNotEnabled
}
if d, ok := state.k.lookupEnv("DISPLAY"); !ok {
return newWithMessage("DISPLAY is not set")
} else {
s.Display = d
}
// the socket file at `/tmp/.X11-unix/X%d` is typically owned by the priv user
// and not accessible by the target user
var socketPath *check.Absolute
if len(s.Display) > 1 && s.Display[0] == ':' { // `:%d`
if n, err := strconv.Atoi(s.Display[1:]); err == nil && n >= 0 {
socketPath = absX11SocketDir.Append("X" + strconv.Itoa(n))
}
} else if len(s.Display) > 5 && strings.HasPrefix(s.Display, "unix:") { // `unix:%s`
if a, err := check.NewAbs(s.Display[5:]); err == nil {
socketPath = a
}
}
if socketPath != nil {
if _, err := state.k.stat(socketPath.String()); err != nil {
if !errors.Is(err, fs.ErrNotExist) {
return &hst.AppError{Step: fmt.Sprintf("access X11 socket %q", socketPath), Err: err}
}
} else {
state.sys.UpdatePermType(hst.EX11, socketPath, acl.Read, acl.Write, acl.Execute)
if state.Container.Flags&hst.FHostAbstract == 0 {
s.Display = "unix:" + socketPath.String()
}
}
}
state.sys.ChangeHosts("#" + state.uid.String())
return nil
}
func (s *spX11Op) toContainer(state *outcomeStateParams) error {
state.env["DISPLAY"] = s.Display
state.params.Bind(absX11SocketDir, absX11SocketDir, 0)
return nil
}
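The socket path is derived from $DISPLAY in two recognised forms: ":%d" maps to /tmp/.X11-unix/X%d, and "unix:%s" takes the absolute path after the prefix; anything else leaves the socket alone so only the hosts entry and the DISPLAY pass-through happen. A standalone sketch of that parsing, using an invented helper name and a plain prefix check where the real code goes through check.NewAbs; it needs only strconv and strings.
// x11SocketFromDisplay mirrors the DISPLAY parsing above for illustration;
// it is not part of the package.
func x11SocketFromDisplay(display string) (socket string, ok bool) {
	if len(display) > 1 && display[0] == ':' { // ":%d"
		if n, err := strconv.Atoi(display[1:]); err == nil && n >= 0 {
			return "/tmp/.X11-unix/X" + strconv.Itoa(n), true
		}
	} else if len(display) > 5 && strings.HasPrefix(display, "unix:") { // "unix:%s"
		if strings.HasPrefix(display[5:], "/") {
			return display[5:], true
		}
	}
	return "", false
}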

View File

@@ -1,119 +0,0 @@
package app
import (
"os"
"testing"
"hakurei.app/container"
"hakurei.app/container/stub"
"hakurei.app/hst"
"hakurei.app/system/acl"
)
func TestSpX11Op(t *testing.T) {
t.Parallel()
config := hst.Template()
checkOpBehaviour(t, []opBehaviourTestCase{
{"not enabled", func(bool, bool) outcomeOp {
return new(spX11Op)
}, hst.Template, nil, nil, nil, nil, errNotEnabled, nil, nil, nil, nil, nil},
{"lookupEnv", func(bool, bool) outcomeOp {
return new(spX11Op)
}, func() *hst.Config {
c := hst.Template()
*c.Enablements |= hst.Enablements(hst.EX11)
return c
}, nil, []stub.Call{
call("lookupEnv", stub.ExpectArgs{"DISPLAY"}, nil, nil),
}, nil, nil, &hst.AppError{
Step: "finalise",
Err: os.ErrInvalid,
Msg: "DISPLAY is not set",
}, nil, nil, nil, nil, nil},
{"abs stat", func(bool, bool) outcomeOp {
return new(spX11Op)
}, func() *hst.Config {
c := hst.Template()
*c.Enablements |= hst.Enablements(hst.EX11)
return c
}, nil, []stub.Call{
call("lookupEnv", stub.ExpectArgs{"DISPLAY"}, "unix:/tmp/.X11-unix/X0", nil),
call("stat", stub.ExpectArgs{"/tmp/.X11-unix/X0"}, (*stubFi)(nil), stub.UniqueError(0)),
}, nil, nil, &hst.AppError{
Step: `access X11 socket "/tmp/.X11-unix/X0"`,
Err: stub.UniqueError(0),
}, nil, nil, nil, nil, nil},
{"success abs nonexistent", func(isShim, _ bool) outcomeOp {
if !isShim {
return new(spX11Op)
}
return &spX11Op{Display: "unix:/tmp/.X11-unix/X0"}
}, func() *hst.Config {
c := hst.Template()
*c.Enablements |= hst.Enablements(hst.EX11)
return c
}, nil, []stub.Call{
call("lookupEnv", stub.ExpectArgs{"DISPLAY"}, "unix:/tmp/.X11-unix/X0", nil),
call("stat", stub.ExpectArgs{"/tmp/.X11-unix/X0"}, (*stubFi)(nil), os.ErrNotExist),
}, newI().
ChangeHosts("#1000009"), nil, nil, insertsOps(nil), []stub.Call{
// this op configures the container state and does not make calls during toContainer
}, &container.Params{
Ops: new(container.Ops).
Bind(absX11SocketDir, absX11SocketDir, 0),
}, paramsWantEnv(config, map[string]string{
"DISPLAY": "unix:/tmp/.X11-unix/X0",
}, nil), nil},
{"success abs abstract", func(isShim, _ bool) outcomeOp {
if !isShim {
return new(spX11Op)
}
return &spX11Op{Display: "unix:/tmp/.X11-unix/X0"}
}, func() *hst.Config {
c := hst.Template()
*c.Enablements |= hst.Enablements(hst.EX11)
c.Container.Flags &= ^hst.FHostAbstract
return c
}, nil, []stub.Call{
call("lookupEnv", stub.ExpectArgs{"DISPLAY"}, "unix:/tmp/.X11-unix/X0", nil),
call("stat", stub.ExpectArgs{"/tmp/.X11-unix/X0"}, (*stubFi)(nil), nil),
}, newI().
UpdatePermType(hst.EX11, m("/tmp/.X11-unix/X0"), acl.Read, acl.Write, acl.Execute).
ChangeHosts("#1000009"), nil, nil, insertsOps(nil), []stub.Call{
// this op configures the container state and does not make calls during toContainer
}, &container.Params{
Ops: new(container.Ops).
Bind(absX11SocketDir, absX11SocketDir, 0),
}, paramsWantEnv(config, map[string]string{
"DISPLAY": "unix:/tmp/.X11-unix/X0",
}, nil), nil},
{"success", func(isShim, _ bool) outcomeOp {
if !isShim {
return new(spX11Op)
}
return &spX11Op{Display: ":0"}
}, func() *hst.Config {
c := hst.Template()
*c.Enablements |= hst.Enablements(hst.EX11)
return c
}, nil, []stub.Call{
call("lookupEnv", stub.ExpectArgs{"DISPLAY"}, ":0", nil),
call("stat", stub.ExpectArgs{"/tmp/.X11-unix/X0"}, (*stubFi)(nil), nil),
}, newI().
UpdatePermType(hst.EX11, m("/tmp/.X11-unix/X0"), acl.Read, acl.Write, acl.Execute).
ChangeHosts("#1000009"), nil, nil, insertsOps(nil), []stub.Call{
// this op configures the container state and does not make calls during toContainer
}, &container.Params{
Ops: new(container.Ops).
Bind(absX11SocketDir, absX11SocketDir, 0),
}, paramsWantEnv(config, map[string]string{
"DISPLAY": ":0",
}, nil), nil},
})
}

View File

@@ -1,52 +0,0 @@
package state
import (
"encoding/gob"
"fmt"
"io"
"os"
"hakurei.app/hst"
)
// entryEncode encodes [hst.State] into [io.Writer] with the state entry header.
// entryEncode does not validate the embedded [hst.Config] value.
//
// A non-nil error returned by entryEncode is of type [hst.AppError].
func entryEncode(w io.Writer, s *hst.State) error {
if err := entryWriteHeader(w, s.Enablements.Unwrap()); err != nil {
return &hst.AppError{Step: "encode state header", Err: err}
} else if err = gob.NewEncoder(w).Encode(s); err != nil {
return &hst.AppError{Step: "encode state body", Err: err}
} else {
return nil
}
}
// entryDecodeHeader calls entryReadHeader, returning [hst.AppError] for a non-nil error.
func entryDecodeHeader(r io.Reader) (hst.Enablement, error) {
if et, err := entryReadHeader(r); err != nil {
return 0, &hst.AppError{Step: "decode state header", Err: err}
} else {
return et, nil
}
}
// entryDecode decodes [hst.State] from [io.Reader] and stores the result in the value pointed to by p.
// entryDecode validates the embedded [hst.Config] value.
//
// A non-nil error returned by entryDecode is of type [hst.AppError].
func entryDecode(r io.Reader, p *hst.State) (hst.Enablement, error) {
if et, err := entryDecodeHeader(r); err != nil {
return et, err
} else if err = gob.NewDecoder(r).Decode(&p); err != nil {
return et, &hst.AppError{Step: "decode state body", Err: err}
} else if err = p.Config.Validate(); err != nil {
return et, err
} else if p.Enablements.Unwrap() != et {
return et, &hst.AppError{Step: "validate state enablement", Err: os.ErrInvalid,
Msg: fmt.Sprintf("state entry %s has unexpected enablement byte %#x, %#x", p.ID.String(), byte(p.Enablements.Unwrap()), byte(et))}
} else {
return et, nil
}
}
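Since gob output is not guaranteed to be byte-identical across runs, the natural way to exercise this pair is an encode/decode round trip rather than comparing encoded bytes, which is also how the tests in the next file treat it. A usage sketch; the function name is invented and bytes.Buffer comes from the standard library.
// roundTrip shows intended usage of entryEncode and entryDecode; s is assumed
// to be a valid, fully populated state.
func roundTrip(s *hst.State) (*hst.State, error) {
	var buf bytes.Buffer
	if err := entryEncode(&buf, s); err != nil {
		return nil, err
	}
	var got hst.State
	if _, err := entryDecode(&buf, &got); err != nil {
		// covers header, body, config validation and enablement mismatch
		return nil, err
	}
	return &got, nil
}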

View File

@@ -1,145 +0,0 @@
package state
import (
"bytes"
"encoding/gob"
"errors"
"io"
"os"
"reflect"
"strings"
"testing"
"time"
"hakurei.app/container/stub"
"hakurei.app/hst"
)
func TestEntryData(t *testing.T) {
t.Parallel()
mustEncodeGob := func(e any) string {
var buf bytes.Buffer
if err := gob.NewEncoder(&buf).Encode(e); err != nil {
t.Fatalf("cannot encode invalid state: %v", err)
return "\x00" // not reached
} else {
return buf.String()
}
}
templateStateGob := mustEncodeGob(newTemplateState())
testCases := []struct {
name string
data string
s *hst.State
err error
}{
{"invalid header", "\x00\xff\xca\xfe\xff\xff\xff\x00", nil, &hst.AppError{
Step: "decode state header", Err: errors.New("unexpected revision ffff")}},
{"invalid gob", "\x00\xff\xca\xfe\x00\x00\xff\x00", nil, &hst.AppError{
Step: "decode state body", Err: io.EOF}},
{"invalid config", "\x00\xff\xca\xfe\x00\x00\xff\x00" + mustEncodeGob(new(hst.State)), new(hst.State), &hst.AppError{
Step: "validate configuration", Err: hst.ErrConfigNull,
Msg: "invalid configuration"}},
{"inconsistent enablement", "\x00\xff\xca\xfe\x00\x00\xff\x00" + templateStateGob, newTemplateState(), &hst.AppError{
Step: "validate state enablement", Err: os.ErrInvalid,
Msg: "state entry aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa has unexpected enablement byte 0xd, 0xff"}},
{"template", "\x00\xff\xca\xfe\x00\x00\x0d\xf2" + templateStateGob, newTemplateState(), nil},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
t.Run("encode", func(t *testing.T) {
if tc.s == nil || tc.s.Config == nil {
return
}
t.Parallel()
var buf bytes.Buffer
if err := entryEncode(&buf, tc.s); err != nil {
t.Fatalf("entryEncode: error = %v", err)
}
if tc.err == nil {
// Gob encoding is not guaranteed to be deterministic.
// While the current implementation mostly is, it has randomised order
// for iterating over maps, and hst.Config holds a map for environ.
var got hst.State
if et, err := entryDecode(&buf, &got); err != nil {
t.Fatalf("entryDecode: error = %v", err)
} else if stateEt := got.Enablements.Unwrap(); et != stateEt {
t.Fatalf("entryDecode: et = %x, state %x", et, stateEt)
}
if !reflect.DeepEqual(&got, tc.s) {
t.Errorf("entryEncode: %x", buf.Bytes())
}
} else if testing.Verbose() {
t.Logf("%x", buf.String())
}
})
t.Run("decode", func(t *testing.T) {
t.Parallel()
var got hst.State
if et, err := entryDecode(strings.NewReader(tc.data), &got); !reflect.DeepEqual(err, tc.err) {
t.Fatalf("entryDecode: error = %#v, want %#v", err, tc.err)
} else if err != nil {
return
} else if stateEt := got.Enablements.Unwrap(); et != stateEt {
t.Fatalf("entryDecode: et = %x, state %x", et, stateEt)
}
if !reflect.DeepEqual(&got, tc.s) {
t.Errorf("entryDecode: %#v, want %#v", &got, tc.s)
}
})
})
}
t.Run("encode fault", func(t *testing.T) {
t.Parallel()
s := newTemplateState()
t.Run("gob", func(t *testing.T) {
var want = &hst.AppError{Step: "encode state body", Err: stub.UniqueError(0xcafe)}
if err := entryEncode(stubNErrorWriter(entryHeaderSize), s); !reflect.DeepEqual(err, want) {
t.Errorf("entryEncode: error = %#v, want %#v", err, want)
}
})
t.Run("header", func(t *testing.T) {
var want = &hst.AppError{Step: "encode state header", Err: stub.UniqueError(0xcafe)}
if err := entryEncode(stubNErrorWriter(entryHeaderSize-1), s); !reflect.DeepEqual(err, want) {
t.Errorf("entryEncode: error = %#v, want %#v", err, want)
}
})
})
}
// newTemplateState returns the address of a new template [hst.State] struct.
func newTemplateState() *hst.State {
return &hst.State{
ID: hst.ID(bytes.Repeat([]byte{0xaa}, len(hst.ID{}))),
PID: 0xcafebabe,
ShimPID: 0xdeadbeef,
Config: hst.Template(),
Time: time.Unix(0, 0),
}
}
// stubNErrorWriter returns an error for writes above a certain size.
type stubNErrorWriter int
func (w stubNErrorWriter) Write(p []byte) (n int, err error) {
if len(p) > int(w) {
return int(w), stub.UniqueError(0xcafe)
}
return io.Discard.Write(p)
}

View File

@@ -1,86 +0,0 @@
package state
import (
"encoding/hex"
"errors"
"io"
"os"
"strconv"
"syscall"
"hakurei.app/hst"
)
const (
// entryHeaderMagic are magic bytes at the beginning of the state entry file.
entryHeaderMagic = "\x00\xff\xca\xfe"
// entryHeaderRevision follows entryHeaderMagic and is incremented for revisions of the format.
entryHeaderRevision = "\x00\x00"
// entryHeaderSize is the fixed size of the header in bytes, including the enablement byte and its complement.
entryHeaderSize = len(entryHeaderMagic+entryHeaderRevision) + 2
)
// entryHeaderEncode encodes a state entry header for a [hst.Enablement] byte.
func entryHeaderEncode(et hst.Enablement) *[entryHeaderSize]byte {
data := [entryHeaderSize]byte([]byte(
entryHeaderMagic + entryHeaderRevision + string([]hst.Enablement{et, ^et}),
))
return &data
}
// entryHeaderDecode validates a state entry header and returns the [hst.Enablement] byte.
func entryHeaderDecode(data *[entryHeaderSize]byte) (hst.Enablement, error) {
if magic := data[:len(entryHeaderMagic)]; string(magic) != entryHeaderMagic {
return 0, errors.New("invalid header " + hex.EncodeToString(magic))
}
if revision := data[len(entryHeaderMagic):len(entryHeaderMagic+entryHeaderRevision)]; string(revision) != entryHeaderRevision {
return 0, errors.New("unexpected revision " + hex.EncodeToString(revision))
}
et := data[len(entryHeaderMagic+entryHeaderRevision)]
if et != ^data[len(entryHeaderMagic+entryHeaderRevision)+1] {
return 0, errors.New("header enablement value is inconsistent")
}
return hst.Enablement(et), nil
}
// EntrySizeError is returned for a file too small to hold a state entry header.
type EntrySizeError struct {
Name string
Size int64
}
func (e *EntrySizeError) Error() string {
if e.Name == "" {
return "state entry file is too short"
}
return "state entry file " + strconv.Quote(e.Name) + " is too short"
}
// entryCheckFile checks whether [os.FileInfo] refers to a file that might hold [hst.State].
func entryCheckFile(fi os.FileInfo) error {
if fi.IsDir() {
return syscall.EISDIR
}
if s := fi.Size(); s <= int64(entryHeaderSize) {
return &EntrySizeError{Name: fi.Name(), Size: s}
}
return nil
}
// entryReadHeader reads [hst.Enablement] from an [io.Reader].
func entryReadHeader(r io.Reader) (hst.Enablement, error) {
var data [entryHeaderSize]byte
if n, err := r.Read(data[:]); err != nil {
return 0, err
} else if n != entryHeaderSize {
return 0, &EntrySizeError{Size: int64(n)}
}
return entryHeaderDecode(&data)
}
// entryWriteHeader writes [hst.Enablement] header to an [io.Writer].
func entryWriteHeader(w io.Writer, et hst.Enablement) error {
_, err := w.Write(entryHeaderEncode(et)[:])
return err
}
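As a worked example of the layout: the template state used by the tests carries enablement byte 0x0d, so its header is the magic, the revision, 0x0d, and the complement 0xf2. A small round-trip check, wrapped in a throwaway function name:
// headerExample is illustrative only.
func headerExample() {
	hdr := entryHeaderEncode(0x0d)
	// hdr[:] == []byte{0x00, 0xff, 0xca, 0xfe, 0x00, 0x00, 0x0d, 0xf2}
	if et, err := entryHeaderDecode(hdr); err != nil || et != 0x0d {
		panic("header round trip mismatch")
	}
}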

View File

@@ -1,184 +0,0 @@
package state
import (
"bytes"
"errors"
"io"
"io/fs"
"os"
"reflect"
"syscall"
"testing"
"time"
"hakurei.app/hst"
)
func TestEntryHeader(t *testing.T) {
t.Parallel()
testCases := []struct {
name string
data [entryHeaderSize]byte
et hst.Enablement
err error
}{
{"complement mismatch", [entryHeaderSize]byte{0x00, 0xff, 0xca, 0xfe, 0x00, 0x00,
0x0a, 0xf6}, 0,
errors.New("header enablement value is inconsistent")},
{"unexpected revision", [entryHeaderSize]byte{0x00, 0xff, 0xca, 0xfe, 0xff, 0xff}, 0,
errors.New("unexpected revision ffff")},
{"invalid header", [entryHeaderSize]byte{0x00, 0xfe, 0xca, 0xfe}, 0,
errors.New("invalid header 00fecafe")},
{"success high", [entryHeaderSize]byte{0x00, 0xff, 0xca, 0xfe, 0x00, 0x00,
0xff, 0x00}, 0xff, nil},
{"success", [entryHeaderSize]byte{0x00, 0xff, 0xca, 0xfe, 0x00, 0x00,
0x09, 0xf6}, hst.EWayland | hst.EPulse, nil},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
t.Run("encode", func(t *testing.T) {
if tc.err != nil {
return
}
t.Parallel()
if got := entryHeaderEncode(tc.et); *got != tc.data {
t.Errorf("entryHeaderEncode: %x, want %x", *got, tc.data)
}
t.Run("write", func(t *testing.T) {
var buf bytes.Buffer
if err := entryWriteHeader(&buf, tc.et); err != nil {
t.Fatalf("entryWriteHeader: error = %v", err)
}
if got := ([entryHeaderSize]byte)(buf.Bytes()); got != tc.data {
t.Errorf("entryWriteHeader: %x, want %x", got, tc.data)
}
})
})
t.Run("decode", func(t *testing.T) {
t.Parallel()
got, err := entryHeaderDecode(&tc.data)
if !reflect.DeepEqual(err, tc.err) {
t.Fatalf("entryHeaderDecode: error = %#v, want %#v", err, tc.err)
}
if err != nil {
return
}
if got != tc.et {
t.Errorf("entryHeaderDecode: et = %q, want %q", got, tc.et)
}
if got, err = entryReadHeader(bytes.NewReader(tc.data[:])); err != nil {
t.Fatalf("entryReadHeader: error = %#v", err)
} else if got != tc.et {
t.Errorf("entryReadHeader: et = %q, want %q", got, tc.et)
}
})
})
}
}
func TestEntrySizeError(t *testing.T) {
t.Parallel()
testCases := []struct {
name string
err error
want string
}{
{"size only", &EntrySizeError{Size: 0xdeadbeef},
`state entry file is too short`},
{"full", &EntrySizeError{Name: "nonexistent", Size: 0xdeadbeef},
`state entry file "nonexistent" is too short`},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
if got := tc.err.Error(); got != tc.want {
t.Errorf("Error: %s, want %s", got, tc.want)
}
})
}
}
func TestEntryCheckFile(t *testing.T) {
t.Parallel()
testCases := []struct {
name string
fi os.FileInfo
err error
}{
{"dir", &stubFi{name: "dir", isDir: true},
syscall.EISDIR},
{"short", stubFi{name: "short", size: 8},
&EntrySizeError{Name: "short", Size: 8}},
{"success", stubFi{size: 9}, nil},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
if err := entryCheckFile(tc.fi); !reflect.DeepEqual(err, tc.err) {
t.Errorf("entryCheckFile: error = %#v, want %#v", err, tc.err)
}
})
}
}
func TestEntryReadHeader(t *testing.T) {
t.Parallel()
testCases := []struct {
name string
newR func() io.Reader
err error
}{
{"eof", func() io.Reader { return bytes.NewReader([]byte{}) }, io.EOF},
{"short", func() io.Reader { return bytes.NewReader([]byte{0}) }, &EntrySizeError{Size: 1}},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
if _, err := entryReadHeader(tc.newR()); !reflect.DeepEqual(err, tc.err) {
t.Errorf("entryReadHeader: error = %#v, want %#v", err, tc.err)
}
})
}
}
// stubFi partially implements [os.FileInfo] using hardcoded values.
type stubFi struct {
name string
size int64
isDir bool
}
func (fi stubFi) Name() string {
if fi.name == "" {
panic("unreachable")
}
return fi.name
}
func (fi stubFi) Size() int64 {
if fi.size < 0 {
panic("unreachable")
}
return fi.size
}
func (fi stubFi) IsDir() bool { return fi.isDir }
func (fi stubFi) Mode() fs.FileMode { panic("unreachable") }
func (fi stubFi) ModTime() time.Time { panic("unreachable") }
func (fi stubFi) Sys() any { panic("unreachable") }

View File

@@ -1,64 +0,0 @@
package state
import (
"errors"
"maps"
"hakurei.app/hst"
)
var (
ErrDuplicate = errors.New("store contains duplicates")
)
/*
Joiner is the interface that wraps the Join method.
The Join function uses Joiner if available.
*/
type Joiner interface {
Join() (map[hst.ID]*hst.State, error)
}
// Join returns joined state entries of all active identities.
func Join(s Store) (map[hst.ID]*hst.State, error) {
if j, ok := s.(Joiner); ok {
return j.Join()
}
var (
aids []int
entries = make(map[hst.ID]*hst.State)
el int
res map[hst.ID]*hst.State
loadErr error
)
if ln, err := s.List(); err != nil {
return nil, err
} else {
aids = ln
}
for _, aid := range aids {
if _, err := s.Do(aid, func(c Cursor) {
res, loadErr = c.Load()
}); err != nil {
return nil, err
}
if loadErr != nil {
return nil, loadErr
}
// save expected length
el = len(entries) + len(res)
maps.Copy(entries, res)
if len(entries) != el {
return nil, ErrDuplicate
}
}
return entries, nil
}
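Seen from the caller, Join flattens every identity's segment into a single map keyed by instance ID, with ErrDuplicate guarding against the same ID appearing in two segments. A short usage sketch; the function name is invented, Store is the interface defined elsewhere in this package, and fmt is used only for output.
// listInstances demonstrates intended use of Join; it relies only on the
// Store interface and the PID field of hst.State.
func listInstances(s Store) error {
	entries, err := Join(s)
	if err != nil {
		return err // includes ErrDuplicate when two segments share an hst.ID
	}
	for id, st := range entries {
		fmt.Printf("%s pid=%d\n", id.String(), st.PID)
	}
	return nil
}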

View File

@@ -1,161 +0,0 @@
package state
import (
"errors"
"fmt"
"iter"
"os"
"strconv"
"sync"
"hakurei.app/container/check"
"hakurei.app/hst"
"hakurei.app/internal/lockedfile"
)
// stateEntryHandle is a handle on a state entry retrieved from a storeHandle.
// Must only be used while its parent storeHandle.fileMu is held.
type stateEntryHandle struct {
// Error returned while decoding pathname.
// A non-nil value disables stateEntryHandle.
decodeErr error
// Checked path to entry file.
pathname *check.Absolute
hst.ID
}
// open opens the underlying state entry file, returning [hst.AppError] for a non-nil error.
func (eh *stateEntryHandle) open(flag int, perm os.FileMode) (*os.File, error) {
if eh.decodeErr != nil {
return nil, eh.decodeErr
}
if f, err := os.OpenFile(eh.pathname.String(), flag, perm); err != nil {
return nil, &hst.AppError{Step: "open state entry", Err: err}
} else {
return f, nil
}
}
// destroy removes the underlying state entry file, returning [hst.AppError] for a non-nil error.
func (eh *stateEntryHandle) destroy() error {
// destroy does not go through open
if eh.decodeErr != nil {
return eh.decodeErr
}
if err := os.Remove(eh.pathname.String()); err != nil {
return &hst.AppError{Step: "destroy state entry", Err: err}
}
return nil
}
// save encodes [hst.State] and writes it to the underlying file.
// An error is returned if a file already exists with the same identifier.
// save does not validate the embedded [hst.Config].
func (eh *stateEntryHandle) save(state *hst.State) error {
f, err := eh.open(os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
if err != nil {
return err
}
err = entryEncode(f, state)
if closeErr := f.Close(); closeErr != nil && err == nil {
err = &hst.AppError{Step: "close state file", Err: closeErr}
}
return err
}
// load loads and validates the state entry header, and returns the [hst.Enablement] byte.
// For a non-nil v, the full state payload is decoded and stored in the value pointed to by v;
// otherwise only the header is read. load validates the embedded [hst.Config] value.
func (eh *stateEntryHandle) load(v *hst.State) (hst.Enablement, error) {
f, err := eh.open(os.O_RDONLY, 0)
if err != nil {
return 0, err
}
var et hst.Enablement
if v != nil {
et, err = entryDecode(f, v)
if err == nil && v.ID != eh.ID {
err = &hst.AppError{Step: "validate state identifier", Err: os.ErrInvalid,
Msg: fmt.Sprintf("state entry %s has unexpected id %s", eh.ID.String(), v.ID.String())}
}
} else {
et, err = entryDecodeHeader(f)
}
if closeErr := f.Close(); closeErr != nil && err == nil {
err = &hst.AppError{Step: "close state file", Err: closeErr}
}
return et, err
}
// storeHandle is a handle on a stateStore segment.
// Initialised by stateStore.identityHandle.
type storeHandle struct {
// Identity of instances tracked by this segment.
identity int
// Pathname of directory that the segment referred to by storeHandle is rooted in.
path *check.Absolute
// Inter-process mutex to synchronise operations against resources in this segment.
fileMu *lockedfile.Mutex
// Must be held alongside fileMu.
mu sync.Mutex
}
// entries returns an iterator over all stateEntryHandle held in this segment.
// Must be called while holding a lock on mu and fileMu.
// A non-nil error attached to a stateEntryHandle indicates a malformed identifier and is of type [hst.AppError].
// A non-nil error returned by entries is of type [hst.AppError].
func (h *storeHandle) entries() (iter.Seq[*stateEntryHandle], int, error) {
// for error reporting
const step = "read store segment entries"
// read directory contents, should only contain storeMutexName and identifier
var entries []os.DirEntry
if pl, err := os.ReadDir(h.path.String()); err != nil {
return nil, -1, &hst.AppError{Step: step, Err: err}
} else {
entries = pl
}
// expects lock file
l := len(entries)
if l > 0 {
l--
}
return func(yield func(*stateEntryHandle) bool) {
for _, ent := range entries {
var eh = stateEntryHandle{pathname: h.path.Append(ent.Name())}
// this should never happen
if ent.IsDir() {
eh.decodeErr = &hst.AppError{Step: step,
Err: errors.New("unexpected directory " + strconv.Quote(ent.Name()) + " in store")}
goto out
}
// silently skip lock file
if ent.Name() == storeMutexName {
continue
}
// this either indicates a serious bug or external interference
if err := eh.ID.UnmarshalText([]byte(ent.Name())); err != nil {
eh.decodeErr = &hst.AppError{Step: "decode store segment entry", Err: err}
goto out
}
out:
if !yield(&eh) {
break
}
}
}, l, nil
}
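entries must only be consumed while both mu and fileMu are held, and a handle yielded with a non-nil decodeErr is a sentinel rather than a usable entry. A sketch of that locking discipline around the iterator; the wrapper name is invented, and taking mu here rather than in the caller is an assumption.
// withEntries is an illustrative wrapper enforcing the locking rule stated on
// entries; it surfaces per-entry decode errors instead of passing them to f.
func withEntries(h *storeHandle, f func(*stateEntryHandle)) error {
	h.mu.Lock()
	defer h.mu.Unlock()
	unlock, err := h.fileMu.Lock()
	if err != nil {
		return err
	}
	defer unlock()
	entries, _, err := h.entries()
	if err != nil {
		return err
	}
	for eh := range entries {
		if eh.decodeErr != nil {
			return eh.decodeErr
		}
		f(eh)
	}
	return nil
}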

View File

@@ -1,259 +0,0 @@
package state
import (
"errors"
"iter"
"os"
"reflect"
"slices"
"strings"
"syscall"
"testing"
"hakurei.app/container/check"
"hakurei.app/container/stub"
"hakurei.app/hst"
"hakurei.app/internal/lockedfile"
)
func TestStateEntryHandle(t *testing.T) {
t.Parallel()
t.Run("lockout", func(t *testing.T) {
t.Parallel()
wantErr := func() error { return stub.UniqueError(0) }
eh := stateEntryHandle{decodeErr: wantErr(), pathname: check.MustAbs("/proc/nonexistent")}
if _, err := eh.open(-1, 0); !reflect.DeepEqual(err, wantErr()) {
t.Errorf("open: error = %v, want %v", err, wantErr())
}
if err := eh.destroy(); !reflect.DeepEqual(err, wantErr()) {
t.Errorf("destroy: error = %v, want %v", err, wantErr())
}
if err := eh.save(nil); !reflect.DeepEqual(err, wantErr()) {
t.Errorf("save: error = %v, want %v", err, wantErr())
}
if _, err := eh.load(nil); !reflect.DeepEqual(err, wantErr()) {
t.Errorf("load: error = %v, want %v", err, wantErr())
}
})
t.Run("od", func(t *testing.T) {
t.Parallel()
{
eh := stateEntryHandle{pathname: check.MustAbs(t.TempDir()).Append("entry")}
if f, err := eh.open(os.O_CREATE|syscall.O_EXCL, 0); err != nil {
t.Fatalf("open: error = %v", err)
} else if err = f.Close(); err != nil {
t.Errorf("Close: error = %v", err)
}
if err := eh.destroy(); err != nil {
t.Fatalf("destroy: error = %v", err)
}
}
t.Run("nonexistent", func(t *testing.T) {
t.Parallel()
eh := stateEntryHandle{pathname: check.MustAbs("/proc/nonexistent")}
wantErrOpen := &hst.AppError{Step: "open state entry",
Err: &os.PathError{Op: "open", Path: "/proc/nonexistent", Err: syscall.ENOENT}}
if _, err := eh.open(os.O_CREATE|syscall.O_EXCL, 0); !reflect.DeepEqual(err, wantErrOpen) {
t.Errorf("open: error = %#v, want %#v", err, wantErrOpen)
}
wantErrDestroy := &hst.AppError{Step: "destroy state entry",
Err: &os.PathError{Op: "remove", Path: "/proc/nonexistent", Err: syscall.ENOENT}}
if err := eh.destroy(); !reflect.DeepEqual(err, wantErrDestroy) {
t.Errorf("destroy: error = %#v, want %#v", err, wantErrDestroy)
}
})
})
t.Run("saveload", func(t *testing.T) {
t.Parallel()
eh := stateEntryHandle{pathname: check.MustAbs(t.TempDir()).Append("entry"),
ID: newTemplateState().ID}
if err := eh.save(newTemplateState()); err != nil {
t.Fatalf("save: error = %v", err)
}
t.Run("validate", func(t *testing.T) {
t.Parallel()
t.Run("internal", func(t *testing.T) {
t.Parallel()
var got hst.State
if f, err := os.Open(eh.pathname.String()); err != nil {
t.Fatal(err.Error())
} else if _, err = entryDecode(f, &got); err != nil {
t.Fatalf("entryDecode: error = %v", err)
} else if err = f.Close(); err != nil {
t.Fatal(err.Error())
}
if want := newTemplateState(); !reflect.DeepEqual(&got, want) {
t.Errorf("entryDecode: %#v, want %#v", &got, want)
}
})
t.Run("load header only", func(t *testing.T) {
t.Parallel()
if et, err := eh.load(nil); err != nil {
t.Fatalf("load: error = %v", err)
} else if want := newTemplateState().Enablements.Unwrap(); et != want {
t.Errorf("load: et = %x, want %x", et, want)
}
})
t.Run("load", func(t *testing.T) {
t.Parallel()
var got hst.State
if _, err := eh.load(&got); err != nil {
t.Fatalf("load: error = %v", err)
} else if want := newTemplateState(); !reflect.DeepEqual(&got, want) {
t.Errorf("load: %#v, want %#v", &got, want)
}
})
t.Run("load inconsistent", func(t *testing.T) {
t.Parallel()
wantErr := &hst.AppError{Step: "validate state identifier", Err: os.ErrInvalid,
Msg: "state entry 00000000000000000000000000000000 has unexpected id aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}
ehi := stateEntryHandle{pathname: eh.pathname}
if _, err := ehi.load(new(hst.State)); !reflect.DeepEqual(err, wantErr) {
t.Errorf("load: error = %#v, want %#v", err, wantErr)
}
})
})
})
}
func TestStoreHandle(t *testing.T) {
t.Parallel()
testCases := []struct {
name string
ents [2][]string
want func(newEh func(err error, name string) *stateEntryHandle) []*stateEntryHandle
ext func(t *testing.T, entries iter.Seq[*stateEntryHandle], n int)
}{
{"errors", [2][]string{{
"e81eb203b4190ac5c3842ef44d429945",
"lock",
"f0-invalid",
}, {
"f1-directory",
}}, func(newEh func(err error, name string) *stateEntryHandle) []*stateEntryHandle {
return []*stateEntryHandle{
newEh(nil, "e81eb203b4190ac5c3842ef44d429945"),
newEh(&hst.AppError{Step: "decode store segment entry",
Err: hst.IdentifierDecodeError{Err: hst.ErrIdentifierLength}}, "f0-invalid"),
newEh(&hst.AppError{Step: "read store segment entries",
Err: errors.New(`unexpected directory "f1-directory" in store`)}, "f1-directory"),
}
}, nil},
{"success", [2][]string{{
"e81eb203b4190ac5c3842ef44d429945",
"7958cfbb9272d9cf9cfd61c85afa13f1",
"d0b5f7446dd5bd3424ff2f7ac9cace1e",
"c8c8e2c4aea5c32fe47240ff8caa874e",
"fa0d30b249d80f155a1f80ceddcc32f2",
"lock",
}}, func(newEh func(err error, name string) *stateEntryHandle) []*stateEntryHandle {
return []*stateEntryHandle{
newEh(nil, "7958cfbb9272d9cf9cfd61c85afa13f1"),
newEh(nil, "c8c8e2c4aea5c32fe47240ff8caa874e"),
newEh(nil, "d0b5f7446dd5bd3424ff2f7ac9cace1e"),
newEh(nil, "e81eb203b4190ac5c3842ef44d429945"),
newEh(nil, "fa0d30b249d80f155a1f80ceddcc32f2"),
}
}, func(t *testing.T, entries iter.Seq[*stateEntryHandle], n int) {
if n != 5 {
t.Fatalf("entries: n = %d", n)
}
// check partial drain
for range entries {
break
}
}},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
p := check.MustAbs(t.TempDir()).Append("segment")
if err := os.Mkdir(p.String(), 0700); err != nil {
t.Fatal(err.Error())
}
createEntries(t, p, tc.ents)
var got []*stateEntryHandle
if entries, n, err := (&storeHandle{
identity: -0xbad,
path: p,
fileMu: lockedfile.MutexAt(p.Append("lock").String()),
}).entries(); err != nil {
t.Fatalf("entries: error = %v", err)
} else {
got = slices.AppendSeq(make([]*stateEntryHandle, 0, n), entries)
if tc.ext != nil {
tc.ext(t, entries, n)
}
}
slices.SortFunc(got, func(a, b *stateEntryHandle) int { return strings.Compare(a.pathname.String(), b.pathname.String()) })
want := tc.want(func(err error, name string) *stateEntryHandle {
eh := stateEntryHandle{decodeErr: err, pathname: p.Append(name)}
if err == nil {
if err = eh.UnmarshalText([]byte(name)); err != nil {
t.Fatalf("UnmarshalText: error = %v", err)
}
}
return &eh
})
if !reflect.DeepEqual(got, want) {
t.Errorf("entries: %q, want %q", got, want)
}
})
}
t.Run("nonexistent", func(t *testing.T) {
var wantErr = &hst.AppError{Step: "read store segment entries", Err: &os.PathError{
Op: "open",
Path: "/proc/nonexistent",
Err: syscall.ENOENT,
}}
if _, _, err := (&storeHandle{
identity: -0xbad,
path: check.MustAbs("/proc/nonexistent"),
}).entries(); !reflect.DeepEqual(err, wantErr) {
t.Fatalf("entries: error = %#v, want %#v", err, wantErr)
}
})
}
// createEntries creates file and directory entries in the specified prefix.
func createEntries(t *testing.T, prefix *check.Absolute, ents [2][]string) {
for _, s := range ents[0] {
if f, err := os.OpenFile(prefix.Append(s).String(), os.O_CREATE|os.O_EXCL, 0600); err != nil {
t.Fatal(err.Error())
} else if err = f.Close(); err != nil {
t.Fatal(err.Error())
}
}
for _, s := range ents[1] {
if err := os.Mkdir(prefix.Append(s).String(), 0700); err != nil {
t.Fatal(err.Error())
}
}
}

View File

@@ -1,131 +0,0 @@
// Package state provides cross-process state tracking for hakurei container instances.
package state
import (
"strconv"
"hakurei.app/container/check"
"hakurei.app/hst"
"hakurei.app/message"
)
/* this provides an implementation of Store on top of the improved state tracking to ease in the changes */
type Store interface {
// Do calls f exactly once and ensures store exclusivity until f returns.
// Returns whether f is called and any errors during the locking process.
// Cursor provided to f becomes invalid as soon as f returns.
Do(identity int, f func(c Cursor)) (ok bool, err error)
// List queries the store and returns a list of identities known to the store.
// Note that some or all returned identities might not have any active apps.
List() (identities []int, err error)
}
func (s *stateStore) Do(identity int, f func(c Cursor)) (bool, error) {
if h, err := s.identityHandle(identity); err != nil {
return false, err
} else {
return h.do(f)
}
}
// storeAdapter satisfies [Store] via stateStore.
type storeAdapter struct {
msg message.Msg
*stateStore
}
func (s storeAdapter) List() ([]int, error) {
segments, n, err := s.segments()
if err != nil {
return nil, err
}
identities := make([]int, 0, n)
for si := range segments {
if si.err != nil {
if m, ok := message.GetMessage(si.err); ok {
s.msg.Verbose(m)
} else {
// unreachable
return nil, si.err
}
continue
}
identities = append(identities, si.identity)
}
return identities, nil
}
// NewMulti returns an instance of the multi-file store.
func NewMulti(msg message.Msg, prefix *check.Absolute) Store {
return storeAdapter{msg, newStore(prefix.Append("state"))}
}
// Cursor provides access to the store of an identity.
type Cursor interface {
Save(state *hst.State) error
Destroy(id hst.ID) error
Load() (map[hst.ID]*hst.State, error)
Len() (int, error)
}
// do implements stateStore.Do on storeHandle.
func (h *storeHandle) do(f func(c Cursor)) (bool, error) {
if unlock, err := h.fileMu.Lock(); err != nil {
return false, &hst.AppError{Step: "acquire lock on store segment " + strconv.Itoa(h.identity), Err: err}
} else {
defer unlock()
}
f(h)
return true, nil
}
/* these compatibility methods must only be called while fileMu is held */
func (h *storeHandle) Save(state *hst.State) error {
return (&stateEntryHandle{nil, h.path.Append(state.ID.String()), state.ID}).save(state)
}
func (h *storeHandle) Destroy(id hst.ID) error {
return (&stateEntryHandle{nil, h.path.Append(id.String()), id}).destroy()
}
func (h *storeHandle) Load() (map[hst.ID]*hst.State, error) {
entries, n, err := h.entries()
if err != nil {
return nil, err
}
r := make(map[hst.ID]*hst.State, n)
for eh := range entries {
if eh.decodeErr != nil {
err = eh.decodeErr
break
}
var s hst.State
if _, err = eh.load(&s); err != nil {
break
}
r[eh.ID] = &s
}
return r, err
}
func (h *storeHandle) Len() (int, error) {
entries, _, err := h.entries()
if err != nil {
return -1, err
}
var n int
for eh := range entries {
if eh.decodeErr != nil {
err = eh.decodeErr
}
n++
}
return n, err
}

View File

@@ -1,120 +0,0 @@
package state_test
import (
"log"
"math/rand"
"reflect"
"slices"
"testing"
"time"
"hakurei.app/container/check"
"hakurei.app/hst"
"hakurei.app/internal/app/state"
"hakurei.app/message"
)
func TestMulti(t *testing.T) {
s := state.NewMulti(message.NewMsg(log.New(log.Writer(), "multi: ", 0)), check.MustAbs(t.TempDir()))
t.Run("list empty store", func(t *testing.T) {
if identities, err := s.List(); err != nil {
t.Fatalf("List: error = %v", err)
} else if len(identities) != 0 {
t.Fatalf("List: identities = %#v", identities)
}
})
const (
insertEntryChecked = iota
insertEntryNoCheck
insertEntryOtherApp
tl
)
var tc [tl]hst.State
for i := 0; i < tl; i++ {
if err := hst.NewInstanceID(&tc[i].ID); err != nil {
t.Fatalf("cannot create dummy state: %v", err)
}
tc[i].PID = rand.Int()
tc[i].Config = hst.Template()
tc[i].Time = time.Now()
}
do := func(identity int, f func(c state.Cursor)) {
if ok, err := s.Do(identity, f); err != nil {
t.Fatalf("Do: ok = %v, error = %v", ok, err)
}
}
insert := func(i, identity int) {
do(identity, func(c state.Cursor) {
if err := c.Save(&tc[i]); err != nil {
t.Fatalf("Save: error = %v", err)
}
})
}
check := func(i, identity int) {
do(identity, func(c state.Cursor) {
if entries, err := c.Load(); err != nil {
t.Fatalf("Load: error = %v", err)
} else if got, ok := entries[tc[i].ID]; !ok {
t.Fatalf("Load: entry %s missing", &tc[i].ID)
} else {
got.Time = tc[i].Time
if !reflect.DeepEqual(got, &tc[i]) {
t.Fatalf("Load: entry %s got %#v, want %#v", &tc[i].ID, got, &tc[i])
}
}
})
}
// insert entry checked
insert(insertEntryChecked, 0)
check(insertEntryChecked, 0)
// insert entry unchecked
insert(insertEntryNoCheck, 0)
// insert entry different identity
insert(insertEntryOtherApp, 1)
check(insertEntryOtherApp, 1)
// check previous insertion
check(insertEntryNoCheck, 0)
// list identities
if identities, err := s.List(); err != nil {
t.Fatalf("List: error = %v", err)
} else {
slices.Sort(identities)
want := []int{0, 1}
if !slices.Equal(identities, want) {
t.Fatalf("List() = %#v, want %#v", identities, want)
}
}
// join store
if entries, err := state.Join(s); err != nil {
t.Fatalf("Join: error = %v", err)
} else if len(entries) != 3 {
t.Fatalf("Join(s) = %#v", entries)
}
// clear identity 1
do(1, func(c state.Cursor) {
if err := c.Destroy(tc[insertEntryOtherApp].ID); err != nil {
t.Fatalf("Destroy: error = %v", err)
}
})
do(1, func(c state.Cursor) {
if l, err := c.Len(); err != nil {
t.Fatalf("Len: error = %v", err)
} else if l != 0 {
t.Fatalf("Len: %d, want 0", l)
}
})
}

View File

@@ -1,162 +0,0 @@
package state
import (
"errors"
"io/fs"
"iter"
"os"
"strconv"
"sync"
"syscall"
"hakurei.app/container/check"
"hakurei.app/hst"
"hakurei.app/internal/lockedfile"
)
// storeMutexName is the name of the lock file backing the [lockedfile.Mutex] of a stateStore and of each storeHandle.
const storeMutexName = "lock"
// A stateStore keeps track of [hst.State] via a well-known filesystem accessible to all hakurei priv-side processes.
// Access to store data and related resources is synchronised on a per-segment basis via storeHandle.
type stateStore struct {
// Pathname of the directory that the store is rooted in.
base *check.Absolute
// All currently known instances of storeHandle, keyed by their identity.
handles sync.Map
// Inter-process mutex to synchronise operations against the entire store.
// Held during List and when initialising previously unknown identities during Do.
// Must not be accessed directly. Callers should use the bigLock method instead.
fileMu *lockedfile.Mutex
// For creating the base directory.
mkdirOnce sync.Once
// Error value recorded via mkdirOnce.
mkdirErr error
}
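
// The resulting on-disk layout, sketched for illustration (the concrete base
// directory is chosen by the caller of newStore):
//
//	<base>/lock                 inter-process mutex for the whole store
//	<base>/<identity>/lock      per-segment mutex of the corresponding storeHandle
//	<base>/<identity>/<hst.ID>  one entry per tracked instance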
// bigLock acquires fileMu on stateStore.
// A non-nil error returned by bigLock is of type [hst.AppError].
func (s *stateStore) bigLock() (unlock func(), err error) {
s.mkdirOnce.Do(func() { s.mkdirErr = os.MkdirAll(s.base.String(), 0700) })
if s.mkdirErr != nil {
return nil, &hst.AppError{Step: "create state store directory", Err: s.mkdirErr}
}
if unlock, err = s.fileMu.Lock(); err != nil {
return nil, &hst.AppError{Step: "acquire lock on the state store", Err: err}
}
return
}
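
// withBigLock is a hypothetical convenience wrapper sketching the intended
// calling pattern; identityHandle and segments below inline the same shape.
func (s *stateStore) withBigLock(f func() error) error {
	unlock, err := s.bigLock()
	if err != nil {
		return err
	}
	defer unlock()
	return f()
}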
// identityHandle loads or initialises a storeHandle for identity.
// A non-nil error returned by identityHandle is of type [hst.AppError].
func (s *stateStore) identityHandle(identity int) (*storeHandle, error) {
h := new(storeHandle)
h.mu.Lock()
if v, ok := s.handles.LoadOrStore(identity, h); ok {
h = v.(*storeHandle)
} else {
// acquire big lock to initialise previously unknown segment handle
if unlock, err := s.bigLock(); err != nil {
return nil, err
} else {
defer unlock()
}
h.identity = identity
h.path = s.base.Append(strconv.Itoa(identity))
h.fileMu = lockedfile.MutexAt(h.path.Append(storeMutexName).String())
err := os.MkdirAll(h.path.String(), 0700)
h.mu.Unlock()
if err != nil && !errors.Is(err, fs.ErrExist) {
// handle methods will likely return ENOENT
s.handles.CompareAndDelete(identity, h)
return nil, &hst.AppError{Step: "create store segment directory", Err: err}
}
}
return h, nil
}
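
// The expected call path, sketched for illustration: each operation resolves
// its segment handle first, then synchronises on that handle only, so
// unrelated identities never contend on the same segment lock.
//
//	Store.Do(identity, f)
//	  -> stateStore.identityHandle(identity)   // big lock only for a previously unknown identity
//	     -> storeHandle.do(f)                  // per-segment file lock held around f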
// segmentIdentity is produced by the iterator returned by stateStore.segments.
type segmentIdentity struct {
// Identity of the current segment.
identity int
// Error encountered while processing this segment.
err error
}
// segments returns an iterator over all segmentIdentity known to the store.
// To obtain a storeHandle on a segment, caller must then call identityHandle.
// A non-nil error returned by segments is of type [hst.AppError].
func (s *stateStore) segments() (iter.Seq[segmentIdentity], int, error) {
// read directory contents, which should only contain storeMutexName and identity directories
var entries []os.DirEntry
// acquire big lock to read store segment list
if unlock, err := s.bigLock(); err != nil {
return nil, -1, err
} else {
entries, err = os.ReadDir(s.base.String())
unlock()
if err != nil && !errors.Is(err, os.ErrNotExist) {
return nil, -1, &hst.AppError{Step: "read store segments", Err: err}
}
}
// the lock file is expected to be present; exclude it from the count hint
l := len(entries)
if l > 0 {
l--
}
return func(yield func(segmentIdentity) bool) {
// for error reporting
const step = "process store segment"
for _, ent := range entries {
si := segmentIdentity{identity: -1}
// should only be the big lock
if !ent.IsDir() {
if ent.Name() == storeMutexName {
continue
}
// this should never happen
si.err = &hst.AppError{Step: step, Err: syscall.EISDIR,
Msg: "skipped non-directory entry " + strconv.Quote(ent.Name())}
goto out
}
// a failure path here indicates either a serious bug or external interference
if v, err := strconv.Atoi(ent.Name()); err != nil {
si.err = &hst.AppError{Step: step, Err: err,
Msg: "skipped non-identity entry " + strconv.Quote(ent.Name())}
goto out
} else if v < hst.IdentityMin || v > hst.IdentityMax {
si.err = &hst.AppError{Step: step, Err: syscall.ERANGE,
Msg: "skipped out of bounds entry " + strconv.Itoa(v)}
goto out
} else {
si.identity = v
}
out:
if !yield(si) {
break
}
}
}, l, nil
}
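
// collectIdentities is a hypothetical helper sketching how the segments
// iterator is meant to be drained; storeAdapter.List follows the same shape
// but additionally reports skipped segments through message.
func (s *stateStore) collectIdentities() ([]int, error) {
	segments, n, err := s.segments()
	if err != nil {
		return nil, err
	}
	identities := make([]int, 0, n)
	for si := range segments {
		if si.err != nil {
			// skipped segments carry an *hst.AppError describing the reason
			continue
		}
		identities = append(identities, si.identity)
	}
	return identities, nil
}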
// newStore returns the address of a new instance of stateStore.
// Multiple instances of stateStore rooted in the same directory are supported, but discouraged.
func newStore(base *check.Absolute) *stateStore {
return &stateStore{base: base, fileMu: lockedfile.MutexAt(base.Append(storeMutexName).String())}
}

View File

@@ -1,254 +0,0 @@
package state
import (
"cmp"
"iter"
"os"
"reflect"
"slices"
"strconv"
"strings"
"syscall"
"testing"
"hakurei.app/container/check"
"hakurei.app/hst"
)
func TestStateStoreBigLock(t *testing.T) {
t.Parallel()
{
s := newStore(check.MustAbs(t.TempDir()).Append("state"))
for i := 0; i < 2; i++ { // check once behaviour
if unlock, err := s.bigLock(); err != nil {
t.Fatalf("bigLock: error = %v", err)
} else {
unlock()
}
}
}
t.Run("mkdir", func(t *testing.T) {
t.Parallel()
wantErr := &hst.AppError{Step: "create state store directory",
Err: &os.PathError{Op: "mkdir", Path: "/proc/nonexistent", Err: syscall.ENOENT}}
for i := 0; i < 2; i++ { // check once behaviour
if _, err := newStore(check.MustAbs("/proc/nonexistent")).bigLock(); !reflect.DeepEqual(err, wantErr) {
t.Errorf("bigLock: error = %#v, want %#v", err, wantErr)
}
}
})
t.Run("access", func(t *testing.T) {
t.Parallel()
base := check.MustAbs(t.TempDir()).Append("inaccessible")
if err := os.MkdirAll(base.String(), 0); err != nil {
t.Fatal(err.Error())
}
wantErr := &hst.AppError{Step: "acquire lock on the state store",
Err: &os.PathError{Op: "open", Path: base.Append(storeMutexName).String(), Err: syscall.EACCES}}
if _, err := newStore(base).bigLock(); !reflect.DeepEqual(err, wantErr) {
t.Errorf("bigLock: error = %#v, want %#v", err, wantErr)
}
})
}
func TestStateStoreIdentityHandle(t *testing.T) {
t.Parallel()
t.Run("loadstore", func(t *testing.T) {
t.Parallel()
s := newStore(check.MustAbs(t.TempDir()).Append("store"))
var handleAddr [8]*storeHandle
checkHandle := func(identity int, load bool) {
if h, err := s.identityHandle(identity); err != nil {
t.Fatalf("identityHandle: error = %v", err)
} else if load != (handleAddr[identity] != nil) {
t.Fatalf("identityHandle: load = %v, want %v", load, handleAddr[identity] != nil)
} else if !load {
handleAddr[identity] = h
if h.identity != identity {
t.Errorf("identityHandle: identity = %d, want %d", h.identity, identity)
}
} else if h != handleAddr[identity] {
t.Fatalf("identityHandle: %p, want %p", h, handleAddr[identity])
}
}
checkHandle(0, false)
checkHandle(1, false)
checkHandle(2, false)
checkHandle(3, false)
checkHandle(7, false)
checkHandle(7, true)
checkHandle(2, true)
checkHandle(1, true)
checkHandle(2, true)
checkHandle(0, true)
})
t.Run("access", func(t *testing.T) {
t.Parallel()
base := check.MustAbs(t.TempDir()).Append("inaccessible")
if err := os.MkdirAll(base.String(), 0); err != nil {
t.Fatal(err.Error())
}
wantErr := &hst.AppError{Step: "acquire lock on the state store",
Err: &os.PathError{Op: "open", Path: base.Append(storeMutexName).String(), Err: syscall.EACCES}}
if _, err := newStore(base).identityHandle(0); !reflect.DeepEqual(err, wantErr) {
t.Errorf("identityHandle: error = %#v, want %#v", err, wantErr)
}
})
t.Run("access segment", func(t *testing.T) {
t.Parallel()
base := check.MustAbs(t.TempDir()).Append("inaccessible")
if err := os.MkdirAll(base.String(), 0700); err != nil {
t.Fatal(err.Error())
}
if f, err := os.Create(base.Append(storeMutexName).String()); err != nil {
t.Fatal(err.Error())
} else if err = f.Close(); err != nil {
t.Fatal(err.Error())
}
if err := os.Chmod(base.String(), 0100); err != nil {
t.Fatal(err.Error())
}
t.Cleanup(func() {
if err := os.Chmod(base.String(), 0700); err != nil {
t.Fatal(err.Error())
}
})
wantErr := &hst.AppError{Step: "create store segment directory",
Err: &os.PathError{Op: "mkdir", Path: base.Append("0").String(), Err: syscall.EACCES}}
if _, err := newStore(base).identityHandle(0); !reflect.DeepEqual(err, wantErr) {
t.Errorf("identityHandle: error = %#v, want %#v", err, wantErr)
}
})
}
func TestStateStoreSegments(t *testing.T) {
t.Parallel()
testCases := []struct {
name string
ents [2][]string
want []segmentIdentity
ext func(t *testing.T, segments iter.Seq[segmentIdentity], n int)
}{
{"errors", [2][]string{{
"f0-invalid-file",
}, {
"f1-invalid-syntax",
"9999",
"16384",
}}, []segmentIdentity{
{-1, &hst.AppError{Step: "process store segment", Err: syscall.EISDIR,
Msg: `skipped non-directory entry "f0-invalid-file"`}},
{-1, &hst.AppError{Step: "process store segment", Err: syscall.ERANGE,
Msg: `skipped out of bounds entry 16384`}},
{-1, &hst.AppError{Step: "process store segment",
Err: &strconv.NumError{Func: "Atoi", Num: "f1-invalid-syntax", Err: strconv.ErrSyntax},
Msg: `skipped non-identity entry "f1-invalid-syntax"`}},
{9999, nil},
}, nil},
{"success", [2][]string{{
"lock",
}, {
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"9",
"13",
"20",
"31",
"197",
}}, []segmentIdentity{
{0, nil},
{1, nil},
{2, nil},
{3, nil},
{4, nil},
{5, nil},
{6, nil},
{7, nil},
{9, nil},
{13, nil},
{20, nil},
{31, nil},
{197, nil},
}, func(t *testing.T, segments iter.Seq[segmentIdentity], n int) {
if n != 13 {
t.Fatalf("segments: n = %d", n)
}
// check partial drain
for range segments {
break
}
}},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
base := check.MustAbs(t.TempDir()).Append("store")
if err := os.Mkdir(base.String(), 0700); err != nil {
t.Fatal(err.Error())
}
createEntries(t, base, tc.ents)
var got []segmentIdentity
if segments, n, err := newStore(base).segments(); err != nil {
t.Fatalf("segments: error = %v", err)
} else {
got = slices.AppendSeq(make([]segmentIdentity, 0, n), segments)
if tc.ext != nil {
tc.ext(t, segments, n)
}
}
slices.SortFunc(got, func(a, b segmentIdentity) int {
if a.identity == b.identity {
return strings.Compare(a.err.Error(), b.err.Error())
}
return cmp.Compare(a.identity, b.identity)
})
if !reflect.DeepEqual(got, tc.want) {
t.Errorf("segments: %#v, want %#v", got, tc.want)
}
})
}
t.Run("access", func(t *testing.T) {
t.Parallel()
base := check.MustAbs(t.TempDir()).Append("inaccessible")
if err := os.MkdirAll(base.String(), 0); err != nil {
t.Fatal(err.Error())
}
wantErr := &hst.AppError{Step: "acquire lock on the state store",
Err: &os.PathError{Op: "open", Path: base.Append(storeMutexName).String(), Err: syscall.EACCES}}
if _, _, err := newStore(base).segments(); !reflect.DeepEqual(err, wantErr) {
t.Errorf("segments: error = %#v, want %#v", err, wantErr)
}
})
}