5 Commits

6931ad95c3 internal/outcome/shim: EOF as exit request fallback
In some cases the signal might be delivered before the signal handler is installed, and synchronising against such a case is too expensive. Instead, treat the setup pipe being closed as a fallback for the regular exit request. This change also moves installation of the signal handler earlier.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2025-11-02 04:41:26 +09:00
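A minimal sketch of the fallback described above; the gob decoder, pipe wiring, and exit code below are hypothetical stand-ins rather than the shim's actual API:

package main

import (
	"encoding/gob"
	"errors"
	"io"
	"os"
)

// exitRequest is a hypothetical exit code standing in for the real constant.
const exitRequest = 254

// receiveSetup decodes a setup payload from the pipe shared with the parent.
// A closed pipe surfaces as io.EOF from Decode and doubles as an exit request,
// covering the case where the signal fired before its handler was installed.
func receiveSetup(setup io.Reader, v any) {
	if err := gob.NewDecoder(setup).Decode(v); err != nil {
		if errors.Is(err, io.EOF) {
			os.Exit(exitRequest)
		}
		panic(err)
	}
}

func main() {
	r, w, _ := os.Pipe()
	_ = w.Close() // parent went away without writing: exit is requested
	var params struct{ Verbose bool }
	receiveSetup(r, &params) // exits with exitRequest
}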
2ba599b399 internal/outcome/process: use new store interface
This change also spawns the shim before committing system state, leaving it blocked on the setup pipe. The internal/outcome/process structure is entirely reworked to be more readable and less error-prone, while enabling basic performance measurements. A long-standing bug where the segment lock was not held during Commit is also resolved.

Closes #19.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2025-11-02 04:25:45 +09:00
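The locking fix can be sketched as follows; segmentHandle, Save, and commitSystem are hypothetical stand-ins for the store and system interfaces, not the actual API:

package main

import "sync"

// hypothetical stand-ins for the store segment and system setup
type segmentHandle struct{ mu sync.Mutex }

func (h *segmentHandle) Lock() (unlock func(), err error) {
	h.mu.Lock()
	return h.mu.Unlock, nil
}
func (h *segmentHandle) Save() error { return nil }
func commitSystem() error           { return nil }

// commitWithLock holds the segment lock across both the state save and the
// system commit, so no other instance can observe one without the other.
func commitWithLock(h *segmentHandle) error {
	unlock, err := h.Lock()
	if err != nil {
		return err
	}
	defer unlock()
	if err := h.Save(); err != nil {
		// saving failed: never commit system state for a doomed instance
		return err
	}
	return commitSystem()
}

func main() { _ = commitWithLock(new(segmentHandle)) }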
d3d3417125 internal/outcome/process: relocate start and serve
This is useful for reordering these operations for further cleanup.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2025-11-01 19:14:59 +09:00
651cdf9ccb internal/outcome: remove guard on main
This is no longer exported, so such a check is pointless.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2025-10-31 22:58:26 +09:00
68ff0a2ba6 container/params: expose pipe
This increases flexibility in how the caller handles the I/O, and removes the reliance on a finalizer.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2025-10-31 22:39:02 +09:00
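A sketch of the caller-side pattern this change enables, assuming only the Setup signature shown in the diffs below; the timeout and payload are illustrative:

package main

import (
	"encoding/gob"
	"os"
	"time"

	"hakurei.app/container"
)

// sendParams owns the raw pipe end: it chooses the write deadline and closes
// the file explicitly, instead of leaving cleanup to a finalizer.
func sendParams(payload any) error {
	extraFiles := make([]*os.File, 0, 1)
	_, w, err := container.Setup(&extraFiles)
	if err != nil {
		return err
	}
	if err = w.SetDeadline(time.Now().Add(5 * time.Second)); err != nil {
		_ = w.Close()
		return err
	}
	err = gob.NewEncoder(w).Encode(payload)
	_ = w.Close()
	return err
}

func main() { _ = sendParams(struct{ Verbose bool }{true}) }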
10 changed files with 432 additions and 317 deletions


@@ -25,6 +25,9 @@ const (
     // CancelSignal is the signal expected by container init on context cancel.
     // A custom [Container.Cancel] function must eventually deliver this signal.
     CancelSignal = SIGUSR2
+
+    // Timeout for writing initParams to Container.setup.
+    initSetupTimeout = 5 * time.Second
 )
 
 type (
@@ -37,8 +40,8 @@
         // with behaviour identical to its [exec.Cmd] counterpart.
         ExtraFiles []*os.File
 
-        // param encoder for shim and init
-        setup *gob.Encoder
+        // param pipe for shim and init
+        setup *os.File
         // cancels cmd
         cancel context.CancelFunc
         // closed after Wait returns
@@ -228,10 +231,10 @@ func (p *Container) Start() error {
     }
 
     // place setup pipe before user supplied extra files, this is later restored by init
-    if fd, e, err := Setup(&p.cmd.ExtraFiles); err != nil {
+    if fd, f, err := Setup(&p.cmd.ExtraFiles); err != nil {
        return &StartError{true, "set up params stream", err, false, false}
     } else {
-        p.setup = e
+        p.setup = f
        p.cmd.Env = []string{setupEnv + "=" + strconv.Itoa(fd)}
     }
     p.cmd.ExtraFiles = append(p.cmd.ExtraFiles, p.ExtraFiles...)
@@ -310,6 +313,9 @@ func (p *Container) Serve() error {
     setup := p.setup
     p.setup = nil
+    if err := setup.SetDeadline(time.Now().Add(initSetupTimeout)); err != nil {
+        return &StartError{true, "set init pipe deadline", err, false, true}
+    }
 
     if p.Path == nil {
         p.cancel()
@@ -324,15 +330,14 @@
         p.SeccompRules = make([]seccomp.NativeRule, 0)
     }
-    err := setup.Encode(
-        &initParams{
-            p.Params,
-            Getuid(),
-            Getgid(),
-            len(p.ExtraFiles),
-            p.msg.IsVerbose(),
-        },
-    )
+    err := gob.NewEncoder(setup).Encode(&initParams{
+        p.Params,
+        Getuid(),
+        Getgid(),
+        len(p.ExtraFiles),
+        p.msg.IsVerbose(),
+    })
+    _ = setup.Close()
     if err != nil {
         p.cancel()
     }


@@ -9,13 +9,13 @@ import (
 )
 
 // Setup appends the read end of a pipe for setup params transmission and returns its fd.
-func Setup(extraFiles *[]*os.File) (int, *gob.Encoder, error) {
+func Setup(extraFiles *[]*os.File) (int, *os.File, error) {
     if r, w, err := os.Pipe(); err != nil {
         return -1, nil, err
     } else {
         fd := 3 + len(*extraFiles)
         *extraFiles = append(*extraFiles, r)
-        return fd, gob.NewEncoder(w), nil
+        return fd, w, nil
     }
 }
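For context, the receiving side of such a pipe might look like the following sketch, assuming the fd is passed through an environment variable as in the Start hunk above; the variable name and error text are illustrative:

package main

import (
	"encoding/gob"
	"errors"
	"fmt"
	"os"
	"strconv"
)

// receiveParams reopens the inherited pipe fd named by envKey and decodes
// the setup payload from it.
func receiveParams(envKey string, v any) error {
	s, ok := os.LookupEnv(envKey)
	if !ok {
		return errors.New(envKey + " not set")
	}
	fd, err := strconv.Atoi(s)
	if err != nil {
		return err
	}
	f := os.NewFile(uintptr(fd), "setup")
	if f == nil {
		return errors.New("invalid setup fd")
	}
	defer f.Close()
	return gob.NewDecoder(f).Decode(v)
}

func main() {
	var params struct{ Verbose bool }
	if err := receiveParams("SETUP_FD", &params); err != nil {
		fmt.Println(err)
	}
}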


@@ -1,6 +1,7 @@
 package container_test
 
 import (
+    "encoding/gob"
     "errors"
     "os"
     "slices"
@@ -59,12 +60,16 @@ func TestSetupReceive(t *testing.T) {
     encoderDone := make(chan error, 1)
     extraFiles := make([]*os.File, 0, 1)
-    if fd, encoder, err := container.Setup(&extraFiles); err != nil {
+    deadline, _ := t.Deadline()
+    if fd, f, err := container.Setup(&extraFiles); err != nil {
         t.Fatalf("Setup: error = %v", err)
     } else if fd != 3 {
         t.Fatalf("Setup: fd = %d, want 3", fd)
     } else {
-        go func() { encoderDone <- encoder.Encode(payload) }()
+        if err = f.SetDeadline(deadline); err != nil {
+            t.Fatal(err.Error())
+        }
+        go func() { encoderDone <- gob.NewEncoder(f).Encode(payload) }()
     }
 
     if len(extraFiles) != 1 {


@@ -32,6 +32,8 @@ type syscallDispatcher interface {
     // just synchronising access is not enough, as this is for test instrumentation.
     new(f func(k syscallDispatcher, msg message.Msg))
 
+    // getppid provides [os.Getppid].
+    getppid() int
     // getpid provides [os.Getpid].
     getpid() int
     // getuid provides [os.Getuid].
@@ -108,6 +110,7 @@ type direct struct{ msg message.Msg }
 func (k direct) new(f func(k syscallDispatcher, msg message.Msg)) { go f(k, k.msg) }
 
+func (direct) getppid() int { return os.Getppid() }
 func (direct) getpid() int { return os.Getpid() }
 func (direct) getuid() int { return os.Getuid() }
 func (direct) getgid() int { return os.Getgid() }


@@ -331,9 +331,10 @@ func (k *kstub) new(f func(k syscallDispatcher, msg message.Msg)) {
     k.New(func(k syscallDispatcher) { f(k, k.(*kstub)) })
 }
-func (k *kstub) getpid() int { k.Helper(); return k.Expects("getpid").Ret.(int) }
-func (k *kstub) getuid() int { k.Helper(); return k.Expects("getuid").Ret.(int) }
-func (k *kstub) getgid() int { k.Helper(); return k.Expects("getgid").Ret.(int) }
+func (k *kstub) getppid() int { k.Helper(); return k.Expects("getppid").Ret.(int) }
+func (k *kstub) getpid() int { k.Helper(); return k.Expects("getpid").Ret.(int) }
+func (k *kstub) getuid() int { k.Helper(); return k.Expects("getuid").Ret.(int) }
+func (k *kstub) getgid() int { k.Helper(); return k.Expects("getgid").Ret.(int) }
 
 func (k *kstub) lookupEnv(key string) (string, bool) {
     k.Helper()
     expect := k.Expects("lookupEnv")


@@ -6,7 +6,6 @@ import (
     "fmt"
     "os"
     "os/user"
-    "sync/atomic"
 
     "hakurei.app/hst"
     "hakurei.app/message"
@@ -26,12 +25,9 @@ type outcome struct {
     sys *system.I
     // Transmitted to shim. Populated during finalise.
     state *outcomeState
-    // Kept for saving to [state].
+    // Retained for registering current instance.
     config *hst.Config
 
-    // Whether the current process is in outcome.main.
-    active atomic.Bool
-
     ctx context.Context
     syscallDispatcher
 }


@@ -700,9 +700,10 @@ type stubNixOS struct {
     panicDispatcher
 }
 
-func (k *stubNixOS) getpid() int { return 0xdeadbeef }
-func (k *stubNixOS) getuid() int { return 1971 }
-func (k *stubNixOS) getgid() int { return 100 }
+func (k *stubNixOS) getppid() int { return 0xbad }
+func (k *stubNixOS) getpid() int { return 0xdeadbeef }
+func (k *stubNixOS) getuid() int { return 1971 }
+func (k *stubNixOS) getgid() int { return 100 }
 
 func (k *stubNixOS) lookupEnv(key string) (string, bool) {
     switch key {


@@ -4,6 +4,8 @@ import (
     "context"
     "encoding/gob"
     "errors"
+    "iter"
+    "math"
     "os"
     "os/exec"
     "strconv"
@@ -12,6 +14,7 @@ import (
     "time"
 
     "hakurei.app/container"
     "hakurei.app/container/check"
+    "hakurei.app/container/fhs"
     "hakurei.app/hst"
     "hakurei.app/internal"
@@ -20,192 +23,15 @@
     "hakurei.app/system"
 )
 
-// Duration to wait for shim to exit on top of container WaitDelay.
-const shimWaitTimeout = 5 * time.Second
-
-// mainState holds persistent state bound to outcome.main.
-type mainState struct {
-    // done is whether beforeExit has been called already.
-    done bool
-
-    // Time is the exact point in time where the process was created.
-    // Location must be set to UTC.
-    //
-    // Time is nil if no process was ever created.
-    Time *time.Time
-
-    store store.Compat
-    cancel context.CancelFunc
-    cmd *exec.Cmd
-    cmdWait chan error
-    k *outcome
-
-    message.Msg
-    uintptr
-}
-
 const (
-    // mainNeedsRevert indicates the call to Commit has succeeded.
-    mainNeedsRevert uintptr = 1 << iota
-    // mainNeedsDestroy indicates the instance state entry is present in the store.
-    mainNeedsDestroy
+    // Duration to wait for shim to exit on top of container WaitDelay.
+    shimWaitTimeout = 5 * time.Second
+    // Timeout for writing outcomeState to the shim setup pipe.
+    shimSetupTimeout = 5 * time.Second
 )
-// beforeExit must be called immediately before a call to [os.Exit].
-func (ms mainState) beforeExit(isFault bool) {
-    if ms.done {
-        panic("attempting to call beforeExit twice")
-    }
-    ms.done = true
-    defer ms.BeforeExit()
-
-    if isFault && ms.cancel != nil {
-        ms.cancel()
-    }
-
-    var hasErr bool
-    // updates hasErr but does not terminate
-    perror := func(err error, message string) {
-        hasErr = true
-        printMessageError(ms.GetLogger().Println, "cannot "+message+":", err)
-    }
-
-    exitCode := 1
-    defer func() {
-        if hasErr {
-            os.Exit(exitCode)
-        }
-    }()
-
-    // this also handles wait for a non-fault termination
-    if ms.cmd != nil && ms.cmdWait != nil {
-        waitDone := make(chan struct{})
-        // this ties waitDone to ctx with the additional compensated timeout duration
-        go func() { <-ms.k.ctx.Done(); time.Sleep(ms.k.state.Shim.WaitDelay + shimWaitTimeout); close(waitDone) }()
-
-        select {
-        case err := <-ms.cmdWait:
-            wstatus, ok := ms.cmd.ProcessState.Sys().(syscall.WaitStatus)
-            if ok {
-                if v := wstatus.ExitStatus(); v != 0 {
-                    hasErr = true
-                    exitCode = v
-                }
-            }
-
-            if ms.IsVerbose() {
-                if !ok {
-                    if err != nil {
-                        ms.Verbosef("wait: %v", err)
-                    }
-                } else {
-                    switch {
-                    case wstatus.Exited():
-                        ms.Verbosef("process %d exited with code %d", ms.cmd.Process.Pid, wstatus.ExitStatus())
-                    case wstatus.CoreDump():
-                        ms.Verbosef("process %d dumped core", ms.cmd.Process.Pid)
-                    case wstatus.Signaled():
-                        ms.Verbosef("process %d got %s", ms.cmd.Process.Pid, wstatus.Signal())
-                    default:
-                        ms.Verbosef("process %d exited with status %#x", ms.cmd.Process.Pid, wstatus)
-                    }
-                }
-            }
-
-        case <-waitDone:
-            ms.Resume()
-            // this is only reachable when shim did not exit within shimWaitTimeout, after its WaitDelay has elapsed.
-            // This is different from the container failing to terminate within its timeout period, as that is enforced
-            // by the shim. This path is instead reached when there is a lockup in shim preventing it from completing.
-            ms.GetLogger().Printf("process %d did not terminate", ms.cmd.Process.Pid)
-        }
-        ms.Resume()
-    }
-
-    if ms.uintptr&mainNeedsRevert != 0 {
-        if ok, err := ms.store.Do(ms.k.state.identity.unwrap(), func(c store.Cursor) {
-            if ms.uintptr&mainNeedsDestroy != 0 {
-                if err := c.Destroy(ms.k.state.id.unwrap()); err != nil {
-                    perror(err, "destroy state entry")
-                }
-            }
-
-            var rt hst.Enablement
-            if states, err := c.Load(); err != nil {
-                // it is impossible to continue from this point;
-                // revert per-process state here to limit damage
-                ec := system.Process
-                if revertErr := ms.k.sys.Revert((*system.Criteria)(&ec)); revertErr != nil {
-                    var joinError interface {
-                        Unwrap() []error
-                        error
-                    }
-                    if !errors.As(revertErr, &joinError) || joinError == nil {
-                        perror(revertErr, "revert system setup")
-                    } else {
-                        for _, v := range joinError.Unwrap() {
-                            perror(v, "revert system setup step")
-                        }
-                    }
-                }
-                perror(err, "load instance states")
-            } else {
-                ec := system.Process
-                if l := len(states); l == 0 {
-                    ec |= system.User
-                } else {
-                    ms.Verbosef("found %d instances, cleaning up without user-scoped operations", l)
-                }
-
-                // accumulate enablements of remaining launchers
-                for i, s := range states {
-                    if s.Config != nil {
-                        rt |= s.Config.Enablements.Unwrap()
-                    } else {
-                        ms.GetLogger().Printf("state entry %d does not contain config", i)
-                    }
-                }
-
-                ec |= rt ^ (hst.EWayland | hst.EX11 | hst.EDBus | hst.EPulse)
-                if ms.IsVerbose() {
-                    if ec > 0 {
-                        ms.Verbose("reverting operations scope", system.TypeString(ec))
-                    }
-                }
-
-                if err = ms.k.sys.Revert((*system.Criteria)(&ec)); err != nil {
-                    perror(err, "revert system setup")
-                }
-            }
-        }); err != nil {
-            if ok {
-                perror(err, "unlock state store")
-            } else {
-                perror(err, "open state store")
-            }
-        }
-    } else if ms.uintptr&mainNeedsDestroy != 0 {
-        panic("unreachable")
-    }
-}
-
-// fatal calls printMessageError, performs necessary cleanup, followed by a call to [os.Exit](1).
-func (ms mainState) fatal(fallback string, ferr error) {
-    printMessageError(ms.GetLogger().Println, fallback, ferr)
-    ms.beforeExit(true)
-    os.Exit(1)
-}
 
 // main carries out outcome and terminates. main does not return.
 func (k *outcome) main(msg message.Msg) {
-    if !k.active.CompareAndSwap(false, true) {
-        panic("outcome: attempted to run twice")
-    }
     if k.ctx == nil || k.sys == nil || k.state == nil {
         panic("outcome: did not finalise")
     }
@@ -213,31 +39,303 @@ func (k *outcome) main(msg message.Msg) {
 
     // read comp value early for early failure
     hsuPath := internal.MustHsuPath()
 
-    // ms.beforeExit required beyond this point
-    ms := &mainState{Msg: msg, k: k}
-
-    if err := k.sys.Commit(); err != nil {
-        ms.fatal("cannot commit system setup:", err)
-    }
-    ms.uintptr |= mainNeedsRevert
-    ms.store = store.NewMulti(msg, k.state.sc.RunDirPath)
+    const (
+        // transitions to processCommit, or processFinal on failure
+        processStart = iota
+        // transitions to processServe, or processLifecycle on failure
+        processCommit
+        // transitions to processLifecycle only
+        processServe
+        // transitions to processCleanup only
+        processLifecycle
+        // transitions to processFinal only
+        processCleanup
+        // execution terminates, must be the final state
+        processFinal
+    )
 
     // for the shim process
     ctx, cancel := context.WithCancel(k.ctx)
     defer cancel()
-    ms.cancel = cancel
-
-    ms.cmd = exec.CommandContext(ctx, hsuPath.String())
-    ms.cmd.Stdin, ms.cmd.Stdout, ms.cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
-    ms.cmd.Dir = fhs.Root // container init enters final working directory
+
+    var (
+        // state for next iteration
+        processState uintptr = processStart
+        // current state, must not be mutated directly
+        processStateCur uintptr = math.MaxUint
+        // point in time the current iteration began
+        processTime time.Time
+
+        // whether sys is currently in between a call to Commit and Revert
+        isBeforeRevert bool
+        // initialised during processStart if successful
+        handle *store.Handle
+        // initialised during processServe if state is saved
+        entryHandle *store.EntryHandle
+        // can be set in any state, used in processFinal
+        exitCode int
+
+        // shim process startup time,
+        // populated in processStart, accessed by processServe
+        startTime time.Time
+        // shim process as target uid,
+        // populated in processStart, accessed by processServe
+        shimCmd *exec.Cmd
+        // write end of shim setup pipe,
+        // populated in processStart, accessed by processServe
+        shimPipe *os.File
+
+        // perror cancels ctx and prints an error message
+        perror = func(err error, message string) {
+            cancel()
+            if shimPipe != nil {
+                if closeErr := shimPipe.Close(); closeErr != nil {
+                    msg.Verbose(closeErr.Error())
+                }
+                shimPipe = nil
+            }
+
+            if exitCode == 0 {
+                exitCode = 1
+            }
+            printMessageError(msg.GetLogger().Println, "cannot "+message+":", err)
+        }
+        // perrorFatal cancels ctx, prints an error message, and sets the next state
+        perrorFatal = func(err error, message string, newState uintptr) {
+            perror(err, message)
+            processState = newState
+        }
+    )
+
+    for {
+        var processTimePrev time.Time
+        processTimePrev, processTime = processTime, time.Now()
+        var processStatePrev uintptr
+        processStatePrev, processStateCur = processStateCur, processState
+        if !processTimePrev.IsZero() && processStatePrev != processLifecycle {
+            msg.Verbosef("state %d took %d ms", processStatePrev, processTime.Sub(processTimePrev).Milliseconds())
+        }
+
+        switch processState {
+        case processStart:
+            if h, err := store.New(k.state.sc.RunDirPath.Append("state")).Handle(k.state.identity.unwrap()); err != nil {
+                perrorFatal(err, "obtain store segment handle", processFinal)
+                continue
+            } else {
+                handle = h
+            }
+
+            cmd, f, err := k.start(ctx, msg, hsuPath, &startTime)
+            if err != nil {
+                perrorFatal(err, "start shim", processFinal)
+                continue
+            } else {
+                shimCmd, shimPipe = cmd, f
+            }
+            processState = processCommit
+
+        case processCommit:
+            if isBeforeRevert {
+                perrorFatal(newWithMessage("invalid transition to commit state"), "commit", processLifecycle)
+                continue
+            }
+
+            unlock, err := handle.Lock()
+            if err != nil {
+                perrorFatal(err, "acquire lock on store segment", processLifecycle)
+                continue
+            }
+            if entryHandle, err = handle.Save(&hst.State{
+                ID: k.state.id.unwrap(),
+                PID: os.Getpid(),
+                ShimPID: shimCmd.Process.Pid,
+                Config: k.config,
+                Time: startTime,
+            }); err != nil {
+                unlock()
+                // transition here to avoid the commit/revert cycle on the doomed instance
+                perrorFatal(err, "save instance state", processLifecycle)
+                continue
+            }
+            err = k.sys.Commit()
+            unlock()
+            if err != nil {
+                perrorFatal(err, "commit system setup", processLifecycle)
+                continue
+            }
+            isBeforeRevert = true
+            processState = processServe
+
+        case processServe:
+            // this state transition to processLifecycle only
+            processState = processLifecycle
+
+            // this starts the container, system setup must complete before this point
+            if err := serveShim(msg, shimPipe, k.state); err != nil {
+                perror(err, "serve shim payload")
+                continue
+            } else {
+                shimPipe = nil // this is already closed by serveShim
+            }
+
+        case processLifecycle:
+            // this state transition to processCleanup only
+            processState = processCleanup
+
+            msg.Suspend()
+            select {
+            case err := <-func() chan error { w := make(chan error, 1); go func() { w <- shimCmd.Wait(); cancel() }(); return w }():
+                wstatus, ok := shimCmd.ProcessState.Sys().(syscall.WaitStatus)
+                if ok {
+                    if v := wstatus.ExitStatus(); v != 0 {
+                        exitCode = v
+                    }
+                }
+
+                if msg.IsVerbose() {
+                    if !ok {
+                        if err != nil {
+                            msg.Verbosef("wait: %v", err)
+                        }
+                    } else {
+                        switch {
+                        case wstatus.Exited():
+                            msg.Verbosef("process %d exited with code %d", shimCmd.Process.Pid, wstatus.ExitStatus())
+                        case wstatus.CoreDump():
+                            msg.Verbosef("process %d dumped core", shimCmd.Process.Pid)
+                        case wstatus.Signaled():
+                            msg.Verbosef("process %d got %s", shimCmd.Process.Pid, wstatus.Signal())
+                        default:
+                            msg.Verbosef("process %d exited with status %#x", shimCmd.Process.Pid, wstatus)
+                        }
+                    }
+                }
+
+            case <-func() chan struct{} {
+                w := make(chan struct{})
+                // this ties processLifecycle to ctx with the additional compensated timeout duration
+                // to allow transition to the next state on a locked up shim
+                go func() { <-ctx.Done(); time.Sleep(k.state.Shim.WaitDelay + shimWaitTimeout); close(w) }()
+                return w
+            }():
+                // this is only reachable when wait did not return within shimWaitTimeout, after its WaitDelay has elapsed.
+                // This is different from the container failing to terminate within its timeout period, as that is enforced
+                // by the shim. This path is instead reached when there is a lockup in shim preventing it from completing.
+                msg.GetLogger().Printf("process %d did not terminate", shimCmd.Process.Pid)
+            }
+            msg.Resume()
+
+        case processCleanup:
+            // this state transition to processFinal only
+            processState = processFinal
+
+            unlock, err := handle.Lock()
+            if err != nil {
+                perror(err, "acquire lock on store segment")
+            }
+            if entryHandle != nil {
+                if err = entryHandle.Destroy(); err != nil {
+                    perror(err, "destroy state entry")
+                }
+            }
+
+            if isBeforeRevert {
+                ec := system.Process
+                var entries iter.Seq[*store.EntryHandle]
+                if entries, _, err = handle.Entries(); err != nil {
+                    // it is impossible to continue from this point,
+                    // per-process state will be reverted to limit damage
+                    perror(err, "read store segment entries")
+                } else {
+                    // accumulate enablements of remaining instances
+                    var (
+                        // alive enablement bits
+                        rt hst.Enablement
+                        // alive instance count
+                        n int
+                    )
+                    for eh := range entries {
+                        var et hst.Enablement
+                        if et, err = eh.Load(nil); err != nil {
+                            perror(err, "read state header of instance "+eh.ID.String())
+                        } else {
+                            rt |= et
+                            n++
+                        }
+                    }
+
+                    if n == 0 {
+                        ec |= system.User
+                    } else {
+                        msg.Verbosef("found %d instances, cleaning up without user-scoped operations", n)
+                    }
+
+                    ec |= rt ^ (hst.EWayland | hst.EX11 | hst.EDBus | hst.EPulse)
+                    if msg.IsVerbose() {
+                        if ec > 0 {
+                            msg.Verbose("reverting operations scope", system.TypeString(ec))
+                        }
+                    }
+                }
+
+                if err = k.sys.Revert((*system.Criteria)(&ec)); err != nil {
+                    var joinError interface {
+                        Unwrap() []error
+                        error
+                    }
+                    if !errors.As(err, &joinError) || joinError == nil {
+                        perror(err, "revert system setup")
+                    } else {
+                        for _, v := range joinError.Unwrap() {
+                            perror(v, "revert system setup step")
+                        }
+                    }
+                }
+                isBeforeRevert = false
+            }
+            unlock()
+
+        case processFinal:
+            msg.BeforeExit()
+            os.Exit(exitCode)
+
+        default: // not reached
+            k.fatalf("invalid transition from state %d to %d", processStatePrev, processState)
+            panic("unreachable")
+        }
+    }
+}
+
+// start starts the shim via cmd/hsu.
+//
+// If successful, a [time.Time] value for [hst.State] is stored in the value pointed to by startTime.
+// The resulting [exec.Cmd] and write end of the shim setup pipe is returned.
+func (k *outcome) start(ctx context.Context, msg message.Msg,
+    hsuPath *check.Absolute,
+    startTime *time.Time,
+) (*exec.Cmd, *os.File, error) {
+    cmd := exec.CommandContext(ctx, hsuPath.String())
+    cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
+    cmd.Dir = fhs.Root // container init enters final working directory
 
     // shim runs in the same session as monitor; see shim.go for behaviour
-    ms.cmd.Cancel = func() error { return ms.cmd.Process.Signal(syscall.SIGCONT) }
+    cmd.Cancel = func() error { return cmd.Process.Signal(syscall.SIGCONT) }
 
-    var e *gob.Encoder
-    if fd, encoder, err := container.Setup(&ms.cmd.ExtraFiles); err != nil {
-        ms.fatal("cannot create shim setup pipe:", err)
+    var shimPipe *os.File
+    if fd, w, err := container.Setup(&cmd.ExtraFiles); err != nil {
+        return cmd, nil, &hst.AppError{Step: "create shim setup pipe", Err: err}
     } else {
-        e = encoder
-        ms.cmd.Env = []string{
+        shimPipe = w
+        cmd.Env = []string{
            // passed through to shim by hsu
            shimEnv + "=" + strconv.Itoa(fd),
            // interpreted by hsu
@@ -248,63 +346,34 @@
     if len(k.supp) > 0 {
         msg.Verbosef("attaching supplementary group ids %s", k.supp)
         // interpreted by hsu
-        ms.cmd.Env = append(ms.cmd.Env, "HAKUREI_GROUPS="+strings.Join(k.supp, " "))
+        cmd.Env = append(cmd.Env, "HAKUREI_GROUPS="+strings.Join(k.supp, " "))
     }
 
     msg.Verbosef("setuid helper at %s", hsuPath)
     msg.Suspend()
-    if err := ms.cmd.Start(); err != nil {
-        ms.fatal("cannot start setuid wrapper:", err)
-    }
-    startTime := time.Now().UTC()
-    ms.cmdWait = make(chan error, 1)
-    // this ties context back to the life of the process
-    go func() { ms.cmdWait <- ms.cmd.Wait(); cancel() }()
-    ms.Time = &startTime
-
-    // unfortunately the I/O here cannot be directly canceled;
-    // the cancellation path leads to fatal in this case so that is fine
-    select {
-    case err := <-func() (setupErr chan error) {
-        setupErr = make(chan error, 1)
-        go func() { setupErr <- e.Encode(k.state) }()
-        return
-    }():
-        if err != nil {
-            msg.Resume()
-            ms.fatal("cannot transmit shim config:", err)
-        }
-    case <-ctx.Done():
-        msg.Resume()
-        ms.fatal("shim context canceled:", newWithMessageError("shim setup canceled", ctx.Err()))
-    }
-
-    // shim accepted setup payload, create process state
-    if ok, err := ms.store.Do(k.state.identity.unwrap(), func(c store.Cursor) {
-        if err := c.Save(&hst.State{
-            ID: k.state.id.unwrap(),
-            PID: os.Getpid(),
-            ShimPID: ms.cmd.Process.Pid,
-            Config: k.config,
-            Time: *ms.Time,
-        }); err != nil {
-            ms.fatal("cannot save state entry:", err)
-        }
-    }); err != nil {
-        if ok {
-            ms.uintptr |= mainNeedsDestroy
-            ms.fatal("cannot unlock state store:", err)
-        } else {
-            ms.fatal("cannot open state store:", err)
-        }
-    }
-    // state in store at this point, destroy defunct state entry on termination
-    ms.uintptr |= mainNeedsDestroy
-
-    // beforeExit ties shim process to context
-    ms.beforeExit(false)
-    os.Exit(0)
+    if err := cmd.Start(); err != nil {
+        msg.Resume()
+        return cmd, shimPipe, &hst.AppError{Step: "start setuid wrapper", Err: err}
+    }
+    *startTime = time.Now().UTC()
+    return cmd, shimPipe, nil
 }
+
+// serveShim serves outcomeState through the shim setup pipe.
+func serveShim(msg message.Msg, shimPipe *os.File, state *outcomeState) error {
+    if shimPipe == nil {
+        return newWithMessage("shim pipe not available")
+    }
+    if err := shimPipe.SetDeadline(time.Now().Add(shimSetupTimeout)); err != nil {
+        msg.Verbose(err.Error())
+    }
+    if err := gob.NewEncoder(shimPipe).Encode(state); err != nil {
+        msg.Resume()
+        return &hst.AppError{Step: "transmit shim config", Err: err}
+    }
+    _ = shimPipe.Close()
+    return nil
+}
 
 // printMessageError prints the error message according to [message.GetMessage],


@@ -96,11 +96,37 @@ func shimEntrypoint(k syscallDispatcher) {
         k.fatalf("cannot set SUID_DUMP_DISABLE: %v", err)
     }
 
+    // the Go runtime does not expose siginfo_t so SIGCONT is handled in C to check si_pid
+    ppid := k.getppid()
+    var signalPipe io.ReadCloser
+    if r, wKeepAlive, err := k.setupContSignal(ppid); err != nil {
+        switch {
+        case errors.As(err, new(*os.SyscallError)): // returned by os.Pipe
+            k.fatal(err.Error())
+            return
+
+        case errors.As(err, new(syscall.Errno)): // returned by hakurei_shim_setup_cont_signal
+            k.fatalf("cannot install SIGCONT handler: %v", err)
+            return
+
+        default: // unreachable
+            k.fatalf("cannot set up exit request: %v", err)
+            return
+        }
+    } else {
+        defer wKeepAlive()
+        signalPipe = r
+    }
+
     var (
         state outcomeState
         closeSetup func() error
     )
     if f, err := k.receive(shimEnv, &state, nil); err != nil {
+        if errors.Is(err, io.EOF) {
+            // fallback exit request: signal handler not yet installed
+            k.exit(hst.ExitRequest)
+        }
         if errors.Is(err, syscall.EBADF) {
             k.fatal("invalid config descriptor")
         }
@@ -119,25 +145,8 @@
         }
     }
 
-    // the Go runtime does not expose siginfo_t so SIGCONT is handled in C to check si_pid
-    var signalPipe io.ReadCloser
-    if r, wKeepAlive, err := k.setupContSignal(state.Shim.PrivPID); err != nil {
-        switch {
-        case errors.As(err, new(*os.SyscallError)): // returned by os.Pipe
-            k.fatal(err.Error())
-            return
-
-        case errors.As(err, new(syscall.Errno)): // returned by hakurei_shim_setup_cont_signal
-            k.fatalf("cannot install SIGCONT handler: %v", err)
-            return
-
-        default: // unreachable
-            k.fatalf("cannot set up exit request: %v", err)
-            return
-        }
-    } else {
-        defer wKeepAlive()
-        signalPipe = r
+    if state.Shim.PrivPID != ppid {
+        k.fatalf("unexpectedly reparented from %d to %d", state.Shim.PrivPID, ppid)
     }
 
     // pdeath_signal delivery is checked as if the dying process called kill(2), see kernel/exit.c
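The reparenting check above guards a well-known race around PR_SET_PDEATHSIG: the parent can die between the pid being read and the signal being armed. A minimal standalone sketch of that pattern (not the shim's dispatcher-based implementation):

package main

import (
	"log"
	"os"
	"syscall"

	"golang.org/x/sys/unix"
)

func main() {
	ppid := os.Getppid()
	// arm delivery of SIGCONT to this process when its parent dies
	if err := unix.Prctl(unix.PR_SET_PDEATHSIG, uintptr(syscall.SIGCONT), 0, 0, 0); err != nil {
		log.Fatalf("cannot set parent-death signal: %v", err)
	}
	// the parent may have exited before prctl armed the signal, silently
	// reparenting this process; re-check so the guard is not armed against
	// the wrong parent
	if got := os.Getppid(); got != ppid {
		log.Fatalf("unexpectedly reparented from %d to %d", ppid, got)
	}
}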


@@ -142,30 +142,47 @@ func TestShimEntrypoint(t *testing.T) {
             call("getMsg", stub.ExpectArgs{}, nil, nil),
             call("getLogger", stub.ExpectArgs{}, (*log.Logger)(nil), nil),
             call("setDumpable", stub.ExpectArgs{uintptr(container.SUID_DUMP_DISABLE)}, nil, nil),
+            call("getppid", stub.ExpectArgs{}, 0xbad, nil),
+            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("receive", stub.ExpectArgs{"HAKUREI_SHIM", outcomeState{}, nil}, nil, syscall.EBADF),
             call("fatal", stub.ExpectArgs{[]any{"invalid config descriptor"}}, nil, nil),
+
+            // deferred
+            call("wKeepAlive", stub.ExpectArgs{}, nil, nil),
         }}, nil},
 
         {"receive env", func(k *kstub) error { shimEntrypoint(k); return nil }, stub.Expect{Calls: []stub.Call{
             call("getMsg", stub.ExpectArgs{}, nil, nil),
             call("getLogger", stub.ExpectArgs{}, (*log.Logger)(nil), nil),
             call("setDumpable", stub.ExpectArgs{uintptr(container.SUID_DUMP_DISABLE)}, nil, nil),
+            call("getppid", stub.ExpectArgs{}, 0xbad, nil),
+            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("receive", stub.ExpectArgs{"HAKUREI_SHIM", outcomeState{}, nil}, nil, container.ErrReceiveEnv),
             call("fatal", stub.ExpectArgs{[]any{"HAKUREI_SHIM not set"}}, nil, nil),
+
+            // deferred
+            call("wKeepAlive", stub.ExpectArgs{}, nil, nil),
         }}, nil},
 
         {"receive strange", func(k *kstub) error { shimEntrypoint(k); return nil }, stub.Expect{Calls: []stub.Call{
             call("getMsg", stub.ExpectArgs{}, nil, nil),
             call("getLogger", stub.ExpectArgs{}, (*log.Logger)(nil), nil),
             call("setDumpable", stub.ExpectArgs{uintptr(container.SUID_DUMP_DISABLE)}, nil, nil),
+            call("getppid", stub.ExpectArgs{}, 0xbad, nil),
+            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("receive", stub.ExpectArgs{"HAKUREI_SHIM", outcomeState{}, nil}, nil, stub.UniqueError(10)),
             call("fatalf", stub.ExpectArgs{"cannot receive shim setup params: %v", []any{stub.UniqueError(10)}}, nil, nil),
+
+            // deferred
+            call("wKeepAlive", stub.ExpectArgs{}, nil, nil),
        }}, nil},
 
         {"invalid state", func(k *kstub) error { shimEntrypoint(k); return nil }, stub.Expect{Calls: []stub.Call{
             call("getMsg", stub.ExpectArgs{}, nil, nil),
             call("getLogger", stub.ExpectArgs{}, (*log.Logger)(nil), nil),
             call("setDumpable", stub.ExpectArgs{uintptr(container.SUID_DUMP_DISABLE)}, nil, nil),
+            call("getppid", stub.ExpectArgs{}, 0xbad, nil),
+            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("receive", stub.ExpectArgs{"HAKUREI_SHIM", func() outcomeState {
                 state := templateState
                 state.Shim = newShimParams()
@@ -174,15 +191,16 @@
             }(), nil}, nil, nil),
             call("swapVerbose", stub.ExpectArgs{true}, false, nil),
             call("fatal", stub.ExpectArgs{[]any{"impossible outcome state reached\n"}}, nil, nil),
+
+            // deferred
+            call("wKeepAlive", stub.ExpectArgs{}, nil, nil),
         }}, nil},
 
         {"sigaction pipe", func(k *kstub) error { shimEntrypoint(k); return nil }, stub.Expect{Calls: []stub.Call{
             call("getMsg", stub.ExpectArgs{}, nil, nil),
             call("getLogger", stub.ExpectArgs{}, (*log.Logger)(nil), nil),
             call("setDumpable", stub.ExpectArgs{uintptr(container.SUID_DUMP_DISABLE)}, nil, nil),
-            call("receive", stub.ExpectArgs{"HAKUREI_SHIM", templateState, nil}, nil, nil),
-            call("swapVerbose", stub.ExpectArgs{true}, false, nil),
-            call("verbosef", stub.ExpectArgs{"process share directory at %q, runtime directory at %q", []any{m("/tmp/hakurei.10"), m("/run/user/1000/hakurei")}}, nil, nil),
+            call("getppid", stub.ExpectArgs{}, 0xbad, nil),
             call("setupContSignal", stub.ExpectArgs{0xbad}, 0, &os.SyscallError{Syscall: "pipe2", Err: stub.UniqueError(9)}),
             call("fatal", stub.ExpectArgs{[]any{"pipe2: unique error 9 injected by the test suite"}}, nil, nil),
         }}, nil},
@@ -191,9 +209,7 @@
             call("getMsg", stub.ExpectArgs{}, nil, nil),
             call("getLogger", stub.ExpectArgs{}, (*log.Logger)(nil), nil),
             call("setDumpable", stub.ExpectArgs{uintptr(container.SUID_DUMP_DISABLE)}, nil, nil),
-            call("receive", stub.ExpectArgs{"HAKUREI_SHIM", templateState, nil}, nil, nil),
-            call("swapVerbose", stub.ExpectArgs{true}, false, nil),
-            call("verbosef", stub.ExpectArgs{"process share directory at %q, runtime directory at %q", []any{m("/tmp/hakurei.10"), m("/run/user/1000/hakurei")}}, nil, nil),
+            call("getppid", stub.ExpectArgs{}, 0xbad, nil),
             call("setupContSignal", stub.ExpectArgs{0xbad}, 0, syscall.ENOTRECOVERABLE),
             call("fatalf", stub.ExpectArgs{"cannot install SIGCONT handler: %v", []any{syscall.ENOTRECOVERABLE}}, nil, nil),
         }}, nil},
@@ -202,9 +218,7 @@
             call("getMsg", stub.ExpectArgs{}, nil, nil),
             call("getLogger", stub.ExpectArgs{}, (*log.Logger)(nil), nil),
             call("setDumpable", stub.ExpectArgs{uintptr(container.SUID_DUMP_DISABLE)}, nil, nil),
-            call("receive", stub.ExpectArgs{"HAKUREI_SHIM", templateState, nil}, nil, nil),
-            call("swapVerbose", stub.ExpectArgs{true}, false, nil),
-            call("verbosef", stub.ExpectArgs{"process share directory at %q, runtime directory at %q", []any{m("/tmp/hakurei.10"), m("/run/user/1000/hakurei")}}, nil, nil),
+            call("getppid", stub.ExpectArgs{}, 0xbad, nil),
             call("setupContSignal", stub.ExpectArgs{0xbad}, 0, stub.UniqueError(8)),
             call("fatalf", stub.ExpectArgs{"cannot set up exit request: %v", []any{stub.UniqueError(8)}}, nil, nil),
         }}, nil},
@@ -213,10 +227,11 @@
             call("getMsg", stub.ExpectArgs{}, nil, nil),
             call("getLogger", stub.ExpectArgs{}, (*log.Logger)(nil), nil),
             call("setDumpable", stub.ExpectArgs{uintptr(container.SUID_DUMP_DISABLE)}, nil, nil),
+            call("getppid", stub.ExpectArgs{}, 0xbad, nil),
+            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("receive", stub.ExpectArgs{"HAKUREI_SHIM", templateState, nil}, nil, nil),
             call("swapVerbose", stub.ExpectArgs{true}, false, nil),
             call("verbosef", stub.ExpectArgs{"process share directory at %q, runtime directory at %q", []any{m("/tmp/hakurei.10"), m("/run/user/1000/hakurei")}}, nil, nil),
-            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("prctl", stub.ExpectArgs{uintptr(syscall.PR_SET_PDEATHSIG), uintptr(syscall.SIGCONT), uintptr(0)}, nil, stub.UniqueError(7)),
             call("fatalf", stub.ExpectArgs{"cannot set parent-death signal: %v", []any{stub.UniqueError(7)}}, nil, nil),
@@ -228,6 +243,8 @@
             call("getMsg", stub.ExpectArgs{}, nil, nil),
             call("getLogger", stub.ExpectArgs{}, (*log.Logger)(nil), nil),
             call("setDumpable", stub.ExpectArgs{uintptr(container.SUID_DUMP_DISABLE)}, nil, nil),
+            call("getppid", stub.ExpectArgs{}, 0xbad, nil),
+            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("receive", stub.ExpectArgs{"HAKUREI_SHIM", func() outcomeState {
                 state := templateState
                 state.Shim = newShimParams()
@@ -236,7 +253,6 @@
             }(), nil}, nil, nil),
             call("swapVerbose", stub.ExpectArgs{true}, false, nil),
             call("verbosef", stub.ExpectArgs{"process share directory at %q, runtime directory at %q", []any{m("/tmp/hakurei.10"), m("/run/user/1000/hakurei")}}, nil, nil),
-            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("prctl", stub.ExpectArgs{uintptr(syscall.PR_SET_PDEATHSIG), uintptr(syscall.SIGCONT), uintptr(0)}, nil, nil),
             call("fatal", stub.ExpectArgs{[]any{"cannot create container state: unique error 6 injected by the test suite\n"}}, nil, nil),
@@ -248,6 +264,8 @@
             call("getMsg", stub.ExpectArgs{}, nil, nil),
             call("getLogger", stub.ExpectArgs{}, (*log.Logger)(nil), nil),
             call("setDumpable", stub.ExpectArgs{uintptr(container.SUID_DUMP_DISABLE)}, nil, nil),
+            call("getppid", stub.ExpectArgs{}, 0xbad, nil),
+            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("receive", stub.ExpectArgs{"HAKUREI_SHIM", func() outcomeState {
                 state := templateState
                 state.Shim = newShimParams()
@@ -256,7 +274,6 @@
             }(), nil}, nil, nil),
             call("swapVerbose", stub.ExpectArgs{true}, false, nil),
             call("verbosef", stub.ExpectArgs{"process share directory at %q, runtime directory at %q", []any{m("/tmp/hakurei.10"), m("/run/user/1000/hakurei")}}, nil, nil),
-            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("prctl", stub.ExpectArgs{uintptr(syscall.PR_SET_PDEATHSIG), uintptr(syscall.SIGCONT), uintptr(0)}, nil, nil),
             call("fatal", stub.ExpectArgs{[]any{"invalid container state"}}, nil, nil),
@@ -268,10 +285,11 @@
             call("getMsg", stub.ExpectArgs{}, nil, nil),
             call("getLogger", stub.ExpectArgs{}, (*log.Logger)(nil), nil),
             call("setDumpable", stub.ExpectArgs{uintptr(container.SUID_DUMP_DISABLE)}, nil, nil),
+            call("getppid", stub.ExpectArgs{}, 0xbad, nil),
+            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("receive", stub.ExpectArgs{"HAKUREI_SHIM", templateState, nil}, nil, nil),
             call("swapVerbose", stub.ExpectArgs{true}, false, nil),
             call("verbosef", stub.ExpectArgs{"process share directory at %q, runtime directory at %q", []any{m("/tmp/hakurei.10"), m("/run/user/1000/hakurei")}}, nil, nil),
-            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("prctl", stub.ExpectArgs{uintptr(syscall.PR_SET_PDEATHSIG), uintptr(syscall.SIGCONT), uintptr(0)}, nil, nil),
             call("New", stub.ExpectArgs{}, nil, nil),
             call("closeReceive", stub.ExpectArgs{}, nil, nil),
@@ -291,10 +309,11 @@
             call("getMsg", stub.ExpectArgs{}, nil, nil),
             call("getLogger", stub.ExpectArgs{}, (*log.Logger)(nil), nil),
             call("setDumpable", stub.ExpectArgs{uintptr(container.SUID_DUMP_DISABLE)}, nil, nil),
+            call("getppid", stub.ExpectArgs{}, 0xbad, nil),
+            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("receive", stub.ExpectArgs{"HAKUREI_SHIM", templateState, nil}, nil, nil),
             call("swapVerbose", stub.ExpectArgs{true}, false, nil),
             call("verbosef", stub.ExpectArgs{"process share directory at %q, runtime directory at %q", []any{m("/tmp/hakurei.10"), m("/run/user/1000/hakurei")}}, nil, nil),
-            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("prctl", stub.ExpectArgs{uintptr(syscall.PR_SET_PDEATHSIG), uintptr(syscall.SIGCONT), uintptr(0)}, nil, nil),
             call("New", stub.ExpectArgs{}, nil, nil),
             call("closeReceive", stub.ExpectArgs{}, nil, nil),
@@ -314,10 +333,11 @@
             call("getMsg", stub.ExpectArgs{}, nil, nil),
             call("getLogger", stub.ExpectArgs{}, (*log.Logger)(nil), nil),
             call("setDumpable", stub.ExpectArgs{uintptr(container.SUID_DUMP_DISABLE)}, nil, nil),
+            call("getppid", stub.ExpectArgs{}, 0xbad, nil),
+            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("receive", stub.ExpectArgs{"HAKUREI_SHIM", templateState, nil}, nil, nil),
             call("swapVerbose", stub.ExpectArgs{true}, false, nil),
             call("verbosef", stub.ExpectArgs{"process share directory at %q, runtime directory at %q", []any{m("/tmp/hakurei.10"), m("/run/user/1000/hakurei")}}, nil, nil),
-            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("prctl", stub.ExpectArgs{uintptr(syscall.PR_SET_PDEATHSIG), uintptr(syscall.SIGCONT), uintptr(0)}, nil, nil),
             call("New", stub.ExpectArgs{}, nil, nil),
             call("closeReceive", stub.ExpectArgs{}, nil, nil),
@@ -336,10 +356,11 @@
             call("getMsg", stub.ExpectArgs{}, nil, nil),
             call("getLogger", stub.ExpectArgs{}, (*log.Logger)(nil), nil),
             call("setDumpable", stub.ExpectArgs{uintptr(container.SUID_DUMP_DISABLE)}, nil, nil),
+            call("getppid", stub.ExpectArgs{}, 0xbad, nil),
+            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("receive", stub.ExpectArgs{"HAKUREI_SHIM", templateState, nil}, nil, nil),
             call("swapVerbose", stub.ExpectArgs{true}, false, nil),
             call("verbosef", stub.ExpectArgs{"process share directory at %q, runtime directory at %q", []any{m("/tmp/hakurei.10"), m("/run/user/1000/hakurei")}}, nil, nil),
-            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("prctl", stub.ExpectArgs{uintptr(syscall.PR_SET_PDEATHSIG), uintptr(syscall.SIGCONT), uintptr(0)}, nil, nil),
             call("New", stub.ExpectArgs{}, nil, nil),
             call("closeReceive", stub.ExpectArgs{}, nil, nil),
@@ -359,10 +380,11 @@
             call("getMsg", stub.ExpectArgs{}, nil, nil),
             call("getLogger", stub.ExpectArgs{}, (*log.Logger)(nil), nil),
             call("setDumpable", stub.ExpectArgs{uintptr(container.SUID_DUMP_DISABLE)}, nil, nil),
+            call("getppid", stub.ExpectArgs{}, 0xbad, nil),
+            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("receive", stub.ExpectArgs{"HAKUREI_SHIM", templateState, nil}, nil, nil),
             call("swapVerbose", stub.ExpectArgs{true}, false, nil),
             call("verbosef", stub.ExpectArgs{"process share directory at %q, runtime directory at %q", []any{m("/tmp/hakurei.10"), m("/run/user/1000/hakurei")}}, nil, nil),
-            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("prctl", stub.ExpectArgs{uintptr(syscall.PR_SET_PDEATHSIG), uintptr(syscall.SIGCONT), uintptr(0)}, nil, nil),
             call("New", stub.ExpectArgs{}, nil, nil),
             call("closeReceive", stub.ExpectArgs{}, nil, stub.UniqueError(1)),
@@ -385,10 +407,11 @@
             call("getMsg", stub.ExpectArgs{}, nil, nil),
             call("getLogger", stub.ExpectArgs{}, (*log.Logger)(nil), nil),
             call("setDumpable", stub.ExpectArgs{uintptr(container.SUID_DUMP_DISABLE)}, nil, nil),
+            call("getppid", stub.ExpectArgs{}, 0xbad, nil),
+            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("receive", stub.ExpectArgs{"HAKUREI_SHIM", templateState, nil}, nil, nil),
             call("swapVerbose", stub.ExpectArgs{true}, false, nil),
             call("verbosef", stub.ExpectArgs{"process share directory at %q, runtime directory at %q", []any{m("/tmp/hakurei.10"), m("/run/user/1000/hakurei")}}, nil, nil),
-            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("prctl", stub.ExpectArgs{uintptr(syscall.PR_SET_PDEATHSIG), uintptr(syscall.SIGCONT), uintptr(0)}, nil, nil),
             call("New", stub.ExpectArgs{}, nil, nil),
             call("closeReceive", stub.ExpectArgs{}, nil, nil),
@@ -411,10 +434,11 @@
             call("getMsg", stub.ExpectArgs{}, nil, nil),
             call("getLogger", stub.ExpectArgs{}, (*log.Logger)(nil), nil),
             call("setDumpable", stub.ExpectArgs{uintptr(container.SUID_DUMP_DISABLE)}, nil, nil),
+            call("getppid", stub.ExpectArgs{}, 0xbad, nil),
+            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("receive", stub.ExpectArgs{"HAKUREI_SHIM", templateState, nil}, nil, nil),
             call("swapVerbose", stub.ExpectArgs{true}, false, nil),
             call("verbosef", stub.ExpectArgs{"process share directory at %q, runtime directory at %q", []any{m("/tmp/hakurei.10"), m("/run/user/1000/hakurei")}}, nil, nil),
-            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("prctl", stub.ExpectArgs{uintptr(syscall.PR_SET_PDEATHSIG), uintptr(syscall.SIGCONT), uintptr(0)}, nil, nil),
             call("New", stub.ExpectArgs{}, nil, nil),
             call("closeReceive", stub.ExpectArgs{}, nil, nil),
@@ -436,10 +460,11 @@
             call("getMsg", stub.ExpectArgs{}, nil, nil),
             call("getLogger", stub.ExpectArgs{}, (*log.Logger)(nil), nil),
             call("setDumpable", stub.ExpectArgs{uintptr(container.SUID_DUMP_DISABLE)}, nil, nil),
+            call("getppid", stub.ExpectArgs{}, 0xbad, nil),
+            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("receive", stub.ExpectArgs{"HAKUREI_SHIM", templateState, nil}, nil, nil),
             call("swapVerbose", stub.ExpectArgs{true}, false, nil),
             call("verbosef", stub.ExpectArgs{"process share directory at %q, runtime directory at %q", []any{m("/tmp/hakurei.10"), m("/run/user/1000/hakurei")}}, nil, nil),
-            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("prctl", stub.ExpectArgs{uintptr(syscall.PR_SET_PDEATHSIG), uintptr(syscall.SIGCONT), uintptr(0)}, nil, nil),
             call("New", stub.ExpectArgs{}, nil, nil),
             call("closeReceive", stub.ExpectArgs{}, nil, nil),
@@ -462,10 +487,11 @@
             call("getMsg", stub.ExpectArgs{}, nil, nil),
             call("getLogger", stub.ExpectArgs{}, (*log.Logger)(nil), nil),
             call("setDumpable", stub.ExpectArgs{uintptr(container.SUID_DUMP_DISABLE)}, nil, nil),
+            call("getppid", stub.ExpectArgs{}, 0xbad, nil),
+            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("receive", stub.ExpectArgs{"HAKUREI_SHIM", templateState, nil}, nil, nil),
             call("swapVerbose", stub.ExpectArgs{true}, false, nil),
             call("verbosef", stub.ExpectArgs{"process share directory at %q, runtime directory at %q", []any{m("/tmp/hakurei.10"), m("/run/user/1000/hakurei")}}, nil, nil),
-            call("setupContSignal", stub.ExpectArgs{0xbad}, 0, nil),
             call("prctl", stub.ExpectArgs{uintptr(syscall.PR_SET_PDEATHSIG), uintptr(syscall.SIGCONT), uintptr(0)}, nil, nil),
             call("New", stub.ExpectArgs{}, nil, nil),
             call("closeReceive", stub.ExpectArgs{}, nil, nil),