Compare commits
303 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 9fb0b2452e | |
| | a3e87dd0ef | |
| | 90a38c0708 | |
| | 39cc8caa93 | |
| | c4f64f7606 | |
| | a9e2a5e59f | |
| | 9fb0722cdf | |
| | 2f3e323c46 | |
| | 1fc9c3200f | |
| | 096a25ad3a | |
| | ffd2f979fb | |
| | 31a8cc9b5c | |
| | bb3f60fc74 | |
| | 697c91e04d | |
| | 3f7b8b4332 | |
| | fa94155f42 | |
| | 233bd163fb | |
| | f9b69c94bc | |
| | 68aefa6d59 | |
| | 159fd55dbb | |
| | ce6b3ff53b | |
| | 30afa0e2ab | |
| | 9b751de078 | |
| | d77ad3bb6e | |
| | 0142fc90b0 | |
| | 3c9f7cfcd0 | |
| | a3526b3ceb | |
| | 6ad21e2288 | |
| | 27e2e3f996 | |
| | e0c720681b | |
| | f982b13a59 | |
| | 443911ada1 | |
| | d7a3706db3 | |
| | 3226dc44dc | |
| | 9f98d12ad8 | |
| | 550e83dda9 | |
| | 7877b4e627 | |
| | 47ce6f5bd0 | |
| | 48f4ccba33 | |
| | c31884bee4 | |
| | f8661ad479 | |
| | 536f0cbae6 | |
| | 8d872ff1cd | |
| | bf14a412e4 | |
| | 8b4576bc5f | |
| | 29ebc52e26 | |
| | 5f81aac0e2 | |
| | 47490823be | |
| | 1ac8ca7a80 | |
| | fd8b2fd522 | |
| | 20a8519044 | |
| | 8c4fd00c50 | |
| | bc3dd6fbb0 | |
| | 616ed29edf | |
| | 9d9b7294a4 | |
| | 6c1e2f10a7 | |
| | abf96d2283 | |
| | 6c90e879da | |
| | d1b404dc3a | |
| | 744e4e0632 | |
| | 85eda49b2b | |
| | b26bc05bb0 | |
| | 2d63ea8fee | |
| | dd4326418c | |
| | 79c0106ea0 | |
| | 536db533de | |
| | 07927006a8 | |
| | 77ea27b038 | |
| | e76bc6a13a | |
| | cc403c96d8 | |
| | 66118ba941 | |
| | 823ba08dbc | |
| | 660835151e | |
| | 53e6df7e81 | |
| | bd80327a8f | |
| | 41f9aebbb7 | |
| | a2a0e36802 | |
| | fbe93fc771 | |
| | 968d8dbaf1 | |
| | f1758a6fa8 | |
| | 88aaa4497c | |
| | b7ea68de35 | |
| | 67e453f5c4 | |
| | 67092c835a | |
| | 18918d9a0d | |
| | 380ca4e022 | |
| | 887aef8514 | |
| | d61faa09eb | |
| | 50153788ef | |
| | c84fe63217 | |
| | eb67e5e0a8 | |
| | 948afe33e5 | |
| | 76c657177d | |
| | 4356f978aa | |
| | 4f17dad645 | |
| | 68b7d41c65 | |
| | e48f303e38 | |
| | f1fd406b82 | |
| | 53b1de3395 | |
| | 92dcadbf27 | |
| | 0bd6a18326 | |
| | 67d592c337 | |
| | fdc8a8419b | |
| | 122cfbf63a | |
| | 504f5d28fe | |
| | 3eadd5c580 | |
| | 4d29333807 | |
| | e1533fa4c6 | |
| | 9a74d5273d | |
| | 2abc8c454e | |
| | fecb963e85 | |
| | cd9da57f20 | |
| | c6a95f5a6a | |
| | 228489371d | |
| | 490471d22b | |
| | 763d2572fe | |
| | bb1b6beb87 | |
| | 3224a7da63 | |
| | 8a86cf74ee | |
| | e34a59e332 | |
| | 861801597d | |
| | 334578fdde | |
| | 20790af71e | |
| | 43b8a40fc0 | |
| | 87c3059214 | |
| | 6956dfc31a | |
| | d9ebaf20f8 | |
| | acee0b3632 | |
| | 5e55a796df | |
| | f6eaf76ec9 | |
| | 5c127a7035 | |
| | 8a26521f5b | |
| | 0fd4556e38 | |
| | 50b82dcf82 | |
| | 20a8d30821 | |
| | cdf2e4a2fb | |
| | dcb8a6ea06 | |
| | 094a62ba9d | |
| | 6420b6e6e8 | |
| | d7d058fdc5 | |
| | 84795b5d9f | |
| | f84d30deed | |
| | 77821feb8b | |
| | eb1060f395 | |
| | 0e08254595 | |
| | 349d8693bf | |
| | e88ae87e50 | |
| | 7cd4aa838c | |
| | 641942a4e3 | |
| | b6a66acfe4 | |
| | b72dc43bc3 | |
| | 8e59ff98b5 | |
| | f06d7fd387 | |
| | ba75587132 | |
| | 9a06ce2db0 | |
| | 3ec15bcdf1 | |
| | d933234784 | |
| | 1c49c75f95 | |
| | 6a01a55d7e | |
| | b14964a66d | |
| | ff98c9ded9 | |
| | 7f3d1d6375 | |
| | 3a4f20b759 | |
| | 21858ecfe4 | |
| | 574a64aa85 | |
| | 85d27229fd | |
| | 83fb80d710 | |
| | fe6dc62ebf | |
| | 823f9c76a7 | |
| | 2df913999b | |
| | 52c959bd6a | |
| | d258dea0bf | |
| | dc96302111 | |
| | 88e9a143d6 | |
| | 8d06c0235b | |
| | 4155adc16a | |
| | 2a9525c77a | |
| | efc90c3221 | |
| | 610ee13ab3 | |
| | 5936e6a4aa | |
| | 3499a82785 | |
| | 088d35e4e6 | |
| | 1667df9c43 | |
| | 156dd767ef | |
| | 5fe166a4a7 | |
| | 41a8d03dd2 | |
| | 610572d0e6 | |
| | 29951c5174 | |
| | 91c3594dee | |
| | 7ccc2fc5ec | |
| | 63e137856e | |
| | e1e46504a1 | |
| | ec9343ebd6 | |
| | 423808ac76 | |
| | 2494ede106 | |
| | da3848b92f | |
| | 34cb4ebd3b | |
| | f712466714 | |
| | f2430b5f5e | |
| | 863e6f5db6 | |
| | 23df2ab999 | |
| | 7bd4d7d0e6 | |
| | b3c30bcc51 | |
| | 38059db835 | |
| | 409fd3149e | |
| | 4eea136308 | |
| | c86ff02d8d | |
| | e8dda70c41 | |
| | 7ea4e8b643 | |
| | 5eefebcb48 | |
| | 8e08e8f518 | |
| | 54da6ce03d | |
| | 3a21ba1bca | |
| | 45301559bf | |
| | 0df87ab111 | |
| | aa0a949cef | |
| | ce0064384d | |
| | 53d80f4b66 | |
| | 156096ac98 | |
| | ceb75538cf | |
| | 0741a614ed | |
| | e7e9b4caea | |
| | f6d32e482a | |
| | 79adf217f4 | |
| | 8efffd72f4 | |
| | 86ad8b72aa | |
| | e91049c3c5 | |
| | 3d4d32932d | |
| | 0ab6c13c77 | |
| | 834cb0d40b | |
| | 7548a627e5 | |
| | b98d27f773 | |
| | f3aa31e401 | |
| | 4da26681b5 | |
| | 4897b0259e | |
| | d6e4f85864 | |
| | 3eb927823f | |
| | d76b9d04b8 | |
| | fa93476896 | |
| | bd0ef086b1 | |
| | 05202cf994 | |
| | 40081e7a06 | |
| | 863d3dcf9f | |
| | 8ad9909065 | |
| | deda16da38 | |
| | 55465c6e72 | |
| | ce249d23f1 | |
| | dd5d792d14 | |
| | d15d2ec2bd | |
| | 3078c41ce7 | |
| | e9de5d3aca | |
| | 993afde840 | |
| | c9cd16fd2a | |
| | e42ea32dbe | |
| | e7982b4ee9 | |
| | ef1ebf12d9 | |
| | 775a9f57c9 | |
| | 2f8ca83376 | |
| | 3d720ada92 | |
| | 2e5362e536 | |
| | 6d3bd27220 | |
| | a27305cb4a | |
| | 0e476c5e5b | |
| | 54712e0426 | |
| | b77c1ecfdb | |
| | dce5839a79 | |
| | d597592e1f | |
| | 056f5b12d4 | |
| | da2bb546ba | |
| | 7bfbd59810 | |
| | ea815a59e8 | |
| | 28a8dc67d2 | |
| | ec49c63c5f | |
| | 5a50bf80ee | |
| | ce06b7b663 | |
| | 08bdc68f3a | |
| | 8cb0b433b2 | |
| | 767f1844d2 | |
| | 54610aaddc | |
| | 2e80660169 | |
| | d0a3c6a2f3 | |
| | 0c0e3d6fc2 | |
| | fae910a1ad | |
| | 178c8bc28b | |
| | 30dcab0734 | |
| | 0ea051062b | |
| | b0f2ab6fff | |
| | 00a5bdf006 | |
| | a27dfdc058 | |
| | 6d0d9cecd1 | |
| | 17248d7d61 | |
| | 41e5628c67 | |
| | ffbec828e1 | |
| | de0467a65e | |
| | b5999b8814 | |
| | ebc67bb8ad | |
| | e60ff660f6 | |
| | 47db461546 | |
| | 0a3fe5f907 | |
| | b72d502f1c | |
| | f8b3db3f66 | |
| | 0e2fb1788f | |
| | d8417e2927 | |
```diff
@@ -72,6 +72,23 @@ jobs:
           path: result/*
           retention-days: 1
 
+  sharefs:
+    name: ShareFS
+    runs-on: nix
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Run NixOS test
+        run: nix build --out-link "result" --print-out-paths --print-build-logs .#checks.x86_64-linux.sharefs
+
+      - name: Upload test output
+        uses: actions/upload-artifact@v3
+        with:
+          name: "sharefs-vm-output"
+          path: result/*
+          retention-days: 1
+
   hpkg:
     name: Hpkg
     runs-on: nix
@@ -96,6 +113,7 @@ jobs:
       - race
       - sandbox
       - sandbox-race
+      - sharefs
       - hpkg
     runs-on: nix
     steps:
```
.gitignore (vendored, 1 line changed):

```diff
@@ -27,6 +27,7 @@ go.work.sum
 
 # go generate
 /cmd/hakurei/LICENSE
+/internal/pkg/testdata/testtool
 
 # release
 /dist/hakurei-*
```
```diff
@@ -14,7 +14,6 @@ import (
     _ "unsafe" // for go:linkname
 
     "hakurei.app/command"
-    "hakurei.app/container"
     "hakurei.app/container/check"
     "hakurei.app/container/fhs"
     "hakurei.app/hst"
@@ -187,14 +186,6 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
         }})
     }
 
-    // start pipewire-pulse: this most likely exists on host if PipeWire is available
-    if flagPulse {
-        config.Container.Filesystem = append(config.Container.Filesystem, hst.FilesystemConfigJSON{FilesystemConfig: &hst.FSDaemon{
-            Target: fhs.AbsRunUser.Append(strconv.Itoa(container.OverflowUid(msg)), "pulse/native"),
-            Exec: shell, Args: []string{"-lc", "exec pipewire-pulse"},
-        }})
-    }
-
     config.Container.Filesystem = append(config.Container.Filesystem,
         // opportunistically bind kvm
         hst.FilesystemConfigJSON{FilesystemConfig: &hst.FSBind{
```
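The block removed above mounted a pipewire-pulse launcher at the PulseAudio socket path under the overflow uid's runtime directory. As a point of reference, here is a minimal stdlib-only sketch of that path construction; the hakurei-specific `hst`/`fhs` types from the removed lines are not reproduced, and reading `/proc/sys/kernel/overflowuid` is an assumption about how `container.OverflowUid` obtains the value.

```go
package main

import (
	"fmt"
	"os"
	"path"
	"strconv"
	"strings"
)

// overflowUID stands in for container.OverflowUid from the removed hunk; 65534
// is the usual kernel default, used here as a fallback.
func overflowUID() int {
	b, err := os.ReadFile("/proc/sys/kernel/overflowuid")
	if err != nil {
		return 65534
	}
	v, err := strconv.Atoi(strings.TrimSpace(string(b)))
	if err != nil {
		return 65534
	}
	return v
}

func main() {
	// Mirrors fhs.AbsRunUser.Append(strconv.Itoa(...), "pulse/native") from the
	// removed pipewire-pulse entry: /run/user/<overflow uid>/pulse/native.
	fmt.Println(path.Join("/run/user", strconv.Itoa(overflowUID()), "pulse", "native"))
}
```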
```diff
@@ -1,5 +1,5 @@
 {
-  nixosTest,
+  testers,
   callPackage,
 
   system,
@@ -8,7 +8,7 @@
 let
   buildPackage = self.buildPackage.${system};
 in
-nixosTest {
+testers.nixosTest {
   name = "hpkg";
   nodes.machine = {
     environment.etc = {
```
cmd/mbf/main.go (new file, 204 lines):

```go
package main

import (
    "context"
    "errors"
    "fmt"
    "log"
    "os"
    "os/signal"
    "path/filepath"
    "runtime"
    "syscall"
    "unique"

    "hakurei.app/command"
    "hakurei.app/container"
    "hakurei.app/container/check"
    "hakurei.app/internal/pkg"
    "hakurei.app/internal/rosa"
    "hakurei.app/message"
)

func main() {
    container.TryArgv0(nil)

    log.SetFlags(0)
    log.SetPrefix("mbf: ")
    msg := message.New(log.Default())

    if os.Geteuid() == 0 {
        log.Fatal("this program must not run as root")
    }

    var cache *pkg.Cache
    ctx, stop := signal.NotifyContext(context.Background(),
        syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
    defer stop()
    defer func() {
        if cache != nil {
            cache.Close()
        }

        if r := recover(); r != nil {
            fmt.Println(r)
            log.Fatal("consider scrubbing the on-disk cache")
        }
    }()

    var (
        flagQuiet  bool
        flagCures  int
        flagBase   string
        flagTShift int
    )
    c := command.New(os.Stderr, log.Printf, "mbf", func([]string) (err error) {
        msg.SwapVerbose(!flagQuiet)

        var base *check.Absolute
        if flagBase, err = filepath.Abs(flagBase); err != nil {
            return
        } else if base, err = check.NewAbs(flagBase); err != nil {
            return
        }
        if cache, err = pkg.Open(ctx, msg, flagCures, base); err == nil {
            if flagTShift < 0 {
                cache.SetThreshold(0)
            } else if flagTShift > 31 {
                cache.SetThreshold(1 << 31)
            } else {
                cache.SetThreshold(1 << flagTShift)
            }
        }
        return
    }).Flag(
        &flagQuiet,
        "q", command.BoolFlag(false),
        "Do not print cure messages",
    ).Flag(
        &flagCures,
        "cures", command.IntFlag(0),
        "Maximum number of dependencies to cure at any given time",
    ).Flag(
        &flagBase,
        "d", command.StringFlag("cache"),
        "Directory to store cured artifacts",
    ).Flag(
        &flagTShift,
        "tshift", command.IntFlag(-1),
        "Dependency graph size exponent, to the power of 2",
    )

    {
        var flagShifts int
        c.NewCommand(
            "scrub", "Examine the on-disk cache for errors",
            func(args []string) error {
                if len(args) > 0 {
                    return errors.New("scrub expects no arguments")
                }
                if flagShifts < 0 || flagShifts > 31 {
                    flagShifts = 12
                }
                return cache.Scrub(runtime.NumCPU() << flagShifts)
            },
        ).Flag(
            &flagShifts,
            "shift", command.IntFlag(12),
            "Scrub parallelism size exponent, to the power of 2",
        )
    }

    c.NewCommand(
        "stage3",
        "Check for toolchain 3-stage non-determinism",
        func(args []string) (err error) {
            _, _, _, stage1 := (rosa.Std - 2).NewLLVM()
            _, _, _, stage2 := (rosa.Std - 1).NewLLVM()
            _, _, _, stage3 := rosa.Std.NewLLVM()
            var (
                pathname *check.Absolute
                checksum [2]unique.Handle[pkg.Checksum]
            )

            if pathname, _, err = cache.Cure(stage1); err != nil {
                return err
            }
            log.Println("stage1:", pathname)

            if pathname, checksum[0], err = cache.Cure(stage2); err != nil {
                return err
            }
            log.Println("stage2:", pathname)
            if pathname, checksum[1], err = cache.Cure(stage3); err != nil {
                return err
            }
            log.Println("stage3:", pathname)

            if checksum[0] != checksum[1] {
                err = &pkg.ChecksumMismatchError{
                    Got:  checksum[0].Value(),
                    Want: checksum[1].Value(),
                }
            } else {
                log.Println(
                    "stage2 is identical to stage3",
                    "("+pkg.Encode(checksum[0].Value())+")",
                )
            }
            return
        },
    )

    {
        var (
            flagDump string
        )
        c.NewCommand(
            "cure",
            "Cure the named artifact and show its path",
            func(args []string) error {
                if len(args) != 1 {
                    return errors.New("cure requires 1 argument")
                }
                if p, ok := rosa.ResolveName(args[0]); !ok {
                    return fmt.Errorf("unsupported artifact %q", args[0])
                } else if flagDump == "" {
                    pathname, _, err := cache.Cure(rosa.Std.Load(p))
                    if err == nil {
                        log.Println(pathname)
                    }
                    return err
                } else {
                    f, err := os.OpenFile(
                        flagDump,
                        os.O_WRONLY|os.O_CREATE|os.O_EXCL,
                        0644,
                    )
                    if err != nil {
                        return err
                    }

                    if err = cache.EncodeAll(f, rosa.Std.Load(p)); err != nil {
                        _ = f.Close()
                        return err
                    }

                    return f.Close()
                }
            },
        ).
            Flag(
                &flagDump,
                "dump", command.StringFlag(""),
                "Write IR to specified pathname and terminate",
            )
    }

    c.MustParse(os.Args[1:], func(err error) {
        if cache != nil {
            cache.Close()
        }
        log.Fatal(err)
    })
}
```
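The -tshift flag above maps a size exponent onto the threshold passed to cache.SetThreshold. A small standalone sketch of that clamping, using only the standard library (thresholdFromShift is a name introduced here for illustration and assumes a 64-bit int, as the original call sites do):

```go
package main

import "fmt"

// thresholdFromShift mirrors the clamping mbf applies to -tshift: a negative
// shift disables the threshold, values above 31 saturate at 1<<31, and
// everything else becomes 2^tshift.
func thresholdFromShift(tshift int) int {
	switch {
	case tshift < 0:
		return 0
	case tshift > 31:
		return 1 << 31
	default:
		return 1 << tshift
	}
}

func main() {
	for _, s := range []int{-1, 0, 12, 31, 40} {
		fmt.Printf("tshift=%d -> threshold=%d\n", s, thresholdFromShift(s))
	}
}
```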
cmd/sharefs/fuse-operations.c (new file, 282 lines):

```c
#ifndef _GNU_SOURCE
#define _GNU_SOURCE /* O_DIRECT */
#endif

#include <dirent.h>
#include <errno.h>
#include <unistd.h>

/* TODO(ophestra): remove after 05ce67fea99ca09cd4b6625cff7aec9cc222dd5a reaches a release */
#include <sys/syscall.h>

#include "fuse-operations.h"

/* MUST_TRANSLATE_PATHNAME translates a userspace pathname to a relative pathname;
 * the resulting address points to a constant string or part of pathname, it is never heap allocated. */
#define MUST_TRANSLATE_PATHNAME(pathname) \
    do { \
        if (pathname == NULL) \
            return -EINVAL; \
        while (*pathname == '/') \
            pathname++; \
        if (*pathname == '\0') \
            pathname = "."; \
    } while (0)

/* GET_CONTEXT_PRIV obtains fuse context and private data for the calling thread. */
#define GET_CONTEXT_PRIV(ctx, priv) \
    do { \
        ctx = fuse_get_context(); \
        priv = ctx->private_data; \
    } while (0)

/* impl_getattr modifies a struct stat from the kernel to present to userspace;
 * impl_getattr returns a negative errno style error code. */
static int impl_getattr(struct fuse_context *ctx, struct stat *statbuf) {
    /* allowlist of permitted types */
    if (!S_ISDIR(statbuf->st_mode) && !S_ISREG(statbuf->st_mode) && !S_ISLNK(statbuf->st_mode)) {
        return -ENOTRECOVERABLE; /* returning an errno causes all operations on the file to return EIO */
    }

#define OVERRIDE_PERM(v) (statbuf->st_mode & ~0777) | (v & 0777)
    if (S_ISDIR(statbuf->st_mode))
        statbuf->st_mode = OVERRIDE_PERM(SHAREFS_PERM_DIR);
    else if (S_ISREG(statbuf->st_mode))
        statbuf->st_mode = OVERRIDE_PERM(SHAREFS_PERM_REG);
    else
        statbuf->st_mode = 0; /* should always be symlink in this case */

    statbuf->st_uid = ctx->uid;
    statbuf->st_gid = SHAREFS_MEDIA_RW_ID;
    statbuf->st_ctim = statbuf->st_mtim;
    statbuf->st_nlink = 1;
    return 0;
}

/* fuse_operations implementation */

int sharefs_getattr(const char *pathname, struct stat *statbuf, struct fuse_file_info *fi) {
    struct fuse_context *ctx;
    struct sharefs_private *priv;
    GET_CONTEXT_PRIV(ctx, priv);
    MUST_TRANSLATE_PATHNAME(pathname);

    (void)fi;

    if (fstatat(priv->dirfd, pathname, statbuf, AT_SYMLINK_NOFOLLOW) == -1)
        return -errno;
    return impl_getattr(ctx, statbuf);
}

int sharefs_readdir(const char *pathname, void *buf, fuse_fill_dir_t filler, off_t offset, struct fuse_file_info *fi, enum fuse_readdir_flags flags) {
    int fd;
    DIR *dp;
    struct stat st;
    int ret = 0;
    struct dirent *de;

    struct fuse_context *ctx;
    struct sharefs_private *priv;
    GET_CONTEXT_PRIV(ctx, priv);
    MUST_TRANSLATE_PATHNAME(pathname);

    (void)offset;
    (void)fi;

    if ((fd = openat(priv->dirfd, pathname, O_RDONLY | O_DIRECTORY | O_CLOEXEC)) == -1)
        return -errno;
    if ((dp = fdopendir(fd)) == NULL) {
        close(fd);
        return -errno;
    }

    errno = 0; /* for the next readdir call */
    while ((de = readdir(dp)) != NULL) {
        if (flags & FUSE_READDIR_PLUS) {
            if (fstatat(dirfd(dp), de->d_name, &st, AT_SYMLINK_NOFOLLOW) == -1) {
                ret = -errno;
                break;
            }

            if ((ret = impl_getattr(ctx, &st)) < 0)
                break;

            errno = 0;
            ret = filler(buf, de->d_name, &st, 0, FUSE_FILL_DIR_PLUS);
        } else
            ret = filler(buf, de->d_name, NULL, 0, 0);

        if (ret != 0) {
            ret = errno != 0 ? -errno : -EIO; /* filler */
            break;
        }

        errno = 0; /* for the next readdir call */
    }
    if (ret == 0 && errno != 0)
        ret = -errno; /* readdir */

    closedir(dp);
    return ret;
}

int sharefs_mkdir(const char *pathname, mode_t mode) {
    struct fuse_context *ctx;
    struct sharefs_private *priv;
    GET_CONTEXT_PRIV(ctx, priv);
    MUST_TRANSLATE_PATHNAME(pathname);

    (void)mode;

    if (mkdirat(priv->dirfd, pathname, SHAREFS_PERM_DIR) == -1)
        return -errno;
    return 0;
}

int sharefs_unlink(const char *pathname) {
    struct fuse_context *ctx;
    struct sharefs_private *priv;
    GET_CONTEXT_PRIV(ctx, priv);
    MUST_TRANSLATE_PATHNAME(pathname);

    if (unlinkat(priv->dirfd, pathname, 0) == -1)
        return -errno;
    return 0;
}

int sharefs_rmdir(const char *pathname) {
    struct fuse_context *ctx;
    struct sharefs_private *priv;
    GET_CONTEXT_PRIV(ctx, priv);
    MUST_TRANSLATE_PATHNAME(pathname);

    if (unlinkat(priv->dirfd, pathname, AT_REMOVEDIR) == -1)
        return -errno;
    return 0;
}

int sharefs_rename(const char *oldpath, const char *newpath, unsigned int flags) {
    struct fuse_context *ctx;
    struct sharefs_private *priv;
    GET_CONTEXT_PRIV(ctx, priv);
    MUST_TRANSLATE_PATHNAME(oldpath);
    MUST_TRANSLATE_PATHNAME(newpath);

    /* TODO(ophestra): replace with wrapper after 05ce67fea99ca09cd4b6625cff7aec9cc222dd5a reaches a release */
    if (syscall(__NR_renameat2, priv->dirfd, oldpath, priv->dirfd, newpath, flags) == -1)
        return -errno;
    return 0;
}

int sharefs_truncate(const char *pathname, off_t length, struct fuse_file_info *fi) {
    int fd;
    int ret;

    struct fuse_context *ctx;
    struct sharefs_private *priv;
    GET_CONTEXT_PRIV(ctx, priv);
    MUST_TRANSLATE_PATHNAME(pathname);

    (void)fi;

    if ((fd = openat(priv->dirfd, pathname, O_WRONLY | O_CLOEXEC)) == -1)
        return -errno;
    if ((ret = ftruncate(fd, length)) == -1)
        ret = -errno;
    close(fd);
    return ret;
}

int sharefs_utimens(const char *pathname, const struct timespec times[2], struct fuse_file_info *fi) {
    struct fuse_context *ctx;
    struct sharefs_private *priv;
    GET_CONTEXT_PRIV(ctx, priv);
    MUST_TRANSLATE_PATHNAME(pathname);

    (void)fi;

    if (utimensat(priv->dirfd, pathname, times, AT_SYMLINK_NOFOLLOW) == -1)
        return -errno;
    return 0;
}

int sharefs_create(const char *pathname, mode_t mode, struct fuse_file_info *fi) {
    int fd;

    struct fuse_context *ctx;
    struct sharefs_private *priv;
    GET_CONTEXT_PRIV(ctx, priv);
    MUST_TRANSLATE_PATHNAME(pathname);

    (void)mode;

    if ((fd = openat(priv->dirfd, pathname, fi->flags & ~SHAREFS_FORBIDDEN_FLAGS, SHAREFS_PERM_REG)) == -1)
        return -errno;
    fi->fh = fd;
    return 0;
}

int sharefs_open(const char *pathname, struct fuse_file_info *fi) {
    int fd;

    struct fuse_context *ctx;
    struct sharefs_private *priv;
    GET_CONTEXT_PRIV(ctx, priv);
    MUST_TRANSLATE_PATHNAME(pathname);

    if ((fd = openat(priv->dirfd, pathname, fi->flags & ~SHAREFS_FORBIDDEN_FLAGS)) == -1)
        return -errno;
    fi->fh = fd;
    return 0;
}

int sharefs_read(const char *pathname, char *buf, size_t count, off_t offset, struct fuse_file_info *fi) {
    int ret;

    (void)pathname;

    if ((ret = pread(fi->fh, buf, count, offset)) == -1)
        return -errno;
    return ret;
}

int sharefs_write(const char *pathname, const char *buf, size_t count, off_t offset, struct fuse_file_info *fi) {
    int ret;

    (void)pathname;

    if ((ret = pwrite(fi->fh, buf, count, offset)) == -1)
        return -errno;
    return ret;
}

int sharefs_statfs(const char *pathname, struct statvfs *statbuf) {
    int fd;
    int ret;

    struct fuse_context *ctx;
    struct sharefs_private *priv;
    GET_CONTEXT_PRIV(ctx, priv);
    MUST_TRANSLATE_PATHNAME(pathname);

    if ((fd = openat(priv->dirfd, pathname, O_RDONLY | O_CLOEXEC)) == -1)
        return -errno;
    if ((ret = fstatvfs(fd, statbuf)) == -1)
        ret = -errno;
    close(fd);
    return ret;
}

int sharefs_release(const char *pathname, struct fuse_file_info *fi) {
    (void)pathname;

    return close(fi->fh);
}

int sharefs_fsync(const char *pathname, int datasync, struct fuse_file_info *fi) {
    (void)pathname;

    if (datasync ? fdatasync(fi->fh) : fsync(fi->fh) == -1)
        return -errno;
    return 0;
}
```
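Every operation above first runs MUST_TRANSLATE_PATHNAME so that the absolute pathname FUSE supplies can be used with the *at() syscalls relative to the source dirfd, with the mount root itself becoming ".". A rough Go equivalent of that translation, for illustration only (translate is a hypothetical helper, not part of the sharefs sources):

```go
package main

import (
	"fmt"
	"strings"
)

// translate mirrors the MUST_TRANSLATE_PATHNAME macro: leading slashes are
// stripped, and an empty result (the mount root) becomes ".".
func translate(pathname string) string {
	p := strings.TrimLeft(pathname, "/")
	if p == "" {
		return "."
	}
	return p
}

func main() {
	for _, p := range []string{"/", "/DCIM/photo.jpg", "//nested/dir"} {
		fmt.Printf("%q -> %q\n", p, translate(p))
	}
}
```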
cmd/sharefs/fuse-operations.h (new file, 34 lines):

```c
#define FUSE_USE_VERSION FUSE_MAKE_VERSION(3, 12)
#include <fuse.h>
#include <fuse_lowlevel.h> /* for fuse_cmdline_help */

#if (FUSE_VERSION < FUSE_MAKE_VERSION(3, 12))
#error This package requires libfuse >= v3.12
#endif

#define SHAREFS_MEDIA_RW_ID (1 << 10) - 1 /* owning gid presented to userspace */
#define SHAREFS_PERM_DIR 0700 /* permission bits for directories presented to userspace */
#define SHAREFS_PERM_REG 0600 /* permission bits for regular files presented to userspace */
#define SHAREFS_FORBIDDEN_FLAGS O_DIRECT /* these open flags are cleared unconditionally */

/* sharefs_private is populated by sharefs_init and contains process-wide context */
struct sharefs_private {
    int dirfd; /* source dirfd opened during sharefs_init */
    uintptr_t setup; /* cgo handle of opaque setup state */
};

int sharefs_getattr(const char *pathname, struct stat *statbuf, struct fuse_file_info *fi);
int sharefs_readdir(const char *pathname, void *buf, fuse_fill_dir_t filler, off_t offset, struct fuse_file_info *fi, enum fuse_readdir_flags flags);
int sharefs_mkdir(const char *pathname, mode_t mode);
int sharefs_unlink(const char *pathname);
int sharefs_rmdir(const char *pathname);
int sharefs_rename(const char *oldpath, const char *newpath, unsigned int flags);
int sharefs_truncate(const char *pathname, off_t length, struct fuse_file_info *fi);
int sharefs_utimens(const char *pathname, const struct timespec times[2], struct fuse_file_info *fi);
int sharefs_create(const char *pathname, mode_t mode, struct fuse_file_info *fi);
int sharefs_open(const char *pathname, struct fuse_file_info *fi);
int sharefs_read(const char *pathname, char *buf, size_t count, off_t offset, struct fuse_file_info *fi);
int sharefs_write(const char *pathname, const char *buf, size_t count, off_t offset, struct fuse_file_info *fi);
int sharefs_statfs(const char *pathname, struct statvfs *statbuf);
int sharefs_release(const char *pathname, struct fuse_file_info *fi);
int sharefs_fsync(const char *pathname, int datasync, struct fuse_file_info *fi);
```
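impl_getattr in fuse-operations.c applies these constants: every entry is presented as owned by the calling uid and by gid SHAREFS_MEDIA_RW_ID ((1<<10)-1 = 1023), with the permission bits forced to SHAREFS_PERM_DIR or SHAREFS_PERM_REG by file type. A hedged Go sketch of that override policy follows; overrideMode is illustrative and not part of the sharefs sources, and Go fs.FileMode bits stand in for raw st_mode.

```go
package main

import (
	"fmt"
	"io/fs"
)

// Constants mirrored from fuse-operations.h.
const (
	mediaRWGID = 1<<10 - 1 // SHAREFS_MEDIA_RW_ID
	permDir    = 0o700     // SHAREFS_PERM_DIR
	permReg    = 0o600     // SHAREFS_PERM_REG
)

// overrideMode mirrors the OVERRIDE_PERM macro: type bits are kept and the low
// permission bits are replaced wholesale; symlinks (and anything else the C
// side lets through) fall back to 0.
func overrideMode(mode fs.FileMode) fs.FileMode {
	switch {
	case mode.IsDir():
		return mode&^fs.ModePerm | permDir
	case mode.IsRegular():
		return mode&^fs.ModePerm | permReg
	default:
		return 0
	}
}

func main() {
	fmt.Printf("dir perm: %o\n", overrideMode(fs.ModeDir|0o755)&fs.ModePerm) // 700
	fmt.Printf("reg perm: %o\n", overrideMode(0o644)&fs.ModePerm)            // 600
	fmt.Println("presented gid:", mediaRWGID)                                // 1023
}
```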
cmd/sharefs/fuse.go (new file, 556 lines):

```go
package main

/*
#cgo pkg-config: --static fuse3

#include "fuse-operations.h"
#include <stdlib.h>
#include <string.h>

extern void *sharefs_init(struct fuse_conn_info *conn, struct fuse_config *cfg);
extern void sharefs_destroy(void *private_data);

typedef void (*closure)();
static inline struct fuse_opt _FUSE_OPT_END() { return (struct fuse_opt)FUSE_OPT_END; };
*/
import "C"
import (
    "context"
    "encoding/gob"
    "errors"
    "fmt"
    "io"
    "log"
    "os"
    "os/exec"
    "os/signal"
    "path"
    "runtime"
    "runtime/cgo"
    "strconv"
    "syscall"
    "unsafe"

    "hakurei.app/container"
    "hakurei.app/container/check"
    "hakurei.app/container/std"
    "hakurei.app/hst"
    "hakurei.app/internal/helper/proc"
    "hakurei.app/internal/info"
    "hakurei.app/message"
)

type (
    // closure represents a C function pointer.
    closure = C.closure

    // fuseArgs represents the fuse_args structure.
    fuseArgs = C.struct_fuse_args

    // setupState holds state used for setup. Its cgo handle is included in
    // sharefs_private and considered opaque to non-setup callbacks.
    setupState struct {
        // Whether sharefs_init failed.
        initFailed bool

        // Whether to create source directory as root.
        mkdir bool

        // Open file descriptor to fuse.
        Fuse int

        // Pathname to open for dirfd.
        Source *check.Absolute
        // New uid and gid to set by sharefs_init when starting as root.
        Setuid, Setgid int
    }
)

func init() { gob.Register(new(setupState)) }

// destroySetup invalidates the setup [cgo.Handle] in a sharefs_private structure.
func destroySetup(private_data unsafe.Pointer) (ok bool) {
    if private_data == nil {
        return false
    }
    priv := (*C.struct_sharefs_private)(private_data)

    if h := cgo.Handle(priv.setup); h != 0 {
        priv.setup = 0
        h.Delete()
        ok = true
    }
    return
}

//export sharefs_init
func sharefs_init(_ *C.struct_fuse_conn_info, cfg *C.struct_fuse_config) unsafe.Pointer {
    ctx := C.fuse_get_context()
    priv := (*C.struct_sharefs_private)(ctx.private_data)
    setup := cgo.Handle(priv.setup).Value().(*setupState)

    if os.Geteuid() == 0 {
        log.Println("filesystem daemon must not run as root")
        goto fail
    }

    cfg.use_ino = C.true
    cfg.direct_io = C.false
    // getattr is context-dependent
    cfg.attr_timeout = 0
    cfg.entry_timeout = 0
    cfg.negative_timeout = 0

    // all future filesystem operations happen through this dirfd
    if fd, err := syscall.Open(setup.Source.String(), syscall.O_DIRECTORY|syscall.O_RDONLY|syscall.O_CLOEXEC, 0); err != nil {
        log.Printf("cannot open %q: %v", setup.Source, err)
        goto fail
    } else if err = syscall.Fchdir(fd); err != nil {
        _ = syscall.Close(fd)
        log.Printf("cannot enter %q: %s", setup.Source, err)
        goto fail
    } else {
        priv.dirfd = C.int(fd)
    }

    return ctx.private_data

fail:
    setup.initFailed = true
    C.fuse_exit(ctx.fuse)
    return nil
}

//export sharefs_destroy
func sharefs_destroy(private_data unsafe.Pointer) {
    if private_data != nil {
        destroySetup(private_data)
        priv := (*C.struct_sharefs_private)(private_data)

        if err := syscall.Close(int(priv.dirfd)); err != nil {
            log.Printf("cannot close source directory: %v", err)
        }
    }
}

// showHelp prints the help message.
func showHelp(args *fuseArgs) {
    executableName := sharefsName
    if args.argc > 0 {
        executableName = path.Base(C.GoString(*args.argv))
    } else if name, err := os.Executable(); err == nil {
        executableName = path.Base(name)
    }

    fmt.Printf("usage: %s [options] <mountpoint>\n\n", executableName)

    fmt.Println("Filesystem options:")
    fmt.Println("    -o source=/data/media  source directory to be mounted")
    fmt.Println("    -o setuid=1023         uid to run as when starting as root")
    fmt.Println("    -o setgid=1023         gid to run as when starting as root")

    fmt.Println("\nFUSE options:")
    C.fuse_cmdline_help()
    C.fuse_lib_help(args)
}

// parseOpts parses fuse options via fuse_opt_parse.
func parseOpts(args *fuseArgs, setup *setupState, log *log.Logger) (ok bool) {
    var unsafeOpts struct {
        // Pathname to writable source directory.
        source *C.char

        // Whether to create source directory as root.
        mkdir C.int

        // Decimal string representation of uid to set when running as root.
        setuid *C.char
        // Decimal string representation of gid to set when running as root.
        setgid *C.char

        // Decimal string representation of open file descriptor to read setupState from.
        // This is an internal detail for containerisation and must not be specified directly.
        setup *C.char
    }

    if C.fuse_opt_parse(args, unsafe.Pointer(&unsafeOpts), &[]C.struct_fuse_opt{
        {templ: C.CString("source=%s"), offset: C.ulong(unsafe.Offsetof(unsafeOpts.source)), value: 0},
        {templ: C.CString("mkdir"), offset: C.ulong(unsafe.Offsetof(unsafeOpts.mkdir)), value: 1},
        {templ: C.CString("setuid=%s"), offset: C.ulong(unsafe.Offsetof(unsafeOpts.setuid)), value: 0},
        {templ: C.CString("setgid=%s"), offset: C.ulong(unsafe.Offsetof(unsafeOpts.setgid)), value: 0},

        {templ: C.CString("setup=%s"), offset: C.ulong(unsafe.Offsetof(unsafeOpts.setup)), value: 0},

        C._FUSE_OPT_END(),
    }[0], nil) == -1 {
        return false
    }

    if unsafeOpts.source != nil {
        defer C.free(unsafe.Pointer(unsafeOpts.source))
    }
    if unsafeOpts.setuid != nil {
        defer C.free(unsafe.Pointer(unsafeOpts.setuid))
    }
    if unsafeOpts.setgid != nil {
        defer C.free(unsafe.Pointer(unsafeOpts.setgid))
    }

    if unsafeOpts.setup != nil {
        defer C.free(unsafe.Pointer(unsafeOpts.setup))

        if v, err := strconv.Atoi(C.GoString(unsafeOpts.setup)); err != nil || v < 3 {
            log.Println("invalid value for option setup")
            return false
        } else {
            r := os.NewFile(uintptr(v), "setup")
            defer func() {
                if err = r.Close(); err != nil {
                    log.Println(err)
                }
            }()
            if err = gob.NewDecoder(r).Decode(setup); err != nil {
                log.Println(err)
                return false
            }
        }
        if setup.Fuse < 3 {
            log.Println("invalid file descriptor", setup.Fuse)
            return false
        }
        return true
    }

    if unsafeOpts.source == nil {
        showHelp(args)
        return false
    } else if a, err := check.NewAbs(C.GoString(unsafeOpts.source)); err != nil {
        log.Println(err)
        return false
    } else {
        setup.Source = a
    }
    setup.mkdir = unsafeOpts.mkdir != 0

    if unsafeOpts.setuid == nil {
        setup.Setuid = -1
    } else if v, err := strconv.Atoi(C.GoString(unsafeOpts.setuid)); err != nil || v <= 0 {
        log.Println("invalid value for option setuid")
        return false
    } else {
        setup.Setuid = v
    }
    if unsafeOpts.setgid == nil {
        setup.Setgid = -1
    } else if v, err := strconv.Atoi(C.GoString(unsafeOpts.setgid)); err != nil || v <= 0 {
        log.Println("invalid value for option setgid")
        return false
    } else {
        setup.Setgid = v
    }

    return true
}

// copyArgs returns a heap allocated copy of an argument slice in fuse_args representation.
func copyArgs(s ...string) fuseArgs {
    if len(s) == 0 {
        return fuseArgs{argc: 0, argv: nil, allocated: 0}
    }
    args := unsafe.Slice((**C.char)(C.malloc(C.size_t(uintptr(len(s))*unsafe.Sizeof(s[0])))), len(s))
    for i, arg := range s {
        args[i] = C.CString(arg)
    }
    return fuseArgs{argc: C.int(len(s)), argv: &args[0], allocated: 1}
}

// freeArgs frees the contents of argument list.
func freeArgs(args *fuseArgs) { C.fuse_opt_free_args(args) }

// unsafeAddArgument adds an argument to fuseArgs via fuse_opt_add_arg.
// The last byte of arg must be 0.
func unsafeAddArgument(args *fuseArgs, arg string) {
    C.fuse_opt_add_arg(args, (*C.char)(unsafe.Pointer(unsafe.StringData(arg))))
}

func _main(s ...string) (exitCode int) {
    msg := message.New(log.Default())
    container.TryArgv0(msg)
    runtime.LockOSThread()

    // don't mask creation mode, kernel already did that
    syscall.Umask(0)

    var pinner runtime.Pinner
    defer pinner.Unpin()

    args := copyArgs(s...)
    defer freeArgs(&args)

    // this causes the kernel to enforce access control based on
    // struct stat populated by sharefs_getattr
    unsafeAddArgument(&args, "-odefault_permissions\x00")

    var priv C.struct_sharefs_private
    pinner.Pin(&priv)
    var setup setupState
    priv.setup = C.uintptr_t(cgo.NewHandle(&setup))
    defer destroySetup(unsafe.Pointer(&priv))

    var opts C.struct_fuse_cmdline_opts
    if C.fuse_parse_cmdline(&args, &opts) != 0 {
        return 1
    }
    if opts.mountpoint != nil {
        defer C.free(unsafe.Pointer(opts.mountpoint))
    }

    if opts.show_version != 0 {
        fmt.Println("hakurei version", info.Version())
        fmt.Println("FUSE library version", C.GoString(C.fuse_pkgversion()))
        C.fuse_lowlevel_version()
        return 0
    }

    if opts.show_help != 0 {
        showHelp(&args)
        return 0
    } else if opts.mountpoint == nil {
        log.Println("no mountpoint specified")
        return 2
    } else {
        // hack to keep fuse_parse_cmdline happy in the container
        mountpoint := C.GoString(opts.mountpoint)
        pathnameArg := -1
        for i, arg := range s {
            if arg == mountpoint {
                pathnameArg = i
                break
            }
        }
        if pathnameArg < 0 {
            log.Println("mountpoint must be absolute")
            return 2
        }
        s[pathnameArg] = container.Nonexistent
    }

    if !parseOpts(&args, &setup, msg.GetLogger()) {
        return 1
    }
    asRoot := os.Geteuid() == 0

    if asRoot {
        if setup.Setuid <= 0 || setup.Setgid <= 0 {
            log.Println("setuid and setgid must not be 0")
            return 1
        }

        if setup.Fuse >= 3 {
            log.Println("filesystem daemon must not run as root")
            return 1
        }

        if setup.mkdir {
            if err := os.MkdirAll(setup.Source.String(), 0700); err != nil {
                if !errors.Is(err, os.ErrExist) {
                    log.Println(err)
                    return 1
                }
                // skip setup for existing source directory
            } else if err = os.Chown(setup.Source.String(), setup.Setuid, setup.Setgid); err != nil {
                log.Println(err)
                return 1
            }
        }
    } else if setup.Fuse < 3 && (setup.Setuid > 0 || setup.Setgid > 0) {
        log.Println("setuid and setgid has no effect when not starting as root")
        return 1
    } else if setup.mkdir {
        log.Println("mkdir has no effect when not starting as root")
        return 1
    }

    op := C.struct_fuse_operations{
        init:    closure(C.sharefs_init),
        destroy: closure(C.sharefs_destroy),

        // implemented in fuse-helper.c
        getattr:  closure(C.sharefs_getattr),
        readdir:  closure(C.sharefs_readdir),
        mkdir:    closure(C.sharefs_mkdir),
        unlink:   closure(C.sharefs_unlink),
        rmdir:    closure(C.sharefs_rmdir),
        rename:   closure(C.sharefs_rename),
        truncate: closure(C.sharefs_truncate),
        utimens:  closure(C.sharefs_utimens),
        create:   closure(C.sharefs_create),
        open:     closure(C.sharefs_open),
        read:     closure(C.sharefs_read),
        write:    closure(C.sharefs_write),
        statfs:   closure(C.sharefs_statfs),
        release:  closure(C.sharefs_release),
        fsync:    closure(C.sharefs_fsync),
    }

    fuse := C.fuse_new_fn(&args, &op, C.size_t(unsafe.Sizeof(op)), unsafe.Pointer(&priv))
    if fuse == nil {
        return 3
    }
    defer C.fuse_destroy(fuse)
    se := C.fuse_get_session(fuse)

    if setup.Fuse < 3 {
        // unconfined, set up mount point and container
        if C.fuse_mount(fuse, opts.mountpoint) != 0 {
            return 4
        }
        // unmounted by initial process
        defer func() {
            if exitCode == 5 {
                C.fuse_unmount(fuse)
            }
        }()

        if asRoot {
            if err := syscall.Setresgid(setup.Setgid, setup.Setgid, setup.Setgid); err != nil {
                log.Printf("cannot set gid: %v", err)
                return 5
            }
            if err := syscall.Setgroups(nil); err != nil {
                log.Printf("cannot set supplementary groups: %v", err)
                return 5
            }
            if err := syscall.Setresuid(setup.Setuid, setup.Setuid, setup.Setuid); err != nil {
                log.Printf("cannot set uid: %v", err)
                return 5
            }
        }

        msg.SwapVerbose(opts.debug != 0)
        ctx := context.Background()
        if opts.foreground != 0 {
            c, cancel := signal.NotifyContext(ctx, syscall.SIGINT, syscall.SIGTERM)
            defer cancel()
            ctx = c
        }
        z := container.New(ctx, msg)
        z.AllowOrphan = opts.foreground == 0
        z.Env = os.Environ()

        // keep fuse_parse_cmdline happy in the container
        z.Tmpfs(check.MustAbs(container.Nonexistent), 1<<10, 0755)

        if a, err := check.NewAbs(container.MustExecutable(msg)); err != nil {
            log.Println(err)
            return 5
        } else {
            z.Path = a
        }
        z.Args = s
        z.ForwardCancel = true
        z.SeccompPresets |= std.PresetStrict
        z.ParentPerm = 0700
        z.Bind(setup.Source, setup.Source, std.BindWritable)
        if !z.AllowOrphan {
            z.WaitDelay = hst.WaitDelayMax
            z.Stdin, z.Stdout, z.Stderr = os.Stdin, os.Stdout, os.Stderr
        }
        z.Bind(z.Path, z.Path, 0)
        setup.Fuse = int(proc.ExtraFileSlice(&z.ExtraFiles, os.NewFile(uintptr(C.fuse_session_fd(se)), "fuse")))

        var setupWriter io.WriteCloser
        if fd, w, err := container.Setup(&z.ExtraFiles); err != nil {
            log.Println(err)
            return 5
        } else {
            z.Args = append(z.Args, "-osetup="+strconv.Itoa(fd))
            setupWriter = w
        }

        if err := z.Start(); err != nil {
            if m, ok := message.GetMessage(err); ok {
                log.Println(m)
            } else {
                log.Println(err)
            }
            return 5
        }
        if err := z.Serve(); err != nil {
            if m, ok := message.GetMessage(err); ok {
                log.Println(m)
            } else {
                log.Println(err)
            }
            return 5
        }

        if err := gob.NewEncoder(setupWriter).Encode(&setup); err != nil {
            log.Println(err)
            return 5
        } else if err = setupWriter.Close(); err != nil {
            log.Println(err)
        }

        if !z.AllowOrphan {
            if err := z.Wait(); err != nil {
                var exitError *exec.ExitError
                if !errors.As(err, &exitError) || exitError == nil {
                    log.Println(err)
                    return 5
                }
                switch code := exitError.ExitCode(); syscall.Signal(code & 0x7f) {
                case syscall.SIGINT:
                case syscall.SIGTERM:

                default:
                    return code
                }
            }
        }
        return 0
    } else { // confined
        C.free(unsafe.Pointer(opts.mountpoint))
        // must be heap allocated
        opts.mountpoint = C.CString("/dev/fd/" + strconv.Itoa(setup.Fuse))

        if err := os.Chdir("/"); err != nil {
            log.Println(err)
        }
    }

    if C.fuse_mount(fuse, opts.mountpoint) != 0 {
        return 4
    }
    defer C.fuse_unmount(fuse)

    if C.fuse_set_signal_handlers(se) != 0 {
        return 6
    }
    defer C.fuse_remove_signal_handlers(se)

    if opts.singlethread != 0 {
        if C.fuse_loop(fuse) != 0 {
            return 8
        }
    } else {
        loopConfig := C.fuse_loop_cfg_create()
        if loopConfig == nil {
            return 7
        }
        defer C.fuse_loop_cfg_destroy(loopConfig)

        C.fuse_loop_cfg_set_clone_fd(loopConfig, C.uint(opts.clone_fd))

        C.fuse_loop_cfg_set_idle_threads(loopConfig, opts.max_idle_threads)
        C.fuse_loop_cfg_set_max_threads(loopConfig, opts.max_threads)
        if C.fuse_loop_mt(fuse, loopConfig) != 0 {
            return 8
        }
    }

    if setup.initFailed {
        return 1
    }
    return 0
}
```
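When sharefs re-executes itself inside the container, the initial process gob-encodes the exported fields of setupState over an extra file descriptor and appends "-osetup=&lt;fd&gt;" to the child's argv; parseOpts then decodes the same stream on the other side. A self-contained sketch of that handoff over a pipe, with setupPayload standing in for setupState (Source is a plain string here rather than *check.Absolute, and the field set is limited to what the gob stream actually carries):

```go
package main

import (
	"encoding/gob"
	"fmt"
	"os"
)

// setupPayload stands in for the exported fields of setupState that cross the
// gob boundary in fuse.go: the fuse session fd, the source pathname, and the
// uid/gid to drop to when started as root.
type setupPayload struct {
	Fuse           int
	Source         string
	Setuid, Setgid int
}

func main() {
	// In sharefs the writer end stays with the initial process and the reader
	// end becomes an extra file descriptor of the confined child, whose number
	// is passed as "-osetup=<fd>"; a pipe within one process illustrates the
	// same wire format.
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}

	go func() {
		defer w.Close()
		_ = gob.NewEncoder(w).Encode(&setupPayload{Fuse: 3, Source: "/var/lib/hakurei/sdcard", Setuid: 1023, Setgid: 1023})
	}()

	var got setupPayload
	if err := gob.NewDecoder(r).Decode(&got); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", got)
}
```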
cmd/sharefs/fuse_test.go (new file, 113 lines):

```go
package main

import (
    "bytes"
    "log"
    "reflect"
    "testing"

    "hakurei.app/container/check"
)

func TestParseOpts(t *testing.T) {
    t.Parallel()

    testCases := []struct {
        name    string
        args    []string
        want    setupState
        wantLog string
        wantOk  bool
    }{
        {"zero length", []string{}, setupState{}, "", false},

        {"not absolute", []string{"sharefs",
            "-o", "source=nonexistent",
            "-o", "setuid=1023",
            "-o", "setgid=1023",
        }, setupState{}, "sharefs: path \"nonexistent\" is not absolute\n", false},

        {"not specified", []string{"sharefs",
            "-o", "setuid=1023",
            "-o", "setgid=1023",
        }, setupState{}, "", false},

        {"invalid setuid", []string{"sharefs",
            "-o", "source=/proc/nonexistent",
            "-o", "setuid=ff",
            "-o", "setgid=1023",
        }, setupState{
            Source: check.MustAbs("/proc/nonexistent"),
        }, "sharefs: invalid value for option setuid\n", false},

        {"invalid setgid", []string{"sharefs",
            "-o", "source=/proc/nonexistent",
            "-o", "setuid=1023",
            "-o", "setgid=ff",
        }, setupState{
            Source: check.MustAbs("/proc/nonexistent"),
            Setuid: 1023,
        }, "sharefs: invalid value for option setgid\n", false},

        {"simple", []string{"sharefs",
            "-o", "source=/proc/nonexistent",
        }, setupState{
            Source: check.MustAbs("/proc/nonexistent"),
            Setuid: -1,
            Setgid: -1,
        }, "", true},

        {"root", []string{"sharefs",
            "-o", "source=/proc/nonexistent",
            "-o", "setuid=1023",
            "-o", "setgid=1023",
        }, setupState{
            Source: check.MustAbs("/proc/nonexistent"),
            Setuid: 1023,
            Setgid: 1023,
        }, "", true},

        {"setuid", []string{"sharefs",
            "-o", "source=/proc/nonexistent",
            "-o", "setuid=1023",
        }, setupState{
            Source: check.MustAbs("/proc/nonexistent"),
            Setuid: 1023,
            Setgid: -1,
        }, "", true},

        {"setgid", []string{"sharefs",
            "-o", "source=/proc/nonexistent",
            "-o", "setgid=1023",
        }, setupState{
            Source: check.MustAbs("/proc/nonexistent"),
            Setuid: -1,
            Setgid: 1023,
        }, "", true},
    }
    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            t.Parallel()

            var (
                got setupState
                buf bytes.Buffer
            )
            args := copyArgs(tc.args...)
            defer freeArgs(&args)
            unsafeAddArgument(&args, "-odefault_permissions\x00")

            if ok := parseOpts(&args, &got, log.New(&buf, "sharefs: ", 0)); ok != tc.wantOk {
                t.Errorf("parseOpts: ok = %v, want %v", ok, tc.wantOk)
            }

            if !reflect.DeepEqual(&got, &tc.want) {
                t.Errorf("parseOpts: setup = %#v, want %#v", got, tc.want)
            }

            if buf.String() != tc.wantLog {
                t.Errorf("parseOpts: log =\n%s\nwant\n%s", buf.String(), tc.wantLog)
            }
        })
    }
}
```
cmd/sharefs/main.go (new file, 31 lines):

```go
package main

import (
    "log"
    "os"
    "slices"
)

// sharefsName is the prefix used by log.std in the sharefs process.
const sharefsName = "sharefs"

// handleMountArgs returns an alternative, libfuse-compatible args slice for
// args passed by mount -t fuse.sharefs [options] sharefs <mountpoint>.
//
// In this case, args always has a length of 5 with index 0 being what comes
// after "fuse." in the filesystem type, 1 is the uninterpreted string passed
// to mount (sharefsName is used as the magic string to enable this hack),
// 2 is passed through to libfuse as mountpoint, and 3 is always "-o".
func handleMountArgs(args []string) []string {
    if len(args) == 5 && args[1] == sharefsName && args[3] == "-o" {
        return []string{sharefsName, args[2], "-o", args[4]}
    }
    return slices.Clone(args)
}

func main() {
    log.SetFlags(0)
    log.SetPrefix(sharefsName + ": ")

    os.Exit(_main(handleMountArgs(os.Args)...))
}
```
cmd/sharefs/main_test.go (new file, 29 lines):

```go
package main

import (
    "slices"
    "testing"
)

func TestHandleMountArgs(t *testing.T) {
    t.Parallel()

    testCases := []struct {
        name string
        args []string
        want []string
    }{
        {"nil", nil, nil},
        {"passthrough", []string{"sharefs", "-V"}, []string{"sharefs", "-V"}},
        {"replace", []string{"/sbin/sharefs", "sharefs", "/sdcard", "-o", "rw"}, []string{"sharefs", "/sdcard", "-o", "rw"}},
    }
    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            t.Parallel()

            if got := handleMountArgs(tc.args); !slices.Equal(got, tc.want) {
                t.Errorf("handleMountArgs: %q, want %q", got, tc.want)
            }
        })
    }
}
```
cmd/sharefs/test/configuration.nix (new file, 41 lines):

```nix
{ pkgs, ... }:
{
  users.users = {
    alice = {
      isNormalUser = true;
      description = "Alice Foobar";
      password = "foobar";
      uid = 1000;
    };
  };

  home-manager.users.alice.home.stateVersion = "24.11";

  # Automatically login on tty1 as a normal user:
  services.getty.autologinUser = "alice";

  environment = {
    # For benchmarking sharefs:
    systemPackages = [ pkgs.fsmark ];
  };

  virtualisation = {
    diskSize = 6 * 1024;

    qemu.options = [
      # Increase test performance:
      "-smp 8"
    ];
  };

  environment.hakurei = rec {
    enable = true;
    stateDir = "/var/lib/hakurei";
    sharefs.source = "${stateDir}/sdcard";
    users.alice = 0;

    extraHomeConfig = {
      home.stateVersion = "23.05";
    };
  };
}
```
cmd/sharefs/test/default.nix (new file, 44 lines):

```nix
{
  testers,

  system,
  self,
}:
testers.nixosTest {
  name = "sharefs";
  nodes.machine =
    { options, pkgs, ... }:
    let
      fhs =
        let
          hakurei = options.environment.hakurei.package.default;
        in
        pkgs.buildFHSEnv {
          pname = "hakurei-fhs";
          inherit (hakurei) version;
          targetPkgs = _: hakurei.targetPkgs;
          extraOutputsToInstall = [ "dev" ];
          profile = ''
            export PKG_CONFIG_PATH="/usr/share/pkgconfig:$PKG_CONFIG_PATH"
          '';
        };
    in
    {
      environment.systemPackages = [
        # For go tests:
        (pkgs.writeShellScriptBin "sharefs-workload-hakurei-tests" ''
          cp -r "${self.packages.${system}.hakurei.src}" "/sdcard/hakurei" && cd "/sdcard/hakurei"
          ${fhs}/bin/hakurei-fhs -c 'CC="clang -O3 -Werror" go test ./...'
        '')
      ];

      imports = [
        ./configuration.nix

        self.nixosModules.hakurei
        self.inputs.home-manager.nixosModules.home-manager
      ];
    };

  testScript = builtins.readFile ./test.py;
}
```
60  cmd/sharefs/test/test.py  Normal file
@@ -0,0 +1,60 @@
start_all()
machine.wait_for_unit("multi-user.target")

# To check sharefs version:
print(machine.succeed("sharefs -V"))

# Make sure sharefs started:
machine.wait_for_unit("sdcard.mount")

machine.succeed("mkdir /mnt")
def check_bad_opts_output(opts, want, source="/etc", privileged=False):
    output = machine.fail(("" if privileged else "sudo -u alice -i ") + f"sharefs -f -o source={source},{opts} /mnt 2>&1")
    if output != want:
        raise Exception(f"unexpected output: {output}")

# Malformed setuid/setgid representation:
check_bad_opts_output("setuid=ff", "sharefs: invalid value for option setuid\n")
check_bad_opts_output("setgid=ff", "sharefs: invalid value for option setgid\n")

# Bounds check for setuid/setgid:
check_bad_opts_output("setuid=0", "sharefs: invalid value for option setuid\n")
check_bad_opts_output("setgid=0", "sharefs: invalid value for option setgid\n")
check_bad_opts_output("setuid=-1", "sharefs: invalid value for option setuid\n")
check_bad_opts_output("setgid=-1", "sharefs: invalid value for option setgid\n")

# Non-root setuid/setgid:
check_bad_opts_output("setuid=1023", "sharefs: setuid and setgid has no effect when not starting as root\n")
check_bad_opts_output("setgid=1023", "sharefs: setuid and setgid has no effect when not starting as root\n")
check_bad_opts_output("setuid=1023,setgid=1023", "sharefs: setuid and setgid has no effect when not starting as root\n")
check_bad_opts_output("mkdir", "sharefs: mkdir has no effect when not starting as root\n")

# Starting as root without setuid/setgid:
check_bad_opts_output("allow_other", "sharefs: setuid and setgid must not be 0\n", privileged=True)
check_bad_opts_output("setuid=1023", "sharefs: setuid and setgid must not be 0\n", privileged=True)
check_bad_opts_output("setgid=1023", "sharefs: setuid and setgid must not be 0\n", privileged=True)

# Make sure nothing actually got mounted:
machine.fail("umount /mnt")
machine.succeed("rmdir /mnt")

# Unprivileged mount/unmount:
machine.succeed("sudo -u alice -i mkdir /home/alice/{sdcard,persistent}")
machine.succeed("sudo -u alice -i sharefs -o source=/home/alice/persistent /home/alice/sdcard")
machine.succeed("sudo -u alice -i touch /home/alice/sdcard/check")
machine.succeed("sudo -u alice -i umount /home/alice/sdcard")
machine.succeed("sudo -u alice -i rm /home/alice/persistent/check")
machine.succeed("sudo -u alice -i rmdir /home/alice/{sdcard,persistent}")

# Benchmark sharefs:
machine.succeed("fs_mark -v -d /sdcard/fs_mark -l /tmp/fs_log.txt")
machine.copy_from_vm("/tmp/fs_log.txt", "")

# Check permissions:
machine.succeed("sudo -u sharefs touch /var/lib/hakurei/sdcard/fs_mark/.check")
machine.succeed("sudo -u sharefs rm /var/lib/hakurei/sdcard/fs_mark/.check")
machine.succeed("sudo -u alice rm -rf /sdcard/fs_mark")
machine.fail("ls /var/lib/hakurei/sdcard/fs_mark")

# Run hakurei tests on sharefs:
machine.succeed("sudo -u alice -i sharefs-workload-hakurei-tests")
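The option checks driven by check_bad_opts_output above assume sharefs rejects malformed or out-of-range setuid/setgid values before mounting anything; the real parser is not part of this change. A minimal, purely illustrative Go sketch of that kind of validation (function name and structure are hypothetical, error strings copied from the expected test output):

package main

// Sketch only: sharefs's actual option parsing is not shown in this change.
import (
	"fmt"
	"strconv"
)

// parseOwnerOption mirrors the checks exercised by test.py: the value must be
// numeric and strictly positive, and is only meaningful when starting as root.
func parseOwnerOption(name, value string, isRoot bool) (int, error) {
	id, err := strconv.Atoi(value)
	if err != nil || id <= 0 {
		return 0, fmt.Errorf("sharefs: invalid value for option %s", name)
	}
	if !isRoot {
		return 0, fmt.Errorf("sharefs: setuid and setgid has no effect when not starting as root")
	}
	return id, nil
}

func main() {
	_, err := parseOwnerOption("setuid", "ff", false)
	fmt.Println(err) // sharefs: invalid value for option setuid
}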
@@ -202,7 +202,7 @@ func TestIsAutoRootBindable(t *testing.T) {
 			t.Parallel()
 			var msg message.Msg
 			if tc.log {
-				msg = &kstub{nil, stub.New(t, func(s *stub.Stub[syscallDispatcher]) syscallDispatcher { panic("unreachable") }, stub.Expect{Calls: []stub.Call{
+				msg = &kstub{nil, nil, stub.New(t, func(s *stub.Stub[syscallDispatcher]) syscallDispatcher { panic("unreachable") }, stub.Expect{Calls: []stub.Call{
 					call("verbose", stub.ExpectArgs{[]any{"got unexpected root entry"}}, nil, nil),
 				}})}
 			}
@@ -14,6 +14,7 @@ const (
 
 	CAP_SYS_ADMIN    = 0x15
 	CAP_SETPCAP      = 0x8
+	CAP_NET_ADMIN    = 0xc
 	CAP_DAC_OVERRIDE = 0x1
 )
 
@@ -9,46 +9,60 @@ import (
 	"slices"
 	"strings"
 	"syscall"
+	"unique"
 )
 
 // AbsoluteError is returned by [NewAbs] and holds the invalid pathname.
-type AbsoluteError struct{ Pathname string }
+type AbsoluteError string
 
-func (e *AbsoluteError) Error() string { return fmt.Sprintf("path %q is not absolute", e.Pathname) }
-func (e *AbsoluteError) Is(target error) bool {
-	var ce *AbsoluteError
+func (e AbsoluteError) Error() string {
+	return fmt.Sprintf("path %q is not absolute", string(e))
+}
+
+func (e AbsoluteError) Is(target error) bool {
+	var ce AbsoluteError
 	if !errors.As(target, &ce) {
 		return errors.Is(target, syscall.EINVAL)
 	}
-	return *e == *ce
+	return e == ce
 }
 
 // Absolute holds a pathname checked to be absolute.
-type Absolute struct{ pathname string }
+type Absolute struct{ pathname unique.Handle[string] }
+
+// ok returns whether [Absolute] is not the zero value.
+func (a *Absolute) ok() bool { return a != nil && *a != (Absolute{}) }
 
 // unsafeAbs returns [check.Absolute] on any string value.
-func unsafeAbs(pathname string) *Absolute { return &Absolute{pathname} }
+func unsafeAbs(pathname string) *Absolute {
+	return &Absolute{unique.Make(pathname)}
+}
 
+// String returns the checked pathname.
 func (a *Absolute) String() string {
-	if a.pathname == "" {
+	if !a.ok() {
 		panic("attempted use of zero Absolute")
 	}
+	return a.pathname.Value()
+}
+
+// Handle returns the underlying [unique.Handle].
+func (a *Absolute) Handle() unique.Handle[string] {
 	return a.pathname
 }
 
+// Is efficiently compares the underlying pathname.
 func (a *Absolute) Is(v *Absolute) bool {
 	if a == nil && v == nil {
 		return true
 	}
-	return a != nil && v != nil &&
-		a.pathname != "" && v.pathname != "" &&
-		a.pathname == v.pathname
+	return a.ok() && v.ok() && a.pathname == v.pathname
 }
 
 // NewAbs checks pathname and returns a new [Absolute] if pathname is absolute.
 func NewAbs(pathname string) (*Absolute, error) {
 	if !path.IsAbs(pathname) {
-		return nil, &AbsoluteError{pathname}
+		return nil, AbsoluteError(pathname)
 	}
 	return unsafeAbs(pathname), nil
 }
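The hunk above switches Absolute to an interned unique.Handle, so equal pathnames share one handle and Is becomes a single handle comparison instead of a string compare. A small standalone sketch (not part of this change) of that stdlib behaviour:

package main

// Sketch only: illustrates unique.Make interning, which the new Absolute relies on.
import (
	"fmt"
	"unique"
)

func main() {
	a := unique.Make("/etc/mtab")
	b := unique.Make("/etc/mtab")
	fmt.Println(a == b)                   // true, compared by handle, not by string contents
	fmt.Println(a.Value() == "/etc/mtab") // true, the original string is still recoverable
}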
@@ -70,35 +84,49 @@ func (a *Absolute) Append(elem ...string) *Absolute {
 // Dir calls [path.Dir] with [Absolute] as its argument.
 func (a *Absolute) Dir() *Absolute { return unsafeAbs(path.Dir(a.String())) }
 
-func (a *Absolute) GobEncode() ([]byte, error) { return []byte(a.String()), nil }
+// GobEncode returns the checked pathname.
+func (a *Absolute) GobEncode() ([]byte, error) {
+	return []byte(a.String()), nil
+}
+
+// GobDecode stores data if it represents an absolute pathname.
 func (a *Absolute) GobDecode(data []byte) error {
 	pathname := string(data)
 	if !path.IsAbs(pathname) {
-		return &AbsoluteError{pathname}
+		return AbsoluteError(pathname)
 	}
-	a.pathname = pathname
+	a.pathname = unique.Make(pathname)
 	return nil
 }
 
-func (a *Absolute) MarshalJSON() ([]byte, error) { return json.Marshal(a.String()) }
+// MarshalJSON returns a JSON representation of the checked pathname.
+func (a *Absolute) MarshalJSON() ([]byte, error) {
+	return json.Marshal(a.String())
+}
+
+// UnmarshalJSON stores data if it represents an absolute pathname.
 func (a *Absolute) UnmarshalJSON(data []byte) error {
 	var pathname string
 	if err := json.Unmarshal(data, &pathname); err != nil {
 		return err
 	}
 	if !path.IsAbs(pathname) {
-		return &AbsoluteError{pathname}
+		return AbsoluteError(pathname)
 	}
-	a.pathname = pathname
+	a.pathname = unique.Make(pathname)
 	return nil
 }
 
 // SortAbs calls [slices.SortFunc] for a slice of [Absolute].
 func SortAbs(x []*Absolute) {
-	slices.SortFunc(x, func(a, b *Absolute) int { return strings.Compare(a.String(), b.String()) })
+	slices.SortFunc(x, func(a, b *Absolute) int {
+		return strings.Compare(a.String(), b.String())
+	})
 }
 
 // CompactAbs calls [slices.CompactFunc] for a slice of [Absolute].
 func CompactAbs(s []*Absolute) []*Absolute {
-	return slices.CompactFunc(s, func(a *Absolute, b *Absolute) bool { return a.String() == b.String() })
+	return slices.CompactFunc(s, func(a *Absolute, b *Absolute) bool {
+		return a.Is(b)
+	})
 }
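Because AbsoluteError is now a plain comparable string type, callers can match it with errors.Is directly, and it still maps to syscall.EINVAL through its Is method. A brief sketch (not part of this change; the check package import path is assumed here):

package main

// Sketch only: the import path for the check package is an assumption.
import (
	"errors"
	"fmt"
	"syscall"

	"hakurei.app/container/check"
)

func main() {
	_, err := check.NewAbs("etc/mtab")
	fmt.Println(errors.Is(err, check.AbsoluteError("etc/mtab"))) // true, by value comparison
	fmt.Println(errors.Is(err, syscall.EINVAL))                  // true, via AbsoluteError.Is
}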
@@ -31,8 +31,8 @@ func TestAbsoluteError(t *testing.T) {
 	}{
 		{"EINVAL", new(AbsoluteError), syscall.EINVAL, true},
 		{"not EINVAL", new(AbsoluteError), syscall.EBADE, false},
-		{"ne val", new(AbsoluteError), &AbsoluteError{Pathname: "etc"}, false},
+		{"ne val", new(AbsoluteError), AbsoluteError("etc"), false},
-		{"equals", &AbsoluteError{Pathname: "etc"}, &AbsoluteError{Pathname: "etc"}, true},
+		{"equals", AbsoluteError("etc"), AbsoluteError("etc"), true},
 	}
 
 	for _, tc := range testCases {
@@ -45,7 +45,7 @@ func TestAbsoluteError(t *testing.T) {
 		t.Parallel()
 
 		want := `path "etc" is not absolute`
-		if got := (&AbsoluteError{Pathname: "etc"}).Error(); got != want {
+		if got := (AbsoluteError("etc")).Error(); got != want {
 			t.Errorf("Error: %q, want %q", got, want)
 		}
 	})
@@ -62,8 +62,8 @@ func TestNewAbs(t *testing.T) {
 		wantErr error
 	}{
 		{"good", "/etc", MustAbs("/etc"), nil},
-		{"not absolute", "etc", nil, &AbsoluteError{Pathname: "etc"}},
+		{"not absolute", "etc", nil, AbsoluteError("etc")},
-		{"zero", "", nil, &AbsoluteError{Pathname: ""}},
+		{"zero", "", nil, AbsoluteError("")},
 	}
 
 	for _, tc := range testCases {
@@ -84,7 +84,7 @@ func TestNewAbs(t *testing.T) {
 		t.Parallel()
 
 		defer func() {
-			wantPanic := &AbsoluteError{Pathname: "etc"}
+			wantPanic := AbsoluteError("etc")
 
 			if r := recover(); !reflect.DeepEqual(r, wantPanic) {
 				t.Errorf("MustAbs: panic = %v; want %v", r, wantPanic)
@@ -175,7 +175,7 @@ func TestCodecAbsolute(t *testing.T) {
 
 			`"/etc"`, `{"val":"/etc","magic":3236757504}`},
 		{"not absolute", nil,
-			&AbsoluteError{Pathname: "etc"},
+			AbsoluteError("etc"),
 			"\t\x7f\x05\x01\x02\xff\x82\x00\x00\x00\a\xff\x80\x00\x03etc",
 			",\xff\x83\x03\x01\x01\x06sCheck\x01\xff\x84\x00\x01\x02\x01\bPathname\x01\xff\x80\x00\x01\x05Magic\x01\x06\x00\x00\x00\t\x7f\x05\x01\x02\xff\x82\x00\x00\x00\x0f\xff\x84\x01\x03etc\x01\xfb\x01\x81\xda\x00\x00\x00",
 
@@ -35,6 +35,8 @@ type (
 	// Container represents a container environment being prepared or run.
 	// None of [Container] methods are safe for concurrent use.
 	Container struct {
+		// Whether the container init should stay alive after its parent terminates.
+		AllowOrphan bool
 		// Cgroup fd, nil to disable.
 		Cgroup *int
 		// ExtraFiles passed through to initial process in the container,
@@ -252,8 +254,7 @@ func (p *Container) Start() error {
 	}
 	p.cmd.Dir = fhs.Root
 	p.cmd.SysProcAttr = &SysProcAttr{
 		Setsid:    !p.RetainSession,
-		Pdeathsig: SIGKILL,
 		Cloneflags: CLONE_NEWUSER | CLONE_NEWPID | CLONE_NEWNS |
 			CLONE_NEWIPC | CLONE_NEWUTS | CLONE_NEWCGROUP,
 
@@ -262,12 +263,17 @@ func (p *Container) Start() error {
 			CAP_SYS_ADMIN,
 			// drop capabilities
 			CAP_SETPCAP,
+			// bring up loopback interface
+			CAP_NET_ADMIN,
 			// overlay access to upperdir and workdir
 			CAP_DAC_OVERRIDE,
 		},
 
 		UseCgroupFD: p.Cgroup != nil,
 	}
+	if !p.AllowOrphan {
+		p.cmd.SysProcAttr.Pdeathsig = SIGKILL
+	}
 	if p.cmd.SysProcAttr.UseCgroupFD {
 		p.cmd.SysProcAttr.CgroupFD = *p.Cgroup
 	}
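The new AllowOrphan flag gates the parent-death signal that was previously set unconditionally. A standalone sketch (not part of this change) of the underlying mechanism it toggles, using plain os/exec on Linux:

package main

// Sketch only: illustrates the Pdeathsig behaviour that AllowOrphan controls.
import (
	"os/exec"
	"syscall"
)

func main() {
	allowOrphan := false // mirrors Container.AllowOrphan

	cmd := exec.Command("sleep", "60")
	cmd.SysProcAttr = &syscall.SysProcAttr{}
	if !allowOrphan {
		// Same conditional as Container.Start above: only tie the child's
		// lifetime to the parent when orphans are not allowed.
		cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL
	}
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	_ = cmd.Wait()
}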
@@ -274,13 +274,13 @@ var containerTestCases = []struct {
 			Dev(check.MustAbs("/dev"), true),
 		),
 		earlyMnt(
-			ent("/", "/dev", "ro,nosuid,nodev,relatime", "tmpfs", "devtmpfs", ignore),
+			ent("/", "/dev", "ro,nosuid,nodev,relatime", "tmpfs", ignore, ignore),
-			ent("/null", "/dev/null", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
+			ent("/null", "/dev/null", ignore, "devtmpfs", ignore, ignore),
-			ent("/zero", "/dev/zero", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
+			ent("/zero", "/dev/zero", ignore, "devtmpfs", ignore, ignore),
-			ent("/full", "/dev/full", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
+			ent("/full", "/dev/full", ignore, "devtmpfs", ignore, ignore),
-			ent("/random", "/dev/random", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
+			ent("/random", "/dev/random", ignore, "devtmpfs", ignore, ignore),
-			ent("/urandom", "/dev/urandom", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
+			ent("/urandom", "/dev/urandom", ignore, "devtmpfs", ignore, ignore),
-			ent("/tty", "/dev/tty", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
+			ent("/tty", "/dev/tty", ignore, "devtmpfs", ignore, ignore),
 			ent("/", "/dev/pts", "rw,nosuid,noexec,relatime", "devpts", "devpts", "rw,mode=620,ptmxmode=666"),
 			ent("/", "/dev/mqueue", "rw,nosuid,nodev,noexec,relatime", "mqueue", "mqueue", "rw"),
 			ent("/", "/dev/shm", "rw,nosuid,nodev,relatime", "tmpfs", "tmpfs", ignore),
@@ -292,13 +292,13 @@ var containerTestCases = []struct {
 			Dev(check.MustAbs("/dev"), false),
 		),
 		earlyMnt(
-			ent("/", "/dev", "ro,nosuid,nodev,relatime", "tmpfs", "devtmpfs", ignore),
+			ent("/", "/dev", "ro,nosuid,nodev,relatime", "tmpfs", ignore, ignore),
-			ent("/null", "/dev/null", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
+			ent("/null", "/dev/null", ignore, "devtmpfs", ignore, ignore),
-			ent("/zero", "/dev/zero", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
+			ent("/zero", "/dev/zero", ignore, "devtmpfs", ignore, ignore),
-			ent("/full", "/dev/full", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
+			ent("/full", "/dev/full", ignore, "devtmpfs", ignore, ignore),
-			ent("/random", "/dev/random", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
+			ent("/random", "/dev/random", ignore, "devtmpfs", ignore, ignore),
-			ent("/urandom", "/dev/urandom", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
+			ent("/urandom", "/dev/urandom", ignore, "devtmpfs", ignore, ignore),
-			ent("/tty", "/dev/tty", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
+			ent("/tty", "/dev/tty", ignore, "devtmpfs", ignore, ignore),
 			ent("/", "/dev/pts", "rw,nosuid,noexec,relatime", "devpts", "devpts", "rw,mode=620,ptmxmode=666"),
 			ent("/", "/dev/shm", "rw,nosuid,nodev,relatime", "tmpfs", "tmpfs", ignore),
 		),
@@ -690,11 +690,22 @@ func init() {
 			return fmt.Errorf("got more than %d entries", len(mnt))
 		}
 
-		// ugly hack but should be reliable and is less likely to false negative than comparing by parsed flags
-		cur.VfsOptstr = strings.TrimSuffix(cur.VfsOptstr, ",relatime")
-		cur.VfsOptstr = strings.TrimSuffix(cur.VfsOptstr, ",noatime")
-		mnt[i].VfsOptstr = strings.TrimSuffix(mnt[i].VfsOptstr, ",relatime")
-		mnt[i].VfsOptstr = strings.TrimSuffix(mnt[i].VfsOptstr, ",noatime")
+		// ugly hack but should be reliable and is less likely to
+		//false negative than comparing by parsed flags
+		for _, s := range []string{
+			"relatime",
+			"noatime",
+		} {
+			cur.VfsOptstr = strings.TrimSuffix(cur.VfsOptstr, ","+s)
+			mnt[i].VfsOptstr = strings.TrimSuffix(mnt[i].VfsOptstr, ","+s)
+		}
+		for _, s := range []string{
+			"seclabel",
+			"inode64",
+		} {
+			cur.FsOptstr = strings.Replace(cur.FsOptstr, ","+s, "", 1)
+			mnt[i].FsOptstr = strings.Replace(mnt[i].FsOptstr, ","+s, "", 1)
+		}
 
 		if !cur.EqualWithIgnore(mnt[i], "\x00") {
 			fail = true
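The new loops above normalize away atime flags and host-dependent superblock options (seclabel on SELinux hosts, inode64 on newer tmpfs) before comparing mountinfo entries. A small standalone sketch (not part of this change; the helper name is hypothetical) of the same normalization factored out:

package main

// Sketch only: the helper name normalizeMountEntry does not exist in the change.
import (
	"fmt"
	"strings"
)

// normalizeMountEntry strips options that vary between hosts so that
// mountinfo comparisons stay stable.
func normalizeMountEntry(vfsOptstr, fsOptstr string) (string, string) {
	for _, s := range []string{"relatime", "noatime"} {
		vfsOptstr = strings.TrimSuffix(vfsOptstr, ","+s)
	}
	for _, s := range []string{"seclabel", "inode64"} {
		fsOptstr = strings.Replace(fsOptstr, ","+s, "", 1)
	}
	return vfsOptstr, fsOptstr
}

func main() {
	fmt.Println(normalizeMountEntry("rw,nosuid,nodev,relatime", "rw,seclabel,inode64"))
	// Output: rw,nosuid,nodev rw
}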
@@ -61,6 +61,8 @@ type syscallDispatcher interface {
 	mountTmpfs(fsname, target string, flags uintptr, size int, perm os.FileMode) error
 	// ensureFile provides ensureFile.
 	ensureFile(name string, perm, pperm os.FileMode) error
+	// mustLoopback provides mustLoopback.
+	mustLoopback(msg message.Msg)
 
 	// seccompLoad provides [seccomp.Load].
 	seccompLoad(rules []std.NativeRule, flags seccomp.ExportFlag) error
@@ -164,6 +166,7 @@ func (k direct) mountTmpfs(fsname, target string, flags uintptr, size int, perm
 func (direct) ensureFile(name string, perm, pperm os.FileMode) error {
 	return ensureFile(name, perm, pperm)
 }
+func (direct) mustLoopback(msg message.Msg) { mustLoopback(msg) }
 
 func (direct) seccompLoad(rules []std.NativeRule, flags seccomp.ExportFlag) error {
 	return seccomp.Load(rules, flags)
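The new mustLoopback hook follows the existing dispatcher indirection: production code calls through the interface, the direct type forwards to the real implementation, and tests substitute a stub. A reduced standalone sketch (not part of this change; names are simplified stand-ins):

package main

// Sketch only: simplified stand-ins for syscallDispatcher, direct and kstub.
import "fmt"

type dispatcher interface{ loopbackUp() }

type direct struct{}

func (direct) loopbackUp() { fmt.Println("talking to RTNETLINK") }

type stub struct{ calls int }

func (s *stub) loopbackUp() { s.calls++ } // no-op in tests, just recorded

func bringUpNetwork(k dispatcher, hostNet bool) {
	if !hostNet {
		k.loopbackUp()
	}
}

func main() {
	s := &stub{}
	bringUpNetwork(s, false)
	fmt.Println("stubbed calls:", s.calls) // 1
}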
@@ -162,7 +162,8 @@ func checkSimple(t *testing.T, fname string, testCases []simpleTestCase) {
 			t.Parallel()
 
 			wait4signal := make(chan struct{})
-			k := &kstub{wait4signal, stub.New(t, func(s *stub.Stub[syscallDispatcher]) syscallDispatcher { return &kstub{wait4signal, s} }, tc.want)}
+			lockNotify := make(chan struct{})
+			k := &kstub{wait4signal, lockNotify, stub.New(t, func(s *stub.Stub[syscallDispatcher]) syscallDispatcher { return &kstub{wait4signal, lockNotify, s} }, tc.want)}
 			defer stub.HandleExit(t)
 			if err := tc.f(k); !reflect.DeepEqual(err, tc.wantErr) {
 				t.Errorf("%s: error = %v, want %v", fname, err, tc.wantErr)
@@ -200,8 +201,8 @@ func checkOpBehaviour(t *testing.T, testCases []opBehaviourTestCase) {
 			t.Helper()
 			t.Parallel()
 
-			k := &kstub{nil, stub.New(t,
+			k := &kstub{nil, nil, stub.New(t,
-				func(s *stub.Stub[syscallDispatcher]) syscallDispatcher { return &kstub{nil, s} },
+				func(s *stub.Stub[syscallDispatcher]) syscallDispatcher { return &kstub{nil, nil, s} },
 				stub.Expect{Calls: slices.Concat(tc.early, []stub.Call{{Name: stub.CallSeparator}}, tc.apply)},
 			)}
 			state := &setupState{Params: tc.params, Msg: k}
@@ -322,12 +323,19 @@ const (
 
 type kstub struct {
 	wait4signal chan struct{}
+	lockNotify  chan struct{}
 	*stub.Stub[syscallDispatcher]
 }
 
 func (k *kstub) new(f func(k syscallDispatcher)) { k.Helper(); k.New(f) }
 
-func (k *kstub) lockOSThread() { k.Helper(); k.Expects("lockOSThread") }
+func (k *kstub) lockOSThread() {
+	k.Helper()
+	expect := k.Expects("lockOSThread")
+	if k.lockNotify != nil && expect.Ret == magicWait4Signal {
+		<-k.lockNotify
+	}
+}
 
 func (k *kstub) setPtracer(pid uintptr) error {
 	k.Helper()
@@ -457,6 +465,8 @@ func (k *kstub) ensureFile(name string, perm, pperm os.FileMode) error {
 		stub.CheckArg(k.Stub, "pperm", pperm, 2))
 }
 
+func (*kstub) mustLoopback(message.Msg) { /* noop */ }
+
 func (k *kstub) seccompLoad(rules []std.NativeRule, flags seccomp.ExportFlag) error {
 	k.Helper()
 	return k.Expects("seccompLoad").Error(
@@ -472,6 +482,10 @@ func (k *kstub) notify(c chan<- os.Signal, sig ...os.Signal) {
 		k.FailNow()
 	}
 
+	if k.lockNotify != nil && expect.Ret == magicWait4Signal {
+		defer close(k.lockNotify)
+	}
+
 	// export channel for external instrumentation
 	if chanf, ok := expect.Args[0].(func(c chan<- os.Signal)); ok && chanf != nil {
 		chanf(c)
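The lockNotify channel added above orders the stubbed goroutines: lockOSThread parks on a receive when its expected call carries the magicWait4Signal sentinel, and the notify stub releases every waiter by closing the channel. A standalone sketch (not part of this change) of that close-to-release pattern:

package main

// Sketch only: generic illustration of the close-channel synchronization kstub uses.
import (
	"fmt"
	"sync"
)

func main() {
	lockNotify := make(chan struct{})
	var wg sync.WaitGroup

	wg.Add(1)
	go func() {
		defer wg.Done()
		<-lockNotify // parked, like lockOSThread when Ret == magicWait4Signal
		fmt.Println("released")
	}()

	close(lockNotify) // like the deferred close in the notify stub
	wg.Wait()
}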
@@ -18,7 +18,7 @@ func messageFromError(err error) (m string, ok bool) {
 	if m, ok = messagePrefixP[os.PathError]("cannot ", err); ok {
 		return
 	}
-	if m, ok = messagePrefixP[check.AbsoluteError](zeroString, err); ok {
+	if m, ok = messagePrefix[check.AbsoluteError](zeroString, err); ok {
 		return
 	}
 	if m, ok = messagePrefix[OpRepeatError](zeroString, err); ok {
@@ -37,7 +37,7 @@ func TestMessageFromError(t *testing.T) {
 			Err: stub.UniqueError(0xdeadbeef),
 		}, "cannot mount /sysroot: unique error 3735928559 injected by the test suite", true},
 
-		{"absolute", &check.AbsoluteError{Pathname: "etc/mtab"},
+		{"absolute", check.AbsoluteError("etc/mtab"),
 			`path "etc/mtab" is not absolute`, true},
 
 		{"repeat", OpRepeatError("autoetc"),
@@ -26,6 +26,8 @@ var (
 	// AbsRunUser is [RunUser] as [check.Absolute].
 	AbsRunUser = unsafeAbs(RunUser)
 
+	// AbsUsr is [Usr] as [check.Absolute].
+	AbsUsr = unsafeAbs(Usr)
 	// AbsUsrBin is [UsrBin] as [check.Absolute].
 	AbsUsrBin = unsafeAbs(UsrBin)
 
@@ -170,6 +170,10 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
 		offsetSetup = int(setupFd + 1)
 	}
 
+	if !params.HostNet {
+		k.mustLoopback(msg)
+	}
+
 	// write uid/gid map here so parent does not need to set dumpable
 	if err := k.setDumpable(SUID_DUMP_USER); err != nil {
 		k.fatalf(msg, "cannot set SUID_DUMP_USER: %v", err)
@@ -1992,7 +1992,7 @@ func TestInitEntrypoint(t *testing.T) {
 
 			/* wait4 */
 			Tracks: []stub.Expect{{Calls: []stub.Call{
-				call("lockOSThread", stub.ExpectArgs{}, nil, nil),
+				call("lockOSThread", stub.ExpectArgs{}, magicWait4Signal, nil),
 
 				// this terminates the goroutine at the call, preventing it from leaking while preserving behaviour
 				call("wait4", stub.ExpectArgs{-1, nil, 0, nil, stub.PanicExit}, 0, syscall.ECHILD),
@@ -2075,7 +2075,7 @@ func TestInitEntrypoint(t *testing.T) {
 				call("fatalf", stub.ExpectArgs{"cannot close setup pipe: %v", []any{stub.UniqueError(10)}}, nil, nil),
 				call("verbosef", stub.ExpectArgs{"starting initial program %s", []any{check.MustAbs("/run/current-system/sw/bin/bash")}}, nil, nil),
 				call("start", stub.ExpectArgs{"/run/current-system/sw/bin/bash", []string{"bash", "-c", "false"}, ([]string)(nil), "/.hakurei/nonexistent"}, &os.Process{Pid: 0xbad}, nil),
-				call("notify", stub.ExpectArgs{func(c chan<- os.Signal) { c <- CancelSignal }, []os.Signal{CancelSignal, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT}}, nil, nil),
+				call("notify", stub.ExpectArgs{func(c chan<- os.Signal) { c <- CancelSignal }, []os.Signal{CancelSignal, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT}}, magicWait4Signal, nil),
 				call("verbose", stub.ExpectArgs{[]any{"forwarding context cancellation"}}, nil, nil),
 				// magicWait4Signal as ret causes wait4 stub to unblock
 				call("signal", stub.ExpectArgs{"/run/current-system/sw/bin/bash", []string{"bash", "-c", "false"}, ([]string)(nil), "/.hakurei/nonexistent", os.Interrupt}, magicWait4Signal, stub.UniqueError(9)),
@@ -2090,7 +2090,7 @@ func TestInitEntrypoint(t *testing.T) {
 
 			/* wait4 */
 			Tracks: []stub.Expect{{Calls: []stub.Call{
-				call("lockOSThread", stub.ExpectArgs{}, nil, nil),
+				call("lockOSThread", stub.ExpectArgs{}, magicWait4Signal, nil),
 
 				// magicWait4Signal as args[4] causes this to block until simulated signal is delivered
 				call("wait4", stub.ExpectArgs{-1, syscall.WaitStatus(0xfade01ce), 0, nil, magicWait4Signal}, 0xbad, nil),
@@ -2175,7 +2175,7 @@ func TestInitEntrypoint(t *testing.T) {
 				call("fatalf", stub.ExpectArgs{"cannot close setup pipe: %v", []any{stub.UniqueError(7)}}, nil, nil),
 				call("verbosef", stub.ExpectArgs{"starting initial program %s", []any{check.MustAbs("/run/current-system/sw/bin/bash")}}, nil, nil),
 				call("start", stub.ExpectArgs{"/run/current-system/sw/bin/bash", []string{"bash", "-c", "false"}, ([]string)(nil), "/.hakurei/nonexistent"}, &os.Process{Pid: 0xbad}, nil),
-				call("notify", stub.ExpectArgs{func(c chan<- os.Signal) { c <- syscall.SIGQUIT }, []os.Signal{CancelSignal, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT}}, nil, nil),
+				call("notify", stub.ExpectArgs{func(c chan<- os.Signal) { c <- syscall.SIGQUIT }, []os.Signal{CancelSignal, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT}}, magicWait4Signal, nil),
 				call("verbosef", stub.ExpectArgs{"got %s, forwarding to initial process", []any{"quit"}}, nil, nil),
 				// magicWait4Signal as ret causes wait4 stub to unblock
 				call("signal", stub.ExpectArgs{"/run/current-system/sw/bin/bash", []string{"bash", "-c", "false"}, ([]string)(nil), "/.hakurei/nonexistent", syscall.SIGQUIT}, magicWait4Signal, stub.UniqueError(0xfe)),
@@ -2190,7 +2190,7 @@ func TestInitEntrypoint(t *testing.T) {
 
 			/* wait4 */
 			Tracks: []stub.Expect{{Calls: []stub.Call{
-				call("lockOSThread", stub.ExpectArgs{}, nil, nil),
+				call("lockOSThread", stub.ExpectArgs{}, magicWait4Signal, nil),
 
 				// magicWait4Signal as args[4] causes this to block until simulated signal is delivered
 				call("wait4", stub.ExpectArgs{-1, syscall.WaitStatus(0xfade01ce), 0, nil, magicWait4Signal}, 0xbad, nil),
@@ -2275,7 +2275,7 @@ func TestInitEntrypoint(t *testing.T) {
 				call("fatalf", stub.ExpectArgs{"cannot close setup pipe: %v", []any{stub.UniqueError(7)}}, nil, nil),
 				call("verbosef", stub.ExpectArgs{"starting initial program %s", []any{check.MustAbs("/run/current-system/sw/bin/bash")}}, nil, nil),
 				call("start", stub.ExpectArgs{"/run/current-system/sw/bin/bash", []string{"bash", "-c", "false"}, ([]string)(nil), "/.hakurei/nonexistent"}, &os.Process{Pid: 0xbad}, nil),
-				call("notify", stub.ExpectArgs{func(c chan<- os.Signal) { c <- os.Interrupt }, []os.Signal{CancelSignal, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT}}, nil, nil),
+				call("notify", stub.ExpectArgs{func(c chan<- os.Signal) { c <- os.Interrupt }, []os.Signal{CancelSignal, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT}}, magicWait4Signal, nil),
 				call("verbosef", stub.ExpectArgs{"got %s", []any{"interrupt"}}, nil, nil),
 				call("beforeExit", stub.ExpectArgs{}, nil, nil),
 				call("exit", stub.ExpectArgs{0}, nil, nil),
@@ -2283,7 +2283,7 @@ func TestInitEntrypoint(t *testing.T) {
 
 			/* wait4 */
 			Tracks: []stub.Expect{{Calls: []stub.Call{
-				call("lockOSThread", stub.ExpectArgs{}, nil, nil),
+				call("lockOSThread", stub.ExpectArgs{}, magicWait4Signal, nil),
 
 				// this terminates the goroutine at the call, preventing it from leaking while preserving behaviour
 				call("wait4", stub.ExpectArgs{-1, nil, 0, nil, stub.PanicExit}, 0, syscall.ECHILD),
@@ -2366,7 +2366,7 @@ func TestInitEntrypoint(t *testing.T) {
 				call("fatalf", stub.ExpectArgs{"cannot close setup pipe: %v", []any{stub.UniqueError(5)}}, nil, nil),
 				call("verbosef", stub.ExpectArgs{"starting initial program %s", []any{check.MustAbs("/run/current-system/sw/bin/bash")}}, nil, nil),
 				call("start", stub.ExpectArgs{"/run/current-system/sw/bin/bash", []string{"bash", "-c", "false"}, ([]string)(nil), "/.hakurei/nonexistent"}, &os.Process{Pid: 0xbad}, nil),
-				call("notify", stub.ExpectArgs{nil, []os.Signal{CancelSignal, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT}}, nil, nil),
+				call("notify", stub.ExpectArgs{nil, []os.Signal{CancelSignal, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT}}, magicWait4Signal, nil),
 				call("verbose", stub.ExpectArgs{[]any{os.ErrInvalid.Error()}}, nil, nil),
 				call("verbose", stub.ExpectArgs{[]any{os.ErrInvalid.Error()}}, nil, nil),
 				call("verbosef", stub.ExpectArgs{"initial process exited with signal %s", []any{syscall.Signal(0x4e)}}, nil, nil),
@@ -2377,7 +2377,7 @@ func TestInitEntrypoint(t *testing.T) {
 
 			/* wait4 */
 			Tracks: []stub.Expect{{Calls: []stub.Call{
-				call("lockOSThread", stub.ExpectArgs{}, nil, nil),
+				call("lockOSThread", stub.ExpectArgs{}, magicWait4Signal, nil),
 
 				call("wait4", stub.ExpectArgs{-1, syscall.WaitStatus(0xfade01ce), 0, nil}, 0xbad, nil),
 				// this terminates the goroutine at the call, preventing it from leaking while preserving behaviour
@@ -2461,7 +2461,7 @@ func TestInitEntrypoint(t *testing.T) {
 				call("fatalf", stub.ExpectArgs{"cannot close setup pipe: %v", []any{stub.UniqueError(3)}}, nil, nil),
 				call("verbosef", stub.ExpectArgs{"starting initial program %s", []any{check.MustAbs("/run/current-system/sw/bin/bash")}}, nil, nil),
 				call("start", stub.ExpectArgs{"/run/current-system/sw/bin/bash", []string{"bash", "-c", "false"}, ([]string)(nil), "/.hakurei/nonexistent"}, &os.Process{Pid: 0xbad}, nil),
-				call("notify", stub.ExpectArgs{nil, []os.Signal{CancelSignal, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT}}, nil, nil),
+				call("notify", stub.ExpectArgs{nil, []os.Signal{CancelSignal, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT}}, magicWait4Signal, nil),
 				call("verbose", stub.ExpectArgs{[]any{os.ErrInvalid.Error()}}, nil, nil),
 				call("verbose", stub.ExpectArgs{[]any{os.ErrInvalid.Error()}}, nil, nil),
 				call("verbosef", stub.ExpectArgs{"initial process exited with signal %s", []any{syscall.Signal(0x4e)}}, nil, nil),
@@ -2471,7 +2471,7 @@ func TestInitEntrypoint(t *testing.T) {
 
 			/* wait4 */
 			Tracks: []stub.Expect{{Calls: []stub.Call{
-				call("lockOSThread", stub.ExpectArgs{}, nil, nil),
+				call("lockOSThread", stub.ExpectArgs{}, magicWait4Signal, nil),
 
 				call("wait4", stub.ExpectArgs{-1, nil, 0, nil}, 0, syscall.EINTR),
 				call("wait4", stub.ExpectArgs{-1, nil, 0, nil}, 0, syscall.EINTR),
@@ -2599,7 +2599,7 @@ func TestInitEntrypoint(t *testing.T) {
 				call("fatalf", stub.ExpectArgs{"cannot close setup pipe: %v", []any{stub.UniqueError(1)}}, nil, nil),
 				call("verbosef", stub.ExpectArgs{"starting initial program %s", []any{check.MustAbs("/run/current-system/sw/bin/bash")}}, nil, nil),
 				call("start", stub.ExpectArgs{"/run/current-system/sw/bin/bash", []string{"bash", "-c", "false"}, ([]string)(nil), "/.hakurei/nonexistent"}, &os.Process{Pid: 0xbad}, nil),
-				call("notify", stub.ExpectArgs{nil, []os.Signal{CancelSignal, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT}}, nil, nil),
+				call("notify", stub.ExpectArgs{nil, []os.Signal{CancelSignal, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT}}, magicWait4Signal, nil),
 				call("verbose", stub.ExpectArgs{[]any{os.ErrInvalid.Error()}}, nil, nil),
 				call("verbose", stub.ExpectArgs{[]any{os.ErrInvalid.Error()}}, nil, nil),
 				call("verbosef", stub.ExpectArgs{"initial process exited with code %d", []any{1}}, nil, nil),
@@ -2609,7 +2609,7 @@ func TestInitEntrypoint(t *testing.T) {
 
 			/* wait4 */
 			Tracks: []stub.Expect{{Calls: []stub.Call{
-				call("lockOSThread", stub.ExpectArgs{}, nil, nil),
+				call("lockOSThread", stub.ExpectArgs{}, magicWait4Signal, nil),
 
 				call("wait4", stub.ExpectArgs{-1, nil, 0, nil}, 0, syscall.EINTR),
 				call("wait4", stub.ExpectArgs{-1, nil, 0, nil}, 0, syscall.EINTR),
@@ -2741,7 +2741,7 @@ func TestInitEntrypoint(t *testing.T) {
 				call("fatalf", stub.ExpectArgs{"cannot close setup pipe: %v", []any{stub.UniqueError(0)}}, nil, nil),
 				call("verbosef", stub.ExpectArgs{"starting initial program %s", []any{check.MustAbs("/bin/zsh")}}, nil, nil),
 				call("start", stub.ExpectArgs{"/bin/zsh", []string{"zsh", "-c", "exec vim"}, []string{"DISPLAY=:0"}, "/.hakurei"}, &os.Process{Pid: 0xcafe}, nil),
-				call("notify", stub.ExpectArgs{nil, []os.Signal{CancelSignal, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT}}, nil, nil),
+				call("notify", stub.ExpectArgs{nil, []os.Signal{CancelSignal, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT}}, magicWait4Signal, nil),
 				call("verbose", stub.ExpectArgs{[]any{os.ErrInvalid.Error()}}, nil, nil),
 				call("verbose", stub.ExpectArgs{[]any{os.ErrInvalid.Error()}}, nil, nil),
 				call("verbose", stub.ExpectArgs{[]any{os.ErrInvalid.Error()}}, nil, nil),
@@ -2752,7 +2752,7 @@ func TestInitEntrypoint(t *testing.T) {
 
 			/* wait4 */
 			Tracks: []stub.Expect{{Calls: []stub.Call{
-				call("lockOSThread", stub.ExpectArgs{}, nil, nil),
+				call("lockOSThread", stub.ExpectArgs{}, magicWait4Signal, nil),
 
 				call("wait4", stub.ExpectArgs{-1, nil, 0, nil}, 0, syscall.EINTR),
 				call("wait4", stub.ExpectArgs{-1, nil, 0, nil}, 0, syscall.EINTR),
@@ -312,7 +312,10 @@ func TestMountOverlayOp(t *testing.T) {
 			},
 		}},
 
-		{"ephemeral", new(Ops).OverlayEphemeral(check.MustAbs("/nix/store"), check.MustAbs("/mnt-root/nix/.ro-store")), Ops{
+		{"ephemeral", new(Ops).OverlayEphemeral(
+			check.MustAbs("/nix/store"),
+			check.MustAbs("/mnt-root/nix/.ro-store"),
+		), Ops{
 			&MountOverlayOp{
 				Target: check.MustAbs("/nix/store"),
 				Lower:  []*check.Absolute{check.MustAbs("/mnt-root/nix/.ro-store")},
@@ -320,7 +323,10 @@ func TestMountOverlayOp(t *testing.T) {
 			},
 		}},
 
-		{"readonly", new(Ops).OverlayReadonly(check.MustAbs("/nix/store"), check.MustAbs("/mnt-root/nix/.ro-store")), Ops{
+		{"readonly", new(Ops).OverlayReadonly(
+			check.MustAbs("/nix/store"),
+			check.MustAbs("/mnt-root/nix/.ro-store"),
+		), Ops{
 			&MountOverlayOp{
 				Target: check.MustAbs("/nix/store"),
 				Lower:  []*check.Absolute{check.MustAbs("/mnt-root/nix/.ro-store")},
@@ -31,7 +31,7 @@ func (l *SymlinkOp) Valid() bool { return l != nil && l.Target != nil && l.LinkN
 func (l *SymlinkOp) early(_ *setupState, k syscallDispatcher) error {
 	if l.Dereference {
 		if !path.IsAbs(l.LinkName) {
-			return &check.AbsoluteError{Pathname: l.LinkName}
+			return check.AbsoluteError(l.LinkName)
 		}
 		if name, err := k.readlink(l.LinkName); err != nil {
 			return err
@@ -23,7 +23,7 @@ func TestSymlinkOp(t *testing.T) {
 			Target:      check.MustAbs("/etc/mtab"),
 			LinkName:    "etc/mtab",
 			Dereference: true,
-		}, nil, &check.AbsoluteError{Pathname: "etc/mtab"}, nil, nil},
+		}, nil, check.AbsoluteError("etc/mtab"), nil, nil},
 
 		{"readlink", &Params{ParentPerm: 0755}, &SymlinkOp{
 			Target: check.MustAbs("/etc/mtab"),
269
container/netlink.go
Normal file
269
container/netlink.go
Normal file
@@ -0,0 +1,269 @@
|
|||||||
|
package container
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
. "syscall"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"hakurei.app/container/std"
|
||||||
|
"hakurei.app/message"
|
||||||
|
)
|
||||||
|
|
||||||
|
// rtnetlink represents a NETLINK_ROUTE socket.
|
||||||
|
type rtnetlink struct {
|
||||||
|
// Sent as part of rtnetlink messages.
|
||||||
|
pid uint32
|
||||||
|
// AF_NETLINK socket.
|
||||||
|
fd int
|
||||||
|
// Whether the socket is open.
|
||||||
|
ok bool
|
||||||
|
// Message sequence number.
|
||||||
|
seq uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
// open creates the underlying NETLINK_ROUTE socket.
|
||||||
|
func (s *rtnetlink) open() (err error) {
|
||||||
|
if s.ok || s.fd < 0 {
|
||||||
|
return os.ErrInvalid
|
||||||
|
}
|
||||||
|
|
||||||
|
s.pid = uint32(Getpid())
|
||||||
|
if s.fd, err = Socket(
|
||||||
|
AF_NETLINK,
|
||||||
|
SOCK_RAW|SOCK_CLOEXEC,
|
||||||
|
NETLINK_ROUTE,
|
||||||
|
); err != nil {
|
||||||
|
return os.NewSyscallError("socket", err)
|
||||||
|
} else if err = Bind(s.fd, &SockaddrNetlink{
|
||||||
|
Family: AF_NETLINK,
|
||||||
|
Pid: s.pid,
|
||||||
|
}); err != nil {
|
||||||
|
_ = s.close()
|
||||||
|
return os.NewSyscallError("bind", err)
|
||||||
|
} else {
|
||||||
|
s.ok = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// close closes the underlying NETLINK_ROUTE socket.
|
||||||
|
func (s *rtnetlink) close() error {
|
||||||
|
if !s.ok {
|
||||||
|
return os.ErrInvalid
|
||||||
|
}
|
||||||
|
|
||||||
|
s.ok = false
|
||||||
|
err := Close(s.fd)
|
||||||
|
s.fd = -1
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// roundtrip sends a netlink message and handles the reply.
|
||||||
|
func (s *rtnetlink) roundtrip(data []byte) error {
|
||||||
|
if !s.ok {
|
||||||
|
return os.ErrInvalid
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() { s.seq++ }()
|
||||||
|
|
||||||
|
if err := Sendto(s.fd, data, 0, &SockaddrNetlink{
|
||||||
|
Family: AF_NETLINK,
|
||||||
|
}); err != nil {
|
||||||
|
return os.NewSyscallError("sendto", err)
|
||||||
|
}
|
||||||
|
buf := make([]byte, Getpagesize())
|
||||||
|
|
||||||
|
done:
|
||||||
|
for {
|
||||||
|
p := buf
|
||||||
|
if n, _, err := Recvfrom(s.fd, p, 0); err != nil {
|
||||||
|
return os.NewSyscallError("recvfrom", err)
|
||||||
|
} else if n < NLMSG_HDRLEN {
|
||||||
|
return errors.ErrUnsupported
|
||||||
|
} else {
|
||||||
|
p = p[:n]
|
||||||
|
}
|
||||||
|
|
||||||
|
if msgs, err := ParseNetlinkMessage(p); err != nil {
|
||||||
|
return err
|
||||||
|
} else {
|
||||||
|
for _, m := range msgs {
|
||||||
|
if m.Header.Seq != s.seq || m.Header.Pid != s.pid {
|
||||||
|
return errors.ErrUnsupported
|
||||||
|
}
|
||||||
|
if m.Header.Type == NLMSG_DONE {
|
||||||
|
break done
|
||||||
|
}
|
||||||
|
if m.Header.Type == NLMSG_ERROR {
|
||||||
|
if len(m.Data) >= 4 {
|
||||||
|
errno := Errno(-std.ScmpInt(binary.NativeEndian.Uint32(m.Data)))
|
||||||
|
if errno == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return errno
|
||||||
|
}
|
||||||
|
return errors.ErrUnsupported
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// mustRoundtrip calls roundtrip and terminates via msg for a non-nil error.
|
||||||
|
func (s *rtnetlink) mustRoundtrip(msg message.Msg, data []byte) {
|
||||||
|
err := s.roundtrip(data)
|
||||||
|
if err == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if closeErr := Close(s.fd); closeErr != nil {
|
||||||
|
msg.Verbosef("cannot close: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch err.(type) {
|
||||||
|
case *os.SyscallError:
|
||||||
|
msg.GetLogger().Fatalf("cannot %v", err)
|
||||||
|
|
||||||
|
case Errno:
|
||||||
|
msg.GetLogger().Fatalf("RTNETLINK answers: %v", err)
|
||||||
|
|
||||||
|
default:
|
||||||
|
msg.GetLogger().Fatalln("RTNETLINK answers with unexpected message")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// newaddrLo represents a RTM_NEWADDR message with two addresses.
|
||||||
|
type newaddrLo struct {
|
||||||
|
header NlMsghdr
|
||||||
|
data IfAddrmsg
|
||||||
|
|
||||||
|
r0 RtAttr
|
||||||
|
a0 [4]byte // in_addr
|
||||||
|
r1 RtAttr
|
||||||
|
a1 [4]byte // in_addr
|
||||||
|
}
|
||||||
|
|
||||||
|
// sizeofNewaddrLo is the expected size of newaddrLo.
const sizeofNewaddrLo = NLMSG_HDRLEN + SizeofIfAddrmsg + (SizeofRtAttr+4)*2

// newaddrLo returns the address of a populated newaddrLo.
func (s *rtnetlink) newaddrLo(lo int) *newaddrLo {
    return &newaddrLo{NlMsghdr{
        Len:   sizeofNewaddrLo,
        Type:  RTM_NEWADDR,
        Flags: NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE | NLM_F_EXCL,
        Seq:   s.seq,
        Pid:   s.pid,
    }, IfAddrmsg{
        Family:    AF_INET,
        Prefixlen: 8,
        Flags:     IFA_F_PERMANENT,
        Scope:     RT_SCOPE_HOST,
        Index:     uint32(lo),
    }, RtAttr{
        Len:  uint16(SizeofRtAttr + len(newaddrLo{}.a0)),
        Type: IFA_LOCAL,
    }, [4]byte{127, 0, 0, 1}, RtAttr{
        Len:  uint16(SizeofRtAttr + len(newaddrLo{}.a1)),
        Type: IFA_ADDRESS,
    }, [4]byte{127, 0, 0, 1}}
}

func (msg *newaddrLo) toWireFormat() []byte {
    var buf [sizeofNewaddrLo]byte

    *(*uint32)(unsafe.Pointer(&buf[0:4][0])) = msg.header.Len
    *(*uint16)(unsafe.Pointer(&buf[4:6][0])) = msg.header.Type
    *(*uint16)(unsafe.Pointer(&buf[6:8][0])) = msg.header.Flags
    *(*uint32)(unsafe.Pointer(&buf[8:12][0])) = msg.header.Seq
    *(*uint32)(unsafe.Pointer(&buf[12:16][0])) = msg.header.Pid

    buf[16] = msg.data.Family
    buf[17] = msg.data.Prefixlen
    buf[18] = msg.data.Flags
    buf[19] = msg.data.Scope
    *(*uint32)(unsafe.Pointer(&buf[20:24][0])) = msg.data.Index

    *(*uint16)(unsafe.Pointer(&buf[24:26][0])) = msg.r0.Len
    *(*uint16)(unsafe.Pointer(&buf[26:28][0])) = msg.r0.Type
    copy(buf[28:32], msg.a0[:])
    *(*uint16)(unsafe.Pointer(&buf[32:34][0])) = msg.r1.Len
    *(*uint16)(unsafe.Pointer(&buf[34:36][0])) = msg.r1.Type
    copy(buf[36:40], msg.a1[:])

    return buf[:]
}

// newlinkLo represents a RTM_NEWLINK message.
type newlinkLo struct {
    header NlMsghdr
    data   IfInfomsg
}

// sizeofNewlinkLo is the expected size of newlinkLo.
const sizeofNewlinkLo = NLMSG_HDRLEN + SizeofIfInfomsg

// newlinkLo returns the address of a populated newlinkLo.
func (s *rtnetlink) newlinkLo(lo int) *newlinkLo {
    return &newlinkLo{NlMsghdr{
        Len:   sizeofNewlinkLo,
        Type:  RTM_NEWLINK,
        Flags: NLM_F_REQUEST | NLM_F_ACK,
        Seq:   s.seq,
        Pid:   s.pid,
    }, IfInfomsg{
        Family: AF_UNSPEC,
        Index:  int32(lo),
        Flags:  IFF_UP,
        Change: IFF_UP,
    }}
}

func (msg *newlinkLo) toWireFormat() []byte {
    var buf [sizeofNewlinkLo]byte

    *(*uint32)(unsafe.Pointer(&buf[0:4][0])) = msg.header.Len
    *(*uint16)(unsafe.Pointer(&buf[4:6][0])) = msg.header.Type
    *(*uint16)(unsafe.Pointer(&buf[6:8][0])) = msg.header.Flags
    *(*uint32)(unsafe.Pointer(&buf[8:12][0])) = msg.header.Seq
    *(*uint32)(unsafe.Pointer(&buf[12:16][0])) = msg.header.Pid

    buf[16] = msg.data.Family
    *(*uint16)(unsafe.Pointer(&buf[18:20][0])) = msg.data.Type
    *(*int32)(unsafe.Pointer(&buf[20:24][0])) = msg.data.Index
    *(*uint32)(unsafe.Pointer(&buf[24:28][0])) = msg.data.Flags
    *(*uint32)(unsafe.Pointer(&buf[28:32][0])) = msg.data.Change

    return buf[:]
}

// mustLoopback creates the loopback address and brings the lo interface up.
// mustLoopback calls a fatal method of the underlying [log.Logger] of msg with a
// user-facing error message if RTNETLINK behaves unexpectedly.
func mustLoopback(msg message.Msg) {
    log := msg.GetLogger()

    var lo int
    if ifi, err := net.InterfaceByName("lo"); err != nil {
        log.Fatalln(err)
    } else {
        lo = ifi.Index
    }

    var s rtnetlink
    if err := s.open(); err != nil {
        log.Fatalln(err)
    }
    defer func() {
        if err := s.close(); err != nil {
            msg.Verbosef("cannot close netlink: %v", err)
        }
    }()

    s.mustRoundtrip(msg, s.newaddrLo(lo).toWireFormat())
    s.mustRoundtrip(msg, s.newlinkLo(lo).toWireFormat())
}
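The fixed message sizes above can be checked by hand: on Linux, NLMSG_HDRLEN is 16, struct ifaddrmsg is 8 bytes, struct ifinfomsg is 16 bytes and struct rtattr is 4 bytes, so the RTM_NEWADDR message is 16 + 8 + (4+4)*2 = 40 (0x28) bytes and the RTM_NEWLINK message is 16 + 16 = 32 (0x20) bytes, matching the Len bytes asserted in the test file below. A standalone sanity check of that arithmetic (the constant names here are local stand-ins for the package constants of the same spelling, not the package API):

package main

import "fmt"

func main() {
    // Assumed Linux ABI sizes: nlmsghdr (aligned) = 16, ifaddrmsg = 8,
    // ifinfomsg = 16, rtattr = 4; each rtattr carries a 4-byte IPv4 address.
    const (
        nlmsgHdrLen     = 16
        sizeofIfAddrmsg = 8
        sizeofIfInfomsg = 16
        sizeofRtAttr    = 4
    )
    newaddr := nlmsgHdrLen + sizeofIfAddrmsg + (sizeofRtAttr+4)*2
    newlink := nlmsgHdrLen + sizeofIfInfomsg
    fmt.Printf("newaddrLo: %#x bytes, newlinkLo: %#x bytes\n", newaddr, newlink)
    // Output: newaddrLo: 0x28 bytes, newlinkLo: 0x20 bytes
}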
container/netlink_test.go | 72 (new file)
@@ -0,0 +1,72 @@
package container

import (
    "testing"
    "unsafe"
)

func TestSizeof(t *testing.T) {
    if got := unsafe.Sizeof(newaddrLo{}); got != sizeofNewaddrLo {
        t.Fatalf("newaddrLo: sizeof = %#x, want %#x", got, sizeofNewaddrLo)
    }

    if got := unsafe.Sizeof(newlinkLo{}); got != sizeofNewlinkLo {
        t.Fatalf("newlinkLo: sizeof = %#x, want %#x", got, sizeofNewlinkLo)
    }
}

func TestRtnetlinkMessage(t *testing.T) {
    t.Parallel()

    testCases := []struct {
        name string
        msg  interface{ toWireFormat() []byte }
        want []byte
    }{
        {"newaddrLo", (&rtnetlink{pid: 1, seq: 0}).newaddrLo(1), []byte{
            /* Len */ 0x28, 0, 0, 0,
            /* Type */ 0x14, 0,
            /* Flags */ 5, 6,
            /* Seq */ 0, 0, 0, 0,
            /* Pid */ 1, 0, 0, 0,

            /* Family */ 2,
            /* Prefixlen */ 8,
            /* Flags */ 0x80,
            /* Scope */ 0xfe,
            /* Index */ 1, 0, 0, 0,

            /* Len */ 8, 0,
            /* Type */ 2, 0,
            /* in_addr */ 127, 0, 0, 1,

            /* Len */ 8, 0,
            /* Type */ 1, 0,
            /* in_addr */ 127, 0, 0, 1,
        }},

        {"newlinkLo", (&rtnetlink{pid: 1, seq: 1}).newlinkLo(1), []byte{
            /* Len */ 0x20, 0, 0, 0,
            /* Type */ 0x10, 0,
            /* Flags */ 5, 0,
            /* Seq */ 1, 0, 0, 0,
            /* Pid */ 1, 0, 0, 0,

            /* Family */ 0,
            /* pad */ 0,
            /* Type */ 0, 0,
            /* Index */ 1, 0, 0, 0,
            /* Flags */ 1, 0, 0, 0,
            /* Change */ 1, 0, 0, 0,
        }},
    }
    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            t.Parallel()

            if got := tc.msg.toWireFormat(); string(got) != string(tc.want) {
                t.Fatalf("toWireFormat: %#v, want %#v", got, tc.want)
            }
        })
    }
}
container/seccomp/presets_riscv64_test.go | 27 (new file)
@@ -0,0 +1,27 @@
package seccomp_test

import (
    . "hakurei.app/container/seccomp"
    . "hakurei.app/container/std"
)

var bpfExpected = bpfLookup{
    {AllowMultiarch | AllowCAN |
        AllowBluetooth, PresetExt |
        PresetDenyNS | PresetDenyTTY | PresetDenyDevel |
        PresetLinux32}: toHash(
        "a1c4ffa35f4bfbf38061184760b9a09edfcb4964c3b534395e47327b83f3fb61f2f9573ddfcc4772424cc2f5dd12fd32471e6531dbe10e85eda3797dd4fa179f"),

    {0, 0}: toHash(
        "f3910fd727d087def593e3876c2c6ab9ace71d82ec8cbc992a26223e7bba85e1d7a0b56c5fc6303703f24595825dad8561637edaedd5384b34a6cd080946633c"),
    {0, PresetExt}: toHash(
        "741438c5e3f11c36c92ae8c5934f13440675c6e719541c2dbffeda79a10081bcfd9ad8314a60c1d1f53db86c8080c13fffa3bbcf7fe753935679b4b902737286"),
    {0, PresetStrict}: toHash(
        "79e9e464d02405c6d74fd2c771bd72a1311e488221c73a9c32db9270219837c54fccec2f36fe2474895547e60c311514567e2e6cf4e7a7fcf909c1ecd1e254a7"),
    {0, PresetDenyNS | PresetDenyTTY | PresetDenyDevel}: toHash(
        "3c443715a6c1e557a284862ea8efb70a5d4ecbe67d1226627323e861cd3646fb3e7768ec5b94b93760b7f652cf6916f66e317a4fbf8716d10c3673aa4fc3ae58"),
    {0, PresetExt | PresetDenyDevel}: toHash(
        "4448a74e8cc75a4ab63799c4f2cc2a5af63e5f4e8e9b8ac15a1873d647dfa67a4c67b39ed466d8dd32abc64136d401879fc6185c9ab00feeaf59ccf4305f8201"),
    {0, PresetExt | PresetDenyNS | PresetDenyDevel}: toHash(
        "c7c86e793cb7192f5f6c735f372cda27eb43ae1045e587f8eadb64c849520a3280b6570a3d7b601d32cddb38021585a2234db38e506cebfd10aa3d6c75440f17"),
}
@@ -12,6 +12,7 @@ my %syscall_cutoff_arch = (
 	"x86" => 340,
 	"x86_64" => 302,
 	"aarch64" => 281,
+	"riscv64" => 281,
 );
 
 print <<EOF;
container/std/syscall_extra_linux_riscv64.go | 55 (new file)
@@ -0,0 +1,55 @@
package std

import "syscall"

const (
    SYS_NEWFSTATAT = syscall.SYS_FSTATAT
)

var syscallNumExtra = map[string]ScmpSyscall{
    "uselib":          SNR_USELIB,
    "clock_adjtime64": SNR_CLOCK_ADJTIME64,
    "clock_settime64": SNR_CLOCK_SETTIME64,
    "umount":          SNR_UMOUNT,
    "chown":           SNR_CHOWN,
    "chown32":         SNR_CHOWN32,
    "fchown32":        SNR_FCHOWN32,
    "lchown":          SNR_LCHOWN,
    "lchown32":        SNR_LCHOWN32,
    "setgid32":        SNR_SETGID32,
    "setgroups32":     SNR_SETGROUPS32,
    "setregid32":      SNR_SETREGID32,
    "setresgid32":     SNR_SETRESGID32,
    "setresuid32":     SNR_SETRESUID32,
    "setreuid32":      SNR_SETREUID32,
    "setuid32":        SNR_SETUID32,
    "modify_ldt":      SNR_MODIFY_LDT,
    "subpage_prot":    SNR_SUBPAGE_PROT,
    "switch_endian":   SNR_SWITCH_ENDIAN,
    "vm86":            SNR_VM86,
    "vm86old":         SNR_VM86OLD,
}

const (
    SNR_USELIB          ScmpSyscall = __PNR_uselib
    SNR_CLOCK_ADJTIME64 ScmpSyscall = __PNR_clock_adjtime64
    SNR_CLOCK_SETTIME64 ScmpSyscall = __PNR_clock_settime64
    SNR_UMOUNT          ScmpSyscall = __PNR_umount
    SNR_CHOWN           ScmpSyscall = __PNR_chown
    SNR_CHOWN32         ScmpSyscall = __PNR_chown32
    SNR_FCHOWN32        ScmpSyscall = __PNR_fchown32
    SNR_LCHOWN          ScmpSyscall = __PNR_lchown
    SNR_LCHOWN32        ScmpSyscall = __PNR_lchown32
    SNR_SETGID32        ScmpSyscall = __PNR_setgid32
    SNR_SETGROUPS32     ScmpSyscall = __PNR_setgroups32
    SNR_SETREGID32      ScmpSyscall = __PNR_setregid32
    SNR_SETRESGID32     ScmpSyscall = __PNR_setresgid32
    SNR_SETRESUID32     ScmpSyscall = __PNR_setresuid32
    SNR_SETREUID32      ScmpSyscall = __PNR_setreuid32
    SNR_SETUID32        ScmpSyscall = __PNR_setuid32
    SNR_MODIFY_LDT      ScmpSyscall = __PNR_modify_ldt
    SNR_SUBPAGE_PROT    ScmpSyscall = __PNR_subpage_prot
    SNR_SWITCH_ENDIAN   ScmpSyscall = __PNR_switch_endian
    SNR_VM86            ScmpSyscall = __PNR_vm86
    SNR_VM86OLD         ScmpSyscall = __PNR_vm86old
)
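This file covers syscall names that have no native number on riscv64 by mapping them to the libseccomp-style __PNR_* pseudo-numbers, while the generated table in the next file carries the real numbers. A plausible way these two maps would be consulted together is a simple two-step lookup; the helper name and return convention below are assumptions for illustration, not the package's actual API:

// Illustrative only: prefer the generated riscv64 table, then fall back to
// the pseudo-syscall numbers defined above for names absent on this arch.
func resolveSyscall(name string) (ScmpSyscall, bool) {
    if nr, ok := syscallNum[name]; ok {
        return nr, true
    }
    if nr, ok := syscallNumExtra[name]; ok {
        return nr, true
    }
    return 0, false
}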
container/std/syscall_linux_riscv64.go | 719 (new file)
@@ -0,0 +1,719 @@
|
|||||||
|
// mksysnum_linux.pl /usr/include/riscv64-linux-gnu/asm/unistd.h
|
||||||
|
// Code generated by the command above; DO NOT EDIT.
|
||||||
|
|
||||||
|
package std
|
||||||
|
|
||||||
|
import . "syscall"
|
||||||
|
|
||||||
|
var syscallNum = map[string]ScmpSyscall{
|
||||||
|
"io_setup": SNR_IO_SETUP,
|
||||||
|
"io_destroy": SNR_IO_DESTROY,
|
||||||
|
"io_submit": SNR_IO_SUBMIT,
|
||||||
|
"io_cancel": SNR_IO_CANCEL,
|
||||||
|
"io_getevents": SNR_IO_GETEVENTS,
|
||||||
|
"setxattr": SNR_SETXATTR,
|
||||||
|
"lsetxattr": SNR_LSETXATTR,
|
||||||
|
"fsetxattr": SNR_FSETXATTR,
|
||||||
|
"getxattr": SNR_GETXATTR,
|
||||||
|
"lgetxattr": SNR_LGETXATTR,
|
||||||
|
"fgetxattr": SNR_FGETXATTR,
|
||||||
|
"listxattr": SNR_LISTXATTR,
|
||||||
|
"llistxattr": SNR_LLISTXATTR,
|
||||||
|
"flistxattr": SNR_FLISTXATTR,
|
||||||
|
"removexattr": SNR_REMOVEXATTR,
|
||||||
|
"lremovexattr": SNR_LREMOVEXATTR,
|
||||||
|
"fremovexattr": SNR_FREMOVEXATTR,
|
||||||
|
"getcwd": SNR_GETCWD,
|
||||||
|
"lookup_dcookie": SNR_LOOKUP_DCOOKIE,
|
||||||
|
"eventfd2": SNR_EVENTFD2,
|
||||||
|
"epoll_create1": SNR_EPOLL_CREATE1,
|
||||||
|
"epoll_ctl": SNR_EPOLL_CTL,
|
||||||
|
"epoll_pwait": SNR_EPOLL_PWAIT,
|
||||||
|
"dup": SNR_DUP,
|
||||||
|
"dup3": SNR_DUP3,
|
||||||
|
"fcntl": SNR_FCNTL,
|
||||||
|
"inotify_init1": SNR_INOTIFY_INIT1,
|
||||||
|
"inotify_add_watch": SNR_INOTIFY_ADD_WATCH,
|
||||||
|
"inotify_rm_watch": SNR_INOTIFY_RM_WATCH,
|
||||||
|
"ioctl": SNR_IOCTL,
|
||||||
|
"ioprio_set": SNR_IOPRIO_SET,
|
||||||
|
"ioprio_get": SNR_IOPRIO_GET,
|
||||||
|
"flock": SNR_FLOCK,
|
||||||
|
"mknodat": SNR_MKNODAT,
|
||||||
|
"mkdirat": SNR_MKDIRAT,
|
||||||
|
"unlinkat": SNR_UNLINKAT,
|
||||||
|
"symlinkat": SNR_SYMLINKAT,
|
||||||
|
"linkat": SNR_LINKAT,
|
||||||
|
"umount2": SNR_UMOUNT2,
|
||||||
|
"mount": SNR_MOUNT,
|
||||||
|
"pivot_root": SNR_PIVOT_ROOT,
|
||||||
|
"nfsservctl": SNR_NFSSERVCTL,
|
||||||
|
"statfs": SNR_STATFS,
|
||||||
|
"fstatfs": SNR_FSTATFS,
|
||||||
|
"truncate": SNR_TRUNCATE,
|
||||||
|
"ftruncate": SNR_FTRUNCATE,
|
||||||
|
"fallocate": SNR_FALLOCATE,
|
||||||
|
"faccessat": SNR_FACCESSAT,
|
||||||
|
"chdir": SNR_CHDIR,
|
||||||
|
"fchdir": SNR_FCHDIR,
|
||||||
|
"chroot": SNR_CHROOT,
|
||||||
|
"fchmod": SNR_FCHMOD,
|
||||||
|
"fchmodat": SNR_FCHMODAT,
|
||||||
|
"fchownat": SNR_FCHOWNAT,
|
||||||
|
"fchown": SNR_FCHOWN,
|
||||||
|
"openat": SNR_OPENAT,
|
||||||
|
"close": SNR_CLOSE,
|
||||||
|
"vhangup": SNR_VHANGUP,
|
||||||
|
"pipe2": SNR_PIPE2,
|
||||||
|
"quotactl": SNR_QUOTACTL,
|
||||||
|
"getdents64": SNR_GETDENTS64,
|
||||||
|
"lseek": SNR_LSEEK,
|
||||||
|
"read": SNR_READ,
|
||||||
|
"write": SNR_WRITE,
|
||||||
|
"readv": SNR_READV,
|
||||||
|
"writev": SNR_WRITEV,
|
||||||
|
"pread64": SNR_PREAD64,
|
||||||
|
"pwrite64": SNR_PWRITE64,
|
||||||
|
"preadv": SNR_PREADV,
|
||||||
|
"pwritev": SNR_PWRITEV,
|
||||||
|
"sendfile": SNR_SENDFILE,
|
||||||
|
"pselect6": SNR_PSELECT6,
|
||||||
|
"ppoll": SNR_PPOLL,
|
||||||
|
"signalfd4": SNR_SIGNALFD4,
|
||||||
|
"vmsplice": SNR_VMSPLICE,
|
||||||
|
"splice": SNR_SPLICE,
|
||||||
|
"tee": SNR_TEE,
|
||||||
|
"readlinkat": SNR_READLINKAT,
|
||||||
|
"newfstatat": SNR_NEWFSTATAT,
|
||||||
|
"fstat": SNR_FSTAT,
|
||||||
|
"sync": SNR_SYNC,
|
||||||
|
"fsync": SNR_FSYNC,
|
||||||
|
"fdatasync": SNR_FDATASYNC,
|
||||||
|
"sync_file_range": SNR_SYNC_FILE_RANGE,
|
||||||
|
"timerfd_create": SNR_TIMERFD_CREATE,
|
||||||
|
"timerfd_settime": SNR_TIMERFD_SETTIME,
|
||||||
|
"timerfd_gettime": SNR_TIMERFD_GETTIME,
|
||||||
|
"utimensat": SNR_UTIMENSAT,
|
||||||
|
"acct": SNR_ACCT,
|
||||||
|
"capget": SNR_CAPGET,
|
||||||
|
"capset": SNR_CAPSET,
|
||||||
|
"personality": SNR_PERSONALITY,
|
||||||
|
"exit": SNR_EXIT,
|
||||||
|
"exit_group": SNR_EXIT_GROUP,
|
||||||
|
"waitid": SNR_WAITID,
|
||||||
|
"set_tid_address": SNR_SET_TID_ADDRESS,
|
||||||
|
"unshare": SNR_UNSHARE,
|
||||||
|
"futex": SNR_FUTEX,
|
||||||
|
"set_robust_list": SNR_SET_ROBUST_LIST,
|
||||||
|
"get_robust_list": SNR_GET_ROBUST_LIST,
|
||||||
|
"nanosleep": SNR_NANOSLEEP,
|
||||||
|
"getitimer": SNR_GETITIMER,
|
||||||
|
"setitimer": SNR_SETITIMER,
|
||||||
|
"kexec_load": SNR_KEXEC_LOAD,
|
||||||
|
"init_module": SNR_INIT_MODULE,
|
||||||
|
"delete_module": SNR_DELETE_MODULE,
|
||||||
|
"timer_create": SNR_TIMER_CREATE,
|
||||||
|
"timer_gettime": SNR_TIMER_GETTIME,
|
||||||
|
"timer_getoverrun": SNR_TIMER_GETOVERRUN,
|
||||||
|
"timer_settime": SNR_TIMER_SETTIME,
|
||||||
|
"timer_delete": SNR_TIMER_DELETE,
|
||||||
|
"clock_settime": SNR_CLOCK_SETTIME,
|
||||||
|
"clock_gettime": SNR_CLOCK_GETTIME,
|
||||||
|
"clock_getres": SNR_CLOCK_GETRES,
|
||||||
|
"clock_nanosleep": SNR_CLOCK_NANOSLEEP,
|
||||||
|
"syslog": SNR_SYSLOG,
|
||||||
|
"ptrace": SNR_PTRACE,
|
||||||
|
"sched_setparam": SNR_SCHED_SETPARAM,
|
||||||
|
"sched_setscheduler": SNR_SCHED_SETSCHEDULER,
|
||||||
|
"sched_getscheduler": SNR_SCHED_GETSCHEDULER,
|
||||||
|
"sched_getparam": SNR_SCHED_GETPARAM,
|
||||||
|
"sched_setaffinity": SNR_SCHED_SETAFFINITY,
|
||||||
|
"sched_getaffinity": SNR_SCHED_GETAFFINITY,
|
||||||
|
"sched_yield": SNR_SCHED_YIELD,
|
||||||
|
"sched_get_priority_max": SNR_SCHED_GET_PRIORITY_MAX,
|
||||||
|
"sched_get_priority_min": SNR_SCHED_GET_PRIORITY_MIN,
|
||||||
|
"sched_rr_get_interval": SNR_SCHED_RR_GET_INTERVAL,
|
||||||
|
"restart_syscall": SNR_RESTART_SYSCALL,
|
||||||
|
"kill": SNR_KILL,
|
||||||
|
"tkill": SNR_TKILL,
|
||||||
|
"tgkill": SNR_TGKILL,
|
||||||
|
"sigaltstack": SNR_SIGALTSTACK,
|
||||||
|
"rt_sigsuspend": SNR_RT_SIGSUSPEND,
|
||||||
|
"rt_sigaction": SNR_RT_SIGACTION,
|
||||||
|
"rt_sigprocmask": SNR_RT_SIGPROCMASK,
|
||||||
|
"rt_sigpending": SNR_RT_SIGPENDING,
|
||||||
|
"rt_sigtimedwait": SNR_RT_SIGTIMEDWAIT,
|
||||||
|
"rt_sigqueueinfo": SNR_RT_SIGQUEUEINFO,
|
||||||
|
"rt_sigreturn": SNR_RT_SIGRETURN,
|
||||||
|
"setpriority": SNR_SETPRIORITY,
|
||||||
|
"getpriority": SNR_GETPRIORITY,
|
||||||
|
"reboot": SNR_REBOOT,
|
||||||
|
"setregid": SNR_SETREGID,
|
||||||
|
"setgid": SNR_SETGID,
|
||||||
|
"setreuid": SNR_SETREUID,
|
||||||
|
"setuid": SNR_SETUID,
|
||||||
|
"setresuid": SNR_SETRESUID,
|
||||||
|
"getresuid": SNR_GETRESUID,
|
||||||
|
"setresgid": SNR_SETRESGID,
|
||||||
|
"getresgid": SNR_GETRESGID,
|
||||||
|
"setfsuid": SNR_SETFSUID,
|
||||||
|
"setfsgid": SNR_SETFSGID,
|
||||||
|
"times": SNR_TIMES,
|
||||||
|
"setpgid": SNR_SETPGID,
|
||||||
|
"getpgid": SNR_GETPGID,
|
||||||
|
"getsid": SNR_GETSID,
|
||||||
|
"setsid": SNR_SETSID,
|
||||||
|
"getgroups": SNR_GETGROUPS,
|
||||||
|
"setgroups": SNR_SETGROUPS,
|
||||||
|
"uname": SNR_UNAME,
|
||||||
|
"sethostname": SNR_SETHOSTNAME,
|
||||||
|
"setdomainname": SNR_SETDOMAINNAME,
|
||||||
|
"getrlimit": SNR_GETRLIMIT,
|
||||||
|
"setrlimit": SNR_SETRLIMIT,
|
||||||
|
"getrusage": SNR_GETRUSAGE,
|
||||||
|
"umask": SNR_UMASK,
|
||||||
|
"prctl": SNR_PRCTL,
|
||||||
|
"getcpu": SNR_GETCPU,
|
||||||
|
"gettimeofday": SNR_GETTIMEOFDAY,
|
||||||
|
"settimeofday": SNR_SETTIMEOFDAY,
|
||||||
|
"adjtimex": SNR_ADJTIMEX,
|
||||||
|
"getpid": SNR_GETPID,
|
||||||
|
"getppid": SNR_GETPPID,
|
||||||
|
"getuid": SNR_GETUID,
|
||||||
|
"geteuid": SNR_GETEUID,
|
||||||
|
"getgid": SNR_GETGID,
|
||||||
|
"getegid": SNR_GETEGID,
|
||||||
|
"gettid": SNR_GETTID,
|
||||||
|
"sysinfo": SNR_SYSINFO,
|
||||||
|
"mq_open": SNR_MQ_OPEN,
|
||||||
|
"mq_unlink": SNR_MQ_UNLINK,
|
||||||
|
"mq_timedsend": SNR_MQ_TIMEDSEND,
|
||||||
|
"mq_timedreceive": SNR_MQ_TIMEDRECEIVE,
|
||||||
|
"mq_notify": SNR_MQ_NOTIFY,
|
||||||
|
"mq_getsetattr": SNR_MQ_GETSETATTR,
|
||||||
|
"msgget": SNR_MSGGET,
|
||||||
|
"msgctl": SNR_MSGCTL,
|
||||||
|
"msgrcv": SNR_MSGRCV,
|
||||||
|
"msgsnd": SNR_MSGSND,
|
||||||
|
"semget": SNR_SEMGET,
|
||||||
|
"semctl": SNR_SEMCTL,
|
||||||
|
"semtimedop": SNR_SEMTIMEDOP,
|
||||||
|
"semop": SNR_SEMOP,
|
||||||
|
"shmget": SNR_SHMGET,
|
||||||
|
"shmctl": SNR_SHMCTL,
|
||||||
|
"shmat": SNR_SHMAT,
|
||||||
|
"shmdt": SNR_SHMDT,
|
||||||
|
"socket": SNR_SOCKET,
|
||||||
|
"socketpair": SNR_SOCKETPAIR,
|
||||||
|
"bind": SNR_BIND,
|
||||||
|
"listen": SNR_LISTEN,
|
||||||
|
"accept": SNR_ACCEPT,
|
||||||
|
"connect": SNR_CONNECT,
|
||||||
|
"getsockname": SNR_GETSOCKNAME,
|
||||||
|
"getpeername": SNR_GETPEERNAME,
|
||||||
|
"sendto": SNR_SENDTO,
|
||||||
|
"recvfrom": SNR_RECVFROM,
|
||||||
|
"setsockopt": SNR_SETSOCKOPT,
|
||||||
|
"getsockopt": SNR_GETSOCKOPT,
|
||||||
|
"shutdown": SNR_SHUTDOWN,
|
||||||
|
"sendmsg": SNR_SENDMSG,
|
||||||
|
"recvmsg": SNR_RECVMSG,
|
||||||
|
"readahead": SNR_READAHEAD,
|
||||||
|
"brk": SNR_BRK,
|
||||||
|
"munmap": SNR_MUNMAP,
|
||||||
|
"mremap": SNR_MREMAP,
|
||||||
|
"add_key": SNR_ADD_KEY,
|
||||||
|
"request_key": SNR_REQUEST_KEY,
|
||||||
|
"keyctl": SNR_KEYCTL,
|
||||||
|
"clone": SNR_CLONE,
|
||||||
|
"execve": SNR_EXECVE,
|
||||||
|
"mmap": SNR_MMAP,
|
||||||
|
"fadvise64": SNR_FADVISE64,
|
||||||
|
"swapon": SNR_SWAPON,
|
||||||
|
"swapoff": SNR_SWAPOFF,
|
||||||
|
"mprotect": SNR_MPROTECT,
|
||||||
|
"msync": SNR_MSYNC,
|
||||||
|
"mlock": SNR_MLOCK,
|
||||||
|
"munlock": SNR_MUNLOCK,
|
||||||
|
"mlockall": SNR_MLOCKALL,
|
||||||
|
"munlockall": SNR_MUNLOCKALL,
|
||||||
|
"mincore": SNR_MINCORE,
|
||||||
|
"madvise": SNR_MADVISE,
|
||||||
|
"remap_file_pages": SNR_REMAP_FILE_PAGES,
|
||||||
|
"mbind": SNR_MBIND,
|
||||||
|
"get_mempolicy": SNR_GET_MEMPOLICY,
|
||||||
|
"set_mempolicy": SNR_SET_MEMPOLICY,
|
||||||
|
"migrate_pages": SNR_MIGRATE_PAGES,
|
||||||
|
"move_pages": SNR_MOVE_PAGES,
|
||||||
|
"rt_tgsigqueueinfo": SNR_RT_TGSIGQUEUEINFO,
|
||||||
|
"perf_event_open": SNR_PERF_EVENT_OPEN,
|
||||||
|
"accept4": SNR_ACCEPT4,
|
||||||
|
"recvmmsg": SNR_RECVMMSG,
|
||||||
|
"wait4": SNR_WAIT4,
|
||||||
|
"prlimit64": SNR_PRLIMIT64,
|
||||||
|
"fanotify_init": SNR_FANOTIFY_INIT,
|
||||||
|
"fanotify_mark": SNR_FANOTIFY_MARK,
|
||||||
|
"name_to_handle_at": SNR_NAME_TO_HANDLE_AT,
|
||||||
|
"open_by_handle_at": SNR_OPEN_BY_HANDLE_AT,
|
||||||
|
"clock_adjtime": SNR_CLOCK_ADJTIME,
|
||||||
|
"syncfs": SNR_SYNCFS,
|
||||||
|
"setns": SNR_SETNS,
|
||||||
|
"sendmmsg": SNR_SENDMMSG,
|
||||||
|
"process_vm_readv": SNR_PROCESS_VM_READV,
|
||||||
|
"process_vm_writev": SNR_PROCESS_VM_WRITEV,
|
||||||
|
"kcmp": SNR_KCMP,
|
||||||
|
"finit_module": SNR_FINIT_MODULE,
|
||||||
|
"sched_setattr": SNR_SCHED_SETATTR,
|
||||||
|
"sched_getattr": SNR_SCHED_GETATTR,
|
||||||
|
"renameat2": SNR_RENAMEAT2,
|
||||||
|
"seccomp": SNR_SECCOMP,
|
||||||
|
"getrandom": SNR_GETRANDOM,
|
||||||
|
"memfd_create": SNR_MEMFD_CREATE,
|
||||||
|
"bpf": SNR_BPF,
|
||||||
|
"execveat": SNR_EXECVEAT,
|
||||||
|
"userfaultfd": SNR_USERFAULTFD,
|
||||||
|
"membarrier": SNR_MEMBARRIER,
|
||||||
|
"mlock2": SNR_MLOCK2,
|
||||||
|
"copy_file_range": SNR_COPY_FILE_RANGE,
|
||||||
|
"preadv2": SNR_PREADV2,
|
||||||
|
"pwritev2": SNR_PWRITEV2,
|
||||||
|
"pkey_mprotect": SNR_PKEY_MPROTECT,
|
||||||
|
"pkey_alloc": SNR_PKEY_ALLOC,
|
||||||
|
"pkey_free": SNR_PKEY_FREE,
|
||||||
|
"statx": SNR_STATX,
|
||||||
|
"io_pgetevents": SNR_IO_PGETEVENTS,
|
||||||
|
"rseq": SNR_RSEQ,
|
||||||
|
"kexec_file_load": SNR_KEXEC_FILE_LOAD,
|
||||||
|
"pidfd_send_signal": SNR_PIDFD_SEND_SIGNAL,
|
||||||
|
"io_uring_setup": SNR_IO_URING_SETUP,
|
||||||
|
"io_uring_enter": SNR_IO_URING_ENTER,
|
||||||
|
"io_uring_register": SNR_IO_URING_REGISTER,
|
||||||
|
"open_tree": SNR_OPEN_TREE,
|
||||||
|
"move_mount": SNR_MOVE_MOUNT,
|
||||||
|
"fsopen": SNR_FSOPEN,
|
||||||
|
"fsconfig": SNR_FSCONFIG,
|
||||||
|
"fsmount": SNR_FSMOUNT,
|
||||||
|
"fspick": SNR_FSPICK,
|
||||||
|
"pidfd_open": SNR_PIDFD_OPEN,
|
||||||
|
"clone3": SNR_CLONE3,
|
||||||
|
"close_range": SNR_CLOSE_RANGE,
|
||||||
|
"openat2": SNR_OPENAT2,
|
||||||
|
"pidfd_getfd": SNR_PIDFD_GETFD,
|
||||||
|
"faccessat2": SNR_FACCESSAT2,
|
||||||
|
"process_madvise": SNR_PROCESS_MADVISE,
|
||||||
|
"epoll_pwait2": SNR_EPOLL_PWAIT2,
|
||||||
|
"mount_setattr": SNR_MOUNT_SETATTR,
|
||||||
|
"quotactl_fd": SNR_QUOTACTL_FD,
|
||||||
|
"landlock_create_ruleset": SNR_LANDLOCK_CREATE_RULESET,
|
||||||
|
"landlock_add_rule": SNR_LANDLOCK_ADD_RULE,
|
||||||
|
"landlock_restrict_self": SNR_LANDLOCK_RESTRICT_SELF,
|
||||||
|
"memfd_secret": SNR_MEMFD_SECRET,
|
||||||
|
"process_mrelease": SNR_PROCESS_MRELEASE,
|
||||||
|
"futex_waitv": SNR_FUTEX_WAITV,
|
||||||
|
"set_mempolicy_home_node": SNR_SET_MEMPOLICY_HOME_NODE,
|
||||||
|
"cachestat": SNR_CACHESTAT,
|
||||||
|
"fchmodat2": SNR_FCHMODAT2,
|
||||||
|
"map_shadow_stack": SNR_MAP_SHADOW_STACK,
|
||||||
|
"futex_wake": SNR_FUTEX_WAKE,
|
||||||
|
"futex_wait": SNR_FUTEX_WAIT,
|
||||||
|
"futex_requeue": SNR_FUTEX_REQUEUE,
|
||||||
|
"statmount": SNR_STATMOUNT,
|
||||||
|
"listmount": SNR_LISTMOUNT,
|
||||||
|
"lsm_get_self_attr": SNR_LSM_GET_SELF_ATTR,
|
||||||
|
"lsm_set_self_attr": SNR_LSM_SET_SELF_ATTR,
|
||||||
|
"lsm_list_modules": SNR_LSM_LIST_MODULES,
|
||||||
|
"mseal": SNR_MSEAL,
|
||||||
|
"setxattrat": SNR_SETXATTRAT,
|
||||||
|
"getxattrat": SNR_GETXATTRAT,
|
||||||
|
"listxattrat": SNR_LISTXATTRAT,
|
||||||
|
"removexattrat": SNR_REMOVEXATTRAT,
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
SYS_USERFAULTFD = 282
|
||||||
|
SYS_MEMBARRIER = 283
|
||||||
|
SYS_MLOCK2 = 284
|
||||||
|
SYS_COPY_FILE_RANGE = 285
|
||||||
|
SYS_PREADV2 = 286
|
||||||
|
SYS_PWRITEV2 = 287
|
||||||
|
SYS_PKEY_MPROTECT = 288
|
||||||
|
SYS_PKEY_ALLOC = 289
|
||||||
|
SYS_PKEY_FREE = 290
|
||||||
|
SYS_STATX = 291
|
||||||
|
SYS_IO_PGETEVENTS = 292
|
||||||
|
SYS_RSEQ = 293
|
||||||
|
SYS_KEXEC_FILE_LOAD = 294
|
||||||
|
SYS_PIDFD_SEND_SIGNAL = 424
|
||||||
|
SYS_IO_URING_SETUP = 425
|
||||||
|
SYS_IO_URING_ENTER = 426
|
||||||
|
SYS_IO_URING_REGISTER = 427
|
||||||
|
SYS_OPEN_TREE = 428
|
||||||
|
SYS_MOVE_MOUNT = 429
|
||||||
|
SYS_FSOPEN = 430
|
||||||
|
SYS_FSCONFIG = 431
|
||||||
|
SYS_FSMOUNT = 432
|
||||||
|
SYS_FSPICK = 433
|
||||||
|
SYS_PIDFD_OPEN = 434
|
||||||
|
SYS_CLONE3 = 435
|
||||||
|
SYS_CLOSE_RANGE = 436
|
||||||
|
SYS_OPENAT2 = 437
|
||||||
|
SYS_PIDFD_GETFD = 438
|
||||||
|
SYS_FACCESSAT2 = 439
|
||||||
|
SYS_PROCESS_MADVISE = 440
|
||||||
|
SYS_EPOLL_PWAIT2 = 441
|
||||||
|
SYS_MOUNT_SETATTR = 442
|
||||||
|
SYS_QUOTACTL_FD = 443
|
||||||
|
SYS_LANDLOCK_CREATE_RULESET = 444
|
||||||
|
SYS_LANDLOCK_ADD_RULE = 445
|
||||||
|
SYS_LANDLOCK_RESTRICT_SELF = 446
|
||||||
|
SYS_MEMFD_SECRET = 447
|
||||||
|
SYS_PROCESS_MRELEASE = 448
|
||||||
|
SYS_FUTEX_WAITV = 449
|
||||||
|
SYS_SET_MEMPOLICY_HOME_NODE = 450
|
||||||
|
SYS_CACHESTAT = 451
|
||||||
|
SYS_FCHMODAT2 = 452
|
||||||
|
SYS_MAP_SHADOW_STACK = 453
|
||||||
|
SYS_FUTEX_WAKE = 454
|
||||||
|
SYS_FUTEX_WAIT = 455
|
||||||
|
SYS_FUTEX_REQUEUE = 456
|
||||||
|
SYS_STATMOUNT = 457
|
||||||
|
SYS_LISTMOUNT = 458
|
||||||
|
SYS_LSM_GET_SELF_ATTR = 459
|
||||||
|
SYS_LSM_SET_SELF_ATTR = 460
|
||||||
|
SYS_LSM_LIST_MODULES = 461
|
||||||
|
SYS_MSEAL = 462
|
||||||
|
SYS_SETXATTRAT = 463
|
||||||
|
SYS_GETXATTRAT = 464
|
||||||
|
SYS_LISTXATTRAT = 465
|
||||||
|
SYS_REMOVEXATTRAT = 466
|
||||||
|
SYS_OPEN_TREE_ATTR = 467
|
||||||
|
SYS_FILE_GETATTR = 468
|
||||||
|
SYS_FILE_SETATTR = 469
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
SNR_IO_SETUP ScmpSyscall = SYS_IO_SETUP
|
||||||
|
SNR_IO_DESTROY ScmpSyscall = SYS_IO_DESTROY
|
||||||
|
SNR_IO_SUBMIT ScmpSyscall = SYS_IO_SUBMIT
|
||||||
|
SNR_IO_CANCEL ScmpSyscall = SYS_IO_CANCEL
|
||||||
|
SNR_IO_GETEVENTS ScmpSyscall = SYS_IO_GETEVENTS
|
||||||
|
SNR_SETXATTR ScmpSyscall = SYS_SETXATTR
|
||||||
|
SNR_LSETXATTR ScmpSyscall = SYS_LSETXATTR
|
||||||
|
SNR_FSETXATTR ScmpSyscall = SYS_FSETXATTR
|
||||||
|
SNR_GETXATTR ScmpSyscall = SYS_GETXATTR
|
||||||
|
SNR_LGETXATTR ScmpSyscall = SYS_LGETXATTR
|
||||||
|
SNR_FGETXATTR ScmpSyscall = SYS_FGETXATTR
|
||||||
|
SNR_LISTXATTR ScmpSyscall = SYS_LISTXATTR
|
||||||
|
SNR_LLISTXATTR ScmpSyscall = SYS_LLISTXATTR
|
||||||
|
SNR_FLISTXATTR ScmpSyscall = SYS_FLISTXATTR
|
||||||
|
SNR_REMOVEXATTR ScmpSyscall = SYS_REMOVEXATTR
|
||||||
|
SNR_LREMOVEXATTR ScmpSyscall = SYS_LREMOVEXATTR
|
||||||
|
SNR_FREMOVEXATTR ScmpSyscall = SYS_FREMOVEXATTR
|
||||||
|
SNR_GETCWD ScmpSyscall = SYS_GETCWD
|
||||||
|
SNR_LOOKUP_DCOOKIE ScmpSyscall = SYS_LOOKUP_DCOOKIE
|
||||||
|
SNR_EVENTFD2 ScmpSyscall = SYS_EVENTFD2
|
||||||
|
SNR_EPOLL_CREATE1 ScmpSyscall = SYS_EPOLL_CREATE1
|
||||||
|
SNR_EPOLL_CTL ScmpSyscall = SYS_EPOLL_CTL
|
||||||
|
SNR_EPOLL_PWAIT ScmpSyscall = SYS_EPOLL_PWAIT
|
||||||
|
SNR_DUP ScmpSyscall = SYS_DUP
|
||||||
|
SNR_DUP3 ScmpSyscall = SYS_DUP3
|
||||||
|
SNR_FCNTL ScmpSyscall = SYS_FCNTL
|
||||||
|
SNR_INOTIFY_INIT1 ScmpSyscall = SYS_INOTIFY_INIT1
|
||||||
|
SNR_INOTIFY_ADD_WATCH ScmpSyscall = SYS_INOTIFY_ADD_WATCH
|
||||||
|
SNR_INOTIFY_RM_WATCH ScmpSyscall = SYS_INOTIFY_RM_WATCH
|
||||||
|
SNR_IOCTL ScmpSyscall = SYS_IOCTL
|
||||||
|
SNR_IOPRIO_SET ScmpSyscall = SYS_IOPRIO_SET
|
||||||
|
SNR_IOPRIO_GET ScmpSyscall = SYS_IOPRIO_GET
|
||||||
|
SNR_FLOCK ScmpSyscall = SYS_FLOCK
|
||||||
|
SNR_MKNODAT ScmpSyscall = SYS_MKNODAT
|
||||||
|
SNR_MKDIRAT ScmpSyscall = SYS_MKDIRAT
|
||||||
|
SNR_UNLINKAT ScmpSyscall = SYS_UNLINKAT
|
||||||
|
SNR_SYMLINKAT ScmpSyscall = SYS_SYMLINKAT
|
||||||
|
SNR_LINKAT ScmpSyscall = SYS_LINKAT
|
||||||
|
SNR_UMOUNT2 ScmpSyscall = SYS_UMOUNT2
|
||||||
|
SNR_MOUNT ScmpSyscall = SYS_MOUNT
|
||||||
|
SNR_PIVOT_ROOT ScmpSyscall = SYS_PIVOT_ROOT
|
||||||
|
SNR_NFSSERVCTL ScmpSyscall = SYS_NFSSERVCTL
|
||||||
|
SNR_STATFS ScmpSyscall = SYS_STATFS
|
||||||
|
SNR_FSTATFS ScmpSyscall = SYS_FSTATFS
|
||||||
|
SNR_TRUNCATE ScmpSyscall = SYS_TRUNCATE
|
||||||
|
SNR_FTRUNCATE ScmpSyscall = SYS_FTRUNCATE
|
||||||
|
SNR_FALLOCATE ScmpSyscall = SYS_FALLOCATE
|
||||||
|
SNR_FACCESSAT ScmpSyscall = SYS_FACCESSAT
|
||||||
|
SNR_CHDIR ScmpSyscall = SYS_CHDIR
|
||||||
|
SNR_FCHDIR ScmpSyscall = SYS_FCHDIR
|
||||||
|
SNR_CHROOT ScmpSyscall = SYS_CHROOT
|
||||||
|
SNR_FCHMOD ScmpSyscall = SYS_FCHMOD
|
||||||
|
SNR_FCHMODAT ScmpSyscall = SYS_FCHMODAT
|
||||||
|
SNR_FCHOWNAT ScmpSyscall = SYS_FCHOWNAT
|
||||||
|
SNR_FCHOWN ScmpSyscall = SYS_FCHOWN
|
||||||
|
SNR_OPENAT ScmpSyscall = SYS_OPENAT
|
||||||
|
SNR_CLOSE ScmpSyscall = SYS_CLOSE
|
||||||
|
SNR_VHANGUP ScmpSyscall = SYS_VHANGUP
|
||||||
|
SNR_PIPE2 ScmpSyscall = SYS_PIPE2
|
||||||
|
SNR_QUOTACTL ScmpSyscall = SYS_QUOTACTL
|
||||||
|
SNR_GETDENTS64 ScmpSyscall = SYS_GETDENTS64
|
||||||
|
SNR_LSEEK ScmpSyscall = SYS_LSEEK
|
||||||
|
SNR_READ ScmpSyscall = SYS_READ
|
||||||
|
SNR_WRITE ScmpSyscall = SYS_WRITE
|
||||||
|
SNR_READV ScmpSyscall = SYS_READV
|
||||||
|
SNR_WRITEV ScmpSyscall = SYS_WRITEV
|
||||||
|
SNR_PREAD64 ScmpSyscall = SYS_PREAD64
|
||||||
|
SNR_PWRITE64 ScmpSyscall = SYS_PWRITE64
|
||||||
|
SNR_PREADV ScmpSyscall = SYS_PREADV
|
||||||
|
SNR_PWRITEV ScmpSyscall = SYS_PWRITEV
|
||||||
|
SNR_SENDFILE ScmpSyscall = SYS_SENDFILE
|
||||||
|
SNR_PSELECT6 ScmpSyscall = SYS_PSELECT6
|
||||||
|
SNR_PPOLL ScmpSyscall = SYS_PPOLL
|
||||||
|
SNR_SIGNALFD4 ScmpSyscall = SYS_SIGNALFD4
|
||||||
|
SNR_VMSPLICE ScmpSyscall = SYS_VMSPLICE
|
||||||
|
SNR_SPLICE ScmpSyscall = SYS_SPLICE
|
||||||
|
SNR_TEE ScmpSyscall = SYS_TEE
|
||||||
|
SNR_READLINKAT ScmpSyscall = SYS_READLINKAT
|
||||||
|
SNR_NEWFSTATAT ScmpSyscall = SYS_NEWFSTATAT
|
||||||
|
SNR_FSTAT ScmpSyscall = SYS_FSTAT
|
||||||
|
SNR_SYNC ScmpSyscall = SYS_SYNC
|
||||||
|
SNR_FSYNC ScmpSyscall = SYS_FSYNC
|
||||||
|
SNR_FDATASYNC ScmpSyscall = SYS_FDATASYNC
|
||||||
|
SNR_SYNC_FILE_RANGE ScmpSyscall = SYS_SYNC_FILE_RANGE
|
||||||
|
SNR_TIMERFD_CREATE ScmpSyscall = SYS_TIMERFD_CREATE
|
||||||
|
SNR_TIMERFD_SETTIME ScmpSyscall = SYS_TIMERFD_SETTIME
|
||||||
|
SNR_TIMERFD_GETTIME ScmpSyscall = SYS_TIMERFD_GETTIME
|
||||||
|
SNR_UTIMENSAT ScmpSyscall = SYS_UTIMENSAT
|
||||||
|
SNR_ACCT ScmpSyscall = SYS_ACCT
|
||||||
|
SNR_CAPGET ScmpSyscall = SYS_CAPGET
|
||||||
|
SNR_CAPSET ScmpSyscall = SYS_CAPSET
|
||||||
|
SNR_PERSONALITY ScmpSyscall = SYS_PERSONALITY
|
||||||
|
SNR_EXIT ScmpSyscall = SYS_EXIT
|
||||||
|
SNR_EXIT_GROUP ScmpSyscall = SYS_EXIT_GROUP
|
||||||
|
SNR_WAITID ScmpSyscall = SYS_WAITID
|
||||||
|
SNR_SET_TID_ADDRESS ScmpSyscall = SYS_SET_TID_ADDRESS
|
||||||
|
SNR_UNSHARE ScmpSyscall = SYS_UNSHARE
|
||||||
|
SNR_FUTEX ScmpSyscall = SYS_FUTEX
|
||||||
|
SNR_SET_ROBUST_LIST ScmpSyscall = SYS_SET_ROBUST_LIST
|
||||||
|
SNR_GET_ROBUST_LIST ScmpSyscall = SYS_GET_ROBUST_LIST
|
||||||
|
SNR_NANOSLEEP ScmpSyscall = SYS_NANOSLEEP
|
||||||
|
SNR_GETITIMER ScmpSyscall = SYS_GETITIMER
|
||||||
|
SNR_SETITIMER ScmpSyscall = SYS_SETITIMER
|
||||||
|
SNR_KEXEC_LOAD ScmpSyscall = SYS_KEXEC_LOAD
|
||||||
|
SNR_INIT_MODULE ScmpSyscall = SYS_INIT_MODULE
|
||||||
|
SNR_DELETE_MODULE ScmpSyscall = SYS_DELETE_MODULE
|
||||||
|
SNR_TIMER_CREATE ScmpSyscall = SYS_TIMER_CREATE
|
||||||
|
SNR_TIMER_GETTIME ScmpSyscall = SYS_TIMER_GETTIME
|
||||||
|
SNR_TIMER_GETOVERRUN ScmpSyscall = SYS_TIMER_GETOVERRUN
|
||||||
|
SNR_TIMER_SETTIME ScmpSyscall = SYS_TIMER_SETTIME
|
||||||
|
SNR_TIMER_DELETE ScmpSyscall = SYS_TIMER_DELETE
|
||||||
|
SNR_CLOCK_SETTIME ScmpSyscall = SYS_CLOCK_SETTIME
|
||||||
|
SNR_CLOCK_GETTIME ScmpSyscall = SYS_CLOCK_GETTIME
|
||||||
|
SNR_CLOCK_GETRES ScmpSyscall = SYS_CLOCK_GETRES
|
||||||
|
SNR_CLOCK_NANOSLEEP ScmpSyscall = SYS_CLOCK_NANOSLEEP
|
||||||
|
SNR_SYSLOG ScmpSyscall = SYS_SYSLOG
|
||||||
|
SNR_PTRACE ScmpSyscall = SYS_PTRACE
|
||||||
|
SNR_SCHED_SETPARAM ScmpSyscall = SYS_SCHED_SETPARAM
|
||||||
|
SNR_SCHED_SETSCHEDULER ScmpSyscall = SYS_SCHED_SETSCHEDULER
|
||||||
|
SNR_SCHED_GETSCHEDULER ScmpSyscall = SYS_SCHED_GETSCHEDULER
|
||||||
|
SNR_SCHED_GETPARAM ScmpSyscall = SYS_SCHED_GETPARAM
|
||||||
|
SNR_SCHED_SETAFFINITY ScmpSyscall = SYS_SCHED_SETAFFINITY
|
||||||
|
SNR_SCHED_GETAFFINITY ScmpSyscall = SYS_SCHED_GETAFFINITY
|
||||||
|
SNR_SCHED_YIELD ScmpSyscall = SYS_SCHED_YIELD
|
||||||
|
SNR_SCHED_GET_PRIORITY_MAX ScmpSyscall = SYS_SCHED_GET_PRIORITY_MAX
|
||||||
|
SNR_SCHED_GET_PRIORITY_MIN ScmpSyscall = SYS_SCHED_GET_PRIORITY_MIN
|
||||||
|
SNR_SCHED_RR_GET_INTERVAL ScmpSyscall = SYS_SCHED_RR_GET_INTERVAL
|
||||||
|
SNR_RESTART_SYSCALL ScmpSyscall = SYS_RESTART_SYSCALL
|
||||||
|
SNR_KILL ScmpSyscall = SYS_KILL
|
||||||
|
SNR_TKILL ScmpSyscall = SYS_TKILL
|
||||||
|
SNR_TGKILL ScmpSyscall = SYS_TGKILL
|
||||||
|
SNR_SIGALTSTACK ScmpSyscall = SYS_SIGALTSTACK
|
||||||
|
SNR_RT_SIGSUSPEND ScmpSyscall = SYS_RT_SIGSUSPEND
|
||||||
|
SNR_RT_SIGACTION ScmpSyscall = SYS_RT_SIGACTION
|
||||||
|
SNR_RT_SIGPROCMASK ScmpSyscall = SYS_RT_SIGPROCMASK
|
||||||
|
SNR_RT_SIGPENDING ScmpSyscall = SYS_RT_SIGPENDING
|
||||||
|
SNR_RT_SIGTIMEDWAIT ScmpSyscall = SYS_RT_SIGTIMEDWAIT
|
||||||
|
SNR_RT_SIGQUEUEINFO ScmpSyscall = SYS_RT_SIGQUEUEINFO
|
||||||
|
SNR_RT_SIGRETURN ScmpSyscall = SYS_RT_SIGRETURN
|
||||||
|
SNR_SETPRIORITY ScmpSyscall = SYS_SETPRIORITY
|
||||||
|
SNR_GETPRIORITY ScmpSyscall = SYS_GETPRIORITY
|
||||||
|
SNR_REBOOT ScmpSyscall = SYS_REBOOT
|
||||||
|
SNR_SETREGID ScmpSyscall = SYS_SETREGID
|
||||||
|
SNR_SETGID ScmpSyscall = SYS_SETGID
|
||||||
|
SNR_SETREUID ScmpSyscall = SYS_SETREUID
|
||||||
|
SNR_SETUID ScmpSyscall = SYS_SETUID
|
||||||
|
SNR_SETRESUID ScmpSyscall = SYS_SETRESUID
|
||||||
|
SNR_GETRESUID ScmpSyscall = SYS_GETRESUID
|
||||||
|
SNR_SETRESGID ScmpSyscall = SYS_SETRESGID
|
||||||
|
SNR_GETRESGID ScmpSyscall = SYS_GETRESGID
|
||||||
|
SNR_SETFSUID ScmpSyscall = SYS_SETFSUID
|
||||||
|
SNR_SETFSGID ScmpSyscall = SYS_SETFSGID
|
||||||
|
SNR_TIMES ScmpSyscall = SYS_TIMES
|
||||||
|
SNR_SETPGID ScmpSyscall = SYS_SETPGID
|
||||||
|
SNR_GETPGID ScmpSyscall = SYS_GETPGID
|
||||||
|
SNR_GETSID ScmpSyscall = SYS_GETSID
|
||||||
|
SNR_SETSID ScmpSyscall = SYS_SETSID
|
||||||
|
SNR_GETGROUPS ScmpSyscall = SYS_GETGROUPS
|
||||||
|
SNR_SETGROUPS ScmpSyscall = SYS_SETGROUPS
|
||||||
|
SNR_UNAME ScmpSyscall = SYS_UNAME
|
||||||
|
SNR_SETHOSTNAME ScmpSyscall = SYS_SETHOSTNAME
|
||||||
|
SNR_SETDOMAINNAME ScmpSyscall = SYS_SETDOMAINNAME
|
||||||
|
SNR_GETRLIMIT ScmpSyscall = SYS_GETRLIMIT
|
||||||
|
SNR_SETRLIMIT ScmpSyscall = SYS_SETRLIMIT
|
||||||
|
SNR_GETRUSAGE ScmpSyscall = SYS_GETRUSAGE
|
||||||
|
SNR_UMASK ScmpSyscall = SYS_UMASK
|
||||||
|
SNR_PRCTL ScmpSyscall = SYS_PRCTL
|
||||||
|
SNR_GETCPU ScmpSyscall = SYS_GETCPU
|
||||||
|
SNR_GETTIMEOFDAY ScmpSyscall = SYS_GETTIMEOFDAY
|
||||||
|
SNR_SETTIMEOFDAY ScmpSyscall = SYS_SETTIMEOFDAY
|
||||||
|
SNR_ADJTIMEX ScmpSyscall = SYS_ADJTIMEX
|
||||||
|
SNR_GETPID ScmpSyscall = SYS_GETPID
|
||||||
|
SNR_GETPPID ScmpSyscall = SYS_GETPPID
|
||||||
|
SNR_GETUID ScmpSyscall = SYS_GETUID
|
||||||
|
SNR_GETEUID ScmpSyscall = SYS_GETEUID
|
||||||
|
SNR_GETGID ScmpSyscall = SYS_GETGID
|
||||||
|
SNR_GETEGID ScmpSyscall = SYS_GETEGID
|
||||||
|
SNR_GETTID ScmpSyscall = SYS_GETTID
|
||||||
|
SNR_SYSINFO ScmpSyscall = SYS_SYSINFO
|
||||||
|
SNR_MQ_OPEN ScmpSyscall = SYS_MQ_OPEN
|
||||||
|
SNR_MQ_UNLINK ScmpSyscall = SYS_MQ_UNLINK
|
||||||
|
SNR_MQ_TIMEDSEND ScmpSyscall = SYS_MQ_TIMEDSEND
|
||||||
|
SNR_MQ_TIMEDRECEIVE ScmpSyscall = SYS_MQ_TIMEDRECEIVE
|
||||||
|
SNR_MQ_NOTIFY ScmpSyscall = SYS_MQ_NOTIFY
|
||||||
|
SNR_MQ_GETSETATTR ScmpSyscall = SYS_MQ_GETSETATTR
|
||||||
|
SNR_MSGGET ScmpSyscall = SYS_MSGGET
|
||||||
|
SNR_MSGCTL ScmpSyscall = SYS_MSGCTL
|
||||||
|
SNR_MSGRCV ScmpSyscall = SYS_MSGRCV
|
||||||
|
SNR_MSGSND ScmpSyscall = SYS_MSGSND
|
||||||
|
SNR_SEMGET ScmpSyscall = SYS_SEMGET
|
||||||
|
SNR_SEMCTL ScmpSyscall = SYS_SEMCTL
|
||||||
|
SNR_SEMTIMEDOP ScmpSyscall = SYS_SEMTIMEDOP
|
||||||
|
SNR_SEMOP ScmpSyscall = SYS_SEMOP
|
||||||
|
SNR_SHMGET ScmpSyscall = SYS_SHMGET
|
||||||
|
SNR_SHMCTL ScmpSyscall = SYS_SHMCTL
|
||||||
|
SNR_SHMAT ScmpSyscall = SYS_SHMAT
|
||||||
|
SNR_SHMDT ScmpSyscall = SYS_SHMDT
|
||||||
|
SNR_SOCKET ScmpSyscall = SYS_SOCKET
|
||||||
|
SNR_SOCKETPAIR ScmpSyscall = SYS_SOCKETPAIR
|
||||||
|
SNR_BIND ScmpSyscall = SYS_BIND
|
||||||
|
SNR_LISTEN ScmpSyscall = SYS_LISTEN
|
||||||
|
SNR_ACCEPT ScmpSyscall = SYS_ACCEPT
|
||||||
|
SNR_CONNECT ScmpSyscall = SYS_CONNECT
|
||||||
|
SNR_GETSOCKNAME ScmpSyscall = SYS_GETSOCKNAME
|
||||||
|
SNR_GETPEERNAME ScmpSyscall = SYS_GETPEERNAME
|
||||||
|
SNR_SENDTO ScmpSyscall = SYS_SENDTO
|
||||||
|
SNR_RECVFROM ScmpSyscall = SYS_RECVFROM
|
||||||
|
SNR_SETSOCKOPT ScmpSyscall = SYS_SETSOCKOPT
|
||||||
|
SNR_GETSOCKOPT ScmpSyscall = SYS_GETSOCKOPT
|
||||||
|
SNR_SHUTDOWN ScmpSyscall = SYS_SHUTDOWN
|
||||||
|
SNR_SENDMSG ScmpSyscall = SYS_SENDMSG
|
||||||
|
SNR_RECVMSG ScmpSyscall = SYS_RECVMSG
|
||||||
|
SNR_READAHEAD ScmpSyscall = SYS_READAHEAD
|
||||||
|
SNR_BRK ScmpSyscall = SYS_BRK
|
||||||
|
SNR_MUNMAP ScmpSyscall = SYS_MUNMAP
|
||||||
|
SNR_MREMAP ScmpSyscall = SYS_MREMAP
|
||||||
|
SNR_ADD_KEY ScmpSyscall = SYS_ADD_KEY
|
||||||
|
SNR_REQUEST_KEY ScmpSyscall = SYS_REQUEST_KEY
|
||||||
|
SNR_KEYCTL ScmpSyscall = SYS_KEYCTL
|
||||||
|
SNR_CLONE ScmpSyscall = SYS_CLONE
|
||||||
|
SNR_EXECVE ScmpSyscall = SYS_EXECVE
|
||||||
|
SNR_MMAP ScmpSyscall = SYS_MMAP
|
||||||
|
SNR_FADVISE64 ScmpSyscall = SYS_FADVISE64
|
||||||
|
SNR_SWAPON ScmpSyscall = SYS_SWAPON
|
||||||
|
SNR_SWAPOFF ScmpSyscall = SYS_SWAPOFF
|
||||||
|
SNR_MPROTECT ScmpSyscall = SYS_MPROTECT
|
||||||
|
SNR_MSYNC ScmpSyscall = SYS_MSYNC
|
||||||
|
SNR_MLOCK ScmpSyscall = SYS_MLOCK
|
||||||
|
SNR_MUNLOCK ScmpSyscall = SYS_MUNLOCK
|
||||||
|
SNR_MLOCKALL ScmpSyscall = SYS_MLOCKALL
|
||||||
|
SNR_MUNLOCKALL ScmpSyscall = SYS_MUNLOCKALL
|
||||||
|
SNR_MINCORE ScmpSyscall = SYS_MINCORE
|
||||||
|
SNR_MADVISE ScmpSyscall = SYS_MADVISE
|
||||||
|
SNR_REMAP_FILE_PAGES ScmpSyscall = SYS_REMAP_FILE_PAGES
|
||||||
|
SNR_MBIND ScmpSyscall = SYS_MBIND
|
||||||
|
SNR_GET_MEMPOLICY ScmpSyscall = SYS_GET_MEMPOLICY
|
||||||
|
SNR_SET_MEMPOLICY ScmpSyscall = SYS_SET_MEMPOLICY
|
||||||
|
SNR_MIGRATE_PAGES ScmpSyscall = SYS_MIGRATE_PAGES
|
||||||
|
SNR_MOVE_PAGES ScmpSyscall = SYS_MOVE_PAGES
|
||||||
|
SNR_RT_TGSIGQUEUEINFO ScmpSyscall = SYS_RT_TGSIGQUEUEINFO
|
||||||
|
SNR_PERF_EVENT_OPEN ScmpSyscall = SYS_PERF_EVENT_OPEN
|
||||||
|
SNR_ACCEPT4 ScmpSyscall = SYS_ACCEPT4
|
||||||
|
SNR_RECVMMSG ScmpSyscall = SYS_RECVMMSG
|
||||||
|
SNR_WAIT4 ScmpSyscall = SYS_WAIT4
|
||||||
|
SNR_PRLIMIT64 ScmpSyscall = SYS_PRLIMIT64
|
||||||
|
SNR_FANOTIFY_INIT ScmpSyscall = SYS_FANOTIFY_INIT
|
||||||
|
SNR_FANOTIFY_MARK ScmpSyscall = SYS_FANOTIFY_MARK
|
||||||
|
SNR_NAME_TO_HANDLE_AT ScmpSyscall = SYS_NAME_TO_HANDLE_AT
|
||||||
|
SNR_OPEN_BY_HANDLE_AT ScmpSyscall = SYS_OPEN_BY_HANDLE_AT
|
||||||
|
SNR_CLOCK_ADJTIME ScmpSyscall = SYS_CLOCK_ADJTIME
|
||||||
|
SNR_SYNCFS ScmpSyscall = SYS_SYNCFS
|
||||||
|
SNR_SETNS ScmpSyscall = SYS_SETNS
|
||||||
|
SNR_SENDMMSG ScmpSyscall = SYS_SENDMMSG
|
||||||
|
SNR_PROCESS_VM_READV ScmpSyscall = SYS_PROCESS_VM_READV
|
||||||
|
SNR_PROCESS_VM_WRITEV ScmpSyscall = SYS_PROCESS_VM_WRITEV
|
||||||
|
SNR_KCMP ScmpSyscall = SYS_KCMP
|
||||||
|
SNR_FINIT_MODULE ScmpSyscall = SYS_FINIT_MODULE
|
||||||
|
SNR_SCHED_SETATTR ScmpSyscall = SYS_SCHED_SETATTR
|
||||||
|
SNR_SCHED_GETATTR ScmpSyscall = SYS_SCHED_GETATTR
|
||||||
|
SNR_RENAMEAT2 ScmpSyscall = SYS_RENAMEAT2
|
||||||
|
SNR_SECCOMP ScmpSyscall = SYS_SECCOMP
|
||||||
|
SNR_GETRANDOM ScmpSyscall = SYS_GETRANDOM
|
||||||
|
SNR_MEMFD_CREATE ScmpSyscall = SYS_MEMFD_CREATE
|
||||||
|
SNR_BPF ScmpSyscall = SYS_BPF
|
||||||
|
SNR_EXECVEAT ScmpSyscall = SYS_EXECVEAT
|
||||||
|
SNR_USERFAULTFD ScmpSyscall = SYS_USERFAULTFD
|
||||||
|
SNR_MEMBARRIER ScmpSyscall = SYS_MEMBARRIER
|
||||||
|
SNR_MLOCK2 ScmpSyscall = SYS_MLOCK2
|
||||||
|
SNR_COPY_FILE_RANGE ScmpSyscall = SYS_COPY_FILE_RANGE
|
||||||
|
SNR_PREADV2 ScmpSyscall = SYS_PREADV2
|
||||||
|
SNR_PWRITEV2 ScmpSyscall = SYS_PWRITEV2
|
||||||
|
SNR_PKEY_MPROTECT ScmpSyscall = SYS_PKEY_MPROTECT
|
||||||
|
SNR_PKEY_ALLOC ScmpSyscall = SYS_PKEY_ALLOC
|
||||||
|
SNR_PKEY_FREE ScmpSyscall = SYS_PKEY_FREE
|
||||||
|
SNR_STATX ScmpSyscall = SYS_STATX
|
||||||
|
SNR_IO_PGETEVENTS ScmpSyscall = SYS_IO_PGETEVENTS
|
||||||
|
SNR_RSEQ ScmpSyscall = SYS_RSEQ
|
||||||
|
SNR_KEXEC_FILE_LOAD ScmpSyscall = SYS_KEXEC_FILE_LOAD
|
||||||
|
SNR_PIDFD_SEND_SIGNAL ScmpSyscall = SYS_PIDFD_SEND_SIGNAL
|
||||||
|
SNR_IO_URING_SETUP ScmpSyscall = SYS_IO_URING_SETUP
|
||||||
|
SNR_IO_URING_ENTER ScmpSyscall = SYS_IO_URING_ENTER
|
||||||
|
SNR_IO_URING_REGISTER ScmpSyscall = SYS_IO_URING_REGISTER
|
||||||
|
SNR_OPEN_TREE ScmpSyscall = SYS_OPEN_TREE
|
||||||
|
SNR_MOVE_MOUNT ScmpSyscall = SYS_MOVE_MOUNT
|
||||||
|
SNR_FSOPEN ScmpSyscall = SYS_FSOPEN
|
||||||
|
SNR_FSCONFIG ScmpSyscall = SYS_FSCONFIG
|
||||||
|
SNR_FSMOUNT ScmpSyscall = SYS_FSMOUNT
|
||||||
|
SNR_FSPICK ScmpSyscall = SYS_FSPICK
|
||||||
|
SNR_PIDFD_OPEN ScmpSyscall = SYS_PIDFD_OPEN
|
||||||
|
SNR_CLONE3 ScmpSyscall = SYS_CLONE3
|
||||||
|
SNR_CLOSE_RANGE ScmpSyscall = SYS_CLOSE_RANGE
|
||||||
|
SNR_OPENAT2 ScmpSyscall = SYS_OPENAT2
|
||||||
|
SNR_PIDFD_GETFD ScmpSyscall = SYS_PIDFD_GETFD
|
||||||
|
SNR_FACCESSAT2 ScmpSyscall = SYS_FACCESSAT2
|
||||||
|
SNR_PROCESS_MADVISE ScmpSyscall = SYS_PROCESS_MADVISE
|
||||||
|
SNR_EPOLL_PWAIT2 ScmpSyscall = SYS_EPOLL_PWAIT2
|
||||||
|
SNR_MOUNT_SETATTR ScmpSyscall = SYS_MOUNT_SETATTR
|
||||||
|
SNR_QUOTACTL_FD ScmpSyscall = SYS_QUOTACTL_FD
|
||||||
|
SNR_LANDLOCK_CREATE_RULESET ScmpSyscall = SYS_LANDLOCK_CREATE_RULESET
|
||||||
|
SNR_LANDLOCK_ADD_RULE ScmpSyscall = SYS_LANDLOCK_ADD_RULE
|
||||||
|
SNR_LANDLOCK_RESTRICT_SELF ScmpSyscall = SYS_LANDLOCK_RESTRICT_SELF
|
||||||
|
SNR_MEMFD_SECRET ScmpSyscall = SYS_MEMFD_SECRET
|
||||||
|
SNR_PROCESS_MRELEASE ScmpSyscall = SYS_PROCESS_MRELEASE
|
||||||
|
SNR_FUTEX_WAITV ScmpSyscall = SYS_FUTEX_WAITV
|
||||||
|
SNR_SET_MEMPOLICY_HOME_NODE ScmpSyscall = SYS_SET_MEMPOLICY_HOME_NODE
|
||||||
|
SNR_CACHESTAT ScmpSyscall = SYS_CACHESTAT
|
||||||
|
SNR_FCHMODAT2 ScmpSyscall = SYS_FCHMODAT2
|
||||||
|
SNR_MAP_SHADOW_STACK ScmpSyscall = SYS_MAP_SHADOW_STACK
|
||||||
|
SNR_FUTEX_WAKE ScmpSyscall = SYS_FUTEX_WAKE
|
||||||
|
SNR_FUTEX_WAIT ScmpSyscall = SYS_FUTEX_WAIT
|
||||||
|
SNR_FUTEX_REQUEUE ScmpSyscall = SYS_FUTEX_REQUEUE
|
||||||
|
SNR_STATMOUNT ScmpSyscall = SYS_STATMOUNT
|
||||||
|
SNR_LISTMOUNT ScmpSyscall = SYS_LISTMOUNT
|
||||||
|
SNR_LSM_GET_SELF_ATTR ScmpSyscall = SYS_LSM_GET_SELF_ATTR
|
||||||
|
SNR_LSM_SET_SELF_ATTR ScmpSyscall = SYS_LSM_SET_SELF_ATTR
|
||||||
|
SNR_LSM_LIST_MODULES ScmpSyscall = SYS_LSM_LIST_MODULES
|
||||||
|
SNR_MSEAL ScmpSyscall = SYS_MSEAL
|
||||||
|
SNR_SETXATTRAT ScmpSyscall = SYS_SETXATTRAT
|
||||||
|
SNR_GETXATTRAT ScmpSyscall = SYS_GETXATTRAT
|
||||||
|
SNR_LISTXATTRAT ScmpSyscall = SYS_LISTXATTRAT
|
||||||
|
SNR_REMOVEXATTRAT ScmpSyscall = SYS_REMOVEXATTRAT
|
||||||
|
SNR_OPEN_TREE_ATTR ScmpSyscall = SYS_OPEN_TREE_ATTR
|
||||||
|
SNR_FILE_GETATTR ScmpSyscall = SYS_FILE_GETATTR
|
||||||
|
SNR_FILE_SETATTR ScmpSyscall = SYS_FILE_SETATTR
|
||||||
|
)
|
||||||
dist/install.sh | 12 (vendored)
@@ -1,12 +1,12 @@
 #!/bin/sh
 cd "$(dirname -- "$0")" || exit 1
 
-install -vDm0755 "bin/hakurei" "${HAKUREI_INSTALL_PREFIX}/usr/bin/hakurei"
+install -vDm0755 "bin/hakurei" "${DESTDIR}/usr/bin/hakurei"
-install -vDm0755 "bin/hpkg" "${HAKUREI_INSTALL_PREFIX}/usr/bin/hpkg"
+install -vDm0755 "bin/sharefs" "${DESTDIR}/usr/bin/sharefs"
 
-install -vDm4511 "bin/hsu" "${HAKUREI_INSTALL_PREFIX}/usr/bin/hsu"
+install -vDm4511 "bin/hsu" "${DESTDIR}/usr/bin/hsu"
-if [ ! -f "${HAKUREI_INSTALL_PREFIX}/etc/hsurc" ]; then
+if [ ! -f "${DESTDIR}/etc/hsurc" ]; then
-	install -vDm0400 "hsurc.default" "${HAKUREI_INSTALL_PREFIX}/etc/hsurc"
+	install -vDm0400 "hsurc.default" "${DESTDIR}/etc/hsurc"
 fi
 
-install -vDm0644 "comp/_hakurei" "${HAKUREI_INSTALL_PREFIX}/usr/share/zsh/site-functions/_hakurei"
+install -vDm0644 "comp/_hakurei" "${DESTDIR}/usr/share/zsh/site-functions/_hakurei"
dist/release.sh | 23 (vendored)
@@ -1,20 +1,31 @@
 #!/bin/sh -e
 cd "$(dirname -- "$0")/.."
 VERSION="${HAKUREI_VERSION:-untagged}"
-pname="hakurei-${VERSION}"
+pname="hakurei-${VERSION}-$(go env GOARCH)"
-out="dist/${pname}"
+out="${DESTDIR:-dist}/${pname}"
 
+echo '# Preparing distribution files.'
 mkdir -p "${out}"
 cp -v "README.md" "dist/hsurc.default" "dist/install.sh" "${out}"
 cp -rv "dist/comp" "${out}"
+echo
 
+echo '# Building hakurei.'
 go generate ./...
-go build -trimpath -v -o "${out}/bin/" -ldflags "-s -w -buildid= -extldflags '-static'
+go build -trimpath -v -o "${out}/bin/" -ldflags "-s -w
+ -buildid= -extldflags '-static'
 -X hakurei.app/internal/info.buildVersion=${VERSION}
 -X hakurei.app/internal/info.hakureiPath=/usr/bin/hakurei
 -X hakurei.app/internal/info.hsuPath=/usr/bin/hsu
 -X main.hakureiPath=/usr/bin/hakurei" ./...
+echo
 
-rm -f "./${out}.tar.gz" && tar -C dist -czf "${out}.tar.gz" "${pname}"
-rm -rf "./${out}"
-(cd dist && sha512sum "${pname}.tar.gz" > "${pname}.tar.gz.sha512")
+echo '# Testing hakurei.'
+go test -ldflags='-buildid= -extldflags=-static' ./...
+echo
+
+echo '# Creating distribution.'
+rm -f "${out}.tar.gz" && tar -C "${out}/.." -vczf "${out}.tar.gz" "${pname}"
+rm -rf "${out}"
+(cd "${out}/.." && sha512sum "${pname}.tar.gz" > "${pname}.tar.gz.sha512")
+echo
flake.lock | 16 (generated)
@@ -7,32 +7,32 @@
       ]
     },
     "locked": {
-      "lastModified": 1756679287,
+      "lastModified": 1765384171,
-      "narHash": "sha256-Xd1vOeY9ccDf5VtVK12yM0FS6qqvfUop8UQlxEB+gTQ=",
+      "narHash": "sha256-FuFtkJrW1Z7u+3lhzPRau69E0CNjADku1mLQQflUORo=",
       "owner": "nix-community",
       "repo": "home-manager",
-      "rev": "07fc025fe10487dd80f2ec694f1cd790e752d0e8",
+      "rev": "44777152652bc9eacf8876976fa72cc77ca8b9d8",
       "type": "github"
     },
     "original": {
       "owner": "nix-community",
-      "ref": "release-25.05",
+      "ref": "release-25.11",
       "repo": "home-manager",
       "type": "github"
     }
   },
   "nixpkgs": {
     "locked": {
-      "lastModified": 1757020766,
+      "lastModified": 1765311797,
-      "narHash": "sha256-PLoSjHRa2bUbi1x9HoXgTx2AiuzNXs54c8omhadyvp0=",
+      "narHash": "sha256-mSD5Ob7a+T2RNjvPvOA1dkJHGVrNVl8ZOrAwBjKBDQo=",
       "owner": "NixOS",
       "repo": "nixpkgs",
-      "rev": "fe83bbdde2ccdc2cb9573aa846abe8363f79a97a",
+      "rev": "09eb77e94fa25202af8f3e81ddc7353d9970ac1b",
       "type": "github"
     },
     "original": {
       "owner": "NixOS",
-      "ref": "nixos-25.05",
+      "ref": "nixos-25.11",
       "repo": "nixpkgs",
       "type": "github"
     }
   }
flake.nix | 51
@@ -2,10 +2,10 @@
   description = "hakurei container tool and nixos module";
 
   inputs = {
-    nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05";
+    nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.11";
 
     home-manager = {
-      url = "github:nix-community/home-manager/release-25.05";
+      url = "github:nix-community/home-manager/release-25.11";
       inputs.nixpkgs.follows = "nixpkgs";
     };
   };
@@ -69,6 +69,8 @@
           withRace = true;
         };
 
+        sharefs = callPackage ./cmd/sharefs/test { inherit system self; };
+
         hpkg = callPackage ./cmd/hpkg/test { inherit system self; };
 
         formatting = runCommandLocal "check-formatting" { nativeBuildInputs = [ nixfmt-rfc-style ]; } ''
@@ -136,20 +138,32 @@
             ;
         };
         hsu = pkgs.callPackage ./cmd/hsu/package.nix { inherit (self.packages.${system}) hakurei; };
+        sharefs = pkgs.linkFarm "sharefs" {
+          "bin/sharefs" = "${hakurei}/libexec/sharefs";
+          "bin/mount.fuse.sharefs" = "${hakurei}/libexec/sharefs";
+        };
 
-        dist = pkgs.runCommand "${hakurei.name}-dist" { buildInputs = hakurei.targetPkgs ++ [ pkgs.pkgsStatic.musl ]; } ''
-          # go requires XDG_CACHE_HOME for the build cache
-          export XDG_CACHE_HOME="$(mktemp -d)"
-
-          # get a different workdir as go does not like /build
-          cd $(mktemp -d) \
-            && cp -r ${hakurei.src}/. . \
-            && chmod +w cmd && cp -r ${hsu.src}/. cmd/hsu/ \
-            && chmod -R +w .
-
-          export HAKUREI_VERSION="v${hakurei.version}"
-          CC="clang -O3 -Werror" ./dist/release.sh && mkdir $out && cp -v "dist/hakurei-$HAKUREI_VERSION.tar.gz"* $out
-        '';
+        dist =
+          pkgs.runCommand "${hakurei.name}-dist"
+            {
+              buildInputs = hakurei.targetPkgs ++ [
+                pkgs.pkgsStatic.musl
+              ];
+            }
+            ''
+              cd $(mktemp -d) \
+                && cp -r ${hakurei.src}/. . \
+                && chmod +w cmd && cp -r ${hsu.src}/. cmd/hsu/ \
+                && chmod -R +w .
+
+              CC="musl-clang -O3 -Werror -Qunused-arguments" \
+                GOCACHE="$(mktemp -d)" \
+                HAKUREI_TEST_SKIP_ACL=1 \
+                PATH="${pkgs.pkgsStatic.musl.bin}/bin:$PATH" \
+                DESTDIR="$out" \
+                HAKUREI_VERSION="v${hakurei.version}" \
+                ./dist/release.sh
+            '';
 
       }
     );
@@ -160,7 +174,10 @@
         pkgs = nixpkgsFor.${system};
       in
       {
-        default = pkgs.mkShell { buildInputs = hakurei.targetPkgs; };
+        default = pkgs.mkShell {
+          buildInputs = hakurei.targetPkgs;
+          hardeningDisable = [ "fortify" ];
+        };
         withPackage = pkgs.mkShell { buildInputs = [ hakurei ] ++ hakurei.targetPkgs; };
 
         vm =
@@ -185,13 +202,13 @@
       hakurei =
         let
           # this is used for interactive vm testing during development, where tests might be broken
-          package = self.packages.${pkgs.system}.hakurei.override {
+          package = self.packages.${pkgs.stdenv.hostPlatform.system}.hakurei.override {
            buildGoModule = previousArgs: pkgs.pkgsStatic.buildGoModule (previousArgs // { doCheck = false; });
          };
       in
       {
         inherit package;
-        hsuPackage = self.packages.${pkgs.system}.hsu.override { hakurei = package; };
+        hsuPackage = self.packages.${pkgs.stdenv.hostPlatform.system}.hsu.override { hakurei = package; };
       };
   };
 }
@@ -30,12 +30,46 @@ type Config struct {
 	// This option is unsupported and most likely enables full control over the Wayland
 	// session. Do not set this to true unless you are sure you know what you are doing.
 	DirectWayland bool `json:"direct_wayland,omitempty"`
 
+	// Direct access to the PipeWire socket established via SecurityContext::Create, no
+	// attempt is made to start the pipewire-pulse server.
+	//
+	// The SecurityContext machinery is fatally flawed: it blindly sets read and execute
+	// bits on all objects for clients with the lowest achievable privilege level (by
+	// setting PW_KEY_ACCESS to "restricted"). This enables them to call any method
+	// targeting any object, and since Registry::Destroy checks for the read and execute bit,
+	// allows the destruction of any object other than PW_ID_CORE as well. This behaviour
+	// is implemented separately in media-session and wireplumber, with the wireplumber
+	// implementation in Lua via an embedded Lua vm. In all known setups, wireplumber is
+	// in use, and there is no known way to change its behaviour and set permissions
+	// differently without replacing the Lua script. Also, since PipeWire relies on these
+	// permissions to work, reducing them is not possible.
+	//
+	// Currently, the only other sandboxed use case is flatpak, which is not aware of
+	// PipeWire and blindly exposes the bare PulseAudio socket to the container (behaves
+	// like DirectPulse). This socket is backed by the pipewire-pulse compatibility daemon,
+	// which obtains the client pid via the SO_PEERCRED option. The PipeWire daemon, pipewire-pulse
+	// daemon and the session manager daemon then separately perform the /.flatpak-info hack
+	// described in https://git.gensokyo.uk/security/hakurei/issues/21. Under such a use case,
+	// since the client has no direct access to PipeWire, insecure parts of the protocol are
+	// obscured by pipewire-pulse simply not implementing them, and thus hiding the flaws
+	// described above.
+	//
+	// Hakurei does not rely on the /.flatpak-info hack. Instead, a socket is set up via
+	// SecurityContext. A pipewire-pulse server connected through it achieves the same
+	// permissions as flatpak does via the /.flatpak-info hack and is maintained for the
+	// life of the container.
+	//
+	// This option is unsupported and enables a denial-of-service attack as the sandboxed
+	// client is able to destroy any client object and thus disconnect them from PipeWire,
+	// or destroy the SecurityContext object preventing any further container creation.
+	// Do not set this to true, it is insecure under any configuration.
+	DirectPipeWire bool `json:"direct_pipewire,omitempty"`
 	// Direct access to PulseAudio socket, no attempt is made to establish pipewire-pulse
 	// server via a PipeWire socket with a SecurityContext attached and the bare socket
 	// is made available to the container.
 	//
 	// This option is unsupported and enables arbitrary code execution as the PulseAudio
-	// server. Do not set this to true, this is insecure under any configuration.
+	// server. Do not set this to true, it is insecure under any configuration.
 	DirectPulse bool `json:"direct_pulse,omitempty"`
 
 	// Extra acl updates to perform before setuid.
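For reference, this is how the direct-access switches appear in an encoded configuration. The struct below is a local stand-in that mirrors only the json tags shown in the hunk above; the real Config type lives in the hakurei source tree and carries many more fields:

package main

import (
    "encoding/json"
    "fmt"
)

// directAccess mirrors only the three json tags shown above; it is not the
// hakurei Config type.
type directAccess struct {
    DirectWayland  bool `json:"direct_wayland,omitempty"`
    DirectPipeWire bool `json:"direct_pipewire,omitempty"`
    DirectPulse    bool `json:"direct_pulse,omitempty"`
}

func main() {
    // All three options default to false and are omitted from the encoded
    // configuration; setting any of them is explicitly unsupported.
    b, _ := json.Marshal(directAccess{DirectPipeWire: true})
    fmt.Println(string(b)) // {"direct_pipewire":true}
}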
@@ -24,9 +24,8 @@ var (
 )
 
 func TestUpdate(t *testing.T) {
-	if os.Getenv("GO_TEST_SKIP_ACL") == "1" {
+	if os.Getenv("HAKUREI_TEST_SKIP_ACL") == "1" {
-		t.Log("acl test skipped")
-		t.SkipNow()
+		t.Skip("acl test skipped")
 	}
 
 	testFilePath := path.Join(t.TempDir(), testFileName)
@@ -143,6 +142,7 @@ func (c *getFAclInvocation) run(name string) error {
 	}
 
 	c.cmd = exec.Command("getfacl", "--omit-header", "--absolute-names", "--numeric", name)
+	c.cmd.Stderr = os.Stderr
 
 	scanErr := make(chan error, 1)
 	if p, err := c.cmd.StdoutPipe(); err != nil {
@@ -254,7 +254,7 @@ func getfacl(t *testing.T, name string) []*getFAclResp {
 		t.Fatalf("getfacl: error = %v", err)
 	}
 	if len(c.pe) != 0 {
-		t.Errorf("errors encountered parsing getfacl output\n%s", errors.Join(c.pe...).Error())
+		t.Errorf("errors encountered parsing getfacl output\n%s", errors.Join(c.pe...))
 	}
 	return c.val
 }
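The added Stderr wiring is the usual way to keep a child process's diagnostics visible while its stdout is consumed through a pipe. A self-contained illustration of the pattern, using ls rather than getfacl so it runs anywhere (this is not the project's test code):

package main

import (
    "bufio"
    "fmt"
    "os"
    "os/exec"
)

func main() {
    // Parse the child's stdout through a pipe while letting its stderr pass
    // straight through to the terminal, as the hunk above does for getfacl.
    cmd := exec.Command("ls", "-1", ".")
    cmd.Stderr = os.Stderr

    out, err := cmd.StdoutPipe()
    if err != nil {
        panic(err)
    }
    if err := cmd.Start(); err != nil {
        panic(err)
    }
    s := bufio.NewScanner(out)
    for s.Scan() {
        fmt.Println("entry:", s.Text())
    }
    if err := cmd.Wait(); err != nil {
        panic(err)
    }
}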
@@ -53,6 +53,10 @@ type syscallDispatcher interface {
 readdir(name string) ([]os.DirEntry, error)
 // tempdir provides [os.TempDir].
 tempdir() string
+// mkdir provides [os.Mkdir].
+mkdir(name string, perm os.FileMode) error
+// removeAll provides [os.RemoveAll].
+removeAll(path string) error
 // exit provides [os.Exit].
 exit(code int)

@@ -62,6 +66,8 @@ type syscallDispatcher interface {
 // lookupGroupId calls [user.LookupGroup] and returns the Gid field of the resulting [user.Group] struct.
 lookupGroupId(name string) (string, error)

+// lookPath provides exec.LookPath.
+lookPath(file string) (string, error)
 // cmdOutput provides the Output method of [exec.Cmd].
 cmdOutput(cmd *exec.Cmd) ([]byte, error)

@@ -121,6 +127,8 @@ func (direct) stat(name string) (os.FileInfo, error) { return os.Stat(name)
 func (direct) open(name string) (osFile, error) { return os.Open(name) }
 func (direct) readdir(name string) ([]os.DirEntry, error) { return os.ReadDir(name) }
 func (direct) tempdir() string { return os.TempDir() }
+func (direct) mkdir(name string, perm os.FileMode) error { return os.Mkdir(name, perm) }
+func (direct) removeAll(path string) error { return os.RemoveAll(path) }
 func (direct) exit(code int) { os.Exit(code) }

 func (direct) evalSymlinks(path string) (string, error) { return filepath.EvalSymlinks(path) }

@@ -134,6 +142,7 @@ func (direct) lookupGroupId(name string) (gid string, err error) {
 return
 }

+func (direct) lookPath(file string) (string, error) { return exec.LookPath(file) }
 func (direct) cmdOutput(cmd *exec.Cmd) ([]byte, error) { return cmd.Output() }

 func (direct) notifyContext(parent context.Context, signals ...os.Signal) (ctx context.Context, stop context.CancelFunc) {

@@ -701,10 +701,13 @@ func (panicDispatcher) stat(string) (os.FileInfo, error) { pa
 func (panicDispatcher) open(string) (osFile, error) { panic("unreachable") }
 func (panicDispatcher) readdir(string) ([]os.DirEntry, error) { panic("unreachable") }
 func (panicDispatcher) tempdir() string { panic("unreachable") }
+func (panicDispatcher) mkdir(string, os.FileMode) error { panic("unreachable") }
+func (panicDispatcher) removeAll(string) error { panic("unreachable") }
 func (panicDispatcher) exit(int) { panic("unreachable") }
 func (panicDispatcher) evalSymlinks(string) (string, error) { panic("unreachable") }
 func (panicDispatcher) prctl(uintptr, uintptr, uintptr) error { panic("unreachable") }
 func (panicDispatcher) lookupGroupId(string) (string, error) { panic("unreachable") }
+func (panicDispatcher) lookPath(string) (string, error) { panic("unreachable") }
 func (panicDispatcher) cmdOutput(*exec.Cmd) ([]byte, error) { panic("unreachable") }
 func (panicDispatcher) overflowUid(message.Msg) int { panic("unreachable") }
 func (panicDispatcher) overflowGid(message.Msg) int { panic("unreachable") }
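The mkdir, removeAll and lookPath additions above extend the syscallDispatcher seam so the shim can create and remove its work directory and locate pipewire-pulse through the same testable interface the package already uses. A minimal sketch of that seam, with illustrative names not taken from this changeset:

package main

import (
	"fmt"
	"os"
)

// dispatcher mirrors the idea behind syscallDispatcher: OS calls go through
// an interface so tests can substitute a stub for the real implementations.
type dispatcher interface {
	mkdir(name string, perm os.FileMode) error
	removeAll(path string) error
}

// direct forwards to the standard library.
type direct struct{}

func (direct) mkdir(name string, perm os.FileMode) error { return os.Mkdir(name, perm) }
func (direct) removeAll(path string) error               { return os.RemoveAll(path) }

// recorder is a test stub that records calls instead of touching the filesystem.
type recorder struct{ calls []string }

func (r *recorder) mkdir(name string, _ os.FileMode) error {
	r.calls = append(r.calls, "mkdir "+name)
	return nil
}

func (r *recorder) removeAll(path string) error {
	r.calls = append(r.calls, "removeAll "+path)
	return nil
}

// withWorkDir creates a directory, runs f, then removes the directory again.
func withWorkDir(k dispatcher, name string, f func() error) error {
	if err := k.mkdir(name, 0o700); err != nil {
		return err
	}
	defer func() { _ = k.removeAll(name) }()
	return f()
}

func main() {
	r := &recorder{}
	_ = withWorkDir(r, "/tmp/example", func() error { return nil })
	fmt.Println(r.calls) // [mkdir /tmp/example removeAll /tmp/example]
}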
@@ -70,7 +70,7 @@ type outcomeState struct {
 // Copied from their respective exported values.
 mapuid, mapgid *stringPair[int]

-// Copied from [EnvPaths] per-process.
+// Copied from [env.Paths] per-process.
 sc hst.Paths
 *env.Paths

@@ -172,6 +172,8 @@ type outcomeStateSys struct {

 // Copied from [hst.Config]. Safe for read by spWaylandOp.toSystem only.
 directWayland bool
+// Copied from [hst.Config]. Safe for read by spPipeWireOp.toSystem only.
+directPipeWire bool
 // Copied from [hst.Config]. Safe for read by spPulseOp.toSystem only.
 directPulse bool
 // Copied header from [hst.Config]. Safe for read by spFilesystemOp.toSystem only.

@@ -187,9 +189,8 @@ type outcomeStateSys struct {
 func (s *outcomeState) newSys(config *hst.Config, sys *system.I) *outcomeStateSys {
 return &outcomeStateSys{
 appId: config.ID, et: config.Enablements.Unwrap(),
-directWayland: config.DirectWayland, directPulse: config.DirectPulse,
-extraPerms: config.ExtraPerms,
-sessionBus: config.SessionBus, systemBus: config.SystemBus,
+directWayland: config.DirectWayland, directPipeWire: config.DirectPipeWire, directPulse: config.DirectPulse,
+extraPerms: config.ExtraPerms, sessionBus: config.SessionBus, systemBus: config.SystemBus,
 sys: sys, outcomeState: s,
 }
 }

@@ -256,6 +257,10 @@ type outcomeStateParams struct {
 // Populated by spRuntimeOp.
 runtimeDir *check.Absolute

+// Path to pipewire-pulse server.
+// Populated by spPipeWireOp if DirectPipeWire is false.
+pipewirePulsePath *check.Absolute

 as hst.ApplyState
 *outcomeState
 }

@@ -295,7 +300,7 @@ func (state *outcomeStateSys) toSystem() error {
 // optional via enablements
 &spWaylandOp{},
 &spX11Op{},
-spPipeWireOp{},
+&spPipeWireOp{},
 &spPulseOp{},
 &spDBusOp{},
@@ -68,7 +68,11 @@ func TestOutcomeRun(t *testing.T) {
 ).

 // spPipeWireOp
-PipeWire(m("/tmp/hakurei.0/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/pipewire")).
+PipeWire(
+m("/tmp/hakurei.0/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/pipewire"),
+"org.chromium.Chromium",
+"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+).

 // spDBusOp
 MustProxyDBus(

@@ -96,7 +100,6 @@ func TestOutcomeRun(t *testing.T) {
 "GOOGLE_DEFAULT_CLIENT_ID=77185425430.apps.googleusercontent.com",
 "GOOGLE_DEFAULT_CLIENT_SECRET=OTJgUOQcT7lO7GsGZq2G4IlT",
 "HOME=/data/data/org.chromium.Chromium",
-"PIPEWIRE_REMOTE=/run/user/1971/pipewire-0",
 "SHELL=/run/current-system/sw/bin/zsh",
 "TERM=xterm-256color",
 "USER=chronos",

@@ -146,9 +149,6 @@ func TestOutcomeRun(t *testing.T) {
 // spWaylandOp
 Bind(m("/tmp/hakurei.0/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/wayland"), m("/run/user/1971/wayland-0"), 0).

-// spPipeWireOp
-Bind(m("/tmp/hakurei.0/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/pipewire"), m("/run/user/1971/pipewire-0"), 0).

 // spDBusOp
 Bind(m("/tmp/hakurei.0/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bus"), m("/run/user/1971/bus"), 0).
 Bind(m("/tmp/hakurei.0/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/system_bus_socket"), m("/var/run/dbus/system_bus_socket"), 0).

@@ -170,7 +170,7 @@ func TestOutcomeRun(t *testing.T) {
 Remount(fhs.AbsRoot, syscall.MS_RDONLY),
 }},

-{"nixos permissive defaults no enablements", new(stubNixOS), &hst.Config{Container: &hst.ContainerConfig{
+{"nixos permissive defaults no enablements", new(stubNixOS), &hst.Config{DirectPipeWire: true, Container: &hst.ContainerConfig{
 Filesystem: []hst.FilesystemConfigJSON{
 {FilesystemConfig: &hst.FSBind{
 Target: fhs.AbsRoot,

@@ -252,6 +252,8 @@ func TestOutcomeRun(t *testing.T) {
 }},

 {"nixos permissive defaults chromium", new(stubNixOS), &hst.Config{
+DirectPipeWire: true,
+
 ID: "org.chromium.Chromium",
 Identity: 9,
 Groups: []string{"video"},

@@ -335,7 +337,7 @@ func TestOutcomeRun(t *testing.T) {
 Ensure(m("/tmp/hakurei.0/tmpdir/9"), 01700).UpdatePermType(system.User, m("/tmp/hakurei.0/tmpdir/9"), acl.Read, acl.Write, acl.Execute).
 Ephemeral(system.Process, m("/tmp/hakurei.0/ebf083d1b175911782d413369b64ce7c"), 0711).
 Wayland(m("/tmp/hakurei.0/ebf083d1b175911782d413369b64ce7c/wayland"), m("/run/user/1971/wayland-0"), "org.chromium.Chromium", "ebf083d1b175911782d413369b64ce7c").
-PipeWire(m("/tmp/hakurei.0/ebf083d1b175911782d413369b64ce7c/pipewire")).
+PipeWire(m("/tmp/hakurei.0/ebf083d1b175911782d413369b64ce7c/pipewire"), "org.chromium.Chromium", "ebf083d1b175911782d413369b64ce7c").
 MustProxyDBus(&hst.BusConfig{
 Talk: []string{
 "org.freedesktop.Notifications",

@@ -422,6 +424,8 @@ func TestOutcomeRun(t *testing.T) {
 }},

 {"nixos chromium direct wayland", new(stubNixOS), &hst.Config{
+DirectPipeWire: true,
+
 ID: "org.chromium.Chromium",
 Enablements: hst.NewEnablements(hst.EWayland | hst.EDBus | hst.EPipeWire | hst.EPulse),
 Container: &hst.ContainerConfig{

@@ -486,7 +490,7 @@ func TestOutcomeRun(t *testing.T) {
 Ensure(m("/run/user/1971/hakurei"), 0700).UpdatePermType(system.User, m("/run/user/1971/hakurei"), acl.Execute).
 UpdatePermType(hst.EWayland, m("/run/user/1971/wayland-0"), acl.Read, acl.Write, acl.Execute).
 Ephemeral(system.Process, m("/tmp/hakurei.0/8e2c76b066dabe574cf073bdb46eb5c1"), 0711).
-PipeWire(m("/tmp/hakurei.0/8e2c76b066dabe574cf073bdb46eb5c1/pipewire")).
+PipeWire(m("/tmp/hakurei.0/8e2c76b066dabe574cf073bdb46eb5c1/pipewire"), "org.chromium.Chromium", "8e2c76b066dabe574cf073bdb46eb5c1").
 MustProxyDBus(&hst.BusConfig{
 Talk: []string{
 "org.freedesktop.FileManager1", "org.freedesktop.Notifications",

@@ -879,6 +883,16 @@ func (k *stubNixOS) lookupGroupId(name string) (string, error) {
 }
 }

+func (k *stubNixOS) lookPath(file string) (string, error) {
+switch file {
+case "pipewire-pulse":
+return "/run/current-system/sw/bin/pipewire-pulse", nil
+
+default:
+panic(fmt.Sprintf("unexpected file %q", file))
+}
+}
+
 func (k *stubNixOS) cmdOutput(cmd *exec.Cmd) ([]byte, error) {
 switch cmd.Path {
 case "/proc/nonexistent/hsu":
@@ -14,9 +14,12 @@ import (
 "time"

 "hakurei.app/container"
+"hakurei.app/container/check"
+"hakurei.app/container/fhs"
 "hakurei.app/container/seccomp"
 "hakurei.app/container/std"
 "hakurei.app/hst"
+"hakurei.app/internal/pipewire"
 "hakurei.app/message"
 )

@@ -83,6 +86,55 @@ func Shim(msg message.Msg) {
 shimEntrypoint(direct{msg})
 }

+// A shimPrivate holds state of the private work directory owned by shim.
+type shimPrivate struct {
+// Path to directory if created.
+pathname *check.Absolute
+
+k syscallDispatcher
+id *stringPair[hst.ID]
+}
+
+// unwrap returns the underlying pathname.
+func (sp *shimPrivate) unwrap() *check.Absolute {
+if sp.pathname == nil {
+if a, err := check.NewAbs(sp.k.tempdir()); err != nil {
+sp.k.fatal(err)
+panic("unreachable")
+} else {
+pathname := a.Append(".hakurei-shim-" + sp.id.String())
+sp.k.getMsg().Verbosef("creating private work directory %q", pathname)
+if err = sp.k.mkdir(pathname.String(), 0700); err != nil {
+sp.k.fatal(err)
+panic("unreachable")
+}
+sp.pathname = pathname
+return sp.unwrap()
+}
+} else {
+return sp.pathname
+}
+}
+
+// String returns the absolute pathname to the directory held by shimPrivate.
+func (sp *shimPrivate) String() string { return sp.unwrap().String() }
+
+// destroy removes the directory held by shimPrivate.
+func (sp *shimPrivate) destroy() {
+defer func() { sp.pathname = nil }()
+if sp.pathname != nil {
+sp.k.getMsg().Verbosef("destroying private work directory %q", sp.pathname)
+if err := sp.k.removeAll(sp.pathname.String()); err != nil {
+sp.k.getMsg().GetLogger().Println(err)
+}
+}
+}
+
+const (
+// shimPipeWireTimeout is the duration pipewire-pulse is allowed to run before its socket becomes available.
+shimPipeWireTimeout = 5 * time.Second
+)
+
 func shimEntrypoint(k syscallDispatcher) {
 msg := k.getMsg()
 if msg == nil {

@@ -208,6 +260,7 @@ func shimEntrypoint(k syscallDispatcher) {

 ctx, stop := k.notifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
 cancelContainer.Store(&stop)
+sp := shimPrivate{k: k, id: state.id}
 z := container.New(ctx, msg)
 z.Params = *stateParams.params
 z.Stdin, z.Stdout, z.Stderr = os.Stdin, os.Stdout, os.Stderr

@@ -215,6 +268,79 @@ func shimEntrypoint(k syscallDispatcher) {
 // bounds and default enforced in finalise.go
 z.WaitDelay = state.Shim.WaitDelay

+if stateParams.pipewirePulsePath != nil {
+zpw := container.NewCommand(ctx, msg, stateParams.pipewirePulsePath, pipewirePulseName)
+zpw.Hostname = "hakurei-" + pipewirePulseName
+zpw.SeccompFlags |= seccomp.AllowMultiarch
+zpw.SeccompPresets |= std.PresetStrict
+zpw.Env = []string{
+// pipewire SecurityContext socket path
+pipewire.Remote + "=" + stateParams.instancePath().Append("pipewire").String(),
+// pipewire-pulse socket directory path
+envXDGRuntimeDir + "=" + sp.String(),
+}
+if msg.IsVerbose() {
+zpw.Stdin, zpw.Stdout, zpw.Stderr = os.Stdin, os.Stdout, os.Stderr
+}
+zpw.
+Bind(fhs.AbsRoot, fhs.AbsRoot, 0).
+Bind(sp.unwrap(), sp.unwrap(), std.BindWritable).
+Proc(fhs.AbsProc).Dev(fhs.AbsDev, true)
+socketPath := sp.unwrap().Append("pulse", "native")
+innerSocketPath := stateParams.runtimeDir.Append("pulse", "native")
+
+if err := k.containerStart(zpw); err != nil {
+sp.destroy()
+printMessageError(func(v ...any) { k.fatal(fmt.Sprintln(v...)) },
+"cannot start "+pipewirePulseName+" container:", err)
+}
+if err := k.containerServe(zpw); err != nil {
+sp.destroy()
+printMessageError(func(v ...any) { k.fatal(fmt.Sprintln(v...)) },
+"cannot configure "+pipewirePulseName+" container:", err)
+}
+
+done := make(chan error, 1)
+k.new(func(k syscallDispatcher, msg message.Msg) { done <- k.containerWait(zpw) })
+
+socketTimer := time.NewTimer(shimPipeWireTimeout)
+for {
+select {
+case <-socketTimer.C:
+sp.destroy()
+k.fatal(pipewirePulseName + " exceeded deadline before socket appeared")
+break
+
+case err := <-done:
+var exitError *exec.ExitError
+if !errors.As(err, &exitError) {
+msg.Verbosef("cannot wait: %v", err)
+k.exit(127)
+}
+sp.destroy()
+k.fatal(pipewirePulseName + " " + exitError.ProcessState.String())
+break
+
+default:
+if _, err := k.stat(socketPath.String()); err != nil {
+if !errors.Is(err, os.ErrNotExist) {
+sp.destroy()
+k.fatal(err)
+break
+}
+
+time.Sleep(500 * time.Microsecond)
+continue
+}
+}
+
+break
+}
+
+z.Bind(socketPath, innerSocketPath, 0)
+z.Env = append(z.Env, "PULSE_SERVER=unix:"+innerSocketPath.String())
+}
+
 if err := k.containerStart(z); err != nil {
 var f func(v ...any)
 if logger := msg.GetLogger(); logger != nil {

@@ -225,9 +351,11 @@ func shimEntrypoint(k syscallDispatcher) {
 }
 }
 printMessageError(f, "cannot start container:", err)
+sp.destroy()
 k.exit(hst.ExitFailure)
 }
 if err := k.containerServe(z); err != nil {
+sp.destroy()
 printMessageError(func(v ...any) { k.fatal(fmt.Sprintln(v...)) },
 "cannot configure container:", err)
 }

@@ -236,10 +364,13 @@ func shimEntrypoint(k syscallDispatcher) {
 seccomp.Preset(std.PresetStrict, seccomp.AllowMultiarch),
 seccomp.AllowMultiarch,
 ); err != nil {
+sp.destroy()
 k.fatalf("cannot load syscall filter: %v", err)
 }

 if err := k.containerWait(z); err != nil {
+sp.destroy()

 var exitError *exec.ExitError
 if !errors.As(err, &exitError) {
 if errors.Is(err, context.Canceled) {

@@ -250,4 +381,5 @@ func shimEntrypoint(k syscallDispatcher) {
 }
 k.exit(exitError.ExitCode())
 }
+sp.destroy()
 }
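The shim above starts a pipewire-pulse container and then polls for its PulseAudio socket until shimPipeWireTimeout expires, while also watching for early exit of the bridge process. A standalone sketch of that wait pattern, not taken from the changeset (path, interval and error text are placeholders):

package main

import (
	"errors"
	"fmt"
	"os"
	"time"
)

// waitForSocket polls for path until it exists, the deadline passes,
// or the process being waited on exits (signalled via done).
func waitForSocket(path string, timeout time.Duration, done <-chan error) error {
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	for {
		select {
		case <-timer.C:
			return errors.New("deadline exceeded before socket appeared")
		case err := <-done:
			if err != nil {
				return fmt.Errorf("process exited before socket appeared: %w", err)
			}
			return errors.New("process exited before socket appeared")
		default:
			if _, err := os.Stat(path); err == nil {
				return nil // socket is ready
			} else if !errors.Is(err, os.ErrNotExist) {
				return err // unexpected stat failure
			}
			time.Sleep(50 * time.Millisecond) // back off before the next check
		}
	}
}

func main() {
	done := make(chan error, 1)
	err := waitForSocket("/tmp/nonexistent/pulse/native", 200*time.Millisecond, done)
	fmt.Println(err)
}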
@@ -3,29 +3,51 @@ package outcome
 import (
 "encoding/gob"

+"hakurei.app/container/check"
 "hakurei.app/hst"
 "hakurei.app/internal/pipewire"
 )

-func init() { gob.Register(spPipeWireOp{}) }
+const pipewirePulseName = "pipewire-pulse"
+
+func init() { gob.Register(new(spPipeWireOp)) }

 // spPipeWireOp exports the PipeWire server to the container via SecurityContext.
 // Runs after spRuntimeOp.
-type spPipeWireOp struct{}
+type spPipeWireOp struct {
+// Path to pipewire-pulse server. Populated during toSystem if DirectPipeWire is false.
+CompatServerPath *check.Absolute
+}

-func (s spPipeWireOp) toSystem(state *outcomeStateSys) error {
+func (s *spPipeWireOp) toSystem(state *outcomeStateSys) error {
 if state.et&hst.EPipeWire == 0 {
 return errNotEnabled
 }
+if !state.directPipeWire {
+if n, err := state.k.lookPath(pipewirePulseName); err != nil {
+return &hst.AppError{Step: "look up " + pipewirePulseName, Err: err}
+} else if s.CompatServerPath, err = check.NewAbs(n); err != nil {
+return err
+}
+}

-state.sys.PipeWire(state.instance().Append("pipewire"))
+appId := state.appId
+if appId == "" {
+// use instance ID in case app id is not set
+appId = "app.hakurei." + state.id.String()
+}
+state.sys.PipeWire(state.instance().Append("pipewire"), appId, state.id.String())
 return nil
 }

-func (s spPipeWireOp) toContainer(state *outcomeStateParams) error {
-innerPath := state.runtimeDir.Append(pipewire.PW_DEFAULT_REMOTE)
-state.env[pipewire.Remote] = innerPath.String()
-state.params.Bind(state.instancePath().Append("pipewire"), innerPath, 0)
+func (s *spPipeWireOp) toContainer(state *outcomeStateParams) error {
+if s.CompatServerPath == nil {
+innerPath := state.runtimeDir.Append(pipewire.PW_DEFAULT_REMOTE)
+state.env[pipewire.Remote] = innerPath.String()
+state.params.Bind(state.instancePath().Append("pipewire"), innerPath, 0)
+}
+
+// pipewire-pulse behaviour implemented in shim.go
+state.pipewirePulsePath = s.CompatServerPath
 return nil
 }
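spPipeWireOp is now registered with gob as a pointer (gob.Register(new(spPipeWireOp))) since the op gained a CompatServerPath field that must round-trip through the encoded state. A small self-contained illustration of why the registered concrete type must match what is actually stored in the interface; the types below are illustrative, not the ones from this changeset:

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

type op interface{ name() string }

type pipeWireOp struct{ CompatServerPath string }

func (o *pipeWireOp) name() string { return "pipewire" }

func init() {
	// The concrete type stored in the interface is *pipeWireOp, so the
	// pointer type is what must be registered for gob to decode it.
	gob.Register(new(pipeWireOp))
}

func main() {
	var buf bytes.Buffer
	var in op = &pipeWireOp{CompatServerPath: "/run/current-system/sw/bin/pipewire-pulse"}
	if err := gob.NewEncoder(&buf).Encode(&in); err != nil {
		panic(err)
	}
	var out op
	if err := gob.NewDecoder(&buf).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.name(), out.(*pipeWireOp).CompatServerPath)
}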
@@ -16,7 +16,7 @@ func TestSpPipeWireOp(t *testing.T) {

 checkOpBehaviour(t, []opBehaviourTestCase{
 {"not enabled", func(bool, bool) outcomeOp {
-return spPipeWireOp{}
+return new(spPipeWireOp)
 }, func() *hst.Config {
 c := hst.Template()
 *c.Enablements = 0

@@ -24,13 +24,19 @@ func TestSpPipeWireOp(t *testing.T) {
 }, nil, nil, nil, nil, errNotEnabled, nil, nil, nil, nil, nil},

 {"success", func(bool, bool) outcomeOp {
-return spPipeWireOp{}
-}, hst.Template, nil, []stub.Call{}, newI().
+return new(spPipeWireOp)
+}, func() *hst.Config {
+c := hst.Template()
+c.DirectPipeWire = true
+return c
+}, nil, []stub.Call{}, newI().
 // state.instance
 Ephemeral(system.Process, m(wantInstancePrefix), 0711).
 // toSystem
 PipeWire(
-m(wantInstancePrefix + "/pipewire"),
+m(wantInstancePrefix+"/pipewire"),
+"org.chromium.Chromium",
+"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
 ), sysUsesInstance(nil), nil, insertsOps(afterSpRuntimeOp(nil)), []stub.Call{
 // this op configures the container state and does not make calls during toContainer
 }, &container.Params{

@@ -108,7 +108,7 @@ func TestSpPulseOp(t *testing.T) {
 call("lookupEnv", stub.ExpectArgs{"PULSE_COOKIE"}, "proc/nonexistent/cookie", nil),
 }, nil, nil, &hst.AppError{
 Step: "locate PulseAudio cookie",
-Err: &check.AbsoluteError{Pathname: "proc/nonexistent/cookie"},
+Err: check.AbsoluteError("proc/nonexistent/cookie"),
 }, nil, nil, nil, nil, nil},

 {"cookie loadFile", func(bool, bool) outcomeOp {

@@ -272,7 +272,7 @@ func TestDiscoverPulseCookie(t *testing.T) {
 call("verbose", stub.ExpectArgs{[]any{(*check.Absolute)(nil)}}, nil, nil),
 }}, &hst.AppError{
 Step: "locate PulseAudio cookie",
-Err: &check.AbsoluteError{Pathname: "proc/nonexistent/pulse-cookie"},
+Err: check.AbsoluteError("proc/nonexistent/pulse-cookie"),
 }},

 {"success override", fCheckPathname, stub.Expect{Calls: []stub.Call{

@@ -286,7 +286,7 @@ func TestDiscoverPulseCookie(t *testing.T) {
 call("verbose", stub.ExpectArgs{[]any{(*check.Absolute)(nil)}}, nil, nil),
 }}, &hst.AppError{
 Step: "locate PulseAudio cookie",
-Err: &check.AbsoluteError{Pathname: "proc/nonexistent/home"},
+Err: check.AbsoluteError("proc/nonexistent/home"),
 }},

 {"home stat", fCheckPathname, stub.Expect{Calls: []stub.Call{

@@ -321,7 +321,7 @@ func TestDiscoverPulseCookie(t *testing.T) {
 call("verbose", stub.ExpectArgs{[]any{(*check.Absolute)(nil)}}, nil, nil),
 }}, &hst.AppError{
 Step: "locate PulseAudio cookie",
-Err: &check.AbsoluteError{Pathname: "proc/nonexistent/xdg"},
+Err: check.AbsoluteError("proc/nonexistent/xdg"),
 }},

 {"xdg stat", fCheckPathname, stub.Expect{Calls: []stub.Call{
@@ -103,6 +103,8 @@ type Client struct {

 // Populated by [CoreBoundProps] events targeting [Client].
 Properties SPADict `json:"props"`
+
+noRemove
 }

 func (client *Client) consume(opcode byte, files []int, unmarshal func(v any)) error {

@@ -113,7 +115,7 @@ func (client *Client) consume(opcode byte, files []int, unmarshal func(v any)) e
 return nil

 default:
-return &UnsupportedOpcodeError{opcode, client.String()}
+panic(&UnsupportedOpcodeError{opcode, client.String()})
 }
 }

@@ -40,13 +40,14 @@ const (
 PW_CORE_EVENT_ADD_MEM
 PW_CORE_EVENT_REMOVE_MEM
 PW_CORE_EVENT_BOUND_PROPS
-PW_CORE_EVENT_NUM

+PW_CORE_EVENT_NUM
 PW_VERSION_CORE_EVENTS = 1
 )

 const (
 PW_CORE_METHOD_ADD_LISTENER = iota

 PW_CORE_METHOD_HELLO
 PW_CORE_METHOD_SYNC
 PW_CORE_METHOD_PONG

@@ -54,25 +55,26 @@ const (
 PW_CORE_METHOD_GET_REGISTRY
 PW_CORE_METHOD_CREATE_OBJECT
 PW_CORE_METHOD_DESTROY
-PW_CORE_METHOD_NUM

+PW_CORE_METHOD_NUM
 PW_VERSION_CORE_METHODS = 0
 )

 const (
 PW_REGISTRY_EVENT_GLOBAL = iota
 PW_REGISTRY_EVENT_GLOBAL_REMOVE
-PW_REGISTRY_EVENT_NUM

+PW_REGISTRY_EVENT_NUM
 PW_VERSION_REGISTRY_EVENTS = 0
 )

 const (
 PW_REGISTRY_METHOD_ADD_LISTENER = iota

 PW_REGISTRY_METHOD_BIND
 PW_REGISTRY_METHOD_DESTROY
-PW_REGISTRY_METHOD_NUM

+PW_REGISTRY_METHOD_NUM
 PW_VERSION_REGISTRY_METHODS = 0
 )
@@ -266,6 +268,31 @@ type CoreErrorEvent struct{ CoreError }
 // Opcode satisfies [Message] with a constant value.
 func (c *CoreErrorEvent) Opcode() byte { return PW_CORE_EVENT_ERROR }

+// The CoreRemoveId event is used internally by the object ID management logic.
+//
+// When a client deletes an object, the server will send this event to acknowledge
+// that it has seen the delete request. When the client receives this event, it
+// will know that it can safely reuse the object ID.
+type CoreRemoveId struct {
+// A proxy id that was removed.
+ID Int `json:"id"`
+}
+
+// Opcode satisfies [Message] with a constant value.
+func (c *CoreRemoveId) Opcode() byte { return PW_CORE_EVENT_REMOVE_ID }
+
+// FileCount satisfies [Message] with a constant value.
+func (c *CoreRemoveId) FileCount() Int { return 0 }
+
+// Size satisfies [KnownSize] with a constant value.
+func (c *CoreRemoveId) Size() Word { return SizePrefix + Size(SizeInt) }
+
+// MarshalBinary satisfies [encoding.BinaryMarshaler] via [Marshal].
+func (c *CoreRemoveId) MarshalBinary() ([]byte, error) { return Marshal(c) }
+
+// UnmarshalBinary satisfies [encoding.BinaryUnmarshaler] via [Unmarshal].
+func (c *CoreRemoveId) UnmarshalBinary(data []byte) error { return Unmarshal(data, c) }
+
 // The CoreBoundProps event is emitted when a local object ID is bound to a global ID.
 // It is emitted before the global becomes visible in the registry.
 type CoreBoundProps struct {

@@ -299,13 +326,96 @@ func (c *CoreBoundProps) UnmarshalBinary(data []byte) error { return Unmarshal(d

 // ErrBadBoundProps is returned when a [CoreBoundProps] event targeting a proxy
 // that should never be targeted is received and processed.
-var ErrBadBoundProps = errors.New("attempted to store bound props on proxy that should never be targeted")
+var ErrBadBoundProps = errors.New("attempting to store bound props on a proxy that should never be targeted")

 // noAck is embedded by proxies that are never targeted by [CoreBoundProps].
 type noAck struct{}

 // setBoundProps should never be called as this proxy should never be targeted by [CoreBoundProps].
-func (noAck) setBoundProps(*CoreBoundProps) error { return ErrBadBoundProps }
+func (noAck) setBoundProps(*CoreBoundProps) error { panic(ErrBadBoundProps) }
+
+// ErrBadRemove is returned when a [CoreRemoveId] event targeting a proxy
+// that should never be targeted is received and processed.
+var ErrBadRemove = errors.New("attempting to remove a proxy that should never be targeted")
+
+// noRemove is embedded by proxies that are never targeted by [CoreRemoveId].
+type noRemove struct{}
+
+// remove should never be called as this proxy should never be targeted by [CoreRemoveId].
+func (noRemove) remove() error { panic(ErrBadRemove) }
+
+// ErrInvalidRemove is returned when a proxy is somehow removed twice. This is only reached for
+// an implementation error as the proxy struct should no longer be reachable after the first call.
+var ErrInvalidRemove = errors.New("attempting to remove an already freed proxy")
+
+// removable is embedded by proxies that can be targeted by [CoreRemoveId] and requires no cleanup.
+type removable bool
+
+// remove checks against removal of a freed proxy and marks the proxy as removed.
+func (s *removable) remove() error {
+if *s {
+panic(ErrInvalidRemove)
+}
+*s = true
+return nil
+}
+
+// ErrProxyDestroyed is returned when attempting to use a proxy method when the underlying
+// proxy has already been targeted by a [CoreRemoveId] event.
+var ErrProxyDestroyed = errors.New("underlying proxy has been removed")
+
+// checkDestroy returns [ErrProxyDestroyed] if the current proxy has been destroyed.
+// Must be called at the beginning of any exported method of a proxy embedding removable.
+func (s *removable) checkDestroy() error {
+if *s {
+// not fatal: the caller is allowed to recover from this and allocate a new proxy
+return ErrProxyDestroyed
+}
+return nil
+}
+
+// mustCheckDestroy calls checkDestroy and panics if a non-nil error is returned.
+// This is useful for non-exported methods as they should become unreachable.
+func (s *removable) mustCheckDestroy() {
+if err := s.checkDestroy(); err != nil {
+panic(err)
+}
+}
+
+// destructible is embedded by proxies that can be targeted by the [CoreRemoveId] event and the
+// [CoreDestroy] method and requires no cleanup. destructible purposefully does not override
+// removable.mustCheckDestroy because it is used by unexported methods called during event handling
+// and are exempt from the destruction check.
+type destructible struct {
+destroyed bool
+
+removable
+}
+
+// checkDestroy overrides removable.checkDestroy to also check the destroyed field.
+func (s *destructible) checkDestroy() error {
+if s.destroyed {
+return ErrProxyDestroyed
+}
+if err := s.removable.checkDestroy(); err != nil {
+return err
+}
+return nil
+}
+
+// destroy calls removable.checkDestroy then queues a [CoreDestroy] event if it succeeds.
+func (s *destructible) destroy(ctx *Context, id Int) error {
+if err := s.checkDestroy(); err != nil {
+return err
+}
+l := len(ctx.pendingDestruction)
+ctx.pendingDestruction[id] = struct{}{}
+if len(ctx.pendingDestruction) != l+1 {
+return ErrProxyDestroyed
+}
+s.destroyed = true
+return ctx.GetCore().destroy(id)
+}

 // An InconsistentIdError describes an inconsistent state where the server claims an impossible
 // proxy or global id. This is only generated by the [CoreBoundProps] event.
@@ -349,10 +459,10 @@ func (c *CoreHello) MarshalBinary() ([]byte, error) { return Marshal(c) }
 // UnmarshalBinary satisfies [encoding.BinaryUnmarshaler] via [Unmarshal].
 func (c *CoreHello) UnmarshalBinary(data []byte) error { return Unmarshal(data, c) }

-// coreHello queues a [CoreHello] message for the PipeWire server.
-// This method should not be called directly, the New function queues this message.
-func (ctx *Context) coreHello() error {
-return ctx.writeMessage(
+// hello queues a [CoreHello] message for the PipeWire server.
+// This method should not be called directly, the [New] function queues this message.
+func (core *Core) hello() error {
+return core.ctx.writeMessage(
 PW_ID_CORE,
 &CoreHello{PW_VERSION_CORE},
 )

@@ -388,12 +498,12 @@ func (c *CoreSync) MarshalBinary() ([]byte, error) { return Marshal(c) }
 // UnmarshalBinary satisfies [encoding.BinaryUnmarshaler] via [Unmarshal].
 func (c *CoreSync) UnmarshalBinary(data []byte) error { return Unmarshal(data, c) }

-// coreSync queues a [CoreSync] message for the PipeWire server.
+// sync queues a [CoreSync] message for the PipeWire server.
 // This is not safe to use directly, callers should use Sync instead.
-func (ctx *Context) coreSync(id Int) error {
-return ctx.writeMessage(
+func (core *Core) sync(id Int) error {
+return core.ctx.writeMessage(
 PW_ID_CORE,
-&CoreSync{id, CoreSyncSequenceOffset + Int(ctx.sequence)},
+&CoreSync{id, CoreSyncSequenceOffset + Int(core.ctx.sequence)},
 )
 }

@@ -404,13 +514,13 @@ var ErrNotDone = errors.New("did not receive a Core::Done event targeting previo
 const (
 // syncTimeout is the maximum duration [Core.Sync] is allowed to take before
 // receiving [CoreDone] or failing.
-syncTimeout = 5 * time.Second
+syncTimeout = 10 * time.Second
 )

 // Sync queues a [CoreSync] message for the PipeWire server and initiates a Roundtrip.
 func (core *Core) Sync() error {
 core.done = false
-if err := core.ctx.coreSync(roundtripSyncID); err != nil {
+if err := core.sync(roundtripSyncID); err != nil {
 return err
 }
 deadline := time.Now().Add(syncTimeout)

@@ -429,6 +539,10 @@ func (core *Core) Sync() error {
 core.ctx.closeReceivedFiles()
 return &ProxyFatalError{Err: UnacknowledgedProxyError(slices.Collect(maps.Keys(core.ctx.pendingIds))), ProxyErrs: core.ctx.cloneAsProxyErrors()}
 }
+if len(core.ctx.pendingDestruction) != 0 {
+core.ctx.closeReceivedFiles()
+return &ProxyFatalError{Err: UnacknowledgedProxyDestructionError(slices.Collect(maps.Keys(core.ctx.pendingDestruction))), ProxyErrs: core.ctx.cloneAsProxyErrors()}
+}
 return core.ctx.doSyncComplete()
 }
@@ -497,6 +611,89 @@ func (ctx *Context) GetRegistry() (*Registry, error) {
 )
 }

+// CoreCreateObject is sent when the client requests to create a
+// new object from a factory of a certain type.
+//
+// The client allocates a new_id for the proxy. The server will
+// allocate a new resource with the same new_id and from then on,
+// Methods and Events will be exchanged between the new object of
+// the given type.
+type CoreCreateObject struct {
+// The name of a server factory object to use.
+FactoryName String `json:"factory_name"`
+// The type of the object to create, this is also the type of
+// the interface of the new_id proxy.
+Type String `json:"type"`
+// Undocumented, assumed to be the local version of the proxy.
+Version Int `json:"version"`
+// Extra properties to create the object.
+Properties *SPADict `json:"props"`
+// The proxy id of the new object.
+NewID Int `json:"new_id"`
+}
+
+// Opcode satisfies [Message] with a constant value.
+func (c *CoreCreateObject) Opcode() byte { return PW_CORE_METHOD_CREATE_OBJECT }
+
+// FileCount satisfies [Message] with a constant value.
+func (c *CoreCreateObject) FileCount() Int { return 0 }
+
+// Size satisfies [KnownSize] with a value computed at runtime.
+func (c *CoreCreateObject) Size() Word {
+return SizePrefix +
+SizeString[Word](c.FactoryName) +
+SizeString[Word](c.Type) +
+Size(SizeInt) +
+c.Properties.Size() +
+Size(SizeInt)
+}
+
+// MarshalBinary satisfies [encoding.BinaryMarshaler] via [Marshal].
+func (c *CoreCreateObject) MarshalBinary() ([]byte, error) { return Marshal(c) }
+
+// UnmarshalBinary satisfies [encoding.BinaryUnmarshaler] via [Unmarshal].
+func (c *CoreCreateObject) UnmarshalBinary(data []byte) error { return Unmarshal(data, c) }
+
+// createObject queues a [CoreCreateObject] message for the PipeWire server.
+// This is not safe to use directly, callers should use typed wrapper methods on [Registry] instead.
+func (core *Core) createObject(factoryName, typeName String, version Int, props SPADict, newId Int) error {
+return core.ctx.writeMessage(
+PW_ID_CORE,
+&CoreCreateObject{factoryName, typeName, version, &props, newId},
+)
+}
+
+// CoreDestroy is sent when the client requests to destroy an object.
+type CoreDestroy struct {
+// The proxy id of the object to destroy.
+ID Int `json:"id"`
+}
+
+// Opcode satisfies [Message] with a constant value.
+func (c *CoreDestroy) Opcode() byte { return PW_CORE_METHOD_DESTROY }
+
+// FileCount satisfies [Message] with a constant value.
+func (c *CoreDestroy) FileCount() Int { return 0 }
+
+// Size satisfies [KnownSize] with a constant value.
+func (c *CoreDestroy) Size() Word { return SizePrefix + Size(SizeInt) }
+
+// MarshalBinary satisfies [encoding.BinaryMarshaler] via [Marshal].
+func (c *CoreDestroy) MarshalBinary() ([]byte, error) { return Marshal(c) }
+
+// UnmarshalBinary satisfies [encoding.BinaryUnmarshaler] via [Unmarshal].
+func (c *CoreDestroy) UnmarshalBinary(data []byte) error { return Unmarshal(data, c) }
+
+// destroy queues a [CoreDestroy] message for the PipeWire server.
+// This is not safe to use directly, callers should use the exported method
+// on the proxy implementation instead.
+func (core *Core) destroy(id Int) error {
+return core.ctx.writeMessage(
+PW_ID_CORE,
+&CoreDestroy{id},
+)
+}
+
 // A RegistryGlobal event is emitted to notify a client about a new global object.
 type RegistryGlobal struct {
 // The global id.

@@ -533,6 +730,30 @@ func (c *RegistryGlobal) MarshalBinary() ([]byte, error) { return Marshal(c) }
 // UnmarshalBinary satisfies [encoding.BinaryUnmarshaler] via [Unmarshal].
 func (c *RegistryGlobal) UnmarshalBinary(data []byte) error { return Unmarshal(data, c) }

+// A RegistryGlobalRemove event is emitted when a global with id was removed.
+type RegistryGlobalRemove struct {
+// The global id that was removed.
+ID Int `json:"id"`
+}
+
+// Opcode satisfies [Message] with a constant value.
+func (c *RegistryGlobalRemove) Opcode() byte { return PW_REGISTRY_EVENT_GLOBAL_REMOVE }
+
+// FileCount satisfies [Message] with a constant value.
+func (c *RegistryGlobalRemove) FileCount() Int { return 0 }
+
+// Size satisfies [KnownSize] with a constant value.
+func (c *RegistryGlobalRemove) Size() Word {
+return SizePrefix +
+Size(SizeInt)
+}
+
+// MarshalBinary satisfies [encoding.BinaryMarshaler] via [Marshal].
+func (c *RegistryGlobalRemove) MarshalBinary() ([]byte, error) { return Marshal(c) }
+
+// UnmarshalBinary satisfies [encoding.BinaryUnmarshaler] via [Unmarshal].
+func (c *RegistryGlobalRemove) UnmarshalBinary(data []byte) error { return Unmarshal(data, c) }
+
 // RegistryBind is sent when the client requests to bind to the
 // global object with id and use the client proxy with new_id as
 // the proxy. After this call, methods can be sent to the remote
@@ -584,10 +805,70 @@ func (registry *Registry) bind(proxy eventProxy, id, version Int) (Int, error) {
 )
 }

+// RegistryDestroy is sent to try to destroy the global object with id.
+// This might fail when the client does not have permission.
+type RegistryDestroy struct {
+// The global id to destroy.
+ID Int `json:"id"`
+}
+
+// Opcode satisfies [Message] with a constant value.
+func (c *RegistryDestroy) Opcode() byte { return PW_REGISTRY_METHOD_DESTROY }
+
+// FileCount satisfies [Message] with a constant value.
+func (c *RegistryDestroy) FileCount() Int { return 0 }
+
+// Size satisfies [KnownSize] with a constant value.
+func (c *RegistryDestroy) Size() Word {
+return SizePrefix +
+Size(SizeInt)
+}
+
+// MarshalBinary satisfies [encoding.BinaryMarshaler] via [Marshal].
+func (c *RegistryDestroy) MarshalBinary() ([]byte, error) { return Marshal(c) }
+
+// UnmarshalBinary satisfies [encoding.BinaryUnmarshaler] via [Unmarshal].
+func (c *RegistryDestroy) UnmarshalBinary(data []byte) error { return Unmarshal(data, c) }
+
+// destroy queues a [RegistryDestroy] message for the PipeWire server.
+func (registry *Registry) destroy(id Int) error {
+return registry.ctx.writeMessage(
+registry.ID,
+&RegistryDestroy{id},
+)
+}
+
+// Destroy tries to destroy the global object with id.
+func (registry *Registry) Destroy(id Int) (err error) {
+asCoreError := registry.ctx.expectsCoreError(registry.ID, &err)
+if err != nil {
+return
+}
+if err = registry.destroy(id); err != nil {
+return err
+}
+if err = registry.ctx.GetCore().Sync(); err == nil {
+return nil
+}
+
+if coreError := asCoreError(); coreError == nil {
+return
+} else {
+switch syscall.Errno(-coreError.Result) {
+case syscall.EPERM:
+return &PermissionError{registry.ID, coreError.Message}
+
+default:
+return coreError
+}
+}
+}
+
 // An UnsupportedObjectTypeError is the name of a type not known by the server [Registry].
 type UnsupportedObjectTypeError string

 func (e UnsupportedObjectTypeError) Error() string { return "unsupported object type " + string(e) }
+func (e UnsupportedObjectTypeError) Message() string { return e.Error() }
@@ -598,22 +879,24 @@ type Core struct {
|
|||||||
done bool
|
done bool
|
||||||
|
|
||||||
ctx *Context
|
ctx *Context
|
||||||
|
|
||||||
noAck
|
noAck
|
||||||
|
noRemove
|
||||||
}
|
}
|
||||||
|
|
||||||
// ErrUnexpectedDone is a [CoreDone] event with unexpected values.
|
// ErrUnexpectedDone is a [CoreDone] event with unexpected values.
|
||||||
var ErrUnexpectedDone = errors.New("multiple Core::Done events targeting Core::Sync")
|
var ErrUnexpectedDone = errors.New("multiple Core::Done events targeting Core::Sync")
|
||||||
|
|
||||||
// An UnknownBoundIdError describes the server claiming to have bound a proxy id that was never allocated.
|
// An UnknownProxyIdError describes an event targeting a proxy id that was never allocated.
|
||||||
type UnknownBoundIdError[E any] struct {
|
type UnknownProxyIdError[E any] struct {
|
||||||
// Offending id decoded from Data.
|
// Offending id decoded from Data.
|
||||||
Id Int
|
Id Int
|
||||||
// Event received from the server.
|
// Event received from the server.
|
||||||
Event E
|
Event E
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *UnknownBoundIdError[E]) Error() string {
|
func (e *UnknownProxyIdError[E]) Error() string {
|
||||||
return "unknown bound proxy id " + strconv.Itoa(int(e.Id))
|
return "unknown proxy id " + strconv.Itoa(int(e.Id))
|
||||||
}
|
}
|
||||||
|
|
||||||
// An InvalidPingError is a [CorePing] event targeting a proxy id that was never allocated.
|
// An InvalidPingError is a [CorePing] event targeting a proxy id that was never allocated.
|
||||||
@@ -668,6 +951,19 @@ func (core *Core) consume(opcode byte, files []int, unmarshal func(v any)) error
|
|||||||
unmarshal(&coreError)
|
unmarshal(&coreError)
|
||||||
return &coreError
|
return &coreError
|
||||||
|
|
||||||
|
case PW_CORE_EVENT_REMOVE_ID:
|
||||||
|
var coreRemoveId CoreRemoveId
|
||||||
|
unmarshal(&coreRemoveId)
|
||||||
|
if proxy, ok := core.ctx.proxy[coreRemoveId.ID]; !ok {
|
||||||
|
// this should never happen so is non-recoverable if it does
|
||||||
|
panic(&UnknownProxyIdError[*CoreRemoveId]{Id: coreRemoveId.ID, Event: &coreRemoveId})
|
||||||
|
} else {
|
||||||
|
delete(core.ctx.proxy, coreRemoveId.ID)
|
||||||
|
// not always populated so this is not checked
|
||||||
|
delete(core.ctx.pendingDestruction, coreRemoveId.ID)
|
||||||
|
return proxy.remove()
|
||||||
|
}
|
||||||
|
|
||||||
case PW_CORE_EVENT_BOUND_PROPS:
|
case PW_CORE_EVENT_BOUND_PROPS:
|
||||||
var boundProps CoreBoundProps
|
var boundProps CoreBoundProps
|
||||||
unmarshal(&boundProps)
|
unmarshal(&boundProps)
|
||||||
@@ -675,12 +971,12 @@ func (core *Core) consume(opcode byte, files []int, unmarshal func(v any)) error
|
|||||||
delete(core.ctx.pendingIds, boundProps.ID)
|
delete(core.ctx.pendingIds, boundProps.ID)
|
||||||
proxy, ok := core.ctx.proxy[boundProps.ID]
|
proxy, ok := core.ctx.proxy[boundProps.ID]
|
||||||
if !ok {
|
if !ok {
|
||||||
return &UnknownBoundIdError[*CoreBoundProps]{Id: boundProps.ID, Event: &boundProps}
|
return &UnknownProxyIdError[*CoreBoundProps]{Id: boundProps.ID, Event: &boundProps}
|
||||||
}
|
}
|
||||||
return proxy.setBoundProps(&boundProps)
|
return proxy.setBoundProps(&boundProps)
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return &UnsupportedOpcodeError{opcode, core.String()}
|
panic(&UnsupportedOpcodeError{opcode, core.String()})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -698,7 +994,9 @@ type Registry struct {
|
|||||||
Objects map[Int]RegistryGlobal `json:"objects"`
|
Objects map[Int]RegistryGlobal `json:"objects"`
|
||||||
|
|
||||||
ctx *Context
|
ctx *Context
|
||||||
|
|
||||||
noAck
|
noAck
|
||||||
|
noRemove
|
||||||
}
|
}
|
||||||
|
|
||||||
// A GlobalIDCollisionError describes a [RegistryGlobal] event stepping on a previous instance of itself.
|
// A GlobalIDCollisionError describes a [RegistryGlobal] event stepping on a previous instance of itself.
|
||||||
@@ -714,6 +1012,14 @@ func (e *GlobalIDCollisionError) Error() string {
|
|||||||
" stepping on previous id " + strconv.Itoa(int(e.ID)) + " for " + e.Previous.Type
|
" stepping on previous id " + strconv.Itoa(int(e.ID)) + " for " + e.Previous.Type
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// An UnknownGlobalIDRemoveError describes a [RegistryGlobalRemove] event announcing the removal of
|
||||||
|
// a global id that is not yet known to [Registry] or was already deleted.
|
||||||
|
type UnknownGlobalIDRemoveError Int
|
||||||
|
|
||||||
|
func (e UnknownGlobalIDRemoveError) Error() string {
|
||||||
|
return "Registry::GlobalRemove event targets unknown id " + strconv.Itoa(int(e))
|
||||||
|
}
|
||||||
|
|
||||||
func (registry *Registry) consume(opcode byte, files []int, unmarshal func(v any)) error {
|
func (registry *Registry) consume(opcode byte, files []int, unmarshal func(v any)) error {
|
||||||
closeReceivedFiles(files...)
|
closeReceivedFiles(files...)
|
||||||
switch opcode {
|
switch opcode {
|
||||||
@@ -727,8 +1033,21 @@ func (registry *Registry) consume(opcode byte, files []int, unmarshal func(v any
|
|||||||
registry.Objects[global.ID] = global
|
registry.Objects[global.ID] = global
|
||||||
return nil
|
return nil
|
||||||
|
|
||||||
|
case PW_REGISTRY_EVENT_GLOBAL_REMOVE:
|
||||||
|
var globalRemove RegistryGlobalRemove
|
||||||
|
unmarshal(&globalRemove)
|
||||||
|
// server emits PW_CORE_EVENT_REMOVE_ID events targeting
|
||||||
|
// affected proxies so they do not need to be handled here
|
||||||
|
l := len(registry.Objects)
|
||||||
|
delete(registry.Objects, globalRemove.ID)
|
||||||
|
if len(registry.Objects) != l-1 {
|
||||||
|
// this should never happen so is non-recoverable if it does
|
||||||
|
panic(UnknownGlobalIDRemoveError(globalRemove.ID))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return &UnsupportedOpcodeError{opcode, registry.String()}
|
panic(&UnsupportedOpcodeError{opcode, registry.String()})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
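For reference, a minimal caller-side sketch of matching the renamed generic error after a round trip. This assumes Roundtrip surfaces the error values introduced above, possibly wrapped; the wrapping and the logging are illustrative, not confirmed by this change.

// hypothetical caller; handleUnknownProxy is not part of the package
package main

import (
    "errors"
    "log"

    "hakurei.app/internal/pipewire"
)

func handleUnknownProxy(ctx *pipewire.Context) {
    if err := ctx.Roundtrip(); err != nil {
        // the concrete instantiation must match the event that produced the error
        var unknownId *pipewire.UnknownProxyIdError[*pipewire.CoreBoundProps]
        if errors.As(err, &unknownId) {
            // the server referenced a proxy id this client never allocated
            log.Printf("ignoring event for unknown proxy id %d", unknownId.Id)
            return
        }
        log.Print(err)
    }
}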
@@ -171,7 +171,7 @@ func TestCoreError(t *testing.T) {
        /* padding */ 0, 0, 0, 0,

        /* size: 0x1b bytes */ 0x1b, 0, 0, 0,
-       /*type: String*/ 8, 0, 0, 0,
+       /* type: String */ 8, 0, 0, 0,

        // value: "no permission to destroy 0\x00"
        0x6e, 0x6f, 0x20, 0x70,
@@ -192,6 +192,24 @@ func TestCoreError(t *testing.T) {
    }.run(t)
}

+func TestCoreRemoveId(t *testing.T) {
+   t.Parallel()
+
+   encodingTestCases[pipewire.CoreRemoveId, *pipewire.CoreRemoveId]{
+       {"sample", []byte{
+           /* size: rest of data */ 0x10, 0, 0, 0,
+           /* type: Struct */ 0xe, 0, 0, 0,
+
+           /* size: 4 bytes */ 4, 0, 0, 0,
+           /* type: Int */ 4, 0, 0, 0,
+           /* value: 3 */ 3, 0, 0, 0,
+           /* padding */ 0, 0, 0, 0,
+       }, pipewire.CoreRemoveId{
+           ID: 3,
+       }, nil},
+   }.run(t)
+}

func TestCoreBoundProps(t *testing.T) {
    t.Parallel()

@@ -288,6 +306,83 @@ func TestCoreGetRegistry(t *testing.T) {
    }.run(t)
}

+func TestCoreCreateObject(t *testing.T) {
+   t.Parallel()
+
+   encodingTestCases[pipewire.CoreCreateObject, *pipewire.CoreCreateObject]{
+       {"sample", []byte{
+           /* size: rest of data */ 0x80, 0, 0, 0,
+           /* type: Struct */ 0xe, 0, 0, 0,
+
+           /* size: 0x13 bytes */ 0x13, 0, 0, 0,
+           /* type: String */ 8, 0, 0, 0,
+
+           // value: "spa-device-factory\x00"
+           0x73, 0x70, 0x61, 0x2d,
+           0x64, 0x65, 0x76, 0x69,
+           0x63, 0x65, 0x2d, 0x66,
+           0x61, 0x63, 0x74, 0x6f,
+           0x72, 0x79, 0, 0,
+           0, 0, 0, 0,
+
+           /* size: 0x1a bytes */ 0x1a, 0, 0, 0,
+           /* type: String */ 8, 0, 0, 0,
+
+           // value: "PipeWire:Interface:Device\x00"
+           0x50, 0x69, 0x70, 0x65,
+           0x57, 0x69, 0x72, 0x65,
+           0x3a, 0x49, 0x6e, 0x74,
+           0x65, 0x72, 0x66, 0x61,
+           0x63, 0x65, 0x3a, 0x44,
+           0x65, 0x76, 0x69, 0x63,
+           0x65, 0, 0, 0,
+           0, 0, 0, 0,
+
+           /* size: 4 bytes */ 4, 0, 0, 0,
+           /* type: Int */ 4, 0, 0, 0,
+           /* value: 3 */ 3, 0, 0, 0,
+           /* padding */ 0, 0, 0, 0,
+
+           /* size */ 0x10, 0, 0, 0,
+           /* type: Struct */ 0xe, 0, 0, 0,
+
+           /* size: 4 bytes */ 4, 0, 0, 0,
+           /* type: Int */ 4, 0, 0, 0,
+           /* value: 0 */ 0, 0, 0, 0,
+           /* padding */ 0, 0, 0, 0,
+
+           /* size: 4 bytes */ 4, 0, 0, 0,
+           /* type: Int */ 4, 0, 0, 0,
+           /* value: 0xbad */ 0xad, 0xb, 0, 0,
+           /* padding */ 0, 0, 0, 0,
+       }, pipewire.CoreCreateObject{
+           FactoryName: "spa-device-factory",
+           Type: pipewire.PW_TYPE_INTERFACE_Device,
+           Version: pipewire.PW_VERSION_FACTORY,
+           Properties: &pipewire.SPADict{},
+           NewID: 0xbad,
+       }, nil},
+   }.run(t)
+}

+func TestCoreDestroy(t *testing.T) {
+   t.Parallel()
+
+   encodingTestCases[pipewire.CoreDestroy, *pipewire.CoreDestroy]{
+       {"sample", []byte{
+           /* size: rest of data */ 0x10, 0, 0, 0,
+           /* type: Struct */ 0xe, 0, 0, 0,
+
+           /* size: 4 bytes */ 4, 0, 0, 0,
+           /* type: Int */ 4, 0, 0, 0,
+           /* value: 3 */ 3, 0, 0, 0,
+           /* padding */ 0, 0, 0, 0,
+       }, pipewire.CoreDestroy{
+           ID: 3,
+       }, nil},
+   }.run(t)
+}

func TestRegistryGlobal(t *testing.T) {
    t.Parallel()

@@ -745,6 +840,23 @@ func TestRegistryGlobal(t *testing.T) {
    }.run(t)
}

+func TestRegistryGlobalRemove(t *testing.T) {
+   t.Parallel()
+
+   encodingTestCases[pipewire.RegistryGlobalRemove, *pipewire.RegistryGlobalRemove]{
+       {"sample", []byte{
+           /* size: rest of data*/ 0x10, 0, 0, 0,
+           /* type: Struct */ 0xe, 0, 0, 0,
+           /* size: 4 bytes */ 4, 0, 0, 0,
+           /* type: Int */ 4, 0, 0, 0,
+           /* value: 0xbad */ 0xad, 0xb, 0, 0,
+           /* padding */ 0, 0, 0, 0,
+       }, pipewire.RegistryGlobalRemove{
+           ID: 0xbad,
+       }, nil},
+   }.run(t)
+}

func TestRegistryBind(t *testing.T) {
    t.Parallel()

@@ -757,3 +869,20 @@ func TestRegistryBind(t *testing.T) {
        }, nil},
    }.run(t)
}

+func TestRegistryDestroy(t *testing.T) {
+   t.Parallel()
+
+   encodingTestCases[pipewire.RegistryDestroy, *pipewire.RegistryDestroy]{
+       {"sample", []byte{
+           /* size: rest of data*/ 0x10, 0, 0, 0,
+           /* type: Struct */ 0xe, 0, 0, 0,
+           /* size: 4 bytes */ 4, 0, 0, 0,
+           /* type: Int */ 4, 0, 0, 0,
+           /* value: 0xbad */ 0xad, 0xb, 0, 0,
+           /* padding */ 0, 0, 0, 0,
+       }, pipewire.RegistryDestroy{
+           ID: 0xbad,
+       }, nil},
+   }.run(t)
+}
@@ -16,9 +16,9 @@ package pipewire
import (
    "encoding/binary"
+   "errors"
    "fmt"
    "io"
-   "net"
    "os"
    "path"
    "runtime"
@@ -26,10 +26,16 @@ import (
    "strconv"
    "strings"
    "syscall"
+   "time"
)

// Conn is a low level unix socket interface used by [Context].
type Conn interface {
+   // MightBlock informs the implementation that the next call to
+   // Recvmsg or Sendmsg might block. A zero or negative timeout
+   // cancels this behaviour.
+   MightBlock(timeout time.Duration)
+
    // Recvmsg calls syscall.Recvmsg on the underlying socket.
    Recvmsg(p, oob []byte, flags int) (n, oobn, recvflags int, err error)

@@ -51,20 +57,27 @@ type Context struct {
    buf []byte
    // Current [Header.Sequence] value, incremented every write.
    sequence Int
-   // Current server-side [Header.Sequence] value, incremented on every event processed.
-   remoteSequence Int
+   // Pending file descriptors to be sent with the next message.
+   pendingFiles []int
+   // File count already kept track of in [Header].
+   headerFiles int

    // Proxy id associations.
    proxy map[Int]eventProxy
    // Newly allocated proxies pending acknowledgement from the server.
    pendingIds map[Int]struct{}
    // Smallest available Id for the next proxy.
    nextId Int
-   // Server side registry generation number.
-   generation Long
-   // Pending file descriptors to be sent with the next message.
-   pendingFiles []int
-   // File count already kept track of in [Header].
-   headerFiles int
+   // Proxies targeted by the [CoreDestroy] event pending until next [CoreSync].
+   pendingDestruction map[Int]struct{}
+   // Proxy for built-in core events.
+   core Core
+   // Proxy for built-in client events.
+   client Client
+
+   // Current server-side [Header.Sequence] value, incremented on every event processed.
+   remoteSequence Int
    // Files from the server. This is discarded on every Roundtrip so eventProxy
    // implementations must make sure to close them to avoid leaking fds.
    //
@@ -80,13 +93,11 @@ type Context struct {
    // Pending footer value deferred to the next round trip,
    // sent if pendingFooter is nil. This is for emulating upstream behaviour
    deferredPendingFooter KnownSize
+   // Server side registry generation number.
+   generation Long
    // Deferred operations ran after a [Core.Sync] completes or Close is called. Errors
    //are reported as part of [ProxyConsumeError] and is not considered fatal unless panicked.
    syncComplete []func() error
-   // Proxy for built-in core events.
-   core Core
-   // Proxy for built-in client events.
-   client Client

    // Passed to [Conn.Recvmsg]. Not copied if sufficient for all received messages.
    iovecBuf [1 << 15]byte
@@ -120,8 +131,9 @@ func New(conn Conn, props SPADict) (*Context, error) {
        PW_ID_CLIENT: {},
    }
    ctx.nextId = Int(len(ctx.proxy))
+   ctx.pendingDestruction = make(map[Int]struct{})
+
-   if err := ctx.coreHello(); err != nil {
+   if err := ctx.core.hello(); err != nil {
        return nil, err
    }
    if err := ctx.clientUpdateProperties(props); err != nil {
@@ -131,45 +143,142 @@ func New(conn Conn, props SPADict) (*Context, error) {
    return &ctx, nil
}

-// A SyscallConnCloser is a [syscall.Conn] that implements [io.Closer].
-type SyscallConnCloser interface {
-   syscall.Conn
-   io.Closer
+// unixConn is an implementation of the [Conn] interface for connections
+// to Unix domain sockets.
+type unixConn struct {
+   fd int
+
+   // Whether creation of a new epoll instance was attempted.
+   epoll bool
+   // File descriptor referring to the new epoll instance.
+   // Valid if epoll is true and epollErr is nil.
+   epollFd int
+   // Error returned by syscall.EpollCreate1.
+   epollErr error
+   // Stores epoll events from the kernel.
+   epollBuf [32]syscall.EpollEvent
+
+   // If non-zero, next call is treated as a blocking call.
+   timeout time.Duration
}

-// A SyscallConn is a [Conn] adapter for [syscall.Conn].
-type SyscallConn struct{ SyscallConnCloser }
+// Dial connects to a Unix domain socket described by name.
+func Dial(name string) (Conn, error) {
+   if fd, err := syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC|syscall.SOCK_NONBLOCK, 0); err != nil {
+       return nil, os.NewSyscallError("socket", err)
+   } else if err = syscall.Connect(fd, &syscall.SockaddrUnix{Name: name}); err != nil {
+       _ = syscall.Close(fd)
+       return nil, os.NewSyscallError("connect", err)
+   } else {
+       return &unixConn{fd: fd}, nil
+   }
+}

-// Recvmsg implements [Conn.Recvmsg] via [syscall.Conn.SyscallConn].
-func (conn SyscallConn) Recvmsg(p, oob []byte, flags int) (n, oobn, recvflags int, err error) {
-   var rc syscall.RawConn
-   if rc, err = conn.SyscallConn(); err != nil {
+// MightBlock informs the implementation that the next call
+// might block for a non-zero timeout.
+func (conn *unixConn) MightBlock(timeout time.Duration) {
+   if timeout < 0 {
+       timeout = 0
+   }
+   conn.timeout = timeout
+}
+
+// wantsEpoll is called at the beginning of any method that might use epoll.
+func (conn *unixConn) wantsEpoll() error {
+   if !conn.epoll {
+       conn.epoll = true
+       conn.epollFd, conn.epollErr = syscall.EpollCreate1(syscall.EPOLL_CLOEXEC)
+       if conn.epollErr == nil {
+           if conn.epollErr = syscall.EpollCtl(conn.epollFd, syscall.EPOLL_CTL_ADD, conn.fd, &syscall.EpollEvent{
+               Events: syscall.EPOLLERR | syscall.EPOLLHUP,
+               Fd: int32(conn.fd),
+           }); conn.epollErr != nil {
+               _ = syscall.Close(conn.epollFd)
+           }
+       }
+   }
+   return conn.epollErr
+}
+
+// wait waits for a specific I/O event on fd. Caller must arrange for wantsEpoll
+// to be called somewhere before wait is called.
+func (conn *unixConn) wait(event uint32) (err error) {
+   if conn.timeout == 0 {
+       return nil
+   }
+   deadline := time.Now().Add(conn.timeout)
+   conn.timeout = 0
+
+   if err = syscall.EpollCtl(conn.epollFd, syscall.EPOLL_CTL_MOD, conn.fd, &syscall.EpollEvent{
+       Events: event | syscall.EPOLLERR | syscall.EPOLLHUP,
+       Fd: int32(conn.fd),
+   }); err != nil {
        return
    }

-   if controlErr := rc.Control(func(fd uintptr) {
-       n, oobn, recvflags, _, err = syscall.Recvmsg(int(fd), p, oob, flags)
-   }); controlErr != nil && err == nil {
-       err = controlErr
+   for timeout := deadline.Sub(time.Now()); timeout > 0; timeout = deadline.Sub(time.Now()) {
+       var n int
+       if n, err = syscall.EpollWait(conn.epollFd, conn.epollBuf[:], int(timeout/time.Millisecond)); err != nil {
+           return
+       }
+
+       switch n {
+       case 1: // only the socket fd is ever added
+           if conn.epollBuf[0].Fd != int32(conn.fd) { // unreachable
+               return syscall.ENOTRECOVERABLE
+           }
+           if conn.epollBuf[0].Events&event == event ||
+               conn.epollBuf[0].Events&syscall.EPOLLERR|syscall.EPOLLHUP != 0 {
+               return nil
+           }
+           err = syscall.ETIME
+           continue
+
+       case 0: // timeout
+           return syscall.ETIMEDOUT
+
+       default: // unreachable
+           return syscall.ENOTRECOVERABLE
+       }
    }
    return
}

-// Sendmsg implements [Conn.Sendmsg] via [syscall.Conn.SyscallConn].
-func (conn SyscallConn) Sendmsg(p, oob []byte, flags int) (n int, err error) {
-   var rc syscall.RawConn
-   if rc, err = conn.SyscallConn(); err != nil {
+// Recvmsg calls syscall.Recvmsg on the underlying socket.
+func (conn *unixConn) Recvmsg(p, oob []byte, flags int) (n, oobn, recvflags int, err error) {
+   if err = conn.wantsEpoll(); err != nil {
+       return
+   } else if err = conn.wait(syscall.EPOLLIN); err != nil {
        return
    }

-   if controlErr := rc.Control(func(fd uintptr) {
-       n, err = syscall.SendmsgN(int(fd), p, oob, nil, flags)
-   }); controlErr != nil && err == nil {
-       err = controlErr
-   }
+   n, oobn, recvflags, _, err = syscall.Recvmsg(conn.fd, p, oob, flags)
    return
}

+// Sendmsg calls syscall.Sendmsg on the underlying socket.
+func (conn *unixConn) Sendmsg(p, oob []byte, flags int) (n int, err error) {
+   if err = conn.wantsEpoll(); err != nil {
+       return
+   } else if err = conn.wait(syscall.EPOLLOUT); err != nil {
+       return
+   }
+
+   n, err = syscall.SendmsgN(conn.fd, p, oob, nil, flags)
+   return
+}
+
+// Close closes the underlying socket and the epoll fd if populated.
+func (conn *unixConn) Close() (err error) {
+   if conn.epoll && conn.epollErr == nil {
+       conn.epollErr = syscall.Close(conn.epollFd)
+   }
+   if err = syscall.Close(conn.fd); err != nil {
+       return
+   }
+   return conn.epollErr
+}

// MustNew calls [New](conn, props) and panics on error.
// It is intended for use in tests with hard-coded strings.
func MustNew(conn Conn, props SPADict) *Context {
@@ -303,7 +412,7 @@ func (ctx *Context) recvmsg(remaining []byte) (payload []byte, err error) {
        }
        if err != syscall.EAGAIN && err != syscall.EWOULDBLOCK {
            ctx.closeReceivedFiles()
-           return nil, os.NewSyscallError("recvmsg", err)
+           return nil, &ProxyFatalError{Err: os.NewSyscallError("recvmsg", err), ProxyErrs: ctx.cloneAsProxyErrors()}
        }
    }

@@ -340,7 +449,7 @@ func (ctx *Context) sendmsg(p []byte, fds ...int) error {
    }

    if err != nil && err != syscall.EAGAIN && err != syscall.EWOULDBLOCK {
-       return os.NewSyscallError("sendmsg", err)
+       return &ProxyFatalError{Err: os.NewSyscallError("sendmsg", err), ProxyErrs: ctx.cloneAsProxyErrors()}
    }
    return err
}
@@ -419,6 +528,9 @@ type eventProxy interface {
    consume(opcode byte, files []int, unmarshal func(v any)) error
    // setBoundProps stores a [CoreBoundProps] event received from the server.
    setBoundProps(event *CoreBoundProps) error
+   // remove is called when the proxy is removed for any reason, usually from
+   // being targeted by a [PW_CORE_EVENT_REMOVE_ID] event.
+   remove() error
+
    // Stringer returns the PipeWire interface name.
    fmt.Stringer
@@ -499,13 +611,21 @@ func (e DanglingFilesError) Error() string {
}

// An UnacknowledgedProxyError holds newly allocated proxy ids that the server failed
-// to acknowledge after an otherwise successful [Context.Roundtrip].
+// to acknowledge after an otherwise successful [Core.Sync].
type UnacknowledgedProxyError []Int

func (e UnacknowledgedProxyError) Error() string {
    return "server did not acknowledge " + strconv.Itoa(len(e)) + " proxies"
}

+// An UnacknowledgedProxyDestructionError holds destroyed proxy ids that the server failed
+// to acknowledge after an otherwise successful [Core.Sync].
+type UnacknowledgedProxyDestructionError []Int
+
+func (e UnacknowledgedProxyDestructionError) Error() string {
+   return "server did not acknowledge " + strconv.Itoa(len(e)) + " proxy destructions"
+}
+
// A ProxyFatalError describes an error that terminates event handling during a
// [Context.Roundtrip] and makes further event processing no longer possible.
type ProxyFatalError struct {
@@ -580,8 +700,15 @@ func (ctx *Context) Roundtrip() (err error) {
    return
}

+const (
+   // roundtripTimeout is the maximum duration socket operations during
+   // Context.roundtrip is allowed to block for.
+   roundtripTimeout = 5 * time.Second
+)
+
// roundtrip implements the Roundtrip method without checking proxyErrors.
func (ctx *Context) roundtrip() (err error) {
+   ctx.conn.MightBlock(roundtripTimeout)
    if err = ctx.sendmsg(ctx.buf, ctx.pendingFiles...); err != nil {
        return
    }
@@ -615,6 +742,7 @@ func (ctx *Context) roundtrip() (err error) {
    }()

    var remaining []byte
+   ctx.conn.MightBlock(roundtripTimeout)
    for {
        remaining, err = ctx.consume(remaining)
        if err == nil {
@@ -636,7 +764,7 @@ func (ctx *Context) roundtrip() (err error) {
}

// currentSeq returns the current sequence number.
-// This must only be called from eventProxy.consume.
+// This must only be called immediately after queueing a message.
func (ctx *Context) currentSeq() Int { return ctx.sequence - 1 }

// currentRemoteSeq returns the current remote sequence number.
@@ -786,6 +914,52 @@ func (ctx *Context) Close() (err error) {
    }
}

+// expectsCoreError returns a function that inspects an error value and
+// returns the address of a [CoreError] if it is the only error present
+// and targets the specified proxy and sequence.
+//
+// The behaviour of expectsCoreError is only correct for an empty buf
+// prior to calling. If buf is not empty, [Core.Sync] is called, with
+// its return value stored to the value pointed to by errP if not nil,
+// and the function is not populated.
+//
+// The caller must queue a message and call [Core.Sync] immediately
+// after calling expectsCoreError.
+func (ctx *Context) expectsCoreError(id Int, errP *error) (asCoreError func() (coreError *CoreError)) {
+   if len(ctx.buf) > 0 {
+       if err := ctx.GetCore().Sync(); err != nil {
+           *errP = err
+           return nil
+       }
+   }
+
+   sequence := ctx.sequence
+   return func() (coreError *CoreError) {
+       if proxyErrors, ok := (*errP).(ProxyConsumeError); !ok ||
+           len(proxyErrors) != 1 ||
+           !errors.As(proxyErrors[0], &coreError) ||
+           coreError == nil ||
+           coreError.ID != id ||
+           coreError.Sequence != sequence {
+           // do not return a non-matching CoreError
+           coreError = nil
+       }
+       return
+   }
+}
+
+// A PermissionError describes an error emitted by the server when trying to
+// perform an operation that the client has no permission for.
+type PermissionError struct {
+   // The id of the resource (proxy if emitted by the client) that is in error.
+   ID Int `json:"id"`
+   // An error message.
+   Message string `json:"message"`
+}
+
+func (*PermissionError) Unwrap() error { return syscall.EPERM }
+func (e *PermissionError) Error() string { return e.Message }
+
// Remote is the environment (sic) with the remote name.
const Remote = "PIPEWIRE_REMOTE"

@@ -793,14 +967,14 @@ const Remote = "PIPEWIRE_REMOTE"

const DEFAULT_SYSTEM_RUNTIME_DIR = "/run/pipewire"

-// connectName connects to a PipeWire remote by name and returns the [net.UnixConn].
-func connectName(name string, manager bool) (conn *net.UnixConn, err error) {
+// connectName connects to a PipeWire remote by name and returns the resulting [Conn].
+func connectName(name string, manager bool) (conn Conn, err error) {
    if manager && !strings.HasSuffix(name, "-manager") {
        return connectName(name+"-manager", false)
    }

    if path.IsAbs(name) || (len(name) > 0 && name[0] == '@') {
-       return net.DialUnix("unix", nil, &net.UnixAddr{Name: name, Net: "unix"})
+       return Dial(name)
    } else {
        runtimeDir, ok := os.LookupEnv("PIPEWIRE_RUNTIME_DIR")
        if !ok || !path.IsAbs(runtimeDir) {
@@ -815,7 +989,7 @@ func connectName(name string, manager bool) (conn *net.UnixConn, err error) {
        if !ok || !path.IsAbs(runtimeDir) {
            runtimeDir = DEFAULT_SYSTEM_RUNTIME_DIR
        }
-       return net.DialUnix("unix", nil, &net.UnixAddr{Name: path.Join(runtimeDir, name), Net: "unix"})
+       return Dial(path.Join(runtimeDir, name))
    }
}

@@ -833,12 +1007,11 @@ func ConnectName(name string, manager bool, props SPADict) (ctx *Context, err er
        }
    }

-   var conn *net.UnixConn
+   var conn Conn
    if conn, err = connectName(name, manager); err != nil {
        return
    }
-   if ctx, err = New(SyscallConn{conn}, props); err != nil {
+   if ctx, err = New(conn, props); err != nil {
        ctx = nil
        _ = conn.Close()
    }
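A minimal usage sketch of the new connection path follows, assuming a hard-coded socket path for illustration; in the real code ConnectName resolves the path from the environment and wraps these steps.

// hypothetical standalone usage; the socket path is an assumption
package main

import (
    "log"

    "hakurei.app/internal/pipewire"
)

func main() {
    // Dial replaces the previous net.DialUnix + SyscallConn adapter pair.
    conn, err := pipewire.Dial("/run/user/1000/pipewire-0")
    if err != nil {
        log.Fatal(err)
    }
    ctx, err := pipewire.New(conn, pipewire.SPADict{})
    if err != nil {
        _ = conn.Close()
        log.Fatal(err)
    }
    defer func() { _ = ctx.Close() }()

    // Roundtrip calls MightBlock(roundtripTimeout) internally, so socket I/O
    // here blocks via the epoll-backed wait with a five second cap.
    if err := ctx.Roundtrip(); err != nil {
        log.Fatal(err)
    }
}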
@@ -6,6 +6,7 @@ import (
    "strconv"
    . "syscall"
    "testing"
+   "time"

    "hakurei.app/container/stub"
    "hakurei.app/internal/pipewire"
@@ -680,9 +681,6 @@ func TestContext(t *testing.T) {
    }); err != nil {
        t.Fatalf("SecurityContext.Create: error = %v", err)
    }
-   if err := ctx.GetCore().Sync(); err != nil {
-       t.Fatalf("Sync: error = %v", err)
-   }

    // none of these should change
    if coreInfo := ctx.GetCore().Info; !reflect.DeepEqual(coreInfo, &wantCoreInfo0) {
@@ -718,6 +716,18 @@ type stubUnixConn struct {
    current int
}

+func (conn *stubUnixConn) MightBlock(timeout time.Duration) {
+   if timeout != 5*time.Second {
+       panic("unexpected timeout " + timeout.String())
+   }
+   if conn.current == 0 ||
+       (conn.samples[conn.current-1].nr == SYS_RECVMSG && conn.samples[conn.current-1].errno == EAGAIN && conn.samples[conn.current].nr == SYS_SENDMSG) ||
+       (conn.samples[conn.current-1].nr == SYS_SENDMSG && conn.samples[conn.current].nr == SYS_RECVMSG) {
+       return
+   }
+   panic("unexpected blocking hint before sample " + strconv.Itoa(conn.current))
+}

// nextSample returns the current sample and increments the counter.
func (conn *stubUnixConn) nextSample(nr uintptr) (sample *stubUnixConnSample, wantOOB []byte, err error) {
    sample = &conn.samples[conn.current]
@@ -836,6 +846,7 @@ func TestContextErrors(t *testing.T) {

        {"UnexpectedFileCountError", &pipewire.UnexpectedFileCountError{0, -1}, "received -1 files instead of the expected 0"},
        {"UnacknowledgedProxyError", make(pipewire.UnacknowledgedProxyError, 1<<4), "server did not acknowledge 16 proxies"},
+       {"UnacknowledgedProxyDestructionError", make(pipewire.UnacknowledgedProxyDestructionError, 1<<4), "server did not acknowledge 16 proxy destructions"},
        {"DanglingFilesError", make(pipewire.DanglingFilesError, 1<<4), "received 16 dangling files"},
        {"UnexpectedFilesError", pipewire.UnexpectedFilesError(1 << 4), "server message headers claim to have sent more files than actually received"},
        {"UnexpectedSequenceError", pipewire.UnexpectedSequenceError(1 << 4), "unexpected seq 16"},
@@ -862,6 +873,19 @@ func TestContextErrors(t *testing.T) {
            ID: 0xbad,
            Sequence: 0xcafe,
        }, "received Core::Ping seq 51966 targeting unknown proxy id 2989"},
+
+       {"GlobalIDCollisionError", &pipewire.GlobalIDCollisionError{
+           ID: 0xbad,
+           Previous: &pipewire.RegistryGlobal{Type: "PipeWire:Interface:Invalid"},
+           Current: &pipewire.RegistryGlobal{Type: "PipeWire:Interface:NewInvalid"},
+       }, "new Registry::Global event for PipeWire:Interface:NewInvalid stepping on previous id 2989 for PipeWire:Interface:Invalid"},
+
+       {"UnknownGlobalIDRemoveError", pipewire.UnknownGlobalIDRemoveError(0xbad), "Registry::GlobalRemove event targets unknown id 2989"},
+
+       {"PermissionError", &pipewire.PermissionError{
+           ID: 2,
+           Message: "no permission to destroy 0",
+       }, "no permission to destroy 0"},
    }
    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
|
        return nil

    case reflect.Pointer:
-       if len(data) < SizePrefix {
-           return ErrEOFPrefix
-       }
-       switch SPAKind(binary.NativeEndian.Uint32(data[SizeSPrefix:])) {
-       case SPA_TYPE_None:
+       if ok, err := unmarshalHandleNone(&data, wireSizeP); err != nil {
+           return err
+       } else if ok {
            v.SetZero()
            return nil

-       default:
-           v.Set(reflect.New(v.Type().Elem()))
-           return unmarshalValue(data, v.Elem(), wireSizeP)
        }

+       v.Set(reflect.New(v.Type().Elem()))
+       return unmarshalValue(data, v.Elem(), wireSizeP)

    case reflect.String:
        *wireSizeP = 0
        if err := unmarshalCheckTypeBounds(&data, SPA_TYPE_String, wireSizeP); err != nil {
@@ -422,6 +419,29 @@ func unmarshalValue(data []byte, v reflect.Value, wireSizeP *Word) error {
    }
}

+// unmarshalHandleNone establishes prefix bounds and, for a value of type [SPA_TYPE_None],
+// validates its size and skips the header. This is for unmarshalling values that can be nil.
+func unmarshalHandleNone(data *[]byte, wireSizeP *Word) (bool, error) {
+   if len(*data) < SizePrefix {
+       return false, ErrEOFPrefix
+   }
+
+   if SPAKind(binary.NativeEndian.Uint32((*data)[SizeSPrefix:])) != SPA_TYPE_None {
+       return false, nil
+   }
+
+   *wireSizeP = 0
+   if err := unmarshalCheckTypeBounds(data, SPA_TYPE_None, wireSizeP); err != nil {
+       return true, err
+   }
+
+   if len(*data) != 0 {
+       return true, TrailingGarbageError(*data)
+   }
+
+   return true, nil
+}
+
// An InconsistentSizeError describes an inconsistent size prefix encountered
// in data passed to [Unmarshal].
type InconsistentSizeError struct{ Prefix, Expect Word }
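To illustrate the "None means nil" rule the pointer branch now delegates to unmarshalHandleNone, here is a toy decoder over the same size/type-prefix layout. It uses made-up local constants rather than the package's real ones (the numeric value of SPA_TYPE_None is not shown in this diff), and the sample bytes assume a little-endian host, like the test vectors above.

// toy sketch, not the package's Unmarshal; all constants are placeholders
package main

import (
    "encoding/binary"
    "fmt"
)

const (
    sizePrefix = 8 // assumed: 4-byte size followed by 4-byte type
    typeNone   = 1 // placeholder, not the package constant
    typeInt    = 4 // matches the POD test vectors in this diff
)

// decodeOptionalInt returns nil for a None value and the decoded value otherwise,
// mirroring how a pointer field is left zero when the wire value is None.
func decodeOptionalInt(data []byte) (*int32, error) {
    if len(data) < sizePrefix {
        return nil, fmt.Errorf("short prefix")
    }
    switch binary.NativeEndian.Uint32(data[4:]) {
    case typeNone:
        return nil, nil // analogous to v.SetZero above
    case typeInt:
        if len(data) < sizePrefix+4 {
            return nil, fmt.Errorf("short value")
        }
        v := int32(binary.NativeEndian.Uint32(data[sizePrefix:]))
        return &v, nil
    default:
        return nil, fmt.Errorf("unsupported type")
    }
}

func main() {
    // size 4, type Int, value 3, padding
    v, err := decodeOptionalInt([]byte{4, 0, 0, 0, 4, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0})
    if err != nil {
        panic(err)
    }
    fmt.Println(*v) // 3
}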
@@ -26,9 +26,10 @@ const (
const (
    PW_SECURITY_CONTEXT_METHOD_ADD_LISTENER = iota
-   PW_SECURITY_CONTEXT_METHOD_CREATE
-   PW_SECURITY_CONTEXT_METHOD_NUM

+   PW_SECURITY_CONTEXT_METHOD_CREATE
+
+   PW_SECURITY_CONTEXT_METHOD_NUM
    PW_VERSION_SECURITY_CONTEXT_METHODS = 0
)

@@ -90,6 +91,8 @@ type SecurityContext struct {
    GlobalID Int `json:"id"`

    ctx *Context

+   destructible
}

// GetSecurityContext queues a [RegistryBind] message for the PipeWire server
@@ -108,13 +111,39 @@ func (registry *Registry) GetSecurityContext() (securityContext *SecurityContext
}

// Create queues a [SecurityContextCreate] message for the PipeWire server.
-func (securityContext *SecurityContext) Create(listenFd, closeFd int, props SPADict) error {
+func (securityContext *SecurityContext) Create(listenFd, closeFd int, props SPADict) (err error) {
+   if err = securityContext.checkDestroy(); err != nil {
+       return
+   }
+
+   asCoreError := securityContext.ctx.expectsCoreError(securityContext.ID, &err)
+   if err != nil {
+       return
+   }
+
    // queued in reverse based on upstream behaviour, unsure why
    offset := securityContext.ctx.queueFiles(closeFd, listenFd)
-   return securityContext.ctx.writeMessage(
+   if err = securityContext.ctx.writeMessage(
        securityContext.ID,
        &SecurityContextCreate{ListenFd: offset + 1, CloseFd: offset + 0, Properties: &props},
-   )
+   ); err != nil {
+       return
+   }
+   if err = securityContext.ctx.GetCore().Sync(); err == nil {
+       return nil
+   }
+
+   if coreError := asCoreError(); coreError == nil {
+       return
+   } else {
+       switch syscall.Errno(-coreError.Result) {
+       case syscall.EPERM:
+           return &PermissionError{securityContext.ID, coreError.Message}
+
+       default:
+           return coreError
+       }
+   }
}

// securityContextCloser holds onto resources associated to the security context.
@@ -144,6 +173,9 @@ func (scc *securityContextCloser) Close() (err error) {
// BindAndCreate binds a new socket to the specified pathname and pass it to Create.
// It returns an [io.Closer] corresponding to [SecurityContextCreate.CloseFd].
func (securityContext *SecurityContext) BindAndCreate(pathname string, props SPADict) (io.Closer, error) {
+   if err := securityContext.checkDestroy(); err != nil {
+       return nil, err
+   }
    var scc securityContextCloser

    // ensure pathname is available
@@ -185,17 +217,19 @@ func (securityContext *SecurityContext) BindAndCreate(pathname string, props SPA
}

func (securityContext *SecurityContext) consume(opcode byte, files []int, _ func(v any)) error {
+   securityContext.mustCheckDestroy()
    closeReceivedFiles(files...)
    switch opcode {
    // SecurityContext does not receive any events

    default:
-       return &UnsupportedOpcodeError{opcode, securityContext.String()}
+       panic(&UnsupportedOpcodeError{opcode, securityContext.String()})
    }

}

func (securityContext *SecurityContext) setBoundProps(event *CoreBoundProps) error {
+   securityContext.mustCheckDestroy()
    if securityContext.ID != event.ID {
        return &InconsistentIdError{Proxy: securityContext, ID: securityContext.ID, ServerID: event.ID}
    }
@@ -205,4 +239,9 @@ func (securityContext *SecurityContext) setBoundProps(event *CoreBoundProps) err
    return nil
}

+// Destroy destroys this [SecurityContext] proxy.
+func (securityContext *SecurityContext) Destroy() error {
+   return securityContext.destroy(securityContext.ctx, securityContext.ID)
+}
+
func (securityContext *SecurityContext) String() string { return PW_TYPE_INTERFACE_SecurityContext }
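A hedged sketch of the revised SecurityContext flow from the caller's side; the Registry setup, the socket pathname, and the assumption that BindAndCreate propagates the PermissionError produced by Create are all illustrative rather than confirmed here.

// hypothetical helper built on the API shown in this change
package main

import (
    "errors"
    "fmt"

    "hakurei.app/internal/pipewire"
)

func createSecurityContext(registry *pipewire.Registry) error {
    // the two-value return is assumed from the truncated signature above
    securityContext, err := registry.GetSecurityContext()
    if err != nil {
        return err
    }

    closer, err := securityContext.BindAndCreate("/tmp/hakurei-pipewire.sock", pipewire.SPADict{})
    if err != nil {
        // Create now surfaces an EPERM Core::Error result as *PermissionError,
        // which unwraps to syscall.EPERM
        var permissionError *pipewire.PermissionError
        if errors.As(err, &permissionError) {
            return fmt.Errorf("security context denied: %w", permissionError)
        }
        return err
    }
    defer func() { _ = closer.Close() }()

    // the proxy can later be released with securityContext.Destroy()
    return nil
}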
internal/pkg/dir.go (new file, 203 lines)
@@ -0,0 +1,203 @@
package pkg

import (
    "crypto/sha512"
    "encoding/binary"
    "errors"
    "io"
    "io/fs"
    "math"
    "os"
    "path/filepath"
    "syscall"

    "hakurei.app/container/check"
)

// FlatEntry is a directory entry to be encoded for [Flatten].
type FlatEntry struct {
    Mode fs.FileMode // file mode bits
    Path string      // pathname of the file
    Data []byte      // file content or symlink destination
}

/*
| mode uint32 | path_sz uint32 |
| data_sz uint64 |
| path string |
| data []byte |
*/

// Encode encodes the entry for transmission or hashing.
func (ent *FlatEntry) Encode(w io.Writer) (n int, err error) {
    pPathSize := alignSize(len(ent.Path))
    if pPathSize > math.MaxUint32 {
        return 0, syscall.E2BIG
    }
    pDataSize := alignSize(len(ent.Data))

    payload := make([]byte, wordSize*2+pPathSize+pDataSize)
    binary.LittleEndian.PutUint32(payload, uint32(ent.Mode))
    binary.LittleEndian.PutUint32(payload[wordSize/2:], uint32(len(ent.Path)))
    binary.LittleEndian.PutUint64(payload[wordSize:], uint64(len(ent.Data)))
    copy(payload[wordSize*2:], ent.Path)
    copy(payload[wordSize*2+pPathSize:], ent.Data)
    return w.Write(payload)
}

// ErrInsecurePath is returned by [FlatEntry.Decode] if validation is requested
// and a nonlocal path is encountered in the stream.
var ErrInsecurePath = errors.New("insecure file path")

// Decode decodes the entry from its representation produced by Encode.
func (ent *FlatEntry) Decode(r io.Reader, validate bool) (n int, err error) {
    var nr int

    header := make([]byte, wordSize*2)
    nr, err = r.Read(header)
    n += nr
    if err != nil {
        if errors.Is(err, io.EOF) && n != 0 {
            err = io.ErrUnexpectedEOF
        }
        return
    }

    ent.Mode = fs.FileMode(binary.LittleEndian.Uint32(header))
    pathSize := int(binary.LittleEndian.Uint32(header[wordSize/2:]))
    pPathSize := alignSize(pathSize)
    dataSize := int(binary.LittleEndian.Uint64(header[wordSize:]))
    pDataSize := alignSize(dataSize)

    buf := make([]byte, pPathSize+pDataSize)
    nr, err = r.Read(buf)
    n += nr
    if err != nil {
        if errors.Is(err, io.EOF) {
            if nr != len(buf) {
                err = io.ErrUnexpectedEOF
                return
            }
        } else {
            return
        }
    }

    ent.Path = string(buf[:pathSize])
    if ent.Mode.IsDir() {
        ent.Data = nil
    } else {
        ent.Data = buf[pPathSize : pPathSize+dataSize]
    }

    if validate && !filepath.IsLocal(ent.Path) {
        err = ErrInsecurePath
    }

    return
}

// DirScanner provides an efficient interface for reading a stream of encoded
// [FlatEntry]. Successive calls to the Scan method will step through the
// entries in the stream.
type DirScanner struct {
    // Underlying reader to scan [FlatEntry] representations from.
    r io.Reader

    // First non-EOF I/O error, returned by the Err method.
    err error

    // Entry to store results in. Its address is returned by the Entry method
    // and is updated on every call to Scan.
    ent FlatEntry

    // Validate pathnames during decoding.
    validate bool
}

// NewDirScanner returns the address of a new instance of [DirScanner] reading
// from r. The caller must no longer read from r after this function returns.
func NewDirScanner(r io.Reader, validate bool) *DirScanner {
    return &DirScanner{r: r, validate: validate}
}

// Err returns the first non-EOF I/O error.
func (s *DirScanner) Err() error {
    if errors.Is(s.err, io.EOF) {
        return nil
    }
    return s.err
}

// Entry returns the address to the [FlatEntry] value storing the last result.
func (s *DirScanner) Entry() *FlatEntry { return &s.ent }

// Scan advances to the next [FlatEntry].
func (s *DirScanner) Scan() bool {
    if s.err != nil {
        return false
    }

    var n int
    n, s.err = s.ent.Decode(s.r, s.validate)
    if errors.Is(s.err, io.EOF) {
        return n != 0
    }
    return s.err == nil
}

// Flatten writes a deterministic representation of the contents of fsys to w.
// The resulting data can be hashed to produce a deterministic checksum for the
// directory.
func Flatten(fsys fs.FS, root string, w io.Writer) (n int, err error) {
    var nr int
    err = fs.WalkDir(fsys, root, func(path string, d fs.DirEntry, err error) error {
        if err != nil {
            return err
        }

        var fi fs.FileInfo
        fi, err = d.Info()
        if err != nil {
            return err
        }

        ent := FlatEntry{
            Path: path,
            Mode: fi.Mode(),
        }
        if ent.Mode.IsRegular() {
            if ent.Data, err = fs.ReadFile(fsys, path); err != nil {
                return err
            }
        } else if ent.Mode&fs.ModeSymlink != 0 {
            var newpath string
            if newpath, err = fs.ReadLink(fsys, path); err != nil {
                return err
            }
            ent.Data = []byte(newpath)
        } else if !ent.Mode.IsDir() {
            return InvalidFileModeError(ent.Mode)
        }

        nr, err = ent.Encode(w)
        n += nr
        return err
    })
    return
}

// HashFS returns a checksum produced by hashing the result of [Flatten].
func HashFS(buf *Checksum, fsys fs.FS, root string) error {
    h := sha512.New384()
    if _, err := Flatten(fsys, root, h); err != nil {
        return err
    }
    h.Sum(buf[:0])
    return nil
}

// HashDir returns a checksum produced by hashing the result of [Flatten].
func HashDir(buf *Checksum, pathname *check.Absolute) error {
    return HashFS(buf, os.DirFS(pathname.String()), ".")
}
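A hedged round-trip sketch of the new dir.go API using an in-memory filesystem; pkg.Checksum is assumed to be a fixed-size byte array as implied by buf[:0], and the file fixtures here are illustrative.

// hypothetical usage of Flatten, HashFS and DirScanner
package main

import (
    "bytes"
    "fmt"
    "io/fs"
    "testing/fstest"

    "hakurei.app/internal/pkg"
)

func main() {
    fsys := fstest.MapFS{
        ".":    {Mode: fs.ModeDir | 0700},
        "file": {Mode: 0400, Data: []byte("hello")},
        "link": {Mode: fs.ModeSymlink | 0777, Data: []byte("file")},
    }

    // Flatten produces a deterministic byte stream for the tree; hashing it
    // yields a content address for the whole directory.
    var buf bytes.Buffer
    if _, err := pkg.Flatten(fsys, ".", &buf); err != nil {
        panic(err)
    }
    var sum pkg.Checksum
    if err := pkg.HashFS(&sum, fsys, "."); err != nil {
        panic(err)
    }
    fmt.Printf("checksum: %x\n", sum)

    // DirScanner walks the same stream back as FlatEntry values.
    s := pkg.NewDirScanner(bytes.NewReader(buf.Bytes()), true)
    for s.Scan() {
        fmt.Println(s.Entry().Path, s.Entry().Mode)
    }
    if err := s.Err(); err != nil {
        panic(err)
    }
}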
internal/pkg/dir_test.go (new file, 570 lines)
@@ -0,0 +1,570 @@
package pkg_test

import (
    "bytes"
    "io/fs"
    "reflect"
    "testing"
    "testing/fstest"

    "hakurei.app/internal/pkg"
)

func TestFlatten(t *testing.T) {
    t.Parallel()

    testCases := []struct {
        name string
        fsys fs.FS
        entries []pkg.FlatEntry
        sum pkg.Checksum
        err error
    }{
        {"bad type", fstest.MapFS{
            ".": {Mode: fs.ModeDir | 0700},
            "invalid": {Mode: fs.ModeCharDevice | 0400},
        }, nil, pkg.Checksum{}, pkg.InvalidFileModeError(
            fs.ModeCharDevice | 0400,
        )},

        {"empty", fstest.MapFS{
            ".": {Mode: fs.ModeDir | 0700},
            "checksum": {Mode: fs.ModeDir | 0700},
            "identifier": {Mode: fs.ModeDir | 0700},
            "work": {Mode: fs.ModeDir | 0700},
        }, []pkg.FlatEntry{
            {Mode: fs.ModeDir | 0700, Path: "."},
            {Mode: fs.ModeDir | 0700, Path: "checksum"},
            {Mode: fs.ModeDir | 0700, Path: "identifier"},
            {Mode: fs.ModeDir | 0700, Path: "work"},
        }, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C"), nil},

        {"sample cache file", fstest.MapFS{
            ".": {Mode: fs.ModeDir | 0700},

            "checksum": {Mode: fs.ModeDir | 0700},
            "checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX": {Mode: 0400, Data: []byte{0}},
            "checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq": {Mode: 0400, Data: []byte{0, 0, 0, 0, 0xad, 0xb, 0, 4, 0xfe, 0xfe, 0, 0, 0xfe, 0xca, 0, 0}},

            "identifier": {Mode: fs.ModeDir | 0700},
            "identifier/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
            "identifier/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
            "identifier/cafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
            "identifier/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},

            "work": {Mode: fs.ModeDir | 0700},
        }, []pkg.FlatEntry{
            {Mode: fs.ModeDir | 0700, Path: "."},

            {Mode: fs.ModeDir | 0700, Path: "checksum"},
            {Mode: 0400, Path: "checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq", Data: []byte{0, 0, 0, 0, 0xad, 0xb, 0, 4, 0xfe, 0xfe, 0, 0, 0xfe, 0xca, 0, 0}},
            {Mode: 0400, Path: "checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX", Data: []byte{0}},

            {Mode: fs.ModeDir | 0700, Path: "identifier"},
            {Mode: fs.ModeSymlink | 0777, Path: "identifier/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq", Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
            {Mode: fs.ModeSymlink | 0777, Path: "identifier/cafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe", Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
            {Mode: fs.ModeSymlink | 0777, Path: "identifier/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
            {Mode: fs.ModeSymlink | 0777, Path: "identifier/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX", Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},

            {Mode: fs.ModeDir | 0700, Path: "work"},
        }, pkg.MustDecode("St9rlE-mGZ5gXwiv_hzQ_B8bZP-UUvSNmf4nHUZzCMOumb6hKnheZSe0dmnuc4Q2"), nil},

        {"sample http get cure", fstest.MapFS{
            ".": {Mode: fs.ModeDir | 0700},

            "checksum": {Mode: fs.ModeDir | 0700},
            "checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU": {Mode: 0400, Data: []byte("\x7f\xe1\x69\xa2\xdd\x63\x96\x26\x83\x79\x61\x8b\xf0\x3f\xd5\x16\x9a\x39\x3a\xdb\xcf\xb1\xbc\x8d\x33\xff\x75\xee\x62\x56\xa9\xf0\x27\xac\x13\x94\x69")},

            "identifier": {Mode: fs.ModeDir | 0700},
            "identifier/oM-2pUlk-mOxK1t3aMWZer69UdOQlAXiAgMrpZ1476VoOqpYVP1aGFS9_HYy-D8_": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU")},

            "work": {Mode: fs.ModeDir | 0700},
        }, []pkg.FlatEntry{
            {Mode: fs.ModeDir | 0700, Path: "."},

            {Mode: fs.ModeDir | 0700, Path: "checksum"},
            {Mode: 0400, Path: "checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU", Data: []byte("\x7f\xe1\x69\xa2\xdd\x63\x96\x26\x83\x79\x61\x8b\xf0\x3f\xd5\x16\x9a\x39\x3a\xdb\xcf\xb1\xbc\x8d\x33\xff\x75\xee\x62\x56\xa9\xf0\x27\xac\x13\x94\x69")},

            {Mode: fs.ModeDir | 0700, Path: "identifier"},
            {Mode: fs.ModeSymlink | 0777, Path: "identifier/oM-2pUlk-mOxK1t3aMWZer69UdOQlAXiAgMrpZ1476VoOqpYVP1aGFS9_HYy-D8_", Data: []byte("../checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU")},

            {Mode: fs.ModeDir | 0700, Path: "work"},
        }, pkg.MustDecode("L_0RFHpr9JUS4Zp14rz2dESSRvfLzpvqsLhR1-YjQt8hYlmEdVl7vI3_-v8UNPKs"), nil},

        {"sample directory step simple", fstest.MapFS{
            ".": {Mode: fs.ModeDir | 0500},

            "check": {Mode: 0400, Data: []byte{0, 0}},

            "lib": {Mode: fs.ModeDir | 0700},
            "lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},

            "lib/pkgconfig": {Mode: fs.ModeDir | 0700},
        }, []pkg.FlatEntry{
            {Mode: fs.ModeDir | 0500, Path: "."},

            {Mode: 0400, Path: "check", Data: []byte{0, 0}},

            {Mode: fs.ModeDir | 0700, Path: "lib"},
            {Mode: fs.ModeSymlink | 0777, Path: "lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},

            {Mode: fs.ModeDir | 0700, Path: "lib/pkgconfig"},
        }, pkg.MustDecode("qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b"), nil},

        {"sample directory step garbage", fstest.MapFS{
            ".": {Mode: fs.ModeDir | 0500},

            "lib": {Mode: fs.ModeDir | 0500},
            "lib/check": {Mode: 0400, Data: []byte{}},

            "lib/pkgconfig": {Mode: fs.ModeDir | 0500},
        }, []pkg.FlatEntry{
            {Mode: fs.ModeDir | 0500, Path: "."},

            {Mode: fs.ModeDir | 0500, Path: "lib"},
            {Mode: 0400, Path: "lib/check", Data: []byte{}},

            {Mode: fs.ModeDir | 0500, Path: "lib/pkgconfig"},
        }, pkg.MustDecode("CUx-3hSbTWPsbMfDhgalG4Ni_GmR9TnVX8F99tY_P5GtkYvczg9RrF5zO0jX9XYT"), nil},

        {"sample directory", fstest.MapFS{
            ".": {Mode: fs.ModeDir | 0700},

            "checksum": {Mode: fs.ModeDir | 0700},
            "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b": {Mode: fs.ModeDir | 0500},
            "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/check": {Mode: 0400, Data: []byte{0, 0}},
            "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib": {Mode: fs.ModeDir | 0700},
            "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/pkgconfig": {Mode: fs.ModeDir | 0700},
            "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},

            "identifier": {Mode: fs.ModeDir | 0700},
            "identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
            "identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},

            "work": {Mode: fs.ModeDir | 0700},
        }, []pkg.FlatEntry{
            {Mode: fs.ModeDir | 0700, Path: "."},

            {Mode: fs.ModeDir | 0700, Path: "checksum"},
            {Mode: fs.ModeDir | 0500, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b"},
            {Mode: 0400, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/check", Data: []byte{0, 0}},
            {Mode: fs.ModeDir | 0700, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib"},
|
{Mode: fs.ModeDir | 0700, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/pkgconfig"},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY", Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa", Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
|
}, pkg.MustDecode("WVpvsVqVKg9Nsh744x57h51AuWUoUR2nnh8Md-EYBQpk6ziyTuUn6PLtF2e0Eu_d"), nil},
|
||||||
|
|
||||||
|
{"sample tar step unpack", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0500},
|
||||||
|
|
||||||
|
"checksum": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check": {Mode: 0400, Data: []byte{0, 0}},
|
||||||
|
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
|
||||||
|
"identifier": {Mode: fs.ModeDir | 0500},
|
||||||
|
"identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||||
|
"identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||||
|
|
||||||
|
"work": {Mode: fs.ModeDir | 0500},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "."},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum"},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP"},
|
||||||
|
{Mode: 0400, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check", Data: []byte{0, 0}},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig"},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "identifier"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "work"},
|
||||||
|
}, pkg.MustDecode("cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM"), nil},
|
||||||
|
|
||||||
|
{"sample tar", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
|
|
||||||
|
"checksum": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check": {Mode: 0400, Data: []byte{0, 0}},
|
||||||
|
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||||
|
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||||
|
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/work": {Mode: fs.ModeDir | 0500},
|
||||||
|
|
||||||
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
|
"identifier/W5S65DEhawz_WKaok5NjUKLmnD9dNl5RPauNJjcOVcB3VM4eGhSaLGmXbL8vZpiw": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
|
||||||
|
"identifier/rg7F1D5hwv6o4xctjD5zDq4i5MD0mArTsUIWfhUbik8xC6Bsyt3mjXXOm3goojTz": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
|
||||||
|
|
||||||
|
"temp": {Mode: fs.ModeDir | 0700},
|
||||||
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM"},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum"},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP"},
|
||||||
|
{Mode: 0400, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check", Data: []byte{0, 0}},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig"},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/work"},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/W5S65DEhawz_WKaok5NjUKLmnD9dNl5RPauNJjcOVcB3VM4eGhSaLGmXbL8vZpiw", Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/rg7F1D5hwv6o4xctjD5zDq4i5MD0mArTsUIWfhUbik8xC6Bsyt3mjXXOm3goojTz", Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
|
}, pkg.MustDecode("NQTlc466JmSVLIyWklm_u8_g95jEEb98PxJU-kjwxLpfdjwMWJq0G8ze9R4Vo1Vu"), nil},
|
||||||
|
|
||||||
|
{"sample tar expand step unpack", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0500},
|
||||||
|
|
||||||
|
"libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "."},
|
||||||
|
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
}, pkg.MustDecode("CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN"), nil},
|
||||||
|
|
||||||
|
{"sample tar expand", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
|
|
||||||
|
"checksum": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
|
||||||
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
|
"identifier/W5S65DEhawz_WKaok5NjUKLmnD9dNl5RPauNJjcOVcB3VM4eGhSaLGmXbL8vZpiw": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
||||||
|
"identifier/_v1blm2h-_KA-dVaawdpLas6MjHc6rbhhFS8JWwx8iJxZGUu8EBbRrhr5AaZ9PJL": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
||||||
|
|
||||||
|
"temp": {Mode: fs.ModeDir | 0700},
|
||||||
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/W5S65DEhawz_WKaok5NjUKLmnD9dNl5RPauNJjcOVcB3VM4eGhSaLGmXbL8vZpiw", Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_v1blm2h-_KA-dVaawdpLas6MjHc6rbhhFS8JWwx8iJxZGUu8EBbRrhr5AaZ9PJL", Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
|
}, pkg.MustDecode("hSoSSgCYTNonX3Q8FjvjD1fBl-E-BQyA6OTXro2OadXqbST4tZ-akGXszdeqphRe"), nil},
|
||||||
|
|
||||||
|
{"testtool", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0500},
|
||||||
|
|
||||||
|
"check": {Mode: 0400, Data: []byte{0}},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "."},
|
||||||
|
|
||||||
|
{Mode: 0400, Path: "check", Data: []byte{0}},
|
||||||
|
}, pkg.MustDecode("GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"), nil},
|
||||||
|
|
||||||
|
{"sample exec container", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
|
|
||||||
|
"checksum": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
|
||||||
|
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
|
||||||
|
|
||||||
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
|
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||||
|
"identifier/dztPS6jRjiZtCF4_p8AzfnxGp6obkhrgFVsxdodbKWUoAEVtDz3MykepJB4kI_ks": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
|
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
|
||||||
|
"temp": {Mode: fs.ModeDir | 0700},
|
||||||
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
|
||||||
|
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
||||||
|
{Mode: 0400, Path: "checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb", Data: []byte{}},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/dztPS6jRjiZtCF4_p8AzfnxGp6obkhrgFVsxdodbKWUoAEVtDz3MykepJB4kI_ks", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
|
}, pkg.MustDecode("Q5DluWQCAeohLoiGRImurwFp3vdz9IfQCoj7Fuhh73s4KQPRHpEQEnHTdNHmB8Fx"), nil},
|
||||||
|
|
||||||
|
{"testtool net", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0500},
|
||||||
|
|
||||||
|
"check": {Mode: 0400, Data: []byte("net")},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "."},
|
||||||
|
|
||||||
|
{Mode: 0400, Path: "check", Data: []byte("net")},
|
||||||
|
}, pkg.MustDecode("a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W"), nil},
|
||||||
|
|
||||||
|
{"sample exec net container", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
|
|
||||||
|
"checksum": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
|
||||||
|
"checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W/check": {Mode: 0400, Data: []byte("net")},
|
||||||
|
|
||||||
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
|
"identifier/G8qPxD9puvvoOVV7lrT80eyDeIl3G_CCFoKw12c8mCjMdG1zF7NEPkwYpNubClK3": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W")},
|
||||||
|
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||||
|
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
|
||||||
|
"temp": {Mode: fs.ModeDir | 0700},
|
||||||
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
||||||
|
{Mode: 0400, Path: "checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb", Data: []byte{}},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W"},
|
||||||
|
{Mode: 0400, Path: "checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W/check", Data: []byte("net")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/G8qPxD9puvvoOVV7lrT80eyDeIl3G_CCFoKw12c8mCjMdG1zF7NEPkwYpNubClK3", Data: []byte("../checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
|
}, pkg.MustDecode("bPYvvqxpfV7xcC1EptqyKNK1klLJgYHMDkzBcoOyK6j_Aj5hb0mXNPwTwPSK5F6Z"), nil},
|
||||||
|
|
||||||
|
{"sample exec container overlay root", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
|
|
||||||
|
"checksum": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
|
||||||
|
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
||||||
|
|
||||||
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
|
"identifier/RdMA-mubnrHuu3Ky1wWyxauSYCO0ZH_zCPUj3uDHqkfwv5sGcByoF_g5PjlGiClb": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
|
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
|
||||||
|
"temp": {Mode: fs.ModeDir | 0700},
|
||||||
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
|
||||||
|
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/RdMA-mubnrHuu3Ky1wWyxauSYCO0ZH_zCPUj3uDHqkfwv5sGcByoF_g5PjlGiClb", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
|
}, pkg.MustDecode("PO2DSSCa4yoSgEYRcCSZfQfwow1yRigL3Ry-hI0RDI4aGuFBha-EfXeSJnG_5_Rl"), nil},
|
||||||
|
|
||||||
|
{"sample exec container overlay work", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
|
|
||||||
|
"checksum": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
|
||||||
|
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
||||||
|
|
||||||
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
|
"identifier/5hlaukCirnXE4W_RSLJFOZN47Z5RiHnacXzdFp_70cLgiJUGR6cSb_HaFftkzi0-": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
|
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
|
||||||
|
"temp": {Mode: fs.ModeDir | 0700},
|
||||||
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
|
||||||
|
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/5hlaukCirnXE4W_RSLJFOZN47Z5RiHnacXzdFp_70cLgiJUGR6cSb_HaFftkzi0-", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
|
}, pkg.MustDecode("iaRt6l_Wm2n-h5UsDewZxQkCmjZjyL8r7wv32QT2kyV55-Lx09Dq4gfg9BiwPnKs"), nil},
|
||||||
|
|
||||||
|
{"sample exec container multiple layers", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
|
|
||||||
|
"checksum": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
|
||||||
|
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
|
||||||
|
"checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK/check": {Mode: 0400, Data: []byte("layers")},
|
||||||
|
|
||||||
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
|
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||||
|
"identifier/B-kc5iJMx8GtlCua4dz6BiJHnDAOUfPjgpbKq4e-QEn0_CZkSYs3fOA1ve06qMs2": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK")},
|
||||||
|
"identifier/p1t_drXr34i-jZNuxDMLaMOdL6tZvQqhavNafGynGqxOZoXAUTSn7kqNh3Ovv3DT": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
|
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
|
||||||
|
"temp": {Mode: fs.ModeDir | 0700},
|
||||||
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
|
||||||
|
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
||||||
|
{Mode: 0400, Path: "checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb", Data: []byte{}},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK"},
|
||||||
|
{Mode: 0400, Path: "checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK/check", Data: []byte("layers")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/B-kc5iJMx8GtlCua4dz6BiJHnDAOUfPjgpbKq4e-QEn0_CZkSYs3fOA1ve06qMs2", Data: []byte("../checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/p1t_drXr34i-jZNuxDMLaMOdL6tZvQqhavNafGynGqxOZoXAUTSn7kqNh3Ovv3DT", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
|
}, pkg.MustDecode("O2YzyR7IUGU5J2CADy0hUZ3A5NkP_Vwzs4UadEdn2oMZZVWRtH0xZGJ3HXiimTnZ"), nil},
|
||||||
|
|
||||||
|
{"sample exec container layer promotion", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
|
|
||||||
|
"checksum": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
|
||||||
|
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
||||||
|
|
||||||
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
|
"identifier/kvJIqZo5DKFOxC2ZQ-8_nPaQzEAz9cIm3p6guO-uLqm-xaiPu7oRkSnsu411jd_U": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
"identifier/xXTIYcXmgJWNLC91c417RRrNM9cjELwEZHpGvf8Fk_GNP5agRJp_SicD0w9aMeLJ": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
|
|
||||||
|
"temp": {Mode: fs.ModeDir | 0700},
|
||||||
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
|
||||||
|
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/kvJIqZo5DKFOxC2ZQ-8_nPaQzEAz9cIm3p6guO-uLqm-xaiPu7oRkSnsu411jd_U", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/xXTIYcXmgJWNLC91c417RRrNM9cjELwEZHpGvf8Fk_GNP5agRJp_SicD0w9aMeLJ", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
|
}, pkg.MustDecode("3EaW6WibLi9gl03_UieiFPaFcPy5p4x3JPxrnLJxGaTI-bh3HU9DK9IMx7c3rrNm"), nil},
|
||||||
|
|
||||||
|
{"sample file short", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
|
|
||||||
|
"checksum": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX": {Mode: 0400, Data: []byte{0}},
|
||||||
|
|
||||||
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
|
"identifier/3376ALA7hIUm2LbzH2fDvRezgzod1eTK_G6XjyOgbM2u-6swvkFaF0BOwSl_juBi": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
|
||||||
|
|
||||||
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||||
|
{Mode: 0400, Path: "checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX", Data: []byte{0}},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/3376ALA7hIUm2LbzH2fDvRezgzod1eTK_G6XjyOgbM2u-6swvkFaF0BOwSl_juBi", Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
|
}, pkg.MustDecode("iR6H5OIsyOW4EwEgtm9rGzGF6DVtyHLySEtwnFE8bnus9VJcoCbR4JIek7Lw-vwT"), nil},
|
||||||
|
}
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
t.Run("roundtrip", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if _, err := pkg.Flatten(
|
||||||
|
tc.fsys,
|
||||||
|
".",
|
||||||
|
&buf,
|
||||||
|
); !reflect.DeepEqual(err, tc.err) {
|
||||||
|
t.Fatalf("Flatten: error = %v, want %v", err, tc.err)
|
||||||
|
} else if tc.err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
s := pkg.NewDirScanner(bytes.NewReader(buf.Bytes()), true)
|
||||||
|
var got []pkg.FlatEntry
|
||||||
|
for s.Scan() {
|
||||||
|
got = append(got, *s.Entry())
|
||||||
|
}
|
||||||
|
if err := s.Err(); err != nil {
|
||||||
|
t.Fatalf("Err: error = %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !reflect.DeepEqual(got, tc.entries) {
|
||||||
|
t.Fatalf("Scan: %#v, want %#v", got, tc.entries)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
if tc.err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("hash", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
var got pkg.Checksum
|
||||||
|
if err := pkg.HashFS(&got, tc.fsys, "."); err != nil {
|
||||||
|
t.Fatalf("HashFS: error = %v", err)
|
||||||
|
} else if got != tc.sum {
|
||||||
|
t.Fatalf("HashFS: %v", &pkg.ChecksumMismatchError{
|
||||||
|
Got: got,
|
||||||
|
Want: tc.sum,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
499
internal/pkg/exec.go
Normal file
@@ -0,0 +1,499 @@
package pkg

import (
	"bufio"
	"context"
	"errors"
	"fmt"
	"io"
	"os"
	"os/exec"
	"path"
	"slices"
	"strconv"
	"syscall"
	"time"
	"unique"

	"hakurei.app/container"
	"hakurei.app/container/check"
	"hakurei.app/container/fhs"
	"hakurei.app/container/seccomp"
	"hakurei.app/container/std"
	"hakurei.app/message"
)

// AbsWork is the container pathname [CureContext.GetWorkDir] is mounted on.
var AbsWork = fhs.AbsRoot.Append("work/")

// ExecPath is a slice of [Artifact] and the [check.Absolute] pathname they are
// made available under in the container.
type ExecPath struct {
	// Pathname in the container mount namespace.
	P *check.Absolute
	// Artifacts to mount on the pathname, must contain at least one [Artifact].
	// If there are multiple entries or W is true, P is set up as an overlay
	// mount, and entries of A must not implement [FileArtifact].
	A []Artifact
	// Whether to make the mount point writable via the temp directory.
	W bool
}

// layers returns pathnames collected from A deduplicated by checksum.
func (p *ExecPath) layers(f *FContext) []*check.Absolute {
	msg := f.GetMessage()

	layers := make([]*check.Absolute, 0, len(p.A))
	checksums := make(map[unique.Handle[Checksum]]struct{}, len(p.A))
	for i := range p.A {
		d := p.A[len(p.A)-1-i]
		pathname, checksum := f.GetArtifact(d)
		if _, ok := checksums[checksum]; ok {
			if msg.IsVerbose() {
				msg.Verbosef(
					"promoted layer %d as %s",
					len(p.A)-1-i, reportName(d, f.cache.Ident(d)),
				)
			}
			continue
		}
		checksums[checksum] = struct{}{}
		layers = append(layers, pathname)
	}
	slices.Reverse(layers)
	return layers
}
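
The walk above iterates A from the back, so when two entries share a checksum the later entry is kept and the earlier one is skipped (and reported as promoted); the final reverse restores mount order. A minimal standalone sketch of the same keep-last deduplication on plain strings, illustrative only and relying on the slices import already present in this file:

// dedupKeepLast mirrors the layers walk: it drops duplicates while keeping the
// last occurrence of each value and preserving the order of what remains.
func dedupKeepLast(values []string) []string {
	seen := make(map[string]struct{}, len(values))
	kept := make([]string, 0, len(values))
	for i := len(values) - 1; i >= 0; i-- {
		if _, ok := seen[values[i]]; ok {
			continue // an equal later entry was already kept
		}
		seen[values[i]] = struct{}{}
		kept = append(kept, values[i])
	}
	slices.Reverse(kept)
	return kept
}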

// Path returns a populated [ExecPath].
func Path(pathname *check.Absolute, writable bool, a ...Artifact) ExecPath {
	return ExecPath{pathname, a, writable}
}

// MustPath is like [Path], but takes a string pathname via [check.MustAbs].
func MustPath(pathname string, writable bool, a ...Artifact) ExecPath {
	return ExecPath{check.MustAbs(pathname), a, writable}
}
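
A construction sketch for ExecPath values; the artifact variables and pathnames below are placeholders rather than package API. With a single read-only artifact, cure performs a plain bind mount; with several artifacts or writable set, it builds an overlay, matching the behaviour documented on ExecPath and implemented in cure further down.

// Illustrative only: libArtifact and dataArtifact stand in for real values.
var libArtifact, dataArtifact Artifact

var examplePaths = []ExecPath{
	// single read-only artifact: bind mounted directly at /opt
	MustPath("/opt", false, libArtifact),
	// multiple artifacts: /usr/share becomes a read-only overlay
	MustPath("/usr/share", false, libArtifact, dataArtifact),
	// writable: overlay with its upperdir under the temp directory
	Path(check.MustAbs("/var/lib/example"), true, dataArtifact),
}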

const (
	// ExecTimeoutDefault replaces out of range [NewExec] timeout values.
	ExecTimeoutDefault = 15 * time.Minute
	// ExecTimeoutMax is the arbitrary upper bound of [NewExec] timeout.
	ExecTimeoutMax = 48 * time.Hour
)

// An execArtifact is an [Artifact] that produces its output by running a
// program that is part of another [Artifact] in a [container].
//
// Methods of execArtifact do not modify any struct field or underlying arrays
// referred to by slices.
type execArtifact struct {
	// Caller-supplied user-facing reporting name, guaranteed to be nonzero
	// during initialisation.
	name string
	// Caller-supplied inner mount points.
	paths []ExecPath

	// Passed through to [container.Params].
	dir *check.Absolute
	// Passed through to [container.Params].
	env []string
	// Passed through to [container.Params].
	path *check.Absolute
	// Passed through to [container.Params].
	args []string

	// Duration the initial process is allowed to run. The zero value is
	// equivalent to [ExecTimeoutDefault].
	timeout time.Duration

	// Caller-supplied exclusivity value, returned as is by IsExclusive.
	exclusive bool
}

var _ fmt.Stringer = new(execArtifact)

// execNetArtifact is like execArtifact but implements [KnownChecksum] and has
// its resulting container keep the host net namespace.
type execNetArtifact struct {
	checksum Checksum

	execArtifact
}

var _ KnownChecksum = new(execNetArtifact)

// Checksum returns the caller-supplied checksum.
func (a *execNetArtifact) Checksum() Checksum { return a.checksum }

// Kind returns the hardcoded [Kind] constant.
func (*execNetArtifact) Kind() Kind { return KindExecNet }

// Cure cures the [Artifact] in the container described by the caller. The
// container retains host networking.
func (a *execNetArtifact) Cure(f *FContext) error {
	return a.cure(f, true)
}

// NewExec returns a new [Artifact] that executes the program path in a
// container with specified paths bind mounted read-only in order. A private
// instance of /proc and /dev is made available to the container.
//
// The working and temporary directories are both created and mounted writable
// on [AbsWork] and [fhs.AbsTmp] respectively. If one or more paths target
// [AbsWork], the final entry is set up as a writable overlay mount on /work for
// which the upperdir is the host side work directory. In this configuration,
// the W field is ignored, and the program must avoid causing whiteout files to
// be created. Cure fails if upperdir ends up with entries other than
// directories, regular files or symlinks.
//
// If checksum is non-nil, the resulting [Artifact] implements [KnownChecksum]
// and its container runs in the host net namespace.
//
// The container is allowed to run for the specified duration before the
// initial process and all processes originating from it are terminated. A zero
// or negative timeout value is equivalent to [ExecTimeoutDefault], a timeout
// value greater than [ExecTimeoutMax] is equivalent to [ExecTimeoutMax].
//
// The user-facing name and exclusivity value are not accessible from the
// container and do not affect the curing outcome. Because of this, they are
// omitted from parameter data for computing identifier.
func NewExec(
	name string,
	checksum *Checksum,
	timeout time.Duration,
	exclusive bool,

	dir *check.Absolute,
	env []string,
	pathname *check.Absolute,
	args []string,

	paths ...ExecPath,
) Artifact {
	if name == "" {
		name = "exec-" + path.Base(pathname.String())
	}
	if timeout <= 0 {
		timeout = ExecTimeoutDefault
	}
	if timeout > ExecTimeoutMax {
		timeout = ExecTimeoutMax
	}
	a := execArtifact{name, paths, dir, env, pathname, args, timeout, exclusive}
	if checksum == nil {
		return &a
	}
	return &execNetArtifact{*checksum, a}
}
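
A usage sketch mirroring the call shape exercised in exec_test.go; the artifact, environment and pathnames are placeholders. A nil checksum keeps the container detached from the host network, a zero timeout is clamped to ExecTimeoutDefault, and an empty name would be derived from the program path as exec-tool.

// buildExample returns an exec artifact that runs /opt/bin/tool in the cure
// container with toolchain (a placeholder dependency) mounted read-only.
func buildExample(toolchain Artifact) Artifact {
	return NewExec(
		"example-build", // reporting name
		nil,             // no known checksum: no host networking
		0,               // clamped to ExecTimeoutDefault
		false,           // not exclusive

		AbsWork,                        // working directory in the container
		[]string{"EXAMPLE_BUILD=1"},    // environment, placeholder value
		check.MustAbs("/opt/bin/tool"), // program to execute
		[]string{"tool", "build"},      // argv

		MustPath("/opt", false, toolchain),
	)
}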

// Kind returns the hardcoded [Kind] constant.
func (*execArtifact) Kind() Kind { return KindExec }

// Params writes paths, executable pathname and args.
func (a *execArtifact) Params(ctx *IContext) {
	ctx.WriteString(a.name)

	ctx.WriteUint32(uint32(len(a.paths)))
	for _, p := range a.paths {
		if p.P != nil {
			ctx.WriteString(p.P.String())
		} else {
			ctx.WriteString("invalid P\x00")
		}

		ctx.WriteUint32(uint32(len(p.A)))
		for _, d := range p.A {
			ctx.WriteIdent(d)
		}

		if p.W {
			ctx.WriteUint32(1)
		} else {
			ctx.WriteUint32(0)
		}
	}

	ctx.WriteString(a.dir.String())

	ctx.WriteUint32(uint32(len(a.env)))
	for _, e := range a.env {
		ctx.WriteString(e)
	}

	ctx.WriteString(a.path.String())

	ctx.WriteUint32(uint32(len(a.args)))
	for _, arg := range a.args {
		ctx.WriteString(arg)
	}

	ctx.WriteUint32(uint32(a.timeout & 0xffffffff))
	ctx.WriteUint32(uint32(a.timeout >> 32))

	if a.exclusive {
		ctx.WriteUint32(1)
	} else {
		ctx.WriteUint32(0)
	}
}
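
Params stores the 64-bit timeout as two uint32 halves, low half first, which readExecArtifact below reassembles. A round-trip sketch of that encoding, assuming the clamped timeout is never negative; the helper names are illustrative and not part of the package:

// splitTimeout and joinTimeout show the duration encoding used by Params and
// readExecArtifact.
func splitTimeout(d time.Duration) (lo, hi uint32) {
	return uint32(uint64(d) & 0xffffffff), uint32(uint64(d) >> 32)
}

func joinTimeout(lo, hi uint32) time.Duration {
	return time.Duration(uint64(lo) | uint64(hi)<<32)
}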

// readExecArtifact interprets IR values and returns the address of execArtifact
// or execNetArtifact.
func readExecArtifact(r *IRReader, net bool) Artifact {
	r.DiscardAll()

	name := r.ReadString()

	sz := r.ReadUint32()
	if sz > irMaxDeps {
		panic(ErrIRDepend)
	}
	paths := make([]ExecPath, sz)
	for i := range paths {
		paths[i].P = check.MustAbs(r.ReadString())

		sz = r.ReadUint32()
		if sz > irMaxDeps {
			panic(ErrIRDepend)
		}
		paths[i].A = make([]Artifact, sz)
		for j := range paths[i].A {
			paths[i].A[j] = r.ReadIdent()
		}

		paths[i].W = r.ReadUint32() != 0
	}

	dir := check.MustAbs(r.ReadString())

	sz = r.ReadUint32()
	if sz > irMaxValues {
		panic(ErrIRValues)
	}
	env := make([]string, sz)
	for i := range env {
		env[i] = r.ReadString()
	}

	pathname := check.MustAbs(r.ReadString())

	sz = r.ReadUint32()
	if sz > irMaxValues {
		panic(ErrIRValues)
	}
	args := make([]string, sz)
	for i := range args {
		args[i] = r.ReadString()
	}

	timeout := time.Duration(r.ReadUint32())
	timeout |= time.Duration(r.ReadUint32()) << 32

	exclusive := r.ReadUint32() != 0

	checksum, ok := r.Finalise()

	var checksumP *Checksum
	if net {
		if !ok {
			panic(ErrExpectedChecksum)
		}
		checksumVal := checksum.Value()
		checksumP = &checksumVal
	} else {
		if ok {
			panic(ErrUnexpectedChecksum)
		}
	}

	return NewExec(
		name, checksumP, timeout, exclusive, dir, env, pathname, args, paths...,
	)
}

func init() {
	register(KindExec,
		func(r *IRReader) Artifact { return readExecArtifact(r, false) })
	register(KindExecNet,
		func(r *IRReader) Artifact { return readExecArtifact(r, true) })
}

// Dependencies returns a slice of all artifacts collected from caller-supplied
// [ExecPath].
func (a *execArtifact) Dependencies() []Artifact {
	artifacts := make([][]Artifact, 0, len(a.paths))
	for _, p := range a.paths {
		artifacts = append(artifacts, p.A)
	}
	return slices.Concat(artifacts...)
}

// IsExclusive returns the caller-supplied exclusivity value.
func (a *execArtifact) IsExclusive() bool { return a.exclusive }

// String returns the caller-supplied reporting name.
func (a *execArtifact) String() string { return a.name }

// Cure cures the [Artifact] in the container described by the caller.
func (a *execArtifact) Cure(f *FContext) (err error) {
	return a.cure(f, false)
}

const (
	// execWaitDelay is passed through to [container.Params].
	execWaitDelay = time.Nanosecond
)

// scanVerbose prefixes program output for a verbose [message.Msg].
func scanVerbose(
	msg message.Msg,
	done chan<- struct{},
	prefix string,
	r io.Reader,
) {
	defer close(done)
	s := bufio.NewScanner(r)
	s.Buffer(
		make([]byte, bufio.MaxScanTokenSize),
		bufio.MaxScanTokenSize<<12,
	)
	for s.Scan() {
		msg.Verbose(prefix, s.Text())
	}
	if err := s.Err(); err != nil && !errors.Is(err, os.ErrClosed) {
		msg.Verbose("*"+prefix, err)
	}
}

// cure is like Cure but allows optional host net namespace. This is used for
// the [KnownChecksum] variant where networking is allowed.
func (a *execArtifact) cure(f *FContext, hostNet bool) (err error) {
	overlayWorkIndex := -1
	for i, p := range a.paths {
		if p.P == nil || len(p.A) == 0 {
			return os.ErrInvalid
		}
		if p.P.Is(AbsWork) {
			overlayWorkIndex = i
		}
	}

	var artifactCount int
	for _, p := range a.paths {
		artifactCount += len(p.A)
	}

	ctx, cancel := context.WithTimeout(f.Unwrap(), a.timeout)
	defer cancel()

	z := container.New(ctx, f.GetMessage())
	z.WaitDelay = execWaitDelay
	z.SeccompPresets |= std.PresetStrict & ^std.PresetDenyNS
	z.SeccompFlags |= seccomp.AllowMultiarch
	z.ParentPerm = 0700
	z.HostNet = hostNet
	z.Hostname = "cure"
	if z.HostNet {
		z.Hostname = "cure-net"
	}
	z.Uid, z.Gid = (1<<10)-1, (1<<10)-1
	if msg := f.GetMessage(); msg.IsVerbose() {
		var stdout, stderr io.ReadCloser
		if stdout, err = z.StdoutPipe(); err != nil {
			return
		}
		if stderr, err = z.StderrPipe(); err != nil {
			_ = stdout.Close()
			return
		}
		defer func() {
			if err != nil && !errors.As(err, new(*exec.ExitError)) {
				_ = stdout.Close()
				_ = stderr.Close()
			}
		}()

		stdoutDone, stderrDone := make(chan struct{}), make(chan struct{})
		go scanVerbose(msg, stdoutDone, "("+a.name+":1)", stdout)
		go scanVerbose(msg, stderrDone, "("+a.name+":2)", stderr)
		defer func() { <-stdoutDone; <-stderrDone }()
	}

	z.Dir, z.Env, z.Path, z.Args = a.dir, a.env, a.path, a.args
	z.Grow(len(a.paths) + 4)

	temp, work := f.GetTempDir(), f.GetWorkDir()
	for i, b := range a.paths {
		if i == overlayWorkIndex {
			if err = os.MkdirAll(work.String(), 0700); err != nil {
				return
			}
			tempWork := temp.Append(".work")
			if err = os.MkdirAll(tempWork.String(), 0700); err != nil {
				return
			}
			z.Overlay(
				AbsWork,
				work,
				tempWork,
				b.layers(f)...,
			)
			continue
		}

		if a.paths[i].W {
			tempUpper, tempWork := temp.Append(
				".upper", strconv.Itoa(i),
			), temp.Append(
				".work", strconv.Itoa(i),
			)
			if err = os.MkdirAll(tempUpper.String(), 0700); err != nil {
				return
			}
			if err = os.MkdirAll(tempWork.String(), 0700); err != nil {
				return
			}
			z.Overlay(b.P, tempUpper, tempWork, b.layers(f)...)
		} else if len(b.A) == 1 {
			pathname, _ := f.GetArtifact(b.A[0])
			z.Bind(pathname, b.P, 0)
		} else {
			z.OverlayReadonly(b.P, b.layers(f)...)
		}
	}
	if overlayWorkIndex < 0 {
		z.Bind(
			work,
			AbsWork,
			std.BindWritable|std.BindEnsure,
		)
	}
	z.Bind(
		f.GetTempDir(),
		fhs.AbsTmp,
		std.BindWritable|std.BindEnsure,
	)
	z.Proc(fhs.AbsProc).Dev(fhs.AbsDev, true)

	if err = z.Start(); err != nil {
		return
	}
	if err = z.Serve(); err != nil {
		return
	}
	if err = z.Wait(); err != nil {
		return
	}

	// do not allow empty directories to succeed
	for {
		err = syscall.Rmdir(work.String())
		if err != syscall.EINTR {
			break
		}
	}
	if err != nil && errors.Is(err, syscall.ENOTEMPTY) {
		err = nil
	}
	return
}
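
The closing loop above retries rmdir across EINTR and treats ENOTEMPTY as the expected outcome, so a cure that produced nothing does not leave an empty work directory behind. A distilled sketch of the pattern; the helper name is illustrative:

// removeIfEmpty deletes dir only when it is empty, retrying across EINTR and
// tolerating ENOTEMPTY, mirroring the loop at the end of cure.
func removeIfEmpty(dir string) error {
	for {
		err := syscall.Rmdir(dir)
		if err == syscall.EINTR {
			continue // interrupted, retry the removal
		}
		if errors.Is(err, syscall.ENOTEMPTY) {
			err = nil // directory has content, nothing to remove
		}
		return err
	}
}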
339
internal/pkg/exec_test.go
Normal file
@@ -0,0 +1,339 @@
package pkg_test

//go:generate env CGO_ENABLED=0 go build -tags testtool -o testdata/testtool ./testdata

import (
	_ "embed"
	"encoding/gob"
	"errors"
	"net"
	"os"
	"os/exec"
	"slices"
	"testing"
	"unique"

	"hakurei.app/container/check"
	"hakurei.app/container/stub"
	"hakurei.app/hst"
	"hakurei.app/internal/pkg"
)

// testtoolBin is the container test tool binary made available to the
// execArtifact for testing its curing environment.
//
//go:embed testdata/testtool
var testtoolBin []byte

func TestExec(t *testing.T) {
	t.Parallel()

	wantChecksumOffline := pkg.MustDecode(
		"GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9",
	)

	checkWithCache(t, []cacheTestCase{
		{"offline", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
			c.SetStrict(true)
			testtool, testtoolDestroy := newTesttool()

			cureMany(t, c, []cureStep{
				{"container", pkg.NewExec(
					"exec-offline", nil, 0, false,
					pkg.AbsWork,
					[]string{"HAKUREI_TEST=1"},
					check.MustAbs("/opt/bin/testtool"),
					[]string{"testtool"},

					pkg.MustPath("/file", false, newStubFile(
						pkg.KindHTTPGet,
						pkg.ID{0xfe, 0},
						nil,
						nil, nil,
					)),
					pkg.MustPath("/.hakurei", false, &stubArtifact{
						kind: pkg.KindTar,
						params: []byte("empty directory"),
						cure: func(t *pkg.TContext) error {
							return os.MkdirAll(t.GetWorkDir().String(), 0700)
						},
					}),
					pkg.MustPath("/opt", false, testtool),
				), ignorePathname, wantChecksumOffline, nil},

				{"error passthrough", pkg.NewExec(
					"", nil, 0, true,
					pkg.AbsWork,
					[]string{"HAKUREI_TEST=1"},
					check.MustAbs("/opt/bin/testtool"),
					[]string{"testtool"},

					pkg.MustPath("/proc/nonexistent", false, &stubArtifact{
						kind: pkg.KindTar,
						params: []byte("doomed artifact"),
						cure: func(t *pkg.TContext) error {
							return stub.UniqueError(0xcafe)
						},
					}),
				), nil, pkg.Checksum{}, &pkg.DependencyCureError{
					{
						Ident: unique.Make(pkg.ID(pkg.MustDecode(
							"Sowo6oZRmG6xVtUaxB6bDWZhVsqAJsIJWUp0OPKlE103cY0lodx7dem8J-qQF0Z1",
						))),
						Err: stub.UniqueError(0xcafe),
					},
				}},

				{"invalid paths", pkg.NewExec(
					"", nil, 0, false,
					pkg.AbsWork,
					[]string{"HAKUREI_TEST=1"},
					check.MustAbs("/opt/bin/testtool"),
					[]string{"testtool"},

					pkg.ExecPath{},
				), nil, pkg.Checksum{}, os.ErrInvalid},
			})

			// check init failure passthrough
			var exitError *exec.ExitError
			if _, _, err := c.Cure(pkg.NewExec(
				"", nil, 0, false,
				pkg.AbsWork,
				nil,
				check.MustAbs("/opt/bin/testtool"),
				[]string{"testtool"},
			)); !errors.As(err, &exitError) ||
				exitError.ExitCode() != hst.ExitFailure {
				t.Fatalf("Cure: error = %v, want init exit status 1", err)
			}

			testtoolDestroy(t, base, c)
		}, pkg.MustDecode("Q5DluWQCAeohLoiGRImurwFp3vdz9IfQCoj7Fuhh73s4KQPRHpEQEnHTdNHmB8Fx")},

		{"net", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
			c.SetStrict(true)
			testtool, testtoolDestroy := newTesttool()

			wantChecksum := pkg.MustDecode(
				"a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W",
			)
			cureMany(t, c, []cureStep{
				{"container", pkg.NewExec(
					"exec-net", &wantChecksum, 0, false,
					pkg.AbsWork,
					[]string{"HAKUREI_TEST=1"},
					check.MustAbs("/opt/bin/testtool"),
					[]string{"testtool", "net"},

					pkg.MustPath("/file", false, newStubFile(
						pkg.KindHTTPGet,
						pkg.ID{0xfe, 0},
						nil,
						nil, nil,
					)),
					pkg.MustPath("/.hakurei", false, &stubArtifact{
						kind: pkg.KindTar,
						params: []byte("empty directory"),
						cure: func(t *pkg.TContext) error {
							return os.MkdirAll(t.GetWorkDir().String(), 0700)
						},
					}),
					pkg.MustPath("/opt", false, testtool),
				), ignorePathname, wantChecksum, nil},
			})

			testtoolDestroy(t, base, c)
		}, pkg.MustDecode("bPYvvqxpfV7xcC1EptqyKNK1klLJgYHMDkzBcoOyK6j_Aj5hb0mXNPwTwPSK5F6Z")},

		{"overlay root", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
|
c.SetStrict(true)
|
||||||
|
testtool, testtoolDestroy := newTesttool()
|
||||||
|
|
||||||
|
cureMany(t, c, []cureStep{
|
||||||
|
{"container", pkg.NewExec(
|
||||||
|
"exec-overlay-root", nil, 0, false,
|
||||||
|
pkg.AbsWork,
|
||||||
|
[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
|
||||||
|
check.MustAbs("/opt/bin/testtool"),
|
||||||
|
[]string{"testtool"},
|
||||||
|
|
||||||
|
pkg.MustPath("/", true, &stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
params: []byte("empty directory"),
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||||
|
},
|
||||||
|
}),
|
||||||
|
pkg.MustPath("/opt", false, testtool),
|
||||||
|
), ignorePathname, wantChecksumOffline, nil},
|
||||||
|
})
|
||||||
|
|
||||||
|
testtoolDestroy(t, base, c)
|
||||||
|
}, pkg.MustDecode("PO2DSSCa4yoSgEYRcCSZfQfwow1yRigL3Ry-hI0RDI4aGuFBha-EfXeSJnG_5_Rl")},
|
||||||
|
|
||||||
|
{"overlay work", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
|
c.SetStrict(true)
|
||||||
|
testtool, testtoolDestroy := newTesttool()
|
||||||
|
|
||||||
|
cureMany(t, c, []cureStep{
|
||||||
|
{"container", pkg.NewExec(
|
||||||
|
"exec-overlay-work", nil, 0, false,
|
||||||
|
pkg.AbsWork,
|
||||||
|
[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
|
||||||
|
check.MustAbs("/work/bin/testtool"),
|
||||||
|
[]string{"testtool"},
|
||||||
|
|
||||||
|
pkg.MustPath("/", true, &stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
params: []byte("empty directory"),
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||||
|
},
|
||||||
|
}), pkg.MustPath("/work/", false, &stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
params: []byte("empty directory"),
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||||
|
},
|
||||||
|
}), pkg.Path(pkg.AbsWork, false /* ignored */, testtool),
|
||||||
|
), ignorePathname, wantChecksumOffline, nil},
|
||||||
|
})
|
||||||
|
|
||||||
|
testtoolDestroy(t, base, c)
|
||||||
|
}, pkg.MustDecode("iaRt6l_Wm2n-h5UsDewZxQkCmjZjyL8r7wv32QT2kyV55-Lx09Dq4gfg9BiwPnKs")},
|
||||||
|
|
||||||
|
{"multiple layers", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
|
c.SetStrict(true)
|
||||||
|
testtool, testtoolDestroy := newTesttool()
|
||||||
|
|
||||||
|
cureMany(t, c, []cureStep{
|
||||||
|
{"container", pkg.NewExec(
|
||||||
|
"exec-multiple-layers", nil, 0, false,
|
||||||
|
pkg.AbsWork,
|
||||||
|
[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
|
||||||
|
check.MustAbs("/opt/bin/testtool"),
|
||||||
|
[]string{"testtool", "layers"},
|
||||||
|
|
||||||
|
pkg.MustPath("/", true, &stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
params: []byte("empty directory"),
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||||
|
},
|
||||||
|
}, &stubArtifactF{
|
||||||
|
kind: pkg.KindExec,
|
||||||
|
params: []byte("test sample with dependencies"),
|
||||||
|
|
||||||
|
deps: slices.Repeat([]pkg.Artifact{newStubFile(
|
||||||
|
pkg.KindHTTPGet,
|
||||||
|
pkg.ID{0xfe, 0},
|
||||||
|
nil,
|
||||||
|
nil, nil,
|
||||||
|
), &stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
params: []byte("empty directory"),
|
||||||
|
|
||||||
|
// this is queued and might run instead of the other
|
||||||
|
// one so do not leave it as nil
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||||
|
},
|
||||||
|
}}, 1<<5 /* concurrent cache hits */),
|
||||||
|
|
||||||
|
cure: func(f *pkg.FContext) error {
|
||||||
|
work := f.GetWorkDir()
|
||||||
|
if err := os.MkdirAll(work.String(), 0700); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return os.WriteFile(work.Append("check").String(), []byte("layers"), 0400)
|
||||||
|
},
|
||||||
|
}),
|
||||||
|
pkg.MustPath("/opt", false, testtool),
|
||||||
|
), ignorePathname, wantChecksumOffline, nil},
|
||||||
|
})
|
||||||
|
|
||||||
|
testtoolDestroy(t, base, c)
|
||||||
|
}, pkg.MustDecode("O2YzyR7IUGU5J2CADy0hUZ3A5NkP_Vwzs4UadEdn2oMZZVWRtH0xZGJ3HXiimTnZ")},
|
||||||
|
|
||||||
|
{"overlay layer promotion", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
|
c.SetStrict(true)
|
||||||
|
testtool, testtoolDestroy := newTesttool()
|
||||||
|
|
||||||
|
cureMany(t, c, []cureStep{
|
||||||
|
{"container", pkg.NewExec(
|
||||||
|
"exec-layer-promotion", nil, 0, true,
|
||||||
|
pkg.AbsWork,
|
||||||
|
[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
|
||||||
|
check.MustAbs("/opt/bin/testtool"),
|
||||||
|
[]string{"testtool", "promote"},
|
||||||
|
|
||||||
|
pkg.MustPath("/", true, &stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
params: []byte("another empty directory"),
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||||
|
},
|
||||||
|
}, &stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
params: []byte("empty directory"),
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||||
|
},
|
||||||
|
}),
|
||||||
|
pkg.MustPath("/opt", false, testtool),
|
||||||
|
), ignorePathname, wantChecksumOffline, nil},
|
||||||
|
})
|
||||||
|
|
||||||
|
testtoolDestroy(t, base, c)
|
||||||
|
}, pkg.MustDecode("3EaW6WibLi9gl03_UieiFPaFcPy5p4x3JPxrnLJxGaTI-bh3HU9DK9IMx7c3rrNm")},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// newTesttool returns an [Artifact] that cures into testtoolBin. The returned
|
||||||
|
// function must be called at the end of the test but not deferred.
|
||||||
|
func newTesttool() (
|
||||||
|
testtool pkg.Artifact,
|
||||||
|
testtoolDestroy func(t *testing.T, base *check.Absolute, c *pkg.Cache),
|
||||||
|
) {
|
||||||
|
// testtoolBin is built during go:generate and is not deterministic
|
||||||
|
testtool = overrideIdent{pkg.ID{0xfe, 0xff}, &stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
work := t.GetWorkDir()
|
||||||
|
if err := os.MkdirAll(
|
||||||
|
work.Append("bin").String(),
|
||||||
|
0700,
|
||||||
|
); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if ift, err := net.Interfaces(); err != nil {
|
||||||
|
return err
|
||||||
|
} else {
|
||||||
|
var f *os.File
|
||||||
|
if f, err = os.Create(t.GetWorkDir().Append(
|
||||||
|
"ift",
|
||||||
|
).String()); err != nil {
|
||||||
|
return err
|
||||||
|
} else {
|
||||||
|
err = gob.NewEncoder(f).Encode(ift)
|
||||||
|
closeErr := f.Close()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if closeErr != nil {
|
||||||
|
return closeErr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return os.WriteFile(t.GetWorkDir().Append(
|
||||||
|
"bin",
|
||||||
|
"testtool",
|
||||||
|
).String(), testtoolBin, 0500)
|
||||||
|
},
|
||||||
|
}}
|
||||||
|
testtoolDestroy = newDestroyArtifactFunc(testtool)
|
||||||
|
return
|
||||||
|
}
|
||||||
81 internal/pkg/file.go (new file)
@@ -0,0 +1,81 @@
package pkg

import (
	"bytes"
	"crypto/sha512"
	"fmt"
	"io"
)

// A fileArtifact is an [Artifact] that cures into data known ahead of time.
type fileArtifact []byte

var _ KnownChecksum = new(fileArtifact)

// fileArtifactNamed embeds fileArtifact alongside a caller-supplied name.
type fileArtifactNamed struct {
	fileArtifact
	// Caller-supplied user-facing reporting name.
	name string
}

var _ fmt.Stringer = new(fileArtifactNamed)
var _ KnownChecksum = new(fileArtifactNamed)

// String returns the caller-supplied reporting name.
func (a *fileArtifactNamed) String() string { return a.name }

// Params writes the caller-supplied reporting name and the file body.
func (a *fileArtifactNamed) Params(ctx *IContext) {
	ctx.WriteString(a.name)
	ctx.Write(a.fileArtifact)
}

// NewFile returns a [FileArtifact] that cures into a caller-supplied byte slice.
//
// Caller must not modify data after NewFile returns.
func NewFile(name string, data []byte) FileArtifact {
	f := fileArtifact(data)
	if name != "" {
		return &fileArtifactNamed{f, name}
	}
	return &f
}
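
// Illustrative usage sketch (not part of this change); the name and contents
// are assumed placeholder values, and the calls assume [FileArtifact] exposes
// the [Artifact] methods implemented in this file:
//
//	a := NewFile("motd", []byte("hello"))
//	rc, _ := a.Cure(nil)      // fileArtifact ignores its *RContext
//	data, _ := io.ReadAll(rc) // "hello"; Checksum is SHA-384 over these bytes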

// Kind returns the hardcoded [Kind] constant.
func (*fileArtifact) Kind() Kind { return KindFile }

// Params writes an empty string and the file body.
func (a *fileArtifact) Params(ctx *IContext) {
	ctx.WriteString("")
	ctx.Write(*a)
}

func init() {
	register(KindFile, func(r *IRReader) Artifact {
		name := r.ReadString()
		data := r.ReadStringBytes()
		if _, ok := r.Finalise(); !ok {
			panic(ErrExpectedChecksum)
		}
		return NewFile(name, data)
	})
}

// Dependencies returns a nil slice.
func (*fileArtifact) Dependencies() []Artifact { return nil }

// IsExclusive returns false: Cure returns a prepopulated buffer.
func (*fileArtifact) IsExclusive() bool { return false }

// Checksum computes and returns the checksum of caller-supplied data.
func (a *fileArtifact) Checksum() Checksum {
	h := sha512.New384()
	h.Write(*a)
	return Checksum(h.Sum(nil))
}

// Cure returns the caller-supplied data.
func (a *fileArtifact) Cure(*RContext) (io.ReadCloser, error) {
	return io.NopCloser(bytes.NewReader(*a)), nil
}
29 internal/pkg/file_test.go (new file)
@@ -0,0 +1,29 @@
package pkg_test

import (
	"testing"

	"hakurei.app/container/check"
	"hakurei.app/internal/pkg"
)

func TestFile(t *testing.T) {
	t.Parallel()

	checkWithCache(t, []cacheTestCase{
		{"file", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
			c.SetStrict(true)

			cureMany(t, c, []cureStep{
				{"short", pkg.NewFile("null", []byte{0}), base.Append(
					"identifier",
					"3376ALA7hIUm2LbzH2fDvRezgzod1eTK_G6XjyOgbM2u-6swvkFaF0BOwSl_juBi",
				), pkg.MustDecode(
					"vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX",
				), nil},
			})
		}, pkg.MustDecode(
			"iR6H5OIsyOW4EwEgtm9rGzGF6DVtyHLySEtwnFE8bnus9VJcoCbR4JIek7Lw-vwT",
		)},
	})
}
762 internal/pkg/ir.go (new file)
@@ -0,0 +1,762 @@
package pkg

import (
	"bufio"
	"bytes"
	"context"
	"crypto/sha512"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"slices"
	"strconv"
	"syscall"
	"unique"
	"unsafe"
)

// wordSize is the boundary which binary segments are always aligned to.
const wordSize = 8

// alignSize returns the padded size for aligning sz.
func alignSize(sz int) int {
	return sz + (wordSize-(sz)%wordSize)%wordSize
}
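
// Illustrative values, derived from the formula above (not part of this change):
//
//	alignSize(0) // 0
//	alignSize(1) // 8
//	alignSize(8) // 8
//	alignSize(9) // 16, i.e. sizes round up to the next multiple of wordSize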

// panicToError recovers from a panic and replaces a nil error with the panicked
// error value. If the value does not implement error, it is re-panicked.
func panicToError(errP *error) {
	r := recover()
	if r == nil {
		return
	}

	if err, ok := r.(error); !ok {
		panic(r)
	} else if *errP == nil {
		*errP = err
	}
}

// IContext is passed to [Artifact.Params] and provides methods for writing
// values to the IR writer. It does not expose the underlying [io.Writer].
//
// IContext is valid until [Artifact.Params] returns.
type IContext struct {
	// Address of underlying [Cache], should be zeroed or made unusable after
	// [Artifact.Params] returns and must not be exposed directly.
	cache *Cache
	// Written to by various methods, should be zeroed after [Artifact.Params]
	// returns and must not be exposed directly.
	w io.Writer
}

// Unwrap returns the underlying [context.Context].
func (i *IContext) Unwrap() context.Context { return i.cache.ctx }
|
||||||
|
|
||||||
|
// irZero is a zero IR word.
|
||||||
|
var irZero [wordSize]byte
|
||||||
|
|
||||||
|
// IRValueKind denotes the kind of encoded value.
|
||||||
|
type IRValueKind uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
// IRKindEnd denotes the end of the current parameters stream. The ancillary
|
||||||
|
// value is interpreted as [IREndFlag].
|
||||||
|
IRKindEnd IRValueKind = iota
|
||||||
|
// IRKindIdent denotes the identifier of a dependency [Artifact]. The
|
||||||
|
// ancillary value is reserved for future use.
|
||||||
|
IRKindIdent
|
||||||
|
// IRKindUint32 denotes an inlined uint32 value.
|
||||||
|
IRKindUint32
|
||||||
|
// IRKindString denotes a string with its true length encoded in header
|
||||||
|
// ancillary data. Its wire length is always aligned to an 8-byte boundary.
|
||||||
|
IRKindString
|
||||||
|
|
||||||
|
irHeaderShift = 32
|
||||||
|
irHeaderMask = 0xffffffff
|
||||||
|
)
|
||||||
|
|
||||||
|
// String returns a user-facing name of k.
|
||||||
|
func (k IRValueKind) String() string {
|
||||||
|
switch k {
|
||||||
|
case IRKindEnd:
|
||||||
|
return "terminator"
|
||||||
|
case IRKindIdent:
|
||||||
|
return "ident"
|
||||||
|
case IRKindUint32:
|
||||||
|
return "uint32"
|
||||||
|
case IRKindString:
|
||||||
|
return "string"
|
||||||
|
default:
|
||||||
|
return "invalid kind " + strconv.Itoa(int(k))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// irValueHeader encodes [IRValueKind] and a 32-bit ancillary value.
|
||||||
|
type irValueHeader uint64
|
||||||
|
|
||||||
|
// encodeHeader returns irValueHeader encoding [IRValueKind] and ancillary data.
|
||||||
|
func (k IRValueKind) encodeHeader(v uint32) irValueHeader {
|
||||||
|
return irValueHeader(v)<<irHeaderShift | irValueHeader(k)
|
||||||
|
}
|
||||||
|
|
||||||
|
// put stores h in b[0:8].
|
||||||
|
func (h irValueHeader) put(b []byte) {
|
||||||
|
binary.LittleEndian.PutUint64(b[:], uint64(h))
|
||||||
|
}
|
||||||
|
|
||||||
|
// append appends the bytes of h to b and returns the appended slice.
|
||||||
|
func (h irValueHeader) append(b []byte) []byte {
|
||||||
|
return binary.LittleEndian.AppendUint64(b, uint64(h))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IREndFlag is ancillary data encoded in the header of an [IRKindEnd] value and
|
||||||
|
// specifies the presence of optional fields in the remaining [IRKindEnd] data.
|
||||||
|
// Order of present fields is the order of their corresponding constants defined
|
||||||
|
// below.
|
||||||
|
type IREndFlag uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
// IREndKnownChecksum denotes a [KnownChecksum] artifact. For an [IRKindEnd]
|
||||||
|
// value with this flag set, the remaining data contains the [Checksum].
|
||||||
|
IREndKnownChecksum IREndFlag = 1 << iota
|
||||||
|
)
|
||||||
|
|
||||||
|
// mustWrite writes to IContext.w and panics on error. The panic is recovered
|
||||||
|
// from by the caller and used as the return value.
|
||||||
|
func (i *IContext) mustWrite(p []byte) {
|
||||||
|
if _, err := i.w.Write(p); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteIdent writes the identifier of [Artifact] to the IR. The behaviour of
|
||||||
|
// WriteIdent is not defined for an [Artifact] not part of the slice returned by
|
||||||
|
// [Artifact.Dependencies].
|
||||||
|
func (i *IContext) WriteIdent(a Artifact) {
|
||||||
|
buf := i.cache.getIdentBuf()
|
||||||
|
defer i.cache.putIdentBuf(buf)
|
||||||
|
|
||||||
|
IRKindIdent.encodeHeader(0).put(buf[:])
|
||||||
|
*(*ID)(buf[wordSize:]) = i.cache.Ident(a).Value()
|
||||||
|
i.mustWrite(buf[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteUint32 writes a uint32 value to the IR.
|
||||||
|
func (i *IContext) WriteUint32(v uint32) {
|
||||||
|
i.mustWrite(IRKindUint32.encodeHeader(v).append(nil))
|
||||||
|
}
|
||||||
|
|
||||||
|
// irMaxStringLength is the maximum acceptable wire size of [IRKindString].
|
||||||
|
const irMaxStringLength = 1 << 20
|
||||||
|
|
||||||
|
// IRStringError is a string value too big to encode in IR.
|
||||||
|
type IRStringError string
|
||||||
|
|
||||||
|
func (IRStringError) Error() string {
|
||||||
|
return "params value too big to encode in IR"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write writes p as a string value to the IR.
|
||||||
|
func (i *IContext) Write(p []byte) {
|
||||||
|
sz := alignSize(len(p))
|
||||||
|
if len(p) > irMaxStringLength || sz > irMaxStringLength {
|
||||||
|
panic(IRStringError(p))
|
||||||
|
}
|
||||||
|
|
||||||
|
i.mustWrite(IRKindString.encodeHeader(uint32(len(p))).append(nil))
|
||||||
|
i.mustWrite(p)
|
||||||
|
|
||||||
|
psz := sz - len(p)
|
||||||
|
if psz > 0 {
|
||||||
|
i.mustWrite(irZero[:psz])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteString writes s as a string value to the IR.
|
||||||
|
func (i *IContext) WriteString(s string) {
|
||||||
|
p := unsafe.Slice(unsafe.StringData(s), len(s))
|
||||||
|
i.Write(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encode writes a deterministic, efficient representation of a to w and returns
|
||||||
|
// the first non-nil error encountered while writing to w.
|
||||||
|
func (c *Cache) Encode(w io.Writer, a Artifact) (err error) {
|
||||||
|
deps := a.Dependencies()
|
||||||
|
idents := make([]*extIdent, len(deps))
|
||||||
|
for i, d := range deps {
|
||||||
|
dbuf, did := c.unsafeIdent(d, true)
|
||||||
|
if dbuf == nil {
|
||||||
|
dbuf = c.getIdentBuf()
|
||||||
|
binary.LittleEndian.PutUint64(dbuf[:], uint64(d.Kind()))
|
||||||
|
*(*ID)(dbuf[wordSize:]) = did.Value()
|
||||||
|
} else {
|
||||||
|
c.storeIdent(d, dbuf)
|
||||||
|
}
|
||||||
|
defer c.putIdentBuf(dbuf)
|
||||||
|
idents[i] = dbuf
|
||||||
|
}
|
||||||
|
slices.SortFunc(idents, func(a, b *extIdent) int {
|
||||||
|
return bytes.Compare(a[:], b[:])
|
||||||
|
})
|
||||||
|
idents = slices.CompactFunc(idents, func(a, b *extIdent) bool {
|
||||||
|
return *a == *b
|
||||||
|
})
|
||||||
|
|
||||||
|
// kind uint64 | deps_sz uint64
|
||||||
|
var buf [wordSize * 2]byte
|
||||||
|
binary.LittleEndian.PutUint64(buf[:], uint64(a.Kind()))
|
||||||
|
binary.LittleEndian.PutUint64(buf[wordSize:], uint64(len(idents)))
|
||||||
|
if _, err = w.Write(buf[:]); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, dn := range idents {
|
||||||
|
// kind uint64 | ident ID
|
||||||
|
if _, err = w.Write(dn[:]); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func() {
|
||||||
|
i := IContext{c, w}
|
||||||
|
|
||||||
|
defer panicToError(&err)
|
||||||
|
defer func() { i.cache, i.w = nil, nil }()
|
||||||
|
|
||||||
|
a.Params(&i)
|
||||||
|
}()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var f IREndFlag
|
||||||
|
kcBuf := c.getIdentBuf()
|
||||||
|
sz := wordSize
|
||||||
|
if kc, ok := a.(KnownChecksum); ok {
|
||||||
|
f |= IREndKnownChecksum
|
||||||
|
*(*Checksum)(kcBuf[wordSize:]) = kc.Checksum()
|
||||||
|
sz += len(Checksum{})
|
||||||
|
}
|
||||||
|
IRKindEnd.encodeHeader(uint32(f)).put(kcBuf[:])
|
||||||
|
|
||||||
|
_, err = w.Write(kcBuf[:sz])
|
||||||
|
c.putIdentBuf(kcBuf)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// encodeAll implements EncodeAll by recursively encoding dependencies and
// performing deduplication by value via the encoded map.
func (c *Cache) encodeAll(
	w io.Writer,
	a Artifact,
	encoded map[Artifact]struct{},
) (err error) {
	if _, ok := encoded[a]; ok {
		return
	}

	for _, d := range a.Dependencies() {
		if err = c.encodeAll(w, d, encoded); err != nil {
			return
		}
	}

	encoded[a] = struct{}{}
	return c.Encode(w, a)
}

// EncodeAll writes a self-describing IR stream of a to w and returns the first
// non-nil error encountered while writing to w.
//
// EncodeAll tries to avoid encoding the same [Artifact] more than once, however
// it will fail to do so if they do not compare equal by value, as that would
// require buffering and greatly reduce performance. It is therefore up to the
// caller to avoid representing dependencies in a way such that two equivalent
// artifacts do not compare equal. While an IR stream with repeated artifacts
// is valid, it is somewhat inefficient, and the reference [IRDecoder]
// implementation produces a warning for it.
//
// Note that while EncodeAll makes use of the ident free list, it does not use
// the ident cache, nor does it contribute identifiers it computes back to the
// ident cache. Because of this, multiple invocations of EncodeAll have similar
// cost and do not amortise when combined with a call to Cure.
func (c *Cache) EncodeAll(w io.Writer, a Artifact) error {
	return c.encodeAll(w, a, make(map[Artifact]struct{}))
}
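
// Illustrative usage sketch (not part of this change), assuming c is an open
// *Cache and a is any registered [Artifact]:
//
//	var buf bytes.Buffer
//	if err := c.EncodeAll(&buf, a); err != nil {
//		// handle write error
//	}
//	decoded, err := c.NewDecoder(&buf).Decode() // round-trips a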
|
||||||
|
|
||||||
|
// ErrRemainingIR is returned for an [IRReadFunc] that failed to call
|
||||||
|
// [IRReader.Finalise] before returning.
|
||||||
|
var ErrRemainingIR = errors.New("implementation did not consume final value")
|
||||||
|
|
||||||
|
// DanglingIdentError is an identifier in an [IRKindIdent] value that was never
|
||||||
|
// described in the IR stream before it was encountered.
|
||||||
|
type DanglingIdentError unique.Handle[ID]
|
||||||
|
|
||||||
|
func (e DanglingIdentError) Error() string {
|
||||||
|
return "artifact " + Encode(unique.Handle[ID](e).Value()) +
|
||||||
|
" was never described"
|
||||||
|
}
|
||||||
|
|
||||||
|
type (
|
||||||
|
// IRDecoder decodes [Artifact] from an IR stream. The stream is read to
|
||||||
|
// EOF and the final [Artifact] is returned. Previous artifacts may be
|
||||||
|
// looked up by their identifier.
|
||||||
|
//
|
||||||
|
// An [Artifact] may appear more than once in the same IR stream. A
|
||||||
|
// repeating [Artifact] generates a warning via [Cache] and will appear if
|
||||||
|
// verbose logging is enabled. Artifacts may only depend on artifacts
|
||||||
|
// previously described in the IR stream.
|
||||||
|
//
|
||||||
|
// Methods of IRDecoder are not safe for concurrent use.
|
||||||
|
IRDecoder struct {
|
||||||
|
// Address of underlying [Cache], must not be exposed directly.
|
||||||
|
c *Cache
|
||||||
|
|
||||||
|
// Underlying IR reader. Methods of [IRReader] must not use this as it
|
||||||
|
// bypasses ident measurement.
|
||||||
|
r io.Reader
|
||||||
|
// Artifacts already seen in the IR stream.
|
||||||
|
ident map[unique.Handle[ID]]Artifact
|
||||||
|
|
||||||
|
// Whether Decode returned, and the entire IR stream was decoded.
|
||||||
|
done, ok bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// IRReader provides methods to decode the IR wire format and read values
|
||||||
|
// from the reader embedded in the underlying [IRDecoder]. It is
|
||||||
|
// deliberately impossible to obtain the [IRValueKind] of the next value,
|
||||||
|
// and callers must never recover from panics in any read method.
|
||||||
|
//
|
||||||
|
// It is the responsibility of the caller to call Finalise after all IR
|
||||||
|
// values have been read. Failure to call Finalise causes the resulting
|
||||||
|
// [Artifact] to be rejected with [ErrRemainingIR].
|
||||||
|
//
|
||||||
|
// For an [Artifact] expected to have dependencies, the caller must consume
|
||||||
|
// all dependencies by calling Next until all dependencies are depleted, or
|
||||||
|
// call DiscardAll to explicitly discard them and rely on values encoded as
|
||||||
|
// [IRKindIdent] instead. Failure to consume all unstructured dependencies
|
||||||
|
// causes the resulting [Artifact] to be rejected with [MissedDependencyError].
|
||||||
|
//
|
||||||
|
// Requesting the value of an unstructured dependency not yet described in
|
||||||
|
// the IR stream via Next, or reading an [IRKindIdent] value not part of
|
||||||
|
// unstructured dependencies via ReadIdent may cause the resulting
|
||||||
|
// [Artifact] to be rejected with [DanglingIdentError], however either
|
||||||
|
// method may return a non-nil [Artifact] implementation of unspecified
|
||||||
|
// value.
|
||||||
|
IRReader struct {
|
||||||
|
// Address of underlying [IRDecoder], should be zeroed or made unusable
|
||||||
|
// after finalisation and must not be exposed directly.
|
||||||
|
d *IRDecoder
|
||||||
|
// Common buffer for word-sized reads.
|
||||||
|
buf [wordSize]byte
|
||||||
|
|
||||||
|
// Dependencies sent before params, sorted by identifier. Resliced on
|
||||||
|
// each call to Next and checked to be depleted during Finalise.
|
||||||
|
deps []*extIdent
|
||||||
|
|
||||||
|
// Number of values already read, -1 denotes a finalised IRReader.
|
||||||
|
count int
|
||||||
|
// Header of value currently being read.
|
||||||
|
h irValueHeader
|
||||||
|
|
||||||
|
// Measured IR reader. All reads for the current [Artifact] must go
|
||||||
|
// through this to produce a correct ident.
|
||||||
|
r io.Reader
|
||||||
|
// Buffers measure writes. Flushed and returned to d during Finalise.
|
||||||
|
ibw *bufio.Writer
|
||||||
|
}
|
||||||
|
|
||||||
|
// IRReadFunc reads IR values written by [Artifact.Params] to produce an
|
||||||
|
// instance of [Artifact] identical to the one to produce these values.
|
||||||
|
IRReadFunc func(r *IRReader) Artifact
|
||||||
|
)
|
||||||
|
|
||||||
|
// kind returns the [IRValueKind] encoded in h.
|
||||||
|
func (h irValueHeader) kind() IRValueKind {
|
||||||
|
return IRValueKind(h & irHeaderMask)
|
||||||
|
}
|
||||||
|
|
||||||
|
// value returns ancillary data encoded in h.
|
||||||
|
func (h irValueHeader) value() uint32 {
|
||||||
|
return uint32(h >> irHeaderShift)
|
||||||
|
}
|
||||||
|
|
||||||
|
// irArtifact refers to artifact IR interpretation functions and must not be
|
||||||
|
// written to directly.
|
||||||
|
var irArtifact = make(map[Kind]IRReadFunc)
|
||||||
|
|
||||||
|
// InvalidKindError is an unregistered [Kind] value.
|
||||||
|
type InvalidKindError Kind
|
||||||
|
|
||||||
|
func (e InvalidKindError) Error() string {
|
||||||
|
return "invalid artifact kind " + strconv.Itoa(int(e))
|
||||||
|
}
|
||||||
|
|
||||||
|
// register records the [IRReadFunc] of an implementation of [Artifact] under
// the specified [Kind]. Expected to be used only during initialization, it
// panics if the mapping between [Kind] and [IRReadFunc] is not a bijection.
//
// register is not safe for concurrent use. register must not be called after
// the first instance of [Cache] has been opened.
func register(k Kind, f IRReadFunc) {
	if _, ok := irArtifact[k]; ok {
		panic("attempting to register " + strconv.Itoa(int(k)) + " twice")
	}
	irArtifact[k] = f
}

// Register records the [IRReadFunc] of a custom implementation of [Artifact]
// under the specified [Kind]. Expected to be used only during initialization,
// it panics if the mapping between [Kind] and [IRReadFunc] is not a bijection,
// or the specified [Kind] is below [KindCustomOffset].
//
// Register is not safe for concurrent use. Register must not be called after
// the first instance of [Cache] has been opened.
func Register(k Kind, f IRReadFunc) {
	if k < KindCustomOffset {
		panic("attempting to register within internal kind range")
	}
	register(k, f)
}
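
// Illustrative sketch of registering a custom artifact kind (not part of this
// change); the kind value, "customArtifact" and its constructor are assumed
// placeholders, and the read function follows the same shape as the built-in
// KindFile reader:
//
//	func init() {
//		Register(KindCustomOffset+1, func(r *IRReader) Artifact {
//			name := r.ReadString()
//			if _, ok := r.Finalise(); ok {
//				panic(ErrUnexpectedChecksum)
//			}
//			return newCustomArtifact(name) // hypothetical constructor
//		})
//	}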
|
||||||
|
|
||||||
|
// NewDecoder returns a new [IRDecoder] that reads from the [io.Reader].
|
||||||
|
func (c *Cache) NewDecoder(r io.Reader) *IRDecoder {
|
||||||
|
return &IRDecoder{c, r, make(map[unique.Handle[ID]]Artifact), false, false}
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// irMaxValues is the arbitrary maximum number of values allowed to be
|
||||||
|
// written by [Artifact.Params] and subsequently read via [IRReader].
|
||||||
|
irMaxValues = 1 << 12
|
||||||
|
|
||||||
|
// irMaxDeps is the arbitrary maximum number of direct dependencies allowed
|
||||||
|
// to be returned by [Artifact.Dependencies] and subsequently decoded by
|
||||||
|
// [IRDecoder].
|
||||||
|
irMaxDeps = 1 << 10
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrIRValues is returned for an [Artifact] with too many parameter values.
|
||||||
|
ErrIRValues = errors.New("artifact has too many IR parameter values")
|
||||||
|
|
||||||
|
// ErrIRDepend is returned for an [Artifact] with too many dependencies.
|
||||||
|
ErrIRDepend = errors.New("artifact has too many dependencies")
|
||||||
|
|
||||||
|
// ErrAlreadyFinalised is returned when attempting to use an [IRReader] that
|
||||||
|
// has already been finalised.
|
||||||
|
ErrAlreadyFinalised = errors.New("reader has already finalised")
|
||||||
|
)
|
||||||
|
|
||||||
|
// enterReader panics with an appropriate error for an out-of-bounds count and
|
||||||
|
// must be called at some point in any exported method.
|
||||||
|
func (ir *IRReader) enterReader(read bool) {
|
||||||
|
if ir.count < 0 {
|
||||||
|
panic(ErrAlreadyFinalised)
|
||||||
|
}
|
||||||
|
if ir.count >= irMaxValues {
|
||||||
|
panic(ErrIRValues)
|
||||||
|
}
|
||||||
|
|
||||||
|
if read {
|
||||||
|
ir.count++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IRKindError describes an attempt to read an IR value of unexpected kind.
|
||||||
|
type IRKindError struct {
|
||||||
|
Got, Want IRValueKind
|
||||||
|
Ancillary uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *IRKindError) Error() string {
|
||||||
|
return fmt.Sprintf(
|
||||||
|
"got %s IR value (%#x) instead of %s",
|
||||||
|
e.Got, e.Ancillary, e.Want,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// readFull reads until either p is filled or an error is encountered.
|
||||||
|
func (ir *IRReader) readFull(p []byte) (n int, err error) {
|
||||||
|
for n < len(p) && err == nil {
|
||||||
|
var nn int
|
||||||
|
nn, err = ir.r.Read(p[n:])
|
||||||
|
n += nn
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// mustRead reads from the underlying measured reader and panics on error. If
|
||||||
|
// an [io.EOF] is encountered and n != len(p), the error is promoted to a
|
||||||
|
// [io.ErrUnexpectedEOF], if n == 0, [io.EOF] is kept as is, otherwise it is
|
||||||
|
// zeroed.
|
||||||
|
func (ir *IRReader) mustRead(p []byte) {
|
||||||
|
n, err := ir.readFull(p)
|
||||||
|
if err == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if errors.Is(err, io.EOF) {
|
||||||
|
if n == len(p) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err = io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// mustReadHeader reads the next header via d and checks its kind.
|
||||||
|
func (ir *IRReader) mustReadHeader(k IRValueKind) {
|
||||||
|
ir.mustRead(ir.buf[:])
|
||||||
|
ir.h = irValueHeader(binary.LittleEndian.Uint64(ir.buf[:]))
|
||||||
|
if wk := ir.h.kind(); wk != k {
|
||||||
|
panic(&IRKindError{wk, k, ir.h.value()})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// putAll returns all dependency buffers to the underlying [Cache].
|
||||||
|
func (ir *IRReader) putAll() {
|
||||||
|
for _, buf := range ir.deps {
|
||||||
|
ir.d.c.putIdentBuf(buf)
|
||||||
|
}
|
||||||
|
ir.deps = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DiscardAll discards all unstructured dependencies. This is useful to
|
||||||
|
// implementations that encode dependencies as [IRKindIdent] which are read back
|
||||||
|
// via ReadIdent.
|
||||||
|
func (ir *IRReader) DiscardAll() {
|
||||||
|
if ir.deps == nil {
|
||||||
|
panic("attempting to discard dependencies twice")
|
||||||
|
}
|
||||||
|
ir.putAll()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrDependencyDepleted is returned when attempting to advance to the next
|
||||||
|
// unstructured dependency when there are none left.
|
||||||
|
var ErrDependencyDepleted = errors.New("reading past end of dependencies")
|
||||||
|
|
||||||
|
// Next returns the next unstructured dependency.
|
||||||
|
func (ir *IRReader) Next() Artifact {
|
||||||
|
if len(ir.deps) == 0 {
|
||||||
|
panic(ErrDependencyDepleted)
|
||||||
|
}
|
||||||
|
|
||||||
|
id := unique.Make(ID(ir.deps[0][wordSize:]))
|
||||||
|
ir.d.c.putIdentBuf(ir.deps[0])
|
||||||
|
ir.deps = ir.deps[1:]
|
||||||
|
|
||||||
|
if a, ok := ir.d.ident[id]; !ok {
|
||||||
|
ir.putAll()
|
||||||
|
panic(DanglingIdentError(id))
|
||||||
|
} else {
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MissedDependencyError is the number of unstructured dependencies remaining
|
||||||
|
// in [IRReader] that was never requested or explicitly discarded before
|
||||||
|
// finalisation.
|
||||||
|
type MissedDependencyError int
|
||||||
|
|
||||||
|
func (e MissedDependencyError) Error() string {
|
||||||
|
return "missed " + strconv.Itoa(int(e)) + " unstructured dependencies"
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrUnexpectedChecksum is returned by an [IRReadFunc] that does not expect
|
||||||
|
// a checksum but received one in [IRKindEnd] anyway.
|
||||||
|
ErrUnexpectedChecksum = errors.New("checksum specified on unsupported artifact")
|
||||||
|
// ErrExpectedChecksum is returned by an [IRReadFunc] that expects a checksum
|
||||||
|
// but did not receive one in [IRKindEnd].
|
||||||
|
ErrExpectedChecksum = errors.New("checksum required but not specified")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Finalise reads the final [IRKindEnd] value and marks r as finalised. Methods
|
||||||
|
// of r are invalid upon entry into Finalise. If a [Checksum] is available via
|
||||||
|
// [IREndKnownChecksum], its handle is returned and the caller must store its
|
||||||
|
// value in the resulting [Artifact].
|
||||||
|
func (ir *IRReader) Finalise() (checksum unique.Handle[Checksum], ok bool) {
|
||||||
|
ir.enterReader(true)
|
||||||
|
ir.count = -1
|
||||||
|
|
||||||
|
ir.mustReadHeader(IRKindEnd)
|
||||||
|
f := IREndFlag(ir.h.value())
|
||||||
|
|
||||||
|
if f&IREndKnownChecksum != 0 {
|
||||||
|
buf := ir.d.c.getIdentBuf()
|
||||||
|
defer ir.d.c.putIdentBuf(buf)
|
||||||
|
|
||||||
|
ir.mustRead(buf[wordSize:])
|
||||||
|
checksum = unique.Make(Checksum(buf[wordSize:]))
|
||||||
|
ok = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := ir.ibw.Flush(); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
ir.r, ir.ibw = nil, nil
|
||||||
|
|
||||||
|
if len(ir.deps) != 0 {
|
||||||
|
panic(MissedDependencyError(len(ir.deps)))
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadIdent reads the next value as [IRKindIdent].
|
||||||
|
func (ir *IRReader) ReadIdent() Artifact {
|
||||||
|
ir.enterReader(true)
|
||||||
|
ir.mustReadHeader(IRKindIdent)
|
||||||
|
|
||||||
|
buf := ir.d.c.getIdentBuf()
|
||||||
|
defer ir.d.c.putIdentBuf(buf)
|
||||||
|
|
||||||
|
ir.mustRead(buf[wordSize:])
|
||||||
|
id := unique.Make(ID(buf[wordSize:]))
|
||||||
|
|
||||||
|
if a, ok := ir.d.ident[id]; !ok {
|
||||||
|
panic(DanglingIdentError(id))
|
||||||
|
} else {
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadUint32 reads the next value as [IRKindUint32].
|
||||||
|
func (ir *IRReader) ReadUint32() uint32 {
|
||||||
|
ir.enterReader(true)
|
||||||
|
ir.mustReadHeader(IRKindUint32)
|
||||||
|
return ir.h.value()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadStringBytes reads the next value as [IRKindString] but returns it as a
|
||||||
|
// byte slice instead.
|
||||||
|
func (ir *IRReader) ReadStringBytes() []byte {
|
||||||
|
ir.enterReader(true)
|
||||||
|
ir.mustReadHeader(IRKindString)
|
||||||
|
|
||||||
|
sz := int(ir.h.value())
|
||||||
|
szWire := alignSize(sz)
|
||||||
|
if szWire > irMaxStringLength {
|
||||||
|
panic(IRStringError("\x00"))
|
||||||
|
}
|
||||||
|
|
||||||
|
p := make([]byte, szWire)
|
||||||
|
ir.mustRead(p)
|
||||||
|
return p[:sz]
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadString reads the next value as [IRKindString].
|
||||||
|
func (ir *IRReader) ReadString() string {
|
||||||
|
p := ir.ReadStringBytes()
|
||||||
|
return unsafe.String(unsafe.SliceData(p), len(p))
|
||||||
|
}
|
||||||
|
|
||||||
|
// decode decodes the next [Artifact] in the IR stream and returns any buffer
|
||||||
|
// originating from [Cache] before returning. decode returns [io.EOF] if and
|
||||||
|
// only if the underlying [io.Reader] is already read to EOF.
|
||||||
|
func (d *IRDecoder) decode() (a Artifact, err error) {
|
||||||
|
defer panicToError(&err)
|
||||||
|
var ir IRReader
|
||||||
|
|
||||||
|
defer func() { ir.d = nil }()
|
||||||
|
ir.d = d
|
||||||
|
|
||||||
|
h := sha512.New384()
|
||||||
|
ir.ibw = d.c.getWriter(h)
|
||||||
|
defer d.c.putWriter(ir.ibw)
|
||||||
|
ir.r = io.TeeReader(d.r, ir.ibw)
|
||||||
|
|
||||||
|
if n, _err := ir.readFull(ir.buf[:]); _err != nil {
|
||||||
|
if errors.Is(_err, io.EOF) {
|
||||||
|
if n != 0 {
|
||||||
|
_err = io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
err = _err
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ak := Kind(binary.LittleEndian.Uint64(ir.buf[:]))
|
||||||
|
f, ok := irArtifact[ak]
|
||||||
|
if !ok {
|
||||||
|
err = InvalidKindError(ak)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
defer ir.putAll()
|
||||||
|
ir.mustRead(ir.buf[:])
|
||||||
|
sz := binary.LittleEndian.Uint64(ir.buf[:])
|
||||||
|
if sz > irMaxDeps {
|
||||||
|
err = ErrIRDepend
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ir.deps = make([]*extIdent, sz)
|
||||||
|
for i := range ir.deps {
|
||||||
|
ir.deps[i] = d.c.getIdentBuf()
|
||||||
|
}
|
||||||
|
for _, buf := range ir.deps {
|
||||||
|
ir.mustRead(buf[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
a = f(&ir)
|
||||||
|
if a == nil {
|
||||||
|
err = syscall.ENOTRECOVERABLE
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if ir.count != -1 {
|
||||||
|
err = ErrRemainingIR
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := d.c.getIdentBuf()
|
||||||
|
h.Sum(buf[wordSize:wordSize])
|
||||||
|
id := unique.Make(ID(buf[wordSize:]))
|
||||||
|
d.c.putIdentBuf(buf)
|
||||||
|
if _, ok = d.ident[id]; !ok {
|
||||||
|
d.ident[id] = a
|
||||||
|
} else {
|
||||||
|
d.c.msg.Verbosef(
|
||||||
|
"artifact %s appeared more than once in IR stream",
|
||||||
|
Encode(id.Value()),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode consumes the IR stream to EOF and returns the final [Artifact]. After
|
||||||
|
// Decode returns, Lookup is available and Decode must not be called again.
|
||||||
|
func (d *IRDecoder) Decode() (a Artifact, err error) {
|
||||||
|
if d.done {
|
||||||
|
panic("attempting to decode an IR stream twice")
|
||||||
|
}
|
||||||
|
defer func() { d.done = true }()
|
||||||
|
|
||||||
|
var cur Artifact
|
||||||
|
next:
|
||||||
|
a, err = d.decode()
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
cur = a
|
||||||
|
goto next
|
||||||
|
}
|
||||||
|
|
||||||
|
if errors.Is(err, io.EOF) {
|
||||||
|
a, err = cur, nil
|
||||||
|
d.ok = true
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lookup looks up an [Artifact] described by the IR stream by its identifier.
|
||||||
|
func (d *IRDecoder) Lookup(id unique.Handle[ID]) (a Artifact, ok bool) {
|
||||||
|
if !d.ok {
|
||||||
|
panic("attempting to look up artifact without full IR stream")
|
||||||
|
}
|
||||||
|
a, ok = d.ident[id]
|
||||||
|
return
|
||||||
|
}
|
||||||
114 internal/pkg/ir_test.go (new file)
@@ -0,0 +1,114 @@
package pkg_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestIRRoundtrip(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
testCases := []struct {
|
||||||
|
name string
|
||||||
|
a pkg.Artifact
|
||||||
|
}{
|
||||||
|
{"http get aligned", pkg.NewHTTPGet(
|
||||||
|
nil, "file:///testdata",
|
||||||
|
pkg.Checksum(bytes.Repeat([]byte{0xfd}, len(pkg.Checksum{}))),
|
||||||
|
)},
|
||||||
|
{"http get unaligned", pkg.NewHTTPGet(
|
||||||
|
nil, "https://hakurei.app",
|
||||||
|
pkg.Checksum(bytes.Repeat([]byte{0xfc}, len(pkg.Checksum{}))),
|
||||||
|
)},
|
||||||
|
|
||||||
|
{"http get tar", pkg.NewHTTPGetTar(
|
||||||
|
nil, "file:///testdata",
|
||||||
|
pkg.Checksum(bytes.Repeat([]byte{0xff}, len(pkg.Checksum{}))),
|
||||||
|
pkg.TarBzip2,
|
||||||
|
)},
|
||||||
|
{"http get tar unaligned", pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://hakurei.app",
|
||||||
|
pkg.Checksum(bytes.Repeat([]byte{0xfe}, len(pkg.Checksum{}))),
|
||||||
|
pkg.TarUncompressed,
|
||||||
|
)},
|
||||||
|
|
||||||
|
{"exec offline", pkg.NewExec(
|
||||||
|
"exec-offline", nil, 0, false,
|
||||||
|
pkg.AbsWork,
|
||||||
|
[]string{"HAKUREI_TEST=1"},
|
||||||
|
check.MustAbs("/opt/bin/testtool"),
|
||||||
|
[]string{"testtool"},
|
||||||
|
|
||||||
|
pkg.MustPath("/file", false, pkg.NewFile("file", []byte(
|
||||||
|
"stub file",
|
||||||
|
))), pkg.MustPath("/.hakurei", false, pkg.NewHTTPGetTar(
|
||||||
|
nil, "file:///hakurei.tar",
|
||||||
|
pkg.Checksum(bytes.Repeat([]byte{0xfc}, len(pkg.Checksum{}))),
|
||||||
|
pkg.TarUncompressed,
|
||||||
|
)), pkg.MustPath("/opt", false, pkg.NewHTTPGetTar(
|
||||||
|
nil, "file:///testtool.tar.gz",
|
||||||
|
pkg.Checksum(bytes.Repeat([]byte{0xfc}, len(pkg.Checksum{}))),
|
||||||
|
pkg.TarGzip,
|
||||||
|
)),
|
||||||
|
)},
|
||||||
|
|
||||||
|
{"exec net", pkg.NewExec(
|
||||||
|
"exec-net",
|
||||||
|
(*pkg.Checksum)(bytes.Repeat([]byte{0xfc}, len(pkg.Checksum{}))),
|
||||||
|
0, false,
|
||||||
|
pkg.AbsWork,
|
||||||
|
[]string{"HAKUREI_TEST=1"},
|
||||||
|
check.MustAbs("/opt/bin/testtool"),
|
||||||
|
[]string{"testtool", "net"},
|
||||||
|
|
||||||
|
pkg.MustPath("/file", false, pkg.NewFile("file", []byte(
|
||||||
|
"stub file",
|
||||||
|
))), pkg.MustPath("/.hakurei", false, pkg.NewHTTPGetTar(
|
||||||
|
nil, "file:///hakurei.tar",
|
||||||
|
pkg.Checksum(bytes.Repeat([]byte{0xfc}, len(pkg.Checksum{}))),
|
||||||
|
pkg.TarUncompressed,
|
||||||
|
)), pkg.MustPath("/opt", false, pkg.NewHTTPGetTar(
|
||||||
|
nil, "file:///testtool.tar.gz",
|
||||||
|
pkg.Checksum(bytes.Repeat([]byte{0xfc}, len(pkg.Checksum{}))),
|
||||||
|
pkg.TarGzip,
|
||||||
|
)),
|
||||||
|
)},
|
||||||
|
|
||||||
|
{"file anonymous", pkg.NewFile("", []byte{0})},
|
||||||
|
{"file", pkg.NewFile("stub", []byte("stub"))},
|
||||||
|
}
|
||||||
|
testCasesCache := make([]cacheTestCase, len(testCases))
|
||||||
|
for i, tc := range testCases {
|
||||||
|
want := tc.a
|
||||||
|
testCasesCache[i] = cacheTestCase{tc.name, nil,
|
||||||
|
func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
|
r, w := io.Pipe()
|
||||||
|
|
||||||
|
done := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
t.Helper()
|
||||||
|
done <- c.EncodeAll(w, want)
|
||||||
|
_ = w.Close()
|
||||||
|
}()
|
||||||
|
|
||||||
|
if got, err := c.NewDecoder(r).Decode(); err != nil {
|
||||||
|
t.Fatalf("Decode: error = %v", err)
|
||||||
|
} else if !reflect.DeepEqual(got, want) {
|
||||||
|
t.Fatalf("Decode: %#v, want %#v", got, want)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := <-done; err != nil {
|
||||||
|
t.Fatalf("EncodeAll: error = %v", err)
|
||||||
|
}
|
||||||
|
}, pkg.MustDecode(
|
||||||
|
"E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C",
|
||||||
|
),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
checkWithCache(t, testCasesCache)
|
||||||
|
}
|
||||||
106 internal/pkg/net.go (new file)
@@ -0,0 +1,106 @@
package pkg

import (
	"fmt"
	"io"
	"net/http"
	"path"
	"unique"
)

// An httpArtifact is an [Artifact] backed by a [http] url string. The method is
// hardcoded as [http.MethodGet]. Request body is not allowed because it cannot
// be deterministically represented by Params.
type httpArtifact struct {
	// Caller-supplied url string.
	url string

	// Caller-supplied checksum of the response body. This is validated when
	// closing the [io.ReadCloser] returned by Cure.
	checksum unique.Handle[Checksum]

	// client is the address of the caller-supplied [http.Client].
	client *http.Client
}

var _ KnownChecksum = new(httpArtifact)
var _ fmt.Stringer = new(httpArtifact)

// NewHTTPGet returns a new [FileArtifact] backed by the supplied client. A GET
// request is set up for url. If c is nil, [http.DefaultClient] is used instead.
func NewHTTPGet(
	c *http.Client,
	url string,
	checksum Checksum,
) FileArtifact {
	return &httpArtifact{url: url, checksum: unique.Make(checksum), client: c}
}
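
// Illustrative usage sketch (not part of this change); the URL and checksum
// are assumed placeholder values:
//
//	f := NewHTTPGet(nil, "https://example.org/artifact.bin", knownChecksum)
//	// Curing f issues a GET via http.DefaultClient; the response body is
//	// validated against knownChecksum when the returned reader is closed.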

// Kind returns the hardcoded [Kind] constant.
func (*httpArtifact) Kind() Kind { return KindHTTPGet }

// Params writes the backing url string. Client is not represented as it does
// not affect [Cache.Cure] outcome.
func (a *httpArtifact) Params(ctx *IContext) { ctx.WriteString(a.url) }

func init() {
	register(KindHTTPGet, func(r *IRReader) Artifact {
		url := r.ReadString()
		checksum, ok := r.Finalise()
		if !ok {
			panic(ErrExpectedChecksum)
		}
		return NewHTTPGet(nil, url, checksum.Value())
	})
}

// Dependencies returns a nil slice.
func (*httpArtifact) Dependencies() []Artifact { return nil }

// IsExclusive returns false: Cure returns as soon as a response is received.
func (*httpArtifact) IsExclusive() bool { return false }

// Checksum returns the caller-supplied checksum.
func (a *httpArtifact) Checksum() Checksum { return a.checksum.Value() }

// String returns [path.Base] over the backing url.
func (a *httpArtifact) String() string { return path.Base(a.url) }

// ResponseStatusError is returned for a response received from an [http.Client]
// with a status code other than [http.StatusOK].
type ResponseStatusError int

func (e ResponseStatusError) Error() string {
	return "the requested URL returned non-OK status: " + http.StatusText(int(e))
}

// Cure sends the http request and returns the resulting response body reader
// wrapped to perform checksum validation. It is valid but not encouraged to
// close the resulting [io.ReadCloser] before it is read to EOF, as that causes
// Close to block until all remaining data is consumed and validated.
func (a *httpArtifact) Cure(r *RContext) (rc io.ReadCloser, err error) {
	var req *http.Request
	req, err = http.NewRequestWithContext(r.Unwrap(), http.MethodGet, a.url, nil)
	if err != nil {
		return
	}
	req.Header.Set("User-Agent", "Hakurei/1.1")

	c := a.client
	if c == nil {
		c = http.DefaultClient
	}

	var resp *http.Response
	if resp, err = c.Do(req); err != nil {
		return
	}

	if resp.StatusCode != http.StatusOK {
		_ = resp.Body.Close()
		return nil, ResponseStatusError(resp.StatusCode)
	}

	rc = r.NewMeasuredReader(resp.Body, a.checksum)
	return
}
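
// Illustrative consumption sketch (not part of this change), assuming r is a
// valid *RContext; a checksum mismatch is only reported by Close:
//
//	rc, err := a.Cure(r)
//	if err != nil {
//		// request setup, transport, or non-OK status error
//	}
//	data, _ := io.ReadAll(rc)
//	if err := rc.Close(); err != nil {
//		// e.g. *ChecksumMismatchError if data does not match a.checksum
//	}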

161 internal/pkg/net_test.go (new file)
@@ -0,0 +1,161 @@
package pkg_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha512"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
"testing/fstest"
|
||||||
|
"unique"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/internal/pkg"
)

func TestHTTPGet(t *testing.T) {
	t.Parallel()

	const testdata = "\x7f\xe1\x69\xa2\xdd\x63\x96\x26\x83\x79\x61\x8b\xf0\x3f\xd5\x16\x9a\x39\x3a\xdb\xcf\xb1\xbc\x8d\x33\xff\x75\xee\x62\x56\xa9\xf0\x27\xac\x13\x94\x69"

	testdataChecksum := func() unique.Handle[pkg.Checksum] {
		h := sha512.New384()
		h.Write([]byte(testdata))
		return unique.Make(pkg.Checksum(h.Sum(nil)))
	}()

	var transport http.Transport
	client := http.Client{Transport: &transport}
	transport.RegisterProtocol("file", http.NewFileTransportFS(fstest.MapFS{
		"testdata": {Data: []byte(testdata), Mode: 0400},
	}))

	checkWithCache(t, []cacheTestCase{
		{"direct", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
			var r pkg.RContext
			rCacheVal := reflect.ValueOf(&r).Elem().FieldByName("cache")
			reflect.NewAt(
				rCacheVal.Type(),
				unsafe.Pointer(rCacheVal.UnsafeAddr()),
			).Elem().Set(reflect.ValueOf(c))

			f := pkg.NewHTTPGet(
				&client,
				"file:///testdata",
				testdataChecksum.Value(),
			)
			var got []byte
			if rc, err := f.Cure(&r); err != nil {
				t.Fatalf("Cure: error = %v", err)
			} else if got, err = io.ReadAll(rc); err != nil {
				t.Fatalf("ReadAll: error = %v", err)
			} else if string(got) != testdata {
				t.Fatalf("Cure: %x, want %x", got, testdata)
			} else if err = rc.Close(); err != nil {
				t.Fatalf("Close: error = %v", err)
			}

			// check direct validation
			f = pkg.NewHTTPGet(
				&client,
				"file:///testdata",
				pkg.Checksum{},
			)
			wantErrMismatch := &pkg.ChecksumMismatchError{
				Got: testdataChecksum.Value(),
			}
			if rc, err := f.Cure(&r); err != nil {
				t.Fatalf("Cure: error = %v", err)
			} else if got, err = io.ReadAll(rc); err != nil {
				t.Fatalf("ReadAll: error = %v", err)
			} else if string(got) != testdata {
				t.Fatalf("Cure: %x, want %x", got, testdata)
			} else if err = rc.Close(); !reflect.DeepEqual(err, wantErrMismatch) {
				t.Fatalf("Close: error = %#v, want %#v", err, wantErrMismatch)
			}

			// check fallback validation
			if rc, err := f.Cure(&r); err != nil {
				t.Fatalf("Cure: error = %v", err)
			} else if err = rc.Close(); !reflect.DeepEqual(err, wantErrMismatch) {
				t.Fatalf("Close: error = %#v, want %#v", err, wantErrMismatch)
			}

			// check direct response error
			f = pkg.NewHTTPGet(
				&client,
				"file:///nonexistent",
				pkg.Checksum{},
			)
			wantErrNotFound := pkg.ResponseStatusError(http.StatusNotFound)
			if _, err := f.Cure(&r); !reflect.DeepEqual(err, wantErrNotFound) {
				t.Fatalf("Cure: error = %#v, want %#v", err, wantErrNotFound)
			}
		}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C")},

		{"cure", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
			var r pkg.RContext
			rCacheVal := reflect.ValueOf(&r).Elem().FieldByName("cache")
			reflect.NewAt(
				rCacheVal.Type(),
				unsafe.Pointer(rCacheVal.UnsafeAddr()),
			).Elem().Set(reflect.ValueOf(c))

			f := pkg.NewHTTPGet(
				&client,
				"file:///testdata",
				testdataChecksum.Value(),
			)
			wantPathname := base.Append(
				"identifier",
				"oM-2pUlk-mOxK1t3aMWZer69UdOQlAXiAgMrpZ1476VoOqpYVP1aGFS9_HYy-D8_",
			)
			if pathname, checksum, err := c.Cure(f); err != nil {
				t.Fatalf("Cure: error = %v", err)
			} else if !pathname.Is(wantPathname) {
				t.Fatalf("Cure: %q, want %q", pathname, wantPathname)
			} else if checksum != testdataChecksum {
				t.Fatalf("Cure: %x, want %x", checksum.Value(), testdataChecksum.Value())
			}

			var got []byte
			if rc, err := f.Cure(&r); err != nil {
				t.Fatalf("Cure: error = %v", err)
			} else if got, err = io.ReadAll(rc); err != nil {
				t.Fatalf("ReadAll: error = %v", err)
			} else if string(got) != testdata {
				t.Fatalf("Cure: %x, want %x", got, testdata)
			} else if err = rc.Close(); err != nil {
				t.Fatalf("Close: error = %v", err)
			}

			// check load from cache
			f = pkg.NewHTTPGet(
				&client,
				"file:///testdata",
				testdataChecksum.Value(),
			)
			if rc, err := f.Cure(&r); err != nil {
				t.Fatalf("Cure: error = %v", err)
			} else if got, err = io.ReadAll(rc); err != nil {
				t.Fatalf("ReadAll: error = %v", err)
			} else if string(got) != testdata {
				t.Fatalf("Cure: %x, want %x", got, testdata)
			} else if err = rc.Close(); err != nil {
				t.Fatalf("Close: error = %v", err)
			}

			// check error passthrough
			f = pkg.NewHTTPGet(
				&client,
				"file:///nonexistent",
				pkg.Checksum{},
			)
			wantErrNotFound := pkg.ResponseStatusError(http.StatusNotFound)
			if _, _, err := c.Cure(f); !reflect.DeepEqual(err, wantErrNotFound) {
				t.Fatalf("Pathname: error = %#v, want %#v", err, wantErrNotFound)
			}
		}, pkg.MustDecode("L_0RFHpr9JUS4Zp14rz2dESSRvfLzpvqsLhR1-YjQt8hYlmEdVl7vI3_-v8UNPKs")},
	})
}
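The test above drives pkg.NewHTTPGet entirely offline by registering a handler for the file scheme on the client's transport. A minimal standalone sketch of that standard-library technique, with illustrative file name and contents that are not taken from the test fixtures:

// Sketch: serving an in-memory filesystem through the file:// scheme so that
// HTTP-based code can be exercised without a network listener.
package main

import (
	"fmt"
	"io"
	"net/http"
	"testing/fstest"
)

func main() {
	// Route file:// URLs through an in-memory fs.FS instead of the network.
	var transport http.Transport
	transport.RegisterProtocol("file", http.NewFileTransportFS(fstest.MapFS{
		"testdata": {Data: []byte("hello"), Mode: 0400},
	}))
	client := http.Client{Transport: &transport}

	// Requests against file:///testdata now resolve entirely in memory.
	resp, err := client.Get("file:///testdata")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	b, _ := io.ReadAll(resp.Body)
	fmt.Printf("%d %q\n", resp.StatusCode, b) // 200 "hello"
}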
internal/pkg/pkg.go (1737 lines, new file; diff suppressed because it is too large)
internal/pkg/pkg_test.go (1233 lines, new file; diff suppressed because it is too large)

internal/pkg/tar.go (250 lines, new file)
@@ -0,0 +1,250 @@
package pkg

import (
	"archive/tar"
	"compress/bzip2"
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"net/http"
	"os"

	"hakurei.app/container/check"
)

const (
	// TarUncompressed denotes an uncompressed tarball.
	TarUncompressed = iota
	// TarGzip denotes a tarball compressed via [gzip].
	TarGzip
	// TarBzip2 denotes a tarball compressed via [bzip2].
	TarBzip2
)

// A tarArtifact is an [Artifact] unpacking a tarball backed by a [FileArtifact].
type tarArtifact struct {
	// Caller-supplied backing tarball.
	f Artifact
	// Compression on top of the tarball.
	compression uint32
}

// tarArtifactNamed embeds tarArtifact for a [fmt.Stringer] tarball.
type tarArtifactNamed struct {
	tarArtifact
	// Copied from tarArtifact.f.
	name string
}

var _ fmt.Stringer = new(tarArtifactNamed)

// String returns the name of the underlying [Artifact] suffixed with unpack.
func (a *tarArtifactNamed) String() string { return a.name + "-unpack" }

// NewTar returns a new [Artifact] backed by the supplied [Artifact] and
// compression method. The source [Artifact] must be compatible with
// [TContext.Open].
func NewTar(a Artifact, compression uint32) Artifact {
	ta := tarArtifact{a, compression}
	if s, ok := a.(fmt.Stringer); ok {
		if name := s.String(); name != "" {
			return &tarArtifactNamed{ta, name}
		}
	}
	return &ta
}

// NewHTTPGetTar is abbreviation for NewHTTPGet passed to NewTar.
func NewHTTPGetTar(
	hc *http.Client,
	url string,
	checksum Checksum,
	compression uint32,
) Artifact {
	return NewTar(NewHTTPGet(hc, url, checksum), compression)
}

// Kind returns the hardcoded [Kind] constant.
func (a *tarArtifact) Kind() Kind { return KindTar }

// Params writes compression encoded in little endian.
func (a *tarArtifact) Params(ctx *IContext) { ctx.WriteUint32(a.compression) }

func init() {
	register(KindTar, func(r *IRReader) Artifact {
		a := NewTar(r.Next(), r.ReadUint32())
		if _, ok := r.Finalise(); ok {
			panic(ErrUnexpectedChecksum)
		}
		return a
	})
}

// Dependencies returns a slice containing the backing file.
func (a *tarArtifact) Dependencies() []Artifact {
	return []Artifact{a.f}
}

// IsExclusive returns false: decompressor and tar reader are fully sequential.
func (a *tarArtifact) IsExclusive() bool { return false }

// A DisallowedTypeflagError describes a disallowed typeflag encountered while
// unpacking a tarball.
type DisallowedTypeflagError byte

func (e DisallowedTypeflagError) Error() string {
	return "disallowed typeflag '" + string(e) + "'"
}

// Cure cures the [Artifact], producing a directory located at work.
func (a *tarArtifact) Cure(t *TContext) (err error) {
	temp := t.GetTempDir()
	var tr io.ReadCloser
	if tr, err = t.Open(a.f); err != nil {
		return
	}

	defer func(f io.ReadCloser) {
		if err == nil {
			err = tr.Close()
		}

		closeErr := f.Close()
		if err == nil {
			err = closeErr
		}
	}(tr)
	tr = io.NopCloser(tr)

	switch a.compression {
	case TarUncompressed:
		break

	case TarGzip:
		if tr, err = gzip.NewReader(tr); err != nil {
			return
		}
		break

	case TarBzip2:
		tr = io.NopCloser(bzip2.NewReader(tr))
		break

	default:
		return os.ErrInvalid
	}

	type dirTargetPerm struct {
		path *check.Absolute
		mode fs.FileMode
	}
	var madeDirectories []dirTargetPerm

	if err = os.MkdirAll(temp.String(), 0700); err != nil {
		return
	}

	var header *tar.Header
	r := tar.NewReader(tr)
	for header, err = r.Next(); err == nil; header, err = r.Next() {
		typeflag := header.Typeflag
		if typeflag == 0 {
			if len(header.Name) > 0 && header.Name[len(header.Name)-1] == '/' {
				typeflag = tar.TypeDir
			} else {
				typeflag = tar.TypeReg
			}
		}

		pathname := temp.Append(header.Name)
		if typeflag >= '0' && typeflag <= '9' && typeflag != tar.TypeDir {
			if err = os.MkdirAll(pathname.Dir().String(), 0700); err != nil {
				return
			}
		}

		switch typeflag {
		case tar.TypeReg:
			var f *os.File
			if f, err = os.OpenFile(
				pathname.String(),
				os.O_CREATE|os.O_EXCL|os.O_WRONLY,
				header.FileInfo().Mode()&0500,
			); err != nil {
				return
			}
			if _, err = io.Copy(f, r); err != nil {
				_ = f.Close()
				return
			} else if err = f.Close(); err != nil {
				return
			}
			break

		case tar.TypeLink:
			if err = os.Link(
				temp.Append(header.Linkname).String(),
				pathname.String(),
			); err != nil {
				return
			}
			break

		case tar.TypeSymlink:
			if err = os.Symlink(header.Linkname, pathname.String()); err != nil {
				return
			}
			break

		case tar.TypeDir:
			madeDirectories = append(madeDirectories, dirTargetPerm{
				path: pathname,
				mode: header.FileInfo().Mode(),
			})
			if err = os.MkdirAll(pathname.String(), 0700); err != nil {
				return
			}
			break

		case tar.TypeXGlobalHeader:
			continue // ignore

		default:
			return DisallowedTypeflagError(typeflag)
		}
	}
	if errors.Is(err, io.EOF) {
		err = nil
	}
	if err == nil {
		for _, e := range madeDirectories {
			if err = os.Chmod(e.path.String(), e.mode&0500); err != nil {
				return
			}
		}
	} else {
		return
	}

	if err = os.Chmod(temp.String(), 0700); err != nil {
		return
	}

	var entries []os.DirEntry
	if entries, err = os.ReadDir(temp.String()); err != nil {
		return
	}

	if len(entries) == 1 && entries[0].IsDir() {
		p := temp.Append(entries[0].Name())
		if err = os.Chmod(p.String(), 0700); err != nil {
			return
		}
		err = os.Rename(p.String(), t.GetWorkDir().String())
	} else {
		err = os.Rename(temp.String(), t.GetWorkDir().String())
	}
	return
}
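Read together, NewTar and NewHTTPGet compose into NewHTTPGetTar, which is how the rosa presets later in this change reference upstream release tarballs. A hedged sketch of a caller: the package name is arbitrary (it assumes a package inside the same module, since hakurei.app/internal/pkg is internal), the URL is a placeholder, and the zero Checksum stands in for a real pinned digest.

// Sketch only; not part of the change itself.
package example

import "hakurei.app/internal/pkg"

func exampleSource() pkg.Artifact {
	return pkg.NewHTTPGetTar(
		nil, // nil *http.Client mirrors how the rosa presets in this change call it
		"https://example.org/example-1.0.tar.gz", // placeholder URL
		pkg.Checksum{},                           // placeholder; a real digest would be pinned here
		pkg.TarGzip,
	)
}

// Equivalent composition by hand, for clarity:
func exampleSourceByHand() pkg.Artifact {
	return pkg.NewTar(
		pkg.NewHTTPGet(nil, "https://example.org/example-1.0.tar.gz", pkg.Checksum{}),
		pkg.TarGzip,
	)
}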
internal/pkg/tar_test.go (226 lines, new file)
@@ -0,0 +1,226 @@
package pkg_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/tar"
|
||||||
|
"bytes"
|
||||||
|
"compress/gzip"
|
||||||
|
"crypto/sha512"
|
||||||
|
"errors"
|
||||||
|
"io/fs"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
"testing/fstest"
|
||||||
|
|
||||||
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/container/stub"
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestTar(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
checkWithCache(t, []cacheTestCase{
|
||||||
|
{"http", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
|
checkTarHTTP(t, base, c, fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
|
|
||||||
|
"checksum": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check": {Mode: 0400, Data: []byte{0, 0}},
|
||||||
|
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
|
||||||
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
|
"identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||||
|
"identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||||
|
|
||||||
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
|
}, pkg.MustDecode(
|
||||||
|
"cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM",
|
||||||
|
))
|
||||||
|
}, pkg.MustDecode("NQTlc466JmSVLIyWklm_u8_g95jEEb98PxJU-kjwxLpfdjwMWJq0G8ze9R4Vo1Vu")},
|
||||||
|
|
||||||
|
{"http expand", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
|
checkTarHTTP(t, base, c, fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
|
|
||||||
|
"lib": {Mode: fs.ModeDir | 0700},
|
||||||
|
"lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
}, pkg.MustDecode(
|
||||||
|
"CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN",
|
||||||
|
))
|
||||||
|
}, pkg.MustDecode("hSoSSgCYTNonX3Q8FjvjD1fBl-E-BQyA6OTXro2OadXqbST4tZ-akGXszdeqphRe")},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkTarHTTP(
|
||||||
|
t *testing.T,
|
||||||
|
base *check.Absolute,
|
||||||
|
c *pkg.Cache,
|
||||||
|
testdataFsys fs.FS,
|
||||||
|
wantChecksum pkg.Checksum,
|
||||||
|
) {
|
||||||
|
var testdata string
|
||||||
|
{
|
||||||
|
var buf bytes.Buffer
|
||||||
|
w := tar.NewWriter(&buf)
|
||||||
|
if err := w.AddFS(testdataFsys); err != nil {
|
||||||
|
t.Fatalf("AddFS: error = %v", err)
|
||||||
|
}
|
||||||
|
if err := w.Close(); err != nil {
|
||||||
|
t.Fatalf("Close: error = %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var zbuf bytes.Buffer
|
||||||
|
gw := gzip.NewWriter(&zbuf)
|
||||||
|
if _, err := gw.Write(buf.Bytes()); err != nil {
|
||||||
|
t.Fatalf("Write: error = %v", err)
|
||||||
|
}
|
||||||
|
if err := gw.Close(); err != nil {
|
||||||
|
t.Fatalf("Close: error = %v", err)
|
||||||
|
}
|
||||||
|
testdata = zbuf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
testdataChecksum := func() pkg.Checksum {
|
||||||
|
h := sha512.New384()
|
||||||
|
h.Write([]byte(testdata))
|
||||||
|
return (pkg.Checksum)(h.Sum(nil))
|
||||||
|
}()
|
||||||
|
|
||||||
|
var transport http.Transport
|
||||||
|
client := http.Client{Transport: &transport}
|
||||||
|
transport.RegisterProtocol("file", http.NewFileTransportFS(fstest.MapFS{
|
||||||
|
"testdata": {Data: []byte(testdata), Mode: 0400},
|
||||||
|
}))
|
||||||
|
|
||||||
|
wantIdent := func() pkg.ID {
|
||||||
|
h := sha512.New384()
|
||||||
|
|
||||||
|
// kind uint64
|
||||||
|
h.Write([]byte{byte(pkg.KindTar), 0, 0, 0, 0, 0, 0, 0})
|
||||||
|
// deps_sz uint64
|
||||||
|
h.Write([]byte{1, 0, 0, 0, 0, 0, 0, 0})
|
||||||
|
|
||||||
|
// kind uint64
|
||||||
|
h.Write([]byte{byte(pkg.KindHTTPGet), 0, 0, 0, 0, 0, 0, 0})
|
||||||
|
// ident ID
|
||||||
|
h0 := sha512.New384()
|
||||||
|
// kind uint64
|
||||||
|
h0.Write([]byte{byte(pkg.KindHTTPGet), 0, 0, 0, 0, 0, 0, 0})
|
||||||
|
// deps_sz uint64
|
||||||
|
h0.Write([]byte{0, 0, 0, 0, 0, 0, 0, 0})
|
||||||
|
// url string
|
||||||
|
h0.Write([]byte{byte(pkg.IRKindString), 0, 0, 0})
|
||||||
|
h0.Write([]byte{0x10, 0, 0, 0})
|
||||||
|
h0.Write([]byte("file:///testdata"))
|
||||||
|
// end(KnownChecksum)
|
||||||
|
h0.Write([]byte{byte(pkg.IRKindEnd), 0, 0, 0})
|
||||||
|
h0.Write([]byte{byte(pkg.IREndKnownChecksum), 0, 0, 0})
|
||||||
|
// checksum Checksum
|
||||||
|
h0.Write(testdataChecksum[:])
|
||||||
|
h.Write(h0.Sum(nil))
|
||||||
|
// compression uint32
|
||||||
|
h.Write([]byte{byte(pkg.IRKindUint32), 0, 0, 0})
|
||||||
|
h.Write([]byte{pkg.TarGzip, 0, 0, 0})
|
||||||
|
// end
|
||||||
|
h.Write([]byte{byte(pkg.IRKindEnd), 0, 0, 0})
|
||||||
|
h.Write([]byte{0, 0, 0, 0})
|
||||||
|
|
||||||
|
return pkg.ID(h.Sum(nil))
|
||||||
|
}()
|
||||||
|
|
||||||
|
a := pkg.NewHTTPGetTar(
|
||||||
|
&client,
|
||||||
|
"file:///testdata",
|
||||||
|
testdataChecksum,
|
||||||
|
pkg.TarGzip,
|
||||||
|
)
|
||||||
|
|
||||||
|
tarDir := stubArtifact{
|
||||||
|
kind: pkg.KindExec,
|
||||||
|
params: []byte("directory containing a single regular file"),
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
work := t.GetWorkDir()
|
||||||
|
if err := os.MkdirAll(work.String(), 0700); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return os.WriteFile(
|
||||||
|
work.Append("sample.tar.gz").String(),
|
||||||
|
[]byte(testdata),
|
||||||
|
0400,
|
||||||
|
)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
tarDirMulti := stubArtifact{
|
||||||
|
kind: pkg.KindExec,
|
||||||
|
params: []byte("directory containing a multiple entries"),
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
work := t.GetWorkDir()
|
||||||
|
if err := os.MkdirAll(work.Append(
|
||||||
|
"garbage",
|
||||||
|
).String(), 0700); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return os.WriteFile(
|
||||||
|
work.Append("sample.tar.gz").String(),
|
||||||
|
[]byte(testdata),
|
||||||
|
0400,
|
||||||
|
)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
tarDirType := stubArtifact{
|
||||||
|
kind: pkg.KindExec,
|
||||||
|
params: []byte("directory containing a symbolic link"),
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
work := t.GetWorkDir()
|
||||||
|
if err := os.MkdirAll(work.String(), 0700); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return os.Symlink(
|
||||||
|
work.String(),
|
||||||
|
work.Append("sample.tar.gz").String(),
|
||||||
|
)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
// destroy these to avoid including it in flatten test case
|
||||||
|
defer newDestroyArtifactFunc(&tarDir)(t, base, c)
|
||||||
|
defer newDestroyArtifactFunc(&tarDirMulti)(t, base, c)
|
||||||
|
defer newDestroyArtifactFunc(&tarDirType)(t, base, c)
|
||||||
|
|
||||||
|
cureMany(t, c, []cureStep{
|
||||||
|
{"file", a, base.Append(
|
||||||
|
"identifier",
|
||||||
|
pkg.Encode(wantIdent),
|
||||||
|
), wantChecksum, nil},
|
||||||
|
|
||||||
|
{"directory", pkg.NewTar(
|
||||||
|
&tarDir,
|
||||||
|
pkg.TarGzip,
|
||||||
|
), ignorePathname, wantChecksum, nil},
|
||||||
|
|
||||||
|
{"multiple entries", pkg.NewTar(
|
||||||
|
&tarDirMulti,
|
||||||
|
pkg.TarGzip,
|
||||||
|
), nil, pkg.Checksum{}, errors.New(
|
||||||
|
"input directory does not contain a single regular file",
|
||||||
|
)},
|
||||||
|
|
||||||
|
{"bad type", pkg.NewTar(
|
||||||
|
&tarDirType,
|
||||||
|
pkg.TarGzip,
|
||||||
|
), nil, pkg.Checksum{}, errors.New(
|
||||||
|
"input directory does not contain a single regular file",
|
||||||
|
)},
|
||||||
|
|
||||||
|
{"error passthrough", pkg.NewTar(&stubArtifact{
|
||||||
|
kind: pkg.KindExec,
|
||||||
|
params: []byte("doomed artifact"),
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
return stub.UniqueError(0xcafe)
|
||||||
|
},
|
||||||
|
}, pkg.TarGzip), nil, pkg.Checksum{}, stub.UniqueError(0xcafe)},
|
||||||
|
})
|
||||||
|
}
|
internal/pkg/testdata/main.go (268 lines, new file, vendored)
@@ -0,0 +1,268 @@
//go:build testtool
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/gob"
|
||||||
|
"log"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"reflect"
|
||||||
|
"slices"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/container/fhs"
|
||||||
|
"hakurei.app/container/vfs"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
log.SetFlags(0)
|
||||||
|
log.SetPrefix("testtool: ")
|
||||||
|
|
||||||
|
var hostNet, layers, promote bool
|
||||||
|
if len(os.Args) == 2 && os.Args[0] == "testtool" {
|
||||||
|
switch os.Args[1] {
|
||||||
|
case "net":
|
||||||
|
hostNet = true
|
||||||
|
log.SetPrefix("testtool(net): ")
|
||||||
|
break
|
||||||
|
|
||||||
|
case "layers":
|
||||||
|
layers = true
|
||||||
|
log.SetPrefix("testtool(layers): ")
|
||||||
|
break
|
||||||
|
|
||||||
|
case "promote":
|
||||||
|
promote = true
|
||||||
|
log.SetPrefix("testtool(promote): ")
|
||||||
|
|
||||||
|
default:
|
||||||
|
log.Fatalf("Args: %q", os.Args)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
} else if wantArgs := []string{"testtool"}; !slices.Equal(os.Args, wantArgs) {
|
||||||
|
log.Fatalf("Args: %q, want %q", os.Args, wantArgs)
|
||||||
|
}
|
||||||
|
|
||||||
|
var overlayRoot bool
|
||||||
|
wantEnv := []string{"HAKUREI_TEST=1"}
|
||||||
|
if len(os.Environ()) == 2 {
|
||||||
|
overlayRoot = true
|
||||||
|
if !layers && !promote {
|
||||||
|
log.SetPrefix("testtool(overlay root): ")
|
||||||
|
}
|
||||||
|
wantEnv = []string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"}
|
||||||
|
}
|
||||||
|
if !slices.Equal(wantEnv, os.Environ()) {
|
||||||
|
log.Fatalf("Environ: %q, want %q", os.Environ(), wantEnv)
|
||||||
|
}
|
||||||
|
|
||||||
|
var overlayWork bool
|
||||||
|
const (
|
||||||
|
wantExec = "/opt/bin/testtool"
|
||||||
|
wantExecWork = "/work/bin/testtool"
|
||||||
|
)
|
||||||
|
var iftPath string
|
||||||
|
if got, err := os.Executable(); err != nil {
|
||||||
|
log.Fatalf("Executable: error = %v", err)
|
||||||
|
} else {
|
||||||
|
iftPath = path.Join(path.Dir(path.Dir(got)), "ift")
|
||||||
|
|
||||||
|
if got != wantExec {
|
||||||
|
switch got {
|
||||||
|
case wantExecWork:
|
||||||
|
overlayWork = true
|
||||||
|
log.SetPrefix("testtool(overlay work): ")
|
||||||
|
|
||||||
|
default:
|
||||||
|
log.Fatalf("Executable: %q, want %q", got, wantExec)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
wantHostname := "cure"
|
||||||
|
if hostNet {
|
||||||
|
wantHostname += "-net"
|
||||||
|
}
|
||||||
|
|
||||||
|
if hostname, err := os.Hostname(); err != nil {
|
||||||
|
log.Fatalf("Hostname: error = %v", err)
|
||||||
|
} else if hostname != wantHostname {
|
||||||
|
log.Fatalf("Hostname: %q, want %q", hostname, wantHostname)
|
||||||
|
}
|
||||||
|
|
||||||
|
var m *vfs.MountInfo
|
||||||
|
if f, err := os.Open(fhs.Proc + "self/mountinfo"); err != nil {
|
||||||
|
log.Fatalf("Open: error = %v", err)
|
||||||
|
} else {
|
||||||
|
err = vfs.NewMountInfoDecoder(f).Decode(&m)
|
||||||
|
closeErr := f.Close()
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Decode: error = %v", err)
|
||||||
|
}
|
||||||
|
if closeErr != nil {
|
||||||
|
log.Fatalf("Close: error = %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if ift, err := net.Interfaces(); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
} else if !hostNet {
|
||||||
|
if len(ift) != 1 || ift[0].Name != "lo" {
|
||||||
|
log.Fatalln("got interfaces", strings.Join(slices.Collect(func(yield func(ifn string) bool) {
|
||||||
|
for _, ifi := range ift {
|
||||||
|
if !yield(ifi.Name) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}), ", "))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
var iftParent []net.Interface
|
||||||
|
|
||||||
|
var r *os.File
|
||||||
|
if r, err = os.Open(iftPath); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
} else {
|
||||||
|
err = gob.NewDecoder(r).Decode(&iftParent)
|
||||||
|
closeErr := r.Close()
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
if closeErr != nil {
|
||||||
|
log.Fatal(closeErr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !reflect.DeepEqual(ift, iftParent) {
|
||||||
|
log.Fatalf("Interfaces: %#v, want %#v", ift, iftParent)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const checksumEmptyDir = "MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"
|
||||||
|
ident := "dztPS6jRjiZtCF4_p8AzfnxGp6obkhrgFVsxdodbKWUoAEVtDz3MykepJB4kI_ks"
|
||||||
|
log.Println(m)
|
||||||
|
next := func() { m = m.Next; log.Println(m) }
|
||||||
|
|
||||||
|
if overlayRoot {
|
||||||
|
ident = "RdMA-mubnrHuu3Ky1wWyxauSYCO0ZH_zCPUj3uDHqkfwv5sGcByoF_g5PjlGiClb"
|
||||||
|
|
||||||
|
if m.Root != "/" || m.Target != "/" ||
|
||||||
|
m.Source != "overlay" || m.FsType != "overlay" {
|
||||||
|
log.Fatal("unexpected root mount entry")
|
||||||
|
}
|
||||||
|
var lowerdir string
|
||||||
|
for _, o := range strings.Split(m.FsOptstr, ",") {
|
||||||
|
const lowerdirKey = "lowerdir="
|
||||||
|
if strings.HasPrefix(o, lowerdirKey) {
|
||||||
|
lowerdir = o[len(lowerdirKey):]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !layers {
|
||||||
|
if path.Base(lowerdir) != checksumEmptyDir {
|
||||||
|
log.Fatal("unexpected artifact checksum")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
ident = "p1t_drXr34i-jZNuxDMLaMOdL6tZvQqhavNafGynGqxOZoXAUTSn7kqNh3Ovv3DT"
|
||||||
|
|
||||||
|
lowerdirsEscaped := strings.Split(lowerdir, ":")
|
||||||
|
lowerdirs := lowerdirsEscaped[:0]
|
||||||
|
// ignore the option separator since it does not appear in ident
|
||||||
|
for i, e := range lowerdirsEscaped {
|
||||||
|
if len(e) > 0 &&
|
||||||
|
e[len(e)-1] == check.SpecialOverlayEscape[0] &&
|
||||||
|
(len(e) == 1 || e[len(e)-2] != check.SpecialOverlayEscape[0]) {
|
||||||
|
// ignore escaped pathname separator since it does not
|
||||||
|
// appear in ident
|
||||||
|
|
||||||
|
e = e[:len(e)-1]
|
||||||
|
if len(lowerdirsEscaped) != i {
|
||||||
|
lowerdirsEscaped[i+1] = e + lowerdirsEscaped[i+1]
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
lowerdirs = append(lowerdirs, e)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(lowerdirs) != 2 ||
|
||||||
|
path.Base(lowerdirs[0]) != "MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU" ||
|
||||||
|
path.Base(lowerdirs[1]) != "nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK" {
|
||||||
|
log.Fatalf("unexpected lowerdirs %s", strings.Join(lowerdirs, ", "))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if hostNet {
|
||||||
|
ident = "G8qPxD9puvvoOVV7lrT80eyDeIl3G_CCFoKw12c8mCjMdG1zF7NEPkwYpNubClK3"
|
||||||
|
}
|
||||||
|
|
||||||
|
if m.Root != "/sysroot" || m.Target != "/" {
|
||||||
|
log.Fatal("unexpected root mount entry")
|
||||||
|
}
|
||||||
|
|
||||||
|
next()
|
||||||
|
if path.Base(m.Root) != "OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb" {
|
||||||
|
log.Fatal("unexpected file artifact checksum")
|
||||||
|
}
|
||||||
|
|
||||||
|
next()
|
||||||
|
if path.Base(m.Root) != checksumEmptyDir {
|
||||||
|
log.Fatal("unexpected artifact checksum")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if promote {
|
||||||
|
ident = "xXTIYcXmgJWNLC91c417RRrNM9cjELwEZHpGvf8Fk_GNP5agRJp_SicD0w9aMeLJ"
|
||||||
|
}
|
||||||
|
|
||||||
|
next() // testtool artifact
|
||||||
|
|
||||||
|
next()
|
||||||
|
if overlayWork {
|
||||||
|
ident = "5hlaukCirnXE4W_RSLJFOZN47Z5RiHnacXzdFp_70cLgiJUGR6cSb_HaFftkzi0-"
|
||||||
|
if m.Root != "/" || m.Target != "/work" ||
|
||||||
|
m.Source != "overlay" || m.FsType != "overlay" {
|
||||||
|
log.Fatal("unexpected work mount entry")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if path.Base(m.Root) != ident || m.Target != "/work" {
|
||||||
|
log.Fatal("unexpected work mount entry")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
next()
|
||||||
|
if path.Base(m.Root) != ident || m.Target != "/tmp" {
|
||||||
|
log.Fatal("unexpected temp mount entry")
|
||||||
|
}
|
||||||
|
|
||||||
|
next()
|
||||||
|
if m.Root != "/" || m.Target != "/proc" || m.Source != "proc" || m.FsType != "proc" {
|
||||||
|
log.Fatal("unexpected proc mount entry")
|
||||||
|
}
|
||||||
|
|
||||||
|
next()
|
||||||
|
if m.Root != "/" || m.Target != "/dev" || m.Source != "devtmpfs" || m.FsType != "tmpfs" {
|
||||||
|
log.Fatal("unexpected dev mount entry")
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < 9; i++ { // private /dev entries
|
||||||
|
next()
|
||||||
|
}
|
||||||
|
|
||||||
|
if m.Next != nil {
|
||||||
|
log.Println("unexpected extra mount entries")
|
||||||
|
for m.Next != nil {
|
||||||
|
next()
|
||||||
|
}
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
checkData := []byte{0}
|
||||||
|
if hostNet {
|
||||||
|
checkData = []byte("net")
|
||||||
|
}
|
||||||
|
if err := os.WriteFile("check", checkData, 0400); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
internal/rosa/acl.go (90 lines, new file)
@@ -0,0 +1,90 @@
package rosa
|
||||||
|
|
||||||
|
import "hakurei.app/internal/pkg"
|
||||||
|
|
||||||
|
func (t Toolchain) newAttr() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "2.5.2"
|
||||||
|
checksum = "YWEphrz6vg1sUMmHHVr1CRo53pFXRhq_pjN-AlG8UgwZK1y6m7zuDhxqJhD0SV0l"
|
||||||
|
)
|
||||||
|
return t.NewViaMake("attr", version, t.NewPatchedSource(
|
||||||
|
"attr", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://download.savannah.nongnu.org/releases/attr/"+
|
||||||
|
"attr-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
), true, [2]string{"libgen-basename", `From 8a80d895dfd779373363c3a4b62ecce5a549efb2 Mon Sep 17 00:00:00 2001
|
||||||
|
From: "Haelwenn (lanodan) Monnier" <contact@hacktivis.me>
|
||||||
|
Date: Sat, 30 Mar 2024 10:17:10 +0100
|
||||||
|
Subject: tools/attr.c: Add missing libgen.h include for basename(3)
|
||||||
|
|
||||||
|
Fixes compilation issue with musl and modern C99 compilers.
|
||||||
|
|
||||||
|
See: https://bugs.gentoo.org/926294
|
||||||
|
---
|
||||||
|
tools/attr.c | 1 +
|
||||||
|
1 file changed, 1 insertion(+)
|
||||||
|
|
||||||
|
diff --git a/tools/attr.c b/tools/attr.c
|
||||||
|
index f12e4af..6a3c1e9 100644
|
||||||
|
--- a/tools/attr.c
|
||||||
|
+++ b/tools/attr.c
|
||||||
|
@@ -28,6 +28,7 @@
|
||||||
|
#include <errno.h>
|
||||||
|
#include <string.h>
|
||||||
|
#include <locale.h>
|
||||||
|
+#include <libgen.h>
|
||||||
|
|
||||||
|
#include <attr/attributes.h>
|
||||||
|
|
||||||
|
--
|
||||||
|
cgit v1.1`}, [2]string{"musl-errno", `diff --git a/test/attr.test b/test/attr.test
|
||||||
|
index 6ce2f9b..e9bde92 100644
|
||||||
|
--- a/test/attr.test
|
||||||
|
+++ b/test/attr.test
|
||||||
|
@@ -11,7 +11,7 @@ Try various valid and invalid names
|
||||||
|
|
||||||
|
$ touch f
|
||||||
|
$ setfattr -n user -v value f
|
||||||
|
- > setfattr: f: Operation not supported
|
||||||
|
+ > setfattr: f: Not supported
|
||||||
|
|
||||||
|
$ setfattr -n user. -v value f
|
||||||
|
> setfattr: f: Invalid argument
|
||||||
|
`},
|
||||||
|
), &MakeAttr{
|
||||||
|
ScriptEarly: `
|
||||||
|
ln -s ../../system/bin/perl /usr/bin
|
||||||
|
`,
|
||||||
|
Configure: [][2]string{
|
||||||
|
{"enable-static"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
t.Load(Perl),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func init() { artifactsF[Attr] = Toolchain.newAttr }
|
||||||
|
|
||||||
|
func (t Toolchain) newACL() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "2.3.2"
|
||||||
|
checksum = "-fY5nwH4K8ZHBCRXrzLdguPkqjKI6WIiGu4dBtrZ1o0t6AIU73w8wwJz_UyjIS0P"
|
||||||
|
)
|
||||||
|
return t.NewViaMake("acl", version, pkg.NewHTTPGetTar(
|
||||||
|
nil,
|
||||||
|
"https://download.savannah.nongnu.org/releases/acl/"+
|
||||||
|
"acl-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
), &MakeAttr{
|
||||||
|
Configure: [][2]string{
|
||||||
|
{"enable-static"},
|
||||||
|
},
|
||||||
|
|
||||||
|
// makes assumptions about uid_map/gid_map
|
||||||
|
SkipCheck: true,
|
||||||
|
},
|
||||||
|
t.Load(Attr),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func init() { artifactsF[ACL] = Toolchain.newACL }
|
internal/rosa/all.go (175 lines, new file)
@@ -0,0 +1,175 @@
package rosa
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PArtifact is a lazily-initialised [pkg.Artifact] preset.
|
||||||
|
type PArtifact int
|
||||||
|
|
||||||
|
const (
|
||||||
|
ACL PArtifact = iota
|
||||||
|
Attr
|
||||||
|
Autoconf
|
||||||
|
Automake
|
||||||
|
Bash
|
||||||
|
Binutils
|
||||||
|
CMake
|
||||||
|
Coreutils
|
||||||
|
Curl
|
||||||
|
Diffutils
|
||||||
|
Findutils
|
||||||
|
Fuse
|
||||||
|
Gawk
|
||||||
|
GMP
|
||||||
|
Gettext
|
||||||
|
Git
|
||||||
|
Go
|
||||||
|
Gperf
|
||||||
|
Grep
|
||||||
|
Gzip
|
||||||
|
Hakurei
|
||||||
|
HakureiDist
|
||||||
|
IniConfig
|
||||||
|
KernelHeaders
|
||||||
|
LibXau
|
||||||
|
Libexpat
|
||||||
|
Libpsl
|
||||||
|
Libffi
|
||||||
|
Libgd
|
||||||
|
Libtool
|
||||||
|
Libseccomp
|
||||||
|
Libucontext
|
||||||
|
Libxml2
|
||||||
|
M4
|
||||||
|
MPC
|
||||||
|
MPFR
|
||||||
|
Make
|
||||||
|
Meson
|
||||||
|
Mksh
|
||||||
|
NSS
|
||||||
|
NSSCACert
|
||||||
|
Ninja
|
||||||
|
OpenSSL
|
||||||
|
Packaging
|
||||||
|
Patch
|
||||||
|
Perl
|
||||||
|
PkgConfig
|
||||||
|
Pluggy
|
||||||
|
PyTest
|
||||||
|
Pygments
|
||||||
|
Python
|
||||||
|
Rsync
|
||||||
|
Sed
|
||||||
|
Setuptools
|
||||||
|
Toybox
|
||||||
|
toyboxEarly
|
||||||
|
Unzip
|
||||||
|
utilMacros
|
||||||
|
Wayland
|
||||||
|
WaylandProtocols
|
||||||
|
XCB
|
||||||
|
XCBProto
|
||||||
|
Xproto
|
||||||
|
XZ
|
||||||
|
Zlib
|
||||||
|
|
||||||
|
buildcatrust
|
||||||
|
|
||||||
|
// gcc is a hacked-to-pieces GCC toolchain meant for use in intermediate
|
||||||
|
// stages only. This preset and its direct output must never be exposed.
|
||||||
|
gcc
|
||||||
|
|
||||||
|
// _presetEnd is the total number of presets and does not denote a preset.
|
||||||
|
_presetEnd
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// artifactsF is an array of functions for the result of [PArtifact].
|
||||||
|
artifactsF [_presetEnd]func(t Toolchain) pkg.Artifact
|
||||||
|
|
||||||
|
// artifacts stores the result of artifactsF.
|
||||||
|
artifacts [_toolchainEnd][len(artifactsF)]pkg.Artifact
|
||||||
|
// artifactsOnce is for lazy initialisation of artifacts.
|
||||||
|
artifactsOnce [_toolchainEnd][len(artifactsF)]sync.Once
|
||||||
|
)
|
||||||
|
|
||||||
|
// Load returns the resulting [pkg.Artifact] of [PArtifact].
|
||||||
|
func (t Toolchain) Load(p PArtifact) pkg.Artifact {
|
||||||
|
artifactsOnce[t][p].Do(func() {
|
||||||
|
artifacts[t][p] = artifactsF[p](t)
|
||||||
|
})
|
||||||
|
return artifacts[t][p]
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResolveName returns a [PArtifact] by name.
|
||||||
|
func ResolveName(name string) (p PArtifact, ok bool) {
|
||||||
|
p, ok = map[string]PArtifact{
|
||||||
|
"acl": ACL,
|
||||||
|
"attr": Attr,
|
||||||
|
"autoconf": Autoconf,
|
||||||
|
"automake": Automake,
|
||||||
|
"bash": Bash,
|
||||||
|
"binutils": Binutils,
|
||||||
|
"cmake": CMake,
|
||||||
|
"coreutils": Coreutils,
|
||||||
|
"curl": Curl,
|
||||||
|
"diffutils": Diffutils,
|
||||||
|
"findutils": Findutils,
|
||||||
|
"fuse": Fuse,
|
||||||
|
"gawk": Gawk,
|
||||||
|
"gmp": GMP,
|
||||||
|
"gettext": Gettext,
|
||||||
|
"git": Git,
|
||||||
|
"go": Go,
|
||||||
|
"gperf": Gperf,
|
||||||
|
"grep": Grep,
|
||||||
|
"gzip": Gzip,
|
||||||
|
"hakurei": Hakurei,
|
||||||
|
"hakurei-dist": HakureiDist,
|
||||||
|
"iniconfig": IniConfig,
|
||||||
|
"kernel-headers": KernelHeaders,
|
||||||
|
"libXau": LibXau,
|
||||||
|
"libexpat": Libexpat,
|
||||||
|
"libpsl": Libpsl,
|
||||||
|
"libseccomp": Libseccomp,
|
||||||
|
"libucontext": Libucontext,
|
||||||
|
"libxml2": Libxml2,
|
||||||
|
"libffi": Libffi,
|
||||||
|
"libgd": Libgd,
|
||||||
|
"libtool": Libtool,
|
||||||
|
"m4": M4,
|
||||||
|
"mpc": MPC,
|
||||||
|
"mpfr": MPFR,
|
||||||
|
"make": Make,
|
||||||
|
"meson": Meson,
|
||||||
|
"mksh": Mksh,
|
||||||
|
"nss": NSS,
|
||||||
|
"nss-cacert": NSSCACert,
|
||||||
|
"ninja": Ninja,
|
||||||
|
"openssl": OpenSSL,
|
||||||
|
"packaging": Packaging,
|
||||||
|
"patch": Patch,
|
||||||
|
"perl": Perl,
|
||||||
|
"pkg-config": PkgConfig,
|
||||||
|
"pluggy": Pluggy,
|
||||||
|
"pytest": PyTest,
|
||||||
|
"pygments": Pygments,
|
||||||
|
"python": Python,
|
||||||
|
"rsync": Rsync,
|
||||||
|
"sed": Sed,
|
||||||
|
"setuptools": Setuptools,
|
||||||
|
"toybox": Toybox,
|
||||||
|
"unzip": Unzip,
|
||||||
|
"wayland": Wayland,
|
||||||
|
"wayland-protocols": WaylandProtocols,
|
||||||
|
"xcb": XCB,
|
||||||
|
"xcb-proto": XCBProto,
|
||||||
|
"xproto": Xproto,
|
||||||
|
"xz": XZ,
|
||||||
|
"zlib": Zlib,
|
||||||
|
}[name]
|
||||||
|
return
|
||||||
|
}
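Load above lazily builds each preset at most once per toolchain through parallel arrays indexed by preset and guarded by sync.Once. A standalone sketch of that pattern with made-up names, separate from the change itself:

// Sketch of the lazy-initialisation pattern; all identifiers here are illustrative.
package main

import (
	"fmt"
	"sync"
)

type preset int

const (
	presetA preset = iota
	presetB
	presetEnd // total number of presets, not a preset itself
)

var (
	builders [presetEnd]func() string // one constructor per preset
	results  [presetEnd]string        // memoised constructor results
	once     [presetEnd]sync.Once     // guards each slot
)

func load(p preset) string {
	once[p].Do(func() { results[p] = builders[p]() })
	return results[p]
}

func main() {
	builders[presetA] = func() string { return "built A" }
	builders[presetB] = func() string { return "built B" }
	// Each builder runs exactly once, no matter how often load is called.
	fmt.Println(load(presetA), load(presetA), load(presetB))
}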
|
internal/rosa/busybox.go (125 lines, new file)
@@ -0,0 +1,125 @@
package rosa

import (
	"fmt"
	"io"
	"net/http"
	"os"
	"runtime"
	"time"

	"hakurei.app/container/fhs"
	"hakurei.app/internal/pkg"
)

// busyboxBin is a busybox binary distribution installed under bin/busybox.
type busyboxBin struct {
	// Underlying busybox binary.
	bin pkg.FileArtifact
}

// Kind returns the hardcoded [pkg.Kind] value.
func (a busyboxBin) Kind() pkg.Kind { return kindBusyboxBin }

// Params is a noop.
func (a busyboxBin) Params(*pkg.IContext) {}

// IsExclusive returns false: Cure performs a trivial filesystem write.
func (busyboxBin) IsExclusive() bool { return false }

// Dependencies returns the underlying busybox [pkg.File].
func (a busyboxBin) Dependencies() []pkg.Artifact {
	return []pkg.Artifact{a.bin}
}

func init() {
	pkg.Register(kindBusyboxBin, func(r *pkg.IRReader) pkg.Artifact {
		a := busyboxBin{r.Next().(pkg.FileArtifact)}
		if _, ok := r.Finalise(); ok {
			panic(pkg.ErrUnexpectedChecksum)
		}
		return a
	})
}

// String returns the reporting name of the underlying file prefixed with expand.
func (a busyboxBin) String() string {
	return "expand-" + a.bin.(fmt.Stringer).String()
}

// Cure installs the underlying busybox [pkg.File] to bin/busybox.
func (a busyboxBin) Cure(t *pkg.TContext) (err error) {
	var r io.ReadCloser
	if r, err = t.Open(a.bin); err != nil {
		return
	}
	defer func() {
		closeErr := r.Close()
		if err == nil {
			err = closeErr
		}
	}()

	binDir := t.GetWorkDir().Append("bin")
	if err = os.MkdirAll(binDir.String(), 0700); err != nil {
		return
	}

	var w *os.File
	if w, err = os.OpenFile(
		binDir.Append("busybox").String(),
		os.O_WRONLY|os.O_CREATE|os.O_EXCL,
		0500,
	); err != nil {
		return
	}
	defer func() {
		closeErr := w.Close()
		if err == nil {
			err = closeErr
		}
	}()

	_, err = io.Copy(w, r)
	return
}

// newBusyboxBin returns a [pkg.Artifact] containing a busybox installation from
// the https://busybox.net/downloads/binaries/ binary release.
func newBusyboxBin() pkg.Artifact {
	var version, url, checksum string
	switch runtime.GOARCH {
	case "amd64":
		version = "1.35.0"
		url = "https://busybox.net/downloads/binaries/" +
			version + "-" + linuxArch() + "-linux-musl/busybox"
		checksum = "L7OBIsPu9enNHn7FqpBT1kOg_mCLNmetSeNMA3i4Y60Z5jTgnlX3qX3zcQtLx5AB"
	case "arm64":
		version = "1.31.0"
		url = "https://busybox.net/downloads/binaries/" +
			version + "-defconfig-multiarch-musl/busybox-armv8l"
		checksum = "npJjBO7iwhjW6Kx2aXeSxf8kXhVgTCDChOZTTsI8ZfFfa3tbsklxRiidZQdrVERg"

	default:
		panic("unsupported target " + runtime.GOARCH)
	}

	return pkg.NewExec(
		"busybox-bin-"+version, nil, pkg.ExecTimeoutMax, false,
		fhs.AbsRoot, []string{
			"PATH=/system/bin",
		},
		AbsSystem.Append("bin", "busybox"),
		[]string{"hush", "-c", "" +
			"busybox mkdir -p /work/system/bin/ && " +
			"busybox cp /system/bin/busybox /work/system/bin/ && " +
			"busybox --install -s /work/system/bin/"},
		pkg.Path(AbsSystem, true, busyboxBin{pkg.NewHTTPGet(
			&http.Client{Transport: &http.Transport{
				// busybox website is really slow to respond
				TLSHandshakeTimeout: 2 * time.Minute,
			}}, url,
			mustDecode(checksum),
		)}),
	)
}
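Cure above uses a deferred close that only surfaces the Close error when the copy itself succeeded, so the first failure always wins. A standalone sketch of that idiom, with an illustrative destination path:

// Sketch of the deferred-close error propagation idiom; not part of the change.
package main

import (
	"io"
	"os"
)

func copyTo(dst string, r io.Reader) (err error) {
	var w *os.File
	if w, err = os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0500); err != nil {
		return
	}
	defer func() {
		// Preserve the first error: a failed Close must not mask a copy error,
		// and a successful copy must still surface a Close failure.
		closeErr := w.Close()
		if err == nil {
			err = closeErr
		}
	}()
	_, err = io.Copy(w, r)
	return
}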
internal/rosa/cmake.go (125 lines, new file)
@@ -0,0 +1,125 @@
package rosa
|
||||||
|
|
||||||
|
import (
|
||||||
|
"slices"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (t Toolchain) newCMake() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "4.2.1"
|
||||||
|
checksum = "Y3OdbMsob6Xk2y1DCME6z4Fryb5_TkFD7knRT8dTNIRtSqbiCJyyDN9AxggN_I75"
|
||||||
|
)
|
||||||
|
return t.New("cmake-"+version, 0, []pkg.Artifact{
|
||||||
|
t.Load(Make),
|
||||||
|
t.Load(KernelHeaders),
|
||||||
|
}, nil, nil, `
|
||||||
|
cd "$(mktemp -d)"
|
||||||
|
/usr/src/cmake/bootstrap \
|
||||||
|
--prefix=/system \
|
||||||
|
--parallel="$(nproc)" \
|
||||||
|
-- \
|
||||||
|
-DCMAKE_USE_OPENSSL=OFF
|
||||||
|
make "-j$(nproc)"
|
||||||
|
make DESTDIR=/work install
|
||||||
|
`, pkg.Path(AbsUsrSrc.Append("cmake"), true, t.NewPatchedSource(
|
||||||
|
// expected to be writable in the copy made during bootstrap
|
||||||
|
"cmake", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://github.com/Kitware/CMake/releases/download/"+
|
||||||
|
"v"+version+"/cmake-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
), false,
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
func init() { artifactsF[CMake] = Toolchain.newCMake }
|
||||||
|
|
||||||
|
// CMakeAttr holds the project-specific attributes that will be applied to a new
|
||||||
|
// [pkg.Artifact] compiled via [CMake].
|
||||||
|
type CMakeAttr struct {
|
||||||
|
// Path elements joined with source.
|
||||||
|
Append []string
|
||||||
|
// Use source tree as scratch space.
|
||||||
|
Writable bool
|
||||||
|
|
||||||
|
// CMake CACHE entries.
|
||||||
|
Cache [][2]string
|
||||||
|
// Additional environment variables.
|
||||||
|
Env []string
|
||||||
|
// Runs before cmake.
|
||||||
|
ScriptEarly string
|
||||||
|
// Runs after cmake, replaces default.
|
||||||
|
ScriptConfigured string
|
||||||
|
// Runs after install.
|
||||||
|
Script string
|
||||||
|
|
||||||
|
// Override the default installation prefix [AbsSystem].
|
||||||
|
Prefix *check.Absolute
|
||||||
|
|
||||||
|
// Passed through to [Toolchain.New].
|
||||||
|
Paths []pkg.ExecPath
|
||||||
|
// Passed through to [Toolchain.New].
|
||||||
|
Flag int
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewViaCMake returns a [pkg.Artifact] for compiling and installing via [CMake].
|
||||||
|
func (t Toolchain) NewViaCMake(
|
||||||
|
name, version, variant string,
|
||||||
|
source pkg.Artifact,
|
||||||
|
attr *CMakeAttr,
|
||||||
|
extra ...pkg.Artifact,
|
||||||
|
) pkg.Artifact {
|
||||||
|
if name == "" || version == "" || variant == "" {
|
||||||
|
panic("names must be non-empty")
|
||||||
|
}
|
||||||
|
if attr == nil {
|
||||||
|
attr = &CMakeAttr{
|
||||||
|
Cache: [][2]string{
|
||||||
|
{"CMAKE_BUILD_TYPE", "Release"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(attr.Cache) == 0 {
|
||||||
|
panic("CACHE must be non-empty")
|
||||||
|
}
|
||||||
|
|
||||||
|
scriptConfigured := "cmake --build .\ncmake --install .\n"
|
||||||
|
if attr.ScriptConfigured != "" {
|
||||||
|
scriptConfigured = attr.ScriptConfigured
|
||||||
|
}
|
||||||
|
|
||||||
|
prefix := attr.Prefix
|
||||||
|
if prefix == nil {
|
||||||
|
prefix = AbsSystem
|
||||||
|
}
|
||||||
|
|
||||||
|
sourcePath := AbsUsrSrc.Append(name)
|
||||||
|
return t.New(name+"-"+variant+"-"+version, attr.Flag, stage3Concat(t, extra,
|
||||||
|
t.Load(CMake),
|
||||||
|
t.Load(Ninja),
|
||||||
|
), nil, slices.Concat([]string{
|
||||||
|
"ROSA_SOURCE=" + sourcePath.String(),
|
||||||
|
"ROSA_CMAKE_SOURCE=" + sourcePath.Append(attr.Append...).String(),
|
||||||
|
"ROSA_INSTALL_PREFIX=/work" + prefix.String(),
|
||||||
|
}, attr.Env), attr.ScriptEarly+`
|
||||||
|
mkdir /cure && cd /cure
|
||||||
|
cmake -G Ninja \
|
||||||
|
-DCMAKE_C_COMPILER_TARGET="${ROSA_TRIPLE}" \
|
||||||
|
-DCMAKE_CXX_COMPILER_TARGET="${ROSA_TRIPLE}" \
|
||||||
|
-DCMAKE_ASM_COMPILER_TARGET="${ROSA_TRIPLE}" \
|
||||||
|
`+strings.Join(slices.Collect(func(yield func(string) bool) {
|
||||||
|
for _, v := range attr.Cache {
|
||||||
|
if !yield("-D" + v[0] + "=" + v[1]) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}), " \\\n\t")+` \
|
||||||
|
-DCMAKE_INSTALL_PREFIX="${ROSA_INSTALL_PREFIX}" \
|
||||||
|
"${ROSA_CMAKE_SOURCE}"
|
||||||
|
`+scriptConfigured+attr.Script, slices.Concat([]pkg.ExecPath{
|
||||||
|
pkg.Path(sourcePath, attr.Writable, source),
|
||||||
|
}, attr.Paths)...)
|
||||||
|
}
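NewViaCMake above wires a source artifact, a CMakeAttr and the CMake and Ninja presets into a configure, build and install script. A hedged sketch of a hypothetical caller; the project name, URL and digest are placeholders and do not correspond to any entry in this change:

// Sketch only; the preset and its inputs are made up.
package rosa

import "hakurei.app/internal/pkg"

func (t Toolchain) newExampleCMakeProject() pkg.Artifact {
	const version = "1.0.0"
	return t.NewViaCMake("example", version, "default", pkg.NewHTTPGetTar(
		nil, "https://example.org/example-"+version+".tar.gz", // placeholder URL
		pkg.Checksum{}, // placeholder digest
		pkg.TarGzip,
	), &CMakeAttr{
		Cache: [][2]string{
			{"CMAKE_BUILD_TYPE", "Release"},
		},
	})
}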
|
internal/rosa/curl.go (32 lines, new file)
@@ -0,0 +1,32 @@
package rosa

import "hakurei.app/internal/pkg"

func (t Toolchain) newCurl() pkg.Artifact {
	const (
		version  = "8.18.0"
		checksum = "YpOolP_sx1DIrCEJ3elgVAu0wTLDS-EZMZFvOP0eha7FaLueZUlEpuMwDzJNyi7i"
	)
	return t.NewViaMake("curl", version, pkg.NewHTTPGetTar(
		nil, "https://curl.se/download/curl-"+version+".tar.bz2",
		mustDecode(checksum),
		pkg.TarBzip2,
	), &MakeAttr{
		Env: []string{
			"TFLAGS=-j256",
		},
		Configure: [][2]string{
			{"with-openssl"},
			{"with-ca-bundle", "/system/etc/ssl/certs/ca-bundle.crt"},
		},
		ScriptConfigured: `
make "-j$(nproc)"
`,
	},
		t.Load(Perl),

		t.Load(Libpsl),
		t.Load(OpenSSL),
	)
}
func init() { artifactsF[Curl] = Toolchain.newCurl }
internal/rosa/etc.go (163 lines, new file)
@@ -0,0 +1,163 @@
package rosa
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"hakurei.app/container/fhs"
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
)
|
||||||
|
|
||||||
|
// cureEtc contains deterministic elements of /etc, made available as part of
|
||||||
|
// [Toolchain]. This silences test suites expecting certain standard files to be
|
||||||
|
// available in /etc.
|
||||||
|
type cureEtc struct {
|
||||||
|
// Optional via newIANAEtc.
|
||||||
|
iana pkg.Artifact
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cure writes hardcoded configuration to files under etc.
|
||||||
|
func (a cureEtc) Cure(t *pkg.FContext) (err error) {
|
||||||
|
etc := t.GetWorkDir().Append("etc")
|
||||||
|
if err = os.MkdirAll(etc.String(), 0700); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for _, f := range [][2]string{
|
||||||
|
{"hosts", "127.0.0.1 localhost cure cure-net\n"},
|
||||||
|
{"passwd", `root:x:0:0:System administrator:/proc/nonexistent:/bin/sh
|
||||||
|
cure:x:1023:1023:Cure:/usr/src:/bin/sh
|
||||||
|
nobody:x:65534:65534:Overflow user:/proc/nonexistent:/system/bin/false
|
||||||
|
`},
|
||||||
|
{"group", `root:x:0:
|
||||||
|
cure:x:1023:
|
||||||
|
nobody:x:65534:
|
||||||
|
`},
|
||||||
|
} {
|
||||||
|
if err = os.WriteFile(
|
||||||
|
etc.Append(f[0]).String(),
|
||||||
|
[]byte(f[1]),
|
||||||
|
0400,
|
||||||
|
); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if a.iana != nil {
|
||||||
|
iana, _ := t.GetArtifact(a.iana)
|
||||||
|
|
||||||
|
buf := make([]byte, syscall.Getpagesize()<<3)
|
||||||
|
for _, name := range []string{
|
||||||
|
"protocols",
|
||||||
|
"services",
|
||||||
|
} {
|
||||||
|
var dst, src *os.File
|
||||||
|
if dst, err = os.OpenFile(
|
||||||
|
etc.Append(name).String(),
|
||||||
|
syscall.O_CREAT|syscall.O_EXCL|syscall.O_WRONLY,
|
||||||
|
0400,
|
||||||
|
); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if src, err = os.Open(
|
||||||
|
iana.Append(name).String(),
|
||||||
|
); err != nil {
|
||||||
|
_ = dst.Close()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = io.CopyBuffer(dst, src, buf)
|
||||||
|
closeErrs := [...]error{
|
||||||
|
dst.Close(),
|
||||||
|
src.Close(),
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
} else if err = errors.Join(closeErrs[:]...); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return os.Chmod(etc.String(), 0500)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Kind returns the hardcoded [pkg.Kind] value.
|
||||||
|
func (cureEtc) Kind() pkg.Kind { return kindEtc }
|
||||||
|
|
||||||
|
// Params writes whether iana-etc is populated.
|
||||||
|
func (a cureEtc) Params(ctx *pkg.IContext) {
|
||||||
|
if a.iana != nil {
|
||||||
|
ctx.WriteUint32(1)
|
||||||
|
} else {
|
||||||
|
ctx.WriteUint32(0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
pkg.Register(kindEtc, func(r *pkg.IRReader) pkg.Artifact {
|
||||||
|
a := cureEtc{}
|
||||||
|
if r.ReadUint32() != 0 {
|
||||||
|
a.iana = r.Next()
|
||||||
|
}
|
||||||
|
if _, ok := r.Finalise(); ok {
|
||||||
|
panic(pkg.ErrUnexpectedChecksum)
|
||||||
|
}
|
||||||
|
return a
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsExclusive returns false: Cure performs a few trivial filesystem writes.
|
||||||
|
func (cureEtc) IsExclusive() bool { return false }
|
||||||
|
|
||||||
|
// Dependencies returns a slice containing the backing iana-etc release.
|
||||||
|
func (a cureEtc) Dependencies() []pkg.Artifact {
|
||||||
|
if a.iana != nil {
|
||||||
|
return []pkg.Artifact{a.iana}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns a hardcoded reporting name.
|
||||||
|
func (a cureEtc) String() string {
|
||||||
|
if a.iana == nil {
|
||||||
|
return "cure-etc-minimal"
|
||||||
|
}
|
||||||
|
return "cure-etc"
|
||||||
|
}
|
||||||
|
|
||||||
|
// newIANAEtc returns an unpacked iana-etc release.
|
||||||
|
func newIANAEtc() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "20251215"
|
||||||
|
checksum = "kvKz0gW_rGG5QaNK9ZWmWu1IEgYAdmhj_wR7DYrh3axDfIql_clGRHmelP7525NJ"
|
||||||
|
)
|
||||||
|
return pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://github.com/Mic92/iana-etc/releases/download/"+
|
||||||
|
version+"/iana-etc-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
resolvconfPath pkg.ExecPath
|
||||||
|
resolvconfOnce sync.Once
|
||||||
|
)
|
||||||
|
|
||||||
|
// resolvconf returns a hardcoded /etc/resolv.conf file.
|
||||||
|
func resolvconf() pkg.ExecPath {
|
||||||
|
resolvconfOnce.Do(func() {
|
||||||
|
resolvconfPath = pkg.Path(
|
||||||
|
fhs.AbsEtc.Append("resolv.conf"), false,
|
||||||
|
pkg.NewFile("resolv.conf", []byte(`
|
||||||
|
nameserver 1.1.1.1
|
||||||
|
nameserver 1.0.0.1
|
||||||
|
`)),
|
||||||
|
)
|
||||||
|
})
|
||||||
|
return resolvconfPath
|
||||||
|
}
|
internal/rosa/fuse.go (45 lines, new file)
@@ -0,0 +1,45 @@
package rosa

import "hakurei.app/internal/pkg"

func (t Toolchain) newFuse() pkg.Artifact {
	const (
		version  = "3.18.1"
		checksum = "COb-BgJRWXLbt9XUkNeuiroQizpMifXqxgieE1SlkMXhs_WGSyJStrmyewAw2hd6"
	)
	return t.New("fuse-"+version, 0, []pkg.Artifact{
		t.Load(Python),
		t.Load(Meson),
		t.Load(Ninja),

		t.Load(IniConfig),
		t.Load(Packaging),
		t.Load(Pluggy),
		t.Load(Pygments),
		t.Load(PyTest),

		t.Load(KernelHeaders),
	}, nil, nil, `
cd "$(mktemp -d)"
meson setup \
	--reconfigure \
	--buildtype=release \
	--prefix=/system \
	--prefer-static \
	-Dtests=true \
	-Duseroot=false \
	-Dinitscriptdir=/system/init.d \
	-Ddefault_library=both \
	. /usr/src/fuse
meson compile
python3 -m pytest test/
meson install \
	--destdir=/work
`, pkg.Path(AbsUsrSrc.Append("fuse"), false, pkg.NewHTTPGetTar(
		nil, "https://github.com/libfuse/libfuse/releases/download/"+
			"fuse-"+version+"/fuse-"+version+".tar.gz",
		mustDecode(checksum),
		pkg.TarGzip,
	)))
}
func init() { artifactsF[Fuse] = Toolchain.newFuse }
internal/rosa/git.go (101 lines, new file)
@@ -0,0 +1,101 @@
package rosa
|
||||||
|
|
||||||
|
import (
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (t Toolchain) newGit() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "2.52.0"
|
||||||
|
checksum = "uH3J1HAN_c6PfGNJd2OBwW4zo36n71wmkdvityYnrh8Ak0D1IifiAvEWz9Vi9DmS"
|
||||||
|
)
|
||||||
|
return t.NewViaMake("git", version, t.NewPatchedSource(
|
||||||
|
"git", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://www.kernel.org/pub/software/scm/git/"+
|
||||||
|
"git-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
), false,
|
||||||
|
), &MakeAttr{
|
||||||
|
// uses source tree as scratch space
|
||||||
|
Writable: true,
|
||||||
|
InPlace: true,
|
||||||
|
|
||||||
|
// test suite in subdirectory
|
||||||
|
SkipCheck: true,
|
||||||
|
|
||||||
|
Make: []string{"all"},
|
||||||
|
ScriptEarly: `
|
||||||
|
cd /usr/src/git
|
||||||
|
|
||||||
|
make configure
|
||||||
|
`,
|
||||||
|
Script: `
|
||||||
|
ln -s ../../system/bin/perl /usr/bin/ || true
|
||||||
|
|
||||||
|
function disable_test {
|
||||||
|
local test=$1 pattern=$2
|
||||||
|
if [ $# -eq 1 ]; then
|
||||||
|
rm "t/${test}.sh"
|
||||||
|
else
|
||||||
|
sed -i "t/${test}.sh" \
|
||||||
|
-e "/^\s*test_expect_.*$pattern/,/^\s*' *\$/{s/^/: #/}"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
disable_test t5319-multi-pack-index
|
||||||
|
disable_test t1305-config-include
|
||||||
|
disable_test t3900-i18n-commit
|
||||||
|
disable_test t3507-cherry-pick-conflict
|
||||||
|
disable_test t4201-shortlog
|
||||||
|
disable_test t5303-pack-corruption-resilience
|
||||||
|
disable_test t4301-merge-tree-write-tree
|
||||||
|
disable_test t8005-blame-i18n
|
||||||
|
disable_test t9350-fast-export
|
||||||
|
disable_test t9300-fast-import
|
||||||
|
disable_test t0211-trace2-perf
|
||||||
|
disable_test t1517-outside-repo
|
||||||
|
disable_test t2200-add-update
|
||||||
|
|
||||||
|
make \
|
||||||
|
-C t \
|
||||||
|
GIT_PROVE_OPTS="--jobs 32 --failures" \
|
||||||
|
prove
|
||||||
|
`,
|
||||||
|
},
|
||||||
|
t.Load(Perl),
|
||||||
|
t.Load(Diffutils),
|
||||||
|
t.Load(M4),
|
||||||
|
t.Load(Autoconf),
|
||||||
|
t.Load(Gettext),
|
||||||
|
|
||||||
|
t.Load(Zlib),
|
||||||
|
t.Load(Curl),
|
||||||
|
t.Load(OpenSSL),
|
||||||
|
t.Load(Libexpat),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func init() { artifactsF[Git] = Toolchain.newGit }
|
||||||
|
|
||||||
|
// NewViaGit returns a [pkg.Artifact] for cloning a git repository.
|
||||||
|
func (t Toolchain) NewViaGit(
|
||||||
|
name, url, rev string,
|
||||||
|
checksum pkg.Checksum,
|
||||||
|
) pkg.Artifact {
|
||||||
|
return t.New(name+"-"+rev, 0, []pkg.Artifact{
|
||||||
|
t.Load(NSSCACert),
|
||||||
|
t.Load(OpenSSL),
|
||||||
|
t.Load(Libpsl),
|
||||||
|
t.Load(Curl),
|
||||||
|
t.Load(Libexpat),
|
||||||
|
t.Load(Git),
|
||||||
|
}, &checksum, nil, `
|
||||||
|
git \
|
||||||
|
-c advice.detachedHead=false \
|
||||||
|
clone \
|
||||||
|
--revision=`+rev+` \
|
||||||
|
`+url+` \
|
||||||
|
/work
|
||||||
|
rm -rf /work/.git
|
||||||
|
`, resolvconf())
|
||||||
|
}
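NewViaGit above produces an artifact by cloning a repository at a fixed revision and stripping .git from the result. A hedged sketch of a hypothetical caller; the name, URL, revision and digest are placeholders, not real entries in this change:

// Sketch only; all values are illustrative.
package rosa

import "hakurei.app/internal/pkg"

func (t Toolchain) exampleClone() pkg.Artifact {
	return t.NewViaGit(
		"example",
		"https://example.org/example.git",
		"0000000000000000000000000000000000000000", // placeholder commit
		pkg.Checksum{},                              // placeholder output digest
	)
}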
|
internal/rosa/gnu.go (623 lines, new file)
@@ -0,0 +1,623 @@
package rosa
|
||||||
|
|
||||||
|
import "hakurei.app/internal/pkg"
|
||||||
|
|
||||||
|
func (t Toolchain) newM4() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "1.4.20"
|
||||||
|
checksum = "RT0_L3m4Co86bVBY3lCFAEs040yI1WdeNmRylFpah8IZovTm6O4wI7qiHJN3qsW9"
|
||||||
|
)
|
||||||
|
return t.NewViaMake("m4", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://ftpmirror.gnu.org/gnu/m4/m4-"+version+".tar.bz2",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarBzip2,
|
||||||
|
), &MakeAttr{
|
||||||
|
Writable: true,
|
||||||
|
ScriptEarly: `
|
||||||
|
cd /usr/src/m4
|
||||||
|
chmod +w tests/test-c32ispunct.sh && echo '#!/bin/sh' > tests/test-c32ispunct.sh
|
||||||
|
`,
|
||||||
|
},
|
||||||
|
t.Load(Diffutils),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func init() { artifactsF[M4] = Toolchain.newM4 }
|
||||||
|
|
||||||
|
func (t Toolchain) newSed() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "4.9"
|
||||||
|
checksum = "pe7HWH4PHNYrazOTlUoE1fXmhn2GOPFN_xE62i0llOr3kYGrH1g2_orDz0UtZ9Nt"
|
||||||
|
)
|
||||||
|
return t.NewViaMake("sed", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://ftpmirror.gnu.org/gnu/sed/sed-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
), nil,
|
||||||
|
t.Load(Diffutils),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func init() { artifactsF[Sed] = Toolchain.newSed }
|
||||||
|
|
||||||
|
func (t Toolchain) newAutoconf() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "2.72"
|
||||||
|
checksum = "-c5blYkC-xLDer3TWEqJTyh1RLbOd1c5dnRLKsDnIrg_wWNOLBpaqMY8FvmUFJ33"
|
||||||
|
)
|
||||||
|
return t.NewViaMake("autoconf", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://ftpmirror.gnu.org/gnu/autoconf/autoconf-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
), &MakeAttr{
|
||||||
|
Make: []string{
|
||||||
|
`TESTSUITEFLAGS="-j$(nproc)"`,
|
||||||
|
},
|
||||||
|
Flag: TExclusive,
|
||||||
|
},
|
||||||
|
t.Load(M4),
|
||||||
|
t.Load(Perl),
|
||||||
|
t.Load(Bash),
|
||||||
|
t.Load(Diffutils),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func init() { artifactsF[Autoconf] = Toolchain.newAutoconf }
|
||||||
|
|
||||||
|
func (t Toolchain) newAutomake() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "1.18.1"
|
||||||
|
checksum = "FjvLG_GdQP7cThTZJLDMxYpRcKdpAVG-YDs1Fj1yaHlSdh_Kx6nRGN14E0r_BjcG"
|
||||||
|
)
|
||||||
|
return t.NewViaMake("automake", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://ftpmirror.gnu.org/gnu/automake/automake-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
), &MakeAttr{
|
||||||
|
Writable: true,
|
||||||
|
ScriptEarly: `
|
||||||
|
cd /usr/src/automake
|
||||||
|
|
||||||
|
test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
|
||||||
|
|
||||||
|
test_disable '#!/bin/sh' t/objcxx-minidemo.sh
|
||||||
|
test_disable '#!/bin/sh' t/objcxx-deps.sh
|
||||||
|
|
||||||
|
test_disable '#!/bin/sh' t/dist-no-built-sources.sh
|
||||||
|
test_disable '#!/bin/sh' t/distname.sh
|
||||||
|
test_disable '#!/bin/sh' t/pr9.sh
|
||||||
|
`,
|
||||||
|
},
|
||||||
|
t.Load(M4),
|
||||||
|
t.Load(Perl),
|
||||||
|
t.Load(Grep),
|
||||||
|
t.Load(Gzip),
|
||||||
|
t.Load(Autoconf),
|
||||||
|
t.Load(Diffutils),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func init() { artifactsF[Automake] = Toolchain.newAutomake }
|
||||||
|
|
||||||
|
func (t Toolchain) newLibtool() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "2.5.4"
|
||||||
|
checksum = "pa6LSrQggh8mSJHQfwGjysAApmZlGJt8wif2cCLzqAAa2jpsTY0jZ-6stS3BWZ2Q"
|
||||||
|
)
|
||||||
|
return t.NewViaMake("libtool", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://ftpmirror.gnu.org/gnu/libtool/libtool-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
), &MakeAttr{
|
||||||
|
Make: []string{
|
||||||
|
`TESTSUITEFLAGS=32`,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
t.Load(M4),
|
||||||
|
t.Load(Diffutils),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func init() { artifactsF[Libtool] = Toolchain.newLibtool }
|
||||||
|
|
||||||
|
func (t Toolchain) newGzip() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "1.14"
|
||||||
|
checksum = "NWhjUavnNfTDFkZJyAUonL9aCOak8GVajWX2OMlzpFnuI0ErpBFyj88mz2xSjz0q"
|
||||||
|
)
|
||||||
|
return t.NewViaMake("gzip", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://ftpmirror.gnu.org/gnu/gzip/gzip-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
), &MakeAttr{
|
||||||
|
// dependency loop
|
||||||
|
SkipCheck: true,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
func init() { artifactsF[Gzip] = Toolchain.newGzip }
|
||||||
|
|
||||||
|
func (t Toolchain) newGettext() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "1.0"
|
||||||
|
checksum = "3MasKeEdPeFEgWgzsBKk7JqWqql1wEMbgPmzAfs-mluyokoW0N8oQVxPQoOnSdgC"
|
||||||
|
)
|
||||||
|
return t.NewViaMake("gettext", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://ftpmirror.gnu.org/gnu/gettext/gettext-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
), &MakeAttr{
|
||||||
|
Writable: true,
|
||||||
|
ScriptEarly: `
|
||||||
|
cd /usr/src/gettext
|
||||||
|
test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
|
||||||
|
|
||||||
|
test_disable '#!/bin/sh' gettext-tools/tests/msgcat-22
|
||||||
|
test_disable '#!/bin/sh' gettext-tools/tests/msgconv-2
|
||||||
|
test_disable '#!/bin/sh' gettext-tools/tests/msgconv-8
|
||||||
|
test_disable '#!/bin/sh' gettext-tools/tests/xgettext-python-3
|
||||||
|
test_disable '#!/bin/sh' gettext-tools/tests/msgmerge-compendium-6
|
||||||
|
test_disable '#!/bin/sh' gettext-tools/tests/gettextpo-1
|
||||||
|
test_disable '#!/bin/sh' gettext-tools/tests/format-c-5
|
||||||
|
test_disable '#!/bin/sh' gettext-tools/gnulib-tests/test-c32ispunct.sh
|
||||||
|
test_disable 'int main(){return 0;}' gettext-tools/gnulib-tests/test-stdcountof-h.c
|
||||||
|
|
||||||
|
touch gettext-tools/autotools/archive.dir.tar
|
||||||
|
`,
|
||||||
|
},
|
||||||
|
t.Load(Diffutils),
|
||||||
|
t.Load(Gzip),
|
||||||
|
t.Load(Sed),
|
||||||
|
|
||||||
|
t.Load(KernelHeaders),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func init() { artifactsF[Gettext] = Toolchain.newGettext }
|
||||||
|
|
||||||
|
func (t Toolchain) newDiffutils() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "3.12"
|
||||||
|
checksum = "9J5VAq5oA7eqwzS1Yvw-l3G5o-TccUrNQR3PvyB_lgdryOFAfxtvQfKfhdpquE44"
|
||||||
|
)
|
||||||
|
return t.NewViaMake("diffutils", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://ftpmirror.gnu.org/gnu/diffutils/diffutils-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
), &MakeAttr{
|
||||||
|
Writable: true,
|
||||||
|
ScriptEarly: `
|
||||||
|
cd /usr/src/diffutils
|
||||||
|
test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
|
||||||
|
|
||||||
|
test_disable '#!/bin/sh' gnulib-tests/test-c32ispunct.sh
|
||||||
|
test_disable 'int main(){return 0;}' gnulib-tests/test-c32ispunct.c
|
||||||
|
test_disable '#!/bin/sh' tests/cmp
|
||||||
|
`,
|
||||||
|
Flag: TEarly,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
func init() { artifactsF[Diffutils] = Toolchain.newDiffutils }
|
||||||
|
|
||||||
|
func (t Toolchain) newPatch() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "2.8"
|
||||||
|
checksum = "MA0BQc662i8QYBD-DdGgyyfTwaeALZ1K0yusV9rAmNiIsQdX-69YC4t9JEGXZkeR"
|
||||||
|
)
|
||||||
|
return t.NewViaMake("patch", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://ftpmirror.gnu.org/gnu/patch/patch-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
), &MakeAttr{
|
||||||
|
Writable: true,
|
||||||
|
ScriptEarly: `
|
||||||
|
cd /usr/src/patch
|
||||||
|
test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
|
||||||
|
|
||||||
|
test_disable '#!/bin/sh' tests/ed-style
|
||||||
|
test_disable '#!/bin/sh' tests/need-filename
|
||||||
|
`,
|
||||||
|
Flag: TEarly,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
func init() { artifactsF[Patch] = Toolchain.newPatch }
|
||||||
|
|
||||||
|
func (t Toolchain) newBash() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "5.3"
|
||||||
|
checksum = "4LQ_GRoB_ko-Ih8QPf_xRKA02xAm_TOxQgcJLmFDT6udUPxTAWrsj-ZNeuTusyDq"
|
||||||
|
)
|
||||||
|
return t.NewViaMake("bash", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://ftpmirror.gnu.org/gnu/bash/bash-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
), &MakeAttr{
|
||||||
|
Script: "ln -s bash /work/system/bin/sh\n",
|
||||||
|
Configure: [][2]string{
|
||||||
|
{"without-bash-malloc"},
|
||||||
|
},
|
||||||
|
Flag: TEarly,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
func init() { artifactsF[Bash] = Toolchain.newBash }
|
||||||
|
|
||||||
|
func (t Toolchain) newCoreutils() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "9.9"
|
||||||
|
checksum = "B1_TaXj1j5aiVIcazLWu8Ix03wDV54uo2_iBry4qHG6Y-9bjDpUPlkNLmU_3Nvw6"
|
||||||
|
)
|
||||||
|
return t.NewViaMake("coreutils", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://ftpmirror.gnu.org/gnu/coreutils/coreutils-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
), &MakeAttr{
|
||||||
|
Writable: true,
|
||||||
|
ScriptEarly: `
|
||||||
|
cd /usr/src/coreutils
|
||||||
|
test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
|
||||||
|
|
||||||
|
test_disable '#!/bin/sh' gnulib-tests/test-c32ispunct.sh
|
||||||
|
test_disable '#!/bin/sh' tests/split/line-bytes.sh
|
||||||
|
test_disable '#!/bin/sh' tests/dd/no-allocate.sh
|
||||||
|
test_disable 'int main(){return 0;}' gnulib-tests/test-chown.c
|
||||||
|
test_disable 'int main(){return 0;}' gnulib-tests/test-fchownat.c
|
||||||
|
test_disable 'int main(){return 0;}' gnulib-tests/test-lchown.c
|
||||||
|
`,
|
||||||
|
Flag: TEarly,
|
||||||
|
},
|
||||||
|
t.Load(Perl),
|
||||||
|
t.Load(Bash),
|
||||||
|
|
||||||
|
t.Load(KernelHeaders),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func init() { artifactsF[Coreutils] = Toolchain.newCoreutils }
|
||||||
|
|
||||||
|
func (t Toolchain) newGperf() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "3.3"
|
||||||
|
checksum = "RtIy9pPb_Bb8-31J2Nw-rRGso2JlS-lDlVhuNYhqR7Nt4xM_nObznxAlBMnarJv7"
|
||||||
|
)
|
||||||
|
return t.NewViaMake("gperf", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://ftpmirror.gnu.org/gperf/gperf-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
), nil,
|
||||||
|
t.Load(Diffutils),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func init() { artifactsF[Gperf] = Toolchain.newGperf }
|
||||||
|
|
||||||
|
func (t Toolchain) newGawk() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "5.3.2"
|
||||||
|
checksum = "uIs0d14h_d2DgMGYwrPtegGNyt_bxzG3D6Fe-MmExx_pVoVkQaHzrtmiXVr6NHKk"
|
||||||
|
)
|
||||||
|
return t.NewViaMake("gawk", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://ftpmirror.gnu.org/gnu/gawk/gawk-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
), &MakeAttr{
|
||||||
|
Flag: TEarly,
|
||||||
|
|
||||||
|
// dependency loop
|
||||||
|
SkipCheck: true,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
func init() { artifactsF[Gawk] = Toolchain.newGawk }
|
||||||
|
|
||||||
|
func (t Toolchain) newGrep() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "3.12"
|
||||||
|
checksum = "qMB4RjaPNRRYsxix6YOrjE8gyAT1zVSTy4nW4wKW9fqa0CHYAuWgPwDTirENzm_1"
|
||||||
|
)
|
||||||
|
return t.NewViaMake("grep", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://ftpmirror.gnu.org/gnu/grep/grep-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
), &MakeAttr{
|
||||||
|
Writable: true,
|
||||||
|
ScriptEarly: `
|
||||||
|
cd /usr/src/grep
|
||||||
|
|
||||||
|
test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
|
||||||
|
|
||||||
|
test_disable '#!/bin/sh' gnulib-tests/test-c32ispunct.sh
|
||||||
|
test_disable 'int main(){return 0;}' gnulib-tests/test-c32ispunct.c
|
||||||
|
`,
|
||||||
|
},
|
||||||
|
t.Load(Diffutils),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func init() { artifactsF[Grep] = Toolchain.newGrep }
|
||||||
|
|
||||||
|
func (t Toolchain) newFindutils() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "4.10.0"
|
||||||
|
checksum = "ZXABdNBQXL7QjTygynRRTdXYWxQKZ0Wn5eMd3NUnxR0xaS0u0VfcKoTlbo50zxv6"
|
||||||
|
)
|
||||||
|
return t.NewViaMake("findutils", version, pkg.NewHTTPGet(
|
||||||
|
nil, "https://ftpmirror.gnu.org/gnu/findutils/findutils-"+version+".tar.xz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
), &MakeAttr{
|
||||||
|
SourceSuffix: ".tar.xz",
|
||||||
|
ScriptEarly: `
|
||||||
|
cd /usr/src/
|
||||||
|
tar xf findutils.tar.xz
|
||||||
|
mv findutils-` + version + ` findutils
|
||||||
|
|
||||||
|
cd findutils
|
||||||
|
echo '#!/bin/sh' > gnulib-tests/test-c32ispunct.sh
|
||||||
|
echo 'int main(){return 0;}' > tests/xargs/test-sigusr.c
|
||||||
|
`,
|
||||||
|
},
|
||||||
|
t.Load(Diffutils),
|
||||||
|
t.Load(XZ),
|
||||||
|
t.Load(Sed),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func init() { artifactsF[Findutils] = Toolchain.newFindutils }
|
||||||
|
|
||||||
|
func (t Toolchain) newBinutils() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "2.45"
|
||||||
|
checksum = "hlLtqqHDmzAT2OQVHaKEd_io2DGFvJkaeS-igBuK8bRRir7LUKGHgHYNkDVKaHTT"
|
||||||
|
)
|
||||||
|
return t.NewViaMake("binutils", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://ftpmirror.gnu.org/gnu/binutils/binutils-"+version+".tar.bz2",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarBzip2,
|
||||||
|
), &MakeAttr{
|
||||||
|
ScriptConfigured: `
|
||||||
|
make "-j$(nproc)"
|
||||||
|
`,
|
||||||
|
},
|
||||||
|
t.Load(Bash),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func init() { artifactsF[Binutils] = Toolchain.newBinutils }
|
||||||
|
|
||||||
|
func (t Toolchain) newGMP() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "6.3.0"
|
||||||
|
checksum = "yrgbgEDWKDdMWVHh7gPbVl56-sRtVVhfvv0M_LX7xMUUk_mvZ1QOJEAnt7g4i3k5"
|
||||||
|
)
|
||||||
|
return t.NewViaMake("gmp", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://gcc.gnu.org/pub/gcc/infrastructure/"+
|
||||||
|
"gmp-"+version+".tar.bz2",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarBzip2,
|
||||||
|
), &MakeAttr{
|
||||||
|
ScriptConfigured: `
|
||||||
|
make "-j$(nproc)"
|
||||||
|
`,
|
||||||
|
},
|
||||||
|
t.Load(M4),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func init() { artifactsF[GMP] = Toolchain.newGMP }
|
||||||
|
|
||||||
|
func (t Toolchain) newMPFR() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "4.2.2"
|
||||||
|
checksum = "wN3gx0zfIuCn9r3VAn_9bmfvAYILwrRfgBjYSD1IjLqyLrLojNN5vKyQuTE9kA-B"
|
||||||
|
)
|
||||||
|
return t.NewViaMake("mpfr", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://gcc.gnu.org/pub/gcc/infrastructure/"+
|
||||||
|
"mpfr-"+version+".tar.bz2",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarBzip2,
|
||||||
|
), nil,
|
||||||
|
t.Load(GMP),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func init() { artifactsF[MPFR] = Toolchain.newMPFR }
|
||||||
|
|
||||||
|
func (t Toolchain) newMPC() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "1.3.1"
|
||||||
|
checksum = "o8r8K9R4x7PuRx0-JE3-bC5jZQrtxGV2nkB773aqJ3uaxOiBDCID1gKjPaaDxX4V"
|
||||||
|
)
|
||||||
|
return t.NewViaMake("mpc", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://gcc.gnu.org/pub/gcc/infrastructure/"+
|
||||||
|
"mpc-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
), nil,
|
||||||
|
t.Load(GMP),
|
||||||
|
t.Load(MPFR),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func init() { artifactsF[MPC] = Toolchain.newMPC }
|
||||||
|
|
||||||
|
func (t Toolchain) newGCC() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "15.2.0"
|
||||||
|
checksum = "TXJ5WrbXlGLzy1swghQTr4qxgDCyIZFgJry51XEPTBZ8QYbVmFeB4lZbSMtPJ-a1"
|
||||||
|
)
|
||||||
|
return t.NewViaMake("gcc", version, t.NewPatchedSource(
|
||||||
|
"gcc", version,
|
||||||
|
pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://ftp.tsukuba.wide.ad.jp/software/gcc/releases/"+
|
||||||
|
"gcc-"+version+"/gcc-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
), true, [2]string{"musl-off64_t-loff_t", `diff --git a/libgo/sysinfo.c b/libgo/sysinfo.c
|
||||||
|
index 180f5c31d74..44d7ea73f7d 100644
|
||||||
|
--- a/libgo/sysinfo.c
|
||||||
|
+++ b/libgo/sysinfo.c
|
||||||
|
@@ -365,11 +365,7 @@ enum {
|
||||||
|
typedef loff_t libgo_loff_t_type;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
-#if defined(HAVE_OFF64_T)
|
||||||
|
-typedef off64_t libgo_off_t_type;
|
||||||
|
-#else
|
||||||
|
typedef off_t libgo_off_t_type;
|
||||||
|
-#endif
|
||||||
|
|
||||||
|
// The following section introduces explicit references to types and
|
||||||
|
// constants of interest to support bootstrapping libgo using a
|
||||||
|
`}, [2]string{"musl-legacy-lfs", `diff --git a/libgo/go/internal/syscall/unix/at_largefile.go b/libgo/go/internal/syscall/unix/at_largefile.go
|
||||||
|
index 82e0dcfd074..16151ecad1b 100644
|
||||||
|
--- a/libgo/go/internal/syscall/unix/at_largefile.go
|
||||||
|
+++ b/libgo/go/internal/syscall/unix/at_largefile.go
|
||||||
|
@@ -10,5 +10,5 @@ import (
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
-//extern fstatat64
|
||||||
|
+//extern fstatat
|
||||||
|
func fstatat(int32, *byte, *syscall.Stat_t, int32) int32
|
||||||
|
diff --git a/libgo/go/os/dir_largefile.go b/libgo/go/os/dir_largefile.go
|
||||||
|
index 1fc5ee0771f..0c6dffe1a75 100644
|
||||||
|
--- a/libgo/go/os/dir_largefile.go
|
||||||
|
+++ b/libgo/go/os/dir_largefile.go
|
||||||
|
@@ -11,5 +11,5 @@ package os
|
||||||
|
|
||||||
|
import "syscall"
|
||||||
|
|
||||||
|
-//extern readdir64
|
||||||
|
+//extern readdir
|
||||||
|
func libc_readdir(*syscall.DIR) *syscall.Dirent
|
||||||
|
diff --git a/libgo/go/syscall/libcall_glibc.go b/libgo/go/syscall/libcall_glibc.go
|
||||||
|
index 5c1ec483c75..5a1245ed44b 100644
|
||||||
|
--- a/libgo/go/syscall/libcall_glibc.go
|
||||||
|
+++ b/libgo/go/syscall/libcall_glibc.go
|
||||||
|
@@ -114,7 +114,7 @@ func Pipe2(p []int, flags int) (err error) {
|
||||||
|
}
|
||||||
|
|
||||||
|
//sys sendfile(outfd int, infd int, offset *Offset_t, count int) (written int, err error)
|
||||||
|
-//sendfile64(outfd _C_int, infd _C_int, offset *Offset_t, count Size_t) Ssize_t
|
||||||
|
+//sendfile(outfd _C_int, infd _C_int, offset *Offset_t, count Size_t) Ssize_t
|
||||||
|
|
||||||
|
func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
|
||||||
|
if race.Enabled {
|
||||||
|
diff --git a/libgo/go/syscall/libcall_linux.go b/libgo/go/syscall/libcall_linux.go
|
||||||
|
index 03ca7261b59..ad21fd0b3ac 100644
|
||||||
|
--- a/libgo/go/syscall/libcall_linux.go
|
||||||
|
+++ b/libgo/go/syscall/libcall_linux.go
|
||||||
|
@@ -158,7 +158,7 @@ func Reboot(cmd int) (err error) {
|
||||||
|
//adjtimex(buf *Timex) _C_int
|
||||||
|
|
||||||
|
//sys Fstatfs(fd int, buf *Statfs_t) (err error)
|
||||||
|
-//fstatfs64(fd _C_int, buf *Statfs_t) _C_int
|
||||||
|
+//fstatfs(fd _C_int, buf *Statfs_t) _C_int
|
||||||
|
|
||||||
|
func Gettid() (tid int) {
|
||||||
|
r1, _, _ := Syscall(SYS_GETTID, 0, 0, 0)
|
||||||
|
@@ -245,7 +245,7 @@ func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n i
|
||||||
|
}
|
||||||
|
|
||||||
|
//sys Statfs(path string, buf *Statfs_t) (err error)
|
||||||
|
-//statfs64(path *byte, buf *Statfs_t) _C_int
|
||||||
|
+//statfs(path *byte, buf *Statfs_t) _C_int
|
||||||
|
|
||||||
|
//sysnb Sysinfo(info *Sysinfo_t) (err error)
|
||||||
|
//sysinfo(info *Sysinfo_t) _C_int
|
||||||
|
diff --git a/libgo/go/syscall/libcall_posix_largefile.go b/libgo/go/syscall/libcall_posix_largefile.go
|
||||||
|
index f90055bb29a..334212f0af1 100644
|
||||||
|
--- a/libgo/go/syscall/libcall_posix_largefile.go
|
||||||
|
+++ b/libgo/go/syscall/libcall_posix_largefile.go
|
||||||
|
@@ -10,40 +10,40 @@
|
||||||
|
package syscall
|
||||||
|
|
||||||
|
//sys Creat(path string, mode uint32) (fd int, err error)
|
||||||
|
-//creat64(path *byte, mode Mode_t) _C_int
|
||||||
|
+//creat(path *byte, mode Mode_t) _C_int
|
||||||
|
|
||||||
|
//sys Fstat(fd int, stat *Stat_t) (err error)
|
||||||
|
-//fstat64(fd _C_int, stat *Stat_t) _C_int
|
||||||
|
+//fstat(fd _C_int, stat *Stat_t) _C_int
|
||||||
|
|
||||||
|
//sys Ftruncate(fd int, length int64) (err error)
|
||||||
|
-//ftruncate64(fd _C_int, length Offset_t) _C_int
|
||||||
|
+//ftruncate(fd _C_int, length Offset_t) _C_int
|
||||||
|
|
||||||
|
//sysnb Getrlimit(resource int, rlim *Rlimit) (err error)
|
||||||
|
-//getrlimit64(resource _C_int, rlim *Rlimit) _C_int
|
||||||
|
+//getrlimit(resource _C_int, rlim *Rlimit) _C_int
|
||||||
|
|
||||||
|
//sys Lstat(path string, stat *Stat_t) (err error)
|
||||||
|
-//lstat64(path *byte, stat *Stat_t) _C_int
|
||||||
|
+//lstat(path *byte, stat *Stat_t) _C_int
|
||||||
|
|
||||||
|
//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error)
|
||||||
|
-//mmap64(addr *byte, length Size_t, prot _C_int, flags _C_int, fd _C_int, offset Offset_t) *byte
|
||||||
|
+//mmap(addr *byte, length Size_t, prot _C_int, flags _C_int, fd _C_int, offset Offset_t) *byte
|
||||||
|
|
||||||
|
//sys Open(path string, mode int, perm uint32) (fd int, err error)
|
||||||
|
-//__go_open64(path *byte, mode _C_int, perm Mode_t) _C_int
|
||||||
|
+//__go_open(path *byte, mode _C_int, perm Mode_t) _C_int
|
||||||
|
|
||||||
|
//sys Pread(fd int, p []byte, offset int64) (n int, err error)
|
||||||
|
-//pread64(fd _C_int, buf *byte, count Size_t, offset Offset_t) Ssize_t
|
||||||
|
+//pread(fd _C_int, buf *byte, count Size_t, offset Offset_t) Ssize_t
|
||||||
|
|
||||||
|
//sys Pwrite(fd int, p []byte, offset int64) (n int, err error)
|
||||||
|
-//pwrite64(fd _C_int, buf *byte, count Size_t, offset Offset_t) Ssize_t
|
||||||
|
+//pwrite(fd _C_int, buf *byte, count Size_t, offset Offset_t) Ssize_t
|
||||||
|
|
||||||
|
//sys Seek(fd int, offset int64, whence int) (off int64, err error)
|
||||||
|
-//lseek64(fd _C_int, offset Offset_t, whence _C_int) Offset_t
|
||||||
|
+//lseek(fd _C_int, offset Offset_t, whence _C_int) Offset_t
|
||||||
|
|
||||||
|
//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
|
||||||
|
-//setrlimit64(resource int, rlim *Rlimit) _C_int
|
||||||
|
+//setrlimit(resource int, rlim *Rlimit) _C_int
|
||||||
|
|
||||||
|
//sys Stat(path string, stat *Stat_t) (err error)
|
||||||
|
-//stat64(path *byte, stat *Stat_t) _C_int
|
||||||
|
+//stat(path *byte, stat *Stat_t) _C_int
|
||||||
|
|
||||||
|
//sys Truncate(path string, length int64) (err error)
|
||||||
|
-//truncate64(path *byte, length Offset_t) _C_int
|
||||||
|
+//truncate(path *byte, length Offset_t) _C_int
|
||||||
|
diff --git a/libgo/runtime/go-varargs.c b/libgo/runtime/go-varargs.c
|
||||||
|
index f84860891e6..7efc9615985 100644
|
||||||
|
--- a/libgo/runtime/go-varargs.c
|
||||||
|
+++ b/libgo/runtime/go-varargs.c
|
||||||
|
@@ -84,7 +84,7 @@ __go_ioctl_ptr (int d, int request, void *arg)
|
||||||
|
int
|
||||||
|
__go_open64 (char *path, int mode, mode_t perm)
|
||||||
|
{
|
||||||
|
- return open64 (path, mode, perm);
|
||||||
|
+ return open (path, mode, perm);
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif
|
||||||
|
`}), &MakeAttr{
|
||||||
|
ScriptEarly: `
|
||||||
|
ln -s system/lib /
|
||||||
|
ln -s system/lib /work/
|
||||||
|
`,
|
||||||
|
Configure: [][2]string{
|
||||||
|
{"disable-multilib"},
|
||||||
|
{"with-multilib-list", `""`},
|
||||||
|
{"enable-default-pie"},
|
||||||
|
{"disable-nls"},
|
||||||
|
{"with-gnu-as"},
|
||||||
|
{"with-gnu-ld"},
|
||||||
|
{"with-system-zlib"},
|
||||||
|
{"enable-languages", "c,c++,go"},
|
||||||
|
{"with-native-system-header-dir", "/system/include"},
|
||||||
|
},
|
||||||
|
Make: []string{
|
||||||
|
"BOOT_CFLAGS='-O2 -g'",
|
||||||
|
"bootstrap",
|
||||||
|
},
|
||||||
|
|
||||||
|
// This toolchain is hacked to pieces, it is not expected to ever work
|
||||||
|
// well in its current state. That does not matter as long as the
|
||||||
|
// toolchain it produces passes its own test suite.
|
||||||
|
SkipCheck: true,
|
||||||
|
|
||||||
|
// GCC spends most of its time in its many configure scripts, however
|
||||||
|
// it also saturates the CPU for a consequential amount of time.
|
||||||
|
Flag: TExclusive,
|
||||||
|
},
|
||||||
|
t.Load(Binutils),
|
||||||
|
|
||||||
|
t.Load(GMP),
|
||||||
|
t.Load(MPFR),
|
||||||
|
t.Load(MPC),
|
||||||
|
|
||||||
|
t.Load(Zlib),
|
||||||
|
t.Load(Libucontext),
|
||||||
|
t.Load(KernelHeaders),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func init() { artifactsF[gcc] = Toolchain.newGCC }
|
||||||
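Every file in this package ends with the same registration idiom: an init function stores a Toolchain method value in artifactsF under an artifact identifier, and dependencies are later resolved through t.Load. The following is a minimal standalone sketch of that idiom only; the real artifactsF, Toolchain, Load, and identifiers are defined elsewhere in internal/rosa and differ in detail.

package main

import "fmt"

type Toolchain int
type Artifact string
type artifactID int

const (
	m4 artifactID = iota
	sed
)

// registry of constructors, filled by init functions at package load time
var artifactsF = map[artifactID]func(Toolchain) Artifact{}

func (t Toolchain) newM4() Artifact  { return "m4" }
func (t Toolchain) newSed() Artifact { return "sed" }

// method values such as Toolchain.newM4 have type func(Toolchain) Artifact
func init() { artifactsF[m4] = Toolchain.newM4 }
func init() { artifactsF[sed] = Toolchain.newSed }

// Load looks up and invokes the registered constructor for an identifier.
func (t Toolchain) Load(id artifactID) Artifact { return artifactsF[id](t) }

func main() { fmt.Println(Toolchain(0).Load(sed)) }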

158  internal/rosa/go.go  Normal file
@@ -0,0 +1,158 @@
package rosa

import (
	"runtime"
	"slices"

	"hakurei.app/internal/pkg"
)

// newGoBootstrap returns the Go bootstrap toolchain.
func (t Toolchain) newGoBootstrap() pkg.Artifact {
	const checksum = "8o9JL_ToiQKadCTb04nvBDkp8O1xiWOolAxVEqaTGodieNe4lOFEjlOxN3bwwe23"
	return t.New("go1.4-bootstrap", 0, []pkg.Artifact{
		t.Load(Bash),
	}, nil, []string{
		"CGO_ENABLED=0",
	}, `
mkdir -p /var/tmp/ /work/system/
cp -r /usr/src/go /work/system/
cd /work/system/go/src
chmod -R +w ..

./make.bash
`, pkg.Path(AbsUsrSrc.Append("go"), false, pkg.NewHTTPGetTar(
		nil, "https://dl.google.com/go/go1.4-bootstrap-20171003.tar.gz",
		mustDecode(checksum),
		pkg.TarGzip,
	)))
}

// newGo returns a specific version of the Go toolchain.
func (t Toolchain) newGo(
	version, checksum string,
	env []string,
	script string,
	extra ...pkg.Artifact,
) pkg.Artifact {
	return t.New("go"+version, 0, slices.Concat([]pkg.Artifact{
		t.Load(Bash),
	}, extra), nil, slices.Concat([]string{
		"CC=cc",
		"GOCACHE=/tmp/gocache",
		"GOROOT_BOOTSTRAP=/system/go",
		"TMPDIR=/dev/shm/go",
	}, env), `
mkdir /work/system "${TMPDIR}"
cp -r /usr/src/go /work/system
cd /work/system/go/src
chmod -R +w ..
`+script+`
./all.bash

mkdir /work/system/bin
ln -s \
../go/bin/go \
../go/bin/gofmt \
/work/system/bin
`, pkg.Path(AbsUsrSrc.Append("go"), false, pkg.NewHTTPGetTar(
		nil, "https://go.dev/dl/go"+version+".src.tar.gz",
		mustDecode(checksum),
		pkg.TarGzip,
	)))
}

func (t Toolchain) newGoLatest() pkg.Artifact {
	var (
		bootstrapEnv   []string
		bootstrapExtra []pkg.Artifact

		finalEnv []string
	)
	switch runtime.GOARCH {
	case "amd64":
		bootstrapExtra = append(bootstrapExtra, t.newGoBootstrap())

	case "arm64":
		bootstrapEnv = append(bootstrapEnv,
			"GOROOT_BOOTSTRAP=/system",
		)
		bootstrapExtra = append(bootstrapExtra,
			t.Load(Binutils),

			t.Load(GMP),
			t.Load(MPFR),
			t.Load(MPC),

			t.Load(Zlib),
			t.Load(Libucontext),

			t.Load(gcc),
		)

		finalEnv = append(finalEnv, "CGO_ENABLED=0")

	default:
		panic("unsupported target " + runtime.GOARCH)
	}

	go119 := t.newGo(
		"1.19",
		"9_e0aFHsIkVxWVGsp9T2RvvjOc3p4n9o9S8tkNe9Cvgzk_zI2FhRQB7ioQkeAAro",
		append(bootstrapEnv, "CGO_ENABLED=0"), `
rm \
crypto/tls/handshake_client_test.go \
cmd/pprof/pprof_test.go \
os/os_unix_test.go
sed -i \
's/os\.Getenv("GCCGO")$/"nonexistent"/' \
go/internal/gccgoimporter/importer_test.go
echo \
'type syscallDescriptor = int' >> \
os/rawconn_test.go
`, bootstrapExtra...)

	go121 := t.newGo(
		"1.21.13",
		"YtrDka402BOAEwywx03Vz4QlVwoBiguJHzG7PuythMCPHXS8CVMLvzmvgEbu4Tzu",
		[]string{"CGO_ENABLED=0"}, `
sed -i \
's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \
cmd/link/internal/`+runtime.GOARCH+`/obj.go

rm \
crypto/tls/handshake_client_test.go \
crypto/tls/handshake_server_test.go \
os/os_unix_test.go
echo \
'type syscallDescriptor = int' >> \
os/rawconn_test.go
`, go119,
	)

	go123 := t.newGo(
		"1.23.12",
		"wcI32bl1tkqbgcelGtGWPI4RtlEddd-PTd76Eb-k7nXA5LbE9yTNdIL9QSOOxMOs",
		[]string{"CGO_ENABLED=0"}, `
sed -i \
's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \
cmd/link/internal/`+runtime.GOARCH+`/obj.go
`, go121,
	)

	go125 := t.newGo(
		"1.25.7",
		"fyylHdBVRUobnBjYj3NKBaYPUw3kGmo2mEELiZonOYurPfbarNU1x77B99Fjut7Q",
		finalEnv, `
sed -i \
's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \
cmd/link/internal/`+runtime.GOARCH+`/obj.go

rm \
os/root_unix_test.go
`, go123,
	)

	return go125
}
func init() { artifactsF[Go] = Toolchain.newGoLatest }
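The sed invocations above splice linuxArch() into the musl loader path, so the helper presumably maps runtime.GOARCH onto musl's architecture naming; its definition is not part of this hunk, so the sketch below is an assumption about its shape rather than the actual implementation. The musl dynamic loader is named /lib/ld-musl-x86_64.so.1 on amd64 and /lib/ld-musl-aarch64.so.1 on arm64.

// Assumed shape of linuxArch (not shown in this diff): GOARCH to the <arch>
// component of /lib/ld-musl-<arch>.so.1; requires `import "runtime"`.
func linuxArchSketch() string {
	switch runtime.GOARCH {
	case "amd64":
		return "x86_64"
	case "arm64":
		return "aarch64"
	default:
		panic("unsupported target " + runtime.GOARCH)
	}
}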

303  internal/rosa/hakurei.go  Normal file
@@ -0,0 +1,303 @@
package rosa

import (
	"hakurei.app/internal/pkg"
)

func (t Toolchain) newHakurei(suffix, script string) pkg.Artifact {
	const (
		version  = "0.3.4"
		checksum = "wVwSLo75a2OnH5tgxNWXR_YhiOJUFnYM_9-sJtxAEOKhcPE0BJafs6PU8o5JzyCT"
	)
	return t.New("hakurei"+suffix+"-"+version, 0, []pkg.Artifact{
		t.Load(Go),

		t.Load(Gzip),
		t.Load(PkgConfig),

		t.Load(KernelHeaders),
		t.Load(Libseccomp),
		t.Load(ACL),
		t.Load(Attr),
		t.Load(Fuse),

		t.Load(Xproto),
		t.Load(LibXau),
		t.Load(XCBProto),
		t.Load(XCB),

		t.Load(Libffi),
		t.Load(Libexpat),
		t.Load(Libxml2),
		t.Load(Wayland),
		t.Load(WaylandProtocols),
	}, nil, []string{
		"CGO_ENABLED=1",
		"GOCACHE=/tmp/gocache",
		"CC=clang -O3 -Werror",
	}, `
echo '# Building test helper (hostname).'
go build -v -o /bin/hostname /usr/src/hostname/main.go
echo

chmod -R +w /usr/src/hakurei
cd /usr/src/hakurei

HAKUREI_VERSION='v`+version+`'
`+script, pkg.Path(AbsUsrSrc.Append("hakurei"), true, t.NewPatchedSource("hakurei", version, pkg.NewHTTPGetTar(
		nil, "https://git.gensokyo.uk/security/hakurei/archive/"+
			"v"+version+".tar.gz",
		mustDecode(checksum),
		pkg.TarGzip,
	), true, [2]string{"dist-00-tests", `From 67e453f5c4de915de23ecbe5980e595758f0f2fb Mon Sep 17 00:00:00 2001
From: Ophestra <cat@gensokyo.uk>
Date: Tue, 27 Jan 2026 06:49:48 +0900
Subject: [PATCH] dist: run tests

This used to be impossible due to nix jank which has been addressed.

Signed-off-by: Ophestra <cat@gensokyo.uk>
---
dist/release.sh | 21 ++++++++++++++++-----
flake.nix | 32 ++++++++++++++++++++------------
internal/acl/acl_test.go | 2 +-
package.nix | 2 +-
4 files changed, 38 insertions(+), 19 deletions(-)

diff --git a/dist/release.sh b/dist/release.sh
index 4dcb278..0ba9104 100755
--- a/dist/release.sh
+++ b/dist/release.sh
@@ -2,19 +2,30 @@
cd "$(dirname -- "$0")/.."
VERSION="${HAKUREI_VERSION:-untagged}"
pname="hakurei-${VERSION}"
-out="dist/${pname}"
+out="${DESTDIR:-dist}/${pname}"

+echo '# Preparing distribution files.'
mkdir -p "${out}"
cp -v "README.md" "dist/hsurc.default" "dist/install.sh" "${out}"
cp -rv "dist/comp" "${out}"
+echo

+echo '# Building hakurei.'
go generate ./...
-go build -trimpath -v -o "${out}/bin/" -ldflags "-s -w -buildid= -extldflags '-static'
+go build -trimpath -v -o "${out}/bin/" -ldflags "-s -w
+ -buildid= -extldflags '-static'
-X hakurei.app/internal/info.buildVersion=${VERSION}
-X hakurei.app/internal/info.hakureiPath=/usr/bin/hakurei
-X hakurei.app/internal/info.hsuPath=/usr/bin/hsu
-X main.hakureiPath=/usr/bin/hakurei" ./...
+echo

-rm -f "./${out}.tar.gz" && tar -C dist -czf "${out}.tar.gz" "${pname}"
-rm -rf "./${out}"
-(cd dist && sha512sum "${pname}.tar.gz" > "${pname}.tar.gz.sha512")
+echo '# Testing hakurei.'
+go test -ldflags='-buildid= -extldflags=-static' ./...
+echo
+
+echo '# Creating distribution.'
+rm -f "${out}.tar.gz" && tar -C "${out}/.." -vczf "${out}.tar.gz" "${pname}"
+rm -rf "${out}"
+(cd "${out}/.." && sha512sum "${pname}.tar.gz" > "${pname}.tar.gz.sha512")
+echo
diff --git a/flake.nix b/flake.nix
index 9e09c61..2340b92 100644
--- a/flake.nix
+++ b/flake.nix
@@ -143,19 +143,27 @@
"bin/mount.fuse.sharefs" = "${hakurei}/libexec/sharefs";
};

- dist = pkgs.runCommand "${hakurei.name}-dist" { buildInputs = hakurei.targetPkgs ++ [ pkgs.pkgsStatic.musl ]; } ''
- # go requires XDG_CACHE_HOME for the build cache
- export XDG_CACHE_HOME="$(mktemp -d)"
+ dist =
+ pkgs.runCommand "${hakurei.name}-dist"
+ {
+ buildInputs = hakurei.targetPkgs ++ [
+ pkgs.pkgsStatic.musl
+ ];
+ }
+ ''
+ cd $(mktemp -d) \
+ && cp -r ${hakurei.src}/. . \
+ && chmod +w cmd && cp -r ${hsu.src}/. cmd/hsu/ \
+ && chmod -R +w .

- # get a different workdir as go does not like /build
- cd $(mktemp -d) \
- && cp -r ${hakurei.src}/. . \
- && chmod +w cmd && cp -r ${hsu.src}/. cmd/hsu/ \
- && chmod -R +w .
-
- export HAKUREI_VERSION="v${hakurei.version}"
- CC="clang -O3 -Werror" ./dist/release.sh && mkdir $out && cp -v "dist/hakurei-$HAKUREI_VERSION.tar.gz"* $out
- '';
+ CC="musl-clang -O3 -Werror -Qunused-arguments" \
+ GOCACHE="$(mktemp -d)" \
+ HAKUREI_TEST_SKIP_ACL=1 \
+ PATH="${pkgs.pkgsStatic.musl.bin}/bin:$PATH" \
+ DESTDIR="$out" \
+ HAKUREI_VERSION="v${hakurei.version}" \
+ ./dist/release.sh
+ '';
}
);

diff --git a/internal/acl/acl_test.go b/internal/acl/acl_test.go
index af6da55..19ce45a 100644
--- a/internal/acl/acl_test.go
+++ b/internal/acl/acl_test.go
@@ -24,7 +24,7 @@ var (
)

func TestUpdate(t *testing.T) {
- if os.Getenv("GO_TEST_SKIP_ACL") == "1" {
+ if os.Getenv("HAKUREI_TEST_SKIP_ACL") == "1" {
t.Skip("acl test skipped")
}

diff --git a/package.nix b/package.nix
index 00c4401..2eaa2ec 100644
--- a/package.nix
+++ b/package.nix
@@ -89,7 +89,7 @@ buildGoModule rec {
CC = "clang -O3 -Werror";

# nix build environment does not allow acls
- GO_TEST_SKIP_ACL = 1;
+ HAKUREI_TEST_SKIP_ACL = 1;
};

buildInputs = [`}, [2]string{"container-tests", `From bf14a412e47344fff2681f4b24d1ecc7415bfcb0 Mon Sep 17 00:00:00 2001
From: Ophestra <cat@gensokyo.uk>
Date: Sat, 31 Jan 2026 10:59:56 +0900
Subject: [PATCH] container: fix host-dependent test cases

These are not fully controlled by hakurei and may change depending on host configuration.

Signed-off-by: Ophestra <cat@gensokyo.uk>
---
container/container_test.go | 27 +++++++++++++++------------
1 file changed, 15 insertions(+), 12 deletions(-)

diff --git a/container/container_test.go b/container/container_test.go
index d737a18..98713cb 100644
--- a/container/container_test.go
+++ b/container/container_test.go
@@ -275,12 +275,12 @@ var containerTestCases = []struct {
),
earlyMnt(
ent("/", "/dev", "ro,nosuid,nodev,relatime", "tmpfs", "devtmpfs", ignore),
- ent("/null", "/dev/null", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
- ent("/zero", "/dev/zero", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
- ent("/full", "/dev/full", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
- ent("/random", "/dev/random", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
- ent("/urandom", "/dev/urandom", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
- ent("/tty", "/dev/tty", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
+ ent("/null", "/dev/null", ignore, "devtmpfs", "devtmpfs", ignore),
+ ent("/zero", "/dev/zero", ignore, "devtmpfs", "devtmpfs", ignore),
+ ent("/full", "/dev/full", ignore, "devtmpfs", "devtmpfs", ignore),
+ ent("/random", "/dev/random", ignore, "devtmpfs", "devtmpfs", ignore),
+ ent("/urandom", "/dev/urandom", ignore, "devtmpfs", "devtmpfs", ignore),
+ ent("/tty", "/dev/tty", ignore, "devtmpfs", "devtmpfs", ignore),
ent("/", "/dev/pts", "rw,nosuid,noexec,relatime", "devpts", "devpts", "rw,mode=620,ptmxmode=666"),
ent("/", "/dev/mqueue", "rw,nosuid,nodev,noexec,relatime", "mqueue", "mqueue", "rw"),
ent("/", "/dev/shm", "rw,nosuid,nodev,relatime", "tmpfs", "tmpfs", ignore),
@@ -293,12 +293,12 @@ var containerTestCases = []struct {
),
earlyMnt(
ent("/", "/dev", "ro,nosuid,nodev,relatime", "tmpfs", "devtmpfs", ignore),
- ent("/null", "/dev/null", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
- ent("/zero", "/dev/zero", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
- ent("/full", "/dev/full", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
- ent("/random", "/dev/random", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
- ent("/urandom", "/dev/urandom", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
- ent("/tty", "/dev/tty", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
+ ent("/null", "/dev/null", ignore, "devtmpfs", "devtmpfs", ignore),
+ ent("/zero", "/dev/zero", ignore, "devtmpfs", "devtmpfs", ignore),
+ ent("/full", "/dev/full", ignore, "devtmpfs", "devtmpfs", ignore),
+ ent("/random", "/dev/random", ignore, "devtmpfs", "devtmpfs", ignore),
+ ent("/urandom", "/dev/urandom", ignore, "devtmpfs", "devtmpfs", ignore),
+ ent("/tty", "/dev/tty", ignore, "devtmpfs", "devtmpfs", ignore),
ent("/", "/dev/pts", "rw,nosuid,noexec,relatime", "devpts", "devpts", "rw,mode=620,ptmxmode=666"),
ent("/", "/dev/shm", "rw,nosuid,nodev,relatime", "tmpfs", "tmpfs", ignore),
),
@@ -696,6 +696,9 @@ func init() {
mnt[i].VfsOptstr = strings.TrimSuffix(mnt[i].VfsOptstr, ",relatime")
mnt[i].VfsOptstr = strings.TrimSuffix(mnt[i].VfsOptstr, ",noatime")

+ cur.FsOptstr = strings.Replace(cur.FsOptstr, ",seclabel", "", 1)
+ mnt[i].FsOptstr = strings.Replace(mnt[i].FsOptstr, ",seclabel", "", 1)
+
if !cur.EqualWithIgnore(mnt[i], "\x00") {
fail = true
log.Printf("[FAIL] %s", cur)`}, [2]string{"dist-01-tarball-name", `diff --git a/dist/release.sh b/dist/release.sh
index 0ba9104..2990ee1 100755
--- a/dist/release.sh
+++ b/dist/release.sh
@@ -1,7 +1,7 @@
#!/bin/sh -e
cd "$(dirname -- "$0")/.."
VERSION="${HAKUREI_VERSION:-untagged}"
-pname="hakurei-${VERSION}"
+pname="hakurei-${VERSION}-$(go env GOARCH)"
out="${DESTDIR:-dist}/${pname}"

echo '# Preparing distribution files.'
`}),
	), pkg.Path(AbsUsrSrc.Append("hostname", "main.go"), false, pkg.NewFile(
		"hostname.go",
		[]byte(`
package main

import "os"

func main() {
if name, err := os.Hostname(); err != nil {
panic(err)
} else {
os.Stdout.WriteString(name)
}
}
`),
	)))
}
func init() {
	artifactsF[Hakurei] = func(t Toolchain) pkg.Artifact {
		return t.newHakurei("", `
mkdir -p /work/system/libexec/hakurei/

echo '# Building hakurei.'
go generate -v ./...
go build -trimpath -v -o /work/system/libexec/hakurei -ldflags="-s -w
-buildid=
-extldflags=-static
-X hakurei.app/internal/info.buildVersion="$HAKUREI_VERSION"
-X hakurei.app/internal/info.hakureiPath=/system/bin/hakurei
-X hakurei.app/internal/info.hsuPath=/system/bin/hsu
-X main.hakureiPath=/system/bin/hakurei" ./...
echo

echo '# Testing hakurei.'
go test -ldflags='-buildid= -extldflags=-static' ./...
echo

mkdir -p /work/system/bin/
(cd /work/system/libexec/hakurei && mv \
hakurei \
sharefs \
../../bin/)
`)
	}
	artifactsF[HakureiDist] = func(t Toolchain) pkg.Artifact {
		return t.newHakurei("-dist", `
export HAKUREI_VERSION
DESTDIR=/work /usr/src/hakurei/dist/release.sh
`)
	}
}
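The build and release scripts above rely on the Go linker's -X flag to bake version strings and install paths into package-level string variables at link time. Below is a minimal standalone illustration of that mechanism; the package and variable names are placeholders, not hakurei's internal/info package.

// main.go: a string variable overridden at link time, e.g.
//   go build -ldflags "-X main.buildVersion=v0.3.4" .
package main

import "fmt"

var buildVersion = "untagged" // default when built without -X

func main() { fmt.Println(buildVersion) }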

44  internal/rosa/kernel.go  Normal file
@@ -0,0 +1,44 @@
package rosa

import (
	"slices"

	"hakurei.app/internal/pkg"
)

// newKernel is a helper for interacting with Kbuild.
func (t Toolchain) newKernel(
	flag int,
	patches [][2]string,
	script string,
	extra ...pkg.Artifact,
) pkg.Artifact {
	const (
		version  = "6.18.5"
		checksum = "-V1e1WWl7HuePkmm84sSKF7nLuHfUs494uNMzMqXEyxcNE_PUE0FICL0oGWn44mM"
	)
	return t.New("kernel-"+version, flag, slices.Concat([]pkg.Artifact{
		t.Load(Make),
	}, extra), nil, nil, `
export LLVM=1
export HOSTLDFLAGS="${LDFLAGS}"
cd /usr/src/linux
`+script, pkg.Path(AbsUsrSrc.Append("linux"), true, t.NewPatchedSource(
		"kernel", version, pkg.NewHTTPGetTar(
			nil,
			"https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/"+
				"snapshot/linux-"+version+".tar.gz",
			mustDecode(checksum),
			pkg.TarGzip,
		), false, patches...,
	)))
}

func (t Toolchain) newKernelHeaders() pkg.Artifact {
	return t.newKernel(TEarly, nil, `
make "-j$(nproc)" \
INSTALL_HDR_PATH=/work/system \
headers_install
`, t.Load(Rsync))
}
func init() { artifactsF[KernelHeaders] = Toolchain.newKernelHeaders }

28  internal/rosa/libexpat.go  Normal file
@@ -0,0 +1,28 @@
package rosa

import (
	"strings"

	"hakurei.app/internal/pkg"
)

func (t Toolchain) newLibexpat() pkg.Artifact {
	const (
		version  = "2.7.3"
		checksum = "GmkoD23nRi9cMT0cgG1XRMrZWD82UcOMzkkvP1gkwSFWCBgeSXMuoLpa8-v8kxW-"
	)
	return t.NewViaMake("libexpat", version, pkg.NewHTTPGetTar(
		nil, "https://github.com/libexpat/libexpat/releases/download/"+
			"R_"+strings.ReplaceAll(version, ".", "_")+"/"+
			"expat-"+version+".tar.bz2",
		mustDecode(checksum),
		pkg.TarBzip2,
	), &MakeAttr{
		Configure: [][2]string{
			{"enable-static"},
		},
	},
		t.Load(Bash),
	)
}
func init() { artifactsF[Libexpat] = Toolchain.newLibexpat }

23  internal/rosa/libffi.go  Normal file
@@ -0,0 +1,23 @@
package rosa

import "hakurei.app/internal/pkg"

func (t Toolchain) newLibffi() pkg.Artifact {
	const (
		version  = "3.4.5"
		checksum = "apIJzypF4rDudeRoI_n3K7N-zCeBLTbQlHRn9NSAZqdLAWA80mR0gXPTpHsL7oMl"
	)
	return t.NewViaMake("libffi", version, pkg.NewHTTPGetTar(
		nil, "https://github.com/libffi/libffi/releases/download/"+
			"v"+version+"/libffi-"+version+".tar.gz",
		mustDecode(checksum),
		pkg.TarGzip,
	), &MakeAttr{
		Configure: [][2]string{
			{"enable-static"},
		},
	},
		t.Load(KernelHeaders),
	)
}
func init() { artifactsF[Libffi] = Toolchain.newLibffi }

30  internal/rosa/libgd.go  Normal file
@@ -0,0 +1,30 @@
package rosa

import "hakurei.app/internal/pkg"

func (t Toolchain) newLibgd() pkg.Artifact {
	const (
		version  = "2.3.3"
		checksum = "8T-sh1_FJT9K9aajgxzh8ot6vWIF-xxjcKAHvTak9MgGUcsFfzP8cAvvv44u2r36"
	)
	return t.NewViaMake("libgd", version, pkg.NewHTTPGetTar(
		nil, "https://github.com/libgd/libgd/releases/download/"+
			"gd-"+version+"/libgd-"+version+".tar.gz",
		mustDecode(checksum),
		pkg.TarGzip,
	), &MakeAttr{
		OmitDefaults: true,
		Env: []string{
			"TMPDIR=/dev/shm/gd",
		},
		ScriptEarly: `
mkdir /dev/shm/gd
`,
		Configure: [][2]string{
			{"enable-static"},
		},
	},
		t.Load(Zlib),
	)
}
func init() { artifactsF[Libgd] = Toolchain.newLibgd }

28  internal/rosa/libpsl.go  Normal file
@@ -0,0 +1,28 @@
package rosa

import "hakurei.app/internal/pkg"

func (t Toolchain) newLibpsl() pkg.Artifact {
	const (
		version  = "0.21.5"
		checksum = "XjfxSzh7peG2Vg4vJlL8z4JZJLcXqbuP6pLWkrGCmRxlnYUFTKNBqWGHCxEOlCad"
	)
	return t.NewViaMake("libpsl", version, pkg.NewHTTPGetTar(
		nil, "https://github.com/rockdaboot/libpsl/releases/download/"+
			version+"/libpsl-"+version+".tar.gz",
		mustDecode(checksum),
		pkg.TarGzip,
	), &MakeAttr{
		Writable: true,
		ScriptEarly: `
cd /usr/src/libpsl

test_disable() { chmod +w "$2" && echo "$1" > "$2"; }

test_disable 'int main(){return 0;}' tests/test-is-public-builtin.c
`,
	},
		t.Load(Python),
	)
}
func init() { artifactsF[Libpsl] = Toolchain.newLibpsl }

33  internal/rosa/libseccomp.go  Normal file
@@ -0,0 +1,33 @@
package rosa

import (
	"hakurei.app/internal/pkg"
)

func (t Toolchain) newLibseccomp() pkg.Artifact {
	const (
		version  = "2.6.0"
		checksum = "mMu-iR71guPjFbb31u-YexBaanKE_nYPjPux-vuBiPfS_0kbwJdfCGlkofaUm-EY"
	)
	return t.NewViaMake("libseccomp", version, pkg.NewHTTPGetTar(
		nil,
		"https://github.com/seccomp/libseccomp/releases/download/"+
			"v"+version+"/libseccomp-"+version+".tar.gz",
		mustDecode(checksum),
		pkg.TarGzip,
	), &MakeAttr{
		ScriptEarly: `
ln -s ../system/bin/bash /bin/
`,
		Configure: [][2]string{
			{"enable-static"},
		},
	},
		t.Load(Bash),
		t.Load(Diffutils),
		t.Load(Gperf),

		t.Load(KernelHeaders),
	)
}
func init() { artifactsF[Libseccomp] = Toolchain.newLibseccomp }

40  internal/rosa/libucontext.go  Normal file
@@ -0,0 +1,40 @@
package rosa

import "hakurei.app/internal/pkg"

func (t Toolchain) newLibucontext() pkg.Artifact {
	const (
		version  = "1.5"
		checksum = "Ggk7FMmDNBdCx1Z9PcNWWW6LSpjGYssn2vU0GK5BLXJYw7ZxZbA2m_eSgT9TFnIG"
	)
	return t.New("libucontext", 0, []pkg.Artifact{
		t.Load(Make),
	}, nil, []string{
		"ARCH=" + linuxArch(),
	}, `
cd /usr/src/libucontext
make check
make DESTDIR=/work install
`, pkg.Path(AbsUsrSrc.Append("libucontext"), true,
		t.NewPatchedSource("libucontext", version, pkg.NewHTTPGetTar(
			nil, "https://github.com/kaniini/libucontext/archive/refs/tags/"+
				"libucontext-"+version+".tar.gz",
			mustDecode(checksum),
			pkg.TarGzip,
		), true, [2]string{"rosa-prefix", `diff --git a/Makefile b/Makefile
index c80e574..4a8c1d3 100644
--- a/Makefile
+++ b/Makefile
@@ -17,7 +17,7 @@ ifeq ($(ARCH),$(filter $(ARCH),arm64))
override ARCH = aarch64
endif

-prefix = /usr
+prefix = /system
libdir = ${prefix}/lib
shared_libdir = ${libdir}
static_libdir = ${libdir}
`}),
	))
}
func init() { artifactsF[Libucontext] = Toolchain.newLibucontext }

34  internal/rosa/libxml2.go  Normal file
@@ -0,0 +1,34 @@
package rosa

import (
	"strings"

	"hakurei.app/internal/pkg"
)

func (t Toolchain) newLibxml2() pkg.Artifact {
	const (
		version  = "2.15.1"
		checksum = "pYzAR3cNrEHezhEMirgiq7jbboLzwMj5GD7SQp0jhSIMdgoU4G9oU9Gxun3zzUIU"
	)
	return t.NewViaMake("libxml2", version, pkg.NewHTTPGet(
		nil, "https://download.gnome.org/sources/libxml2/"+
			strings.Join(strings.Split(version, ".")[:2], ".")+
			"/libxml2-"+version+".tar.xz",
		mustDecode(checksum),
	), &MakeAttr{
		ScriptEarly: `
cd /usr/src/
tar xf libxml2.tar.xz
mv libxml2-` + version + ` libxml2
`,
		Configure: [][2]string{
			{"enable-static"},
		},
		SourceSuffix: ".tar.xz",
	},
		t.Load(Diffutils),
		t.Load(XZ),
	)
}
func init() { artifactsF[Libxml2] = Toolchain.newLibxml2 }
524
internal/rosa/llvm.go
Normal file
524
internal/rosa/llvm.go
Normal file
@@ -0,0 +1,524 @@
|
|||||||
|
package rosa
|
||||||
|
|
||||||
|
import (
|
||||||
|
"runtime"
|
||||||
|
"slices"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
)
|
||||||
|
|
||||||
|
// llvmAttr holds the attributes that will be applied to a new [pkg.Artifact]
|
||||||
|
// containing a LLVM variant.
|
||||||
|
type llvmAttr struct {
|
||||||
|
flags int
|
||||||
|
|
||||||
|
// Concatenated with default environment for CMakeAttr.Env.
|
||||||
|
env []string
|
||||||
|
// Concatenated with generated entries for CMakeAttr.Cache.
|
||||||
|
cmake [][2]string
|
||||||
|
// Override CMakeAttr.Append.
|
||||||
|
append []string
|
||||||
|
// Concatenated with default dependencies for Toolchain.NewViaCMake.
|
||||||
|
extra []pkg.Artifact
|
||||||
|
// Passed through to CMakeAttr.Paths.
|
||||||
|
paths []pkg.ExecPath
|
||||||
|
// Passed through to CMakeAttr.ScriptConfigured.
|
||||||
|
scriptConfigured string
|
||||||
|
// Concatenated with default fixup for CMakeAttr.Script.
|
||||||
|
script string
|
||||||
|
// Passed through to CMakeAttr.Prefix.
|
||||||
|
prefix *check.Absolute
|
||||||
|
// Passed through to CMakeAttr.Writable.
|
||||||
|
writable bool
|
||||||
|
|
||||||
|
// Patch name and body pairs.
|
||||||
|
patches [][2]string
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
llvmProjectClang = 1 << iota
|
||||||
|
llvmProjectLld
|
||||||
|
|
||||||
|
llvmProjectAll = 1<<iota - 1
|
||||||
|
|
||||||
|
llvmRuntimeCompilerRT = 1 << iota
|
||||||
|
llvmRuntimeLibunwind
|
||||||
|
llvmRuntimeLibc
|
||||||
|
llvmRuntimeLibcxx
|
||||||
|
llvmRuntimeLibcxxABI
|
||||||
|
|
||||||
|
llvmAll = 1<<iota - 1
|
||||||
|
llvmRuntimeAll = llvmAll - (2 * llvmProjectAll) - 1
|
||||||
|
)
|
||||||
|
|
||||||
|
// llvmFlagName resolves a llvmAttr.flags project or runtime flag to its name.
|
||||||
|
func llvmFlagName(flag int) string {
|
||||||
|
switch flag {
|
||||||
|
case llvmProjectClang:
|
||||||
|
return "clang"
|
||||||
|
case llvmProjectLld:
|
||||||
|
return "lld"
|
||||||
|
|
||||||
|
case llvmRuntimeCompilerRT:
|
||||||
|
return "compiler-rt"
|
||||||
|
case llvmRuntimeLibunwind:
|
||||||
|
return "libunwind"
|
||||||
|
case llvmRuntimeLibc:
|
||||||
|
return "libc"
|
||||||
|
case llvmRuntimeLibcxx:
|
||||||
|
return "libcxx"
|
||||||
|
case llvmRuntimeLibcxxABI:
|
||||||
|
return "libcxxabi"
|
||||||
|
|
||||||
|
default:
|
||||||
|
panic("invalid flag " + strconv.Itoa(flag))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// newLLVMVariant returns a [pkg.Artifact] containing a LLVM variant.
|
||||||
|
func (t Toolchain) newLLVMVariant(variant string, attr *llvmAttr) pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "21.1.8"
|
||||||
|
checksum = "8SUpqDkcgwOPsqHVtmf9kXfFeVmjVxl4LMn-qSE1AI_Xoeju-9HaoPNGtidyxyka"
|
||||||
|
)
|
||||||
|
if attr == nil {
|
||||||
|
panic("LLVM attr must be non-nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
var projects, runtimes []string
|
||||||
|
for i := 1; i < llvmProjectAll; i <<= 1 {
|
||||||
|
if attr.flags&i != 0 {
|
||||||
|
projects = append(projects, llvmFlagName(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for i := (llvmProjectAll + 1) << 1; i < llvmRuntimeAll; i <<= 1 {
|
||||||
|
if attr.flags&i != 0 {
|
||||||
|
runtimes = append(runtimes, llvmFlagName(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var script, scriptEarly string
|
||||||
|
|
||||||
|
cache := [][2]string{
|
||||||
|
{"CMAKE_BUILD_TYPE", "Release"},
|
||||||
|
|
||||||
|
{"LLVM_HOST_TRIPLE", `"${ROSA_TRIPLE}"`},
|
||||||
|
{"LLVM_DEFAULT_TARGET_TRIPLE", `"${ROSA_TRIPLE}"`},
|
||||||
|
}
|
||||||
|
if len(projects) > 0 {
|
||||||
|
cache = append(cache,
|
||||||
|
[2]string{"LLVM_ENABLE_PROJECTS", `"${ROSA_LLVM_PROJECTS}"`})
|
||||||
|
}
|
||||||
|
if len(runtimes) > 0 {
|
||||||
|
cache = append(cache,
|
||||||
|
[2]string{"LLVM_ENABLE_RUNTIMES", `"${ROSA_LLVM_RUNTIMES}"`})
|
||||||
|
}
|
||||||
|
|
||||||
|
cmakeAppend := []string{"llvm"}
|
||||||
|
if attr.append != nil {
|
||||||
|
cmakeAppend = attr.append
|
||||||
|
} else {
|
||||||
|
cache = append(cache,
|
||||||
|
[2]string{"LLVM_ENABLE_LIBCXX", "ON"},
|
||||||
|
[2]string{"LLVM_USE_LINKER", "lld"},
|
||||||
|
|
||||||
|
[2]string{"LLVM_INSTALL_BINUTILS_SYMLINKS", "ON"},
|
||||||
|
[2]string{"LLVM_INSTALL_CCTOOLS_SYMLINKS", "ON"},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
if attr.flags&llvmProjectClang != 0 {
|
||||||
|
cache = append(cache,
|
||||||
|
[2]string{"CLANG_DEFAULT_LINKER", "lld"},
|
||||||
|
[2]string{"CLANG_DEFAULT_CXX_STDLIB", "libc++"},
|
||||||
|
[2]string{"CLANG_DEFAULT_RTLIB", "compiler-rt"},
|
||||||
|
[2]string{"CLANG_DEFAULT_UNWINDLIB", "libunwind"},
|
||||||
|
)
|
||||||
|
}
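// With lld enabled, also expose it as plain ld so later builds that invoke
// ld directly (presumably via $PATH) end up using lld.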
|
||||||
|
if attr.flags&llvmProjectLld != 0 {
|
||||||
|
script += `
|
||||||
|
ln -s ld.lld /work/system/bin/ld
|
||||||
|
`
|
||||||
|
}
|
||||||
|
if attr.flags&llvmRuntimeCompilerRT != 0 {
|
||||||
|
if attr.append == nil {
|
||||||
|
cache = append(cache,
|
||||||
|
[2]string{"COMPILER_RT_USE_LLVM_UNWINDER", "ON"})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if attr.flags&llvmRuntimeLibunwind != 0 {
|
||||||
|
cache = append(cache,
|
||||||
|
[2]string{"LIBUNWIND_USE_COMPILER_RT", "ON"})
|
||||||
|
}
|
||||||
|
if attr.flags&llvmRuntimeLibcxx != 0 {
|
||||||
|
cache = append(cache,
|
||||||
|
[2]string{"LIBCXX_HAS_MUSL_LIBC", "ON"},
|
||||||
|
[2]string{"LIBCXX_USE_COMPILER_RT", "ON"},
|
||||||
|
)
|
||||||
|
|
||||||
|
if t > toolchainStage3 {
|
||||||
|
// libcxxabi fails to compile if the compiler finds C++ headers outside a
// /usr prefix; moving them under /usr is easier than overriding CXXFLAGS.
// cp and rm are used here instead of mv to avoid chown failures.
|
||||||
|
scriptEarly += `
|
||||||
|
cp -r /system/include /usr/include && rm -rf /system/include
|
||||||
|
`
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if attr.flags&llvmRuntimeLibcxxABI != 0 {
|
||||||
|
cache = append(cache,
|
||||||
|
[2]string{"LIBCXXABI_USE_COMPILER_RT", "ON"},
|
||||||
|
[2]string{"LIBCXXABI_USE_LLVM_UNWINDER", "ON"},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
return t.NewViaCMake("llvm", version, variant, t.NewPatchedSource(
|
||||||
|
"llvmorg", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://github.com/llvm/llvm-project/archive/refs/tags/"+
|
||||||
|
"llvmorg-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
), true, attr.patches...,
|
||||||
|
), &CMakeAttr{
|
||||||
|
Cache: slices.Concat(cache, attr.cmake),
|
||||||
|
Append: cmakeAppend,
|
||||||
|
Prefix: attr.prefix,
|
||||||
|
|
||||||
|
Env: slices.Concat([]string{
|
||||||
|
"ROSA_LLVM_PROJECTS=" + strings.Join(projects, ";"),
|
||||||
|
"ROSA_LLVM_RUNTIMES=" + strings.Join(runtimes, ";"),
|
||||||
|
}, attr.env),
|
||||||
|
ScriptEarly: scriptEarly,
|
||||||
|
ScriptConfigured: attr.scriptConfigured,
|
||||||
|
Script: script + attr.script,
|
||||||
|
Writable: attr.writable,
|
||||||
|
|
||||||
|
Paths: attr.paths,
|
||||||
|
Flag: TExclusive,
|
||||||
|
}, stage3Concat(t, attr.extra,
|
||||||
|
t.Load(Libffi),
|
||||||
|
t.Load(Python),
|
||||||
|
t.Load(Perl),
|
||||||
|
t.Load(Diffutils),
|
||||||
|
t.Load(Bash),
|
||||||
|
t.Load(Gawk),
|
||||||
|
t.Load(Coreutils),
|
||||||
|
t.Load(Findutils),
|
||||||
|
|
||||||
|
t.Load(KernelHeaders),
|
||||||
|
)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// newLLVM returns the LLVM toolchain split across multiple [pkg.Artifact].
|
||||||
|
func (t Toolchain) newLLVM() (musl, compilerRT, runtimes, clang pkg.Artifact) {
|
||||||
|
var target string
|
||||||
|
switch runtime.GOARCH {
|
||||||
|
case "386", "amd64":
|
||||||
|
target = "X86"
|
||||||
|
case "arm64":
|
||||||
|
target = "AArch64"
|
||||||
|
|
||||||
|
default:
|
||||||
|
panic("unsupported target " + runtime.GOARCH)
|
||||||
|
}
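// target is passed to LLVM_TARGETS_TO_BUILD in the clang variant below, so
// only the backend matching the build architecture is compiled.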
|
||||||
|
|
||||||
|
minimalDeps := [][2]string{
|
||||||
|
{"LLVM_ENABLE_ZLIB", "OFF"},
|
||||||
|
{"LLVM_ENABLE_ZSTD", "OFF"},
|
||||||
|
{"LLVM_ENABLE_LIBXML2", "OFF"},
|
||||||
|
}
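// minimalDeps turns off LLVM's optional zlib, zstd and libxml2 integrations so
// the early variants build without those libraries. The variants below are
// built in bootstrap order: compiler-rt builtins against musl headers only,
// then musl itself, then the C++ runtimes, and finally clang/lld.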
|
||||||
|
|
||||||
|
compilerRT = t.newLLVMVariant("compiler-rt", &llvmAttr{
|
||||||
|
env: stage3ExclConcat(t, []string{},
|
||||||
|
"LDFLAGS="+earlyLDFLAGS(false),
|
||||||
|
),
|
||||||
|
cmake: [][2]string{
|
||||||
|
// libc++ not yet available
|
||||||
|
{"CMAKE_CXX_COMPILER_TARGET", ""},
|
||||||
|
|
||||||
|
{"COMPILER_RT_BUILD_BUILTINS", "ON"},
|
||||||
|
{"COMPILER_RT_DEFAULT_TARGET_ONLY", "ON"},
|
||||||
|
{"COMPILER_RT_SANITIZERS_TO_BUILD", "asan"},
|
||||||
|
{"LLVM_ENABLE_PER_TARGET_RUNTIME_DIR", "ON"},
|
||||||
|
|
||||||
|
// does not work without libunwind
|
||||||
|
{"COMPILER_RT_BUILD_CTX_PROFILE", "OFF"},
|
||||||
|
{"COMPILER_RT_BUILD_LIBFUZZER", "OFF"},
|
||||||
|
{"COMPILER_RT_BUILD_MEMPROF", "OFF"},
|
||||||
|
{"COMPILER_RT_BUILD_PROFILE", "OFF"},
|
||||||
|
{"COMPILER_RT_BUILD_XRAY", "OFF"},
|
||||||
|
},
|
||||||
|
append: []string{"compiler-rt"},
|
||||||
|
extra: []pkg.Artifact{t.NewMusl(&MuslAttr{
|
||||||
|
Headers: true,
|
||||||
|
Env: []string{
|
||||||
|
"CC=clang",
|
||||||
|
},
|
||||||
|
})},
|
||||||
|
script: `
|
||||||
|
mkdir -p "${ROSA_INSTALL_PREFIX}/lib/clang/21/lib/"
|
||||||
|
ln -s \
|
||||||
|
"../../../${ROSA_TRIPLE}" \
|
||||||
|
"${ROSA_INSTALL_PREFIX}/lib/clang/21/lib/"
|
||||||
|
|
||||||
|
ln -s \
|
||||||
|
"clang_rt.crtbegin-` + linuxArch() + `.o" \
|
||||||
|
"${ROSA_INSTALL_PREFIX}/lib/${ROSA_TRIPLE}/crtbeginS.o"
|
||||||
|
ln -s \
|
||||||
|
"clang_rt.crtend-` + linuxArch() + `.o" \
|
||||||
|
"${ROSA_INSTALL_PREFIX}/lib/${ROSA_TRIPLE}/crtendS.o"
|
||||||
|
`,
|
||||||
|
})
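// musl is linked against the compiler-rt builtins archive built above: LIBCC
// (musl's name for the compiler runtime library) is pointed at
// libclang_rt.builtins.a instead of libgcc.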
|
||||||
|
|
||||||
|
musl = t.NewMusl(&MuslAttr{
|
||||||
|
Extra: []pkg.Artifact{compilerRT},
|
||||||
|
Env: stage3ExclConcat(t, []string{
|
||||||
|
"CC=clang",
|
||||||
|
"LIBCC=/system/lib/clang/21/lib/" +
|
||||||
|
triplet() + "/libclang_rt.builtins.a",
|
||||||
|
"AR=ar",
|
||||||
|
"RANLIB=ranlib",
|
||||||
|
},
|
||||||
|
"LDFLAGS="+earlyLDFLAGS(false),
|
||||||
|
),
|
||||||
|
})
|
||||||
|
|
||||||
|
runtimes = t.newLLVMVariant("runtimes", &llvmAttr{
|
||||||
|
env: stage3ExclConcat(t, []string{},
|
||||||
|
"LDFLAGS="+earlyLDFLAGS(false),
|
||||||
|
),
|
||||||
|
flags: llvmRuntimeLibunwind | llvmRuntimeLibcxx | llvmRuntimeLibcxxABI,
|
||||||
|
cmake: slices.Concat([][2]string{
|
||||||
|
// libc++ not yet available
|
||||||
|
{"CMAKE_CXX_COMPILER_WORKS", "ON"},
|
||||||
|
|
||||||
|
{"LIBCXX_HAS_ATOMIC_LIB", "OFF"},
|
||||||
|
{"LIBCXXABI_HAS_CXA_THREAD_ATEXIT_IMPL", "OFF"},
|
||||||
|
}, minimalDeps),
|
||||||
|
append: []string{"runtimes"},
|
||||||
|
extra: []pkg.Artifact{
|
||||||
|
compilerRT,
|
||||||
|
musl,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
clang = t.newLLVMVariant("clang", &llvmAttr{
|
||||||
|
flags: llvmProjectClang | llvmProjectLld,
|
||||||
|
env: stage3ExclConcat(t, []string{},
|
||||||
|
"CFLAGS="+earlyCFLAGS,
|
||||||
|
"CXXFLAGS="+earlyCXXFLAGS(),
|
||||||
|
"LDFLAGS="+earlyLDFLAGS(false),
|
||||||
|
),
|
||||||
|
cmake: slices.Concat([][2]string{
|
||||||
|
{"LLVM_TARGETS_TO_BUILD", target},
|
||||||
|
{"CMAKE_CROSSCOMPILING", "OFF"},
|
||||||
|
{"CXX_SUPPORTS_CUSTOM_LINKER", "ON"},
|
||||||
|
}, minimalDeps),
|
||||||
|
extra: []pkg.Artifact{
|
||||||
|
musl,
|
||||||
|
compilerRT,
|
||||||
|
runtimes,
|
||||||
|
},
|
||||||
|
script: `
|
||||||
|
ln -s clang /work/system/bin/cc
|
||||||
|
ln -s clang++ /work/system/bin/c++
|
||||||
|
|
||||||
|
ninja check-all
|
||||||
|
`,
|
||||||
|
|
||||||
|
patches: [][2]string{
|
||||||
|
{"add-rosa-vendor", `diff --git a/llvm/include/llvm/TargetParser/Triple.h b/llvm/include/llvm/TargetParser/Triple.h
|
||||||
|
index 657f4230379e..12c305756184 100644
|
||||||
|
--- a/llvm/include/llvm/TargetParser/Triple.h
|
||||||
|
+++ b/llvm/include/llvm/TargetParser/Triple.h
|
||||||
|
@@ -185,6 +185,7 @@ public:
|
||||||
|
|
||||||
|
Apple,
|
||||||
|
PC,
|
||||||
|
+ Rosa,
|
||||||
|
SCEI,
|
||||||
|
Freescale,
|
||||||
|
IBM,
|
||||||
|
diff --git a/llvm/lib/TargetParser/Triple.cpp b/llvm/lib/TargetParser/Triple.cpp
|
||||||
|
index 0584c941d2e6..e4d6ef963cc7 100644
|
||||||
|
--- a/llvm/lib/TargetParser/Triple.cpp
|
||||||
|
+++ b/llvm/lib/TargetParser/Triple.cpp
|
||||||
|
@@ -269,6 +269,7 @@ StringRef Triple::getVendorTypeName(VendorType Kind) {
|
||||||
|
case NVIDIA: return "nvidia";
|
||||||
|
case OpenEmbedded: return "oe";
|
||||||
|
case PC: return "pc";
|
||||||
|
+ case Rosa: return "rosa";
|
||||||
|
case SCEI: return "scei";
|
||||||
|
case SUSE: return "suse";
|
||||||
|
}
|
||||||
|
@@ -669,6 +670,7 @@ static Triple::VendorType parseVendor(StringRef VendorName) {
|
||||||
|
.Case("suse", Triple::SUSE)
|
||||||
|
.Case("oe", Triple::OpenEmbedded)
|
||||||
|
.Case("intel", Triple::Intel)
|
||||||
|
+ .Case("rosa", Triple::Rosa)
|
||||||
|
.Default(Triple::UnknownVendor);
|
||||||
|
}
|
||||||
|
|
||||||
|
`},
|
||||||
|
|
||||||
|
{"xfail-broken-tests", `diff --git a/clang/test/Modules/timestamps.c b/clang/test/Modules/timestamps.c
|
||||||
|
index 50fdce630255..4b4465a75617 100644
|
||||||
|
--- a/clang/test/Modules/timestamps.c
|
||||||
|
+++ b/clang/test/Modules/timestamps.c
|
||||||
|
@@ -1,3 +1,5 @@
|
||||||
|
+// XFAIL: target={{.*-rosa-linux-musl}}
|
||||||
|
+
|
||||||
|
/// Verify timestamps that gets embedded in the module
|
||||||
|
#include <c-header.h>
|
||||||
|
|
||||||
|
`},
|
||||||
|
|
||||||
|
{"path-system-include", `diff --git a/clang/lib/Driver/ToolChains/Linux.cpp b/clang/lib/Driver/ToolChains/Linux.cpp
|
||||||
|
index cdbf21fb9026..dd052858700d 100644
|
||||||
|
--- a/clang/lib/Driver/ToolChains/Linux.cpp
|
||||||
|
+++ b/clang/lib/Driver/ToolChains/Linux.cpp
|
||||||
|
@@ -773,6 +773,12 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
|
||||||
|
addExternCSystemInclude(
|
||||||
|
DriverArgs, CC1Args,
|
||||||
|
concat(SysRoot, "/usr/include", MultiarchIncludeDir));
|
||||||
|
+ if (!MultiarchIncludeDir.empty() &&
|
||||||
|
+ D.getVFS().exists(concat(SysRoot, "/system/include", MultiarchIncludeDir)))
|
||||||
|
+ addExternCSystemInclude(
|
||||||
|
+ DriverArgs, CC1Args,
|
||||||
|
+ concat(SysRoot, "/system/include", MultiarchIncludeDir));
|
||||||
|
+
|
||||||
|
|
||||||
|
if (getTriple().getOS() == llvm::Triple::RTEMS)
|
||||||
|
return;
|
||||||
|
@@ -783,6 +789,7 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
|
||||||
|
addExternCSystemInclude(DriverArgs, CC1Args, concat(SysRoot, "/include"));
|
||||||
|
|
||||||
|
addExternCSystemInclude(DriverArgs, CC1Args, concat(SysRoot, "/usr/include"));
|
||||||
|
+ addExternCSystemInclude(DriverArgs, CC1Args, concat(SysRoot, "/system/include"));
|
||||||
|
|
||||||
|
if (!DriverArgs.hasArg(options::OPT_nobuiltininc) && getTriple().isMusl())
|
||||||
|
addSystemInclude(DriverArgs, CC1Args, ResourceDirInclude);
|
||||||
|
`},
|
||||||
|
|
||||||
|
{"path-system-libraries", `diff --git a/clang/lib/Driver/ToolChains/Linux.cpp b/clang/lib/Driver/ToolChains/Linux.cpp
|
||||||
|
index 8ac8d4eb9181..f4d1347ab64d 100644
|
||||||
|
--- a/clang/lib/Driver/ToolChains/Linux.cpp
|
||||||
|
+++ b/clang/lib/Driver/ToolChains/Linux.cpp
|
||||||
|
@@ -282,6 +282,7 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
|
||||||
|
const bool IsHexagon = Arch == llvm::Triple::hexagon;
|
||||||
|
const bool IsRISCV = Triple.isRISCV();
|
||||||
|
const bool IsCSKY = Triple.isCSKY();
|
||||||
|
+ const bool IsRosa = Triple.getVendor() == llvm::Triple::Rosa;
|
||||||
|
|
||||||
|
if (IsCSKY && !SelectedMultilibs.empty())
|
||||||
|
SysRoot = SysRoot + SelectedMultilibs.back().osSuffix();
|
||||||
|
@@ -318,12 +319,23 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
|
||||||
|
const std::string OSLibDir = std::string(getOSLibDir(Triple, Args));
|
||||||
|
const std::string MultiarchTriple = getMultiarchTriple(D, Triple, SysRoot);
|
||||||
|
|
||||||
|
+ if (IsRosa) {
|
||||||
|
+ ExtraOpts.push_back("-rpath");
|
||||||
|
+ ExtraOpts.push_back("/system/lib");
|
||||||
|
+ ExtraOpts.push_back("-rpath");
|
||||||
|
+ ExtraOpts.push_back(concat("/system/lib", MultiarchTriple));
|
||||||
|
+ }
|
||||||
|
+
|
||||||
|
// mips32: Debian multilib, we use /libo32, while in other case, /lib is
|
||||||
|
// used. We need add both libo32 and /lib.
|
||||||
|
if (Arch == llvm::Triple::mips || Arch == llvm::Triple::mipsel) {
|
||||||
|
Generic_GCC::AddMultilibPaths(D, SysRoot, "libo32", MultiarchTriple, Paths);
|
||||||
|
- addPathIfExists(D, concat(SysRoot, "/libo32"), Paths);
|
||||||
|
- addPathIfExists(D, concat(SysRoot, "/usr/libo32"), Paths);
|
||||||
|
+ if (!IsRosa) {
|
||||||
|
+ addPathIfExists(D, concat(SysRoot, "/libo32"), Paths);
|
||||||
|
+ addPathIfExists(D, concat(SysRoot, "/usr/libo32"), Paths);
|
||||||
|
+ } else {
|
||||||
|
+ addPathIfExists(D, concat(SysRoot, "/system/libo32"), Paths);
|
||||||
|
+ }
|
||||||
|
}
|
||||||
|
Generic_GCC::AddMultilibPaths(D, SysRoot, OSLibDir, MultiarchTriple, Paths);
|
||||||
|
|
||||||
|
@@ -341,18 +353,30 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
|
||||||
|
Paths);
|
||||||
|
}
|
||||||
|
|
||||||
|
- addPathIfExists(D, concat(SysRoot, "/usr/lib", MultiarchTriple), Paths);
|
||||||
|
- addPathIfExists(D, concat(SysRoot, "/usr", OSLibDir), Paths);
|
||||||
|
+ if (!IsRosa) {
|
||||||
|
+ addPathIfExists(D, concat(SysRoot, "/usr/lib", MultiarchTriple), Paths);
|
||||||
|
+ addPathIfExists(D, concat(SysRoot, "/usr", OSLibDir), Paths);
|
||||||
|
+ } else {
|
||||||
|
+ addPathIfExists(D, concat(SysRoot, "/system/lib", MultiarchTriple), Paths);
|
||||||
|
+ addPathIfExists(D, concat(SysRoot, "/system", OSLibDir), Paths);
|
||||||
|
+ }
|
||||||
|
if (IsRISCV) {
|
||||||
|
StringRef ABIName = tools::riscv::getRISCVABI(Args, Triple);
|
||||||
|
addPathIfExists(D, concat(SysRoot, "/", OSLibDir, ABIName), Paths);
|
||||||
|
- addPathIfExists(D, concat(SysRoot, "/usr", OSLibDir, ABIName), Paths);
|
||||||
|
+ if (!IsRosa)
|
||||||
|
+ addPathIfExists(D, concat(SysRoot, "/usr", OSLibDir, ABIName), Paths);
|
||||||
|
+ else
|
||||||
|
+ addPathIfExists(D, concat(SysRoot, "/system", OSLibDir, ABIName), Paths);
|
||||||
|
}
|
||||||
|
|
||||||
|
Generic_GCC::AddMultiarchPaths(D, SysRoot, OSLibDir, Paths);
|
||||||
|
|
||||||
|
- addPathIfExists(D, concat(SysRoot, "/lib"), Paths);
|
||||||
|
- addPathIfExists(D, concat(SysRoot, "/usr/lib"), Paths);
|
||||||
|
+ if (!IsRosa) {
|
||||||
|
+ addPathIfExists(D, concat(SysRoot, "/lib"), Paths);
|
||||||
|
+ addPathIfExists(D, concat(SysRoot, "/usr/lib"), Paths);
|
||||||
|
+ } else {
|
||||||
|
+ addPathIfExists(D, concat(SysRoot, "/system/lib"), Paths);
|
||||||
|
+ }
|
||||||
|
}
|
||||||
|
|
||||||
|
ToolChain::RuntimeLibType Linux::GetDefaultRuntimeLibType() const {
|
||||||
|
@@ -457,6 +481,9 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
|
||||||
|
return Triple.isArch64Bit() ? "/system/bin/linker64" : "/system/bin/linker";
|
||||||
|
}
|
||||||
|
if (Triple.isMusl()) {
|
||||||
|
+ if (Triple.getVendor() == llvm::Triple::Rosa)
|
||||||
|
+ return "/system/bin/linker";
|
||||||
|
+
|
||||||
|
std::string ArchName;
|
||||||
|
bool IsArm = false;
|
||||||
|
|
||||||
|
diff --git a/clang/tools/clang-installapi/Options.cpp b/clang/tools/clang-installapi/Options.cpp
|
||||||
|
index 64324a3f8b01..15ce70b68217 100644
|
||||||
|
--- a/clang/tools/clang-installapi/Options.cpp
|
||||||
|
+++ b/clang/tools/clang-installapi/Options.cpp
|
||||||
|
@@ -515,7 +515,7 @@ bool Options::processFrontendOptions(InputArgList &Args) {
|
||||||
|
FEOpts.FwkPaths = std::move(FrameworkPaths);
|
||||||
|
|
||||||
|
// Add default framework/library paths.
|
||||||
|
- PathSeq DefaultLibraryPaths = {"/usr/lib", "/usr/local/lib"};
|
||||||
|
+ PathSeq DefaultLibraryPaths = {"/usr/lib", "/system/lib", "/usr/local/lib"};
|
||||||
|
PathSeq DefaultFrameworkPaths = {"/Library/Frameworks",
|
||||||
|
"/System/Library/Frameworks"};
|
||||||
|
|
||||||
|
`},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// llvm stores the result of Toolchain.newLLVM.
|
||||||
|
llvm [_toolchainEnd][4]pkg.Artifact
|
||||||
|
// llvmOnce is for lazy initialisation of llvm.
|
||||||
|
llvmOnce [_toolchainEnd]sync.Once
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewLLVM returns the LLVM toolchain split across multiple [pkg.Artifact].
|
||||||
|
func (t Toolchain) NewLLVM() (musl, compilerRT, runtimes, clang pkg.Artifact) {
|
||||||
|
llvmOnce[t].Do(func() {
|
||||||
|
llvm[t][0], llvm[t][1], llvm[t][2], llvm[t][3] = t.newLLVM()
|
||||||
|
})
|
||||||
|
return llvm[t][0], llvm[t][1], llvm[t][2], llvm[t][3]
|
||||||
|
}
|
||||||
162
internal/rosa/make.go
Normal file
@@ -0,0 +1,162 @@
|
|||||||
|
package rosa
|
||||||
|
|
||||||
|
import (
|
||||||
|
"slices"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (t Toolchain) newMake() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "4.4.1"
|
||||||
|
checksum = "YS_B07ZcAy9PbaK5_vKGj64SrxO2VMpnMKfc9I0Q9IC1rn0RwOH7802pJoj2Mq4a"
|
||||||
|
)
|
||||||
|
return t.New("make-"+version, TEarly, nil, nil, nil, `
|
||||||
|
cd "$(mktemp -d)"
|
||||||
|
/usr/src/make/configure \
|
||||||
|
--prefix=/system \
|
||||||
|
--build="${ROSA_TRIPLE}" \
|
||||||
|
--disable-dependency-tracking
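# build.sh compiles a make binary without requiring an existing make; the
# resulting ./make then runs the install and check targets below.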
|
||||||
|
./build.sh
|
||||||
|
./make DESTDIR=/work install check
|
||||||
|
`, pkg.Path(AbsUsrSrc.Append("make"), false, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://ftpmirror.gnu.org/gnu/make/make-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
func init() { artifactsF[Make] = Toolchain.newMake }
|
||||||
|
|
||||||
|
// MakeAttr holds the project-specific attributes that will be applied to a new
|
||||||
|
// [pkg.Artifact] compiled via [Make].
|
||||||
|
type MakeAttr struct {
|
||||||
|
// Mount the source tree writable.
|
||||||
|
Writable bool
|
||||||
|
|
||||||
|
// Do not include default extras.
|
||||||
|
OmitDefaults bool
|
||||||
|
// Dependencies not provided by stage3.
|
||||||
|
NonStage3 []pkg.Artifact
|
||||||
|
|
||||||
|
// Additional environment variables.
|
||||||
|
Env []string
|
||||||
|
// Runs before configure.
|
||||||
|
ScriptEarly string
|
||||||
|
// Runs after configure.
|
||||||
|
ScriptConfigured string
|
||||||
|
// Runs after install.
|
||||||
|
Script string
|
||||||
|
|
||||||
|
// Remain in working directory set up during ScriptEarly.
|
||||||
|
InPlace bool
|
||||||
|
|
||||||
|
// Flags passed to the configure script.
|
||||||
|
Configure [][2]string
|
||||||
|
// Extra make targets.
|
||||||
|
Make []string
|
||||||
|
// Build triple passed to configure; zero value is equivalent to the Rosa OS triple.
|
||||||
|
Build string
|
||||||
|
// Whether to skip the check target.
|
||||||
|
SkipCheck bool
|
||||||
|
// Name of the check target, zero value is equivalent to "check".
|
||||||
|
CheckName string
|
||||||
|
|
||||||
|
// Suffix appended to the source pathname.
|
||||||
|
SourceSuffix string
|
||||||
|
|
||||||
|
// Passed through to [Toolchain.New].
|
||||||
|
Flag int
|
||||||
|
}
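// A hypothetical NewViaMake call (names and flags here are made up for
// illustration, not taken from this repository) could look like:
//
//	t.NewViaMake("sed", "4.9", src, &MakeAttr{
//		Configure: [][2]string{{"disable-nls", ""}},
//		SkipCheck: true,
//	})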
|
||||||
|
|
||||||
|
// NewViaMake returns a [pkg.Artifact] for compiling and installing via [Make].
|
||||||
|
func (t Toolchain) NewViaMake(
|
||||||
|
name, version string,
|
||||||
|
source pkg.Artifact,
|
||||||
|
attr *MakeAttr,
|
||||||
|
extra ...pkg.Artifact,
|
||||||
|
) pkg.Artifact {
|
||||||
|
if name == "" || version == "" {
|
||||||
|
panic("names must be non-empty")
|
||||||
|
}
|
||||||
|
if attr == nil {
|
||||||
|
attr = new(MakeAttr)
|
||||||
|
}
|
||||||
|
build := `"${ROSA_TRIPLE}"`
|
||||||
|
if attr.Build != "" {
|
||||||
|
build = attr.Build
|
||||||
|
}
|
||||||
|
|
||||||
|
var configureFlags string
|
||||||
|
if len(attr.Configure) > 0 {
|
||||||
|
const sep = " \\\n\t"
|
||||||
|
configureFlags += sep + strings.Join(
|
||||||
|
slices.Collect(func(yield func(string) bool) {
|
||||||
|
for _, v := range attr.Configure {
|
||||||
|
s := v[0]
|
||||||
|
if v[1] == "" || (v[0] != "" &&
|
||||||
|
v[0][0] >= 'a' &&
|
||||||
|
v[0][0] <= 'z') {
|
||||||
|
s = "--" + s
|
||||||
|
}
|
||||||
|
if v[1] != "" {
|
||||||
|
s += "=" + v[1]
|
||||||
|
}
|
||||||
|
if !yield(s) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
sep,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
var buildFlag string
|
||||||
|
if attr.Build != `""` {
|
||||||
|
buildFlag = ` \
|
||||||
|
--build=` + build
|
||||||
|
}
|
||||||
|
|
||||||
|
makeTargets := make([]string, 1, 2+len(attr.Make))
|
||||||
|
if !attr.SkipCheck {
|
||||||
|
if attr.CheckName == "" {
|
||||||
|
makeTargets = append(makeTargets, "check")
|
||||||
|
} else {
|
||||||
|
makeTargets = append(makeTargets, attr.CheckName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
makeTargets = append(makeTargets, attr.Make...)
|
||||||
|
if len(makeTargets) == 1 {
|
||||||
|
makeTargets = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
finalExtra := []pkg.Artifact{
|
||||||
|
t.Load(Make),
|
||||||
|
}
|
||||||
|
if attr.OmitDefaults || attr.Flag&TEarly == 0 {
|
||||||
|
finalExtra = append(finalExtra,
|
||||||
|
t.Load(Gawk),
|
||||||
|
t.Load(Coreutils),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
finalExtra = append(finalExtra, extra...)
|
||||||
|
|
||||||
|
scriptEarly := attr.ScriptEarly
|
||||||
|
if !attr.InPlace {
|
||||||
|
scriptEarly += "\ncd \"$(mktemp -d)\""
|
||||||
|
} else if scriptEarly == "" {
|
||||||
|
panic("cannot remain in root")
|
||||||
|
}
|
||||||
|
|
||||||
|
return t.New(name+"-"+version, attr.Flag, stage3Concat(t,
|
||||||
|
attr.NonStage3,
|
||||||
|
finalExtra...,
|
||||||
|
), nil, attr.Env, scriptEarly+`
|
||||||
|
/usr/src/`+name+`/configure \
|
||||||
|
--prefix=/system`+buildFlag+configureFlags+attr.ScriptConfigured+`
|
||||||
|
make "-j$(nproc)"`+strings.Join(makeTargets, " ")+`
|
||||||
|
make DESTDIR=/work install
|
||||||
|
`+attr.Script, pkg.Path(AbsUsrSrc.Append(
|
||||||
|
name+attr.SourceSuffix,
|
||||||
|
), attr.Writable, source))
|
||||||
|
}
|
||||||
27
internal/rosa/meson.go
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
package rosa
|
||||||
|
|
||||||
|
import "hakurei.app/internal/pkg"
|
||||||
|
|
||||||
|
func (t Toolchain) newMeson() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "1.10.1"
|
||||||
|
checksum = "w895BXF_icncnXatT_OLCFe2PYEtg4KrKooMgUYdN-nQVvbFX3PvYWHGEpogsHtd"
|
||||||
|
)
|
||||||
|
return t.New("meson-"+version, 0, []pkg.Artifact{
|
||||||
|
t.Load(Python),
|
||||||
|
t.Load(Setuptools),
|
||||||
|
}, nil, nil, `
|
||||||
|
cd /usr/src/meson
|
||||||
|
chmod -R +w meson.egg-info
|
||||||
|
python3 setup.py \
|
||||||
|
install \
|
||||||
|
--prefix=/system \
|
||||||
|
--root=/work
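# --root stages the installation under /work, playing the same role that
# DESTDIR plays for the make-based recipes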
|
||||||
|
`, pkg.Path(AbsUsrSrc.Append("meson"), true, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://github.com/mesonbuild/meson/releases/download/"+
|
||||||
|
version+"/meson-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
func init() { artifactsF[Meson] = Toolchain.newMeson }
|
||||||
36
internal/rosa/mksh.go
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
package rosa
|
||||||
|
|
||||||
|
import "hakurei.app/internal/pkg"
|
||||||
|
|
||||||
|
func (t Toolchain) newMksh() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "59c"
|
||||||
|
checksum = "0Zj-k4nXEu3IuJY4lvwD2OrC2t27GdZj8SPy4DoaeuBRH1padWb7oREpYgwY8JNq"
|
||||||
|
)
|
||||||
|
return t.New("mksh-"+version, 0, stage3Concat(t, []pkg.Artifact{},
|
||||||
|
t.Load(Perl),
|
||||||
|
t.Load(Coreutils),
|
||||||
|
), nil, []string{
|
||||||
|
"LDSTATIC=-static",
|
||||||
|
"CPPFLAGS=-DMKSH_DEFAULT_PROFILEDIR=\\\"/system/etc\\\"",
|
||||||
|
}, `
|
||||||
|
cd "$(mktemp -d)"
|
||||||
|
sh /usr/src/mksh/Build.sh -r
|
||||||
|
CPPFLAGS="${CPPFLAGS} -DMKSH_BINSHPOSIX -DMKSH_BINSHREDUCED" \
|
||||||
|
sh /usr/src/mksh/Build.sh -r -L
|
||||||
|
./test.sh -C regress:no-ctty
|
||||||
|
|
||||||
|
mkdir -p /work/system/bin/
|
||||||
|
cp -v mksh /work/system/bin/
|
||||||
|
cp -v lksh /work/system/bin/sh
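# lksh, produced by the second Build.sh run with -L, serves as the reduced
# POSIX /bin/sh, while mksh stays available as the interactive shell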
|
||||||
|
|
||||||
|
mkdir -p /work/bin/
|
||||||
|
ln -vs ../system/bin/sh /work/bin/
|
||||||
|
`, pkg.Path(AbsUsrSrc.Append("mksh"), false, pkg.NewHTTPGetTar(
|
||||||
|
nil,
|
||||||
|
"https://mbsd.evolvis.org/MirOS/dist/mir/mksh/mksh-R"+version+".tgz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
func init() { artifactsF[Mksh] = Toolchain.newMksh }
|
||||||
64
internal/rosa/musl.go
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
package rosa
|
||||||
|
|
||||||
|
import (
|
||||||
|
"slices"
|
||||||
|
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MuslAttr holds the attributes that will be applied to musl.
|
||||||
|
type MuslAttr struct {
|
||||||
|
// Install headers only.
|
||||||
|
Headers bool
|
||||||
|
// Environment variables concatenated with defaults.
|
||||||
|
Env []string
|
||||||
|
// Dependencies concatenated with defaults.
|
||||||
|
Extra []pkg.Artifact
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMusl returns a [pkg.Artifact] containing an installation of musl libc.
|
||||||
|
func (t Toolchain) NewMusl(attr *MuslAttr) pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "1.2.5"
|
||||||
|
checksum = "y6USdIeSdHER_Fw2eT2CNjqShEye85oEg2jnOur96D073ukmIpIqDOLmECQroyDb"
|
||||||
|
)
|
||||||
|
|
||||||
|
if attr == nil {
|
||||||
|
attr = new(MuslAttr)
|
||||||
|
}
|
||||||
|
|
||||||
|
target := "install"
|
||||||
|
script := `
|
||||||
|
mkdir -p /work/system/bin
|
||||||
|
COMPAT_LINKER_NAME="ld-musl-` + linuxArch() + `.so.1"
|
||||||
|
ln -vs ../lib/libc.so /work/system/bin/linker
|
||||||
|
ln -vs ../lib/libc.so /work/system/bin/ldd
|
||||||
|
ln -vs libc.so "/work/system/lib/${COMPAT_LINKER_NAME}"
|
||||||
|
rm -v "/work/lib/${COMPAT_LINKER_NAME}"
|
||||||
|
rmdir -v /work/lib
|
||||||
|
`
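// The script above reflects how musl provides its own dynamic loader:
// libc.so doubles as the program interpreter, so /system/bin/linker and ldd
// are symlinks to it, and the conventional ld-musl-<arch>.so.1 name is kept
// in /system/lib for binaries that embed that interpreter path.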
|
||||||
|
if attr.Headers {
|
||||||
|
target = "install-headers"
|
||||||
|
script = ""
|
||||||
|
}
|
||||||
|
|
||||||
|
return t.New("musl-"+version, 0, stage3Concat(t, attr.Extra,
|
||||||
|
t.Load(Make),
|
||||||
|
t.Load(Coreutils),
|
||||||
|
), nil, slices.Concat([]string{
|
||||||
|
"ROSA_MUSL_TARGET=" + target,
|
||||||
|
}, attr.Env), `
|
||||||
|
cd "$(mktemp -d)"
|
||||||
|
/usr/src/musl/configure \
|
||||||
|
--prefix=/system \
|
||||||
|
--target="${ROSA_TRIPLE}"
|
||||||
|
make "-j$(nproc)" DESTDIR=/work "${ROSA_MUSL_TARGET}"
|
||||||
|
`+script, pkg.Path(AbsUsrSrc.Append("musl"), false, t.NewPatchedSource(
|
||||||
|
// expected to be writable in copies
|
||||||
|
"musl", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://musl.libc.org/releases/musl-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
), false,
|
||||||
|
)))
|
||||||
|
}
|
||||||
39
internal/rosa/ninja.go
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
package rosa
|
||||||
|
|
||||||
|
import "hakurei.app/internal/pkg"
|
||||||
|
|
||||||
|
func (t Toolchain) newNinja() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "1.13.2"
|
||||||
|
checksum = "ygKWMa0YV2lWKiFro5hnL-vcKbc_-RACZuPu0Io8qDvgQlZ0dxv7hPNSFkt4214v"
|
||||||
|
)
|
||||||
|
return t.New("ninja-"+version, 0, []pkg.Artifact{
|
||||||
|
t.Load(CMake),
|
||||||
|
t.Load(Python),
|
||||||
|
t.Load(Bash),
|
||||||
|
}, nil, nil, `
|
||||||
|
cd "$(mktemp -d)"
|
||||||
|
python3 /usr/src/ninja/configure.py \
|
||||||
|
--bootstrap \
|
||||||
|
--gtest-source-dir=/usr/src/googletest
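# configure.py --bootstrap first produces a working ./ninja without needing an
# existing ninja; the googletest sources are only needed to build ninja_test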
|
||||||
|
./ninja all
|
||||||
|
./ninja_test
|
||||||
|
|
||||||
|
mkdir -p /work/system/bin/
|
||||||
|
cp ninja /work/system/bin/
|
||||||
|
`, pkg.Path(AbsUsrSrc.Append("googletest"), false,
|
||||||
|
pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://github.com/google/googletest/releases/download/"+
|
||||||
|
"v1.16.0/googletest-1.16.0.tar.gz",
|
||||||
|
mustDecode("NjLGvSbgPy_B-y-o1hdanlzEzaYeStFcvFGxpYV3KYlhrWWFRcugYhM3ZMzOA9B_"),
|
||||||
|
pkg.TarGzip,
|
||||||
|
)), pkg.Path(AbsUsrSrc.Append("ninja"), true, t.NewPatchedSource(
|
||||||
|
"ninja", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://github.com/ninja-build/ninja/archive/refs/tags/"+
|
||||||
|
"v"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
), false,
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
func init() { artifactsF[Ninja] = Toolchain.newNinja }
|
||||||
36
internal/rosa/openssl.go
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
package rosa
|
||||||
|
|
||||||
|
import "hakurei.app/internal/pkg"
|
||||||
|
|
||||||
|
func (t Toolchain) newOpenSSL() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "3.5.5"
|
||||||
|
checksum = "I2Hp1LxcTR8j4G6LFEQMVy6EJH-Na1byI9Ti-ThBot6EMLNRnjGXGq-WXrim3Fkz"
|
||||||
|
)
|
||||||
|
return t.New("openssl-"+version, 0, []pkg.Artifact{
|
||||||
|
t.Load(Perl),
|
||||||
|
t.Load(Make),
|
||||||
|
|
||||||
|
t.Load(Zlib),
|
||||||
|
t.Load(KernelHeaders),
|
||||||
|
}, nil, []string{
|
||||||
|
"CC=cc",
|
||||||
|
}, `
|
||||||
|
cd "$(mktemp -d)"
|
||||||
|
/usr/src/openssl/Configure \
|
||||||
|
--prefix=/system \
|
||||||
|
--libdir=lib \
|
||||||
|
--openssldir=etc/ssl
|
||||||
|
make \
|
||||||
|
"-j$(nproc)" \
|
||||||
|
HARNESS_JOBS=256 \
|
||||||
|
test
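# no separate build invocation precedes this; the recipe relies on the test
# target building the tree, and HARNESS_JOBS parallelises the Perl test harness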
|
||||||
|
make DESTDIR=/work install
|
||||||
|
`, pkg.Path(AbsUsrSrc.Append("openssl"), false, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://github.com/openssl/openssl/releases/download/"+
|
||||||
|
"openssl-"+version+"/openssl-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
func init() { artifactsF[OpenSSL] = Toolchain.newOpenSSL }
|
||||||
39
internal/rosa/perl.go
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
package rosa
|
||||||
|
|
||||||
|
import "hakurei.app/internal/pkg"
|
||||||
|
|
||||||
|
func (t Toolchain) newPerl() pkg.Artifact {
|
||||||
|
const (
|
||||||
|
version = "5.42.0"
|
||||||
|
checksum = "2KR7Jbpk-ZVn1a30LQRwbgUvg2AXlPQZfzrqCr31qD5-yEsTwVQ_W76eZH-EdxM9"
|
||||||
|
)
|
||||||
|
return t.New("perl-"+version, TEarly, []pkg.Artifact{
|
||||||
|
t.Load(Make),
|
||||||
|
}, nil, nil, `
|
||||||
|
cd /usr/src/perl
|
||||||
|
|
||||||
|
echo 'print STDOUT "1..0 # Skip broken test\n";' > ext/Pod-Html/t/htmldir3.t
|
||||||
|
rm -f /system/bin/ps # perl does not like toybox ps
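# the Configure run below uses -des to accept the default answers
# non-interactively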
|
||||||
|
|
||||||
|
./Configure \
|
||||||
|
-des \
|
||||||
|
-Dprefix=/system \
|
||||||
|
-Dcc="clang" \
|
||||||
|
-Dcflags='--std=gnu99' \
|
||||||
|
-Dldflags="${LDFLAGS}" \
|
||||||
|
-Doptimize='-O2 -fno-strict-aliasing' \
|
||||||
|
-Duseithreads
|
||||||
|
make \
|
||||||
|
"-j$(nproc)" \
|
||||||
|
TEST_JOBS=256 \
|
||||||
|
test_harness
|
||||||
|
make DESTDIR=/work install
|
||||||
|
`, pkg.Path(AbsUsrSrc.Append("perl"), true, t.NewPatchedSource(
|
||||||
|
"perl", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://www.cpan.org/src/5.0/perl-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
), false,
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
func init() { artifactsF[Perl] = Toolchain.newPerl }
|
||||||
Some files were not shown because too many files have changed in this diff.