1
0
forked from rosa/hakurei

280 Commits

Author SHA1 Message Date
kat
cb4b2706c0 cmd/mbf: bring back pkgserver's favicon!
It existed in mae's #33, but ozy seems to have lost it during her
changes pre-merge, so just add it back again.

This favicon image was grabbed from mae:
8a38b614c6/cmd/pkgserver/ui/static/favicon.ico
That commit is the latest one of the salvaged original #33 history; see
rosa/hakurei#33 (comment).
2026-05-15 21:05:26 +10:00
c32c06b2e8 internal/rosa: mesa artifact
This has many dependencies.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-14 05:12:35 +09:00
61199f734c internal/rosa/glslang: remove headers prefix
Maintainers tried to be clever with this, and it breaks cmake paths.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-14 04:57:38 +09:00
87cf0d4e6b internal/rosa/mesa: libva artifact
Required by mesa.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-14 04:40:04 +09:00
cf0dffa0f5 internal/rosa/mesa: libglvnd enable glx
Required to break circular dependency.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-14 04:35:50 +09:00
686d7ec63a internal/rosa/x: xserver artifact
Required by libglvnd test suite.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-14 04:15:48 +09:00
4c653b1151 internal/rosa/x: xkeyboard-config artifact
Required by xserver test suite.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-14 03:59:22 +09:00
0b0a63d151 internal/rosa/x: libxcb-util-wm artifact
Required by xserver xephyr.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-14 03:46:04 +09:00
6231cfe2aa internal/rosa/x: libxcb-util-image artifact
Required by xserver xephyr.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-14 03:36:45 +09:00
712e80890b internal/rosa/x: libxcb-util artifact
Required by xserver xephyr.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-14 03:25:24 +09:00
3fe7d48014 internal/rosa/x: libxcb-render-util artifact
Required by xserver.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-14 03:09:37 +09:00
16f9d39427 internal/rosa: libepoxy artifact
Required by xserver.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-14 02:16:55 +09:00
c1cd5ba07b internal/rosa: libtirpc artifact
Required by xserver.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-14 02:07:25 +09:00
7b0cd2e472 internal/rosa/x: libXdmcp artifact
Required by xserver.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-14 01:44:37 +09:00
e580307528 internal/rosa/x: libxcvt artifact
Required by xserver.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-14 01:24:00 +09:00
ee1dffb676 internal/rosa/x: libXfont2 artifact
Required by xserver.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-14 01:17:27 +09:00
f095fcf181 internal/rosa/x: font-util and libfontenc artifact
Required by libXfont2.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-14 01:14:12 +09:00
ca8a130130 internal/rosa: freetype artifact
Required by libXfont2.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-14 00:54:42 +09:00
0ad6b00e41 internal/rosa/x: xkbcomp artifact
Required by xserver.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-13 22:24:08 +09:00
ad0f1cf36b internal/rosa/x: libxkbfile artifact
Required by xkbcomp.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-13 22:22:02 +09:00
b12d924fa2 internal/rosa: pixman artifact
Required by xserver.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-13 22:07:53 +09:00
c31d8ae41a internal/rosa/x: libXfixes artifact
Required by libva.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-13 21:36:47 +09:00
6dbbf15c0e internal/rosa: lm_sensors artifact
Generally useful, and an optional dependency of mesa.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-13 20:11:37 +09:00
be7de68a42 internal/rosa/perl: Test::Cmd artifact
Required by lm_sensors test suite.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-13 20:05:43 +09:00
a759cf3666 internal/pkg: check exec substitution
This relies on the testtool having ident as relevant input to assert successful substitution.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-13 19:43:53 +09:00
8c2dd3e984 internal/pkg: verify status kind
While it is still impossible to reliably determine the expected contents of these status files, this checks their nature for expected substitution behaviour.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-13 19:27:58 +09:00
67038d5af4 internal/pkg: log fault in tests when available
This would otherwise only be available in verbose output, interleaved with everything else.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-13 18:58:18 +09:00
53d8d12e7f internal/rosa/git: disable flaky test
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-13 18:51:11 +09:00
7997d79e56 cmd/mbf: display and destroy fault entries
This change extends cmd/mbf commands for working with fault entries.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-13 19:06:09 +09:00
f2f1726190 internal/pkg: record cure faults
These are useful for troubleshooting. This change records them in a separate directory.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-13 17:58:18 +09:00
f63203cb0a internal/pkg: populate substitute status
These are not created when taking the fast path, but should be inherited from the alternative.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-13 16:16:37 +09:00
19555c7670 internal/rosa/gtk: glib 2.88.0 to 2.88.1
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-13 00:48:38 +09:00
a3beab8959 internal/rosa/libucontext: 1.5 to 1.5.1
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-13 00:48:15 +09:00
2ea786d6a9 internal/rosa/libbsd: libmd 1.1.0 to 1.2.0
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-13 00:47:57 +09:00
747d4ec4b0 internal/rosa/libexpat: 2.8.0 to 2.8.1
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-13 00:47:32 +09:00
b76e6f6519 internal/rosa/tamago: 1.26.2 to 1.26.3
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-13 00:47:05 +09:00
840d8f68bf internal/rosa/git: disable flaky test
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-13 00:38:59 +09:00
4bede7ecdd internal/pkg: discontinue DCE resolution on signal
This serves as a stopgap measure to skip long-running DCE resolutions.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-13 00:29:01 +09:00
487a03b5a3 internal/pkg: deduplicate DCE by ident
This eliminates edge cases where target artifacts do not compare equal.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-13 00:18:27 +09:00
8f3c22896a internal/pkg: DCE benchmark unwrap only
This eliminates noise at lower depths.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-12 19:56:59 +09:00
a167c1aba5 internal/pkg: hold artifact in DCE
This is significantly slower but enables much better error reporting.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-12 19:45:25 +09:00
a6008ef68b internal/pkg: benchmark early DCE
This error has never had decent performance, now is a good time to improve that.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-12 18:59:25 +09:00
5228b27362 internal/rosa/glslang: 16.2.0 to 16.3.0
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-12 17:53:35 +09:00
f00d3a07ad internal/rosa/python: trove-classifiers 2026.4.28.13 to 2026.5.7.17
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-12 17:53:17 +09:00
f9538bc21b internal/rosa/python: 3.14.4 to 3.14.5
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-12 17:52:53 +09:00
6ae5efec56 internal/rosa/gnu: gcc 15.2.0 to 16.1.0
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-12 17:52:31 +09:00
14f4c59c8c internal/rosa/llvm: 22.1.4 to 22.1.5
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-12 17:52:13 +09:00
688d43417b internal/pkg: rename measured exec type
This type is no longer exclusive to KindExecNet.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-12 15:23:33 +09:00
9f8fafa39b internal/rosa: measure kernel headers
This makes version bumps robust and much less tedious.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-12 15:19:57 +09:00
6643cfbeee internal/pkg: optionally measure exec artifact
Useful for verifying deterministic output without enabling network access.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-12 15:11:17 +09:00
dcde38f2e9 internal/rosa/llvm: set exclusive bit
This was missed when improving bootstrap.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-12 15:08:09 +09:00
deebbf6b1a internal/rosa/git: disable more flaky tests
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-12 04:13:02 +09:00
0c557798bc internal/rosa/curl: 8.19.0 to 8.20.0
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-12 04:12:40 +09:00
327e6ed5a2 internal/rosa/kernel: 6.12.84 to 6.12.87
This change also pins header version constants to the same values, to be updated manually on a real API change. This eliminates rebuilds on bumping kernel version.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-12 04:05:30 +09:00
76c7a423a9 internal/rosa/git: disable more flaky tests
Again, causing too many spurious failures.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-12 03:18:12 +09:00
6e113b8836 internal/pkg: content-based dependency substitution
This change introduces a new fast path for FloodArtifact. It is taken when a curing artifact has identical-by-content controlled relevant inputs and is otherwise identical to an already-cured artifact.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-12 00:19:42 +09:00
ce9f4b5f71 internal/rosa: vim artifact
Very useful for troubleshooting.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-10 21:45:56 +09:00
8f727273ef internal/pkg: add riscv64 sums
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-10 17:12:30 +09:00
d0a63b942e internal/pkg: add arm64 sums
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-10 16:42:42 +09:00
7f2126df32 internal/rosa/hakurei: 0.4.1 to 0.4.2
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-10 16:30:12 +09:00
0cf0e18e35 release: 0.4.2
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-10 16:16:59 +09:00
ee5c0dd135 cmd/dist: optionally skip tests
Works around incomplete syscall translation by qemu.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-10 04:15:07 +09:00
92c48d82e2 internal/rosa/go: respect check flag
These tests are also quite expensive, so optionally skip them.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-10 04:01:06 +09:00
c79a4fe7f8 internal/rosa/stage0: add riscv64 tarball
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-09 10:51:19 +09:00
0aeb2bccfb internal/rosa: libconfig artifact
Required by mesa.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-09 00:33:27 +09:00
50e079b99f internal/rosa: xcb-util-keysyms artifact
Required by mesa.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-09 00:16:06 +09:00
fb2cb5005a internal/rosa: libdisplay-info artifact
Required by mesa.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-09 00:07:43 +09:00
6e73c28a92 internal/rosa: hwdata artifact
Required by libdisplay-info.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-09 00:05:40 +09:00
2c08aa3674 internal/rosa/glslang: disable broken arm64 tests
These just fail on arm64, so disable them.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-08 23:56:19 +09:00
1af73ae7b4 internal/rosa/go: 1.26.2 to 1.26.3
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-08 23:25:57 +09:00
c9aa5e04b1 internal/rosa/go: bootstrap 1.25.9 to 1.25.10
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-08 23:20:39 +09:00
70a38bd3b0 internal/rosa: libarchive artifact
Required by mesa, also a cleaner implementation than GNU.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-08 23:16:33 +09:00
533b15da89 internal/rosa/mksh: respect check flag
This skips the test suite when OptSkipCheck is set.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-08 21:20:20 +09:00
a890e1d0e5 cmd/mbf: optionally override non-native flags
This is a clean workaround for configuration differences to save time during development.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-08 13:45:36 +09:00
e3520835bb cmd/mbf: optionally register all targets
This enables non-native cures from the daemon.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-08 13:29:58 +09:00
0e56847754 cmd/mbf: add arm64 magic
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-08 00:23:09 +09:00
145d03b366 cmd/mbf: optional emulated target architecture
This enables transparent cross-compilation without breaking purity.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-07 20:29:31 +09:00
2886228d40 internal/rosa/qemu: build static binaries
Dynamic linking here barely saves space, and this is required for binfmt.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-07 20:25:13 +09:00
e1e499b79e internal/rosa/git: disable more broken tests
These are causing many spurious failures.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-07 20:06:11 +09:00
65b7dd8b37 internal/rosa: configurable architecture
This enables curing via binfmt.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-07 20:01:44 +09:00
8d72b9e5bd internal/pkg: optionally register binfmt
This transparently supports curing foreign exec artifacts.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-07 19:43:06 +09:00
8a3c3d145a internal/pkg: correctly generate cure expects
This needs to dereference the identifier symlink.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-07 15:57:45 +09:00
575ef307ad container: binfmt registration
This arranges for binfmt entries to be registered for the container.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-07 15:55:19 +09:00
d4144fcf7f container: optionally map uid/gid 0 as init
Unfortunately required to work around flawed APIs like binfmt_misc.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-07 15:15:47 +09:00
bad66facbc container: improve capability handling
This cleans up preserving caps for expansion and correctly sets privileged caps.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-07 14:27:28 +09:00
4aba014eac container: abandon response on termination
This prevents blocking on early failure.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-07 00:58:02 +09:00
779ba994ce container: check capability in test helper
This makes corresponding nixos tests redundant.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-06 21:05:54 +09:00
917be2de93 internal/pkg/exec: close early failure before wait
This avoids a deadlock on an early container failure.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-06 18:38:16 +09:00
9aad98d409 internal/rosa: suppress init verbosity in tests
This is generally the preferred option.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-06 06:54:20 +09:00
b0d06b67dc internal/pkg: centralise exec testdata checksums
This significantly reduces maintenance burden.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-06 06:37:58 +09:00
089100f29d internal/rosa/stage0: add arm64 tarball
This was bootstrapped from the old tarball, but with the new patchset.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-06 05:47:14 +09:00
dfd26abf6c internal/pkg: improve output measuring
This significantly improves readability and maintainability.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-06 05:44:04 +09:00
617ee21647 container/init: mount intermediate before early
This is usable as scratch space during early.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-06 00:55:45 +09:00
15cdb37ec2 cmd/mbf: optional init verbosity
This output is generally not needed and only useful when debugging container machinery itself.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-05 23:56:16 +09:00
1f0bdc7aca internal/rosa/meson: disable fallback
For some reason nodownload still allows fallback in some cases.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-05 21:32:19 +09:00
e3ffe85670 internal/rosa/python: pycparser artifact
Required by mesa.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-05 20:37:09 +09:00
f4403ba5cd internal/rosa: libpng artifact
Required by mesa.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-05 20:23:50 +09:00
5a26895a22 internal/pkg: optionally suppress init verbosity
This flag applies to every exec artifact cured by the cache. It has no effect on cure outcome.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-05 20:03:06 +09:00
09d9f766a9 container: optionally suppress init verbosity
This change also removes verbose output no longer considered useful.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-05 19:59:44 +09:00
6558169666 internal/rosa/x: libXrandr artifact
Required by mesa.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-05 19:39:19 +09:00
cccf970c57 internal/rosa/x: libXrender artifact
Required by libXrandr.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-05 19:37:11 +09:00
57ffb21690 internal/rosa/x: libXxf86vm artifact
Required by mesa.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-05 19:27:59 +09:00
9c560b455a internal/rosa/stage0: replace amd64 tarball
This toolchain is built with the new patchset.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-05 04:39:53 +09:00
4c7c0fbfc6 internal/rosa/llvm: update configuration for early runtimes
These were never updated when the bootstrap was moved to stage0-only.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-05 04:38:17 +09:00
18b3b7904e internal/rosa/llvm: exclude benchmarks
These are being built despite LLVM_BUILD_BENCHMARKS defaulting to off.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-05 03:11:26 +09:00
fefefdf734 internal/rosa/llvm: insert Rosa OS paths via musl ldso
This is cleaner than unconditionally adding rpath, and avoids breaking rpath priority.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-05 02:44:26 +09:00
b84bb09a80 internal/rosa/hakurei: 0.4.0 to 0.4.1
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-04 05:28:14 +09:00
337bf20f50 release: 0.4.1
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-04 05:04:00 +09:00
1cb792cf6e cmd/dist: increase gzip level
Performance does not matter in this case.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-04 04:04:18 +09:00
b2b40b07e8 cmd/dist: optional verbosity
This makes output less noisy. The build is fast enough not to require progress indication.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-04 04:02:02 +09:00
da11b26ec1 container/initoverlay: configure via fsconfig
This works around the page size limit at the cost of negligible performance regressions.

Closes #34.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-04 02:29:56 +09:00
024489e800 ext: wrap file-descriptor-based mount facilities
This only implements what is required by package container for now.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-04 01:54:35 +09:00
0f795712b0 internal/rosa/llvm: enable LLVM_BUILD_TESTS
This arranges for tests to be built early, and is more efficient towards the end of the build.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-03 20:05:30 +09:00
7e2210ff71 internal/rosa/llvm: provide runtimes early in stage0
The LLVM build system fails to handle a dynamically linked toolchain correctly, and leaks the system installation during builds.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-03 19:48:49 +09:00
a71a008f3c cmd/mbf: optionally build on early stages
This makes debugging the bootstrap process much less cumbersome.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-03 18:46:47 +09:00
162265b47e container: reject strings larger than a page
The vfs stores these values in a page obtained via GFP, and silently stops copying once the page is filled. This check prevents confusing behaviour in such cases.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-03 17:30:25 +09:00
3fa7ac04e4 internal/rosa/x: combine with xcb
Separating them no longer makes sense.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-03 04:38:00 +09:00
bf2867d653 internal/rosa/x: libxshmfence artifact
Required by mesa.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-03 04:35:39 +09:00
ec0f0f6507 internal/rosa/x: libXext artifact
Required by mesa.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-03 04:23:20 +09:00
a77a802955 internal/rosa/x: xlib artifact
Required by mesa.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-03 04:15:21 +09:00
4407e14dfc internal/rosa/x: migrate to xorgproto
This is much cleaner than the many protocol packages.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-03 04:09:36 +09:00
e024d3184a internal/rosa/clang: install cpp symlink
Required by some buggy autotools scripts.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-03 00:41:23 +09:00
8e1bf00c2d internal/rosa/stage0: add arm64 tarball
This replaces the previous, much larger stage0 distribution.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-02 23:53:08 +09:00
b111e22050 internal/rosa/x: libxtrans artifact
Required by many X libraries.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-02 23:42:00 +09:00
1fa458c0be internal/rosa/glslang: SPIRV-LLVM-Translator artifact
Required by mesa.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-02 22:47:51 +09:00
2c7ae67a67 internal/rosa/llvm: LIT args helper
This is useful for other projects using LIT.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-02 22:17:57 +09:00
3826621b21 internal/rosa/python: lit artifact
Used by LLVM-related projects.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-02 22:15:37 +09:00
041b505c2e internal/rosa/cmake: implicit CMAKE_BUILD_TYPE
Lack of this behaviour is a holdover from when the helper was first split from the (now removed) LLVM helper.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-02 21:53:38 +09:00
e6debce649 internal/rosa/llvm: make source independently available
This is unfortunately still required, due to the monorepo nature of LLVM.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-02 21:47:01 +09:00
aa26b86fce internal/rosa/llvm: skip multiple-compile-threads-basic on arm64
This intermittently crashes.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-02 12:39:46 +09:00
a57a8fd5d8 internal/rosa/llvm: skip unwind_leaffunction on arm64
This unexpectedly passes.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-02 05:53:00 +09:00
mae
1d5d063d6a cmd/mbf: package status dashboard
This displays package metadata with optional status from a report.
2026-05-02 05:05:56 +09:00
e61628a34e cmd/mbf: test cure all via daemon
This is the daemon equivalent of CureAll in internal/rosa.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-02 02:39:12 +09:00
5a18f14929 internal/rosa/gnu: bison disable broken test
This is miscompiled by the current toolchain.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-02 02:23:51 +09:00
f12880688d internal/rosa/gnu: test skip helper
The terribleness of GNU invites interesting helpers.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-02 05:19:54 +09:00
bb5bbfe16a internal/rosa/go: disable tsan test
The newly enabled tsan does not play well with go126, so disable for now.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-02 00:12:41 +09:00
427e1ca37c internal/rosa/go: bootstrap 1.25.7 to 1.25.9
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-01 23:24:07 +09:00
96fdd9ecc5 internal/rosa: disable LTO in tests
This is too expensive and not feasible for development.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-01 20:08:26 +09:00
02771b655b internal/rosa/stage0: replace amd64 tarball
This is a non-LTO distribution with the new layer configuration.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-01 18:57:28 +09:00
d1c8d2c39b internal/rosa/gnu: skip libtool tests in stage0
This upsets the linker in stage0.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-01 05:26:40 +09:00
0efd742e8a internal/rosa/llvm: enable libclc as a runtime
Enabling this as a project is deprecated.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-01 05:17:02 +09:00
ae1fe638d5 internal/rosa/stage0: remove unused layers
The stage0 toolchain no longer requires bundled dependencies other than the bare toolchain and environment itself.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-01 03:52:41 +09:00
445d95023b internal/rosa: global preset flags
These changes preset behaviour globally. Useful for ad hoc workarounds for development or bootstrapping on resource-constrained systems.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-01 03:42:48 +09:00
fc66f0bb47 internal/rosa/llvm: use llvm build system
This removes the multistep bootstrap hack. Stage0 exceptions are also eliminated for a later change to bring the stage0 distribution down to just a bare toolchain, toybox and shell. This change also enables dynamic linking and ThinLTO.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-01 03:36:58 +09:00
2cd6b35bee internal/rosa/cmake: run tests
This uses the standard CMake test target.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-05-01 03:04:59 +09:00
09a216c6ec internal/rosa/perl: make /system/bin writable
This enables cure in stage0 where /system/bin is read-only.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-30 19:25:46 +09:00
44d17325c2 internal/rosa: raise stage0 extra layers
This enables extras to override stage0 tarball.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-30 18:58:42 +09:00
544ce77cbc internal/rosa/make: do not attempt check
This is circular during bootstrap, and tests are silently skipped without perl, so disable them explicitly.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-30 17:36:46 +09:00
63c3c30b23 internal/rosa/zlib: compile with -fPIC
For static linking into shared libraries. This was missed when migrating to CMake.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-30 15:55:46 +09:00
d23c4ecc7c internal/rosa/llvm: use correct triple for rpath
MultiarchTriple produces a generic glibc triple string.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-30 00:39:13 +09:00
a46656dff8 internal/rosa/python: mako 1.3.11 to 1.3.12
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-29 14:25:26 +09:00
77db153ff5 internal/rosa/python: trove-classifiers 2026.1.14.14 to 2026.4.28.13
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-29 14:25:07 +09:00
520d95bc07 internal/rosa/libxslt: fetch source tarball
This does not have submodules, so the overhead of git is unnecessary.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-28 18:31:44 +09:00
451df3f4e7 internal/rosa/libxml2: fetch source tarball
This does not have submodules, so the overhead of git is unnecessary.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-28 18:31:28 +09:00
011fac15ed internal/rosa/git: 2.53.0 to 2.54.0
This release broke httpd detection and job control on mksh.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-28 18:23:20 +09:00
347682ad0b internal/rosa/kernel: 6.12.83 to 6.12.84
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-28 17:44:20 +09:00
1a2b979add internal/rosa/rsync: 3.4.1 to 3.4.2
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-28 16:37:47 +09:00
b1c90cc380 internal/rosa/libexpat: 2.7.5 to 2.8.0
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-28 16:37:16 +09:00
3a66b8143a internal/rosa/nss: 3.123 to 3.123.1
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-28 16:15:14 +09:00
64bbd3aabd internal/rosa/mesa: libdrm 2.4.131 to 2.4.133
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-28 16:03:49 +09:00
08799a13d0 internal/rosa/glslang: spirv-tools check stable versions
This hides release candidates.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-28 16:03:29 +09:00
1aef9c3bbb internal/rosa/python: pathspec 1.0.4 to 1.1.1
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-28 16:02:19 +09:00
1f38303747 internal/rosa/python: packaging 26.1 to 26.2
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-28 16:01:56 +09:00
640777b00c internal/rosa/gnu: parallel 20260322 to 20260422
This pulls in bash with nonstandard hardcoded path.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-28 15:58:59 +09:00
1d657193cf internal/rosa/kernel: disable md
This is entirely unused and is a somewhat large attack surface, so disable it.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-28 15:48:20 +09:00
bab5406295 internal/rosa/go: require popcnt for x86
This backports https://go.dev/cl/746640.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-28 14:36:59 +09:00
725ae7d64d nix: remove all explicit timeouts
These were useful during development because timing out is often the only indication of failure due to the terrible design of nixos vm test harness. This has become a nuisance however especially when the system is under load, so remove explicit values and fall back to the ludicrously high default.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-23 13:07:22 +09:00
37a0c3967e internal/rosa/gnu: mpc fetch source tarball
This does not have submodules, so the overhead of git is unnecessary.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-23 12:57:11 +09:00
ea0692548f internal/rosa/gnu: coreutils 9.10 to 9.11
Test regression was fixed, dropping patch.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-23 12:30:46 +09:00
48ea23e648 internal/rosa/gnu: sed 4.9 to 4.10
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-23 12:30:06 +09:00
40320e4920 internal/rosa/meson: 1.11.0 to 1.11.1
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-23 12:29:17 +09:00
3ca0f61632 internal/rosa/llvm: 22.1.3 to 22.1.4
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-23 12:28:55 +09:00
6ffaac96e3 internal/rosa/cmake: 4.3.1 to 4.3.2
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-23 12:28:34 +09:00
13c7713d0c internal/rosa/kernel: 6.12.82 to 6.12.83
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-23 12:28:14 +09:00
42389f7ec5 internal/rosa/qemu: 10.2.2 to 11.0.0
This pulls in some python packages.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-23 01:15:13 +09:00
30f130c691 internal/rosa/python: wheel artifact
No idea why this ended up as a package.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-23 01:07:14 +09:00
ceb4d26087 internal/pkg: record cache variant on-disk
This makes custom artifacts much less error-prone to use.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-23 00:53:21 +09:00
852f3a9b3d internal/rosa/kernel: 6.12.81 to 6.12.82
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-20 22:11:13 +09:00
5e02dbdb0d internal/rosa/python: remove pypi helpers
Pypi is disallowed by policy so these helpers are no longer useful.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-20 02:37:10 +09:00
6a3248d472 internal/rosa/python: install pyyaml from source
Required by mesa.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-20 02:35:30 +09:00
67404c98d9 internal/rosa/nss: install buildcatrust from source
Dependencies are now available, so this no longer has to rely on the release.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-20 02:09:24 +09:00
b9bf69cfce internal/rosa/python: install mako from source
Required by mesa.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-20 01:55:23 +09:00
4648f98272 internal/rosa/python: run tests via helper
Despite the lack of standards, pytest seems somewhat widely agreed upon.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-20 01:50:57 +09:00
11d99439ac internal/rosa/python: install markupsafe from source
Required by mesa.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-20 01:26:11 +09:00
39e4c5b8ac internal/rosa/python: optionally install before check
Some test suites require package to be installed globally.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-20 01:25:43 +09:00
e8f6db38b6 internal/rosa/python: install pytest from source
Used by many python packages.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-19 23:17:38 +09:00
20d5b71575 internal/rosa/python: install iniconfig from source
This also required the setuptools-scm hack.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-19 22:53:32 +09:00
e903e7f542 internal/rosa/python: install pygments from source
This finally has its dependencies.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-19 22:40:43 +09:00
1caa051f4d internal/rosa/python: hatchling artifact
Required by many python packages.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-19 22:35:18 +09:00
dcdc6f7f6d internal/rosa/python: trove-classifiers artifact
Required by hatchling.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-19 22:32:12 +09:00
5ad6f26b46 internal/rosa/python: install packaging from source
This is required by many packages.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-19 22:12:49 +09:00
7ba75a79f4 internal/rosa/python: install pluggy from source
This finally has all its dependencies at this point.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-19 21:55:55 +09:00
9ef84d3904 internal/rosa/python: setuptools-scm artifact
Awful hack required by many packages.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-19 21:38:44 +09:00
3b7b6e51fb internal/rosa/python: pass build dependencies separately
This is cleaner with less duplicate code.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-19 20:26:41 +09:00
b1b4debb82 internal/rosa/python: pathspec artifact
Required by hatchling, which is required by many python packages.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-19 20:13:26 +09:00
021cbbc2a8 cmd/mbf: default daemon socket in cache
This location makes more sense than the current directory.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-19 19:50:54 +09:00
a4a54a4a4d cmd/mbf: remove pointless recover
This used to scrub the cache, and was not fully removed when that became nonviable.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-19 19:49:01 +09:00
04a344aac6 internal/rosa/python: flirt_core artifact
A build system required by a dependency of another build system, which is required by yet another build system.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-19 19:25:04 +09:00
6b98156a3d internal/rosa/python: change insane strict_timestamps default
There is no scenario where this is useful, and it breaks builds.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-19 18:56:22 +09:00
753432cf09 cmd/mbf: optionally wait for cancel
Synchronisation is not needed here during interactive use.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-19 18:24:11 +09:00
f8902e3679 internal/rosa/python: append to source path
This gets around messy projects with multiple packages.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-19 17:51:00 +09:00
8ee53a5164 internal/rosa: use builtin for checksum warning
This avoids having to configure the logger early.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-19 17:50:12 +09:00
3981d44757 internal/rosa/python: migrate setuptools to wrapper
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-19 15:36:43 +09:00
9fd67e47b4 internal/rosa/python: wrap python package
Metadata for this is somewhat boilerplate-heavy, so wrap it to create metadata in one call.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-19 15:22:18 +09:00
4dcec40156 cmd/mbf: close on cancel completion
Like the previous change, this enables synchronisation on the client side via epoll.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-19 15:03:52 +09:00
9a274c78a3 cmd/mbf: close on abort completion
This enables synchronisation on the client side via epoll.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-19 14:53:28 +09:00
5647c3a91f internal/rosa/meson: run meson test suite
Tests requiring internet access or unreasonable dependencies are removed.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-19 01:07:20 +09:00
992139c75d internal/rosa/python: extra script after install
This is generally for the test suite, due to the lack of a standard or widely agreed-upon convention.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-19 00:35:24 +09:00
57c69b533e internal/rosa/meson: migrate to helper
This also migrates to source from the Microsoft Github release.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-19 00:16:22 +09:00
6f0c2a80f2 internal/rosa/python: migrate setuptools to helper
This is much cleaner, and should be functionally equivalent.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-19 00:04:19 +09:00
08dfefb28d internal/rosa/python: pip helper
Binary pip releases are not considered acceptable; this more generic helper is required for building from source.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-19 00:03:36 +09:00
b081629662 internal/rosa/libxml2: 2.15.2 to 2.15.3
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 09:05:49 +09:00
fba541f301 internal/rosa/nss: 3.122.1 to 3.123
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 09:05:23 +09:00
5f0da3d5c2 internal/rosa/gnu: mpc 1.4.0 to 1.4.1
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 09:04:33 +09:00
4d5841dd62 internal/rosa: elfutils 0.194 to 0.195
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 09:03:49 +09:00
9e752b588a internal/pkg: drop cached error on cancel
This avoids disabling the artifact when using the individual cancel method. Unfortunately this makes the method blocking.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 03:24:48 +09:00
27b1aaae38 internal/pkg: pending error alongside done channel
This significantly simplifies synchronisation of access to identErr.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 03:10:37 +09:00
9e18de1dc2 internal/pkg: flush cached errors on abort
This avoids disabling the artifact until cache is reopened. The same has to be implemented for Cancel in a future change.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 02:59:44 +09:00
b80ea91a42 cmd/mbf: abort remote cures
This command arranges for all pending cures to be aborted. It does not wait for cures to complete.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-17 22:47:02 +09:00
30a9dfa4b8 internal/pkg: abort all pending cures
This cancels all current pending cures without closing the cache.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-17 22:40:35 +09:00
8d657b6fdf cmd/mbf: cancel remote cure
This exposes the new fine-grained cancel API in cmd/mbf.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-17 22:00:04 +09:00
ae9b9adfd2 internal/rosa: retry in SIGSEGV test
Munmap is not always immediate.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-17 20:45:19 +09:00
dd6a480a21 cmd/mbf: handle flags in serve
This enables easier expansion of the protocol.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-17 20:14:09 +09:00
3942272c30 internal/pkg: fine-grained cancellation
This enables a specific artifact to be targeted for cancellation.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-17 19:33:21 +09:00
9036986156 cmd/mbf: optionally ignore reply
An acknowledgement is not always required in this use case. This change also adds 64 bits of connection configuration for future expansion.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-17 16:46:49 +09:00
a394971dd7 cmd/mbf: do not abort cache acquisition during testing
This can sometimes fire during testing due to how short the test is.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-17 02:06:51 +09:00
9daba60809 cmd/mbf: daemon command
This services internal/pkg artifact IR with Rosa OS extensions originating from another process.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-17 02:05:59 +09:00
bcd79a22ff cmd/mbf: do not open cache for IR encoding
This can now be allocated independently.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-17 01:04:39 +09:00
0ff7ab915b internal/pkg: move IR primitives out of cache
These are memory management and caching primitives. Having them as part of Cache is cumbersome and requires a temporary directory that is never used. This change isolates them from Cache to enable independent use.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-17 01:02:13 +09:00
823575acac cmd/mbf: move info command
This is cleaner with less shared state.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-16 17:43:52 +09:00
136bc0917b cmd/mbf: optionally open cache
Some commands do not require the cache. This change also makes acquisition of locked cache cancelable.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-16 15:59:34 +09:00
d6b082dd0b internal/rosa/ninja: bootstrap with verbose output
This otherwise outputs nothing, and appears to hang until the (fully single-threaded) bootstrap completes.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-15 22:19:05 +09:00
89d6d9576b internal/rosa/make: optionally format value as is
This enables correct formatting for awkward configure scripts.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-15 22:17:58 +09:00
fafce04a5d internal/rosa/kernel: firmware 20260309 to 20260410
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-15 22:16:47 +09:00
5d760a1db9 internal/rosa/kernel: 6.12.80 to 6.12.81
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-15 22:16:30 +09:00
d197e40b2a internal/rosa/python: mako 1.3.10 to 1.3.11
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-15 22:21:54 +09:00
2008902247 internal/rosa/python: packaging 26.0 to 26.1
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-15 22:15:18 +09:00
30ac985fd2 internal/rosa/meson: 1.10.2 to 1.11.0
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-15 22:14:52 +09:00
e9fec368f8 internal/rosa/nss: 3.122 to 3.122.1
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-15 22:13:45 +09:00
46add42f58 internal/rosa/openssl: disable building docs
These take very long and are never used in the Rosa OS environment.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-15 22:13:18 +09:00
377b61e342 internal/rosa/openssl: do not double test job count
The test suite is racy; this reduces flakiness.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-15 22:12:36 +09:00
520c36db6d internal/rosa: respect preferred job count
This discontinues use of nproc, and also overrides detection behaviour in ninja.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-15 18:49:36 +09:00
3352bb975b internal/pkg: job count in container environment
This exposes preferred job count to the container initial process.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-15 15:49:21 +09:00
f7f48d57e9 internal/pkg: pass impure job count
This is cleaner than checking the CPU count during cure; it is impossible to avoid impurity in both situations, but this is configurable.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-15 15:36:44 +09:00
5c2345128e internal/rosa/llvm: autodetect stage0 target
This is fine, now that stages beyond stage0 have explicit target.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-14 03:10:26 +09:00
78f9676b1f internal/rosa/llvm: centralise llvm source
This avoids having to sidestep the NewPackage name formatting machinery to take the cache fast path.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-14 03:03:06 +09:00
5b5b676132 internal/rosa/cmake: remove variant
This has no effect outside formatting of name and is a remnant of the old llvm helpers.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-14 02:57:47 +09:00
78383fb6e8 internal/rosa/llvm: migrate libclc
This eliminates newLLVMVariant.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-14 02:40:13 +09:00
e97f6a393f internal/rosa/llvm: migrate runtimes and clang
This eliminates most newLLVM family of functions.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-14 02:07:13 +09:00
eeffefd22b internal/rosa/llvm: migrate compiler-rt helper
This also removes unused dependencies.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-14 01:12:56 +09:00
ac825640ab internal/rosa/llvm: migrate musl
This removes the pointless special treatment given to musl.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-14 00:35:42 +09:00
a7f7ce1795 internal/rosa/llvm: migrate compiler-rt
The newLLVM family of functions predate the package system. This change migrates compiler-rt without changing any resulting artifacts.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-14 00:19:33 +09:00
38c639e35c internal/rosa/llvm: remove project/runtime helper
More remnants from early days, these are not reusable at all but that was not known at the time.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-14 00:03:23 +09:00
b2cb13e94c internal/rosa/llvm: centralise patches
This enables easier reuse of the patchset.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-13 23:52:44 +09:00
46f98d12d6 internal/rosa/llvm: remove conditional flags in helper
The llvm helper is a remnant from very early days, and ended up not being very useful, but was never removed. This change begins its removal, without changing the resulting artifacts for now.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-13 23:38:11 +09:00
503c7f953c internal/rosa/x: libpciaccess artifact
Required by userspace gpu drivers.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-13 19:04:38 +09:00
15c9f6545d internal/rosa/perl: populate anitya identifiers
These are also tracked by Anitya.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-13 18:44:43 +09:00
83b0e32c55 internal/rosa: helpers for common url formats
This cleans up call site of NewPackage.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-13 18:02:57 +09:00
eeaf26e7a2 internal/rosa: wrapper around git helper
This results in much cleaner call site for the majority of use cases.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-13 15:20:51 +09:00
b587caf2e8 internal/rosa: assume file source is xz-compressed
XZ happens to be the only widely-used format that is awful to deal with; everything else is natively supported.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-13 15:07:30 +09:00
f1c2ca4928 internal/rosa/mesa: libdrm artifact
Required by mesa.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-13 03:27:09 +09:00
0ca301219f internal/rosa/python: pyyaml artifact
Mesa unfortunately requires this horrible format.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-13 03:18:47 +09:00
e2199e1276 internal/rosa/python: mako artifact
This unfortunately pulls in platform-specific package.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-13 03:11:38 +09:00
86eacb3208 cmd/mbf: checksum command
This computes and encodes sha384 checksum of data streamed from standard input.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-13 03:09:21 +09:00
8541bdd858 internal/rosa: wrap per-arch values
This is cleaner syntax in some specific cases.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-13 02:59:55 +09:00
46be0b0dc8 internal/rosa/nss: buildcatrust 0.4.0 to 0.5.1
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-13 02:18:21 +09:00
cbe37e87e7 internal/rosa/python: pytest 9.0.2 to 9.0.3
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-13 02:18:02 +09:00
66d741fb07 internal/rosa/python: pygments 2.19.2 to 2.20.0
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-13 02:13:04 +09:00
0d449011f6 internal/rosa/python: use predictable URLs
This is much cleaner and more maintainable than specifying URL prefix manually. This change also populates Anitya project identifiers.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-13 02:08:22 +09:00
46428ed85d internal/rosa/python: url pip wheel helper
This enables a cleaner higher-level helper.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-13 01:59:28 +09:00
081d6b463c internal/rosa/llvm: libclc artifact
This is built independently of llvm build system to avoid having to build llvm again.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-12 22:57:04 +09:00
11b3171180 internal/rosa/glslang: glslang artifact
Required by mesa.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-12 22:34:17 +09:00
adbb84c3dd internal/rosa/glslang: spirv-tools artifact
Required by glslang.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-12 22:27:49 +09:00
1084e31d95 internal/rosa/glslang: spirv-headers artifact
Required by spirv-tools.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-12 22:16:29 +09:00
27a1b8fe0a internal/rosa/mesa: libglvnd artifact
Required by mesa.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-12 21:27:30 +09:00
b2141a41d7 internal/rosa/dbus: xdg-dbus-proxy artifact
This is currently a hakurei runtime dependency, but will eventually be removed.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-12 19:41:49 +09:00
c0dff5bc87 internal/rosa/gnu: gcc set with-multilib-list as needed
This breaks riscv64.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-12 18:03:45 +09:00
04513c0510 internal/rosa/gnu: gmp explicit CC
The configure script is hard coded to use gcc without fallback on riscv64.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-12 17:25:15 +09:00
28ebf973d6 nix: add sharefs supplementary group
This works around vfs inode file attribute race.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-11 23:28:18 +09:00
41aeb404ec internal/rosa/hakurei: 0.3.7 to 0.4.0
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-11 10:53:29 +09:00
163 changed files with 9823 additions and 3092 deletions

3
.gitignore vendored
View File

@@ -7,7 +7,8 @@
# go generate # go generate
/cmd/hakurei/LICENSE /cmd/hakurei/LICENSE
/internal/pkg/testdata/testtool /cmd/mbf/internal/pkgserver/ui/static
/internal/pkg/internal/testtool/testtool
/internal/rosa/hakurei_current.tar.gz /internal/rosa/hakurei_current.tar.gz
# cmd/dist default destination # cmd/dist default destination

5
all.sh
View File

@@ -1,6 +1,3 @@
#!/bin/sh -e #!/bin/sh -e
TOOLCHAIN_VERSION="$(go version)" HAKUREI_DIST_MAKE='' exec "$(dirname -- "$0")/cmd/dist/dist.sh"
cd "$(dirname -- "$0")/"
echo "# Building cmd/dist using ${TOOLCHAIN_VERSION}."
go run -v --tags=dist ./cmd/dist

View File

@@ -4,15 +4,23 @@ import "strings"
const ( const (
// SpecialOverlayEscape is the escape string for overlay mount options. // SpecialOverlayEscape is the escape string for overlay mount options.
//
// Deprecated: This is no longer used and will be removed in 0.5.
SpecialOverlayEscape = `\` SpecialOverlayEscape = `\`
// SpecialOverlayOption is the separator string between overlay mount options. // SpecialOverlayOption is the separator string between overlay mount options.
//
// Deprecated: This is no longer used and will be removed in 0.5.
SpecialOverlayOption = "," SpecialOverlayOption = ","
// SpecialOverlayPath is the separator string between overlay paths. // SpecialOverlayPath is the separator string between overlay paths.
//
// Deprecated: This is no longer used and will be removed in 0.5.
SpecialOverlayPath = ":" SpecialOverlayPath = ":"
) )
// EscapeOverlayDataSegment escapes a string for formatting into the data // EscapeOverlayDataSegment escapes a string for formatting into the data
// argument of an overlay mount system call. // argument of an overlay mount system call.
//
// Deprecated: This is no longer used and will be removed in 0.5.
func EscapeOverlayDataSegment(s string) string { func EscapeOverlayDataSegment(s string) string {
if s == "" { if s == "" {
return "" return ""

10
cmd/dist/dist.sh vendored Executable file
View File

@@ -0,0 +1,10 @@
#!/bin/sh -e
TOOLCHAIN_VERSION="$(go version)"
cd "$(dirname -- "$0")/../.."
echo "Building cmd/dist using ${TOOLCHAIN_VERSION}."
FLAGS=''
if test -n "$VERBOSE"; then
FLAGS="$FLAGS -v"
fi
go run $FLAGS --tags=dist ./cmd/dist

40
cmd/dist/main.go vendored
View File

@@ -42,14 +42,19 @@ func mustRun(ctx context.Context, name string, arg ...string) {
var comp []byte var comp []byte
func main() { func main() {
fmt.Println()
log.SetFlags(0) log.SetFlags(0)
log.SetPrefix("# ") log.SetPrefix("")
verbose := os.Getenv("VERBOSE") != ""
runTests := os.Getenv("HAKUREI_DIST_MAKE") == ""
version := getenv("HAKUREI_VERSION", "untagged") version := getenv("HAKUREI_VERSION", "untagged")
prefix := getenv("PREFIX", "/usr") prefix := getenv("PREFIX", "/usr")
destdir := getenv("DESTDIR", "dist") destdir := getenv("DESTDIR", "dist")
if verbose {
log.Println()
}
if err := os.MkdirAll(destdir, 0755); err != nil { if err := os.MkdirAll(destdir, 0755); err != nil {
log.Fatal(err) log.Fatal(err)
} }
@@ -76,12 +81,17 @@ func main() {
ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt)
defer cancel() defer cancel()
log.Println("Building hakurei.") verboseFlag := "-v"
if !verbose {
verboseFlag = "-buildvcs=false"
}
log.Printf("Building hakurei for %s/%s.", runtime.GOOS, runtime.GOARCH)
mustRun(ctx, "go", "generate", "./...") mustRun(ctx, "go", "generate", "./...")
mustRun( mustRun(
ctx, "go", "build", ctx, "go", "build",
"-trimpath", "-trimpath",
"-v", "-o", s, verboseFlag, "-o", s,
"-ldflags=-s -w "+ "-ldflags=-s -w "+
"-buildid= -linkmode external -extldflags=-static "+ "-buildid= -linkmode external -extldflags=-static "+
"-X hakurei.app/internal/info.buildVersion="+version+" "+ "-X hakurei.app/internal/info.buildVersion="+version+" "+
@@ -90,17 +100,19 @@ func main() {
"-X main.hakureiPath="+prefix+"/bin/hakurei", "-X main.hakureiPath="+prefix+"/bin/hakurei",
"./...", "./...",
) )
fmt.Println() log.Println()
log.Println("Testing Hakurei.") if runTests {
mustRun( log.Println("##### Testing Hakurei.")
ctx, "go", "test", mustRun(
"-ldflags=-buildid= -linkmode external -extldflags=-static", ctx, "go", "test",
"./...", "-ldflags=-buildid= -linkmode external -extldflags=-static",
) "./...",
fmt.Println() )
log.Println()
}
log.Println("Creating distribution.") log.Println("##### Creating distribution.")
const suffix = ".tar.gz" const suffix = ".tar.gz"
distName := "hakurei-" + version + "-" + runtime.GOARCH distName := "hakurei-" + version + "-" + runtime.GOARCH
var f *os.File var f *os.File
@@ -121,7 +133,7 @@ func main() {
}() }()
h := sha512.New() h := sha512.New()
gw := gzip.NewWriter(io.MultiWriter(f, h)) gw, _ := gzip.NewWriterLevel(io.MultiWriter(f, h), gzip.BestCompression)
tw := tar.NewWriter(gw) tw := tar.NewWriter(gw)
mustWriteHeader := func(name string, size int64, mode os.FileMode) { mustWriteHeader := func(name string, size int64, mode os.FileMode) {

135
cmd/mbf/cache.go Normal file
View File

@@ -0,0 +1,135 @@
package main
import (
"context"
"os"
"path/filepath"
"testing"
"hakurei.app/check"
"hakurei.app/container"
"hakurei.app/internal/pkg"
"hakurei.app/message"
)
// cache refers to an instance of [pkg.Cache] that might be open.
type cache struct {
ctx context.Context
msg message.Msg
// Should generally not be used directly.
c *pkg.Cache
cures, jobs int
// Primarily to work around missing landlock LSM.
hostAbstract bool
// Set SCHED_IDLE.
idle bool
// Unset [pkg.CSuppressInit].
verboseInit bool
// Loaded artifact of [rosa.QEMU].
qemu pkg.Artifact
base string
}
// open opens the underlying [pkg.Cache].
func (cache *cache) open() (err error) {
if cache.c != nil {
return os.ErrInvalid
}
var base *check.Absolute
if cache.base, err = filepath.Abs(cache.base); err != nil {
return
} else if base, err = check.NewAbs(cache.base); err != nil {
return
}
var flags int
if cache.idle {
flags |= pkg.CSchedIdle
}
if cache.hostAbstract {
flags |= pkg.CHostAbstract
}
if !cache.verboseInit {
flags |= pkg.CSuppressInit
}
done := make(chan struct{})
defer close(done)
go func() {
select {
case <-cache.ctx.Done():
if testing.Testing() {
return
}
os.Exit(2)
case <-done:
return
}
}()
cache.msg.Verbosef("opening cache at %s", base)
cache.c, err = pkg.Open(
cache.ctx,
cache.msg,
flags,
cache.cures,
cache.jobs,
base,
)
if err != nil {
return
}
done <- struct{}{}
if cache.qemu != nil {
var pathname *check.Absolute
pathname, _, err = cache.c.Cure(cache.qemu)
if err != nil {
cache.c.Close()
return
}
pkg.RegisterArch("riscv64", container.BinfmtEntry{
Offset: 0,
Magic: "\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xf3\x00",
Mask: "\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff",
Interpreter: pathname.Append(
"system/bin",
"qemu-riscv64",
),
})
pkg.RegisterArch("arm64", container.BinfmtEntry{
Offset: 0,
Magic: "\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xb7\x00",
Mask: "\xff\xff\xff\xff\xff\xff\xff\xfc\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff",
Interpreter: pathname.Append(
"system/bin",
"qemu-aarch64",
),
})
}
return
}
// Close closes the underlying [pkg.Cache] if it is open.
func (cache *cache) Close() {
if cache.c != nil {
cache.c.Close()
}
}
// Do calls f on the underlying cache and returns its error value.
func (cache *cache) Do(f func(cache *pkg.Cache) error) error {
if cache.c == nil {
if err := cache.open(); err != nil {
return err
}
}
return f(cache.c)
}

37
cmd/mbf/cache_test.go Normal file
View File

@@ -0,0 +1,37 @@
package main
import (
"log"
"os"
"testing"
"hakurei.app/internal/pkg"
"hakurei.app/message"
)
func TestCache(t *testing.T) {
t.Parallel()
cm := cache{
ctx: t.Context(),
msg: message.New(log.New(os.Stderr, "check: ", 0)),
base: t.TempDir(),
hostAbstract: true, idle: true,
}
defer cm.Close()
cm.Close()
if err := cm.open(); err != nil {
t.Fatalf("open: error = %v", err)
}
if err := cm.open(); err != os.ErrInvalid {
t.Errorf("(duplicate) open: error = %v", err)
}
if err := cm.Do(func(cache *pkg.Cache) error {
return cache.Scrub(0)
}); err != nil {
t.Errorf("Scrub: error = %v", err)
}
}

354
cmd/mbf/daemon.go Normal file
View File

@@ -0,0 +1,354 @@
package main
import (
"context"
"encoding/binary"
"errors"
"io"
"log"
"math"
"net"
"os"
"sync"
"syscall"
"testing"
"time"
"unique"
"hakurei.app/check"
"hakurei.app/internal/pkg"
)
// daemonTimeout is the maximum amount of time cureFromIR will wait on I/O.
const daemonTimeout = 30 * time.Second
// daemonDeadline returns the deadline corresponding to daemonTimeout, or the
// zero value when running in a test.
func daemonDeadline() time.Time {
if testing.Testing() {
return time.Time{}
}
return time.Now().Add(daemonTimeout)
}
const (
// remoteNoReply notifies that the client will not receive a cure reply.
remoteNoReply = 1 << iota
)
// cureFromIR services an IR curing request.
func cureFromIR(
cache *pkg.Cache,
conn net.Conn,
flags uint64,
) (pkg.Artifact, error) {
a, decodeErr := cache.NewDecoder(conn).Decode()
if decodeErr != nil {
_, err := conn.Write([]byte("\x00" + decodeErr.Error()))
return nil, errors.Join(decodeErr, err, conn.Close())
}
pathname, _, cureErr := cache.Cure(a)
if flags&remoteNoReply != 0 {
return a, errors.Join(cureErr, conn.Close())
}
if err := conn.SetWriteDeadline(daemonDeadline()); err != nil {
return a, errors.Join(cureErr, err, conn.Close())
}
if cureErr != nil {
_, err := conn.Write([]byte("\x00" + cureErr.Error()))
return a, errors.Join(cureErr, err, conn.Close())
}
_, err := conn.Write([]byte(pathname.String()))
if testing.Testing() && errors.Is(err, io.ErrClosedPipe) {
return a, nil
}
return a, errors.Join(err, conn.Close())
}
const (
// specialCancel is a message consisting of a single identifier referring
// to a curing artifact to be cancelled.
specialCancel = iota
// specialAbort requests for all pending cures to be aborted. It has no
// message body.
specialAbort
// remoteSpecial denotes a special message with custom layout.
remoteSpecial = math.MaxUint64
)
// writeSpecialHeader writes the header of a remoteSpecial message.
func writeSpecialHeader(conn net.Conn, kind uint64) error {
var sh [16]byte
binary.LittleEndian.PutUint64(sh[:], remoteSpecial)
binary.LittleEndian.PutUint64(sh[8:], kind)
if n, err := conn.Write(sh[:]); err != nil {
return err
} else if n != len(sh) {
return io.ErrShortWrite
}
return nil
}
// cancelIdent reads an identifier from conn and cancels the corresponding cure.
func cancelIdent(
cache *pkg.Cache,
conn net.Conn,
) (*pkg.ID, bool, error) {
var ident pkg.ID
if _, err := io.ReadFull(conn, ident[:]); err != nil {
return nil, false, errors.Join(err, conn.Close())
}
ok := cache.Cancel(unique.Make(ident))
return &ident, ok, conn.Close()
}
// serve services connections from a [net.UnixListener].
func serve(
ctx context.Context,
log *log.Logger,
cm *cache,
ul *net.UnixListener,
) error {
ul.SetUnlinkOnClose(true)
if cm.c == nil {
if err := cm.open(); err != nil {
return errors.Join(err, ul.Close())
}
}
var wg sync.WaitGroup
defer wg.Wait()
wg.Go(func() {
for {
if ctx.Err() != nil {
break
}
conn, err := ul.AcceptUnix()
if err != nil {
if !errors.Is(err, os.ErrDeadlineExceeded) {
log.Println(err)
}
continue
}
wg.Go(func() {
done := make(chan struct{})
defer close(done)
go func() {
select {
case <-ctx.Done():
_ = conn.SetDeadline(time.Now())
case <-done:
return
}
}()
if _err := conn.SetReadDeadline(daemonDeadline()); _err != nil {
log.Println(_err)
if _err = conn.Close(); _err != nil {
log.Println(_err)
}
return
}
var word [8]byte
if _, _err := io.ReadFull(conn, word[:]); _err != nil {
log.Println(_err)
if _err = conn.Close(); _err != nil {
log.Println(_err)
}
return
}
flags := binary.LittleEndian.Uint64(word[:])
if flags == remoteSpecial {
if _, _err := io.ReadFull(conn, word[:]); _err != nil {
log.Println(_err)
if _err = conn.Close(); _err != nil {
log.Println(_err)
}
return
}
switch special := binary.LittleEndian.Uint64(word[:]); special {
default:
log.Printf("invalid special %d", special)
case specialCancel:
if id, ok, _err := cancelIdent(cm.c, conn); _err != nil {
log.Println(_err)
} else if !ok {
log.Println(
"attempting to cancel invalid artifact",
pkg.Encode(*id),
)
} else {
log.Println(
"cancelled artifact",
pkg.Encode(*id),
)
}
case specialAbort:
log.Println("aborting all pending cures")
cm.c.Abort()
if _err := conn.Close(); _err != nil {
log.Println(_err)
}
}
return
}
if a, _err := cureFromIR(cm.c, conn, flags); _err != nil {
log.Println(_err)
} else {
log.Printf(
"fulfilled artifact %s",
pkg.Encode(cm.c.Ident(a).Value()),
)
}
})
}
})
<-ctx.Done()
if err := ul.SetDeadline(time.Now()); err != nil {
return errors.Join(err, ul.Close())
}
wg.Wait()
return ul.Close()
}
// dial wraps [net.DialUnix] with a context.
func dial(ctx context.Context, addr *net.UnixAddr) (
done chan<- struct{},
conn *net.UnixConn,
err error,
) {
conn, err = net.DialUnix("unix", nil, addr)
if err != nil {
return
}
d := make(chan struct{})
done = d
go func() {
select {
case <-ctx.Done():
_ = conn.SetDeadline(time.Now())
case <-d:
return
}
}()
return
}
// cureRemote cures a [pkg.Artifact] on a daemon.
func cureRemote(
ctx context.Context,
addr *net.UnixAddr,
a pkg.Artifact,
flags uint64,
) (*check.Absolute, error) {
if flags == remoteSpecial {
return nil, syscall.EINVAL
}
done, conn, err := dial(ctx, addr)
if err != nil {
return nil, err
}
defer close(done)
if n, flagErr := conn.Write(binary.LittleEndian.AppendUint64(nil, flags)); flagErr != nil {
return nil, errors.Join(flagErr, conn.Close())
} else if n != 8 {
return nil, errors.Join(io.ErrShortWrite, conn.Close())
}
if err = pkg.NewIR().EncodeAll(conn, a); err != nil {
return nil, errors.Join(err, conn.Close())
} else if err = conn.CloseWrite(); err != nil {
return nil, errors.Join(err, conn.Close())
}
if flags&remoteNoReply != 0 {
return nil, conn.Close()
}
payload, recvErr := io.ReadAll(conn)
if err = errors.Join(recvErr, conn.Close()); err != nil {
if errors.Is(err, os.ErrDeadlineExceeded) {
if cancelErr := ctx.Err(); cancelErr != nil {
err = cancelErr
}
}
return nil, err
}
if len(payload) > 0 && payload[0] == 0 {
return nil, errors.New(string(payload[1:]))
}
var p *check.Absolute
p, err = check.NewAbs(string(payload))
return p, err
}
// cancelRemote cancels a [pkg.Artifact] curing on a daemon.
func cancelRemote(
ctx context.Context,
addr *net.UnixAddr,
a pkg.Artifact,
wait bool,
) error {
done, conn, err := dial(ctx, addr)
if err != nil {
return err
}
defer close(done)
if err = writeSpecialHeader(conn, specialCancel); err != nil {
return errors.Join(err, conn.Close())
}
var n int
id := pkg.NewIR().Ident(a).Value()
if n, err = conn.Write(id[:]); err != nil {
return errors.Join(err, conn.Close())
} else if n != len(id) {
return errors.Join(io.ErrShortWrite, conn.Close())
}
if wait {
if _, err = conn.Read(make([]byte, 1)); err == io.EOF {
err = nil
}
}
return errors.Join(err, conn.Close())
}
// abortRemote aborts all [pkg.Artifact] curing on a daemon.
func abortRemote(
ctx context.Context,
addr *net.UnixAddr,
wait bool,
) error {
done, conn, err := dial(ctx, addr)
if err != nil {
return err
}
defer close(done)
err = writeSpecialHeader(conn, specialAbort)
if wait && err == nil {
if _, err = conn.Read(make([]byte, 1)); err == io.EOF {
err = nil
}
}
return errors.Join(err, conn.Close())
}

146
cmd/mbf/daemon_test.go Normal file
View File

@@ -0,0 +1,146 @@
package main
import (
"bytes"
"context"
"errors"
"io"
"log"
"net"
"os"
"path/filepath"
"slices"
"strings"
"testing"
"time"
"hakurei.app/check"
"hakurei.app/internal/pkg"
"hakurei.app/message"
)
// TestNoReply drives cureFromIR with the remoteNoReply flag over an in-memory
// pipe and checks the resulting artifact's ident.
func TestNoReply(t *testing.T) {
	t.Parallel()
	if !daemonDeadline().IsZero() {
		t.Fatal("daemonDeadline did not return the zero value")
	}
	c, err := pkg.Open(
		t.Context(),
		message.New(log.New(os.Stderr, "cir: ", 0)),
		0, 0, 0,
		check.MustAbs(t.TempDir()),
	)
	if err != nil {
		t.Fatalf("Open: error = %v", err)
	}
	defer c.Close()
	client, server := net.Pipe()
	done := make(chan struct{})
	go func() {
		defer close(done)
		// Unblock the pipe on test cancellation by expiring its deadline.
		go func() {
			<-t.Context().Done()
			if _err := client.SetDeadline(time.Now()); _err != nil && !errors.Is(_err, io.ErrClosedPipe) {
				panic(_err)
			}
		}()
		if _err := c.EncodeAll(
			client,
			pkg.NewFile("check", []byte{0}),
		); _err != nil {
			panic(_err)
		} else if _err = client.Close(); _err != nil {
			panic(_err)
		}
	}()
	a, cureErr := cureFromIR(c, server, remoteNoReply)
	if cureErr != nil {
		t.Fatalf("cureFromIR: error = %v", cureErr)
	}
	<-done
	// Ident of the pkg.NewFile("check", []byte{0}) artifact encoded above.
	wantIdent := pkg.MustDecode("fiZf-ZY_Yq6qxJNrHbMiIPYCsGkUiKCRsZrcSELXTqZWtCnESlHmzV5ThhWWGGYG")
	if gotIdent := c.Ident(a).Value(); gotIdent != wantIdent {
		t.Errorf(
			"cureFromIR: %s, want %s",
			pkg.Encode(gotIdent), pkg.Encode(wantIdent),
		)
	}
}
// TestDaemon starts serve on a unix socket, drives it with cancelRemote,
// abortRemote and cureRemote, and checks the daemon's log output.
func TestDaemon(t *testing.T) {
	t.Parallel()
	var buf bytes.Buffer
	logger := log.New(&buf, "daemon: ", 0)
	addr := net.UnixAddr{
		Name: filepath.Join(t.TempDir(), "daemon"),
		Net:  "unix",
	}
	ctx, cancel := context.WithCancel(t.Context())
	defer cancel()
	cm := cache{
		ctx:  ctx,
		msg:  message.New(logger),
		base: t.TempDir(),
	}
	defer cm.Close()
	ul, err := net.ListenUnix("unix", &addr)
	if err != nil {
		t.Fatalf("ListenUnix: error = %v", err)
	}
	done := make(chan struct{})
	go func() {
		defer close(done)
		if _err := serve(ctx, logger, &cm, ul); _err != nil {
			panic(_err)
		}
	}()
	// Cancelling an artifact the daemon never saw must not fail the call.
	if err = cancelRemote(ctx, &addr, pkg.NewFile("nonexistent", nil), true); err != nil {
		t.Fatalf("cancelRemote: error = %v", err)
	}
	if err = abortRemote(ctx, &addr, true); err != nil {
		t.Fatalf("abortRemote: error = %v", err)
	}
	// keep this last for synchronisation
	var p *check.Absolute
	p, err = cureRemote(ctx, &addr, pkg.NewFile("check", []byte{0}), 0)
	if err != nil {
		t.Fatalf("cureRemote: error = %v", err)
	}
	cancel()
	<-done
	const want = "fiZf-ZY_Yq6qxJNrHbMiIPYCsGkUiKCRsZrcSELXTqZWtCnESlHmzV5ThhWWGGYG"
	if got := filepath.Base(p.String()); got != want {
		t.Errorf("cureRemote: %s, want %s", got, want)
	}
	// Sort the log lines before comparing: goroutine interleaving makes the
	// daemon's output order nondeterministic. The empty string entry comes
	// from splitting on the trailing newline.
	wantLog := []string{
		"",
		"daemon: aborting all pending cures",
		"daemon: attempting to cancel invalid artifact kQm9fmnCmXST1-MMmxzcau2oKZCXXrlZydo4PkeV5hO_2PKfeC8t98hrbV_ZZx_j",
		"daemon: fulfilled artifact fiZf-ZY_Yq6qxJNrHbMiIPYCsGkUiKCRsZrcSELXTqZWtCnESlHmzV5ThhWWGGYG",
	}
	gotLog := strings.Split(buf.String(), "\n")
	slices.Sort(gotLog)
	if !slices.Equal(gotLog, wantLog) {
		t.Errorf(
			"serve: logged\n%s\nwant\n%s",
			strings.Join(gotLog, "\n"), strings.Join(wantLog, "\n"),
		)
	}
}

114
cmd/mbf/info.go Normal file
View File

@@ -0,0 +1,114 @@
package main
import (
"errors"
"fmt"
"io"
"os"
"strings"
"hakurei.app/internal/pkg"
"hakurei.app/internal/rosa"
)
// commandInfo implements the info subcommand.
//
// For each named artifact it writes name/description/website/dependency
// metadata to w. When writeStatus is set, a status line follows, taken either
// from the cache's status file (r == nil) or from the report r.
func commandInfo(
	cm *cache,
	args []string,
	w io.Writer,
	writeStatus bool,
	r *rosa.Report,
) (err error) {
	if len(args) == 0 {
		return errors.New("info requires at least 1 argument")
	}
	// recovered by HandleAccess
	mustPrintln := func(a ...any) {
		if _, _err := fmt.Fprintln(w, a...); _err != nil {
			panic(_err)
		}
	}
	mustPrint := func(a ...any) {
		if _, _err := fmt.Fprint(w, a...); _err != nil {
			panic(_err)
		}
	}
	for i, name := range args {
		if p, ok := rosa.ResolveName(name); !ok {
			return fmt.Errorf("unknown artifact %q", name)
		} else {
			// Versioned artifacts get a "-version" suffix on the name line.
			var suffix string
			if version := rosa.Std.Version(p); version != rosa.Unversioned {
				suffix += "-" + version
			}
			mustPrintln("name : " + name + suffix)
			meta := rosa.GetMetadata(p)
			mustPrintln("description : " + meta.Description)
			if meta.Website != "" {
				mustPrintln("website : " +
					strings.TrimSuffix(meta.Website, "/"))
			}
			if len(meta.Dependencies) > 0 {
				mustPrint("depends on :")
				for _, d := range meta.Dependencies {
					s := rosa.GetMetadata(d).Name
					if version := rosa.Std.Version(d); version != rosa.Unversioned {
						s += "-" + version
					}
					mustPrint(" " + s)
				}
				mustPrintln()
			}
			const statusPrefix = "status : "
			if writeStatus {
				if r == nil {
					// No report: stream the persisted status file from the cache.
					var f io.ReadSeekCloser
					err = cm.Do(func(cache *pkg.Cache) (err error) {
						f, err = cache.OpenStatus(rosa.Std.Load(p))
						return
					})
					if err != nil {
						if errors.Is(err, os.ErrNotExist) {
							// A missing status file just means the artifact
							// has not been cured here yet.
							mustPrintln(
								statusPrefix + "not yet cured",
							)
						} else {
							return
						}
					} else {
						mustPrint(statusPrefix)
						_, err = io.Copy(w, f)
						if err = errors.Join(err, f.Close()); err != nil {
							return
						}
					}
				} else if err = cm.Do(func(cache *pkg.Cache) (err error) {
					// Report available: look the artifact up by its ident.
					status, n := r.ArtifactOf(cache.Ident(rosa.Std.Load(p)))
					if status == nil {
						mustPrintln(
							statusPrefix + "not in report",
						)
					} else {
						mustPrintln("size :", n)
						mustPrint(statusPrefix)
						if _, err = w.Write(status); err != nil {
							return
						}
					}
					return
				}); err != nil {
					return
				}
			}
			// Blank line between entries, but not after the last one.
			if i != len(args)-1 {
				mustPrintln()
			}
		}
	}
	return nil
}

181
cmd/mbf/info_test.go Normal file
View File

@@ -0,0 +1,181 @@
package main
import (
"context"
"fmt"
"log"
"os"
"path/filepath"
"reflect"
"strings"
"syscall"
"testing"
"unsafe"
"hakurei.app/internal/pkg"
"hakurei.app/internal/rosa"
"hakurei.app/message"
)
// TestInfo exercises commandInfo with metadata-only output, status read from
// the cache directory, a permission failure, and status from a report.
func TestInfo(t *testing.T) {
	t.Parallel()
	testCases := []struct {
		name string
		args []string
		// Status file contents keyed by artifact name; the value "\x00"
		// writes the file with mode 0 to provoke a permission error instead.
		status map[string]string
		// Raw report file contents, written to disk and opened when nonempty.
		report string
		want   string
		// Either a plain error, or func(cm *cache) error evaluated after the
		// cache exists (for errors that embed cache paths).
		wantErr any
	}{
		{"qemu", []string{"qemu"}, nil, "", `
name : qemu-` + rosa.Std.Version(rosa.QEMU) + `
description : a generic and open source machine emulator and virtualizer
website : https://www.qemu.org
depends on : glib-` + rosa.Std.Version(rosa.GLib) + ` zstd-` + rosa.Std.Version(rosa.Zstd) + `
`, nil},
		{"multi", []string{"hakurei", "hakurei-dist"}, nil, "", `
name : hakurei-` + rosa.Std.Version(rosa.Hakurei) + `
description : low-level userspace tooling for Rosa OS
website : https://hakurei.app
name : hakurei-dist-` + rosa.Std.Version(rosa.HakureiDist) + `
description : low-level userspace tooling for Rosa OS (distribution tarball)
website : https://hakurei.app
`, nil},
		{"nonexistent", []string{"zlib", "\x00"}, nil, "", `
name : zlib-` + rosa.Std.Version(rosa.Zlib) + `
description : lossless data-compression library
website : https://zlib.net
`, fmt.Errorf("unknown artifact %q", "\x00")},
		{"status cache", []string{"zlib", "zstd"}, map[string]string{
			"zstd":    "internal/pkg (amd64) on satori\n",
			"hakurei": "internal/pkg (amd64) on satori\n\n",
		}, "", `
name : zlib-` + rosa.Std.Version(rosa.Zlib) + `
description : lossless data-compression library
website : https://zlib.net
status : not yet cured
name : zstd-` + rosa.Std.Version(rosa.Zstd) + `
description : a fast compression algorithm
website : https://facebook.github.io/zstd
status : internal/pkg (amd64) on satori
`, nil},
		{"status cache perm", []string{"zlib"}, map[string]string{
			"zlib": "\x00",
		}, "", `
name : zlib-` + rosa.Std.Version(rosa.Zlib) + `
description : lossless data-compression library
website : https://zlib.net
`, func(cm *cache) error {
			return &os.PathError{
				Op:   "open",
				Path: filepath.Join(cm.base, "status", pkg.Encode(cm.c.Ident(rosa.Std.Load(rosa.Zlib)).Value())),
				Err:  syscall.EACCES,
			}
		}},
		{"status report", []string{"zlib"}, nil, strings.Repeat("\x00", len(pkg.Checksum{})+8), `
name : zlib-` + rosa.Std.Version(rosa.Zlib) + `
description : lossless data-compression library
website : https://zlib.net
status : not in report
`, nil},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			var (
				cm  *cache
				buf strings.Builder
				r   *rosa.Report
			)
			// A cache is only needed when status output is exercised.
			if tc.status != nil || tc.report != "" {
				cm = &cache{
					ctx:  context.Background(),
					msg:  message.New(log.New(os.Stderr, "info: ", 0)),
					base: t.TempDir(),
				}
				defer cm.Close()
			}
			if tc.report != "" {
				pathname := filepath.Join(t.TempDir(), "report")
				// unsafe.Slice avoids copying the constant report contents.
				err := os.WriteFile(
					pathname,
					unsafe.Slice(unsafe.StringData(tc.report), len(tc.report)),
					0400,
				)
				if err != nil {
					t.Fatal(err)
				}
				r, err = rosa.OpenReport(pathname)
				if err != nil {
					t.Fatal(err)
				}
				defer func() {
					if err = r.Close(); err != nil {
						t.Fatal(err)
					}
				}()
			}
			if tc.status != nil {
				// Seed status files under the cache's status directory, named
				// by encoded artifact ident.
				for name, status := range tc.status {
					p, ok := rosa.ResolveName(name)
					if !ok {
						t.Fatalf("invalid name %q", name)
					}
					perm := os.FileMode(0400)
					if status == "\x00" {
						perm = 0
					}
					if err := cm.Do(func(cache *pkg.Cache) error {
						return os.WriteFile(filepath.Join(
							cm.base,
							"status",
							pkg.Encode(cache.Ident(rosa.Std.Load(p)).Value()),
						), unsafe.Slice(unsafe.StringData(status), len(status)), perm)
					}); err != nil {
						t.Fatalf("Do: error = %v", err)
					}
				}
			}
			var wantErr error
			switch c := tc.wantErr.(type) {
			case error:
				wantErr = c
			case func(cm *cache) error:
				wantErr = c(cm)
			default:
				if tc.wantErr != nil {
					t.Fatalf("invalid wantErr %#v", tc.wantErr)
				}
			}
			if err := commandInfo(
				cm,
				tc.args,
				&buf,
				cm != nil,
				r,
			); !reflect.DeepEqual(err, wantErr) {
				t.Fatalf("commandInfo: error = %v, want %v", err, wantErr)
			}
			if got := buf.String(); got != strings.TrimPrefix(tc.want, "\n") {
				t.Errorf("commandInfo:\n%s\nwant\n%s", got, tc.want)
			}
		})
	}
}

View File

@@ -0,0 +1,202 @@
// Package pkgserver implements the package metadata service backend.
package pkgserver
import (
"context"
"encoding/json"
"log"
"net/http"
"net/url"
"path"
"strconv"
"sync"
"time"
"hakurei.app/internal/info"
"hakurei.app/internal/rosa"
)
// for lazy initialisation of serveInfo
var (
	// infoPayload is the constant body served by handleInfo, filled in
	// exactly once on the first request.
	infoPayload struct {
		// Current package count.
		Count int `json:"count"`
		// Hakurei version, set at link time.
		HakureiVersion string `json:"hakurei_version"`
	}
	infoPayloadOnce sync.Once
)

// handleInfo writes constant system information.
func handleInfo(w http.ResponseWriter, _ *http.Request) {
	infoPayloadOnce.Do(func() {
		infoPayload.Count = int(rosa.PresetUnexportedStart)
		infoPayload.HakureiVersion = info.Version()
	})
	// TODO(mae): cache entire response if no additional fields are planned
	writeAPIPayload(w, infoPayload)
}
// newStatusHandler returns a [http.HandlerFunc] that offers status files for
// viewing or download, if available.
//
// With disposition set, the response is served as an attachment named after
// the package name, its version (when set) and its encoded ident.
func (index *packageIndex) newStatusHandler(disposition bool) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// The final path element is the package name.
		m, ok := index.names[path.Base(r.URL.Path)]
		if !ok || !m.HasReport {
			http.NotFound(w, r)
			return
		}
		contentType := "text/plain; charset=utf-8"
		if disposition {
			contentType = "application/octet-stream"
			// quoting like this is unsound, but okay, because metadata is hardcoded
			contentDisposition := `attachment; filename="`
			contentDisposition += m.Name + "-"
			if m.Version != "" {
				contentDisposition += m.Version + "-"
			}
			contentDisposition += m.ids + `.log"`
			w.Header().Set("Content-Disposition", contentDisposition)
		}
		w.Header().Set("Content-Type", contentType)
		w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
		// m.status is backed by the report; reads must be bracketed by
		// handleAccess.
		if err := func() (err error) {
			defer index.handleAccess(&err)()
			_, err = w.Write(m.status)
			return
		}(); err != nil {
			log.Println(err)
			http.Error(
				w, "cannot deliver status, contact maintainers",
				http.StatusInternalServerError,
			)
		}
	}
}
// handleGet writes a slice of metadata with specified order.
//
// Query parameters: limit (1..100), index (0..package count-1) and sort
// (0..sortOrderEnd); each is validated before slicing the requested order.
func (index *packageIndex) handleGet(w http.ResponseWriter, r *http.Request) {
	q := r.URL.Query()
	limit, err := strconv.Atoi(q.Get("limit"))
	if err != nil || limit > 100 || limit < 1 {
		http.Error(
			w, "limit must be an integer between 1 and 100",
			http.StatusBadRequest,
		)
		return
	}
	i, err := strconv.Atoi(q.Get("index"))
	// len(index.sorts[0]) equals rosa.PresetUnexportedStart, so the message
	// below states the actual bound.
	if err != nil || i >= len(index.sorts[0]) || i < 0 {
		http.Error(
			w, "index must be an integer between 0 and "+
				strconv.Itoa(int(rosa.PresetUnexportedStart-1)),
			http.StatusBadRequest,
		)
		return
	}
	sort, err := strconv.Atoi(q.Get("sort"))
	if err != nil || sort >= len(index.sorts) || sort < 0 {
		http.Error(
			w, "sort must be an integer between 0 and "+
				strconv.Itoa(sortOrderEnd),
			http.StatusBadRequest,
		)
		return
	}
	// Clamp the window end so a short final page does not overrun.
	values := index.sorts[sort][i:min(i+limit, len(index.sorts[sort]))]
	writeAPIPayload(w, &struct {
		Values []*metadata `json:"values"`
	}{values})
}
// handleSearch writes search results for a regular expression query over
// package names and, optionally, descriptions.
//
// Query parameters: limit (1..100), index (0..package count-1), search (an
// URL-escaped regular expression of at most 100 characters) and desc
// ("true" to also match descriptions).
func (index *packageIndex) handleSearch(w http.ResponseWriter, r *http.Request) {
	q := r.URL.Query()
	limit, err := strconv.Atoi(q.Get("limit"))
	if err != nil || limit > 100 || limit < 1 {
		http.Error(
			w, "limit must be an integer between 1 and 100",
			http.StatusBadRequest,
		)
		return
	}
	i, err := strconv.Atoi(q.Get("index"))
	if err != nil || i >= len(index.sorts[0]) || i < 0 {
		http.Error(
			w, "index must be an integer between 0 and "+
				strconv.Itoa(int(rosa.PresetUnexportedStart-1)),
			http.StatusBadRequest,
		)
		return
	}
	search, err := url.QueryUnescape(q.Get("search"))
	if len(search) > 100 || err != nil {
		http.Error(
			w, "search must be a string between 0 and 100 characters long",
			http.StatusBadRequest,
		)
		return
	}
	desc := q.Get("desc") == "true"
	n, res, err := index.performSearchQuery(limit, i, search, desc)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		// Without this return the handler would also encode a payload after
		// the error response, corrupting the reply.
		return
	}
	writeAPIPayload(w, &struct {
		Count  int            `json:"count"`
		Values []searchResult `json:"values"`
	}{n, res})
}
// apiVersion is the name of the current API revision, as part of the pattern.
const apiVersion = "v1"

// registerAPI registers API handler functions.
func (index *packageIndex) registerAPI(mux *http.ServeMux) {
	mux.HandleFunc("GET /api/"+apiVersion+"/info", handleInfo)
	mux.HandleFunc("GET /api/"+apiVersion+"/get", index.handleGet)
	mux.HandleFunc("GET /api/"+apiVersion+"/search", index.handleSearch)
	// The API route serves status inline; the bare /status/ route serves it
	// with a download disposition.
	mux.HandleFunc("GET /api/"+apiVersion+"/status/", index.newStatusHandler(false))
	mux.HandleFunc("GET /status/", index.newStatusHandler(true))
}
// Register arranges for mux to service API requests.
//
// A background goroutine expires search cache entries once a minute until
// ctx is cancelled.
func Register(ctx context.Context, mux *http.ServeMux, report *rosa.Report) error {
	var index packageIndex
	index.search = make(searchCache)
	if err := index.populate(report); err != nil {
		return err
	}
	// NOTE(review): index.search is also written by performSearchQuery from
	// HTTP handler goroutines; confirm the map is externally synchronised,
	// otherwise clean() racing with handlers is a data race.
	ticker := time.NewTicker(1 * time.Minute)
	go func() {
		// Stop the ticker on any exit path from this goroutine.
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				index.search.clean()
			}
		}
	}()
	index.registerAPI(mux)
	return nil
}
// writeAPIPayload sets headers common to API responses and encodes payload as
// JSON for the response body.
func writeAPIPayload(w http.ResponseWriter, payload any) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
	w.Header().Set("Pragma", "no-cache")
	w.Header().Set("Expires", "0")
	if err := json.NewEncoder(w).Encode(payload); err != nil {
		// Encode has already committed a 200 header (and possibly part of
		// the body); calling http.Error here would only trigger a
		// superfluous WriteHeader and append garbage to the response, so
		// just log and give up.
		log.Println(err)
	}
}

View File

@@ -0,0 +1,181 @@
package pkgserver
import (
"net/http"
"net/http/httptest"
"slices"
"strconv"
"testing"
"hakurei.app/internal/info"
"hakurei.app/internal/rosa"
)
// prefix is prepended to every API path.
const prefix = "/api/" + apiVersion + "/"

// TestAPIInfo checks the /info endpoint's status, headers and payload.
func TestAPIInfo(t *testing.T) {
	t.Parallel()
	w := httptest.NewRecorder()
	handleInfo(w, httptest.NewRequestWithContext(
		t.Context(),
		http.MethodGet,
		prefix+"info",
		nil,
	))
	resp := w.Result()
	checkStatus(t, resp, http.StatusOK)
	checkAPIHeader(t, w.Header())
	checkPayload(t, resp, struct {
		Count          int    `json:"count"`
		HakureiVersion string `json:"hakurei_version"`
	}{int(rosa.PresetUnexportedStart), info.Version()})
}
// TestAPIGet validates the /get endpoint's parameter checking and paging in
// both declaration orders.
func TestAPIGet(t *testing.T) {
	t.Parallel()
	const target = prefix + "get"
	index := newIndex(t)
	// newRequest performs a GET against handleGet with the query suffix.
	newRequest := func(suffix string) *httptest.ResponseRecorder {
		w := httptest.NewRecorder()
		index.handleGet(w, httptest.NewRequestWithContext(
			t.Context(),
			http.MethodGet,
			target+suffix,
			nil,
		))
		return w
	}
	// checkValidate probes one query parameter with a non-integer value and
	// just outside and at both ends of its valid range.
	checkValidate := func(t *testing.T, suffix string, vmin, vmax int, wantErr string) {
		t.Run("invalid", func(t *testing.T) {
			t.Parallel()
			w := newRequest("?" + suffix + "=invalid")
			resp := w.Result()
			checkError(t, resp, wantErr, http.StatusBadRequest)
		})
		t.Run("min", func(t *testing.T) {
			t.Parallel()
			w := newRequest("?" + suffix + "=" + strconv.Itoa(vmin-1))
			resp := w.Result()
			checkError(t, resp, wantErr, http.StatusBadRequest)
			w = newRequest("?" + suffix + "=" + strconv.Itoa(vmin))
			resp = w.Result()
			checkStatus(t, resp, http.StatusOK)
		})
		t.Run("max", func(t *testing.T) {
			t.Parallel()
			w := newRequest("?" + suffix + "=" + strconv.Itoa(vmax+1))
			resp := w.Result()
			checkError(t, resp, wantErr, http.StatusBadRequest)
			w = newRequest("?" + suffix + "=" + strconv.Itoa(vmax))
			resp = w.Result()
			checkStatus(t, resp, http.StatusOK)
		})
	}
	t.Run("limit", func(t *testing.T) {
		t.Parallel()
		checkValidate(
			t, "index=0&sort=0&limit", 1, 100,
			"limit must be an integer between 1 and 100",
		)
	})
	t.Run("index", func(t *testing.T) {
		t.Parallel()
		checkValidate(
			t, "limit=1&sort=0&index", 0, int(rosa.PresetUnexportedStart-1),
			"index must be an integer between 0 and "+strconv.Itoa(int(rosa.PresetUnexportedStart-1)),
		)
	})
	t.Run("sort", func(t *testing.T) {
		t.Parallel()
		checkValidate(
			t, "index=0&limit=1&sort", 0, int(sortOrderEnd),
			"sort must be an integer between 0 and "+strconv.Itoa(int(sortOrderEnd)),
		)
	})
	// checkWithSuffix compares a page of results against want; Unversioned
	// acts as a wildcard for the version field.
	checkWithSuffix := func(name, suffix string, want []*metadata) {
		t.Run(name, func(t *testing.T) {
			t.Parallel()
			w := newRequest(suffix)
			resp := w.Result()
			checkStatus(t, resp, http.StatusOK)
			checkAPIHeader(t, w.Header())
			checkPayloadFunc(t, resp, func(got *struct {
				Values []*metadata `json:"values"`
			}) bool {
				return slices.EqualFunc(got.Values, want, func(a, b *metadata) bool {
					return (a.Version == b.Version ||
						a.Version == rosa.Unversioned ||
						b.Version == rosa.Unversioned) &&
						a.HasReport == b.HasReport &&
						a.Name == b.Name &&
						a.Description == b.Description &&
						a.Website == b.Website
				})
			})
		})
	}
	checkWithSuffix("declarationAscending", "?limit=2&index=1&sort=0", []*metadata{
		{
			Metadata: rosa.GetMetadata(1),
			Version:  rosa.Std.Version(1),
		},
		{
			Metadata: rosa.GetMetadata(2),
			Version:  rosa.Std.Version(2),
		},
	})
	checkWithSuffix("declarationAscending offset", "?limit=3&index=5&sort=0", []*metadata{
		{
			Metadata: rosa.GetMetadata(5),
			Version:  rosa.Std.Version(5),
		},
		{
			Metadata: rosa.GetMetadata(6),
			Version:  rosa.Std.Version(6),
		},
		{
			Metadata: rosa.GetMetadata(7),
			Version:  rosa.Std.Version(7),
		},
	})
	checkWithSuffix("declarationDescending", "?limit=3&index=0&sort=1", []*metadata{
		{
			Metadata: rosa.GetMetadata(rosa.PresetUnexportedStart - 1),
			Version:  rosa.Std.Version(rosa.PresetUnexportedStart - 1),
		},
		{
			Metadata: rosa.GetMetadata(rosa.PresetUnexportedStart - 2),
			Version:  rosa.Std.Version(rosa.PresetUnexportedStart - 2),
		},
		{
			Metadata: rosa.GetMetadata(rosa.PresetUnexportedStart - 3),
			Version:  rosa.Std.Version(rosa.PresetUnexportedStart - 3),
		},
	})
	checkWithSuffix("declarationDescending offset", "?limit=1&index=37&sort=1", []*metadata{
		{
			Metadata: rosa.GetMetadata(rosa.PresetUnexportedStart - 38),
			Version:  rosa.Std.Version(rosa.PresetUnexportedStart - 38),
		},
	})
}

View File

@@ -0,0 +1,106 @@
package pkgserver
import (
"cmp"
"errors"
"slices"
"strings"
"hakurei.app/internal/pkg"
"hakurei.app/internal/rosa"
)
// Sort order values accepted by the get endpoint's sort parameter; they index
// into packageIndex.sorts.
const (
	declarationAscending = iota
	declarationDescending
	nameAscending
	nameDescending
	sizeAscending
	sizeDescending
	// sortOrderEnd is the largest valid sort order value.
	sortOrderEnd = iota - 1
)

// packageIndex refers to metadata by name and various sort orders.
type packageIndex struct {
	sorts  [sortOrderEnd + 1][rosa.PresetUnexportedStart]*metadata
	names  map[string]*metadata
	search searchCache

	// Taken from [rosa.Report] if available.
	handleAccess func(*error) func()
}

// metadata holds [rosa.Metadata] extended with additional information.
type metadata struct {
	p rosa.PArtifact
	*rosa.Metadata

	// Populated via [rosa.Toolchain.Version], [rosa.Unversioned] is equivalent
	// to the zero value. Otherwise, the zero value is invalid.
	Version string `json:"version,omitempty"`
	// Output data size, available if present in report.
	Size int64 `json:"size,omitempty"`
	// Whether the underlying [pkg.Artifact] is present in the report.
	HasReport bool `json:"report"`

	// Ident string encoded ahead of time.
	ids string
	// Backed by [rosa.Report], access must be prepared by HandleAccess.
	status []byte
}
// populate deterministically populates packageIndex, optionally with a report.
//
// Every preset artifact gets a single metadata value shared (by pointer)
// between the name map and all sort orders; the sorts entries are array value
// copies, so reordering one order does not disturb another.
func (index *packageIndex) populate(report *rosa.Report) (err error) {
	if report != nil {
		defer report.HandleAccess(&err)()
		index.handleAccess = report.HandleAccess
	}
	var work [rosa.PresetUnexportedStart]*metadata
	index.names = make(map[string]*metadata)
	ir := pkg.NewIR()
	for p := range rosa.PresetUnexportedStart {
		m := metadata{
			p:        p,
			Metadata: rosa.GetMetadata(p),
			Version:  rosa.Std.Version(p),
		}
		if m.Version == "" {
			return errors.New("invalid version from " + m.Name)
		}
		if m.Version == rosa.Unversioned {
			// Fold Unversioned to the empty string so it is omitted from the
			// JSON encoding.
			m.Version = ""
		}
		if report != nil {
			id := ir.Ident(rosa.Std.Load(p))
			m.ids = pkg.Encode(id.Value())
			m.status, m.Size = report.ArtifactOf(id)
			// NOTE(review): relies on ArtifactOf reporting a negative size
			// for artifacts absent from the report — confirm.
			m.HasReport = m.Size >= 0
		}
		work[p] = &m
		index.names[m.Name] = &m
	}
	// Arrays copy by value on assignment, so each order below owns its own
	// backing array of (shared) pointers.
	index.sorts[declarationAscending] = work
	index.sorts[declarationDescending] = work
	slices.Reverse(index.sorts[declarationDescending][:])
	index.sorts[nameAscending] = work
	slices.SortFunc(index.sorts[nameAscending][:], func(a, b *metadata) int {
		return strings.Compare(a.Name, b.Name)
	})
	index.sorts[nameDescending] = index.sorts[nameAscending]
	slices.Reverse(index.sorts[nameDescending][:])
	index.sorts[sizeAscending] = work
	slices.SortFunc(index.sorts[sizeAscending][:], func(a, b *metadata) int {
		return cmp.Compare(a.Size, b.Size)
	})
	index.sorts[sizeDescending] = index.sorts[sizeAscending]
	slices.Reverse(index.sorts[sizeDescending][:])
	return
}

View File

@@ -0,0 +1,96 @@
package pkgserver
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"reflect"
"testing"
)
// newIndex returns the address of a newly populated packageIndex.
// It fails the test if population does not succeed.
func newIndex(t *testing.T) *packageIndex {
	t.Helper()
	var index packageIndex
	// No report: every entry is populated without status or size.
	if err := index.populate(nil); err != nil {
		t.Fatalf("populate: error = %v", err)
	}
	return &index
}
// checkStatus checks response status code.
func checkStatus(t *testing.T, resp *http.Response, want int) {
	t.Helper()
	got := resp.StatusCode
	if got == want {
		return
	}
	t.Errorf(
		"StatusCode: %s, want %s",
		http.StatusText(got), http.StatusText(want),
	)
}
// checkHeader checks the value of a header entry.
func checkHeader(t *testing.T, h http.Header, key, want string) {
	t.Helper()
	got := h.Get(key)
	if got == want {
		return
	}
	t.Errorf("%s: %q, want %q", key, got, want)
}
// checkAPIHeader checks common entries set for API endpoints.
func checkAPIHeader(t *testing.T, h http.Header) {
	t.Helper()
	for key, want := range map[string]string{
		"Content-Type":  "application/json; charset=utf-8",
		"Cache-Control": "no-cache, no-store, must-revalidate",
		"Pragma":        "no-cache",
		"Expires":       "0",
	} {
		checkHeader(t, h, key, want)
	}
}
// checkPayloadFunc checks the JSON response of an API endpoint by passing it to f.
// The body is decoded into a zero T; the test fails when decoding errors or
// when f reports a mismatch.
func checkPayloadFunc[T any](
	t *testing.T,
	resp *http.Response,
	f func(got *T) bool,
) {
	t.Helper()
	var got T
	r := io.Reader(resp.Body)
	if testing.Verbose() {
		// Tee the raw body so verbose runs can log what was decoded.
		var buf bytes.Buffer
		r = io.TeeReader(r, &buf)
		defer func() { t.Helper(); t.Log(buf.String()) }()
	}
	if err := json.NewDecoder(r).Decode(&got); err != nil {
		t.Fatalf("Decode: error = %v", err)
	}
	if !f(&got) {
		t.Errorf("Body: %#v", got)
	}
}
// checkPayload checks the JSON response of an API endpoint.
// It decodes the body into a T and compares it to want with DeepEqual.
func checkPayload[T any](t *testing.T, resp *http.Response, want T) {
	t.Helper()
	checkPayloadFunc(t, resp, func(got *T) bool {
		return reflect.DeepEqual(got, &want)
	})
}
// checkError checks that the response carries the error message (as written
// by http.Error, i.e. followed by a newline) and the status code.
func checkError(t *testing.T, resp *http.Response, error string, code int) {
	t.Helper()
	checkStatus(t, resp, code)
	body, _ := io.ReadAll(resp.Body)
	if want := fmt.Sprintln(error); string(body) != want {
		t.Errorf("Body: %q, want %q", string(body), error)
	}
}

View File

@@ -0,0 +1,81 @@
package pkgserver
import (
"cmp"
"maps"
"regexp"
"slices"
"time"
)
// searchCache maps a query cache key to its computed result set.
type searchCache map[string]searchCacheEntry

// searchResult pairs a metadata entry with the regexp match spans that
// selected it and a relevance score.
type searchResult struct {
	// Byte index pairs of regexp matches in the name and description.
	NameIndices [][]int `json:"name_matches"`
	DescIndices [][]int `json:"desc_matches,omitempty"`
	Score       float64 `json:"score"`
	*metadata
}

// searchCacheEntry holds one cached result set and its expiry time.
type searchCacheEntry struct {
	query   string
	results []searchResult
	expiry  time.Time
}
// performSearchQuery returns the total number of entries matching the search
// regular expression together with the window of at most limit results
// starting at i, caching computed result sets for a minute.
func (index *packageIndex) performSearchQuery(limit int, i int, search string, desc bool) (int, []searchResult, error) {
	// window clamps [i, i+limit) to the bounds of s; the caller validates i
	// against the total package count, not the (smaller) result count, so an
	// unclamped slice expression could panic here.
	window := func(s []searchResult) []searchResult {
		lo := min(i, len(s))
		hi := min(i+limit, len(s))
		return s[lo:max(lo, hi)]
	}
	// Description-inclusive queries are cached under a distinct key.
	query := search
	if desc {
		query += ";withDesc"
	}
	if entry, ok := index.search[query]; ok && len(entry.results) > 0 {
		return len(entry.results), window(entry.results), nil
	}
	regex, err := regexp.Compile(search)
	if err != nil {
		// Empty (non-nil) slice so the JSON encoding stays [] rather than null.
		return 0, make([]searchResult, 0), err
	}
	res := make([]searchResult, 0)
	for m := range maps.Values(index.names) {
		nameIndices := regex.FindAllIndex([]byte(m.Name), -1)
		var descIndices [][]int
		if desc {
			descIndices = regex.FindAllIndex([]byte(m.Description), -1)
		}
		if nameIndices == nil && descIndices == nil {
			continue
		}
		// Longer matched spans score higher; the divisor dampens many tiny
		// matches, and description matches carry a tenth of the weight.
		score := float64(indexsum(nameIndices)) / (float64(len(nameIndices)) + 1)
		if desc {
			score += float64(indexsum(descIndices)) / (float64(len(descIndices)) + 1) / 10.0
		}
		res = append(res, searchResult{
			NameIndices: nameIndices,
			DescIndices: descIndices,
			Score:       score,
			metadata:    m,
		})
	}
	// Highest score first.
	slices.SortFunc(res, func(a, b searchResult) int { return -cmp.Compare(a.Score, b.Score) })
	index.search[query] = searchCacheEntry{
		query:   search,
		results: res,
		expiry:  time.Now().Add(1 * time.Minute),
	}
	return len(res), window(res), nil
}
// clean removes expired entries from the cache.
func (s *searchCache) clean() {
	// Capture the reference time once rather than per entry.
	now := time.Now()
	maps.DeleteFunc(*s, func(_ string, v searchCacheEntry) bool {
		return v.expiry.Before(now)
	})
}
// indexsum returns the total length covered by the [start, end) index pairs
// in in.
func indexsum(in [][]int) int {
	var sum int
	for _, pair := range in {
		sum += pair[1] - pair[0]
	}
	return sum
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 17 KiB

View File

@@ -0,0 +1,57 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<link rel="stylesheet" href="style.css">
<title>Hakurei PkgServer</title>
<script src="index.js"></script>
</head>
<body>
<h1>Hakurei PkgServer</h1>
<!-- Regular browse controls: entry range, search entry, page size and sort
     order. The sort option values map to the server's sort parameter. -->
<div class="top-controls" id="top-controls-regular">
<p>Showing entries <span id="entry-counter"></span>.</p>
<span id="search-bar">
<label for="search">Search: </label>
<input type="text" name="search" id="search"/>
<button onclick="doSearch()">Find</button>
<label for="include-desc">Include descriptions: </label>
<input type="checkbox" name="include-desc" id="include-desc" checked/>
</span>
<div><label for="count">Entries per page: </label><select name="count" id="count">
<option value="10">10</option>
<option value="20">20</option>
<option value="30">30</option>
<option value="50">50</option>
</select></div>
<div><label for="sort">Sort by: </label><select name="sort" id="sort">
<option value="0">Definition (ascending)</option>
<option value="1">Definition (descending)</option>
<option value="2">Name (ascending)</option>
<option value="3">Name (descending)</option>
<option value="4">Size (ascending)</option>
<option value="5">Size (descending)</option>
</select></div>
</div>
<!-- Search-mode controls, shown by index.js while a query is active. -->
<div class="top-controls" id="search-top-controls" hidden>
<p>Showing search results <span id="search-entry-counter"></span> for query "<span id="search-query"></span>".</p>
<button onclick="exitSearch()">Back</button>
<div><label for="search-count">Entries per page: </label><select name="search-count" id="search-count">
<option value="10">10</option>
<option value="20">20</option>
<option value="30">30</option>
<option value="50">50</option>
</select></div>
<p>Sorted by best match</p>
</div>
<!-- Paging controls are duplicated above and below the listing; index.js
     keeps every .page-number input in sync. -->
<div class="page-controls"><a href="javascript:prevPage()">&laquo; Previous</a> <input type="text" class="page-number" value="1"/> <a href="javascript:nextPage()">Next &raquo;</a></div>
<table id="pkg-list">
<tr><td>Loading...</td></tr>
</table>
<div class="page-controls"><a href="javascript:prevPage()">&laquo; Previous</a> <input type="text" class="page-number" value="1"/> <a href="javascript:nextPage()">Next &raquo;</a></div>
<footer>
<p>&copy;<a href="https://hakurei.app/">Hakurei</a> (<span id="hakurei-version">unknown</span>). Licensed under the MIT license.</p>
</footer>
<script>main();</script>
</body>
</html>

View File

@@ -0,0 +1,331 @@
// PackageIndexEntry mirrors one metadata object from the server's package
// index API.
interface PackageIndexEntry {
    name: string
    // Output data size in bytes; presumably present only when the artifact
    // is in the report — verify against the server's metadata encoding.
    size?: number
    description?: string
    website?: string
    version?: string
    // Whether a build log is available via the status endpoints.
    report?: boolean
}
// entryToHTML renders one index entry (or search result) as a table row,
// HTML-escaping all metadata text before interpolation.
function entryToHTML(entry: PackageIndexEntry | SearchResult): HTMLTableRowElement {
    let v = entry.version != null ? `<span>${escapeHtml(entry.version)}</span>` : ""
    let s = entry.size != null && entry.size > 0 ? `<p>Size: ${toByteSizeString(entry.size)} (${entry.size})</p>` : ""
    let n: string
    let d: string
    // Search results carry match spans; highlight them instead of plain text.
    if ('name_matches' in entry) {
        n = `<h2>${nameMatches(entry as SearchResult)} ${v}</h2>`
    } else {
        n = `<h2>${escapeHtml(entry.name)} ${v}</h2>`
    }
    if ('desc_matches' in entry && STATE.getIncludeDescriptions()) {
        d = descMatches(entry as SearchResult)
    } else {
        d = (entry as PackageIndexEntry).description != null ? `<p>${escapeHtml((entry as PackageIndexEntry).description)}</p>` : ""
    }
    let w = entry.website != null ? `<a href="${encodeURI(entry.website)}">Website</a>` : ""
    // Status log links: inline view via the API route, attachment via /status/.
    let r = entry.report ? `Log (<a href=\"${encodeURI('/api/v1/status/' + entry.name)}\">View</a> | <a href=\"${encodeURI('/status/' + entry.name)}\">Download</a>)` : ""
    let row = <HTMLTableRowElement>(document.createElement('tr'))
    row.innerHTML = `<td>
${n}
${d}
${s}
${w}
${r}
</td>`
    return row
}
// nameMatches renders the entry name with its match spans highlighted.
function nameMatches(sr: SearchResult): string {
    return markMatches(sr.name, sr.name_matches)
}

// descMatches renders the entry description with its match spans highlighted.
function descMatches(sr: SearchResult): string {
    return markMatches(sr.description!, sr.desc_matches)
}
// markMatches wraps each [start, end) span listed in indices with <mark>
// tags while HTML-escaping every character of str. indices must be sorted
// and non-overlapping, as produced by the server's regexp matching.
function markMatches(str: string, indices: [number, number][]): string {
    if (indices == null) {
        return str
    }
    let out: string = ""
    let j = 0
    for (let i = 0; i < str.length; i++) {
        // Close before open: when one match ends exactly where the next
        // begins, both tags must be emitted at the same position (the old
        // start-first logic skipped the end check and dropped the opening
        // tag for the second match).
        if (j < indices.length && i === indices[j][1]) {
            out += "</mark>"
            j++
        }
        if (j < indices.length && i === indices[j][0]) {
            out += "<mark>"
        }
        out += escapeHtmlChar(str[i])
    }
    // A match running to the end of str still needs its closing tag.
    if (j < indices.length && indices[j] !== undefined) {
        out += "</mark>"
    }
    return out
}
// toByteSizeString formats a byte count using binary (1024-based) units with
// two decimal places, or the raw count below 1 kiB.
function toByteSizeString(bytes: number): string {
    if (bytes == null) return `unspecified`
    if (bytes < 1024) return `${bytes}B`
    let scale = 1024
    for (const unit of ["kiB", "MiB", "GiB", "TiB"]) {
        if (bytes < scale * 1024) return `${(bytes / scale).toFixed(2)}${unit}`
        scale *= 1024
    }
    return "not only is it big, it's large"
}
// API_VERSION names the server API revision used to build ENDPOINT.
const API_VERSION = 1
const ENDPOINT = `/api/v${API_VERSION}`

// InfoPayload mirrors the /info response body.
interface InfoPayload {
    count?: number
    hakurei_version?: string
}

// infoRequest fetches constant system information from the server.
async function infoRequest(): Promise<InfoPayload> {
    const res = await fetch(`${ENDPOINT}/info`)
    const payload = await res.json()
    return payload as InfoPayload
}
// GetPayload mirrors the /get response body.
interface GetPayload {
    values?: PackageIndexEntry[]
}
// SortOrders enumerates the server's sort query parameter values; the
// numeric values must stay in sync with the pkgserver index sort constants.
enum SortOrders {
    DeclarationAscending,
    DeclarationDescending,
    NameAscending,
    NameDescending,
    // Values 4 and 5 are offered by both the server and the sort <select>
    // in the page, but were missing from this enum.
    SizeAscending,
    SizeDescending
}
// getRequest fetches one page of package metadata in the given sort order.
async function getRequest(limit: number, index: number, sort: SortOrders): Promise<GetPayload> {
    const res = await fetch(`${ENDPOINT}/get?limit=${limit}&index=${index}&sort=${sort.valueOf()}`)
    const payload = await res.json()
    return payload as GetPayload
}
// SearchResult extends an index entry with its match spans and score.
interface SearchResult extends PackageIndexEntry {
    name_matches: [number, number][]
    desc_matches: [number, number][]
    score: number
}

// SearchPayload mirrors the /search response body.
interface SearchPayload {
    count?: number
    values?: SearchResult[]
}

// searchRequest queries the server for entries matching search; on a non-OK
// response it leaves search mode, alerts, and rejects.
async function searchRequest(limit: number, index: number, search: string, desc: boolean): Promise<SearchPayload> {
    const res = await fetch(`${ENDPOINT}/search?limit=${limit}&index=${index}&search=${encodeURIComponent(search)}&desc=${desc}`)
    if (!res.ok) {
        exitSearch()
        alert("invalid search query!")
        return Promise.reject(res.statusText)
    }
    const payload = await res.json()
    return payload as SearchPayload
}
// State holds the paging, sorting and search-mode state of the listing UI
// and keeps the page controls and table in sync when it changes.
class State {
    entriesPerPage: number = 10
    entryIndex: number = 0
    // Total entries in the current view (all packages, or search matches).
    maxTotal: number = 0
    maxEntries: number = 0
    sort: SortOrders = SortOrders.DeclarationAscending
    // True while search results are being shown.
    search: boolean = false
    getEntriesPerPage(): number {
        return this.entriesPerPage
    }
    setEntriesPerPage(entriesPerPage: number) {
        this.entriesPerPage = entriesPerPage
        // Snap the current index to the start of its new page.
        this.setEntryIndex(Math.floor(this.getEntryIndex() / entriesPerPage) * entriesPerPage)
    }
    getEntryIndex(): number {
        return this.entryIndex
    }
    setEntryIndex(entryIndex: number) {
        this.entryIndex = entryIndex
        // Refresh page number, range counters and the listing itself.
        this.updatePage()
        this.updateRange()
        this.updateListings()
    }
    getMaxTotal(): number {
        return this.maxTotal
    }
    setMaxTotal(max: number) {
        this.maxTotal = max
    }
    getSortOrder(): SortOrders {
        return this.sort
    }
    setSortOrder(sortOrder: SortOrders) {
        this.sort = sortOrder
        this.setEntryIndex(0)
    }
    updatePage() {
        // 1-based page number derived from the entry index.
        let page = Math.ceil(((this.getEntryIndex() + this.getEntriesPerPage()) - 1) / this.getEntriesPerPage())
        for (let e of document.getElementsByClassName("page-number")) {
            (e as HTMLInputElement).value = String(page)
        }
    }
    updateRange() {
        let max = Math.min(this.getEntryIndex() + this.getEntriesPerPage(), this.getMaxTotal())
        document.getElementById("entry-counter")!.textContent = `${this.getEntryIndex() + 1}-${max} of ${this.getMaxTotal()}`
        if (this.search) {
            document.getElementById("search-entry-counter")!.textContent = `${this.getEntryIndex() + 1}-${max} of ${this.maxTotal}/${this.maxEntries}`
            document.getElementById("search-query")!.innerHTML = `<code>${escapeHtml(this.getSearchQuery())}</code>`
        }
    }
    getSearchQuery(): string {
        let queryString = document.getElementById("search")!;
        return (queryString as HTMLInputElement).value
    }
    getIncludeDescriptions(): boolean {
        let includeDesc = document.getElementById("include-desc")!;
        return (includeDesc as HTMLInputElement).checked
    }
    updateListings() {
        if (this.search) {
            searchRequest(this.getEntriesPerPage(), this.getEntryIndex(), this.getSearchQuery(), this.getIncludeDescriptions())
                .then(res => {
                    let table = document.getElementById("pkg-list")!
                    table.innerHTML = ''
                    for (let row of res.values!) {
                        table.appendChild(entryToHTML(row))
                    }
                    STATE.maxTotal = res.count!
                    STATE.updateRange()
                    if(res.count! < 1) {
                        exitSearch()
                        alert("no results found!")
                    }
                })
        } else {
            getRequest(this.getEntriesPerPage(), this.getEntryIndex(), this.getSortOrder())
                .then(res => {
                    let table = document.getElementById("pkg-list")!
                    table.innerHTML = ''
                    for (let row of res.values!) {
                        table.appendChild(entryToHTML(row))
                    }
                })
        }
    }
}
// Global UI state singleton, initialized in main().
let STATE: State
/**
 * Returns the entry index of the first entry on the last page.
 *
 * Computed from maxTotal - 1 so that a total that is an exact multiple
 * of the page size does not yield an index one page past the end
 * (e.g. 20 entries at 10 per page must end at index 10, not 20, which
 * would render an empty "21-20 of 20" page). Clamped to 0 for an
 * empty listing.
 */
function lastPageIndex(): number {
    const perPage = STATE.getEntriesPerPage()
    return Math.max(0, Math.floor((STATE.getMaxTotal() - 1) / perPage) * perPage)
}
/** Jumps to a 1-based page number, clamped to the valid page range. */
function setPage(page: number) {
    const target = STATE.getEntriesPerPage() * (page - 1)
    STATE.setEntryIndex(Math.max(0, Math.min(target, lastPageIndex())))
}
/**
 * Escapes a string for safe insertion into HTML by escaping each
 * character individually; undefined becomes the empty string.
 */
function escapeHtml(str?: string): string {
    if (str == undefined) return ""
    return str.split('').map(escapeHtmlChar).join('')
}
/**
 * Maps a single character to its HTML entity, or returns the input
 * unchanged when no escaping is needed. Inputs that are not exactly
 * one code unit long pass through untouched.
 */
function escapeHtmlChar(char: string): string {
    if (char.length != 1) return char
    const entities: { [c: string]: string } = {
        '&': "&amp;",
        '<': "&lt;",
        '>': "&gt;",
        '"': "&quot;",
        "'": "&apos;",
    }
    return entities[char[0]] ?? char
}
// Jumps to the first page of the listing.
function firstPage() {
    STATE.setEntryIndex(0)
}
/** Moves back one page, clamped at the first entry. */
function prevPage() {
    STATE.setEntryIndex(Math.max(0, STATE.getEntryIndex() - STATE.getEntriesPerPage()))
}
// Jumps to the last page of the listing.
function lastPage() {
    STATE.setEntryIndex(lastPageIndex())
}
/** Moves forward one page, clamped at the last page. */
function nextPage() {
    STATE.setEntryIndex(Math.min(lastPageIndex(), STATE.getEntryIndex() + STATE.getEntriesPerPage()))
}
/** Switches the top controls into search mode and loads the first page of hits. */
function doSearch() {
    for (const id of ["top-controls-regular", "search-top-controls"]) {
        document.getElementById(id)!.toggleAttribute("hidden")
    }
    STATE.search = true;
    STATE.setEntryIndex(0);
}
/**
 * Leaves search mode: toggles the control bars back, restores the
 * full-index total, and returns to the first page.
 */
function exitSearch() {
    for (const id of ["top-controls-regular", "search-top-controls"]) {
        document.getElementById(id)!.toggleAttribute("hidden")
    }
    STATE.search = false;
    STATE.setMaxTotal(STATE.maxEntries)
    STATE.setEntryIndex(0)
}
/**
 * Entry point: fetches index metadata, wires up the pagination, page
 * size, sort and search controls, then renders the first page.
 *
 * NOTE(review): the infoRequest chain has no .catch; a failed /info
 * fetch surfaces as an unhandled rejection — confirm acceptable.
 */
function main() {
    STATE = new State()
    infoRequest()
        .then(res => {
            STATE.maxEntries = res.count!
            STATE.setMaxTotal(STATE.maxEntries)
            document.getElementById("hakurei-version")!.textContent = res.hakurei_version!
            STATE.updateRange()
            STATE.updateListings()
        })
    for (let e of document.getElementsByClassName("page-number")) {
        e.addEventListener("change", (_) => {
            // parseInt yields NaN for non-numeric input; feeding NaN to
            // setPage would poison entryIndex and every derived view,
            // so ignore garbage instead.
            const page = parseInt((e as HTMLInputElement).value)
            if (!Number.isNaN(page)) setPage(page)
        })
    }
    document.getElementById("count")?.addEventListener("change", (event) => {
        STATE.setEntriesPerPage(parseInt((event.target as HTMLSelectElement).value))
    })
    document.getElementById("sort")?.addEventListener("change", (event) => {
        STATE.setSortOrder(parseInt((event.target as HTMLSelectElement).value))
    })
    document.getElementById("search")?.addEventListener("keyup", (event) => {
        if (event.key === 'Enter') doSearch()
    })
}

View File

@@ -0,0 +1,21 @@
/* Width-constrained, centered page-number input; used by both the
   regular and the search control bars. (A verbatim duplicate of this
   rule was removed — the second copy had no effect.) */
.page-number {
    width: 2em;
    text-align: center;
}
/* Dark theme: dark grey page with near-white text. */
@media (prefers-color-scheme: dark) {
    html {
        background-color: #2c2c2c;
        color: ghostwhite;
    }
}
/* Light theme: light grey page with black text. */
@media (prefers-color-scheme: light) {
    html {
        background-color: #d3d3d3;
        color: black;
    }
}

View File

@@ -0,0 +1,8 @@
{
"compilerOptions": {
"target": "ES2024",
"strict": true,
"alwaysStrict": true,
"outDir": "static"
}
}

View File

@@ -0,0 +1,9 @@
// Package ui holds the static web UI.
package ui
import "net/http"
// Register arranges for mux to serve the embedded frontend.
// Every GET request is handled by a file server rooted at the
// package-level static filesystem.
func Register(mux *http.ServeMux) {
	frontend := http.FileServer(http.FS(static))
	mux.Handle("GET /", frontend)
}

View File

@@ -0,0 +1,21 @@
//go:build frontend
package ui
import (
"embed"
"io/fs"
)
//go:generate tsc
//go:generate cp index.html style.css favicon.ico static
//go:embed static
var _static embed.FS
var static = func() fs.FS {
if f, err := fs.Sub(_static, "static"); err != nil {
panic(err)
} else {
return f
}
}()

View File

@@ -0,0 +1,7 @@
//go:build !frontend
package ui
import "testing/fstest"
// static is an empty stand-in for the embedded frontend filesystem,
// used when the frontend build tag is not set.
var static fstest.MapFS

View File

@@ -14,16 +14,18 @@ package main
import ( import (
"context" "context"
"crypto/sha512"
"errors" "errors"
"fmt" "fmt"
"io" "io"
"log" "log"
"net"
"net/http"
"os" "os"
"os/signal" "os/signal"
"path/filepath" "path/filepath"
"runtime" "runtime"
"strconv" "strconv"
"strings"
"sync" "sync"
"sync/atomic" "sync/atomic"
"syscall" "syscall"
@@ -40,6 +42,9 @@ import (
"hakurei.app/internal/pkg" "hakurei.app/internal/pkg"
"hakurei.app/internal/rosa" "hakurei.app/internal/rosa"
"hakurei.app/message" "hakurei.app/message"
"hakurei.app/cmd/mbf/internal/pkgserver"
"hakurei.app/cmd/mbf/internal/pkgserver/ui"
) )
func main() { func main() {
@@ -53,77 +58,141 @@ func main() {
log.Fatal("this program must not run as root") log.Fatal("this program must not run as root")
} }
var cache *pkg.Cache
ctx, stop := signal.NotifyContext(context.Background(), ctx, stop := signal.NotifyContext(context.Background(),
syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP) syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
defer stop() defer stop()
defer func() {
if cache != nil {
cache.Close()
}
if r := recover(); r != nil { var cm cache
fmt.Println(r) defer func() { cm.Close() }()
log.Fatal("consider scrubbing the on-disk cache")
}
}()
var ( var (
flagQuiet bool flagQuiet bool
flagCures int flagQEMU bool
flagBase string flagArch string
flagIdle bool flagCheck bool
flagLTO bool
flagHostAbstract bool flagCrossOverride int
addr net.UnixAddr
) )
c := command.New(os.Stderr, log.Printf, "mbf", func([]string) (err error) { c := command.New(os.Stderr, log.Printf, "mbf", func([]string) error {
msg.SwapVerbose(!flagQuiet) msg.SwapVerbose(!flagQuiet)
cm.ctx, cm.msg = ctx, msg
flagBase = os.ExpandEnv(flagBase) cm.base = os.ExpandEnv(cm.base)
if flagBase == "" { if cm.base == "" {
flagBase = "cache" cm.base = "cache"
} }
var base *check.Absolute addr.Net = "unix"
if flagBase, err = filepath.Abs(flagBase); err != nil { addr.Name = os.ExpandEnv(addr.Name)
return if addr.Name == "" {
} else if base, err = check.NewAbs(flagBase); err != nil { addr.Name = filepath.Join(cm.base, "daemon")
return
} }
var flags int var flags int
if flagIdle { if !flagCheck {
flags |= pkg.CSchedIdle flags |= rosa.OptSkipCheck
} }
if flagHostAbstract { if !flagLTO {
flags |= pkg.CHostAbstract flags |= rosa.OptLLVMNoLTO
}
rosa.DropCaches("", flags)
cross := flagArch != "" && flagArch != runtime.GOARCH
if flagQEMU || cross {
cm.qemu = rosa.Std.Load(rosa.QEMU)
} }
cache, err = pkg.Open(ctx, msg, flags, flagCures, base)
return if cross {
if flagCrossOverride != -1 {
flags = flagCrossOverride
}
rosa.DropCaches(flagArch, flags)
if !rosa.HasStage0() {
return pkg.UnsupportedArchError(flagArch)
}
}
return nil
}).Flag( }).Flag(
&flagQuiet, &flagQuiet,
"q", command.BoolFlag(false), "q", command.BoolFlag(false),
"Do not print cure messages", "Do not print cure messages",
).Flag( ).Flag(
&flagCures, &flagQEMU,
"register", command.BoolFlag(false),
"Enable additional target architectures",
).Flag(
&flagArch,
"arch", command.StringFlag(runtime.GOARCH),
"Target architecture",
).Flag(
&flagLTO,
"lto", command.BoolFlag(false),
"Enable LTO in stage2 and stage3 LLVM toolchains",
).Flag(
&flagCheck,
"check", command.BoolFlag(true),
"Run test suites",
).Flag(
&flagCrossOverride,
"cross-flags", command.IntFlag(-1),
"Override non-native target preset flags",
).Flag(
&cm.verboseInit,
"v", command.BoolFlag(false),
"Do not suppress verbose output from init",
).Flag(
&cm.cures,
"cures", command.IntFlag(0), "cures", command.IntFlag(0),
"Maximum number of dependencies to cure at any given time", "Maximum number of dependencies to cure at any given time",
).Flag( ).Flag(
&flagBase, &cm.jobs,
"jobs", command.IntFlag(0),
"Preferred number of jobs to run, when applicable",
).Flag(
&cm.base,
"d", command.StringFlag("$MBF_CACHE_DIR"), "d", command.StringFlag("$MBF_CACHE_DIR"),
"Directory to store cured artifacts", "Directory to store cured artifacts",
).Flag( ).Flag(
&flagIdle, &cm.idle,
"sched-idle", command.BoolFlag(false), "sched-idle", command.BoolFlag(false),
"Set SCHED_IDLE scheduling policy", "Set SCHED_IDLE scheduling policy",
).Flag( ).Flag(
&flagHostAbstract, &cm.hostAbstract,
"host-abstract", command.BoolFlag( "host-abstract", command.BoolFlag(
os.Getenv("MBF_HOST_ABSTRACT") != "", os.Getenv("MBF_HOST_ABSTRACT") != "",
), ),
"Do not restrict networked cure containers from connecting to host "+ "Do not restrict networked cure containers from connecting to host "+
"abstract UNIX sockets", "abstract UNIX sockets",
).Flag(
&addr.Name,
"socket", command.StringFlag("$MBF_DAEMON_SOCKET"),
"Pathname of socket to bind to",
)
c.NewCommand(
"checksum", "Compute checksum of data read from standard input",
func([]string) error {
done := make(chan struct{})
defer close(done)
go func() {
select {
case <-ctx.Done():
os.Exit(1)
case <-done:
return
}
}()
h := sha512.New384()
if _, err := io.Copy(h, os.Stdin); err != nil {
return err
}
log.Println(pkg.Encode(pkg.Checksum(h.Sum(nil))))
return nil
},
) )
{ {
@@ -137,7 +206,9 @@ func main() {
if flagShifts < 0 || flagShifts > 31 { if flagShifts < 0 || flagShifts > 31 {
flagShifts = 12 flagShifts = 12
} }
return cache.Scrub(runtime.NumCPU() << flagShifts) return cm.Do(func(cache *pkg.Cache) error {
return cache.Scrub(runtime.NumCPU() << flagShifts)
})
}, },
).Flag( ).Flag(
&flagShifts, &flagShifts,
@@ -148,6 +219,7 @@ func main() {
{ {
var ( var (
flagBind string
flagStatus bool flagStatus bool
flagReport string flagReport string
) )
@@ -155,9 +227,7 @@ func main() {
"info", "info",
"Display out-of-band metadata of an artifact", "Display out-of-band metadata of an artifact",
func(args []string) (err error) { func(args []string) (err error) {
if len(args) == 0 { const shutdownTimeout = 15 * time.Second
return errors.New("info requires at least 1 argument")
}
var r *rosa.Report var r *rosa.Report
if flagReport != "" { if flagReport != "" {
@@ -172,88 +242,46 @@ func main() {
defer r.HandleAccess(&err)() defer r.HandleAccess(&err)()
} }
for i, name := range args { if flagBind == "" {
if p, ok := rosa.ResolveName(name); !ok { return commandInfo(&cm, args, os.Stdout, flagStatus, r)
return fmt.Errorf("unknown artifact %q", name)
} else {
var suffix string
if version := rosa.Std.Version(p); version != rosa.Unversioned {
suffix += "-" + version
}
fmt.Println("name : " + name + suffix)
meta := rosa.GetMetadata(p)
fmt.Println("description : " + meta.Description)
if meta.Website != "" {
fmt.Println("website : " +
strings.TrimSuffix(meta.Website, "/"))
}
if len(meta.Dependencies) > 0 {
fmt.Print("depends on :")
for _, d := range meta.Dependencies {
s := rosa.GetMetadata(d).Name
if version := rosa.Std.Version(d); version != rosa.Unversioned {
s += "-" + version
}
fmt.Print(" " + s)
}
fmt.Println()
}
const statusPrefix = "status : "
if flagStatus {
if r == nil {
var f io.ReadSeekCloser
f, err = cache.OpenStatus(rosa.Std.Load(p))
if err != nil {
if errors.Is(err, os.ErrNotExist) {
fmt.Println(
statusPrefix + "not yet cured",
)
} else {
return
}
} else {
fmt.Print(statusPrefix)
_, err = io.Copy(os.Stdout, f)
if err = errors.Join(err, f.Close()); err != nil {
return
}
}
} else {
status, n := r.ArtifactOf(cache.Ident(rosa.Std.Load(p)))
if status == nil {
fmt.Println(
statusPrefix + "not in report",
)
} else {
fmt.Println("size :", n)
fmt.Print(statusPrefix)
if _, err = os.Stdout.Write(status); err != nil {
return
}
}
}
}
if i != len(args)-1 {
fmt.Println()
}
}
} }
return nil
var mux http.ServeMux
ui.Register(&mux)
if err = pkgserver.Register(ctx, &mux, r); err != nil {
return
}
server := http.Server{Addr: flagBind, Handler: &mux}
go func() {
<-ctx.Done()
cc, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
defer cancel()
if _err := server.Shutdown(cc); _err != nil {
log.Fatal(_err)
}
}()
msg.Verbosef("listening on %q", flagBind)
err = server.ListenAndServe()
if errors.Is(err, http.ErrServerClosed) {
err = nil
}
return
}, },
). ).Flag(
Flag( &flagBind,
&flagStatus, "bind", command.StringFlag(""),
"status", command.BoolFlag(false), "TCP address for the server to listen on",
"Display cure status if available", ).Flag(
). &flagStatus,
Flag( "status", command.BoolFlag(false),
&flagReport, "Display cure status if available",
"report", command.StringFlag(""), ).Flag(
"Load cure status from this report file instead of cache", &flagReport,
) "report", command.StringFlag(""),
"Load cure status from this report file instead of cache",
)
} }
c.NewCommand( c.NewCommand(
@@ -287,7 +315,9 @@ func main() {
if ext.Isatty(int(w.Fd())) { if ext.Isatty(int(w.Fd())) {
return errors.New("output appears to be a terminal") return errors.New("output appears to be a terminal")
} }
return rosa.WriteReport(msg, w, cache) return cm.Do(func(cache *pkg.Cache) error {
return rosa.WriteReport(msg, w, cache)
})
}, },
) )
@@ -350,14 +380,26 @@ func main() {
" package(s) are out of date")) " package(s) are out of date"))
} }
return errors.Join(errs...) return errors.Join(errs...)
}). }).Flag(
Flag( &flagJobs,
&flagJobs, "j", command.IntFlag(32),
"j", command.IntFlag(32), "Maximum number of simultaneous connections",
"Maximum number of simultaneous connections", )
)
} }
c.NewCommand(
"daemon",
"Service artifact IR with Rosa OS extensions",
func(args []string) error {
ul, err := net.ListenUnix("unix", &addr)
if err != nil {
return err
}
log.Printf("listening on pathname socket at %s", addr.Name)
return serve(ctx, log.Default(), &cm, ul)
},
)
{ {
var ( var (
flagGentoo string flagGentoo string
@@ -382,25 +424,37 @@ func main() {
rosa.SetGentooStage3(flagGentoo, checksum) rosa.SetGentooStage3(flagGentoo, checksum)
} }
_, _, _, stage1 := (t - 2).NewLLVM()
_, _, _, stage2 := (t - 1).NewLLVM()
_, _, _, stage3 := t.NewLLVM()
var ( var (
pathname *check.Absolute pathname *check.Absolute
checksum [2]unique.Handle[pkg.Checksum] checksum [2]unique.Handle[pkg.Checksum]
) )
if pathname, _, err = cache.Cure(stage1); err != nil { if err = cm.Do(func(cache *pkg.Cache) (err error) {
return err pathname, _, err = cache.Cure(
(t - 2).Load(rosa.LLVM),
)
return
}); err != nil {
return
} }
log.Println("stage1:", pathname) log.Println("stage1:", pathname)
if pathname, checksum[0], err = cache.Cure(stage2); err != nil { if err = cm.Do(func(cache *pkg.Cache) (err error) {
return err pathname, checksum[0], err = cache.Cure(
(t - 1).Load(rosa.LLVM),
)
return
}); err != nil {
return
} }
log.Println("stage2:", pathname) log.Println("stage2:", pathname)
if pathname, checksum[1], err = cache.Cure(stage3); err != nil { if err = cm.Do(func(cache *pkg.Cache) (err error) {
return err pathname, checksum[1], err = cache.Cure(
t.Load(rosa.LLVM),
)
return
}); err != nil {
return
} }
log.Println("stage3:", pathname) log.Println("stage3:", pathname)
@@ -417,39 +471,46 @@ func main() {
} }
if flagStage0 { if flagStage0 {
if pathname, _, err = cache.Cure( if err = cm.Do(func(cache *pkg.Cache) (err error) {
t.Load(rosa.Stage0), pathname, _, err = cache.Cure(
); err != nil { t.Load(rosa.Stage0),
return err )
return
}); err != nil {
return
} }
log.Println(pathname) log.Println(pathname)
} }
return return
}, },
). ).Flag(
Flag( &flagGentoo,
&flagGentoo, "gentoo", command.StringFlag(""),
"gentoo", command.StringFlag(""), "Bootstrap from a Gentoo stage3 tarball",
"Bootstrap from a Gentoo stage3 tarball", ).Flag(
). &flagChecksum,
Flag( "checksum", command.StringFlag(""),
&flagChecksum, "Checksum of Gentoo stage3 tarball",
"checksum", command.StringFlag(""), ).Flag(
"Checksum of Gentoo stage3 tarball", &flagStage0,
). "stage0", command.BoolFlag(false),
Flag( "Create bootstrap stage0 tarball",
&flagStage0, )
"stage0", command.BoolFlag(false),
"Create bootstrap stage0 tarball",
)
} }
{ {
var ( var (
flagDump string flagDump string
flagEnter bool flagEnter bool
flagExport string flagExport string
flagRemote bool
flagNoReply bool
flagFaults bool
flagPop bool
flagBoot bool
flagStd bool
) )
c.NewCommand( c.NewCommand(
"cure", "cure",
@@ -463,9 +524,20 @@ func main() {
return fmt.Errorf("unknown artifact %q", args[0]) return fmt.Errorf("unknown artifact %q", args[0])
} }
t := rosa.Std
if flagBoot {
t -= 2
} else if flagStd {
t -= 1
}
switch { switch {
default: default:
pathname, _, err := cache.Cure(rosa.Std.Load(p)) var pathname *check.Absolute
err := cm.Do(func(cache *pkg.Cache) (err error) {
pathname, _, err = cache.Cure(t.Load(p))
return
})
if err != nil { if err != nil {
return err return err
} }
@@ -505,7 +577,7 @@ func main() {
return err return err
} }
if err = cache.EncodeAll(f, rosa.Std.Load(p)); err != nil { if err = pkg.NewIR().EncodeAll(f, rosa.Std.Load(p)); err != nil {
_ = f.Close() _ = f.Close()
return err return err
} }
@@ -513,33 +585,150 @@ func main() {
return f.Close() return f.Close()
case flagEnter: case flagEnter:
return cache.EnterExec( return cm.Do(func(cache *pkg.Cache) error {
ctx, return cache.EnterExec(
rosa.Std.Load(p), ctx,
true, os.Stdin, os.Stdout, os.Stderr, t.Load(p),
rosa.AbsSystem.Append("bin", "mksh"), true, os.Stdin, os.Stdout, os.Stderr,
"sh", rosa.AbsSystem.Append("bin", "mksh"),
) "sh",
)
})
case flagRemote:
var flags uint64
if flagNoReply {
flags |= remoteNoReply
}
a := t.Load(p)
pathname, err := cureRemote(ctx, &addr, a, flags)
if !flagNoReply && err == nil {
log.Println(pathname)
}
if errors.Is(err, context.Canceled) {
cc, cancel := context.WithDeadline(context.Background(), daemonDeadline())
defer cancel()
if _err := cancelRemote(cc, &addr, a, false); _err != nil {
log.Println(err)
}
}
return err
case flagFaults:
var faults []pkg.Fault
if err := cm.Do(func(cache *pkg.Cache) (err error) {
faults, err = cache.ReadFaults(t.Load(p))
return
}); err != nil {
return err
}
for _, fault := range faults {
log.Printf("%s: %s ago", fault.String(), time.Since(fault.Time()))
}
return nil
case flagPop:
var faults []pkg.Fault
if err := cm.Do(func(cache *pkg.Cache) (err error) {
faults, err = cache.ReadFaults(t.Load(p))
return
}); err != nil {
return err
}
if len(faults) == 0 {
return errors.New("no fault entries found")
}
fault := faults[len(faults)-1]
r, err := fault.Open()
if err != nil {
return err
}
if _, err = io.Copy(os.Stdout, r); err != nil {
_ = r.Close()
return err
}
fmt.Println()
if err = r.Close(); err != nil {
return err
}
log.Printf("faulting cure terminated %s ago", time.Since(fault.Time()))
return fault.Destroy()
} }
}, },
). ).Flag(
Flag( &flagDump,
&flagDump, "dump", command.StringFlag(""),
"dump", command.StringFlag(""), "Write IR to specified pathname and terminate",
"Write IR to specified pathname and terminate", ).Flag(
). &flagExport,
Flag( "export", command.StringFlag(""),
&flagExport, "Export cured artifact to specified pathname",
"export", command.StringFlag(""), ).Flag(
"Export cured artifact to specified pathname", &flagEnter,
). "enter", command.BoolFlag(false),
Flag( "Enter cure container with an interactive shell",
&flagEnter, ).Flag(
"enter", command.BoolFlag(false), &flagRemote,
"Enter cure container with an interactive shell", "daemon", command.BoolFlag(false),
) "Cure artifact on the daemon",
).Flag(
&flagNoReply,
"no-reply", command.BoolFlag(false),
"Do not receive a reply from the daemon",
).Flag(
&flagBoot,
"boot", command.BoolFlag(false),
"Build on the stage0 toolchain",
).Flag(
&flagStd,
"std", command.BoolFlag(false),
"Build on the intermediate toolchain",
).Flag(
&flagFaults,
"faults", command.BoolFlag(false),
"Display fault entries of the specified artifact",
).Flag(
&flagPop,
"pop", command.BoolFlag(false),
"Display and destroy the most recent fault entry",
)
} }
c.NewCommand(
"clear",
"Remove all fault entries from the cache",
func([]string) error {
return cm.Do(func(*pkg.Cache) error {
pathname := filepath.Join(cm.base, "fault")
dents, err := os.ReadDir(pathname)
if err != nil {
return err
}
for _, dent := range dents {
msg.Verbosef("destroying entry %s", dent.Name())
if err = os.Remove(filepath.Join(pathname, dent.Name())); err != nil {
return err
}
}
log.Printf("destroyed %d fault entries", len(dents))
return nil
})
},
)
c.NewCommand(
"abort",
"Abort all pending cures on the daemon",
func([]string) error { return abortRemote(ctx, &addr, false) },
)
{ {
var ( var (
flagNet bool flagNet bool
@@ -551,7 +740,7 @@ func main() {
"shell", "shell",
"Interactive shell in the specified Rosa OS environment", "Interactive shell in the specified Rosa OS environment",
func(args []string) error { func(args []string) error {
presets := make([]rosa.PArtifact, len(args)) presets := make([]rosa.PArtifact, len(args)+3)
for i, arg := range args { for i, arg := range args {
p, ok := rosa.ResolveName(arg) p, ok := rosa.ResolveName(arg)
if !ok { if !ok {
@@ -559,21 +748,24 @@ func main() {
} }
presets[i] = p presets[i] = p
} }
base := rosa.LLVM
if !flagWithToolchain {
base = rosa.Musl
}
presets = append(presets,
base,
rosa.Mksh,
rosa.Toybox,
)
root := make(pkg.Collect, 0, 6+len(args)) root := make(pkg.Collect, 0, 6+len(args))
root = rosa.Std.AppendPresets(root, presets...) root = rosa.Std.AppendPresets(root, presets...)
if flagWithToolchain { if err := cm.Do(func(cache *pkg.Cache) error {
musl, compilerRT, runtimes, clang := (rosa.Std - 1).NewLLVM() _, _, err := cache.Cure(&root)
root = append(root, musl, compilerRT, runtimes, clang) return err
} else { }); err == nil {
root = append(root, rosa.Std.Load(rosa.Musl))
}
root = append(root,
rosa.Std.Load(rosa.Mksh),
rosa.Std.Load(rosa.Toybox),
)
if _, _, err := cache.Cure(&root); err == nil {
return errors.New("unreachable") return errors.New("unreachable")
} else if !pkg.IsCollected(err) { } else if !pkg.IsCollected(err) {
return err return err
@@ -585,11 +777,22 @@ func main() {
} }
cured := make(map[pkg.Artifact]cureRes) cured := make(map[pkg.Artifact]cureRes)
for _, a := range root { for _, a := range root {
pathname, checksum, err := cache.Cure(a) if err := cm.Do(func(cache *pkg.Cache) error {
if err != nil { pathname, checksum, err := cache.Cure(a)
if err == nil {
cured[a] = cureRes{pathname, checksum}
}
return err
}); err != nil {
return err
}
}
// explicitly open for direct error-free use from this point
if cm.c == nil {
if err := cm.open(); err != nil {
return err return err
} }
cured[a] = cureRes{pathname, checksum}
} }
layers := pkg.PromoteLayers(root, func(a pkg.Artifact) ( layers := pkg.PromoteLayers(root, func(a pkg.Artifact) (
@@ -599,7 +802,7 @@ func main() {
res := cured[a] res := cured[a]
return res.pathname, res.checksum return res.pathname, res.checksum
}, func(i int, d pkg.Artifact) { }, func(i int, d pkg.Artifact) {
r := pkg.Encode(cache.Ident(d).Value()) r := pkg.Encode(cm.c.Ident(d).Value())
if s, ok := d.(fmt.Stringer); ok { if s, ok := d.(fmt.Stringer); ok {
if name := s.String(); name != "" { if name := s.String(); name != "" {
r += "-" + name r += "-" + name
@@ -618,6 +821,7 @@ func main() {
z.Hostname = "localhost" z.Hostname = "localhost"
z.Uid, z.Gid = (1<<10)-1, (1<<10)-1 z.Uid, z.Gid = (1<<10)-1, (1<<10)-1
z.Stdin, z.Stdout, z.Stderr = os.Stdin, os.Stdout, os.Stderr z.Stdin, z.Stdout, z.Stderr = os.Stdin, os.Stdout, os.Stderr
z.Quiet = !cm.verboseInit
if s, ok := os.LookupEnv("TERM"); ok { if s, ok := os.LookupEnv("TERM"); ok {
z.Env = append(z.Env, "TERM="+s) z.Env = append(z.Env, "TERM="+s)
} }
@@ -663,22 +867,19 @@ func main() {
} }
return z.Wait() return z.Wait()
}, },
). ).Flag(
Flag( &flagNet,
&flagNet, "net", command.BoolFlag(false),
"net", command.BoolFlag(false), "Share host net namespace",
"Share host net namespace", ).Flag(
). &flagSession,
Flag( "session", command.BoolFlag(true),
&flagSession, "Retain session",
"session", command.BoolFlag(true), ).Flag(
"Retain session", &flagWithToolchain,
). "with-toolchain", command.BoolFlag(false),
Flag( "Include the stage2 LLVM toolchain",
&flagWithToolchain, )
"with-toolchain", command.BoolFlag(false),
"Include the stage2 LLVM toolchain",
)
} }
@@ -689,9 +890,7 @@ func main() {
) )
c.MustParse(os.Args[1:], func(err error) { c.MustParse(os.Args[1:], func(err error) {
if cache != nil { cm.Close()
cache.Close()
}
if w, ok := err.(interface{ Unwrap() []error }); !ok { if w, ok := err.(interface{ Unwrap() []error }); !ok {
log.Fatal(err) log.Fatal(err)
} else { } else {

47
cmd/mbf/main_test.go Normal file
View File

@@ -0,0 +1,47 @@
package main
import (
"net"
"os"
"testing"
"hakurei.app/internal/rosa"
)
// TestMain configures rosa before any test runs.
// NOTE(review): DropCaches("", OptLLVMNoLTO) presumably resets cached
// state for the native target with LTO disabled — confirm against
// internal/rosa.
func TestMain(m *testing.M) {
	rosa.DropCaches("", rosa.OptLLVMNoLTO)
	os.Exit(m.Run())
}
// TestCureAll remotely cures every preset artifact via the daemon
// socket named by the ROSA_TEST_DAEMON environment variable; it is
// skipped unless the verbose flag is set and the variable is present.
func TestCureAll(t *testing.T) {
	t.Parallel()

	const env = "ROSA_TEST_DAEMON"
	if !testing.Verbose() {
		t.Skip("verbose flag not set")
	}
	sockPath, ok := os.LookupEnv(env)
	if !ok {
		t.Skip(env + " not set")
	}

	daemonAddr := net.UnixAddr{Net: "unix", Name: sockPath}
	t.Cleanup(func() {
		// On failure, stop whatever cures are still pending on the daemon.
		if !t.Failed() {
			return
		}
		if err := abortRemote(t.Context(), &daemonAddr, false); err != nil {
			t.Fatal(err)
		}
	})

	for i := range rosa.PresetEnd {
		artifact := rosa.PArtifact(i)
		t.Run(rosa.GetMetadata(artifact).Name, func(t *testing.T) {
			if _, err := cureRemote(t.Context(), &daemonAddr, rosa.Std.Load(artifact), 0); err != nil {
				t.Error(err)
			}
		})
	}
}

View File

@@ -20,11 +20,14 @@
}; };
virtualisation = { virtualisation = {
# Hopefully reduces spurious test failures:
memorySize = if pkgs.stdenv.hostPlatform.is32bit then 2046 else 8192;
diskSize = 6 * 1024; diskSize = 6 * 1024;
qemu.options = [ qemu.options = [
# Increase test performance: # Increase test performance:
"-smp 8" "-smp 16"
]; ];
}; };

View File

@@ -28,7 +28,7 @@ testers.nixosTest {
# For go tests: # For go tests:
(pkgs.writeShellScriptBin "sharefs-workload-hakurei-tests" '' (pkgs.writeShellScriptBin "sharefs-workload-hakurei-tests" ''
cp -r "${self.packages.${system}.hakurei.src}" "/sdcard/hakurei" && cd "/sdcard/hakurei" cp -r "${self.packages.${system}.hakurei.src}" "/sdcard/hakurei" && cd "/sdcard/hakurei"
${fhs}/bin/hakurei-fhs -c 'CC="clang -O3 -Werror" go test ./...' ${fhs}/bin/hakurei-fhs -c 'ROSA_SKIP_BINFMT=1 CC="clang -O3 -Werror" go test ./...'
'') '')
]; ];

46
container/binfmt.go Normal file
View File

@@ -0,0 +1,46 @@
package container
import (
"strings"
"unsafe"
"hakurei.app/check"
)
// escapeBinfmt escapes magic/mask sequences in a [BinfmtEntry].
func escapeBinfmt(buf *strings.Builder, s string) string {
const lowerhex = "0123456789abcdef"
buf.Reset()
for _, c := range unsafe.Slice(unsafe.StringData(s), len(s)) {
switch c {
case 0, '\\', ':':
buf.WriteString(`\x`)
buf.WriteByte(lowerhex[c>>4])
buf.WriteByte(lowerhex[c&0xf])
default:
buf.WriteByte(c)
}
}
return buf.String()
}
// BinfmtEntry is an entry to be registered by the init process.
// Validate with [BinfmtEntry.Valid] before use.
type BinfmtEntry struct {
	// The offset of the magic/mask in the file, counted in bytes.
	// The byte type bounds it to 0..255; Valid further requires
	// Offset plus the pattern length to stay below 128.
	Offset byte
	// The byte sequence binfmt_misc is matching for.
	Magic string
	// An (optional, defaults to all 0xff) mask.
	Mask string
	// The program that should be invoked with the binary as first argument.
	Interpreter *check.Absolute
}
// Valid reports whether e can be registered into the kernel: e must be
// non-nil with a non-nil interpreter path shorter than 128 bytes, and
// the magic/mask pattern starting at Offset must end below byte 128.
func (e *BinfmtEntry) Valid() bool {
	if e == nil || e.Interpreter == nil {
		return false
	}
	if len(e.Interpreter.String()) >= 128 {
		return false
	}
	return int(e.Offset)+max(len(e.Magic), len(e.Mask)) < 128
}

62
container/binfmt_test.go Normal file
View File

@@ -0,0 +1,62 @@
package container
import (
"strings"
"testing"
"hakurei.app/fhs"
)
// TestEscapeBinfmt exercises escapeBinfmt against representative magic
// and mask byte sequences, including bytes that require hex escaping.
func TestEscapeBinfmt(t *testing.T) {
	t.Parallel()

	for _, tt := range []struct {
		name  string
		magic string
		want  string
	}{
		{"packed DOS applications", "\x0eDEX", "\x0eDEX"},
		{"riscv64 magic",
			"\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xf3\x00",
			"\x7fELF\x02\x01\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\x02\\x00\xf3\\x00"},
		{"riscv64 mask",
			"\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff",
			"\xff\xff\xff\xff\xff\xff\xff\\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff"},
	} {
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			if got := escapeBinfmt(new(strings.Builder), tt.magic); got != tt.want {
				t.Errorf("escapeBinfmt: %q, want %q", got, tt.want)
			}
		})
	}
}
// TestBinfmtEntry checks Valid against the boundary conditions it
// enforces: zero value, excessive offset, oversized magic/mask, and a
// minimal valid entry.
func TestBinfmtEntry(t *testing.T) {
	t.Parallel()

	for _, tt := range []struct {
		name  string
		e     BinfmtEntry
		valid bool
	}{
		{"zero", BinfmtEntry{}, false},
		{"large offset", BinfmtEntry{Offset: 128}, false},
		{"long magic", BinfmtEntry{Magic: strings.Repeat("\x00", 128)}, false},
		{"long mask", BinfmtEntry{Mask: strings.Repeat("\x00", 128)}, false},
		{"valid", BinfmtEntry{Interpreter: fhs.AbsRoot}, true},
	} {
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			if got := tt.e.Valid(); got != tt.valid {
				t.Errorf("Valid: %v", !tt.valid)
			}
		})
	}
}

View File

@@ -18,6 +18,7 @@ const (
CAP_SETPCAP = 0x8 CAP_SETPCAP = 0x8
CAP_NET_ADMIN = 0xc CAP_NET_ADMIN = 0xc
CAP_DAC_OVERRIDE = 0x1 CAP_DAC_OVERRIDE = 0x1
CAP_SETFCAP = 0x1f
) )
type ( type (

View File

@@ -67,6 +67,9 @@ type (
// Copied to the underlying [exec.Cmd]. // Copied to the underlying [exec.Cmd].
WaitDelay time.Duration WaitDelay time.Duration
// Suppress verbose output of init.
Quiet bool
cmd *exec.Cmd cmd *exec.Cmd
ctx context.Context ctx context.Context
msg message.Msg msg message.Msg
@@ -88,12 +91,20 @@ type (
// Time to wait for processes lingering after the initial process terminates. // Time to wait for processes lingering after the initial process terminates.
AdoptWaitDelay time.Duration AdoptWaitDelay time.Duration
// Map uid/gid 0 in the init process. Requires [FstypeProc] attached to
// [fhs.Proc] in the container filesystem.
InitAsRoot bool
// Mapped Uid in user namespace. // Mapped Uid in user namespace.
Uid int Uid int
// Mapped Gid in user namespace. // Mapped Gid in user namespace.
Gid int Gid int
// Hostname value in UTS namespace. // Hostname value in UTS namespace.
Hostname string Hostname string
// Register binfmt_misc entries.
Binfmt []BinfmtEntry
// Alternative pathname to attach binfmt_misc filesystem. The zero value
// requires [FstypeProc] to be made available at [fhs.Proc].
BinfmtPath *check.Absolute
// Sequential container setup ops. // Sequential container setup ops.
*Ops *Ops
@@ -213,6 +224,9 @@ func (p *Container) Start() error {
if p.cmd.Process != nil { if p.cmd.Process != nil {
return errors.New("container: already started") return errors.New("container: already started")
} }
if !p.InitAsRoot && len(p.Binfmt) > 0 {
return errors.New("container: init as root required, but not enabled")
}
if err := ensureCloseOnExec(); err != nil { if err := ensureCloseOnExec(); err != nil {
return err return err
@@ -283,6 +297,18 @@ func (p *Container) Start() error {
if !p.HostNet { if !p.HostNet {
p.cmd.SysProcAttr.Cloneflags |= CLONE_NEWNET p.cmd.SysProcAttr.Cloneflags |= CLONE_NEWNET
} }
if p.InitAsRoot {
p.cmd.SysProcAttr.AmbientCaps = append(p.cmd.SysProcAttr.AmbientCaps,
// mappings during init as root
CAP_SETFCAP,
)
if !p.SeccompDisable &&
len(p.SeccompRules) == 0 &&
p.SeccompPresets&std.PresetDenyNS != 0 {
return errors.New("container: as root requires late namespace creation")
}
}
// place setup pipe before user supplied extra files, this is later restored by init // place setup pipe before user supplied extra files, this is later restored by init
if r, w, err := os.Pipe(); err != nil { if r, w, err := os.Pipe(); err != nil {
@@ -342,8 +368,6 @@ func (p *Container) Start() error {
Err: ENOSYS, Err: ENOSYS,
Origin: true, Origin: true,
} }
} else {
p.msg.Verbosef("landlock abi version %d", abi)
} }
if rulesetFd, err := rulesetAttr.Create(0); err != nil { if rulesetFd, err := rulesetAttr.Create(0); err != nil {
@@ -353,7 +377,6 @@ func (p *Container) Start() error {
Err: err, Err: err,
} }
} else { } else {
p.msg.Verbosef("enforcing landlock ruleset %s", rulesetAttr)
if err = landlock.RestrictSelf(rulesetFd, 0); err != nil { if err = landlock.RestrictSelf(rulesetFd, 0); err != nil {
_ = Close(rulesetFd) _ = Close(rulesetFd)
return &StartError{ return &StartError{
@@ -410,7 +433,6 @@ func (p *Container) Start() error {
} }
} }
p.msg.Verbose("starting container init")
if err := p.cmd.Start(); err != nil { if err := p.cmd.Start(); err != nil {
return &StartError{ return &StartError{
Step: "start container init", Step: "start container init",
@@ -481,7 +503,6 @@ func (p *Container) Serve() (err error) {
} }
case <-done: case <-done:
p.msg.Verbose("setup payload took", time.Since(t))
return return
} }
}(p.setup[1]) }(p.setup[1])
@@ -491,7 +512,7 @@ func (p *Container) Serve() (err error) {
Getuid(), Getuid(),
Getgid(), Getgid(),
len(p.ExtraFiles), len(p.ExtraFiles),
p.msg.IsVerbose(), p.msg.IsVerbose() && !p.Quiet,
}) })
} }

View File

@@ -16,6 +16,8 @@ import (
"strings" "strings"
"syscall" "syscall"
"testing" "testing"
"time"
"unsafe"
"hakurei.app/check" "hakurei.app/check"
"hakurei.app/command" "hakurei.app/command"
@@ -233,6 +235,9 @@ func earlyMnt(mnt ...*vfs.MountInfoEntry) func(*testing.T, context.Context) []*v
return func(*testing.T, context.Context) []*vfs.MountInfoEntry { return mnt } return func(*testing.T, context.Context) []*vfs.MountInfoEntry { return mnt }
} }
//go:linkname toHost hakurei.app/container.toHost
func toHost(name string) string
var containerTestCases = []struct { var containerTestCases = []struct {
name string name string
filter bool filter bool
@@ -332,13 +337,15 @@ var containerTestCases = []struct {
func(t *testing.T, ctx context.Context) []*vfs.MountInfoEntry { func(t *testing.T, ctx context.Context) []*vfs.MountInfoEntry {
return []*vfs.MountInfoEntry{ return []*vfs.MountInfoEntry{
ent("/", hst.PrivateTmp, "rw", "overlay", "overlay", ent("/", hst.PrivateTmp, "rw", "overlay", "overlay",
"rw,lowerdir="+ "rw"+
container.InternalToHostOvlEscape(ctx.Value(testVal("lower0")).(*check.Absolute).String())+":"+ ",lowerdir+="+
container.InternalToHostOvlEscape(ctx.Value(testVal("lower1")).(*check.Absolute).String())+ toHost(ctx.Value(testVal("lower0")).(*check.Absolute).String())+
",lowerdir+="+
toHost(ctx.Value(testVal("lower1")).(*check.Absolute).String())+
",upperdir="+ ",upperdir="+
container.InternalToHostOvlEscape(ctx.Value(testVal("upper")).(*check.Absolute).String())+ toHost(ctx.Value(testVal("upper")).(*check.Absolute).String())+
",workdir="+ ",workdir="+
container.InternalToHostOvlEscape(ctx.Value(testVal("work")).(*check.Absolute).String())+ toHost(ctx.Value(testVal("work")).(*check.Absolute).String())+
",redirect_dir=nofollow,uuid=on,userxattr"), ",redirect_dir=nofollow,uuid=on,userxattr"),
} }
}, },
@@ -388,9 +395,11 @@ var containerTestCases = []struct {
func(t *testing.T, ctx context.Context) []*vfs.MountInfoEntry { func(t *testing.T, ctx context.Context) []*vfs.MountInfoEntry {
return []*vfs.MountInfoEntry{ return []*vfs.MountInfoEntry{
ent("/", hst.PrivateTmp, "rw", "overlay", "overlay", ent("/", hst.PrivateTmp, "rw", "overlay", "overlay",
"ro,lowerdir="+ "ro"+
container.InternalToHostOvlEscape(ctx.Value(testVal("lower0")).(*check.Absolute).String())+":"+ ",lowerdir+="+
container.InternalToHostOvlEscape(ctx.Value(testVal("lower1")).(*check.Absolute).String())+ toHost(ctx.Value(testVal("lower0")).(*check.Absolute).String())+
",lowerdir+="+
toHost(ctx.Value(testVal("lower1")).(*check.Absolute).String())+
",redirect_dir=nofollow,userxattr"), ",redirect_dir=nofollow,userxattr"),
} }
}, },
@@ -400,39 +409,11 @@ var containerTestCases = []struct {
func TestContainer(t *testing.T) { func TestContainer(t *testing.T) {
t.Parallel() t.Parallel()
t.Run("cancel", testContainerCancel(nil, func(t *testing.T, c *container.Container) { var suffix string
wantErr := context.Canceled runTests:
wantExitCode := 0
if err := c.Wait(); !reflect.DeepEqual(err, wantErr) {
if m, ok := container.InternalMessageFromError(err); ok {
t.Error(m)
}
t.Errorf("Wait: error = %#v, want %#v", err, wantErr)
}
if ps := c.ProcessState(); ps == nil {
t.Errorf("ProcessState unexpectedly returned nil")
} else if code := ps.ExitCode(); code != wantExitCode {
t.Errorf("ExitCode: %d, want %d", code, wantExitCode)
}
}))
t.Run("forward", testContainerCancel(func(c *container.Container) {
c.ForwardCancel = true
}, func(t *testing.T, c *container.Container) {
var exitError *exec.ExitError
if err := c.Wait(); !errors.As(err, &exitError) {
if m, ok := container.InternalMessageFromError(err); ok {
t.Error(m)
}
t.Errorf("Wait: error = %v", err)
}
if code := exitError.ExitCode(); code != blockExitCodeInterrupt {
t.Errorf("ExitCode: %d, want %d", code, blockExitCodeInterrupt)
}
}))
for i, tc := range containerTestCases { for i, tc := range containerTestCases {
t.Run(tc.name, func(t *testing.T) { _suffix := suffix
t.Run(tc.name+_suffix, func(t *testing.T) {
t.Parallel() t.Parallel()
wantOps, wantOpsCtx := tc.ops(t) wantOps, wantOpsCtx := tc.ops(t)
@@ -456,6 +437,8 @@ func TestContainer(t *testing.T) {
c.SeccompDisable = !tc.filter c.SeccompDisable = !tc.filter
c.RetainSession = tc.session c.RetainSession = tc.session
c.HostNet = tc.net c.HostNet = tc.net
c.InitAsRoot = _suffix != ""
c.Env = append(c.Env, "HAKUREI_TEST_SUFFIX="+_suffix)
if info.CanDegrade { if info.CanDegrade {
if _, err := landlock.GetABI(); err != nil { if _, err := landlock.GetABI(); err != nil {
if !errors.Is(err, syscall.ENOSYS) { if !errors.Is(err, syscall.ENOSYS) {
@@ -465,6 +448,9 @@ func TestContainer(t *testing.T) {
t.Log("Landlock LSM is unavailable, enabling HostAbstract") t.Log("Landlock LSM is unavailable, enabling HostAbstract")
} }
} }
if c.InitAsRoot {
c.SeccompPresets &= ^std.PresetDenyNS
}
c. c.
Readonly(check.MustAbs(pathReadonly), 0755). Readonly(check.MustAbs(pathReadonly), 0755).
@@ -533,6 +519,11 @@ func TestContainer(t *testing.T) {
} }
}) })
} }
if suffix == "" {
suffix = " as root"
goto runTests
}
} }
func ent(root, target, vfsOptstr, fsType, source, fsOptstr string) *vfs.MountInfoEntry { func ent(root, target, vfsOptstr, fsType, source, fsOptstr string) *vfs.MountInfoEntry {
@@ -555,49 +546,118 @@ func hostnameFromTestCase(name string) string {
} }
func testContainerCancel( func testContainerCancel(
t *testing.T,
containerExtra func(c *container.Container), containerExtra func(c *container.Container),
waitCheck func(t *testing.T, c *container.Container), waitCheck func(ps *os.ProcessState, waitErr error),
) func(t *testing.T) { ) {
return func(t *testing.T) { ctx, cancel := context.WithCancel(t.Context())
t.Parallel()
ctx, cancel := context.WithCancel(t.Context())
c := helperNewContainer(ctx, "block") c := helperNewContainer(ctx, "block")
c.Stdout, c.Stderr = os.Stdout, os.Stderr c.Stdout, c.Stderr = os.Stdout, os.Stderr
if containerExtra != nil { if containerExtra != nil {
containerExtra(c) containerExtra(c)
}
ready := make(chan struct{})
if r, w, err := os.Pipe(); err != nil {
t.Fatalf("cannot pipe: %v", err)
} else {
c.ExtraFiles = append(c.ExtraFiles, w)
go func() {
defer close(ready)
if _, err = r.Read(make([]byte, 1)); err != nil {
panic(err.Error())
}
}()
}
if err := c.Start(); err != nil {
if m, ok := container.InternalMessageFromError(err); ok {
t.Fatal(m)
} else {
t.Fatalf("cannot start container: %v", err)
}
} else if err = c.Serve(); err != nil {
if m, ok := container.InternalMessageFromError(err); ok {
t.Error(m)
} else {
t.Errorf("cannot serve setup params: %v", err)
}
}
<-ready
cancel()
waitCheck(t, c)
} }
ready := make(chan struct{})
var waitErr error
r, w, err := os.Pipe()
if err != nil {
t.Fatalf("cannot pipe: %v", err)
}
c.ExtraFiles = append(c.ExtraFiles, w)
go func() {
defer close(ready)
if _, _err := r.Read(make([]byte, 1)); _err != nil {
panic(_err)
}
}()
if err = c.Start(); err != nil {
if m, ok := container.InternalMessageFromError(err); ok {
t.Fatal(m)
} else {
t.Fatalf("cannot start container: %v", err)
}
}
done := make(chan struct{})
go func() {
defer close(done)
waitErr = c.Wait()
_ = r.SetReadDeadline(time.Now())
}()
if err = c.Serve(); err != nil {
if m, ok := container.InternalMessageFromError(err); ok {
t.Error(m)
} else {
t.Errorf("cannot serve setup params: %v", err)
}
}
<-ready
cancel()
<-done
waitCheck(c.ProcessState(), waitErr)
}
func TestForward(t *testing.T) {
t.Parallel()
f := func(ps *os.ProcessState, waitErr error) {
var exitError *exec.ExitError
if !errors.As(waitErr, &exitError) {
if m, ok := container.InternalMessageFromError(waitErr); ok {
t.Error(m)
}
t.Errorf("Wait: error = %v", waitErr)
}
if code := exitError.ExitCode(); code != blockExitCodeInterrupt {
t.Errorf("ExitCode: %d, want %d", code, blockExitCodeInterrupt)
}
}
t.Run("direct", func(t *testing.T) {
t.Parallel()
testContainerCancel(t, func(c *container.Container) {
c.ForwardCancel = true
}, f)
})
t.Run("as root", func(t *testing.T) {
testContainerCancel(t, func(c *container.Container) {
c.ForwardCancel = true
c.InitAsRoot = true
c.Proc(fhs.AbsProc)
}, f)
})
}
func TestCancel(t *testing.T) {
t.Parallel()
f := func(ps *os.ProcessState, waitErr error) {
wantErr := context.Canceled
if !reflect.DeepEqual(waitErr, wantErr) {
if m, ok := container.InternalMessageFromError(waitErr); ok {
t.Error(m)
}
t.Errorf("Wait: error = %#v, want %#v", waitErr, wantErr)
}
if ps == nil {
t.Errorf("ProcessState unexpectedly returned nil")
} else if code := ps.ExitCode(); code != 0 {
t.Errorf("ExitCode: %d, want %d", code, 0)
}
}
t.Run("direct", func(t *testing.T) {
t.Parallel()
testContainerCancel(t, nil, f)
})
t.Run("as root", func(t *testing.T) {
testContainerCancel(t, func(c *container.Container) {
c.InitAsRoot = true
c.Proc(fhs.AbsProc)
}, f)
})
} }
func TestContainerString(t *testing.T) { func TestContainerString(t *testing.T) {
@@ -633,6 +693,8 @@ func init() {
}) })
c.Command("container", command.UsageInternal, func(args []string) error { c.Command("container", command.UsageInternal, func(args []string) error {
asRoot := os.Getenv("HAKUREI_TEST_SUFFIX") == " as root"
if len(args) != 1 { if len(args) != 1 {
return syscall.EINVAL return syscall.EINVAL
} }
@@ -650,6 +712,66 @@ func init() {
return fmt.Errorf("gid: %d, want %d", gid, tc.gid) return fmt.Errorf("gid: %d, want %d", gid, tc.gid)
} }
// no attack surface increase during as root due to no_new_privs
var wantBounding uintptr = 1
asRootNot := " not"
if !asRoot {
wantBounding = 0
asRootNot = ""
}
const (
PR_CAP_AMBIENT = 0x2f
PR_CAP_AMBIENT_IS_SET = 0x1
)
for i := range container.LastCap(nil) + 1 {
r, _, errno := syscall.Syscall(
syscall.SYS_PRCTL,
PR_CAP_AMBIENT,
PR_CAP_AMBIENT_IS_SET,
i,
)
if errno != 0 {
return os.NewSyscallError("prctl", errno)
}
if r != 0 {
return fmt.Errorf("capability %d in ambient set", i)
}
r, _, errno = syscall.Syscall(
syscall.SYS_PRCTL,
syscall.PR_CAPBSET_READ,
i,
0,
)
if errno != 0 {
return os.NewSyscallError("prctl", errno)
}
if r != wantBounding {
return fmt.Errorf("capability %d%s in bounding set", i, asRootNot)
}
}
const _LINUX_CAPABILITY_VERSION_3 = 0x20080522
var capData struct {
effective uint32
permitted uint32
inheritable uint32
}
if _, _, errno := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(&struct {
version uint32
pid int32
}{_LINUX_CAPABILITY_VERSION_3, 0})), uintptr(unsafe.Pointer(&capData)), 0); errno != 0 {
return os.NewSyscallError("capget", errno)
}
if max(capData.effective, capData.permitted, capData.inheritable) != 0 {
return fmt.Errorf(
"effective = %d, permitted = %d, inheritable = %d",
capData.effective, capData.permitted, capData.inheritable,
)
}
wantHost := hostnameFromTestCase(tc.name) wantHost := hostnameFromTestCase(tc.name)
if host, err := os.Hostname(); err != nil { if host, err := os.Hostname(); err != nil {
return fmt.Errorf("cannot get hostname: %v", err) return fmt.Errorf("cannot get hostname: %v", err)
@@ -767,7 +889,7 @@ func TestMain(m *testing.M) {
} }
c.MustParse(os.Args[1:], func(err error) { c.MustParse(os.Args[1:], func(err error) {
if err != nil { if err != nil {
log.Fatal(err.Error()) log.Fatal(err)
} }
}) })
return return

View File

@@ -65,6 +65,8 @@ type syscallDispatcher interface {
remount(msg message.Msg, target string, flags uintptr) error remount(msg message.Msg, target string, flags uintptr) error
// mountTmpfs provides mountTmpfs. // mountTmpfs provides mountTmpfs.
mountTmpfs(fsname, target string, flags uintptr, size int, perm os.FileMode) error mountTmpfs(fsname, target string, flags uintptr, size int, perm os.FileMode) error
// mountOverlay provides mountOverlay.
mountOverlay(target string, options [][2]string) error
// ensureFile provides ensureFile. // ensureFile provides ensureFile.
ensureFile(name string, perm, pperm os.FileMode) error ensureFile(name string, perm, pperm os.FileMode) error
// mustLoopback provides mustLoopback. // mustLoopback provides mustLoopback.
@@ -169,6 +171,9 @@ func (direct) remount(msg message.Msg, target string, flags uintptr) error {
func (k direct) mountTmpfs(fsname, target string, flags uintptr, size int, perm os.FileMode) error { func (k direct) mountTmpfs(fsname, target string, flags uintptr, size int, perm os.FileMode) error {
return mountTmpfs(k, fsname, target, flags, size, perm) return mountTmpfs(k, fsname, target, flags, size, perm)
} }
func (k direct) mountOverlay(target string, options [][2]string) error {
return mountOverlay(target, options)
}
func (direct) ensureFile(name string, perm, pperm os.FileMode) error { func (direct) ensureFile(name string, perm, pperm os.FileMode) error {
return ensureFile(name, perm, pperm) return ensureFile(name, perm, pperm)
} }

View File

@@ -468,6 +468,14 @@ func (k *kstub) mountTmpfs(fsname, target string, flags uintptr, size int, perm
stub.CheckArg(k.Stub, "perm", perm, 4)) stub.CheckArg(k.Stub, "perm", perm, 4))
} }
func (k *kstub) mountOverlay(target string, options [][2]string) error {
k.Helper()
return k.Expects("mountOverlay").Error(
stub.CheckArg(k.Stub, "target", target, 0),
stub.CheckArgReflect(k.Stub, "options", options, 1),
)
}
func (k *kstub) ensureFile(name string, perm, pperm os.FileMode) error { func (k *kstub) ensureFile(name string, perm, pperm os.FileMode) error {
k.Helper() k.Helper()
return k.Expects("ensureFile").Error( return k.Expects("ensureFile").Error(

View File

@@ -118,6 +118,10 @@ func errnoFallback(op, path string, err error) (syscall.Errno, *os.PathError) {
// mount wraps syscall.Mount for error handling. // mount wraps syscall.Mount for error handling.
func mount(source, target, fstype string, flags uintptr, data string) error { func mount(source, target, fstype string, flags uintptr, data string) error {
if max(len(source), len(target), len(data))+1 > os.Getpagesize() {
return &MountError{source, target, fstype, flags, data, syscall.ENOMEM}
}
err := syscall.Mount(source, target, fstype, flags, data) err := syscall.Mount(source, target, fstype, flags, data)
if err == nil { if err == nil {
return nil return nil

View File

@@ -11,11 +11,13 @@ import (
"path/filepath" "path/filepath"
"slices" "slices"
"strconv" "strconv"
"strings"
"sync" "sync"
"sync/atomic" "sync/atomic"
. "syscall" . "syscall"
"time" "time"
"hakurei.app/check"
"hakurei.app/container/seccomp" "hakurei.app/container/seccomp"
"hakurei.app/ext" "hakurei.app/ext"
"hakurei.app/fhs" "hakurei.app/fhs"
@@ -182,23 +184,33 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
cancel() cancel()
} }
uid, gid := param.Uid, param.Gid
if param.InitAsRoot {
uid, gid = 0, 0
}
// write uid/gid map here so parent does not need to set dumpable // write uid/gid map here so parent does not need to set dumpable
if err := k.setDumpable(ext.SUID_DUMP_USER); err != nil { if err := k.setDumpable(ext.SUID_DUMP_USER); err != nil {
k.fatalf(msg, "cannot set SUID_DUMP_USER: %v", err) k.fatalf(msg, "cannot set SUID_DUMP_USER: %v", err)
} }
if err := k.writeFile(fhs.Proc+"self/uid_map", if err := k.writeFile(
append([]byte{}, strconv.Itoa(param.Uid)+" "+strconv.Itoa(param.HostUid)+" 1\n"...), fhs.Proc+"self/uid_map",
0); err != nil { []byte(strconv.Itoa(uid)+" "+strconv.Itoa(param.HostUid)+" 1\n"),
0,
); err != nil {
k.fatalf(msg, "%v", err) k.fatalf(msg, "%v", err)
} }
if err := k.writeFile(fhs.Proc+"self/setgroups", if err := k.writeFile(
fhs.Proc+"self/setgroups",
[]byte("deny\n"), []byte("deny\n"),
0); err != nil && !os.IsNotExist(err) { 0,
); err != nil && !os.IsNotExist(err) {
k.fatalf(msg, "%v", err) k.fatalf(msg, "%v", err)
} }
if err := k.writeFile(fhs.Proc+"self/gid_map", if err := k.writeFile(fhs.Proc+"self/gid_map",
append([]byte{}, strconv.Itoa(param.Gid)+" "+strconv.Itoa(param.HostGid)+" 1\n"...), []byte(strconv.Itoa(gid)+" "+strconv.Itoa(param.HostGid)+" 1\n"),
0); err != nil { 0,
); err != nil {
k.fatalf(msg, "%v", err) k.fatalf(msg, "%v", err)
} }
if err := k.setDumpable(ext.SUID_DUMP_DISABLE); err != nil { if err := k.setDumpable(ext.SUID_DUMP_DISABLE); err != nil {
@@ -223,6 +235,23 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
state := &setupState{process: make(map[int]WaitStatus), Params: &param.Params, Msg: msg, Context: ctx} state := &setupState{process: make(map[int]WaitStatus), Params: &param.Params, Msg: msg, Context: ctx}
defer cancel() defer cancel()
if err := k.mount(SourceTmpfsRootfs, intermediateHostPath, FstypeTmpfs, MS_NODEV|MS_NOSUID, zeroString); err != nil {
k.fatalf(msg, "cannot mount intermediate root: %v", optionalErrorUnwrap(err))
}
if err := k.chdir(intermediateHostPath); err != nil {
k.fatalf(msg, "cannot enter intermediate host path: %v", err)
}
if len(param.Binfmt) > 0 {
for i, e := range param.Binfmt {
if pathname, err := k.evalSymlinks(e.Interpreter.String()); err != nil {
k.fatal(msg, err)
} else if param.Binfmt[i].Interpreter, err = check.NewAbs(pathname); err != nil {
k.fatal(msg, err)
}
}
}
/* early is called right before pivot_root into intermediate root; /* early is called right before pivot_root into intermediate root;
this step is mostly for gathering information that would otherwise be this step is mostly for gathering information that would otherwise be
difficult to obtain via library functions after pivot_root, and difficult to obtain via library functions after pivot_root, and
@@ -242,13 +271,6 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
} }
} }
if err := k.mount(SourceTmpfsRootfs, intermediateHostPath, FstypeTmpfs, MS_NODEV|MS_NOSUID, zeroString); err != nil {
k.fatalf(msg, "cannot mount intermediate root: %v", optionalErrorUnwrap(err))
}
if err := k.chdir(intermediateHostPath); err != nil {
k.fatalf(msg, "cannot enter intermediate host path: %v", err)
}
if err := k.mkdir(sysrootDir, 0755); err != nil { if err := k.mkdir(sysrootDir, 0755); err != nil {
k.fatalf(msg, "%v", err) k.fatalf(msg, "%v", err)
} }
@@ -285,6 +307,48 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
} }
} }
if len(param.Binfmt) > 0 {
const interpreter = "/interpreter"
if param.BinfmtPath == nil {
param.BinfmtPath = fhs.AbsProcSys.Append("fs/binfmt_misc")
}
binfmt := sysrootPath + param.BinfmtPath.String()
if err := k.mkdirAll(binfmt, 0); err != nil {
k.fatal(msg, err)
}
if err := k.mount(
SourceBinfmtMisc,
binfmt,
FstypeBinfmtMisc,
MS_NOSUID|MS_NOEXEC|MS_NODEV,
zeroString,
); err != nil {
k.fatal(msg, err)
}
var buf strings.Builder
buf.Grow(1920)
register := binfmt + "/register"
for i, e := range param.Binfmt {
if err := k.symlink(hostPath+e.Interpreter.String(), interpreter); err != nil {
k.fatal(msg, err)
} else if err = k.writeFile(register, []byte(":"+
strconv.Itoa(i)+":"+
"M:"+
strconv.Itoa(int(e.Offset))+":"+
escapeBinfmt(&buf, e.Magic)+":"+
escapeBinfmt(&buf, e.Mask)+":"+
interpreter+":"+
"F"), 0); err != nil {
k.fatal(msg, err)
} else if err = k.remove(interpreter); err != nil {
k.fatal(msg, err)
}
}
}
// setup requiring host root complete at this point // setup requiring host root complete at this point
if err := k.mount(hostDir, hostDir, zeroString, MS_SILENT|MS_REC|MS_PRIVATE, zeroString); err != nil { if err := k.mount(hostDir, hostDir, zeroString, MS_SILENT|MS_REC|MS_PRIVATE, zeroString); err != nil {
k.fatalf(msg, "cannot make host root rprivate: %v", optionalErrorUnwrap(err)) k.fatalf(msg, "cannot make host root rprivate: %v", optionalErrorUnwrap(err))
@@ -323,11 +387,19 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
} }
} }
var keepCaps []uintptr
if param.Privileged {
keepCaps = append(keepCaps, CAP_SYS_ADMIN, CAP_SETPCAP)
}
if param.InitAsRoot {
keepCaps = append(keepCaps, CAP_SETFCAP)
}
if err := k.capAmbientClearAll(); err != nil { if err := k.capAmbientClearAll(); err != nil {
k.fatalf(msg, "cannot clear the ambient capability set: %v", err) k.fatalf(msg, "cannot clear the ambient capability set: %v", err)
} }
for i := uintptr(0); i <= lastcap; i++ { for i := range lastcap + 1 {
if param.Privileged && i == CAP_SYS_ADMIN { if slices.Contains(keepCaps, i) {
continue continue
} }
if err := k.capBoundingSetDrop(i); err != nil { if err := k.capBoundingSetDrop(i); err != nil {
@@ -336,20 +408,23 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
} }
var keep [2]uint32 var keep [2]uint32
if param.Privileged { for _, c := range keepCaps {
keep[capToIndex(CAP_SYS_ADMIN)] |= capToMask(CAP_SYS_ADMIN) keep[capToIndex(c)] |= capToMask(c)
if err := k.capAmbientRaise(CAP_SYS_ADMIN); err != nil {
k.fatalf(msg, "cannot raise CAP_SYS_ADMIN: %v", err)
}
} }
if err := k.capset( if err := k.capset(
&capHeader{_LINUX_CAPABILITY_VERSION_3, 0}, &capHeader{_LINUX_CAPABILITY_VERSION_3, 0},
&[2]capData{{0, keep[0], keep[0]}, {0, keep[1], keep[1]}}, &[2]capData{{keep[0], keep[0], keep[0]}, {keep[1], keep[1], keep[1]}},
); err != nil { ); err != nil {
k.fatalf(msg, "cannot capset: %v", err) k.fatalf(msg, "cannot capset: %v", err)
} }
for _, c := range keepCaps {
if err := k.capAmbientRaise(c); err != nil {
k.fatalf(msg, "cannot raise %#x: %v", c, err)
}
}
if !param.SeccompDisable { if !param.SeccompDisable {
rules := param.SeccompRules rules := param.SeccompRules
if len(rules) == 0 { // non-empty rules slice always overrides presets if len(rules) == 0 { // non-empty rules slice always overrides presets
@@ -474,6 +549,14 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
cmd.ExtraFiles = extraFiles cmd.ExtraFiles = extraFiles
cmd.Dir = param.Dir.String() cmd.Dir = param.Dir.String()
if param.InitAsRoot {
cmd.SysProcAttr = &SysProcAttr{
Cloneflags: CLONE_NEWUSER,
UidMappings: []SysProcIDMap{{ContainerID: param.Uid, HostID: 0, Size: 1}},
GidMappings: []SysProcIDMap{{ContainerID: param.Gid, HostID: 0, Size: 1}},
}
}
msg.Verbosef("starting initial process %s", param.Path) msg.Verbosef("starting initial process %s", param.Path)
if err := k.start(cmd); err != nil { if err := k.start(cmd); err != nil {
k.fatalf(msg, "%v", err) k.fatalf(msg, "%v", err)

View File

@@ -332,6 +332,8 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("fatalf", stub.ExpectArgs{"invalid op at index %d", []any{0}}, nil, nil), call("fatalf", stub.ExpectArgs{"invalid op at index %d", []any{0}}, nil, nil),
/* end early */ /* end early */
@@ -370,6 +372,8 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("fatalf", stub.ExpectArgs{"invalid op at index %d", []any{0}}, nil, nil), call("fatalf", stub.ExpectArgs{"invalid op at index %d", []any{0}}, nil, nil),
/* end early */ /* end early */
@@ -408,6 +412,8 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", stub.UniqueError(61)), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", stub.UniqueError(61)),
call("fatalf", stub.ExpectArgs{"cannot prepare op at index %d: %v", []any{0, stub.UniqueError(61)}}, nil, nil), call("fatalf", stub.ExpectArgs{"cannot prepare op at index %d: %v", []any{0, stub.UniqueError(61)}}, nil, nil),
@@ -447,6 +453,8 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", &os.PathError{Op: "readlink", Path: "/", Err: stub.UniqueError(60)}), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", &os.PathError{Op: "readlink", Path: "/", Err: stub.UniqueError(60)}),
call("fatal", stub.ExpectArgs{[]any{"cannot readlink /: unique error 60 injected by the test suite"}}, nil, nil), call("fatal", stub.ExpectArgs{[]any{"cannot readlink /: unique error 60 injected by the test suite"}}, nil, nil),
@@ -486,9 +494,6 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, stub.UniqueError(58)), call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, stub.UniqueError(58)),
call("fatalf", stub.ExpectArgs{"cannot mount intermediate root: %v", []any{stub.UniqueError(58)}}, nil, nil), call("fatalf", stub.ExpectArgs{"cannot mount intermediate root: %v", []any{stub.UniqueError(58)}}, nil, nil),
}, },
@@ -526,9 +531,6 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil), call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, stub.UniqueError(56)), call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, stub.UniqueError(56)),
call("fatalf", stub.ExpectArgs{"cannot enter intermediate host path: %v", []any{stub.UniqueError(56)}}, nil, nil), call("fatalf", stub.ExpectArgs{"cannot enter intermediate host path: %v", []any{stub.UniqueError(56)}}, nil, nil),
@@ -567,11 +569,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, stub.UniqueError(54)), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, stub.UniqueError(54)),
call("fatalf", stub.ExpectArgs{"%v", []any{stub.UniqueError(54)}}, nil, nil), call("fatalf", stub.ExpectArgs{"%v", []any{stub.UniqueError(54)}}, nil, nil),
}, },
@@ -609,11 +611,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, stub.UniqueError(52)), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, stub.UniqueError(52)),
call("fatalf", stub.ExpectArgs{"cannot bind sysroot: %v", []any{stub.UniqueError(52)}}, nil, nil), call("fatalf", stub.ExpectArgs{"cannot bind sysroot: %v", []any{stub.UniqueError(52)}}, nil, nil),
@@ -652,11 +654,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, stub.UniqueError(50)), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, stub.UniqueError(50)),
@@ -696,11 +698,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -741,11 +743,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -787,11 +789,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -842,11 +844,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -897,11 +899,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -953,11 +955,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1010,11 +1012,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1069,11 +1071,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1129,11 +1131,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1190,11 +1192,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1252,11 +1254,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1315,11 +1317,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1379,11 +1381,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1444,11 +1446,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1510,11 +1512,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1584,11 +1586,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1622,7 +1624,6 @@ func TestInitEntrypoint(t *testing.T) {
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x5)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x5)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x6)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x6)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x7)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x7)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x8)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x9)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x9)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xa)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xa)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xb)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xb)}, nil, nil),
@@ -1654,8 +1655,9 @@ func TestInitEntrypoint(t *testing.T) {
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x26)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x26)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x27)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x27)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x28)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x28)}, nil, nil),
call("capset", stub.ExpectArgs{&capHeader{_LINUX_CAPABILITY_VERSION_3, 0}, &[2]capData{{0x200100, 0x200100, 0x200100}, {0, 0, 0}}}, nil, nil),
call("capAmbientRaise", stub.ExpectArgs{uintptr(0x15)}, nil, stub.UniqueError(19)), call("capAmbientRaise", stub.ExpectArgs{uintptr(0x15)}, nil, stub.UniqueError(19)),
call("fatalf", stub.ExpectArgs{"cannot raise CAP_SYS_ADMIN: %v", []any{stub.UniqueError(19)}}, nil, nil), call("fatalf", stub.ExpectArgs{"cannot raise %#x: %v", []any{uintptr(0x15), stub.UniqueError(19)}}, nil, nil),
}, },
}, nil}, }, nil},
@@ -1691,11 +1693,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1729,7 +1731,6 @@ func TestInitEntrypoint(t *testing.T) {
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x5)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x5)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x6)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x6)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x7)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x7)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x8)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x9)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x9)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xa)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xa)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xb)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xb)}, nil, nil),
@@ -1761,8 +1762,7 @@ func TestInitEntrypoint(t *testing.T) {
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x26)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x26)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x27)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x27)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x28)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x28)}, nil, nil),
call("capAmbientRaise", stub.ExpectArgs{uintptr(0x15)}, nil, nil), call("capset", stub.ExpectArgs{&capHeader{_LINUX_CAPABILITY_VERSION_3, 0}, &[2]capData{{0x200100, 0x200100, 0x200100}, {0, 0, 0}}}, nil, stub.UniqueError(17)),
call("capset", stub.ExpectArgs{&capHeader{_LINUX_CAPABILITY_VERSION_3, 0}, &[2]capData{{0, 0x200000, 0x200000}, {0, 0, 0}}}, nil, stub.UniqueError(17)),
call("fatalf", stub.ExpectArgs{"cannot capset: %v", []any{stub.UniqueError(17)}}, nil, nil), call("fatalf", stub.ExpectArgs{"cannot capset: %v", []any{stub.UniqueError(17)}}, nil, nil),
}, },
}, nil}, }, nil},
@@ -1799,11 +1799,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1837,7 +1837,6 @@ func TestInitEntrypoint(t *testing.T) {
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x5)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x5)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x6)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x6)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x7)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x7)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x8)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x9)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x9)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xa)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xa)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xb)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xb)}, nil, nil),
@@ -1869,8 +1868,9 @@ func TestInitEntrypoint(t *testing.T) {
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x26)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x26)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x27)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x27)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x28)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x28)}, nil, nil),
call("capset", stub.ExpectArgs{&capHeader{_LINUX_CAPABILITY_VERSION_3, 0}, &[2]capData{{0x200100, 0x200100, 0x200100}, {0, 0, 0}}}, nil, nil),
call("capAmbientRaise", stub.ExpectArgs{uintptr(0x15)}, nil, nil), call("capAmbientRaise", stub.ExpectArgs{uintptr(0x15)}, nil, nil),
call("capset", stub.ExpectArgs{&capHeader{_LINUX_CAPABILITY_VERSION_3, 0}, &[2]capData{{0, 0x200000, 0x200000}, {0, 0, 0}}}, nil, nil), call("capAmbientRaise", stub.ExpectArgs{uintptr(0x8)}, nil, nil),
call("verbosef", stub.ExpectArgs{"resolving presets %#x", []any{std.FilterPreset(0xf)}}, nil, nil), call("verbosef", stub.ExpectArgs{"resolving presets %#x", []any{std.FilterPreset(0xf)}}, nil, nil),
call("seccompLoad", stub.ExpectArgs{seccomp.Preset(0xf, 0), seccomp.ExportFlag(0)}, nil, stub.UniqueError(15)), call("seccompLoad", stub.ExpectArgs{seccomp.Preset(0xf, 0), seccomp.ExportFlag(0)}, nil, stub.UniqueError(15)),
call("fatalf", stub.ExpectArgs{"cannot load syscall filter: %v", []any{stub.UniqueError(15)}}, nil, nil), call("fatalf", stub.ExpectArgs{"cannot load syscall filter: %v", []any{stub.UniqueError(15)}}, nil, nil),
@@ -1908,11 +1908,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -2032,11 +2032,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(4), nil), call("lastcap", stub.ExpectArgs{}, uintptr(4), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -2132,11 +2132,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(4), nil), call("lastcap", stub.ExpectArgs{}, uintptr(4), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -2232,11 +2232,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(4), nil), call("lastcap", stub.ExpectArgs{}, uintptr(4), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -2323,11 +2323,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(4), nil), call("lastcap", stub.ExpectArgs{}, uintptr(4), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -2418,11 +2418,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(4), nil), call("lastcap", stub.ExpectArgs{}, uintptr(4), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -2520,11 +2520,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -2659,11 +2659,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -2697,7 +2697,6 @@ func TestInitEntrypoint(t *testing.T) {
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x5)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x5)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x6)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x6)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x7)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x7)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x8)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x9)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x9)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xa)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xa)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xb)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xb)}, nil, nil),
@@ -2729,8 +2728,9 @@ func TestInitEntrypoint(t *testing.T) {
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x26)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x26)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x27)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x27)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x28)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x28)}, nil, nil),
call("capset", stub.ExpectArgs{&capHeader{_LINUX_CAPABILITY_VERSION_3, 0}, &[2]capData{{0x200100, 0x200100, 0x200100}, {0, 0, 0}}}, nil, nil),
call("capAmbientRaise", stub.ExpectArgs{uintptr(0x15)}, nil, nil), call("capAmbientRaise", stub.ExpectArgs{uintptr(0x15)}, nil, nil),
call("capset", stub.ExpectArgs{&capHeader{_LINUX_CAPABILITY_VERSION_3, 0}, &[2]capData{{0, 0x200000, 0x200000}, {0, 0, 0}}}, nil, nil), call("capAmbientRaise", stub.ExpectArgs{uintptr(0x8)}, nil, nil),
call("verbosef", stub.ExpectArgs{"resolving presets %#x", []any{std.FilterPreset(0xf)}}, nil, nil), call("verbosef", stub.ExpectArgs{"resolving presets %#x", []any{std.FilterPreset(0xf)}}, nil, nil),
call("seccompLoad", stub.ExpectArgs{seccomp.Preset(0xf, 0), seccomp.ExportFlag(0)}, nil, nil), call("seccompLoad", stub.ExpectArgs{seccomp.Preset(0xf, 0), seccomp.ExportFlag(0)}, nil, nil),
call("verbosef", stub.ExpectArgs{"%d filter rules loaded", []any{73}}, nil, nil), call("verbosef", stub.ExpectArgs{"%d filter rules loaded", []any{73}}, nil, nil),

View File

@@ -4,9 +4,9 @@ import (
"encoding/gob" "encoding/gob"
"fmt" "fmt"
"slices" "slices"
"strings"
"hakurei.app/check" "hakurei.app/check"
"hakurei.app/ext"
"hakurei.app/fhs" "hakurei.app/fhs"
) )
@@ -150,7 +150,7 @@ func (o *MountOverlayOp) early(_ *setupState, k syscallDispatcher) error {
if v, err := k.evalSymlinks(o.Upper.String()); err != nil { if v, err := k.evalSymlinks(o.Upper.String()); err != nil {
return err return err
} else { } else {
o.upper = check.EscapeOverlayDataSegment(toHost(v)) o.upper = toHost(v)
} }
} }
@@ -158,7 +158,7 @@ func (o *MountOverlayOp) early(_ *setupState, k syscallDispatcher) error {
if v, err := k.evalSymlinks(o.Work.String()); err != nil { if v, err := k.evalSymlinks(o.Work.String()); err != nil {
return err return err
} else { } else {
o.work = check.EscapeOverlayDataSegment(toHost(v)) o.work = toHost(v)
} }
} }
} }
@@ -168,12 +168,39 @@ func (o *MountOverlayOp) early(_ *setupState, k syscallDispatcher) error {
if v, err := k.evalSymlinks(a.String()); err != nil { if v, err := k.evalSymlinks(a.String()); err != nil {
return err return err
} else { } else {
o.lower[i] = check.EscapeOverlayDataSegment(toHost(v)) o.lower[i] = toHost(v)
} }
} }
return nil return nil
} }
// mountOverlay sets up an overlay mount via [ext.FS].
func mountOverlay(target string, options [][2]string) error {
	fs, err := ext.OpenFS(SourceOverlay, 0)
	if err != nil {
		return err
	}

	// Configure the context and attach it; any failure here still requires
	// the context fd to be released before returning.
	setup := func() error {
		if err := fs.SetString("source", SourceOverlay); err != nil {
			return err
		}
		for _, opt := range options {
			if err := fs.SetString(opt[0], opt[1]); err != nil {
				return err
			}
		}
		if err := fs.SetFlag(OptionOverlayUserxattr); err != nil {
			return err
		}
		return fs.Mount(target, 0)
	}

	if err := setup(); err != nil {
		// Best-effort close; the setup error takes precedence.
		_ = fs.Close()
		return err
	}
	return fs.Close()
}
func (o *MountOverlayOp) apply(state *setupState, k syscallDispatcher) error { func (o *MountOverlayOp) apply(state *setupState, k syscallDispatcher) error {
target := o.Target.String() target := o.Target.String()
if !o.noPrefix { if !o.noPrefix {
@@ -194,7 +221,7 @@ func (o *MountOverlayOp) apply(state *setupState, k syscallDispatcher) error {
} }
} }
options := make([]string, 0, 4) options := make([][2]string, 0, 2+len(o.lower))
if o.upper == zeroString && o.work == zeroString { // readonly if o.upper == zeroString && o.work == zeroString { // readonly
if len(o.Lower) < 2 { if len(o.Lower) < 2 {
@@ -205,15 +232,16 @@ func (o *MountOverlayOp) apply(state *setupState, k syscallDispatcher) error {
if len(o.Lower) == 0 { if len(o.Lower) == 0 {
return &OverlayArgumentError{OverlayEmptyLower, zeroString} return &OverlayArgumentError{OverlayEmptyLower, zeroString}
} }
options = append(options, options = append(options, [][2]string{
OptionOverlayUpperdir+"="+o.upper, {OptionOverlayUpperdir, o.upper},
OptionOverlayWorkdir+"="+o.work) {OptionOverlayWorkdir, o.work},
}...)
}
for _, lower := range o.lower {
options = append(options, [2]string{OptionOverlayLowerdir + "+", lower})
} }
options = append(options,
OptionOverlayLowerdir+"="+strings.Join(o.lower, check.SpecialOverlayPath),
OptionOverlayUserxattr)
return k.mount(SourceOverlay, target, FstypeOverlay, 0, strings.Join(options, check.SpecialOverlayOption)) return k.mountOverlay(target, options)
} }
func (o *MountOverlayOp) late(*setupState, syscallDispatcher) error { return nil } func (o *MountOverlayOp) late(*setupState, syscallDispatcher) error { return nil }

View File

@@ -97,13 +97,12 @@ func TestMountOverlayOp(t *testing.T) {
call("mkdirAll", stub.ExpectArgs{"/sysroot", os.FileMode(0705)}, nil, nil), call("mkdirAll", stub.ExpectArgs{"/sysroot", os.FileMode(0705)}, nil, nil),
call("mkdirTemp", stub.ExpectArgs{"/", "overlay.upper.*"}, "overlay.upper.32768", nil), call("mkdirTemp", stub.ExpectArgs{"/", "overlay.upper.*"}, "overlay.upper.32768", nil),
call("mkdirTemp", stub.ExpectArgs{"/", "overlay.work.*"}, "overlay.work.32768", nil), call("mkdirTemp", stub.ExpectArgs{"/", "overlay.work.*"}, "overlay.work.32768", nil),
call("mount", stub.ExpectArgs{"overlay", "/sysroot", "overlay", uintptr(0), "" + call("mountOverlay", stub.ExpectArgs{"/sysroot", [][2]string{
"upperdir=overlay.upper.32768," + {"upperdir", "overlay.upper.32768"},
"workdir=overlay.work.32768," + {"workdir", "overlay.work.32768"},
"lowerdir=" + {"lowerdir+", `/host/var/lib/planterette/base/debian:f92c9052`},
`/host/var/lib/planterette/base/debian\:f92c9052:` + {"lowerdir+", `/host/var/lib/planterette/app/org.chromium.Chromium@debian:f92c9052`},
`/host/var/lib/planterette/app/org.chromium.Chromium@debian\:f92c9052,` + }}, nil, nil),
"userxattr"}, nil, nil),
}, nil}, }, nil},
{"short lower ro", &Params{ParentPerm: 0755}, &MountOverlayOp{ {"short lower ro", &Params{ParentPerm: 0755}, &MountOverlayOp{
@@ -129,11 +128,10 @@ func TestMountOverlayOp(t *testing.T) {
call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store0"}, "/mnt-root/nix/.ro-store0", nil), call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store0"}, "/mnt-root/nix/.ro-store0", nil),
}, nil, []stub.Call{ }, nil, []stub.Call{
call("mkdirAll", stub.ExpectArgs{"/nix/store", os.FileMode(0755)}, nil, nil), call("mkdirAll", stub.ExpectArgs{"/nix/store", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"overlay", "/nix/store", "overlay", uintptr(0), "" + call("mountOverlay", stub.ExpectArgs{"/nix/store", [][2]string{
"lowerdir=" + {"lowerdir+", "/host/mnt-root/nix/.ro-store"},
"/host/mnt-root/nix/.ro-store:" + {"lowerdir+", "/host/mnt-root/nix/.ro-store0"},
"/host/mnt-root/nix/.ro-store0," + }}, nil, nil),
"userxattr"}, nil, nil),
}, nil}, }, nil},
{"success ro", &Params{ParentPerm: 0755}, &MountOverlayOp{ {"success ro", &Params{ParentPerm: 0755}, &MountOverlayOp{
@@ -147,11 +145,10 @@ func TestMountOverlayOp(t *testing.T) {
call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store0"}, "/mnt-root/nix/.ro-store0", nil), call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store0"}, "/mnt-root/nix/.ro-store0", nil),
}, nil, []stub.Call{ }, nil, []stub.Call{
call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0755)}, nil, nil), call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"overlay", "/sysroot/nix/store", "overlay", uintptr(0), "" + call("mountOverlay", stub.ExpectArgs{"/sysroot/nix/store", [][2]string{
"lowerdir=" + {"lowerdir+", "/host/mnt-root/nix/.ro-store"},
"/host/mnt-root/nix/.ro-store:" + {"lowerdir+", "/host/mnt-root/nix/.ro-store0"},
"/host/mnt-root/nix/.ro-store0," + }}, nil, nil),
"userxattr"}, nil, nil),
}, nil}, }, nil},
{"nil lower", &Params{ParentPerm: 0700}, &MountOverlayOp{ {"nil lower", &Params{ParentPerm: 0700}, &MountOverlayOp{
@@ -219,7 +216,11 @@ func TestMountOverlayOp(t *testing.T) {
call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store"}, "/mnt-root/nix/ro-store", nil), call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store"}, "/mnt-root/nix/ro-store", nil),
}, nil, []stub.Call{ }, nil, []stub.Call{
call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0700)}, nil, nil), call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0700)}, nil, nil),
call("mount", stub.ExpectArgs{"overlay", "/sysroot/nix/store", "overlay", uintptr(0), "upperdir=/host/mnt-root/nix/.rw-store/.upper,workdir=/host/mnt-root/nix/.rw-store/.work,lowerdir=/host/mnt-root/nix/ro-store,userxattr"}, nil, stub.UniqueError(0)), call("mountOverlay", stub.ExpectArgs{"/sysroot/nix/store", [][2]string{
{"upperdir", "/host/mnt-root/nix/.rw-store/.upper"},
{"workdir", "/host/mnt-root/nix/.rw-store/.work"},
{"lowerdir+", "/host/mnt-root/nix/ro-store"},
}}, nil, stub.UniqueError(0)),
}, stub.UniqueError(0)}, }, stub.UniqueError(0)},
{"success single layer", &Params{ParentPerm: 0700}, &MountOverlayOp{ {"success single layer", &Params{ParentPerm: 0700}, &MountOverlayOp{
@@ -233,11 +234,11 @@ func TestMountOverlayOp(t *testing.T) {
call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store"}, "/mnt-root/nix/ro-store", nil), call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store"}, "/mnt-root/nix/ro-store", nil),
}, nil, []stub.Call{ }, nil, []stub.Call{
call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0700)}, nil, nil), call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0700)}, nil, nil),
call("mount", stub.ExpectArgs{"overlay", "/sysroot/nix/store", "overlay", uintptr(0), "" + call("mountOverlay", stub.ExpectArgs{"/sysroot/nix/store", [][2]string{
"upperdir=/host/mnt-root/nix/.rw-store/.upper," + {"upperdir", "/host/mnt-root/nix/.rw-store/.upper"},
"workdir=/host/mnt-root/nix/.rw-store/.work," + {"workdir", "/host/mnt-root/nix/.rw-store/.work"},
"lowerdir=/host/mnt-root/nix/ro-store," + {"lowerdir+", "/host/mnt-root/nix/ro-store"},
"userxattr"}, nil, nil), }}, nil, nil),
}, nil}, }, nil},
{"success", &Params{ParentPerm: 0700}, &MountOverlayOp{ {"success", &Params{ParentPerm: 0700}, &MountOverlayOp{
@@ -261,16 +262,15 @@ func TestMountOverlayOp(t *testing.T) {
call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store3"}, "/mnt-root/nix/ro-store3", nil), call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store3"}, "/mnt-root/nix/ro-store3", nil),
}, nil, []stub.Call{ }, nil, []stub.Call{
call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0700)}, nil, nil), call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0700)}, nil, nil),
call("mount", stub.ExpectArgs{"overlay", "/sysroot/nix/store", "overlay", uintptr(0), "" + call("mountOverlay", stub.ExpectArgs{"/sysroot/nix/store", [][2]string{
"upperdir=/host/mnt-root/nix/.rw-store/.upper," + {"upperdir", "/host/mnt-root/nix/.rw-store/.upper"},
"workdir=/host/mnt-root/nix/.rw-store/.work," + {"workdir", "/host/mnt-root/nix/.rw-store/.work"},
"lowerdir=" + {"lowerdir+", "/host/mnt-root/nix/ro-store"},
"/host/mnt-root/nix/ro-store:" + {"lowerdir+", "/host/mnt-root/nix/ro-store0"},
"/host/mnt-root/nix/ro-store0:" + {"lowerdir+", "/host/mnt-root/nix/ro-store1"},
"/host/mnt-root/nix/ro-store1:" + {"lowerdir+", "/host/mnt-root/nix/ro-store2"},
"/host/mnt-root/nix/ro-store2:" + {"lowerdir+", "/host/mnt-root/nix/ro-store3"},
"/host/mnt-root/nix/ro-store3," + }}, nil, nil),
"userxattr"}, nil, nil),
}, nil}, }, nil},
}) })

View File

@@ -40,6 +40,9 @@ const (
// SourceMqueue is used when mounting mqueue. // SourceMqueue is used when mounting mqueue.
// Note that any source value is allowed when fstype is [FstypeMqueue]. // Note that any source value is allowed when fstype is [FstypeMqueue].
SourceMqueue = "mqueue" SourceMqueue = "mqueue"
// SourceBinfmtMisc is used when mounting binfmt_misc.
// Note that any source value is allowed when fstype is [FstypeBinfmtMisc].
SourceBinfmtMisc = "binfmt_misc"
// SourceOverlay is used when mounting overlay. // SourceOverlay is used when mounting overlay.
// Note that any source value is allowed when fstype is [FstypeOverlay]. // Note that any source value is allowed when fstype is [FstypeOverlay].
SourceOverlay = "overlay" SourceOverlay = "overlay"
@@ -70,6 +73,9 @@ const (
// FstypeMqueue represents the mqueue pseudo-filesystem. // FstypeMqueue represents the mqueue pseudo-filesystem.
// This filesystem type is usually mounted on /dev/mqueue. // This filesystem type is usually mounted on /dev/mqueue.
FstypeMqueue = "mqueue" FstypeMqueue = "mqueue"
// FstypeBinfmtMisc represents the binfmt_misc pseudo-filesystem.
// This filesystem type is usually mounted on /proc/sys/fs/binfmt_misc.
FstypeBinfmtMisc = "binfmt_misc"
// FstypeOverlay represents the overlay pseudo-filesystem. // FstypeOverlay represents the overlay pseudo-filesystem.
// This filesystem type can be mounted anywhere in the container filesystem. // This filesystem type can be mounted anywhere in the container filesystem.
FstypeOverlay = "overlay" FstypeOverlay = "overlay"

View File

@@ -10,7 +10,6 @@ import (
"testing" "testing"
"unsafe" "unsafe"
"hakurei.app/check"
"hakurei.app/vfs" "hakurei.app/vfs"
) )
@@ -50,9 +49,6 @@ func TestToHost(t *testing.T) {
} }
} }
// InternalToHostOvlEscape exports toHost passed to [check.EscapeOverlayDataSegment].
func InternalToHostOvlEscape(s string) string { return check.EscapeOverlayDataSegment(toHost(s)) }
func TestCreateFile(t *testing.T) { func TestCreateFile(t *testing.T) {
t.Run("nonexistent", func(t *testing.T) { t.Run("nonexistent", func(t *testing.T) {
t.Run("mkdir", func(t *testing.T) { t.Run("mkdir", func(t *testing.T) {

267
ext/fs.go Normal file
View File

@@ -0,0 +1,267 @@
package ext
import (
"os"
"runtime"
"syscall"
"unsafe"
)
// Constants below mirror include/uapi/linux/mount.h.

/*
 * move_mount() flags.
 */
const (
	MOVE_MOUNT_F_SYMLINKS   = 1 << iota /* Follow symlinks on from path */
	MOVE_MOUNT_F_AUTOMOUNTS /* Follow automounts on from path */
	MOVE_MOUNT_F_EMPTY_PATH /* Empty from path permitted */
	_
	MOVE_MOUNT_T_SYMLINKS   /* Follow symlinks on to path */
	MOVE_MOUNT_T_AUTOMOUNTS /* Follow automounts on to path */
	MOVE_MOUNT_T_EMPTY_PATH /* Empty to path permitted */
	_
	MOVE_MOUNT_SET_GROUP /* Set sharing group instead */
	MOVE_MOUNT_BENEATH   /* Mount beneath top mount */
)

/*
 * fsopen() flags.
 */
const (
	FSOPEN_CLOEXEC = 1 << iota
)

/*
 * fspick() flags.
 */
const (
	FSPICK_CLOEXEC = 1 << iota
	FSPICK_SYMLINK_NOFOLLOW
	FSPICK_NO_AUTOMOUNT
	FSPICK_EMPTY_PATH
)

/*
 * The type of fsconfig() call made.
 */
const (
	FSCONFIG_SET_FLAG        = iota /* Set parameter, supplying no value */
	FSCONFIG_SET_STRING             /* Set parameter, supplying a string value */
	FSCONFIG_SET_BINARY             /* Set parameter, supplying a binary blob value */
	FSCONFIG_SET_PATH               /* Set parameter, supplying an object by path */
	FSCONFIG_SET_PATH_EMPTY         /* Set parameter, supplying an object by (empty) path */
	FSCONFIG_SET_FD                 /* Set parameter, supplying an object by fd */
	FSCONFIG_CMD_CREATE             /* Create new or reuse existing superblock */
	FSCONFIG_CMD_RECONFIGURE        /* Invoke superblock reconfiguration */
	FSCONFIG_CMD_CREATE_EXCL        /* Create new superblock, fail if reusing existing superblock */
)

/*
 * fsmount() flags.
 */
const (
	FSMOUNT_CLOEXEC = 1 << iota
)

/*
 * Mount attributes.
 */
const (
	MOUNT_ATTR_RDONLY      = 0x00000001 /* Mount read-only */
	MOUNT_ATTR_NOSUID      = 0x00000002 /* Ignore suid and sgid bits */
	MOUNT_ATTR_NODEV       = 0x00000004 /* Disallow access to device special files */
	MOUNT_ATTR_NOEXEC      = 0x00000008 /* Disallow program execution */
	MOUNT_ATTR__ATIME      = 0x00000070 /* Setting on how atime should be updated */
	MOUNT_ATTR_RELATIME    = 0x00000000 /* - Update atime relative to mtime/ctime. */
	MOUNT_ATTR_NOATIME     = 0x00000010 /* - Do not update access times. */
	MOUNT_ATTR_STRICTATIME = 0x00000020 /* - Always perform atime updates */
	MOUNT_ATTR_NODIRATIME  = 0x00000080 /* Do not update directory access times */
	MOUNT_ATTR_IDMAP       = 0x00100000 /* Idmap mount to @userns_fd in struct mount_attr. */
	MOUNT_ATTR_NOSYMFOLLOW = 0x00200000 /* Do not follow symlinks */
)
// FS provides low-level wrappers around the suite of file-descriptor-based
// mount facilities in Linux.
type FS struct {
	fd uintptr         // filesystem context fd (see newFS callers: fsopen/fspick)
	c  runtime.Cleanup // closes fd if the FS becomes unreachable without Close
}
// newFS allocates a new [FS] for the specified fd, registering a cleanup
// that releases the descriptor should the value be collected unclosed.
func newFS(fd uintptr) *FS {
	out := &FS{fd: fd}
	out.c = runtime.AddCleanup(out, func(raw uintptr) {
		// Best-effort release; nothing meaningful to do with the error here.
		_ = syscall.Close(int(raw))
	}, fd)
	return out
}
// Close closes the underlying filesystem context.
// Calling Close on a nil receiver returns EINVAL.
func (fs *FS) Close() error {
	if fs == nil {
		return syscall.EINVAL
	}
	// Cancel the GC-time cleanup; the fd is released explicitly below.
	fs.c.Stop()
	return syscall.Close(int(fs.fd))
}
// OpenFS creates a new filesystem context for the filesystem named fsname
// via fsopen(2). FSOPEN_CLOEXEC is unconditionally set on the descriptor.
func OpenFS(fsname string, flags int) (*FS, error) {
	name, err := syscall.BytePtrFromString(fsname)
	if err != nil {
		return nil, err
	}
	fd, _, errno := syscall.Syscall(
		SYS_FSOPEN,
		uintptr(unsafe.Pointer(name)),
		uintptr(flags|FSOPEN_CLOEXEC),
		0,
	)
	if errno != 0 {
		return nil, os.NewSyscallError("fsopen", errno)
	}
	return newFS(fd), nil
}
// PickFS selects an existing filesystem for reconfiguration via fspick(2).
// FSPICK_CLOEXEC is unconditionally set on the descriptor.
func PickFS(dirfd int, pathname string, flags int) (*FS, error) {
	name, err := syscall.BytePtrFromString(pathname)
	if err != nil {
		return nil, err
	}
	fd, _, errno := syscall.Syscall(
		SYS_FSPICK,
		uintptr(dirfd),
		uintptr(unsafe.Pointer(name)),
		uintptr(flags|FSPICK_CLOEXEC),
	)
	if errno != 0 {
		return nil, os.NewSyscallError("fspick", errno)
	}
	return newFS(fd), nil
}
// config configures the new or existing filesystem context via fsconfig(2).
func (fs *FS) config(cmd uint, key *byte, value unsafe.Pointer, aux int) error {
	if _, _, errno := syscall.Syscall6(
		SYS_FSCONFIG,
		fs.fd,
		uintptr(cmd),
		uintptr(unsafe.Pointer(key)),
		uintptr(value),
		uintptr(aux),
		0,
	); errno != 0 {
		return os.NewSyscallError("fsconfig", errno)
	}
	return nil
}
// SetFlag sets the flag parameter named by key. ([FSCONFIG_SET_FLAG])
func (fs *FS) SetFlag(key string) error {
	k, err := syscall.BytePtrFromString(key)
	if err != nil {
		return err
	}
	return fs.config(FSCONFIG_SET_FLAG, k, nil, 0)
}
// SetString sets the string parameter named by key to the value specified by
// value. ([FSCONFIG_SET_STRING])
func (fs *FS) SetString(key, value string) error {
	k, err := syscall.BytePtrFromString(key)
	if err != nil {
		return err
	}
	v, err := syscall.BytePtrFromString(value)
	if err != nil {
		return err
	}
	return fs.config(FSCONFIG_SET_STRING, k, unsafe.Pointer(v), 0)
}
// mount instantiates a mount object from the filesystem context via
// fsmount(2). FSMOUNT_CLOEXEC is unconditionally included in flags.
// On error the returned descriptor mirrors the raw syscall return value
// and must not be used.
func (fs *FS) mount(flags, attrFlags int) (int, error) {
	r, _, errno := syscall.Syscall(
		SYS_FSMOUNT,
		fs.fd,
		uintptr(flags|FSMOUNT_CLOEXEC),
		uintptr(attrFlags),
	)
	if errno != 0 {
		return int(r), os.NewSyscallError("fsmount", errno)
	}
	return int(r), nil
}
// MoveMount moves or attaches a mount object within the filesystem
// hierarchy via move_mount(2), transferring it from the location named by
// fromDirfd/fromPathname to the one named by toDirfd/toPathname.
func MoveMount(
	fromDirfd int,
	fromPathname string,
	toDirfd int,
	toPathname string,
	flags int,
) error {
	from, err := syscall.BytePtrFromString(fromPathname)
	if err != nil {
		return err
	}
	to, err := syscall.BytePtrFromString(toPathname)
	if err != nil {
		return err
	}
	if _, _, errno := syscall.Syscall6(
		SYS_MOVE_MOUNT,
		uintptr(fromDirfd),
		uintptr(unsafe.Pointer(from)),
		uintptr(toDirfd),
		uintptr(unsafe.Pointer(to)),
		uintptr(flags),
		0,
	); errno != 0 {
		return os.NewSyscallError("move_mount", errno)
	}
	return nil
}
// Mount attaches the underlying filesystem context to the specified pathname.
//
// It finalises the context with FSCONFIG_CMD_CREATE_EXCL, instantiates a
// mount object, attaches it at pathname, and closes the transient mount fd.
// An error from the move takes precedence over one from the close.
func (fs *FS) Mount(pathname string, attrFlags int) error {
	if err := fs.config(FSCONFIG_CMD_CREATE_EXCL, nil, nil, 0); err != nil {
		return err
	}
	mfd, err := fs.mount(0, attrFlags)
	if err != nil {
		return err
	}
	moveErr := MoveMount(
		mfd, "",
		-1, pathname,
		MOVE_MOUNT_F_EMPTY_PATH,
	)
	closeErr := syscall.Close(mfd)
	if moveErr != nil {
		return moveErr
	}
	return closeErr
}

View File

@@ -42,6 +42,8 @@ var (
AbsDevShm = unsafeAbs(DevShm) AbsDevShm = unsafeAbs(DevShm)
// AbsProc is [Proc] as [check.Absolute]. // AbsProc is [Proc] as [check.Absolute].
AbsProc = unsafeAbs(Proc) AbsProc = unsafeAbs(Proc)
// AbsProcSys is [ProcSys] as [check.Absolute].
AbsProcSys = unsafeAbs(ProcSys)
// AbsProcSelfExe is [ProcSelfExe] as [check.Absolute]. // AbsProcSelfExe is [ProcSelfExe] as [check.Absolute].
AbsProcSelfExe = unsafeAbs(ProcSelfExe) AbsProcSelfExe = unsafeAbs(ProcSelfExe)
// AbsSys is [Sys] as [check.Absolute]. // AbsSys is [Sys] as [check.Absolute].

View File

@@ -64,78 +64,6 @@ func TestFlatten(t *testing.T) {
{Mode: fs.ModeDir | 0700, Path: "work"}, {Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C"), nil}, }, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C"), nil},
{"sample cache file", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX": {Mode: 0400, Data: []byte{0}},
"checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq": {Mode: 0400, Data: []byte{0, 0, 0, 0, 0xad, 0xb, 0, 4, 0xfe, 0xfe, 0, 0, 0xfe, 0xca, 0, 0}},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
"identifier/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
"identifier/cafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
"identifier/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: 0400, Path: "checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq", Data: []byte{0, 0, 0, 0, 0xad, 0xb, 0, 4, 0xfe, 0xfe, 0, 0, 0xfe, 0xca, 0, 0}},
{Mode: 0400, Path: "checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX", Data: []byte{0}},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq", Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/cafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe", Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX", Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("St9rlE-mGZ5gXwiv_hzQ_B8bZP-UUvSNmf4nHUZzCMOumb6hKnheZSe0dmnuc4Q2"), nil},
{"sample http get cure", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU": {Mode: 0400, Data: []byte("\x7f\xe1\x69\xa2\xdd\x63\x96\x26\x83\x79\x61\x8b\xf0\x3f\xd5\x16\x9a\x39\x3a\xdb\xcf\xb1\xbc\x8d\x33\xff\x75\xee\x62\x56\xa9\xf0\x27\xac\x13\x94\x69")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/oM-2pUlk-mOxK1t3aMWZer69UdOQlAXiAgMrpZ1476VoOqpYVP1aGFS9_HYy-D8_": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU")},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: 0400, Path: "checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU", Data: []byte("\x7f\xe1\x69\xa2\xdd\x63\x96\x26\x83\x79\x61\x8b\xf0\x3f\xd5\x16\x9a\x39\x3a\xdb\xcf\xb1\xbc\x8d\x33\xff\x75\xee\x62\x56\xa9\xf0\x27\xac\x13\x94\x69")},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/oM-2pUlk-mOxK1t3aMWZer69UdOQlAXiAgMrpZ1476VoOqpYVP1aGFS9_HYy-D8_", Data: []byte("../checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU")},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("L_0RFHpr9JUS4Zp14rz2dESSRvfLzpvqsLhR1-YjQt8hYlmEdVl7vI3_-v8UNPKs"), nil},
{"sample directory step simple", fstest.MapFS{
".": {Mode: fs.ModeDir | 0500},
"check": {Mode: 0400, Data: []byte{0, 0}},
"lib": {Mode: fs.ModeDir | 0700},
"lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
"lib/pkgconfig": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0500, Path: "."},
{Mode: 0400, Path: "check", Data: []byte{0, 0}},
{Mode: fs.ModeDir | 0700, Path: "lib"},
{Mode: fs.ModeSymlink | 0777, Path: "lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
{Mode: fs.ModeDir | 0700, Path: "lib/pkgconfig"},
}, pkg.MustDecode("qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b"), nil},
{"sample directory step garbage", fstest.MapFS{ {"sample directory step garbage", fstest.MapFS{
".": {Mode: fs.ModeDir | 0500}, ".": {Mode: fs.ModeDir | 0500},
@@ -151,421 +79,6 @@ func TestFlatten(t *testing.T) {
{Mode: fs.ModeDir | 0500, Path: "lib/pkgconfig"}, {Mode: fs.ModeDir | 0500, Path: "lib/pkgconfig"},
}, pkg.MustDecode("CUx-3hSbTWPsbMfDhgalG4Ni_GmR9TnVX8F99tY_P5GtkYvczg9RrF5zO0jX9XYT"), nil}, }, pkg.MustDecode("CUx-3hSbTWPsbMfDhgalG4Ni_GmR9TnVX8F99tY_P5GtkYvczg9RrF5zO0jX9XYT"), nil},
{"sample directory", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b": {Mode: fs.ModeDir | 0500},
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/check": {Mode: 0400, Data: []byte{0, 0}},
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib": {Mode: fs.ModeDir | 0700},
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/pkgconfig": {Mode: fs.ModeDir | 0700},
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
"identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b"},
{Mode: 0400, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/check", Data: []byte{0, 0}},
{Mode: fs.ModeDir | 0700, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib"},
{Mode: fs.ModeSymlink | 0777, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
{Mode: fs.ModeDir | 0700, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/pkgconfig"},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY", Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa", Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("WVpvsVqVKg9Nsh744x57h51AuWUoUR2nnh8Md-EYBQpk6ziyTuUn6PLtF2e0Eu_d"), nil},
{"sample no assume checksum", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/Aubi5EG4_Y8DhL9bQ3Q4HFBhLRF7X5gt9D3CNCQfT-TeBtlRXc7Zi_JYZEMoCC7M": {Mode: fs.ModeDir | 0500},
"checksum/Aubi5EG4_Y8DhL9bQ3Q4HFBhLRF7X5gt9D3CNCQfT-TeBtlRXc7Zi_JYZEMoCC7M/check": {Mode: 0400, Data: []byte{}},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/_wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/Aubi5EG4_Y8DhL9bQ3Q4HFBhLRF7X5gt9D3CNCQfT-TeBtlRXc7Zi_JYZEMoCC7M")},
"identifier/_wEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/Aubi5EG4_Y8DhL9bQ3Q4HFBhLRF7X5gt9D3CNCQfT-TeBtlRXc7Zi_JYZEMoCC7M")},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/Aubi5EG4_Y8DhL9bQ3Q4HFBhLRF7X5gt9D3CNCQfT-TeBtlRXc7Zi_JYZEMoCC7M"},
{Mode: 0400, Path: "checksum/Aubi5EG4_Y8DhL9bQ3Q4HFBhLRF7X5gt9D3CNCQfT-TeBtlRXc7Zi_JYZEMoCC7M/check", Data: []byte{}},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/Aubi5EG4_Y8DhL9bQ3Q4HFBhLRF7X5gt9D3CNCQfT-TeBtlRXc7Zi_JYZEMoCC7M")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_wEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/Aubi5EG4_Y8DhL9bQ3Q4HFBhLRF7X5gt9D3CNCQfT-TeBtlRXc7Zi_JYZEMoCC7M")},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("OC290t23aimNo2Rp2pPwan5GI2KRLRdOwYxXQMD9jw0QROgHnNXWodoWdV0hwu2w"), nil},
{"sample tar step unpack", fstest.MapFS{
".": {Mode: fs.ModeDir | 0500},
"checksum": {Mode: fs.ModeDir | 0500},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP": {Mode: fs.ModeDir | 0500},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check": {Mode: 0400, Data: []byte{0, 0}},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib": {Mode: fs.ModeDir | 0500},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig": {Mode: fs.ModeDir | 0500},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
"identifier": {Mode: fs.ModeDir | 0500},
"identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"work": {Mode: fs.ModeDir | 0500},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0500, Path: "."},
{Mode: fs.ModeDir | 0500, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP"},
{Mode: 0400, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check", Data: []byte{0, 0}},
{Mode: fs.ModeDir | 0500, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib"},
{Mode: fs.ModeSymlink | 0777, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
{Mode: fs.ModeDir | 0500, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig"},
{Mode: fs.ModeDir | 0500, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
{Mode: fs.ModeDir | 0500, Path: "work"},
}, pkg.MustDecode("cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM"), nil},
{"sample tar", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM": {Mode: fs.ModeDir | 0500},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum": {Mode: fs.ModeDir | 0500},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP": {Mode: fs.ModeDir | 0500},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check": {Mode: 0400, Data: []byte{0, 0}},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib": {Mode: fs.ModeDir | 0500},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig": {Mode: fs.ModeDir | 0500},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier": {Mode: fs.ModeDir | 0500},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/work": {Mode: fs.ModeDir | 0500},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/W5S65DEhawz_WKaok5NjUKLmnD9dNl5RPauNJjcOVcB3VM4eGhSaLGmXbL8vZpiw": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
"identifier/rg7F1D5hwv6o4xctjD5zDq4i5MD0mArTsUIWfhUbik8xC6Bsyt3mjXXOm3goojTz": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM"},
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP"},
{Mode: 0400, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check", Data: []byte{0, 0}},
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib"},
{Mode: fs.ModeSymlink | 0777, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig"},
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
{Mode: fs.ModeSymlink | 0777, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/work"},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/W5S65DEhawz_WKaok5NjUKLmnD9dNl5RPauNJjcOVcB3VM4eGhSaLGmXbL8vZpiw", Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/rg7F1D5hwv6o4xctjD5zDq4i5MD0mArTsUIWfhUbik8xC6Bsyt3mjXXOm3goojTz", Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
{Mode: fs.ModeDir | 0700, Path: "temp"},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("NQTlc466JmSVLIyWklm_u8_g95jEEb98PxJU-kjwxLpfdjwMWJq0G8ze9R4Vo1Vu"), nil},
{"sample tar expand step unpack", fstest.MapFS{
".": {Mode: fs.ModeDir | 0500},
"libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0500, Path: "."},
{Mode: fs.ModeSymlink | 0777, Path: "libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
}, pkg.MustDecode("CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN"), nil},
{"sample tar expand", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN": {Mode: fs.ModeDir | 0500},
"checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/W5S65DEhawz_WKaok5NjUKLmnD9dNl5RPauNJjcOVcB3VM4eGhSaLGmXbL8vZpiw": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
"identifier/_v1blm2h-_KA-dVaawdpLas6MjHc6rbhhFS8JWwx8iJxZGUu8EBbRrhr5AaZ9PJL": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN"},
{Mode: fs.ModeSymlink | 0777, Path: "checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/W5S65DEhawz_WKaok5NjUKLmnD9dNl5RPauNJjcOVcB3VM4eGhSaLGmXbL8vZpiw", Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_v1blm2h-_KA-dVaawdpLas6MjHc6rbhhFS8JWwx8iJxZGUu8EBbRrhr5AaZ9PJL", Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
{Mode: fs.ModeDir | 0700, Path: "temp"},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("hSoSSgCYTNonX3Q8FjvjD1fBl-E-BQyA6OTXro2OadXqbST4tZ-akGXszdeqphRe"), nil},
{"testtool", fstest.MapFS{
".": {Mode: fs.ModeDir | 0500},
"check": {Mode: 0400, Data: []byte{0}},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0500, Path: "."},
{Mode: 0400, Path: "check", Data: []byte{0}},
}, pkg.MustDecode("GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"), nil},
{"sample exec container", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
"identifier/dztPS6jRjiZtCF4_p8AzfnxGp6obkhrgFVsxdodbKWUoAEVtDz3MykepJB4kI_ks": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
{Mode: 0400, Path: "checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb", Data: []byte{}},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/dztPS6jRjiZtCF4_p8AzfnxGp6obkhrgFVsxdodbKWUoAEVtDz3MykepJB4kI_ks", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
{Mode: fs.ModeDir | 0700, Path: "temp"},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("Q5DluWQCAeohLoiGRImurwFp3vdz9IfQCoj7Fuhh73s4KQPRHpEQEnHTdNHmB8Fx"), nil},
{"testtool net", fstest.MapFS{
".": {Mode: fs.ModeDir | 0500},
"check": {Mode: 0400, Data: []byte("net")},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0500, Path: "."},
{Mode: 0400, Path: "check", Data: []byte("net")},
}, pkg.MustDecode("a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W"), nil},
{"sample exec net container", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
"checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W": {Mode: fs.ModeDir | 0500},
"checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W/check": {Mode: 0400, Data: []byte("net")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/G8qPxD9puvvoOVV7lrT80eyDeIl3G_CCFoKw12c8mCjMdG1zF7NEPkwYpNubClK3": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W")},
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
{Mode: 0400, Path: "checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb", Data: []byte{}},
{Mode: fs.ModeDir | 0500, Path: "checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W"},
{Mode: 0400, Path: "checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W/check", Data: []byte("net")},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/G8qPxD9puvvoOVV7lrT80eyDeIl3G_CCFoKw12c8mCjMdG1zF7NEPkwYpNubClK3", Data: []byte("../checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
{Mode: fs.ModeDir | 0700, Path: "temp"},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("bPYvvqxpfV7xcC1EptqyKNK1klLJgYHMDkzBcoOyK6j_Aj5hb0mXNPwTwPSK5F6Z"), nil},
{"sample exec container overlay root", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/RdMA-mubnrHuu3Ky1wWyxauSYCO0ZH_zCPUj3uDHqkfwv5sGcByoF_g5PjlGiClb": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/RdMA-mubnrHuu3Ky1wWyxauSYCO0ZH_zCPUj3uDHqkfwv5sGcByoF_g5PjlGiClb", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
{Mode: fs.ModeDir | 0700, Path: "temp"},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("PO2DSSCa4yoSgEYRcCSZfQfwow1yRigL3Ry-hI0RDI4aGuFBha-EfXeSJnG_5_Rl"), nil},
{"sample exec container overlay work", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/5hlaukCirnXE4W_RSLJFOZN47Z5RiHnacXzdFp_70cLgiJUGR6cSb_HaFftkzi0-": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/5hlaukCirnXE4W_RSLJFOZN47Z5RiHnacXzdFp_70cLgiJUGR6cSb_HaFftkzi0-", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
{Mode: fs.ModeDir | 0700, Path: "temp"},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("iaRt6l_Wm2n-h5UsDewZxQkCmjZjyL8r7wv32QT2kyV55-Lx09Dq4gfg9BiwPnKs"), nil},
{"sample exec container multiple layers", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
"checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK": {Mode: fs.ModeDir | 0500},
"checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK/check": {Mode: 0400, Data: []byte("layers")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
"identifier/B-kc5iJMx8GtlCua4dz6BiJHnDAOUfPjgpbKq4e-QEn0_CZkSYs3fOA1ve06qMs2": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK")},
"identifier/p1t_drXr34i-jZNuxDMLaMOdL6tZvQqhavNafGynGqxOZoXAUTSn7kqNh3Ovv3DT": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
{Mode: 0400, Path: "checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb", Data: []byte{}},
{Mode: fs.ModeDir | 0500, Path: "checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK"},
{Mode: 0400, Path: "checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK/check", Data: []byte("layers")},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/B-kc5iJMx8GtlCua4dz6BiJHnDAOUfPjgpbKq4e-QEn0_CZkSYs3fOA1ve06qMs2", Data: []byte("../checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/p1t_drXr34i-jZNuxDMLaMOdL6tZvQqhavNafGynGqxOZoXAUTSn7kqNh3Ovv3DT", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
{Mode: fs.ModeDir | 0700, Path: "temp"},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("O2YzyR7IUGU5J2CADy0hUZ3A5NkP_Vwzs4UadEdn2oMZZVWRtH0xZGJ3HXiimTnZ"), nil},
{"sample exec container layer promotion", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/kvJIqZo5DKFOxC2ZQ-8_nPaQzEAz9cIm3p6guO-uLqm-xaiPu7oRkSnsu411jd_U": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"identifier/xXTIYcXmgJWNLC91c417RRrNM9cjELwEZHpGvf8Fk_GNP5agRJp_SicD0w9aMeLJ": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/kvJIqZo5DKFOxC2ZQ-8_nPaQzEAz9cIm3p6guO-uLqm-xaiPu7oRkSnsu411jd_U", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/xXTIYcXmgJWNLC91c417RRrNM9cjELwEZHpGvf8Fk_GNP5agRJp_SicD0w9aMeLJ", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
{Mode: fs.ModeDir | 0700, Path: "temp"},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("3EaW6WibLi9gl03_UieiFPaFcPy5p4x3JPxrnLJxGaTI-bh3HU9DK9IMx7c3rrNm"), nil},
{"sample file short", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX": {Mode: 0400, Data: []byte{0}},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/3376ALA7hIUm2LbzH2fDvRezgzod1eTK_G6XjyOgbM2u-6swvkFaF0BOwSl_juBi": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: 0400, Path: "checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX", Data: []byte{0}},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/3376ALA7hIUm2LbzH2fDvRezgzod1eTK_G6XjyOgbM2u-6swvkFaF0BOwSl_juBi", Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("iR6H5OIsyOW4EwEgtm9rGzGF6DVtyHLySEtwnFE8bnus9VJcoCbR4JIek7Lw-vwT"), nil},
} }
for _, tc := range testCases { for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {

View File

@@ -9,8 +9,10 @@ import (
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"runtime"
"slices" "slices"
"strconv" "strconv"
"sync"
"syscall" "syscall"
"time" "time"
"unique" "unique"
@@ -27,6 +29,11 @@ import (
// AbsWork is the container pathname [TContext.GetWorkDir] is mounted on. // AbsWork is the container pathname [TContext.GetWorkDir] is mounted on.
var AbsWork = fhs.AbsRoot.Append("work/") var AbsWork = fhs.AbsRoot.Append("work/")
// EnvJobs is the name of the environment variable holding a decimal
// representation of the preferred job count. Its value must not affect cure
// outcome.
const EnvJobs = "CURE_JOBS"
// ExecPath is a slice of [Artifact] and the [check.Absolute] pathname to make // ExecPath is a slice of [Artifact] and the [check.Absolute] pathname to make
// it available at under in the container. // it available at under in the container.
type ExecPath struct { type ExecPath struct {
@@ -89,6 +96,32 @@ func MustPath(pathname string, writable bool, a ...Artifact) ExecPath {
return ExecPath{check.MustAbs(pathname), a, writable} return ExecPath{check.MustAbs(pathname), a, writable}
} }
var (
binfmt map[string]container.BinfmtEntry
binfmtMu sync.RWMutex
)
// RegisterArch arranges for [KindExec] and [KindExecNet] to support a new
// architecture via a binfmt_misc entry. Each architecture must be registered
// at most once.
func RegisterArch(arch string, e container.BinfmtEntry) {
if arch == "" {
panic(UnsupportedArchError(arch))
}
binfmtMu.Lock()
defer binfmtMu.Unlock()
if binfmt == nil {
binfmt = make(map[string]container.BinfmtEntry)
}
if _, ok := binfmt[arch]; ok {
panic("attempting to register " + strconv.Quote(arch) + " twice")
}
binfmt[arch] = e
}
const ( const (
// ExecTimeoutDefault replaces out of range [NewExec] timeout values. // ExecTimeoutDefault replaces out of range [NewExec] timeout values.
ExecTimeoutDefault = 15 * time.Minute ExecTimeoutDefault = 15 * time.Minute
@@ -105,6 +138,8 @@ type execArtifact struct {
// Caller-supplied user-facing reporting name, guaranteed to be nonzero // Caller-supplied user-facing reporting name, guaranteed to be nonzero
// during initialisation. // during initialisation.
name string name string
// Target architecture.
arch string
// Caller-supplied inner mount points. // Caller-supplied inner mount points.
paths []ExecPath paths []ExecPath
@@ -127,28 +162,40 @@ type execArtifact struct {
var _ fmt.Stringer = new(execArtifact) var _ fmt.Stringer = new(execArtifact)
// execNetArtifact is like execArtifact but implements [KnownChecksum] and has // execMeasuredArtifact is like execArtifact but implements [KnownChecksum] and
// its resulting container keep the host net namespace. // has its resulting container optionally keep the host net namespace.
type execNetArtifact struct { type execMeasuredArtifact struct {
checksum Checksum checksum Checksum
// Whether to keep host net namespace.
hostNet bool
execArtifact execArtifact
} }
var _ KnownChecksum = new(execNetArtifact) var _ KnownChecksum = new(execMeasuredArtifact)
// Checksum returns the caller-supplied checksum. // Checksum returns the caller-supplied checksum.
func (a *execNetArtifact) Checksum() Checksum { return a.checksum } func (a *execMeasuredArtifact) Checksum() Checksum { return a.checksum }
// Kind returns the hardcoded [Kind] constant. // Kind returns [KindExecNet], or [KindExec] if hostNet is false.
func (*execNetArtifact) Kind() Kind { return KindExecNet } func (a *execMeasuredArtifact) Kind() Kind {
if a == nil || a.hostNet {
return KindExecNet
}
return KindExec
}
// Cure cures the [Artifact] in the container described by the caller. The // Cure cures the [Artifact] in the container described by the caller. The
// container retains host networking. // container optionally retains host networking.
func (a *execNetArtifact) Cure(f *FContext) error { func (a *execMeasuredArtifact) Cure(f *FContext) error {
return a.cure(f, true) return a.cure(f, a.hostNet)
} }
// ErrNetChecksum is panicked by [NewExec] if host net namespace is requested
// with a nil checksum.
var ErrNetChecksum = errors.New("attempting to keep net namespace without checksum")
// NewExec returns a new [Artifact] that executes the program path in a // NewExec returns a new [Artifact] that executes the program path in a
// container with specified paths bind mounted read-only in order. A private // container with specified paths bind mounted read-only in order. A private
// instance of /proc and /dev is made available to the container. // instance of /proc and /dev is made available to the container.
@@ -162,7 +209,7 @@ func (a *execNetArtifact) Cure(f *FContext) error {
// regular or symlink. // regular or symlink.
// //
// If checksum is non-nil, the resulting [Artifact] implements [KnownChecksum] // If checksum is non-nil, the resulting [Artifact] implements [KnownChecksum]
// and its container runs in the host net namespace. // and its container optionally runs in the host net namespace.
// //
// The container is allowed to run for the specified duration before the initial // The container is allowed to run for the specified duration before the initial
// process and all processes originating from it is terminated. A zero or // process and all processes originating from it is terminated. A zero or
@@ -173,10 +220,10 @@ func (a *execNetArtifact) Cure(f *FContext) error {
// container and does not affect curing outcome. Because of this, it is omitted // container and does not affect curing outcome. Because of this, it is omitted
// from parameter data for computing identifier. // from parameter data for computing identifier.
func NewExec( func NewExec(
name string, name, arch string,
checksum *Checksum, checksum *Checksum,
timeout time.Duration, timeout time.Duration,
exclusive bool, hostNet, exclusive bool,
dir *check.Absolute, dir *check.Absolute,
env []string, env []string,
@@ -188,17 +235,23 @@ func NewExec(
if name == "" { if name == "" {
name = "exec-" + filepath.Base(pathname.String()) name = "exec-" + filepath.Base(pathname.String())
} }
if arch == "" {
arch = runtime.GOARCH
}
if timeout <= 0 { if timeout <= 0 {
timeout = ExecTimeoutDefault timeout = ExecTimeoutDefault
} }
if timeout > ExecTimeoutMax { if timeout > ExecTimeoutMax {
timeout = ExecTimeoutMax timeout = ExecTimeoutMax
} }
a := execArtifact{name, paths, dir, env, pathname, args, timeout, exclusive} a := execArtifact{name, arch, paths, dir, env, pathname, args, timeout, exclusive}
if checksum == nil { if checksum == nil {
if hostNet {
panic(ErrNetChecksum)
}
return &a return &a
} }
return &execNetArtifact{*checksum, a} return &execMeasuredArtifact{*checksum, hostNet, a}
} }
// Kind returns the hardcoded [Kind] constant. // Kind returns the hardcoded [Kind] constant.
@@ -206,6 +259,7 @@ func (*execArtifact) Kind() Kind { return KindExec }
// Params writes paths, executable pathname and args. // Params writes paths, executable pathname and args.
func (a *execArtifact) Params(ctx *IContext) { func (a *execArtifact) Params(ctx *IContext) {
ctx.WriteString(a.arch)
ctx.WriteString(a.name) ctx.WriteString(a.name)
ctx.WriteUint32(uint32(len(a.paths))) ctx.WriteUint32(uint32(len(a.paths)))
@@ -252,11 +306,26 @@ func (a *execArtifact) Params(ctx *IContext) {
} }
} }
// UnsupportedArchError describes an unsupported or invalid architecture.
type UnsupportedArchError string
func (e UnsupportedArchError) Error() string {
if e == "" {
return "invalid architecture name"
}
return "unsupported architecture " + string(e)
}
// readExecArtifact interprets IR values and returns the address of execArtifact // readExecArtifact interprets IR values and returns the address of execArtifact
// or execNetArtifact. // or execNetArtifact.
func readExecArtifact(r *IRReader, net bool) Artifact { func readExecArtifact(r *IRReader, net bool) Artifact {
r.DiscardAll() r.DiscardAll()
arch := r.ReadString()
if arch == "" {
panic(UnsupportedArchError(arch))
}
name := r.ReadString() name := r.ReadString()
sz := r.ReadUint32() sz := r.ReadUint32()
@@ -307,22 +376,17 @@ func readExecArtifact(r *IRReader, net bool) Artifact {
exclusive := r.ReadUint32() != 0 exclusive := r.ReadUint32() != 0
checksum, ok := r.Finalise() checksum, ok := r.Finalise()
var checksumP *Checksum var checksumP *Checksum
if net { if ok {
if !ok { checksumP = new(checksum.Value())
panic(ErrExpectedChecksum) }
}
checksumVal := checksum.Value() if net && !ok {
checksumP = &checksumVal panic(ErrExpectedChecksum)
} else {
if ok {
panic(ErrUnexpectedChecksum)
}
} }
return NewExec( return NewExec(
name, checksumP, timeout, exclusive, dir, env, pathname, args, paths..., name, arch, checksumP, timeout, net, exclusive, dir, env, pathname, args, paths...,
) )
} }
@@ -397,7 +461,7 @@ const SeccompPresets = std.PresetStrict &
func (a *execArtifact) makeContainer( func (a *execArtifact) makeContainer(
ctx context.Context, ctx context.Context,
msg message.Msg, msg message.Msg,
flags int, flags, jobs int,
hostNet bool, hostNet bool,
temp, work *check.Absolute, temp, work *check.Absolute,
getArtifact GetArtifactFunc, getArtifact GetArtifactFunc,
@@ -431,11 +495,23 @@ func (a *execArtifact) makeContainer(
if z.HostNet { if z.HostNet {
z.Hostname = "cure-net" z.Hostname = "cure-net"
} }
z.Quiet = flags&CSuppressInit != 0
z.Uid, z.Gid = (1<<10)-1, (1<<10)-1 z.Uid, z.Gid = (1<<10)-1, (1<<10)-1
z.Dir, z.Path, z.Args = a.dir, a.path, a.args
z.Dir, z.Env, z.Path, z.Args = a.dir, a.env, a.path, a.args z.Env = slices.Concat(a.env, []string{EnvJobs + "=" + strconv.Itoa(jobs)})
z.Grow(len(a.paths) + 4) z.Grow(len(a.paths) + 4)
if a.arch != runtime.GOARCH {
binfmtMu.RLock()
e, ok := binfmt[a.arch]
binfmtMu.RUnlock()
if !ok {
return nil, UnsupportedArchError(a.arch)
}
z.Binfmt = []container.BinfmtEntry{e}
z.InitAsRoot = true
}
for i, b := range a.paths { for i, b := range a.paths {
if i == overlayWorkIndex { if i == overlayWorkIndex {
if err = os.MkdirAll(work.String(), 0700); err != nil { if err = os.MkdirAll(work.String(), 0700); err != nil {
@@ -522,9 +598,9 @@ func (c *Cache) EnterExec(
case *execArtifact: case *execArtifact:
e = f e = f
case *execNetArtifact: case *execMeasuredArtifact:
e = &f.execArtifact e = &f.execArtifact
hostNet = true hostNet = f.hostNet
default: default:
return ErrNotExec return ErrNotExec
@@ -563,6 +639,7 @@ func (c *Cache) EnterExec(
z, err = e.makeContainer( z, err = e.makeContainer(
ctx, c.msg, ctx, c.msg,
c.flags, c.flags,
c.jobs,
hostNet, hostNet,
temp, work, temp, work,
func(a Artifact) (*check.Absolute, unique.Handle[Checksum]) { func(a Artifact) (*check.Absolute, unique.Handle[Checksum]) {
@@ -602,7 +679,7 @@ func (a *execArtifact) cure(f *FContext, hostNet bool) (err error) {
msg := f.GetMessage() msg := f.GetMessage()
var z *container.Container var z *container.Container
if z, err = a.makeContainer( if z, err = a.makeContainer(
ctx, msg, f.cache.flags, hostNet, ctx, msg, f.cache.flags, f.GetJobs(), hostNet,
f.GetTempDir(), f.GetWorkDir(), f.GetTempDir(), f.GetWorkDir(),
f.GetArtifact, f.GetArtifact,
f.cache.Ident, f.cache.Ident,
@@ -624,12 +701,6 @@ func (a *execArtifact) cure(f *FContext, hostNet bool) (err error) {
_ = stdout.Close() _ = stdout.Close()
return return
} }
defer func() {
if err != nil && !errors.As(err, new(*exec.ExitError)) {
_ = stdout.Close()
_ = stderr.Close()
}
}()
brStdout, brStderr := f.cache.getReader(stdout), f.cache.getReader(stderr) brStdout, brStderr := f.cache.getReader(stdout), f.cache.getReader(stderr)
stdoutDone, stderrDone := make(chan struct{}), make(chan struct{}) stdoutDone, stderrDone := make(chan struct{}), make(chan struct{})
@@ -644,6 +715,11 @@ func (a *execArtifact) cure(f *FContext, hostNet bool) (err error) {
io.TeeReader(brStderr, status), io.TeeReader(brStderr, status),
) )
defer func() { defer func() {
if err != nil && !errors.As(err, new(*exec.ExitError)) {
_ = stdout.Close()
_ = stderr.Close()
}
<-stdoutDone <-stdoutDone
<-stderrDone <-stderrDone
f.cache.putReader(brStdout) f.cache.putReader(brStdout)

View File

@@ -1,44 +1,70 @@
package pkg_test package pkg_test
//go:generate env CGO_ENABLED=0 go build -tags testtool -o testdata/testtool ./testdata
import ( import (
"bytes"
_ "embed" _ "embed"
"encoding/gob" "encoding/gob"
"errors" "errors"
"io/fs"
"net" "net"
"os" "os"
"os/exec" "os/exec"
"path/filepath"
"slices" "slices"
"testing" "testing"
"unique"
"hakurei.app/check" "hakurei.app/check"
"hakurei.app/container"
"hakurei.app/hst" "hakurei.app/hst"
"hakurei.app/internal/info"
"hakurei.app/internal/pkg" "hakurei.app/internal/pkg"
"hakurei.app/internal/stub" "hakurei.app/internal/stub"
"hakurei.app/internal/pkg/internal/testtool/expected"
) )
// testtoolBin is the container test tool binary made available to the // testtoolBin is the container test tool binary made available to the
// execArtifact for testing its curing environment. // execArtifact for testing its curing environment.
// //
//go:embed testdata/testtool //go:generate env CGO_ENABLED=0 go build -tags testtool -o internal/testtool ./internal/testtool
//go:embed internal/testtool/testtool
var testtoolBin []byte var testtoolBin []byte
func init() {
pathname, err := filepath.Abs("internal/testtool/testtool")
if err != nil {
panic(err)
}
pkg.RegisterArch("cafe", container.BinfmtEntry{
Magic: expected.Magic,
Interpreter: check.MustAbs(pathname),
})
}
func TestExec(t *testing.T) { func TestExec(t *testing.T) {
t.Parallel() t.Parallel()
wantChecksumOffline := pkg.MustDecode( wantOffline := expectsFS{
"GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9", ".": {Mode: fs.ModeDir | 0500},
)
"check": {Mode: 0400, Data: []byte{0}},
}
wantOfflineEncode := pkg.Encode(wantOffline.hash())
failingArtifact := &stubArtifact{
kind: pkg.KindTar,
params: []byte("doomed artifact"),
cure: func(t *pkg.TContext) error {
return stub.UniqueError(0xcafe)
},
}
checkWithCache(t, []cacheTestCase{ checkWithCache(t, []cacheTestCase{
{"offline", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) { {"offline", pkg.CValidateKnown | checkDestroySubstitutes, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
testtool, testtoolDestroy := newTesttool() testtool, testtoolDestroy := newTesttool()
cureMany(t, c, []cureStep{ cureMany(t, c, []cureStep{
{"container", pkg.NewExec( {"container", pkg.NewExec(
"exec-offline", nil, 0, false, "exec-offline", "", new(wantOffline.hash()), 0, false, false,
pkg.AbsWork, pkg.AbsWork,
[]string{"HAKUREI_TEST=1"}, []string{"HAKUREI_TEST=1"},
check.MustAbs("/opt/bin/testtool"), check.MustAbs("/opt/bin/testtool"),
@@ -58,67 +84,128 @@ func TestExec(t *testing.T) {
}, },
}), }),
pkg.MustPath("/opt", false, testtool), pkg.MustPath("/opt", false, testtool),
), ignorePathname, wantChecksumOffline, nil}, ), ignorePathname, wantOffline, nil},
{"error passthrough", pkg.NewExec( {"substitution", pkg.NewExec(
"", nil, 0, true, "exec-offline", "", new(wantOffline.hash()), 0, false, false,
pkg.AbsWork, pkg.AbsWork,
[]string{"HAKUREI_TEST=1"}, []string{"HAKUREI_TEST=1"},
check.MustAbs("/opt/bin/testtool"), check.MustAbs("/opt/bin/testtool"),
[]string{"testtool"}, []string{"testtool"},
pkg.MustPath("/proc/nonexistent", false, &stubArtifact{ pkg.MustPath("/file", false, newStubFile(
pkg.KindHTTPGet,
pkg.ID{0xfe, 0},
nil,
nil, nil,
)),
// substitution miss fails in testtool due to differing idents
pkg.MustPath("/.hakurei", false, &stubArtifact{
kind: pkg.KindTar, kind: pkg.KindTar,
params: []byte("doomed artifact"), params: []byte("empty directory (substituted)"),
cure: func(t *pkg.TContext) error { cure: func(t *pkg.TContext) error {
return stub.UniqueError(0xcafe) return os.MkdirAll(t.GetWorkDir().String(), 0700)
}, },
}), }),
), nil, pkg.Checksum{}, &pkg.DependencyCureError{ pkg.MustPath("/opt", false, testtool),
), ignorePathname, wantOffline, nil},
{"error passthrough", pkg.NewExec(
"", "", nil, 0, false, true,
pkg.AbsWork,
[]string{"HAKUREI_TEST=1"},
check.MustAbs("/opt/bin/testtool"),
[]string{"testtool"},
pkg.MustPath("/proc/nonexistent", false, failingArtifact),
), nil, nil, &pkg.DependencyCureError{
{ {
Ident: unique.Make(pkg.ID(pkg.MustDecode( A: failingArtifact,
"Sowo6oZRmG6xVtUaxB6bDWZhVsqAJsIJWUp0OPKlE103cY0lodx7dem8J-qQF0Z1",
))),
Err: stub.UniqueError(0xcafe), Err: stub.UniqueError(0xcafe),
}, },
}}, }},
{"invalid paths", pkg.NewExec( {"invalid paths", pkg.NewExec(
"", nil, 0, false, "", "", nil, 0, false, false,
pkg.AbsWork, pkg.AbsWork,
[]string{"HAKUREI_TEST=1"}, []string{"HAKUREI_TEST=1"},
check.MustAbs("/opt/bin/testtool"), check.MustAbs("/opt/bin/testtool"),
[]string{"testtool"}, []string{"testtool"},
pkg.ExecPath{}, pkg.ExecPath{},
), nil, pkg.Checksum{}, pkg.ErrInvalidPaths}, ), nil, nil, pkg.ErrInvalidPaths},
}) })
// check init failure passthrough // check init failure passthrough
var exitError *exec.ExitError initFailureArtifact := pkg.NewExec(
if _, _, err := c.Cure(pkg.NewExec( "", "", nil, 0, false, false,
"", nil, 0, false,
pkg.AbsWork, pkg.AbsWork,
nil, nil,
check.MustAbs("/opt/bin/testtool"), check.MustAbs("/opt/bin/testtool"),
[]string{"testtool"}, []string{"testtool"},
)); !errors.As(err, &exitError) || )
var exitError *exec.ExitError
if _, _, err := c.Cure(initFailureArtifact); !errors.As(err, &exitError) ||
exitError.ExitCode() != hst.ExitFailure { exitError.ExitCode() != hst.ExitFailure {
t.Fatalf("Cure: error = %v, want init exit status 1", err) t.Fatalf("Cure: error = %v, want init exit status 1", err)
} }
testtoolDestroy(t, base, c) var faultStatus []byte
}, pkg.MustDecode("Q5DluWQCAeohLoiGRImurwFp3vdz9IfQCoj7Fuhh73s4KQPRHpEQEnHTdNHmB8Fx")}, if faults, err := c.ReadFaults(initFailureArtifact); err != nil {
t.Fatal(err)
} else if len(faults) != 1 {
t.Fatalf("ReadFaults: %v", faults)
} else if faultStatus, err = os.ReadFile(faults[0].String()); err != nil {
t.Fatal(err)
} else if err = faults[0].Destroy(); err != nil {
t.Fatal(err)
} else {
t.Logf("destroyed expected fault at %s", faults[0].Time().UTC())
}
{"net", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) { if !bytes.HasPrefix(faultStatus, []byte(
"internal/pkg ",
)) || !bytes.Contains(faultStatus, []byte(
"\ninit: fork/exec /opt/bin/testtool: no such file or directory\n",
)) {
t.Errorf("unexpected status:\n%s", string(faultStatus))
}
destroyStatus(t, base, 2, 1)
testtoolDestroy(t, base, c)
}, expectsFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/" + wantOfflineEncode: {Mode: fs.ModeDir | 0500},
"checksum/" + wantOfflineEncode + "/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/IY91PCtOpCYy21AaIK0c9f8-Z6fb2_2ewoHWkt4dxoLf0GOrWqS8yAGFLV84b1Dw": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantOfflineEncode)},
"identifier/QwS7SmiatdqryQYgESdGw7Yw2PcpNf0vNfpvUA0t92BTlKiUjfCrXyMW17G2X77X": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
"identifier/" + expected.Offline: {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantOfflineEncode)},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
{"net", pkg.CValidateKnown | checkDestroySubstitutes, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
testtool, testtoolDestroy := newTesttool() testtool, testtoolDestroy := newTesttool()
wantChecksum := pkg.MustDecode( wantNet := expectsFS{
"a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W", ".": {Mode: fs.ModeDir | 0500},
)
"check": {Mode: 0400, Data: []byte("net")},
}
cureMany(t, c, []cureStep{ cureMany(t, c, []cureStep{
{"container", pkg.NewExec( {"container", pkg.NewExec(
"exec-net", &wantChecksum, 0, false, "exec-net", "", new(wantNet.hash()), 0, true, false,
pkg.AbsWork, pkg.AbsWork,
[]string{"HAKUREI_TEST=1"}, []string{"HAKUREI_TEST=1"},
check.MustAbs("/opt/bin/testtool"), check.MustAbs("/opt/bin/testtool"),
@@ -138,18 +225,37 @@ func TestExec(t *testing.T) {
}, },
}), }),
pkg.MustPath("/opt", false, testtool), pkg.MustPath("/opt", false, testtool),
), ignorePathname, wantChecksum, nil}, ), ignorePathname, wantNet, nil},
}) })
destroyStatus(t, base, 2, 0)
testtoolDestroy(t, base, c) testtoolDestroy(t, base, c)
}, pkg.MustDecode("bPYvvqxpfV7xcC1EptqyKNK1klLJgYHMDkzBcoOyK6j_Aj5hb0mXNPwTwPSK5F6Z")}, }, expectsFS{
".": {Mode: fs.ModeDir | 0700},
{"overlay root", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) { "checksum": {Mode: fs.ModeDir | 0700},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
"checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W": {Mode: fs.ModeDir | 0500},
"checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W/check": {Mode: 0400, Data: []byte("net")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/" + expected.Net: {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W")},
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
{"overlay root", pkg.CValidateKnown | checkDestroySubstitutes, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
testtool, testtoolDestroy := newTesttool() testtool, testtoolDestroy := newTesttool()
cureMany(t, c, []cureStep{ cureMany(t, c, []cureStep{
{"container", pkg.NewExec( {"container", pkg.NewExec(
"exec-overlay-root", nil, 0, false, "exec-overlay-root", "", nil, 0, false, false,
pkg.AbsWork, pkg.AbsWork,
[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"}, []string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
check.MustAbs("/opt/bin/testtool"), check.MustAbs("/opt/bin/testtool"),
@@ -163,18 +269,35 @@ func TestExec(t *testing.T) {
}, },
}), }),
pkg.MustPath("/opt", false, testtool), pkg.MustPath("/opt", false, testtool),
), ignorePathname, wantChecksumOffline, nil}, ), ignorePathname, wantOffline, nil},
}) })
destroyStatus(t, base, 2, 0)
testtoolDestroy(t, base, c) testtoolDestroy(t, base, c)
}, pkg.MustDecode("PO2DSSCa4yoSgEYRcCSZfQfwow1yRigL3Ry-hI0RDI4aGuFBha-EfXeSJnG_5_Rl")}, }, expectsFS{
".": {Mode: fs.ModeDir | 0700},
{"overlay work", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) { "checksum": {Mode: fs.ModeDir | 0700},
"checksum/" + wantOfflineEncode: {Mode: fs.ModeDir | 0500},
"checksum/" + wantOfflineEncode + "/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/" + expected.OvlRoot: {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantOfflineEncode)},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
{"overlay work", pkg.CValidateKnown | checkDestroySubstitutes, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
testtool, testtoolDestroy := newTesttool() testtool, testtoolDestroy := newTesttool()
cureMany(t, c, []cureStep{ cureMany(t, c, []cureStep{
{"container", pkg.NewExec( {"container", pkg.NewExec(
"exec-overlay-work", nil, 0, false, "exec-overlay-work", "", nil, 0, false, false,
pkg.AbsWork, pkg.AbsWork,
[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"}, []string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
check.MustAbs("/work/bin/testtool"), check.MustAbs("/work/bin/testtool"),
@@ -193,18 +316,35 @@ func TestExec(t *testing.T) {
return os.MkdirAll(t.GetWorkDir().String(), 0700) return os.MkdirAll(t.GetWorkDir().String(), 0700)
}, },
}), pkg.Path(pkg.AbsWork, false /* ignored */, testtool), }), pkg.Path(pkg.AbsWork, false /* ignored */, testtool),
), ignorePathname, wantChecksumOffline, nil}, ), ignorePathname, wantOffline, nil},
}) })
destroyStatus(t, base, 2, 0)
testtoolDestroy(t, base, c) testtoolDestroy(t, base, c)
}, pkg.MustDecode("iaRt6l_Wm2n-h5UsDewZxQkCmjZjyL8r7wv32QT2kyV55-Lx09Dq4gfg9BiwPnKs")}, }, expectsFS{
".": {Mode: fs.ModeDir | 0700},
{"multiple layers", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) { "checksum": {Mode: fs.ModeDir | 0700},
"checksum/" + wantOfflineEncode: {Mode: fs.ModeDir | 0500},
"checksum/" + wantOfflineEncode + "/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/" + expected.Work: {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantOfflineEncode)},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
{"multiple layers", pkg.CValidateKnown | checkDestroySubstitutes, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
testtool, testtoolDestroy := newTesttool() testtool, testtoolDestroy := newTesttool()
cureMany(t, c, []cureStep{ cureMany(t, c, []cureStep{
{"container", pkg.NewExec( {"container", pkg.NewExec(
"exec-multiple-layers", nil, 0, false, "exec-multiple-layers", "", nil, 0, false, false,
pkg.AbsWork, pkg.AbsWork,
[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"}, []string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
check.MustAbs("/opt/bin/testtool"), check.MustAbs("/opt/bin/testtool"),
@@ -245,18 +385,40 @@ func TestExec(t *testing.T) {
}, },
}), }),
pkg.MustPath("/opt", false, testtool), pkg.MustPath("/opt", false, testtool),
), ignorePathname, wantChecksumOffline, nil}, ), ignorePathname, wantOffline, nil},
}) })
destroyStatus(t, base, 2, 0)
testtoolDestroy(t, base, c) testtoolDestroy(t, base, c)
}, pkg.MustDecode("O2YzyR7IUGU5J2CADy0hUZ3A5NkP_Vwzs4UadEdn2oMZZVWRtH0xZGJ3HXiimTnZ")}, }, expectsFS{
".": {Mode: fs.ModeDir | 0700},
{"overlay layer promotion", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) { "checksum": {Mode: fs.ModeDir | 0700},
"checksum/" + wantOfflineEncode: {Mode: fs.ModeDir | 0500},
"checksum/" + wantOfflineEncode + "/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
"checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK": {Mode: fs.ModeDir | 0500},
"checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK/check": {Mode: 0400, Data: []byte("layers")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
"identifier/B-kc5iJMx8GtlCua4dz6BiJHnDAOUfPjgpbKq4e-QEn0_CZkSYs3fOA1ve06qMs2": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK")},
"identifier/" + expected.Layers: {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantOfflineEncode)},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
{"overlay layer promotion", pkg.CValidateKnown | checkDestroySubstitutes, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
testtool, testtoolDestroy := newTesttool() testtool, testtoolDestroy := newTesttool()
cureMany(t, c, []cureStep{ cureMany(t, c, []cureStep{
{"container", pkg.NewExec( {"container", pkg.NewExec(
"exec-layer-promotion", nil, 0, true, "exec-layer-promotion", "", nil, 0, false, true,
pkg.AbsWork, pkg.AbsWork,
[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"}, []string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
check.MustAbs("/opt/bin/testtool"), check.MustAbs("/opt/bin/testtool"),
@@ -276,11 +438,96 @@ func TestExec(t *testing.T) {
}, },
}), }),
pkg.MustPath("/opt", false, testtool), pkg.MustPath("/opt", false, testtool),
), ignorePathname, wantChecksumOffline, nil}, ), ignorePathname, wantOffline, nil},
}) })
destroyStatus(t, base, 2, 0)
testtoolDestroy(t, base, c) testtoolDestroy(t, base, c)
}, pkg.MustDecode("3EaW6WibLi9gl03_UieiFPaFcPy5p4x3JPxrnLJxGaTI-bh3HU9DK9IMx7c3rrNm")}, }, expectsFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/" + wantOfflineEncode: {Mode: fs.ModeDir | 0500},
"checksum/" + wantOfflineEncode + "/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/kvJIqZo5DKFOxC2ZQ-8_nPaQzEAz9cIm3p6guO-uLqm-xaiPu7oRkSnsu411jd_U": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"identifier/" + expected.Promote: {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantOfflineEncode)},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
{"binfmt", pkg.CValidateKnown | checkDestroySubstitutes, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
if info.CanDegrade && os.Getenv("ROSA_SKIP_BINFMT") != "" {
t.Skip("binfmt_misc test explicitly skipped")
}
cureMany(t, c, []cureStep{
{"container", pkg.NewExec(
"exec-binfmt", "cafe", nil, 0, false, true,
pkg.AbsWork,
[]string{"HAKUREI_TEST=1", "HAKUREI_BINFMT=1"},
check.MustAbs("/opt/bin/sample"),
[]string{"sample"},
pkg.MustPath("/", true, &stubArtifact{
kind: pkg.KindTar,
params: []byte("empty directory"),
cure: func(t *pkg.TContext) error {
return os.MkdirAll(t.GetWorkDir().String(), 0700)
},
}),
pkg.MustPath("/opt", false, overrideIdent{pkg.ID{0xfe, 0xff}, &stubArtifact{
kind: pkg.KindTar,
cure: func(t *pkg.TContext) error {
work := t.GetWorkDir()
if err := os.MkdirAll(
work.Append("bin").String(),
0700,
); err != nil {
return err
}
return os.WriteFile(t.GetWorkDir().Append(
"bin",
"sample",
).String(), []byte(expected.Full), 0500)
},
}}),
), ignorePathname, expectsFS{
".": {Mode: fs.ModeDir | 0500},
"check": {Mode: 0400, Data: []byte("binfmt")},
}, nil},
})
destroyStatus(t, base, 2, 0)
}, expectsFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/5aevg3YpDxjqQZ-pdvXK7YqgkL5JKqcoStYQxeD96kuYar6K2mRQWMHib6NQRnpV": {Mode: fs.ModeDir | 0500},
"checksum/5aevg3YpDxjqQZ-pdvXK7YqgkL5JKqcoStYQxeD96kuYar6K2mRQWMHib6NQRnpV/bin": {Mode: fs.ModeDir | 0700},
"checksum/5aevg3YpDxjqQZ-pdvXK7YqgkL5JKqcoStYQxeD96kuYar6K2mRQWMHib6NQRnpV/bin/sample": {Mode: 0500, Data: []byte("\xca\xfe\xba\xbe\xfd\xfd:3")},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"checksum/UnDo4B5KneEUY5b4vRUk_y9MWgkWuw2N8f8a2XayO686xXur-aZmX2-7n_8tKMe3": {Mode: fs.ModeDir | 0500},
"checksum/UnDo4B5KneEUY5b4vRUk_y9MWgkWuw2N8f8a2XayO686xXur-aZmX2-7n_8tKMe3/check": {Mode: 0400, Data: []byte("binfmt")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/6VQTJ1lI5BmVuI1YFYJ8ClO3MRORvTTrcWFDcUU-l5Ga8EofxCxGlSTYN-u8dKj_": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/UnDo4B5KneEUY5b4vRUk_y9MWgkWuw2N8f8a2XayO686xXur-aZmX2-7n_8tKMe3")},
"identifier/_v8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/5aevg3YpDxjqQZ-pdvXK7YqgkL5JKqcoStYQxeD96kuYar6K2mRQWMHib6NQRnpV")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
}) })
} }

View File

@@ -1,6 +1,7 @@
package pkg_test package pkg_test
import ( import (
"io/fs"
"testing" "testing"
"hakurei.app/check" "hakurei.app/check"
@@ -10,18 +11,27 @@ import (
func TestFile(t *testing.T) { func TestFile(t *testing.T) {
t.Parallel() t.Parallel()
want := expectsFile{0}
checkWithCache(t, []cacheTestCase{ checkWithCache(t, []cacheTestCase{
{"file", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) { {"file", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
cureMany(t, c, []cureStep{ cureMany(t, c, []cureStep{
{"short", pkg.NewFile("null", []byte{0}), base.Append( {"short", pkg.NewFile("null", []byte{0}), base.Append(
"identifier", "identifier",
"3376ALA7hIUm2LbzH2fDvRezgzod1eTK_G6XjyOgbM2u-6swvkFaF0BOwSl_juBi", "3376ALA7hIUm2LbzH2fDvRezgzod1eTK_G6XjyOgbM2u-6swvkFaF0BOwSl_juBi",
), pkg.MustDecode( ), want, nil},
"vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX",
), nil},
}) })
}, pkg.MustDecode( }, expectsFS{
"iR6H5OIsyOW4EwEgtm9rGzGF6DVtyHLySEtwnFE8bnus9VJcoCbR4JIek7Lw-vwT", ".": {Mode: fs.ModeDir | 0700},
)},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/" + pkg.Encode(want.hash()): {Mode: 0400, Data: []byte{0}},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/3376ALA7hIUm2LbzH2fDvRezgzod1eTK_G6XjyOgbM2u-6swvkFaF0BOwSl_juBi": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
"substitute": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
}) })
} }

View File

@@ -0,0 +1,9 @@
// Package expected contains data shared between test helper and test harness.
package expected
const (
// Magic are magic bytes in the binfmt test case.
Magic = "\xca\xfe\xba\xbe\xfd\xfd"
// Full is the full content of the binfmt test case executable.
Full = Magic + ":3"
)

View File

@@ -0,0 +1,10 @@
package expected
const (
Offline = "q5ktDTq0miP-VvB2blxqXQeaRXCUWgP_KbC18KNtUDtyoaI_h5mHmGuPMArVEBDs"
OvlRoot = "NacZGXwuRkTvcHaG08a22ujJ8qCWN0RSoFlRSR5FSt0ZcBbJ28FRvkYsHEtX7G8i"
Layers = "WBJDrATtX6rIE5yAu8ePX3WmDF0Tt9kFiue0m3cRnyRoVx1my8a67fh3CAW486oP"
Net = "CmYtj2sNB3LHtqiDuck_Lz3MjLLIiwyP8N4NDitQ1Icvv__LVP9p8tm-sHeQaKKp"
Promote = "TX3eCloaQFkV-SZIH6Jg6E5WKH--rcXY1P0jnZKmLFKWrNqnOzd4G9eIBh6i5ywN"
Work = "OuNiLSC68pZhAOr1YQ4WbV1tzASA0nxLEBcK7lO7MqxDY_j8dmP_C612RTuF23Lu"
)

View File

@@ -0,0 +1,10 @@
package expected
const (
Offline = "WapqyoPxbWSnq07dWHt71mHaJXq99pAjJfFlELlJljSiZMhTFqqlzU1_mN86shSj"
OvlRoot = "V9anFOiRvjGfAeBhLl14AL8TKdWZyD0WTPYe4fS9mOBw8iW5Lmarvt6TG6MV8uWm"
Layers = "tKx7JNRoSBdK_7MdzI-nwTNV2wmiPzwYdcd17oLmXKL_iLmUzUiA79qTqdrTasrv"
Net = "aXyDLzBCJ9XltXZIfetEVsEkrqHfcXuD5XE_FcUnYbN3emwL55N6P8LlHzNfGnM5"
Promote = "3k4V16n96Lq04gjFSKmm4sFjyQ883FFBNXgTy9s_DjeTwxT3pg_iacEh8yMb_S4m"
Work = "6Q49MhFWRE3Ne6MycwAotgl1GtoU5WCHqJNWG2byYZCY-zX-IxPrWiKk7bKkNzhE"
)

View File

@@ -0,0 +1,10 @@
package expected
const (
Offline = "Z6yXE5gOJScL3srmnVMWgCXccDiUNZ5snSrf6RkXuU1_U0rX_kGVwsfHUgNG_awd"
OvlRoot = "zYXJHFRLuxvUhuisZEXgGgVvdQd6piMfp5jmtT6jdVjvC2gICXquOq-UTwlrSD5I"
Layers = "_F8EDazHbcLeT0sVSQXRN_kn9IjduqJcDYgzXpsT-hpKU4EBcZ0PISN2zchpqMbm"
Net = "CA_FAaSIYJgapBEHV40doxpH23PdUEy_6s1TZc7wfSPN0XYqwGpMceXXDSabGveO"
Promote = "_3LPrLp--4h9k4GsNNApu9hHtAafq-GUhfU6d4hJKBDKT3bz_szOsvkXxc5sK53d"
Work = "FEgHeiCD_WT4wsfB-9kDH5n6cRWCEYtJmXdKZgmUUukAOoXumH_hLlosXREC-tqq"
)

View File

@@ -9,18 +9,38 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"reflect" "reflect"
"runtime"
"slices" "slices"
"strconv"
"strings" "strings"
"hakurei.app/check"
"hakurei.app/fhs" "hakurei.app/fhs"
"hakurei.app/vfs" "hakurei.app/vfs"
"hakurei.app/internal/pkg/internal/testtool/expected"
) )
func main() { func main() {
log.SetFlags(0) log.SetFlags(0)
log.SetPrefix("testtool: ") log.SetPrefix("testtool: ")
if os.Getenv("HAKUREI_BINFMT") == "1" {
wantArgs := []string{"/interpreter", "/opt/bin/sample"}
if !slices.Equal(os.Args, wantArgs) {
log.Fatalf("Args: %q, want %q", os.Args, wantArgs)
}
if err := os.WriteFile("check", []byte("binfmt"), 0400); err != nil {
log.Fatal(err)
}
return
}
environ := slices.DeleteFunc(slices.Clone(os.Environ()), func(s string) bool {
return s == "CURE_JOBS="+strconv.Itoa(runtime.NumCPU())
})
var hostNet, layers, promote bool var hostNet, layers, promote bool
if len(os.Args) == 2 && os.Args[0] == "testtool" { if len(os.Args) == 2 && os.Args[0] == "testtool" {
switch os.Args[1] { switch os.Args[1] {
@@ -48,15 +68,15 @@ func main() {
var overlayRoot bool var overlayRoot bool
wantEnv := []string{"HAKUREI_TEST=1"} wantEnv := []string{"HAKUREI_TEST=1"}
if len(os.Environ()) == 2 { if len(environ) == 2 {
overlayRoot = true overlayRoot = true
if !layers && !promote { if !layers && !promote {
log.SetPrefix("testtool(overlay root): ") log.SetPrefix("testtool(overlay root): ")
} }
wantEnv = []string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"} wantEnv = []string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"}
} }
if !slices.Equal(wantEnv, os.Environ()) { if !slices.Equal(wantEnv, environ) {
log.Fatalf("Environ: %q, want %q", os.Environ(), wantEnv) log.Fatalf("Environ: %q, want %q", environ, wantEnv)
} }
var overlayWork bool var overlayWork bool
@@ -142,59 +162,40 @@ func main() {
} }
const checksumEmptyDir = "MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU" const checksumEmptyDir = "MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"
ident := "dztPS6jRjiZtCF4_p8AzfnxGp6obkhrgFVsxdodbKWUoAEVtDz3MykepJB4kI_ks" ident := expected.Offline
log.Println(m) log.Println(m)
next := func() { m = m.Next; log.Println(m) } next := func() { m = m.Next; log.Println(m) }
if overlayRoot { if overlayRoot {
ident = "RdMA-mubnrHuu3Ky1wWyxauSYCO0ZH_zCPUj3uDHqkfwv5sGcByoF_g5PjlGiClb" ident = expected.OvlRoot
if m.Root != "/" || m.Target != "/" || if m.Root != "/" || m.Target != "/" ||
m.Source != "overlay" || m.FsType != "overlay" { m.Source != "overlay" || m.FsType != "overlay" {
log.Fatal("unexpected root mount entry") log.Fatal("unexpected root mount entry")
} }
var lowerdir string var lowerdir []string
for _, o := range strings.Split(m.FsOptstr, ",") { for _, o := range strings.Split(m.FsOptstr, ",") {
const lowerdirKey = "lowerdir=" const lowerdirKey = "lowerdir+="
if strings.HasPrefix(o, lowerdirKey) { if strings.HasPrefix(o, lowerdirKey) {
lowerdir = o[len(lowerdirKey):] lowerdir = append(lowerdir, o[len(lowerdirKey):])
} }
} }
if !layers { if !layers {
if filepath.Base(lowerdir) != checksumEmptyDir { if len(lowerdir) != 1 || filepath.Base(lowerdir[0]) != checksumEmptyDir {
log.Fatal("unexpected artifact checksum") log.Fatal("unexpected artifact checksum")
} }
} else { } else {
ident = "p1t_drXr34i-jZNuxDMLaMOdL6tZvQqhavNafGynGqxOZoXAUTSn7kqNh3Ovv3DT" ident = expected.Layers
lowerdirsEscaped := strings.Split(lowerdir, ":") if len(lowerdir) != 2 ||
lowerdirs := lowerdirsEscaped[:0] filepath.Base(lowerdir[0]) != "MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU" ||
// ignore the option separator since it does not appear in ident filepath.Base(lowerdir[1]) != "nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK" {
for i, e := range lowerdirsEscaped { log.Fatalf("unexpected lowerdirs %s", strings.Join(lowerdir, ", "))
if len(e) > 0 &&
e[len(e)-1] == check.SpecialOverlayEscape[0] &&
(len(e) == 1 || e[len(e)-2] != check.SpecialOverlayEscape[0]) {
// ignore escaped pathname separator since it does not
// appear in ident
e = e[:len(e)-1]
if len(lowerdirsEscaped) != i {
lowerdirsEscaped[i+1] = e + lowerdirsEscaped[i+1]
continue
}
}
lowerdirs = append(lowerdirs, e)
}
if len(lowerdirs) != 2 ||
filepath.Base(lowerdirs[0]) != "MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU" ||
filepath.Base(lowerdirs[1]) != "nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK" {
log.Fatalf("unexpected lowerdirs %s", strings.Join(lowerdirs, ", "))
} }
} }
} else { } else {
if hostNet { if hostNet {
ident = "G8qPxD9puvvoOVV7lrT80eyDeIl3G_CCFoKw12c8mCjMdG1zF7NEPkwYpNubClK3" ident = expected.Net
} }
if m.Root != "/sysroot" || m.Target != "/" { if m.Root != "/sysroot" || m.Target != "/" {
@@ -213,14 +214,14 @@ func main() {
} }
if promote { if promote {
ident = "xXTIYcXmgJWNLC91c417RRrNM9cjELwEZHpGvf8Fk_GNP5agRJp_SicD0w9aMeLJ" ident = expected.Promote
} }
next() // testtool artifact next() // testtool artifact
next() next()
if overlayWork { if overlayWork {
ident = "5hlaukCirnXE4W_RSLJFOZN47Z5RiHnacXzdFp_70cLgiJUGR6cSb_HaFftkzi0-" ident = expected.Work
if m.Root != "/" || m.Target != "/work" || if m.Root != "/" || m.Target != "/work" ||
m.Source != "overlay" || m.FsType != "overlay" { m.Source != "overlay" || m.FsType != "overlay" {
log.Fatal("unexpected work mount entry") log.Fatal("unexpected work mount entry")

View File

@@ -3,7 +3,6 @@ package pkg
import ( import (
"bufio" "bufio"
"bytes" "bytes"
"context"
"crypto/sha512" "crypto/sha512"
"encoding/binary" "encoding/binary"
"errors" "errors"
@@ -11,6 +10,7 @@ import (
"io" "io"
"slices" "slices"
"strconv" "strconv"
"sync"
"syscall" "syscall"
"unique" "unique"
"unsafe" "unsafe"
@@ -39,22 +39,48 @@ func panicToError(errP *error) {
} }
} }
// irCache implements [IRCache].
type irCache struct {
// Artifact to [unique.Handle] of identifier cache.
artifact sync.Map
// Identifier free list, must not be accessed directly.
identPool sync.Pool
}
// zeroIRCache returns the initialised value of irCache.
func zeroIRCache() irCache {
return irCache{
identPool: sync.Pool{New: func() any { return new(extIdent) }},
}
}
// IRCache provides memory management and caching primitives for IR and
// identifier operations against [Artifact] implementations.
//
// The zero value is not safe for use.
type IRCache struct{ irCache }
// NewIR returns the address of a new [IRCache].
func NewIR() *IRCache {
return &IRCache{zeroIRCache()}
}
// IContext is passed to [Artifact.Params] and provides methods for writing // IContext is passed to [Artifact.Params] and provides methods for writing
// values to the IR writer. It does not expose the underlying [io.Writer]. // values to the IR writer. It does not expose the underlying [io.Writer].
// //
// IContext is valid until [Artifact.Params] returns. // IContext is valid until [Artifact.Params] returns.
type IContext struct { type IContext struct {
// Address of underlying [Cache], should be zeroed or made unusable after // Address of underlying irCache, should be zeroed or made unusable after
// [Artifact.Params] returns and must not be exposed directly. // [Artifact.Params] returns and must not be exposed directly.
cache *Cache ic *irCache
// Written to by various methods, should be zeroed after [Artifact.Params] // Written to by various methods, should be zeroed after [Artifact.Params]
// returns and must not be exposed directly. // returns and must not be exposed directly.
w io.Writer w io.Writer
// Optional [Artifact] to cureRes cache, replaces [IRKindIdent] with
// checksum values if non-nil.
inputs map[Artifact]cureRes
} }
// Unwrap returns the underlying [context.Context].
func (i *IContext) Unwrap() context.Context { return i.cache.ctx }
// irZero is a zero IR word. // irZero is a zero IR word.
var irZero [wordSize]byte var irZero [wordSize]byte
@@ -136,11 +162,19 @@ func (i *IContext) mustWrite(p []byte) {
// WriteIdent is not defined for an [Artifact] not part of the slice returned by // WriteIdent is not defined for an [Artifact] not part of the slice returned by
// [Artifact.Dependencies]. // [Artifact.Dependencies].
func (i *IContext) WriteIdent(a Artifact) { func (i *IContext) WriteIdent(a Artifact) {
buf := i.cache.getIdentBuf() buf := i.ic.getIdentBuf()
defer i.cache.putIdentBuf(buf) defer i.ic.putIdentBuf(buf)
IRKindIdent.encodeHeader(0).put(buf[:]) IRKindIdent.encodeHeader(0).put(buf[:])
*(*ID)(buf[wordSize:]) = i.cache.Ident(a).Value() if i.inputs != nil {
res, ok := i.inputs[a]
if !ok {
panic(InvalidLookupError(i.ic.Ident(a).Value()))
}
*(*ID)(buf[wordSize:]) = res.checksum.Value()
} else {
*(*ID)(buf[wordSize:]) = i.ic.Ident(a).Value()
}
i.mustWrite(buf[:]) i.mustWrite(buf[:])
} }
@@ -183,20 +217,45 @@ func (i *IContext) WriteString(s string) {
// Encode writes a deterministic, efficient representation of a to w and returns // Encode writes a deterministic, efficient representation of a to w and returns
// the first non-nil error encountered while writing to w. // the first non-nil error encountered while writing to w.
func (c *Cache) Encode(w io.Writer, a Artifact) (err error) { func (ic *irCache) Encode(w io.Writer, a Artifact) (err error) {
return ic.encode(w, a, nil)
}
// encode implements Encode but replaces identifiers with their cured checksums
// for a non-nil ident. Caller must acquire Cache.identMu.
func (ic *irCache) encode(
w io.Writer,
a Artifact,
inputs map[Artifact]cureRes,
) (err error) {
deps := a.Dependencies() deps := a.Dependencies()
idents := make([]*extIdent, len(deps)) idents := make([]*extIdent, len(deps))
for i, d := range deps { if inputs == nil {
dbuf, did := c.unsafeIdent(d, true) for i, d := range deps {
if dbuf == nil { dbuf, did := ic.unsafeIdent(d, true)
dbuf = c.getIdentBuf() if dbuf == nil {
binary.LittleEndian.PutUint64(dbuf[:], uint64(d.Kind())) dbuf = ic.getIdentBuf()
*(*ID)(dbuf[wordSize:]) = did.Value() binary.LittleEndian.PutUint64(dbuf[:], uint64(d.Kind()))
} else { *(*ID)(dbuf[wordSize:]) = did.Value()
c.storeIdent(d, dbuf) } else {
ic.storeIdent(d, dbuf)
}
defer ic.putIdentBuf(dbuf)
idents[i] = dbuf
}
} else {
for i, d := range deps {
res, ok := inputs[d]
if !ok {
return InvalidLookupError(ic.Ident(d).Value())
}
dbuf := ic.getIdentBuf()
binary.LittleEndian.PutUint64(dbuf[:], uint64(d.Kind()))
*(*ID)(dbuf[wordSize:]) = res.checksum.Value()
defer ic.putIdentBuf(dbuf)
idents[i] = dbuf
} }
defer c.putIdentBuf(dbuf)
idents[i] = dbuf
} }
slices.SortFunc(idents, func(a, b *extIdent) int { slices.SortFunc(idents, func(a, b *extIdent) int {
return bytes.Compare(a[:], b[:]) return bytes.Compare(a[:], b[:])
@@ -221,10 +280,10 @@ func (c *Cache) Encode(w io.Writer, a Artifact) (err error) {
} }
func() { func() {
i := IContext{c, w} i := IContext{ic, w, inputs}
defer panicToError(&err) defer panicToError(&err)
defer func() { i.cache, i.w = nil, nil }() defer func() { i.ic, i.w = nil, nil }()
a.Params(&i) a.Params(&i)
}() }()
@@ -233,7 +292,7 @@ func (c *Cache) Encode(w io.Writer, a Artifact) (err error) {
} }
var f IREndFlag var f IREndFlag
kcBuf := c.getIdentBuf() kcBuf := ic.getIdentBuf()
sz := wordSize sz := wordSize
if kc, ok := a.(KnownChecksum); ok { if kc, ok := a.(KnownChecksum); ok {
f |= IREndKnownChecksum f |= IREndKnownChecksum
@@ -243,13 +302,13 @@ func (c *Cache) Encode(w io.Writer, a Artifact) (err error) {
IRKindEnd.encodeHeader(uint32(f)).put(kcBuf[:]) IRKindEnd.encodeHeader(uint32(f)).put(kcBuf[:])
_, err = w.Write(kcBuf[:sz]) _, err = w.Write(kcBuf[:sz])
c.putIdentBuf(kcBuf) ic.putIdentBuf(kcBuf)
return return
} }
// encodeAll implements EncodeAll by recursively encoding dependencies and // encodeAll implements EncodeAll by recursively encoding dependencies and
// performs deduplication by value via the encoded map. // performs deduplication by value via the encoded map.
func (c *Cache) encodeAll( func (ic *irCache) encodeAll(
w io.Writer, w io.Writer,
a Artifact, a Artifact,
encoded map[Artifact]struct{}, encoded map[Artifact]struct{},
@@ -259,13 +318,13 @@ func (c *Cache) encodeAll(
} }
for _, d := range a.Dependencies() { for _, d := range a.Dependencies() {
if err = c.encodeAll(w, d, encoded); err != nil { if err = ic.encodeAll(w, d, encoded); err != nil {
return return
} }
} }
encoded[a] = struct{}{} encoded[a] = struct{}{}
return c.Encode(w, a) return ic.Encode(w, a)
} }
// EncodeAll writes a self-describing IR stream of a to w and returns the first // EncodeAll writes a self-describing IR stream of a to w and returns the first
@@ -283,8 +342,8 @@ func (c *Cache) encodeAll(
// the ident cache, nor does it contribute identifiers it computes back to the // the ident cache, nor does it contribute identifiers it computes back to the
// ident cache. Because of this, multiple invocations of EncodeAll will have // ident cache. Because of this, multiple invocations of EncodeAll will have
// similar cost and does not amortise when combined with a call to Cure. // similar cost and does not amortise when combined with a call to Cure.
func (c *Cache) EncodeAll(w io.Writer, a Artifact) error { func (ic *irCache) EncodeAll(w io.Writer, a Artifact) error {
return c.encodeAll(w, a, make(map[Artifact]struct{})) return ic.encodeAll(w, a, make(map[Artifact]struct{}))
} }
// ErrRemainingIR is returned for a [IRReadFunc] that failed to call // ErrRemainingIR is returned for a [IRReadFunc] that failed to call
@@ -409,6 +468,12 @@ func (e InvalidKindError) Error() string {
// register is not safe for concurrent use. register must not be called after // register is not safe for concurrent use. register must not be called after
// the first instance of [Cache] has been opened. // the first instance of [Cache] has been opened.
func register(k Kind, f IRReadFunc) { func register(k Kind, f IRReadFunc) {
openMu.Lock()
defer openMu.Unlock()
if opened {
panic("attempting to register after open")
}
if _, ok := irArtifact[k]; ok { if _, ok := irArtifact[k]; ok {
panic("attempting to register " + strconv.Itoa(int(k)) + " twice") panic("attempting to register " + strconv.Itoa(int(k)) + " twice")
} }

View File

@@ -3,6 +3,7 @@ package pkg_test
import ( import (
"bytes" "bytes"
"io" "io"
"io/fs"
"reflect" "reflect"
"testing" "testing"
@@ -38,7 +39,7 @@ func TestIRRoundtrip(t *testing.T) {
)}, )},
{"exec offline", pkg.NewExec( {"exec offline", pkg.NewExec(
"exec-offline", nil, 0, false, "exec-offline", "", nil, 0, false, false,
pkg.AbsWork, pkg.AbsWork,
[]string{"HAKUREI_TEST=1"}, []string{"HAKUREI_TEST=1"},
check.MustAbs("/opt/bin/testtool"), check.MustAbs("/opt/bin/testtool"),
@@ -58,9 +59,9 @@ func TestIRRoundtrip(t *testing.T) {
)}, )},
{"exec net", pkg.NewExec( {"exec net", pkg.NewExec(
"exec-net", "exec-net", "",
(*pkg.Checksum)(bytes.Repeat([]byte{0xfc}, len(pkg.Checksum{}))), (*pkg.Checksum)(bytes.Repeat([]byte{0xfc}, len(pkg.Checksum{}))),
0, false, 0, false, false,
pkg.AbsWork, pkg.AbsWork,
[]string{"HAKUREI_TEST=1"}, []string{"HAKUREI_TEST=1"},
check.MustAbs("/opt/bin/testtool"), check.MustAbs("/opt/bin/testtool"),
@@ -79,6 +80,28 @@ func TestIRRoundtrip(t *testing.T) {
)), )),
)}, )},
{"exec measured", pkg.NewExec(
"exec-measured", "",
(*pkg.Checksum)(bytes.Repeat([]byte{0xfd}, len(pkg.Checksum{}))),
0, false, false,
pkg.AbsWork,
[]string{"HAKUREI_TEST=1"},
check.MustAbs("/opt/bin/testtool"),
[]string{"testtool", "measured"},
pkg.MustPath("/file", false, pkg.NewFile("file", []byte(
"stub file",
))), pkg.MustPath("/.hakurei", false, pkg.NewHTTPGetTar(
nil, "file:///hakurei.tar",
pkg.Checksum(bytes.Repeat([]byte{0xfd}, len(pkg.Checksum{}))),
pkg.TarUncompressed,
)), pkg.MustPath("/opt", false, pkg.NewHTTPGetTar(
nil, "file:///testtool.tar.gz",
pkg.Checksum(bytes.Repeat([]byte{0xfd}, len(pkg.Checksum{}))),
pkg.TarGzip,
)),
)},
{"file anonymous", pkg.NewFile("", []byte{0})}, {"file anonymous", pkg.NewFile("", []byte{0})},
{"file", pkg.NewFile("stub", []byte("stub"))}, {"file", pkg.NewFile("stub", []byte("stub"))},
} }
@@ -105,9 +128,13 @@ func TestIRRoundtrip(t *testing.T) {
if err := <-done; err != nil { if err := <-done; err != nil {
t.Fatalf("EncodeAll: error = %v", err) t.Fatalf("EncodeAll: error = %v", err)
} }
}, pkg.MustDecode( }, expectsFS{
"E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C", ".": {Mode: fs.ModeDir | 0700},
), "checksum": {Mode: fs.ModeDir | 0700},
"identifier": {Mode: fs.ModeDir | 0700},
"substitute": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
},
} }
} }
checkWithCache(t, testCasesCache) checkWithCache(t, testCasesCache)

View File

@@ -3,12 +3,12 @@ package pkg_test
import ( import (
"crypto/sha512" "crypto/sha512"
"io" "io"
"io/fs"
"net/http" "net/http"
"reflect" "reflect"
"testing" "testing"
"testing/fstest" "testing/fstest"
"unique" "unique"
"unsafe"
"hakurei.app/check" "hakurei.app/check"
"hakurei.app/internal/pkg" "hakurei.app/internal/pkg"
@@ -33,20 +33,14 @@ func TestHTTPGet(t *testing.T) {
checkWithCache(t, []cacheTestCase{ checkWithCache(t, []cacheTestCase{
{"direct", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) { {"direct", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
var r pkg.RContext r := newRContext(t, c)
rCacheVal := reflect.ValueOf(&r).Elem().FieldByName("cache")
reflect.NewAt(
rCacheVal.Type(),
unsafe.Pointer(rCacheVal.UnsafeAddr()),
).Elem().Set(reflect.ValueOf(c))
f := pkg.NewHTTPGet( f := pkg.NewHTTPGet(
&client, &client,
"file:///testdata", "file:///testdata",
testdataChecksum.Value(), testdataChecksum.Value(),
) )
var got []byte var got []byte
if rc, err := f.Cure(&r); err != nil { if rc, err := f.Cure(r); err != nil {
t.Fatalf("Cure: error = %v", err) t.Fatalf("Cure: error = %v", err)
} else if got, err = io.ReadAll(rc); err != nil { } else if got, err = io.ReadAll(rc); err != nil {
t.Fatalf("ReadAll: error = %v", err) t.Fatalf("ReadAll: error = %v", err)
@@ -65,7 +59,7 @@ func TestHTTPGet(t *testing.T) {
wantErrMismatch := &pkg.ChecksumMismatchError{ wantErrMismatch := &pkg.ChecksumMismatchError{
Got: testdataChecksum.Value(), Got: testdataChecksum.Value(),
} }
if rc, err := f.Cure(&r); err != nil { if rc, err := f.Cure(r); err != nil {
t.Fatalf("Cure: error = %v", err) t.Fatalf("Cure: error = %v", err)
} else if got, err = io.ReadAll(rc); err != nil { } else if got, err = io.ReadAll(rc); err != nil {
t.Fatalf("ReadAll: error = %v", err) t.Fatalf("ReadAll: error = %v", err)
@@ -76,7 +70,7 @@ func TestHTTPGet(t *testing.T) {
} }
// check fallback validation // check fallback validation
if rc, err := f.Cure(&r); err != nil { if rc, err := f.Cure(r); err != nil {
t.Fatalf("Cure: error = %v", err) t.Fatalf("Cure: error = %v", err)
} else if err = rc.Close(); !reflect.DeepEqual(err, wantErrMismatch) { } else if err = rc.Close(); !reflect.DeepEqual(err, wantErrMismatch) {
t.Fatalf("Close: error = %#v, want %#v", err, wantErrMismatch) t.Fatalf("Close: error = %#v, want %#v", err, wantErrMismatch)
@@ -89,18 +83,19 @@ func TestHTTPGet(t *testing.T) {
pkg.Checksum{}, pkg.Checksum{},
) )
wantErrNotFound := pkg.ResponseStatusError(http.StatusNotFound) wantErrNotFound := pkg.ResponseStatusError(http.StatusNotFound)
if _, err := f.Cure(&r); !reflect.DeepEqual(err, wantErrNotFound) { if _, err := f.Cure(r); !reflect.DeepEqual(err, wantErrNotFound) {
t.Fatalf("Cure: error = %#v, want %#v", err, wantErrNotFound) t.Fatalf("Cure: error = %#v, want %#v", err, wantErrNotFound)
} }
}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C")}, }, expectsFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"identifier": {Mode: fs.ModeDir | 0700},
"substitute": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
{"cure", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) { {"cure", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
var r pkg.RContext r := newRContext(t, c)
rCacheVal := reflect.ValueOf(&r).Elem().FieldByName("cache")
reflect.NewAt(
rCacheVal.Type(),
unsafe.Pointer(rCacheVal.UnsafeAddr()),
).Elem().Set(reflect.ValueOf(c))
f := pkg.NewHTTPGet( f := pkg.NewHTTPGet(
&client, &client,
@@ -120,7 +115,7 @@ func TestHTTPGet(t *testing.T) {
} }
var got []byte var got []byte
if rc, err := f.Cure(&r); err != nil { if rc, err := f.Cure(r); err != nil {
t.Fatalf("Cure: error = %v", err) t.Fatalf("Cure: error = %v", err)
} else if got, err = io.ReadAll(rc); err != nil { } else if got, err = io.ReadAll(rc); err != nil {
t.Fatalf("ReadAll: error = %v", err) t.Fatalf("ReadAll: error = %v", err)
@@ -136,7 +131,7 @@ func TestHTTPGet(t *testing.T) {
"file:///testdata", "file:///testdata",
testdataChecksum.Value(), testdataChecksum.Value(),
) )
if rc, err := f.Cure(&r); err != nil { if rc, err := f.Cure(r); err != nil {
t.Fatalf("Cure: error = %v", err) t.Fatalf("Cure: error = %v", err)
} else if got, err = io.ReadAll(rc); err != nil { } else if got, err = io.ReadAll(rc); err != nil {
t.Fatalf("ReadAll: error = %v", err) t.Fatalf("ReadAll: error = %v", err)
@@ -156,6 +151,18 @@ func TestHTTPGet(t *testing.T) {
if _, _, err := c.Cure(f); !reflect.DeepEqual(err, wantErrNotFound) { if _, _, err := c.Cure(f); !reflect.DeepEqual(err, wantErrNotFound) {
t.Fatalf("Pathname: error = %#v, want %#v", err, wantErrNotFound) t.Fatalf("Pathname: error = %#v, want %#v", err, wantErrNotFound)
} }
}, pkg.MustDecode("L_0RFHpr9JUS4Zp14rz2dESSRvfLzpvqsLhR1-YjQt8hYlmEdVl7vI3_-v8UNPKs")}, }, expectsFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU": {Mode: 0400, Data: []byte("\x7f\xe1\x69\xa2\xdd\x63\x96\x26\x83\x79\x61\x8b\xf0\x3f\xd5\x16\x9a\x39\x3a\xdb\xcf\xb1\xbc\x8d\x33\xff\x75\xee\x62\x56\xa9\xf0\x27\xac\x13\x94\x69")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/oM-2pUlk-mOxK1t3aMWZer69UdOQlAXiAgMrpZ1476VoOqpYVP1aGFS9_HYy-D8_": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU")},
"substitute": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
}) })
} }

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -43,8 +43,7 @@ var _ fmt.Stringer = new(tarArtifactNamed)
func (a *tarArtifactNamed) String() string { return a.name + "-unpack" } func (a *tarArtifactNamed) String() string { return a.name + "-unpack" }
// NewTar returns a new [Artifact] backed by the supplied [Artifact] and // NewTar returns a new [Artifact] backed by the supplied [Artifact] and
// compression method. The source [Artifact] must be compatible with // compression method. The source [Artifact] must be a [FileArtifact].
// [TContext.Open].
func NewTar(a Artifact, compression uint32) Artifact { func NewTar(a Artifact, compression uint32) Artifact {
ta := tarArtifact{a, compression} ta := tarArtifact{a, compression}
if s, ok := a.(fmt.Stringer); ok { if s, ok := a.(fmt.Stringer); ok {

View File

@@ -20,6 +20,31 @@ import (
func TestTar(t *testing.T) { func TestTar(t *testing.T) {
t.Parallel() t.Parallel()
want := expectsFS{
".": {Mode: fs.ModeDir | 0500},
"checksum": {Mode: fs.ModeDir | 0500},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP": {Mode: fs.ModeDir | 0500},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check": {Mode: 0400, Data: []byte{0, 0}},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib": {Mode: fs.ModeDir | 0500},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig": {Mode: fs.ModeDir | 0500},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
"identifier": {Mode: fs.ModeDir | 0500},
"identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"work": {Mode: fs.ModeDir | 0500},
}
wantEncode := pkg.Encode(want.hash())
wantExpand := expectsFS{
".": {Mode: fs.ModeDir | 0500},
"libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
}
wantExpandEncode := pkg.Encode(wantExpand.hash())
checkWithCache(t, []cacheTestCase{ checkWithCache(t, []cacheTestCase{
{"http", 0, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) { {"http", 0, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
checkTarHTTP(t, base, c, fstest.MapFS{ checkTarHTTP(t, base, c, fstest.MapFS{
@@ -37,10 +62,32 @@ func TestTar(t *testing.T) {
"identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")}, "identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"work": {Mode: fs.ModeDir | 0700}, "work": {Mode: fs.ModeDir | 0700},
}, pkg.MustDecode( }, want)
"cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM", }, expectsFS{
)) ".": {Mode: fs.ModeDir | 0700},
}, pkg.MustDecode("NQTlc466JmSVLIyWklm_u8_g95jEEb98PxJU-kjwxLpfdjwMWJq0G8ze9R4Vo1Vu")},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/" + wantEncode: {Mode: fs.ModeDir | 0500},
"checksum/" + wantEncode + "/checksum": {Mode: fs.ModeDir | 0500},
"checksum/" + wantEncode + "/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP": {Mode: fs.ModeDir | 0500},
"checksum/" + wantEncode + "/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check": {Mode: 0400, Data: []byte{0, 0}},
"checksum/" + wantEncode + "/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib": {Mode: fs.ModeDir | 0500},
"checksum/" + wantEncode + "/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
"checksum/" + wantEncode + "/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig": {Mode: fs.ModeDir | 0500},
"checksum/" + wantEncode + "/identifier": {Mode: fs.ModeDir | 0500},
"checksum/" + wantEncode + "/identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"checksum/" + wantEncode + "/identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"checksum/" + wantEncode + "/work": {Mode: fs.ModeDir | 0500},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/W5S65DEhawz_WKaok5NjUKLmnD9dNl5RPauNJjcOVcB3VM4eGhSaLGmXbL8vZpiw": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantEncode)},
"identifier/rg7F1D5hwv6o4xctjD5zDq4i5MD0mArTsUIWfhUbik8xC6Bsyt3mjXXOm3goojTz": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantEncode)},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
{"http expand", 0, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) { {"http expand", 0, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
checkTarHTTP(t, base, c, fstest.MapFS{ checkTarHTTP(t, base, c, fstest.MapFS{
@@ -48,10 +95,23 @@ func TestTar(t *testing.T) {
"lib": {Mode: fs.ModeDir | 0700}, "lib": {Mode: fs.ModeDir | 0700},
"lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")}, "lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
}, pkg.MustDecode( }, wantExpand)
"CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN", }, expectsFS{
)) ".": {Mode: fs.ModeDir | 0700},
}, pkg.MustDecode("hSoSSgCYTNonX3Q8FjvjD1fBl-E-BQyA6OTXro2OadXqbST4tZ-akGXszdeqphRe")},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/" + wantExpandEncode: {Mode: fs.ModeDir | 0500},
"checksum/" + wantExpandEncode + "/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/W5S65DEhawz_WKaok5NjUKLmnD9dNl5RPauNJjcOVcB3VM4eGhSaLGmXbL8vZpiw": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantExpandEncode)},
"identifier/_v1blm2h-_KA-dVaawdpLas6MjHc6rbhhFS8JWwx8iJxZGUu8EBbRrhr5AaZ9PJL": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantExpandEncode)},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
}) })
} }
@@ -60,7 +120,7 @@ func checkTarHTTP(
base *check.Absolute, base *check.Absolute,
c *pkg.Cache, c *pkg.Cache,
testdataFsys fs.FS, testdataFsys fs.FS,
wantChecksum pkg.Checksum, want expectsKnown,
) { ) {
var testdata string var testdata string
{ {
@@ -194,24 +254,24 @@ func checkTarHTTP(
{"file", a, base.Append( {"file", a, base.Append(
"identifier", "identifier",
pkg.Encode(wantIdent), pkg.Encode(wantIdent),
), wantChecksum, nil}, ), want, nil},
{"directory", pkg.NewTar( {"directory", pkg.NewTar(
&tarDir, &tarDir,
pkg.TarGzip, pkg.TarGzip,
), ignorePathname, wantChecksum, nil}, ), ignorePathname, want, nil},
{"multiple entries", pkg.NewTar( {"multiple entries", pkg.NewTar(
&tarDirMulti, &tarDirMulti,
pkg.TarGzip, pkg.TarGzip,
), nil, pkg.Checksum{}, errors.New( ), nil, nil, errors.New(
"input directory does not contain a single regular file", "input directory does not contain a single regular file",
)}, )},
{"bad type", pkg.NewTar( {"bad type", pkg.NewTar(
&tarDirType, &tarDirType,
pkg.TarGzip, pkg.TarGzip,
), nil, pkg.Checksum{}, errors.New( ), nil, nil, errors.New(
"input directory does not contain a single regular file", "input directory does not contain a single regular file",
)}, )},
@@ -221,6 +281,6 @@ func checkTarHTTP(
cure: func(t *pkg.TContext) error { cure: func(t *pkg.TContext) error {
return stub.UniqueError(0xcafe) return stub.UniqueError(0xcafe)
}, },
}, pkg.TarGzip), nil, pkg.Checksum{}, stub.UniqueError(0xcafe)}, }, pkg.TarGzip), nil, nil, stub.UniqueError(0xcafe)},
}) })
} }

View File

@@ -7,10 +7,10 @@ func (t Toolchain) newAttr() (pkg.Artifact, string) {
version = "2.5.2" version = "2.5.2"
checksum = "YWEphrz6vg1sUMmHHVr1CRo53pFXRhq_pjN-AlG8UgwZK1y6m7zuDhxqJhD0SV0l" checksum = "YWEphrz6vg1sUMmHHVr1CRo53pFXRhq_pjN-AlG8UgwZK1y6m7zuDhxqJhD0SV0l"
) )
return t.NewPackage("attr", version, pkg.NewHTTPGetTar( return t.NewPackage("attr", version, newTar(
nil, "https://download.savannah.nongnu.org/releases/attr/"+ "https://download.savannah.nongnu.org/releases/attr/"+
"attr-"+version+".tar.gz", "attr-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), &PackageAttr{ ), &PackageAttr{
Patches: []KV{ Patches: []KV{
@@ -81,10 +81,10 @@ func (t Toolchain) newACL() (pkg.Artifact, string) {
version = "2.3.2" version = "2.3.2"
checksum = "-fY5nwH4K8ZHBCRXrzLdguPkqjKI6WIiGu4dBtrZ1o0t6AIU73w8wwJz_UyjIS0P" checksum = "-fY5nwH4K8ZHBCRXrzLdguPkqjKI6WIiGu4dBtrZ1o0t6AIU73w8wwJz_UyjIS0P"
) )
return t.NewPackage("acl", version, pkg.NewHTTPGetTar( return t.NewPackage("acl", version, newTar(
nil, "https://download.savannah.nongnu.org/releases/acl/"+ "https://download.savannah.nongnu.org/releases/acl/"+
"acl-"+version+".tar.gz", "acl-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), nil, &MakeHelper{ ), nil, &MakeHelper{
// makes assumptions about uid_map/gid_map // makes assumptions about uid_map/gid_map

View File

@@ -6,6 +6,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"net/http" "net/http"
"runtime"
"strconv" "strconv"
"sync" "sync"
@@ -16,9 +17,7 @@ import (
type PArtifact int type PArtifact int
const ( const (
LLVMCompilerRT PArtifact = iota LLVM PArtifact = iota
LLVMRuntimes
LLVMClang
// EarlyInit is the Rosa OS init program. // EarlyInit is the Rosa OS init program.
EarlyInit EarlyInit
@@ -57,6 +56,8 @@ const (
Fakeroot Fakeroot
Findutils Findutils
Flex Flex
FontUtil
Freetype
Fuse Fuse
GMP GMP
GLib GLib
@@ -64,6 +65,7 @@ const (
GenInitCPIO GenInitCPIO
Gettext Gettext
Git Git
Glslang
GnuTLS GnuTLS
Go Go
Gperf Gperf
@@ -71,31 +73,63 @@ const (
Gzip Gzip
Hakurei Hakurei
HakureiDist HakureiDist
Hwdata
IPTables IPTables
Kmod Kmod
LIT
LibX11
LibXau LibXau
LibXdmcp
LibXext
LibXfixes
LibXfont2
LibXrandr
LibXrender
LibXxf86vm
Libarchive
Libbsd Libbsd
Libcap Libcap
Libconfig
LibdisplayInfo
Libdrm
Libepoxy
Libev Libev
Libexpat Libexpat
Libffi Libffi
Libfontenc
Libgd Libgd
Libglvnd
Libiconv Libiconv
Libmd Libmd
Libmnl Libmnl
Libnftnl Libnftnl
Libpciaccess
Libpng
Libpsl Libpsl
Libseccomp Libseccomp
Libtasn1 Libtasn1
Libtirpc
Libtool Libtool
Libucontext Libucontext
Libunistring Libunistring
Libva
LibxcbRenderUtil
LibxcbUtil
LibxcbUtilImage
LibxcbUtilKeysyms
LibxcbUtilWM
Libxcvt
Libxkbfile
Libxml2 Libxml2
Libxshmfence
Libxslt Libxslt
Libxtrans
LMSensors
M4 M4
MPC MPC
MPFR MPFR
Make Make
Mesa
Meson Meson
Mksh Mksh
MuslFts MuslFts
@@ -117,24 +151,40 @@ const (
PerlPodParser PerlPodParser
PerlSGMLS PerlSGMLS
PerlTermReadKey PerlTermReadKey
PerlTestCmd
PerlTextCharWidth PerlTextCharWidth
PerlTextWrapI18N PerlTextWrapI18N
PerlUnicodeGCString PerlUnicodeLineBreak
PerlYAMLTiny PerlYAMLTiny
Pixman
PkgConfig PkgConfig
Procps Procps
Python Python
PythonFlitCore
PythonHatchling
PythonIniConfig PythonIniConfig
PythonMako
PythonMarkupSafe
PythonPackaging PythonPackaging
PythonPathspec
PythonPluggy PythonPluggy
PythonPyTest PythonPyTest
PythonPyYAML
PythonPycparser
PythonPygments PythonPygments
PythonSetuptools
PythonSetuptoolsSCM
PythonTroveClassifiers
PythonVCSVersioning
PythonWheel
QEMU QEMU
Rdfind Rdfind
Readline Readline
Rsync Rsync
Sed Sed
Setuptools SPIRVHeaders
SPIRVLLVMTranslator
SPIRVTools
SquashfsTools SquashfsTools
Strace Strace
TamaGo TamaGo
@@ -144,23 +194,44 @@ const (
toyboxEarly toyboxEarly
Unzip Unzip
UtilLinux UtilLinux
VIM
Wayland Wayland
WaylandProtocols WaylandProtocols
XCB XCB
XCBProto XCBProto
Xproto XDGDBusProxy
XZ XZ
Xkbcomp
XkeyboardConfig
XorgProto
Xserver
Zlib Zlib
Zstd Zstd
// PresetUnexportedStart is the first unexported preset. // PresetUnexportedStart is the first unexported preset.
PresetUnexportedStart PresetUnexportedStart
buildcatrust = iota - 1 stage0Dist = iota - 1
llvmSource
// earlyCompilerRT is an early, standalone compiler-rt installation for the
// standalone runtimes build.
//
// earlyCompilerRT must only be loaded by [LLVM].
earlyCompilerRT
// earlyRuntimes is an early, standalone installation of LLVM runtimes to
// work around the cmake build system leaking the system LLVM installation
// when invoking the newly built toolchain.
//
// earlyRuntimes must only be loaded by [LLVM].
earlyRuntimes
buildcatrust
utilMacros utilMacros
// Musl is a standalone libc that does not depend on the toolchain. // Musl is a standalone libc that does not depend on the toolchain.
Musl Musl
// muslHeaders is a system installation of [Musl] headers.
muslHeaders
// gcc is a hacked-to-pieces GCC toolchain meant for use in intermediate // gcc is a hacked-to-pieces GCC toolchain meant for use in intermediate
// stages only. This preset and its direct output must never be exposed. // stages only. This preset and its direct output must never be exposed.
@@ -305,15 +376,40 @@ var (
} }
// artifactsOnce is for lazy initialisation of artifacts. // artifactsOnce is for lazy initialisation of artifacts.
artifactsOnce [_toolchainEnd][len(artifactsM)]sync.Once artifactsOnce [_toolchainEnd][len(artifactsM)]sync.Once
// arch is the target architecture.
arch = runtime.GOARCH
// presetOpts globally modifies behaviour of presets.
presetOpts int
) )
const (
// OptSkipCheck skips running all test suites.
OptSkipCheck = 1 << iota
// OptLLVMNoLTO disables LTO in all [LLVM] stages.
OptLLVMNoLTO
)
// Arch returns the target architecture.
func Arch() string { return arch }
// Flags returns the current preset flags
func Flags() int { return presetOpts }
// zero zeros the value pointed to by p. // zero zeros the value pointed to by p.
func zero[T any](p *T) { var v T; *p = v } func zero[T any](p *T) { var v T; *p = v }
// DropCaches arranges for all cached [pkg.Artifact] to be freed some time after // DropCaches arranges for all cached [pkg.Artifact] to be freed some time after
// it returns. Must not be used concurrently with any other function from this // it returns. Must not be used concurrently with any other function from this
// package. // package.
func DropCaches() { func DropCaches(targetArch string, flags int) {
if targetArch == "" {
targetArch = runtime.GOARCH
}
arch = targetArch
presetOpts = flags
zero(&artifacts) zero(&artifacts)
zero(&artifactsOnce) zero(&artifactsOnce)
} }

View File

@@ -20,13 +20,16 @@ func TestLoad(t *testing.T) {
} }
func BenchmarkAll(b *testing.B) { func BenchmarkAll(b *testing.B) {
arch, flags := rosa.Arch(), rosa.Flags()
b.Cleanup(func() { rosa.DropCaches(arch, flags) })
for b.Loop() { for b.Loop() {
for i := range rosa.PresetEnd { for i := range rosa.PresetEnd {
rosa.Std.Load(rosa.PArtifact(i)) rosa.Std.Load(rosa.PArtifact(i))
} }
b.StopTimer() b.StopTimer()
rosa.DropCaches() rosa.DropCaches("", 0)
b.StartTimer() b.StartTimer()
} }
} }

View File

@@ -7,10 +7,10 @@ func (t Toolchain) newArgpStandalone() (pkg.Artifact, string) {
version = "1.3" version = "1.3"
checksum = "vtW0VyO2pJ-hPyYmDI2zwSLS8QL0sPAUKC1t3zNYbwN2TmsaE-fADhaVtNd3eNFl" checksum = "vtW0VyO2pJ-hPyYmDI2zwSLS8QL0sPAUKC1t3zNYbwN2TmsaE-fADhaVtNd3eNFl"
) )
return t.NewPackage("argp-standalone", version, pkg.NewHTTPGetTar( return t.NewPackage("argp-standalone", version, newTar(
nil, "http://www.lysator.liu.se/~nisse/misc/"+ "http://www.lysator.liu.se/~nisse/misc/"+
"argp-standalone-"+version+".tar.gz", "argp-standalone-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), &PackageAttr{ ), &PackageAttr{
Env: []string{ Env: []string{

View File

@@ -5,7 +5,6 @@ import (
"io" "io"
"net/http" "net/http"
"os" "os"
"runtime"
"time" "time"
"hakurei.app/fhs" "hakurei.app/fhs"
@@ -88,7 +87,7 @@ func (a busyboxBin) Cure(t *pkg.TContext) (err error) {
// the https://busybox.net/downloads/binaries/ binary release. // the https://busybox.net/downloads/binaries/ binary release.
func newBusyboxBin() pkg.Artifact { func newBusyboxBin() pkg.Artifact {
var version, url, checksum string var version, url, checksum string
switch runtime.GOARCH { switch arch {
case "amd64": case "amd64":
version = "1.35.0" version = "1.35.0"
url = "https://busybox.net/downloads/binaries/" + url = "https://busybox.net/downloads/binaries/" +
@@ -101,11 +100,11 @@ func newBusyboxBin() pkg.Artifact {
checksum = "npJjBO7iwhjW6Kx2aXeSxf8kXhVgTCDChOZTTsI8ZfFfa3tbsklxRiidZQdrVERg" checksum = "npJjBO7iwhjW6Kx2aXeSxf8kXhVgTCDChOZTTsI8ZfFfa3tbsklxRiidZQdrVERg"
default: default:
panic("unsupported target " + runtime.GOARCH) panic("unsupported target " + arch)
} }
return pkg.NewExec( return pkg.NewExec(
"busybox-bin-"+version, nil, pkg.ExecTimeoutMax, false, "busybox-bin-"+version, arch, nil, pkg.ExecTimeoutMax, false, false,
fhs.AbsRoot, []string{ fhs.AbsRoot, []string{
"PATH=/system/bin", "PATH=/system/bin",
}, },

View File

@@ -7,9 +7,9 @@ func (t Toolchain) newBzip2() (pkg.Artifact, string) {
version = "1.0.8" version = "1.0.8"
checksum = "cTLykcco7boom-s05H1JVsQi1AtChYL84nXkg_92Dm1Xt94Ob_qlMg_-NSguIK-c" checksum = "cTLykcco7boom-s05H1JVsQi1AtChYL84nXkg_92Dm1Xt94Ob_qlMg_-NSguIK-c"
) )
return t.NewPackage("bzip2", version, pkg.NewHTTPGetTar( return t.NewPackage("bzip2", version, newTar(
nil, "https://sourceware.org/pub/bzip2/bzip2-"+version+".tar.gz", "https://sourceware.org/pub/bzip2/bzip2-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), &PackageAttr{ ), &PackageAttr{
Writable: true, Writable: true,

View File

@@ -10,13 +10,14 @@ import (
func (t Toolchain) newCMake() (pkg.Artifact, string) { func (t Toolchain) newCMake() (pkg.Artifact, string) {
const ( const (
version = "4.3.1" version = "4.3.2"
checksum = "RHpzZiM1kJ5bwLjo9CpXSeHJJg3hTtV9QxBYpQoYwKFtRh5YhGWpShrqZCSOzQN6" checksum = "6QylwRVKletndTSkZTV2YBRwgd_9rUVgav_QW23HpjUgV21AVYZOUOal8tdBDmO7"
) )
return t.NewPackage("cmake", version, pkg.NewHTTPGetTar( return t.NewPackage("cmake", version, newFromGitHubRelease(
nil, "https://github.com/Kitware/CMake/releases/download/"+ "Kitware/CMake",
"v"+version+"/cmake-"+version+".tar.gz", "v"+version,
mustDecode(checksum), "cmake-"+version+".tar.gz",
checksum,
pkg.TarGzip, pkg.TarGzip,
), &PackageAttr{ ), &PackageAttr{
// test suite expects writable source tree // test suite expects writable source tree
@@ -90,7 +91,7 @@ index 2ead810437..f85cbb8b1c 100644
ConfigureName: "/usr/src/cmake/bootstrap", ConfigureName: "/usr/src/cmake/bootstrap",
Configure: []KV{ Configure: []KV{
{"prefix", "/system"}, {"prefix", "/system"},
{"parallel", `"$(nproc)"`}, {"parallel", jobsE},
{"--"}, {"--"},
{"-DCMAKE_USE_OPENSSL", "OFF"}, {"-DCMAKE_USE_OPENSSL", "OFF"},
{"-DCMake_TEST_NO_NETWORK", "ON"}, {"-DCMake_TEST_NO_NETWORK", "ON"},
@@ -118,31 +119,27 @@ func init() {
// CMakeHelper is the [CMake] build system helper. // CMakeHelper is the [CMake] build system helper.
type CMakeHelper struct { type CMakeHelper struct {
// Joined with name with a dash if non-empty.
Variant string
// Path elements joined with source. // Path elements joined with source.
Append []string Append []string
// Value of CMAKE_BUILD_TYPE. The zero value is equivalent to "Release".
BuildType string
// CMake CACHE entries. // CMake CACHE entries.
Cache []KV Cache []KV
// Runs after install. // Runs after install.
Script string Script string
// Replaces the default test command.
Test string
// Whether to skip running tests.
SkipTest bool
// Whether to generate Makefile instead. // Whether to generate Makefile instead.
Make bool Make bool
} }
var _ Helper = new(CMakeHelper) var _ Helper = new(CMakeHelper)
// name returns its arguments and an optional variant string joined with '-'.
func (attr *CMakeHelper) name(name, version string) string {
if attr != nil && attr.Variant != "" {
name += "-" + attr.Variant
}
return name + "-" + version
}
// extra returns a hardcoded slice of [CMake] and [Ninja]. // extra returns a hardcoded slice of [CMake] and [Ninja].
func (attr *CMakeHelper) extra(int) P { func (attr *CMakeHelper) extra(int) P {
if attr != nil && attr.Make { if attr != nil && attr.Make {
@@ -169,22 +166,30 @@ func (*CMakeHelper) wantsDir() string { return "/cure/" }
// script generates the cure script. // script generates the cure script.
func (attr *CMakeHelper) script(name string) string { func (attr *CMakeHelper) script(name string) string {
if attr == nil { if attr == nil {
attr = &CMakeHelper{ attr = new(CMakeHelper)
Cache: []KV{
{"CMAKE_BUILD_TYPE", "Release"},
},
}
}
if len(attr.Cache) == 0 {
panic("CACHE must be non-empty")
} }
generate := "Ninja" generate := "Ninja"
jobs := "" test := "ninja " + jobsFlagE + " test"
if attr.Make { if attr.Make {
generate = "'Unix Makefiles'" generate = "'Unix Makefiles'"
jobs += ` "--parallel=$(nproc)"` test = "make " + jobsFlagE + " test"
} }
if attr.Test != "" {
test = attr.Test
}
script := attr.Script
if !attr.SkipTest && presetOpts&OptSkipCheck == 0 {
script += "\n" + test
}
cache := make([]KV, 1, 1+len(attr.Cache))
cache[0] = KV{"CMAKE_BUILD_TYPE", "Release"}
if attr.BuildType != "" {
cache[0][1] = attr.BuildType
}
cache = append(cache, attr.Cache...)
return ` return `
cmake -G ` + generate + ` \ cmake -G ` + generate + ` \
@@ -193,7 +198,7 @@ cmake -G ` + generate + ` \
-DCMAKE_ASM_COMPILER_TARGET="${ROSA_TRIPLE}" \ -DCMAKE_ASM_COMPILER_TARGET="${ROSA_TRIPLE}" \
-DCMAKE_INSTALL_LIBDIR=lib \ -DCMAKE_INSTALL_LIBDIR=lib \
` + strings.Join(slices.Collect(func(yield func(string) bool) { ` + strings.Join(slices.Collect(func(yield func(string) bool) {
for _, v := range attr.Cache { for _, v := range cache {
if !yield("-D" + v[0] + "=" + v[1]) { if !yield("-D" + v[0] + "=" + v[1]) {
return return
} }
@@ -201,7 +206,7 @@ cmake -G ` + generate + ` \
}), " \\\n\t") + ` \ }), " \\\n\t") + ` \
-DCMAKE_INSTALL_PREFIX=/system \ -DCMAKE_INSTALL_PREFIX=/system \
'/usr/src/` + name + `/` + filepath.Join(attr.Append...) + `' '/usr/src/` + name + `/` + filepath.Join(attr.Append...) + `'
cmake --build .` + jobs + ` cmake --build . --parallel=` + jobsE + `
cmake --install . --prefix=/work/system cmake --install . --prefix=/work/system
` + attr.Script ` + script
} }

View File

@@ -7,10 +7,10 @@ func (t Toolchain) newConnman() (pkg.Artifact, string) {
version = "2.0" version = "2.0"
checksum = "MhVTdJOhndnZn2SWd8URKo_Pj7Zvc14tntEbrVOf9L3yVWJvpb3v3Q6104tWJgtW" checksum = "MhVTdJOhndnZn2SWd8URKo_Pj7Zvc14tntEbrVOf9L3yVWJvpb3v3Q6104tWJgtW"
) )
return t.NewPackage("connman", version, pkg.NewHTTPGetTar( return t.NewPackage("connman", version, newTar(
nil, "https://git.kernel.org/pub/scm/network/connman/connman.git/"+ "https://git.kernel.org/pub/scm/network/connman/connman.git/"+
"snapshot/connman-"+version+".tar.gz", "snapshot/connman-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), &PackageAttr{ ), &PackageAttr{
Patches: []KV{ Patches: []KV{

View File

@@ -4,18 +4,18 @@ import "hakurei.app/internal/pkg"
func (t Toolchain) newCurl() (pkg.Artifact, string) { func (t Toolchain) newCurl() (pkg.Artifact, string) {
const ( const (
version = "8.19.0" version = "8.20.0"
checksum = "YHuVLVVp8q_Y7-JWpID5ReNjq2Zk6t7ArHB6ngQXilp_R5l3cubdxu3UKo-xDByv" checksum = "xyHXwrngIRGMasuzhn-I5MSCOhktwINbsWt1f_LuR-5jRVvyx_g6U1EQfDLEbr9r"
) )
return t.NewPackage("curl", version, pkg.NewHTTPGetTar( return t.NewPackage("curl", version, newTar(
nil, "https://curl.se/download/curl-"+version+".tar.bz2", "https://curl.se/download/curl-"+version+".tar.bz2",
mustDecode(checksum), checksum,
pkg.TarBzip2, pkg.TarBzip2,
), &PackageAttr{ ), &PackageAttr{
// remove broken test // remove broken test
Writable: true, Writable: true,
ScriptEarly: ` ScriptEarly: `
chmod +w tests/data && rm tests/data/test459 chmod +w tests/data && rm -f tests/data/test459
`, `,
}, &MakeHelper{ }, &MakeHelper{
Configure: []KV{ Configure: []KV{
@@ -25,7 +25,7 @@ chmod +w tests/data && rm tests/data/test459
{"disable-smb"}, {"disable-smb"},
}, },
Check: []string{ Check: []string{
`TFLAGS="-j$(expr "$(nproc)" '*' 2)"`, "TFLAGS=" + jobsLFlagE,
"test-nonflaky", "test-nonflaky",
}, },
}, },

View File

@@ -7,11 +7,11 @@ func (t Toolchain) newDBus() (pkg.Artifact, string) {
version = "1.16.2" version = "1.16.2"
checksum = "INwOuNdrDG7XW5ilW_vn8JSxEa444rRNc5ho97i84I1CNF09OmcFcV-gzbF4uCyg" checksum = "INwOuNdrDG7XW5ilW_vn8JSxEa444rRNc5ho97i84I1CNF09OmcFcV-gzbF4uCyg"
) )
return t.NewPackage("dbus", version, pkg.NewHTTPGetTar( return t.NewPackage("dbus", version, newFromGitLab(
nil, "https://gitlab.freedesktop.org/dbus/dbus/-/archive/"+ "gitlab.freedesktop.org",
"dbus-"+version+"/dbus-dbus-"+version+".tar.bz2", "dbus/dbus",
mustDecode(checksum), "dbus-"+version,
pkg.TarBzip2, checksum,
), &PackageAttr{ ), &PackageAttr{
// OSError: [Errno 30] Read-only file system: '/usr/src/dbus/subprojects/packagecache' // OSError: [Errno 30] Read-only file system: '/usr/src/dbus/subprojects/packagecache'
Writable: true, Writable: true,
@@ -44,3 +44,38 @@ func init() {
ID: 5356, ID: 5356,
} }
} }
func (t Toolchain) newXDGDBusProxy() (pkg.Artifact, string) {
const (
version = "0.1.7"
checksum = "UW5Pe-TP-XAaN-kTbxrkOQ7eYdmlAQlr2pdreLtPT0uwdAz-7rzDP8V_8PWuZBup"
)
return t.NewPackage("xdg-dbus-proxy", version, newFromGitHub(
"flatpak/xdg-dbus-proxy",
version,
checksum,
), nil, &MesonHelper{
Setup: []KV{
{"Dman", "disabled"},
},
},
DBus,
GLib,
), version
}
func init() {
artifactsM[XDGDBusProxy] = Metadata{
f: Toolchain.newXDGDBusProxy,
Name: "xdg-dbus-proxy",
Description: "a filtering proxy for D-Bus connections",
Website: "https://github.com/flatpak/xdg-dbus-proxy",
Dependencies: P{
GLib,
},
ID: 58434,
}
}

View File

@@ -7,10 +7,10 @@ func (t Toolchain) newDTC() (pkg.Artifact, string) {
version = "1.7.2" version = "1.7.2"
checksum = "vUoiRynPyYRexTpS6USweT5p4SVHvvVJs8uqFkkVD-YnFjwf6v3elQ0-Etrh00Dt" checksum = "vUoiRynPyYRexTpS6USweT5p4SVHvvVJs8uqFkkVD-YnFjwf6v3elQ0-Etrh00Dt"
) )
return t.NewPackage("dtc", version, pkg.NewHTTPGetTar( return t.NewPackage("dtc", version, newTar(
nil, "https://git.kernel.org/pub/scm/utils/dtc/dtc.git/snapshot/"+ "https://git.kernel.org/pub/scm/utils/dtc/dtc.git/snapshot/"+
"dtc-v"+version+".tar.gz", "dtc-v"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), &PackageAttr{ ), &PackageAttr{
// works around buggy test: // works around buggy test:

View File

@@ -4,13 +4,13 @@ import "hakurei.app/internal/pkg"
func (t Toolchain) newElfutils() (pkg.Artifact, string) { func (t Toolchain) newElfutils() (pkg.Artifact, string) {
const ( const (
version = "0.194" version = "0.195"
checksum = "Q3XUygUPv9vR1TkWucwUsQ8Kb1_F6gzk-KMPELr3cC_4AcTrprhVPMvN0CKkiYRa" checksum = "JrGnBD38w8Mj0ZxDw3fKlRBFcLvRKu8rcYnX35R9yTlUSYnzTazyLboG-a2CsJlu"
) )
return t.NewPackage("elfutils", version, pkg.NewHTTPGetTar( return t.NewPackage("elfutils", version, newTar(
nil, "https://sourceware.org/elfutils/ftp/"+ "https://sourceware.org/elfutils/ftp/"+
version+"/elfutils-"+version+".tar.bz2", version+"/elfutils-"+version+".tar.bz2",
mustDecode(checksum), checksum,
pkg.TarBzip2, pkg.TarBzip2,
), &PackageAttr{ ), &PackageAttr{
Env: []string{ Env: []string{

View File

@@ -135,10 +135,11 @@ func newIANAEtc() pkg.Artifact {
version = "20251215" version = "20251215"
checksum = "kvKz0gW_rGG5QaNK9ZWmWu1IEgYAdmhj_wR7DYrh3axDfIql_clGRHmelP7525NJ" checksum = "kvKz0gW_rGG5QaNK9ZWmWu1IEgYAdmhj_wR7DYrh3axDfIql_clGRHmelP7525NJ"
) )
return pkg.NewHTTPGetTar( return newFromGitHubRelease(
nil, "https://github.com/Mic92/iana-etc/releases/download/"+ "Mic92/iana-etc",
version+"/iana-etc-"+version+".tar.gz", version,
mustDecode(checksum), "iana-etc-"+version+".tar.gz",
checksum,
pkg.TarGzip, pkg.TarGzip,
) )
} }

View File

@@ -7,11 +7,11 @@ func (t Toolchain) newFakeroot() (pkg.Artifact, string) {
version = "1.37.2" version = "1.37.2"
checksum = "4ve-eDqVspzQ6VWDhPS0NjW3aSenBJcPAJq_BFT7OOFgUdrQzoTBxZWipDAGWxF8" checksum = "4ve-eDqVspzQ6VWDhPS0NjW3aSenBJcPAJq_BFT7OOFgUdrQzoTBxZWipDAGWxF8"
) )
return t.NewPackage("fakeroot", version, pkg.NewHTTPGetTar( return t.NewPackage("fakeroot", version, newFromGitLab(
nil, "https://salsa.debian.org/clint/fakeroot/-/archive/upstream/"+ "salsa.debian.org",
version+"/fakeroot-upstream-"+version+".tar.bz2", "clint/fakeroot",
mustDecode(checksum), "upstream/"+version,
pkg.TarBzip2, checksum,
), &PackageAttr{ ), &PackageAttr{
Patches: []KV{ Patches: []KV{
{"remove-broken-docs", `diff --git a/doc/Makefile.am b/doc/Makefile.am {"remove-broken-docs", `diff --git a/doc/Makefile.am b/doc/Makefile.am

View File

@@ -9,10 +9,11 @@ func (t Toolchain) newFlex() (pkg.Artifact, string) {
version = "2.6.4" version = "2.6.4"
checksum = "p9POjQU7VhgOf3x5iFro8fjhy0NOanvA7CTeuWS_veSNgCixIJshTrWVkc5XLZkB" checksum = "p9POjQU7VhgOf3x5iFro8fjhy0NOanvA7CTeuWS_veSNgCixIJshTrWVkc5XLZkB"
) )
return t.NewPackage("flex", version, pkg.NewHTTPGetTar( return t.NewPackage("flex", version, newFromGitHubRelease(
nil, "https://github.com/westes/flex/releases/download/"+ "westes/flex",
"v"+version+"/flex-"+version+".tar.gz", "v"+version,
mustDecode(checksum), "flex-"+version+".tar.gz",
checksum,
pkg.TarGzip, pkg.TarGzip,
), nil, (*MakeHelper)(nil), ), nil, (*MakeHelper)(nil),
M4, M4,

27
internal/rosa/freetype.go Normal file
View File

@@ -0,0 +1,27 @@
package rosa
import "hakurei.app/internal/pkg"
func (t Toolchain) newFreetype() (pkg.Artifact, string) {
const (
version = "2.14.3"
checksum = "-WfLv8fVJNyCHpP_lriiDzOcVbBL9ajdQ3tl8AzIIUa9-8sVpU9irxOmSMgRHWYz"
)
return t.NewPackage("freetype", version, newTar(
"https://download.savannah.gnu.org/releases/freetype/"+
"freetype-"+version+".tar.gz",
checksum,
pkg.TarGzip,
), nil, (*MakeHelper)(nil)), version
}
func init() {
artifactsM[Freetype] = Metadata{
f: Toolchain.newFreetype,
Name: "freetype",
Description: "a freely available software library to render fonts",
Website: "http://www.freetype.org/",
ID: 854,
}
}

View File

@@ -7,10 +7,11 @@ func (t Toolchain) newFuse() (pkg.Artifact, string) {
version = "3.18.2" version = "3.18.2"
checksum = "iL-7b7eUtmlVSf5cSq0dzow3UiqSjBmzV3cI_ENPs1tXcHdktkG45j1V12h-4jZe" checksum = "iL-7b7eUtmlVSf5cSq0dzow3UiqSjBmzV3cI_ENPs1tXcHdktkG45j1V12h-4jZe"
) )
return t.NewPackage("fuse", version, pkg.NewHTTPGetTar( return t.NewPackage("fuse", version, newFromGitHubRelease(
nil, "https://github.com/libfuse/libfuse/releases/download/"+ "libfuse/libfuse",
"fuse-"+version+"/fuse-"+version+".tar.gz", "fuse-"+version,
mustDecode(checksum), "fuse-"+version+".tar.gz",
checksum,
pkg.TarGzip, pkg.TarGzip,
), nil, &MesonHelper{ ), nil, &MesonHelper{
Setup: []KV{ Setup: []KV{

View File

@@ -9,17 +9,20 @@ import (
func (t Toolchain) newGit() (pkg.Artifact, string) { func (t Toolchain) newGit() (pkg.Artifact, string) {
const ( const (
version = "2.53.0" version = "2.54.0"
checksum = "rlqSTeNgSeVKJA7nvzGqddFH8q3eFEPB4qRZft-4zth8wTHnbTbm7J90kp_obHGm" checksum = "7vGKtFOJGqY8DO4e8UMRax7dLgImXKQz5MMalec6MlgYrsarffSJjgOughwRFpSH"
) )
return t.NewPackage("git", version, pkg.NewHTTPGetTar( return t.NewPackage("git", version, newTar(
nil, "https://www.kernel.org/pub/software/scm/git/"+ "https://www.kernel.org/pub/software/scm/git/"+
"git-"+version+".tar.gz", "git-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), &PackageAttr{ ), &PackageAttr{
ScriptEarly: ` ScriptEarly: `
ln -s ../../system/bin/perl /usr/bin/ || true ln -s ../../system/bin/perl /usr/bin/ || true
# test suite assumes apache
rm -f /system/bin/httpd
`, `,
// uses source tree as scratch space // uses source tree as scratch space
@@ -38,6 +41,7 @@ function disable_test {
fi fi
} }
disable_test t1800-hook
disable_test t5319-multi-pack-index disable_test t5319-multi-pack-index
disable_test t1305-config-include disable_test t1305-config-include
disable_test t3900-i18n-commit disable_test t3900-i18n-commit
@@ -51,6 +55,14 @@ disable_test t9300-fast-import
disable_test t0211-trace2-perf disable_test t0211-trace2-perf
disable_test t1517-outside-repo disable_test t1517-outside-repo
disable_test t2200-add-update disable_test t2200-add-update
disable_test t0027-auto-crlf
disable_test t7513-interpret-trailers
disable_test t7703-repack-geometric
disable_test t7002-mv-sparse-checkout
disable_test t1451-fsck-buffer
disable_test t4104-apply-boundary
disable_test t4200-rerere
disable_test t5515-fetch-merge-logic
`, `,
Check: []string{ Check: []string{
"-C t", "-C t",
@@ -58,11 +70,14 @@ disable_test t2200-add-update
"prove", "prove",
}, },
Install: `make \ Install: `make \
"-j$(nproc)" \ ` + jobsFlagE + ` \
DESTDIR=/work \ DESTDIR=/work \
NO_INSTALL_HARDLINKS=1 \ NO_INSTALL_HARDLINKS=1 \
install`, install`,
}, },
// test suite hangs on mksh
Bash,
Diffutils, Diffutils,
Autoconf, Autoconf,
Gettext, Gettext,
@@ -98,7 +113,7 @@ func (t Toolchain) NewViaGit(
return t.New(strings.TrimSuffix( return t.New(strings.TrimSuffix(
path.Base(url), path.Base(url),
".git", ".git",
)+"-src-"+path.Base(rev), 0, t.AppendPresets(nil, )+"-src-"+path.Base(rev), THostNet, t.AppendPresets(nil,
NSSCACert, NSSCACert,
Git, Git,
), &checksum, nil, ` ), &checksum, nil, `
@@ -114,3 +129,8 @@ git \
rm -rf /work/.git rm -rf /work/.git
`, resolvconf()) `, resolvconf())
} }
// newTagRemote is a helper around NewViaGit for a tag on a git remote.
func (t Toolchain) newTagRemote(url, tag, checksum string) pkg.Artifact {
return t.NewViaGit(url, "refs/tags/"+tag, mustDecode(checksum))
}

248
internal/rosa/glslang.go Normal file
View File

@@ -0,0 +1,248 @@
package rosa
import (
"slices"
"strings"
"hakurei.app/internal/pkg"
)
func (t Toolchain) newSPIRVHeaders() (pkg.Artifact, string) {
const (
version = "1.4.341.0"
checksum = "0PL43-19Iaw4k7_D8J8BvoJ-iLgCVSYZ2ThgDPGfAJwIJFtre7l0cnQtLjcY-JvD"
)
return t.NewPackage("spirv-headers", version, newFromGitHub(
"KhronosGroup/SPIRV-Headers",
"vulkan-sdk-"+version,
checksum,
), nil, &CMakeHelper{
// upstream has no tests
SkipTest: true,
}), version
}
func init() {
artifactsM[SPIRVHeaders] = Metadata{
f: Toolchain.newSPIRVHeaders,
Name: "spirv-headers",
Description: "machine-readable files for the SPIR-V Registry",
Website: "https://github.com/KhronosGroup/SPIRV-Headers",
ID: 230542,
// upstream changed version scheme, anitya incapable of filtering them
latest: func(v *Versions) string {
for _, s := range v.Stable {
fields := strings.SplitN(s, ".", 4)
if len(fields) != 4 {
continue
}
if slices.ContainsFunc(fields, func(f string) bool {
return slices.ContainsFunc([]byte(f), func(d byte) bool {
return d < '0' || d > '9'
})
}) {
continue
}
return s
}
return v.Latest
},
}
}
func (t Toolchain) newSPIRVTools() (pkg.Artifact, string) {
const (
version = "2026.1"
checksum = "ZSQPQx8NltCDzQLk4qlaVxyWRWeI_JtsjEpeFt3kezTanl9DTHfLixSUCezMFBjv"
)
return t.NewPackage("spirv-tools", version, newFromGitHub(
"KhronosGroup/SPIRV-Tools",
"v"+version,
checksum,
), nil, &CMakeHelper{
Cache: []KV{
{"SPIRV-Headers_SOURCE_DIR", "/system"},
},
},
Python,
SPIRVHeaders,
), version
}
func init() {
artifactsM[SPIRVTools] = Metadata{
f: Toolchain.newSPIRVTools,
Name: "spirv-tools",
Description: "an API and commands for processing SPIR-V modules",
Website: "https://github.com/KhronosGroup/SPIRV-Tools",
Dependencies: P{
SPIRVHeaders,
},
ID: 14894,
latest: (*Versions).getStable,
}
}
func (t Toolchain) newGlslang() (pkg.Artifact, string) {
const (
version = "16.3.0"
checksum = "xyqDf8k3-D0_BXHGi0uLgMglnJ05Rf3j73QgbDs3sGtKNdBIQhY8JfqX1NcNoJQN"
)
return t.NewPackage("glslang", version, newFromGitHub(
"KhronosGroup/glslang",
version,
checksum,
), &PackageAttr{
// test suite writes to source
Writable: true,
Chmod: true,
}, &CMakeHelper{
Cache: []KV{
{"BUILD_SHARED_LIBS", "ON"},
{"ALLOW_EXTERNAL_SPIRV_TOOLS", "ON"},
},
},
Python,
Bash,
Diffutils,
SPIRVTools,
), version
}
func init() {
artifactsM[Glslang] = Metadata{
f: Toolchain.newGlslang,
Name: "glslang",
Description: "reference front end for GLSL/ESSL",
Website: "https://github.com/KhronosGroup/glslang",
ID: 205796,
}
}
func (t Toolchain) newSPIRVLLVMTranslator() (pkg.Artifact, string) {
const (
version = "22.1.2"
checksum = "JZAaV5ewYcm-35YA_U2BM2IcsQouZtX1BLZR0zh2vSlfEXMsT5OCtY4Gh5RJkcGy"
)
skipChecks := []string{
// error: line 13: OpTypeCooperativeMatrixKHR Scope is limited to Workgroup and Subgroup
"cooperative_matrix_constant_null.spvasm",
}
switch arch {
case "arm64":
skipChecks = append(skipChecks,
// LLVM ERROR: unsupported calling convention
"DebugInfo/COFF/no-cus.ll",
"DebugInfo/Generic/2009-11-05-DeadGlobalVariable.ll",
"DebugInfo/Generic/2009-11-10-CurrentFn.ll",
"DebugInfo/Generic/2010-01-05-DbgScope.ll",
"DebugInfo/Generic/2010-03-12-llc-crash.ll",
"DebugInfo/Generic/2010-03-24-MemberFn.ll",
"DebugInfo/Generic/2010-04-19-FramePtr.ll",
"DebugInfo/Generic/2010-06-29-InlinedFnLocalVar.ll",
"DebugInfo/Generic/2010-10-01-crash.ll",
"DebugInfo/Generic/PR20038.ll",
"DebugInfo/Generic/constant-pointers.ll",
"DebugInfo/Generic/dead-argument-order.ll",
"DebugInfo/Generic/debug-info-eis-option.ll",
"DebugInfo/Generic/def-line.ll",
"DebugInfo/Generic/discriminator.ll",
"DebugInfo/Generic/dwarf-public-names.ll",
"DebugInfo/Generic/enum.ll",
"DebugInfo/Generic/func-using-decl.ll",
"DebugInfo/Generic/global.ll",
"DebugInfo/Generic/imported-name-inlined.ll",
"DebugInfo/Generic/incorrect-variable-debugloc1.ll",
"DebugInfo/Generic/inline-scopes.ll",
"DebugInfo/Generic/inlined-arguments.ll",
"DebugInfo/Generic/inlined-vars.ll",
"DebugInfo/Generic/linear-dbg-value.ll",
"DebugInfo/Generic/linkage-name-abstract.ll",
"DebugInfo/Generic/member-order.ll",
"DebugInfo/Generic/missing-abstract-variable.ll",
"DebugInfo/Generic/multiline.ll",
"DebugInfo/Generic/namespace_function_definition.ll",
"DebugInfo/Generic/namespace_inline_function_definition.ll",
"DebugInfo/Generic/noscopes.ll",
"DebugInfo/Generic/ptrsize.ll",
"DebugInfo/Generic/restrict.ll",
"DebugInfo/Generic/two-cus-from-same-file.ll",
"DebugInfo/Generic/version.ll",
"DebugInfo/LocalAddressSpace.ll",
"DebugInfo/UnknownBaseType.ll",
"DebugInfo/expr-opcode.ll",
)
}
return t.NewPackage("spirv-llvm-translator", version, newFromGitHub(
"KhronosGroup/SPIRV-LLVM-Translator",
"v"+version, checksum,
), &PackageAttr{
Patches: []KV{
{"remove-early-prefix", `diff --git a/CMakeLists.txt b/CMakeLists.txt
index c000a77e..f18f3fde 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -164,7 +164,7 @@ install(
${LLVM_SPIRV_INCLUDE_DIRS}/LLVMSPIRVOpts.h
${LLVM_SPIRV_INCLUDE_DIRS}/LLVMSPIRVExtensions.inc
DESTINATION
- ${CMAKE_INSTALL_PREFIX}/include/LLVMSPIRVLib
+ include/LLVMSPIRVLib
)
configure_file(LLVMSPIRVLib.pc.in ${CMAKE_BINARY_DIR}/LLVMSPIRVLib.pc @ONLY)
@@ -172,5 +172,5 @@ install(
FILES
${CMAKE_BINARY_DIR}/LLVMSPIRVLib.pc
DESTINATION
- ${CMAKE_INSTALL_PREFIX}/lib${LLVM_LIBDIR_SUFFIX}/pkgconfig
+ lib${LLVM_LIBDIR_SUFFIX}/pkgconfig
)
;`},
},
// litArgs emits shell syntax
ScriptEarly: `
export LIT_OPTS=` + litArgs(true, skipChecks...) + `
`,
}, &CMakeHelper{
Cache: []KV{
{"CMAKE_SKIP_BUILD_RPATH", "ON"},
{"BUILD_SHARED_LIBS", "ON"},
{"LLVM_SPIRV_ENABLE_LIBSPIRV_DIS", "ON"},
{"LLVM_EXTERNAL_SPIRV_HEADERS_SOURCE_DIR", "/system"},
{"LLVM_EXTERNAL_LIT", "/system/bin/lit"},
{"LLVM_INCLUDE_TESTS", "ON"},
},
},
Bash,
LIT,
SPIRVTools,
), version
}
func init() {
artifactsM[SPIRVLLVMTranslator] = Metadata{
f: Toolchain.newSPIRVLLVMTranslator,
Name: "spirv-llvm-translator",
Description: "bi-directional translation between SPIR-V and LLVM IR",
Website: "https://github.com/KhronosGroup/SPIRV-LLVM-Translator",
Dependencies: P{
SPIRVTools,
},
ID: 227273,
}
}

View File

@@ -1,19 +1,55 @@
package rosa package rosa
import ( import (
"runtime" "slices"
"strconv"
"strings"
"hakurei.app/internal/pkg" "hakurei.app/internal/pkg"
) )
// skipGNUTests generates a string for skipping specific tests by number in a
// GNU test suite. This is nontrivial because the test suite does not support
// excluding tests in any way, so ranges for all but the skipped tests have to
// be specified instead.
//
// For example, to skip test 764, ranges around the skipped test must be
// specified:
//
// 1-763 765-
//
// Tests are numbered starting from 1. The resulting string is unquoted.
func skipGNUTests(tests ...int) string {
tests = slices.Clone(tests)
slices.Sort(tests)
var buf strings.Builder
if tests[0] != 1 {
buf.WriteString("1-")
}
for i, n := range tests {
if n != 1 && (i == 0 || tests[i-1] != n-1) {
buf.WriteString(strconv.Itoa(n - 1))
buf.WriteString(" ")
}
if i == len(tests)-1 || tests[i+1] != n+1 {
buf.WriteString(strconv.Itoa(n + 1))
buf.WriteString("-")
}
}
return buf.String()
}
func (t Toolchain) newM4() (pkg.Artifact, string) { func (t Toolchain) newM4() (pkg.Artifact, string) {
const ( const (
version = "1.4.21" version = "1.4.21"
checksum = "pPa6YOo722Jw80l1OsH1tnUaklnPFjFT-bxGw5iAVrZTm1P8FQaWao_NXop46-pm" checksum = "pPa6YOo722Jw80l1OsH1tnUaklnPFjFT-bxGw5iAVrZTm1P8FQaWao_NXop46-pm"
) )
return t.NewPackage("m4", version, pkg.NewHTTPGetTar( return t.NewPackage("m4", version, newTar(
nil, "https://ftpmirror.gnu.org/gnu/m4/m4-"+version+".tar.bz2", "https://ftpmirror.gnu.org/gnu/m4/m4-"+version+".tar.bz2",
mustDecode(checksum), checksum,
pkg.TarBzip2, pkg.TarBzip2,
), &PackageAttr{ ), &PackageAttr{
Writable: true, Writable: true,
@@ -43,11 +79,19 @@ func (t Toolchain) newBison() (pkg.Artifact, string) {
version = "3.8.2" version = "3.8.2"
checksum = "BhRM6K7URj1LNOkIDCFDctSErLS-Xo5d9ba9seg10o6ACrgC1uNhED7CQPgIY29Y" checksum = "BhRM6K7URj1LNOkIDCFDctSErLS-Xo5d9ba9seg10o6ACrgC1uNhED7CQPgIY29Y"
) )
return t.NewPackage("bison", version, pkg.NewHTTPGetTar( return t.NewPackage("bison", version, newTar(
nil, "https://ftpmirror.gnu.org/gnu/bison/bison-"+version+".tar.gz", "https://ftpmirror.gnu.org/gnu/bison/bison-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), nil, (*MakeHelper)(nil), ), nil, &MakeHelper{
Check: []string{
"TESTSUITEFLAGS=" + jobsFlagE + "' " + skipGNUTests(
// clang miscompiles (SIGILL)
764,
) + "'",
"check",
},
},
M4, M4,
Diffutils, Diffutils,
Sed, Sed,
@@ -67,15 +111,17 @@ func init() {
func (t Toolchain) newSed() (pkg.Artifact, string) { func (t Toolchain) newSed() (pkg.Artifact, string) {
const ( const (
version = "4.9" version = "4.10"
checksum = "pe7HWH4PHNYrazOTlUoE1fXmhn2GOPFN_xE62i0llOr3kYGrH1g2_orDz0UtZ9Nt" checksum = "TXTRFQJCyflb-bpBRI2S5Y1DpplwvT7-KfXtpqN4AdZgZ5OtI6yStn1-bkhDKx51"
) )
return t.NewPackage("sed", version, pkg.NewHTTPGetTar( return t.NewPackage("sed", version, newTar(
nil, "https://ftpmirror.gnu.org/gnu/sed/sed-"+version+".tar.gz", "https://ftpmirror.gnu.org/gnu/sed/sed-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), nil, (*MakeHelper)(nil), ), nil, (*MakeHelper)(nil),
Diffutils, Diffutils,
KernelHeaders,
), version ), version
} }
func init() { func init() {
@@ -95,15 +141,15 @@ func (t Toolchain) newAutoconf() (pkg.Artifact, string) {
version = "2.73" version = "2.73"
checksum = "yGabDTeOfaCUB0JX-h3REYLYzMzvpDwFmFFzHNR7QilChCUNE4hR6q7nma4viDYg" checksum = "yGabDTeOfaCUB0JX-h3REYLYzMzvpDwFmFFzHNR7QilChCUNE4hR6q7nma4viDYg"
) )
return t.NewPackage("autoconf", version, pkg.NewHTTPGetTar( return t.NewPackage("autoconf", version, newTar(
nil, "https://ftpmirror.gnu.org/gnu/autoconf/autoconf-"+version+".tar.gz", "https://ftpmirror.gnu.org/gnu/autoconf/autoconf-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), &PackageAttr{ ), &PackageAttr{
Flag: TExclusive, Flag: TExclusive,
}, &MakeHelper{ }, &MakeHelper{
Check: []string{ Check: []string{
`TESTSUITEFLAGS="-j$(nproc)"`, "TESTSUITEFLAGS=" + jobsFlagE,
"check", "check",
}, },
}, },
@@ -135,9 +181,9 @@ func (t Toolchain) newAutomake() (pkg.Artifact, string) {
version = "1.18.1" version = "1.18.1"
checksum = "FjvLG_GdQP7cThTZJLDMxYpRcKdpAVG-YDs1Fj1yaHlSdh_Kx6nRGN14E0r_BjcG" checksum = "FjvLG_GdQP7cThTZJLDMxYpRcKdpAVG-YDs1Fj1yaHlSdh_Kx6nRGN14E0r_BjcG"
) )
return t.NewPackage("automake", version, pkg.NewHTTPGetTar( return t.NewPackage("automake", version, newTar(
nil, "https://ftpmirror.gnu.org/gnu/automake/automake-"+version+".tar.gz", "https://ftpmirror.gnu.org/gnu/automake/automake-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), &PackageAttr{ ), &PackageAttr{
Writable: true, Writable: true,
@@ -179,13 +225,16 @@ func (t Toolchain) newLibtool() (pkg.Artifact, string) {
version = "2.5.4" version = "2.5.4"
checksum = "pa6LSrQggh8mSJHQfwGjysAApmZlGJt8wif2cCLzqAAa2jpsTY0jZ-6stS3BWZ2Q" checksum = "pa6LSrQggh8mSJHQfwGjysAApmZlGJt8wif2cCLzqAAa2jpsTY0jZ-6stS3BWZ2Q"
) )
return t.NewPackage("libtool", version, pkg.NewHTTPGetTar( return t.NewPackage("libtool", version, newTar(
nil, "https://ftpmirror.gnu.org/gnu/libtool/libtool-"+version+".tar.gz", "https://ftpmirror.gnu.org/gnu/libtool/libtool-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), nil, &MakeHelper{ ), nil, &MakeHelper{
// _Z2a2c: symbol not found
SkipCheck: t.isStage0(),
Check: []string{ Check: []string{
`TESTSUITEFLAGS="-j$(nproc)"`, "TESTSUITEFLAGS=" + jobsFlagE,
"check", "check",
}, },
}, },
@@ -210,9 +259,9 @@ func (t Toolchain) newGzip() (pkg.Artifact, string) {
version = "1.14" version = "1.14"
checksum = "NWhjUavnNfTDFkZJyAUonL9aCOak8GVajWX2OMlzpFnuI0ErpBFyj88mz2xSjz0q" checksum = "NWhjUavnNfTDFkZJyAUonL9aCOak8GVajWX2OMlzpFnuI0ErpBFyj88mz2xSjz0q"
) )
return t.NewPackage("gzip", version, pkg.NewHTTPGetTar( return t.NewPackage("gzip", version, newTar(
nil, "https://ftpmirror.gnu.org/gnu/gzip/gzip-"+version+".tar.gz", "https://ftpmirror.gnu.org/gnu/gzip/gzip-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), nil, &MakeHelper{ ), nil, &MakeHelper{
// dependency loop // dependency loop
@@ -236,9 +285,9 @@ func (t Toolchain) newGettext() (pkg.Artifact, string) {
version = "1.0" version = "1.0"
checksum = "3MasKeEdPeFEgWgzsBKk7JqWqql1wEMbgPmzAfs-mluyokoW0N8oQVxPQoOnSdgC" checksum = "3MasKeEdPeFEgWgzsBKk7JqWqql1wEMbgPmzAfs-mluyokoW0N8oQVxPQoOnSdgC"
) )
return t.NewPackage("gettext", version, pkg.NewHTTPGetTar( return t.NewPackage("gettext", version, newTar(
nil, "https://ftpmirror.gnu.org/gnu/gettext/gettext-"+version+".tar.gz", "https://ftpmirror.gnu.org/gnu/gettext/gettext-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), &PackageAttr{ ), &PackageAttr{
Writable: true, Writable: true,
@@ -282,9 +331,9 @@ func (t Toolchain) newDiffutils() (pkg.Artifact, string) {
version = "3.12" version = "3.12"
checksum = "9J5VAq5oA7eqwzS1Yvw-l3G5o-TccUrNQR3PvyB_lgdryOFAfxtvQfKfhdpquE44" checksum = "9J5VAq5oA7eqwzS1Yvw-l3G5o-TccUrNQR3PvyB_lgdryOFAfxtvQfKfhdpquE44"
) )
return t.NewPackage("diffutils", version, pkg.NewHTTPGetTar( return t.NewPackage("diffutils", version, newTar(
nil, "https://ftpmirror.gnu.org/gnu/diffutils/diffutils-"+version+".tar.gz", "https://ftpmirror.gnu.org/gnu/diffutils/diffutils-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), &PackageAttr{ ), &PackageAttr{
Writable: true, Writable: true,
@@ -315,9 +364,9 @@ func (t Toolchain) newPatch() (pkg.Artifact, string) {
version = "2.8" version = "2.8"
checksum = "MA0BQc662i8QYBD-DdGgyyfTwaeALZ1K0yusV9rAmNiIsQdX-69YC4t9JEGXZkeR" checksum = "MA0BQc662i8QYBD-DdGgyyfTwaeALZ1K0yusV9rAmNiIsQdX-69YC4t9JEGXZkeR"
) )
return t.NewPackage("patch", version, pkg.NewHTTPGetTar( return t.NewPackage("patch", version, newTar(
nil, "https://ftpmirror.gnu.org/gnu/patch/patch-"+version+".tar.gz", "https://ftpmirror.gnu.org/gnu/patch/patch-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), &PackageAttr{ ), &PackageAttr{
Writable: true, Writable: true,
@@ -347,9 +396,9 @@ func (t Toolchain) newBash() (pkg.Artifact, string) {
version = "5.3" version = "5.3"
checksum = "4LQ_GRoB_ko-Ih8QPf_xRKA02xAm_TOxQgcJLmFDT6udUPxTAWrsj-ZNeuTusyDq" checksum = "4LQ_GRoB_ko-Ih8QPf_xRKA02xAm_TOxQgcJLmFDT6udUPxTAWrsj-ZNeuTusyDq"
) )
return t.NewPackage("bash", version, pkg.NewHTTPGetTar( return t.NewPackage("bash", version, newTar(
nil, "https://ftpmirror.gnu.org/gnu/bash/bash-"+version+".tar.gz", "https://ftpmirror.gnu.org/gnu/bash/bash-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), &PackageAttr{ ), &PackageAttr{
Flag: TEarly, Flag: TEarly,
@@ -374,12 +423,12 @@ func init() {
func (t Toolchain) newCoreutils() (pkg.Artifact, string) { func (t Toolchain) newCoreutils() (pkg.Artifact, string) {
const ( const (
version = "9.10" version = "9.11"
checksum = "o-B9wssRnZySzJUI1ZJAgw-bZtj1RC67R9po2AcM2OjjS8FQIl16IRHpC6IwO30i" checksum = "t8UMed5wpFEoC56aa42_yidfOAaRGzOfj7MRtQkkqgGbpXiskNA8bd-EmVSQkZie"
) )
return t.NewPackage("coreutils", version, pkg.NewHTTPGetTar( return t.NewPackage("coreutils", version, newTar(
nil, "https://ftpmirror.gnu.org/gnu/coreutils/coreutils-"+version+".tar.gz", "https://ftpmirror.gnu.org/gnu/coreutils/coreutils-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), &PackageAttr{ ), &PackageAttr{
Writable: true, Writable: true,
@@ -387,106 +436,13 @@ func (t Toolchain) newCoreutils() (pkg.Artifact, string) {
test_disable() { chmod +w "$2" && echo "$1" > "$2"; } test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
test_disable '#!/bin/sh' gnulib-tests/test-c32ispunct.sh test_disable '#!/bin/sh' gnulib-tests/test-c32ispunct.sh
test_disable '#!/bin/sh' tests/split/line-bytes.sh
test_disable '#!/bin/sh' tests/ls/hyperlink.sh test_disable '#!/bin/sh' tests/ls/hyperlink.sh
test_disable '#!/bin/sh' tests/misc/user.sh
test_disable 'int main(){return 0;}' gnulib-tests/test-chown.c test_disable 'int main(){return 0;}' gnulib-tests/test-chown.c
test_disable 'int main(){return 0;}' gnulib-tests/test-fchownat.c test_disable 'int main(){return 0;}' gnulib-tests/test-fchownat.c
test_disable 'int main(){return 0;}' gnulib-tests/test-lchown.c test_disable 'int main(){return 0;}' gnulib-tests/test-lchown.c
`, `,
Patches: []KV{
{"tests-fix-job-control", `From 21d287324aa43aa3a31f39619ade0deac7fd6013 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A1draig=20Brady?= <P@draigBrady.com>
Date: Tue, 24 Feb 2026 15:44:41 +0000
Subject: [PATCH] tests: fix job control triggering test termination
This avoids the test harness being terminated like:
make[1]: *** [Makefile:24419: check-recursive] Hangup
make[3]: *** [Makefile:24668: check-TESTS] Hangup
make: *** [Makefile:24922: check] Hangup
make[2]: *** [Makefile:24920: check-am] Hangup
make[4]: *** [Makefile:24685: tests/misc/usage_vs_refs.log] Error 129
...
This happened sometimes when the tests were being run non interactively.
For example when run like:
setsid make TESTS="tests/timeout/timeout.sh \
tests/tail/overlay-headers.sh" SUBDIRS=. -j2 check
Note the race window can be made bigger by adding a sleep
after tail is stopped in overlay-headers.sh
The race can trigger the kernel to induce its job control
mechanism to prevent stuck processes.
I.e. where it sends SIGHUP + SIGCONT to a process group
when it determines that group may become orphaned,
and there are stopped processes in that group.
* tests/tail/overlay-headers.sh: Use setsid(1) to keep the stopped
tail process in a separate process group, thus avoiding any kernel
job control protection mechanism.
* tests/timeout/timeout.sh: Use setsid(1) to avoid the kernel
checking the main process group when sleep(1) is reparented.
Fixes https://bugs.gnu.org/80477
---
tests/tail/overlay-headers.sh | 8 +++++++-
tests/timeout/timeout.sh | 11 ++++++++---
2 files changed, 15 insertions(+), 4 deletions(-)
diff --git a/tests/tail/overlay-headers.sh b/tests/tail/overlay-headers.sh
index be9b6a7df..1e6da0a3f 100755
--- a/tests/tail/overlay-headers.sh
+++ b/tests/tail/overlay-headers.sh
@@ -20,6 +20,8 @@
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ tail sleep
+setsid true || skip_ 'setsid required to control groups'
+
# Function to count number of lines from tail
# while ignoring transient errors due to resource limits
countlines_ ()
@@ -54,7 +56,11 @@ echo start > file2 || framework_failure_
env sleep 60 & sleep=$!
# Note don't use timeout(1) here as it currently
-# does not propagate SIGCONT
+# does not propagate SIGCONT.
+# Note use setsid here to ensure we're in a separate process group
+# as we're going to STOP this tail process, and this can trigger
+# the kernel to send SIGHUP to a group if other tests have
+# processes that are reparented. (See tests/timeout/timeout.sh).
tail $fastpoll --pid=$sleep -f file1 file2 > out & pid=$!
# Ensure tail is running
diff --git a/tests/timeout/timeout.sh b/tests/timeout/timeout.sh
index 9a395416b..fbb043312 100755
--- a/tests/timeout/timeout.sh
+++ b/tests/timeout/timeout.sh
@@ -56,9 +56,14 @@ returns_ 124 timeout --foreground -s0 -k1 .1 sleep 10 && fail=1
) || fail=1
# Don't be confused when starting off with a child (Bug#9098).
-out=$(sleep .1 & exec timeout .5 sh -c 'sleep 2; echo foo')
-status=$?
-test "$out" = "" && test $status = 124 || fail=1
+# Use setsid to avoid sleep being in the test's process group, as
+# upon reparenting it can trigger an orphaned process group SIGHUP
+# (if there were stopped processes in other tests).
+if setsid true; then
+ out=$(setsid sleep .1 & exec timeout .5 sh -c 'sleep 2; echo foo')
+ status=$?
+ test "$out" = "" && test $status = 124 || fail=1
+fi
# Verify --verbose output
cat > exp <<\EOF
--
2.53.0
`},
},
Flag: TEarly, Flag: TEarly,
}, &MakeHelper{ }, &MakeHelper{
Configure: []KV{ Configure: []KV{
@@ -516,9 +472,9 @@ func (t Toolchain) newTexinfo() (pkg.Artifact, string) {
version = "7.3" version = "7.3"
checksum = "RRmC8Xwdof7JuZJeWGAQ_GeASIHAuJFQMbNONXBz5InooKIQGmqmWRjGNGEr5n4-" checksum = "RRmC8Xwdof7JuZJeWGAQ_GeASIHAuJFQMbNONXBz5InooKIQGmqmWRjGNGEr5n4-"
) )
return t.NewPackage("texinfo", version, pkg.NewHTTPGetTar( return t.NewPackage("texinfo", version, newTar(
nil, "https://ftpmirror.gnu.org/gnu/texinfo/texinfo-"+version+".tar.gz", "https://ftpmirror.gnu.org/gnu/texinfo/texinfo-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), nil, &MakeHelper{ ), nil, &MakeHelper{
// nonstandard glibc extension // nonstandard glibc extension
@@ -549,9 +505,9 @@ func (t Toolchain) newGperf() (pkg.Artifact, string) {
version = "3.3" version = "3.3"
checksum = "RtIy9pPb_Bb8-31J2Nw-rRGso2JlS-lDlVhuNYhqR7Nt4xM_nObznxAlBMnarJv7" checksum = "RtIy9pPb_Bb8-31J2Nw-rRGso2JlS-lDlVhuNYhqR7Nt4xM_nObznxAlBMnarJv7"
) )
return t.NewPackage("gperf", version, pkg.NewHTTPGetTar( return t.NewPackage("gperf", version, newTar(
nil, "https://ftpmirror.gnu.org/gperf/gperf-"+version+".tar.gz", "https://ftpmirror.gnu.org/gperf/gperf-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), nil, (*MakeHelper)(nil), ), nil, (*MakeHelper)(nil),
Diffutils, Diffutils,
@@ -574,9 +530,9 @@ func (t Toolchain) newGawk() (pkg.Artifact, string) {
version = "5.4.0" version = "5.4.0"
checksum = "m0RkIolC-PI7EY5q8pcx5Y-0twlIW0Yp3wXXmV-QaHorSdf8BhZ7kW9F8iWomz0C" checksum = "m0RkIolC-PI7EY5q8pcx5Y-0twlIW0Yp3wXXmV-QaHorSdf8BhZ7kW9F8iWomz0C"
) )
return t.NewPackage("gawk", version, pkg.NewHTTPGetTar( return t.NewPackage("gawk", version, newTar(
nil, "https://ftpmirror.gnu.org/gnu/gawk/gawk-"+version+".tar.gz", "https://ftpmirror.gnu.org/gnu/gawk/gawk-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), &PackageAttr{ ), &PackageAttr{
Flag: TEarly, Flag: TEarly,
@@ -602,9 +558,9 @@ func (t Toolchain) newGrep() (pkg.Artifact, string) {
version = "3.12" version = "3.12"
checksum = "qMB4RjaPNRRYsxix6YOrjE8gyAT1zVSTy4nW4wKW9fqa0CHYAuWgPwDTirENzm_1" checksum = "qMB4RjaPNRRYsxix6YOrjE8gyAT1zVSTy4nW4wKW9fqa0CHYAuWgPwDTirENzm_1"
) )
return t.NewPackage("grep", version, pkg.NewHTTPGetTar( return t.NewPackage("grep", version, newTar(
nil, "https://ftpmirror.gnu.org/gnu/grep/grep-"+version+".tar.gz", "https://ftpmirror.gnu.org/gnu/grep/grep-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), &PackageAttr{ ), &PackageAttr{
Writable: true, Writable: true,
@@ -639,7 +595,6 @@ func (t Toolchain) newFindutils() (pkg.Artifact, string) {
nil, "https://ftpmirror.gnu.org/gnu/findutils/findutils-"+version+".tar.xz", nil, "https://ftpmirror.gnu.org/gnu/findutils/findutils-"+version+".tar.xz",
mustDecode(checksum), mustDecode(checksum),
), &PackageAttr{ ), &PackageAttr{
SourceKind: SourceKindTarXZ,
ScriptEarly: ` ScriptEarly: `
echo '#!/bin/sh' > gnulib-tests/test-c32ispunct.sh echo '#!/bin/sh' > gnulib-tests/test-c32ispunct.sh
echo 'int main(){return 0;}' > tests/xargs/test-sigusr.c echo 'int main(){return 0;}' > tests/xargs/test-sigusr.c
@@ -667,9 +622,9 @@ func (t Toolchain) newBC() (pkg.Artifact, string) {
version = "1.08.2" version = "1.08.2"
checksum = "8h6f3hjV80XiFs6v9HOPF2KEyg1kuOgn5eeFdVspV05ODBVQss-ey5glc8AmneLy" checksum = "8h6f3hjV80XiFs6v9HOPF2KEyg1kuOgn5eeFdVspV05ODBVQss-ey5glc8AmneLy"
) )
return t.NewPackage("bc", version, pkg.NewHTTPGetTar( return t.NewPackage("bc", version, newTar(
nil, "https://ftpmirror.gnu.org/gnu/bc/bc-"+version+".tar.gz", "https://ftpmirror.gnu.org/gnu/bc/bc-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), &PackageAttr{ ), &PackageAttr{
// source expected to be writable // source expected to be writable
@@ -696,9 +651,9 @@ func (t Toolchain) newLibiconv() (pkg.Artifact, string) {
version = "1.19" version = "1.19"
checksum = "UibB6E23y4MksNqYmCCrA3zTFO6vJugD1DEDqqWYFZNuBsUWMVMcncb_5pPAr88x" checksum = "UibB6E23y4MksNqYmCCrA3zTFO6vJugD1DEDqqWYFZNuBsUWMVMcncb_5pPAr88x"
) )
return t.NewPackage("libiconv", version, pkg.NewHTTPGetTar( return t.NewPackage("libiconv", version, newTar(
nil, "https://ftpmirror.gnu.org/gnu/libiconv/libiconv-"+version+".tar.gz", "https://ftpmirror.gnu.org/gnu/libiconv/libiconv-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), nil, (*MakeHelper)(nil)), version ), nil, (*MakeHelper)(nil)), version
} }
@@ -719,9 +674,9 @@ func (t Toolchain) newTar() (pkg.Artifact, string) {
version = "1.35" version = "1.35"
checksum = "zSaoSlVUDW0dSfm4sbL4FrXLFR8U40Fh3zY5DWhR5NCIJ6GjU6Kc4VZo2-ZqpBRA" checksum = "zSaoSlVUDW0dSfm4sbL4FrXLFR8U40Fh3zY5DWhR5NCIJ6GjU6Kc4VZo2-ZqpBRA"
) )
return t.NewPackage("tar", version, pkg.NewHTTPGetTar( return t.NewPackage("tar", version, newTar(
nil, "https://ftpmirror.gnu.org/gnu/tar/tar-"+version+".tar.gz", "https://ftpmirror.gnu.org/gnu/tar/tar-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), nil, &MakeHelper{ ), nil, &MakeHelper{
Configure: []KV{ Configure: []KV{
@@ -733,7 +688,7 @@ func (t Toolchain) newTar() (pkg.Artifact, string) {
// very expensive // very expensive
"TARTEST_SKIP_LARGE_FILES=1", "TARTEST_SKIP_LARGE_FILES=1",
`TESTSUITEFLAGS="-j$(nproc)"`, "TESTSUITEFLAGS=" + jobsFlagE,
"check", "check",
}, },
}, },
@@ -758,15 +713,20 @@ func init() {
func (t Toolchain) newParallel() (pkg.Artifact, string) { func (t Toolchain) newParallel() (pkg.Artifact, string) {
const ( const (
version = "20260322" version = "20260422"
checksum = "gHoPmFkOO62ev4xW59HqyMlodhjp8LvTsBOwsVKHUUdfrt7KwB8koXmSVqQ4VOrB" checksum = "eTsepxgqhXpMEhPd55qh-W5y4vjKn0x9TD2mzbJCNZYtFf4lT4Wzoqr74HGJYBEH"
) )
return t.NewPackage("parallel", version, pkg.NewHTTPGetTar( return t.NewPackage("parallel", version, newTar(
nil, "https://ftpmirror.gnu.org/gnu/parallel/parallel-"+version+".tar.bz2", "https://ftpmirror.gnu.org/gnu/parallel/parallel-"+version+".tar.bz2",
mustDecode(checksum), checksum,
pkg.TarBzip2, pkg.TarBzip2,
), nil, (*MakeHelper)(nil), ), &PackageAttr{
ScriptEarly: `
ln -s ../system/bin/bash /bin/
`,
}, (*MakeHelper)(nil),
Perl, Perl,
Bash,
), version ), version
} }
func init() { func init() {
@@ -790,9 +750,9 @@ func (t Toolchain) newLibunistring() (pkg.Artifact, string) {
version = "1.4.2" version = "1.4.2"
checksum = "iW9BbfLoVlXjWoLTZ4AekQSu4cFBnLcZ4W8OHWbv0AhJNgD3j65_zqaLMzFKylg2" checksum = "iW9BbfLoVlXjWoLTZ4AekQSu4cFBnLcZ4W8OHWbv0AhJNgD3j65_zqaLMzFKylg2"
) )
return t.NewPackage("libunistring", version, pkg.NewHTTPGetTar( return t.NewPackage("libunistring", version, newTar(
nil, "https://ftp.gnu.org/gnu/libunistring/libunistring-"+version+".tar.gz", "https://ftpmirror.gnu.org/gnu/libunistring/libunistring-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), &PackageAttr{ ), &PackageAttr{
Writable: true, Writable: true,
@@ -823,9 +783,9 @@ func (t Toolchain) newLibtasn1() (pkg.Artifact, string) {
version = "4.21.0" version = "4.21.0"
checksum = "9DYI3UYbfYLy8JsKUcY6f0irskbfL0fHZA91Q-JEOA3kiUwpodyjemRsYRjUpjuq" checksum = "9DYI3UYbfYLy8JsKUcY6f0irskbfL0fHZA91Q-JEOA3kiUwpodyjemRsYRjUpjuq"
) )
return t.NewPackage("libtasn1", version, pkg.NewHTTPGetTar( return t.NewPackage("libtasn1", version, newTar(
nil, "https://ftpmirror.gnu.org/gnu/libtasn1/libtasn1-"+version+".tar.gz", "https://ftpmirror.gnu.org/gnu/libtasn1/libtasn1-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), nil, (*MakeHelper)(nil)), version ), nil, (*MakeHelper)(nil)), version
} }
@@ -846,9 +806,9 @@ func (t Toolchain) newReadline() (pkg.Artifact, string) {
version = "8.3" version = "8.3"
checksum = "r-lcGRJq_MvvBpOq47Z2Y1OI2iqrmtcqhTLVXR0xWo37ZpC2uT_md7gKq5o_qTMV" checksum = "r-lcGRJq_MvvBpOq47Z2Y1OI2iqrmtcqhTLVXR0xWo37ZpC2uT_md7gKq5o_qTMV"
) )
return t.NewPackage("readline", version, pkg.NewHTTPGetTar( return t.NewPackage("readline", version, newTar(
nil, "https://ftp.gnu.org/gnu/readline/readline-"+version+".tar.gz", "https://ftpmirror.gnu.org/gnu/readline/readline-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), nil, &MakeHelper{ ), nil, &MakeHelper{
Configure: []KV{ Configure: []KV{
@@ -882,17 +842,16 @@ func (t Toolchain) newGnuTLS() (pkg.Artifact, string) {
) )
var configureExtra []KV var configureExtra []KV
switch runtime.GOARCH { switch arch {
case "arm64": case "arm64":
configureExtra = []KV{ configureExtra = []KV{
{"disable-hardware-acceleration"}, {"disable-hardware-acceleration"},
} }
} }
return t.NewPackage("gnutls", version, t.NewViaGit( return t.NewPackage("gnutls", version, t.newTagRemote(
"https://gitlab.com/gnutls/gnutls.git", "https://gitlab.com/gnutls/gnutls.git",
"refs/tags/"+version, version, checksum,
mustDecode(checksum),
), &PackageAttr{ ), &PackageAttr{
Patches: []KV{ Patches: []KV{
{"bootstrap-remove-gtk-doc", `diff --git a/bootstrap.conf b/bootstrap.conf {"bootstrap-remove-gtk-doc", `diff --git a/bootstrap.conf b/bootstrap.conf
@@ -1062,9 +1021,9 @@ func (t Toolchain) newBinutils() (pkg.Artifact, string) {
version = "2.46.0" version = "2.46.0"
checksum = "4kK1_EXQipxSqqyvwD4LbiMLFKCUApjq6PeG4XJP4dzxYGqDeqXfh8zLuTyOuOVR" checksum = "4kK1_EXQipxSqqyvwD4LbiMLFKCUApjq6PeG4XJP4dzxYGqDeqXfh8zLuTyOuOVR"
) )
return t.NewPackage("binutils", version, pkg.NewHTTPGetTar( return t.NewPackage("binutils", version, newTar(
nil, "https://ftpmirror.gnu.org/gnu/binutils/binutils-"+version+".tar.bz2", "https://ftpmirror.gnu.org/gnu/binutils/binutils-"+version+".tar.bz2",
mustDecode(checksum), checksum,
pkg.TarBzip2, pkg.TarBzip2,
), nil, (*MakeHelper)(nil), ), nil, (*MakeHelper)(nil),
Bash, Bash,
@@ -1087,12 +1046,16 @@ func (t Toolchain) newGMP() (pkg.Artifact, string) {
version = "6.3.0" version = "6.3.0"
checksum = "yrgbgEDWKDdMWVHh7gPbVl56-sRtVVhfvv0M_LX7xMUUk_mvZ1QOJEAnt7g4i3k5" checksum = "yrgbgEDWKDdMWVHh7gPbVl56-sRtVVhfvv0M_LX7xMUUk_mvZ1QOJEAnt7g4i3k5"
) )
return t.NewPackage("gmp", version, pkg.NewHTTPGetTar( return t.NewPackage("gmp", version, newTar(
nil, "https://gcc.gnu.org/pub/gcc/infrastructure/"+ "https://gcc.gnu.org/pub/gcc/infrastructure/"+
"gmp-"+version+".tar.bz2", "gmp-"+version+".tar.bz2",
mustDecode(checksum), checksum,
pkg.TarBzip2, pkg.TarBzip2,
), nil, (*MakeHelper)(nil), ), &PackageAttr{
Env: []string{
"CC=cc",
},
}, (*MakeHelper)(nil),
M4, M4,
), version ), version
} }
@@ -1113,10 +1076,10 @@ func (t Toolchain) newMPFR() (pkg.Artifact, string) {
version = "4.2.2" version = "4.2.2"
checksum = "wN3gx0zfIuCn9r3VAn_9bmfvAYILwrRfgBjYSD1IjLqyLrLojNN5vKyQuTE9kA-B" checksum = "wN3gx0zfIuCn9r3VAn_9bmfvAYILwrRfgBjYSD1IjLqyLrLojNN5vKyQuTE9kA-B"
) )
return t.NewPackage("mpfr", version, pkg.NewHTTPGetTar( return t.NewPackage("mpfr", version, newTar(
nil, "https://gcc.gnu.org/pub/gcc/infrastructure/"+ "https://gcc.gnu.org/pub/gcc/infrastructure/"+
"mpfr-"+version+".tar.bz2", "mpfr-"+version+".tar.bz2",
mustDecode(checksum), checksum,
pkg.TarBzip2, pkg.TarBzip2,
), nil, (*MakeHelper)(nil), ), nil, (*MakeHelper)(nil),
GMP, GMP,
@@ -1140,13 +1103,13 @@ func init() {
func (t Toolchain) newMPC() (pkg.Artifact, string) { func (t Toolchain) newMPC() (pkg.Artifact, string) {
const ( const (
version = "1.4.0" version = "1.4.1"
checksum = "TbrxLiE3ipQrHz_F3Xzz4zqBAnkMWyjhNwIK6wh9360RZ39xMt8rxfW3LxA9SnvU" checksum = "ZffaZyWkvIw0iPvRe5EJ7O-VvHtSkbbb3K_7SgPtK810NvGan7nbF0T5-6tozjQN"
) )
return t.NewPackage("mpc", version, t.NewViaGit( return t.NewPackage("mpc", version, newFromGitLab(
"https://gitlab.inria.fr/mpc/mpc.git", "gitlab.inria.fr",
"refs/tags/"+version, "mpc/mpc",
mustDecode(checksum), version, checksum,
), &PackageAttr{ ), &PackageAttr{
// does not find mpc-impl.h otherwise // does not find mpc-impl.h otherwise
EnterSource: true, EnterSource: true,
@@ -1179,13 +1142,20 @@ func init() {
func (t Toolchain) newGCC() (pkg.Artifact, string) { func (t Toolchain) newGCC() (pkg.Artifact, string) {
const ( const (
version = "15.2.0" version = "16.1.0"
checksum = "TXJ5WrbXlGLzy1swghQTr4qxgDCyIZFgJry51XEPTBZ8QYbVmFeB4lZbSMtPJ-a1" checksum = "4ASoWbxaA2FW7PAB0zzHDPC5XnNhyaAyjtDPpGzceSLeYnEIXsNYZR3PA_Zu5P0K"
) )
return t.NewPackage("gcc", version, pkg.NewHTTPGetTar(
nil, "https://ftp.tsukuba.wide.ad.jp/software/gcc/releases/"+ var configureExtra []KV
switch arch {
case "amd64", "arm64":
configureExtra = append(configureExtra, KV{"with-multilib-list", "''"})
}
return t.NewPackage("gcc", version, newTar(
"https://ftp.tsukuba.wide.ad.jp/software/gcc/releases/"+
"gcc-"+version+"/gcc-"+version+".tar.gz", "gcc-"+version+"/gcc-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), &PackageAttr{ ), &PackageAttr{
Patches: []KV{ Patches: []KV{
@@ -1347,9 +1317,8 @@ ln -s system/lib /work/
// it also saturates the CPU for a consequential amount of time. // it also saturates the CPU for a consequential amount of time.
Flag: TExclusive, Flag: TExclusive,
}, &MakeHelper{ }, &MakeHelper{
Configure: []KV{ Configure: append([]KV{
{"disable-multilib"}, {"disable-multilib"},
{"with-multilib-list", `""`},
{"enable-default-pie"}, {"enable-default-pie"},
{"disable-nls"}, {"disable-nls"},
{"with-gnu-as"}, {"with-gnu-as"},
@@ -1357,7 +1326,7 @@ ln -s system/lib /work/
{"with-system-zlib"}, {"with-system-zlib"},
{"enable-languages", "c,c++,go"}, {"enable-languages", "c,c++,go"},
{"with-native-system-header-dir", "/system/include"}, {"with-native-system-header-dir", "/system/include"},
}, }, configureExtra...),
Make: []string{ Make: []string{
"BOOT_CFLAGS='-O2 -g'", "BOOT_CFLAGS='-O2 -g'",
"bootstrap", "bootstrap",

35
internal/rosa/gnu_test.go Normal file
View File

@@ -0,0 +1,35 @@
package rosa
import (
"slices"
"strconv"
"strings"
"testing"
)
func TestSkipGNUTests(t *testing.T) {
t.Parallel()
testCases := []struct {
tests []int
want string
}{
{[]int{764}, "1-763 765-"},
{[]int{764, 0xcafe, 37, 9}, "1-8 10-36 38-763 765-51965 51967-"},
{[]int{1, 2, 0xbed}, "3-3052 3054-"},
{[]int{3, 4}, "1-2 5-"},
}
for _, tc := range testCases {
t.Run(strings.Join(slices.Collect(func(yield func(string) bool) {
for _, n := range tc.tests {
yield(strconv.Itoa(n))
}
}), ","), func(t *testing.T) {
t.Parallel()
if got := skipGNUTests(tc.tests...); got != tc.want {
t.Errorf("skipGNUTests: %q, want %q", got, tc.want)
}
})
}
}

View File

@@ -1,7 +1,6 @@
package rosa package rosa
import ( import (
"runtime"
"slices" "slices"
"hakurei.app/internal/pkg" "hakurei.app/internal/pkg"
@@ -10,9 +9,9 @@ import (
// newGoBootstrap returns the Go bootstrap toolchain. // newGoBootstrap returns the Go bootstrap toolchain.
func (t Toolchain) newGoBootstrap() pkg.Artifact { func (t Toolchain) newGoBootstrap() pkg.Artifact {
const checksum = "8o9JL_ToiQKadCTb04nvBDkp8O1xiWOolAxVEqaTGodieNe4lOFEjlOxN3bwwe23" const checksum = "8o9JL_ToiQKadCTb04nvBDkp8O1xiWOolAxVEqaTGodieNe4lOFEjlOxN3bwwe23"
return t.New("go1.4-bootstrap", 0, []pkg.Artifact{ return t.New("go1.4-bootstrap", 0, t.AppendPresets(nil,
t.Load(Bash), Bash,
}, nil, []string{ ), nil, []string{
"CGO_ENABLED=0", "CGO_ENABLED=0",
}, ` }, `
mkdir -p /var/tmp/ /work/system/ mkdir -p /var/tmp/ /work/system/
@@ -21,9 +20,9 @@ cd /work/system/go/src
chmod -R +w .. chmod -R +w ..
./make.bash ./make.bash
`, pkg.Path(AbsUsrSrc.Append("go"), false, pkg.NewHTTPGetTar( `, pkg.Path(AbsUsrSrc.Append("go"), false, newTar(
nil, "https://dl.google.com/go/go1.4-bootstrap-20171003.tar.gz", "https://dl.google.com/go/go1.4-bootstrap-20171003.tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
))) )))
} }
@@ -35,9 +34,13 @@ func (t Toolchain) newGo(
script string, script string,
extra ...pkg.Artifact, extra ...pkg.Artifact,
) pkg.Artifact { ) pkg.Artifact {
return t.New("go"+version, 0, slices.Concat([]pkg.Artifact{ name := "all"
t.Load(Bash), if presetOpts&OptSkipCheck != 0 {
}, extra), nil, slices.Concat([]string{ name = "make"
}
return t.New("go"+version, 0, t.AppendPresets(extra,
Bash,
), nil, slices.Concat([]string{
"CC=cc", "CC=cc",
"GOCACHE=/tmp/gocache", "GOCACHE=/tmp/gocache",
"GOROOT_BOOTSTRAP=/system/go", "GOROOT_BOOTSTRAP=/system/go",
@@ -48,16 +51,16 @@ cp -r /usr/src/go /work/system
cd /work/system/go/src cd /work/system/go/src
chmod -R +w .. chmod -R +w ..
`+script+` `+script+`
./all.bash ./`+name+`.bash
mkdir /work/system/bin mkdir /work/system/bin
ln -s \ ln -s \
../go/bin/go \ ../go/bin/go \
../go/bin/gofmt \ ../go/bin/gofmt \
/work/system/bin /work/system/bin
`, pkg.Path(AbsUsrSrc.Append("go"), false, pkg.NewHTTPGetTar( `, pkg.Path(AbsUsrSrc.Append("go"), false, newTar(
nil, "https://go.dev/dl/go"+version+".src.tar.gz", "https://go.dev/dl/go"+version+".src.tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
))) )))
} }
@@ -69,7 +72,7 @@ func (t Toolchain) newGoLatest() (pkg.Artifact, string) {
finalEnv []string finalEnv []string
) )
switch runtime.GOARCH { switch arch {
case "amd64": case "amd64":
bootstrapExtra = append(bootstrapExtra, t.newGoBootstrap()) bootstrapExtra = append(bootstrapExtra, t.newGoBootstrap())
@@ -79,7 +82,7 @@ func (t Toolchain) newGoLatest() (pkg.Artifact, string) {
finalEnv = append(finalEnv, "CGO_ENABLED=0") finalEnv = append(finalEnv, "CGO_ENABLED=0")
default: default:
panic("unsupported target " + runtime.GOARCH) panic("unsupported target " + arch)
} }
go119 := t.newGo( go119 := t.newGo(
@@ -104,7 +107,7 @@ echo \
[]string{"CGO_ENABLED=0"}, ` []string{"CGO_ENABLED=0"}, `
sed -i \ sed -i \
's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \ 's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \
cmd/link/internal/`+runtime.GOARCH+`/obj.go cmd/link/internal/`+arch+`/obj.go
rm \ rm \
crypto/tls/handshake_client_test.go \ crypto/tls/handshake_client_test.go \
@@ -122,17 +125,17 @@ echo \
[]string{"CGO_ENABLED=0"}, ` []string{"CGO_ENABLED=0"}, `
sed -i \ sed -i \
's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \ 's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \
cmd/link/internal/`+runtime.GOARCH+`/obj.go cmd/link/internal/`+arch+`/obj.go
`, go121, `, go121,
) )
go125 := t.newGo( go125 := t.newGo(
"1.25.7", "1.25.10",
"fyylHdBVRUobnBjYj3NKBaYPUw3kGmo2mEELiZonOYurPfbarNU1x77B99Fjut7Q", "TwKwatkpwal-j9U2sDSRPEdM3YesI4Gm88YgGV59wtU-L85K9gA7UPy9SCxn6PMb",
[]string{"CGO_ENABLED=0"}, ` []string{"CGO_ENABLED=0"}, `
sed -i \ sed -i \
's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \ 's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \
cmd/link/internal/`+runtime.GOARCH+`/obj.go cmd/link/internal/`+arch+`/obj.go
rm \ rm \
os/root_unix_test.go \ os/root_unix_test.go \
@@ -141,8 +144,8 @@ rm \
) )
const ( const (
version = "1.26.2" version = "1.26.3"
checksum = "v-6BE89_1g3xYf-9oIYpJKFXlo3xKHYJj2_VGkaUq8ZVkIVQmLwrto-xGG03OISH" checksum = "lEiFocZFnN5fKvZzmwVdqc9pYUjAuhzqZGbuiOqxUP4XdcY8yECisKcqsQ_eNn1N"
) )
return t.newGo( return t.newGo(
version, version,
@@ -150,10 +153,15 @@ rm \
finalEnv, ` finalEnv, `
sed -i \ sed -i \
's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \ 's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \
cmd/link/internal/`+runtime.GOARCH+`/obj.go cmd/link/internal/`+arch+`/obj.go
sed -i \
's/cpu.X86.HasAVX512VBMI/& \&\& cpu.X86.HasPOPCNT/' \
internal/runtime/gc/scan/scan_amd64.go
rm \ rm \
os/root_unix_test.go os/root_unix_test.go \
cmd/cgo/internal/testsanitizers/tsan_test.go \
cmd/cgo/internal/testsanitizers/cshared_test.go
`, go125, `, go125,
), version ), version
} }

View File

@@ -7,13 +7,12 @@ import (
func (t Toolchain) newGLib() (pkg.Artifact, string) { func (t Toolchain) newGLib() (pkg.Artifact, string) {
const ( const (
version = "2.88.0" version = "2.88.1"
checksum = "T79Cg4z6j-sDZ2yIwvbY4ccRv2-fbwbqgcw59F5NQ6qJT6z4v261vbYp3dHO6Ma3" checksum = "Rkszn6W4RHjyspyqfXdVAVawdwDJCuS0Zu0f7qot7tbJhnw2fUDoUUJB40m-1MCX"
) )
return t.NewPackage("glib", version, t.NewViaGit( return t.NewPackage("glib", version, t.newTagRemote(
"https://gitlab.gnome.org/GNOME/glib.git", "https://gitlab.gnome.org/GNOME/glib.git",
"refs/tags/"+version, version, checksum,
mustDecode(checksum),
), &PackageAttr{ ), &PackageAttr{
Paths: []pkg.ExecPath{ Paths: []pkg.ExecPath{
pkg.Path(fhs.AbsEtc.Append( pkg.Path(fhs.AbsEtc.Append(

View File

@@ -7,9 +7,8 @@ func (t Toolchain) newHakurei(
withHostname bool, withHostname bool,
) pkg.Artifact { ) pkg.Artifact {
hostname := ` hostname := `
echo '# Building test helper (hostname).' echo 'Building test helper (hostname).'
go build -v -o /bin/hostname /usr/src/hostname/main.go go build -o /bin/hostname /usr/src/hostname/main.go
echo
` `
if !withHostname { if !withHostname {
hostname = "" hostname = ""
@@ -64,9 +63,9 @@ func init() {
return t.newHakurei("", ` return t.newHakurei("", `
mkdir -p /work/system/libexec/hakurei/ mkdir -p /work/system/libexec/hakurei/
echo '# Building hakurei.' echo "Building hakurei for $(go env GOOS)/$(go env GOARCH)."
go generate -v ./... go generate ./...
go build -trimpath -v -tags=rosa -o /work/system/libexec/hakurei -ldflags="-s -w go build -trimpath -tags=rosa -o /work/system/libexec/hakurei -ldflags="-s -w
-buildid= -buildid=
-linkmode external -linkmode external
-extldflags=-static -extldflags=-static
@@ -77,7 +76,7 @@ go build -trimpath -v -tags=rosa -o /work/system/libexec/hakurei -ldflags="-s -w
" ./... " ./...
echo echo
echo '# Testing hakurei.' echo '##### Testing hakurei.'
go test -ldflags='-buildid= -linkmode external -extldflags=-static' ./... go test -ldflags='-buildid= -linkmode external -extldflags=-static' ./...
echo echo
@@ -97,9 +96,13 @@ mkdir -p /work/system/bin/
} }
artifactsM[HakureiDist] = Metadata{ artifactsM[HakureiDist] = Metadata{
f: func(t Toolchain) (pkg.Artifact, string) { f: func(t Toolchain) (pkg.Artifact, string) {
name := "all"
if presetOpts&OptSkipCheck != 0 {
name = "make"
}
return t.newHakurei("-dist", ` return t.newHakurei("-dist", `
export HAKUREI_VERSION export HAKUREI_VERSION
DESTDIR=/work /usr/src/hakurei/dist/release.sh DESTDIR=/work /usr/src/hakurei/`+name+`.sh
`, true), hakureiVersion `, true), hakureiVersion
}, },

View File

@@ -4,13 +4,13 @@ package rosa
import "hakurei.app/internal/pkg" import "hakurei.app/internal/pkg"
const hakureiVersion = "0.3.7" const hakureiVersion = "0.4.2"
// hakureiSource is the source code of a hakurei release. // hakureiSource is the source code of a hakurei release.
var hakureiSource = pkg.NewHTTPGetTar( var hakureiSource = newTar(
nil, "https://git.gensokyo.uk/rosa/hakurei/archive/"+ "https://git.gensokyo.uk/rosa/hakurei/archive/"+
"v"+hakureiVersion+".tar.gz", "v"+hakureiVersion+".tar.gz",
mustDecode("Xh_sdITOATEAQN5_UuaOyrWsgboxorqRO9bml3dGm8GAxF8NFpB7MqhSZgjJxAl2"), "jadgaOrxv5ABGvzQ_Rk0aPGz7U8K-427TbMhQNQ32scSizEnlR44Pu7NoWYWVZWq",
pkg.TarGzip, pkg.TarGzip,
) )

34
internal/rosa/hwdata.go Normal file
View File

@@ -0,0 +1,34 @@
package rosa
import "hakurei.app/internal/pkg"
func (t Toolchain) newHwdata() (pkg.Artifact, string) {
const (
version = "0.407"
checksum = "6p1XD0CRuzt6hLfjv4ShKBW934BexmoPkRrmwxD4J63fBVCzVBRHyF8pVJdW_Xjm"
)
return t.NewPackage("hwdata", version, newFromGitHub(
"vcrhonek/hwdata",
"v"+version, checksum,
), &PackageAttr{
Writable: true,
EnterSource: true,
}, &MakeHelper{
// awk: fatal: cannot open file `hwdata.spec' for reading: No such file or directory
InPlace: true,
// lspci: Unknown option 'A' (see "lspci --help")
SkipCheck: true,
}), version
}
func init() {
artifactsM[Hwdata] = Metadata{
f: Toolchain.newHwdata,
Name: "hwdata",
Description: "contains various hardware identification and configuration data",
Website: "https://github.com/vcrhonek/hwdata",
ID: 5387,
}
}

View File

@@ -2,12 +2,12 @@ package rosa
import "hakurei.app/internal/pkg" import "hakurei.app/internal/pkg"
const kernelVersion = "6.12.80" const kernelVersion = "6.12.87"
var kernelSource = pkg.NewHTTPGetTar( var kernelSource = newTar(
nil, "https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/"+ "https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/"+
"snapshot/linux-"+kernelVersion+".tar.gz", "snapshot/linux-"+kernelVersion+".tar.gz",
mustDecode("_iJEAYoQISJxefuWZYfv0RPWUmHHIjHQw33Fapix-irXrEIREP5ruK37UJW4uMZO"), "QTl5teIy0K5KsOLYGHQ3FbnPCZNRH2bySXVzghiOoHDdM3zAcSPUkmdly85lMzHx",
pkg.TarGzip, pkg.TarGzip,
) )
@@ -29,8 +29,22 @@ func init() {
} }
func (t Toolchain) newKernelHeaders() (pkg.Artifact, string) { func (t Toolchain) newKernelHeaders() (pkg.Artifact, string) {
const checksum = "lCmBNcMeUmXifg0vecKOPy3GAaFcJSmOPnf3wit9xYTDSTsFADPt1xxUFfmTn1fD"
return t.NewPackage("kernel-headers", kernelVersion, kernelSource, &PackageAttr{ return t.NewPackage("kernel-headers", kernelVersion, kernelSource, &PackageAttr{
Flag: TEarly, Flag: TEarly,
KnownChecksum: new(mustDecode(checksum)),
Paths: []pkg.ExecPath{
// updated manually for API changes
pkg.Path(AbsUsrSrc.Append("version.h"), false, pkg.NewFile(
"version.h", []byte(`#define LINUX_VERSION_CODE 396372
#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c)))
#define LINUX_VERSION_MAJOR 6
#define LINUX_VERSION_PATCHLEVEL 12
#define LINUX_VERSION_SUBLEVEL 84
`),
)),
},
}, &MakeHelper{ }, &MakeHelper{
SkipConfigure: true, SkipConfigure: true,
@@ -43,7 +57,11 @@ func (t Toolchain) newKernelHeaders() (pkg.Artifact, string) {
"INSTALL_HDR_PATH=/work/system", "INSTALL_HDR_PATH=/work/system",
"headers_install", "headers_install",
}, },
Install: "# headers installed during make", Install: `
cat \
/usr/src/version.h > \
/work/system/include/linux/version.h
`,
}, },
Rsync, Rsync,
), kernelVersion ), kernelVersion
@@ -1221,7 +1239,7 @@ install -Dm0500 \
/sbin/depmod /sbin/depmod
make \ make \
"-j$(nproc)" \ ` + jobsFlagE + ` \
-f /usr/src/kernel/Makefile \ -f /usr/src/kernel/Makefile \
O=/tmp/kbuild \ O=/tmp/kbuild \
LLVM=1 \ LLVM=1 \
@@ -1282,14 +1300,14 @@ func init() {
func (t Toolchain) newFirmware() (pkg.Artifact, string) { func (t Toolchain) newFirmware() (pkg.Artifact, string) {
const ( const (
version = "20260309" version = "20260410"
checksum = "M1az8BxSiOEH3LA11Trc5VAlakwAHhP7-_LKWg6k-SVIzU3xclMDO4Tiujw1gQrC" checksum = "J8PdQlGqwrivpskPzbL6xacqR6mlKtXpe5RpzFfVzKPAgG81ZRXsc3qrxwdGJbil"
) )
return t.NewPackage("firmware", version, pkg.NewHTTPGetTar( return t.NewPackage("firmware", version, newFromGitLab(
nil, "https://gitlab.com/kernel-firmware/linux-firmware/-/"+ "gitlab.com",
"archive/"+version+"/linux-firmware-"+version+".tar.bz2", "kernel-firmware/linux-firmware",
mustDecode(checksum), version,
pkg.TarBzip2, checksum,
), &PackageAttr{ ), &PackageAttr{
// dedup creates temporary file // dedup creates temporary file
Writable: true, Writable: true,
@@ -1309,7 +1327,7 @@ func (t Toolchain) newFirmware() (pkg.Artifact, string) {
"install-zst", "install-zst",
}, },
SkipCheck: true, // requires pre-commit SkipCheck: true, // requires pre-commit
Install: `make "-j$(nproc)" DESTDIR=/work/system dedup`, Install: "make " + jobsFlagE + " DESTDIR=/work/system dedup",
}, },
Parallel, Parallel,
Rdfind, Rdfind,

View File

@@ -1,16 +1,16 @@
# #
# Automatically generated file; DO NOT EDIT. # Automatically generated file; DO NOT EDIT.
# Linux/x86 6.12.80 Kernel Configuration # Linux/x86 6.12.84 Kernel Configuration
# #
CONFIG_CC_VERSION_TEXT="clang version 22.1.2" CONFIG_CC_VERSION_TEXT="clang version 22.1.4"
CONFIG_GCC_VERSION=0 CONFIG_GCC_VERSION=0
CONFIG_CC_IS_CLANG=y CONFIG_CC_IS_CLANG=y
CONFIG_CLANG_VERSION=220102 CONFIG_CLANG_VERSION=220104
CONFIG_AS_IS_LLVM=y CONFIG_AS_IS_LLVM=y
CONFIG_AS_VERSION=220102 CONFIG_AS_VERSION=220104
CONFIG_LD_VERSION=0 CONFIG_LD_VERSION=0
CONFIG_LD_IS_LLD=y CONFIG_LD_IS_LLD=y
CONFIG_LLD_VERSION=220102 CONFIG_LLD_VERSION=220104
CONFIG_RUSTC_VERSION=0 CONFIG_RUSTC_VERSION=0
CONFIG_RUSTC_LLVM_VERSION=0 CONFIG_RUSTC_LLVM_VERSION=0
CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
@@ -3175,14 +3175,8 @@ CONFIG_PATA_ACPI=y
CONFIG_ATA_GENERIC=y CONFIG_ATA_GENERIC=y
CONFIG_PATA_LEGACY=m CONFIG_PATA_LEGACY=m
CONFIG_MD=y CONFIG_MD=y
CONFIG_BLK_DEV_MD=m # CONFIG_BLK_DEV_MD is not set
CONFIG_MD_BITMAP_FILE=y CONFIG_MD_BITMAP_FILE=y
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_RAID10=m
CONFIG_MD_RAID456=m
CONFIG_MD_CLUSTER=m
CONFIG_BCACHE=m CONFIG_BCACHE=m
# CONFIG_BCACHE_DEBUG is not set # CONFIG_BCACHE_DEBUG is not set
# CONFIG_BCACHE_ASYNC_REGISTRATION is not set # CONFIG_BCACHE_ASYNC_REGISTRATION is not set
@@ -3205,7 +3199,7 @@ CONFIG_DM_ERA=m
CONFIG_DM_CLONE=m CONFIG_DM_CLONE=m
CONFIG_DM_MIRROR=m CONFIG_DM_MIRROR=m
CONFIG_DM_LOG_USERSPACE=m CONFIG_DM_LOG_USERSPACE=m
CONFIG_DM_RAID=m # CONFIG_DM_RAID is not set
CONFIG_DM_ZERO=m CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m CONFIG_DM_MULTIPATH=m
CONFIG_DM_MULTIPATH_QL=m CONFIG_DM_MULTIPATH_QL=m
@@ -11636,10 +11630,7 @@ CONFIG_RANDSTRUCT_NONE=y
CONFIG_XOR_BLOCKS=m CONFIG_XOR_BLOCKS=m
CONFIG_ASYNC_CORE=m CONFIG_ASYNC_CORE=m
CONFIG_ASYNC_MEMCPY=m
CONFIG_ASYNC_XOR=m CONFIG_ASYNC_XOR=m
CONFIG_ASYNC_PQ=m
CONFIG_ASYNC_RAID6_RECOV=m
CONFIG_CRYPTO=y CONFIG_CRYPTO=y
# #
@@ -11925,8 +11916,6 @@ CONFIG_BINARY_PRINTF=y
# #
# Library routines # Library routines
# #
CONFIG_RAID6_PQ=m
CONFIG_RAID6_PQ_BENCHMARK=y
CONFIG_LINEAR_RANGES=y CONFIG_LINEAR_RANGES=y
CONFIG_PACKING=y CONFIG_PACKING=y
CONFIG_BITREVERSE=y CONFIG_BITREVERSE=y
@@ -12471,7 +12460,6 @@ CONFIG_RUNTIME_TESTING_MENU=y
# CONFIG_INTERVAL_TREE_TEST is not set # CONFIG_INTERVAL_TREE_TEST is not set
# CONFIG_PERCPU_TEST is not set # CONFIG_PERCPU_TEST is not set
# CONFIG_ATOMIC64_SELFTEST is not set # CONFIG_ATOMIC64_SELFTEST is not set
# CONFIG_ASYNC_RAID6_TEST is not set
# CONFIG_TEST_HEXDUMP is not set # CONFIG_TEST_HEXDUMP is not set
# CONFIG_TEST_KSTRTOX is not set # CONFIG_TEST_KSTRTOX is not set
# CONFIG_TEST_PRINTF is not set # CONFIG_TEST_PRINTF is not set

View File

@@ -1,16 +1,16 @@
# #
# Automatically generated file; DO NOT EDIT. # Automatically generated file; DO NOT EDIT.
# Linux/arm64 6.12.80 Kernel Configuration # Linux/arm64 6.12.83 Kernel Configuration
# #
CONFIG_CC_VERSION_TEXT="clang version 21.1.8" CONFIG_CC_VERSION_TEXT="clang version 22.1.4"
CONFIG_GCC_VERSION=0 CONFIG_GCC_VERSION=0
CONFIG_CC_IS_CLANG=y CONFIG_CC_IS_CLANG=y
CONFIG_CLANG_VERSION=210108 CONFIG_CLANG_VERSION=220104
CONFIG_AS_IS_LLVM=y CONFIG_AS_IS_LLVM=y
CONFIG_AS_VERSION=210108 CONFIG_AS_VERSION=220104
CONFIG_LD_VERSION=0 CONFIG_LD_VERSION=0
CONFIG_LD_IS_LLD=y CONFIG_LD_IS_LLD=y
CONFIG_LLD_VERSION=210108 CONFIG_LLD_VERSION=220104
CONFIG_RUSTC_VERSION=0 CONFIG_RUSTC_VERSION=0
CONFIG_RUSTC_LLVM_VERSION=0 CONFIG_RUSTC_LLVM_VERSION=0
CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
@@ -3253,14 +3253,8 @@ CONFIG_PATA_ACPI=y
CONFIG_ATA_GENERIC=y CONFIG_ATA_GENERIC=y
CONFIG_PATA_LEGACY=m CONFIG_PATA_LEGACY=m
CONFIG_MD=y CONFIG_MD=y
CONFIG_BLK_DEV_MD=m # CONFIG_BLK_DEV_MD is not set
CONFIG_MD_BITMAP_FILE=y CONFIG_MD_BITMAP_FILE=y
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_RAID10=m
CONFIG_MD_RAID456=m
CONFIG_MD_CLUSTER=m
CONFIG_BCACHE=m CONFIG_BCACHE=m
# CONFIG_BCACHE_DEBUG is not set # CONFIG_BCACHE_DEBUG is not set
# CONFIG_BCACHE_ASYNC_REGISTRATION is not set # CONFIG_BCACHE_ASYNC_REGISTRATION is not set
@@ -3283,7 +3277,7 @@ CONFIG_DM_ERA=m
CONFIG_DM_CLONE=m CONFIG_DM_CLONE=m
CONFIG_DM_MIRROR=m CONFIG_DM_MIRROR=m
CONFIG_DM_LOG_USERSPACE=m CONFIG_DM_LOG_USERSPACE=m
CONFIG_DM_RAID=m # CONFIG_DM_RAID is not set
CONFIG_DM_ZERO=m CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m CONFIG_DM_MULTIPATH=m
CONFIG_DM_MULTIPATH_QL=m CONFIG_DM_MULTIPATH_QL=m
@@ -10300,7 +10294,6 @@ CONFIG_ALTERA_MSGDMA=m
# CONFIG_AMBA_PL08X is not set # CONFIG_AMBA_PL08X is not set
CONFIG_APPLE_ADMAC=m CONFIG_APPLE_ADMAC=m
CONFIG_AXI_DMAC=m CONFIG_AXI_DMAC=m
CONFIG_BCM_SBA_RAID=m
CONFIG_DMA_BCM2835=m CONFIG_DMA_BCM2835=m
CONFIG_DMA_SUN6I=m CONFIG_DMA_SUN6I=m
CONFIG_DW_AXI_DMAC=m CONFIG_DW_AXI_DMAC=m
@@ -13292,12 +13285,7 @@ CONFIG_RANDSTRUCT_NONE=y
CONFIG_XOR_BLOCKS=m CONFIG_XOR_BLOCKS=m
CONFIG_ASYNC_CORE=m CONFIG_ASYNC_CORE=m
CONFIG_ASYNC_MEMCPY=m
CONFIG_ASYNC_XOR=m CONFIG_ASYNC_XOR=m
CONFIG_ASYNC_PQ=m
CONFIG_ASYNC_RAID6_RECOV=m
CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA=y
CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA=y
CONFIG_CRYPTO=y CONFIG_CRYPTO=y
# #
@@ -13640,8 +13628,6 @@ CONFIG_BINARY_PRINTF=y
# #
# Library routines # Library routines
# #
CONFIG_RAID6_PQ=m
CONFIG_RAID6_PQ_BENCHMARK=y
CONFIG_LINEAR_RANGES=y CONFIG_LINEAR_RANGES=y
CONFIG_PACKING=y CONFIG_PACKING=y
CONFIG_BITREVERSE=y CONFIG_BITREVERSE=y
@@ -14172,7 +14158,6 @@ CONFIG_RUNTIME_TESTING_MENU=y
# CONFIG_INTERVAL_TREE_TEST is not set # CONFIG_INTERVAL_TREE_TEST is not set
# CONFIG_PERCPU_TEST is not set # CONFIG_PERCPU_TEST is not set
# CONFIG_ATOMIC64_SELFTEST is not set # CONFIG_ATOMIC64_SELFTEST is not set
# CONFIG_ASYNC_RAID6_TEST is not set
# CONFIG_TEST_HEXDUMP is not set # CONFIG_TEST_HEXDUMP is not set
# CONFIG_TEST_KSTRTOX is not set # CONFIG_TEST_KSTRTOX is not set
# CONFIG_TEST_PRINTF is not set # CONFIG_TEST_PRINTF is not set

View File

@@ -1,6 +1,6 @@
# #
# Automatically generated file; DO NOT EDIT. # Automatically generated file; DO NOT EDIT.
# Linux/riscv 6.12.77 Kernel Configuration # Linux/riscv 6.12.80 Kernel Configuration
# #
CONFIG_CC_VERSION_TEXT="clang version 22.1.2" CONFIG_CC_VERSION_TEXT="clang version 22.1.2"
CONFIG_GCC_VERSION=0 CONFIG_GCC_VERSION=0
@@ -37,11 +37,6 @@ CONFIG_BUILD_SALT=""
CONFIG_HAVE_KERNEL_GZIP=y CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_ZSTD=y CONFIG_HAVE_KERNEL_ZSTD=y
# CONFIG_KERNEL_GZIP is not set # CONFIG_KERNEL_GZIP is not set
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
# CONFIG_KERNEL_XZ is not set
# CONFIG_KERNEL_LZO is not set
# CONFIG_KERNEL_LZ4 is not set
CONFIG_KERNEL_ZSTD=y CONFIG_KERNEL_ZSTD=y
CONFIG_DEFAULT_INIT="" CONFIG_DEFAULT_INIT=""
CONFIG_DEFAULT_HOSTNAME="rosa-early" CONFIG_DEFAULT_HOSTNAME="rosa-early"
@@ -2848,14 +2843,8 @@ CONFIG_PATA_ACPI=y
CONFIG_ATA_GENERIC=y CONFIG_ATA_GENERIC=y
CONFIG_PATA_LEGACY=m CONFIG_PATA_LEGACY=m
CONFIG_MD=y CONFIG_MD=y
CONFIG_BLK_DEV_MD=m # CONFIG_BLK_DEV_MD is not set
CONFIG_MD_BITMAP_FILE=y CONFIG_MD_BITMAP_FILE=y
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_RAID10=m
CONFIG_MD_RAID456=m
CONFIG_MD_CLUSTER=m
CONFIG_BCACHE=m CONFIG_BCACHE=m
# CONFIG_BCACHE_DEBUG is not set # CONFIG_BCACHE_DEBUG is not set
# CONFIG_BCACHE_ASYNC_REGISTRATION is not set # CONFIG_BCACHE_ASYNC_REGISTRATION is not set
@@ -2878,7 +2867,7 @@ CONFIG_DM_ERA=m
CONFIG_DM_CLONE=m CONFIG_DM_CLONE=m
CONFIG_DM_MIRROR=m CONFIG_DM_MIRROR=m
CONFIG_DM_LOG_USERSPACE=m CONFIG_DM_LOG_USERSPACE=m
CONFIG_DM_RAID=m # CONFIG_DM_RAID is not set
CONFIG_DM_ZERO=m CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m CONFIG_DM_MULTIPATH=m
CONFIG_DM_MULTIPATH_QL=m CONFIG_DM_MULTIPATH_QL=m
@@ -10655,10 +10644,7 @@ CONFIG_RANDSTRUCT_NONE=y
CONFIG_XOR_BLOCKS=m CONFIG_XOR_BLOCKS=m
CONFIG_ASYNC_CORE=m CONFIG_ASYNC_CORE=m
CONFIG_ASYNC_MEMCPY=m
CONFIG_ASYNC_XOR=m CONFIG_ASYNC_XOR=m
CONFIG_ASYNC_PQ=m
CONFIG_ASYNC_RAID6_RECOV=m
CONFIG_CRYPTO=y CONFIG_CRYPTO=y
# #
@@ -10918,8 +10904,6 @@ CONFIG_BINARY_PRINTF=y
# #
# Library routines # Library routines
# #
CONFIG_RAID6_PQ=m
CONFIG_RAID6_PQ_BENCHMARK=y
CONFIG_LINEAR_RANGES=y CONFIG_LINEAR_RANGES=y
CONFIG_PACKING=y CONFIG_PACKING=y
CONFIG_BITREVERSE=y CONFIG_BITREVERSE=y
@@ -11408,7 +11392,6 @@ CONFIG_RUNTIME_TESTING_MENU=y
# CONFIG_INTERVAL_TREE_TEST is not set # CONFIG_INTERVAL_TREE_TEST is not set
# CONFIG_PERCPU_TEST is not set # CONFIG_PERCPU_TEST is not set
# CONFIG_ATOMIC64_SELFTEST is not set # CONFIG_ATOMIC64_SELFTEST is not set
# CONFIG_ASYNC_RAID6_TEST is not set
# CONFIG_TEST_HEXDUMP is not set # CONFIG_TEST_HEXDUMP is not set
# CONFIG_TEST_KSTRTOX is not set # CONFIG_TEST_KSTRTOX is not set
# CONFIG_TEST_PRINTF is not set # CONFIG_TEST_PRINTF is not set

View File

@@ -7,10 +7,10 @@ func (t Toolchain) newKmod() (pkg.Artifact, string) {
version = "34.2" version = "34.2"
checksum = "0K7POeTKxMhExsaTsnKAC6LUNsRSfe6sSZxWONPbOu-GI_pXOw3toU_BIoqfBhJV" checksum = "0K7POeTKxMhExsaTsnKAC6LUNsRSfe6sSZxWONPbOu-GI_pXOw3toU_BIoqfBhJV"
) )
return t.NewPackage("kmod", version, pkg.NewHTTPGetTar( return t.NewPackage("kmod", version, newTar(
nil, "https://www.kernel.org/pub/linux/utils/kernel/"+ "https://www.kernel.org/pub/linux/utils/kernel/"+
"kmod/kmod-"+version+".tar.gz", "kmod/kmod-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), nil, &MesonHelper{ ), nil, &MesonHelper{
Setup: []KV{ Setup: []KV{

100
internal/rosa/libarchive.go Normal file
View File

@@ -0,0 +1,100 @@
package rosa
import "hakurei.app/internal/pkg"
func (t Toolchain) newLibarchive() (pkg.Artifact, string) {
const (
version = "3.8.7"
checksum = "CUJK4MDQmZmATClgQBH2Wt-7Ts4iiSUlg1J_TVb6-5IK3rVUgVLIMc5k-bnWB9w3"
)
return t.NewPackage("libarchive", version, newFromGitHub(
"libarchive/libarchive",
"v"+version, checksum,
), &PackageAttr{
Paths: []pkg.ExecPath{
pkg.Path(AbsUsrSrc.Append(
"CTestCustom.cmake",
), false, pkg.NewFile("CTestCustom.cmake", []byte(`
list(APPEND CTEST_CUSTOM_TESTS_IGNORE
"libarchive_test_archive_string_conversion_fail_c"
"libarchive_test_archive_string_conversion_fail_latin1"
"libarchive_test_archive_string_update_utf8_koi8"
"libarchive_test_gnutar_filename_encoding_KOI8R_UTF8"
"libarchive_test_gnutar_filename_encoding_KOI8R_CP866"
"libarchive_test_gnutar_filename_encoding_CP1251_UTF8"
"libarchive_test_gnutar_filename_encoding_Russian_Russia"
"libarchive_test_gnutar_filename_encoding_EUCJP_UTF8"
"libarchive_test_gnutar_filename_encoding_EUCJP_CP932"
"libarchive_test_gnutar_filename_encoding_CP932_UTF8"
"libarchive_test_pax_filename_encoding_KOI8R"
"libarchive_test_pax_filename_encoding_CP1251"
"libarchive_test_pax_filename_encoding_EUCJP"
"libarchive_test_pax_filename_encoding_CP932"
"libarchive_test_read_format_cpio_filename_UTF8_eucJP"
"libarchive_test_read_format_cpio_filename_CP866_KOI8R"
"libarchive_test_read_format_cpio_filename_KOI8R_CP866"
"libarchive_test_read_format_cpio_filename_UTF8_KOI8R"
"libarchive_test_read_format_cpio_filename_UTF8_CP866"
"libarchive_test_read_format_cpio_filename_eucJP_CP932"
"libarchive_test_read_format_cpio_filename_UTF8_CP932"
"libarchive_test_read_format_cpio_filename_CP866_CP1251"
"libarchive_test_read_format_cpio_filename_CP866_CP1251_win"
"libarchive_test_read_format_cpio_filename_KOI8R_CP1251"
"libarchive_test_read_format_cpio_filename_UTF8_CP1251"
"libarchive_test_read_format_gtar_filename_CP866_KOI8R"
"libarchive_test_read_format_gtar_filename_KOI8R_CP866"
"libarchive_test_read_format_gtar_filename_eucJP_CP932"
"libarchive_test_read_format_gtar_filename_CP866_CP1251"
"libarchive_test_read_format_gtar_filename_CP866_CP1251_win"
"libarchive_test_read_format_gtar_filename_KOI8R_CP1251"
"libarchive_test_read_format_rar_unicode_CP932"
"libarchive_test_read_format_zip_filename_CP932_eucJP"
"libarchive_test_read_format_zip_filename_UTF8_eucJP"
"libarchive_test_read_format_zip_filename_CP866_KOI8R"
"libarchive_test_read_format_zip_filename_KOI8R_CP866"
"libarchive_test_read_format_zip_filename_UTF8_KOI8R"
"libarchive_test_read_format_zip_filename_UTF8_CP866"
"libarchive_test_read_format_zip_filename_CP932_CP932"
"libarchive_test_read_format_zip_filename_UTF8_CP932"
"libarchive_test_read_format_zip_filename_CP866_CP1251"
"libarchive_test_read_format_zip_filename_CP866_CP1251_win"
"libarchive_test_read_format_zip_filename_KOI8R_CP1251"
"libarchive_test_read_format_zip_filename_UTF8_CP1251"
"libarchive_test_ustar_filename_encoding_KOI8R_UTF8"
"libarchive_test_ustar_filename_encoding_KOI8R_CP866"
"libarchive_test_ustar_filename_encoding_CP1251_UTF8"
"libarchive_test_ustar_filename_encoding_Russian_Russia"
"libarchive_test_ustar_filename_encoding_EUCJP_UTF8"
"libarchive_test_ustar_filename_encoding_EUCJP_CP932"
"libarchive_test_ustar_filename_encoding_CP932_UTF8"
"libarchive_test_zip_filename_encoding_KOI8R"
"libarchive_test_zip_filename_encoding_ru_RU_CP1251"
"libarchive_test_zip_filename_encoding_Russian_Russia"
"libarchive_test_zip_filename_encoding_EUCJP"
"libarchive_test_zip_filename_encoding_CP932"
"libarchive_test_read_format_cab_filename"
"libarchive_test_read_format_lha_filename"
"libarchive_test_read_format_tar_filename"
"libarchive_test_read_format_ustar_filename"
"libarchive_test_read_append_wrong_filter"
)
`))),
},
Writable: true,
ScriptEarly: `
install -Dv /usr/src/CTestCustom.cmake /cure/
`,
}, (*CMakeHelper)(nil)), version
}
func init() {
artifactsM[Libarchive] = Metadata{
f: Toolchain.newLibarchive,
Name: "libarchive",
Description: "multi-format archive and compression library",
Website: "https://www.libarchive.org/",
ID: 1558,
}
}

View File

@@ -4,13 +4,12 @@ import "hakurei.app/internal/pkg"
func (t Toolchain) newLibmd() (pkg.Artifact, string) { func (t Toolchain) newLibmd() (pkg.Artifact, string) {
const ( const (
version = "1.1.0" version = "1.2.0"
checksum = "9apYqPPZm0j5HQT8sCsVIhnVIqRD7XgN7kPIaTwTqnTuUq5waUAMq4M7ev8CODJ1" checksum = "1rJ6joAO0wwMZvSfnRNkc1MOhywyAq7SM8VmF92NvDtv7Qdl1LRbjm5fg_DFFtGj"
) )
return t.NewPackage("libmd", version, t.NewViaGit( return t.NewPackage("libmd", version, t.newTagRemote(
"https://git.hadrons.org/git/libmd.git", "https://git.hadrons.org/git/libmd.git",
"refs/tags/"+version, version, checksum,
mustDecode(checksum),
), nil, &MakeHelper{ ), nil, &MakeHelper{
Generate: "echo '" + version + "' > .dist-version && ./autogen", Generate: "echo '" + version + "' > .dist-version && ./autogen",
ScriptMakeEarly: ` ScriptMakeEarly: `
@@ -38,10 +37,9 @@ func (t Toolchain) newLibbsd() (pkg.Artifact, string) {
version = "0.12.2" version = "0.12.2"
checksum = "NVS0xFLTwSP8JiElEftsZ-e1_C-IgJhHrHE77RwKt5178M7r087waO-zYx2_dfGX" checksum = "NVS0xFLTwSP8JiElEftsZ-e1_C-IgJhHrHE77RwKt5178M7r087waO-zYx2_dfGX"
) )
return t.NewPackage("libbsd", version, t.NewViaGit( return t.NewPackage("libbsd", version, t.newTagRemote(
"https://gitlab.freedesktop.org/libbsd/libbsd.git", "https://gitlab.freedesktop.org/libbsd/libbsd.git",
"refs/tags/"+version, version, checksum,
mustDecode(checksum),
), nil, &MakeHelper{ ), nil, &MakeHelper{
Generate: "echo '" + version + "' > .dist-version && ./autogen", Generate: "echo '" + version + "' > .dist-version && ./autogen",
}, },

View File

@@ -7,10 +7,10 @@ func (t Toolchain) newLibcap() (pkg.Artifact, string) {
version = "2.78" version = "2.78"
checksum = "wFdUkBhFMD9InPnrBZyegWrlPSAg_9JiTBC-eSFyWWlmbzL2qjh2mKxr9Kx2a8ut" checksum = "wFdUkBhFMD9InPnrBZyegWrlPSAg_9JiTBC-eSFyWWlmbzL2qjh2mKxr9Kx2a8ut"
) )
return t.NewPackage("libcap", version, pkg.NewHTTPGetTar( return t.NewPackage("libcap", version, newTar(
nil, "https://git.kernel.org/pub/scm/libs/libcap/libcap.git/"+ "https://git.kernel.org/pub/scm/libs/libcap/libcap.git/"+
"snapshot/libcap-"+version+".tar.gz", "snapshot/libcap-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), &PackageAttr{ ), &PackageAttr{
// uses source tree as scratch space // uses source tree as scratch space

View File

@@ -0,0 +1,50 @@
package rosa
import "hakurei.app/internal/pkg"
// newLibconfig returns the libconfig artifact and its version string.
//
// The source is fetched from the hyperrealm/libconfig GitHub repository at
// tag "v"+version and verified against checksum. A patch disables two
// upstream test cases (ParsingAndFormatting, BinaryAndHex); per the patch
// name they are broken — presumably they fail in this build environment
// (NOTE(review): confirm why these specific tests are excluded).
func (t Toolchain) newLibconfig() (pkg.Artifact, string) {
	const (
		version  = "1.8.2"
		checksum = "fD32hjeAZuTz98g6WYHRwsxphrgrEFqxi5Z1jlJemPckPBfxpS3i5HgshAuA6vmT"
	)
	return t.NewPackage("libconfig", version, newFromGitHub(
		"hyperrealm/libconfig",
		"v"+version,
		checksum,
	), &PackageAttr{
		// Each KV pairs a patch name with its unified-diff content,
		// applied to the source tree before building.
		Patches: []KV{
			{"disable-broken-tests", `diff --git a/tests/tests.c b/tests/tests.c
index eba7eae..f916d2e 100644
--- a/tests/tests.c
+++ b/tests/tests.c
@@ -753,7 +753,6 @@ int main(int argc, char **argv)
int failures;
TT_SUITE_START(LibConfigTests);
- TT_SUITE_TEST(LibConfigTests, ParsingAndFormatting);
TT_SUITE_TEST(LibConfigTests, ParseInvalidFiles);
TT_SUITE_TEST(LibConfigTests, ParseInvalidStrings);
TT_SUITE_TEST(LibConfigTests, BigInt1);
@@ -768,7 +767,6 @@ int main(int argc, char **argv)
TT_SUITE_TEST(LibConfigTests, OverrideSetting);
TT_SUITE_TEST(LibConfigTests, SettingLookups);
TT_SUITE_TEST(LibConfigTests, ReadStream);
- TT_SUITE_TEST(LibConfigTests, BinaryAndHex);
TT_SUITE_RUN(LibConfigTests);
failures = TT_SUITE_NUM_FAILURES(LibConfigTests);
TT_SUITE_END(LibConfigTests);
`},
		},
		// Typed-nil helper selects the CMake build flow with its default
		// settings, matching the convention used by sibling packages.
	}, (*CMakeHelper)(nil)), version
}
func init() {
artifactsM[Libconfig] = Metadata{
f: Toolchain.newLibconfig,
Name: "libconfig",
Description: "a simple library for processing structured configuration files",
Website: "https://hyperrealm.github.io/libconfig/",
ID: 1580,
}
}

View File

@@ -0,0 +1,30 @@
package rosa
import "hakurei.app/internal/pkg"
// newLibdisplayInfo returns the libdisplay-info artifact and its version
// string. The source is fetched from the emersion/libdisplay-info project
// on gitlab.freedesktop.org and verified against checksum; the build uses
// the default meson flow (typed-nil helper) and depends on Diffutils and
// Hwdata.
func (t Toolchain) newLibdisplayInfo() (pkg.Artifact, string) {
	const (
		version  = "0.3.0"
		checksum = "yjOqPUHHYgRtpqGw5RI1n2Q1_hO5j0LiFNMbjcRWV4Nf71XwwoC9fZMlKBDeLchT"
	)
	src := newFromGitLab(
		"gitlab.freedesktop.org",
		"emersion/libdisplay-info",
		version, checksum,
	)
	return t.NewPackage("libdisplay-info", version, src,
		nil, (*MesonHelper)(nil),
		Diffutils,
		Hwdata,
	), version
}
func init() {
artifactsM[LibdisplayInfo] = Metadata{
f: Toolchain.newLibdisplayInfo,
Name: "libdisplay-info",
Description: "EDID and DisplayID library",
Website: "https://gitlab.freedesktop.org/emersion/libdisplay-info",
ID: 326668,
}
}

33
internal/rosa/libepoxy.go Normal file
View File

@@ -0,0 +1,33 @@
package rosa
import "hakurei.app/internal/pkg"
// newLibepoxy returns the libepoxy artifact and its version string.
//
// The source is fetched from the anholt/libepoxy GitHub repository at the
// bare version tag and verified against checksum. The meson setup passes
// the "Dglx"/"Degl" options as "no" — presumably rendered as -Dglx=no and
// -Degl=no to disable GLX and EGL support. Builds against LibX11.
func (t Toolchain) newLibepoxy() (pkg.Artifact, string) {
	const (
		version  = "1.5.10"
		checksum = "OHI8wshrlGw6BMGrmSyejJtwzM2gPhyFJrTsKxULyKMmYrfgcOe7Iw2ibVoUND_Q"
	)
	src := newFromGitHub(
		"anholt/libepoxy",
		version,
		checksum,
	)
	meson := &MesonHelper{
		Setup: []KV{
			{"Dglx", "no"},
			{"Degl", "no"},
		},
	}
	return t.NewPackage("libepoxy", version, src, nil, meson,
		LibX11,
	), version
}
func init() {
artifactsM[Libepoxy] = Metadata{
f: Toolchain.newLibepoxy,
Name: "libepoxy",
Description: "a library for handling OpenGL function pointer management",
Website: "https://github.com/anholt/libepoxy",
ID: 6090,
}
}

View File

@@ -7,9 +7,9 @@ func (t Toolchain) newLibev() (pkg.Artifact, string) {
version = "4.33" version = "4.33"
checksum = "774eSXV_4k8PySRprUDChbEwsw-kzjIFnJ3MpNOl5zDpamBRvC3BqPyRxvkwcL6_" checksum = "774eSXV_4k8PySRprUDChbEwsw-kzjIFnJ3MpNOl5zDpamBRvC3BqPyRxvkwcL6_"
) )
return t.NewPackage("libev", version, pkg.NewHTTPGetTar( return t.NewPackage("libev", version, newTar(
nil, "https://dist.schmorp.de/libev/Attic/libev-"+version+".tar.gz", "https://dist.schmorp.de/libev/Attic/libev-"+version+".tar.gz",
mustDecode(checksum), checksum,
pkg.TarGzip, pkg.TarGzip,
), nil, (*MakeHelper)(nil)), version ), nil, (*MakeHelper)(nil)), version
} }

View File

@@ -8,14 +8,14 @@ import (
func (t Toolchain) newLibexpat() (pkg.Artifact, string) { func (t Toolchain) newLibexpat() (pkg.Artifact, string) {
const ( const (
version = "2.7.5" version = "2.8.1"
checksum = "vTRUjjg-qbHSXUBYKXgzVHkUO7UNyuhrkSYrE7ikApQm0g-OvQ8tspw4w55M-1Tp" checksum = "iMEtbOJhQfGof2GxSlxffQSI1va_NDDQ9VIuqcPbNZ0291Dr8wttD5QecYyjIQap"
) )
return t.NewPackage("libexpat", version, pkg.NewHTTPGetTar( return t.NewPackage("libexpat", version, newFromGitHubRelease(
nil, "https://github.com/libexpat/libexpat/releases/download/"+ "libexpat/libexpat",
"R_"+strings.ReplaceAll(version, ".", "_")+"/"+ "R_"+strings.ReplaceAll(version, ".", "_"),
"expat-"+version+".tar.bz2", "expat-"+version+".tar.bz2",
mustDecode(checksum), checksum,
pkg.TarBzip2, pkg.TarBzip2,
), nil, (*MakeHelper)(nil), ), nil, (*MakeHelper)(nil),
Bash, Bash,

View File

@@ -7,10 +7,11 @@ func (t Toolchain) newLibffi() (pkg.Artifact, string) {
version = "3.5.2" version = "3.5.2"
checksum = "2_Q-ZNBBbVhltfL5zEr0wljxPegUimTK4VeMSiwJEGksls3n4gj3lV0Ly3vviSFH" checksum = "2_Q-ZNBBbVhltfL5zEr0wljxPegUimTK4VeMSiwJEGksls3n4gj3lV0Ly3vviSFH"
) )
return t.NewPackage("libffi", version, pkg.NewHTTPGetTar( return t.NewPackage("libffi", version, newFromGitHubRelease(
nil, "https://github.com/libffi/libffi/releases/download/"+ "libffi/libffi",
"v"+version+"/libffi-"+version+".tar.gz", "v"+version,
mustDecode(checksum), "libffi-"+version+".tar.gz",
checksum,
pkg.TarGzip, pkg.TarGzip,
), nil, (*MakeHelper)(nil), ), nil, (*MakeHelper)(nil),
KernelHeaders, KernelHeaders,

Some files were not shown because too many files have changed in this diff Show More