1
0
forked from security/hakurei

83 Commits

Author SHA1 Message Date
Kat
6f78444b11 cmd/pkgserver: fix multi-line JS test output display 2026-03-15 02:02:39 +11:00
Kat
2a3f6f5384 cmd/pkgserver: implement JS test DSL and runner 2026-03-15 02:02:39 +11:00
Kat
ef8663461b cmd/pkgserver: move StreamReporter display() to Reporter interface 2026-03-15 02:02:39 +11:00
Kat
0b3be27b9a cmd/pkgserver: add DOM reporter for JS tests 2026-03-15 02:02:39 +11:00
Kat
61a25c88ae cmd/pkgserver: add basic CLI reporter for testing JS 2026-03-15 02:02:39 +11:00
mae
c7e195fe64 cmd/pkgserver: remove get endpoint count field 2026-03-13 20:51:08 -05:00
mae
d5db9add98 cmd/pkgserver: search endpoint 2026-03-13 20:51:08 -05:00
mae
ab8abdc82b cmd/pkgserver: pagination bugfix 2026-03-13 20:51:08 -05:00
770fd46510 cmd/pkgserver: guard sass/ts behind build tag
Packaging nodejs and ruby is an immense burden for the Rosa OS base system, and these files diff poorly.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 20:51:08 -05:00
mae
99f1c6aab4 cmd/pkgserver: add size 2026-03-13 20:51:08 -05:00
9ee629d402 cmd/pkgserver: expose size and store pre-encoded ident
This change also handles SIGSEGV correctly in newStatusHandler, and makes serving status fully zero copy.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 20:51:08 -05:00
f475dde8b9 cmd/pkgserver: look up status by name once
This has far less overhead.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 20:51:08 -05:00
c43a0c41b6 cmd/pkgserver: refer to preset in index
This enables referencing back to internal/rosa through an entry obtained via the index.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 20:51:08 -05:00
55827f1a85 cmd/pkgserver: handle unversioned value
This omits the field for an unversioned artifact, and only does so once on startup.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 20:51:08 -05:00
721bdddfa1 cmd/pkgserver: determine disposition route in mux
This removes duplicate checks and uses the more sound check in mux.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 20:51:08 -05:00
fb18e599dd cmd/pkgserver: format get error messages
This improves source code readability on smaller displays.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 20:51:08 -05:00
ec9005c794 cmd/pkgserver: constant string in pattern
This resolves patterns at compile time.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 20:51:08 -05:00
c6d35b4003 cmd/pkgserver: satisfy handler signature in method
This is somewhat cleaner.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 20:51:08 -05:00
6401533cc2 cmd/pkgserver: log instead of write encoding error
This message is unlikely to be useful to the user, and output may be partially written at this point, causing the error to be even less intelligible.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 20:51:08 -05:00
5d6c401beb cmd/pkgserver: appropriately mark test helpers
This improves usefulness of test log messages.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 20:51:08 -05:00
0a2d6aec14 cmd/pkgserver: do not omit report field
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 20:51:08 -05:00
67b11335d6 cmd/pkgserver: gracefully shut down on signal
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 20:51:08 -05:00
ef3bd1b60a cmd/pkgserver: specify full addr string in flag
This allows greater flexibility.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 20:51:08 -05:00
beae7c89db cmd/pkgserver: make report argument optional
This allows serving metadata only without a populated report. This also removes the out-of-bounds read on args when no arguments are passed.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 20:51:08 -05:00
ed26d1a1c2 cmd/pkgserver: embed internal/rosa metadata
This change also cleans up and reduces some unnecessary copies.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 20:51:08 -05:00
faa0006d47 cmd/pkgserver: do not assume default mux
This helps with testing.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 20:51:08 -05:00
796ddbc977 cmd/pkgserver: create index without report
This is useful for testing, where report testdata is not available.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 20:51:08 -05:00
mae
98ab020160 cmd/pkgserver: add sort orders, change pagination rules 2026-03-13 20:51:08 -05:00
mae
26a346036d cmd/pkgserver: add /status endpoint 2026-03-13 20:51:08 -05:00
mae
4ac9c72132 cmd/pkgserver: minimum viable frontend 2026-03-13 20:51:08 -05:00
mae
c39c07d440 cmd/pkgserver: api versioning 2026-03-13 20:51:08 -05:00
mae
b3fa0fe271 cmd/pkgserver: add get endpoint 2026-03-13 20:51:08 -05:00
mae
92a90582bb cmd/pkgserver: add count endpoint and restructure 2026-03-13 20:51:08 -05:00
mae
2e5ac56bdf cmd/pkgserver: add status endpoint 2026-03-13 20:51:08 -05:00
mae
75133e0234 cmd/pkgserver: add createPackageIndex 2026-03-13 20:51:08 -05:00
mae
c120d4de4f cmd/pkgserver: add command handler 2026-03-13 20:51:08 -05:00
mae
d6af8edb4a cmd/pkgserver: replace favicon 2026-03-13 20:51:08 -05:00
mae
da25d609d5 cmd/pkgserver: pagination 2026-03-13 20:51:08 -05:00
mae
95ceed0de0 cmd/pkgserver: basic web ui 2026-03-13 20:51:08 -05:00
74c213264a internal/rosa/git: install libexec symlinks
This is less clumsy to represent.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 20:43:23 +09:00
345cffddc2 cmd/mbf: optionally export output
This is for debugging for now, as no program consumes this format yet.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 19:53:55 +09:00
49163758c8 internal/rosa/llvm: 22.1.0 to 22.1.1
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 16:08:49 +09:00
ad22c15fb1 internal/rosa/perl: 5.42.0 to 5.42.1
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 16:08:24 +09:00
9c774f7e0a internal/rosa/python: setuptools 82.0.0 to 82.0.1
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 15:32:00 +09:00
707f0a349f internal/rosa/gtk: glib 2.87.3 to 2.87.5
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 15:26:42 +09:00
7c35be066a internal/rosa/tamago: 1.26.0 to 1.26.1
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 15:23:29 +09:00
f91d55fa5e internal/rosa/curl: 8.18.0 to 8.19.0
The test suite now depends on python to run mock servers. SMB is disabled because it is completely unused, and pulls in a python dependency for tests. A broken test is fixed and the patch hopefully upstreamed before next release.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 15:23:07 +09:00
5862cc1966 internal/rosa/kernel: firmware 20260221 to 20260309
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 14:06:21 +09:00
b3f0360a05 internal/rosa: populate runtime dependencies
This also removes manually resolved indirect dependencies.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 13:23:30 +09:00
8938994036 cmd/mbf: display runtime dependency info
This only presents top-level dependencies, resolving indirect dependencies can be misleading in this context.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 10:46:37 +09:00
96d382f805 cmd/mbf: resolve runtime dependencies
This also adds the collection meta-artifact for concurrent curing.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 10:41:22 +09:00
5c785c135c internal/rosa: collection meta-artifact
This is a stub FloodArtifact for concurrently curing multiple artifacts.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 10:34:45 +09:00
0130f8ea6d internal/rosa: represent runtime dependencies
This also resolves indirect dependencies, reducing noise.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-13 10:31:14 +09:00
faac5c4a83 internal/rosa: store artifact results in struct
This is cleaner and makes adding additional values easier.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-12 18:08:41 +09:00
620062cca9 hst: expose scheduling priority
This is useful when limits are configured to allow it.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-12 02:15:14 +09:00
196b200d0f container: expose priority and SCHED_OTHER policy
The more explicit API removes the arbitrary limit preventing use of SCHED_OTHER (referred to as SCHED_NORMAL in the kernel). This change also exposes priority value to set.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-12 01:14:03 +09:00
04e6bc3c5c hst: expose scheduling policy
This is primarily useful for poorly written music players for now.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-12 00:52:18 +09:00
5c540f90aa internal/outcome: improve doc comments
This improves readability on smaller displays.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-11 21:04:02 +09:00
1e8ac5f68e container: use policy name in log message
This is more helpful than having the user resolve the integer.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-11 20:20:34 +09:00
fd515badff container: move scheduler policy constants to std
This avoids depending on cgo.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-11 20:03:08 +09:00
330a344845 hst: improve doc comments
These now read a lot better both in source and on pkgsite.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-11 19:21:55 +09:00
48cdf8bf85 go: 1.26
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-10 03:29:19 +09:00
7fb42ba49d internal/rosa/llvm: set LLVM_LIT_ARGS
This replaces the progress bar, which was worse than useless.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-10 02:05:11 +09:00
19a2737148 container: sched policy string representation
This also uses priority obtained via sched_get_priority_min, and improves bounds checking.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-09 18:38:31 +09:00
baf2def9cc internal/rosa/kmod: prefix moduledir
This change also works around the kernel build system being unaware of this option.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-09 16:40:55 +09:00
242e042cb9 internal/rosa/nss: rename from ssl
The SSL name came from earlier on and is counterintuitive.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-09 14:58:31 +09:00
6988c9c4db internal/rosa: firmware artifact
Required for generic hardware.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-08 22:50:36 +09:00
d6e0ed8c76 internal/rosa/python: various pypi artifacts
These are dependencies of pre-commit.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-08 22:25:16 +09:00
53be3309c5 internal/rosa: rdfind artifact
Required by linux firmware.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-08 20:26:15 +09:00
644dd18a52 internal/rosa: nettle artifact
Required by rdfind, which is required by linux firmware.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-08 20:22:09 +09:00
27c6f976df internal/rosa/gnu: parallel artifact
Used by linux firmware.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-08 19:56:40 +09:00
279a973633 internal/rosa: build independent earlyinit
This avoids unnecessarily rebuilding hakurei during development.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-08 18:29:04 +09:00
9c1b522689 internal/rosa/hakurei: optional hostname tool
This makes it more efficient to reuse the helper for partial builds.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-08 18:26:03 +09:00
5c8cd46c02 internal/rosa: update arm64 kernel config
This was not feasible during the bump, now there is a viable toolchain.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-08 03:17:53 +09:00
2dba550a2b internal/rosa/zlib: 1.3.1 to 1.3.2
This also switches to the CMake build system because upstream broke their old build system.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-08 02:36:59 +09:00
8c64812b34 internal/rosa: add zlib runtime dependency
For transitioning to dynamically linking zlib.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-08 02:36:09 +09:00
d1423d980d internal/rosa/cmake: bake in CMAKE_INSTALL_LIBDIR
There is never a good reason to set this to anything else, and the default value of lib64 breaks everything. This did not manifest on LLVM (which the CMake helper was initially written for) because it did not use this value.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-08 01:20:41 +09:00
104da0f66a internal/rosa/cmake: pass correct prefix
This can change build output similar to autotools --prefix and DESTDIR, but was not clearly indicated to do so.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-08 01:04:02 +09:00
d996d9fbb7 internal/rosa/cmake: pass parallel argument for make
This uses the default value for each build system, which is parallel for ninja but not for make.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-08 00:55:58 +09:00
469f97ccc1 internal/rosa/gnu: libiconv 1.18 to 1.19
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-08 00:36:38 +09:00
af7a6180a1 internal/rosa/cmake: optionally use makefile
This breaks the dependency loop in zlib.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-07 22:47:30 +09:00
03b5c0e20a internal/rosa/tamago: populate Anitya project id
This had to wait quite a while due to Microsoft Github rate-limiting.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-07 19:37:03 +09:00
6a31fb4fa3 internal/rosa: hakurei 0.3.5 to 0.3.6
This also removes the backport patch.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-03-07 18:53:48 +09:00
101 changed files with 2992 additions and 503 deletions

4
.gitignore vendored
View File

@@ -27,6 +27,10 @@ go.work.sum
# go generate # go generate
/cmd/hakurei/LICENSE /cmd/hakurei/LICENSE
/cmd/pkgserver/.sass-cache
/cmd/pkgserver/ui/static/*.js
/cmd/pkgserver/ui/static/*.css*
/cmd/pkgserver/ui/static/*.css.map
/internal/pkg/testdata/testtool /internal/pkg/testdata/testtool
/internal/rosa/hakurei_current.tar.gz /internal/rosa/hakurei_current.tar.gz

View File

@@ -16,6 +16,7 @@ import (
"hakurei.app/command" "hakurei.app/command"
"hakurei.app/container/check" "hakurei.app/container/check"
"hakurei.app/container/fhs" "hakurei.app/container/fhs"
"hakurei.app/container/std"
"hakurei.app/hst" "hakurei.app/hst"
"hakurei.app/internal/dbus" "hakurei.app/internal/dbus"
"hakurei.app/internal/env" "hakurei.app/internal/env"
@@ -89,6 +90,9 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
flagHomeDir string flagHomeDir string
flagUserName string flagUserName string
flagSchedPolicy string
flagSchedPriority int
flagPrivateRuntime, flagPrivateTmpdir bool flagPrivateRuntime, flagPrivateTmpdir bool
flagWayland, flagX11, flagDBus, flagPipeWire, flagPulse bool flagWayland, flagX11, flagDBus, flagPipeWire, flagPulse bool
@@ -131,7 +135,7 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
log.Fatal(optionalErrorUnwrap(err)) log.Fatal(optionalErrorUnwrap(err))
return err return err
} else if progPath, err = check.NewAbs(p); err != nil { } else if progPath, err = check.NewAbs(p); err != nil {
log.Fatal(err.Error()) log.Fatal(err)
return err return err
} }
} }
@@ -150,7 +154,7 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
et |= hst.EPipeWire et |= hst.EPipeWire
} }
config := &hst.Config{ config := hst.Config{
ID: flagID, ID: flagID,
Identity: flagIdentity, Identity: flagIdentity,
Groups: flagGroups, Groups: flagGroups,
@@ -177,6 +181,13 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
}, },
} }
if err := config.SchedPolicy.UnmarshalText(
[]byte(flagSchedPolicy),
); err != nil {
log.Fatal(err)
}
config.SchedPriority = std.Int(flagSchedPriority)
// bind GPU stuff // bind GPU stuff
if et&(hst.EX11|hst.EWayland) != 0 { if et&(hst.EX11|hst.EWayland) != 0 {
config.Container.Filesystem = append(config.Container.Filesystem, hst.FilesystemConfigJSON{FilesystemConfig: &hst.FSBind{ config.Container.Filesystem = append(config.Container.Filesystem, hst.FilesystemConfigJSON{FilesystemConfig: &hst.FSBind{
@@ -214,7 +225,7 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
homeDir = passwd.HomeDir homeDir = passwd.HomeDir
} }
if a, err := check.NewAbs(homeDir); err != nil { if a, err := check.NewAbs(homeDir); err != nil {
log.Fatal(err.Error()) log.Fatal(err)
return err return err
} else { } else {
config.Container.Home = a config.Container.Home = a
@@ -234,11 +245,11 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
config.SessionBus = dbus.NewConfig(flagID, true, flagDBusMpris) config.SessionBus = dbus.NewConfig(flagID, true, flagDBusMpris)
} else { } else {
if f, err := os.Open(flagDBusConfigSession); err != nil { if f, err := os.Open(flagDBusConfigSession); err != nil {
log.Fatal(err.Error()) log.Fatal(err)
} else { } else {
decodeJSON(log.Fatal, "load session bus proxy config", f, &config.SessionBus) decodeJSON(log.Fatal, "load session bus proxy config", f, &config.SessionBus)
if err = f.Close(); err != nil { if err = f.Close(); err != nil {
log.Fatal(err.Error()) log.Fatal(err)
} }
} }
} }
@@ -246,11 +257,11 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
// system bus proxy is optional // system bus proxy is optional
if flagDBusConfigSystem != "nil" { if flagDBusConfigSystem != "nil" {
if f, err := os.Open(flagDBusConfigSystem); err != nil { if f, err := os.Open(flagDBusConfigSystem); err != nil {
log.Fatal(err.Error()) log.Fatal(err)
} else { } else {
decodeJSON(log.Fatal, "load system bus proxy config", f, &config.SystemBus) decodeJSON(log.Fatal, "load system bus proxy config", f, &config.SystemBus)
if err = f.Close(); err != nil { if err = f.Close(); err != nil {
log.Fatal(err.Error()) log.Fatal(err)
} }
} }
} }
@@ -266,7 +277,7 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
} }
} }
outcome.Main(ctx, msg, config, -1) outcome.Main(ctx, msg, &config, -1)
panic("unreachable") panic("unreachable")
}). }).
Flag(&flagDBusConfigSession, "dbus-config", command.StringFlag("builtin"), Flag(&flagDBusConfigSession, "dbus-config", command.StringFlag("builtin"),
@@ -287,6 +298,10 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
"Container home directory"). "Container home directory").
Flag(&flagUserName, "u", command.StringFlag("chronos"), Flag(&flagUserName, "u", command.StringFlag("chronos"),
"Passwd user name within sandbox"). "Passwd user name within sandbox").
Flag(&flagSchedPolicy, "policy", command.StringFlag(""),
"Scheduling policy to set for the container").
Flag(&flagSchedPriority, "priority", command.IntFlag(0),
"Scheduling priority to set for the container").
Flag(&flagPrivateRuntime, "private-runtime", command.BoolFlag(false), Flag(&flagPrivateRuntime, "private-runtime", command.BoolFlag(false),
"Do not share XDG_RUNTIME_DIR between containers under the same identity"). "Do not share XDG_RUNTIME_DIR between containers under the same identity").
Flag(&flagPrivateTmpdir, "private-tmpdir", command.BoolFlag(false), Flag(&flagPrivateTmpdir, "private-tmpdir", command.BoolFlag(false),

View File

@@ -36,7 +36,7 @@ Commands:
}, },
{ {
"run", []string{"run", "-h"}, ` "run", []string{"run", "-h"}, `
Usage: hakurei run [-h | --help] [--dbus-config <value>] [--dbus-system <value>] [--mpris] [--dbus-log] [--id <value>] [-a <int>] [-g <value>] [-d <value>] [-u <value>] [--private-runtime] [--private-tmpdir] [--wayland] [-X] [--dbus] [--pipewire] [--pulse] COMMAND [OPTIONS] Usage: hakurei run [-h | --help] [--dbus-config <value>] [--dbus-system <value>] [--mpris] [--dbus-log] [--id <value>] [-a <int>] [-g <value>] [-d <value>] [-u <value>] [--policy <value>] [--priority <int>] [--private-runtime] [--private-tmpdir] [--wayland] [-X] [--dbus] [--pipewire] [--pulse] COMMAND [OPTIONS]
Flags: Flags:
-X Enable direct connection to X11 -X Enable direct connection to X11
@@ -60,6 +60,10 @@ Flags:
Allow owning MPRIS D-Bus path, has no effect if custom config is available Allow owning MPRIS D-Bus path, has no effect if custom config is available
-pipewire -pipewire
Enable connection to PipeWire via SecurityContext Enable connection to PipeWire via SecurityContext
-policy string
Scheduling policy to set for the container
-priority int
Scheduling priority to set for the container
-private-runtime -private-runtime
Do not share XDG_RUNTIME_DIR between containers under the same identity Do not share XDG_RUNTIME_DIR between containers under the same identity
-private-tmpdir -private-tmpdir

View File

@@ -87,7 +87,7 @@ func main() {
} }
if flagIdle { if flagIdle {
pkg.SchedPolicy = container.SCHED_IDLE pkg.SetSchedIdle = true
} }
return return
@@ -175,6 +175,17 @@ func main() {
fmt.Println("website : " + fmt.Println("website : " +
strings.TrimSuffix(meta.Website, "/")) strings.TrimSuffix(meta.Website, "/"))
} }
if len(meta.Dependencies) > 0 {
fmt.Print("depends on :")
for _, d := range meta.Dependencies {
s := rosa.GetMetadata(d).Name
if version := rosa.Std.Version(d); version != rosa.Unversioned {
s += "-" + version
}
fmt.Print(" " + s)
}
fmt.Println()
}
const statusPrefix = "status : " const statusPrefix = "status : "
if flagStatus { if flagStatus {
@@ -424,6 +435,7 @@ func main() {
{ {
var ( var (
flagDump string flagDump string
flagExport string
) )
c.NewCommand( c.NewCommand(
"cure", "cure",
@@ -436,10 +448,34 @@ func main() {
return fmt.Errorf("unknown artifact %q", args[0]) return fmt.Errorf("unknown artifact %q", args[0])
} else if flagDump == "" { } else if flagDump == "" {
pathname, _, err := cache.Cure(rosa.Std.Load(p)) pathname, _, err := cache.Cure(rosa.Std.Load(p))
if err == nil { if err != nil {
log.Println(pathname)
}
return err return err
}
log.Println(pathname)
if flagExport != "" {
msg.Verbosef("exporting %s to %s...", args[0], flagExport)
var f *os.File
if f, err = os.OpenFile(
flagExport,
os.O_WRONLY|os.O_CREATE|os.O_EXCL,
0400,
); err != nil {
return err
} else if _, err = pkg.Flatten(
os.DirFS(pathname.String()),
".",
f,
); err != nil {
_ = f.Close()
return err
} else if err = f.Close(); err != nil {
return err
}
}
return nil
} else { } else {
f, err := os.OpenFile( f, err := os.OpenFile(
flagDump, flagDump,
@@ -463,6 +499,11 @@ func main() {
&flagDump, &flagDump,
"dump", command.StringFlag(""), "dump", command.StringFlag(""),
"Write IR to specified pathname and terminate", "Write IR to specified pathname and terminate",
).
Flag(
&flagExport,
"export", command.StringFlag(""),
"Export cured artifact to specified pathname",
) )
} }
@@ -477,17 +518,19 @@ func main() {
"shell", "shell",
"Interactive shell in the specified Rosa OS environment", "Interactive shell in the specified Rosa OS environment",
func(args []string) error { func(args []string) error {
root := make([]pkg.Artifact, 0, 6+len(args)) presets := make([]rosa.PArtifact, len(args))
for _, arg := range args { for i, arg := range args {
p, ok := rosa.ResolveName(arg) p, ok := rosa.ResolveName(arg)
if !ok { if !ok {
return fmt.Errorf("unknown artifact %q", arg) return fmt.Errorf("unknown artifact %q", arg)
} }
root = append(root, rosa.Std.Load(p)) presets[i] = p
} }
root := make(rosa.Collect, 0, 6+len(args))
root = rosa.Std.AppendPresets(root, presets...)
if flagWithToolchain { if flagWithToolchain {
musl, compilerRT, runtimes, clang := rosa.Std.NewLLVM() musl, compilerRT, runtimes, clang := (rosa.Std - 1).NewLLVM()
root = append(root, musl, compilerRT, runtimes, clang) root = append(root, musl, compilerRT, runtimes, clang)
} else { } else {
root = append(root, rosa.Std.Load(rosa.Musl)) root = append(root, rosa.Std.Load(rosa.Musl))
@@ -497,6 +540,12 @@ func main() {
rosa.Std.Load(rosa.Toybox), rosa.Std.Load(rosa.Toybox),
) )
if _, _, err := cache.Cure(&root); err == nil {
return errors.New("unreachable")
} else if !errors.Is(err, rosa.Collected{}) {
return err
}
type cureRes struct { type cureRes struct {
pathname *check.Absolute pathname *check.Absolute
checksum unique.Handle[pkg.Checksum] checksum unique.Handle[pkg.Checksum]

176
cmd/pkgserver/api.go Normal file
View File

@@ -0,0 +1,176 @@
package main
import (
"encoding/json"
"log"
"net/http"
"net/url"
"path"
"strconv"
"sync"
"hakurei.app/internal/info"
"hakurei.app/internal/rosa"
)
// infoPayload is lazily initialised by handleInfo on first request.
var (
	infoPayload struct {
		// Current package count.
		Count int `json:"count"`
		// Hakurei version, set at link time.
		HakureiVersion string `json:"hakurei_version"`
	}
	// infoPayloadOnce guards the one-time population of infoPayload.
	infoPayloadOnce sync.Once
)

// handleInfo writes constant system information as a JSON payload.
// The payload is populated exactly once and reused by later requests.
func handleInfo(w http.ResponseWriter, _ *http.Request) {
	infoPayloadOnce.Do(func() {
		// count derives from rosa.PresetUnexportedStart — presumably the
		// number of exported presets; confirm against internal/rosa
		infoPayload.Count = int(rosa.PresetUnexportedStart)
		infoPayload.HakureiVersion = info.Version()
	})
	// TODO(mae): cache entire response if no additional fields are planned
	writeAPIPayload(w, infoPayload)
}
// newStatusHandler returns a [http.HandlerFunc] that offers status files for
// viewing or download, if available.
//
// The artifact is selected by the final element of the request path. With
// disposition set, the response carries a Content-Disposition header naming
// the file after the artifact, its version (if any) and its encoded ident.
func (index *packageIndex) newStatusHandler(disposition bool) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		m, ok := index.names[path.Base(r.URL.Path)]
		if !ok || !m.HasReport {
			// unknown name, or known but absent from the report
			http.NotFound(w, r)
			return
		}

		contentType := "text/plain; charset=utf-8"
		if disposition {
			contentType = "application/octet-stream"

			// quoting like this is unsound, but okay, because metadata is hardcoded
			contentDisposition := `attachment; filename="`
			contentDisposition += m.Name + "-"
			if m.Version != "" {
				contentDisposition += m.Version + "-"
			}
			contentDisposition += m.ids + `.log"`
			w.Header().Set("Content-Disposition", contentDisposition)
		}
		w.Header().Set("Content-Type", contentType)
		w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")

		// m.status is backed by the report, so the write is wrapped by
		// handleAccess to prepare and release access around it
		if err := func() (err error) {
			defer index.handleAccess(&err)()
			_, err = w.Write(m.status)
			return
		}(); err != nil {
			log.Println(err)
			// NOTE(review): if Write delivered part of the body before
			// failing, this status code can no longer take effect — confirm
			http.Error(
				w, "cannot deliver status, contact maintainers",
				http.StatusInternalServerError,
			)
		}
	}
}
// handleGet writes a slice of metadata with specified order.
//
// Query parameters:
//   - limit: page size, integer in [1, 100]
//   - index: start offset into the chosen sort order
//   - sort:  sort order identifier, integer in [0, sortOrderEnd]
func (index *packageIndex) handleGet(w http.ResponseWriter, r *http.Request) {
	q := r.URL.Query()

	limit, err := strconv.Atoi(q.Get("limit"))
	if err != nil || limit > 100 || limit < 1 {
		http.Error(
			w, "limit must be an integer between 1 and 100",
			http.StatusBadRequest,
		)
		return
	}

	i, err := strconv.Atoi(q.Get("index"))
	// every sort order holds the same entries, so sorts[0] bounds them all
	if err != nil || i >= len(index.sorts[0]) || i < 0 {
		http.Error(
			w, "index must be an integer between 0 and "+
				strconv.Itoa(int(rosa.PresetUnexportedStart-1)),
			http.StatusBadRequest,
		)
		return
	}

	sort, err := strconv.Atoi(q.Get("sort"))
	if err != nil || sort >= len(index.sorts) || sort < 0 {
		http.Error(
			w, "sort must be an integer between 0 and "+
				strconv.Itoa(sortOrderEnd),
			http.StatusBadRequest,
		)
		return
	}

	// clamp the page so a large limit never slices past the end
	values := index.sorts[sort][i:min(i+limit, len(index.sorts[sort]))]
	writeAPIPayload(w, &struct {
		Values []*metadata `json:"values"`
	}{values})
}
// handleSearch validates query parameters and writes matching search results
// for the requested page as a JSON payload.
//
// Query parameters:
//   - limit:  page size, integer in [1, 100]
//   - index:  start offset into the result set
//   - search: query string, at most 100 characters after unescaping
//   - desc:   "true" to reverse the result order
func (index *packageIndex) handleSearch(w http.ResponseWriter, r *http.Request) {
	q := r.URL.Query()

	limit, err := strconv.Atoi(q.Get("limit"))
	if err != nil || limit > 100 || limit < 1 {
		http.Error(
			w, "limit must be an integer between 1 and 100",
			http.StatusBadRequest,
		)
		return
	}

	i, err := strconv.Atoi(q.Get("index"))
	// every sort order holds the same entries, so sorts[0] bounds them all
	if err != nil || i >= len(index.sorts[0]) || i < 0 {
		http.Error(
			w, "index must be an integer between 0 and "+
				strconv.Itoa(int(rosa.PresetUnexportedStart-1)),
			http.StatusBadRequest,
		)
		return
	}

	search, err := url.PathUnescape(q.Get("search"))
	if len(search) > 100 || err != nil {
		http.Error(
			w, "search must be a string between 0 and 100 characters long",
			http.StatusBadRequest,
		)
		return
	}
	desc := q.Get("desc") == "true"

	n, res, err := index.performSearchQuery(limit, i, search, desc)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		// stop here: previously this fell through and appended a JSON
		// payload to the 500 error body
		return
	}
	writeAPIPayload(w, &struct {
		Count   int            `json:"count"`
		Results []searchResult `json:"results"`
	}{n, res})
}
// apiVersion is the name of the current API revision, as part of the pattern.
const apiVersion = "v1"

// registerAPI registers API handler functions on mux.
// The unversioned /status/ route serves the same data as the API status route
// but with a download Content-Disposition.
func (index *packageIndex) registerAPI(mux *http.ServeMux) {
	mux.HandleFunc("GET /api/"+apiVersion+"/info", handleInfo)
	mux.HandleFunc("GET /api/"+apiVersion+"/get", index.handleGet)
	mux.HandleFunc("GET /api/"+apiVersion+"/search", index.handleSearch)
	mux.HandleFunc("GET /api/"+apiVersion+"/status/", index.newStatusHandler(false))
	mux.HandleFunc("GET /status/", index.newStatusHandler(true))
}
// writeAPIPayload sets headers common to API responses and encodes payload as
// JSON for the response body.
func writeAPIPayload(w http.ResponseWriter, payload any) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
w.Header().Set("Pragma", "no-cache")
w.Header().Set("Expires", "0")
if err := json.NewEncoder(w).Encode(payload); err != nil {
log.Println(err)
http.Error(
w, "cannot encode payload, contact maintainers",
http.StatusInternalServerError,
)
}
}

183
cmd/pkgserver/api_test.go Normal file
View File

@@ -0,0 +1,183 @@
package main
import (
"net/http"
"net/http/httptest"
"slices"
"strconv"
"testing"
"hakurei.app/internal/info"
"hakurei.app/internal/rosa"
)
// prefix is prepended to every API path.
const prefix = "/api/" + apiVersion + "/"

// TestAPIInfo checks the response of the info endpoint against values
// obtained directly from the rosa and info packages.
func TestAPIInfo(t *testing.T) {
	t.Parallel()

	w := httptest.NewRecorder()
	handleInfo(w, httptest.NewRequestWithContext(
		t.Context(),
		http.MethodGet,
		prefix+"info",
		nil,
	))
	resp := w.Result()
	checkStatus(t, resp, http.StatusOK)
	checkAPIHeader(t, w.Header())
	// anonymous struct mirrors the infoPayload wire format
	checkPayload(t, resp, struct {
		Count          int    `json:"count"`
		HakureiVersion string `json:"hakurei_version"`
	}{int(rosa.PresetUnexportedStart), info.Version()})
}
// TestAPIGet exercises parameter validation and pagination of the get
// endpoint against an index produced by the newIndex test helper.
func TestAPIGet(t *testing.T) {
	t.Parallel()
	const target = prefix + "get"
	index := newIndex(t)

	// newRequest performs a GET against handleGet with the given query
	// string appended to the endpoint path and returns the recorder.
	newRequest := func(suffix string) *httptest.ResponseRecorder {
		w := httptest.NewRecorder()
		index.handleGet(w, httptest.NewRequestWithContext(
			t.Context(),
			http.MethodGet,
			target+suffix,
			nil,
		))
		return w
	}

	// checkValidate verifies that a non-integer value and values just
	// outside [vmin, vmax] are rejected with wantErr, and that the exact
	// boundary values are accepted.
	checkValidate := func(t *testing.T, suffix string, vmin, vmax int, wantErr string) {
		t.Run("invalid", func(t *testing.T) {
			t.Parallel()

			w := newRequest("?" + suffix + "=invalid")
			resp := w.Result()
			checkError(t, resp, wantErr, http.StatusBadRequest)
		})

		t.Run("min", func(t *testing.T) {
			t.Parallel()

			// one below the minimum must fail, the minimum must pass
			w := newRequest("?" + suffix + "=" + strconv.Itoa(vmin-1))
			resp := w.Result()
			checkError(t, resp, wantErr, http.StatusBadRequest)

			w = newRequest("?" + suffix + "=" + strconv.Itoa(vmin))
			resp = w.Result()
			checkStatus(t, resp, http.StatusOK)
		})

		t.Run("max", func(t *testing.T) {
			t.Parallel()

			// one above the maximum must fail, the maximum must pass
			w := newRequest("?" + suffix + "=" + strconv.Itoa(vmax+1))
			resp := w.Result()
			checkError(t, resp, wantErr, http.StatusBadRequest)

			w = newRequest("?" + suffix + "=" + strconv.Itoa(vmax))
			resp = w.Result()
			checkStatus(t, resp, http.StatusOK)
		})
	}

	t.Run("limit", func(t *testing.T) {
		t.Parallel()
		checkValidate(
			t, "index=0&sort=0&limit", 1, 100,
			"limit must be an integer between 1 and 100",
		)
	})
	t.Run("index", func(t *testing.T) {
		t.Parallel()
		checkValidate(
			t, "limit=1&sort=0&index", 0, int(rosa.PresetUnexportedStart-1),
			"index must be an integer between 0 and "+strconv.Itoa(int(rosa.PresetUnexportedStart-1)),
		)
	})
	t.Run("sort", func(t *testing.T) {
		t.Parallel()
		checkValidate(
			t, "index=0&limit=1&sort", 0, int(sortOrderEnd),
			"sort must be an integer between 0 and "+strconv.Itoa(int(sortOrderEnd)),
		)
	})

	// checkWithSuffix requests a page and compares the decoded payload
	// against want; rosa.Unversioned on either side matches any version.
	checkWithSuffix := func(name, suffix string, want []*metadata) {
		t.Run(name, func(t *testing.T) {
			t.Parallel()

			w := newRequest(suffix)
			resp := w.Result()
			checkStatus(t, resp, http.StatusOK)
			checkAPIHeader(t, w.Header())
			checkPayloadFunc(t, resp, func(got *struct {
				Count  int         `json:"count"`
				Values []*metadata `json:"values"`
			}) bool {
				return got.Count == len(want) &&
					slices.EqualFunc(got.Values, want, func(a, b *metadata) bool {
						return (a.Version == b.Version ||
							a.Version == rosa.Unversioned ||
							b.Version == rosa.Unversioned) &&
							a.HasReport == b.HasReport &&
							a.Name == b.Name &&
							a.Description == b.Description &&
							a.Website == b.Website
					})
			})
		})
	}

	// ascending by declaration: pages mirror preset order from zero
	checkWithSuffix("declarationAscending", "?limit=2&index=0&sort=0", []*metadata{
		{
			Metadata: rosa.GetMetadata(0),
			Version:  rosa.Std.Version(0),
		},
		{
			Metadata: rosa.GetMetadata(1),
			Version:  rosa.Std.Version(1),
		},
	})
	checkWithSuffix("declarationAscending offset", "?limit=3&index=5&sort=0", []*metadata{
		{
			Metadata: rosa.GetMetadata(5),
			Version:  rosa.Std.Version(5),
		},
		{
			Metadata: rosa.GetMetadata(6),
			Version:  rosa.Std.Version(6),
		},
		{
			Metadata: rosa.GetMetadata(7),
			Version:  rosa.Std.Version(7),
		},
	})
	// descending by declaration: pages walk back from the last preset
	checkWithSuffix("declarationDescending", "?limit=3&index=0&sort=1", []*metadata{
		{
			Metadata: rosa.GetMetadata(rosa.PresetUnexportedStart - 1),
			Version:  rosa.Std.Version(rosa.PresetUnexportedStart - 1),
		},
		{
			Metadata: rosa.GetMetadata(rosa.PresetUnexportedStart - 2),
			Version:  rosa.Std.Version(rosa.PresetUnexportedStart - 2),
		},
		{
			Metadata: rosa.GetMetadata(rosa.PresetUnexportedStart - 3),
			Version:  rosa.Std.Version(rosa.PresetUnexportedStart - 3),
		},
	})
	checkWithSuffix("declarationDescending offset", "?limit=1&index=37&sort=1", []*metadata{
		{
			Metadata: rosa.GetMetadata(rosa.PresetUnexportedStart - 38),
			Version:  rosa.Std.Version(rosa.PresetUnexportedStart - 38),
		},
	})
}

105
cmd/pkgserver/index.go Normal file
View File

@@ -0,0 +1,105 @@
package main
import (
"cmp"
"errors"
"slices"
"strings"
"hakurei.app/internal/pkg"
"hakurei.app/internal/rosa"
)
// Sort orders accepted by the index endpoints; each value selects one of the
// precomputed orderings held in packageIndex.sorts.
const (
	declarationAscending = iota
	declarationDescending
	nameAscending
	nameDescending
	sizeAscending
	sizeDescending

	// sortOrderEnd is the largest valid sort order value.
	sortOrderEnd = iota - 1
)

// packageIndex refers to metadata by name and various sort orders.
type packageIndex struct {
	// sorts holds one fully ordered array per sort order; all arrays share
	// the same *metadata values, so an entry is stored once.
	sorts [sortOrderEnd + 1][rosa.PresetUnexportedStart]*metadata
	// names maps metadata.Name to its shared entry.
	names map[string]*metadata
	// search caches regexp query results; see search.go.
	search searchCache

	// Taken from [rosa.Report] if available.
	handleAccess func(*error) func()
}

// metadata holds [rosa.Metadata] extended with additional information.
type metadata struct {
	// p is the preset this entry was built from.
	p rosa.PArtifact
	*rosa.Metadata

	// Populated via [rosa.Toolchain.Version], [rosa.Unversioned] is equivalent
	// to the zero value. Otherwise, the zero value is invalid.
	Version string `json:"version,omitempty"`
	// Output data size, available if present in report.
	Size int64 `json:"size,omitempty"`
	// Whether the underlying [pkg.Artifact] is present in the report.
	HasReport bool `json:"report"`

	// Ident string encoded ahead of time.
	ids string
	// Backed by [rosa.Report], access must be prepared by HandleAccess.
	status []byte
}
// populate deterministically populates packageIndex, optionally with a report.
// Go arrays have value semantics, so each assignment into index.sorts copies
// the array of pointers; sorting or reversing one order never disturbs another.
func (index *packageIndex) populate(cache *pkg.Cache, report *rosa.Report) (err error) {
	if report != nil {
		defer report.HandleAccess(&err)()
		index.handleAccess = report.HandleAccess
	}

	index.names = make(map[string]*metadata, rosa.PresetUnexportedStart)
	var work [rosa.PresetUnexportedStart]*metadata
	for p := range rosa.PresetUnexportedStart {
		entry := &metadata{
			p:        p,
			Metadata: rosa.GetMetadata(p),
			Version:  rosa.Std.Version(p),
		}
		switch entry.Version {
		case "":
			// The zero value is invalid here; an unversioned artifact is
			// reported as rosa.Unversioned instead.
			return errors.New("invalid version from " + entry.Name)
		case rosa.Unversioned:
			// Omit the field for unversioned artifacts, once, at startup.
			entry.Version = ""
		}
		if cache != nil && report != nil {
			id := cache.Ident(rosa.Std.Load(p))
			entry.ids = pkg.Encode(id.Value())
			entry.status, entry.Size = report.ArtifactOf(id)
			entry.HasReport = entry.Size >= 0
		}
		work[p] = entry
		index.names[entry.Name] = entry
	}

	// Declaration order: the work array as built, plus its mirror image.
	index.sorts[declarationAscending] = work
	index.sorts[declarationDescending] = work
	slices.Reverse(index.sorts[declarationDescending][:])

	// Name order: sort a fresh copy, then mirror it for descending.
	byName := work
	slices.SortFunc(byName[:], func(a, b *metadata) int {
		return strings.Compare(a.Name, b.Name)
	})
	index.sorts[nameAscending] = byName
	index.sorts[nameDescending] = byName
	slices.Reverse(index.sorts[nameDescending][:])

	// Size order: likewise from a fresh copy of the declaration order.
	bySize := work
	slices.SortFunc(bySize[:], func(a, b *metadata) int {
		return cmp.Compare(a.Size, b.Size)
	})
	index.sorts[sizeAscending] = bySize
	index.sorts[sizeDescending] = bySize
	slices.Reverse(index.sorts[sizeDescending][:])
	return
}

114
cmd/pkgserver/main.go Normal file
View File

@@ -0,0 +1,114 @@
package main
import (
"context"
"errors"
"log"
"net/http"
"os"
"os/signal"
"syscall"
"time"
"hakurei.app/command"
"hakurei.app/container/check"
"hakurei.app/internal/pkg"
"hakurei.app/internal/rosa"
"hakurei.app/message"
)
const shutdownTimeout = 15 * time.Second
// main serves the package index over HTTP. With no arguments it serves
// metadata only; with a single report path argument it additionally serves
// per-artifact status and size information from the cache rooted at -b.
func main() {
	log.SetFlags(0)
	log.SetPrefix("pkgserver: ")

	var (
		flagBaseDir string
		flagAddr    string
	)

	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
	defer stop()
	msg := message.New(log.Default())

	c := command.New(os.Stderr, log.Printf, "pkgserver", func(args []string) error {
		var (
			cache  *pkg.Cache
			report *rosa.Report
		)
		switch len(args) {
		case 0:
			// Metadata-only mode: no cache or report is opened.
		case 1:
			baseDir, err := check.NewAbs(flagBaseDir)
			if err != nil {
				return err
			}
			cache, err = pkg.Open(ctx, msg, 0, baseDir)
			if err != nil {
				return err
			}
			defer cache.Close()
			report, err = rosa.OpenReport(args[0])
			if err != nil {
				return err
			}
		default:
			// Zero arguments is a valid invocation (metadata-only mode), so
			// the previous "requires 1 argument" message was misleading.
			return errors.New("pkgserver accepts at most 1 argument")
		}

		var index packageIndex
		index.search = make(searchCache)
		if err := index.populate(cache, report); err != nil {
			return err
		}

		// Periodically evict expired search cache entries.
		ticker := time.NewTicker(1 * time.Minute)
		go func() {
			defer ticker.Stop()
			for {
				select {
				case <-ctx.Done():
					return
				case <-ticker.C:
					index.search.clean()
				}
			}
		}()

		var mux http.ServeMux
		uiRoutes(&mux)
		index.registerAPI(&mux)
		server := http.Server{
			Addr:    flagAddr,
			Handler: &mux,
		}

		// Drain connections gracefully when a termination signal arrives;
		// ListenAndServe then returns http.ErrServerClosed.
		go func() {
			<-ctx.Done()
			c, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
			defer cancel()
			if err := server.Shutdown(c); err != nil {
				log.Fatal(err)
			}
		}()
		return server.ListenAndServe()
	}).Flag(
		&flagBaseDir,
		"b", command.StringFlag(""),
		"base directory for cache",
	).Flag(
		&flagAddr,
		"addr", command.StringFlag(":8067"),
		"TCP network address to listen on",
	)

	c.MustParse(os.Args[1:], func(err error) {
		// Graceful shutdown surfaces as ErrServerClosed; not a failure.
		if errors.Is(err, http.ErrServerClosed) {
			os.Exit(0)
		}
		log.Fatal(err)
	})
}

View File

@@ -0,0 +1,96 @@
package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"reflect"
"testing"
)
// newIndex returns the address of a newly populated packageIndex.
func newIndex(t *testing.T) *packageIndex {
t.Helper()
var index packageIndex
if err := index.populate(nil, nil); err != nil {
t.Fatalf("populate: error = %v", err)
}
return &index
}
// checkStatus checks response status code.
func checkStatus(t *testing.T, resp *http.Response, want int) {
t.Helper()
if resp.StatusCode != want {
t.Errorf(
"StatusCode: %s, want %s",
http.StatusText(resp.StatusCode),
http.StatusText(want),
)
}
}
// checkHeader checks the value of a header entry.
func checkHeader(t *testing.T, h http.Header, key, want string) {
t.Helper()
if got := h.Get(key); got != want {
t.Errorf("%s: %q, want %q", key, got, want)
}
}
// checkAPIHeader checks common entries set for API endpoints.
func checkAPIHeader(t *testing.T, h http.Header) {
t.Helper()
checkHeader(t, h, "Content-Type", "application/json; charset=utf-8")
checkHeader(t, h, "Cache-Control", "no-cache, no-store, must-revalidate")
checkHeader(t, h, "Pragma", "no-cache")
checkHeader(t, h, "Expires", "0")
}
// checkPayloadFunc checks the JSON response of an API endpoint by passing it to f.
func checkPayloadFunc[T any](
t *testing.T,
resp *http.Response,
f func(got *T) bool,
) {
t.Helper()
var got T
r := io.Reader(resp.Body)
if testing.Verbose() {
var buf bytes.Buffer
r = io.TeeReader(r, &buf)
defer func() { t.Helper(); t.Log(buf.String()) }()
}
if err := json.NewDecoder(r).Decode(&got); err != nil {
t.Fatalf("Decode: error = %v", err)
}
if !f(&got) {
t.Errorf("Body: %#v", got)
}
}
// checkPayload checks the JSON response of an API endpoint.
func checkPayload[T any](t *testing.T, resp *http.Response, want T) {
t.Helper()
checkPayloadFunc(t, resp, func(got *T) bool {
return reflect.DeepEqual(got, &want)
})
}
func checkError(t *testing.T, resp *http.Response, error string, code int) {
t.Helper()
checkStatus(t, resp, code)
if got, _ := io.ReadAll(resp.Body); string(got) != fmt.Sprintln(error) {
t.Errorf("Body: %q, want %q", string(got), error)
}
}

77
cmd/pkgserver/search.go Normal file
View File

@@ -0,0 +1,77 @@
package main
import (
"cmp"
"maps"
"regexp"
"slices"
"time"
)
// searchCache maps a search query key to its cached result set.
type searchCache map[string]searchCacheEntry

// searchResult is one matching package together with its match positions
// (as produced by regexp.FindAllIndex) and relevance score.
type searchResult struct {
	NameIndices [][]int `json:"name_matches"`
	DescIndices [][]int `json:"desc_matches,omitempty"`
	Score       float64 `json:"score"`
	*metadata
}

// searchCacheEntry holds the complete, sorted result set of one query until
// expiry; pages are sliced from results on each request.
type searchCacheEntry struct {
	query   string
	results []searchResult
	expiry  time.Time
}
// performSearchQuery returns the total number of matches for search and the
// page of results starting at i with at most limit entries. Full result sets
// are cached for one minute per (query, desc) pair.
func (index *packageIndex) performSearchQuery(limit int, i int, search string, desc bool) (int, []searchResult, error) {
	// The desc flag changes both the result set and the scores, so it must
	// participate in the cache key; previously a cached name-only query was
	// served verbatim for a description query of the same string.
	key := search
	if desc {
		key = "\x00" + search
	}
	if entry, ok := index.search[key]; ok {
		return len(entry.results), pageOf(entry.results, i, limit), nil
	}

	regex, err := regexp.Compile(search)
	if err != nil {
		return 0, make([]searchResult, 0), err
	}

	res := make([]searchResult, 0)
	for p := range maps.Values(index.names) {
		nameIndices := regex.FindAllIndex([]byte(p.Name), -1)
		var descIndices [][]int
		if desc {
			descIndices = regex.FindAllIndex([]byte(p.Description), -1)
		}
		if nameIndices == nil && descIndices == nil {
			continue
		}

		// Score by total matched length dampened by match count; description
		// matches contribute at a tenth of the weight of name matches.
		score := float64(indexsum(nameIndices)) / (float64(len(nameIndices)) + 1)
		if desc {
			score += float64(indexsum(descIndices)) / (float64(len(descIndices)) + 1) / 10.0
		}
		res = append(res, searchResult{
			NameIndices: nameIndices,
			DescIndices: descIndices,
			Score:       score,
			metadata:    p,
		})
	}

	// Map iteration order is random, so break score ties by name to keep
	// ordering and pagination stable across repeated identical queries.
	slices.SortFunc(res, func(a, b searchResult) int {
		if c := cmp.Compare(b.Score, a.Score); c != 0 {
			return c
		}
		return cmp.Compare(a.Name, b.Name)
	})

	index.search[key] = searchCacheEntry{
		query:   search,
		results: res,
		expiry:  time.Now().Add(1 * time.Minute),
	}
	return len(res), pageOf(res, i, limit), nil
}

// pageOf slices res to the page starting at i with at most limit entries,
// clamping both bounds; out-of-range values previously caused a slice
// bounds panic on the cached path.
func pageOf(res []searchResult, i, limit int) []searchResult {
	start := min(max(i, 0), len(res))
	end := min(start+max(limit, 0), len(res))
	return res[start:end]
}
// clean removes expired entries from the cache. The current time is captured
// once so every entry is judged against the same instant, instead of calling
// time.Now once per entry as before.
func (s *searchCache) clean() {
	now := time.Now()
	maps.DeleteFunc(*s, func(_ string, v searchCacheEntry) bool {
		return v.expiry.Before(now)
	})
}
// indexsum returns the total number of bytes covered by the match index
// pairs produced by regexp.FindAllIndex, i.e. the sum of end-start spans.
func indexsum(in [][]int) int {
	var total int
	for _, span := range in {
		total += span[1] - span[0]
	}
	return total
}

48
cmd/pkgserver/ui.go Normal file
View File

@@ -0,0 +1,48 @@
package main
import "net/http"
// serveWebUI serves the landing page with client-side caching disabled and
// conservative browser hardening headers set.
func serveWebUI(w http.ResponseWriter, r *http.Request) {
	h := w.Header()
	for _, kv := range [][2]string{
		{"Cache-Control", "no-cache, no-store, must-revalidate"},
		{"Pragma", "no-cache"},
		{"Expires", "0"},
		{"X-Content-Type-Options", "nosniff"},
		{"X-XSS-Protection", "1"},
		{"X-Frame-Options", "DENY"},
	} {
		h.Set(kv[0], kv[1])
	}
	http.ServeFileFS(w, r, content, "ui/index.html")
}
// serveStaticContent maps a fixed set of request paths onto embedded UI
// assets; unknown paths yield 404. The stylesheet route is special-cased to
// select the theme from the dark_theme cookie.
func serveStaticContent(w http.ResponseWriter, r *http.Request) {
	if r.URL.Path == "/static/style.css" {
		name := "ui/static/light.css"
		if darkTheme := r.CookiesNamed("dark_theme"); len(darkTheme) > 0 && darkTheme[0].Value == "true" {
			name = "ui/static/dark.css"
		}
		http.ServeFileFS(w, r, content, name)
		return
	}

	static := map[string]string{
		"/favicon.ico":          "ui/static/favicon.ico",
		"/static/index.js":      "ui/static/index.js",
		"/static/test.js":       "ui/static/test.js",
		"/static/test.css":      "ui/static/test.css",
		"/static/test_tests.js": "ui/static/test_tests.js",
	}
	if name, ok := static[r.URL.Path]; ok {
		http.ServeFileFS(w, r, content, name)
		return
	}
	http.NotFound(w, r)
}
// serveTester serves the in-browser JS test runner page.
func serveTester(w http.ResponseWriter, r *http.Request) {
	http.ServeFileFS(w, r, content, "ui/test.html")
}

// uiRoutes registers the web UI handlers on mux; API routes are registered
// separately via packageIndex.registerAPI (see main.go).
func uiRoutes(mux *http.ServeMux) {
	mux.HandleFunc("GET /{$}", serveWebUI)
	mux.HandleFunc("GET /favicon.ico", serveStaticContent)
	mux.HandleFunc("GET /static/", serveStaticContent)
	mux.HandleFunc("GET /test.html", serveTester)
}

View File

@@ -0,0 +1,35 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <link rel="stylesheet" href="static/style.css">
    <title>Hakurei PkgServer</title>
    <script src="static/index.js"></script>
</head>
<body>
    <h1>Hakurei PkgServer</h1>
    <table id="pkg-list">
        <tr><td>Loading...</td></tr>
    </table>
    <p>Showing entries <span id="entry-counter"></span>.</p>
    <span class="bottom-nav"><a href="javascript:prevPage()">&laquo; Previous</a> <span id="page-number">1</span> <a href="javascript:nextPage()">Next &raquo;</a></span>
    <span><label for="count">Entries per page: </label><select name="count" id="count">
        <option value="10">10</option>
        <option value="20">20</option>
        <option value="30">30</option>
        <option value="50">50</option>
    </select></span>
    <span><label for="sort">Sort by: </label><select name="sort" id="sort">
        <option value="0">Definition (ascending)</option>
        <option value="1">Definition (descending)</option>
        <option value="2">Name (ascending)</option>
        <option value="3">Name (descending)</option>
        <option value="4">Size (ascending)</option>
        <option value="5">Size (descending)</option>
    </select></span>
    <!-- The footer previously sat after </body>; flow content there is
         invalid HTML and browsers reparent it into body anyway, so it now
         lives inside body explicitly. -->
    <footer>
        <p>&copy;<a href="https://hakurei.app/">Hakurei</a> (<span id="hakurei-version">unknown</span>). Licensed under the MIT license.</p>
    </footer>
</body>
</html>

View File

View File

@@ -0,0 +1,6 @@
// Dark color scheme, served for /static/style.css when the dark_theme
// cookie is "true" (see serveStaticContent in ui.go).
@use 'common';
html {
background-color: #2c2c2c;
color: ghostwhite;
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 17 KiB

View File

@@ -0,0 +1,155 @@
// PackageIndexEntry mirrors one JSON metadata object served by the index
// API; fields the server omits when empty arrive as null/undefined here.
class PackageIndexEntry {
    name: string
    size: number | null
    description: string | null
    website: string | null
    version: string | null
    // Whether a build report (status/log) exists for this entry.
    report: boolean
}
// toHTML renders one index entry as a table row. Text fields pass through
// escapeHtml and URLs through encodeURI before being embedded in innerHTML.
// NOTE(review): encodeURI does not restrict URL schemes, so a hostile
// `website` value (e.g. javascript:…) would survive into the href — confirm
// this page only ever renders trusted server-side metadata.
function toHTML(entry: PackageIndexEntry): HTMLTableRowElement {
    let v = entry.version != null ? `<span>${escapeHtml(entry.version)}</span>` : ""
    let s = entry.size != null ? `<p>Size: ${toByteSizeString(entry.size)} (${entry.size})</p>` : ""
    let d = entry.description != null ? `<p>${escapeHtml(entry.description)}</p>` : ""
    let w = entry.website != null ? `<a href="${encodeURI(entry.website)}">Website</a>` : ""
    let r = entry.report ? `Log (<a href=\"${encodeURI('/api/v1/status/' + entry.name)}\">View</a> | <a href=\"${encodeURI('/status/' + entry.name)}\">Download</a>)` : ""
    let row = <HTMLTableRowElement>(document.createElement('tr'))
    row.innerHTML = `<td>
<h2>${escapeHtml(entry.name)} ${v}</h2>
${d}
${s}
${w}
${r}
</td>`
    return row
}
// toByteSizeString renders a byte count using binary (1024-based) units with
// two decimal places; values below 1 KiB are shown as plain bytes.
function toByteSizeString(bytes: number): string {
    if (bytes == null || bytes < 1024) return `${bytes}B`
    const units = ["kiB", "MiB", "GiB", "TiB"]
    for (let i = 0; i < units.length; i++) {
        const scale = Math.pow(1024, i + 1)
        if (bytes < scale * 1024) return `${(bytes / scale).toFixed(2)}${units[i]}`
    }
    return "not only is it big, it's large"
}
// API version prefix shared by the request helpers below.
const API_VERSION = 1
const ENDPOINT = `/api/v${API_VERSION}`

// InfoPayload mirrors the /info endpoint response.
class InfoPayload {
    count: number
    hakurei_version: string
}

// infoRequest fetches instance-wide info (total entry count and version).
async function infoRequest(): Promise<InfoPayload> {
    const res = await fetch(`${ENDPOINT}/info`)
    const payload = await res.json()
    return payload as InfoPayload
}

// GetPayload mirrors the /get endpoint response page.
class GetPayload {
    values: PackageIndexEntry[]
}
// SortOrders mirrors the server-side sort order constants (index.go); the
// numeric value is sent verbatim as the `sort` query parameter.
enum SortOrders {
    DeclarationAscending,
    DeclarationDescending,
    NameAscending,
    NameDescending,
    // The size orders are offered by the UI (select options 4 and 5) and
    // understood by the server; they were previously missing here.
    SizeAscending,
    SizeDescending,
}
// getRequest fetches one page of the package index with the given page
// size, starting offset, and sort order.
async function getRequest(limit: number, index: number, sort: SortOrders): Promise<GetPayload> {
    const query = `limit=${limit}&index=${index}&sort=${sort.valueOf()}`
    const res = await fetch(`${ENDPOINT}/get?${query}`)
    return (await res.json()) as GetPayload
}
// State tracks pagination and sort settings for the index table and pushes
// every change to the DOM: page number, visible entry range, and the rows.
class State {
    entriesPerPage: number = 10
    // Index of the first entry on the current page.
    entryIndex: number = 0
    // Total entry count as reported by the /info endpoint.
    maxEntries: number = 0
    sort: SortOrders = SortOrders.DeclarationAscending
    getEntriesPerPage(): number {
        return this.entriesPerPage
    }
    // setEntriesPerPage re-aligns entryIndex to the start of the page that
    // contains it under the new page size.
    setEntriesPerPage(entriesPerPage: number) {
        this.entriesPerPage = entriesPerPage
        this.setEntryIndex(Math.floor(this.getEntryIndex() / entriesPerPage) * entriesPerPage)
    }
    getEntryIndex(): number {
        return this.entryIndex
    }
    // setEntryIndex is the single mutation point: it refreshes the page
    // number, the range text, and re-fetches the listing.
    setEntryIndex(entryIndex: number) {
        this.entryIndex = entryIndex
        this.updatePage()
        this.updateRange()
        this.updateListings()
    }
    getMaxEntries(): number {
        return this.maxEntries
    }
    setMaxEntries(max: number) {
        this.maxEntries = max
    }
    getSortOrder(): SortOrders {
        return this.sort
    }
    // setSortOrder resets to the first page, since row positions change.
    setSortOrder(sortOrder: SortOrders) {
        this.sort = sortOrder
        this.setEntryIndex(0)
    }
    // updatePage renders the 1-based page number derived from entryIndex.
    updatePage() {
        let page = Math.ceil(((this.getEntryIndex() + this.getEntriesPerPage()) - 1) / this.getEntriesPerPage())
        document.getElementById("page-number").innerText = String(page)
    }
    // updateRange renders "start-end of total" for the current page.
    updateRange() {
        let max = Math.min(this.getEntryIndex() + this.getEntriesPerPage(), this.getMaxEntries())
        document.getElementById("entry-counter").innerText = `${this.getEntryIndex() + 1}-${max} of ${this.getMaxEntries()}`
    }
    // updateListings replaces the table body with the freshly fetched page.
    updateListings() {
        getRequest(this.getEntriesPerPage(), this.getEntryIndex(), this.getSortOrder())
            .then(res => {
                let table = document.getElementById("pkg-list")
                table.innerHTML = ''
                res.values.forEach((row) => {
                    table.appendChild(toHTML(row))
                })
            })
    }
}
// STATE is the page-level pagination state, created on DOMContentLoaded.
let STATE: State

// prevPage moves back one page, stopping at the first entry.
function prevPage() {
    let index = STATE.getEntryIndex()
    STATE.setEntryIndex(Math.max(0, index - STATE.getEntriesPerPage()))
}
// nextPage advances one page, clamping to the start of the last page.
function nextPage() {
    const per = STATE.getEntriesPerPage()
    // First index of the final page; clamped to zero so an empty listing
    // (maxEntries 0) cannot drive the index — and the API query — negative.
    const lastPageStart = Math.max(0, (Math.ceil(STATE.getMaxEntries() / per) * per) - per)
    STATE.setEntryIndex(Math.min(lastPageStart, STATE.getEntryIndex() + per))
}
// escapeHtml replaces HTML metacharacters so text can be embedded in markup.
// Null and undefined both map to the empty string; previously only undefined
// was handled and null threw a TypeError on .replace.
function escapeHtml(str: string): string {
    if (str == null) return "" // loose equality covers null and undefined
    return str
        .replace(/&/g, '&amp;')
        .replace(/</g, '&lt;')
        .replace(/>/g, '&gt;')
        .replace(/"/g, '&quot;')
        .replace(/'/g, '&apos;')
}
// Wire the page up once the DOM is ready: fetch instance info, render the
// first listing, then react to page-size and sort selector changes.
document.addEventListener("DOMContentLoaded", () => {
    STATE = new State()
    infoRequest()
        .then(res => {
            STATE.setMaxEntries(res.count)
            document.getElementById("hakurei-version").innerText = res.hakurei_version
            STATE.updateRange()
            STATE.updateListings()
        })
    document.getElementById("count").addEventListener("change", (event) => {
        STATE.setEntriesPerPage(parseInt((event.target as HTMLSelectElement).value))
    })
    document.getElementById("sort").addEventListener("change", (event) => {
        STATE.setSortOrder(parseInt((event.target as HTMLSelectElement).value))
    })
})

View File

@@ -0,0 +1,6 @@
// Light (default) color scheme, served for /static/style.css unless the
// dark_theme cookie is "true" (see serveStaticContent in ui.go).
@use 'common';
html {
background-color: #d3d3d3;
color: black;
}

View File

@@ -0,0 +1,4 @@
#!/usr/bin/env node
// CLI test runner: importing test_tests.js registers the suites as a side
// effect, then run() executes them, writing one line per result to stdout.
import "./test_tests.js";
import { run, StreamReporter } from "./test.js";
run(new StreamReporter({ writeln: console.log }));

View File

@@ -0,0 +1,27 @@
/* Styles for the in-browser test runner page (test.html), where DOMReporter
 * builds nested <details class="test-node"> elements under #root. */

/* The results container is created as <div id="root"> (test.html), so this
 * must be an id selector; the previous class selector `.root` matched
 * no element on the page. */
#root {
    margin: 1rem 0;
}

details.test-node {
    margin-left: 1rem;
    padding: 0.2rem 0.5rem;
    border-left: 2px dashed black;
    > summary {
        cursor: pointer;
    }
    /* Failed nodes get the failure class; highlight the disclosure marker. */
    &.failure > summary::marker {
        color: red;
    }
}

p.test-desc {
    margin: 0 0 0 1rem;
    padding: 2px 0;
    > pre {
        margin: 0;
    }
}

.italic {
    font-style: italic;
}

View File

@@ -0,0 +1,250 @@
// =============================================================================
// DSL
type TestTree = { name: string } & (TestGroup | Test);
type TestGroup = { children: TestTree[] };
type Test = { test: (TestController) => void };
let TESTS: ({ name: string } & TestGroup)[] = [];
export function suite(name: string, children: TestTree[]) {
checkDuplicates(name, children)
TESTS.push({ name, children });
}
export function context(name: string, children: TestTree[]): TestTree {
checkDuplicates(name, children)
return { name, children };
}
export const group = context;
export function test(name: string, test: (TestController) => void): TestTree {
return { name, test };
}
// checkDuplicates throws a RangeError when two siblings share a name, since
// reporters address tests by their name path.
function checkDuplicates(parent: string, names: { name: string }[]) {
    const seen = new Set<string>();
    names.forEach(({ name }) => {
        if (seen.has(name)) {
            throw new RangeError(`duplicate name '${name}' in '${parent}'`);
        }
        seen.add(name);
    });
}
// FailNowSentinel is thrown by failNow() and recognized by the runner so an
// early abort is not reported as an unexpected exception.
class FailNowSentinel {}

// TestController is handed to each test callback; it accumulates log lines
// and tracks the failure verdict.
class TestController {
    #logBuf: string[] = [];
    #failed: boolean = false;

    // fail marks the test failed but lets it keep running.
    fail() {
        this.#failed = true;
    }

    failed(): boolean {
        return this.#failed;
    }

    // failNow marks the test failed and aborts it immediately.
    failNow(): never {
        this.fail();
        throw new FailNowSentinel();
    }

    // log records a message without affecting the verdict.
    log(message: string) {
        this.#logBuf.push(message);
    }

    // error logs a message and fails, continuing execution.
    error(message: string) {
        this.log(message);
        this.fail();
    }

    // fatal logs a message, fails, and aborts.
    fatal(message: string): never {
        this.log(message);
        this.failNow();
    }

    // getLog joins all recorded messages with newlines.
    getLog(): string {
        return this.#logBuf.join("\n");
    }
}
// =============================================================================
// Execution
// TestResult is the outcome of a single leaf test.
export interface TestResult {
    // False when the controller was failed or the test threw unexpectedly.
    success: boolean;
    // Accumulated log lines plus any rendered exception.
    output: string;
}
// runTests walks a test tree depth-first, executing each leaf and reporting
// exactly one result per leaf under its full name path.
function runTests(reporter: Reporter, parents: string[], tree: TestTree) {
    const path = [...parents, tree.name];
    if ("children" in tree) {
        for (const c of tree.children) runTests(reporter, path, c);
        return;
    }
    let controller = new TestController();
    let excStr: string;
    try {
        tree.test(controller);
    } catch (e) {
        // FailNowSentinel is the controlled abort from failNow(); anything
        // else is an unexpected exception, which fails the test and is
        // rendered into the output.
        if (!(e instanceof FailNowSentinel)) {
            controller.fail();
            excStr = extractExceptionString(e);
        }
    }
    const log = controller.getLog();
    // Join log and exception text with a newline only when both are present.
    const output = (log && excStr) ? `${log}\n${excStr}` : `${log}${excStr ?? ''}`;
    reporter.update(path, { success: !controller.failed(), output });
}
// run executes every registered suite depth-first, streaming each leaf
// result to the reporter, then finalizes the report.
export function run(reporter: Reporter) {
    TESTS.forEach((suite) => {
        suite.children.forEach((c) => runTests(reporter, [suite.name], c));
    });
    reporter.finalize();
}
// extractExceptionString renders a thrown value for display. Error values
// include their stack trace; the message is deduplicated when the engine
// (V8: Chromium, NodeJS) already embeds it in .stack, which Firefox and
// WebKit do not.
function extractExceptionString(e: any): string {
    // String() rather than .toString(): null and undefined have no methods.
    const s = String(e);
    if (e instanceof Error && "stack" in e) {
        return e.stack.includes(s) ? e.stack : `${s}\n${e.stack}`;
    }
    return s;
}
// =============================================================================
// Reporting
// Reporter consumes test results: update is called once per leaf test with
// its full name path, finalize once after all tests have run.
export interface Reporter {
    update(path: string[], result: TestResult): void;
    finalize(): void;
}

// Stream is the minimal sink StreamReporter writes to, one line per call.
export interface Stream {
    writeln(s: string): void;
}
// StreamReporter writes one status line per test to a Stream and prints a
// grouped failure summary with totals in finalize.
export class StreamReporter implements Reporter {
    stream: Stream;
    // When true, successful tests are printed too.
    verbose: boolean;
    #failures: ({ path: string[] } & TestResult)[];
    counts: { successes: number, failures: number };
    constructor(stream: Stream, verbose: boolean = false) {
        this.stream = stream;
        this.verbose = verbose;
        this.#failures = [];
        this.counts = { successes: 0, failures: 0 };
    }
    // update prints a one-line status and queues failures for the summary.
    update(path: string[], result: TestResult) {
        if (path.length === 0) throw new RangeError("path is empty");
        const pathStr = path.join(" ");
        if (result.success) {
            this.counts.successes++;
            if (this.verbose) this.stream.writeln(`✅️ ${pathStr}`);
        } else {
            this.counts.failures++;
            this.stream.writeln(`⚠️ ${pathStr}`);
            this.#failures.push({ path, ...result });
        }
    }
    // finalize prints failure details grouped by parent path, then totals.
    finalize() {
        // Transform [{ path: ["a", "b", "c"] }, { path: ["a", "b", "d"] }]
        // into { "a b": ["c", "d"] }.
        let pathMap = new Map<string, ({ name: string } & TestResult)[]>();
        for (const f of this.#failures) {
            const key = f.path.slice(0, -1).join(" ");
            if (!pathMap.has(key)) pathMap.set(key, []);
            pathMap.get(key).push({ name: f.path.at(-1), ...f });
        }
        this.stream.writeln("");
        this.stream.writeln("FAILURES");
        this.stream.writeln("========");
        for (const [path, tests] of pathMap) {
            if (tests.length === 1) {
                // A lone failure is printed on one line with its full path.
                this.#writeOutput(tests[0], path ? `${path} ` : "", false);
            } else {
                this.stream.writeln(path);
                for (const t of tests) this.#writeOutput(t, " - ", true);
            }
        }
        this.stream.writeln("");
        const { successes, failures } = this.counts;
        this.stream.writeln(`${successes} succeeded, ${failures} failed`);
    }
    // #writeOutput prints one failed test; multi-line output is indented
    // under the test name, deeper when nested under a group line.
    #writeOutput(test: { name: string } & TestResult, prefix: string, nested: boolean) {
        let output = "";
        if (test.output) {
            const lines = test.output.split("\n");
            if (lines.length <= 1) {
                output = `: ${test.output}`;
            } else {
                const padding = nested ? " " : " ";
                output = ":\n" + lines.map((line) => padding + line).join("\n");
            }
        }
        this.stream.writeln(`${prefix}${test.name}${output}`);
    }
}
// DOMReporter renders results into test.html: nested <details> elements per
// path segment under #root, plus the #successes/#failures counters.
export class DOMReporter implements Reporter {
    update(path: string[], result: TestResult) {
        if (path.length === 0) throw new RangeError("path is empty");
        const counter = document.getElementById(result.success ? "successes" : "failures");
        counter.innerText = (Number(counter.innerText) + 1).toString();
        let parent = document.getElementById("root");
        for (const node of path) {
            // Reuse an existing <details> whose <summary> text matches this
            // path segment so sibling tests share their group nodes.
            let child = null;
            outer: for (const d of parent.children) {
                for (const s of d.children) {
                    if (!(s instanceof HTMLElement)) continue;
                    if (s.tagName !== "SUMMARY" || s.innerText !== node) continue;
                    child = d;
                    break outer;
                }
            }
            if (child === null) {
                child = document.createElement("details");
                child.className = "test-node";
                const summary = document.createElement("summary");
                summary.appendChild(document.createTextNode(node));
                child.appendChild(summary);
                parent.appendChild(child);
            }
            // A failure opens and marks every node on its path.
            if (!result.success) {
                child.open = true;
                child.classList.add("failure");
            }
            parent = child;
        }
        const p = document.createElement("p");
        p.classList.add("test-desc");
        if (result.output) {
            const pre = document.createElement("pre");
            pre.appendChild(document.createTextNode(result.output));
            p.appendChild(pre);
        } else {
            p.classList.add("italic");
            p.appendChild(document.createTextNode("No output."));
        }
        parent.appendChild(p);
    }
    // finalize is a no-op; the DOM is updated incrementally in update.
    finalize() {}
}

View File

@@ -0,0 +1,40 @@
// Fixture suites exercising the reporters: a deliberate mix of passing
// tests, failures via error/fatal/failNow, and an uncaught exception.
import { context, group, suite, test } from "./test.js";
suite("dog", [
    group("tail", [
        test("wags when happy", (t) => {
            // NaN !== NaN, so this branch is taken: an intentional fatal.
            if (0 / 0 !== Infinity / Infinity) {
                t.fatal("undefined must not be defined");
            }
        }),
        test("idle when down", (t) => {
            // Exercises log output followed by a non-fatal failure.
            t.log("test test");
            t.error("dog whining noises go here");
        }),
    ]),
    test("likes headpats", (t) => {
        // Never fails; a passing leaf directly under the suite.
        if (2 !== 2) {
            t.error("IEEE 754 violated: 2 is NaN");
        }
    }),
    context("near cat", [
        test("is ecstatic", (t) => {
            // "b" + "a" + + "a" + "a" evaluates to "baNaNa" (unary plus on
            // "a" yields NaN), so the lowercased comparison holds and this
            // intentionally fails after multiple errors.
            if (("b" + "a" + + "a" + "a").toLowerCase() == "banana") {
                t.error("🍌🍌🍌");
                t.error("🍌🍌🍌");
                t.error("🍌🍌🍌");
                t.failNow();
            }
        }),
        test("playfully bites cats' tails", (t) => {
            // Throws; the runner must classify this as a failure with output.
            t.log("arf!");
            throw new Error("nom");
        }),
    ]),
]);
suite("cat", [
    test("likes headpats", (t) => {
        // Passing test with log output only.
        t.log("meow");
    }),
]);

View File

@@ -0,0 +1,5 @@
{
"compilerOptions": {
"target": "ES2024"
}
}

View File

@@ -0,0 +1,28 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <link rel="stylesheet" href="static/style.css">
    <link rel="stylesheet" href="static/test.css">
    <title>PkgServer Tests</title>
</head>
<body>
    <h1>PkgServer Tests</h1>
    <!-- test_tests.js registers suites as an import side effect; the inline
         module then executes them, rendering into #root via DOMReporter. -->
    <main>
        <div id="counters">
            <span id="successes">0</span> succeeded, <span id="failures">0</span> failed.
        </div>
        <div id="root">
        </div>
        <script type="module" src="./static/test_tests.js"></script>
        <script type="module">
            import { DOMReporter, run } from "./static/test.js";
            run(new DOMReporter());
        </script>
    </main>
</body>
</html>

9
cmd/pkgserver/ui_full.go Normal file
View File

@@ -0,0 +1,9 @@
//go:build frontend

package main

import "embed"

// Regenerates the compiled frontend assets; requires sass and tsc, which is
// why this file is guarded behind the frontend build tag.
//go:generate sh -c "sass ui/static/dark.scss ui/static/dark.css && sass ui/static/light.scss ui/static/light.css && sass ui/static/test.scss ui/static/test.css && tsc -p ui/static"

// content embeds the full web UI when built with the frontend tag.
//
//go:embed ui/*
var content embed.FS

7
cmd/pkgserver/ui_stub.go Normal file
View File

@@ -0,0 +1,7 @@
//go:build !frontend

package main

import "testing/fstest"

// content is an empty in-memory FS when built without the frontend tag; the
// http.ServeFileFS calls in ui.go then respond 404 for every UI asset.
var content fstest.MapFS

View File

@@ -38,9 +38,13 @@ type (
Container struct { Container struct {
// Whether the container init should stay alive after its parent terminates. // Whether the container init should stay alive after its parent terminates.
AllowOrphan bool AllowOrphan bool
// Scheduling policy to set via sched_setscheduler(2). The zero value // Whether to set SchedPolicy and SchedPriority via sched_setscheduler(2).
// skips this call. Supported policies are [SCHED_BATCH], [SCHED_IDLE]. SetScheduler bool
SchedPolicy int // Scheduling policy to set via sched_setscheduler(2).
SchedPolicy std.SchedPolicy
// Scheduling priority to set via sched_setscheduler(2). The zero value
// implies the minimum value supported by the current SchedPolicy.
SchedPriority std.Int
// Cgroup fd, nil to disable. // Cgroup fd, nil to disable.
Cgroup *int Cgroup *int
// ExtraFiles passed through to initial process in the container, with // ExtraFiles passed through to initial process in the container, with
@@ -373,16 +377,38 @@ func (p *Container) Start() error {
// sched_setscheduler: thread-directed but acts on all processes // sched_setscheduler: thread-directed but acts on all processes
// created from the calling thread // created from the calling thread
if p.SchedPolicy > 0 { if p.SetScheduler {
p.msg.Verbosef("setting scheduling policy %d", p.SchedPolicy) if p.SchedPolicy < 0 || p.SchedPolicy > std.SCHED_LAST {
return &StartError{
Fatal: false,
Step: "set scheduling policy",
Err: EINVAL,
}
}
var param schedParam
if priority, err := p.SchedPolicy.GetPriorityMin(); err != nil {
return &StartError{
Fatal: true,
Step: "get minimum priority",
Err: err,
}
} else {
param.priority = max(priority, p.SchedPriority)
}
p.msg.Verbosef(
"setting scheduling policy %s priority %d",
p.SchedPolicy, param.priority,
)
if err := schedSetscheduler( if err := schedSetscheduler(
0, // calling thread 0, // calling thread
p.SchedPolicy, p.SchedPolicy,
&schedParam{0}, &param,
); err != nil { ); err != nil {
return &StartError{ return &StartError{
Fatal: true, Fatal: true,
Step: "enforce landlock ruleset", Step: "set scheduling policy",
Err: err, Err: err,
} }
} }

View File

@@ -1,6 +1,12 @@
package std package std
import "iter" import (
"encoding"
"iter"
"strconv"
"sync"
"syscall"
)
// Syscalls returns an iterator over all wired syscalls. // Syscalls returns an iterator over all wired syscalls.
func Syscalls() iter.Seq2[string, ScmpSyscall] { func Syscalls() iter.Seq2[string, ScmpSyscall] {
@@ -26,3 +32,128 @@ func SyscallResolveName(name string) (num ScmpSyscall, ok bool) {
num, ok = syscallNumExtra[name] num, ok = syscallNumExtra[name]
return return
} }
// SchedPolicy denotes a scheduling policy defined in include/uapi/linux/sched.h.
type SchedPolicy int
// include/uapi/linux/sched.h
const (
SCHED_NORMAL SchedPolicy = iota
SCHED_FIFO
SCHED_RR
SCHED_BATCH
_SCHED_ISO // SCHED_ISO: reserved but not implemented yet
SCHED_IDLE
SCHED_DEADLINE
SCHED_EXT
SCHED_LAST SchedPolicy = iota - 1
)
var _ encoding.TextMarshaler = SCHED_LAST
var _ encoding.TextUnmarshaler = new(SCHED_LAST)
// String returns a unique representation of policy, also used in encoding.
// SCHED_NORMAL maps to the empty string.
func (policy SchedPolicy) String() string {
	names := map[SchedPolicy]string{
		SCHED_FIFO:     "fifo",
		SCHED_RR:       "rr",
		SCHED_BATCH:    "batch",
		SCHED_IDLE:     "idle",
		SCHED_DEADLINE: "deadline",
		SCHED_EXT:      "ext",
	}
	if name, ok := names[policy]; ok {
		return name
	}
	if policy == SCHED_NORMAL {
		return ""
	}
	// Covers _SCHED_ISO and all out-of-range values.
	return "invalid policy " + strconv.Itoa(int(policy))
}
// MarshalText performs bounds checking and returns the result of String.
// The reserved _SCHED_ISO slot and out-of-range values yield EINVAL.
func (policy SchedPolicy) MarshalText() ([]byte, error) {
	switch {
	case policy == _SCHED_ISO, policy < 0, policy > SCHED_LAST:
		return nil, syscall.EINVAL
	default:
		return []byte(policy.String()), nil
	}
}
// InvalidSchedPolicyError is an invalid string representation of a [SchedPolicy].
type InvalidSchedPolicyError string
func (InvalidSchedPolicyError) Unwrap() error { return syscall.EINVAL }
func (e InvalidSchedPolicyError) Error() string {
return "invalid scheduling policy " + strconv.Quote(string(e))
}
// UnmarshalText is the inverse of MarshalText. The empty string decodes to
// SCHED_NORMAL; unknown text yields InvalidSchedPolicyError and leaves
// policy unmodified.
func (policy *SchedPolicy) UnmarshalText(text []byte) error {
	policies := map[string]SchedPolicy{
		"":         SCHED_NORMAL,
		"fifo":     SCHED_FIFO,
		"rr":       SCHED_RR,
		"batch":    SCHED_BATCH,
		"idle":     SCHED_IDLE,
		"deadline": SCHED_DEADLINE,
		"ext":      SCHED_EXT,
	}
	p, ok := policies[string(text)]
	if !ok {
		return InvalidSchedPolicyError(text)
	}
	*policy = p
	return nil
}
// Cached results of sched_get_priority_max (index 0) and
// sched_get_priority_min (index 1), resolved at most once per policy via
// the corresponding sync.Once.
var (
	schedPriority     [SCHED_LAST + 1][2]Int
	schedPriorityErr  [SCHED_LAST + 1][2]error
	schedPriorityOnce [SCHED_LAST + 1][2]sync.Once
)
// GetPriorityMax returns the maximum priority value that can be used with the
// scheduling algorithm identified by policy. The kernel's answer is cached
// per policy.
func (policy SchedPolicy) GetPriorityMax() (Int, error) {
	// Bounds check before indexing the cache arrays: an out-of-range policy
	// previously caused an index-out-of-range panic instead of an error.
	if policy < 0 || policy > SCHED_LAST {
		return -1, syscall.EINVAL
	}
	schedPriorityOnce[policy][0].Do(func() {
		priority, _, errno := syscall.Syscall(
			syscall.SYS_SCHED_GET_PRIORITY_MAX,
			uintptr(policy),
			0, 0,
		)
		schedPriority[policy][0] = Int(priority)
		if errno != 0 {
			schedPriorityErr[policy][0] = errno
		}
	})
	return schedPriority[policy][0], schedPriorityErr[policy][0]
}
// GetPriorityMin returns the minimum priority value that can be used with the
// scheduling algorithm identified by policy. The kernel's answer is cached
// per policy.
func (policy SchedPolicy) GetPriorityMin() (Int, error) {
	// Bounds check before indexing the cache arrays: an out-of-range policy
	// previously caused an index-out-of-range panic instead of an error.
	if policy < 0 || policy > SCHED_LAST {
		return -1, syscall.EINVAL
	}
	schedPriorityOnce[policy][1].Do(func() {
		priority, _, errno := syscall.Syscall(
			syscall.SYS_SCHED_GET_PRIORITY_MIN,
			uintptr(policy),
			0, 0,
		)
		schedPriority[policy][1] = Int(priority)
		if errno != 0 {
			schedPriorityErr[policy][1] = errno
		}
	})
	return schedPriority[policy][1], schedPriorityErr[policy][1]
}

View File

@@ -1,6 +1,11 @@
package std_test package std_test
import ( import (
"encoding/json"
"errors"
"math"
"reflect"
"syscall"
"testing" "testing"
"hakurei.app/container/std" "hakurei.app/container/std"
@@ -19,3 +24,90 @@ func TestSyscallResolveName(t *testing.T) {
}) })
} }
} }
// TestSchedPolicyJSON checks the round trip of SchedPolicy through its
// TextMarshaler/TextUnmarshaler implementations via encoding/json, including
// the reserved slot (4) and an out-of-range value.
func TestSchedPolicyJSON(t *testing.T) {
	t.Parallel()

	testCases := []struct {
		policy    std.SchedPolicy
		want      string
		encodeErr error // expected json.Marshal error
		decodeErr error // expected json.Unmarshal error for want
	}{
		{std.SCHED_NORMAL, `""`, nil, nil},
		{std.SCHED_FIFO, `"fifo"`, nil, nil},
		{std.SCHED_RR, `"rr"`, nil, nil},
		{std.SCHED_BATCH, `"batch"`, nil, nil},
		{4, `"invalid policy 4"`, syscall.EINVAL, std.InvalidSchedPolicyError("invalid policy 4")},
		{std.SCHED_IDLE, `"idle"`, nil, nil},
		{std.SCHED_DEADLINE, `"deadline"`, nil, nil},
		{std.SCHED_EXT, `"ext"`, nil, nil},
		{math.MaxInt, `"iso"`, syscall.EINVAL, std.InvalidSchedPolicyError("iso")},
	}
	for _, tc := range testCases {
		// SCHED_NORMAL stringifies to "", which is not a usable subtest name.
		name := tc.policy.String()
		if tc.policy == std.SCHED_NORMAL {
			name = "normal"
		}
		t.Run(name, func(t *testing.T) {
			t.Parallel()
			got, err := json.Marshal(tc.policy)
			if !errors.Is(err, tc.encodeErr) {
				t.Fatalf("Marshal: error = %v, want %v", err, tc.encodeErr)
			}
			if err == nil && string(got) != tc.want {
				t.Fatalf("Marshal: %s, want %s", string(got), tc.want)
			}

			var v std.SchedPolicy
			if err = json.Unmarshal([]byte(tc.want), &v); !reflect.DeepEqual(err, tc.decodeErr) {
				t.Fatalf("Unmarshal: error = %v, want %v", err, tc.decodeErr)
			}
			if err == nil && v != tc.policy {
				t.Fatalf("Unmarshal: %d, want %d", v, tc.policy)
			}
		})
	}
}
// TestSchedPolicyMinMax checks GetPriorityMax and GetPriorityMin against
// expected priority ranges, including the error path for an invalid policy.
func TestSchedPolicyMinMax(t *testing.T) {
	t.Parallel()

	tests := []struct {
		policy   std.SchedPolicy
		min, max std.Int
		err      error
	}{
		{policy: std.SCHED_NORMAL},
		{policy: std.SCHED_FIFO, min: 1, max: 99},
		{policy: std.SCHED_RR, min: 1, max: 99},
		{policy: std.SCHED_BATCH},
		{policy: 4, min: -1, max: -1, err: syscall.EINVAL},
		{policy: std.SCHED_IDLE},
		{policy: std.SCHED_DEADLINE},
		{policy: std.SCHED_EXT},
	}

	for _, tt := range tests {
		// SCHED_NORMAL stringifies differently, so pin its subtest name.
		name := "normal"
		if tt.policy != std.SCHED_NORMAL {
			name = tt.policy.String()
		}
		t.Run(name, func(t *testing.T) {
			t.Parallel()

			// t.Fatalf terminates the subtest, so the value check below only
			// runs when the error matched expectations.
			maxPriority, err := tt.policy.GetPriorityMax()
			if !reflect.DeepEqual(err, tt.err) {
				t.Fatalf("GetPriorityMax: error = %v, want %v", err, tt.err)
			}
			if maxPriority != tt.max {
				t.Fatalf("GetPriorityMax: %d, want %d", maxPriority, tt.max)
			}

			minPriority, err := tt.policy.GetPriorityMin()
			if !reflect.DeepEqual(err, tt.err) {
				t.Fatalf("GetPriorityMin: error = %v, want %v", err, tt.err)
			}
			if minPriority != tt.min {
				t.Fatalf("GetPriorityMin: %d, want %d", minPriority, tt.min)
			}
		})
	}
}

View File

@@ -43,18 +43,6 @@ func Isatty(fd int) bool {
return r == 0 return r == 0
} }
// Scheduling policy values, mirroring include/uapi/linux/sched.h.
// Values are positional: SCHED_NORMAL is 0 and each subsequent constant
// increments by one, so the gap for SCHED_ISO must be preserved.
const (
	// SCHED_NORMAL is the default time-sharing policy (value 0).
	SCHED_NORMAL = iota
	// SCHED_FIFO is the first-in, first-out real-time policy.
	SCHED_FIFO
	// SCHED_RR is the round-robin real-time policy.
	SCHED_RR
	// SCHED_BATCH is for "batch" style execution of processes.
	SCHED_BATCH
	_ // SCHED_ISO: reserved but not implemented yet
	// SCHED_IDLE is for running very low priority background jobs.
	SCHED_IDLE
	// SCHED_DEADLINE is the deadline-based real-time policy.
	SCHED_DEADLINE
	// SCHED_EXT delegates scheduling decisions to a loadable scheduler.
	SCHED_EXT
)
// schedParam is equivalent to struct sched_param from include/linux/sched.h. // schedParam is equivalent to struct sched_param from include/linux/sched.h.
type schedParam struct { type schedParam struct {
// sched_priority // sched_priority
@@ -74,13 +62,13 @@ type schedParam struct {
// this if you do not have something similar in place! // this if you do not have something similar in place!
// //
// [very subtle to use correctly]: https://www.openwall.com/lists/musl/2016/03/01/4 // [very subtle to use correctly]: https://www.openwall.com/lists/musl/2016/03/01/4
func schedSetscheduler(tid, policy int, param *schedParam) error { func schedSetscheduler(tid int, policy std.SchedPolicy, param *schedParam) error {
if r, _, errno := Syscall( if _, _, errno := Syscall(
SYS_SCHED_SETSCHEDULER, SYS_SCHED_SETSCHEDULER,
uintptr(tid), uintptr(tid),
uintptr(policy), uintptr(policy),
uintptr(unsafe.Pointer(param)), uintptr(unsafe.Pointer(param)),
); r < 0 { ); errno != 0 {
return errno return errno
} }
return nil return nil

12
flake.lock generated
View File

@@ -7,11 +7,11 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1765384171, "lastModified": 1772985280,
"narHash": "sha256-FuFtkJrW1Z7u+3lhzPRau69E0CNjADku1mLQQflUORo=", "narHash": "sha256-FdrNykOoY9VStevU4zjSUdvsL9SzJTcXt4omdEDZDLk=",
"owner": "nix-community", "owner": "nix-community",
"repo": "home-manager", "repo": "home-manager",
"rev": "44777152652bc9eacf8876976fa72cc77ca8b9d8", "rev": "8f736f007139d7f70752657dff6a401a585d6cbc",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -23,11 +23,11 @@
}, },
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1765311797, "lastModified": 1772822230,
"narHash": "sha256-mSD5Ob7a+T2RNjvPvOA1dkJHGVrNVl8ZOrAwBjKBDQo=", "narHash": "sha256-yf3iYLGbGVlIthlQIk5/4/EQDZNNEmuqKZkQssMljuw=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "09eb77e94fa25202af8f3e81ddc7353d9970ac1b", "rev": "71caefce12ba78d84fe618cf61644dce01cf3a96",
"type": "github" "type": "github"
}, },
"original": { "original": {

View File

@@ -99,7 +99,7 @@
hakurei = pkgs.pkgsStatic.callPackage ./package.nix { hakurei = pkgs.pkgsStatic.callPackage ./package.nix {
inherit (pkgs) inherit (pkgs)
# passthru.buildInputs # passthru.buildInputs
go go_1_26
clang clang
# nativeBuildInputs # nativeBuildInputs
@@ -182,7 +182,7 @@
let let
# this is used for interactive vm testing during development, where tests might be broken # this is used for interactive vm testing during development, where tests might be broken
package = self.packages.${pkgs.stdenv.hostPlatform.system}.hakurei.override { package = self.packages.${pkgs.stdenv.hostPlatform.system}.hakurei.override {
buildGoModule = previousArgs: pkgs.pkgsStatic.buildGoModule (previousArgs // { doCheck = false; }); buildGo126Module = previousArgs: pkgs.pkgsStatic.buildGo126Module (previousArgs // { doCheck = false; });
}; };
in in
{ {

2
go.mod
View File

@@ -1,3 +1,3 @@
module hakurei.app module hakurei.app
go 1.25 go 1.26

View File

@@ -6,96 +6,137 @@ import (
"strings" "strings"
"hakurei.app/container/check" "hakurei.app/container/check"
"hakurei.app/container/std"
) )
// Config configures an application container, implemented in internal/app. // Config configures an application container.
type Config struct { type Config struct {
// Reverse-DNS style configured arbitrary identifier string. // Reverse-DNS style configured arbitrary identifier string.
// Passed to wayland security-context-v1 and used as part of defaults in dbus session proxy. //
// This value is passed as is to Wayland security-context-v1 and used as
// part of defaults in D-Bus session proxy. The zero value causes a default
// value to be derived from the container instance.
ID string `json:"id,omitempty"` ID string `json:"id,omitempty"`
// System services to make available in the container. // System services to make available in the container.
Enablements *Enablements `json:"enablements,omitempty"` Enablements *Enablements `json:"enablements,omitempty"`
// Session D-Bus proxy configuration. // Session D-Bus proxy configuration.
// If set to nil, session bus proxy assume built-in defaults. //
// Has no effect if [EDBus] but is not set in Enablements. The zero value
// assumes built-in defaults derived from ID.
SessionBus *BusConfig `json:"session_bus,omitempty"` SessionBus *BusConfig `json:"session_bus,omitempty"`
// System D-Bus proxy configuration. // System D-Bus proxy configuration.
// If set to nil, system bus proxy is disabled. //
// Has no effect if [EDBus] but is not set in Enablements. The zero value
// disables system bus proxy.
SystemBus *BusConfig `json:"system_bus,omitempty"` SystemBus *BusConfig `json:"system_bus,omitempty"`
// Direct access to wayland socket, no attempt is made to attach security-context-v1 // Direct access to Wayland socket, no attempt is made to attach
// and the bare socket is made available to the container. // security-context-v1 and the bare socket is made available to the
// container.
// //
// This option is unsupported and most likely enables full control over the Wayland // This option is unsupported and will most likely enable full control over
// session. Do not set this to true unless you are sure you know what you are doing. // the Wayland session from within the container. Do not set this to true
// unless you are sure you know what you are doing.
DirectWayland bool `json:"direct_wayland,omitempty"` DirectWayland bool `json:"direct_wayland,omitempty"`
// Direct access to the PipeWire socket established via SecurityContext::Create, no
// attempt is made to start the pipewire-pulse server. // Direct access to the PipeWire socket established via SecurityContext::Create,
// no attempt is made to start the pipewire-pulse server.
// //
// The SecurityContext machinery is fatally flawed, it blindly sets read and execute // The SecurityContext machinery is fatally flawed, it unconditionally sets
// bits on all objects for clients with the lowest achievable privilege level (by // read and execute bits on all objects for clients with the lowest achievable
// setting PW_KEY_ACCESS to "restricted"). This enables them to call any method // privilege level (by setting PW_KEY_ACCESS to "restricted" or by satisfying
// targeting any object, and since Registry::Destroy checks for the read and execute bit, // all conditions of [the /.flatpak-info hack]). This enables them to call
// allows the destruction of any object other than PW_ID_CORE as well. This behaviour // any method targeting any object, and since Registry::Destroy checks for
// is implemented separately in media-session and wireplumber, with the wireplumber // the read and execute bit, allows the destruction of any object other than
// implementation in Lua via an embedded Lua vm. In all known setups, wireplumber is // PW_ID_CORE as well.
// in use, and there is no known way to change its behaviour and set permissions
// differently without replacing the Lua script. Also, since PipeWire relies on these
// permissions to work, reducing them is not possible.
// //
// Currently, the only other sandboxed use case is flatpak, which is not aware of // This behaviour is implemented separately in media-session and wireplumber,
// PipeWire and blindly exposes the bare PulseAudio socket to the container (behaves // with the wireplumber implementation in Lua via an embedded Lua vm. In all
// like DirectPulse). This socket is backed by the pipewire-pulse compatibility daemon, // known setups, wireplumber is in use, and in that case, no option for
// which obtains client pid via the SO_PEERCRED option. The PipeWire daemon, pipewire-pulse // configuring this behaviour exists, without replacing the Lua script.
// daemon and the session manager daemon then separately performs the /.flatpak-info hack // Also, since PipeWire relies on these permissions to work, reducing them
// described in https://git.gensokyo.uk/security/hakurei/issues/21. Under such use case, // was never possible in the first place.
// since the client has no direct access to PipeWire, insecure parts of the protocol are
// obscured by pipewire-pulse simply not implementing them, and thus hiding the flaws
// described above.
// //
// Hakurei does not rely on the /.flatpak-info hack. Instead, a socket is sets up via // Currently, the only other sandboxed use case is flatpak, which is not
// SecurityContext. A pipewire-pulse server connected through it achieves the same // aware of PipeWire and blindly exposes the bare PulseAudio socket to the
// permissions as flatpak does via the /.flatpak-info hack and is maintained for the // container (behaves like DirectPulse). This socket is backed by the
// life of the container. // pipewire-pulse compatibility daemon, which obtains client pid via the
// SO_PEERCRED option. The PipeWire daemon, pipewire-pulse daemon and the
// session manager daemon then separately performs [the /.flatpak-info hack].
// Under such use case, since the client has no direct access to PipeWire,
// insecure parts of the protocol are obscured by the absence of an
// equivalent API in PulseAudio, or pipewire-pulse simply not implementing
// them.
//
// Hakurei does not rely on [the /.flatpak-info hack]. Instead, a socket is
// set up via SecurityContext. A pipewire-pulse server connected through it
// achieves the same permissions as flatpak does via [the /.flatpak-info hack]
// and is maintained for the life of the container.
//
// This option is unsupported and enables a denial-of-service attack as the
// sandboxed client is able to destroy any client object and thus
// disconnecting them from PipeWire, or destroy the SecurityContext object,
// preventing any further container creation.
// //
// This option is unsupported and enables a denial-of-service attack as the sandboxed
// client is able to destroy any client object and thus disconnecting them from PipeWire,
// or destroy the SecurityContext object preventing any further container creation.
// Do not set this to true, it is insecure under any configuration. // Do not set this to true, it is insecure under any configuration.
DirectPipeWire bool `json:"direct_pipewire,omitempty"`
// Direct access to PulseAudio socket, no attempt is made to establish pipewire-pulse
// server via a PipeWire socket with a SecurityContext attached and the bare socket
// is made available to the container.
// //
// This option is unsupported and enables arbitrary code execution as the PulseAudio // [the /.flatpak-info hack]: https://git.gensokyo.uk/security/hakurei/issues/21
// server. Do not set this to true, it is insecure under any configuration. DirectPipeWire bool `json:"direct_pipewire,omitempty"`
// Direct access to PulseAudio socket, no attempt is made to establish
// pipewire-pulse server via a PipeWire socket with a SecurityContext
// attached, and the bare socket is made available to the container.
//
// This option is unsupported and enables arbitrary code execution as the
// PulseAudio server.
//
// Do not set this to true, it is insecure under any configuration.
DirectPulse bool `json:"direct_pulse,omitempty"` DirectPulse bool `json:"direct_pulse,omitempty"`
// Extra acl updates to perform before setuid. // Extra acl updates to perform before setuid.
ExtraPerms []ExtraPermConfig `json:"extra_perms,omitempty"` ExtraPerms []ExtraPermConfig `json:"extra_perms,omitempty"`
// Numerical application id, passed to hsu, used to derive init user namespace credentials. // Numerical application id, passed to hsu, used to derive init user
// namespace credentials.
Identity int `json:"identity"` Identity int `json:"identity"`
// Init user namespace supplementary groups inherited by all container processes. // Init user namespace supplementary groups inherited by all container processes.
Groups []string `json:"groups"` Groups []string `json:"groups"`
// Scheduling policy to set for the container.
//
// The zero value retains the current scheduling policy.
SchedPolicy std.SchedPolicy `json:"sched_policy,omitempty"`
// Scheduling priority to set for the container.
//
// The zero value implies the minimum priority of the current SchedPolicy.
// Has no effect if SchedPolicy is zero.
SchedPriority std.Int `json:"sched_priority,omitempty"`
// High level configuration applied to the underlying [container]. // High level configuration applied to the underlying [container].
Container *ContainerConfig `json:"container"` Container *ContainerConfig `json:"container"`
} }
var ( var (
// ErrConfigNull is returned by [Config.Validate] for an invalid configuration that contains a null value for any // ErrConfigNull is returned by [Config.Validate] for an invalid configuration
// field that must not be null. // that contains a null value for any field that must not be null.
ErrConfigNull = errors.New("unexpected null in config") ErrConfigNull = errors.New("unexpected null in config")
// ErrIdentityBounds is returned by [Config.Validate] for an out of bounds [Config.Identity] value. // ErrIdentityBounds is returned by [Config.Validate] for an out of bounds
// [Config.Identity] value.
ErrIdentityBounds = errors.New("identity out of bounds") ErrIdentityBounds = errors.New("identity out of bounds")
// ErrEnviron is returned by [Config.Validate] if an environment variable name contains '=' or NUL. // ErrSchedPolicyBounds is returned by [Config.Validate] for an out of bounds
// [Config.SchedPolicy] value.
ErrSchedPolicyBounds = errors.New("scheduling policy out of bounds")
// ErrEnviron is returned by [Config.Validate] if an environment variable
// name contains '=' or NUL.
ErrEnviron = errors.New("invalid environment variable name") ErrEnviron = errors.New("invalid environment variable name")
// ErrInsecure is returned by [Config.Validate] if the configuration is considered insecure. // ErrInsecure is returned by [Config.Validate] if the configuration is
// considered insecure.
ErrInsecure = errors.New("configuration is insecure") ErrInsecure = errors.New("configuration is insecure")
) )
@@ -112,6 +153,13 @@ func (config *Config) Validate() error {
Msg: "identity " + strconv.Itoa(config.Identity) + " out of range"} Msg: "identity " + strconv.Itoa(config.Identity) + " out of range"}
} }
if config.SchedPolicy < 0 || config.SchedPolicy > std.SCHED_LAST {
return &AppError{Step: "validate configuration", Err: ErrSchedPolicyBounds,
Msg: "scheduling policy " +
strconv.Itoa(int(config.SchedPolicy)) +
" out of range"}
}
if err := config.SessionBus.CheckInterfaces("session"); err != nil { if err := config.SessionBus.CheckInterfaces("session"); err != nil {
return err return err
} }

View File

@@ -22,6 +22,10 @@ func TestConfigValidate(t *testing.T) {
Msg: "identity -1 out of range"}}, Msg: "identity -1 out of range"}},
{"identity upper", &hst.Config{Identity: 10000}, &hst.AppError{Step: "validate configuration", Err: hst.ErrIdentityBounds, {"identity upper", &hst.Config{Identity: 10000}, &hst.AppError{Step: "validate configuration", Err: hst.ErrIdentityBounds,
Msg: "identity 10000 out of range"}}, Msg: "identity 10000 out of range"}},
{"sched lower", &hst.Config{SchedPolicy: -1}, &hst.AppError{Step: "validate configuration", Err: hst.ErrSchedPolicyBounds,
Msg: "scheduling policy -1 out of range"}},
{"sched upper", &hst.Config{SchedPolicy: 0xcafe}, &hst.AppError{Step: "validate configuration", Err: hst.ErrSchedPolicyBounds,
Msg: "scheduling policy 51966 out of range"}},
{"dbus session", &hst.Config{SessionBus: &hst.BusConfig{See: []string{""}}}, {"dbus session", &hst.Config{SessionBus: &hst.BusConfig{See: []string{""}}},
&hst.BadInterfaceError{Interface: "", Segment: "session"}}, &hst.BadInterfaceError{Interface: "", Segment: "session"}},
{"dbus system", &hst.Config{SystemBus: &hst.BusConfig{See: []string{""}}}, {"dbus system", &hst.Config{SystemBus: &hst.BusConfig{See: []string{""}}},

View File

@@ -16,18 +16,20 @@ const PrivateTmp = "/.hakurei"
var AbsPrivateTmp = check.MustAbs(PrivateTmp) var AbsPrivateTmp = check.MustAbs(PrivateTmp)
const ( const (
// WaitDelayDefault is used when WaitDelay has its zero value. // WaitDelayDefault is used when WaitDelay has the zero value.
WaitDelayDefault = 5 * time.Second WaitDelayDefault = 5 * time.Second
// WaitDelayMax is used if WaitDelay exceeds its value. // WaitDelayMax is used when WaitDelay exceeds its value.
WaitDelayMax = 30 * time.Second WaitDelayMax = 30 * time.Second
) )
const ( const (
// ExitFailure is returned if the container fails to start. // ExitFailure is returned if the container fails to start.
ExitFailure = iota + 1 ExitFailure = iota + 1
// ExitCancel is returned if the container is terminated by a shim-directed signal which cancels its context. // ExitCancel is returned if the container is terminated by a shim-directed
// signal which cancels its context.
ExitCancel ExitCancel
// ExitOrphan is returned when the shim is orphaned before priv side delivers a signal. // ExitOrphan is returned when the shim is orphaned before priv side process
// delivers a signal.
ExitOrphan ExitOrphan
// ExitRequest is returned when the priv side process requests shim exit. // ExitRequest is returned when the priv side process requests shim exit.
@@ -38,10 +40,12 @@ const (
type Flags uintptr type Flags uintptr
const ( const (
// FMultiarch unblocks syscalls required for multiarch to work on applicable targets. // FMultiarch unblocks system calls required for multiarch to work on
// multiarch-enabled targets (amd64, arm64).
FMultiarch Flags = 1 << iota FMultiarch Flags = 1 << iota
// FSeccompCompat changes emitted seccomp filter programs to be identical to that of Flatpak. // FSeccompCompat changes emitted seccomp filter programs to be identical to
// that of Flatpak in enabled rulesets.
FSeccompCompat FSeccompCompat
// FDevel unblocks ptrace and friends. // FDevel unblocks ptrace and friends.
FDevel FDevel
@@ -54,12 +58,15 @@ const (
// FTty unblocks dangerous terminal I/O (faking input). // FTty unblocks dangerous terminal I/O (faking input).
FTty FTty
// FMapRealUID maps the target user uid to the privileged user uid in the container user namespace. // FMapRealUID maps the target user uid to the privileged user uid in the
// container user namespace.
//
// Some programs fail to connect to dbus session running as a different uid, // Some programs fail to connect to dbus session running as a different uid,
// this option works around it by mapping priv-side caller uid in container. // this option works around it by mapping priv-side caller uid in container.
FMapRealUID FMapRealUID
// FDevice mount /dev/ from the init mount namespace as-is in the container mount namespace. // FDevice mount /dev/ from the init mount namespace as is in the container
// mount namespace.
FDevice FDevice
// FShareRuntime shares XDG_RUNTIME_DIR between containers under the same identity. // FShareRuntime shares XDG_RUNTIME_DIR between containers under the same identity.
@@ -112,30 +119,37 @@ func (flags Flags) String() string {
} }
} }
// ContainerConfig describes the container configuration to be applied to an underlying [container]. // ContainerConfig describes the container configuration to be applied to an
// underlying [container]. It is validated by [Config.Validate].
type ContainerConfig struct { type ContainerConfig struct {
// Container UTS namespace hostname. // Container UTS namespace hostname.
Hostname string `json:"hostname,omitempty"` Hostname string `json:"hostname,omitempty"`
// Duration in nanoseconds to wait for after interrupting the initial process. // Duration in nanoseconds to wait for after interrupting the initial process.
// Defaults to [WaitDelayDefault] if zero, or [WaitDelayMax] if greater than [WaitDelayMax]. //
// Values lesser than zero is equivalent to zero, bypassing [WaitDelayDefault]. // Defaults to [WaitDelayDefault] if zero, or [WaitDelayMax] if greater than
// [WaitDelayMax]. Values lesser than zero is equivalent to zero, bypassing
// [WaitDelayDefault].
WaitDelay time.Duration `json:"wait_delay,omitempty"` WaitDelay time.Duration `json:"wait_delay,omitempty"`
// Initial process environment variables. // Initial process environment variables.
Env map[string]string `json:"env"` Env map[string]string `json:"env"`
/* Container mount points. // Container mount points.
//
If the first element targets /, it is inserted early and excluded from path hiding. */ // If the first element targets /, it is inserted early and excluded from
// path hiding. Otherwise, an anonymous instance of tmpfs is set up on /.
Filesystem []FilesystemConfigJSON `json:"filesystem"` Filesystem []FilesystemConfigJSON `json:"filesystem"`
// String used as the username of the emulated user, validated against the default NAME_REGEX from adduser. // String used as the username of the emulated user, validated against the
// default NAME_REGEX from adduser.
//
// Defaults to passwd name of target uid or chronos. // Defaults to passwd name of target uid or chronos.
Username string `json:"username,omitempty"` Username string `json:"username,omitempty"`
// Pathname of shell in the container filesystem to use for the emulated user. // Pathname of shell in the container filesystem to use for the emulated user.
Shell *check.Absolute `json:"shell"` Shell *check.Absolute `json:"shell"`
// Directory in the container filesystem to enter and use as the home directory of the emulated user. // Directory in the container filesystem to enter and use as the home
// directory of the emulated user.
Home *check.Absolute `json:"home"` Home *check.Absolute `json:"home"`
// Pathname to executable file in the container filesystem. // Pathname to executable file in the container filesystem.
@@ -148,6 +162,7 @@ type ContainerConfig struct {
} }
// ContainerConfigF is [ContainerConfig] stripped of its methods. // ContainerConfigF is [ContainerConfig] stripped of its methods.
//
// The [ContainerConfig.Flags] field does not survive a [json] round trip. // The [ContainerConfig.Flags] field does not survive a [json] round trip.
type ContainerConfigF ContainerConfig type ContainerConfigF ContainerConfig

View File

@@ -5,8 +5,26 @@ import (
"strings" "strings"
) )
// BadInterfaceError is returned when Interface fails an undocumented check in xdg-dbus-proxy, // BadInterfaceError is returned when Interface fails an undocumented check in
// which would have caused a silent failure. // xdg-dbus-proxy, which would have caused a silent failure.
//
// xdg-dbus-proxy fails without output when this condition is not met:
//
// char *dot = strrchr (filter->interface, '.');
// if (dot != NULL)
// {
// *dot = 0;
// if (strcmp (dot + 1, "*") != 0)
// filter->member = g_strdup (dot + 1);
// }
//
// trim ".*" since they are removed before searching for '.':
//
// if (g_str_has_suffix (name, ".*"))
// {
// name[strlen (name) - 2] = 0;
// wildcard = TRUE;
// }
type BadInterfaceError struct { type BadInterfaceError struct {
// Interface is the offending interface string. // Interface is the offending interface string.
Interface string Interface string
@@ -19,7 +37,8 @@ func (e *BadInterfaceError) Error() string {
if e == nil { if e == nil {
return "<nil>" return "<nil>"
} }
return "bad interface string " + strconv.Quote(e.Interface) + " in " + e.Segment + " bus configuration" return "bad interface string " + strconv.Quote(e.Interface) +
" in " + e.Segment + " bus configuration"
} }
// BusConfig configures the xdg-dbus-proxy process. // BusConfig configures the xdg-dbus-proxy process.
@@ -76,31 +95,14 @@ func (c *BusConfig) Interfaces(yield func(string) bool) {
} }
} }
// CheckInterfaces checks for invalid interface strings based on an undocumented check in xdg-dbus-proxy, // CheckInterfaces checks for invalid interface strings based on an undocumented
// returning [BadInterfaceError] if one is encountered. // check in xdg-dbus-proxy, returning [BadInterfaceError] if one is encountered.
func (c *BusConfig) CheckInterfaces(segment string) error { func (c *BusConfig) CheckInterfaces(segment string) error {
if c == nil { if c == nil {
return nil return nil
} }
for iface := range c.Interfaces { for iface := range c.Interfaces {
/*
xdg-dbus-proxy fails without output when this condition is not met:
char *dot = strrchr (filter->interface, '.');
if (dot != NULL)
{
*dot = 0;
if (strcmp (dot + 1, "*") != 0)
filter->member = g_strdup (dot + 1);
}
trim ".*" since they are removed before searching for '.':
if (g_str_has_suffix (name, ".*"))
{
name[strlen (name) - 2] = 0;
wildcard = TRUE;
}
*/
if strings.IndexByte(strings.TrimSuffix(iface, ".*"), '.') == -1 { if strings.IndexByte(strings.TrimSuffix(iface, ".*"), '.') == -1 {
return &BadInterfaceError{iface, segment} return &BadInterfaceError{iface, segment}
} }

View File

@@ -11,15 +11,17 @@ import (
type Enablement byte type Enablement byte
const ( const (
// EWayland exposes a wayland pathname socket via security-context-v1. // EWayland exposes a Wayland pathname socket via security-context-v1.
EWayland Enablement = 1 << iota EWayland Enablement = 1 << iota
// EX11 adds the target user via X11 ChangeHosts and exposes the X11 pathname socket. // EX11 adds the target user via X11 ChangeHosts and exposes the X11
// pathname socket.
EX11 EX11
// EDBus enables the per-container xdg-dbus-proxy daemon. // EDBus enables the per-container xdg-dbus-proxy daemon.
EDBus EDBus
// EPipeWire exposes a pipewire pathname socket via SecurityContext. // EPipeWire exposes a pipewire pathname socket via SecurityContext.
EPipeWire EPipeWire
// EPulse copies the PulseAudio cookie to [hst.PrivateTmp] and exposes the PulseAudio socket. // EPulse copies the PulseAudio cookie to [hst.PrivateTmp] and exposes the
// PulseAudio socket.
EPulse EPulse
// EM is a noop. // EM is a noop.

View File

@@ -24,7 +24,8 @@ type FilesystemConfig interface {
fmt.Stringer fmt.Stringer
} }
// The Ops interface enables [FilesystemConfig] to queue container ops without depending on the container package. // The Ops interface enables [FilesystemConfig] to queue container ops without
// depending on the container package.
type Ops interface { type Ops interface {
// Tmpfs appends an op that mounts tmpfs on a container path. // Tmpfs appends an op that mounts tmpfs on a container path.
Tmpfs(target *check.Absolute, size int, perm os.FileMode) Ops Tmpfs(target *check.Absolute, size int, perm os.FileMode) Ops
@@ -41,12 +42,15 @@ type Ops interface {
// Link appends an op that creates a symlink in the container filesystem. // Link appends an op that creates a symlink in the container filesystem.
Link(target *check.Absolute, linkName string, dereference bool) Ops Link(target *check.Absolute, linkName string, dereference bool) Ops
// Root appends an op that expands a directory into a toplevel bind mount mirror on container root. // Root appends an op that expands a directory into a toplevel bind mount
// mirror on container root.
Root(host *check.Absolute, flags int) Ops Root(host *check.Absolute, flags int) Ops
// Etc appends an op that expands host /etc into a toplevel symlink mirror with /etc semantics. // Etc appends an op that expands host /etc into a toplevel symlink mirror
// with /etc semantics.
Etc(host *check.Absolute, prefix string) Ops Etc(host *check.Absolute, prefix string) Ops
// Daemon appends an op that starts a daemon in the container and blocks until target appears. // Daemon appends an op that starts a daemon in the container and blocks
// until target appears.
Daemon(target, path *check.Absolute, args ...string) Ops Daemon(target, path *check.Absolute, args ...string) Ops
} }
@@ -61,7 +65,8 @@ type ApplyState struct {
// ErrFSNull is returned by [json] on encountering a null [FilesystemConfig] value. // ErrFSNull is returned by [json] on encountering a null [FilesystemConfig] value.
var ErrFSNull = errors.New("unexpected null in mount point") var ErrFSNull = errors.New("unexpected null in mount point")
// FSTypeError is returned when [ContainerConfig.Filesystem] contains an entry with invalid type. // FSTypeError is returned when [ContainerConfig.Filesystem] contains an entry
// with invalid type.
type FSTypeError string type FSTypeError string
func (f FSTypeError) Error() string { return fmt.Sprintf("invalid filesystem type %q", string(f)) } func (f FSTypeError) Error() string { return fmt.Sprintf("invalid filesystem type %q", string(f)) }

View File

@@ -18,7 +18,9 @@ type FSLink struct {
Target *check.Absolute `json:"dst"` Target *check.Absolute `json:"dst"`
// Arbitrary linkname value store in the symlink. // Arbitrary linkname value store in the symlink.
Linkname string `json:"linkname"` Linkname string `json:"linkname"`
// Whether to treat Linkname as an absolute pathname and dereference before creating the link.
// Whether to treat Linkname as an absolute pathname and dereference before
// creating the link.
Dereference bool `json:"dereference,omitempty"` Dereference bool `json:"dereference,omitempty"`
} }

View File

@@ -19,9 +19,11 @@ type FSOverlay struct {
// Any filesystem, does not need to be on a writable filesystem, must not be nil. // Any filesystem, does not need to be on a writable filesystem, must not be nil.
Lower []*check.Absolute `json:"lower"` Lower []*check.Absolute `json:"lower"`
// The upperdir is normally on a writable filesystem, leave as nil to mount Lower readonly. // The upperdir is normally on a writable filesystem, leave as nil to mount
// Lower readonly.
Upper *check.Absolute `json:"upper,omitempty"` Upper *check.Absolute `json:"upper,omitempty"`
// The workdir needs to be an empty directory on the same filesystem as Upper, must not be nil if Upper is populated. // The workdir needs to be an empty directory on the same filesystem as
// Upper, must not be nil if Upper is populated.
Work *check.Absolute `json:"work,omitempty"` Work *check.Absolute `json:"work,omitempty"`
} }

View File

@@ -44,11 +44,13 @@ func (e *AppError) Message() string {
type Paths struct { type Paths struct {
// Temporary directory returned by [os.TempDir], usually equivalent to [fhs.AbsTmp]. // Temporary directory returned by [os.TempDir], usually equivalent to [fhs.AbsTmp].
TempDir *check.Absolute `json:"temp_dir"` TempDir *check.Absolute `json:"temp_dir"`
// Shared directory specific to the hsu userid, usually (`/tmp/hakurei.%d`, [Info.User]). // Shared directory specific to the hsu userid, usually
// (`/tmp/hakurei.%d`, [Info.User]).
SharePath *check.Absolute `json:"share_path"` SharePath *check.Absolute `json:"share_path"`
// Checked XDG_RUNTIME_DIR value, usually (`/run/user/%d`, uid). // Checked XDG_RUNTIME_DIR value, usually (`/run/user/%d`, uid).
RuntimePath *check.Absolute `json:"runtime_path"` RuntimePath *check.Absolute `json:"runtime_path"`
// Shared directory specific to the hsu userid located in RuntimePath, usually (`/run/user/%d/hakurei`, uid). // Shared directory specific to the hsu userid located in RuntimePath,
// usually (`/run/user/%d/hakurei`, uid).
RunDirPath *check.Absolute `json:"run_dir_path"` RunDirPath *check.Absolute `json:"run_dir_path"`
} }
@@ -74,10 +76,23 @@ func Template() *Config {
SessionBus: &BusConfig{ SessionBus: &BusConfig{
See: nil, See: nil,
Talk: []string{"org.freedesktop.Notifications", "org.freedesktop.FileManager1", "org.freedesktop.ScreenSaver", Talk: []string{
"org.freedesktop.secrets", "org.kde.kwalletd5", "org.kde.kwalletd6", "org.gnome.SessionManager"}, "org.freedesktop.Notifications",
Own: []string{"org.chromium.Chromium.*", "org.mpris.MediaPlayer2.org.chromium.Chromium.*", "org.freedesktop.FileManager1",
"org.mpris.MediaPlayer2.chromium.*"}, "org.freedesktop.ScreenSaver",
"org.freedesktop.secrets",
"org.kde.kwalletd5",
"org.kde.kwalletd6",
"org.gnome.SessionManager",
},
Own: []string{
"org.chromium.Chromium.*",
"org.mpris.MediaPlayer2.org.chromium.Chromium.*",
"org.mpris.MediaPlayer2.chromium.*",
},
Call: map[string]string{"org.freedesktop.portal.*": "*"}, Call: map[string]string{"org.freedesktop.portal.*": "*"},
Broadcast: map[string]string{"org.freedesktop.portal.*": "@/org/freedesktop/portal/*"}, Broadcast: map[string]string{"org.freedesktop.portal.*": "@/org/freedesktop/portal/*"},
Log: false, Log: false,
@@ -112,7 +127,12 @@ func Template() *Config {
"GOOGLE_DEFAULT_CLIENT_SECRET": "OTJgUOQcT7lO7GsGZq2G4IlT", "GOOGLE_DEFAULT_CLIENT_SECRET": "OTJgUOQcT7lO7GsGZq2G4IlT",
}, },
Filesystem: []FilesystemConfigJSON{ Filesystem: []FilesystemConfigJSON{
{&FSBind{Target: fhs.AbsRoot, Source: fhs.AbsVarLib.Append("hakurei/base/org.debian"), Write: true, Special: true}}, {&FSBind{
Target: fhs.AbsRoot,
Source: fhs.AbsVarLib.Append("hakurei/base/org.debian"),
Write: true,
Special: true,
}},
{&FSBind{Target: fhs.AbsEtc, Source: fhs.AbsEtc, Special: true}}, {&FSBind{Target: fhs.AbsEtc, Source: fhs.AbsEtc, Special: true}},
{&FSEphemeral{Target: fhs.AbsTmp, Write: true, Perm: 0755}}, {&FSEphemeral{Target: fhs.AbsTmp, Write: true, Perm: 0755}},
{&FSOverlay{ {&FSOverlay{
@@ -121,11 +141,27 @@ func Template() *Config {
Upper: fhs.AbsVarLib.Append("hakurei/nix/u0/org.chromium.Chromium/rw-store/upper"), Upper: fhs.AbsVarLib.Append("hakurei/nix/u0/org.chromium.Chromium/rw-store/upper"),
Work: fhs.AbsVarLib.Append("hakurei/nix/u0/org.chromium.Chromium/rw-store/work"), Work: fhs.AbsVarLib.Append("hakurei/nix/u0/org.chromium.Chromium/rw-store/work"),
}}, }},
{&FSLink{Target: fhs.AbsRun.Append("current-system"), Linkname: "/run/current-system", Dereference: true}}, {&FSLink{
{&FSLink{Target: fhs.AbsRun.Append("opengl-driver"), Linkname: "/run/opengl-driver", Dereference: true}}, Target: fhs.AbsRun.Append("current-system"),
{&FSBind{Source: fhs.AbsVarLib.Append("hakurei/u0/org.chromium.Chromium"), Linkname: "/run/current-system",
Target: check.MustAbs("/data/data/org.chromium.Chromium"), Write: true, Ensure: true}}, Dereference: true,
{&FSBind{Source: fhs.AbsDev.Append("dri"), Device: true, Optional: true}}, }},
{&FSLink{
Target: fhs.AbsRun.Append("opengl-driver"),
Linkname: "/run/opengl-driver",
Dereference: true,
}},
{&FSBind{
Source: fhs.AbsVarLib.Append("hakurei/u0/org.chromium.Chromium"),
Target: check.MustAbs("/data/data/org.chromium.Chromium"),
Write: true,
Ensure: true,
}},
{&FSBind{
Source: fhs.AbsDev.Append("dri"),
Device: true,
Optional: true,
}},
}, },
Username: "chronos", Username: "chronos",

View File

@@ -12,10 +12,12 @@ import (
// An ID is a unique identifier held by a running hakurei container. // An ID is a unique identifier held by a running hakurei container.
type ID [16]byte type ID [16]byte
// ErrIdentifierLength is returned when encountering a [hex] representation of [ID] with unexpected length. // ErrIdentifierLength is returned when encountering a [hex] representation of
// [ID] with unexpected length.
var ErrIdentifierLength = errors.New("identifier string has unexpected length") var ErrIdentifierLength = errors.New("identifier string has unexpected length")
// IdentifierDecodeError is returned by [ID.UnmarshalText] to provide relevant error descriptions. // IdentifierDecodeError is returned by [ID.UnmarshalText] to provide relevant
// error descriptions.
type IdentifierDecodeError struct{ Err error } type IdentifierDecodeError struct{ Err error }
func (e IdentifierDecodeError) Unwrap() error { return e.Err } func (e IdentifierDecodeError) Unwrap() error { return e.Err }
@@ -23,7 +25,10 @@ func (e IdentifierDecodeError) Error() string {
var invalidByteError hex.InvalidByteError var invalidByteError hex.InvalidByteError
switch { switch {
case errors.As(e.Err, &invalidByteError): case errors.As(e.Err, &invalidByteError):
return fmt.Sprintf("got invalid byte %#U in identifier", rune(invalidByteError)) return fmt.Sprintf(
"got invalid byte %#U in identifier",
rune(invalidByteError),
)
case errors.Is(e.Err, hex.ErrLength): case errors.Is(e.Err, hex.ErrLength):
return "odd length identifier hex string" return "odd length identifier hex string"
@@ -41,7 +46,9 @@ func (a *ID) CreationTime() time.Time {
} }
// NewInstanceID creates a new unique [ID]. // NewInstanceID creates a new unique [ID].
func NewInstanceID(id *ID) error { return newInstanceID(id, uint64(time.Now().UnixNano())) } func NewInstanceID(id *ID) error {
return newInstanceID(id, uint64(time.Now().UnixNano()))
}
// newInstanceID creates a new unique [ID] with the specified timestamp. // newInstanceID creates a new unique [ID] with the specified timestamp.
func newInstanceID(id *ID, p uint64) error { func newInstanceID(id *ID, p uint64) error {

View File

@@ -38,6 +38,7 @@ func (h *Hsu) ensureDispatcher() {
} }
// ID returns the current user hsurc identifier. // ID returns the current user hsurc identifier.
//
// [ErrHsuAccess] is returned if the current user is not in hsurc. // [ErrHsuAccess] is returned if the current user is not in hsurc.
func (h *Hsu) ID() (int, error) { func (h *Hsu) ID() (int, error) {
h.ensureDispatcher() h.ensureDispatcher()

View File

@@ -1,4 +1,5 @@
// Package outcome implements the outcome of the privileged and container sides of a hakurei container. // Package outcome implements the outcome of the privileged and container sides
// of a hakurei container.
package outcome package outcome
import ( import (
@@ -27,8 +28,9 @@ func Info() *hst.Info {
return &hi return &hi
} }
// envAllocSize is the initial size of the env map pre-allocated when the configured env map is nil. // envAllocSize is the initial size of the env map pre-allocated when the
// It should be large enough to fit all insertions by outcomeOp.toContainer. // configured env map is nil. It should be large enough to fit all insertions by
// outcomeOp.toContainer.
const envAllocSize = 1 << 6 const envAllocSize = 1 << 6
func newInt(v int) *stringPair[int] { return &stringPair[int]{v, strconv.Itoa(v)} } func newInt(v int) *stringPair[int] { return &stringPair[int]{v, strconv.Itoa(v)} }
@@ -43,7 +45,8 @@ func (s *stringPair[T]) unwrap() T { return s.v }
func (s *stringPair[T]) String() string { return s.s } func (s *stringPair[T]) String() string { return s.s }
// outcomeState is copied to the shim process and available while applying outcomeOp. // outcomeState is copied to the shim process and available while applying outcomeOp.
// This is transmitted from the priv side to the shim, so exported fields should be kept to a minimum. // This is transmitted from the priv side to the shim, so exported fields should
// be kept to a minimum.
type outcomeState struct { type outcomeState struct {
// Params only used by the shim process. Populated by populateEarly. // Params only used by the shim process. Populated by populateEarly.
Shim *shimParams Shim *shimParams
@@ -89,14 +92,25 @@ func (s *outcomeState) valid() bool {
s.Paths != nil s.Paths != nil
} }
// newOutcomeState returns the address of a new outcomeState with its exported fields populated via syscallDispatcher. // newOutcomeState returns the address of a new outcomeState with its exported
// fields populated via syscallDispatcher.
func newOutcomeState(k syscallDispatcher, msg message.Msg, id *hst.ID, config *hst.Config, hsu *Hsu) *outcomeState { func newOutcomeState(k syscallDispatcher, msg message.Msg, id *hst.ID, config *hst.Config, hsu *Hsu) *outcomeState {
s := outcomeState{ s := outcomeState{
Shim: &shimParams{PrivPID: k.getpid(), Verbose: msg.IsVerbose()}, Shim: &shimParams{
PrivPID: k.getpid(),
Verbose: msg.IsVerbose(),
SchedPolicy: config.SchedPolicy,
SchedPriority: config.SchedPriority,
},
ID: id, ID: id,
Identity: config.Identity, Identity: config.Identity,
UserID: hsu.MustID(msg), UserID: hsu.MustID(msg),
Paths: env.CopyPathsFunc(k.fatalf, k.tempdir, func(key string) string { v, _ := k.lookupEnv(key); return v }), Paths: env.CopyPathsFunc(k.fatalf, k.tempdir, func(key string) string {
v, _ := k.lookupEnv(key)
return v
}),
Container: config.Container, Container: config.Container,
} }
@@ -121,6 +135,7 @@ func newOutcomeState(k syscallDispatcher, msg message.Msg, id *hst.ID, config *h
} }
// populateLocal populates unexported fields from transmitted exported fields. // populateLocal populates unexported fields from transmitted exported fields.
//
// These fields are cheaper to recompute per-process. // These fields are cheaper to recompute per-process.
func (s *outcomeState) populateLocal(k syscallDispatcher, msg message.Msg) error { func (s *outcomeState) populateLocal(k syscallDispatcher, msg message.Msg) error {
if !s.valid() || k == nil || msg == nil { if !s.valid() || k == nil || msg == nil {
@@ -136,7 +151,10 @@ func (s *outcomeState) populateLocal(k syscallDispatcher, msg message.Msg) error
s.id = &stringPair[hst.ID]{*s.ID, s.ID.String()} s.id = &stringPair[hst.ID]{*s.ID, s.ID.String()}
s.Copy(&s.sc, s.UserID) s.Copy(&s.sc, s.UserID)
msg.Verbosef("process share directory at %q, runtime directory at %q", s.sc.SharePath, s.sc.RunDirPath) msg.Verbosef(
"process share directory at %q, runtime directory at %q",
s.sc.SharePath, s.sc.RunDirPath,
)
s.identity = newInt(s.Identity) s.identity = newInt(s.Identity)
s.mapuid, s.mapgid = newInt(s.Mapuid), newInt(s.Mapgid) s.mapuid, s.mapgid = newInt(s.Mapuid), newInt(s.Mapgid)
@@ -146,17 +164,25 @@ func (s *outcomeState) populateLocal(k syscallDispatcher, msg message.Msg) error
} }
// instancePath returns a path formatted for outcomeStateSys.instance. // instancePath returns a path formatted for outcomeStateSys.instance.
//
// This method must only be called from outcomeOp.toContainer if // This method must only be called from outcomeOp.toContainer if
// outcomeOp.toSystem has already called outcomeStateSys.instance. // outcomeOp.toSystem has already called outcomeStateSys.instance.
func (s *outcomeState) instancePath() *check.Absolute { return s.sc.SharePath.Append(s.id.String()) } func (s *outcomeState) instancePath() *check.Absolute {
return s.sc.SharePath.Append(s.id.String())
}
// runtimePath returns a path formatted for outcomeStateSys.runtime. // runtimePath returns a path formatted for outcomeStateSys.runtime.
//
// This method must only be called from outcomeOp.toContainer if // This method must only be called from outcomeOp.toContainer if
// outcomeOp.toSystem has already called outcomeStateSys.runtime. // outcomeOp.toSystem has already called outcomeStateSys.runtime.
func (s *outcomeState) runtimePath() *check.Absolute { return s.sc.RunDirPath.Append(s.id.String()) } func (s *outcomeState) runtimePath() *check.Absolute {
return s.sc.RunDirPath.Append(s.id.String())
}
// outcomeStateSys wraps outcomeState and [system.I]. Used on the priv side only. // outcomeStateSys wraps outcomeState and [system.I]. Used on the priv side only.
// Implementations of outcomeOp must not access fields other than sys unless explicitly stated. //
// Implementations of outcomeOp must not access fields other than sys unless
// explicitly stated.
type outcomeStateSys struct { type outcomeStateSys struct {
// Whether XDG_RUNTIME_DIR is used post hsu. // Whether XDG_RUNTIME_DIR is used post hsu.
useRuntimeDir bool useRuntimeDir bool
@@ -219,6 +245,7 @@ func (state *outcomeStateSys) ensureRuntimeDir() {
} }
// instance returns the pathname to a process-specific directory within TMPDIR. // instance returns the pathname to a process-specific directory within TMPDIR.
//
// This directory must only hold entries bound to [system.Process]. // This directory must only hold entries bound to [system.Process].
func (state *outcomeStateSys) instance() *check.Absolute { func (state *outcomeStateSys) instance() *check.Absolute {
if state.sharePath != nil { if state.sharePath != nil {
@@ -230,6 +257,7 @@ func (state *outcomeStateSys) instance() *check.Absolute {
} }
// runtime returns the pathname to a process-specific directory within XDG_RUNTIME_DIR. // runtime returns the pathname to a process-specific directory within XDG_RUNTIME_DIR.
//
// This directory must only hold entries bound to [system.Process]. // This directory must only hold entries bound to [system.Process].
func (state *outcomeStateSys) runtime() *check.Absolute { func (state *outcomeStateSys) runtime() *check.Absolute {
if state.runtimeSharePath != nil { if state.runtimeSharePath != nil {
@@ -242,22 +270,29 @@ func (state *outcomeStateSys) runtime() *check.Absolute {
return state.runtimeSharePath return state.runtimeSharePath
} }
// outcomeStateParams wraps outcomeState and [container.Params]. Used on the shim side only. // outcomeStateParams wraps outcomeState and [container.Params].
//
// Used on the shim side only.
type outcomeStateParams struct { type outcomeStateParams struct {
// Overrides the embedded [container.Params] in [container.Container]. The Env field must not be used. // Overrides the embedded [container.Params] in [container.Container].
//
// The Env field must not be used.
params *container.Params params *container.Params
// Collapsed into the Env slice in [container.Params] by the final outcomeOp. // Collapsed into the Env slice in [container.Params] by the final outcomeOp.
env map[string]string env map[string]string
// Filesystems with the optional root sliced off if present. Populated by spParamsOp. // Filesystems with the optional root sliced off if present.
// Safe for use by spFilesystemOp. //
// Populated by spParamsOp. Safe for use by spFilesystemOp.
filesystem []hst.FilesystemConfigJSON filesystem []hst.FilesystemConfigJSON
// Inner XDG_RUNTIME_DIR default formatting of `/run/user/%d` via mapped uid. // Inner XDG_RUNTIME_DIR default formatting of `/run/user/%d` via mapped uid.
//
// Populated by spRuntimeOp. // Populated by spRuntimeOp.
runtimeDir *check.Absolute runtimeDir *check.Absolute
// Path to pipewire-pulse server. // Path to pipewire-pulse server.
//
// Populated by spPipeWireOp if DirectPipeWire is false. // Populated by spPipeWireOp if DirectPipeWire is false.
pipewirePulsePath *check.Absolute pipewirePulsePath *check.Absolute
@@ -265,25 +300,32 @@ type outcomeStateParams struct {
*outcomeState *outcomeState
} }
// errNotEnabled is returned by outcomeOp.toSystem and used internally to exclude an outcomeOp from transmission. // errNotEnabled is returned by outcomeOp.toSystem and used internally to
// exclude an outcomeOp from transmission.
var errNotEnabled = errors.New("op not enabled in the configuration") var errNotEnabled = errors.New("op not enabled in the configuration")
// An outcomeOp inflicts an outcome on [system.I] and contains enough information to // An outcomeOp inflicts an outcome on [system.I] and contains enough
// inflict it on [container.Params] in a separate process. // information to inflict it on [container.Params] in a separate process.
// An implementation of outcomeOp must store cross-process states in exported fields only. //
// An implementation of outcomeOp must store cross-process states in exported
// fields only.
type outcomeOp interface { type outcomeOp interface {
// toSystem inflicts the current outcome on [system.I] in the priv side process. // toSystem inflicts the current outcome on [system.I] in the priv side process.
toSystem(state *outcomeStateSys) error toSystem(state *outcomeStateSys) error
// toContainer inflicts the current outcome on [container.Params] in the shim process. // toContainer inflicts the current outcome on [container.Params] in the
// The implementation must not write to the Env field of [container.Params] as it will be overwritten // shim process.
// by flattened env map. //
// Implementations must not write to the Env field of [container.Params]
// as it will be overwritten by flattened env map.
toContainer(state *outcomeStateParams) error toContainer(state *outcomeStateParams) error
} }
// toSystem calls the outcomeOp.toSystem method on all outcomeOp implementations and populates shimParams.Ops. // toSystem calls the outcomeOp.toSystem method on all outcomeOp implementations
// This function assumes the caller has already called the Validate method on [hst.Config] // and populates shimParams.Ops.
// and checked that it returns nil. //
// This function assumes the caller has already called the Validate method on
// [hst.Config] and checked that it returns nil.
func (state *outcomeStateSys) toSystem() error { func (state *outcomeStateSys) toSystem() error {
if state.Shim == nil || state.Shim.Ops != nil { if state.Shim == nil || state.Shim.Ops != nil {
return newWithMessage("invalid ops state reached") return newWithMessage("invalid ops state reached")

View File

@@ -30,7 +30,9 @@ const (
) )
// NewStore returns the address of a new instance of [store.Store]. // NewStore returns the address of a new instance of [store.Store].
func NewStore(sc *hst.Paths) *store.Store { return store.New(sc.SharePath.Append("state")) } func NewStore(sc *hst.Paths) *store.Store {
return store.New(sc.SharePath.Append("state"))
}
// main carries out outcome and terminates. main does not return. // main carries out outcome and terminates. main does not return.
func (k *outcome) main(msg message.Msg, identifierFd int) { func (k *outcome) main(msg message.Msg, identifierFd int) {
@@ -116,7 +118,11 @@ func (k *outcome) main(msg message.Msg, identifierFd int) {
processStatePrev, processStateCur = processStateCur, processState processStatePrev, processStateCur = processStateCur, processState
if !processTime.IsZero() && processStatePrev != processLifecycle { if !processTime.IsZero() && processStatePrev != processLifecycle {
msg.Verbosef("state %d took %.2f ms", processStatePrev, float64(time.Since(processTime).Nanoseconds())/1e6) msg.Verbosef(
"state %d took %.2f ms",
processStatePrev,
float64(time.Since(processTime).Nanoseconds())/1e6,
)
} }
processTime = time.Now() processTime = time.Now()
@@ -141,7 +147,10 @@ func (k *outcome) main(msg message.Msg, identifierFd int) {
case processCommit: case processCommit:
if isBeforeRevert { if isBeforeRevert {
perrorFatal(newWithMessage("invalid transition to commit state"), "commit", processLifecycle) perrorFatal(
newWithMessage("invalid transition to commit state"),
"commit", processLifecycle,
)
continue continue
} }
@@ -238,15 +247,26 @@ func (k *outcome) main(msg message.Msg, identifierFd int) {
case <-func() chan struct{} { case <-func() chan struct{} {
w := make(chan struct{}) w := make(chan struct{})
// this ties processLifecycle to ctx with the additional compensated timeout duration // This ties processLifecycle to ctx with the additional
// to allow transition to the next state on a locked up shim // compensated timeout duration to allow transition to the next
go func() { <-ctx.Done(); time.Sleep(k.state.Shim.WaitDelay + shimWaitTimeout); close(w) }() // state on a locked up shim.
go func() {
<-ctx.Done()
time.Sleep(k.state.Shim.WaitDelay + shimWaitTimeout)
close(w)
}()
return w return w
}(): }():
// this is only reachable when wait did not return within shimWaitTimeout, after its WaitDelay has elapsed. // This is only reachable when wait did not return within
// This is different from the container failing to terminate within its timeout period, as that is enforced // shimWaitTimeout, after its WaitDelay has elapsed. This is
// by the shim. This path is instead reached when there is a lockup in shim preventing it from completing. // different from the container failing to terminate within its
msg.GetLogger().Printf("process %d did not terminate", shimCmd.Process.Pid) // timeout period, as that is enforced by the shim. This path is
// instead reached when there is a lockup in shim preventing it
// from completing.
msg.GetLogger().Printf(
"process %d did not terminate",
shimCmd.Process.Pid,
)
} }
msg.Resume() msg.Resume()
@@ -271,8 +291,8 @@ func (k *outcome) main(msg message.Msg, identifierFd int) {
ec := system.Process ec := system.Process
if entries, _, err := handle.Entries(); err != nil { if entries, _, err := handle.Entries(); err != nil {
// it is impossible to continue from this point, // it is impossible to continue from this point, per-process
// per-process state will be reverted to limit damage // state will be reverted to limit damage
perror(err, "read store segment entries") perror(err, "read store segment entries")
} else { } else {
// accumulate enablements of remaining instances // accumulate enablements of remaining instances
@@ -295,7 +315,10 @@ func (k *outcome) main(msg message.Msg, identifierFd int) {
if n == 0 { if n == 0 {
ec |= system.User ec |= system.User
} else { } else {
msg.Verbosef("found %d instances, cleaning up without user-scoped operations", n) msg.Verbosef(
"found %d instances, cleaning up without user-scoped operations",
n,
)
} }
ec |= rt ^ (hst.EWayland | hst.EX11 | hst.EDBus | hst.EPulse) ec |= rt ^ (hst.EWayland | hst.EX11 | hst.EDBus | hst.EPulse)
if msg.IsVerbose() { if msg.IsVerbose() {
@@ -335,7 +358,9 @@ func (k *outcome) main(msg message.Msg, identifierFd int) {
// start starts the shim via cmd/hsu. // start starts the shim via cmd/hsu.
// //
// If successful, a [time.Time] value for [hst.State] is stored in the value pointed to by startTime. // If successful, a [time.Time] value for [hst.State] is stored in the value
// pointed to by startTime.
//
// The resulting [exec.Cmd] and write end of the shim setup pipe is returned. // The resulting [exec.Cmd] and write end of the shim setup pipe is returned.
func (k *outcome) start(ctx context.Context, msg message.Msg, func (k *outcome) start(ctx context.Context, msg message.Msg,
hsuPath *check.Absolute, hsuPath *check.Absolute,

View File

@@ -37,9 +37,12 @@ const (
shimMsgBadPID = C.HAKUREI_SHIM_BAD_PID shimMsgBadPID = C.HAKUREI_SHIM_BAD_PID
) )
// setupContSignal sets up the SIGCONT signal handler for the cross-uid shim exit hack. // setupContSignal sets up the SIGCONT signal handler for the cross-uid shim
// The signal handler is implemented in C, signals can be processed by reading from the returned reader. // exit hack.
// The returned function must be called after all signal processing concludes. //
// The signal handler is implemented in C, signals can be processed by reading
// from the returned reader. The returned function must be called after all
// signal processing concludes.
func setupContSignal(pid int) (io.ReadCloser, func(), error) { func setupContSignal(pid int) (io.ReadCloser, func(), error) {
if r, w, err := os.Pipe(); err != nil { if r, w, err := os.Pipe(); err != nil {
return nil, nil, err return nil, nil, err
@@ -51,22 +54,30 @@ func setupContSignal(pid int) (io.ReadCloser, func(), error) {
} }
} }
// shimEnv is the name of the environment variable storing decimal representation of // shimEnv is the name of the environment variable storing decimal representation
// setup pipe fd for [container.Receive]. // of setup pipe fd for [container.Receive].
const shimEnv = "HAKUREI_SHIM" const shimEnv = "HAKUREI_SHIM"
// shimParams is embedded in outcomeState and transmitted from priv side to shim. // shimParams is embedded in outcomeState and transmitted from priv side to shim.
type shimParams struct { type shimParams struct {
// Priv side pid, checked against ppid in signal handler for the syscall.SIGCONT hack. // Priv side pid, checked against ppid in signal handler for the
// syscall.SIGCONT hack.
PrivPID int PrivPID int
// Duration to wait for after the initial process receives os.Interrupt before the container is killed. // Duration to wait for after the initial process receives os.Interrupt
// before the container is killed.
//
// Limits are enforced on the priv side. // Limits are enforced on the priv side.
WaitDelay time.Duration WaitDelay time.Duration
// Verbosity pass through from [message.Msg]. // Verbosity pass through from [message.Msg].
Verbose bool Verbose bool
// Copied from [hst.Config].
SchedPolicy std.SchedPolicy
// Copied from [hst.Config].
SchedPriority std.Int
// Outcome setup ops, contains setup state. Populated by outcome.finalise. // Outcome setup ops, contains setup state. Populated by outcome.finalise.
Ops []outcomeOp Ops []outcomeOp
} }
@@ -77,7 +88,9 @@ func (p *shimParams) valid() bool { return p != nil && p.PrivPID > 0 }
// shimName is the prefix used by log.std in the shim process. // shimName is the prefix used by log.std in the shim process.
const shimName = "shim" const shimName = "shim"
// Shim is called by the main function of the shim process and runs as the unconstrained target user. // Shim is called by the main function of the shim process and runs as the
// unconstrained target user.
//
// Shim does not return. // Shim does not return.
func Shim(msg message.Msg) { func Shim(msg message.Msg) {
if msg == nil { if msg == nil {
@@ -131,7 +144,8 @@ func (sp *shimPrivate) destroy() {
} }
const ( const (
// shimPipeWireTimeout is the duration pipewire-pulse is allowed to run before its socket becomes available. // shimPipeWireTimeout is the duration pipewire-pulse is allowed to run
// before its socket becomes available.
shimPipeWireTimeout = 5 * time.Second shimPipeWireTimeout = 5 * time.Second
) )
@@ -262,6 +276,9 @@ func shimEntrypoint(k syscallDispatcher) {
cancelContainer.Store(&stop) cancelContainer.Store(&stop)
sp := shimPrivate{k: k, id: state.id} sp := shimPrivate{k: k, id: state.id}
z := container.New(ctx, msg) z := container.New(ctx, msg)
z.SetScheduler = state.Shim.SchedPolicy > 0
z.SchedPolicy = state.Shim.SchedPolicy
z.SchedPriority = state.Shim.SchedPriority
z.Params = *stateParams.params z.Params = *stateParams.params
z.Stdin, z.Stdout, z.Stderr = os.Stdin, os.Stdout, os.Stderr z.Stdin, z.Stdout, z.Stderr = os.Stdin, os.Stdout, os.Stderr

View File

@@ -27,7 +27,9 @@ const varRunNscd = fhs.Var + "run/nscd"
func init() { gob.Register(new(spParamsOp)) } func init() { gob.Register(new(spParamsOp)) }
// spParamsOp initialises unordered fields of [container.Params] and the optional root filesystem. // spParamsOp initialises unordered fields of [container.Params] and the
// optional root filesystem.
//
// This outcomeOp is hardcoded to always run first. // This outcomeOp is hardcoded to always run first.
type spParamsOp struct { type spParamsOp struct {
// Value of $TERM, stored during toSystem. // Value of $TERM, stored during toSystem.
@@ -67,8 +69,8 @@ func (s *spParamsOp) toContainer(state *outcomeStateParams) error {
state.params.Args = state.Container.Args state.params.Args = state.Container.Args
} }
// the container is canceled when shim is requested to exit or receives an interrupt or termination signal; // The container is cancelled when shim is requested to exit or receives an
// this behaviour is implemented in the shim // interrupt or termination signal. This behaviour is implemented in the shim.
state.params.ForwardCancel = state.Shim.WaitDelay > 0 state.params.ForwardCancel = state.Shim.WaitDelay > 0
if state.Container.Flags&hst.FMultiarch != 0 { if state.Container.Flags&hst.FMultiarch != 0 {
@@ -115,7 +117,8 @@ func (s *spParamsOp) toContainer(state *outcomeStateParams) error {
} else { } else {
state.params.Bind(fhs.AbsDev, fhs.AbsDev, std.BindWritable|std.BindDevice) state.params.Bind(fhs.AbsDev, fhs.AbsDev, std.BindWritable|std.BindDevice)
} }
// /dev is mounted readonly later on, this prevents /dev/shm from going readonly with it // /dev is mounted readonly later on, this prevents /dev/shm from going
// readonly with it
state.params.Tmpfs(fhs.AbsDevShm, 0, 01777) state.params.Tmpfs(fhs.AbsDevShm, 0, 01777)
return nil return nil
@@ -123,7 +126,9 @@ func (s *spParamsOp) toContainer(state *outcomeStateParams) error {
func init() { gob.Register(new(spFilesystemOp)) } func init() { gob.Register(new(spFilesystemOp)) }
// spFilesystemOp applies configured filesystems to [container.Params], excluding the optional root filesystem. // spFilesystemOp applies configured filesystems to [container.Params],
// excluding the optional root filesystem.
//
// This outcomeOp is hardcoded to always run last. // This outcomeOp is hardcoded to always run last.
type spFilesystemOp struct { type spFilesystemOp struct {
// Matched paths to cover. Stored during toSystem. // Matched paths to cover. Stored during toSystem.
@@ -297,8 +302,8 @@ func (s *spFilesystemOp) toContainer(state *outcomeStateParams) error {
return nil return nil
} }
// resolveRoot handles the root filesystem special case for [hst.FilesystemConfig] and additionally resolves autoroot // resolveRoot handles the root filesystem special case for [hst.FilesystemConfig]
// as it requires special handling during path hiding. // and additionally resolves autoroot as it requires special handling during path hiding.
func resolveRoot(c *hst.ContainerConfig) (rootfs hst.FilesystemConfig, filesystem []hst.FilesystemConfigJSON, autoroot *hst.FSBind) { func resolveRoot(c *hst.ContainerConfig) (rootfs hst.FilesystemConfig, filesystem []hst.FilesystemConfigJSON, autoroot *hst.FSBind) {
// root filesystem special case // root filesystem special case
filesystem = c.Filesystem filesystem = c.Filesystem
@@ -316,7 +321,8 @@ func resolveRoot(c *hst.ContainerConfig) (rootfs hst.FilesystemConfig, filesyste
return return
} }
// evalSymlinks calls syscallDispatcher.evalSymlinks but discards errors unwrapping to [fs.ErrNotExist]. // evalSymlinks calls syscallDispatcher.evalSymlinks but discards errors
// unwrapping to [fs.ErrNotExist].
func evalSymlinks(msg message.Msg, k syscallDispatcher, v *string) error { func evalSymlinks(msg message.Msg, k syscallDispatcher, v *string) error {
if p, err := k.evalSymlinks(*v); err != nil { if p, err := k.evalSymlinks(*v); err != nil {
if !errors.Is(err, fs.ErrNotExist) { if !errors.Is(err, fs.ErrNotExist) {

View File

@@ -12,6 +12,7 @@ import (
func init() { gob.Register(new(spDBusOp)) } func init() { gob.Register(new(spDBusOp)) }
// spDBusOp maintains an xdg-dbus-proxy instance for the container. // spDBusOp maintains an xdg-dbus-proxy instance for the container.
//
// Runs after spRuntimeOp. // Runs after spRuntimeOp.
type spDBusOp struct { type spDBusOp struct {
// Whether to bind the system bus socket. Populated during toSystem. // Whether to bind the system bus socket. Populated during toSystem.

View File

@@ -13,9 +13,12 @@ const pipewirePulseName = "pipewire-pulse"
func init() { gob.Register(new(spPipeWireOp)) } func init() { gob.Register(new(spPipeWireOp)) }
// spPipeWireOp exports the PipeWire server to the container via SecurityContext. // spPipeWireOp exports the PipeWire server to the container via SecurityContext.
//
// Runs after spRuntimeOp. // Runs after spRuntimeOp.
type spPipeWireOp struct { type spPipeWireOp struct {
// Path to pipewire-pulse server. Populated during toSystem if DirectPipeWire is false. // Path to pipewire-pulse server.
//
// Populated during toSystem if DirectPipeWire is false.
CompatServerPath *check.Absolute CompatServerPath *check.Absolute
} }

View File

@@ -20,6 +20,7 @@ const pulseCookieSizeMax = 1 << 8
func init() { gob.Register(new(spPulseOp)) } func init() { gob.Register(new(spPulseOp)) }
// spPulseOp exports the PulseAudio server to the container. // spPulseOp exports the PulseAudio server to the container.
//
// Runs after spRuntimeOp. // Runs after spRuntimeOp.
type spPulseOp struct { type spPulseOp struct {
// PulseAudio cookie data, populated during toSystem if a cookie is present. // PulseAudio cookie data, populated during toSystem if a cookie is present.
@@ -37,24 +38,40 @@ func (s *spPulseOp) toSystem(state *outcomeStateSys) error {
if _, err := state.k.stat(pulseRuntimeDir.String()); err != nil { if _, err := state.k.stat(pulseRuntimeDir.String()); err != nil {
if !errors.Is(err, fs.ErrNotExist) { if !errors.Is(err, fs.ErrNotExist) {
return &hst.AppError{Step: fmt.Sprintf("access PulseAudio directory %q", pulseRuntimeDir), Err: err} return &hst.AppError{Step: fmt.Sprintf(
"access PulseAudio directory %q",
pulseRuntimeDir,
), Err: err}
} }
return newWithMessageError(fmt.Sprintf("PulseAudio directory %q not found", pulseRuntimeDir), err) return newWithMessageError(fmt.Sprintf(
"PulseAudio directory %q not found",
pulseRuntimeDir,
), err)
} }
if fi, err := state.k.stat(pulseSocket.String()); err != nil { if fi, err := state.k.stat(pulseSocket.String()); err != nil {
if !errors.Is(err, fs.ErrNotExist) { if !errors.Is(err, fs.ErrNotExist) {
return &hst.AppError{Step: fmt.Sprintf("access PulseAudio socket %q", pulseSocket), Err: err} return &hst.AppError{Step: fmt.Sprintf(
"access PulseAudio socket %q",
pulseSocket,
), Err: err}
} }
return newWithMessageError(fmt.Sprintf("PulseAudio directory %q found but socket does not exist", pulseRuntimeDir), err) return newWithMessageError(fmt.Sprintf(
"PulseAudio directory %q found but socket does not exist",
pulseRuntimeDir,
), err)
} else { } else {
if m := fi.Mode(); m&0o006 != 0o006 { if m := fi.Mode(); m&0o006 != 0o006 {
return newWithMessage(fmt.Sprintf("unexpected permissions on %q: %s", pulseSocket, m)) return newWithMessage(fmt.Sprintf(
"unexpected permissions on %q: %s",
pulseSocket, m,
))
} }
} }
// pulse socket is world writable and its parent directory DAC permissions prevents access; // PulseAudio socket is world writable and its parent directory DAC
// hard link to target-executable share directory to grant access // permissions prevents access. Hard link to target-executable share
// directory to grant access
state.sys.Link(pulseSocket, state.runtime().Append("pulse")) state.sys.Link(pulseSocket, state.runtime().Append("pulse"))
// load up to pulseCookieSizeMax bytes of pulse cookie for transmission to shim // load up to pulseCookieSizeMax bytes of pulse cookie for transmission to shim
@@ -62,7 +79,13 @@ func (s *spPulseOp) toSystem(state *outcomeStateSys) error {
return err return err
} else if a != nil { } else if a != nil {
s.Cookie = new([pulseCookieSizeMax]byte) s.Cookie = new([pulseCookieSizeMax]byte)
if s.CookieSize, err = loadFile(state.msg, state.k, "PulseAudio cookie", a.String(), s.Cookie[:]); err != nil { if s.CookieSize, err = loadFile(
state.msg,
state.k,
"PulseAudio cookie",
a.String(),
s.Cookie[:],
); err != nil {
return err return err
} }
} else { } else {
@@ -101,8 +124,9 @@ func (s *spPulseOp) commonPaths(state *outcomeState) (pulseRuntimeDir, pulseSock
return return
} }
// discoverPulseCookie attempts to discover the pathname of the PulseAudio cookie of the current user. // discoverPulseCookie attempts to discover the pathname of the PulseAudio
// If both returned pathname and error are nil, the cookie is likely unavailable and can be silently skipped. // cookie of the current user. If both returned pathname and error are nil, the
// cookie is likely unavailable and can be silently skipped.
func discoverPulseCookie(k syscallDispatcher) (*check.Absolute, error) { func discoverPulseCookie(k syscallDispatcher) (*check.Absolute, error) {
const paLocateStep = "locate PulseAudio cookie" const paLocateStep = "locate PulseAudio cookie"
@@ -186,7 +210,10 @@ func loadFile(
&os.PathError{Op: "stat", Path: pathname, Err: syscall.ENOMEM}, &os.PathError{Op: "stat", Path: pathname, Err: syscall.ENOMEM},
) )
} else if s < int64(n) { } else if s < int64(n) {
msg.Verbosef("%s at %q is %d bytes shorter than expected", description, pathname, int64(n)-s) msg.Verbosef(
"%s at %q is %d bytes shorter than expected",
description, pathname, int64(n)-s,
)
} else { } else {
msg.Verbosef("loading %d bytes from %q", n, pathname) msg.Verbosef("loading %d bytes from %q", n, pathname)
} }

View File

@@ -67,7 +67,9 @@ const (
// spRuntimeOp sets up XDG_RUNTIME_DIR inside the container. // spRuntimeOp sets up XDG_RUNTIME_DIR inside the container.
type spRuntimeOp struct { type spRuntimeOp struct {
// SessionType determines the value of envXDGSessionType. Populated during toSystem. // SessionType determines the value of envXDGSessionType.
//
// Populated during toSystem.
SessionType uintptr SessionType uintptr
} }

View File

@@ -12,9 +12,12 @@ import (
func init() { gob.Register(new(spWaylandOp)) } func init() { gob.Register(new(spWaylandOp)) }
// spWaylandOp exports the Wayland display server to the container. // spWaylandOp exports the Wayland display server to the container.
//
// Runs after spRuntimeOp. // Runs after spRuntimeOp.
type spWaylandOp struct { type spWaylandOp struct {
// Path to host wayland socket. Populated during toSystem if DirectWayland is true. // Path to host wayland socket.
//
// Populated during toSystem if DirectWayland is true.
SocketPath *check.Absolute SocketPath *check.Absolute
} }

View File

@@ -50,7 +50,10 @@ func (s *spX11Op) toSystem(state *outcomeStateSys) error {
if socketPath != nil { if socketPath != nil {
if _, err := state.k.stat(socketPath.String()); err != nil { if _, err := state.k.stat(socketPath.String()); err != nil {
if !errors.Is(err, fs.ErrNotExist) { if !errors.Is(err, fs.ErrNotExist) {
return &hst.AppError{Step: fmt.Sprintf("access X11 socket %q", socketPath), Err: err} return &hst.AppError{Step: fmt.Sprintf(
"access X11 socket %q",
socketPath,
), Err: err}
} }
} else { } else {
state.sys.UpdatePermType(hst.EX11, socketPath, acl.Read, acl.Write, acl.Execute) state.sys.UpdatePermType(hst.EX11, socketPath, acl.Read, acl.Write, acl.Execute)

View File

@@ -39,8 +39,8 @@ type ExecPath struct {
W bool W bool
} }
// SchedPolicy is the [container] scheduling policy. // SetSchedIdle is whether to set [std.SCHED_IDLE] scheduling priority.
var SchedPolicy int var SetSchedIdle bool
// PromoteLayers returns artifacts with identical-by-content layers promoted to // PromoteLayers returns artifacts with identical-by-content layers promoted to
// the highest priority instance, as if mounted via [ExecPath]. // the highest priority instance, as if mounted via [ExecPath].
@@ -413,7 +413,8 @@ func (a *execArtifact) cure(f *FContext, hostNet bool) (err error) {
z.ParentPerm = 0700 z.ParentPerm = 0700
z.HostNet = hostNet z.HostNet = hostNet
z.Hostname = "cure" z.Hostname = "cure"
z.SchedPolicy = SchedPolicy z.SetScheduler = SetSchedIdle
z.SchedPolicy = std.SCHED_IDLE
if z.HostNet { if z.HostNet {
z.Hostname = "cure-net" z.Hostname = "cure-net"
} }

View File

@@ -101,6 +101,10 @@ func init() {
Description: "Commands for Manipulating POSIX Access Control Lists", Description: "Commands for Manipulating POSIX Access Control Lists",
Website: "https://savannah.nongnu.org/projects/acl/", Website: "https://savannah.nongnu.org/projects/acl/",
Dependencies: P{
Attr,
},
ID: 16, ID: 16,
} }
} }

View File

@@ -4,6 +4,7 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt"
"net/http" "net/http"
"strconv" "strconv"
"sync" "sync"
@@ -19,6 +20,8 @@ const (
LLVMRuntimes LLVMRuntimes
LLVMClang LLVMClang
// EarlyInit is the Rosa OS initramfs init program.
EarlyInit
// ImageInitramfs is the Rosa OS initramfs archive. // ImageInitramfs is the Rosa OS initramfs archive.
ImageInitramfs ImageInitramfs
@@ -28,6 +31,8 @@ const (
KernelHeaders KernelHeaders
// KernelSource is a writable kernel source tree installed to [AbsUsrSrc]. // KernelSource is a writable kernel source tree installed to [AbsUsrSrc].
KernelSource KernelSource
// Firmware is firmware blobs for use with the Linux kernel.
Firmware
ACL ACL
ArgpStandalone ArgpStandalone
@@ -85,9 +90,11 @@ const (
NSS NSS
NSSCACert NSSCACert
Ncurses Ncurses
Nettle
Ninja Ninja
OpenSSL OpenSSL
PCRE2 PCRE2
Parallel
Patch Patch
Perl Perl
PerlLocaleGettext PerlLocaleGettext
@@ -103,12 +110,23 @@ const (
PkgConfig PkgConfig
Procps Procps
Python Python
PythonCfgv
PythonDiscovery
PythonDistlib
PythonFilelock
PythonIdentify
PythonIniConfig PythonIniConfig
PythonNodeenv
PythonPackaging PythonPackaging
PythonPlatformdirs
PythonPluggy PythonPluggy
PythonPreCommit
PythonPyTest PythonPyTest
PythonPyYAML
PythonPygments PythonPygments
PythonVirtualenv
QEMU QEMU
Rdfind
Rsync Rsync
Sed Sed
Setuptools Setuptools
@@ -150,6 +168,36 @@ const (
PresetEnd PresetEnd
) )
// P represents multiple [PArtifact] and is stable through JSON.
type P []PArtifact
// MarshalJSON represents [PArtifact] by their [Metadata.Name].
func (s P) MarshalJSON() ([]byte, error) {
names := make([]string, len(s))
for i, p := range s {
names[i] = GetMetadata(p).Name
}
return json.Marshal(names)
}
// UnmarshalJSON resolves the value created by MarshalJSON back to [P].
func (s *P) UnmarshalJSON(data []byte) error {
var names []string
if err := json.Unmarshal(data, &names); err != nil {
return err
}
*s = make(P, len(names))
for i, name := range names {
if p, ok := ResolveName(name); !ok {
return fmt.Errorf("unknown artifact %q", name)
} else {
(*s)[i] = p
}
}
return nil
}
// Metadata is stage-agnostic information of a [PArtifact] not directly // Metadata is stage-agnostic information of a [PArtifact] not directly
// representable in the resulting [pkg.Artifact]. // representable in the resulting [pkg.Artifact].
type Metadata struct { type Metadata struct {
@@ -162,6 +210,9 @@ type Metadata struct {
// Project home page. // Project home page.
Website string `json:"website,omitempty"` Website string `json:"website,omitempty"`
// Runtime dependencies.
Dependencies P `json:"dependencies"`
// Project identifier on [Anitya]. // Project identifier on [Anitya].
// //
// [Anitya]: https://release-monitoring.org/ // [Anitya]: https://release-monitoring.org/
@@ -239,9 +290,10 @@ var (
artifactsM [PresetEnd]Metadata artifactsM [PresetEnd]Metadata
// artifacts stores the result of Metadata.f. // artifacts stores the result of Metadata.f.
artifacts [_toolchainEnd][len(artifactsM)]pkg.Artifact artifacts [_toolchainEnd][len(artifactsM)]struct {
// versions stores the version of [PArtifact]. a pkg.Artifact
versions [_toolchainEnd][len(artifactsM)]string v string
}
// artifactsOnce is for lazy initialisation of artifacts. // artifactsOnce is for lazy initialisation of artifacts.
artifactsOnce [_toolchainEnd][len(artifactsM)]sync.Once artifactsOnce [_toolchainEnd][len(artifactsM)]sync.Once
) )
@@ -249,20 +301,23 @@ var (
// GetMetadata returns [Metadata] of a [PArtifact]. // GetMetadata returns [Metadata] of a [PArtifact].
func GetMetadata(p PArtifact) *Metadata { return &artifactsM[p] } func GetMetadata(p PArtifact) *Metadata { return &artifactsM[p] }
// construct constructs a [pkg.Artifact] corresponding to a [PArtifact] once.
func (t Toolchain) construct(p PArtifact) {
artifactsOnce[t][p].Do(func() {
artifacts[t][p].a, artifacts[t][p].v = artifactsM[p].f(t)
})
}
// Load returns the resulting [pkg.Artifact] of [PArtifact]. // Load returns the resulting [pkg.Artifact] of [PArtifact].
func (t Toolchain) Load(p PArtifact) pkg.Artifact { func (t Toolchain) Load(p PArtifact) pkg.Artifact {
artifactsOnce[t][p].Do(func() { t.construct(p)
artifacts[t][p], versions[t][p] = artifactsM[p].f(t) return artifacts[t][p].a
})
return artifacts[t][p]
} }
// Version returns the version string of [PArtifact]. // Version returns the version string of [PArtifact].
func (t Toolchain) Version(p PArtifact) string { func (t Toolchain) Version(p PArtifact) string {
artifactsOnce[t][p].Do(func() { t.construct(p)
artifacts[t][p], versions[t][p] = artifactsM[p].f(t) return artifacts[t][p].v
})
return versions[t][p]
} }
// ResolveName returns a [PArtifact] by name. // ResolveName returns a [PArtifact] by name.

View File

@@ -128,6 +128,9 @@ type CMakeHelper struct {
Cache [][2]string Cache [][2]string
// Runs after install. // Runs after install.
Script string Script string
// Whether to generate Makefile instead.
Make bool
} }
var _ Helper = new(CMakeHelper) var _ Helper = new(CMakeHelper)
@@ -141,7 +144,10 @@ func (attr *CMakeHelper) name(name, version string) string {
} }
// extra returns a hardcoded slice of [CMake] and [Ninja]. // extra returns a hardcoded slice of [CMake] and [Ninja].
func (*CMakeHelper) extra(int) []PArtifact { func (attr *CMakeHelper) extra(int) []PArtifact {
if attr != nil && attr.Make {
return []PArtifact{CMake, Make}
}
return []PArtifact{CMake, Ninja} return []PArtifact{CMake, Ninja}
} }
@@ -173,11 +179,19 @@ func (attr *CMakeHelper) script(name string) string {
panic("CACHE must be non-empty") panic("CACHE must be non-empty")
} }
generate := "Ninja"
jobs := ""
if attr.Make {
generate = "'Unix Makefiles'"
jobs += ` "--parallel=$(nproc)"`
}
return ` return `
cmake -G Ninja \ cmake -G ` + generate + ` \
-DCMAKE_C_COMPILER_TARGET="${ROSA_TRIPLE}" \ -DCMAKE_C_COMPILER_TARGET="${ROSA_TRIPLE}" \
-DCMAKE_CXX_COMPILER_TARGET="${ROSA_TRIPLE}" \ -DCMAKE_CXX_COMPILER_TARGET="${ROSA_TRIPLE}" \
-DCMAKE_ASM_COMPILER_TARGET="${ROSA_TRIPLE}" \ -DCMAKE_ASM_COMPILER_TARGET="${ROSA_TRIPLE}" \
-DCMAKE_INSTALL_LIBDIR=lib \
` + strings.Join(slices.Collect(func(yield func(string) bool) { ` + strings.Join(slices.Collect(func(yield func(string) bool) {
for _, v := range attr.Cache { for _, v := range attr.Cache {
if !yield("-D" + v[0] + "=" + v[1]) { if !yield("-D" + v[0] + "=" + v[1]) {
@@ -185,9 +199,9 @@ cmake -G Ninja \
} }
} }
}), " \\\n\t") + ` \ }), " \\\n\t") + ` \
-DCMAKE_INSTALL_PREFIX=/work/system \ -DCMAKE_INSTALL_PREFIX=/system \
'/usr/src/` + name + `/` + path.Join(attr.Append...) + `' '/usr/src/` + name + `/` + path.Join(attr.Append...) + `'
cmake --build . cmake --build .` + jobs + `
cmake --install . cmake --install . --prefix=/work/system
` + attr.Script ` + attr.Script
} }

View File

@@ -4,24 +4,48 @@ import "hakurei.app/internal/pkg"
func (t Toolchain) newCurl() (pkg.Artifact, string) { func (t Toolchain) newCurl() (pkg.Artifact, string) {
const ( const (
version = "8.18.0" version = "8.19.0"
checksum = "YpOolP_sx1DIrCEJ3elgVAu0wTLDS-EZMZFvOP0eha7FaLueZUlEpuMwDzJNyi7i" checksum = "YHuVLVVp8q_Y7-JWpID5ReNjq2Zk6t7ArHB6ngQXilp_R5l3cubdxu3UKo-xDByv"
) )
return t.NewPackage("curl", version, pkg.NewHTTPGetTar( return t.NewPackage("curl", version, pkg.NewHTTPGetTar(
nil, "https://curl.se/download/curl-"+version+".tar.bz2", nil, "https://curl.se/download/curl-"+version+".tar.bz2",
mustDecode(checksum), mustDecode(checksum),
pkg.TarBzip2, pkg.TarBzip2,
), nil, &MakeHelper{ ), &PackageAttr{
Patches: [][2]string{
{"test459-misplaced-line-break", `diff --git a/tests/data/test459 b/tests/data/test459
index 7a2e1db7b3..cc716aa65a 100644
--- a/tests/data/test459
+++ b/tests/data/test459
@@ -54,8 +54,8 @@ Content-Type: application/x-www-form-urlencoded
arg
</protocol>
<stderr mode="text">
-Warning: %LOGDIR/config:1 Option 'data' uses argument with unquoted whitespace.%SP
-Warning: This may cause side-effects. Consider double quotes.
+Warning: %LOGDIR/config:1 Option 'data' uses argument with unquoted%SP
+Warning: whitespace. This may cause side-effects. Consider double quotes.
</stderr>
</verify>
</testcase>
`},
},
}, &MakeHelper{
Configure: [][2]string{ Configure: [][2]string{
{"with-openssl"}, {"with-openssl"},
{"with-ca-bundle", "/system/etc/ssl/certs/ca-bundle.crt"}, {"with-ca-bundle", "/system/etc/ssl/certs/ca-bundle.crt"},
{"disable-smb"},
}, },
Check: []string{ Check: []string{
"TFLAGS=-j256", `TFLAGS="-j$(expr "$(nproc)" '*' 2)"`,
"check", "test-nonflaky",
}, },
}, },
Perl, Perl,
Python,
PkgConfig,
Diffutils,
Libpsl, Libpsl,
OpenSSL, OpenSSL,
@@ -35,6 +59,11 @@ func init() {
Description: "command line tool and library for transferring data with URLs", Description: "command line tool and library for transferring data with URLs",
Website: "https://curl.se/", Website: "https://curl.se/",
Dependencies: P{
Libpsl,
OpenSSL,
},
ID: 381, ID: 381,
} }
} }

View File

@@ -46,6 +46,14 @@ func init() {
Description: "utilities and libraries to handle ELF files and DWARF data", Description: "utilities and libraries to handle ELF files and DWARF data",
Website: "https://sourceware.org/elfutils/", Website: "https://sourceware.org/elfutils/",
Dependencies: P{
Zlib,
Bzip2,
Zstd,
MuslFts,
MuslObstack,
},
ID: 5679, ID: 5679,
} }
} }

View File

@@ -36,9 +36,6 @@ index f135ad9..85c784c 100644
// makes assumptions about /etc/passwd // makes assumptions about /etc/passwd
SkipCheck: true, SkipCheck: true,
}, },
M4,
Perl,
Autoconf,
Automake, Automake,
Libtool, Libtool,
PkgConfig, PkgConfig,

View File

@@ -24,10 +24,6 @@ func (t Toolchain) newFuse() (pkg.Artifact, string) {
// this project uses pytest // this project uses pytest
SkipTest: true, SkipTest: true,
}, },
PythonIniConfig,
PythonPackaging,
PythonPluggy,
PythonPygments,
PythonPyTest, PythonPyTest,
KernelHeaders, KernelHeaders,

View File

@@ -52,16 +52,18 @@ disable_test t2200-add-update
`GIT_PROVE_OPTS="--jobs 32 --failures"`, `GIT_PROVE_OPTS="--jobs 32 --failures"`,
"prove", "prove",
}, },
Install: `make \
"-j$(nproc)" \
DESTDIR=/work \
NO_INSTALL_HARDLINKS=1 \
install`,
}, },
Perl,
Diffutils, Diffutils,
M4,
Autoconf, Autoconf,
Gettext, Gettext,
Zlib, Zlib,
Curl, Curl,
OpenSSL,
Libexpat, Libexpat,
), version ), version
} }
@@ -73,6 +75,12 @@ func init() {
Description: "distributed version control system", Description: "distributed version control system",
Website: "https://www.git-scm.com/", Website: "https://www.git-scm.com/",
Dependencies: P{
Zlib,
Curl,
Libexpat,
},
ID: 5350, ID: 5350,
} }
} }
@@ -82,14 +90,10 @@ func (t Toolchain) NewViaGit(
name, url, rev string, name, url, rev string,
checksum pkg.Checksum, checksum pkg.Checksum,
) pkg.Artifact { ) pkg.Artifact {
return t.New(name+"-"+rev, 0, []pkg.Artifact{ return t.New(name+"-"+rev, 0, t.AppendPresets(nil,
t.Load(NSSCACert), NSSCACert,
t.Load(OpenSSL), Git,
t.Load(Libpsl), ), &checksum, nil, `
t.Load(Curl),
t.Load(Libexpat),
t.Load(Git),
}, &checksum, nil, `
git \ git \
-c advice.detachedHead=false \ -c advice.detachedHead=false \
clone \ clone \

View File

@@ -117,6 +117,11 @@ func init() {
Description: "M4 macros to produce self-contained configure script", Description: "M4 macros to produce self-contained configure script",
Website: "https://www.gnu.org/software/autoconf/", Website: "https://www.gnu.org/software/autoconf/",
Dependencies: P{
M4,
Perl,
},
ID: 141, ID: 141,
} }
} }
@@ -143,8 +148,6 @@ test_disable '#!/bin/sh' t/distname.sh
test_disable '#!/bin/sh' t/pr9.sh test_disable '#!/bin/sh' t/pr9.sh
`, `,
}, (*MakeHelper)(nil), }, (*MakeHelper)(nil),
M4,
Perl,
Grep, Grep,
Gzip, Gzip,
Autoconf, Autoconf,
@@ -159,6 +162,10 @@ func init() {
Description: "a tool for automatically generating Makefile.in files", Description: "a tool for automatically generating Makefile.in files",
Website: "https://www.gnu.org/software/automake/", Website: "https://www.gnu.org/software/automake/",
Dependencies: P{
Autoconf,
},
ID: 144, ID: 144,
} }
} }
@@ -524,6 +531,11 @@ func init() {
Description: "the GNU square-wheel-reinvension of man pages", Description: "the GNU square-wheel-reinvension of man pages",
Website: "https://www.gnu.org/software/texinfo/", Website: "https://www.gnu.org/software/texinfo/",
Dependencies: P{
Perl,
Gawk,
},
ID: 4958, ID: 4958,
} }
} }
@@ -660,7 +672,6 @@ func (t Toolchain) newBC() (pkg.Artifact, string) {
Writable: true, Writable: true,
Chmod: true, Chmod: true,
}, (*MakeHelper)(nil), }, (*MakeHelper)(nil),
Perl,
Texinfo, Texinfo,
), version ), version
} }
@@ -678,8 +689,8 @@ func init() {
func (t Toolchain) newLibiconv() (pkg.Artifact, string) { func (t Toolchain) newLibiconv() (pkg.Artifact, string) {
const ( const (
version = "1.18" version = "1.19"
checksum = "iV5q3VxP5VPdJ-X7O5OQI4fGm8VjeYb5viLd1L3eAHg26bbHb2_Qn63XPF3ucVZr" checksum = "UibB6E23y4MksNqYmCCrA3zTFO6vJugD1DEDqqWYFZNuBsUWMVMcncb_5pPAr88x"
) )
return t.NewPackage("libiconv", version, pkg.NewHTTPGetTar( return t.NewPackage("libiconv", version, pkg.NewHTTPGetTar(
nil, "https://ftpmirror.gnu.org/gnu/libiconv/libiconv-"+version+".tar.gz", nil, "https://ftpmirror.gnu.org/gnu/libiconv/libiconv-"+version+".tar.gz",
@@ -741,6 +752,35 @@ func init() {
} }
} }
func (t Toolchain) newParallel() (pkg.Artifact, string) {
const (
version = "20260222"
checksum = "4wxjMi3G2zMxr9hvLcIn6D7_12A3e5UNObeTPhzn7mDAYwsZApmmkxfGPyllQQ7E"
)
return t.NewPackage("parallel", version, pkg.NewHTTPGetTar(
nil, "https://ftpmirror.gnu.org/gnu/parallel/parallel-"+version+".tar.bz2",
mustDecode(checksum),
pkg.TarBzip2,
), nil, (*MakeHelper)(nil),
Perl,
), version
}
func init() {
artifactsM[Parallel] = Metadata{
f: Toolchain.newParallel,
Name: "parallel",
Description: "a shell tool for executing jobs in parallel using one or more computers",
Website: "https://www.gnu.org/software/parallel/",
Dependencies: P{
Perl,
},
ID: 5448,
}
}
func (t Toolchain) newBinutils() (pkg.Artifact, string) { func (t Toolchain) newBinutils() (pkg.Artifact, string) {
const ( const (
version = "2.46.0" version = "2.46.0"
@@ -814,6 +854,10 @@ func init() {
Description: "a C library for multiple-precision floating-point computations", Description: "a C library for multiple-precision floating-point computations",
Website: "https://www.mpfr.org/", Website: "https://www.mpfr.org/",
Dependencies: P{
GMP,
},
ID: 2019, ID: 2019,
} }
} }
@@ -829,7 +873,6 @@ func (t Toolchain) newMPC() (pkg.Artifact, string) {
mustDecode(checksum), mustDecode(checksum),
pkg.TarGzip, pkg.TarGzip,
), nil, (*MakeHelper)(nil), ), nil, (*MakeHelper)(nil),
GMP,
MPFR, MPFR,
), version ), version
} }
@@ -841,6 +884,10 @@ func init() {
Description: "a C library for the arithmetic of complex numbers", Description: "a C library for the arithmetic of complex numbers",
Website: "https://www.multiprecision.org/", Website: "https://www.multiprecision.org/",
Dependencies: P{
MPFR,
},
ID: 1667, ID: 1667,
} }
} }
@@ -1038,10 +1085,7 @@ ln -s system/lib /work/
}, },
Binutils, Binutils,
GMP,
MPFR,
MPC, MPC,
Zlib, Zlib,
Libucontext, Libucontext,
KernelHeaders, KernelHeaders,
@@ -1055,6 +1099,14 @@ func init() {
Description: "The GNU Compiler Collection", Description: "The GNU Compiler Collection",
Website: "https://www.gnu.org/software/gcc/", Website: "https://www.gnu.org/software/gcc/",
Dependencies: P{
Binutils,
MPC,
Zlib,
Libucontext,
},
ID: 6502, ID: 6502,
} }
} }

View File

@@ -74,22 +74,8 @@ func (t Toolchain) newGoLatest() (pkg.Artifact, string) {
bootstrapExtra = append(bootstrapExtra, t.newGoBootstrap()) bootstrapExtra = append(bootstrapExtra, t.newGoBootstrap())
case "arm64": case "arm64":
bootstrapEnv = append(bootstrapEnv, bootstrapEnv = append(bootstrapEnv, "GOROOT_BOOTSTRAP=/system")
"GOROOT_BOOTSTRAP=/system", bootstrapExtra = t.AppendPresets(bootstrapExtra, gcc)
)
bootstrapExtra = append(bootstrapExtra,
t.Load(Binutils),
t.Load(GMP),
t.Load(MPFR),
t.Load(MPC),
t.Load(Zlib),
t.Load(Libucontext),
t.Load(gcc),
)
finalEnv = append(finalEnv, "CGO_ENABLED=0") finalEnv = append(finalEnv, "CGO_ENABLED=0")
default: default:

View File

@@ -9,8 +9,8 @@ import (
func (t Toolchain) newGLib() (pkg.Artifact, string) { func (t Toolchain) newGLib() (pkg.Artifact, string) {
const ( const (
version = "2.87.3" version = "2.87.5"
checksum = "iKSLpzZZVfmAZZmqfO1y6uHdlIks4hzPWrqeUCp4ZeQjrPFA3aAa4OmrBYMNS-Si" checksum = "L5jurSfyCTlcSTfx-1RBHbNZPL0HnNQakmFXidgAV1JFu0lbytowCCBAALTp-WGc"
) )
return t.NewPackage("glib", version, pkg.NewHTTPGet( return t.NewPackage("glib", version, pkg.NewHTTPGet(
nil, "https://download.gnome.org/sources/glib/"+ nil, "https://download.gnome.org/sources/glib/"+
@@ -56,6 +56,12 @@ func init() {
Description: "the GNU library of miscellaneous stuff", Description: "the GNU library of miscellaneous stuff",
Website: "https://developer.gnome.org/glib/", Website: "https://developer.gnome.org/glib/",
Dependencies: P{
PCRE2,
Libffi,
Zlib,
},
ID: 10024, ID: 10024,
} }
} }

View File

@@ -2,44 +2,45 @@ package rosa
import "hakurei.app/internal/pkg" import "hakurei.app/internal/pkg"
func (t Toolchain) newHakurei(suffix, script string) pkg.Artifact { func (t Toolchain) newHakurei(
return t.New("hakurei"+suffix+"-"+hakureiVersion, 0, []pkg.Artifact{ suffix, script string,
t.Load(Go), withHostname bool,
) pkg.Artifact {
t.Load(Gzip), hostname := `
t.Load(PkgConfig),
t.Load(KernelHeaders),
t.Load(Libseccomp),
t.Load(ACL),
t.Load(Attr),
t.Load(Fuse),
t.Load(Xproto),
t.Load(LibXau),
t.Load(XCBProto),
t.Load(XCB),
t.Load(Libffi),
t.Load(Libexpat),
t.Load(Libxml2),
t.Load(Wayland),
t.Load(WaylandProtocols),
}, nil, []string{
"CGO_ENABLED=1",
"GOCACHE=/tmp/gocache",
"CC=clang -O3 -Werror",
}, `
echo '# Building test helper (hostname).' echo '# Building test helper (hostname).'
go build -v -o /bin/hostname /usr/src/hostname/main.go go build -v -o /bin/hostname /usr/src/hostname/main.go
echo echo
`
if !withHostname {
hostname = ""
}
chmod -R +w /usr/src/hakurei return t.New("hakurei"+suffix+"-"+hakureiVersion, 0, t.AppendPresets(nil,
Go,
PkgConfig,
// dist tarball
Gzip,
// statically linked
Libseccomp,
ACL,
Fuse,
XCB,
Wayland,
WaylandProtocols,
KernelHeaders,
), nil, []string{
"CGO_ENABLED=1",
"GOCACHE=/tmp/gocache",
"CC=clang -O3 -Werror",
}, hostname+`
cd /usr/src/hakurei cd /usr/src/hakurei
HAKUREI_VERSION='v`+hakureiVersion+`' HAKUREI_VERSION='v`+hakureiVersion+`'
`+script, pkg.Path(AbsUsrSrc.Append("hakurei"), true, t.NewPatchedSource( `+script, pkg.Path(AbsUsrSrc.Append("hakurei"), true, t.NewPatchedSource(
"hakurei", hakureiVersion, hakureiSource, true, hakureiPatches..., "hakurei", hakureiVersion, hakureiSource, false, hakureiPatches...,
)), pkg.Path(AbsUsrSrc.Append("hostname", "main.go"), false, pkg.NewFile( )), pkg.Path(AbsUsrSrc.Append("hostname", "main.go"), false, pkg.NewFile(
"hostname.go", "hostname.go",
[]byte(` []byte(`
@@ -69,10 +70,11 @@ go build -trimpath -v -o /work/system/libexec/hakurei -ldflags="-s -w
-buildid= -buildid=
-linkmode external -linkmode external
-extldflags=-static -extldflags=-static
-X hakurei.app/internal/info.buildVersion="$HAKUREI_VERSION" -X hakurei.app/internal/info.buildVersion=${HAKUREI_VERSION}
-X hakurei.app/internal/info.hakureiPath=/system/bin/hakurei -X hakurei.app/internal/info.hakureiPath=/system/bin/hakurei
-X hakurei.app/internal/info.hsuPath=/system/bin/hsu -X hakurei.app/internal/info.hsuPath=/system/bin/hsu
-X main.hakureiPath=/system/bin/hakurei" ./... -X main.hakureiPath=/system/bin/hakurei
" ./...
echo echo
echo '# Testing hakurei.' echo '# Testing hakurei.'
@@ -84,7 +86,7 @@ mkdir -p /work/system/bin/
hakurei \ hakurei \
sharefs \ sharefs \
../../bin/) ../../bin/)
`), hakureiVersion `, true), hakureiVersion
}, },
Name: "hakurei", Name: "hakurei",
@@ -98,7 +100,7 @@ mkdir -p /work/system/bin/
return t.newHakurei("-dist", ` return t.newHakurei("-dist", `
export HAKUREI_VERSION export HAKUREI_VERSION
DESTDIR=/work /usr/src/hakurei/dist/release.sh DESTDIR=/work /usr/src/hakurei/dist/release.sh
`), hakureiVersion `, true), hakureiVersion
}, },
Name: "hakurei-dist", Name: "hakurei-dist",

View File

@@ -4,48 +4,15 @@ package rosa
import "hakurei.app/internal/pkg" import "hakurei.app/internal/pkg"
const hakureiVersion = "0.3.5" const hakureiVersion = "0.3.6"
// hakureiSource is the source code of a hakurei release. // hakureiSource is the source code of a hakurei release.
var hakureiSource = pkg.NewHTTPGetTar( var hakureiSource = pkg.NewHTTPGetTar(
nil, "https://git.gensokyo.uk/security/hakurei/archive/"+ nil, "https://git.gensokyo.uk/security/hakurei/archive/"+
"v"+hakureiVersion+".tar.gz", "v"+hakureiVersion+".tar.gz",
mustDecode("6Tn38NLezRD2d3aGdFg5qFfqn8_KvC6HwMKwJMPvaHmVw8xRgxn8B0PObswl2mOk"), mustDecode("Yul9J2yV0x453lQP9KUnG_wEJo_DbKMNM7xHJGt4rITCSeX9VRK2J4kzAxcv_0-b"),
pkg.TarGzip, pkg.TarGzip,
) )
// hakureiPatches are patches applied against a hakurei release. // hakureiPatches are patches applied against a hakurei release.
var hakureiPatches = [][2]string{ var hakureiPatches [][2]string
{"createTemp-error-injection", `diff --git a/container/dispatcher_test.go b/container/dispatcher_test.go
index 5de37fc..fe0c4db 100644
--- a/container/dispatcher_test.go
+++ b/container/dispatcher_test.go
@@ -238,8 +238,11 @@ func sliceAddr[S any](s []S) *[]S { return &s }
func newCheckedFile(t *testing.T, name, wantData string, closeErr error) osFile {
f := &checkedOsFile{t: t, name: name, want: wantData, closeErr: closeErr}
- // check happens in Close, and cleanup is not guaranteed to run, so relying on it for sloppy implementations will cause sporadic test results
- f.cleanup = runtime.AddCleanup(f, func(name string) { f.t.Fatalf("checkedOsFile %s became unreachable without a call to Close", name) }, f.name)
+ // check happens in Close, and cleanup is not guaranteed to run, so relying
+ // on it for sloppy implementations will cause sporadic test results
+ f.cleanup = runtime.AddCleanup(f, func(name string) {
+ panic("checkedOsFile " + name + " became unreachable without a call to Close")
+ }, name)
return f
}
diff --git a/container/initplace_test.go b/container/initplace_test.go
index afeddbe..1c2f20b 100644
--- a/container/initplace_test.go
+++ b/container/initplace_test.go
@@ -21,7 +21,7 @@ func TestTmpfileOp(t *testing.T) {
Path: samplePath,
Data: sampleData,
}, nil, nil, []stub.Call{
- call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.32768", sampleDataString, nil), stub.UniqueError(5)),
+ call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, (*checkedOsFile)(nil), stub.UniqueError(5)),
}, stub.UniqueError(5)},
{"Write", &Params{ParentPerm: 0700}, &TmpfileOp{
`},
}

View File

@@ -2,10 +2,32 @@ package rosa
import "hakurei.app/internal/pkg" import "hakurei.app/internal/pkg"
func init() {
artifactsM[EarlyInit] = Metadata{
Name: "earlyinit",
Description: "Rosa OS initramfs init program",
f: func(t Toolchain) (pkg.Artifact, string) {
return t.newHakurei("-early-init", `
mkdir -p /work/system/libexec/hakurei/
echo '# Building earlyinit.'
go build -trimpath -v -o /work/system/libexec/hakurei -ldflags="-s -w
-buildid=
-linkmode external
-extldflags=-static
-X hakurei.app/internal/info.buildVersion=${HAKUREI_VERSION}
" ./cmd/earlyinit
echo
`, false), Unversioned
},
}
}
func (t Toolchain) newImageInitramfs() (pkg.Artifact, string) { func (t Toolchain) newImageInitramfs() (pkg.Artifact, string) {
return t.New("initramfs", TNoToolchain, []pkg.Artifact{ return t.New("initramfs", TNoToolchain, []pkg.Artifact{
t.Load(Zstd), t.Load(Zstd),
t.Load(Hakurei), t.Load(EarlyInit),
t.Load(GenInitCPIO), t.Load(GenInitCPIO),
}, nil, nil, ` }, nil, nil, `
gen_init_cpio -t 4294967295 -c /usr/src/initramfs | zstd > /work/initramfs.zst gen_init_cpio -t 4294967295 -c /usr/src/initramfs | zstd > /work/initramfs.zst

View File

@@ -82,6 +82,11 @@ install -Dm0500 \
echo "Installing linux $1..." echo "Installing linux $1..."
cp -av "$2" "$4" cp -av "$2" "$4"
cp -av "$3" "$4" cp -av "$3" "$4"
`))),
pkg.Path(AbsUsrSrc.Append(
".depmod",
), false, pkg.NewFile("depmod", []byte(`#!/bin/sh
exec /system/sbin/depmod -m /lib/modules "$@"
`))), `))),
}, },
@@ -1210,6 +1215,11 @@ cgit 1.2.3-korg
"all", "all",
}, },
Install: ` Install: `
# kernel is not aware of kmod moduledir
install -Dm0500 \
/usr/src/.depmod \
/sbin/depmod
make \ make \
"-j$(nproc)" \ "-j$(nproc)" \
-f /usr/src/kernel/Makefile \ -f /usr/src/kernel/Makefile \
@@ -1217,9 +1227,10 @@ make \
LLVM=1 \ LLVM=1 \
INSTALL_PATH=/work \ INSTALL_PATH=/work \
install \ install \
INSTALL_MOD_PATH=/work \ INSTALL_MOD_PATH=/work/system \
DEPMOD=/sbin/depmod \
modules_install modules_install
rm -v /work/lib/modules/` + kernelVersion + `/build rm -v /work/system/lib/modules/` + kernelVersion + `/build
`, `,
}, },
Flex, Flex,
@@ -1235,13 +1246,9 @@ rm -v /work/lib/modules/` + kernelVersion + `/build
Python, Python,
XZ, XZ,
Zlib,
Gzip, Gzip,
Bzip2,
Zstd,
Kmod, Kmod,
Elfutils, Elfutils,
OpenSSL,
UtilLinux, UtilLinux,
KernelHeaders, KernelHeaders,
), kernelVersion ), kernelVersion
@@ -1272,3 +1279,53 @@ func init() {
Description: "a program in the kernel source tree for creating initramfs archive", Description: "a program in the kernel source tree for creating initramfs archive",
} }
} }
func (t Toolchain) newFirmware() (pkg.Artifact, string) {
const (
version = "20260309"
checksum = "M1az8BxSiOEH3LA11Trc5VAlakwAHhP7-_LKWg6k-SVIzU3xclMDO4Tiujw1gQrC"
)
return t.NewPackage("firmware", version, pkg.NewHTTPGetTar(
nil, "https://gitlab.com/kernel-firmware/linux-firmware/-/"+
"archive/"+version+"/linux-firmware-"+version+".tar.bz2",
mustDecode(checksum),
pkg.TarBzip2,
), &PackageAttr{
// dedup creates temporary file
Writable: true,
// does not use configure
EnterSource: true,
Env: []string{
"HOME=/proc/nonexistent",
},
}, &MakeHelper{
OmitDefaults: true,
SkipConfigure: true,
InPlace: true,
Make: []string{
"DESTDIR=/work/system",
"install-zst",
},
SkipCheck: true, // requires pre-commit
Install: `make "-j$(nproc)" DESTDIR=/work/system dedup`,
},
Parallel,
Rdfind,
Zstd,
Findutils,
Coreutils,
), version
}
func init() {
artifactsM[Firmware] = Metadata{
f: Toolchain.newFirmware,
Name: "firmware",
Description: "firmware blobs for use with the Linux kernel",
Website: "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/",
ID: 141464,
}
}

View File

@@ -1,16 +1,16 @@
# #
# Automatically generated file; DO NOT EDIT. # Automatically generated file; DO NOT EDIT.
# Linux/arm64 6.12.73 Kernel Configuration # Linux/arm64 6.12.76 Kernel Configuration
# #
CONFIG_CC_VERSION_TEXT="clang version 21.1.8" CONFIG_CC_VERSION_TEXT="clang version 22.1.0"
CONFIG_GCC_VERSION=0 CONFIG_GCC_VERSION=0
CONFIG_CC_IS_CLANG=y CONFIG_CC_IS_CLANG=y
CONFIG_CLANG_VERSION=210108 CONFIG_CLANG_VERSION=220100
CONFIG_AS_IS_LLVM=y CONFIG_AS_IS_LLVM=y
CONFIG_AS_VERSION=210108 CONFIG_AS_VERSION=220100
CONFIG_LD_VERSION=0 CONFIG_LD_VERSION=0
CONFIG_LD_IS_LLD=y CONFIG_LD_IS_LLD=y
CONFIG_LLD_VERSION=210108 CONFIG_LLD_VERSION=220100
CONFIG_RUSTC_VERSION=0 CONFIG_RUSTC_VERSION=0
CONFIG_RUSTC_LLVM_VERSION=0 CONFIG_RUSTC_LLVM_VERSION=0
CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
@@ -4984,7 +4984,7 @@ CONFIG_SERIAL_TEGRA_TCU=m
CONFIG_SERIAL_MAX3100=m CONFIG_SERIAL_MAX3100=m
CONFIG_SERIAL_MAX310X=m CONFIG_SERIAL_MAX310X=m
CONFIG_SERIAL_IMX=m CONFIG_SERIAL_IMX=m
CONFIG_SERIAL_IMX_CONSOLE=m # CONFIG_SERIAL_IMX_CONSOLE is not set
CONFIG_SERIAL_IMX_EARLYCON=y CONFIG_SERIAL_IMX_EARLYCON=y
CONFIG_SERIAL_UARTLITE=m CONFIG_SERIAL_UARTLITE=m
CONFIG_SERIAL_UARTLITE_NR_UARTS=1 CONFIG_SERIAL_UARTLITE_NR_UARTS=1
@@ -5772,6 +5772,7 @@ CONFIG_GPIO_MADERA=m
CONFIG_GPIO_MAX77650=m CONFIG_GPIO_MAX77650=m
CONFIG_GPIO_PMIC_EIC_SPRD=m CONFIG_GPIO_PMIC_EIC_SPRD=m
CONFIG_GPIO_SL28CPLD=m CONFIG_GPIO_SL28CPLD=m
CONFIG_GPIO_TN48M_CPLD=m
CONFIG_GPIO_TPS65086=m CONFIG_GPIO_TPS65086=m
CONFIG_GPIO_TPS65218=m CONFIG_GPIO_TPS65218=m
CONFIG_GPIO_TPS65219=m CONFIG_GPIO_TPS65219=m
@@ -6471,6 +6472,7 @@ CONFIG_MFD_MAX5970=m
# CONFIG_MFD_CS47L85 is not set # CONFIG_MFD_CS47L85 is not set
# CONFIG_MFD_CS47L90 is not set # CONFIG_MFD_CS47L90 is not set
# CONFIG_MFD_CS47L92 is not set # CONFIG_MFD_CS47L92 is not set
CONFIG_MFD_TN48M_CPLD=m
# CONFIG_MFD_DA9052_SPI is not set # CONFIG_MFD_DA9052_SPI is not set
CONFIG_MFD_DA9062=m CONFIG_MFD_DA9062=m
CONFIG_MFD_DA9063=m CONFIG_MFD_DA9063=m
@@ -12532,6 +12534,7 @@ CONFIG_RESET_SUNXI=y
CONFIG_RESET_TI_SCI=m CONFIG_RESET_TI_SCI=m
CONFIG_RESET_TI_SYSCON=m CONFIG_RESET_TI_SYSCON=m
CONFIG_RESET_TI_TPS380X=m CONFIG_RESET_TI_TPS380X=m
CONFIG_RESET_TN48M_CPLD=m
CONFIG_RESET_UNIPHIER=m CONFIG_RESET_UNIPHIER=m
CONFIG_RESET_UNIPHIER_GLUE=m CONFIG_RESET_UNIPHIER_GLUE=m
CONFIG_RESET_ZYNQMP=y CONFIG_RESET_ZYNQMP=y
@@ -14022,7 +14025,6 @@ CONFIG_LOCK_DEBUGGING_SUPPORT=y
# CONFIG_DEBUG_IRQFLAGS is not set # CONFIG_DEBUG_IRQFLAGS is not set
CONFIG_STACKTRACE=y CONFIG_STACKTRACE=y
# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set
# CONFIG_DEBUG_KOBJECT is not set # CONFIG_DEBUG_KOBJECT is not set
# #
@@ -14057,7 +14059,7 @@ CONFIG_USER_STACKTRACE_SUPPORT=y
CONFIG_NOP_TRACER=y CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y CONFIG_HAVE_FUNCTION_GRAPH_FREGS=y
CONFIG_HAVE_DYNAMIC_FTRACE=y CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y

View File

@@ -14,6 +14,7 @@ func (t Toolchain) newKmod() (pkg.Artifact, string) {
pkg.TarGzip, pkg.TarGzip,
), nil, &MesonHelper{ ), nil, &MesonHelper{
Setup: [][2]string{ Setup: [][2]string{
{"Dmoduledir", "/system/lib/modules"},
{"Dsysconfdir", "/system/etc"}, {"Dsysconfdir", "/system/etc"},
{"Dbashcompletiondir", "no"}, {"Dbashcompletiondir", "no"},
{"Dfishcompletiondir", "no"}, {"Dfishcompletiondir", "no"},
@@ -38,6 +39,12 @@ func init() {
Description: "a set of tools to handle common tasks with Linux kernel modules", Description: "a set of tools to handle common tasks with Linux kernel modules",
Website: "https://git.kernel.org/pub/scm/utils/kernel/kmod/kmod.git", Website: "https://git.kernel.org/pub/scm/utils/kernel/kmod/kmod.git",
Dependencies: P{
Zlib,
Zstd,
OpenSSL,
},
ID: 1517, ID: 1517,
} }
} }

View File

@@ -31,6 +31,10 @@ func init() {
Description: "an open source code library for the dynamic creation of images", Description: "an open source code library for the dynamic creation of images",
Website: "https://libgd.github.io/", Website: "https://libgd.github.io/",
Dependencies: P{
Zlib,
},
ID: 880, ID: 880,
} }
} }

View File

@@ -37,6 +37,10 @@ func init() {
Description: "an XSLT processor based on libxml2", Description: "an XSLT processor based on libxml2",
Website: "https://gitlab.gnome.org/GNOME/libxslt/", Website: "https://gitlab.gnome.org/GNOME/libxslt/",
Dependencies: P{
Libxml2,
},
ID: 13301, ID: 13301,
} }
} }

View File

@@ -75,12 +75,12 @@ func llvmFlagName(flag int) string {
const ( const (
llvmVersionMajor = "22" llvmVersionMajor = "22"
llvmVersion = llvmVersionMajor + ".1.0" llvmVersion = llvmVersionMajor + ".1.1"
) )
// newLLVMVariant returns a [pkg.Artifact] containing a LLVM variant. // newLLVMVariant returns a [pkg.Artifact] containing a LLVM variant.
func (t Toolchain) newLLVMVariant(variant string, attr *llvmAttr) pkg.Artifact { func (t Toolchain) newLLVMVariant(variant string, attr *llvmAttr) pkg.Artifact {
const checksum = "-_Tu5Lt8xkWoxm2VDVV7crh0WqZQbbblN3fYamMdPTDSy_54FAkD2ii7afSymPVV" const checksum = "bQvV6D8AZvQykg7-uQb_saTbVavnSo1ykNJ3g57F5iE-evU3HuOYtcRnVIXTK76e"
if attr == nil { if attr == nil {
panic("LLVM attr must be non-nil") panic("LLVM attr must be non-nil")
@@ -125,6 +125,8 @@ func (t Toolchain) newLLVMVariant(variant string, attr *llvmAttr) pkg.Artifact {
[2]string{"LLVM_INSTALL_BINUTILS_SYMLINKS", "ON"}, [2]string{"LLVM_INSTALL_BINUTILS_SYMLINKS", "ON"},
[2]string{"LLVM_INSTALL_CCTOOLS_SYMLINKS", "ON"}, [2]string{"LLVM_INSTALL_CCTOOLS_SYMLINKS", "ON"},
[2]string{"LLVM_LIT_ARGS", "'--verbose'"},
) )
} }
@@ -187,7 +189,6 @@ ln -s ld.lld /work/system/bin/ld
Append: cmakeAppend, Append: cmakeAppend,
Script: script + attr.script, Script: script + attr.script,
}, },
Libffi,
Python, Python,
Perl, Perl,
Diffutils, Diffutils,

View File

@@ -13,6 +13,7 @@ func (t Toolchain) newMeson() (pkg.Artifact, string) {
checksum = "w895BXF_icncnXatT_OLCFe2PYEtg4KrKooMgUYdN-nQVvbFX3PvYWHGEpogsHtd" checksum = "w895BXF_icncnXatT_OLCFe2PYEtg4KrKooMgUYdN-nQVvbFX3PvYWHGEpogsHtd"
) )
return t.New("meson-"+version, 0, []pkg.Artifact{ return t.New("meson-"+version, 0, []pkg.Artifact{
t.Load(Zlib),
t.Load(Python), t.Load(Python),
t.Load(Setuptools), t.Load(Setuptools),
}, nil, nil, ` }, nil, nil, `
@@ -37,6 +38,13 @@ func init() {
Description: "an open source build system", Description: "an open source build system",
Website: "https://mesonbuild.com/", Website: "https://mesonbuild.com/",
Dependencies: P{
Python,
PkgConfig,
CMake,
Ninja,
},
ID: 6472, ID: 6472,
} }
} }
@@ -65,14 +73,7 @@ func (*MesonHelper) name(name, version string) string {
// extra returns hardcoded meson runtime dependencies. // extra returns hardcoded meson runtime dependencies.
func (*MesonHelper) extra(int) []PArtifact { func (*MesonHelper) extra(int) []PArtifact {
return []PArtifact{ return []PArtifact{Meson}
Python,
Meson,
Ninja,
PkgConfig,
CMake,
}
} }
// wantsChmod returns false. // wantsChmod returns false.

View File

@@ -19,9 +19,6 @@ func (t Toolchain) newMuslFts() (pkg.Artifact, string) {
}, &MakeHelper{ }, &MakeHelper{
Generate: "./bootstrap.sh", Generate: "./bootstrap.sh",
}, },
M4,
Perl,
Autoconf,
Automake, Automake,
Libtool, Libtool,
PkgConfig, PkgConfig,

View File

@@ -19,9 +19,6 @@ func (t Toolchain) newMuslObstack() (pkg.Artifact, string) {
}, &MakeHelper{ }, &MakeHelper{
Generate: "./bootstrap.sh", Generate: "./bootstrap.sh",
}, },
M4,
Perl,
Autoconf,
Automake, Automake,
Libtool, Libtool,
PkgConfig, PkgConfig,

35
internal/rosa/nettle.go Normal file
View File

@@ -0,0 +1,35 @@
package rosa
import "hakurei.app/internal/pkg"
func (t Toolchain) newNettle() (pkg.Artifact, string) {
const (
version = "4.0"
checksum = "6agC-vHzzoqAlaX3K9tX8yHgrm03HLqPZzVzq8jh_ePbuPMIvpxereu_uRJFmQK7"
)
return t.NewPackage("nettle", version, pkg.NewHTTPGetTar(
nil, "https://ftpmirror.gnu.org/gnu/nettle/nettle-"+version+".tar.gz",
mustDecode(checksum),
pkg.TarGzip,
), nil, (*MakeHelper)(nil),
M4,
Diffutils,
GMP,
), version
}
func init() {
artifactsM[Nettle] = Metadata{
f: Toolchain.newNettle,
Name: "nettle",
Description: "a low-level cryptographic library",
Website: "https://www.lysator.liu.se/~nisse/nettle/",
Dependencies: P{
GMP,
},
ID: 2073,
}
}

View File

@@ -75,6 +75,10 @@ func init() {
Description: "Network Security Services", Description: "Network Security Services",
Website: "https://firefox-source-docs.mozilla.org/security/nss/index.html", Website: "https://firefox-source-docs.mozilla.org/security/nss/index.html",
Dependencies: P{
Zlib,
},
ID: 2503, ID: 2503,
} }
} }
@@ -84,7 +88,7 @@ func init() {
artifactsM[buildcatrust] = newViaPip( artifactsM[buildcatrust] = newViaPip(
"buildcatrust", "buildcatrust",
"transform certificate stores between formats", "transform certificate stores between formats",
version, "none", "any", version, "py3", "none", "any",
"k_FGzkRCLjbTWBkuBLzQJ1S8FPAz19neJZlMHm0t10F2Y0hElmvVwdSBRc03Rjo1", "k_FGzkRCLjbTWBkuBLzQJ1S8FPAz19neJZlMHm0t10F2Y0hElmvVwdSBRc03Rjo1",
"https://github.com/nix-community/buildcatrust/"+ "https://github.com/nix-community/buildcatrust/"+
"releases/download/v"+version+"/", "releases/download/v"+version+"/",
@@ -92,13 +96,12 @@ func init() {
} }
func (t Toolchain) newNSSCACert() (pkg.Artifact, string) { func (t Toolchain) newNSSCACert() (pkg.Artifact, string) {
return t.New("nss-cacert", 0, []pkg.Artifact{ return t.New("nss-cacert", 0, t.AppendPresets(nil,
t.Load(Bash), Bash,
t.Load(Python),
t.Load(NSS), NSS,
t.Load(buildcatrust), buildcatrust,
}, nil, nil, ` ), nil, nil, `
mkdir -p /work/system/etc/ssl/{certs/unbundled,certs/hashed,trust-source} mkdir -p /work/system/etc/ssl/{certs/unbundled,certs/hashed,trust-source}
buildcatrust \ buildcatrust \
--certdata_input /system/nss/certdata.txt \ --certdata_input /system/nss/certdata.txt \

View File

@@ -8,8 +8,8 @@ import (
func (t Toolchain) newPerl() (pkg.Artifact, string) { func (t Toolchain) newPerl() (pkg.Artifact, string) {
const ( const (
version = "5.42.0" version = "5.42.1"
checksum = "2KR7Jbpk-ZVn1a30LQRwbgUvg2AXlPQZfzrqCr31qD5-yEsTwVQ_W76eZH-EdxM9" checksum = "FsJVq5CZFA7nZklfUl1eC6z2ECEu02XaB1pqfHSKtRLZWpnaBjlB55QOhjKpjkQ2"
) )
return t.NewPackage("perl", version, pkg.NewHTTPGetTar( return t.NewPackage("perl", version, pkg.NewHTTPGetTar(
nil, "https://www.cpan.org/src/5.0/perl-"+version+".tar.gz", nil, "https://www.cpan.org/src/5.0/perl-"+version+".tar.gz",
@@ -68,14 +68,14 @@ func (t Toolchain) newViaPerlModuleBuild(
name, version string, name, version string,
source pkg.Artifact, source pkg.Artifact,
patches [][2]string, patches [][2]string,
extra ...pkg.Artifact, extra ...PArtifact,
) pkg.Artifact { ) pkg.Artifact {
if name == "" || version == "" { if name == "" || version == "" {
panic("names must be non-empty") panic("names must be non-empty")
} }
return t.New("perl-"+name, 0, slices.Concat(extra, []pkg.Artifact{ return t.New("perl-"+name, 0, t.AppendPresets(nil,
t.Load(Perl), slices.Concat(P{Perl}, extra)...,
}), nil, nil, ` ), nil, nil, `
cd /usr/src/`+name+` cd /usr/src/`+name+`
perl Build.PL --prefix=/system perl Build.PL --prefix=/system
./Build build ./Build build
@@ -105,6 +105,10 @@ func init() {
Name: "perl-Module::Build", Name: "perl-Module::Build",
Description: "build and install Perl modules", Description: "build and install Perl modules",
Website: "https://metacpan.org/release/Module-Build", Website: "https://metacpan.org/release/Module-Build",
Dependencies: P{
Perl,
},
} }
} }
@@ -267,6 +271,10 @@ func init() {
Name: "perl-Text::WrapI18N", Name: "perl-Text::WrapI18N",
Description: "line wrapping module", Description: "line wrapping module",
Website: "https://metacpan.org/release/Text-WrapI18N", Website: "https://metacpan.org/release/Text-WrapI18N",
Dependencies: P{
PerlTextCharWidth,
},
} }
} }
@@ -313,6 +321,10 @@ func init() {
Name: "perl-Unicode::GCString", Name: "perl-Unicode::GCString",
Description: "String as Sequence of UAX #29 Grapheme Clusters", Description: "String as Sequence of UAX #29 Grapheme Clusters",
Website: "https://metacpan.org/release/Unicode-LineBreak", Website: "https://metacpan.org/release/Unicode-LineBreak",
Dependencies: P{
PerlMIMECharset,
},
} }
} }

View File

@@ -18,9 +18,6 @@ func (t Toolchain) newProcps() (pkg.Artifact, string) {
{"without-ncurses"}, {"without-ncurses"},
}, },
}, },
M4,
Perl,
Autoconf,
Automake, Automake,
Gettext, Gettext,
Libtool, Libtool,

View File

@@ -53,11 +53,11 @@ func (t Toolchain) newPython() (pkg.Artifact, string) {
Check: []string{"test"}, Check: []string{"test"},
}, },
Zlib, Zlib,
Bzip2,
Libffi, Libffi,
OpenSSL,
PkgConfig, PkgConfig,
OpenSSL,
Bzip2,
XZ, XZ,
), version ), version
} }
@@ -69,26 +69,28 @@ func init() {
Description: "the Python programming language interpreter", Description: "the Python programming language interpreter",
Website: "https://www.python.org/", Website: "https://www.python.org/",
Dependencies: P{
Zlib,
Bzip2,
Libffi,
OpenSSL,
},
ID: 13254, ID: 13254,
} }
} }
// newViaPip is a helper for installing python dependencies via pip. // newViaPip is a helper for installing python dependencies via pip.
func newViaPip( func newViaPip(
name, description, version, abi, platform, checksum, prefix string, name, description, version, interpreter, abi, platform, checksum, prefix string,
extra ...PArtifact, extra ...PArtifact,
) Metadata { ) Metadata {
wname := name + "-" + version + "-py3-" + abi + "-" + platform + ".whl" wname := name + "-" + version + "-" + interpreter + "-" + abi + "-" + platform + ".whl"
return Metadata{ return Metadata{
f: func(t Toolchain) (pkg.Artifact, string) { f: func(t Toolchain) (pkg.Artifact, string) {
extraRes := make([]pkg.Artifact, len(extra)) return t.New(name+"-"+version, 0, t.AppendPresets(nil,
for i, p := range extra { slices.Concat(P{Python}, extra)...,
extraRes[i] = t.Load(p) ), nil, nil, `
}
return t.New(name+"-"+version, 0, slices.Concat([]pkg.Artifact{
t.Load(Python),
}, extraRes), nil, nil, `
pip3 install \ pip3 install \
--no-index \ --no-index \
--prefix=/system \ --prefix=/system \
@@ -103,17 +105,19 @@ pip3 install \
Name: "python-" + name, Name: "python-" + name,
Description: description, Description: description,
Website: "https://pypi.org/project/" + name + "/", Website: "https://pypi.org/project/" + name + "/",
Dependencies: slices.Concat(P{Python}, extra),
} }
} }
func (t Toolchain) newSetuptools() (pkg.Artifact, string) { func (t Toolchain) newSetuptools() (pkg.Artifact, string) {
const ( const (
version = "82.0.0" version = "82.0.1"
checksum = "K9f8Yi7Gg95zjmQsE1LLw9UBb8NglI6EY6pQpdD6DM0Pmc_Td5w2qs1SMngTI6Jp" checksum = "nznP46Tj539yqswtOrIM4nQgwLA1h-ApKX7z7ghazROCpyF5swtQGwsZoI93wkhc"
) )
return t.New("setuptools-"+version, 0, []pkg.Artifact{ return t.New("setuptools-"+version, 0, t.AppendPresets(nil,
t.Load(Python), Python,
}, nil, nil, ` ), nil, nil, `
pip3 install \ pip3 install \
--no-index \ --no-index \
--prefix=/system \ --prefix=/system \
@@ -130,10 +134,14 @@ func init() {
artifactsM[Setuptools] = Metadata{ artifactsM[Setuptools] = Metadata{
f: Toolchain.newSetuptools, f: Toolchain.newSetuptools,
Name: "setuptools", Name: "python-setuptools",
Description: "the autotools of the Python ecosystem", Description: "the autotools of the Python ecosystem",
Website: "https://pypi.org/project/setuptools/", Website: "https://pypi.org/project/setuptools/",
Dependencies: P{
Python,
},
ID: 4021, ID: 4021,
} }
} }
@@ -142,7 +150,7 @@ func init() {
artifactsM[PythonPygments] = newViaPip( artifactsM[PythonPygments] = newViaPip(
"pygments", "pygments",
" a syntax highlighting package written in Python", " a syntax highlighting package written in Python",
"2.19.2", "none", "any", "2.19.2", "py3", "none", "any",
"ak_lwTalmSr7W4Mjy2XBZPG9I6a0gwSy2pS87N8x4QEuZYif0ie9z0OcfRfi9msd", "ak_lwTalmSr7W4Mjy2XBZPG9I6a0gwSy2pS87N8x4QEuZYif0ie9z0OcfRfi9msd",
"https://files.pythonhosted.org/packages/"+ "https://files.pythonhosted.org/packages/"+
"c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/", "c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/",
@@ -151,7 +159,7 @@ func init() {
artifactsM[PythonPluggy] = newViaPip( artifactsM[PythonPluggy] = newViaPip(
"pluggy", "pluggy",
"the core framework used by the pytest, tox, and devpi projects", "the core framework used by the pytest, tox, and devpi projects",
"1.6.0", "none", "any", "1.6.0", "py3", "none", "any",
"2HWYBaEwM66-y1hSUcWI1MyE7dVVuNNRW24XD6iJBey4YaUdAK8WeXdtFMQGC-4J", "2HWYBaEwM66-y1hSUcWI1MyE7dVVuNNRW24XD6iJBey4YaUdAK8WeXdtFMQGC-4J",
"https://files.pythonhosted.org/packages/"+ "https://files.pythonhosted.org/packages/"+
"54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/", "54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/",
@@ -160,7 +168,7 @@ func init() {
artifactsM[PythonPackaging] = newViaPip( artifactsM[PythonPackaging] = newViaPip(
"packaging", "packaging",
"reusable core utilities for various Python Packaging interoperability specifications", "reusable core utilities for various Python Packaging interoperability specifications",
"26.0", "none", "any", "26.0", "py3", "none", "any",
"iVVXcqdwHDskPKoCFUlh2x8J0Gyq-bhO4ns9DvUJ7oJjeOegRYtSIvLV33Bki-pP", "iVVXcqdwHDskPKoCFUlh2x8J0Gyq-bhO4ns9DvUJ7oJjeOegRYtSIvLV33Bki-pP",
"https://files.pythonhosted.org/packages/"+ "https://files.pythonhosted.org/packages/"+
"b7/b9/c538f279a4e237a006a2c98387d081e9eb060d203d8ed34467cc0f0b9b53/", "b7/b9/c538f279a4e237a006a2c98387d081e9eb060d203d8ed34467cc0f0b9b53/",
@@ -169,15 +177,16 @@ func init() {
artifactsM[PythonIniConfig] = newViaPip( artifactsM[PythonIniConfig] = newViaPip(
"iniconfig", "iniconfig",
"a small and simple INI-file parser module", "a small and simple INI-file parser module",
"2.3.0", "none", "any", "2.3.0", "py3", "none", "any",
"SDgs4S5bXi77aVOeKTPv2TUrS3M9rduiK4DpU0hCmDsSBWqnZcWInq9lsx6INxut", "SDgs4S5bXi77aVOeKTPv2TUrS3M9rduiK4DpU0hCmDsSBWqnZcWInq9lsx6INxut",
"https://files.pythonhosted.org/packages/"+ "https://files.pythonhosted.org/packages/"+
"cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/", "cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/",
) )
artifactsM[PythonPyTest] = newViaPip( artifactsM[PythonPyTest] = newViaPip(
"pytest", "pytest",
"the pytest framework", "the pytest framework",
"9.0.2", "none", "any", "9.0.2", "py3", "none", "any",
"IM2wDbLke1EtZhF92zvAjUl_Hms1uKDtM7U8Dt4acOaChMnDg1pW7ib8U0wYGDLH", "IM2wDbLke1EtZhF92zvAjUl_Hms1uKDtM7U8Dt4acOaChMnDg1pW7ib8U0wYGDLH",
"https://files.pythonhosted.org/packages/"+ "https://files.pythonhosted.org/packages/"+
"3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/", "3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/",
@@ -186,4 +195,103 @@ func init() {
PythonPluggy, PythonPluggy,
PythonPygments, PythonPygments,
) )
artifactsM[PythonCfgv] = newViaPip(
"cfgv",
"validate configuration and produce human readable error messages",
"3.5.0", "py2.py3", "none", "any",
"yFKTyVRlmnLKAxvvge15kAd_GOP1Xh3fZ0NFImO5pBdD5e0zj3GRmA6Q1HdtLTYO",
"https://files.pythonhosted.org/packages/"+
"db/3c/33bac158f8ab7f89b2e59426d5fe2e4f63f7ed25df84c036890172b412b5/",
)
artifactsM[PythonIdentify] = newViaPip(
"identify",
"file identification library for Python",
"2.6.17", "py2.py3", "none", "any",
"9RxK3igO-Pxxof5AuCAGiF_L1SWi4SpuSF1fWNXCzE2D4oTRSob-9VpFMLlybrSv",
"https://files.pythonhosted.org/packages/"+
"40/66/71c1227dff78aaeb942fed29dd5651f2aec166cc7c9aeea3e8b26a539b7d/",
)
artifactsM[PythonNodeenv] = newViaPip(
"nodeenv",
"a tool to create isolated node.js environments",
"1.10.0", "py2.py3", "none", "any",
"ihUb4-WQXYIhYOOKSsXlKIzjzQieOYl6ojro9H-0DFzGheaRTtuyZgsCmriq58sq",
"https://files.pythonhosted.org/packages/"+
"88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/",
)
artifactsM[PythonPyYAML] = newViaPip(
"pyyaml",
"a complete YAML 1.1 parser",
"6.0.3", "cp314", "cp314", "musllinux_1_2_x86_64",
"4_jhCFpUNtyrFp2HOMqUisR005u90MHId53eS7rkUbcGXkoaJ7JRsY21dREHEfGN",
"https://files.pythonhosted.org/packages/"+
"d7/ce/af88a49043cd2e265be63d083fc75b27b6ed062f5f9fd6cdc223ad62f03e/",
)
artifactsM[PythonDistlib] = newViaPip(
"distlib",
"used as the basis for third-party packaging tools",
"0.4.0", "py2.py3", "none", "any",
"lGLLfYVhUhXOTw_84zULaH2K8n6pk1OOVXmJfGavev7N42msbtHoq-XY5D_xULI_",
"https://files.pythonhosted.org/packages/"+
"33/6b/e0547afaf41bf2c42e52430072fa5658766e3d65bd4b03a563d1b6336f57/",
)
artifactsM[PythonFilelock] = newViaPip(
"filelock",
"a platform-independent file locking library for Python",
"3.25.0", "py3", "none", "any",
"0gSQIYNUEjOs1JBxXjGwfLnwFPFINwqyU_Zqgj7fT_EGafv_HaD5h3Xv2Rq_qQ44",
"https://files.pythonhosted.org/packages/"+
"f9/0b/de6f54d4a8bedfe8645c41497f3c18d749f0bd3218170c667bf4b81d0cdd/",
)
artifactsM[PythonPlatformdirs] = newViaPip(
"platformdirs",
"a Python package for determining platform-specific directories",
"4.9.4", "py3", "none", "any",
"JGNpMCX2JMn-7c9bk3QzOSNDgJRR_5lH-jIqfy0zXMZppRCdLsTNbdp4V7QFwxOI",
"https://files.pythonhosted.org/packages/"+
"63/d7/97f7e3a6abb67d8080dd406fd4df842c2be0efaf712d1c899c32a075027c/",
)
artifactsM[PythonDiscovery] = newViaPip(
"python_discovery",
"looks for a python installation",
"1.1.1", "py3", "none", "any",
"Jk_qGMfZYm0fdNOSvMdVQZuQbJlqu3NWRm7T2fRtiBXmHLQyOdJE3ypI_it1OJR0",
"https://files.pythonhosted.org/packages/"+
"75/0f/2bf7e3b5a4a65f623cb820feb5793e243fad58ae561015ee15a6152f67a2/",
PythonFilelock,
PythonPlatformdirs,
)
artifactsM[PythonVirtualenv] = newViaPip(
"virtualenv",
"a tool for creating isolated virtual python environments",
"21.1.0", "py3", "none", "any",
"SLvdr3gJZ7GTS-kiRyq2RvJdrQ8SZYC1pglbViWCMLCuAIcbLNjVEUJZ4hDtKUxm",
"https://files.pythonhosted.org/packages/"+
"78/55/896b06bf93a49bec0f4ae2a6f1ed12bd05c8860744ac3a70eda041064e4d/",
PythonDistlib,
PythonDiscovery,
)
artifactsM[PythonPreCommit] = newViaPip(
"pre_commit",
"a framework for managing and maintaining multi-language pre-commit hooks",
"4.5.1", "py2.py3", "none", "any",
"9G2Hv5JpvXFZVfw4pv_KAsmHD6bvot9Z0YBDmW6JeJizqTA4xEQCKel-pCERqQFK",
"https://files.pythonhosted.org/packages/"+
"5d/19/fd3ef348460c80af7bb4669ea7926651d1f95c23ff2df18b9d24bab4f3fa/",
PythonCfgv,
PythonIdentify,
PythonNodeenv,
PythonPyYAML,
PythonVirtualenv,
)
} }

View File

@@ -74,21 +74,16 @@ EOF
Bash, Bash,
Python, Python,
Ninja, Ninja,
Bzip2,
PkgConfig, PkgConfig,
Diffutils, Diffutils,
OpenSSL, OpenSSL,
Bzip2,
XZ, XZ,
Flex, Flex,
Bison, Bison,
M4, M4,
PCRE2,
Libffi,
Zlib,
GLib, GLib,
Zstd, Zstd,
DTC, DTC,
@@ -103,6 +98,11 @@ func init() {
Description: "a generic and open source machine emulator and virtualizer", Description: "a generic and open source machine emulator and virtualizer",
Website: "https://www.qemu.org/", Website: "https://www.qemu.org/",
Dependencies: P{
GLib,
Zstd,
},
ID: 13607, ID: 13607,
} }
} }

37
internal/rosa/rdfind.go Normal file
View File

@@ -0,0 +1,37 @@
package rosa
import "hakurei.app/internal/pkg"
func (t Toolchain) newRdfind() (pkg.Artifact, string) {
const (
version = "1.8.0"
checksum = "PoaeJ2WIG6yyfe5VAYZlOdAQiR3mb3WhAUMj2ziTCx_IIEal4640HMJUb4SzU9U3"
)
return t.NewPackage("rdfind", version, pkg.NewHTTPGetTar(
nil, "https://rdfind.pauldreik.se/rdfind-"+version+".tar.gz",
mustDecode(checksum),
pkg.TarGzip,
), nil, &MakeHelper{
// test suite hard codes /bin/echo
ScriptCheckEarly: `
ln -s ../system/bin/toybox /bin/echo
`,
},
Nettle,
), version
}
func init() {
artifactsM[Rdfind] = Metadata{
f: Toolchain.newRdfind,
Name: "rdfind",
Description: "a program that finds duplicate files",
Website: "https://rdfind.pauldreik.se/",
Dependencies: P{
Nettle,
},
ID: 231641,
}
}

View File

@@ -8,6 +8,7 @@ import (
"slices" "slices"
"strconv" "strconv"
"strings" "strings"
"sync"
"hakurei.app/container/fhs" "hakurei.app/container/fhs"
"hakurei.app/internal/pkg" "hakurei.app/internal/pkg"
@@ -19,6 +20,9 @@ const (
// kindBusyboxBin is the kind of [pkg.Artifact] of busyboxBin. // kindBusyboxBin is the kind of [pkg.Artifact] of busyboxBin.
kindBusyboxBin kindBusyboxBin
// kindCollection is the kind of [Collect]. It never cures successfully.
kindCollection
) )
// mustDecode is like [pkg.MustDecode], but replaces the zero value and prints // mustDecode is like [pkg.MustDecode], but replaces the zero value and prints
@@ -454,6 +458,48 @@ type PackageAttr struct {
Flag int Flag int
} }
// pa holds whether a [PArtifact] is present.
type pa = [PresetEnd]bool
// paPool holds addresses of pa.
var paPool = sync.Pool{New: func() any { return new(pa) }}
// paGet returns the address of a new pa.
func paGet() *pa { return paPool.Get().(*pa) }
// paPut returns a pa to paPool.
func paPut(pv *pa) { *pv = pa{}; paPool.Put(pv) }
// appendPreset recursively appends a [PArtifact] and its runtime dependencies.
func (t Toolchain) appendPreset(
a []pkg.Artifact,
pv *pa, p PArtifact,
) []pkg.Artifact {
if pv[p] {
return a
}
pv[p] = true
for _, d := range GetMetadata(p).Dependencies {
a = t.appendPreset(a, pv, d)
}
return append(a, t.Load(p))
}
// AppendPresets recursively appends multiple [PArtifact] and their runtime
// dependencies.
func (t Toolchain) AppendPresets(
a []pkg.Artifact,
presets ...PArtifact,
) []pkg.Artifact {
pv := paGet()
for _, p := range presets {
a = t.appendPreset(a, pv, p)
}
paPut(pv)
return a
}
// NewPackage constructs a [pkg.Artifact] via a build system helper. // NewPackage constructs a [pkg.Artifact] via a build system helper.
func (t Toolchain) NewPackage( func (t Toolchain) NewPackage(
name, version string, name, version string,
@@ -486,12 +532,14 @@ func (t Toolchain) NewPackage(
extraRes := make([]pkg.Artifact, 0, dc) extraRes := make([]pkg.Artifact, 0, dc)
extraRes = append(extraRes, attr.NonStage0...) extraRes = append(extraRes, attr.NonStage0...)
if !t.isStage0() { if !t.isStage0() {
pv := paGet()
for _, p := range helper.extra(attr.Flag) { for _, p := range helper.extra(attr.Flag) {
extraRes = append(extraRes, t.Load(p)) extraRes = t.appendPreset(extraRes, pv, p)
} }
for _, p := range extra { for _, p := range extra {
extraRes = append(extraRes, t.Load(p)) extraRes = t.appendPreset(extraRes, pv, p)
} }
paPut(pv)
} }
var scriptEarly string var scriptEarly string
@@ -543,3 +591,29 @@ cd '/usr/src/` + name + `/'
})..., })...,
) )
} }
// Collected is returned by [Collect.Cure] to indicate a successful collection.
type Collected struct{}
// Error returns a constant string to satisfy error, but should never be seen
// by the user.
func (Collected) Error() string { return "artifacts successfully collected" }
// Collect implements [pkg.FloodArtifact] to concurrently cure multiple
// [pkg.Artifact]. It returns [Collected].
type Collect []pkg.Artifact
// Cure returns [Collected].
func (*Collect) Cure(*pkg.FContext) error { return Collected{} }
// Kind returns the hardcoded [pkg.Kind] value.
func (*Collect) Kind() pkg.Kind { return kindCollection }
// Params does not write anything, dependencies are already represented in the header.
func (*Collect) Params(*pkg.IContext) {}
// Dependencies returns [Collect] as is.
func (c *Collect) Dependencies() []pkg.Artifact { return *c }
// IsExclusive returns false: Cure is a noop.
func (*Collect) IsExclusive() bool { return false }

View File

@@ -48,6 +48,12 @@ func init() {
Description: "tools to create and extract Squashfs filesystems", Description: "tools to create and extract Squashfs filesystems",
Website: "https://github.com/plougher/squashfs-tools", Website: "https://github.com/plougher/squashfs-tools",
Dependencies: P{
Zstd,
Gzip,
Zlib,
},
ID: 4879, ID: 4879,
} }
} }

View File

@@ -15,6 +15,7 @@ func (t Toolchain) newStage0() (pkg.Artifact, string) {
runtimes, runtimes,
clang, clang,
t.Load(Zlib),
t.Load(Bzip2), t.Load(Bzip2),
t.Load(Patch), t.Load(Patch),

View File

@@ -8,13 +8,13 @@ import (
func (t Toolchain) newTamaGo() (pkg.Artifact, string) { func (t Toolchain) newTamaGo() (pkg.Artifact, string) {
const ( const (
version = "1.26.0" version = "1.26.1"
checksum = "5XkfbpTpSdPJfwtTfUegfdu4LUy8nuZ7sCondiRIxTJI9eQONi8z_O_dq9yDkjw8" checksum = "fimZnklQcYWGsTQU8KepLn-yCYaTfNdMI9DCg6NJVQv-3gOJnUEO9mqRCMAHnEXZ"
) )
return t.New("tamago-go"+version, 0, []pkg.Artifact{ return t.New("tamago-go"+version, 0, t.AppendPresets(nil,
t.Load(Bash), Bash,
t.Load(Go), Go,
}, nil, []string{ ), nil, []string{
"CC=cc", "CC=cc",
"GOCACHE=/tmp/gocache", "GOCACHE=/tmp/gocache",
}, ` }, `
@@ -44,5 +44,7 @@ func init() {
Name: "tamago", Name: "tamago",
Description: "a Go toolchain extended with support for bare metal execution", Description: "a Go toolchain extended with support for bare metal execution",
Website: "https://github.com/usbarmory/tamago-go", Website: "https://github.com/usbarmory/tamago-go",
ID: 388872,
} }
} }

View File

@@ -11,10 +11,10 @@ func (t Toolchain) newUnzip() (pkg.Artifact, string) {
version = "6.0" version = "6.0"
checksum = "fcqjB1IOVRNJ16K5gTGEDt3zCJDVBc7EDSra9w3H93stqkNwH1vaPQs_QGOpQZu1" checksum = "fcqjB1IOVRNJ16K5gTGEDt3zCJDVBc7EDSra9w3H93stqkNwH1vaPQs_QGOpQZu1"
) )
return t.New("unzip-"+version, 0, []pkg.Artifact{ return t.New("unzip-"+version, 0, t.AppendPresets(nil,
t.Load(Make), Make,
t.Load(Coreutils), Coreutils,
}, nil, nil, ` ), nil, nil, `
cd /usr/src/unzip/ cd /usr/src/unzip/
unix/configure unix/configure
make -f unix/Makefile generic1 make -f unix/Makefile generic1

View File

@@ -42,6 +42,12 @@ func init() {
Description: "core Wayland window system code and protocol", Description: "core Wayland window system code and protocol",
Website: "https://wayland.freedesktop.org/", Website: "https://wayland.freedesktop.org/",
Dependencies: P{
Libffi,
Libexpat,
Libxml2,
},
ID: 10061, ID: 10061,
} }
} }
@@ -112,9 +118,6 @@ GitLab
}, },
}, (*MesonHelper)(nil), }, (*MesonHelper)(nil),
Wayland, Wayland,
Libffi,
Libexpat,
Libxml2,
), version ), version
} }
func init() { func init() {

View File

@@ -40,9 +40,6 @@ func (t Toolchain) newXproto() (pkg.Artifact, string) {
// ancient configure script // ancient configure script
Generate: "autoreconf -if", Generate: "autoreconf -if",
}, },
M4,
Perl,
Autoconf,
Automake, Automake,
PkgConfig, PkgConfig,
@@ -75,9 +72,6 @@ func (t Toolchain) newLibXau() (pkg.Artifact, string) {
// ancient configure script // ancient configure script
Generate: "autoreconf -if", Generate: "autoreconf -if",
}, },
M4,
Perl,
Autoconf,
Automake, Automake,
Libtool, Libtool,
PkgConfig, PkgConfig,
@@ -94,6 +88,10 @@ func init() {
Description: "functions for handling Xauthority files and entries", Description: "functions for handling Xauthority files and entries",
Website: "https://gitlab.freedesktop.org/xorg/lib/libxau", Website: "https://gitlab.freedesktop.org/xorg/lib/libxau",
Dependencies: P{
Xproto,
},
ID: 1765, ID: 1765,
} }
} }

View File

@@ -41,7 +41,6 @@ func (t Toolchain) newXCB() (pkg.Artifact, string) {
PkgConfig, PkgConfig,
XCBProto, XCBProto,
Xproto,
LibXau, LibXau,
), version ), version
} }
@@ -53,6 +52,11 @@ func init() {
Description: "The X protocol C-language Binding", Description: "The X protocol C-language Binding",
Website: "https://xcb.freedesktop.org/", Website: "https://xcb.freedesktop.org/",
Dependencies: P{
XCBProto,
LibXau,
},
ID: 1767, ID: 1767,
} }
} }

View File

@@ -4,22 +4,28 @@ import "hakurei.app/internal/pkg"
func (t Toolchain) newZlib() (pkg.Artifact, string) { func (t Toolchain) newZlib() (pkg.Artifact, string) {
const ( const (
version = "1.3.1" version = "1.3.2"
checksum = "E-eIpNzE8oJ5DsqH4UuA_0GDKuQF5csqI8ooDx2w7Vx-woJ2mb-YtSbEyIMN44mH" checksum = "KHZrePe42vL2XvOUE3KlJkp1UgWhWkl0jjT_BOvFhuM4GzieEH9S7CioepOFVGYB"
) )
return t.NewPackage("zlib", version, pkg.NewHTTPGetTar( return t.NewPackage("zlib", version, pkg.NewHTTPGetTar(
nil, "https://www.zlib.net/fossils/zlib-"+version+".tar.gz", nil, "https://www.zlib.net/fossils/zlib-"+version+".tar.gz",
mustDecode(checksum), mustDecode(checksum),
pkg.TarGzip, pkg.TarGzip,
), &PackageAttr{ ), nil, &CMakeHelper{
Env: []string{ Cache: [][2]string{
"CC=clang -fPIC", {"CMAKE_BUILD_TYPE", "Release"},
},
}, &MakeHelper{
OmitDefaults: true,
Host: `""`, {"ZLIB_BUILD_TESTING", "OFF"},
Build: `""`, {"ZLIB_BUILD_SHARED", "ON"},
{"ZLIB_BUILD_STATIC", "ON"},
{"ZLIB_BUILD_MINIZIP", "OFF"},
{"ZLIB_INSTALL", "ON"},
{"ZLIB_PREFIX", "OFF"},
},
// ninja dependency loop
Make: true,
}), version }), version
} }
func init() { func init() {

View File

@@ -16,7 +16,6 @@ func (t Toolchain) newZstd() (pkg.Artifact, string) {
Append: []string{"build", "cmake"}, Append: []string{"build", "cmake"},
Cache: [][2]string{ Cache: [][2]string{
{"CMAKE_BUILD_TYPE", "Release"}, {"CMAKE_BUILD_TYPE", "Release"},
{"CMAKE_INSTALL_LIBDIR", "lib"},
}, },
}), version }), version
} }

View File

@@ -139,6 +139,8 @@ in
inherit (app) identity groups enablements; inherit (app) identity groups enablements;
inherit (dbusConfig) session_bus system_bus; inherit (dbusConfig) session_bus system_bus;
direct_wayland = app.insecureWayland; direct_wayland = app.insecureWayland;
sched_policy = app.schedPolicy;
sched_priority = app.schedPriority;
container = { container = {
inherit (app) inherit (app)

View File

@@ -98,6 +98,7 @@ in
ints ints
str str
bool bool
enum
package package
anything anything
submodule submodule
@@ -237,6 +238,29 @@ in
}; };
hostAbstract = mkEnableOption "share abstract unix socket scope"; hostAbstract = mkEnableOption "share abstract unix socket scope";
schedPolicy = mkOption {
type = nullOr (enum [
"fifo"
"rr"
"batch"
"idle"
"deadline"
"ext"
]);
default = null;
description = ''
Scheduling policy to set for the container.
The zero value retains the current scheduling policy.
'';
};
schedPriority = mkOption {
type = nullOr (ints.between 1 99);
default = null;
description = ''
Scheduling priority to set for the container.
'';
};
nix = mkEnableOption "nix daemon access"; nix = mkEnableOption "nix daemon access";
mapRealUid = mkEnableOption "mapping to priv-user uid"; mapRealUid = mkEnableOption "mapping to priv-user uid";
device = mkEnableOption "access to all devices"; device = mkEnableOption "access to all devices";

View File

@@ -1,7 +1,7 @@
{ {
lib, lib,
stdenv, stdenv,
buildGoModule, buildGo126Module,
makeBinaryWrapper, makeBinaryWrapper,
xdg-dbus-proxy, xdg-dbus-proxy,
pkg-config, pkg-config,
@@ -17,7 +17,7 @@
fuse3, fuse3,
# for passthru.buildInputs # for passthru.buildInputs
go, go_1_26,
clang, clang,
# for check # for check
@@ -28,7 +28,7 @@
withStatic ? stdenv.hostPlatform.isStatic, withStatic ? stdenv.hostPlatform.isStatic,
}: }:
buildGoModule rec { buildGo126Module rec {
pname = "hakurei"; pname = "hakurei";
version = "0.3.6"; version = "0.3.6";
@@ -51,7 +51,7 @@ buildGoModule rec {
]; ];
nativeBuildInputs = [ nativeBuildInputs = [
go go_1_26
pkg-config pkg-config
wayland-scanner wayland-scanner
]; ];
@@ -125,8 +125,11 @@ buildGoModule rec {
--inherit-argv0 --prefix PATH : ${lib.makeBinPath appPackages} --inherit-argv0 --prefix PATH : ${lib.makeBinPath appPackages}
''; '';
passthru.targetPkgs = [ passthru = {
go go = go_1_26;
targetPkgs = [
go_1_26
clang clang
xorg.xorgproto xorg.xorgproto
util-linux util-linux
@@ -137,4 +140,5 @@ buildGoModule rec {
] ]
++ buildInputs ++ buildInputs
++ nativeBuildInputs; ++ nativeBuildInputs;
};
} }

View File

@@ -28,6 +28,15 @@
# Automatically login on tty1 as a normal user: # Automatically login on tty1 as a normal user:
services.getty.autologinUser = "alice"; services.getty.autologinUser = "alice";
security.pam.loginLimits = [
{
domain = "@users";
item = "rtprio";
type = "-";
value = 1;
}
];
environment = { environment = {
systemPackages = with pkgs; [ systemPackages = with pkgs; [
# For D-Bus tests: # For D-Bus tests:

View File

@@ -34,7 +34,7 @@ testers.nixosTest {
(writeShellScriptBin "hakurei-test" '' (writeShellScriptBin "hakurei-test" ''
# Assert hst CGO_ENABLED=0: ${ # Assert hst CGO_ENABLED=0: ${
with pkgs; with pkgs;
runCommand "hakurei-hst-cgo" { nativeBuildInputs = [ go ]; } '' runCommand "hakurei-hst-cgo" { nativeBuildInputs = [ self.packages.${system}.hakurei.go ]; } ''
cp -r ${options.environment.hakurei.package.default.src} "$out" cp -r ${options.environment.hakurei.package.default.src} "$out"
chmod -R +w "$out" chmod -R +w "$out"
cp ${writeText "hst_cgo_test.go" ''package hakurei_test;import("testing";"hakurei.app/hst");func TestTemplate(t *testing.T){hst.Template()}''} "$out/hst_cgo_test.go" cp ${writeText "hst_cgo_test.go" ''package hakurei_test;import("testing";"hakurei.app/hst");func TestTemplate(t *testing.T){hst.Template()}''} "$out/hst_cgo_test.go"

View File

@@ -23,6 +23,14 @@
security = { security = {
sudo.wheelNeedsPassword = false; sudo.wheelNeedsPassword = false;
rtkit.enable = true; rtkit.enable = true;
pam.loginLimits = [
{
domain = "@users";
item = "rtprio";
type = "-";
value = 1;
}
];
}; };
services = { services = {

Some files were not shown because too many files have changed in this diff Show More