Compare commits
256 Commits
v0.3.5
...
pkgserver-
| Author | SHA1 | Date | |
|---|---|---|---|
|
6f78444b11
|
|||
|
2a3f6f5384
|
|||
|
ef8663461b
|
|||
|
0b3be27b9a
|
|||
|
61a25c88ae
|
|||
| c7e195fe64 | |||
| d5db9add98 | |||
| ab8abdc82b | |||
| 770fd46510 | |||
| 99f1c6aab4 | |||
| 9ee629d402 | |||
| f475dde8b9 | |||
| c43a0c41b6 | |||
| 55827f1a85 | |||
| 721bdddfa1 | |||
| fb18e599dd | |||
| ec9005c794 | |||
| c6d35b4003 | |||
| 6401533cc2 | |||
| 5d6c401beb | |||
| 0a2d6aec14 | |||
| 67b11335d6 | |||
| ef3bd1b60a | |||
| beae7c89db | |||
| ed26d1a1c2 | |||
| faa0006d47 | |||
| 796ddbc977 | |||
| 98ab020160 | |||
| 26a346036d | |||
| 4ac9c72132 | |||
| c39c07d440 | |||
| b3fa0fe271 | |||
| 92a90582bb | |||
| 2e5ac56bdf | |||
| 75133e0234 | |||
| c120d4de4f | |||
| d6af8edb4a | |||
| da25d609d5 | |||
| 95ceed0de0 | |||
|
74c213264a
|
|||
|
345cffddc2
|
|||
|
49163758c8
|
|||
|
ad22c15fb1
|
|||
|
9c774f7e0a
|
|||
|
707f0a349f
|
|||
|
7c35be066a
|
|||
|
f91d55fa5e
|
|||
|
5862cc1966
|
|||
|
b3f0360a05
|
|||
|
8938994036
|
|||
|
96d382f805
|
|||
|
5c785c135c
|
|||
|
0130f8ea6d
|
|||
|
faac5c4a83
|
|||
|
620062cca9
|
|||
|
196b200d0f
|
|||
|
04e6bc3c5c
|
|||
|
5c540f90aa
|
|||
|
1e8ac5f68e
|
|||
|
fd515badff
|
|||
|
330a344845
|
|||
|
48cdf8bf85
|
|||
|
7fb42ba49d
|
|||
|
19a2737148
|
|||
|
baf2def9cc
|
|||
|
242e042cb9
|
|||
|
6988c9c4db
|
|||
|
d6e0ed8c76
|
|||
|
53be3309c5
|
|||
|
644dd18a52
|
|||
|
27c6f976df
|
|||
|
279a973633
|
|||
|
9c1b522689
|
|||
|
5c8cd46c02
|
|||
|
2dba550a2b
|
|||
|
8c64812b34
|
|||
|
d1423d980d
|
|||
|
104da0f66a
|
|||
|
d996d9fbb7
|
|||
|
469f97ccc1
|
|||
|
af7a6180a1
|
|||
|
03b5c0e20a
|
|||
|
6a31fb4fa3
|
|||
|
bae45363bc
|
|||
|
2c17d1abe0
|
|||
|
0aa459d1a9
|
|||
|
00053e6287
|
|||
|
3a0c020150
|
|||
|
78655f159e
|
|||
|
30bb52e380
|
|||
|
66197ebdb2
|
|||
|
f7a2744025
|
|||
|
f16b7bfaf0
|
|||
|
6228cda7ad
|
|||
|
86c336de88
|
|||
|
ba5d882ef2
|
|||
|
1e0d68a29e
|
|||
|
80f2367c16
|
|||
|
5ea4dae4b8
|
|||
|
eb1a3918a8
|
|||
|
349011a5e6
|
|||
|
861249751a
|
|||
|
e3445c2a7e
|
|||
|
7315e64a8a
|
|||
|
7d74454f6d
|
|||
|
96956c849a
|
|||
|
aabdcbba1c
|
|||
|
38cc4a6429
|
|||
|
27ef7f81fa
|
|||
|
f7888074b9
|
|||
|
95ffe0429c
|
|||
|
16d0cf04c1
|
|||
|
6a2b32b48c
|
|||
|
c1472fc54d
|
|||
|
179cf07e48
|
|||
|
c2d2795e2b
|
|||
|
2c1d7edd7a
|
|||
|
1ee8d09223
|
|||
|
7f01cb3d59
|
|||
|
65ae4f57c2
|
|||
|
77110601cc
|
|||
|
c5b1949430
|
|||
|
17805cdfa8
|
|||
|
9c9befb4c9
|
|||
|
fcdf9ecee4
|
|||
|
fbd97b658f
|
|||
|
c93725ac58
|
|||
|
f14ab80253
|
|||
|
9989881dd9
|
|||
|
a36b3ece16
|
|||
|
75970a5650
|
|||
|
572c99825d
|
|||
|
ebdf9dcecc
|
|||
|
8ea2a56d5b
|
|||
|
159a45c027
|
|||
|
0eb2bfa12e
|
|||
|
e19a98244a
|
|||
|
7e2f13fa1b
|
|||
|
97448e2104
|
|||
|
a87ad28b8b
|
|||
|
883d4ee4af
|
|||
|
d2c6d486b0
|
|||
|
6fdd800b2b
|
|||
|
94e3debc63
|
|||
|
ea87664a75
|
|||
|
04d9984da0
|
|||
|
145ccd1c92
|
|||
|
c5089cad78
|
|||
|
c83905f311
|
|||
|
b7cc14f296
|
|||
|
57e1e5141d
|
|||
|
1440195c3f
|
|||
|
cc60e0d15d
|
|||
|
9deaf853f0
|
|||
|
2baa9df133
|
|||
|
51d3df2419
|
|||
|
1d0fcf3a75
|
|||
|
e92971e0c2
|
|||
|
6159c74e96
|
|||
|
2a34a269d0
|
|||
|
ef130adb27
|
|||
|
5694e528e6
|
|||
|
b4e82e68a7
|
|||
|
d041fee791
|
|||
|
cefd02e960
|
|||
|
ad8f799703
|
|||
|
c74c269b66
|
|||
|
4b0cce4db5
|
|||
|
cd9b534d6b
|
|||
|
84e6922f30
|
|||
|
c16725a679
|
|||
|
a6160cd410
|
|||
|
826347fe1f
|
|||
|
085eaed7ba
|
|||
|
37d368a7f9
|
|||
|
2aeac7f582
|
|||
|
2b93631f52
|
|||
|
b3749aaf0b
|
|||
|
c8bb88cced
|
|||
|
f7f80f95b9
|
|||
|
6ea6c794fb
|
|||
|
6c2da4c4b2
|
|||
|
90f915a708
|
|||
|
a5fea4686e
|
|||
|
ae8c365c0f
|
|||
|
485db515f7
|
|||
|
ec7ee0789e
|
|||
|
42c93a57a4
|
|||
|
b1b14810ac
|
|||
|
de117ef365
|
|||
|
5e4bf23e0c
|
|||
|
d4519e2075
|
|||
|
7f1e4cf43c
|
|||
|
d021621fba
|
|||
|
56567307ec
|
|||
|
0264a1ef09
|
|||
|
0123bbee3d
|
|||
|
771adad603
|
|||
|
178305cb22
|
|||
|
c2456e252c
|
|||
|
273068b90c
|
|||
|
16b20e1d34
|
|||
|
b983917a6e
|
|||
|
e1b8f40add
|
|||
|
6df0d37c5a
|
|||
|
1619b06541
|
|||
|
e335d99c6b
|
|||
|
d888d09b6d
|
|||
|
54176e7315
|
|||
|
3bfe99d3d8
|
|||
|
149dfbb6af
|
|||
|
58801b44d4
|
|||
|
e065bbf792
|
|||
|
a883e57e7d
|
|||
|
ef9bd8ecbf
|
|||
|
a40527dcb2
|
|||
|
88d9a6163e
|
|||
|
47860b0387
|
|||
|
50c9da8b6d
|
|||
|
16966043c7
|
|||
|
a3515a6ef5
|
|||
|
7f05baab28
|
|||
|
d4d5e631ae
|
|||
|
1df3bcc3b9
|
|||
|
1809b53e52
|
|||
|
67b2914c94
|
|||
|
74dee11822
|
|||
|
a58c9258cc
|
|||
|
710b164c91
|
|||
|
93911d6015
|
|||
|
bb097536d4
|
|||
|
49b6526a38
|
|||
|
f9c31df94d
|
|||
|
4f570cc5c9
|
|||
|
5828631e79
|
|||
|
4f9f4875d7
|
|||
|
d49e654482
|
|||
|
b746e352e5
|
|||
|
c620d88dce
|
|||
|
7cd14b8865
|
|||
|
3e18a4b397
|
|||
|
1791b604b5
|
|||
|
59ff6db7ec
|
|||
|
430e099556
|
|||
|
17b64bb42c
|
|||
|
dbb89dfb0f
|
|||
|
de06ea2be4
|
|||
|
1ef7bedfb5
|
|||
|
05a828c474
|
|||
|
0061d11f93
|
|||
|
fb101a02f2
|
|||
|
3dbd67d113
|
|||
|
f511f0a9e9
|
|||
|
47995137b3
|
|||
|
e1b8607101
|
|||
|
3d3bd45b95
|
@@ -89,23 +89,6 @@ jobs:
|
|||||||
path: result/*
|
path: result/*
|
||||||
retention-days: 1
|
retention-days: 1
|
||||||
|
|
||||||
hpkg:
|
|
||||||
name: Hpkg
|
|
||||||
runs-on: nix
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Run NixOS test
|
|
||||||
run: nix build --out-link "result" --print-out-paths --print-build-logs .#checks.x86_64-linux.hpkg
|
|
||||||
|
|
||||||
- name: Upload test output
|
|
||||||
uses: actions/upload-artifact@v3
|
|
||||||
with:
|
|
||||||
name: "hpkg-vm-output"
|
|
||||||
path: result/*
|
|
||||||
retention-days: 1
|
|
||||||
|
|
||||||
check:
|
check:
|
||||||
name: Flake checks
|
name: Flake checks
|
||||||
needs:
|
needs:
|
||||||
@@ -114,7 +97,6 @@ jobs:
|
|||||||
- sandbox
|
- sandbox
|
||||||
- sandbox-race
|
- sandbox-race
|
||||||
- sharefs
|
- sharefs
|
||||||
- hpkg
|
|
||||||
runs-on: nix
|
runs-on: nix
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
|
|||||||
5
.github/workflows/README
vendored
5
.github/workflows/README
vendored
@@ -1,5 +0,0 @@
|
|||||||
DO NOT ADD NEW ACTIONS HERE
|
|
||||||
|
|
||||||
This port is solely for releasing to the github mirror and serves no purpose during development.
|
|
||||||
All development happens at https://git.gensokyo.uk/security/hakurei. If you wish to contribute,
|
|
||||||
request for an account on git.gensokyo.uk.
|
|
||||||
46
.github/workflows/release.yml
vendored
46
.github/workflows/release.yml
vendored
@@ -1,46 +0,0 @@
|
|||||||
name: Release
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
tags:
|
|
||||||
- 'v*'
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
release:
|
|
||||||
name: Create release
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
packages: write
|
|
||||||
contents: write
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Install Nix
|
|
||||||
uses: nixbuild/nix-quick-install-action@v32
|
|
||||||
with:
|
|
||||||
nix_conf: |
|
|
||||||
keep-env-derivations = true
|
|
||||||
keep-outputs = true
|
|
||||||
|
|
||||||
- name: Restore and cache Nix store
|
|
||||||
uses: nix-community/cache-nix-action@v6
|
|
||||||
with:
|
|
||||||
primary-key: build-${{ runner.os }}-${{ hashFiles('**/*.nix') }}
|
|
||||||
restore-prefixes-first-match: build-${{ runner.os }}-
|
|
||||||
gc-max-store-size-linux: 1G
|
|
||||||
purge: true
|
|
||||||
purge-prefixes: build-${{ runner.os }}-
|
|
||||||
purge-created: 60
|
|
||||||
purge-primary-key: never
|
|
||||||
|
|
||||||
- name: Build for release
|
|
||||||
run: nix build --print-out-paths --print-build-logs .#dist
|
|
||||||
|
|
||||||
- name: Release
|
|
||||||
uses: softprops/action-gh-release@v2
|
|
||||||
with:
|
|
||||||
files: |-
|
|
||||||
result/hakurei-**
|
|
||||||
48
.github/workflows/test.yml
vendored
48
.github/workflows/test.yml
vendored
@@ -1,48 +0,0 @@
|
|||||||
name: Test
|
|
||||||
|
|
||||||
on:
|
|
||||||
- push
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
dist:
|
|
||||||
name: Create distribution
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
permissions:
|
|
||||||
actions: write
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Install Nix
|
|
||||||
uses: nixbuild/nix-quick-install-action@v32
|
|
||||||
with:
|
|
||||||
nix_conf: |
|
|
||||||
keep-env-derivations = true
|
|
||||||
keep-outputs = true
|
|
||||||
|
|
||||||
- name: Restore and cache Nix store
|
|
||||||
uses: nix-community/cache-nix-action@v6
|
|
||||||
with:
|
|
||||||
primary-key: build-${{ runner.os }}-${{ hashFiles('**/*.nix') }}
|
|
||||||
restore-prefixes-first-match: build-${{ runner.os }}-
|
|
||||||
gc-max-store-size-linux: 1G
|
|
||||||
purge: true
|
|
||||||
purge-prefixes: build-${{ runner.os }}-
|
|
||||||
purge-created: 60
|
|
||||||
purge-primary-key: never
|
|
||||||
|
|
||||||
- name: Build for test
|
|
||||||
id: build-test
|
|
||||||
run: >-
|
|
||||||
export HAKUREI_REV="$(git rev-parse --short HEAD)" &&
|
|
||||||
sed -i.old 's/version = /version = "0.0.0-'$HAKUREI_REV'"; # version = /' package.nix &&
|
|
||||||
nix build --print-out-paths --print-build-logs .#dist &&
|
|
||||||
mv package.nix.old package.nix &&
|
|
||||||
echo "rev=$HAKUREI_REV" >> $GITHUB_OUTPUT
|
|
||||||
|
|
||||||
- name: Upload test build
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: "hakurei-${{ steps.build-test.outputs.rev }}"
|
|
||||||
path: result/*
|
|
||||||
retention-days: 1
|
|
||||||
5
.gitignore
vendored
5
.gitignore
vendored
@@ -27,7 +27,12 @@ go.work.sum
|
|||||||
|
|
||||||
# go generate
|
# go generate
|
||||||
/cmd/hakurei/LICENSE
|
/cmd/hakurei/LICENSE
|
||||||
|
/cmd/pkgserver/.sass-cache
|
||||||
|
/cmd/pkgserver/ui/static/*.js
|
||||||
|
/cmd/pkgserver/ui/static/*.css*
|
||||||
|
/cmd/pkgserver/ui/static/*.css.map
|
||||||
/internal/pkg/testdata/testtool
|
/internal/pkg/testdata/testtool
|
||||||
|
/internal/rosa/hakurei_current.tar.gz
|
||||||
|
|
||||||
# release
|
# release
|
||||||
/dist/hakurei-*
|
/dist/hakurei-*
|
||||||
|
|||||||
181
README.md
181
README.md
@@ -15,164 +15,51 @@
|
|||||||
<a href="https://hakurei.app"><img src="https://img.shields.io/website?url=https%3A%2F%2Fhakurei.app" alt="Website" /></a>
|
<a href="https://hakurei.app"><img src="https://img.shields.io/website?url=https%3A%2F%2Fhakurei.app" alt="Website" /></a>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
Hakurei is a tool for running sandboxed graphical applications as dedicated subordinate users on the Linux kernel.
|
Hakurei is a tool for running sandboxed desktop applications as dedicated
|
||||||
It implements the application container of [planterette (WIP)](https://git.gensokyo.uk/security/planterette),
|
subordinate users on the Linux kernel. It implements the application container
|
||||||
a self-contained Android-like package manager with modern security features.
|
of [planterette (WIP)](https://git.gensokyo.uk/security/planterette), a
|
||||||
|
self-contained Android-like package manager with modern security features.
|
||||||
|
|
||||||
## NixOS Module usage
|
Interaction with hakurei happens entirely through structures described by
|
||||||
|
package [hst](https://pkg.go.dev/hakurei.app/hst). No native API is available
|
||||||
|
due to internal details of uid isolation.
|
||||||
|
|
||||||
The NixOS module currently requires home-manager to configure subordinate users. Full module documentation can be found [here](options.md).
|
## Notable Packages
|
||||||
|
|
||||||
To use the module, import it into your configuration with
|
Package [container](https://pkg.go.dev/hakurei.app/container) is general purpose
|
||||||
|
container tooling. It is used by the hakurei shim process running as the target
|
||||||
|
subordinate user to set up the application container. It has a single dependency,
|
||||||
|
[libseccomp](https://github.com/seccomp/libseccomp), to create BPF programs
|
||||||
|
for the [system call filter](https://www.kernel.org/doc/html/latest/userspace-api/seccomp_filter.html).
|
||||||
|
|
||||||
```nix
|
Package [internal/pkg](https://pkg.go.dev/hakurei.app/internal/pkg) provides
|
||||||
{
|
infrastructure for hermetic builds. This replaces the legacy nix-based testing
|
||||||
inputs = {
|
framework and serves as the build system of Rosa OS, currently developed under
|
||||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
|
package [internal/rosa](https://pkg.go.dev/hakurei.app/internal/rosa).
|
||||||
|
|
||||||
hakurei = {
|
## Dependencies
|
||||||
url = "git+https://git.gensokyo.uk/security/hakurei";
|
|
||||||
|
|
||||||
# Optional but recommended to limit the size of your system closure.
|
`container` depends on:
|
||||||
inputs.nixpkgs.follows = "nixpkgs";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
outputs = { self, nixpkgs, hakurei, ... }:
|
- [libseccomp](https://github.com/seccomp/libseccomp) to generate BPF programs.
|
||||||
{
|
|
||||||
nixosConfigurations.hakurei = nixpkgs.lib.nixosSystem {
|
|
||||||
system = "x86_64-linux";
|
|
||||||
modules = [
|
|
||||||
hakurei.nixosModules.hakurei
|
|
||||||
];
|
|
||||||
};
|
|
||||||
};
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
This adds the `environment.hakurei` option:
|
`cmd/hakurei` depends on:
|
||||||
|
|
||||||
```nix
|
- [acl](https://savannah.nongnu.org/projects/acl/) to export sockets to
|
||||||
{ pkgs, ... }:
|
subordinate users.
|
||||||
|
- [wayland](https://gitlab.freedesktop.org/wayland/wayland) to set up
|
||||||
|
[security-context-v1](https://wayland.app/protocols/security-context-v1).
|
||||||
|
- [xcb](https://xcb.freedesktop.org/) to grant and revoke subordinate users
|
||||||
|
access to the X server.
|
||||||
|
|
||||||
{
|
`cmd/sharefs` depends on:
|
||||||
environment.hakurei = {
|
|
||||||
enable = true;
|
|
||||||
stateDir = "/var/lib/hakurei";
|
|
||||||
users = {
|
|
||||||
alice = 0;
|
|
||||||
nixos = 10;
|
|
||||||
};
|
|
||||||
|
|
||||||
commonPaths = [
|
- [fuse](https://github.com/libfuse/libfuse) to implement the filesystem.
|
||||||
{
|
|
||||||
src = "/sdcard";
|
|
||||||
write = true;
|
|
||||||
}
|
|
||||||
];
|
|
||||||
|
|
||||||
extraHomeConfig = {
|
New dependencies will generally not be added. Patches adding new dependencies
|
||||||
home.stateVersion = "23.05";
|
are very likely to be rejected.
|
||||||
};
|
|
||||||
|
|
||||||
apps = {
|
## NixOS Module (deprecated)
|
||||||
"org.chromium.Chromium" = {
|
|
||||||
name = "chromium";
|
|
||||||
identity = 1;
|
|
||||||
packages = [ pkgs.chromium ];
|
|
||||||
userns = true;
|
|
||||||
mapRealUid = true;
|
|
||||||
dbus = {
|
|
||||||
system = {
|
|
||||||
filter = true;
|
|
||||||
talk = [
|
|
||||||
"org.bluez"
|
|
||||||
"org.freedesktop.Avahi"
|
|
||||||
"org.freedesktop.UPower"
|
|
||||||
];
|
|
||||||
};
|
|
||||||
session =
|
|
||||||
f:
|
|
||||||
f {
|
|
||||||
talk = [
|
|
||||||
"org.freedesktop.FileManager1"
|
|
||||||
"org.freedesktop.Notifications"
|
|
||||||
"org.freedesktop.ScreenSaver"
|
|
||||||
"org.freedesktop.secrets"
|
|
||||||
"org.kde.kwalletd5"
|
|
||||||
"org.kde.kwalletd6"
|
|
||||||
];
|
|
||||||
own = [
|
|
||||||
"org.chromium.Chromium.*"
|
|
||||||
"org.mpris.MediaPlayer2.org.chromium.Chromium.*"
|
|
||||||
"org.mpris.MediaPlayer2.chromium.*"
|
|
||||||
];
|
|
||||||
call = { };
|
|
||||||
broadcast = { };
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
"org.claws_mail.Claws-Mail" = {
|
The NixOS module is in maintenance mode and will be removed once planterette is
|
||||||
name = "claws-mail";
|
feature-complete. Full module documentation can be found [here](options.md).
|
||||||
identity = 2;
|
|
||||||
packages = [ pkgs.claws-mail ];
|
|
||||||
gpu = false;
|
|
||||||
capability.pulse = false;
|
|
||||||
};
|
|
||||||
|
|
||||||
"org.weechat" = {
|
|
||||||
name = "weechat";
|
|
||||||
identity = 3;
|
|
||||||
shareUid = true;
|
|
||||||
packages = [ pkgs.weechat ];
|
|
||||||
capability = {
|
|
||||||
wayland = false;
|
|
||||||
x11 = false;
|
|
||||||
dbus = true;
|
|
||||||
pulse = false;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
"dev.vencord.Vesktop" = {
|
|
||||||
name = "discord";
|
|
||||||
identity = 3;
|
|
||||||
shareUid = true;
|
|
||||||
packages = [ pkgs.vesktop ];
|
|
||||||
share = pkgs.vesktop;
|
|
||||||
command = "vesktop --ozone-platform-hint=wayland";
|
|
||||||
userns = true;
|
|
||||||
mapRealUid = true;
|
|
||||||
capability.x11 = true;
|
|
||||||
dbus = {
|
|
||||||
session =
|
|
||||||
f:
|
|
||||||
f {
|
|
||||||
talk = [ "org.kde.StatusNotifierWatcher" ];
|
|
||||||
own = [ ];
|
|
||||||
call = { };
|
|
||||||
broadcast = { };
|
|
||||||
};
|
|
||||||
system.filter = true;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
"io.looking-glass" = {
|
|
||||||
name = "looking-glass-client";
|
|
||||||
identity = 4;
|
|
||||||
useCommonPaths = false;
|
|
||||||
groups = [ "plugdev" ];
|
|
||||||
extraPaths = [
|
|
||||||
{
|
|
||||||
src = "/dev/shm/looking-glass";
|
|
||||||
write = true;
|
|
||||||
}
|
|
||||||
];
|
|
||||||
extraConfig = {
|
|
||||||
programs.looking-glass-client.enable = true;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
}
|
|
||||||
```
|
|
||||||
58
cmd/earlyinit/main.go
Normal file
58
cmd/earlyinit/main.go
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
|
. "syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
runtime.LockOSThread()
|
||||||
|
log.SetFlags(0)
|
||||||
|
log.SetPrefix("earlyinit: ")
|
||||||
|
|
||||||
|
if err := Mount(
|
||||||
|
"devtmpfs",
|
||||||
|
"/dev/",
|
||||||
|
"devtmpfs",
|
||||||
|
MS_NOSUID|MS_NOEXEC,
|
||||||
|
"",
|
||||||
|
); err != nil {
|
||||||
|
log.Fatalf("cannot mount devtmpfs: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The kernel might be unable to set up the console. When that happens,
|
||||||
|
// printk is called with "Warning: unable to open an initial console."
|
||||||
|
// and the init runs with no files. The checkfds runtime function
|
||||||
|
// populates 0-2 by opening /dev/null for them.
|
||||||
|
//
|
||||||
|
// This check replaces 1 and 2 with /dev/kmsg to improve the chance
|
||||||
|
// of output being visible to the user.
|
||||||
|
if fi, err := os.Stdout.Stat(); err == nil {
|
||||||
|
if stat, ok := fi.Sys().(*Stat_t); ok {
|
||||||
|
if stat.Rdev == 0x103 {
|
||||||
|
var fd int
|
||||||
|
if fd, err = Open(
|
||||||
|
"/dev/kmsg",
|
||||||
|
O_WRONLY|O_CLOEXEC,
|
||||||
|
0,
|
||||||
|
); err != nil {
|
||||||
|
log.Fatalf("cannot open kmsg: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = Dup3(fd, Stdout, 0); err != nil {
|
||||||
|
log.Fatalf("cannot open stdout: %v", err)
|
||||||
|
}
|
||||||
|
if err = Dup3(fd, Stderr, 0); err != nil {
|
||||||
|
log.Fatalf("cannot open stderr: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = Close(fd); err != nil {
|
||||||
|
log.Printf("cannot close kmsg: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
@@ -16,6 +16,7 @@ import (
|
|||||||
"hakurei.app/command"
|
"hakurei.app/command"
|
||||||
"hakurei.app/container/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/container/fhs"
|
"hakurei.app/container/fhs"
|
||||||
|
"hakurei.app/container/std"
|
||||||
"hakurei.app/hst"
|
"hakurei.app/hst"
|
||||||
"hakurei.app/internal/dbus"
|
"hakurei.app/internal/dbus"
|
||||||
"hakurei.app/internal/env"
|
"hakurei.app/internal/env"
|
||||||
@@ -89,6 +90,9 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
|
|||||||
flagHomeDir string
|
flagHomeDir string
|
||||||
flagUserName string
|
flagUserName string
|
||||||
|
|
||||||
|
flagSchedPolicy string
|
||||||
|
flagSchedPriority int
|
||||||
|
|
||||||
flagPrivateRuntime, flagPrivateTmpdir bool
|
flagPrivateRuntime, flagPrivateTmpdir bool
|
||||||
|
|
||||||
flagWayland, flagX11, flagDBus, flagPipeWire, flagPulse bool
|
flagWayland, flagX11, flagDBus, flagPipeWire, flagPulse bool
|
||||||
@@ -131,7 +135,7 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
|
|||||||
log.Fatal(optionalErrorUnwrap(err))
|
log.Fatal(optionalErrorUnwrap(err))
|
||||||
return err
|
return err
|
||||||
} else if progPath, err = check.NewAbs(p); err != nil {
|
} else if progPath, err = check.NewAbs(p); err != nil {
|
||||||
log.Fatal(err.Error())
|
log.Fatal(err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -150,7 +154,7 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
|
|||||||
et |= hst.EPipeWire
|
et |= hst.EPipeWire
|
||||||
}
|
}
|
||||||
|
|
||||||
config := &hst.Config{
|
config := hst.Config{
|
||||||
ID: flagID,
|
ID: flagID,
|
||||||
Identity: flagIdentity,
|
Identity: flagIdentity,
|
||||||
Groups: flagGroups,
|
Groups: flagGroups,
|
||||||
@@ -177,6 +181,13 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err := config.SchedPolicy.UnmarshalText(
|
||||||
|
[]byte(flagSchedPolicy),
|
||||||
|
); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
config.SchedPriority = std.Int(flagSchedPriority)
|
||||||
|
|
||||||
// bind GPU stuff
|
// bind GPU stuff
|
||||||
if et&(hst.EX11|hst.EWayland) != 0 {
|
if et&(hst.EX11|hst.EWayland) != 0 {
|
||||||
config.Container.Filesystem = append(config.Container.Filesystem, hst.FilesystemConfigJSON{FilesystemConfig: &hst.FSBind{
|
config.Container.Filesystem = append(config.Container.Filesystem, hst.FilesystemConfigJSON{FilesystemConfig: &hst.FSBind{
|
||||||
@@ -214,7 +225,7 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
|
|||||||
homeDir = passwd.HomeDir
|
homeDir = passwd.HomeDir
|
||||||
}
|
}
|
||||||
if a, err := check.NewAbs(homeDir); err != nil {
|
if a, err := check.NewAbs(homeDir); err != nil {
|
||||||
log.Fatal(err.Error())
|
log.Fatal(err)
|
||||||
return err
|
return err
|
||||||
} else {
|
} else {
|
||||||
config.Container.Home = a
|
config.Container.Home = a
|
||||||
@@ -234,11 +245,11 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
|
|||||||
config.SessionBus = dbus.NewConfig(flagID, true, flagDBusMpris)
|
config.SessionBus = dbus.NewConfig(flagID, true, flagDBusMpris)
|
||||||
} else {
|
} else {
|
||||||
if f, err := os.Open(flagDBusConfigSession); err != nil {
|
if f, err := os.Open(flagDBusConfigSession); err != nil {
|
||||||
log.Fatal(err.Error())
|
log.Fatal(err)
|
||||||
} else {
|
} else {
|
||||||
decodeJSON(log.Fatal, "load session bus proxy config", f, &config.SessionBus)
|
decodeJSON(log.Fatal, "load session bus proxy config", f, &config.SessionBus)
|
||||||
if err = f.Close(); err != nil {
|
if err = f.Close(); err != nil {
|
||||||
log.Fatal(err.Error())
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -246,11 +257,11 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
|
|||||||
// system bus proxy is optional
|
// system bus proxy is optional
|
||||||
if flagDBusConfigSystem != "nil" {
|
if flagDBusConfigSystem != "nil" {
|
||||||
if f, err := os.Open(flagDBusConfigSystem); err != nil {
|
if f, err := os.Open(flagDBusConfigSystem); err != nil {
|
||||||
log.Fatal(err.Error())
|
log.Fatal(err)
|
||||||
} else {
|
} else {
|
||||||
decodeJSON(log.Fatal, "load system bus proxy config", f, &config.SystemBus)
|
decodeJSON(log.Fatal, "load system bus proxy config", f, &config.SystemBus)
|
||||||
if err = f.Close(); err != nil {
|
if err = f.Close(); err != nil {
|
||||||
log.Fatal(err.Error())
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -266,7 +277,7 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
outcome.Main(ctx, msg, config, -1)
|
outcome.Main(ctx, msg, &config, -1)
|
||||||
panic("unreachable")
|
panic("unreachable")
|
||||||
}).
|
}).
|
||||||
Flag(&flagDBusConfigSession, "dbus-config", command.StringFlag("builtin"),
|
Flag(&flagDBusConfigSession, "dbus-config", command.StringFlag("builtin"),
|
||||||
@@ -287,6 +298,10 @@ func buildCommand(ctx context.Context, msg message.Msg, early *earlyHardeningErr
|
|||||||
"Container home directory").
|
"Container home directory").
|
||||||
Flag(&flagUserName, "u", command.StringFlag("chronos"),
|
Flag(&flagUserName, "u", command.StringFlag("chronos"),
|
||||||
"Passwd user name within sandbox").
|
"Passwd user name within sandbox").
|
||||||
|
Flag(&flagSchedPolicy, "policy", command.StringFlag(""),
|
||||||
|
"Scheduling policy to set for the container").
|
||||||
|
Flag(&flagSchedPriority, "priority", command.IntFlag(0),
|
||||||
|
"Scheduling priority to set for the container").
|
||||||
Flag(&flagPrivateRuntime, "private-runtime", command.BoolFlag(false),
|
Flag(&flagPrivateRuntime, "private-runtime", command.BoolFlag(false),
|
||||||
"Do not share XDG_RUNTIME_DIR between containers under the same identity").
|
"Do not share XDG_RUNTIME_DIR between containers under the same identity").
|
||||||
Flag(&flagPrivateTmpdir, "private-tmpdir", command.BoolFlag(false),
|
Flag(&flagPrivateTmpdir, "private-tmpdir", command.BoolFlag(false),
|
||||||
|
|||||||
@@ -36,7 +36,7 @@ Commands:
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"run", []string{"run", "-h"}, `
|
"run", []string{"run", "-h"}, `
|
||||||
Usage: hakurei run [-h | --help] [--dbus-config <value>] [--dbus-system <value>] [--mpris] [--dbus-log] [--id <value>] [-a <int>] [-g <value>] [-d <value>] [-u <value>] [--private-runtime] [--private-tmpdir] [--wayland] [-X] [--dbus] [--pipewire] [--pulse] COMMAND [OPTIONS]
|
Usage: hakurei run [-h | --help] [--dbus-config <value>] [--dbus-system <value>] [--mpris] [--dbus-log] [--id <value>] [-a <int>] [-g <value>] [-d <value>] [-u <value>] [--policy <value>] [--priority <int>] [--private-runtime] [--private-tmpdir] [--wayland] [-X] [--dbus] [--pipewire] [--pulse] COMMAND [OPTIONS]
|
||||||
|
|
||||||
Flags:
|
Flags:
|
||||||
-X Enable direct connection to X11
|
-X Enable direct connection to X11
|
||||||
@@ -60,6 +60,10 @@ Flags:
|
|||||||
Allow owning MPRIS D-Bus path, has no effect if custom config is available
|
Allow owning MPRIS D-Bus path, has no effect if custom config is available
|
||||||
-pipewire
|
-pipewire
|
||||||
Enable connection to PipeWire via SecurityContext
|
Enable connection to PipeWire via SecurityContext
|
||||||
|
-policy string
|
||||||
|
Scheduling policy to set for the container
|
||||||
|
-priority int
|
||||||
|
Scheduling priority to set for the container
|
||||||
-private-runtime
|
-private-runtime
|
||||||
Do not share XDG_RUNTIME_DIR between containers under the same identity
|
Do not share XDG_RUNTIME_DIR between containers under the same identity
|
||||||
-private-tmpdir
|
-private-tmpdir
|
||||||
|
|||||||
@@ -1,7 +0,0 @@
|
|||||||
This program is a proof of concept and is now deprecated. It is only kept
|
|
||||||
around for API demonstration purposes and to make the most out of the test
|
|
||||||
suite.
|
|
||||||
|
|
||||||
This program is replaced by planterette, which can be found at
|
|
||||||
https://git.gensokyo.uk/security/planterette. Development effort should be
|
|
||||||
focused there instead.
|
|
||||||
173
cmd/hpkg/app.go
173
cmd/hpkg/app.go
@@ -1,173 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"hakurei.app/container/check"
|
|
||||||
"hakurei.app/container/fhs"
|
|
||||||
"hakurei.app/hst"
|
|
||||||
)
|
|
||||||
|
|
||||||
type appInfo struct {
|
|
||||||
Name string `json:"name"`
|
|
||||||
Version string `json:"version"`
|
|
||||||
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
ID string `json:"id"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
Identity int `json:"identity"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
Groups []string `json:"groups,omitempty"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
Devel bool `json:"devel,omitempty"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
Userns bool `json:"userns,omitempty"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
HostNet bool `json:"net,omitempty"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
HostAbstract bool `json:"abstract,omitempty"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
Device bool `json:"dev,omitempty"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
Tty bool `json:"tty,omitempty"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
MapRealUID bool `json:"map_real_uid,omitempty"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
DirectWayland bool `json:"direct_wayland,omitempty"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
SystemBus *hst.BusConfig `json:"system_bus,omitempty"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
SessionBus *hst.BusConfig `json:"session_bus,omitempty"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
Enablements *hst.Enablements `json:"enablements,omitempty"`
|
|
||||||
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
Multiarch bool `json:"multiarch,omitempty"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
Bluetooth bool `json:"bluetooth,omitempty"`
|
|
||||||
|
|
||||||
// allow gpu access within sandbox
|
|
||||||
GPU bool `json:"gpu"`
|
|
||||||
// store path to nixGL mesa wrappers
|
|
||||||
Mesa string `json:"mesa,omitempty"`
|
|
||||||
// store path to nixGL source
|
|
||||||
NixGL string `json:"nix_gl,omitempty"`
|
|
||||||
// store path to activate-and-exec script
|
|
||||||
Launcher *check.Absolute `json:"launcher"`
|
|
||||||
// store path to /run/current-system
|
|
||||||
CurrentSystem *check.Absolute `json:"current_system"`
|
|
||||||
// store path to home-manager activation package
|
|
||||||
ActivationPackage string `json:"activation_package"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// toHst assembles the hakurei configuration for launching this application.
// pathname and argv become the container entry point; flagDropShell forces a
// tty so an interactive shell is usable in place of the launcher.
//
// NOTE(review): the order of Filesystem entries appears significant (special
// /etc first, store and symlinks before data binds) — do not reorder.
func (app *appInfo) toHst(pathSet *appPathSet, pathname *check.Absolute, argv []string, flagDropShell bool) *hst.Config {
	config := &hst.Config{
		ID: app.ID,

		// enablement and bus settings are passed through from package metadata
		Enablements: app.Enablements,

		SystemBus:     app.SystemBus,
		SessionBus:    app.SessionBus,
		DirectWayland: app.DirectWayland,

		Identity: app.Identity,
		Groups:   app.Groups,

		Container: &hst.ContainerConfig{
			Hostname: formatHostname(app.Name),
			Filesystem: []hst.FilesystemConfigJSON{
				// synthesised /etc from the per-app cache directory
				{FilesystemConfig: &hst.FSBind{Target: fhs.AbsEtc, Source: pathSet.cacheDir.Append("etc"), Special: true}},
				// per-app nix store
				{FilesystemConfig: &hst.FSBind{Source: pathSet.nixPath.Append("store"), Target: pathNixStore}},
				// /run/current-system and bin directories point into the store
				{FilesystemConfig: &hst.FSLink{Target: pathCurrentSystem, Linkname: app.CurrentSystem.String()}},
				{FilesystemConfig: &hst.FSLink{Target: pathBin, Linkname: pathSwBin.String()}},
				{FilesystemConfig: &hst.FSLink{Target: fhs.AbsUsrBin, Linkname: pathSwBin.String()}},
				// expose installed metadata inside the private tmpdir
				{FilesystemConfig: &hst.FSBind{Source: pathSet.metaPath, Target: hst.AbsPrivateTmp.Append("app")}},
				// optional host passthroughs
				{FilesystemConfig: &hst.FSBind{Source: fhs.AbsEtc.Append("resolv.conf"), Optional: true}},
				{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("block"), Optional: true}},
				{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("bus"), Optional: true}},
				{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("class"), Optional: true}},
				{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("dev"), Optional: true}},
				{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("devices"), Optional: true}},
				// writable home, created on demand
				{FilesystemConfig: &hst.FSBind{Target: pathDataData.Append(app.ID), Source: pathSet.homeDir, Write: true, Ensure: true}},
			},

			Username: "hakurei",
			Shell:    pathShell,
			Home:     pathDataData.Append(app.ID),

			Path: pathname,
			Args: argv,
		},
		ExtraPerms: []hst.ExtraPermConfig{
			{Path: dataHome, Execute: true},
			{Ensure: true, Path: pathSet.baseDir, Read: true, Write: true, Execute: true},
		},
	}

	// translate boolean metadata fields into container flags
	if app.Devel {
		config.Container.Flags |= hst.FDevel
	}
	if app.Userns {
		config.Container.Flags |= hst.FUserns
	}
	if app.HostNet {
		config.Container.Flags |= hst.FHostNet
	}
	if app.HostAbstract {
		config.Container.Flags |= hst.FHostAbstract
	}
	if app.Device {
		config.Container.Flags |= hst.FDevice
	}
	// a dropped shell always needs a tty regardless of metadata
	if app.Tty || flagDropShell {
		config.Container.Flags |= hst.FTty
	}
	if app.MapRealUID {
		config.Container.Flags |= hst.FMapRealUID
	}
	if app.Multiarch {
		config.Container.Flags |= hst.FMultiarch
	}
	// runtime and tmpdir sharing are unconditional for packaged apps
	config.Container.Flags |= hst.FShareRuntime | hst.FShareTmpdir
	return config
}
|
|
||||||
|
|
||||||
func loadAppInfo(name string, beforeFail func()) *appInfo {
|
|
||||||
bundle := new(appInfo)
|
|
||||||
if f, err := os.Open(name); err != nil {
|
|
||||||
beforeFail()
|
|
||||||
log.Fatalf("cannot open bundle: %v", err)
|
|
||||||
} else if err = json.NewDecoder(f).Decode(&bundle); err != nil {
|
|
||||||
beforeFail()
|
|
||||||
log.Fatalf("cannot parse bundle metadata: %v", err)
|
|
||||||
} else if err = f.Close(); err != nil {
|
|
||||||
log.Printf("cannot close bundle metadata: %v", err)
|
|
||||||
// not fatal
|
|
||||||
}
|
|
||||||
|
|
||||||
if bundle.ID == "" {
|
|
||||||
beforeFail()
|
|
||||||
log.Fatal("application identifier must not be empty")
|
|
||||||
}
|
|
||||||
if bundle.Launcher == nil {
|
|
||||||
beforeFail()
|
|
||||||
log.Fatal("launcher must not be empty")
|
|
||||||
}
|
|
||||||
if bundle.CurrentSystem == nil {
|
|
||||||
beforeFail()
|
|
||||||
log.Fatal("current-system must not be empty")
|
|
||||||
}
|
|
||||||
|
|
||||||
return bundle
|
|
||||||
}
|
|
||||||
|
|
||||||
// formatHostname derives the container hostname by prefixing name with the
// machine hostname, falling back to the "hakurei" prefix when the hostname
// cannot be determined.
func formatHostname(name string) string {
	prefix, err := os.Hostname()
	if err != nil {
		log.Printf("cannot get hostname: %v", err)
		prefix = "hakurei"
	}
	return prefix + "-" + name
}
|
|
||||||
@@ -1,256 +0,0 @@
|
|||||||
# Builds a self-contained hakurei application package ("<pname>.pkg"): a
# zstd-compressed tarball holding a bootstrap nix store, a binary cache of the
# application closure, the NixOS /etc tree, and the bundle.json metadata that
# hpkg consumes at install time.
#
# Fix: the binary-cache destination URL previously contained an HTML-entity
# corruption ("¶llel-compression") in place of "&parallel-compression".
{
  nixpkgsFor,
  system,
  nixpkgs,
  home-manager,
}:

{
  lib,
  stdenv,
  closureInfo,
  writeScript,
  runtimeShell,
  writeText,
  symlinkJoin,
  vmTools,
  runCommand,
  fetchFromGitHub,

  zstd,
  nix,
  sqlite,

  # package identity
  name ? throw "name is required",
  version ? throw "version is required",
  pname ? "${name}-${version}",
  # home-manager and NixOS modules baked into the bundle
  modules ? [ ],
  nixosModules ? [ ],
  # body of the activate-and-exec launcher script
  script ? ''
    exec "$SHELL" "$@"
  '',

  # fields passed through to bundle.json (consumed by hpkg)
  id ? name,
  identity ? throw "identity is required",
  groups ? [ ],
  userns ? false,
  net ? true,
  dev ? false,
  no_new_session ? false,
  map_real_uid ? false,
  direct_wayland ? false,
  system_bus ? null,
  session_bus ? null,

  allow_wayland ? true,
  allow_x11 ? false,
  allow_dbus ? true,
  allow_audio ? true,
  gpu ? allow_wayland || allow_x11,
}:

let
  inherit (lib) optionals;

  # the user environment activated inside the container
  homeManagerConfiguration = home-manager.lib.homeManagerConfiguration {
    pkgs = nixpkgsFor.${system};
    modules = modules ++ [
      {
        home = {
          username = "hakurei";
          homeDirectory = "/data/data/${id}";
          stateVersion = "22.11";
        };
      }
    ];
  };

  launcher = writeScript "hakurei-${pname}" ''
    #!${runtimeShell} -el
    ${script}
  '';

  extraNixOSConfig =
    { pkgs, ... }:
    {
      environment = {
        etc.nixpkgs.source = nixpkgs.outPath;
        systemPackages = [ pkgs.nix ];
      };

      imports = nixosModules;
    };
  nixos = nixpkgs.lib.nixosSystem {
    inherit system;
    modules = [
      extraNixOSConfig
      { nix.settings.experimental-features = [ "flakes" ]; }
      { nix.settings.experimental-features = [ "nix-command" ]; }
      { boot.isContainer = true; }
      { system.stateVersion = "22.11"; }
    ];
  };

  # /etc built by running the NixOS activation commands inside a VM
  etc = vmTools.runInLinuxVM (
    runCommand "etc" { } ''
      mkdir -p /etc
      ${nixos.config.system.build.etcActivationCommands}

      # remove unused files
      rm -rf /etc/sudoers

      mkdir -p $out
      tar -C /etc -cf "$out/etc.tar" .
    ''
  );

  # default session bus policy extended with per-app well-known names
  extendSessionDefault = id: ext: {
    filter = true;

    talk = [ "org.freedesktop.Notifications" ] ++ ext.talk;
    own =
      (optionals (id != null) [
        "${id}.*"
        "org.mpris.MediaPlayer2.${id}.*"
      ])
      ++ ext.own;

    inherit (ext) call broadcast;
  };

  nixGL = fetchFromGitHub {
    owner = "nix-community";
    repo = "nixGL";
    rev = "310f8e49a149e4c9ea52f1adf70cdc768ec53f8a";
    hash = "sha256-lnzZQYG0+EXl/6NkGpyIz+FEOc/DSEG57AP1VsdeNrM=";
  };

  mesaWrappers =
    let
      isIntelX86Platform = system == "x86_64-linux";
      nixGLPackages = import (nixGL + "/default.nix") {
        pkgs = nixpkgs.legacyPackages.${system};
        enable32bits = isIntelX86Platform;
        enableIntelX86Extensions = isIntelX86Platform;
      };
    in
    symlinkJoin {
      name = "nixGL-mesa";
      paths = with nixGLPackages; [
        nixGLIntel
        nixVulkanIntel
      ];
    };

  # bundle.json consumed by hpkg install/start
  info = builtins.toJSON {
    inherit
      name
      version
      id
      identity
      launcher
      groups
      userns
      net
      dev
      no_new_session
      map_real_uid
      direct_wayland
      system_bus
      gpu
      ;

    session_bus =
      if session_bus != null then
        (session_bus (extendSessionDefault id))
      else
        (extendSessionDefault id {
          talk = [ ];
          own = [ ];
          call = { };
          broadcast = { };
        });

    enablements = {
      wayland = allow_wayland;
      x11 = allow_x11;
      dbus = allow_dbus;
      pipewire = allow_audio;
    };

    mesa = if gpu then mesaWrappers else null;
    nix_gl = if gpu then nixGL else null;
    current_system = nixos.config.system.build.toplevel;
    activation_package = homeManagerConfiguration.activationPackage;
  };
in

stdenv.mkDerivation {
  name = "${pname}.pkg";
  inherit version;
  __structuredAttrs = true;

  nativeBuildInputs = [
    zstd
    nix
    sqlite
  ];

  buildCommand = ''
    NIX_ROOT="$(mktemp -d)"
    export USER="nobody"

    # create bootstrap store
    bootstrapClosureInfo="${
      closureInfo {
        rootPaths = [
          nix
          nixos.config.system.build.toplevel
        ];
      }
    }"
    echo "copying bootstrap store paths..."
    mkdir -p "$NIX_ROOT/nix/store"
    xargs -n 1 -a "$bootstrapClosureInfo/store-paths" cp -at "$NIX_ROOT/nix/store/"
    NIX_REMOTE="local?root=$NIX_ROOT" nix-store --load-db < "$bootstrapClosureInfo/registration"
    NIX_REMOTE="local?root=$NIX_ROOT" nix-store --optimise
    # pin registration time for reproducibility
    sqlite3 "$NIX_ROOT/nix/var/nix/db/db.sqlite" "UPDATE ValidPaths SET registrationTime = ''${SOURCE_DATE_EPOCH}"
    chmod -R +r "$NIX_ROOT/nix/var"

    # create binary cache
    closureInfo="${
      closureInfo {
        rootPaths = [
          homeManagerConfiguration.activationPackage
          launcher
        ]
        ++ optionals gpu [
          mesaWrappers
          nixGL
        ];
      }
    }"
    echo "copying application paths..."
    TMP_STORE="$(mktemp -d)"
    mkdir -p "$TMP_STORE/nix/store"
    xargs -n 1 -a "$closureInfo/store-paths" cp -at "$TMP_STORE/nix/store/"
    NIX_REMOTE="local?root=$TMP_STORE" nix-store --load-db < "$closureInfo/registration"
    sqlite3 "$TMP_STORE/nix/var/nix/db/db.sqlite" "UPDATE ValidPaths SET registrationTime = ''${SOURCE_DATE_EPOCH}"
    NIX_REMOTE="local?root=$TMP_STORE" nix --offline --extra-experimental-features nix-command \
      --verbose --log-format raw-with-logs \
      copy --all --no-check-sigs --to \
      "file://$NIX_ROOT/res?compression=zstd&compression-level=19&parallel-compression=true"

    # package /etc
    mkdir -p "$NIX_ROOT/etc"
    tar -C "$NIX_ROOT/etc" -xf "${etc}/etc.tar"

    # write metadata
    cp "${writeText "bundle.json" info}" "$NIX_ROOT/bundle.json"

    # create an intermediate file to improve zstd performance
    INTER="$(mktemp)"
    tar -C "$NIX_ROOT" -cf "$INTER" .
    zstd -T0 -19 -fo "$out" "$INTER"
  '';
}
|
|
||||||
335
cmd/hpkg/main.go
335
cmd/hpkg/main.go
@@ -1,335 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"os/signal"
|
|
||||||
"path"
|
|
||||||
"syscall"
|
|
||||||
|
|
||||||
"hakurei.app/command"
|
|
||||||
"hakurei.app/container/check"
|
|
||||||
"hakurei.app/container/fhs"
|
|
||||||
"hakurei.app/hst"
|
|
||||||
"hakurei.app/message"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
	// errSuccess is a sentinel returned by subcommands to signal successful
	// completion; MustParse's error callback translates it into exit code 0.
	errSuccess = errors.New("success")
)
|
|
||||||
|
|
||||||
// main wires up the hpkg command line: "install" unpacks a package tarball,
// verifies it against any locally installed metadata, populates the per-app
// store/etc/home, activates home-manager, then records metadata; "start"
// builds an hst.Config from installed metadata and spawns the app via hakurei.
// Subcommands return errSuccess on success; MustParse maps that to exit 0.
func main() {
	log.SetPrefix("hpkg: ")
	log.SetFlags(0)
	msg := message.New(log.Default())

	// scripts run inside the container rely on $SHELL
	if err := os.Setenv("SHELL", pathShell.String()); err != nil {
		log.Fatalf("cannot set $SHELL: %v", err)
	}

	if os.Geteuid() == 0 {
		log.Fatal("this program must not run as root")
	}

	ctx, stop := signal.NotifyContext(context.Background(),
		syscall.SIGINT, syscall.SIGTERM)
	defer stop() // unreachable

	var (
		flagVerbose   bool
		flagDropShell bool
	)
	c := command.New(os.Stderr, log.Printf, "hpkg", func([]string) error { msg.SwapVerbose(flagVerbose); return nil }).
		Flag(&flagVerbose, "v", command.BoolFlag(false), "Print debug messages to the console").
		Flag(&flagDropShell, "s", command.BoolFlag(false), "Drop to a shell in place of next hakurei action")

	{
		var (
			flagDropShellActivate bool
		)
		c.NewCommand("install", "Install an application from its package", func(args []string) error {
			if len(args) != 1 {
				log.Println("invalid argument")
				return syscall.EINVAL
			}
			// tar below runs from a different working directory, so the
			// package path must be made absolute first
			pkgPath := args[0]
			if !path.IsAbs(pkgPath) {
				if dir, err := os.Getwd(); err != nil {
					log.Printf("cannot get current directory: %v", err)
					return err
				} else {
					pkgPath = path.Join(dir, pkgPath)
				}
			}

			/*
				Look up paths to programs started by hpkg.
				This is done here to ease error handling as cleanup is not yet required.
			*/

			var (
				_     = lookPath("zstd")
				tar   = lookPath("tar")
				chmod = lookPath("chmod")
				rm    = lookPath("rm")
			)

			/*
				Extract package and set up for cleanup.
			*/

			var workDir *check.Absolute
			if p, err := os.MkdirTemp("", "hpkg.*"); err != nil {
				log.Printf("cannot create temporary directory: %v", err)
				return err
			} else if workDir, err = check.NewAbs(p); err != nil {
				log.Printf("invalid temporary directory: %v", err)
				return err
			}
			cleanup := func() {
				// should be faster than a native implementation
				mustRun(msg, chmod, "-R", "+w", workDir.String())
				mustRun(msg, rm, "-rf", workDir.String())
			}
			// mustRun invokes this hook if any spawned process fails
			beforeRunFail.Store(&cleanup)

			mustRun(msg, tar, "-C", workDir.String(), "-xf", pkgPath)

			/*
				Parse bundle and app metadata, do pre-install checks.
			*/

			bundle := loadAppInfo(path.Join(workDir.String(), "bundle.json"), cleanup)
			pathSet := pathSetByApp(bundle.ID)

			// a is the currently installed metadata; it aliases bundle when
			// no prior installation exists (clean install)
			a := bundle
			if s, err := os.Stat(pathSet.metaPath.String()); err != nil {
				if !os.IsNotExist(err) {
					cleanup()
					log.Printf("cannot access %q: %v", pathSet.metaPath, err)
					return err
				}
				// did not modify app, clean installation condition met later
			} else if s.IsDir() {
				cleanup()
				log.Printf("metadata path %q is not a file", pathSet.metaPath)
				return syscall.EBADMSG
			} else {
				a = loadAppInfo(pathSet.metaPath.String(), cleanup)
				if a.ID != bundle.ID {
					cleanup()
					log.Printf("app %q claims to have identifier %q",
						bundle.ID, a.ID)
					return syscall.EBADE
				}
				// sec: should verify credentials
			}

			if a != bundle {
				// do not try to re-install
				if a.NixGL == bundle.NixGL &&
					a.CurrentSystem == bundle.CurrentSystem &&
					a.Launcher == bundle.Launcher &&
					a.ActivationPackage == bundle.ActivationPackage {
					cleanup()
					log.Printf("package %q is identical to local application %q",
						pkgPath, a.ID)
					return errSuccess
				}

				// identity determines uid
				if a.Identity != bundle.Identity {
					cleanup()
					log.Printf("package %q identity %d differs from installed %d",
						pkgPath, bundle.Identity, a.Identity)
					return syscall.EBADE
				}

				// sec: should compare version string
				msg.Verbosef("installing application %q version %q over local %q",
					bundle.ID, bundle.Version, a.Version)
			} else {
				msg.Verbosef("application %q clean installation", bundle.ID)
				// sec: should install credentials
			}

			/*
				Setup steps for files owned by the target user.
			*/

			withCacheDir(ctx, msg, "install", []string{
				// export inner bundle path in the environment
				"export BUNDLE=" + hst.PrivateTmp + "/bundle",
				// replace inner /etc
				"mkdir -p etc",
				"chmod -R +w etc",
				"rm -rf etc",
				"cp -dRf $BUNDLE/etc etc",
				// replace inner /nix
				"mkdir -p nix",
				"chmod -R +w nix",
				"rm -rf nix",
				"cp -dRf /nix nix",
				// copy from binary cache
				"nix copy --offline --no-check-sigs --all --from file://$BUNDLE/res --to $PWD",
				// deduplicate nix store
				"nix store --offline --store $PWD optimise",
				// make cache directory world-readable for autoetc
				"chmod 0755 .",
			}, workDir, bundle, pathSet, flagDropShell, cleanup)

			if bundle.GPU {
				withCacheDir(ctx, msg, "mesa-wrappers", []string{
					// link nixGL mesa wrappers
					"mkdir -p nix/.nixGL",
					"ln -s " + bundle.Mesa + "/bin/nixGLIntel nix/.nixGL/nixGL",
					"ln -s " + bundle.Mesa + "/bin/nixVulkanIntel nix/.nixGL/nixVulkan",
				}, workDir, bundle, pathSet, false, cleanup)
			}

			/*
				Activate home-manager generation.
			*/

			withNixDaemon(ctx, msg, "activate", []string{
				// clean up broken links
				"mkdir -p .local/state/{nix,home-manager}",
				"chmod -R +w .local/state/{nix,home-manager}",
				"rm -rf .local/state/{nix,home-manager}",
				// run activation script
				bundle.ActivationPackage + "/activate",
			}, false, func(config *hst.Config) *hst.Config { return config },
				bundle, pathSet, flagDropShellActivate, cleanup)

			/*
				Installation complete. Write metadata to block re-installs or downgrades.
			*/

			// serialise metadata to ensure consistency
			// (written to a "~" temp name, then renamed into place below)
			if f, err := os.OpenFile(pathSet.metaPath.String()+"~", os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644); err != nil {
				cleanup()
				log.Printf("cannot create metadata file: %v", err)
				return err
			} else if err = json.NewEncoder(f).Encode(bundle); err != nil {
				cleanup()
				log.Printf("cannot write metadata: %v", err)
				return err
			} else if err = f.Close(); err != nil {
				log.Printf("cannot close metadata file: %v", err)
				// not fatal
			}

			if err := os.Rename(pathSet.metaPath.String()+"~", pathSet.metaPath.String()); err != nil {
				cleanup()
				log.Printf("cannot rename metadata file: %v", err)
				return err
			}

			cleanup()
			return errSuccess
		}).
			Flag(&flagDropShellActivate, "s", command.BoolFlag(false), "Drop to a shell on activation")
	}

	{
		var (
			flagDropShellNixGL bool
			flagAutoDrivers    bool
		)
		c.NewCommand("start", "Start an application", func(args []string) error {
			if len(args) < 1 {
				log.Println("invalid argument")
				return syscall.EINVAL
			}

			/*
				Parse app metadata.
			*/

			id := args[0]
			pathSet := pathSetByApp(id)
			a := loadAppInfo(pathSet.metaPath.String(), func() {})
			if a.ID != id {
				log.Printf("app %q claims to have identifier %q", id, a.ID)
				return syscall.EBADE
			}

			/*
				Prepare nixGL.
			*/

			if a.GPU && flagAutoDrivers {
				withNixDaemon(ctx, msg, "nix-gl", []string{
					"mkdir -p /nix/.nixGL/auto",
					"rm -rf /nix/.nixGL/auto",
					"export NIXPKGS_ALLOW_UNFREE=1",
					"nix build --impure " +
						"--out-link /nix/.nixGL/auto/opengl " +
						"--override-input nixpkgs path:/etc/nixpkgs " +
						"path:" + a.NixGL,
					"nix build --impure " +
						"--out-link /nix/.nixGL/auto/vulkan " +
						"--override-input nixpkgs path:/etc/nixpkgs " +
						"path:" + a.NixGL + "#nixVulkanNvidia",
				}, true, func(config *hst.Config) *hst.Config {
					// driver detection needs network and sysfs visibility
					config.Container.Filesystem = append(config.Container.Filesystem, []hst.FilesystemConfigJSON{
						{FilesystemConfig: &hst.FSBind{Source: fhs.AbsEtc.Append("resolv.conf"), Optional: true}},
						{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("block"), Optional: true}},
						{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("bus"), Optional: true}},
						{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("class"), Optional: true}},
						{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("dev"), Optional: true}},
						{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("devices"), Optional: true}},
					}...)
					appendGPUFilesystem(config)
					return config
				}, a, pathSet, flagDropShellNixGL, func() {})
			}

			/*
				Create app configuration.
			*/

			pathname := a.Launcher
			argv := make([]string, 1, len(args))
			if flagDropShell {
				pathname = pathShell
				argv[0] = bash
			} else {
				argv[0] = a.Launcher.String()
			}
			argv = append(argv, args[1:]...)
			config := a.toHst(pathSet, pathname, argv, flagDropShell)

			/*
				Expose GPU devices.
			*/

			if a.GPU {
				config.Container.Filesystem = append(config.Container.Filesystem,
					hst.FilesystemConfigJSON{FilesystemConfig: &hst.FSBind{Source: pathSet.nixPath.Append(".nixGL"), Target: hst.AbsPrivateTmp.Append("nixGL")}})
				appendGPUFilesystem(config)
			}

			/*
				Spawn app.
			*/

			mustRunApp(ctx, msg, config, func() {})
			return errSuccess
		}).
			Flag(&flagDropShellNixGL, "s", command.BoolFlag(false), "Drop to a shell on nixGL build").
			Flag(&flagAutoDrivers, "auto-drivers", command.BoolFlag(false), "Attempt automatic opengl driver detection")
	}

	c.MustParse(os.Args[1:], func(err error) {
		msg.Verbosef("command returned %v", err)
		if errors.Is(err, errSuccess) {
			msg.BeforeExit()
			os.Exit(0)
		}
	})
	log.Fatal("unreachable")
}
|
|
||||||
@@ -1,117 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"strconv"
|
|
||||||
"sync/atomic"
|
|
||||||
|
|
||||||
"hakurei.app/container/check"
|
|
||||||
"hakurei.app/container/fhs"
|
|
||||||
"hakurei.app/hst"
|
|
||||||
"hakurei.app/message"
|
|
||||||
)
|
|
||||||
|
|
||||||
// bash is the shell binary name used both for lookup under the container's
// sw/bin and as argv[0] when dropping to a shell.
const bash = "bash"

var (
	// dataHome is the root of per-application state on the host;
	// resolved in init from $HAKUREI_DATA_HOME or a uid-scoped default.
	dataHome *check.Absolute
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
// dataHome
|
|
||||||
if a, err := check.NewAbs(os.Getenv("HAKUREI_DATA_HOME")); err == nil {
|
|
||||||
dataHome = a
|
|
||||||
} else {
|
|
||||||
dataHome = fhs.AbsVarLib.Append("hakurei/" + strconv.Itoa(os.Getuid()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
	// pathBin is /bin inside the container (symlinked to sw/bin).
	pathBin = fhs.AbsRoot.Append("bin")

	// nix store locations inside the container
	pathNix      = check.MustAbs("/nix/")
	pathNixStore = pathNix.Append("store/")
	// NixOS system profile and its binaries
	pathCurrentSystem = fhs.AbsRun.Append("current-system")
	pathSwBin         = pathCurrentSystem.Append("sw/bin/")
	pathShell         = pathSwBin.Append(bash)

	// android-style data layout used for application homes
	pathData     = check.MustAbs("/data")
	pathDataData = pathData.Append("data")
)
|
|
||||||
|
|
||||||
// lookPath resolves file in $PATH and returns its absolute location,
// terminating the program when the command is not available.
func lookPath(file string) string {
	p, err := exec.LookPath(file)
	if err != nil {
		log.Fatalf("%s: command not found", file)
	}
	return p
}
|
|
||||||
|
|
||||||
// beforeRunFail holds an optional cleanup hook that mustRun invokes (at most
// once; it is swapped out atomically) before terminating on process failure.
var beforeRunFail = new(atomic.Pointer[func()])
|
|
||||||
|
|
||||||
func mustRun(msg message.Msg, name string, arg ...string) {
|
|
||||||
msg.Verbosef("spawning process: %q %q", name, arg)
|
|
||||||
cmd := exec.Command(name, arg...)
|
|
||||||
cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
if f := beforeRunFail.Swap(nil); f != nil {
|
|
||||||
(*f)()
|
|
||||||
}
|
|
||||||
log.Fatalf("%s: %v", name, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// appPathSet groups the host-side directories belonging to one installed
// application, all rooted under dataHome.
type appPathSet struct {
	// baseDir is ${dataHome}/${id}, the per-app root.
	baseDir *check.Absolute
	// metaPath is ${baseDir}/app, the serialised appInfo metadata file.
	metaPath *check.Absolute
	// homeDir is ${baseDir}/files, bound as the container home.
	homeDir *check.Absolute
	// cacheDir is ${baseDir}/cache, holding the synthesised /etc.
	cacheDir *check.Absolute
	// nixPath is ${baseDir}/cache/nix, the per-app nix root.
	nixPath *check.Absolute
}
|
|
||||||
|
|
||||||
func pathSetByApp(id string) *appPathSet {
|
|
||||||
pathSet := new(appPathSet)
|
|
||||||
pathSet.baseDir = dataHome.Append(id)
|
|
||||||
pathSet.metaPath = pathSet.baseDir.Append("app")
|
|
||||||
pathSet.homeDir = pathSet.baseDir.Append("files")
|
|
||||||
pathSet.cacheDir = pathSet.baseDir.Append("cache")
|
|
||||||
pathSet.nixPath = pathSet.cacheDir.Append("nix")
|
|
||||||
return pathSet
|
|
||||||
}
|
|
||||||
|
|
||||||
func appendGPUFilesystem(config *hst.Config) {
|
|
||||||
config.Container.Filesystem = append(config.Container.Filesystem, []hst.FilesystemConfigJSON{
|
|
||||||
// flatpak commit 763a686d874dd668f0236f911de00b80766ffe79
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("dri"), Device: true, Optional: true}},
|
|
||||||
// mali
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("mali"), Device: true, Optional: true}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("mali0"), Device: true, Optional: true}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("umplock"), Device: true, Optional: true}},
|
|
||||||
// nvidia
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidiactl"), Device: true, Optional: true}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia-modeset"), Device: true, Optional: true}},
|
|
||||||
// nvidia OpenCL/CUDA
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia-uvm"), Device: true, Optional: true}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia-uvm-tools"), Device: true, Optional: true}},
|
|
||||||
|
|
||||||
// flatpak commit d2dff2875bb3b7e2cd92d8204088d743fd07f3ff
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia0"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia1"), Device: true, Optional: true}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia2"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia3"), Device: true, Optional: true}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia4"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia5"), Device: true, Optional: true}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia6"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia7"), Device: true, Optional: true}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia8"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia9"), Device: true, Optional: true}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia10"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia11"), Device: true, Optional: true}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia12"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia13"), Device: true, Optional: true}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia14"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia15"), Device: true, Optional: true}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia16"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia17"), Device: true, Optional: true}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia18"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia19"), Device: true, Optional: true}},
|
|
||||||
}...)
|
|
||||||
}
|
|
||||||
@@ -1,61 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
|
|
||||||
"hakurei.app/hst"
|
|
||||||
"hakurei.app/internal/info"
|
|
||||||
"hakurei.app/message"
|
|
||||||
)
|
|
||||||
|
|
||||||
// hakureiPathVal is the resolved path to the hakurei binary; MustHakureiPath
// terminates the program if it cannot be determined.
var hakureiPathVal = info.MustHakureiPath().String()
|
|
||||||
|
|
||||||
// mustRunApp launches hakurei with config streamed over a pipe passed as
// fd 3 ("app 3"), mirroring the caller's stdio. On a non-zero exit it
// propagates the child's exit code; on any other failure it runs beforeFail
// and terminates the program.
func mustRunApp(ctx context.Context, msg message.Msg, config *hst.Config, beforeFail func()) {
	var (
		cmd *exec.Cmd
		st  io.WriteCloser
	)

	if r, w, err := os.Pipe(); err != nil {
		beforeFail()
		log.Fatalf("cannot pipe: %v", err)
	} else {
		if msg.IsVerbose() {
			cmd = exec.CommandContext(ctx, hakureiPathVal, "-v", "app", "3")
		} else {
			cmd = exec.CommandContext(ctx, hakureiPathVal, "app", "3")
		}
		cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
		// read end becomes fd 3 in the child; config is written to the
		// write end from this process
		cmd.ExtraFiles = []*os.File{r}
		st = w
	}

	// encode concurrently so a pipe buffer smaller than the configuration
	// cannot deadlock the child's startup
	go func() {
		if err := json.NewEncoder(st).Encode(config); err != nil {
			beforeFail()
			log.Fatalf("cannot send configuration: %v", err)
		}
	}()

	if err := cmd.Start(); err != nil {
		beforeFail()
		log.Fatalf("cannot start hakurei: %v", err)
	}
	if err := cmd.Wait(); err != nil {
		var exitError *exec.ExitError
		if errors.As(err, &exitError) {
			// mirror the child's exit status
			beforeFail()
			msg.BeforeExit()
			os.Exit(exitError.ExitCode())
		} else {
			beforeFail()
			log.Fatalf("cannot wait: %v", err)
		}
	}
}
|
|
||||||
@@ -1,62 +0,0 @@
|
|||||||
{ pkgs, ... }:
|
|
||||||
{
|
|
||||||
users.users = {
|
|
||||||
alice = {
|
|
||||||
isNormalUser = true;
|
|
||||||
description = "Alice Foobar";
|
|
||||||
password = "foobar";
|
|
||||||
uid = 1000;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
home-manager.users.alice.home.stateVersion = "24.11";
|
|
||||||
|
|
||||||
# Automatically login on tty1 as a normal user:
|
|
||||||
services.getty.autologinUser = "alice";
|
|
||||||
|
|
||||||
environment = {
|
|
||||||
variables = {
|
|
||||||
SWAYSOCK = "/tmp/sway-ipc.sock";
|
|
||||||
WLR_RENDERER = "pixman";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
# Automatically configure and start Sway when logging in on tty1:
|
|
||||||
programs.bash.loginShellInit = ''
|
|
||||||
if [ "$(tty)" = "/dev/tty1" ]; then
|
|
||||||
set -e
|
|
||||||
|
|
||||||
mkdir -p ~/.config/sway
|
|
||||||
(sed s/Mod4/Mod1/ /etc/sway/config &&
|
|
||||||
echo 'output * bg ${pkgs.nixos-artwork.wallpapers.simple-light-gray.gnomeFilePath} fill' &&
|
|
||||||
echo 'output Virtual-1 res 1680x1050') > ~/.config/sway/config
|
|
||||||
|
|
||||||
sway --validate
|
|
||||||
systemd-cat --identifier=session sway && touch /tmp/sway-exit-ok
|
|
||||||
fi
|
|
||||||
'';
|
|
||||||
|
|
||||||
programs.sway.enable = true;
|
|
||||||
|
|
||||||
virtualisation = {
|
|
||||||
diskSize = 6 * 1024;
|
|
||||||
|
|
||||||
qemu.options = [
|
|
||||||
# Need to switch to a different GPU driver than the default one (-vga std) so that Sway can launch:
|
|
||||||
"-vga none -device virtio-gpu-pci"
|
|
||||||
|
|
||||||
# Increase zstd performance:
|
|
||||||
"-smp 8"
|
|
||||||
];
|
|
||||||
};
|
|
||||||
|
|
||||||
environment.hakurei = {
|
|
||||||
enable = true;
|
|
||||||
stateDir = "/var/lib/hakurei";
|
|
||||||
users.alice = 0;
|
|
||||||
|
|
||||||
extraHomeConfig = {
|
|
||||||
home.stateVersion = "23.05";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
}
|
|
||||||
@@ -1,34 +0,0 @@
|
|||||||
{
|
|
||||||
testers,
|
|
||||||
callPackage,
|
|
||||||
|
|
||||||
system,
|
|
||||||
self,
|
|
||||||
}:
|
|
||||||
let
|
|
||||||
buildPackage = self.buildPackage.${system};
|
|
||||||
in
|
|
||||||
testers.nixosTest {
|
|
||||||
name = "hpkg";
|
|
||||||
nodes.machine = {
|
|
||||||
environment.etc = {
|
|
||||||
"foot.pkg".source = callPackage ./foot.nix { inherit buildPackage; };
|
|
||||||
};
|
|
||||||
|
|
||||||
imports = [
|
|
||||||
./configuration.nix
|
|
||||||
|
|
||||||
self.nixosModules.hakurei
|
|
||||||
self.inputs.home-manager.nixosModules.home-manager
|
|
||||||
];
|
|
||||||
};
|
|
||||||
|
|
||||||
# adapted from nixos sway integration tests
|
|
||||||
|
|
||||||
# testScriptWithTypes:49: error: Cannot call function of unknown type
|
|
||||||
# (machine.succeed if succeed else machine.execute)(
|
|
||||||
# ^
|
|
||||||
# Found 1 error in 1 file (checked 1 source file)
|
|
||||||
skipTypeCheck = true;
|
|
||||||
testScript = builtins.readFile ./test.py;
|
|
||||||
}
|
|
||||||
@@ -1,48 +0,0 @@
|
|||||||
{
|
|
||||||
lib,
|
|
||||||
buildPackage,
|
|
||||||
foot,
|
|
||||||
wayland-utils,
|
|
||||||
inconsolata,
|
|
||||||
}:
|
|
||||||
|
|
||||||
buildPackage {
|
|
||||||
name = "foot";
|
|
||||||
inherit (foot) version;
|
|
||||||
|
|
||||||
identity = 2;
|
|
||||||
id = "org.codeberg.dnkl.foot";
|
|
||||||
|
|
||||||
modules = [
|
|
||||||
{
|
|
||||||
home.packages = [
|
|
||||||
foot
|
|
||||||
|
|
||||||
# For wayland-info:
|
|
||||||
wayland-utils
|
|
||||||
];
|
|
||||||
}
|
|
||||||
];
|
|
||||||
|
|
||||||
nixosModules = [
|
|
||||||
{
|
|
||||||
# To help with OCR:
|
|
||||||
environment.etc."xdg/foot/foot.ini".text = lib.generators.toINI { } {
|
|
||||||
main = {
|
|
||||||
font = "inconsolata:size=14";
|
|
||||||
};
|
|
||||||
colors = rec {
|
|
||||||
foreground = "000000";
|
|
||||||
background = "ffffff";
|
|
||||||
regular2 = foreground;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
fonts.packages = [ inconsolata ];
|
|
||||||
}
|
|
||||||
];
|
|
||||||
|
|
||||||
script = ''
|
|
||||||
exec foot "$@"
|
|
||||||
'';
|
|
||||||
}
|
|
||||||
@@ -1,110 +0,0 @@
|
|||||||
import json
|
|
||||||
import shlex
|
|
||||||
|
|
||||||
q = shlex.quote
|
|
||||||
NODE_GROUPS = ["nodes", "floating_nodes"]
|
|
||||||
|
|
||||||
|
|
||||||
def swaymsg(command: str = "", succeed=True, type="command"):
|
|
||||||
assert command != "" or type != "command", "Must specify command or type"
|
|
||||||
shell = q(f"swaymsg -t {q(type)} -- {q(command)}")
|
|
||||||
with machine.nested(
|
|
||||||
f"sending swaymsg {shell!r}" + " (allowed to fail)" * (not succeed)
|
|
||||||
):
|
|
||||||
ret = (machine.succeed if succeed else machine.execute)(
|
|
||||||
f"su - alice -c {shell}"
|
|
||||||
)
|
|
||||||
|
|
||||||
# execute also returns a status code, but disregard.
|
|
||||||
if not succeed:
|
|
||||||
_, ret = ret
|
|
||||||
|
|
||||||
if not succeed and not ret:
|
|
||||||
return None
|
|
||||||
|
|
||||||
parsed = json.loads(ret)
|
|
||||||
return parsed
|
|
||||||
|
|
||||||
|
|
||||||
def walk(tree):
|
|
||||||
yield tree
|
|
||||||
for group in NODE_GROUPS:
|
|
||||||
for node in tree.get(group, []):
|
|
||||||
yield from walk(node)
|
|
||||||
|
|
||||||
|
|
||||||
def wait_for_window(pattern):
|
|
||||||
def func(last_chance):
|
|
||||||
nodes = (node["name"] for node in walk(swaymsg(type="get_tree")))
|
|
||||||
|
|
||||||
if last_chance:
|
|
||||||
nodes = list(nodes)
|
|
||||||
machine.log(f"Last call! Current list of windows: {nodes}")
|
|
||||||
|
|
||||||
return any(pattern in name for name in nodes)
|
|
||||||
|
|
||||||
retry(func)
|
|
||||||
|
|
||||||
|
|
||||||
def collect_state_ui(name):
|
|
||||||
swaymsg(f"exec hakurei ps > '/tmp/{name}.ps'")
|
|
||||||
machine.copy_from_vm(f"/tmp/{name}.ps", "")
|
|
||||||
swaymsg(f"exec hakurei --json ps > '/tmp/{name}.json'")
|
|
||||||
machine.copy_from_vm(f"/tmp/{name}.json", "")
|
|
||||||
machine.screenshot(name)
|
|
||||||
|
|
||||||
|
|
||||||
def check_state(name, enablements):
|
|
||||||
instances = json.loads(machine.succeed("sudo -u alice -i XDG_RUNTIME_DIR=/run/user/1000 hakurei --json ps"))
|
|
||||||
if len(instances) != 1:
|
|
||||||
raise Exception(f"unexpected state length {len(instances)}")
|
|
||||||
instance = instances[0]
|
|
||||||
|
|
||||||
if len(instance['container']['args']) != 1 or not (instance['container']['args'][0].startswith("/nix/store/")) or f"hakurei-{name}-" not in (instance['container']['args'][0]):
|
|
||||||
raise Exception(f"unexpected args {instance['container']['args']}")
|
|
||||||
|
|
||||||
if instance['enablements'] != enablements:
|
|
||||||
raise Exception(f"unexpected enablements {instance['enablements']}")
|
|
||||||
|
|
||||||
|
|
||||||
start_all()
|
|
||||||
machine.wait_for_unit("multi-user.target")
|
|
||||||
|
|
||||||
# To check hakurei's version:
|
|
||||||
print(machine.succeed("sudo -u alice -i hakurei version"))
|
|
||||||
|
|
||||||
# Wait for Sway to complete startup:
|
|
||||||
machine.wait_for_file("/run/user/1000/wayland-1")
|
|
||||||
machine.wait_for_file("/tmp/sway-ipc.sock")
|
|
||||||
|
|
||||||
# Prepare hpkg directory:
|
|
||||||
machine.succeed("install -dm 0700 -o alice -g users /var/lib/hakurei/1000")
|
|
||||||
|
|
||||||
# Install hpkg app:
|
|
||||||
swaymsg("exec hpkg -v install /etc/foot.pkg && touch /tmp/hpkg-install-ok")
|
|
||||||
machine.wait_for_file("/tmp/hpkg-install-ok")
|
|
||||||
|
|
||||||
# Start app (foot) with Wayland enablement:
|
|
||||||
swaymsg("exec hpkg -v start org.codeberg.dnkl.foot")
|
|
||||||
wait_for_window("hakurei@machine-foot")
|
|
||||||
machine.send_chars("clear; wayland-info && touch /tmp/success-client\n")
|
|
||||||
machine.wait_for_file("/tmp/hakurei.0/tmpdir/2/success-client")
|
|
||||||
collect_state_ui("app_wayland")
|
|
||||||
check_state("foot", {"wayland": True, "dbus": True, "pipewire": True})
|
|
||||||
# Verify acl on XDG_RUNTIME_DIR:
|
|
||||||
print(machine.succeed("getfacl --absolute-names --omit-header --numeric /tmp/hakurei.0/runtime | grep 10002"))
|
|
||||||
machine.send_chars("exit\n")
|
|
||||||
machine.wait_until_fails("pgrep foot")
|
|
||||||
# Verify acl cleanup on XDG_RUNTIME_DIR:
|
|
||||||
machine.wait_until_fails("getfacl --absolute-names --omit-header --numeric /tmp/hakurei.0/runtime | grep 10002")
|
|
||||||
|
|
||||||
# Exit Sway and verify process exit status 0:
|
|
||||||
swaymsg("exit", succeed=False)
|
|
||||||
machine.wait_for_file("/tmp/sway-exit-ok")
|
|
||||||
|
|
||||||
# Print hakurei share and rundir contents:
|
|
||||||
print(machine.succeed("find /tmp/hakurei.0 "
|
|
||||||
+ "-path '/tmp/hakurei.0/runtime/*/*' -prune -o "
|
|
||||||
+ "-path '/tmp/hakurei.0/tmpdir/*/*' -prune -o "
|
|
||||||
+ "-print"))
|
|
||||||
print(machine.fail("ls /run/user/1000/hakurei"))
|
|
||||||
130
cmd/hpkg/with.go
130
cmd/hpkg/with.go
@@ -1,130 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"hakurei.app/container/check"
|
|
||||||
"hakurei.app/container/fhs"
|
|
||||||
"hakurei.app/hst"
|
|
||||||
"hakurei.app/message"
|
|
||||||
)
|
|
||||||
|
|
||||||
func withNixDaemon(
|
|
||||||
ctx context.Context,
|
|
||||||
msg message.Msg,
|
|
||||||
action string, command []string, net bool, updateConfig func(config *hst.Config) *hst.Config,
|
|
||||||
app *appInfo, pathSet *appPathSet, dropShell bool, beforeFail func(),
|
|
||||||
) {
|
|
||||||
flags := hst.FMultiarch | hst.FUserns // nix sandbox requires userns
|
|
||||||
if net {
|
|
||||||
flags |= hst.FHostNet
|
|
||||||
}
|
|
||||||
if dropShell {
|
|
||||||
flags |= hst.FTty
|
|
||||||
}
|
|
||||||
|
|
||||||
mustRunAppDropShell(ctx, msg, updateConfig(&hst.Config{
|
|
||||||
ID: app.ID,
|
|
||||||
|
|
||||||
ExtraPerms: []hst.ExtraPermConfig{
|
|
||||||
{Path: dataHome, Execute: true},
|
|
||||||
{Ensure: true, Path: pathSet.baseDir, Read: true, Write: true, Execute: true},
|
|
||||||
},
|
|
||||||
|
|
||||||
Identity: app.Identity,
|
|
||||||
|
|
||||||
Container: &hst.ContainerConfig{
|
|
||||||
Hostname: formatHostname(app.Name) + "-" + action,
|
|
||||||
|
|
||||||
Filesystem: []hst.FilesystemConfigJSON{
|
|
||||||
{FilesystemConfig: &hst.FSBind{Target: fhs.AbsEtc, Source: pathSet.cacheDir.Append("etc"), Special: true}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: pathSet.nixPath, Target: pathNix, Write: true}},
|
|
||||||
{FilesystemConfig: &hst.FSLink{Target: pathCurrentSystem, Linkname: app.CurrentSystem.String()}},
|
|
||||||
{FilesystemConfig: &hst.FSLink{Target: pathBin, Linkname: pathSwBin.String()}},
|
|
||||||
{FilesystemConfig: &hst.FSLink{Target: fhs.AbsUsrBin, Linkname: pathSwBin.String()}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Target: pathDataData.Append(app.ID), Source: pathSet.homeDir, Write: true, Ensure: true}},
|
|
||||||
},
|
|
||||||
|
|
||||||
Username: "hakurei",
|
|
||||||
Shell: pathShell,
|
|
||||||
Home: pathDataData.Append(app.ID),
|
|
||||||
|
|
||||||
Path: pathShell,
|
|
||||||
Args: []string{bash, "-lc", "rm -f /nix/var/nix/daemon-socket/socket && " +
|
|
||||||
// start nix-daemon
|
|
||||||
"nix-daemon --store / & " +
|
|
||||||
// wait for socket to appear
|
|
||||||
"(while [ ! -S /nix/var/nix/daemon-socket/socket ]; do sleep 0.01; done) && " +
|
|
||||||
// create directory so nix stops complaining
|
|
||||||
"mkdir -p /nix/var/nix/profiles/per-user/root/channels && " +
|
|
||||||
strings.Join(command, " && ") +
|
|
||||||
// terminate nix-daemon
|
|
||||||
" && pkill nix-daemon",
|
|
||||||
},
|
|
||||||
|
|
||||||
Flags: flags,
|
|
||||||
},
|
|
||||||
}), dropShell, beforeFail)
|
|
||||||
}
|
|
||||||
|
|
||||||
func withCacheDir(
|
|
||||||
ctx context.Context,
|
|
||||||
msg message.Msg,
|
|
||||||
action string, command []string, workDir *check.Absolute,
|
|
||||||
app *appInfo, pathSet *appPathSet, dropShell bool, beforeFail func(),
|
|
||||||
) {
|
|
||||||
flags := hst.FMultiarch
|
|
||||||
if dropShell {
|
|
||||||
flags |= hst.FTty
|
|
||||||
}
|
|
||||||
|
|
||||||
mustRunAppDropShell(ctx, msg, &hst.Config{
|
|
||||||
ID: app.ID,
|
|
||||||
|
|
||||||
ExtraPerms: []hst.ExtraPermConfig{
|
|
||||||
{Path: dataHome, Execute: true},
|
|
||||||
{Ensure: true, Path: pathSet.baseDir, Read: true, Write: true, Execute: true},
|
|
||||||
{Path: workDir, Execute: true},
|
|
||||||
},
|
|
||||||
|
|
||||||
Identity: app.Identity,
|
|
||||||
|
|
||||||
Container: &hst.ContainerConfig{
|
|
||||||
Hostname: formatHostname(app.Name) + "-" + action,
|
|
||||||
|
|
||||||
Filesystem: []hst.FilesystemConfigJSON{
|
|
||||||
{FilesystemConfig: &hst.FSBind{Target: fhs.AbsEtc, Source: workDir.Append(fhs.Etc), Special: true}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: workDir.Append("nix"), Target: pathNix}},
|
|
||||||
{FilesystemConfig: &hst.FSLink{Target: pathCurrentSystem, Linkname: app.CurrentSystem.String()}},
|
|
||||||
{FilesystemConfig: &hst.FSLink{Target: pathBin, Linkname: pathSwBin.String()}},
|
|
||||||
{FilesystemConfig: &hst.FSLink{Target: fhs.AbsUsrBin, Linkname: pathSwBin.String()}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: workDir, Target: hst.AbsPrivateTmp.Append("bundle")}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Target: pathDataData.Append(app.ID, "cache"), Source: pathSet.cacheDir, Write: true, Ensure: true}},
|
|
||||||
},
|
|
||||||
|
|
||||||
Username: "nixos",
|
|
||||||
Shell: pathShell,
|
|
||||||
Home: pathDataData.Append(app.ID, "cache"),
|
|
||||||
|
|
||||||
Path: pathShell,
|
|
||||||
Args: []string{bash, "-lc", strings.Join(command, " && ")},
|
|
||||||
|
|
||||||
Flags: flags,
|
|
||||||
},
|
|
||||||
}, dropShell, beforeFail)
|
|
||||||
}
|
|
||||||
|
|
||||||
func mustRunAppDropShell(ctx context.Context, msg message.Msg, config *hst.Config, dropShell bool, beforeFail func()) {
|
|
||||||
if dropShell {
|
|
||||||
if config.Container != nil {
|
|
||||||
config.Container.Args = []string{bash, "-l"}
|
|
||||||
}
|
|
||||||
mustRunApp(ctx, msg, config, beforeFail)
|
|
||||||
beforeFail()
|
|
||||||
msg.BeforeExit()
|
|
||||||
os.Exit(0)
|
|
||||||
}
|
|
||||||
mustRunApp(ctx, msg, config, beforeFail)
|
|
||||||
}
|
|
||||||
483
cmd/mbf/main.go
483
cmd/mbf/main.go
@@ -4,17 +4,26 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
"syscall"
|
"syscall"
|
||||||
|
"time"
|
||||||
"unique"
|
"unique"
|
||||||
|
|
||||||
"hakurei.app/command"
|
"hakurei.app/command"
|
||||||
"hakurei.app/container"
|
"hakurei.app/container"
|
||||||
"hakurei.app/container/check"
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/container/fhs"
|
||||||
|
"hakurei.app/container/seccomp"
|
||||||
|
"hakurei.app/container/std"
|
||||||
"hakurei.app/internal/pkg"
|
"hakurei.app/internal/pkg"
|
||||||
"hakurei.app/internal/rosa"
|
"hakurei.app/internal/rosa"
|
||||||
"hakurei.app/message"
|
"hakurei.app/message"
|
||||||
@@ -51,10 +60,16 @@ func main() {
|
|||||||
flagCures int
|
flagCures int
|
||||||
flagBase string
|
flagBase string
|
||||||
flagTShift int
|
flagTShift int
|
||||||
|
flagIdle bool
|
||||||
)
|
)
|
||||||
c := command.New(os.Stderr, log.Printf, "mbf", func([]string) (err error) {
|
c := command.New(os.Stderr, log.Printf, "mbf", func([]string) (err error) {
|
||||||
msg.SwapVerbose(!flagQuiet)
|
msg.SwapVerbose(!flagQuiet)
|
||||||
|
|
||||||
|
flagBase = os.ExpandEnv(flagBase)
|
||||||
|
if flagBase == "" {
|
||||||
|
flagBase = "cache"
|
||||||
|
}
|
||||||
|
|
||||||
var base *check.Absolute
|
var base *check.Absolute
|
||||||
if flagBase, err = filepath.Abs(flagBase); err != nil {
|
if flagBase, err = filepath.Abs(flagBase); err != nil {
|
||||||
return
|
return
|
||||||
@@ -70,6 +85,11 @@ func main() {
|
|||||||
cache.SetThreshold(1 << flagTShift)
|
cache.SetThreshold(1 << flagTShift)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if flagIdle {
|
||||||
|
pkg.SetSchedIdle = true
|
||||||
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
}).Flag(
|
}).Flag(
|
||||||
&flagQuiet,
|
&flagQuiet,
|
||||||
@@ -81,12 +101,16 @@ func main() {
|
|||||||
"Maximum number of dependencies to cure at any given time",
|
"Maximum number of dependencies to cure at any given time",
|
||||||
).Flag(
|
).Flag(
|
||||||
&flagBase,
|
&flagBase,
|
||||||
"d", command.StringFlag("cache"),
|
"d", command.StringFlag("$MBF_CACHE_DIR"),
|
||||||
"Directory to store cured artifacts",
|
"Directory to store cured artifacts",
|
||||||
).Flag(
|
).Flag(
|
||||||
&flagTShift,
|
&flagTShift,
|
||||||
"tshift", command.IntFlag(-1),
|
"tshift", command.IntFlag(-1),
|
||||||
"Dependency graph size exponent, to the power of 2",
|
"Dependency graph size exponent, to the power of 2",
|
||||||
|
).Flag(
|
||||||
|
&flagIdle,
|
||||||
|
"sched-idle", command.BoolFlag(false),
|
||||||
|
"Set SCHED_IDLE scheduling policy",
|
||||||
)
|
)
|
||||||
|
|
||||||
{
|
{
|
||||||
@@ -109,13 +133,245 @@ func main() {
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
var (
|
||||||
|
flagStatus bool
|
||||||
|
flagReport string
|
||||||
|
)
|
||||||
|
c.NewCommand(
|
||||||
|
"info",
|
||||||
|
"Display out-of-band metadata of an artifact",
|
||||||
|
func(args []string) (err error) {
|
||||||
|
if len(args) == 0 {
|
||||||
|
return errors.New("info requires at least 1 argument")
|
||||||
|
}
|
||||||
|
|
||||||
|
var r *rosa.Report
|
||||||
|
if flagReport != "" {
|
||||||
|
if r, err = rosa.OpenReport(flagReport); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if closeErr := r.Close(); err == nil {
|
||||||
|
err = closeErr
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
defer r.HandleAccess(&err)()
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, name := range args {
|
||||||
|
if p, ok := rosa.ResolveName(name); !ok {
|
||||||
|
return fmt.Errorf("unknown artifact %q", name)
|
||||||
|
} else {
|
||||||
|
var suffix string
|
||||||
|
if version := rosa.Std.Version(p); version != rosa.Unversioned {
|
||||||
|
suffix += "-" + version
|
||||||
|
}
|
||||||
|
fmt.Println("name : " + name + suffix)
|
||||||
|
|
||||||
|
meta := rosa.GetMetadata(p)
|
||||||
|
fmt.Println("description : " + meta.Description)
|
||||||
|
if meta.Website != "" {
|
||||||
|
fmt.Println("website : " +
|
||||||
|
strings.TrimSuffix(meta.Website, "/"))
|
||||||
|
}
|
||||||
|
if len(meta.Dependencies) > 0 {
|
||||||
|
fmt.Print("depends on :")
|
||||||
|
for _, d := range meta.Dependencies {
|
||||||
|
s := rosa.GetMetadata(d).Name
|
||||||
|
if version := rosa.Std.Version(d); version != rosa.Unversioned {
|
||||||
|
s += "-" + version
|
||||||
|
}
|
||||||
|
fmt.Print(" " + s)
|
||||||
|
}
|
||||||
|
fmt.Println()
|
||||||
|
}
|
||||||
|
|
||||||
|
const statusPrefix = "status : "
|
||||||
|
if flagStatus {
|
||||||
|
if r == nil {
|
||||||
|
var f io.ReadSeekCloser
|
||||||
|
f, err = cache.OpenStatus(rosa.Std.Load(p))
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, os.ErrNotExist) {
|
||||||
|
fmt.Println(
|
||||||
|
statusPrefix + "not yet cured",
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
fmt.Print(statusPrefix)
|
||||||
|
_, err = io.Copy(os.Stdout, f)
|
||||||
|
if err = errors.Join(err, f.Close()); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
status, n := r.ArtifactOf(cache.Ident(rosa.Std.Load(p)))
|
||||||
|
if status == nil {
|
||||||
|
fmt.Println(
|
||||||
|
statusPrefix + "not in report",
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
fmt.Println("size :", n)
|
||||||
|
fmt.Print(statusPrefix)
|
||||||
|
if _, err = os.Stdout.Write(status); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if i != len(args)-1 {
|
||||||
|
fmt.Println()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
).
|
||||||
|
Flag(
|
||||||
|
&flagStatus,
|
||||||
|
"status", command.BoolFlag(false),
|
||||||
|
"Display cure status if available",
|
||||||
|
).
|
||||||
|
Flag(
|
||||||
|
&flagReport,
|
||||||
|
"report", command.StringFlag(""),
|
||||||
|
"Load cure status from this report file instead of cache",
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.NewCommand(
|
||||||
|
"report",
|
||||||
|
"Generate an artifact cure report for the current cache",
|
||||||
|
func(args []string) (err error) {
|
||||||
|
var w *os.File
|
||||||
|
switch len(args) {
|
||||||
|
case 0:
|
||||||
|
w = os.Stdout
|
||||||
|
|
||||||
|
case 1:
|
||||||
|
if w, err = os.OpenFile(
|
||||||
|
args[0],
|
||||||
|
os.O_CREATE|os.O_EXCL|syscall.O_WRONLY,
|
||||||
|
0400,
|
||||||
|
); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
closeErr := w.Close()
|
||||||
|
if err == nil {
|
||||||
|
err = closeErr
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
default:
|
||||||
|
return errors.New("report requires 1 argument")
|
||||||
|
}
|
||||||
|
|
||||||
|
if container.Isatty(int(w.Fd())) {
|
||||||
|
return errors.New("output appears to be a terminal")
|
||||||
|
}
|
||||||
|
return rosa.WriteReport(msg, w, cache)
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
{
|
||||||
|
var flagJobs int
|
||||||
|
c.NewCommand("updates", command.UsageInternal, func([]string) error {
|
||||||
|
var (
|
||||||
|
errsMu sync.Mutex
|
||||||
|
errs []error
|
||||||
|
|
||||||
|
n atomic.Uint64
|
||||||
|
)
|
||||||
|
|
||||||
|
w := make(chan rosa.PArtifact)
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
for range max(flagJobs, 1) {
|
||||||
|
wg.Go(func() {
|
||||||
|
for p := range w {
|
||||||
|
meta := rosa.GetMetadata(p)
|
||||||
|
if meta.ID == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
v, err := meta.GetVersions(ctx)
|
||||||
|
if err != nil {
|
||||||
|
errsMu.Lock()
|
||||||
|
errs = append(errs, err)
|
||||||
|
errsMu.Unlock()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if current, latest :=
|
||||||
|
rosa.Std.Version(p),
|
||||||
|
meta.GetLatest(v); current != latest {
|
||||||
|
|
||||||
|
n.Add(1)
|
||||||
|
log.Printf("%s %s < %s", meta.Name, current, latest)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
msg.Verbosef("%s is up to date", meta.Name)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
done:
|
||||||
|
for i := range rosa.PresetEnd {
|
||||||
|
select {
|
||||||
|
case w <- rosa.PArtifact(i):
|
||||||
|
break
|
||||||
|
case <-ctx.Done():
|
||||||
|
break done
|
||||||
|
}
|
||||||
|
}
|
||||||
|
close(w)
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
if v := n.Load(); v > 0 {
|
||||||
|
errs = append(errs, errors.New(strconv.Itoa(int(v))+
|
||||||
|
" package(s) are out of date"))
|
||||||
|
}
|
||||||
|
return errors.Join(errs...)
|
||||||
|
}).
|
||||||
|
Flag(
|
||||||
|
&flagJobs,
|
||||||
|
"j", command.IntFlag(32),
|
||||||
|
"Maximum number of simultaneous connections",
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
var (
|
||||||
|
flagGentoo string
|
||||||
|
flagChecksum string
|
||||||
|
|
||||||
|
flagStage0 bool
|
||||||
|
)
|
||||||
c.NewCommand(
|
c.NewCommand(
|
||||||
"stage3",
|
"stage3",
|
||||||
"Check for toolchain 3-stage non-determinism",
|
"Check for toolchain 3-stage non-determinism",
|
||||||
func(args []string) (err error) {
|
func(args []string) (err error) {
|
||||||
_, _, _, stage1 := (rosa.Std - 2).NewLLVM()
|
t := rosa.Std
|
||||||
_, _, _, stage2 := (rosa.Std - 1).NewLLVM()
|
if flagGentoo != "" {
|
||||||
_, _, _, stage3 := rosa.Std.NewLLVM()
|
t -= 3 // magic number to discourage misuse
|
||||||
|
|
||||||
|
var checksum pkg.Checksum
|
||||||
|
if len(flagChecksum) != 0 {
|
||||||
|
if err = pkg.Decode(&checksum, flagChecksum); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
rosa.SetGentooStage3(flagGentoo, checksum)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, _, _, stage1 := (t - 2).NewLLVM()
|
||||||
|
_, _, _, stage2 := (t - 1).NewLLVM()
|
||||||
|
_, _, _, stage3 := t.NewLLVM()
|
||||||
var (
|
var (
|
||||||
pathname *check.Absolute
|
pathname *check.Absolute
|
||||||
checksum [2]unique.Handle[pkg.Checksum]
|
checksum [2]unique.Handle[pkg.Checksum]
|
||||||
@@ -146,13 +402,40 @@ func main() {
|
|||||||
"("+pkg.Encode(checksum[0].Value())+")",
|
"("+pkg.Encode(checksum[0].Value())+")",
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if flagStage0 {
|
||||||
|
if pathname, _, err = cache.Cure(
|
||||||
|
t.Load(rosa.Stage0),
|
||||||
|
); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
log.Println(pathname)
|
||||||
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
},
|
},
|
||||||
|
).
|
||||||
|
Flag(
|
||||||
|
&flagGentoo,
|
||||||
|
"gentoo", command.StringFlag(""),
|
||||||
|
"Bootstrap from a Gentoo stage3 tarball",
|
||||||
|
).
|
||||||
|
Flag(
|
||||||
|
&flagChecksum,
|
||||||
|
"checksum", command.StringFlag(""),
|
||||||
|
"Checksum of Gentoo stage3 tarball",
|
||||||
|
).
|
||||||
|
Flag(
|
||||||
|
&flagStage0,
|
||||||
|
"stage0", command.BoolFlag(false),
|
||||||
|
"Create bootstrap stage0 tarball",
|
||||||
)
|
)
|
||||||
|
}
|
||||||
|
|
||||||
{
|
{
|
||||||
var (
|
var (
|
||||||
flagDump string
|
flagDump string
|
||||||
|
flagExport string
|
||||||
)
|
)
|
||||||
c.NewCommand(
|
c.NewCommand(
|
||||||
"cure",
|
"cure",
|
||||||
@@ -162,13 +445,37 @@ func main() {
|
|||||||
return errors.New("cure requires 1 argument")
|
return errors.New("cure requires 1 argument")
|
||||||
}
|
}
|
||||||
if p, ok := rosa.ResolveName(args[0]); !ok {
|
if p, ok := rosa.ResolveName(args[0]); !ok {
|
||||||
return fmt.Errorf("unsupported artifact %q", args[0])
|
return fmt.Errorf("unknown artifact %q", args[0])
|
||||||
} else if flagDump == "" {
|
} else if flagDump == "" {
|
||||||
pathname, _, err := cache.Cure(rosa.Std.Load(p))
|
pathname, _, err := cache.Cure(rosa.Std.Load(p))
|
||||||
if err == nil {
|
if err != nil {
|
||||||
log.Println(pathname)
|
|
||||||
}
|
|
||||||
return err
|
return err
|
||||||
|
}
|
||||||
|
log.Println(pathname)
|
||||||
|
|
||||||
|
if flagExport != "" {
|
||||||
|
msg.Verbosef("exporting %s to %s...", args[0], flagExport)
|
||||||
|
|
||||||
|
var f *os.File
|
||||||
|
if f, err = os.OpenFile(
|
||||||
|
flagExport,
|
||||||
|
os.O_WRONLY|os.O_CREATE|os.O_EXCL,
|
||||||
|
0400,
|
||||||
|
); err != nil {
|
||||||
|
return err
|
||||||
|
} else if _, err = pkg.Flatten(
|
||||||
|
os.DirFS(pathname.String()),
|
||||||
|
".",
|
||||||
|
f,
|
||||||
|
); err != nil {
|
||||||
|
_ = f.Close()
|
||||||
|
return err
|
||||||
|
} else if err = f.Close(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
} else {
|
} else {
|
||||||
f, err := os.OpenFile(
|
f, err := os.OpenFile(
|
||||||
flagDump,
|
flagDump,
|
||||||
@@ -192,13 +499,173 @@ func main() {
|
|||||||
&flagDump,
|
&flagDump,
|
||||||
"dump", command.StringFlag(""),
|
"dump", command.StringFlag(""),
|
||||||
"Write IR to specified pathname and terminate",
|
"Write IR to specified pathname and terminate",
|
||||||
|
).
|
||||||
|
Flag(
|
||||||
|
&flagExport,
|
||||||
|
"export", command.StringFlag(""),
|
||||||
|
"Export cured artifact to specified pathname",
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
var (
|
||||||
|
flagNet bool
|
||||||
|
flagSession bool
|
||||||
|
|
||||||
|
flagWithToolchain bool
|
||||||
|
)
|
||||||
|
c.NewCommand(
|
||||||
|
"shell",
|
||||||
|
"Interactive shell in the specified Rosa OS environment",
|
||||||
|
func(args []string) error {
|
||||||
|
presets := make([]rosa.PArtifact, len(args))
|
||||||
|
for i, arg := range args {
|
||||||
|
p, ok := rosa.ResolveName(arg)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unknown artifact %q", arg)
|
||||||
|
}
|
||||||
|
presets[i] = p
|
||||||
|
}
|
||||||
|
root := make(rosa.Collect, 0, 6+len(args))
|
||||||
|
root = rosa.Std.AppendPresets(root, presets...)
|
||||||
|
|
||||||
|
if flagWithToolchain {
|
||||||
|
musl, compilerRT, runtimes, clang := (rosa.Std - 1).NewLLVM()
|
||||||
|
root = append(root, musl, compilerRT, runtimes, clang)
|
||||||
|
} else {
|
||||||
|
root = append(root, rosa.Std.Load(rosa.Musl))
|
||||||
|
}
|
||||||
|
root = append(root,
|
||||||
|
rosa.Std.Load(rosa.Mksh),
|
||||||
|
rosa.Std.Load(rosa.Toybox),
|
||||||
|
)
|
||||||
|
|
||||||
|
if _, _, err := cache.Cure(&root); err == nil {
|
||||||
|
return errors.New("unreachable")
|
||||||
|
} else if !errors.Is(err, rosa.Collected{}) {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
type cureRes struct {
|
||||||
|
pathname *check.Absolute
|
||||||
|
checksum unique.Handle[pkg.Checksum]
|
||||||
|
}
|
||||||
|
cured := make(map[pkg.Artifact]cureRes)
|
||||||
|
for _, a := range root {
|
||||||
|
pathname, checksum, err := cache.Cure(a)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
cured[a] = cureRes{pathname, checksum}
|
||||||
|
}
|
||||||
|
|
||||||
|
layers := pkg.PromoteLayers(root, func(a pkg.Artifact) (
|
||||||
|
*check.Absolute,
|
||||||
|
unique.Handle[pkg.Checksum],
|
||||||
|
) {
|
||||||
|
res := cured[a]
|
||||||
|
return res.pathname, res.checksum
|
||||||
|
}, func(i int, d pkg.Artifact) {
|
||||||
|
r := pkg.Encode(cache.Ident(d).Value())
|
||||||
|
if s, ok := d.(fmt.Stringer); ok {
|
||||||
|
if name := s.String(); name != "" {
|
||||||
|
r += "-" + name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
msg.Verbosef("promoted layer %d as %s", i, r)
|
||||||
|
})
|
||||||
|
|
||||||
|
z := container.New(ctx, msg)
|
||||||
|
z.WaitDelay = 3 * time.Second
|
||||||
|
z.SeccompPresets = pkg.SeccompPresets
|
||||||
|
z.SeccompFlags |= seccomp.AllowMultiarch
|
||||||
|
z.ParentPerm = 0700
|
||||||
|
z.HostNet = flagNet
|
||||||
|
z.RetainSession = flagSession
|
||||||
|
z.Hostname = "localhost"
|
||||||
|
z.Uid, z.Gid = (1<<10)-1, (1<<10)-1
|
||||||
|
z.Stdin, z.Stdout, z.Stderr = os.Stdin, os.Stdout, os.Stderr
|
||||||
|
|
||||||
|
var tempdir *check.Absolute
|
||||||
|
if s, err := filepath.Abs(os.TempDir()); err != nil {
|
||||||
|
return err
|
||||||
|
} else if tempdir, err = check.NewAbs(s); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
z.Dir = fhs.AbsRoot
|
||||||
|
z.Env = []string{
|
||||||
|
"SHELL=/system/bin/mksh",
|
||||||
|
"PATH=/system/bin",
|
||||||
|
"HOME=/",
|
||||||
|
}
|
||||||
|
z.Path = rosa.AbsSystem.Append("bin", "mksh")
|
||||||
|
z.Args = []string{"mksh"}
|
||||||
|
z.
|
||||||
|
OverlayEphemeral(fhs.AbsRoot, layers...).
|
||||||
|
Place(
|
||||||
|
fhs.AbsEtc.Append("hosts"),
|
||||||
|
[]byte("127.0.0.1 localhost\n"),
|
||||||
|
).
|
||||||
|
Place(
|
||||||
|
fhs.AbsEtc.Append("passwd"),
|
||||||
|
[]byte("media_rw:x:1023:1023::/:/system/bin/sh\n"+
|
||||||
|
"nobody:x:65534:65534::/proc/nonexistent:/system/bin/false\n"),
|
||||||
|
).
|
||||||
|
Place(
|
||||||
|
fhs.AbsEtc.Append("group"),
|
||||||
|
[]byte("media_rw:x:1023:\nnobody:x:65534:\n"),
|
||||||
|
).
|
||||||
|
Bind(tempdir, fhs.AbsTmp, std.BindWritable).
|
||||||
|
Proc(fhs.AbsProc).Dev(fhs.AbsDev, true)
|
||||||
|
|
||||||
|
if err := z.Start(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := z.Serve(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return z.Wait()
|
||||||
|
},
|
||||||
|
).
|
||||||
|
Flag(
|
||||||
|
&flagNet,
|
||||||
|
"net", command.BoolFlag(false),
|
||||||
|
"Share host net namespace",
|
||||||
|
).
|
||||||
|
Flag(
|
||||||
|
&flagSession,
|
||||||
|
"session", command.BoolFlag(false),
|
||||||
|
"Retain session",
|
||||||
|
).
|
||||||
|
Flag(
|
||||||
|
&flagWithToolchain,
|
||||||
|
"with-toolchain", command.BoolFlag(false),
|
||||||
|
"Include the stage3 LLVM toolchain",
|
||||||
|
)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Command(
|
||||||
|
"help",
|
||||||
|
"Show this help message",
|
||||||
|
func([]string) error { c.PrintHelp(); return nil },
|
||||||
|
)
|
||||||
|
|
||||||
c.MustParse(os.Args[1:], func(err error) {
|
c.MustParse(os.Args[1:], func(err error) {
|
||||||
if cache != nil {
|
if cache != nil {
|
||||||
cache.Close()
|
cache.Close()
|
||||||
}
|
}
|
||||||
|
if w, ok := err.(interface{ Unwrap() []error }); !ok {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
|
} else {
|
||||||
|
errs := w.Unwrap()
|
||||||
|
for i, e := range errs {
|
||||||
|
if i == len(errs)-1 {
|
||||||
|
log.Fatal(e)
|
||||||
|
}
|
||||||
|
log.Println(e)
|
||||||
|
}
|
||||||
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
176
cmd/pkgserver/api.go
Normal file
176
cmd/pkgserver/api.go
Normal file
@@ -0,0 +1,176 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"path"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"hakurei.app/internal/info"
|
||||||
|
"hakurei.app/internal/rosa"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Lazily-initialised payload for handleInfo; written exactly once via
// infoPayloadOnce and treated as read-only afterwards.
var (
	infoPayload struct {
		// Current package count.
		Count int `json:"count"`
		// Hakurei version, set at link time.
		HakureiVersion string `json:"hakurei_version"`
	}
	// Guards the one-time population of infoPayload.
	infoPayloadOnce sync.Once
)
|
||||||
|
|
||||||
|
// handleInfo writes constant system information: the package count and the
// Hakurei version. Both values are fixed for the lifetime of the process, so
// they are computed once under infoPayloadOnce.
func handleInfo(w http.ResponseWriter, _ *http.Request) {
	infoPayloadOnce.Do(func() {
		infoPayload.Count = int(rosa.PresetUnexportedStart)
		infoPayload.HakureiVersion = info.Version()
	})
	// TODO(mae): cache entire response if no additional fields are planned
	writeAPIPayload(w, infoPayload)
}
|
||||||
|
|
||||||
|
// newStatusHandler returns a [http.HandlerFunc] that offers status files for
// viewing or download, if available.
//
// The package name is taken from the final element of the request path. When
// disposition is true the response is served as a download with a
// Content-Disposition header; otherwise it is served inline as plain text.
func (index *packageIndex) newStatusHandler(disposition bool) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		m, ok := index.names[path.Base(r.URL.Path)]
		// HasReport gates access to m.status; it also implies
		// index.handleAccess was set during populate.
		if !ok || !m.HasReport {
			http.NotFound(w, r)
			return
		}

		contentType := "text/plain; charset=utf-8"
		if disposition {
			contentType = "application/octet-stream"

			// quoting like this is unsound, but okay, because metadata is hardcoded
			contentDisposition := `attachment; filename="`
			contentDisposition += m.Name + "-"
			if m.Version != "" {
				contentDisposition += m.Version + "-"
			}
			contentDisposition += m.ids + `.log"`
			w.Header().Set("Content-Disposition", contentDisposition)
		}
		w.Header().Set("Content-Type", contentType)
		w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
		// m.status is backed by the report; access must be bracketed by
		// handleAccess (see metadata.status).
		if err := func() (err error) {
			defer index.handleAccess(&err)()
			_, err = w.Write(m.status)
			return
		}(); err != nil {
			log.Println(err)
			// NOTE(review): if Write already sent bytes, this error response
			// is appended to a partial body — confirm acceptable.
			http.Error(
				w, "cannot deliver status, contact maintainers",
				http.StatusInternalServerError,
			)
		}
	}
}
|
||||||
|
|
||||||
|
// handleGet writes a slice of metadata with specified order.
|
||||||
|
func (index *packageIndex) handleGet(w http.ResponseWriter, r *http.Request) {
|
||||||
|
q := r.URL.Query()
|
||||||
|
limit, err := strconv.Atoi(q.Get("limit"))
|
||||||
|
if err != nil || limit > 100 || limit < 1 {
|
||||||
|
http.Error(
|
||||||
|
w, "limit must be an integer between 1 and 100",
|
||||||
|
http.StatusBadRequest,
|
||||||
|
)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
i, err := strconv.Atoi(q.Get("index"))
|
||||||
|
if err != nil || i >= len(index.sorts[0]) || i < 0 {
|
||||||
|
http.Error(
|
||||||
|
w, "index must be an integer between 0 and "+
|
||||||
|
strconv.Itoa(int(rosa.PresetUnexportedStart-1)),
|
||||||
|
http.StatusBadRequest,
|
||||||
|
)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
sort, err := strconv.Atoi(q.Get("sort"))
|
||||||
|
if err != nil || sort >= len(index.sorts) || sort < 0 {
|
||||||
|
http.Error(
|
||||||
|
w, "sort must be an integer between 0 and "+
|
||||||
|
strconv.Itoa(sortOrderEnd),
|
||||||
|
http.StatusBadRequest,
|
||||||
|
)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
values := index.sorts[sort][i:min(i+limit, len(index.sorts[sort]))]
|
||||||
|
writeAPIPayload(w, &struct {
|
||||||
|
Values []*metadata `json:"values"`
|
||||||
|
}{values})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (index *packageIndex) handleSearch(w http.ResponseWriter, r *http.Request) {
|
||||||
|
q := r.URL.Query()
|
||||||
|
limit, err := strconv.Atoi(q.Get("limit"))
|
||||||
|
if err != nil || limit > 100 || limit < 1 {
|
||||||
|
http.Error(
|
||||||
|
w, "limit must be an integer between 1 and 100",
|
||||||
|
http.StatusBadRequest,
|
||||||
|
)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
i, err := strconv.Atoi(q.Get("index"))
|
||||||
|
if err != nil || i >= len(index.sorts[0]) || i < 0 {
|
||||||
|
http.Error(
|
||||||
|
w, "index must be an integer between 0 and "+
|
||||||
|
strconv.Itoa(int(rosa.PresetUnexportedStart-1)),
|
||||||
|
http.StatusBadRequest,
|
||||||
|
)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
search, err := url.PathUnescape(q.Get("search"))
|
||||||
|
if len(search) > 100 || err != nil {
|
||||||
|
http.Error(
|
||||||
|
w, "search must be a string between 0 and 100 characters long",
|
||||||
|
http.StatusBadRequest,
|
||||||
|
)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
desc := q.Get("desc") == "true"
|
||||||
|
n, res, err := index.performSearchQuery(limit, i, search, desc)
|
||||||
|
if err != nil {
|
||||||
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||||
|
}
|
||||||
|
writeAPIPayload(w, &struct {
|
||||||
|
Count int `json:"count"`
|
||||||
|
Results []searchResult `json:"results"`
|
||||||
|
}{n, res})
|
||||||
|
}
|
||||||
|
|
||||||
|
// apiVersion is the name of the current API revision, as part of the pattern.
const apiVersion = "v1"

// registerAPI registers API handler functions.
//
// Status files are exposed twice: under the versioned API path for inline
// viewing, and under /status/ for download (with Content-Disposition).
func (index *packageIndex) registerAPI(mux *http.ServeMux) {
	mux.HandleFunc("GET /api/"+apiVersion+"/info", handleInfo)
	mux.HandleFunc("GET /api/"+apiVersion+"/get", index.handleGet)
	mux.HandleFunc("GET /api/"+apiVersion+"/search", index.handleSearch)
	mux.HandleFunc("GET /api/"+apiVersion+"/status/", index.newStatusHandler(false))
	mux.HandleFunc("GET /status/", index.newStatusHandler(true))
}
|
||||||
|
|
||||||
|
// writeAPIPayload sets headers common to API responses and encodes payload as
|
||||||
|
// JSON for the response body.
|
||||||
|
func writeAPIPayload(w http.ResponseWriter, payload any) {
|
||||||
|
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||||
|
w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
|
||||||
|
w.Header().Set("Pragma", "no-cache")
|
||||||
|
w.Header().Set("Expires", "0")
|
||||||
|
|
||||||
|
if err := json.NewEncoder(w).Encode(payload); err != nil {
|
||||||
|
log.Println(err)
|
||||||
|
http.Error(
|
||||||
|
w, "cannot encode payload, contact maintainers",
|
||||||
|
http.StatusInternalServerError,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
183
cmd/pkgserver/api_test.go
Normal file
183
cmd/pkgserver/api_test.go
Normal file
@@ -0,0 +1,183 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"slices"
|
||||||
|
"strconv"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"hakurei.app/internal/info"
|
||||||
|
"hakurei.app/internal/rosa"
|
||||||
|
)
|
||||||
|
|
||||||
|
// prefix is prepended to every API path.
const prefix = "/api/" + apiVersion + "/"

// TestAPIInfo checks the constant system information payload and the common
// API response headers produced by handleInfo.
func TestAPIInfo(t *testing.T) {
	t.Parallel()

	w := httptest.NewRecorder()
	handleInfo(w, httptest.NewRequestWithContext(
		t.Context(),
		http.MethodGet,
		prefix+"info",
		nil,
	))

	resp := w.Result()
	checkStatus(t, resp, http.StatusOK)
	checkAPIHeader(t, w.Header())

	// Mirrors the anonymous infoPayload struct declared in api.go.
	checkPayload(t, resp, struct {
		Count          int    `json:"count"`
		HakureiVersion string `json:"hakurei_version"`
	}{int(rosa.PresetUnexportedStart), info.Version()})
}
|
||||||
|
|
||||||
|
// TestAPIGet exercises handleGet: parameter validation boundaries for limit,
// index and sort, and representative pages for both declaration sort orders.
func TestAPIGet(t *testing.T) {
	t.Parallel()
	const target = prefix + "get"

	index := newIndex(t)
	// newRequest issues a GET against handleGet with the given query string
	// and returns the recorder.
	newRequest := func(suffix string) *httptest.ResponseRecorder {
		w := httptest.NewRecorder()
		index.handleGet(w, httptest.NewRequestWithContext(
			t.Context(),
			http.MethodGet,
			target+suffix,
			nil,
		))
		return w
	}

	// checkValidate checks one integer parameter: a non-numeric value, and
	// both boundaries (vmin-1/vmin and vmax/vmax+1).
	checkValidate := func(t *testing.T, suffix string, vmin, vmax int, wantErr string) {
		t.Run("invalid", func(t *testing.T) {
			t.Parallel()

			w := newRequest("?" + suffix + "=invalid")
			resp := w.Result()
			checkError(t, resp, wantErr, http.StatusBadRequest)
		})

		t.Run("min", func(t *testing.T) {
			t.Parallel()

			w := newRequest("?" + suffix + "=" + strconv.Itoa(vmin-1))
			resp := w.Result()
			checkError(t, resp, wantErr, http.StatusBadRequest)

			w = newRequest("?" + suffix + "=" + strconv.Itoa(vmin))
			resp = w.Result()
			checkStatus(t, resp, http.StatusOK)
		})

		t.Run("max", func(t *testing.T) {
			t.Parallel()

			w := newRequest("?" + suffix + "=" + strconv.Itoa(vmax+1))
			resp := w.Result()
			checkError(t, resp, wantErr, http.StatusBadRequest)

			w = newRequest("?" + suffix + "=" + strconv.Itoa(vmax))
			resp = w.Result()
			checkStatus(t, resp, http.StatusOK)
		})
	}

	t.Run("limit", func(t *testing.T) {
		t.Parallel()
		checkValidate(
			t, "index=0&sort=0&limit", 1, 100,
			"limit must be an integer between 1 and 100",
		)
	})

	t.Run("index", func(t *testing.T) {
		t.Parallel()
		checkValidate(
			t, "limit=1&sort=0&index", 0, int(rosa.PresetUnexportedStart-1),
			"index must be an integer between 0 and "+strconv.Itoa(int(rosa.PresetUnexportedStart-1)),
		)
	})

	t.Run("sort", func(t *testing.T) {
		t.Parallel()
		checkValidate(
			t, "index=0&limit=1&sort", 0, int(sortOrderEnd),
			"sort must be an integer between 0 and "+strconv.Itoa(int(sortOrderEnd)),
		)
	})

	// checkWithSuffix checks one page of results against want. Versions are
	// matched loosely: rosa.Unversioned on either side is treated as a
	// wildcard.
	checkWithSuffix := func(name, suffix string, want []*metadata) {
		t.Run(name, func(t *testing.T) {
			t.Parallel()

			w := newRequest(suffix)
			resp := w.Result()
			checkStatus(t, resp, http.StatusOK)
			checkAPIHeader(t, w.Header())
			// NOTE(review): handleGet's payload contains only "values", so
			// got.Count always decodes as 0 here while len(want) > 0 — this
			// comparison looks like it can never hold; confirm whether
			// handleGet should emit a count or the Count check should go.
			checkPayloadFunc(t, resp, func(got *struct {
				Count  int         `json:"count"`
				Values []*metadata `json:"values"`
			}) bool {
				return got.Count == len(want) &&
					slices.EqualFunc(got.Values, want, func(a, b *metadata) bool {
						return (a.Version == b.Version ||
							a.Version == rosa.Unversioned ||
							b.Version == rosa.Unversioned) &&
							a.HasReport == b.HasReport &&
							a.Name == b.Name &&
							a.Description == b.Description &&
							a.Website == b.Website
					})
			})

		})
	}

	checkWithSuffix("declarationAscending", "?limit=2&index=0&sort=0", []*metadata{
		{
			Metadata: rosa.GetMetadata(0),
			Version:  rosa.Std.Version(0),
		},
		{
			Metadata: rosa.GetMetadata(1),
			Version:  rosa.Std.Version(1),
		},
	})
	checkWithSuffix("declarationAscending offset", "?limit=3&index=5&sort=0", []*metadata{
		{
			Metadata: rosa.GetMetadata(5),
			Version:  rosa.Std.Version(5),
		},
		{
			Metadata: rosa.GetMetadata(6),
			Version:  rosa.Std.Version(6),
		},
		{
			Metadata: rosa.GetMetadata(7),
			Version:  rosa.Std.Version(7),
		},
	})
	checkWithSuffix("declarationDescending", "?limit=3&index=0&sort=1", []*metadata{
		{
			Metadata: rosa.GetMetadata(rosa.PresetUnexportedStart - 1),
			Version:  rosa.Std.Version(rosa.PresetUnexportedStart - 1),
		},
		{
			Metadata: rosa.GetMetadata(rosa.PresetUnexportedStart - 2),
			Version:  rosa.Std.Version(rosa.PresetUnexportedStart - 2),
		},
		{
			Metadata: rosa.GetMetadata(rosa.PresetUnexportedStart - 3),
			Version:  rosa.Std.Version(rosa.PresetUnexportedStart - 3),
		},
	})
	checkWithSuffix("declarationDescending offset", "?limit=1&index=37&sort=1", []*metadata{
		{
			Metadata: rosa.GetMetadata(rosa.PresetUnexportedStart - 38),
			Version:  rosa.Std.Version(rosa.PresetUnexportedStart - 38),
		},
	})
}
|
||||||
105
cmd/pkgserver/index.go
Normal file
105
cmd/pkgserver/index.go
Normal file
@@ -0,0 +1,105 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cmp"
|
||||||
|
"errors"
|
||||||
|
"slices"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
"hakurei.app/internal/rosa"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Sort orders accepted by the "sort" query parameter; values index into
// packageIndex.sorts.
const (
	declarationAscending = iota
	declarationDescending
	nameAscending
	nameDescending
	sizeAscending
	sizeDescending

	// sortOrderEnd is the highest valid sort order value.
	sortOrderEnd = iota - 1
)
|
||||||
|
|
||||||
|
// packageIndex refers to metadata by name and various sort orders.
type packageIndex struct {
	// One fixed-size array of entries per sort order constant; each array
	// holds pointers to the same metadata values in its own order.
	sorts [sortOrderEnd + 1][rosa.PresetUnexportedStart]*metadata
	// Lookup by package name.
	names map[string]*metadata
	// Memoised search results keyed by raw query string.
	search searchCache
	// Taken from [rosa.Report] if available.
	handleAccess func(*error) func()
}
|
||||||
|
|
||||||
|
// metadata holds [rosa.Metadata] extended with additional information.
type metadata struct {
	// Preset artifact this entry was built from.
	p rosa.PArtifact
	*rosa.Metadata

	// Populated via [rosa.Toolchain.Version], [rosa.Unversioned] is equivalent
	// to the zero value. Otherwise, the zero value is invalid.
	Version string `json:"version,omitempty"`
	// Output data size, available if present in report.
	Size int64 `json:"size,omitempty"`
	// Whether the underlying [pkg.Artifact] is present in the report.
	HasReport bool `json:"report"`

	// Ident string encoded ahead of time.
	ids string
	// Backed by [rosa.Report], access must be prepared by HandleAccess.
	status []byte
}
|
||||||
|
|
||||||
|
// populate deterministically populates packageIndex, optionally with a report.
//
// Every preset artifact gets one metadata entry, reachable by name via
// index.names and in each configured order via index.sorts. Report-derived
// fields (ids, status, Size, HasReport) are only filled when both cache and
// report are provided.
func (index *packageIndex) populate(cache *pkg.Cache, report *rosa.Report) (err error) {
	if report != nil {
		// Bracket report access for the duration of this call.
		defer report.HandleAccess(&err)()
		index.handleAccess = report.HandleAccess
	}

	var work [rosa.PresetUnexportedStart]*metadata
	index.names = make(map[string]*metadata)
	for p := range rosa.PresetUnexportedStart {
		m := metadata{
			p: p,

			Metadata: rosa.GetMetadata(p),
			Version:  rosa.Std.Version(p),
		}
		// The empty string is an invalid version; rosa.Unversioned is
		// normalised to empty so it is omitted from JSON output.
		if m.Version == "" {
			return errors.New("invalid version from " + m.Name)
		}
		if m.Version == rosa.Unversioned {
			m.Version = ""
		}

		if cache != nil && report != nil {
			id := cache.Ident(rosa.Std.Load(p))
			m.ids = pkg.Encode(id.Value())
			m.status, m.Size = report.ArtifactOf(id)
			// NOTE(review): assumes ArtifactOf signals absence with a
			// negative size — confirm against its contract.
			m.HasReport = m.Size >= 0
		}

		work[p] = &m
		index.names[m.Name] = &m
	}

	// Array assignment copies the whole pointer array, so each sort order
	// below sorts/reverses an independent copy of work; work itself is never
	// mutated.
	index.sorts[declarationAscending] = work
	index.sorts[declarationDescending] = work
	slices.Reverse(index.sorts[declarationDescending][:])

	index.sorts[nameAscending] = work
	slices.SortFunc(index.sorts[nameAscending][:], func(a, b *metadata) int {
		return strings.Compare(a.Name, b.Name)
	})
	index.sorts[nameDescending] = index.sorts[nameAscending]
	slices.Reverse(index.sorts[nameDescending][:])

	index.sorts[sizeAscending] = work
	slices.SortFunc(index.sorts[sizeAscending][:], func(a, b *metadata) int {
		return cmp.Compare(a.Size, b.Size)
	})
	index.sorts[sizeDescending] = index.sorts[sizeAscending]
	slices.Reverse(index.sorts[sizeDescending][:])

	return
}
|
||||||
114
cmd/pkgserver/main.go
Normal file
114
cmd/pkgserver/main.go
Normal file
@@ -0,0 +1,114 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"hakurei.app/command"
|
||||||
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
"hakurei.app/internal/rosa"
|
||||||
|
"hakurei.app/message"
|
||||||
|
)
|
||||||
|
|
||||||
|
// shutdownTimeout bounds graceful HTTP server shutdown after a signal.
const shutdownTimeout = 15 * time.Second

// main runs the pkgserver command: it populates the package index (optionally
// from a cache and report) and serves the web UI and JSON API until
// interrupted.
func main() {
	log.SetFlags(0)
	log.SetPrefix("pkgserver: ")

	var (
		flagBaseDir string
		flagAddr    string
	)

	// Cancelled on SIGINT/SIGTERM/SIGHUP; drives both the cache cleaner and
	// server shutdown below.
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
	defer stop()
	msg := message.New(log.Default())

	c := command.New(os.Stderr, log.Printf, "pkgserver", func(args []string) error {
		var (
			cache  *pkg.Cache
			report *rosa.Report
		)
		switch len(args) {
		case 0:
			// No report: serve metadata only.
			break

		case 1:
			// args[0] names the report file; the cache is required to
			// resolve artifact identities for it.
			baseDir, err := check.NewAbs(flagBaseDir)
			if err != nil {
				return err
			}

			cache, err = pkg.Open(ctx, msg, 0, baseDir)
			if err != nil {
				return err
			}
			defer cache.Close()

			report, err = rosa.OpenReport(args[0])
			if err != nil {
				return err
			}

		default:
			// NOTE(review): zero arguments is accepted above, so this
			// message is misleading — "at most 1 argument" is what is
			// actually enforced.
			return errors.New("pkgserver requires 1 argument")

		}

		var index packageIndex
		index.search = make(searchCache)
		if err := index.populate(cache, report); err != nil {
			return err
		}
		// Periodically expire memoised search results.
		// NOTE(review): index.search is also written by HTTP handlers with
		// no synchronisation — confirm or add locking.
		ticker := time.NewTicker(1 * time.Minute)
		go func() {
			for {
				select {
				case <-ctx.Done():
					ticker.Stop()
					return
				case <-ticker.C:
					index.search.clean()
				}
			}
		}()
		var mux http.ServeMux
		uiRoutes(&mux)
		index.registerAPI(&mux)
		server := http.Server{
			Addr:    flagAddr,
			Handler: &mux,
		}
		go func() {
			<-ctx.Done()
			c, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
			defer cancel()
			if err := server.Shutdown(c); err != nil {
				log.Fatal(err)
			}
		}()
		// NOTE(review): ListenAndServe returns ErrServerClosed as soon as
		// Shutdown begins, and the MustParse handler below exits the
		// process — in-flight requests may not finish draining; consider
		// waiting for Shutdown to return before exiting.
		return server.ListenAndServe()
	}).Flag(
		&flagBaseDir,
		"b", command.StringFlag(""),
		"base directory for cache",
	).Flag(
		&flagAddr,
		"addr", command.StringFlag(":8067"),
		"TCP network address to listen on",
	)
	c.MustParse(os.Args[1:], func(err error) {
		// Graceful shutdown surfaces as ErrServerClosed; treat it as
		// success.
		if errors.Is(err, http.ErrServerClosed) {
			os.Exit(0)
		}
		log.Fatal(err)
	})
}
|
||||||
96
cmd/pkgserver/main_test.go
Normal file
96
cmd/pkgserver/main_test.go
Normal file
@@ -0,0 +1,96 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// newIndex returns the address of a newly populated packageIndex.
//
// The index is populated without a cache or report, so report-derived fields
// (Size, HasReport, status) remain zero.
func newIndex(t *testing.T) *packageIndex {
	t.Helper()

	var index packageIndex
	if err := index.populate(nil, nil); err != nil {
		t.Fatalf("populate: error = %v", err)
	}
	return &index
}
|
||||||
|
|
||||||
|
// checkStatus checks response status code.
|
||||||
|
func checkStatus(t *testing.T, resp *http.Response, want int) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
if resp.StatusCode != want {
|
||||||
|
t.Errorf(
|
||||||
|
"StatusCode: %s, want %s",
|
||||||
|
http.StatusText(resp.StatusCode),
|
||||||
|
http.StatusText(want),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkHeader checks the value of a header entry.
|
||||||
|
func checkHeader(t *testing.T, h http.Header, key, want string) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
if got := h.Get(key); got != want {
|
||||||
|
t.Errorf("%s: %q, want %q", key, got, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkAPIHeader checks common entries set for API endpoints.
|
||||||
|
func checkAPIHeader(t *testing.T, h http.Header) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
checkHeader(t, h, "Content-Type", "application/json; charset=utf-8")
|
||||||
|
checkHeader(t, h, "Cache-Control", "no-cache, no-store, must-revalidate")
|
||||||
|
checkHeader(t, h, "Pragma", "no-cache")
|
||||||
|
checkHeader(t, h, "Expires", "0")
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkPayloadFunc checks the JSON response of an API endpoint by passing it to f.
|
||||||
|
func checkPayloadFunc[T any](
|
||||||
|
t *testing.T,
|
||||||
|
resp *http.Response,
|
||||||
|
f func(got *T) bool,
|
||||||
|
) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
var got T
|
||||||
|
r := io.Reader(resp.Body)
|
||||||
|
if testing.Verbose() {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
r = io.TeeReader(r, &buf)
|
||||||
|
defer func() { t.Helper(); t.Log(buf.String()) }()
|
||||||
|
}
|
||||||
|
if err := json.NewDecoder(r).Decode(&got); err != nil {
|
||||||
|
t.Fatalf("Decode: error = %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !f(&got) {
|
||||||
|
t.Errorf("Body: %#v", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkPayload checks the JSON response of an API endpoint.
|
||||||
|
func checkPayload[T any](t *testing.T, resp *http.Response, want T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
checkPayloadFunc(t, resp, func(got *T) bool {
|
||||||
|
return reflect.DeepEqual(got, &want)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkError(t *testing.T, resp *http.Response, error string, code int) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
checkStatus(t, resp, code)
|
||||||
|
if got, _ := io.ReadAll(resp.Body); string(got) != fmt.Sprintln(error) {
|
||||||
|
t.Errorf("Body: %q, want %q", string(got), error)
|
||||||
|
}
|
||||||
|
}
|
||||||
77
cmd/pkgserver/search.go
Normal file
77
cmd/pkgserver/search.go
Normal file
@@ -0,0 +1,77 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cmp"
|
||||||
|
"maps"
|
||||||
|
"regexp"
|
||||||
|
"slices"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// searchCache memoises search results keyed by the raw query string.
type searchCache map[string]searchCacheEntry

// searchResult is one package matched by a search query.
type searchResult struct {
	// Match spans ([start, end) byte offsets) within the package name.
	NameIndices [][]int `json:"name_matches"`
	// Match spans within the description; nil unless desc was requested.
	DescIndices [][]int `json:"desc_matches,omitempty"`
	// Relevance score used for ordering; higher sorts first.
	Score float64 `json:"score"`
	*metadata
}

// searchCacheEntry is a memoised, time-limited result set for one query.
type searchCacheEntry struct {
	// The query this entry was computed for.
	query string
	// Results sorted by descending score.
	results []searchResult
	// After this instant the entry is eligible for removal by clean.
	expiry time.Time
}
|
||||||
|
|
||||||
|
func (index *packageIndex) performSearchQuery(limit int, i int, search string, desc bool) (int, []searchResult, error) {
|
||||||
|
entry, ok := index.search[search]
|
||||||
|
if ok {
|
||||||
|
return len(entry.results), entry.results[i:min(i+limit, len(entry.results))], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
regex, err := regexp.Compile(search)
|
||||||
|
if err != nil {
|
||||||
|
return 0, make([]searchResult, 0), err
|
||||||
|
}
|
||||||
|
res := make([]searchResult, 0)
|
||||||
|
for p := range maps.Values(index.names) {
|
||||||
|
nameIndices := regex.FindAllIndex([]byte(p.Name), -1)
|
||||||
|
var descIndices [][]int = nil
|
||||||
|
if desc {
|
||||||
|
descIndices = regex.FindAllIndex([]byte(p.Description), -1)
|
||||||
|
}
|
||||||
|
if nameIndices == nil && descIndices == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
score := float64(indexsum(nameIndices)) / (float64(len(nameIndices)) + 1)
|
||||||
|
if desc {
|
||||||
|
score += float64(indexsum(descIndices)) / (float64(len(descIndices)) + 1) / 10.0
|
||||||
|
}
|
||||||
|
res = append(res, searchResult{
|
||||||
|
NameIndices: nameIndices,
|
||||||
|
DescIndices: descIndices,
|
||||||
|
Score: score,
|
||||||
|
metadata: p,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
slices.SortFunc(res[:], func(a, b searchResult) int { return -cmp.Compare(a.Score, b.Score) })
|
||||||
|
expiry := time.Now().Add(1 * time.Minute)
|
||||||
|
entry = searchCacheEntry{
|
||||||
|
query: search,
|
||||||
|
results: res,
|
||||||
|
expiry: expiry,
|
||||||
|
}
|
||||||
|
index.search[search] = entry
|
||||||
|
|
||||||
|
return len(res), res[i:min(i+limit, len(entry.results))], nil
|
||||||
|
}
|
||||||
|
func (s *searchCache) clean() {
|
||||||
|
maps.DeleteFunc(*s, func(_ string, v searchCacheEntry) bool {
|
||||||
|
return v.expiry.Before(time.Now())
|
||||||
|
})
|
||||||
|
}
|
||||||
|
func indexsum(in [][]int) int {
|
||||||
|
sum := 0
|
||||||
|
for i := 0; i < len(in); i++ {
|
||||||
|
sum += in[i][1] - in[i][0]
|
||||||
|
}
|
||||||
|
return sum
|
||||||
|
}
|
||||||
48
cmd/pkgserver/ui.go
Normal file
48
cmd/pkgserver/ui.go
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import "net/http"
|
||||||
|
|
||||||
|
// serveWebUI serves the embedded single-page UI with caching disabled and
// basic hardening headers set.
func serveWebUI(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
	w.Header().Set("Pragma", "no-cache")
	w.Header().Set("Expires", "0")
	w.Header().Set("X-Content-Type-Options", "nosniff")
	w.Header().Set("X-XSS-Protection", "1")
	w.Header().Set("X-Frame-Options", "DENY")

	http.ServeFileFS(w, r, content, "ui/index.html")
}
|
||||||
|
func serveStaticContent(w http.ResponseWriter, r *http.Request) {
|
||||||
|
switch r.URL.Path {
|
||||||
|
case "/static/style.css":
|
||||||
|
darkTheme := r.CookiesNamed("dark_theme")
|
||||||
|
if len(darkTheme) > 0 && darkTheme[0].Value == "true" {
|
||||||
|
http.ServeFileFS(w, r, content, "ui/static/dark.css")
|
||||||
|
} else {
|
||||||
|
http.ServeFileFS(w, r, content, "ui/static/light.css")
|
||||||
|
}
|
||||||
|
case "/favicon.ico":
|
||||||
|
http.ServeFileFS(w, r, content, "ui/static/favicon.ico")
|
||||||
|
case "/static/index.js":
|
||||||
|
http.ServeFileFS(w, r, content, "ui/static/index.js")
|
||||||
|
case "/static/test.js":
|
||||||
|
http.ServeFileFS(w, r, content, "ui/static/test.js")
|
||||||
|
case "/static/test.css":
|
||||||
|
http.ServeFileFS(w, r, content, "ui/static/test.css")
|
||||||
|
case "/static/test_tests.js":
|
||||||
|
http.ServeFileFS(w, r, content, "ui/static/test_tests.js")
|
||||||
|
default:
|
||||||
|
http.NotFound(w, r)
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// serveTester serves the embedded UI test harness page.
func serveTester(w http.ResponseWriter, r *http.Request) {
	http.ServeFileFS(w, r, content, "ui/test.html")
}
|
||||||
|
|
||||||
|
// uiRoutes registers handlers for the web UI: the index page (exact root
// match only), static assets, and the test harness.
func uiRoutes(mux *http.ServeMux) {
	mux.HandleFunc("GET /{$}", serveWebUI)
	mux.HandleFunc("GET /favicon.ico", serveStaticContent)
	mux.HandleFunc("GET /static/", serveStaticContent)
	mux.HandleFunc("GET /test.html", serveTester)
}
|
||||||
35
cmd/pkgserver/ui/index.html
Normal file
35
cmd/pkgserver/ui/index.html
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
<!DOCTYPE html>
|
||||||
|
<html lang="en">
|
||||||
|
<head>
|
||||||
|
<meta charset="UTF-8">
|
||||||
|
<link rel="stylesheet" href="static/style.css">
|
||||||
|
<title>Hakurei PkgServer</title>
|
||||||
|
<script src="static/index.js"></script>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<h1>Hakurei PkgServer</h1>
|
||||||
|
|
||||||
|
<table id="pkg-list">
|
||||||
|
<tr><td>Loading...</td></tr>
|
||||||
|
</table>
|
||||||
|
<p>Showing entries <span id="entry-counter"></span>.</p>
|
||||||
|
<span class="bottom-nav"><a href="javascript:prevPage()">« Previous</a> <span id="page-number">1</span> <a href="javascript:nextPage()">Next »</a></span>
|
||||||
|
<span><label for="count">Entries per page: </label><select name="count" id="count">
|
||||||
|
<option value="10">10</option>
|
||||||
|
<option value="20">20</option>
|
||||||
|
<option value="30">30</option>
|
||||||
|
<option value="50">50</option>
|
||||||
|
</select></span>
|
||||||
|
<span><label for="sort">Sort by: </label><select name="sort" id="sort">
|
||||||
|
<option value="0">Definition (ascending)</option>
|
||||||
|
<option value="1">Definition (descending)</option>
|
||||||
|
<option value="2">Name (ascending)</option>
|
||||||
|
<option value="3">Name (descending)</option>
|
||||||
|
<option value="4">Size (ascending)</option>
|
||||||
|
<option value="5">Size (descending)</option>
|
||||||
|
</select></span>
|
||||||
|
</body>
|
||||||
|
<footer>
|
||||||
|
<p>©<a href="https://hakurei.app/">Hakurei</a> (<span id="hakurei-version">unknown</span>). Licensed under the MIT license.</p>
|
||||||
|
</footer>
|
||||||
|
</html>
|
||||||
0
cmd/pkgserver/ui/static/_common.scss
Normal file
0
cmd/pkgserver/ui/static/_common.scss
Normal file
6
cmd/pkgserver/ui/static/dark.scss
Normal file
6
cmd/pkgserver/ui/static/dark.scss
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
@use 'common';
|
||||||
|
|
||||||
|
html {
|
||||||
|
background-color: #2c2c2c;
|
||||||
|
color: ghostwhite;
|
||||||
|
}
|
||||||
BIN
cmd/pkgserver/ui/static/favicon.ico
Normal file
BIN
cmd/pkgserver/ui/static/favicon.ico
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 17 KiB |
155
cmd/pkgserver/ui/static/index.ts
Normal file
155
cmd/pkgserver/ui/static/index.ts
Normal file
@@ -0,0 +1,155 @@
|
|||||||
|
class PackageIndexEntry {
|
||||||
|
name: string
|
||||||
|
size: number | null
|
||||||
|
description: string | null
|
||||||
|
website: string | null
|
||||||
|
version: string | null
|
||||||
|
report: boolean
|
||||||
|
}
|
||||||
|
function toHTML(entry: PackageIndexEntry): HTMLTableRowElement {
|
||||||
|
let v = entry.version != null ? `<span>${escapeHtml(entry.version)}</span>` : ""
|
||||||
|
let s = entry.size != null ? `<p>Size: ${toByteSizeString(entry.size)} (${entry.size})</p>` : ""
|
||||||
|
let d = entry.description != null ? `<p>${escapeHtml(entry.description)}</p>` : ""
|
||||||
|
let w = entry.website != null ? `<a href="${encodeURI(entry.website)}">Website</a>` : ""
|
||||||
|
let r = entry.report ? `Log (<a href=\"${encodeURI('/api/v1/status/' + entry.name)}\">View</a> | <a href=\"${encodeURI('/status/' + entry.name)}\">Download</a>)` : ""
|
||||||
|
let row = <HTMLTableRowElement>(document.createElement('tr'))
|
||||||
|
row.innerHTML = `<td>
|
||||||
|
<h2>${escapeHtml(entry.name)} ${v}</h2>
|
||||||
|
${d}
|
||||||
|
${s}
|
||||||
|
${w}
|
||||||
|
${r}
|
||||||
|
</td>`
|
||||||
|
return row
|
||||||
|
}
|
||||||
|
|
||||||
|
function toByteSizeString(bytes: number): string {
|
||||||
|
if(bytes == null || bytes < 1024) return `${bytes}B`
|
||||||
|
if(bytes < Math.pow(1024, 2)) return `${(bytes/1024).toFixed(2)}kiB`
|
||||||
|
if(bytes < Math.pow(1024, 3)) return `${(bytes/Math.pow(1024, 2)).toFixed(2)}MiB`
|
||||||
|
if(bytes < Math.pow(1024, 4)) return `${(bytes/Math.pow(1024, 3)).toFixed(2)}GiB`
|
||||||
|
if(bytes < Math.pow(1024, 5)) return `${(bytes/Math.pow(1024, 4)).toFixed(2)}TiB`
|
||||||
|
return "not only is it big, it's large"
|
||||||
|
}
|
||||||
|
|
||||||
|
const API_VERSION = 1
|
||||||
|
const ENDPOINT = `/api/v${API_VERSION}`
|
||||||
|
class InfoPayload {
|
||||||
|
count: number
|
||||||
|
hakurei_version: string
|
||||||
|
}
|
||||||
|
|
||||||
|
async function infoRequest(): Promise<InfoPayload> {
|
||||||
|
const res = await fetch(`${ENDPOINT}/info`)
|
||||||
|
const payload = await res.json()
|
||||||
|
return payload as InfoPayload
|
||||||
|
}
|
||||||
|
class GetPayload {
|
||||||
|
values: PackageIndexEntry[]
|
||||||
|
}
|
||||||
|
|
||||||
|
enum SortOrders {
|
||||||
|
DeclarationAscending,
|
||||||
|
DeclarationDescending,
|
||||||
|
NameAscending,
|
||||||
|
NameDescending
|
||||||
|
}
|
||||||
|
async function getRequest(limit: number, index: number, sort: SortOrders): Promise<GetPayload> {
|
||||||
|
const res = await fetch(`${ENDPOINT}/get?limit=${limit}&index=${index}&sort=${sort.valueOf()}`)
|
||||||
|
const payload = await res.json()
|
||||||
|
return payload as GetPayload
|
||||||
|
}
|
||||||
|
class State {
|
||||||
|
entriesPerPage: number = 10
|
||||||
|
entryIndex: number = 0
|
||||||
|
maxEntries: number = 0
|
||||||
|
sort: SortOrders = SortOrders.DeclarationAscending
|
||||||
|
|
||||||
|
getEntriesPerPage(): number {
|
||||||
|
return this.entriesPerPage
|
||||||
|
}
|
||||||
|
setEntriesPerPage(entriesPerPage: number) {
|
||||||
|
this.entriesPerPage = entriesPerPage
|
||||||
|
this.setEntryIndex(Math.floor(this.getEntryIndex() / entriesPerPage) * entriesPerPage)
|
||||||
|
}
|
||||||
|
getEntryIndex(): number {
|
||||||
|
return this.entryIndex
|
||||||
|
}
|
||||||
|
setEntryIndex(entryIndex: number) {
|
||||||
|
this.entryIndex = entryIndex
|
||||||
|
this.updatePage()
|
||||||
|
this.updateRange()
|
||||||
|
this.updateListings()
|
||||||
|
}
|
||||||
|
getMaxEntries(): number {
|
||||||
|
return this.maxEntries
|
||||||
|
}
|
||||||
|
setMaxEntries(max: number) {
|
||||||
|
this.maxEntries = max
|
||||||
|
}
|
||||||
|
getSortOrder(): SortOrders {
|
||||||
|
return this.sort
|
||||||
|
}
|
||||||
|
setSortOrder(sortOrder: SortOrders) {
|
||||||
|
this.sort = sortOrder
|
||||||
|
this.setEntryIndex(0)
|
||||||
|
}
|
||||||
|
updatePage() {
|
||||||
|
let page = Math.ceil(((this.getEntryIndex() + this.getEntriesPerPage()) - 1) / this.getEntriesPerPage())
|
||||||
|
document.getElementById("page-number").innerText = String(page)
|
||||||
|
}
|
||||||
|
updateRange() {
|
||||||
|
let max = Math.min(this.getEntryIndex() + this.getEntriesPerPage(), this.getMaxEntries())
|
||||||
|
document.getElementById("entry-counter").innerText = `${this.getEntryIndex() + 1}-${max} of ${this.getMaxEntries()}`
|
||||||
|
}
|
||||||
|
updateListings() {
|
||||||
|
getRequest(this.getEntriesPerPage(), this.getEntryIndex(), this.getSortOrder())
|
||||||
|
.then(res => {
|
||||||
|
let table = document.getElementById("pkg-list")
|
||||||
|
table.innerHTML = ''
|
||||||
|
res.values.forEach((row) => {
|
||||||
|
table.appendChild(toHTML(row))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
let STATE: State
|
||||||
|
|
||||||
|
function prevPage() {
|
||||||
|
let index = STATE.getEntryIndex()
|
||||||
|
STATE.setEntryIndex(Math.max(0, index - STATE.getEntriesPerPage()))
|
||||||
|
}
|
||||||
|
function nextPage() {
|
||||||
|
let index = STATE.getEntryIndex()
|
||||||
|
STATE.setEntryIndex(Math.min((Math.ceil(STATE.getMaxEntries() / STATE.getEntriesPerPage()) * STATE.getEntriesPerPage()) - STATE.getEntriesPerPage(), index + STATE.getEntriesPerPage()))
|
||||||
|
}
|
||||||
|
|
||||||
|
function escapeHtml(str: string): string {
|
||||||
|
if(str === undefined) return ""
|
||||||
|
return str
|
||||||
|
.replace(/&/g, '&')
|
||||||
|
.replace(/</g, '<')
|
||||||
|
.replace(/>/g, '>')
|
||||||
|
.replace(/"/g, '"')
|
||||||
|
.replace(/'/g, ''')
|
||||||
|
}
|
||||||
|
|
||||||
|
document.addEventListener("DOMContentLoaded", () => {
|
||||||
|
STATE = new State()
|
||||||
|
infoRequest()
|
||||||
|
.then(res => {
|
||||||
|
STATE.setMaxEntries(res.count)
|
||||||
|
document.getElementById("hakurei-version").innerText = res.hakurei_version
|
||||||
|
STATE.updateRange()
|
||||||
|
STATE.updateListings()
|
||||||
|
})
|
||||||
|
|
||||||
|
document.getElementById("count").addEventListener("change", (event) => {
|
||||||
|
STATE.setEntriesPerPage(parseInt((event.target as HTMLSelectElement).value))
|
||||||
|
})
|
||||||
|
document.getElementById("sort").addEventListener("change", (event) => {
|
||||||
|
STATE.setSortOrder(parseInt((event.target as HTMLSelectElement).value))
|
||||||
|
})
|
||||||
|
})
|
||||||
6
cmd/pkgserver/ui/static/light.scss
Normal file
6
cmd/pkgserver/ui/static/light.scss
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
@use 'common';
|
||||||
|
|
||||||
|
html {
|
||||||
|
background-color: #d3d3d3;
|
||||||
|
color: black;
|
||||||
|
}
|
||||||
4
cmd/pkgserver/ui/static/run_tests.ts
Normal file
4
cmd/pkgserver/ui/static/run_tests.ts
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
#!/usr/bin/env node
|
||||||
|
import "./test_tests.js";
|
||||||
|
import { run, StreamReporter } from "./test.js";
|
||||||
|
run(new StreamReporter({ writeln: console.log }));
|
||||||
27
cmd/pkgserver/ui/static/test.scss
Normal file
27
cmd/pkgserver/ui/static/test.scss
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
.root {
|
||||||
|
margin: 1rem 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
details.test-node {
|
||||||
|
margin-left: 1rem;
|
||||||
|
padding: 0.2rem 0.5rem;
|
||||||
|
border-left: 2px dashed black;
|
||||||
|
> summary {
|
||||||
|
cursor: pointer;
|
||||||
|
}
|
||||||
|
&.failure > summary::marker {
|
||||||
|
color: red;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
p.test-desc {
|
||||||
|
margin: 0 0 0 1rem;
|
||||||
|
padding: 2px 0;
|
||||||
|
> pre {
|
||||||
|
margin: 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
.italic {
|
||||||
|
font-style: italic;
|
||||||
|
}
|
||||||
250
cmd/pkgserver/ui/static/test.ts
Normal file
250
cmd/pkgserver/ui/static/test.ts
Normal file
@@ -0,0 +1,250 @@
|
|||||||
|
// =============================================================================
|
||||||
|
// DSL
|
||||||
|
|
||||||
|
type TestTree = { name: string } & (TestGroup | Test);
|
||||||
|
type TestGroup = { children: TestTree[] };
|
||||||
|
type Test = { test: (TestController) => void };
|
||||||
|
|
||||||
|
let TESTS: ({ name: string } & TestGroup)[] = [];
|
||||||
|
|
||||||
|
export function suite(name: string, children: TestTree[]) {
|
||||||
|
checkDuplicates(name, children)
|
||||||
|
TESTS.push({ name, children });
|
||||||
|
}
|
||||||
|
|
||||||
|
export function context(name: string, children: TestTree[]): TestTree {
|
||||||
|
checkDuplicates(name, children)
|
||||||
|
return { name, children };
|
||||||
|
}
|
||||||
|
export const group = context;
|
||||||
|
|
||||||
|
export function test(name: string, test: (TestController) => void): TestTree {
|
||||||
|
return { name, test };
|
||||||
|
}
|
||||||
|
|
||||||
|
function checkDuplicates(parent: string, names: { name: string }[]) {
|
||||||
|
let seen = new Set<string>();
|
||||||
|
for (const { name } of names) {
|
||||||
|
if (seen.has(name)) {
|
||||||
|
throw new RangeError(`duplicate name '${name}' in '${parent}'`);
|
||||||
|
}
|
||||||
|
seen.add(name);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
class FailNowSentinel {}
|
||||||
|
|
||||||
|
class TestController {
|
||||||
|
#logBuf: string[];
|
||||||
|
#failed: boolean;
|
||||||
|
|
||||||
|
constructor() {
|
||||||
|
this.#logBuf = [];
|
||||||
|
this.#failed = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
fail() {
|
||||||
|
this.#failed = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
failed(): boolean {
|
||||||
|
return this.#failed;
|
||||||
|
}
|
||||||
|
|
||||||
|
failNow(): never {
|
||||||
|
this.fail();
|
||||||
|
throw new FailNowSentinel();
|
||||||
|
}
|
||||||
|
|
||||||
|
log(message: string) {
|
||||||
|
this.#logBuf.push(message);
|
||||||
|
}
|
||||||
|
|
||||||
|
error(message: string) {
|
||||||
|
this.log(message);
|
||||||
|
this.fail();
|
||||||
|
}
|
||||||
|
|
||||||
|
fatal(message: string): never {
|
||||||
|
this.log(message);
|
||||||
|
this.failNow();
|
||||||
|
}
|
||||||
|
|
||||||
|
getLog(): string {
|
||||||
|
return this.#logBuf.join("\n");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// =============================================================================
|
||||||
|
// Execution
|
||||||
|
|
||||||
|
export interface TestResult {
|
||||||
|
success: boolean;
|
||||||
|
output: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
function runTests(reporter: Reporter, parents: string[], tree: TestTree) {
|
||||||
|
const path = [...parents, tree.name];
|
||||||
|
if ("children" in tree) {
|
||||||
|
for (const c of tree.children) runTests(reporter, path, c);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
let controller = new TestController();
|
||||||
|
let excStr: string;
|
||||||
|
try {
|
||||||
|
tree.test(controller);
|
||||||
|
} catch (e) {
|
||||||
|
if (!(e instanceof FailNowSentinel)) {
|
||||||
|
controller.fail();
|
||||||
|
excStr = extractExceptionString(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
const log = controller.getLog();
|
||||||
|
const output = (log && excStr) ? `${log}\n${excStr}` : `${log}${excStr ?? ''}`;
|
||||||
|
reporter.update(path, { success: !controller.failed(), output });
|
||||||
|
}
|
||||||
|
|
||||||
|
export function run(reporter: Reporter) {
|
||||||
|
for (const suite of TESTS) {
|
||||||
|
for (const c of suite.children) runTests(reporter, [suite.name], c);
|
||||||
|
}
|
||||||
|
reporter.finalize();
|
||||||
|
}
|
||||||
|
|
||||||
|
function extractExceptionString(e: any): string {
|
||||||
|
// String() instead of .toString() as null and undefined don't have
|
||||||
|
// properties.
|
||||||
|
const s = String(e);
|
||||||
|
if (!(e instanceof Error && "stack" in e)) return s;
|
||||||
|
// v8 (Chromium, NodeJS) include the error message, while
|
||||||
|
// Firefox and WebKit do not.
|
||||||
|
if (e.stack.includes(s)) return e.stack;
|
||||||
|
return `${s}\n${e.stack}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
// =============================================================================
|
||||||
|
// Reporting
|
||||||
|
|
||||||
|
export interface Reporter {
|
||||||
|
update(path: string[], result: TestResult): void;
|
||||||
|
finalize(): void;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface Stream {
|
||||||
|
writeln(s: string): void;
|
||||||
|
}
|
||||||
|
|
||||||
|
export class StreamReporter implements Reporter {
|
||||||
|
stream: Stream;
|
||||||
|
verbose: boolean;
|
||||||
|
#failures: ({ path: string[] } & TestResult)[];
|
||||||
|
counts: { successes: number, failures: number };
|
||||||
|
|
||||||
|
constructor(stream: Stream, verbose: boolean = false) {
|
||||||
|
this.stream = stream;
|
||||||
|
this.verbose = verbose;
|
||||||
|
this.#failures = [];
|
||||||
|
this.counts = { successes: 0, failures: 0 };
|
||||||
|
}
|
||||||
|
|
||||||
|
update(path: string[], result: TestResult) {
|
||||||
|
if (path.length === 0) throw new RangeError("path is empty");
|
||||||
|
const pathStr = path.join(" ❯ ");
|
||||||
|
if (result.success) {
|
||||||
|
this.counts.successes++;
|
||||||
|
if (this.verbose) this.stream.writeln(`✅️ ${pathStr}`);
|
||||||
|
} else {
|
||||||
|
this.counts.failures++;
|
||||||
|
this.stream.writeln(`⚠️ ${pathStr}`);
|
||||||
|
this.#failures.push({ path, ...result });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
finalize() {
|
||||||
|
// Transform [{ path: ["a", "b", "c"] }, { path: ["a", "b", "d"] }]
|
||||||
|
// into { "a ❯ b": ["c", "d"] }.
|
||||||
|
let pathMap = new Map<string, ({ name: string } & TestResult)[]>();
|
||||||
|
for (const f of this.#failures) {
|
||||||
|
const key = f.path.slice(0, -1).join(" ❯ ");
|
||||||
|
if (!pathMap.has(key)) pathMap.set(key, []);
|
||||||
|
pathMap.get(key).push({ name: f.path.at(-1), ...f });
|
||||||
|
}
|
||||||
|
|
||||||
|
this.stream.writeln("");
|
||||||
|
this.stream.writeln("FAILURES");
|
||||||
|
this.stream.writeln("========");
|
||||||
|
|
||||||
|
for (const [path, tests] of pathMap) {
|
||||||
|
if (tests.length === 1) {
|
||||||
|
this.#writeOutput(tests[0], path ? `${path} ❯ ` : "", false);
|
||||||
|
} else {
|
||||||
|
this.stream.writeln(path);
|
||||||
|
for (const t of tests) this.#writeOutput(t, " - ", true);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
this.stream.writeln("");
|
||||||
|
const { successes, failures } = this.counts;
|
||||||
|
this.stream.writeln(`${successes} succeeded, ${failures} failed`);
|
||||||
|
}
|
||||||
|
|
||||||
|
#writeOutput(test: { name: string } & TestResult, prefix: string, nested: boolean) {
|
||||||
|
let output = "";
|
||||||
|
if (test.output) {
|
||||||
|
const lines = test.output.split("\n");
|
||||||
|
if (lines.length <= 1) {
|
||||||
|
output = `: ${test.output}`;
|
||||||
|
} else {
|
||||||
|
const padding = nested ? " " : " ";
|
||||||
|
output = ":\n" + lines.map((line) => padding + line).join("\n");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
this.stream.writeln(`${prefix}${test.name}${output}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export class DOMReporter implements Reporter {
|
||||||
|
update(path: string[], result: TestResult) {
|
||||||
|
if (path.length === 0) throw new RangeError("path is empty");
|
||||||
|
const counter = document.getElementById(result.success ? "successes" : "failures");
|
||||||
|
counter.innerText = (Number(counter.innerText) + 1).toString();
|
||||||
|
let parent = document.getElementById("root");
|
||||||
|
for (const node of path) {
|
||||||
|
let child = null;
|
||||||
|
outer: for (const d of parent.children) {
|
||||||
|
for (const s of d.children) {
|
||||||
|
if (!(s instanceof HTMLElement)) continue;
|
||||||
|
if (s.tagName !== "SUMMARY" || s.innerText !== node) continue;
|
||||||
|
child = d;
|
||||||
|
break outer;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (child === null) {
|
||||||
|
child = document.createElement("details");
|
||||||
|
child.className = "test-node";
|
||||||
|
const summary = document.createElement("summary");
|
||||||
|
summary.appendChild(document.createTextNode(node));
|
||||||
|
child.appendChild(summary);
|
||||||
|
parent.appendChild(child);
|
||||||
|
}
|
||||||
|
if (!result.success) {
|
||||||
|
child.open = true;
|
||||||
|
child.classList.add("failure");
|
||||||
|
}
|
||||||
|
parent = child;
|
||||||
|
}
|
||||||
|
const p = document.createElement("p");
|
||||||
|
p.classList.add("test-desc");
|
||||||
|
if (result.output) {
|
||||||
|
const pre = document.createElement("pre");
|
||||||
|
pre.appendChild(document.createTextNode(result.output));
|
||||||
|
p.appendChild(pre);
|
||||||
|
} else {
|
||||||
|
p.classList.add("italic");
|
||||||
|
p.appendChild(document.createTextNode("No output."));
|
||||||
|
}
|
||||||
|
parent.appendChild(p);
|
||||||
|
}
|
||||||
|
|
||||||
|
finalize() {}
|
||||||
|
}
|
||||||
40
cmd/pkgserver/ui/static/test_tests.ts
Normal file
40
cmd/pkgserver/ui/static/test_tests.ts
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
import { context, group, suite, test } from "./test.js";
|
||||||
|
|
||||||
|
suite("dog", [
|
||||||
|
group("tail", [
|
||||||
|
test("wags when happy", (t) => {
|
||||||
|
if (0 / 0 !== Infinity / Infinity) {
|
||||||
|
t.fatal("undefined must not be defined");
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
test("idle when down", (t) => {
|
||||||
|
t.log("test test");
|
||||||
|
t.error("dog whining noises go here");
|
||||||
|
}),
|
||||||
|
]),
|
||||||
|
test("likes headpats", (t) => {
|
||||||
|
if (2 !== 2) {
|
||||||
|
t.error("IEEE 754 violated: 2 is NaN");
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
context("near cat", [
|
||||||
|
test("is ecstatic", (t) => {
|
||||||
|
if (("b" + "a" + + "a" + "a").toLowerCase() == "banana") {
|
||||||
|
t.error("🍌🍌🍌");
|
||||||
|
t.error("🍌🍌🍌");
|
||||||
|
t.error("🍌🍌🍌");
|
||||||
|
t.failNow();
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
test("playfully bites cats' tails", (t) => {
|
||||||
|
t.log("arf!");
|
||||||
|
throw new Error("nom");
|
||||||
|
}),
|
||||||
|
]),
|
||||||
|
]);
|
||||||
|
|
||||||
|
suite("cat", [
|
||||||
|
test("likes headpats", (t) => {
|
||||||
|
t.log("meow");
|
||||||
|
}),
|
||||||
|
]);
|
||||||
5
cmd/pkgserver/ui/static/tsconfig.json
Normal file
5
cmd/pkgserver/ui/static/tsconfig.json
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
{
|
||||||
|
"compilerOptions": {
|
||||||
|
"target": "ES2024"
|
||||||
|
}
|
||||||
|
}
|
||||||
28
cmd/pkgserver/ui/test.html
Normal file
28
cmd/pkgserver/ui/test.html
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
<!DOCTYPE html>
|
||||||
|
<html lang="en">
|
||||||
|
<head>
|
||||||
|
<meta charset="UTF-8">
|
||||||
|
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||||
|
<link rel="stylesheet" href="static/style.css">
|
||||||
|
<link rel="stylesheet" href="static/test.css">
|
||||||
|
<title>PkgServer Tests</title>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<h1>PkgServer Tests</h1>
|
||||||
|
|
||||||
|
<main>
|
||||||
|
<div id="counters">
|
||||||
|
<span id="successes">0</span> succeeded, <span id="failures">0</span> failed.
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div id="root">
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<script type="module" src="./static/test_tests.js"></script>
|
||||||
|
<script type="module">
|
||||||
|
import { DOMReporter, run } from "./static/test.js";
|
||||||
|
run(new DOMReporter());
|
||||||
|
</script>
|
||||||
|
</main>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
9
cmd/pkgserver/ui_full.go
Normal file
9
cmd/pkgserver/ui_full.go
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
//go:build frontend
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import "embed"
|
||||||
|
|
||||||
|
//go:generate sh -c "sass ui/static/dark.scss ui/static/dark.css && sass ui/static/light.scss ui/static/light.css && sass ui/static/test.scss ui/static/test.css && tsc -p ui/static"
|
||||||
|
//go:embed ui/*
|
||||||
|
var content embed.FS
|
||||||
7
cmd/pkgserver/ui_stub.go
Normal file
7
cmd/pkgserver/ui_stub.go
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
//go:build !frontend
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import "testing/fstest"
|
||||||
|
|
||||||
|
var content fstest.MapFS
|
||||||
@@ -33,6 +33,7 @@ import (
|
|||||||
|
|
||||||
"hakurei.app/container"
|
"hakurei.app/container"
|
||||||
"hakurei.app/container/check"
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/container/fhs"
|
||||||
"hakurei.app/container/std"
|
"hakurei.app/container/std"
|
||||||
"hakurei.app/hst"
|
"hakurei.app/hst"
|
||||||
"hakurei.app/internal/helper/proc"
|
"hakurei.app/internal/helper/proc"
|
||||||
@@ -441,12 +442,7 @@ func _main(s ...string) (exitCode int) {
|
|||||||
// keep fuse_parse_cmdline happy in the container
|
// keep fuse_parse_cmdline happy in the container
|
||||||
z.Tmpfs(check.MustAbs(container.Nonexistent), 1<<10, 0755)
|
z.Tmpfs(check.MustAbs(container.Nonexistent), 1<<10, 0755)
|
||||||
|
|
||||||
if a, err := check.NewAbs(container.MustExecutable(msg)); err != nil {
|
z.Path = fhs.AbsProcSelfExe
|
||||||
log.Println(err)
|
|
||||||
return 5
|
|
||||||
} else {
|
|
||||||
z.Path = a
|
|
||||||
}
|
|
||||||
z.Args = s
|
z.Args = s
|
||||||
z.ForwardCancel = true
|
z.ForwardCancel = true
|
||||||
z.SeccompPresets |= std.PresetStrict
|
z.SeccompPresets |= std.PresetStrict
|
||||||
|
|||||||
@@ -10,8 +10,7 @@ import (
|
|||||||
|
|
||||||
func init() { gob.Register(new(AutoEtcOp)) }
|
func init() { gob.Register(new(AutoEtcOp)) }
|
||||||
|
|
||||||
// Etc appends an [Op] that expands host /etc into a toplevel symlink mirror with /etc semantics.
|
// Etc is a helper for appending [AutoEtcOp] to [Ops].
|
||||||
// This is not a generic setup op. It is implemented here to reduce ipc overhead.
|
|
||||||
func (f *Ops) Etc(host *check.Absolute, prefix string) *Ops {
|
func (f *Ops) Etc(host *check.Absolute, prefix string) *Ops {
|
||||||
e := &AutoEtcOp{prefix}
|
e := &AutoEtcOp{prefix}
|
||||||
f.Mkdir(fhs.AbsEtc, 0755)
|
f.Mkdir(fhs.AbsEtc, 0755)
|
||||||
@@ -20,6 +19,9 @@ func (f *Ops) Etc(host *check.Absolute, prefix string) *Ops {
|
|||||||
return f
|
return f
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AutoEtcOp expands host /etc into a toplevel symlink mirror with /etc semantics.
|
||||||
|
//
|
||||||
|
// This is not a generic setup op. It is implemented here to reduce ipc overhead.
|
||||||
type AutoEtcOp struct{ Prefix string }
|
type AutoEtcOp struct{ Prefix string }
|
||||||
|
|
||||||
func (e *AutoEtcOp) Valid() bool { return e != nil }
|
func (e *AutoEtcOp) Valid() bool { return e != nil }
|
||||||
|
|||||||
@@ -11,13 +11,15 @@ import (
|
|||||||
|
|
||||||
func init() { gob.Register(new(AutoRootOp)) }
|
func init() { gob.Register(new(AutoRootOp)) }
|
||||||
|
|
||||||
// Root appends an [Op] that expands a directory into a toplevel bind mount mirror on container root.
|
// Root is a helper for appending [AutoRootOp] to [Ops].
|
||||||
// This is not a generic setup op. It is implemented here to reduce ipc overhead.
|
|
||||||
func (f *Ops) Root(host *check.Absolute, flags int) *Ops {
|
func (f *Ops) Root(host *check.Absolute, flags int) *Ops {
|
||||||
*f = append(*f, &AutoRootOp{host, flags, nil})
|
*f = append(*f, &AutoRootOp{host, flags, nil})
|
||||||
return f
|
return f
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AutoRootOp expands a directory into a toplevel bind mount mirror on container root.
|
||||||
|
//
|
||||||
|
// This is not a generic setup op. It is implemented here to reduce ipc overhead.
|
||||||
type AutoRootOp struct {
|
type AutoRootOp struct {
|
||||||
Host *check.Absolute
|
Host *check.Absolute
|
||||||
// passed through to bindMount
|
// passed through to bindMount
|
||||||
|
|||||||
@@ -50,10 +50,16 @@ func capset(hdrp *capHeader, datap *[2]capData) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// capBoundingSetDrop drops a capability from the calling thread's capability bounding set.
|
// capBoundingSetDrop drops a capability from the calling thread's capability bounding set.
|
||||||
func capBoundingSetDrop(cap uintptr) error { return Prctl(syscall.PR_CAPBSET_DROP, cap, 0) }
|
func capBoundingSetDrop(cap uintptr) error {
|
||||||
|
return Prctl(syscall.PR_CAPBSET_DROP, cap, 0)
|
||||||
|
}
|
||||||
|
|
||||||
// capAmbientClearAll clears the ambient capability set of the calling thread.
|
// capAmbientClearAll clears the ambient capability set of the calling thread.
|
||||||
func capAmbientClearAll() error { return Prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_CLEAR_ALL, 0) }
|
func capAmbientClearAll() error {
|
||||||
|
return Prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_CLEAR_ALL, 0)
|
||||||
|
}
|
||||||
|
|
||||||
// capAmbientRaise adds to the ambient capability set of the calling thread.
|
// capAmbientRaise adds to the ambient capability set of the calling thread.
|
||||||
func capAmbientRaise(cap uintptr) error { return Prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, cap) }
|
func capAmbientRaise(cap uintptr) error {
|
||||||
|
return Prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, cap)
|
||||||
|
}
|
||||||
|
|||||||
@@ -11,7 +11,8 @@ const (
|
|||||||
SpecialOverlayPath = ":"
|
SpecialOverlayPath = ":"
|
||||||
)
|
)
|
||||||
|
|
||||||
// EscapeOverlayDataSegment escapes a string for formatting into the data argument of an overlay mount call.
|
// EscapeOverlayDataSegment escapes a string for formatting into the data
|
||||||
|
// argument of an overlay mount system call.
|
||||||
func EscapeOverlayDataSegment(s string) string {
|
func EscapeOverlayDataSegment(s string) string {
|
||||||
if s == "" {
|
if s == "" {
|
||||||
return ""
|
return ""
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
// Package container implements unprivileged Linux containers with built-in support for syscall filtering.
|
// Package container implements unprivileged Linux containers with built-in
|
||||||
|
// support for syscall filtering.
|
||||||
package container
|
package container
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -37,24 +38,34 @@ type (
|
|||||||
Container struct {
|
Container struct {
|
||||||
// Whether the container init should stay alive after its parent terminates.
|
// Whether the container init should stay alive after its parent terminates.
|
||||||
AllowOrphan bool
|
AllowOrphan bool
|
||||||
|
// Whether to set SchedPolicy and SchedPriority via sched_setscheduler(2).
|
||||||
|
SetScheduler bool
|
||||||
|
// Scheduling policy to set via sched_setscheduler(2).
|
||||||
|
SchedPolicy std.SchedPolicy
|
||||||
|
// Scheduling priority to set via sched_setscheduler(2). The zero value
|
||||||
|
// implies the minimum value supported by the current SchedPolicy.
|
||||||
|
SchedPriority std.Int
|
||||||
// Cgroup fd, nil to disable.
|
// Cgroup fd, nil to disable.
|
||||||
Cgroup *int
|
Cgroup *int
|
||||||
// ExtraFiles passed through to initial process in the container,
|
// ExtraFiles passed through to initial process in the container, with
|
||||||
// with behaviour identical to its [exec.Cmd] counterpart.
|
// behaviour identical to its [exec.Cmd] counterpart.
|
||||||
ExtraFiles []*os.File
|
ExtraFiles []*os.File
|
||||||
|
|
||||||
// param pipe for shim and init
|
// Write end of a pipe connected to the init to deliver [Params].
|
||||||
setup *os.File
|
setup *os.File
|
||||||
// cancels cmd
|
// Cancels the context passed to the underlying cmd.
|
||||||
cancel context.CancelFunc
|
cancel context.CancelFunc
|
||||||
// closed after Wait returns
|
// Closed after Wait returns. Keeps the spawning thread alive.
|
||||||
wait chan struct{}
|
wait chan struct{}
|
||||||
|
|
||||||
Stdin io.Reader
|
Stdin io.Reader
|
||||||
Stdout io.Writer
|
Stdout io.Writer
|
||||||
Stderr io.Writer
|
Stderr io.Writer
|
||||||
|
|
||||||
|
// Custom cancellation behaviour for the underlying [exec.Cmd]. Must
|
||||||
|
// deliver [CancelSignal] before returning.
|
||||||
Cancel func(cmd *exec.Cmd) error
|
Cancel func(cmd *exec.Cmd) error
|
||||||
|
// Copied to the underlying [exec.Cmd].
|
||||||
WaitDelay time.Duration
|
WaitDelay time.Duration
|
||||||
|
|
||||||
cmd *exec.Cmd
|
cmd *exec.Cmd
|
||||||
@@ -283,7 +294,11 @@ func (p *Container) Start() error {
|
|||||||
|
|
||||||
// place setup pipe before user supplied extra files, this is later restored by init
|
// place setup pipe before user supplied extra files, this is later restored by init
|
||||||
if fd, f, err := Setup(&p.cmd.ExtraFiles); err != nil {
|
if fd, f, err := Setup(&p.cmd.ExtraFiles); err != nil {
|
||||||
return &StartError{true, "set up params stream", err, false, false}
|
return &StartError{
|
||||||
|
Fatal: true,
|
||||||
|
Step: "set up params stream",
|
||||||
|
Err: err,
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
p.setup = f
|
p.setup = f
|
||||||
p.cmd.Env = []string{setupEnv + "=" + strconv.Itoa(fd)}
|
p.cmd.Env = []string{setupEnv + "=" + strconv.Itoa(fd)}
|
||||||
@@ -295,10 +310,16 @@ func (p *Container) Start() error {
|
|||||||
runtime.LockOSThread()
|
runtime.LockOSThread()
|
||||||
p.wait = make(chan struct{})
|
p.wait = make(chan struct{})
|
||||||
|
|
||||||
done <- func() error { // setup depending on per-thread state must happen here
|
// setup depending on per-thread state must happen here
|
||||||
// PR_SET_NO_NEW_PRIVS: depends on per-thread state but acts on all processes created from that thread
|
done <- func() error {
|
||||||
|
// PR_SET_NO_NEW_PRIVS: thread-directed but acts on all processes
|
||||||
|
// created from the calling thread
|
||||||
if err := SetNoNewPrivs(); err != nil {
|
if err := SetNoNewPrivs(); err != nil {
|
||||||
return &StartError{true, "prctl(PR_SET_NO_NEW_PRIVS)", err, false, false}
|
return &StartError{
|
||||||
|
Fatal: true,
|
||||||
|
Step: "prctl(PR_SET_NO_NEW_PRIVS)",
|
||||||
|
Err: err,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// landlock: depends on per-thread state but acts on a process group
|
// landlock: depends on per-thread state but acts on a process group
|
||||||
@@ -310,28 +331,40 @@ func (p *Container) Start() error {
|
|||||||
|
|
||||||
if abi, err := LandlockGetABI(); err != nil {
|
if abi, err := LandlockGetABI(); err != nil {
|
||||||
if p.HostAbstract {
|
if p.HostAbstract {
|
||||||
// landlock can be skipped here as it restricts access to resources
|
// landlock can be skipped here as it restricts access
|
||||||
// already covered by namespaces (pid)
|
// to resources already covered by namespaces (pid)
|
||||||
goto landlockOut
|
goto landlockOut
|
||||||
}
|
}
|
||||||
return &StartError{false, "get landlock ABI", err, false, false}
|
return &StartError{Step: "get landlock ABI", Err: err}
|
||||||
} else if abi < 6 {
|
} else if abi < 6 {
|
||||||
if p.HostAbstract {
|
if p.HostAbstract {
|
||||||
// see above comment
|
// see above comment
|
||||||
goto landlockOut
|
goto landlockOut
|
||||||
}
|
}
|
||||||
return &StartError{false, "kernel version too old for LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET", ENOSYS, true, false}
|
return &StartError{
|
||||||
|
Step: "kernel too old for LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET",
|
||||||
|
Err: ENOSYS,
|
||||||
|
Origin: true,
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
p.msg.Verbosef("landlock abi version %d", abi)
|
p.msg.Verbosef("landlock abi version %d", abi)
|
||||||
}
|
}
|
||||||
|
|
||||||
if rulesetFd, err := rulesetAttr.Create(0); err != nil {
|
if rulesetFd, err := rulesetAttr.Create(0); err != nil {
|
||||||
return &StartError{true, "create landlock ruleset", err, false, false}
|
return &StartError{
|
||||||
|
Fatal: true,
|
||||||
|
Step: "create landlock ruleset",
|
||||||
|
Err: err,
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
p.msg.Verbosef("enforcing landlock ruleset %s", rulesetAttr)
|
p.msg.Verbosef("enforcing landlock ruleset %s", rulesetAttr)
|
||||||
if err = LandlockRestrictSelf(rulesetFd, 0); err != nil {
|
if err = LandlockRestrictSelf(rulesetFd, 0); err != nil {
|
||||||
_ = Close(rulesetFd)
|
_ = Close(rulesetFd)
|
||||||
return &StartError{true, "enforce landlock ruleset", err, false, false}
|
return &StartError{
|
||||||
|
Fatal: true,
|
||||||
|
Step: "enforce landlock ruleset",
|
||||||
|
Err: err,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if err = Close(rulesetFd); err != nil {
|
if err = Close(rulesetFd); err != nil {
|
||||||
p.msg.Verbosef("cannot close landlock ruleset: %v", err)
|
p.msg.Verbosef("cannot close landlock ruleset: %v", err)
|
||||||
@@ -342,9 +375,52 @@ func (p *Container) Start() error {
|
|||||||
landlockOut:
|
landlockOut:
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// sched_setscheduler: thread-directed but acts on all processes
|
||||||
|
// created from the calling thread
|
||||||
|
if p.SetScheduler {
|
||||||
|
if p.SchedPolicy < 0 || p.SchedPolicy > std.SCHED_LAST {
|
||||||
|
return &StartError{
|
||||||
|
Fatal: false,
|
||||||
|
Step: "set scheduling policy",
|
||||||
|
Err: EINVAL,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var param schedParam
|
||||||
|
if priority, err := p.SchedPolicy.GetPriorityMin(); err != nil {
|
||||||
|
return &StartError{
|
||||||
|
Fatal: true,
|
||||||
|
Step: "get minimum priority",
|
||||||
|
Err: err,
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
param.priority = max(priority, p.SchedPriority)
|
||||||
|
}
|
||||||
|
|
||||||
|
p.msg.Verbosef(
|
||||||
|
"setting scheduling policy %s priority %d",
|
||||||
|
p.SchedPolicy, param.priority,
|
||||||
|
)
|
||||||
|
if err := schedSetscheduler(
|
||||||
|
0, // calling thread
|
||||||
|
p.SchedPolicy,
|
||||||
|
¶m,
|
||||||
|
); err != nil {
|
||||||
|
return &StartError{
|
||||||
|
Fatal: true,
|
||||||
|
Step: "set scheduling policy",
|
||||||
|
Err: err,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
p.msg.Verbose("starting container init")
|
p.msg.Verbose("starting container init")
|
||||||
if err := p.cmd.Start(); err != nil {
|
if err := p.cmd.Start(); err != nil {
|
||||||
return &StartError{false, "start container init", err, false, true}
|
return &StartError{
|
||||||
|
Step: "start container init",
|
||||||
|
Err: err,
|
||||||
|
Passthrough: true,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}()
|
}()
|
||||||
@@ -356,6 +432,7 @@ func (p *Container) Start() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Serve serves [Container.Params] to the container init.
|
// Serve serves [Container.Params] to the container init.
|
||||||
|
//
|
||||||
// Serve must only be called once.
|
// Serve must only be called once.
|
||||||
func (p *Container) Serve() error {
|
func (p *Container) Serve() error {
|
||||||
if p.setup == nil {
|
if p.setup == nil {
|
||||||
@@ -365,12 +442,21 @@ func (p *Container) Serve() error {
|
|||||||
setup := p.setup
|
setup := p.setup
|
||||||
p.setup = nil
|
p.setup = nil
|
||||||
if err := setup.SetDeadline(time.Now().Add(initSetupTimeout)); err != nil {
|
if err := setup.SetDeadline(time.Now().Add(initSetupTimeout)); err != nil {
|
||||||
return &StartError{true, "set init pipe deadline", err, false, true}
|
return &StartError{
|
||||||
|
Fatal: true,
|
||||||
|
Step: "set init pipe deadline",
|
||||||
|
Err: err,
|
||||||
|
Passthrough: true,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if p.Path == nil {
|
if p.Path == nil {
|
||||||
p.cancel()
|
p.cancel()
|
||||||
return &StartError{false, "invalid executable pathname", EINVAL, true, false}
|
return &StartError{
|
||||||
|
Step: "invalid executable pathname",
|
||||||
|
Err: EINVAL,
|
||||||
|
Origin: true,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// do not transmit nil
|
// do not transmit nil
|
||||||
@@ -395,7 +481,8 @@ func (p *Container) Serve() error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wait waits for the container init process to exit and releases any resources associated with the [Container].
|
// Wait blocks until the container init process to exit and releases any
|
||||||
|
// resources associated with the [Container].
|
||||||
func (p *Container) Wait() error {
|
func (p *Container) Wait() error {
|
||||||
if p.cmd == nil || p.cmd.Process == nil {
|
if p.cmd == nil || p.cmd.Process == nil {
|
||||||
return EINVAL
|
return EINVAL
|
||||||
@@ -440,11 +527,13 @@ func (p *Container) StderrPipe() (r io.ReadCloser, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (p *Container) String() string {
|
func (p *Container) String() string {
|
||||||
return fmt.Sprintf("argv: %q, filter: %v, rules: %d, flags: %#x, presets: %#x",
|
return fmt.Sprintf(
|
||||||
p.Args, !p.SeccompDisable, len(p.SeccompRules), int(p.SeccompFlags), int(p.SeccompPresets))
|
"argv: %q, filter: %v, rules: %d, flags: %#x, presets: %#x",
|
||||||
|
p.Args, !p.SeccompDisable, len(p.SeccompRules), int(p.SeccompFlags), int(p.SeccompPresets),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ProcessState returns the address to os.ProcessState held by the underlying [exec.Cmd].
|
// ProcessState returns the address of os.ProcessState held by the underlying [exec.Cmd].
|
||||||
func (p *Container) ProcessState() *os.ProcessState {
|
func (p *Container) ProcessState() *os.ProcessState {
|
||||||
if p.cmd == nil {
|
if p.cmd == nil {
|
||||||
return nil
|
return nil
|
||||||
@@ -452,7 +541,8 @@ func (p *Container) ProcessState() *os.ProcessState {
|
|||||||
return p.cmd.ProcessState
|
return p.cmd.ProcessState
|
||||||
}
|
}
|
||||||
|
|
||||||
// New returns the address to a new instance of [Container] that requires further initialisation before use.
|
// New returns the address to a new instance of [Container]. This value requires
|
||||||
|
// further initialisation before use.
|
||||||
func New(ctx context.Context, msg message.Msg) *Container {
|
func New(ctx context.Context, msg message.Msg) *Container {
|
||||||
if msg == nil {
|
if msg == nil {
|
||||||
msg = message.New(nil)
|
msg = message.New(nil)
|
||||||
@@ -461,12 +551,18 @@ func New(ctx context.Context, msg message.Msg) *Container {
|
|||||||
p := &Container{ctx: ctx, msg: msg, Params: Params{Ops: new(Ops)}}
|
p := &Container{ctx: ctx, msg: msg, Params: Params{Ops: new(Ops)}}
|
||||||
c, cancel := context.WithCancel(ctx)
|
c, cancel := context.WithCancel(ctx)
|
||||||
p.cancel = cancel
|
p.cancel = cancel
|
||||||
p.cmd = exec.CommandContext(c, MustExecutable(msg))
|
p.cmd = exec.CommandContext(c, fhs.ProcSelfExe)
|
||||||
return p
|
return p
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewCommand calls [New] and initialises the [Params.Path] and [Params.Args] fields.
|
// NewCommand calls [New] and initialises the [Params.Path] and [Params.Args] fields.
|
||||||
func NewCommand(ctx context.Context, msg message.Msg, pathname *check.Absolute, name string, args ...string) *Container {
|
func NewCommand(
|
||||||
|
ctx context.Context,
|
||||||
|
msg message.Msg,
|
||||||
|
pathname *check.Absolute,
|
||||||
|
name string,
|
||||||
|
args ...string,
|
||||||
|
) *Container {
|
||||||
z := New(ctx, msg)
|
z := New(ctx, msg)
|
||||||
z.Path = pathname
|
z.Path = pathname
|
||||||
z.Args = append([]string{name}, args...)
|
z.Args = append([]string{name}, args...)
|
||||||
|
|||||||
@@ -773,14 +773,13 @@ func TestMain(m *testing.M) {
|
|||||||
func helperNewContainerLibPaths(ctx context.Context, libPaths *[]*check.Absolute, args ...string) (c *container.Container) {
|
func helperNewContainerLibPaths(ctx context.Context, libPaths *[]*check.Absolute, args ...string) (c *container.Container) {
|
||||||
msg := message.New(nil)
|
msg := message.New(nil)
|
||||||
msg.SwapVerbose(testing.Verbose())
|
msg.SwapVerbose(testing.Verbose())
|
||||||
executable := check.MustAbs(container.MustExecutable(msg))
|
|
||||||
|
|
||||||
c = container.NewCommand(ctx, msg, absHelperInnerPath, "helper", args...)
|
c = container.NewCommand(ctx, msg, absHelperInnerPath, "helper", args...)
|
||||||
c.Env = append(c.Env, envDoCheck+"=1")
|
c.Env = append(c.Env, envDoCheck+"=1")
|
||||||
c.Bind(executable, absHelperInnerPath, 0)
|
c.Bind(fhs.AbsProcSelfExe, absHelperInnerPath, 0)
|
||||||
|
|
||||||
// in case test has cgo enabled
|
// in case test has cgo enabled
|
||||||
if entries, err := ldd.Resolve(ctx, msg, executable); err != nil {
|
if entries, err := ldd.Resolve(ctx, msg, nil); err != nil {
|
||||||
log.Fatalf("ldd: %v", err)
|
log.Fatalf("ldd: %v", err)
|
||||||
} else {
|
} else {
|
||||||
*libPaths = ldd.Path(entries)
|
*libPaths = ldd.Path(entries)
|
||||||
|
|||||||
@@ -21,7 +21,8 @@ type osFile interface {
|
|||||||
fs.File
|
fs.File
|
||||||
}
|
}
|
||||||
|
|
||||||
// syscallDispatcher provides methods that make state-dependent system calls as part of their behaviour.
|
// syscallDispatcher provides methods that make state-dependent system calls as
|
||||||
|
// part of their behaviour.
|
||||||
type syscallDispatcher interface {
|
type syscallDispatcher interface {
|
||||||
// new starts a goroutine with a new instance of syscallDispatcher.
|
// new starts a goroutine with a new instance of syscallDispatcher.
|
||||||
// A syscallDispatcher must never be used in any goroutine other than the one owning it,
|
// A syscallDispatcher must never be used in any goroutine other than the one owning it,
|
||||||
|
|||||||
@@ -238,8 +238,11 @@ func sliceAddr[S any](s []S) *[]S { return &s }
|
|||||||
|
|
||||||
func newCheckedFile(t *testing.T, name, wantData string, closeErr error) osFile {
|
func newCheckedFile(t *testing.T, name, wantData string, closeErr error) osFile {
|
||||||
f := &checkedOsFile{t: t, name: name, want: wantData, closeErr: closeErr}
|
f := &checkedOsFile{t: t, name: name, want: wantData, closeErr: closeErr}
|
||||||
// check happens in Close, and cleanup is not guaranteed to run, so relying on it for sloppy implementations will cause sporadic test results
|
// check happens in Close, and cleanup is not guaranteed to run, so relying
|
||||||
f.cleanup = runtime.AddCleanup(f, func(name string) { f.t.Fatalf("checkedOsFile %s became unreachable without a call to Close", name) }, f.name)
|
// on it for sloppy implementations will cause sporadic test results
|
||||||
|
f.cleanup = runtime.AddCleanup(f, func(name string) {
|
||||||
|
panic("checkedOsFile " + name + " became unreachable without a call to Close")
|
||||||
|
}, name)
|
||||||
return f
|
return f
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -43,7 +43,8 @@ func messageFromError(err error) (m string, ok bool) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// messagePrefix checks and prefixes the error message of a non-pointer error.
|
// messagePrefix checks and prefixes the error message of a non-pointer error.
|
||||||
// While this is usable for pointer errors, such use should be avoided as nil check is omitted.
|
// While this is usable for pointer errors, such use should be avoided as nil
|
||||||
|
// check is omitted.
|
||||||
func messagePrefix[T error](prefix string, err error) (string, bool) {
|
func messagePrefix[T error](prefix string, err error) (string, bool) {
|
||||||
var targetError T
|
var targetError T
|
||||||
if errors.As(err, &targetError) {
|
if errors.As(err, &targetError) {
|
||||||
|
|||||||
@@ -28,6 +28,9 @@ func copyExecutable(msg message.Msg) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MustExecutable calls [os.Executable] and terminates the process on error.
|
||||||
|
//
|
||||||
|
// Deprecated: This is no longer used and will be removed in 0.4.
|
||||||
func MustExecutable(msg message.Msg) string {
|
func MustExecutable(msg message.Msg) string {
|
||||||
executableOnce.Do(func() { copyExecutable(msg) })
|
executableOnce.Do(func() { copyExecutable(msg) })
|
||||||
return executable
|
return executable
|
||||||
|
|||||||
@@ -42,6 +42,8 @@ var (
|
|||||||
AbsDevShm = unsafeAbs(DevShm)
|
AbsDevShm = unsafeAbs(DevShm)
|
||||||
// AbsProc is [Proc] as [check.Absolute].
|
// AbsProc is [Proc] as [check.Absolute].
|
||||||
AbsProc = unsafeAbs(Proc)
|
AbsProc = unsafeAbs(Proc)
|
||||||
|
// AbsProcSelfExe is [ProcSelfExe] as [check.Absolute].
|
||||||
|
AbsProcSelfExe = unsafeAbs(ProcSelfExe)
|
||||||
// AbsSys is [Sys] as [check.Absolute].
|
// AbsSys is [Sys] as [check.Absolute].
|
||||||
AbsSys = unsafeAbs(Sys)
|
AbsSys = unsafeAbs(Sys)
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -9,7 +9,8 @@ const (
|
|||||||
// Tmp points to the place for small temporary files.
|
// Tmp points to the place for small temporary files.
|
||||||
Tmp = "/tmp/"
|
Tmp = "/tmp/"
|
||||||
|
|
||||||
// Run points to a "tmpfs" file system for system packages to place runtime data, socket files, and similar.
|
// Run points to a "tmpfs" file system for system packages to place runtime
|
||||||
|
// data, socket files, and similar.
|
||||||
Run = "/run/"
|
Run = "/run/"
|
||||||
// RunUser points to a directory containing per-user runtime directories,
|
// RunUser points to a directory containing per-user runtime directories,
|
||||||
// each usually individually mounted "tmpfs" instances.
|
// each usually individually mounted "tmpfs" instances.
|
||||||
@@ -17,10 +18,12 @@ const (
|
|||||||
|
|
||||||
// Usr points to vendor-supplied operating system resources.
|
// Usr points to vendor-supplied operating system resources.
|
||||||
Usr = "/usr/"
|
Usr = "/usr/"
|
||||||
// UsrBin points to binaries and executables for user commands that shall appear in the $PATH search path.
|
// UsrBin points to binaries and executables for user commands that shall
|
||||||
|
// appear in the $PATH search path.
|
||||||
UsrBin = Usr + "bin/"
|
UsrBin = Usr + "bin/"
|
||||||
|
|
||||||
// Var points to persistent, variable system data. Writable during normal system operation.
|
// Var points to persistent, variable system data. Writable during normal
|
||||||
|
// system operation.
|
||||||
Var = "/var/"
|
Var = "/var/"
|
||||||
// VarLib points to persistent system data.
|
// VarLib points to persistent system data.
|
||||||
VarLib = Var + "lib/"
|
VarLib = Var + "lib/"
|
||||||
@@ -29,12 +32,20 @@ const (
|
|||||||
|
|
||||||
// Dev points to the root directory for device nodes.
|
// Dev points to the root directory for device nodes.
|
||||||
Dev = "/dev/"
|
Dev = "/dev/"
|
||||||
// DevShm is the place for POSIX shared memory segments, as created via shm_open(3).
|
// DevShm is the place for POSIX shared memory segments, as created via
|
||||||
|
// shm_open(3).
|
||||||
DevShm = "/dev/shm/"
|
DevShm = "/dev/shm/"
|
||||||
// Proc points to a virtual kernel file system exposing the process list and other functionality.
|
// Proc points to a virtual kernel file system exposing the process list and
|
||||||
|
// other functionality.
|
||||||
Proc = "/proc/"
|
Proc = "/proc/"
|
||||||
// ProcSys points to a hierarchy below /proc/ that exposes a number of kernel tunables.
|
// ProcSys points to a hierarchy below /proc/ that exposes a number of
|
||||||
|
// kernel tunables.
|
||||||
ProcSys = Proc + "sys/"
|
ProcSys = Proc + "sys/"
|
||||||
// Sys points to a virtual kernel file system exposing discovered devices and other functionality.
|
// ProcSelf resolves to the process's own /proc/pid directory.
|
||||||
|
ProcSelf = Proc + "self/"
|
||||||
|
// ProcSelfExe is a symbolic link to program pathname.
|
||||||
|
ProcSelfExe = ProcSelf + "exe"
|
||||||
|
// Sys points to a virtual kernel file system exposing discovered devices
|
||||||
|
// and other functionality.
|
||||||
Sys = "/sys/"
|
Sys = "/sys/"
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -33,12 +33,12 @@ const (
|
|||||||
- This path is only accessible by init and root:
|
- This path is only accessible by init and root:
|
||||||
The container init sets SUID_DUMP_DISABLE and terminates if that fails.
|
The container init sets SUID_DUMP_DISABLE and terminates if that fails.
|
||||||
|
|
||||||
It should be noted that none of this should become relevant at any point since the resulting
|
It should be noted that none of this should become relevant at any point
|
||||||
intermediate root tmpfs should be effectively anonymous. */
|
since the resulting intermediate root tmpfs should be effectively anonymous. */
|
||||||
intermediateHostPath = fhs.Proc + "self/fd"
|
intermediateHostPath = fhs.Proc + "self/fd"
|
||||||
|
|
||||||
// setupEnv is the name of the environment variable holding the string representation of
|
// setupEnv is the name of the environment variable holding the string
|
||||||
// the read end file descriptor of the setup params pipe.
|
// representation of the read end file descriptor of the setup params pipe.
|
||||||
setupEnv = "HAKUREI_SETUP"
|
setupEnv = "HAKUREI_SETUP"
|
||||||
|
|
||||||
// exitUnexpectedWait4 is the exit code if wait4 returns an unexpected errno.
|
// exitUnexpectedWait4 is the exit code if wait4 returns an unexpected errno.
|
||||||
@@ -59,7 +59,8 @@ type (
|
|||||||
// late is called right before starting the initial process.
|
// late is called right before starting the initial process.
|
||||||
late(state *setupState, k syscallDispatcher) error
|
late(state *setupState, k syscallDispatcher) error
|
||||||
|
|
||||||
// prefix returns a log message prefix, and whether this Op prints no identifying message on its own.
|
// prefix returns a log message prefix, and whether this Op prints no
|
||||||
|
// identifying message on its own.
|
||||||
prefix() (string, bool)
|
prefix() (string, bool)
|
||||||
|
|
||||||
Is(op Op) bool
|
Is(op Op) bool
|
||||||
@@ -71,9 +72,11 @@ type (
|
|||||||
setupState struct {
|
setupState struct {
|
||||||
nonrepeatable uintptr
|
nonrepeatable uintptr
|
||||||
|
|
||||||
// Whether early reaping has concluded. Must only be accessed in the wait4 loop.
|
// Whether early reaping has concluded. Must only be accessed in the
|
||||||
|
// wait4 loop.
|
||||||
processConcluded bool
|
processConcluded bool
|
||||||
// Process to syscall.WaitStatus populated in the wait4 loop. Freed after early reaping concludes.
|
// Process to syscall.WaitStatus populated in the wait4 loop. Freed
|
||||||
|
// after early reaping concludes.
|
||||||
process map[int]WaitStatus
|
process map[int]WaitStatus
|
||||||
// Synchronises access to process.
|
// Synchronises access to process.
|
||||||
processMu sync.RWMutex
|
processMu sync.RWMutex
|
||||||
@@ -216,9 +219,10 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
|
|||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
/* early is called right before pivot_root into intermediate root;
|
/* early is called right before pivot_root into intermediate root;
|
||||||
this step is mostly for gathering information that would otherwise be difficult to obtain
|
this step is mostly for gathering information that would otherwise be
|
||||||
via library functions after pivot_root, and implementations are expected to avoid changing
|
difficult to obtain via library functions after pivot_root, and
|
||||||
the state of the mount namespace */
|
implementations are expected to avoid changing the state of the mount
|
||||||
|
namespace */
|
||||||
for i, op := range *params.Ops {
|
for i, op := range *params.Ops {
|
||||||
if op == nil || !op.Valid() {
|
if op == nil || !op.Valid() {
|
||||||
k.fatalf(msg, "invalid op at index %d", i)
|
k.fatalf(msg, "invalid op at index %d", i)
|
||||||
@@ -258,10 +262,10 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
|
|||||||
k.fatalf(msg, "cannot enter intermediate root: %v", err)
|
k.fatalf(msg, "cannot enter intermediate root: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* apply is called right after pivot_root and entering the new root;
|
/* apply is called right after pivot_root and entering the new root. This
|
||||||
this step sets up the container filesystem, and implementations are expected to keep the host root
|
step sets up the container filesystem, and implementations are expected to
|
||||||
and sysroot mount points intact but otherwise can do whatever they need to;
|
keep the host root and sysroot mount points intact but otherwise can do
|
||||||
chdir is allowed but discouraged */
|
whatever they need to. Calling chdir is allowed but discouraged. */
|
||||||
for i, op := range *params.Ops {
|
for i, op := range *params.Ops {
|
||||||
// ops already checked during early setup
|
// ops already checked during early setup
|
||||||
if prefix, ok := op.prefix(); ok {
|
if prefix, ok := op.prefix(); ok {
|
||||||
|
|||||||
@@ -12,14 +12,16 @@ import (
|
|||||||
|
|
||||||
func init() { gob.Register(new(BindMountOp)) }
|
func init() { gob.Register(new(BindMountOp)) }
|
||||||
|
|
||||||
// Bind appends an [Op] that bind mounts host path [BindMountOp.Source] on container path [BindMountOp.Target].
|
// Bind is a helper for appending [BindMountOp] to [Ops].
|
||||||
func (f *Ops) Bind(source, target *check.Absolute, flags int) *Ops {
|
func (f *Ops) Bind(source, target *check.Absolute, flags int) *Ops {
|
||||||
*f = append(*f, &BindMountOp{nil, source, target, flags})
|
*f = append(*f, &BindMountOp{nil, source, target, flags})
|
||||||
return f
|
return f
|
||||||
}
|
}
|
||||||
|
|
||||||
// BindMountOp bind mounts host path Source on container path Target.
|
// BindMountOp creates a bind mount from host path Source to container path Target.
|
||||||
// Note that Flags uses bits declared in this package and should not be set with constants in [syscall].
|
//
|
||||||
|
// Note that Flags uses bits declared in the [std] package and should not be set
|
||||||
|
// with constants in [syscall].
|
||||||
type BindMountOp struct {
|
type BindMountOp struct {
|
||||||
sourceFinal, Source, Target *check.Absolute
|
sourceFinal, Source, Target *check.Absolute
|
||||||
|
|
||||||
|
|||||||
@@ -24,8 +24,7 @@ const (
|
|||||||
daemonTimeout = 5 * time.Second
|
daemonTimeout = 5 * time.Second
|
||||||
)
|
)
|
||||||
|
|
||||||
// Daemon appends an [Op] that starts a daemon in the container and blocks until
|
// Daemon is a helper for appending [DaemonOp] to [Ops].
|
||||||
// [DaemonOp.Target] appears.
|
|
||||||
func (f *Ops) Daemon(target, path *check.Absolute, args ...string) *Ops {
|
func (f *Ops) Daemon(target, path *check.Absolute, args ...string) *Ops {
|
||||||
*f = append(*f, &DaemonOp{target, path, args})
|
*f = append(*f, &DaemonOp{target, path, args})
|
||||||
return f
|
return f
|
||||||
|
|||||||
@@ -19,7 +19,9 @@ func (f *Ops) Dev(target *check.Absolute, mqueue bool) *Ops {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// DevWritable appends an [Op] that mounts a writable subset of host /dev.
|
// DevWritable appends an [Op] that mounts a writable subset of host /dev.
|
||||||
// There is usually no good reason to write to /dev, so this should always be followed by a [RemountOp].
|
//
|
||||||
|
// There is usually no good reason to write to /dev, so this should always be
|
||||||
|
// followed by a [RemountOp].
|
||||||
func (f *Ops) DevWritable(target *check.Absolute, mqueue bool) *Ops {
|
func (f *Ops) DevWritable(target *check.Absolute, mqueue bool) *Ops {
|
||||||
*f = append(*f, &MountDevOp{target, mqueue, true})
|
*f = append(*f, &MountDevOp{target, mqueue, true})
|
||||||
return f
|
return f
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ import (
|
|||||||
|
|
||||||
func init() { gob.Register(new(MkdirOp)) }
|
func init() { gob.Register(new(MkdirOp)) }
|
||||||
|
|
||||||
// Mkdir appends an [Op] that creates a directory in the container filesystem.
|
// Mkdir is a helper for appending [MkdirOp] to [Ops].
|
||||||
func (f *Ops) Mkdir(name *check.Absolute, perm os.FileMode) *Ops {
|
func (f *Ops) Mkdir(name *check.Absolute, perm os.FileMode) *Ops {
|
||||||
*f = append(*f, &MkdirOp{name, perm})
|
*f = append(*f, &MkdirOp{name, perm})
|
||||||
return f
|
return f
|
||||||
|
|||||||
@@ -54,8 +54,11 @@ func (e *OverlayArgumentError) Error() string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Overlay appends an [Op] that mounts the overlay pseudo filesystem on [MountOverlayOp.Target].
|
// Overlay is a helper for appending [MountOverlayOp] to [Ops].
|
||||||
func (f *Ops) Overlay(target, state, work *check.Absolute, layers ...*check.Absolute) *Ops {
|
func (f *Ops) Overlay(
|
||||||
|
target, state, work *check.Absolute,
|
||||||
|
layers ...*check.Absolute,
|
||||||
|
) *Ops {
|
||||||
*f = append(*f, &MountOverlayOp{
|
*f = append(*f, &MountOverlayOp{
|
||||||
Target: target,
|
Target: target,
|
||||||
Lower: layers,
|
Lower: layers,
|
||||||
@@ -65,13 +68,12 @@ func (f *Ops) Overlay(target, state, work *check.Absolute, layers ...*check.Abso
|
|||||||
return f
|
return f
|
||||||
}
|
}
|
||||||
|
|
||||||
// OverlayEphemeral appends an [Op] that mounts the overlay pseudo filesystem on [MountOverlayOp.Target]
|
// OverlayEphemeral appends a [MountOverlayOp] with an ephemeral upperdir and workdir.
|
||||||
// with an ephemeral upperdir and workdir.
|
|
||||||
func (f *Ops) OverlayEphemeral(target *check.Absolute, layers ...*check.Absolute) *Ops {
|
func (f *Ops) OverlayEphemeral(target *check.Absolute, layers ...*check.Absolute) *Ops {
|
||||||
return f.Overlay(target, fhs.AbsRoot, nil, layers...)
|
return f.Overlay(target, fhs.AbsRoot, nil, layers...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// OverlayReadonly appends an [Op] that mounts the overlay pseudo filesystem readonly on [MountOverlayOp.Target]
|
// OverlayReadonly appends a readonly [MountOverlayOp].
|
||||||
func (f *Ops) OverlayReadonly(target *check.Absolute, layers ...*check.Absolute) *Ops {
|
func (f *Ops) OverlayReadonly(target *check.Absolute, layers ...*check.Absolute) *Ops {
|
||||||
return f.Overlay(target, nil, nil, layers...)
|
return f.Overlay(target, nil, nil, layers...)
|
||||||
}
|
}
|
||||||
@@ -82,25 +84,34 @@ type MountOverlayOp struct {
|
|||||||
|
|
||||||
// Any filesystem, does not need to be on a writable filesystem.
|
// Any filesystem, does not need to be on a writable filesystem.
|
||||||
Lower []*check.Absolute
|
Lower []*check.Absolute
|
||||||
// formatted for [OptionOverlayLowerdir], resolved, prefixed and escaped during early
|
// Formatted for [OptionOverlayLowerdir].
|
||||||
|
//
|
||||||
|
// Resolved, prefixed and escaped during early.
|
||||||
lower []string
|
lower []string
|
||||||
|
|
||||||
// The upperdir is normally on a writable filesystem.
|
// The upperdir is normally on a writable filesystem.
|
||||||
//
|
//
|
||||||
// If Work is nil and Upper holds the special value [fhs.AbsRoot],
|
// If Work is nil and Upper holds the special value [fhs.AbsRoot], an
|
||||||
// an ephemeral upperdir and workdir will be set up.
|
// ephemeral upperdir and workdir will be set up.
|
||||||
//
|
//
|
||||||
// If both Work and Upper are nil, upperdir and workdir is omitted and the overlay is mounted readonly.
|
// If both Work and Upper are nil, upperdir and workdir is omitted and the
|
||||||
|
// overlay is mounted readonly.
|
||||||
Upper *check.Absolute
|
Upper *check.Absolute
|
||||||
// formatted for [OptionOverlayUpperdir], resolved, prefixed and escaped during early
|
// Formatted for [OptionOverlayUpperdir].
|
||||||
|
//
|
||||||
|
// Resolved, prefixed and escaped during early.
|
||||||
upper string
|
upper string
|
||||||
|
|
||||||
// The workdir needs to be an empty directory on the same filesystem as upperdir.
|
// The workdir needs to be an empty directory on the same filesystem as upperdir.
|
||||||
Work *check.Absolute
|
Work *check.Absolute
|
||||||
// formatted for [OptionOverlayWorkdir], resolved, prefixed and escaped during early
|
// Formatted for [OptionOverlayWorkdir].
|
||||||
|
//
|
||||||
|
// Resolved, prefixed and escaped during early.
|
||||||
work string
|
work string
|
||||||
|
|
||||||
ephemeral bool
|
ephemeral bool
|
||||||
|
|
||||||
// used internally for mounting to the intermediate root
|
// Used internally for mounting to the intermediate root.
|
||||||
noPrefix bool
|
noPrefix bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ const (
|
|||||||
|
|
||||||
func init() { gob.Register(new(TmpfileOp)) }
|
func init() { gob.Register(new(TmpfileOp)) }
|
||||||
|
|
||||||
// Place appends an [Op] that places a file in container path [TmpfileOp.Path] containing [TmpfileOp.Data].
|
// Place is a helper for appending [TmpfileOp] to [Ops].
|
||||||
func (f *Ops) Place(name *check.Absolute, data []byte) *Ops {
|
func (f *Ops) Place(name *check.Absolute, data []byte) *Ops {
|
||||||
*f = append(*f, &TmpfileOp{name, data})
|
*f = append(*f, &TmpfileOp{name, data})
|
||||||
return f
|
return f
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ func TestTmpfileOp(t *testing.T) {
|
|||||||
Path: samplePath,
|
Path: samplePath,
|
||||||
Data: sampleData,
|
Data: sampleData,
|
||||||
}, nil, nil, []stub.Call{
|
}, nil, nil, []stub.Call{
|
||||||
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.32768", sampleDataString, nil), stub.UniqueError(5)),
|
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, (*checkedOsFile)(nil), stub.UniqueError(5)),
|
||||||
}, stub.UniqueError(5)},
|
}, stub.UniqueError(5)},
|
||||||
|
|
||||||
{"Write", &Params{ParentPerm: 0700}, &TmpfileOp{
|
{"Write", &Params{ParentPerm: 0700}, &TmpfileOp{
|
||||||
@@ -35,14 +35,14 @@ func TestTmpfileOp(t *testing.T) {
|
|||||||
Path: samplePath,
|
Path: samplePath,
|
||||||
Data: sampleData,
|
Data: sampleData,
|
||||||
}, nil, nil, []stub.Call{
|
}, nil, nil, []stub.Call{
|
||||||
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.32768", sampleDataString, stub.UniqueError(3)), nil),
|
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.Close", sampleDataString, stub.UniqueError(3)), nil),
|
||||||
}, stub.UniqueError(3)},
|
}, stub.UniqueError(3)},
|
||||||
|
|
||||||
{"ensureFile", &Params{ParentPerm: 0700}, &TmpfileOp{
|
{"ensureFile", &Params{ParentPerm: 0700}, &TmpfileOp{
|
||||||
Path: samplePath,
|
Path: samplePath,
|
||||||
Data: sampleData,
|
Data: sampleData,
|
||||||
}, nil, nil, []stub.Call{
|
}, nil, nil, []stub.Call{
|
||||||
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.32768", sampleDataString, nil), nil),
|
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.ensureFile", sampleDataString, nil), nil),
|
||||||
call("ensureFile", stub.ExpectArgs{"/sysroot/etc/passwd", os.FileMode(0444), os.FileMode(0700)}, nil, stub.UniqueError(2)),
|
call("ensureFile", stub.ExpectArgs{"/sysroot/etc/passwd", os.FileMode(0444), os.FileMode(0700)}, nil, stub.UniqueError(2)),
|
||||||
}, stub.UniqueError(2)},
|
}, stub.UniqueError(2)},
|
||||||
|
|
||||||
@@ -50,29 +50,29 @@ func TestTmpfileOp(t *testing.T) {
|
|||||||
Path: samplePath,
|
Path: samplePath,
|
||||||
Data: sampleData,
|
Data: sampleData,
|
||||||
}, nil, nil, []stub.Call{
|
}, nil, nil, []stub.Call{
|
||||||
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.32768", sampleDataString, nil), nil),
|
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.bindMount", sampleDataString, nil), nil),
|
||||||
call("ensureFile", stub.ExpectArgs{"/sysroot/etc/passwd", os.FileMode(0444), os.FileMode(0700)}, nil, nil),
|
call("ensureFile", stub.ExpectArgs{"/sysroot/etc/passwd", os.FileMode(0444), os.FileMode(0700)}, nil, nil),
|
||||||
call("bindMount", stub.ExpectArgs{"tmp.32768", "/sysroot/etc/passwd", uintptr(0x5), false}, nil, stub.UniqueError(1)),
|
call("bindMount", stub.ExpectArgs{"tmp.bindMount", "/sysroot/etc/passwd", uintptr(0x5), false}, nil, stub.UniqueError(1)),
|
||||||
}, stub.UniqueError(1)},
|
}, stub.UniqueError(1)},
|
||||||
|
|
||||||
{"remove", &Params{ParentPerm: 0700}, &TmpfileOp{
|
{"remove", &Params{ParentPerm: 0700}, &TmpfileOp{
|
||||||
Path: samplePath,
|
Path: samplePath,
|
||||||
Data: sampleData,
|
Data: sampleData,
|
||||||
}, nil, nil, []stub.Call{
|
}, nil, nil, []stub.Call{
|
||||||
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.32768", sampleDataString, nil), nil),
|
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.remove", sampleDataString, nil), nil),
|
||||||
call("ensureFile", stub.ExpectArgs{"/sysroot/etc/passwd", os.FileMode(0444), os.FileMode(0700)}, nil, nil),
|
call("ensureFile", stub.ExpectArgs{"/sysroot/etc/passwd", os.FileMode(0444), os.FileMode(0700)}, nil, nil),
|
||||||
call("bindMount", stub.ExpectArgs{"tmp.32768", "/sysroot/etc/passwd", uintptr(0x5), false}, nil, nil),
|
call("bindMount", stub.ExpectArgs{"tmp.remove", "/sysroot/etc/passwd", uintptr(0x5), false}, nil, nil),
|
||||||
call("remove", stub.ExpectArgs{"tmp.32768"}, nil, stub.UniqueError(0)),
|
call("remove", stub.ExpectArgs{"tmp.remove"}, nil, stub.UniqueError(0)),
|
||||||
}, stub.UniqueError(0)},
|
}, stub.UniqueError(0)},
|
||||||
|
|
||||||
{"success", &Params{ParentPerm: 0700}, &TmpfileOp{
|
{"success", &Params{ParentPerm: 0700}, &TmpfileOp{
|
||||||
Path: samplePath,
|
Path: samplePath,
|
||||||
Data: sampleData,
|
Data: sampleData,
|
||||||
}, nil, nil, []stub.Call{
|
}, nil, nil, []stub.Call{
|
||||||
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.32768", sampleDataString, nil), nil),
|
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.success", sampleDataString, nil), nil),
|
||||||
call("ensureFile", stub.ExpectArgs{"/sysroot/etc/passwd", os.FileMode(0444), os.FileMode(0700)}, nil, nil),
|
call("ensureFile", stub.ExpectArgs{"/sysroot/etc/passwd", os.FileMode(0444), os.FileMode(0700)}, nil, nil),
|
||||||
call("bindMount", stub.ExpectArgs{"tmp.32768", "/sysroot/etc/passwd", uintptr(0x5), false}, nil, nil),
|
call("bindMount", stub.ExpectArgs{"tmp.success", "/sysroot/etc/passwd", uintptr(0x5), false}, nil, nil),
|
||||||
call("remove", stub.ExpectArgs{"tmp.32768"}, nil, nil),
|
call("remove", stub.ExpectArgs{"tmp.success"}, nil, nil),
|
||||||
}, nil},
|
}, nil},
|
||||||
})
|
})
|
||||||
|
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ import (
|
|||||||
|
|
||||||
func init() { gob.Register(new(MountProcOp)) }
|
func init() { gob.Register(new(MountProcOp)) }
|
||||||
|
|
||||||
// Proc appends an [Op] that mounts a private instance of proc.
|
// Proc is a helper for appending [MountProcOp] to [Ops].
|
||||||
func (f *Ops) Proc(target *check.Absolute) *Ops {
|
func (f *Ops) Proc(target *check.Absolute) *Ops {
|
||||||
*f = append(*f, &MountProcOp{target})
|
*f = append(*f, &MountProcOp{target})
|
||||||
return f
|
return f
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ import (
|
|||||||
|
|
||||||
func init() { gob.Register(new(RemountOp)) }
|
func init() { gob.Register(new(RemountOp)) }
|
||||||
|
|
||||||
// Remount appends an [Op] that applies [RemountOp.Flags] on container path [RemountOp.Target].
|
// Remount is a helper for appending [RemountOp] to [Ops].
|
||||||
func (f *Ops) Remount(target *check.Absolute, flags uintptr) *Ops {
|
func (f *Ops) Remount(target *check.Absolute, flags uintptr) *Ops {
|
||||||
*f = append(*f, &RemountOp{target, flags})
|
*f = append(*f, &RemountOp{target, flags})
|
||||||
return f
|
return f
|
||||||
|
|||||||
@@ -38,6 +38,7 @@ const (
|
|||||||
_LANDLOCK_ACCESS_FS_DELIM
|
_LANDLOCK_ACCESS_FS_DELIM
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// String returns a space-separated string of [LandlockAccessFS] flags.
|
||||||
func (f LandlockAccessFS) String() string {
|
func (f LandlockAccessFS) String() string {
|
||||||
switch f {
|
switch f {
|
||||||
case LANDLOCK_ACCESS_FS_EXECUTE:
|
case LANDLOCK_ACCESS_FS_EXECUTE:
|
||||||
@@ -116,6 +117,7 @@ const (
|
|||||||
_LANDLOCK_ACCESS_NET_DELIM
|
_LANDLOCK_ACCESS_NET_DELIM
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// String returns a space-separated string of [LandlockAccessNet] flags.
|
||||||
func (f LandlockAccessNet) String() string {
|
func (f LandlockAccessNet) String() string {
|
||||||
switch f {
|
switch f {
|
||||||
case LANDLOCK_ACCESS_NET_BIND_TCP:
|
case LANDLOCK_ACCESS_NET_BIND_TCP:
|
||||||
@@ -152,6 +154,7 @@ const (
|
|||||||
_LANDLOCK_SCOPE_DELIM
|
_LANDLOCK_SCOPE_DELIM
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// String returns a space-separated string of [LandlockScope] flags.
|
||||||
func (f LandlockScope) String() string {
|
func (f LandlockScope) String() string {
|
||||||
switch f {
|
switch f {
|
||||||
case LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET:
|
case LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET:
|
||||||
@@ -184,10 +187,12 @@ type RulesetAttr struct {
|
|||||||
HandledAccessFS LandlockAccessFS
|
HandledAccessFS LandlockAccessFS
|
||||||
// Bitmask of handled network actions.
|
// Bitmask of handled network actions.
|
||||||
HandledAccessNet LandlockAccessNet
|
HandledAccessNet LandlockAccessNet
|
||||||
// Bitmask of scopes restricting a Landlock domain from accessing outside resources (e.g. IPCs).
|
// Bitmask of scopes restricting a Landlock domain from accessing outside
|
||||||
|
// resources (e.g. IPCs).
|
||||||
Scoped LandlockScope
|
Scoped LandlockScope
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// String returns a user-facing description of [RulesetAttr].
|
||||||
func (rulesetAttr *RulesetAttr) String() string {
|
func (rulesetAttr *RulesetAttr) String() string {
|
||||||
if rulesetAttr == nil {
|
if rulesetAttr == nil {
|
||||||
return "NULL"
|
return "NULL"
|
||||||
@@ -208,6 +213,7 @@ func (rulesetAttr *RulesetAttr) String() string {
|
|||||||
return strings.Join(elems, ", ")
|
return strings.Join(elems, ", ")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Create loads the ruleset into the kernel.
|
||||||
func (rulesetAttr *RulesetAttr) Create(flags uintptr) (fd int, err error) {
|
func (rulesetAttr *RulesetAttr) Create(flags uintptr) (fd int, err error) {
|
||||||
var pointer, size uintptr
|
var pointer, size uintptr
|
||||||
// NULL needed for abi version
|
// NULL needed for abi version
|
||||||
@@ -216,10 +222,13 @@ func (rulesetAttr *RulesetAttr) Create(flags uintptr) (fd int, err error) {
|
|||||||
size = unsafe.Sizeof(*rulesetAttr)
|
size = unsafe.Sizeof(*rulesetAttr)
|
||||||
}
|
}
|
||||||
|
|
||||||
rulesetFd, _, errno := syscall.Syscall(std.SYS_LANDLOCK_CREATE_RULESET, pointer, size, flags)
|
rulesetFd, _, errno := syscall.Syscall(
|
||||||
|
std.SYS_LANDLOCK_CREATE_RULESET,
|
||||||
|
pointer, size,
|
||||||
|
flags,
|
||||||
|
)
|
||||||
fd = int(rulesetFd)
|
fd = int(rulesetFd)
|
||||||
err = errno
|
err = errno
|
||||||
|
|
||||||
if fd < 0 {
|
if fd < 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -230,12 +239,19 @@ func (rulesetAttr *RulesetAttr) Create(flags uintptr) (fd int, err error) {
|
|||||||
return fd, nil
|
return fd, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// LandlockGetABI returns the ABI version supported by the kernel.
|
||||||
func LandlockGetABI() (int, error) {
|
func LandlockGetABI() (int, error) {
|
||||||
return (*RulesetAttr)(nil).Create(LANDLOCK_CREATE_RULESET_VERSION)
|
return (*RulesetAttr)(nil).Create(LANDLOCK_CREATE_RULESET_VERSION)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// LandlockRestrictSelf applies a loaded ruleset to the calling thread.
|
||||||
func LandlockRestrictSelf(rulesetFd int, flags uintptr) error {
|
func LandlockRestrictSelf(rulesetFd int, flags uintptr) error {
|
||||||
r, _, errno := syscall.Syscall(std.SYS_LANDLOCK_RESTRICT_SELF, uintptr(rulesetFd), flags, 0)
|
r, _, errno := syscall.Syscall(
|
||||||
|
std.SYS_LANDLOCK_RESTRICT_SELF,
|
||||||
|
uintptr(rulesetFd),
|
||||||
|
flags,
|
||||||
|
0,
|
||||||
|
)
|
||||||
if r != 0 {
|
if r != 0 {
|
||||||
return errno
|
return errno
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -99,7 +99,7 @@ done:
|
|||||||
}
|
}
|
||||||
if m.Header.Type == NLMSG_ERROR {
|
if m.Header.Type == NLMSG_ERROR {
|
||||||
if len(m.Data) >= 4 {
|
if len(m.Data) >= 4 {
|
||||||
errno := Errno(-std.ScmpInt(binary.NativeEndian.Uint32(m.Data)))
|
errno := Errno(-std.Int(binary.NativeEndian.Uint32(m.Data)))
|
||||||
if errno == 0 {
|
if errno == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -15,7 +15,10 @@ import (
|
|||||||
|
|
||||||
const (
|
const (
|
||||||
// Nonexistent is a path that cannot exist.
|
// Nonexistent is a path that cannot exist.
|
||||||
// /proc is chosen because a system with covered /proc is unsupported by this package.
|
//
|
||||||
|
// This path can never be presented by the kernel if proc is mounted on
|
||||||
|
// /proc/. This can only exist if parts of /proc/ is covered, or proc is not
|
||||||
|
// mounted at all. Neither configuration is supported by this package.
|
||||||
Nonexistent = fhs.Proc + "nonexistent"
|
Nonexistent = fhs.Proc + "nonexistent"
|
||||||
|
|
||||||
hostPath = fhs.Root + hostDir
|
hostPath = fhs.Root + hostDir
|
||||||
|
|||||||
@@ -88,18 +88,22 @@ var resPrefix = [...]string{
|
|||||||
7: "seccomp_load failed",
|
7: "seccomp_load failed",
|
||||||
}
|
}
|
||||||
|
|
||||||
// cbAllocateBuffer is the function signature for the function handle passed to hakurei_export_filter
|
// cbAllocateBuffer is the function signature for the function handle passed to
|
||||||
// which allocates the buffer that the resulting bpf program is copied into, and writes its slice header
|
// hakurei_scmp_make_filter which allocates the buffer that the resulting bpf
|
||||||
// to a value held by the caller.
|
// program is copied into, and writes its slice header to a value held by the caller.
|
||||||
type cbAllocateBuffer = func(len C.size_t) (buf unsafe.Pointer)
|
type cbAllocateBuffer = func(len C.size_t) (buf unsafe.Pointer)
|
||||||
|
|
||||||
|
// hakurei_scmp_allocate allocates a buffer of specified size known to the
|
||||||
|
// runtime through a callback passed in a [cgo.Handle].
|
||||||
|
//
|
||||||
//export hakurei_scmp_allocate
|
//export hakurei_scmp_allocate
|
||||||
func hakurei_scmp_allocate(f C.uintptr_t, len C.size_t) (buf unsafe.Pointer) {
|
func hakurei_scmp_allocate(f C.uintptr_t, len C.size_t) (buf unsafe.Pointer) {
|
||||||
return cgo.Handle(f).Value().(cbAllocateBuffer)(len)
|
return cgo.Handle(f).Value().(cbAllocateBuffer)(len)
|
||||||
}
|
}
|
||||||
|
|
||||||
// makeFilter generates a bpf program from a slice of [std.NativeRule] and writes the resulting byte slice to p.
|
// makeFilter generates a bpf program from a slice of [std.NativeRule] and
|
||||||
// The filter is installed to the current process if p is nil.
|
// writes the resulting byte slice to p. The filter is installed to the current
|
||||||
|
// process if p is nil.
|
||||||
func makeFilter(rules []std.NativeRule, flags ExportFlag, p *[]byte) error {
|
func makeFilter(rules []std.NativeRule, flags ExportFlag, p *[]byte) error {
|
||||||
if len(rules) == 0 {
|
if len(rules) == 0 {
|
||||||
return ErrInvalidRules
|
return ErrInvalidRules
|
||||||
@@ -170,8 +174,8 @@ func Export(rules []std.NativeRule, flags ExportFlag) (data []byte, err error) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Load generates a bpf program from a slice of [std.NativeRule] and enforces it on the current process.
|
// Load generates a bpf program from a slice of [std.NativeRule] and enforces it
|
||||||
// Errors returned by libseccomp is wrapped in [LibraryError].
|
// on the current process. Errors returned by libseccomp is wrapped in [LibraryError].
|
||||||
func Load(rules []std.NativeRule, flags ExportFlag) error { return makeFilter(rules, flags, nil) }
|
func Load(rules []std.NativeRule, flags ExportFlag) error { return makeFilter(rules, flags, nil) }
|
||||||
|
|
||||||
type (
|
type (
|
||||||
|
|||||||
@@ -24,8 +24,8 @@ func TestSyscallResolveName(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestRuleType(t *testing.T) {
|
func TestRuleType(t *testing.T) {
|
||||||
assertKind[std.ScmpUint, scmpUint](t)
|
assertKind[std.Uint, scmpUint](t)
|
||||||
assertKind[std.ScmpInt, scmpInt](t)
|
assertKind[std.Int, scmpInt](t)
|
||||||
|
|
||||||
assertSize[std.NativeRule, syscallRule](t)
|
assertSize[std.NativeRule, syscallRule](t)
|
||||||
assertKind[std.ScmpDatum, scmpDatum](t)
|
assertKind[std.ScmpDatum, scmpDatum](t)
|
||||||
|
|||||||
@@ -7,24 +7,28 @@ import (
|
|||||||
|
|
||||||
type (
|
type (
|
||||||
// ScmpUint is equivalent to C.uint.
|
// ScmpUint is equivalent to C.uint.
|
||||||
ScmpUint uint32
|
//
|
||||||
|
// Deprecated: This type has been renamed to Uint and will be removed in 0.4.
|
||||||
|
ScmpUint = Uint
|
||||||
// ScmpInt is equivalent to C.int.
|
// ScmpInt is equivalent to C.int.
|
||||||
ScmpInt int32
|
//
|
||||||
|
// Deprecated: This type has been renamed to Int and will be removed in 0.4.
|
||||||
|
ScmpInt = Int
|
||||||
|
|
||||||
// ScmpSyscall represents a syscall number passed to libseccomp via [NativeRule.Syscall].
|
// ScmpSyscall represents a syscall number passed to libseccomp via [NativeRule.Syscall].
|
||||||
ScmpSyscall ScmpInt
|
ScmpSyscall Int
|
||||||
// ScmpErrno represents an errno value passed to libseccomp via [NativeRule.Errno].
|
// ScmpErrno represents an errno value passed to libseccomp via [NativeRule.Errno].
|
||||||
ScmpErrno ScmpInt
|
ScmpErrno Int
|
||||||
|
|
||||||
// ScmpCompare is equivalent to enum scmp_compare;
|
// ScmpCompare is equivalent to enum scmp_compare;
|
||||||
ScmpCompare ScmpUint
|
ScmpCompare Uint
|
||||||
// ScmpDatum is equivalent to scmp_datum_t.
|
// ScmpDatum is equivalent to scmp_datum_t.
|
||||||
ScmpDatum uint64
|
ScmpDatum uint64
|
||||||
|
|
||||||
// ScmpArgCmp is equivalent to struct scmp_arg_cmp.
|
// ScmpArgCmp is equivalent to struct scmp_arg_cmp.
|
||||||
ScmpArgCmp struct {
|
ScmpArgCmp struct {
|
||||||
// argument number, starting at 0
|
// argument number, starting at 0
|
||||||
Arg ScmpUint `json:"arg"`
|
Arg Uint `json:"arg"`
|
||||||
// the comparison op, e.g. SCMP_CMP_*
|
// the comparison op, e.g. SCMP_CMP_*
|
||||||
Op ScmpCompare `json:"op"`
|
Op ScmpCompare `json:"op"`
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,12 @@
|
|||||||
package std
|
package std
|
||||||
|
|
||||||
import "iter"
|
import (
|
||||||
|
"encoding"
|
||||||
|
"iter"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
// Syscalls returns an iterator over all wired syscalls.
|
// Syscalls returns an iterator over all wired syscalls.
|
||||||
func Syscalls() iter.Seq2[string, ScmpSyscall] {
|
func Syscalls() iter.Seq2[string, ScmpSyscall] {
|
||||||
@@ -26,3 +32,128 @@ func SyscallResolveName(name string) (num ScmpSyscall, ok bool) {
|
|||||||
num, ok = syscallNumExtra[name]
|
num, ok = syscallNumExtra[name]
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SchedPolicy denotes a scheduling policy defined in include/uapi/linux/sched.h.
|
||||||
|
type SchedPolicy int
|
||||||
|
|
||||||
|
// include/uapi/linux/sched.h
|
||||||
|
const (
|
||||||
|
SCHED_NORMAL SchedPolicy = iota
|
||||||
|
SCHED_FIFO
|
||||||
|
SCHED_RR
|
||||||
|
SCHED_BATCH
|
||||||
|
_SCHED_ISO // SCHED_ISO: reserved but not implemented yet
|
||||||
|
SCHED_IDLE
|
||||||
|
SCHED_DEADLINE
|
||||||
|
SCHED_EXT
|
||||||
|
|
||||||
|
SCHED_LAST SchedPolicy = iota - 1
|
||||||
|
)
|
||||||
|
|
||||||
|
var _ encoding.TextMarshaler = SCHED_LAST
|
||||||
|
var _ encoding.TextUnmarshaler = new(SCHED_LAST)
|
||||||
|
|
||||||
|
// String returns a unique representation of policy, also used in encoding.
|
||||||
|
func (policy SchedPolicy) String() string {
|
||||||
|
switch policy {
|
||||||
|
case SCHED_NORMAL:
|
||||||
|
return ""
|
||||||
|
case SCHED_FIFO:
|
||||||
|
return "fifo"
|
||||||
|
case SCHED_RR:
|
||||||
|
return "rr"
|
||||||
|
case SCHED_BATCH:
|
||||||
|
return "batch"
|
||||||
|
case SCHED_IDLE:
|
||||||
|
return "idle"
|
||||||
|
case SCHED_DEADLINE:
|
||||||
|
return "deadline"
|
||||||
|
case SCHED_EXT:
|
||||||
|
return "ext"
|
||||||
|
|
||||||
|
default:
|
||||||
|
return "invalid policy " + strconv.Itoa(int(policy))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalText performs bounds checking and returns the result of String.
|
||||||
|
func (policy SchedPolicy) MarshalText() ([]byte, error) {
|
||||||
|
if policy == _SCHED_ISO || policy < 0 || policy > SCHED_LAST {
|
||||||
|
return nil, syscall.EINVAL
|
||||||
|
}
|
||||||
|
return []byte(policy.String()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// InvalidSchedPolicyError is an invalid string representation of a [SchedPolicy].
|
||||||
|
type InvalidSchedPolicyError string
|
||||||
|
|
||||||
|
func (InvalidSchedPolicyError) Unwrap() error { return syscall.EINVAL }
|
||||||
|
func (e InvalidSchedPolicyError) Error() string {
|
||||||
|
return "invalid scheduling policy " + strconv.Quote(string(e))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalText is the inverse of MarshalText.
|
||||||
|
func (policy *SchedPolicy) UnmarshalText(text []byte) error {
|
||||||
|
switch string(text) {
|
||||||
|
case "fifo":
|
||||||
|
*policy = SCHED_FIFO
|
||||||
|
case "rr":
|
||||||
|
*policy = SCHED_RR
|
||||||
|
case "batch":
|
||||||
|
*policy = SCHED_BATCH
|
||||||
|
case "idle":
|
||||||
|
*policy = SCHED_IDLE
|
||||||
|
case "deadline":
|
||||||
|
*policy = SCHED_DEADLINE
|
||||||
|
case "ext":
|
||||||
|
*policy = SCHED_EXT
|
||||||
|
|
||||||
|
case "":
|
||||||
|
*policy = 0
|
||||||
|
return nil
|
||||||
|
default:
|
||||||
|
return InvalidSchedPolicyError(text)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// for sched_get_priority_max and sched_get_priority_min
|
||||||
|
var (
|
||||||
|
schedPriority [SCHED_LAST + 1][2]Int
|
||||||
|
schedPriorityErr [SCHED_LAST + 1][2]error
|
||||||
|
schedPriorityOnce [SCHED_LAST + 1][2]sync.Once
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetPriorityMax returns the maximum priority value that can be used with the
|
||||||
|
// scheduling algorithm identified by policy.
|
||||||
|
func (policy SchedPolicy) GetPriorityMax() (Int, error) {
|
||||||
|
schedPriorityOnce[policy][0].Do(func() {
|
||||||
|
priority, _, errno := syscall.Syscall(
|
||||||
|
syscall.SYS_SCHED_GET_PRIORITY_MAX,
|
||||||
|
uintptr(policy),
|
||||||
|
0, 0,
|
||||||
|
)
|
||||||
|
schedPriority[policy][0] = Int(priority)
|
||||||
|
if errno != 0 {
|
||||||
|
schedPriorityErr[policy][0] = errno
|
||||||
|
}
|
||||||
|
})
|
||||||
|
return schedPriority[policy][0], schedPriorityErr[policy][0]
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPriorityMin returns the minimum priority value that can be used with the
|
||||||
|
// scheduling algorithm identified by policy.
|
||||||
|
func (policy SchedPolicy) GetPriorityMin() (Int, error) {
|
||||||
|
schedPriorityOnce[policy][1].Do(func() {
|
||||||
|
priority, _, errno := syscall.Syscall(
|
||||||
|
syscall.SYS_SCHED_GET_PRIORITY_MIN,
|
||||||
|
uintptr(policy),
|
||||||
|
0, 0,
|
||||||
|
)
|
||||||
|
schedPriority[policy][1] = Int(priority)
|
||||||
|
if errno != 0 {
|
||||||
|
schedPriorityErr[policy][1] = errno
|
||||||
|
}
|
||||||
|
})
|
||||||
|
return schedPriority[policy][1], schedPriorityErr[policy][1]
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,6 +1,11 @@
|
|||||||
package std_test
|
package std_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"math"
|
||||||
|
"reflect"
|
||||||
|
"syscall"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"hakurei.app/container/std"
|
"hakurei.app/container/std"
|
||||||
@@ -19,3 +24,90 @@ func TestSyscallResolveName(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestSchedPolicyJSON(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
testCases := []struct {
|
||||||
|
policy std.SchedPolicy
|
||||||
|
want string
|
||||||
|
encodeErr error
|
||||||
|
decodeErr error
|
||||||
|
}{
|
||||||
|
{std.SCHED_NORMAL, `""`, nil, nil},
|
||||||
|
{std.SCHED_FIFO, `"fifo"`, nil, nil},
|
||||||
|
{std.SCHED_RR, `"rr"`, nil, nil},
|
||||||
|
{std.SCHED_BATCH, `"batch"`, nil, nil},
|
||||||
|
{4, `"invalid policy 4"`, syscall.EINVAL, std.InvalidSchedPolicyError("invalid policy 4")},
|
||||||
|
{std.SCHED_IDLE, `"idle"`, nil, nil},
|
||||||
|
{std.SCHED_DEADLINE, `"deadline"`, nil, nil},
|
||||||
|
{std.SCHED_EXT, `"ext"`, nil, nil},
|
||||||
|
{math.MaxInt, `"iso"`, syscall.EINVAL, std.InvalidSchedPolicyError("iso")},
|
||||||
|
}
|
||||||
|
for _, tc := range testCases {
|
||||||
|
name := tc.policy.String()
|
||||||
|
if tc.policy == std.SCHED_NORMAL {
|
||||||
|
name = "normal"
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run(name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
got, err := json.Marshal(tc.policy)
|
||||||
|
if !errors.Is(err, tc.encodeErr) {
|
||||||
|
t.Fatalf("Marshal: error = %v, want %v", err, tc.encodeErr)
|
||||||
|
}
|
||||||
|
if err == nil && string(got) != tc.want {
|
||||||
|
t.Fatalf("Marshal: %s, want %s", string(got), tc.want)
|
||||||
|
}
|
||||||
|
|
||||||
|
var v std.SchedPolicy
|
||||||
|
if err = json.Unmarshal([]byte(tc.want), &v); !reflect.DeepEqual(err, tc.decodeErr) {
|
||||||
|
t.Fatalf("Unmarshal: error = %v, want %v", err, tc.decodeErr)
|
||||||
|
}
|
||||||
|
if err == nil && v != tc.policy {
|
||||||
|
t.Fatalf("Unmarshal: %d, want %d", v, tc.policy)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSchedPolicyMinMax(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
testCases := []struct {
|
||||||
|
policy std.SchedPolicy
|
||||||
|
min, max std.Int
|
||||||
|
err error
|
||||||
|
}{
|
||||||
|
{std.SCHED_NORMAL, 0, 0, nil},
|
||||||
|
{std.SCHED_FIFO, 1, 99, nil},
|
||||||
|
{std.SCHED_RR, 1, 99, nil},
|
||||||
|
{std.SCHED_BATCH, 0, 0, nil},
|
||||||
|
{4, -1, -1, syscall.EINVAL},
|
||||||
|
{std.SCHED_IDLE, 0, 0, nil},
|
||||||
|
{std.SCHED_DEADLINE, 0, 0, nil},
|
||||||
|
{std.SCHED_EXT, 0, 0, nil},
|
||||||
|
}
|
||||||
|
for _, tc := range testCases {
|
||||||
|
name := tc.policy.String()
|
||||||
|
if tc.policy == std.SCHED_NORMAL {
|
||||||
|
name = "normal"
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run(name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
if priority, err := tc.policy.GetPriorityMax(); !reflect.DeepEqual(err, tc.err) {
|
||||||
|
t.Fatalf("GetPriorityMax: error = %v, want %v", err, tc.err)
|
||||||
|
} else if priority != tc.max {
|
||||||
|
t.Fatalf("GetPriorityMax: %d, want %d", priority, tc.max)
|
||||||
|
}
|
||||||
|
if priority, err := tc.policy.GetPriorityMin(); !reflect.DeepEqual(err, tc.err) {
|
||||||
|
t.Fatalf("GetPriorityMin: error = %v, want %v", err, tc.err)
|
||||||
|
} else if priority != tc.min {
|
||||||
|
t.Fatalf("GetPriorityMin: %d, want %d", priority, tc.min)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
8
container/std/types.go
Normal file
8
container/std/types.go
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
package std
|
||||||
|
|
||||||
|
type (
|
||||||
|
// Uint is equivalent to C.uint.
|
||||||
|
Uint uint32
|
||||||
|
// Int is equivalent to C.int.
|
||||||
|
Int int32
|
||||||
|
)
|
||||||
@@ -3,6 +3,8 @@ package container
|
|||||||
import (
|
import (
|
||||||
. "syscall"
|
. "syscall"
|
||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
|
"hakurei.app/container/std"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Prctl manipulates various aspects of the behavior of the calling thread or process.
|
// Prctl manipulates various aspects of the behavior of the calling thread or process.
|
||||||
@@ -41,6 +43,37 @@ func Isatty(fd int) bool {
|
|||||||
return r == 0
|
return r == 0
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// schedParam is equivalent to struct sched_param from include/linux/sched.h.
|
||||||
|
type schedParam struct {
|
||||||
|
// sched_priority
|
||||||
|
priority std.Int
|
||||||
|
}
|
||||||
|
|
||||||
|
// schedSetscheduler sets both the scheduling policy and parameters for the
|
||||||
|
// thread whose ID is specified in tid. If tid equals zero, the scheduling
|
||||||
|
// policy and parameters of the calling thread will be set.
|
||||||
|
//
|
||||||
|
// This function is unexported because it is [very subtle to use correctly]. The
|
||||||
|
// function signature in libc is misleading: pid actually refers to a thread ID.
|
||||||
|
// The glibc wrapper for this system call ignores this semantic and exposes
|
||||||
|
// this counterintuitive behaviour.
|
||||||
|
//
|
||||||
|
// This function is only called from the container setup thread. Do not reuse
|
||||||
|
// this if you do not have something similar in place!
|
||||||
|
//
|
||||||
|
// [very subtle to use correctly]: https://www.openwall.com/lists/musl/2016/03/01/4
|
||||||
|
func schedSetscheduler(tid int, policy std.SchedPolicy, param *schedParam) error {
|
||||||
|
if _, _, errno := Syscall(
|
||||||
|
SYS_SCHED_SETSCHEDULER,
|
||||||
|
uintptr(tid),
|
||||||
|
uintptr(policy),
|
||||||
|
uintptr(unsafe.Pointer(param)),
|
||||||
|
); errno != 0 {
|
||||||
|
return errno
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// IgnoringEINTR makes a function call and repeats it if it returns an
|
// IgnoringEINTR makes a function call and repeats it if it returns an
|
||||||
// EINTR error. This appears to be required even though we install all
|
// EINTR error. This appears to be required even though we install all
|
||||||
// signal handlers with SA_RESTART: see #22838, #38033, #38836, #40846.
|
// signal handlers with SA_RESTART: see #22838, #38033, #38836, #40846.
|
||||||
|
|||||||
@@ -2,6 +2,8 @@ package vfs
|
|||||||
|
|
||||||
import "strings"
|
import "strings"
|
||||||
|
|
||||||
|
// Unmangle reverses mangling of strings done by the kernel. Its behaviour is
|
||||||
|
// consistent with the equivalent function in util-linux.
|
||||||
func Unmangle(s string) string {
|
func Unmangle(s string) string {
|
||||||
if !strings.ContainsRune(s, '\\') {
|
if !strings.ContainsRune(s, '\\') {
|
||||||
return s
|
return s
|
||||||
|
|||||||
@@ -24,6 +24,7 @@ var (
|
|||||||
ErrMountInfoSep = errors.New("bad optional fields separator")
|
ErrMountInfoSep = errors.New("bad optional fields separator")
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// A DecoderError describes a nonrecoverable error decoding a mountinfo stream.
|
||||||
type DecoderError struct {
|
type DecoderError struct {
|
||||||
Op string
|
Op string
|
||||||
Line int
|
Line int
|
||||||
@@ -51,7 +52,8 @@ func (e *DecoderError) Error() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type (
|
type (
|
||||||
// A MountInfoDecoder reads and decodes proc_pid_mountinfo(5) entries from an input stream.
|
// A MountInfoDecoder reads and decodes proc_pid_mountinfo(5) entries from
|
||||||
|
// an input stream.
|
||||||
MountInfoDecoder struct {
|
MountInfoDecoder struct {
|
||||||
s *bufio.Scanner
|
s *bufio.Scanner
|
||||||
m *MountInfo
|
m *MountInfo
|
||||||
@@ -72,13 +74,16 @@ type (
|
|||||||
MountInfoEntry struct {
|
MountInfoEntry struct {
|
||||||
// mount ID: a unique ID for the mount (may be reused after umount(2)).
|
// mount ID: a unique ID for the mount (may be reused after umount(2)).
|
||||||
ID int `json:"id"`
|
ID int `json:"id"`
|
||||||
// parent ID: the ID of the parent mount (or of self for the root of this mount namespace's mount tree).
|
// parent ID: the ID of the parent mount (or of self for the root of
|
||||||
|
// this mount namespace's mount tree).
|
||||||
Parent int `json:"parent"`
|
Parent int `json:"parent"`
|
||||||
// major:minor: the value of st_dev for files on this filesystem (see stat(2)).
|
// major:minor: the value of st_dev for files on this filesystem (see stat(2)).
|
||||||
Devno DevT `json:"devno"`
|
Devno DevT `json:"devno"`
|
||||||
// root: the pathname of the directory in the filesystem which forms the root of this mount.
|
// root: the pathname of the directory in the filesystem which forms the
|
||||||
|
// root of this mount.
|
||||||
Root string `json:"root"`
|
Root string `json:"root"`
|
||||||
// mount point: the pathname of the mount point relative to the process's root directory.
|
// mount point: the pathname of the mount point relative to the
|
||||||
|
// process's root directory.
|
||||||
Target string `json:"target"`
|
Target string `json:"target"`
|
||||||
// mount options: per-mount options (see mount(2)).
|
// mount options: per-mount options (see mount(2)).
|
||||||
VfsOptstr string `json:"vfs_optstr"`
|
VfsOptstr string `json:"vfs_optstr"`
|
||||||
@@ -126,7 +131,8 @@ func (e *MountInfoEntry) Flags() (flags uintptr, unmatched []string) {
|
|||||||
|
|
||||||
// NewMountInfoDecoder returns a new decoder that reads from r.
|
// NewMountInfoDecoder returns a new decoder that reads from r.
|
||||||
//
|
//
|
||||||
// The decoder introduces its own buffering and may read data from r beyond the mountinfo entries requested.
|
// The decoder introduces its own buffering and may read data from r beyond the
|
||||||
|
// mountinfo entries requested.
|
||||||
func NewMountInfoDecoder(r io.Reader) *MountInfoDecoder {
|
func NewMountInfoDecoder(r io.Reader) *MountInfoDecoder {
|
||||||
return &MountInfoDecoder{s: bufio.NewScanner(r)}
|
return &MountInfoDecoder{s: bufio.NewScanner(r)}
|
||||||
}
|
}
|
||||||
@@ -271,6 +277,8 @@ func parseMountInfoLine(s string, ent *MountInfoEntry) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// EqualWithIgnore compares to [MountInfoEntry] values, ignoring fields that
|
||||||
|
// compare equal to ignore.
|
||||||
func (e *MountInfoEntry) EqualWithIgnore(want *MountInfoEntry, ignore string) bool {
|
func (e *MountInfoEntry) EqualWithIgnore(want *MountInfoEntry, ignore string) bool {
|
||||||
return (e.ID == want.ID || want.ID == -1) &&
|
return (e.ID == want.ID || want.ID == -1) &&
|
||||||
(e.Parent == want.Parent || want.Parent == -1) &&
|
(e.Parent == want.Parent || want.Parent == -1) &&
|
||||||
@@ -284,6 +292,8 @@ func (e *MountInfoEntry) EqualWithIgnore(want *MountInfoEntry, ignore string) bo
|
|||||||
(e.FsOptstr == want.FsOptstr || want.FsOptstr == ignore)
|
(e.FsOptstr == want.FsOptstr || want.FsOptstr == ignore)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// String returns a user-facing representation of a [MountInfoEntry]. It fits
|
||||||
|
// roughly into the mountinfo format, but without mangling.
|
||||||
func (e *MountInfoEntry) String() string {
|
func (e *MountInfoEntry) String() string {
|
||||||
return fmt.Sprintf("%d %d %d:%d %s %s %s %s %s %s %s",
|
return fmt.Sprintf("%d %d %d:%d %s %s %s %s %s %s %s",
|
||||||
e.ID, e.Parent, e.Devno[0], e.Devno[1], e.Root, e.Target, e.VfsOptstr,
|
e.ID, e.Parent, e.Devno[0], e.Devno[1], e.Root, e.Target, e.VfsOptstr,
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// UnfoldTargetError is a pathname that never appeared in a mount hierarchy.
|
||||||
type UnfoldTargetError string
|
type UnfoldTargetError string
|
||||||
|
|
||||||
func (e UnfoldTargetError) Error() string {
|
func (e UnfoldTargetError) Error() string {
|
||||||
@@ -27,6 +28,7 @@ func (n *MountInfoNode) Collective() iter.Seq[*MountInfoNode] {
|
|||||||
return func(yield func(*MountInfoNode) bool) { n.visit(yield) }
|
return func(yield func(*MountInfoNode) bool) { n.visit(yield) }
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// visit recursively visits all visible mountinfo nodes.
|
||||||
func (n *MountInfoNode) visit(yield func(*MountInfoNode) bool) bool {
|
func (n *MountInfoNode) visit(yield func(*MountInfoNode) bool) bool {
|
||||||
if !n.Covered && !yield(n) {
|
if !n.Covered && !yield(n) {
|
||||||
return false
|
return false
|
||||||
|
|||||||
4
dist/release.sh
vendored
4
dist/release.sh
vendored
@@ -13,7 +13,7 @@ echo
|
|||||||
echo '# Building hakurei.'
|
echo '# Building hakurei.'
|
||||||
go generate ./...
|
go generate ./...
|
||||||
go build -trimpath -v -o "${out}/bin/" -ldflags "-s -w
|
go build -trimpath -v -o "${out}/bin/" -ldflags "-s -w
|
||||||
-buildid= -extldflags '-static'
|
-buildid= -linkmode external -extldflags=-static
|
||||||
-X hakurei.app/internal/info.buildVersion=${VERSION}
|
-X hakurei.app/internal/info.buildVersion=${VERSION}
|
||||||
-X hakurei.app/internal/info.hakureiPath=/usr/bin/hakurei
|
-X hakurei.app/internal/info.hakureiPath=/usr/bin/hakurei
|
||||||
-X hakurei.app/internal/info.hsuPath=/usr/bin/hsu
|
-X hakurei.app/internal/info.hsuPath=/usr/bin/hsu
|
||||||
@@ -21,7 +21,7 @@ go build -trimpath -v -o "${out}/bin/" -ldflags "-s -w
|
|||||||
echo
|
echo
|
||||||
|
|
||||||
echo '# Testing hakurei.'
|
echo '# Testing hakurei.'
|
||||||
go test -ldflags='-buildid= -extldflags=-static' ./...
|
go test -ldflags='-buildid= -linkmode external -extldflags=-static' ./...
|
||||||
echo
|
echo
|
||||||
|
|
||||||
echo '# Creating distribution.'
|
echo '# Creating distribution.'
|
||||||
|
|||||||
12
flake.lock
generated
12
flake.lock
generated
@@ -7,11 +7,11 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1765384171,
|
"lastModified": 1772985280,
|
||||||
"narHash": "sha256-FuFtkJrW1Z7u+3lhzPRau69E0CNjADku1mLQQflUORo=",
|
"narHash": "sha256-FdrNykOoY9VStevU4zjSUdvsL9SzJTcXt4omdEDZDLk=",
|
||||||
"owner": "nix-community",
|
"owner": "nix-community",
|
||||||
"repo": "home-manager",
|
"repo": "home-manager",
|
||||||
"rev": "44777152652bc9eacf8876976fa72cc77ca8b9d8",
|
"rev": "8f736f007139d7f70752657dff6a401a585d6cbc",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
@@ -23,11 +23,11 @@
|
|||||||
},
|
},
|
||||||
"nixpkgs": {
|
"nixpkgs": {
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1765311797,
|
"lastModified": 1772822230,
|
||||||
"narHash": "sha256-mSD5Ob7a+T2RNjvPvOA1dkJHGVrNVl8ZOrAwBjKBDQo=",
|
"narHash": "sha256-yf3iYLGbGVlIthlQIk5/4/EQDZNNEmuqKZkQssMljuw=",
|
||||||
"owner": "NixOS",
|
"owner": "NixOS",
|
||||||
"repo": "nixpkgs",
|
"repo": "nixpkgs",
|
||||||
"rev": "09eb77e94fa25202af8f3e81ddc7353d9970ac1b",
|
"rev": "71caefce12ba78d84fe618cf61644dce01cf3a96",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
|||||||
27
flake.nix
27
flake.nix
@@ -29,20 +29,6 @@
|
|||||||
{
|
{
|
||||||
nixosModules.hakurei = import ./nixos.nix self.packages;
|
nixosModules.hakurei = import ./nixos.nix self.packages;
|
||||||
|
|
||||||
buildPackage = forAllSystems (
|
|
||||||
system:
|
|
||||||
nixpkgsFor.${system}.callPackage (
|
|
||||||
import ./cmd/hpkg/build.nix {
|
|
||||||
inherit
|
|
||||||
nixpkgsFor
|
|
||||||
system
|
|
||||||
nixpkgs
|
|
||||||
home-manager
|
|
||||||
;
|
|
||||||
}
|
|
||||||
)
|
|
||||||
);
|
|
||||||
|
|
||||||
checks = forAllSystems (
|
checks = forAllSystems (
|
||||||
system:
|
system:
|
||||||
let
|
let
|
||||||
@@ -71,8 +57,6 @@
|
|||||||
|
|
||||||
sharefs = callPackage ./cmd/sharefs/test { inherit system self; };
|
sharefs = callPackage ./cmd/sharefs/test { inherit system self; };
|
||||||
|
|
||||||
hpkg = callPackage ./cmd/hpkg/test { inherit system self; };
|
|
||||||
|
|
||||||
formatting = runCommandLocal "check-formatting" { nativeBuildInputs = [ nixfmt-rfc-style ]; } ''
|
formatting = runCommandLocal "check-formatting" { nativeBuildInputs = [ nixfmt-rfc-style ]; } ''
|
||||||
cd ${./.}
|
cd ${./.}
|
||||||
|
|
||||||
@@ -115,7 +99,7 @@
|
|||||||
hakurei = pkgs.pkgsStatic.callPackage ./package.nix {
|
hakurei = pkgs.pkgsStatic.callPackage ./package.nix {
|
||||||
inherit (pkgs)
|
inherit (pkgs)
|
||||||
# passthru.buildInputs
|
# passthru.buildInputs
|
||||||
go
|
go_1_26
|
||||||
clang
|
clang
|
||||||
|
|
||||||
# nativeBuildInputs
|
# nativeBuildInputs
|
||||||
@@ -127,11 +111,6 @@
|
|||||||
glibc
|
glibc
|
||||||
xdg-dbus-proxy
|
xdg-dbus-proxy
|
||||||
|
|
||||||
# hpkg
|
|
||||||
zstd
|
|
||||||
gnutar
|
|
||||||
coreutils
|
|
||||||
|
|
||||||
# for check
|
# for check
|
||||||
util-linux
|
util-linux
|
||||||
nettools
|
nettools
|
||||||
@@ -203,7 +182,7 @@
|
|||||||
let
|
let
|
||||||
# this is used for interactive vm testing during development, where tests might be broken
|
# this is used for interactive vm testing during development, where tests might be broken
|
||||||
package = self.packages.${pkgs.stdenv.hostPlatform.system}.hakurei.override {
|
package = self.packages.${pkgs.stdenv.hostPlatform.system}.hakurei.override {
|
||||||
buildGoModule = previousArgs: pkgs.pkgsStatic.buildGoModule (previousArgs // { doCheck = false; });
|
buildGo126Module = previousArgs: pkgs.pkgsStatic.buildGo126Module (previousArgs // { doCheck = false; });
|
||||||
};
|
};
|
||||||
in
|
in
|
||||||
{
|
{
|
||||||
@@ -219,7 +198,7 @@
|
|||||||
./test/interactive/trace.nix
|
./test/interactive/trace.nix
|
||||||
|
|
||||||
self.nixosModules.hakurei
|
self.nixosModules.hakurei
|
||||||
self.inputs.home-manager.nixosModules.home-manager
|
home-manager.nixosModules.home-manager
|
||||||
];
|
];
|
||||||
};
|
};
|
||||||
in
|
in
|
||||||
|
|||||||
144
hst/config.go
144
hst/config.go
@@ -6,96 +6,137 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"hakurei.app/container/check"
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/container/std"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Config configures an application container, implemented in internal/app.
|
// Config configures an application container.
|
||||||
type Config struct {
|
type Config struct {
|
||||||
// Reverse-DNS style configured arbitrary identifier string.
|
// Reverse-DNS style configured arbitrary identifier string.
|
||||||
// Passed to wayland security-context-v1 and used as part of defaults in dbus session proxy.
|
//
|
||||||
|
// This value is passed as is to Wayland security-context-v1 and used as
|
||||||
|
// part of defaults in D-Bus session proxy. The zero value causes a default
|
||||||
|
// value to be derived from the container instance.
|
||||||
ID string `json:"id,omitempty"`
|
ID string `json:"id,omitempty"`
|
||||||
|
|
||||||
// System services to make available in the container.
|
// System services to make available in the container.
|
||||||
Enablements *Enablements `json:"enablements,omitempty"`
|
Enablements *Enablements `json:"enablements,omitempty"`
|
||||||
|
|
||||||
// Session D-Bus proxy configuration.
|
// Session D-Bus proxy configuration.
|
||||||
// If set to nil, session bus proxy assume built-in defaults.
|
//
|
||||||
|
// Has no effect if [EDBus] but is not set in Enablements. The zero value
|
||||||
|
// assumes built-in defaults derived from ID.
|
||||||
SessionBus *BusConfig `json:"session_bus,omitempty"`
|
SessionBus *BusConfig `json:"session_bus,omitempty"`
|
||||||
// System D-Bus proxy configuration.
|
// System D-Bus proxy configuration.
|
||||||
// If set to nil, system bus proxy is disabled.
|
//
|
||||||
|
// Has no effect if [EDBus] but is not set in Enablements. The zero value
|
||||||
|
// disables system bus proxy.
|
||||||
SystemBus *BusConfig `json:"system_bus,omitempty"`
|
SystemBus *BusConfig `json:"system_bus,omitempty"`
|
||||||
|
|
||||||
// Direct access to wayland socket, no attempt is made to attach security-context-v1
|
// Direct access to Wayland socket, no attempt is made to attach
|
||||||
// and the bare socket is made available to the container.
|
// security-context-v1 and the bare socket is made available to the
|
||||||
|
// container.
|
||||||
//
|
//
|
||||||
// This option is unsupported and most likely enables full control over the Wayland
|
// This option is unsupported and will most likely enable full control over
|
||||||
// session. Do not set this to true unless you are sure you know what you are doing.
|
// the Wayland session from within the container. Do not set this to true
|
||||||
|
// unless you are sure you know what you are doing.
|
||||||
DirectWayland bool `json:"direct_wayland,omitempty"`
|
DirectWayland bool `json:"direct_wayland,omitempty"`
|
||||||
// Direct access to the PipeWire socket established via SecurityContext::Create, no
|
|
||||||
// attempt is made to start the pipewire-pulse server.
|
// Direct access to the PipeWire socket established via SecurityContext::Create,
|
||||||
|
// no attempt is made to start the pipewire-pulse server.
|
||||||
//
|
//
|
||||||
// The SecurityContext machinery is fatally flawed, it blindly sets read and execute
|
// The SecurityContext machinery is fatally flawed, it unconditionally sets
|
||||||
// bits on all objects for clients with the lowest achievable privilege level (by
|
// read and execute bits on all objects for clients with the lowest achievable
|
||||||
// setting PW_KEY_ACCESS to "restricted"). This enables them to call any method
|
// privilege level (by setting PW_KEY_ACCESS to "restricted" or by satisfying
|
||||||
// targeting any object, and since Registry::Destroy checks for the read and execute bit,
|
// all conditions of [the /.flatpak-info hack]). This enables them to call
|
||||||
// allows the destruction of any object other than PW_ID_CORE as well. This behaviour
|
// any method targeting any object, and since Registry::Destroy checks for
|
||||||
// is implemented separately in media-session and wireplumber, with the wireplumber
|
// the read and execute bit, allows the destruction of any object other than
|
||||||
// implementation in Lua via an embedded Lua vm. In all known setups, wireplumber is
|
// PW_ID_CORE as well.
|
||||||
// in use, and there is no known way to change its behaviour and set permissions
|
|
||||||
// differently without replacing the Lua script. Also, since PipeWire relies on these
|
|
||||||
// permissions to work, reducing them is not possible.
|
|
||||||
//
|
//
|
||||||
// Currently, the only other sandboxed use case is flatpak, which is not aware of
|
// This behaviour is implemented separately in media-session and wireplumber,
|
||||||
// PipeWire and blindly exposes the bare PulseAudio socket to the container (behaves
|
// with the wireplumber implementation in Lua via an embedded Lua vm. In all
|
||||||
// like DirectPulse). This socket is backed by the pipewire-pulse compatibility daemon,
|
// known setups, wireplumber is in use, and in that case, no option for
|
||||||
// which obtains client pid via the SO_PEERCRED option. The PipeWire daemon, pipewire-pulse
|
// configuring this behaviour exists, without replacing the Lua script.
|
||||||
// daemon and the session manager daemon then separately performs the /.flatpak-info hack
|
// Also, since PipeWire relies on these permissions to work, reducing them
|
||||||
// described in https://git.gensokyo.uk/security/hakurei/issues/21. Under such use case,
|
// was never possible in the first place.
|
||||||
// since the client has no direct access to PipeWire, insecure parts of the protocol are
|
|
||||||
// obscured by pipewire-pulse simply not implementing them, and thus hiding the flaws
|
|
||||||
// described above.
|
|
||||||
//
|
//
|
||||||
// Hakurei does not rely on the /.flatpak-info hack. Instead, a socket is sets up via
|
// Currently, the only other sandboxed use case is flatpak, which is not
|
||||||
// SecurityContext. A pipewire-pulse server connected through it achieves the same
|
// aware of PipeWire and blindly exposes the bare PulseAudio socket to the
|
||||||
// permissions as flatpak does via the /.flatpak-info hack and is maintained for the
|
// container (behaves like DirectPulse). This socket is backed by the
|
||||||
// life of the container.
|
// pipewire-pulse compatibility daemon, which obtains client pid via the
|
||||||
|
// SO_PEERCRED option. The PipeWire daemon, pipewire-pulse daemon and the
|
||||||
|
// session manager daemon then separately performs [the /.flatpak-info hack].
|
||||||
|
// Under such use case, since the client has no direct access to PipeWire,
|
||||||
|
// insecure parts of the protocol are obscured by the absence of an
|
||||||
|
// equivalent API in PulseAudio, or pipewire-pulse simply not implementing
|
||||||
|
// them.
|
||||||
|
//
|
||||||
|
// Hakurei does not rely on [the /.flatpak-info hack]. Instead, a socket is
|
||||||
|
// sets up via SecurityContext. A pipewire-pulse server connected through it
|
||||||
|
// achieves the same permissions as flatpak does via [the /.flatpak-info hack]
|
||||||
|
// and is maintained for the life of the container.
|
||||||
|
//
|
||||||
|
// This option is unsupported and enables a denial-of-service attack as the
|
||||||
|
// sandboxed client is able to destroy any client object and thus
|
||||||
|
// disconnecting them from PipeWire, or destroy the SecurityContext object,
|
||||||
|
// preventing any further container creation.
|
||||||
//
|
//
|
||||||
// This option is unsupported and enables a denial-of-service attack as the sandboxed
|
|
||||||
// client is able to destroy any client object and thus disconnecting them from PipeWire,
|
|
||||||
// or destroy the SecurityContext object preventing any further container creation.
|
|
||||||
// Do not set this to true, it is insecure under any configuration.
|
// Do not set this to true, it is insecure under any configuration.
|
||||||
DirectPipeWire bool `json:"direct_pipewire,omitempty"`
|
|
||||||
// Direct access to PulseAudio socket, no attempt is made to establish pipewire-pulse
|
|
||||||
// server via a PipeWire socket with a SecurityContext attached and the bare socket
|
|
||||||
// is made available to the container.
|
|
||||||
//
|
//
|
||||||
// This option is unsupported and enables arbitrary code execution as the PulseAudio
|
// [the /.flatpak-info hack]: https://git.gensokyo.uk/security/hakurei/issues/21
|
||||||
// server. Do not set this to true, it is insecure under any configuration.
|
DirectPipeWire bool `json:"direct_pipewire,omitempty"`
|
||||||
|
|
||||||
|
// Direct access to PulseAudio socket, no attempt is made to establish
|
||||||
|
// pipewire-pulse server via a PipeWire socket with a SecurityContext
|
||||||
|
// attached, and the bare socket is made available to the container.
|
||||||
|
//
|
||||||
|
// This option is unsupported and enables arbitrary code execution as the
|
||||||
|
// PulseAudio server.
|
||||||
|
//
|
||||||
|
// Do not set this to true, it is insecure under any configuration.
|
||||||
DirectPulse bool `json:"direct_pulse,omitempty"`
|
DirectPulse bool `json:"direct_pulse,omitempty"`
|
||||||
|
|
||||||
// Extra acl updates to perform before setuid.
|
// Extra acl updates to perform before setuid.
|
||||||
ExtraPerms []ExtraPermConfig `json:"extra_perms,omitempty"`
|
ExtraPerms []ExtraPermConfig `json:"extra_perms,omitempty"`
|
||||||
|
|
||||||
// Numerical application id, passed to hsu, used to derive init user namespace credentials.
|
// Numerical application id, passed to hsu, used to derive init user
|
||||||
|
// namespace credentials.
|
||||||
Identity int `json:"identity"`
|
Identity int `json:"identity"`
|
||||||
// Init user namespace supplementary groups inherited by all container processes.
|
// Init user namespace supplementary groups inherited by all container processes.
|
||||||
Groups []string `json:"groups"`
|
Groups []string `json:"groups"`
|
||||||
|
|
||||||
|
// Scheduling policy to set for the container.
|
||||||
|
//
|
||||||
|
// The zero value retains the current scheduling policy.
|
||||||
|
SchedPolicy std.SchedPolicy `json:"sched_policy,omitempty"`
|
||||||
|
// Scheduling priority to set for the container.
|
||||||
|
//
|
||||||
|
// The zero value implies the minimum priority of the current SchedPolicy.
|
||||||
|
// Has no effect if SchedPolicy is zero.
|
||||||
|
SchedPriority std.Int `json:"sched_priority,omitempty"`
|
||||||
|
|
||||||
// High level configuration applied to the underlying [container].
|
// High level configuration applied to the underlying [container].
|
||||||
Container *ContainerConfig `json:"container"`
|
Container *ContainerConfig `json:"container"`
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// ErrConfigNull is returned by [Config.Validate] for an invalid configuration that contains a null value for any
|
// ErrConfigNull is returned by [Config.Validate] for an invalid configuration
|
||||||
// field that must not be null.
|
// that contains a null value for any field that must not be null.
|
||||||
ErrConfigNull = errors.New("unexpected null in config")
|
ErrConfigNull = errors.New("unexpected null in config")
|
||||||
|
|
||||||
// ErrIdentityBounds is returned by [Config.Validate] for an out of bounds [Config.Identity] value.
|
// ErrIdentityBounds is returned by [Config.Validate] for an out of bounds
|
||||||
|
// [Config.Identity] value.
|
||||||
ErrIdentityBounds = errors.New("identity out of bounds")
|
ErrIdentityBounds = errors.New("identity out of bounds")
|
||||||
|
|
||||||
// ErrEnviron is returned by [Config.Validate] if an environment variable name contains '=' or NUL.
|
// ErrSchedPolicyBounds is returned by [Config.Validate] for an out of bounds
|
||||||
|
// [Config.SchedPolicy] value.
|
||||||
|
ErrSchedPolicyBounds = errors.New("scheduling policy out of bounds")
|
||||||
|
|
||||||
|
// ErrEnviron is returned by [Config.Validate] if an environment variable
|
||||||
|
// name contains '=' or NUL.
|
||||||
ErrEnviron = errors.New("invalid environment variable name")
|
ErrEnviron = errors.New("invalid environment variable name")
|
||||||
|
|
||||||
// ErrInsecure is returned by [Config.Validate] if the configuration is considered insecure.
|
// ErrInsecure is returned by [Config.Validate] if the configuration is
|
||||||
|
// considered insecure.
|
||||||
ErrInsecure = errors.New("configuration is insecure")
|
ErrInsecure = errors.New("configuration is insecure")
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -112,6 +153,13 @@ func (config *Config) Validate() error {
|
|||||||
Msg: "identity " + strconv.Itoa(config.Identity) + " out of range"}
|
Msg: "identity " + strconv.Itoa(config.Identity) + " out of range"}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if config.SchedPolicy < 0 || config.SchedPolicy > std.SCHED_LAST {
|
||||||
|
return &AppError{Step: "validate configuration", Err: ErrSchedPolicyBounds,
|
||||||
|
Msg: "scheduling policy " +
|
||||||
|
strconv.Itoa(int(config.SchedPolicy)) +
|
||||||
|
" out of range"}
|
||||||
|
}
|
||||||
|
|
||||||
if err := config.SessionBus.CheckInterfaces("session"); err != nil {
|
if err := config.SessionBus.CheckInterfaces("session"); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -22,6 +22,10 @@ func TestConfigValidate(t *testing.T) {
|
|||||||
Msg: "identity -1 out of range"}},
|
Msg: "identity -1 out of range"}},
|
||||||
{"identity upper", &hst.Config{Identity: 10000}, &hst.AppError{Step: "validate configuration", Err: hst.ErrIdentityBounds,
|
{"identity upper", &hst.Config{Identity: 10000}, &hst.AppError{Step: "validate configuration", Err: hst.ErrIdentityBounds,
|
||||||
Msg: "identity 10000 out of range"}},
|
Msg: "identity 10000 out of range"}},
|
||||||
|
{"sched lower", &hst.Config{SchedPolicy: -1}, &hst.AppError{Step: "validate configuration", Err: hst.ErrSchedPolicyBounds,
|
||||||
|
Msg: "scheduling policy -1 out of range"}},
|
||||||
|
{"sched upper", &hst.Config{SchedPolicy: 0xcafe}, &hst.AppError{Step: "validate configuration", Err: hst.ErrSchedPolicyBounds,
|
||||||
|
Msg: "scheduling policy 51966 out of range"}},
|
||||||
{"dbus session", &hst.Config{SessionBus: &hst.BusConfig{See: []string{""}}},
|
{"dbus session", &hst.Config{SessionBus: &hst.BusConfig{See: []string{""}}},
|
||||||
&hst.BadInterfaceError{Interface: "", Segment: "session"}},
|
&hst.BadInterfaceError{Interface: "", Segment: "session"}},
|
||||||
{"dbus system", &hst.Config{SystemBus: &hst.BusConfig{See: []string{""}}},
|
{"dbus system", &hst.Config{SystemBus: &hst.BusConfig{See: []string{""}}},
|
||||||
|
|||||||
@@ -16,18 +16,20 @@ const PrivateTmp = "/.hakurei"
|
|||||||
var AbsPrivateTmp = check.MustAbs(PrivateTmp)
|
var AbsPrivateTmp = check.MustAbs(PrivateTmp)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// WaitDelayDefault is used when WaitDelay has its zero value.
|
// WaitDelayDefault is used when WaitDelay has the zero value.
|
||||||
WaitDelayDefault = 5 * time.Second
|
WaitDelayDefault = 5 * time.Second
|
||||||
// WaitDelayMax is used if WaitDelay exceeds its value.
|
// WaitDelayMax is used when WaitDelay exceeds its value.
|
||||||
WaitDelayMax = 30 * time.Second
|
WaitDelayMax = 30 * time.Second
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// ExitFailure is returned if the container fails to start.
|
// ExitFailure is returned if the container fails to start.
|
||||||
ExitFailure = iota + 1
|
ExitFailure = iota + 1
|
||||||
// ExitCancel is returned if the container is terminated by a shim-directed signal which cancels its context.
|
// ExitCancel is returned if the container is terminated by a shim-directed
|
||||||
|
// signal which cancels its context.
|
||||||
ExitCancel
|
ExitCancel
|
||||||
// ExitOrphan is returned when the shim is orphaned before priv side delivers a signal.
|
// ExitOrphan is returned when the shim is orphaned before priv side process
|
||||||
|
// delivers a signal.
|
||||||
ExitOrphan
|
ExitOrphan
|
||||||
|
|
||||||
// ExitRequest is returned when the priv side process requests shim exit.
|
// ExitRequest is returned when the priv side process requests shim exit.
|
||||||
@@ -38,10 +40,12 @@ const (
|
|||||||
type Flags uintptr
|
type Flags uintptr
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// FMultiarch unblocks syscalls required for multiarch to work on applicable targets.
|
// FMultiarch unblocks system calls required for multiarch to work on
|
||||||
|
// multiarch-enabled targets (amd64, arm64).
|
||||||
FMultiarch Flags = 1 << iota
|
FMultiarch Flags = 1 << iota
|
||||||
|
|
||||||
// FSeccompCompat changes emitted seccomp filter programs to be identical to that of Flatpak.
|
// FSeccompCompat changes emitted seccomp filter programs to be identical to
|
||||||
|
// that of Flatpak in enabled rulesets.
|
||||||
FSeccompCompat
|
FSeccompCompat
|
||||||
// FDevel unblocks ptrace and friends.
|
// FDevel unblocks ptrace and friends.
|
||||||
FDevel
|
FDevel
|
||||||
@@ -54,12 +58,15 @@ const (
|
|||||||
// FTty unblocks dangerous terminal I/O (faking input).
|
// FTty unblocks dangerous terminal I/O (faking input).
|
||||||
FTty
|
FTty
|
||||||
|
|
||||||
// FMapRealUID maps the target user uid to the privileged user uid in the container user namespace.
|
// FMapRealUID maps the target user uid to the privileged user uid in the
|
||||||
|
// container user namespace.
|
||||||
|
//
|
||||||
// Some programs fail to connect to dbus session running as a different uid,
|
// Some programs fail to connect to dbus session running as a different uid,
|
||||||
// this option works around it by mapping priv-side caller uid in container.
|
// this option works around it by mapping priv-side caller uid in container.
|
||||||
FMapRealUID
|
FMapRealUID
|
||||||
|
|
||||||
// FDevice mount /dev/ from the init mount namespace as-is in the container mount namespace.
|
// FDevice mount /dev/ from the init mount namespace as is in the container
|
||||||
|
// mount namespace.
|
||||||
FDevice
|
FDevice
|
||||||
|
|
||||||
// FShareRuntime shares XDG_RUNTIME_DIR between containers under the same identity.
|
// FShareRuntime shares XDG_RUNTIME_DIR between containers under the same identity.
|
||||||
@@ -112,30 +119,37 @@ func (flags Flags) String() string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// ContainerConfig describes the container configuration to be applied to an underlying [container].
|
// ContainerConfig describes the container configuration to be applied to an
|
||||||
|
// underlying [container]. It is validated by [Config.Validate].
|
||||||
type ContainerConfig struct {
|
type ContainerConfig struct {
|
||||||
// Container UTS namespace hostname.
|
// Container UTS namespace hostname.
|
||||||
Hostname string `json:"hostname,omitempty"`
|
Hostname string `json:"hostname,omitempty"`
|
||||||
|
|
||||||
// Duration in nanoseconds to wait for after interrupting the initial process.
|
// Duration in nanoseconds to wait for after interrupting the initial process.
|
||||||
// Defaults to [WaitDelayDefault] if zero, or [WaitDelayMax] if greater than [WaitDelayMax].
|
//
|
||||||
// Values lesser than zero is equivalent to zero, bypassing [WaitDelayDefault].
|
// Defaults to [WaitDelayDefault] if zero, or [WaitDelayMax] if greater than
|
||||||
|
// [WaitDelayMax]. Values lesser than zero is equivalent to zero, bypassing
|
||||||
|
// [WaitDelayDefault].
|
||||||
WaitDelay time.Duration `json:"wait_delay,omitempty"`
|
WaitDelay time.Duration `json:"wait_delay,omitempty"`
|
||||||
|
|
||||||
// Initial process environment variables.
|
// Initial process environment variables.
|
||||||
Env map[string]string `json:"env"`
|
Env map[string]string `json:"env"`
|
||||||
|
|
||||||
/* Container mount points.
|
// Container mount points.
|
||||||
|
//
|
||||||
If the first element targets /, it is inserted early and excluded from path hiding. */
|
// If the first element targets /, it is inserted early and excluded from
|
||||||
|
// path hiding. Otherwise, an anonymous instance of tmpfs is set up on /.
|
||||||
Filesystem []FilesystemConfigJSON `json:"filesystem"`
|
Filesystem []FilesystemConfigJSON `json:"filesystem"`
|
||||||
|
|
||||||
// String used as the username of the emulated user, validated against the default NAME_REGEX from adduser.
|
// String used as the username of the emulated user, validated against the
|
||||||
|
// default NAME_REGEX from adduser.
|
||||||
|
//
|
||||||
// Defaults to passwd name of target uid or chronos.
|
// Defaults to passwd name of target uid or chronos.
|
||||||
Username string `json:"username,omitempty"`
|
Username string `json:"username,omitempty"`
|
||||||
// Pathname of shell in the container filesystem to use for the emulated user.
|
// Pathname of shell in the container filesystem to use for the emulated user.
|
||||||
Shell *check.Absolute `json:"shell"`
|
Shell *check.Absolute `json:"shell"`
|
||||||
// Directory in the container filesystem to enter and use as the home directory of the emulated user.
|
// Directory in the container filesystem to enter and use as the home
|
||||||
|
// directory of the emulated user.
|
||||||
Home *check.Absolute `json:"home"`
|
Home *check.Absolute `json:"home"`
|
||||||
|
|
||||||
// Pathname to executable file in the container filesystem.
|
// Pathname to executable file in the container filesystem.
|
||||||
@@ -148,6 +162,7 @@ type ContainerConfig struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ContainerConfigF is [ContainerConfig] stripped of its methods.
|
// ContainerConfigF is [ContainerConfig] stripped of its methods.
|
||||||
|
//
|
||||||
// The [ContainerConfig.Flags] field does not survive a [json] round trip.
|
// The [ContainerConfig.Flags] field does not survive a [json] round trip.
|
||||||
type ContainerConfigF ContainerConfig
|
type ContainerConfigF ContainerConfig
|
||||||
|
|
||||||
|
|||||||
46
hst/dbus.go
46
hst/dbus.go
@@ -5,8 +5,26 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
// BadInterfaceError is returned when Interface fails an undocumented check in xdg-dbus-proxy,
|
// BadInterfaceError is returned when Interface fails an undocumented check in
|
||||||
// which would have cause a silent failure.
|
// xdg-dbus-proxy, which would have cause a silent failure.
|
||||||
|
//
|
||||||
|
// xdg-dbus-proxy fails without output when this condition is not met:
|
||||||
|
//
|
||||||
|
// char *dot = strrchr (filter->interface, '.');
|
||||||
|
// if (dot != NULL)
|
||||||
|
// {
|
||||||
|
// *dot = 0;
|
||||||
|
// if (strcmp (dot + 1, "*") != 0)
|
||||||
|
// filter->member = g_strdup (dot + 1);
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// trim ".*" since they are removed before searching for '.':
|
||||||
|
//
|
||||||
|
// if (g_str_has_suffix (name, ".*"))
|
||||||
|
// {
|
||||||
|
// name[strlen (name) - 2] = 0;
|
||||||
|
// wildcard = TRUE;
|
||||||
|
// }
|
||||||
type BadInterfaceError struct {
|
type BadInterfaceError struct {
|
||||||
// Interface is the offending interface string.
|
// Interface is the offending interface string.
|
||||||
Interface string
|
Interface string
|
||||||
@@ -19,7 +37,8 @@ func (e *BadInterfaceError) Error() string {
|
|||||||
if e == nil {
|
if e == nil {
|
||||||
return "<nil>"
|
return "<nil>"
|
||||||
}
|
}
|
||||||
return "bad interface string " + strconv.Quote(e.Interface) + " in " + e.Segment + " bus configuration"
|
return "bad interface string " + strconv.Quote(e.Interface) +
|
||||||
|
" in " + e.Segment + " bus configuration"
|
||||||
}
|
}
|
||||||
|
|
||||||
// BusConfig configures the xdg-dbus-proxy process.
|
// BusConfig configures the xdg-dbus-proxy process.
|
||||||
@@ -76,31 +95,14 @@ func (c *BusConfig) Interfaces(yield func(string) bool) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// CheckInterfaces checks for invalid interface strings based on an undocumented check in xdg-dbus-error,
|
// CheckInterfaces checks for invalid interface strings based on an undocumented
|
||||||
// returning [BadInterfaceError] if one is encountered.
|
// check in xdg-dbus-error, returning [BadInterfaceError] if one is encountered.
|
||||||
func (c *BusConfig) CheckInterfaces(segment string) error {
|
func (c *BusConfig) CheckInterfaces(segment string) error {
|
||||||
if c == nil {
|
if c == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
for iface := range c.Interfaces {
|
for iface := range c.Interfaces {
|
||||||
/*
|
|
||||||
xdg-dbus-proxy fails without output when this condition is not met:
|
|
||||||
char *dot = strrchr (filter->interface, '.');
|
|
||||||
if (dot != NULL)
|
|
||||||
{
|
|
||||||
*dot = 0;
|
|
||||||
if (strcmp (dot + 1, "*") != 0)
|
|
||||||
filter->member = g_strdup (dot + 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
trim ".*" since they are removed before searching for '.':
|
|
||||||
if (g_str_has_suffix (name, ".*"))
|
|
||||||
{
|
|
||||||
name[strlen (name) - 2] = 0;
|
|
||||||
wildcard = TRUE;
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
if strings.IndexByte(strings.TrimSuffix(iface, ".*"), '.') == -1 {
|
if strings.IndexByte(strings.TrimSuffix(iface, ".*"), '.') == -1 {
|
||||||
return &BadInterfaceError{iface, segment}
|
return &BadInterfaceError{iface, segment}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -11,15 +11,17 @@ import (
|
|||||||
type Enablement byte
|
type Enablement byte
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// EWayland exposes a wayland pathname socket via security-context-v1.
|
// EWayland exposes a Wayland pathname socket via security-context-v1.
|
||||||
EWayland Enablement = 1 << iota
|
EWayland Enablement = 1 << iota
|
||||||
// EX11 adds the target user via X11 ChangeHosts and exposes the X11 pathname socket.
|
// EX11 adds the target user via X11 ChangeHosts and exposes the X11
|
||||||
|
// pathname socket.
|
||||||
EX11
|
EX11
|
||||||
// EDBus enables the per-container xdg-dbus-proxy daemon.
|
// EDBus enables the per-container xdg-dbus-proxy daemon.
|
||||||
EDBus
|
EDBus
|
||||||
// EPipeWire exposes a pipewire pathname socket via SecurityContext.
|
// EPipeWire exposes a pipewire pathname socket via SecurityContext.
|
||||||
EPipeWire
|
EPipeWire
|
||||||
// EPulse copies the PulseAudio cookie to [hst.PrivateTmp] and exposes the PulseAudio socket.
|
// EPulse copies the PulseAudio cookie to [hst.PrivateTmp] and exposes the
|
||||||
|
// PulseAudio socket.
|
||||||
EPulse
|
EPulse
|
||||||
|
|
||||||
// EM is a noop.
|
// EM is a noop.
|
||||||
|
|||||||
15
hst/fs.go
15
hst/fs.go
@@ -24,7 +24,8 @@ type FilesystemConfig interface {
|
|||||||
fmt.Stringer
|
fmt.Stringer
|
||||||
}
|
}
|
||||||
|
|
||||||
// The Ops interface enables [FilesystemConfig] to queue container ops without depending on the container package.
|
// The Ops interface enables [FilesystemConfig] to queue container ops without
|
||||||
|
// depending on the container package.
|
||||||
type Ops interface {
|
type Ops interface {
|
||||||
// Tmpfs appends an op that mounts tmpfs on a container path.
|
// Tmpfs appends an op that mounts tmpfs on a container path.
|
||||||
Tmpfs(target *check.Absolute, size int, perm os.FileMode) Ops
|
Tmpfs(target *check.Absolute, size int, perm os.FileMode) Ops
|
||||||
@@ -41,12 +42,15 @@ type Ops interface {
|
|||||||
// Link appends an op that creates a symlink in the container filesystem.
|
// Link appends an op that creates a symlink in the container filesystem.
|
||||||
Link(target *check.Absolute, linkName string, dereference bool) Ops
|
Link(target *check.Absolute, linkName string, dereference bool) Ops
|
||||||
|
|
||||||
// Root appends an op that expands a directory into a toplevel bind mount mirror on container root.
|
// Root appends an op that expands a directory into a toplevel bind mount
|
||||||
|
// mirror on container root.
|
||||||
Root(host *check.Absolute, flags int) Ops
|
Root(host *check.Absolute, flags int) Ops
|
||||||
// Etc appends an op that expands host /etc into a toplevel symlink mirror with /etc semantics.
|
// Etc appends an op that expands host /etc into a toplevel symlink mirror
|
||||||
|
// with /etc semantics.
|
||||||
Etc(host *check.Absolute, prefix string) Ops
|
Etc(host *check.Absolute, prefix string) Ops
|
||||||
|
|
||||||
// Daemon appends an op that starts a daemon in the container and blocks until target appears.
|
// Daemon appends an op that starts a daemon in the container and blocks
|
||||||
|
// until target appears.
|
||||||
Daemon(target, path *check.Absolute, args ...string) Ops
|
Daemon(target, path *check.Absolute, args ...string) Ops
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -61,7 +65,8 @@ type ApplyState struct {
|
|||||||
// ErrFSNull is returned by [json] on encountering a null [FilesystemConfig] value.
|
// ErrFSNull is returned by [json] on encountering a null [FilesystemConfig] value.
|
||||||
var ErrFSNull = errors.New("unexpected null in mount point")
|
var ErrFSNull = errors.New("unexpected null in mount point")
|
||||||
|
|
||||||
// FSTypeError is returned when [ContainerConfig.Filesystem] contains an entry with invalid type.
|
// FSTypeError is returned when [ContainerConfig.Filesystem] contains an entry
|
||||||
|
// with invalid type.
|
||||||
type FSTypeError string
|
type FSTypeError string
|
||||||
|
|
||||||
func (f FSTypeError) Error() string { return fmt.Sprintf("invalid filesystem type %q", string(f)) }
|
func (f FSTypeError) Error() string { return fmt.Sprintf("invalid filesystem type %q", string(f)) }
|
||||||
|
|||||||
@@ -18,7 +18,9 @@ type FSLink struct {
|
|||||||
Target *check.Absolute `json:"dst"`
|
Target *check.Absolute `json:"dst"`
|
||||||
// Arbitrary linkname value store in the symlink.
|
// Arbitrary linkname value store in the symlink.
|
||||||
Linkname string `json:"linkname"`
|
Linkname string `json:"linkname"`
|
||||||
// Whether to treat Linkname as an absolute pathname and dereference before creating the link.
|
|
||||||
|
// Whether to treat Linkname as an absolute pathname and dereference before
|
||||||
|
// creating the link.
|
||||||
Dereference bool `json:"dereference,omitempty"`
|
Dereference bool `json:"dereference,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -19,9 +19,11 @@ type FSOverlay struct {
|
|||||||
|
|
||||||
// Any filesystem, does not need to be on a writable filesystem, must not be nil.
|
// Any filesystem, does not need to be on a writable filesystem, must not be nil.
|
||||||
Lower []*check.Absolute `json:"lower"`
|
Lower []*check.Absolute `json:"lower"`
|
||||||
// The upperdir is normally on a writable filesystem, leave as nil to mount Lower readonly.
|
// The upperdir is normally on a writable filesystem, leave as nil to mount
|
||||||
|
// Lower readonly.
|
||||||
Upper *check.Absolute `json:"upper,omitempty"`
|
Upper *check.Absolute `json:"upper,omitempty"`
|
||||||
// The workdir needs to be an empty directory on the same filesystem as Upper, must not be nil if Upper is populated.
|
// The workdir needs to be an empty directory on the same filesystem as
|
||||||
|
// Upper, must not be nil if Upper is populated.
|
||||||
Work *check.Absolute `json:"work,omitempty"`
|
Work *check.Absolute `json:"work,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
60
hst/hst.go
60
hst/hst.go
@@ -44,11 +44,13 @@ func (e *AppError) Message() string {
|
|||||||
type Paths struct {
|
type Paths struct {
|
||||||
// Temporary directory returned by [os.TempDir], usually equivalent to [fhs.AbsTmp].
|
// Temporary directory returned by [os.TempDir], usually equivalent to [fhs.AbsTmp].
|
||||||
TempDir *check.Absolute `json:"temp_dir"`
|
TempDir *check.Absolute `json:"temp_dir"`
|
||||||
// Shared directory specific to the hsu userid, usually (`/tmp/hakurei.%d`, [Info.User]).
|
// Shared directory specific to the hsu userid, usually
|
||||||
|
// (`/tmp/hakurei.%d`, [Info.User]).
|
||||||
SharePath *check.Absolute `json:"share_path"`
|
SharePath *check.Absolute `json:"share_path"`
|
||||||
// Checked XDG_RUNTIME_DIR value, usually (`/run/user/%d`, uid).
|
// Checked XDG_RUNTIME_DIR value, usually (`/run/user/%d`, uid).
|
||||||
RuntimePath *check.Absolute `json:"runtime_path"`
|
RuntimePath *check.Absolute `json:"runtime_path"`
|
||||||
// Shared directory specific to the hsu userid located in RuntimePath, usually (`/run/user/%d/hakurei`, uid).
|
// Shared directory specific to the hsu userid located in RuntimePath,
|
||||||
|
// usually (`/run/user/%d/hakurei`, uid).
|
||||||
RunDirPath *check.Absolute `json:"run_dir_path"`
|
RunDirPath *check.Absolute `json:"run_dir_path"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -74,10 +76,23 @@ func Template() *Config {
|
|||||||
|
|
||||||
SessionBus: &BusConfig{
|
SessionBus: &BusConfig{
|
||||||
See: nil,
|
See: nil,
|
||||||
Talk: []string{"org.freedesktop.Notifications", "org.freedesktop.FileManager1", "org.freedesktop.ScreenSaver",
|
Talk: []string{
|
||||||
"org.freedesktop.secrets", "org.kde.kwalletd5", "org.kde.kwalletd6", "org.gnome.SessionManager"},
|
"org.freedesktop.Notifications",
|
||||||
Own: []string{"org.chromium.Chromium.*", "org.mpris.MediaPlayer2.org.chromium.Chromium.*",
|
"org.freedesktop.FileManager1",
|
||||||
"org.mpris.MediaPlayer2.chromium.*"},
|
"org.freedesktop.ScreenSaver",
|
||||||
|
"org.freedesktop.secrets",
|
||||||
|
|
||||||
|
"org.kde.kwalletd5",
|
||||||
|
"org.kde.kwalletd6",
|
||||||
|
|
||||||
|
"org.gnome.SessionManager",
|
||||||
|
},
|
||||||
|
Own: []string{
|
||||||
|
"org.chromium.Chromium.*",
|
||||||
|
|
||||||
|
"org.mpris.MediaPlayer2.org.chromium.Chromium.*",
|
||||||
|
"org.mpris.MediaPlayer2.chromium.*",
|
||||||
|
},
|
||||||
Call: map[string]string{"org.freedesktop.portal.*": "*"},
|
Call: map[string]string{"org.freedesktop.portal.*": "*"},
|
||||||
Broadcast: map[string]string{"org.freedesktop.portal.*": "@/org/freedesktop/portal/*"},
|
Broadcast: map[string]string{"org.freedesktop.portal.*": "@/org/freedesktop/portal/*"},
|
||||||
Log: false,
|
Log: false,
|
||||||
@@ -112,7 +127,12 @@ func Template() *Config {
|
|||||||
"GOOGLE_DEFAULT_CLIENT_SECRET": "OTJgUOQcT7lO7GsGZq2G4IlT",
|
"GOOGLE_DEFAULT_CLIENT_SECRET": "OTJgUOQcT7lO7GsGZq2G4IlT",
|
||||||
},
|
},
|
||||||
Filesystem: []FilesystemConfigJSON{
|
Filesystem: []FilesystemConfigJSON{
|
||||||
{&FSBind{Target: fhs.AbsRoot, Source: fhs.AbsVarLib.Append("hakurei/base/org.debian"), Write: true, Special: true}},
|
{&FSBind{
|
||||||
|
Target: fhs.AbsRoot,
|
||||||
|
Source: fhs.AbsVarLib.Append("hakurei/base/org.debian"),
|
||||||
|
Write: true,
|
||||||
|
Special: true,
|
||||||
|
}},
|
||||||
{&FSBind{Target: fhs.AbsEtc, Source: fhs.AbsEtc, Special: true}},
|
{&FSBind{Target: fhs.AbsEtc, Source: fhs.AbsEtc, Special: true}},
|
||||||
{&FSEphemeral{Target: fhs.AbsTmp, Write: true, Perm: 0755}},
|
{&FSEphemeral{Target: fhs.AbsTmp, Write: true, Perm: 0755}},
|
||||||
{&FSOverlay{
|
{&FSOverlay{
|
||||||
@@ -121,11 +141,27 @@ func Template() *Config {
|
|||||||
Upper: fhs.AbsVarLib.Append("hakurei/nix/u0/org.chromium.Chromium/rw-store/upper"),
|
Upper: fhs.AbsVarLib.Append("hakurei/nix/u0/org.chromium.Chromium/rw-store/upper"),
|
||||||
Work: fhs.AbsVarLib.Append("hakurei/nix/u0/org.chromium.Chromium/rw-store/work"),
|
Work: fhs.AbsVarLib.Append("hakurei/nix/u0/org.chromium.Chromium/rw-store/work"),
|
||||||
}},
|
}},
|
||||||
{&FSLink{Target: fhs.AbsRun.Append("current-system"), Linkname: "/run/current-system", Dereference: true}},
|
{&FSLink{
|
||||||
{&FSLink{Target: fhs.AbsRun.Append("opengl-driver"), Linkname: "/run/opengl-driver", Dereference: true}},
|
Target: fhs.AbsRun.Append("current-system"),
|
||||||
{&FSBind{Source: fhs.AbsVarLib.Append("hakurei/u0/org.chromium.Chromium"),
|
Linkname: "/run/current-system",
|
||||||
Target: check.MustAbs("/data/data/org.chromium.Chromium"), Write: true, Ensure: true}},
|
Dereference: true,
|
||||||
{&FSBind{Source: fhs.AbsDev.Append("dri"), Device: true, Optional: true}},
|
}},
|
||||||
|
{&FSLink{
|
||||||
|
Target: fhs.AbsRun.Append("opengl-driver"),
|
||||||
|
Linkname: "/run/opengl-driver",
|
||||||
|
Dereference: true,
|
||||||
|
}},
|
||||||
|
{&FSBind{
|
||||||
|
Source: fhs.AbsVarLib.Append("hakurei/u0/org.chromium.Chromium"),
|
||||||
|
Target: check.MustAbs("/data/data/org.chromium.Chromium"),
|
||||||
|
Write: true,
|
||||||
|
Ensure: true,
|
||||||
|
}},
|
||||||
|
{&FSBind{
|
||||||
|
Source: fhs.AbsDev.Append("dri"),
|
||||||
|
Device: true,
|
||||||
|
Optional: true,
|
||||||
|
}},
|
||||||
},
|
},
|
||||||
|
|
||||||
Username: "chronos",
|
Username: "chronos",
|
||||||
|
|||||||
@@ -12,10 +12,12 @@ import (
|
|||||||
// An ID is a unique identifier held by a running hakurei container.
|
// An ID is a unique identifier held by a running hakurei container.
|
||||||
type ID [16]byte
|
type ID [16]byte
|
||||||
|
|
||||||
// ErrIdentifierLength is returned when encountering a [hex] representation of [ID] with unexpected length.
|
// ErrIdentifierLength is returned when encountering a [hex] representation of
|
||||||
|
// [ID] with unexpected length.
|
||||||
var ErrIdentifierLength = errors.New("identifier string has unexpected length")
|
var ErrIdentifierLength = errors.New("identifier string has unexpected length")
|
||||||
|
|
||||||
// IdentifierDecodeError is returned by [ID.UnmarshalText] to provide relevant error descriptions.
|
// IdentifierDecodeError is returned by [ID.UnmarshalText] to provide relevant
|
||||||
|
// error descriptions.
|
||||||
type IdentifierDecodeError struct{ Err error }
|
type IdentifierDecodeError struct{ Err error }
|
||||||
|
|
||||||
func (e IdentifierDecodeError) Unwrap() error { return e.Err }
|
func (e IdentifierDecodeError) Unwrap() error { return e.Err }
|
||||||
@@ -23,7 +25,10 @@ func (e IdentifierDecodeError) Error() string {
|
|||||||
var invalidByteError hex.InvalidByteError
|
var invalidByteError hex.InvalidByteError
|
||||||
switch {
|
switch {
|
||||||
case errors.As(e.Err, &invalidByteError):
|
case errors.As(e.Err, &invalidByteError):
|
||||||
return fmt.Sprintf("got invalid byte %#U in identifier", rune(invalidByteError))
|
return fmt.Sprintf(
|
||||||
|
"got invalid byte %#U in identifier",
|
||||||
|
rune(invalidByteError),
|
||||||
|
)
|
||||||
case errors.Is(e.Err, hex.ErrLength):
|
case errors.Is(e.Err, hex.ErrLength):
|
||||||
return "odd length identifier hex string"
|
return "odd length identifier hex string"
|
||||||
|
|
||||||
@@ -41,7 +46,9 @@ func (a *ID) CreationTime() time.Time {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewInstanceID creates a new unique [ID].
|
// NewInstanceID creates a new unique [ID].
|
||||||
func NewInstanceID(id *ID) error { return newInstanceID(id, uint64(time.Now().UnixNano())) }
|
func NewInstanceID(id *ID) error {
|
||||||
|
return newInstanceID(id, uint64(time.Now().UnixNano()))
|
||||||
|
}
|
||||||
|
|
||||||
// newInstanceID creates a new unique [ID] with the specified timestamp.
|
// newInstanceID creates a new unique [ID] with the specified timestamp.
|
||||||
func newInstanceID(id *ID, p uint64) error {
|
func newInstanceID(id *ID, p uint64) error {
|
||||||
|
|||||||
@@ -8,7 +8,6 @@
|
|||||||
package filelock
|
package filelock
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"io/fs"
|
"io/fs"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -74,10 +73,3 @@ func (lt lockType) String() string {
|
|||||||
return "Unlock"
|
return "Unlock"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsNotSupported returns a boolean indicating whether the error is known to
|
|
||||||
// report that a function is not supported (possibly for a specific input).
|
|
||||||
// It is satisfied by errors.ErrUnsupported as well as some syscall errors.
|
|
||||||
func IsNotSupported(err error) bool {
|
|
||||||
return errors.Is(err, errors.ErrUnsupported)
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -14,7 +14,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"hakurei.app/container"
|
"hakurei.app/container/fhs"
|
||||||
"hakurei.app/internal/lockedfile/internal/filelock"
|
"hakurei.app/internal/lockedfile/internal/filelock"
|
||||||
"hakurei.app/internal/lockedfile/internal/testexec"
|
"hakurei.app/internal/lockedfile/internal/testexec"
|
||||||
)
|
)
|
||||||
@@ -197,7 +197,7 @@ func TestLockNotDroppedByExecCommand(t *testing.T) {
|
|||||||
// Some kinds of file locks are dropped when a duplicated or forked file
|
// Some kinds of file locks are dropped when a duplicated or forked file
|
||||||
// descriptor is unlocked. Double-check that the approach used by os/exec does
|
// descriptor is unlocked. Double-check that the approach used by os/exec does
|
||||||
// not accidentally drop locks.
|
// not accidentally drop locks.
|
||||||
cmd := testexec.CommandContext(t, t.Context(), container.MustExecutable(nil), "-test.run=^$")
|
cmd := testexec.CommandContext(t, t.Context(), fhs.ProcSelfExe, "-test.run=^$")
|
||||||
if err := cmd.Run(); err != nil {
|
if err := cmd.Run(); err != nil {
|
||||||
t.Fatalf("exec failed: %v", err)
|
t.Fatalf("exec failed: %v", err)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -94,6 +94,11 @@ func (f *File) Close() error {
|
|||||||
|
|
||||||
err := closeFile(f.osFile.File)
|
err := closeFile(f.osFile.File)
|
||||||
f.cleanup.Stop()
|
f.cleanup.Stop()
|
||||||
|
// f may be dead at the moment after we access f.cleanup,
|
||||||
|
// so the cleanup can fire before Stop completes. Keep f
|
||||||
|
// alive while we call Stop. See the documentation for
|
||||||
|
// runtime.Cleanup.Stop.
|
||||||
|
runtime.KeepAlive(f)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"hakurei.app/container"
|
"hakurei.app/container/fhs"
|
||||||
"hakurei.app/internal/lockedfile"
|
"hakurei.app/internal/lockedfile"
|
||||||
"hakurei.app/internal/lockedfile/internal/testexec"
|
"hakurei.app/internal/lockedfile/internal/testexec"
|
||||||
)
|
)
|
||||||
@@ -215,7 +215,7 @@ func TestSpuriousEDEADLK(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
cmd := testexec.CommandContext(t, t.Context(), container.MustExecutable(nil), "-test.run=^"+t.Name()+"$")
|
cmd := testexec.CommandContext(t, t.Context(), fhs.ProcSelfExe, "-test.run=^"+t.Name()+"$")
|
||||||
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=%s", dirVar, dir))
|
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=%s", dirVar, dir))
|
||||||
|
|
||||||
qDone := make(chan struct{})
|
qDone := make(chan struct{})
|
||||||
|
|||||||
@@ -38,6 +38,7 @@ func (h *Hsu) ensureDispatcher() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ID returns the current user hsurc identifier.
|
// ID returns the current user hsurc identifier.
|
||||||
|
//
|
||||||
// [ErrHsuAccess] is returned if the current user is not in hsurc.
|
// [ErrHsuAccess] is returned if the current user is not in hsurc.
|
||||||
func (h *Hsu) ID() (int, error) {
|
func (h *Hsu) ID() (int, error) {
|
||||||
h.ensureDispatcher()
|
h.ensureDispatcher()
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
// Package outcome implements the outcome of the privileged and container sides of a hakurei container.
|
// Package outcome implements the outcome of the privileged and container sides
|
||||||
|
// of a hakurei container.
|
||||||
package outcome
|
package outcome
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -27,8 +28,9 @@ func Info() *hst.Info {
|
|||||||
return &hi
|
return &hi
|
||||||
}
|
}
|
||||||
|
|
||||||
// envAllocSize is the initial size of the env map pre-allocated when the configured env map is nil.
|
// envAllocSize is the initial size of the env map pre-allocated when the
|
||||||
// It should be large enough to fit all insertions by outcomeOp.toContainer.
|
// configured env map is nil. It should be large enough to fit all insertions by
|
||||||
|
// outcomeOp.toContainer.
|
||||||
const envAllocSize = 1 << 6
|
const envAllocSize = 1 << 6
|
||||||
|
|
||||||
func newInt(v int) *stringPair[int] { return &stringPair[int]{v, strconv.Itoa(v)} }
|
func newInt(v int) *stringPair[int] { return &stringPair[int]{v, strconv.Itoa(v)} }
|
||||||
@@ -43,7 +45,8 @@ func (s *stringPair[T]) unwrap() T { return s.v }
|
|||||||
func (s *stringPair[T]) String() string { return s.s }
|
func (s *stringPair[T]) String() string { return s.s }
|
||||||
|
|
||||||
// outcomeState is copied to the shim process and available while applying outcomeOp.
|
// outcomeState is copied to the shim process and available while applying outcomeOp.
|
||||||
// This is transmitted from the priv side to the shim, so exported fields should be kept to a minimum.
|
// This is transmitted from the priv side to the shim, so exported fields should
|
||||||
|
// be kept to a minimum.
|
||||||
type outcomeState struct {
|
type outcomeState struct {
|
||||||
// Params only used by the shim process. Populated by populateEarly.
|
// Params only used by the shim process. Populated by populateEarly.
|
||||||
Shim *shimParams
|
Shim *shimParams
|
||||||
@@ -89,14 +92,25 @@ func (s *outcomeState) valid() bool {
|
|||||||
s.Paths != nil
|
s.Paths != nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// newOutcomeState returns the address of a new outcomeState with its exported fields populated via syscallDispatcher.
|
// newOutcomeState returns the address of a new outcomeState with its exported
|
||||||
|
// fields populated via syscallDispatcher.
|
||||||
func newOutcomeState(k syscallDispatcher, msg message.Msg, id *hst.ID, config *hst.Config, hsu *Hsu) *outcomeState {
|
func newOutcomeState(k syscallDispatcher, msg message.Msg, id *hst.ID, config *hst.Config, hsu *Hsu) *outcomeState {
|
||||||
s := outcomeState{
|
s := outcomeState{
|
||||||
Shim: &shimParams{PrivPID: k.getpid(), Verbose: msg.IsVerbose()},
|
Shim: &shimParams{
|
||||||
|
PrivPID: k.getpid(),
|
||||||
|
Verbose: msg.IsVerbose(),
|
||||||
|
|
||||||
|
SchedPolicy: config.SchedPolicy,
|
||||||
|
SchedPriority: config.SchedPriority,
|
||||||
|
},
|
||||||
|
|
||||||
ID: id,
|
ID: id,
|
||||||
Identity: config.Identity,
|
Identity: config.Identity,
|
||||||
UserID: hsu.MustID(msg),
|
UserID: hsu.MustID(msg),
|
||||||
Paths: env.CopyPathsFunc(k.fatalf, k.tempdir, func(key string) string { v, _ := k.lookupEnv(key); return v }),
|
Paths: env.CopyPathsFunc(k.fatalf, k.tempdir, func(key string) string {
|
||||||
|
v, _ := k.lookupEnv(key)
|
||||||
|
return v
|
||||||
|
}),
|
||||||
Container: config.Container,
|
Container: config.Container,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -121,6 +135,7 @@ func newOutcomeState(k syscallDispatcher, msg message.Msg, id *hst.ID, config *h
|
|||||||
}
|
}
|
||||||
|
|
||||||
// populateLocal populates unexported fields from transmitted exported fields.
|
// populateLocal populates unexported fields from transmitted exported fields.
|
||||||
|
//
|
||||||
// These fields are cheaper to recompute per-process.
|
// These fields are cheaper to recompute per-process.
|
||||||
func (s *outcomeState) populateLocal(k syscallDispatcher, msg message.Msg) error {
|
func (s *outcomeState) populateLocal(k syscallDispatcher, msg message.Msg) error {
|
||||||
if !s.valid() || k == nil || msg == nil {
|
if !s.valid() || k == nil || msg == nil {
|
||||||
@@ -136,7 +151,10 @@ func (s *outcomeState) populateLocal(k syscallDispatcher, msg message.Msg) error
|
|||||||
s.id = &stringPair[hst.ID]{*s.ID, s.ID.String()}
|
s.id = &stringPair[hst.ID]{*s.ID, s.ID.String()}
|
||||||
|
|
||||||
s.Copy(&s.sc, s.UserID)
|
s.Copy(&s.sc, s.UserID)
|
||||||
msg.Verbosef("process share directory at %q, runtime directory at %q", s.sc.SharePath, s.sc.RunDirPath)
|
msg.Verbosef(
|
||||||
|
"process share directory at %q, runtime directory at %q",
|
||||||
|
s.sc.SharePath, s.sc.RunDirPath,
|
||||||
|
)
|
||||||
|
|
||||||
s.identity = newInt(s.Identity)
|
s.identity = newInt(s.Identity)
|
||||||
s.mapuid, s.mapgid = newInt(s.Mapuid), newInt(s.Mapgid)
|
s.mapuid, s.mapgid = newInt(s.Mapuid), newInt(s.Mapgid)
|
||||||
@@ -146,17 +164,25 @@ func (s *outcomeState) populateLocal(k syscallDispatcher, msg message.Msg) error
|
|||||||
}
|
}
|
||||||
|
|
||||||
// instancePath returns a path formatted for outcomeStateSys.instance.
|
// instancePath returns a path formatted for outcomeStateSys.instance.
|
||||||
|
//
|
||||||
// This method must only be called from outcomeOp.toContainer if
|
// This method must only be called from outcomeOp.toContainer if
|
||||||
// outcomeOp.toSystem has already called outcomeStateSys.instance.
|
// outcomeOp.toSystem has already called outcomeStateSys.instance.
|
||||||
func (s *outcomeState) instancePath() *check.Absolute { return s.sc.SharePath.Append(s.id.String()) }
|
func (s *outcomeState) instancePath() *check.Absolute {
|
||||||
|
return s.sc.SharePath.Append(s.id.String())
|
||||||
|
}
|
||||||
|
|
||||||
// runtimePath returns a path formatted for outcomeStateSys.runtime.
|
// runtimePath returns a path formatted for outcomeStateSys.runtime.
|
||||||
|
//
|
||||||
// This method must only be called from outcomeOp.toContainer if
|
// This method must only be called from outcomeOp.toContainer if
|
||||||
// outcomeOp.toSystem has already called outcomeStateSys.runtime.
|
// outcomeOp.toSystem has already called outcomeStateSys.runtime.
|
||||||
func (s *outcomeState) runtimePath() *check.Absolute { return s.sc.RunDirPath.Append(s.id.String()) }
|
func (s *outcomeState) runtimePath() *check.Absolute {
|
||||||
|
return s.sc.RunDirPath.Append(s.id.String())
|
||||||
|
}
|
||||||
|
|
||||||
// outcomeStateSys wraps outcomeState and [system.I]. Used on the priv side only.
|
// outcomeStateSys wraps outcomeState and [system.I]. Used on the priv side only.
|
||||||
// Implementations of outcomeOp must not access fields other than sys unless explicitly stated.
|
//
|
||||||
|
// Implementations of outcomeOp must not access fields other than sys unless
|
||||||
|
// explicitly stated.
|
||||||
type outcomeStateSys struct {
|
type outcomeStateSys struct {
|
||||||
// Whether XDG_RUNTIME_DIR is used post hsu.
|
// Whether XDG_RUNTIME_DIR is used post hsu.
|
||||||
useRuntimeDir bool
|
useRuntimeDir bool
|
||||||
@@ -219,6 +245,7 @@ func (state *outcomeStateSys) ensureRuntimeDir() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// instance returns the pathname to a process-specific directory within TMPDIR.
|
// instance returns the pathname to a process-specific directory within TMPDIR.
|
||||||
|
//
|
||||||
// This directory must only hold entries bound to [system.Process].
|
// This directory must only hold entries bound to [system.Process].
|
||||||
func (state *outcomeStateSys) instance() *check.Absolute {
|
func (state *outcomeStateSys) instance() *check.Absolute {
|
||||||
if state.sharePath != nil {
|
if state.sharePath != nil {
|
||||||
@@ -230,6 +257,7 @@ func (state *outcomeStateSys) instance() *check.Absolute {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// runtime returns the pathname to a process-specific directory within XDG_RUNTIME_DIR.
|
// runtime returns the pathname to a process-specific directory within XDG_RUNTIME_DIR.
|
||||||
|
//
|
||||||
// This directory must only hold entries bound to [system.Process].
|
// This directory must only hold entries bound to [system.Process].
|
||||||
func (state *outcomeStateSys) runtime() *check.Absolute {
|
func (state *outcomeStateSys) runtime() *check.Absolute {
|
||||||
if state.runtimeSharePath != nil {
|
if state.runtimeSharePath != nil {
|
||||||
@@ -242,22 +270,29 @@ func (state *outcomeStateSys) runtime() *check.Absolute {
|
|||||||
return state.runtimeSharePath
|
return state.runtimeSharePath
|
||||||
}
|
}
|
||||||
|
|
||||||
// outcomeStateParams wraps outcomeState and [container.Params]. Used on the shim side only.
|
// outcomeStateParams wraps outcomeState and [container.Params].
|
||||||
|
//
|
||||||
|
// Used on the shim side only.
|
||||||
type outcomeStateParams struct {
|
type outcomeStateParams struct {
|
||||||
// Overrides the embedded [container.Params] in [container.Container]. The Env field must not be used.
|
// Overrides the embedded [container.Params] in [container.Container].
|
||||||
|
//
|
||||||
|
// The Env field must not be used.
|
||||||
params *container.Params
|
params *container.Params
|
||||||
// Collapsed into the Env slice in [container.Params] by the final outcomeOp.
|
// Collapsed into the Env slice in [container.Params] by the final outcomeOp.
|
||||||
env map[string]string
|
env map[string]string
|
||||||
|
|
||||||
// Filesystems with the optional root sliced off if present. Populated by spParamsOp.
|
// Filesystems with the optional root sliced off if present.
|
||||||
// Safe for use by spFilesystemOp.
|
//
|
||||||
|
// Populated by spParamsOp. Safe for use by spFilesystemOp.
|
||||||
filesystem []hst.FilesystemConfigJSON
|
filesystem []hst.FilesystemConfigJSON
|
||||||
|
|
||||||
// Inner XDG_RUNTIME_DIR default formatting of `/run/user/%d` via mapped uid.
|
// Inner XDG_RUNTIME_DIR default formatting of `/run/user/%d` via mapped uid.
|
||||||
|
//
|
||||||
// Populated by spRuntimeOp.
|
// Populated by spRuntimeOp.
|
||||||
runtimeDir *check.Absolute
|
runtimeDir *check.Absolute
|
||||||
|
|
||||||
// Path to pipewire-pulse server.
|
// Path to pipewire-pulse server.
|
||||||
|
//
|
||||||
// Populated by spPipeWireOp if DirectPipeWire is false.
|
// Populated by spPipeWireOp if DirectPipeWire is false.
|
||||||
pipewirePulsePath *check.Absolute
|
pipewirePulsePath *check.Absolute
|
||||||
|
|
||||||
@@ -265,25 +300,32 @@ type outcomeStateParams struct {
|
|||||||
*outcomeState
|
*outcomeState
|
||||||
}
|
}
|
||||||
|
|
||||||
// errNotEnabled is returned by outcomeOp.toSystem and used internally to exclude an outcomeOp from transmission.
|
// errNotEnabled is returned by outcomeOp.toSystem and used internally to
|
||||||
|
// exclude an outcomeOp from transmission.
|
||||||
var errNotEnabled = errors.New("op not enabled in the configuration")
|
var errNotEnabled = errors.New("op not enabled in the configuration")
|
||||||
|
|
||||||
// An outcomeOp inflicts an outcome on [system.I] and contains enough information to
|
// An outcomeOp inflicts an outcome on [system.I] and contains enough
|
||||||
// inflict it on [container.Params] in a separate process.
|
// information to inflict it on [container.Params] in a separate process.
|
||||||
// An implementation of outcomeOp must store cross-process states in exported fields only.
|
//
|
||||||
|
// An implementation of outcomeOp must store cross-process states in exported
|
||||||
|
// fields only.
|
||||||
type outcomeOp interface {
|
type outcomeOp interface {
|
||||||
// toSystem inflicts the current outcome on [system.I] in the priv side process.
|
// toSystem inflicts the current outcome on [system.I] in the priv side process.
|
||||||
toSystem(state *outcomeStateSys) error
|
toSystem(state *outcomeStateSys) error
|
||||||
|
|
||||||
// toContainer inflicts the current outcome on [container.Params] in the shim process.
|
// toContainer inflicts the current outcome on [container.Params] in the
|
||||||
// The implementation must not write to the Env field of [container.Params] as it will be overwritten
|
// shim process.
|
||||||
// by flattened env map.
|
//
|
||||||
|
// Implementations must not write to the Env field of [container.Params]
|
||||||
|
// as it will be overwritten by flattened env map.
|
||||||
toContainer(state *outcomeStateParams) error
|
toContainer(state *outcomeStateParams) error
|
||||||
}
|
}
|
||||||
|
|
||||||
// toSystem calls the outcomeOp.toSystem method on all outcomeOp implementations and populates shimParams.Ops.
|
// toSystem calls the outcomeOp.toSystem method on all outcomeOp implementations
|
||||||
// This function assumes the caller has already called the Validate method on [hst.Config]
|
// and populates shimParams.Ops.
|
||||||
// and checked that it returns nil.
|
//
|
||||||
|
// This function assumes the caller has already called the Validate method on
|
||||||
|
// [hst.Config] and checked that it returns nil.
|
||||||
func (state *outcomeStateSys) toSystem() error {
|
func (state *outcomeStateSys) toSystem() error {
|
||||||
if state.Shim == nil || state.Shim.Ops != nil {
|
if state.Shim == nil || state.Shim.Ops != nil {
|
||||||
return newWithMessage("invalid ops state reached")
|
return newWithMessage("invalid ops state reached")
|
||||||
|
|||||||
@@ -30,7 +30,9 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// NewStore returns the address of a new instance of [store.Store].
|
// NewStore returns the address of a new instance of [store.Store].
|
||||||
func NewStore(sc *hst.Paths) *store.Store { return store.New(sc.SharePath.Append("state")) }
|
func NewStore(sc *hst.Paths) *store.Store {
|
||||||
|
return store.New(sc.SharePath.Append("state"))
|
||||||
|
}
|
||||||
|
|
||||||
// main carries out outcome and terminates. main does not return.
|
// main carries out outcome and terminates. main does not return.
|
||||||
func (k *outcome) main(msg message.Msg, identifierFd int) {
|
func (k *outcome) main(msg message.Msg, identifierFd int) {
|
||||||
@@ -116,7 +118,11 @@ func (k *outcome) main(msg message.Msg, identifierFd int) {
|
|||||||
processStatePrev, processStateCur = processStateCur, processState
|
processStatePrev, processStateCur = processStateCur, processState
|
||||||
|
|
||||||
if !processTime.IsZero() && processStatePrev != processLifecycle {
|
if !processTime.IsZero() && processStatePrev != processLifecycle {
|
||||||
msg.Verbosef("state %d took %.2f ms", processStatePrev, float64(time.Since(processTime).Nanoseconds())/1e6)
|
msg.Verbosef(
|
||||||
|
"state %d took %.2f ms",
|
||||||
|
processStatePrev,
|
||||||
|
float64(time.Since(processTime).Nanoseconds())/1e6,
|
||||||
|
)
|
||||||
}
|
}
|
||||||
processTime = time.Now()
|
processTime = time.Now()
|
||||||
|
|
||||||
@@ -141,7 +147,10 @@ func (k *outcome) main(msg message.Msg, identifierFd int) {
|
|||||||
|
|
||||||
case processCommit:
|
case processCommit:
|
||||||
if isBeforeRevert {
|
if isBeforeRevert {
|
||||||
perrorFatal(newWithMessage("invalid transition to commit state"), "commit", processLifecycle)
|
perrorFatal(
|
||||||
|
newWithMessage("invalid transition to commit state"),
|
||||||
|
"commit", processLifecycle,
|
||||||
|
)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -238,15 +247,26 @@ func (k *outcome) main(msg message.Msg, identifierFd int) {
|
|||||||
|
|
||||||
case <-func() chan struct{} {
|
case <-func() chan struct{} {
|
||||||
w := make(chan struct{})
|
w := make(chan struct{})
|
||||||
// this ties processLifecycle to ctx with the additional compensated timeout duration
|
// This ties processLifecycle to ctx with the additional
|
||||||
// to allow transition to the next state on a locked up shim
|
// compensated timeout duration to allow transition to the next
|
||||||
go func() { <-ctx.Done(); time.Sleep(k.state.Shim.WaitDelay + shimWaitTimeout); close(w) }()
|
// state on a locked up shim.
|
||||||
|
go func() {
|
||||||
|
<-ctx.Done()
|
||||||
|
time.Sleep(k.state.Shim.WaitDelay + shimWaitTimeout)
|
||||||
|
close(w)
|
||||||
|
}()
|
||||||
return w
|
return w
|
||||||
}():
|
}():
|
||||||
// this is only reachable when wait did not return within shimWaitTimeout, after its WaitDelay has elapsed.
|
// This is only reachable when wait did not return within
|
||||||
// This is different from the container failing to terminate within its timeout period, as that is enforced
|
// shimWaitTimeout, after its WaitDelay has elapsed. This is
|
||||||
// by the shim. This path is instead reached when there is a lockup in shim preventing it from completing.
|
// different from the container failing to terminate within its
|
||||||
msg.GetLogger().Printf("process %d did not terminate", shimCmd.Process.Pid)
|
// timeout period, as that is enforced by the shim. This path is
|
||||||
|
// instead reached when there is a lockup in shim preventing it
|
||||||
|
// from completing.
|
||||||
|
msg.GetLogger().Printf(
|
||||||
|
"process %d did not terminate",
|
||||||
|
shimCmd.Process.Pid,
|
||||||
|
)
|
||||||
}
|
}
|
||||||
msg.Resume()
|
msg.Resume()
|
||||||
|
|
||||||
@@ -271,8 +291,8 @@ func (k *outcome) main(msg message.Msg, identifierFd int) {
|
|||||||
ec := system.Process
|
ec := system.Process
|
||||||
|
|
||||||
if entries, _, err := handle.Entries(); err != nil {
|
if entries, _, err := handle.Entries(); err != nil {
|
||||||
// it is impossible to continue from this point,
|
// it is impossible to continue from this point, per-process
|
||||||
// per-process state will be reverted to limit damage
|
// state will be reverted to limit damage
|
||||||
perror(err, "read store segment entries")
|
perror(err, "read store segment entries")
|
||||||
} else {
|
} else {
|
||||||
// accumulate enablements of remaining instances
|
// accumulate enablements of remaining instances
|
||||||
@@ -295,7 +315,10 @@ func (k *outcome) main(msg message.Msg, identifierFd int) {
|
|||||||
if n == 0 {
|
if n == 0 {
|
||||||
ec |= system.User
|
ec |= system.User
|
||||||
} else {
|
} else {
|
||||||
msg.Verbosef("found %d instances, cleaning up without user-scoped operations", n)
|
msg.Verbosef(
|
||||||
|
"found %d instances, cleaning up without user-scoped operations",
|
||||||
|
n,
|
||||||
|
)
|
||||||
}
|
}
|
||||||
ec |= rt ^ (hst.EWayland | hst.EX11 | hst.EDBus | hst.EPulse)
|
ec |= rt ^ (hst.EWayland | hst.EX11 | hst.EDBus | hst.EPulse)
|
||||||
if msg.IsVerbose() {
|
if msg.IsVerbose() {
|
||||||
@@ -335,7 +358,9 @@ func (k *outcome) main(msg message.Msg, identifierFd int) {
|
|||||||
|
|
||||||
// start starts the shim via cmd/hsu.
|
// start starts the shim via cmd/hsu.
|
||||||
//
|
//
|
||||||
// If successful, a [time.Time] value for [hst.State] is stored in the value pointed to by startTime.
|
// If successful, a [time.Time] value for [hst.State] is stored in the value
|
||||||
|
// pointed to by startTime.
|
||||||
|
//
|
||||||
// The resulting [exec.Cmd] and write end of the shim setup pipe is returned.
|
// The resulting [exec.Cmd] and write end of the shim setup pipe is returned.
|
||||||
func (k *outcome) start(ctx context.Context, msg message.Msg,
|
func (k *outcome) start(ctx context.Context, msg message.Msg,
|
||||||
hsuPath *check.Absolute,
|
hsuPath *check.Absolute,
|
||||||
|
|||||||
@@ -37,9 +37,12 @@ const (
|
|||||||
shimMsgBadPID = C.HAKUREI_SHIM_BAD_PID
|
shimMsgBadPID = C.HAKUREI_SHIM_BAD_PID
|
||||||
)
|
)
|
||||||
|
|
||||||
// setupContSignal sets up the SIGCONT signal handler for the cross-uid shim exit hack.
|
// setupContSignal sets up the SIGCONT signal handler for the cross-uid shim
|
||||||
// The signal handler is implemented in C, signals can be processed by reading from the returned reader.
|
// exit hack.
|
||||||
// The returned function must be called after all signal processing concludes.
|
//
|
||||||
|
// The signal handler is implemented in C, signals can be processed by reading
|
||||||
|
// from the returned reader. The returned function must be called after all
|
||||||
|
// signal processing concludes.
|
||||||
func setupContSignal(pid int) (io.ReadCloser, func(), error) {
|
func setupContSignal(pid int) (io.ReadCloser, func(), error) {
|
||||||
if r, w, err := os.Pipe(); err != nil {
|
if r, w, err := os.Pipe(); err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
@@ -51,22 +54,30 @@ func setupContSignal(pid int) (io.ReadCloser, func(), error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// shimEnv is the name of the environment variable storing decimal representation of
|
// shimEnv is the name of the environment variable storing decimal representation
|
||||||
// setup pipe fd for [container.Receive].
|
// of setup pipe fd for [container.Receive].
|
||||||
const shimEnv = "HAKUREI_SHIM"
|
const shimEnv = "HAKUREI_SHIM"
|
||||||
|
|
||||||
// shimParams is embedded in outcomeState and transmitted from priv side to shim.
|
// shimParams is embedded in outcomeState and transmitted from priv side to shim.
|
||||||
type shimParams struct {
|
type shimParams struct {
|
||||||
// Priv side pid, checked against ppid in signal handler for the syscall.SIGCONT hack.
|
// Priv side pid, checked against ppid in signal handler for the
|
||||||
|
// syscall.SIGCONT hack.
|
||||||
PrivPID int
|
PrivPID int
|
||||||
|
|
||||||
// Duration to wait for after the initial process receives os.Interrupt before the container is killed.
|
// Duration to wait for after the initial process receives os.Interrupt
|
||||||
|
// before the container is killed.
|
||||||
|
//
|
||||||
// Limits are enforced on the priv side.
|
// Limits are enforced on the priv side.
|
||||||
WaitDelay time.Duration
|
WaitDelay time.Duration
|
||||||
|
|
||||||
// Verbosity pass through from [message.Msg].
|
// Verbosity pass through from [message.Msg].
|
||||||
Verbose bool
|
Verbose bool
|
||||||
|
|
||||||
|
// Copied from [hst.Config].
|
||||||
|
SchedPolicy std.SchedPolicy
|
||||||
|
// Copied from [hst.Config].
|
||||||
|
SchedPriority std.Int
|
||||||
|
|
||||||
// Outcome setup ops, contains setup state. Populated by outcome.finalise.
|
// Outcome setup ops, contains setup state. Populated by outcome.finalise.
|
||||||
Ops []outcomeOp
|
Ops []outcomeOp
|
||||||
}
|
}
|
||||||
@@ -77,7 +88,9 @@ func (p *shimParams) valid() bool { return p != nil && p.PrivPID > 0 }
|
|||||||
// shimName is the prefix used by log.std in the shim process.
|
// shimName is the prefix used by log.std in the shim process.
|
||||||
const shimName = "shim"
|
const shimName = "shim"
|
||||||
|
|
||||||
// Shim is called by the main function of the shim process and runs as the unconstrained target user.
|
// Shim is called by the main function of the shim process and runs as the
|
||||||
|
// unconstrained target user.
|
||||||
|
//
|
||||||
// Shim does not return.
|
// Shim does not return.
|
||||||
func Shim(msg message.Msg) {
|
func Shim(msg message.Msg) {
|
||||||
if msg == nil {
|
if msg == nil {
|
||||||
@@ -131,7 +144,8 @@ func (sp *shimPrivate) destroy() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// shimPipeWireTimeout is the duration pipewire-pulse is allowed to run before its socket becomes available.
|
// shimPipeWireTimeout is the duration pipewire-pulse is allowed to run
|
||||||
|
// before its socket becomes available.
|
||||||
shimPipeWireTimeout = 5 * time.Second
|
shimPipeWireTimeout = 5 * time.Second
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -262,6 +276,9 @@ func shimEntrypoint(k syscallDispatcher) {
|
|||||||
cancelContainer.Store(&stop)
|
cancelContainer.Store(&stop)
|
||||||
sp := shimPrivate{k: k, id: state.id}
|
sp := shimPrivate{k: k, id: state.id}
|
||||||
z := container.New(ctx, msg)
|
z := container.New(ctx, msg)
|
||||||
|
z.SetScheduler = state.Shim.SchedPolicy > 0
|
||||||
|
z.SchedPolicy = state.Shim.SchedPolicy
|
||||||
|
z.SchedPriority = state.Shim.SchedPriority
|
||||||
z.Params = *stateParams.params
|
z.Params = *stateParams.params
|
||||||
z.Stdin, z.Stdout, z.Stderr = os.Stdin, os.Stdout, os.Stderr
|
z.Stdin, z.Stdout, z.Stderr = os.Stdin, os.Stdout, os.Stderr
|
||||||
|
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user