forked from security/hakurei
Compare commits
218 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| b255f07b0f | |||
| dec4cdd068 | |||
| 73c620ecd5 | |||
| 69467a1542 | |||
|
ebdf9dcecc
|
|||
|
8ea2a56d5b
|
|||
|
159a45c027
|
|||
| 1ae6a35bc8 | |||
|
0eb2bfa12e
|
|||
|
e19a98244a
|
|||
| 9ef5b52b85 | |||
| f93158cb3c | |||
|
7e2f13fa1b
|
|||
|
97448e2104
|
|||
|
a87ad28b8b
|
|||
|
883d4ee4af
|
|||
|
d2c6d486b0
|
|||
|
6fdd800b2b
|
|||
|
94e3debc63
|
|||
|
ea87664a75
|
|||
|
04d9984da0
|
|||
|
145ccd1c92
|
|||
|
c5089cad78
|
|||
|
c83905f311
|
|||
|
b7cc14f296
|
|||
|
57e1e5141d
|
|||
|
1440195c3f
|
|||
|
cc60e0d15d
|
|||
|
9deaf853f0
|
|||
|
2baa9df133
|
|||
|
51d3df2419
|
|||
|
1d0fcf3a75
|
|||
|
e92971e0c2
|
|||
|
6159c74e96
|
|||
|
2a34a269d0
|
|||
|
ef130adb27
|
|||
|
5694e528e6
|
|||
|
b4e82e68a7
|
|||
|
d041fee791
|
|||
|
cefd02e960
|
|||
|
ad8f799703
|
|||
|
c74c269b66
|
|||
|
4b0cce4db5
|
|||
|
cd9b534d6b
|
|||
|
84e6922f30
|
|||
|
c16725a679
|
|||
|
a6160cd410
|
|||
|
826347fe1f
|
|||
|
085eaed7ba
|
|||
|
37d368a7f9
|
|||
|
2aeac7f582
|
|||
|
2b93631f52
|
|||
|
b3749aaf0b
|
|||
|
c8bb88cced
|
|||
|
f7f80f95b9
|
|||
|
6ea6c794fb
|
|||
|
6c2da4c4b2
|
|||
|
90f915a708
|
|||
|
a5fea4686e
|
|||
|
ae8c365c0f
|
|||
|
485db515f7
|
|||
|
ec7ee0789e
|
|||
|
42c93a57a4
|
|||
|
b1b14810ac
|
|||
|
de117ef365
|
|||
|
5e4bf23e0c
|
|||
|
d4519e2075
|
|||
|
7f1e4cf43c
|
|||
|
d021621fba
|
|||
|
56567307ec
|
|||
|
0264a1ef09
|
|||
|
0123bbee3d
|
|||
|
771adad603
|
|||
|
178305cb22
|
|||
|
c2456e252c
|
|||
|
273068b90c
|
|||
|
16b20e1d34
|
|||
|
b983917a6e
|
|||
|
e1b8f40add
|
|||
|
6df0d37c5a
|
|||
|
1619b06541
|
|||
|
e335d99c6b
|
|||
|
d888d09b6d
|
|||
|
54176e7315
|
|||
|
3bfe99d3d8
|
|||
|
149dfbb6af
|
|||
|
58801b44d4
|
|||
|
e065bbf792
|
|||
|
a883e57e7d
|
|||
|
ef9bd8ecbf
|
|||
|
a40527dcb2
|
|||
|
88d9a6163e
|
|||
|
47860b0387
|
|||
|
50c9da8b6d
|
|||
|
16966043c7
|
|||
|
a3515a6ef5
|
|||
|
7f05baab28
|
|||
|
d4d5e631ae
|
|||
|
1df3bcc3b9
|
|||
|
1809b53e52
|
|||
|
67b2914c94
|
|||
|
74dee11822
|
|||
|
a58c9258cc
|
|||
|
710b164c91
|
|||
|
93911d6015
|
|||
|
bb097536d4
|
|||
|
49b6526a38
|
|||
|
f9c31df94d
|
|||
|
4f570cc5c9
|
|||
|
5828631e79
|
|||
|
4f9f4875d7
|
|||
|
d49e654482
|
|||
|
b746e352e5
|
|||
|
c620d88dce
|
|||
|
7cd14b8865
|
|||
|
3e18a4b397
|
|||
|
1791b604b5
|
|||
|
59ff6db7ec
|
|||
|
430e099556
|
|||
|
17b64bb42c
|
|||
|
dbb89dfb0f
|
|||
|
de06ea2be4
|
|||
|
1ef7bedfb5
|
|||
|
05a828c474
|
|||
|
0061d11f93
|
|||
|
fb101a02f2
|
|||
|
3dbd67d113
|
|||
|
f511f0a9e9
|
|||
|
47995137b3
|
|||
|
e1b8607101
|
|||
|
3d3bd45b95
|
|||
|
9fb0b2452e
|
|||
|
a3e87dd0ef
|
|||
|
90a38c0708
|
|||
|
39cc8caa93
|
|||
|
c4f64f7606
|
|||
|
a9e2a5e59f
|
|||
|
9fb0722cdf
|
|||
|
2f3e323c46
|
|||
|
1fc9c3200f
|
|||
|
096a25ad3a
|
|||
|
ffd2f979fb
|
|||
|
31a8cc9b5c
|
|||
|
bb3f60fc74
|
|||
|
697c91e04d
|
|||
|
3f7b8b4332
|
|||
|
fa94155f42
|
|||
|
233bd163fb
|
|||
|
f9b69c94bc
|
|||
|
68aefa6d59
|
|||
|
159fd55dbb
|
|||
|
ce6b3ff53b
|
|||
|
30afa0e2ab
|
|||
|
9b751de078
|
|||
|
d77ad3bb6e
|
|||
|
0142fc90b0
|
|||
|
3c9f7cfcd0
|
|||
|
a3526b3ceb
|
|||
|
6ad21e2288
|
|||
|
27e2e3f996
|
|||
|
e0c720681b
|
|||
|
f982b13a59
|
|||
|
443911ada1
|
|||
|
d7a3706db3
|
|||
|
3226dc44dc
|
|||
|
9f98d12ad8
|
|||
|
550e83dda9
|
|||
|
7877b4e627
|
|||
|
47ce6f5bd0
|
|||
|
48f4ccba33
|
|||
|
c31884bee4
|
|||
|
f8661ad479
|
|||
|
536f0cbae6
|
|||
|
8d872ff1cd
|
|||
|
bf14a412e4
|
|||
|
8b4576bc5f
|
|||
|
29ebc52e26
|
|||
|
5f81aac0e2
|
|||
|
47490823be
|
|||
|
1ac8ca7a80
|
|||
|
fd8b2fd522
|
|||
|
20a8519044
|
|||
|
8c4fd00c50
|
|||
|
bc3dd6fbb0
|
|||
|
616ed29edf
|
|||
|
9d9b7294a4
|
|||
|
6c1e2f10a7
|
|||
|
abf96d2283
|
|||
|
6c90e879da
|
|||
|
d1b404dc3a
|
|||
|
744e4e0632
|
|||
|
85eda49b2b
|
|||
|
b26bc05bb0
|
|||
|
2d63ea8fee
|
|||
|
dd4326418c
|
|||
|
79c0106ea0
|
|||
|
536db533de
|
|||
|
07927006a8
|
|||
|
77ea27b038
|
|||
|
e76bc6a13a
|
|||
|
cc403c96d8
|
|||
|
66118ba941
|
|||
|
823ba08dbc
|
|||
|
660835151e
|
|||
|
53e6df7e81
|
|||
|
bd80327a8f
|
|||
|
41f9aebbb7
|
|||
|
a2a0e36802
|
|||
|
fbe93fc771
|
|||
|
968d8dbaf1
|
|||
|
f1758a6fa8
|
|||
|
88aaa4497c
|
|||
|
b7ea68de35
|
|||
|
67e453f5c4
|
|||
|
67092c835a
|
|||
|
18918d9a0d
|
|||
|
380ca4e022
|
|||
|
887aef8514
|
@@ -89,23 +89,6 @@ jobs:
|
|||||||
path: result/*
|
path: result/*
|
||||||
retention-days: 1
|
retention-days: 1
|
||||||
|
|
||||||
hpkg:
|
|
||||||
name: Hpkg
|
|
||||||
runs-on: nix
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Run NixOS test
|
|
||||||
run: nix build --out-link "result" --print-out-paths --print-build-logs .#checks.x86_64-linux.hpkg
|
|
||||||
|
|
||||||
- name: Upload test output
|
|
||||||
uses: actions/upload-artifact@v3
|
|
||||||
with:
|
|
||||||
name: "hpkg-vm-output"
|
|
||||||
path: result/*
|
|
||||||
retention-days: 1
|
|
||||||
|
|
||||||
check:
|
check:
|
||||||
name: Flake checks
|
name: Flake checks
|
||||||
needs:
|
needs:
|
||||||
@@ -114,7 +97,6 @@ jobs:
|
|||||||
- sandbox
|
- sandbox
|
||||||
- sandbox-race
|
- sandbox-race
|
||||||
- sharefs
|
- sharefs
|
||||||
- hpkg
|
|
||||||
runs-on: nix
|
runs-on: nix
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
|
|||||||
5
.github/workflows/README
vendored
5
.github/workflows/README
vendored
@@ -1,5 +0,0 @@
|
|||||||
DO NOT ADD NEW ACTIONS HERE
|
|
||||||
|
|
||||||
This port is solely for releasing to the github mirror and serves no purpose during development.
|
|
||||||
All development happens at https://git.gensokyo.uk/security/hakurei. If you wish to contribute,
|
|
||||||
request for an account on git.gensokyo.uk.
|
|
||||||
46
.github/workflows/release.yml
vendored
46
.github/workflows/release.yml
vendored
@@ -1,46 +0,0 @@
|
|||||||
name: Release
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
tags:
|
|
||||||
- 'v*'
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
release:
|
|
||||||
name: Create release
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
packages: write
|
|
||||||
contents: write
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Install Nix
|
|
||||||
uses: nixbuild/nix-quick-install-action@v32
|
|
||||||
with:
|
|
||||||
nix_conf: |
|
|
||||||
keep-env-derivations = true
|
|
||||||
keep-outputs = true
|
|
||||||
|
|
||||||
- name: Restore and cache Nix store
|
|
||||||
uses: nix-community/cache-nix-action@v6
|
|
||||||
with:
|
|
||||||
primary-key: build-${{ runner.os }}-${{ hashFiles('**/*.nix') }}
|
|
||||||
restore-prefixes-first-match: build-${{ runner.os }}-
|
|
||||||
gc-max-store-size-linux: 1G
|
|
||||||
purge: true
|
|
||||||
purge-prefixes: build-${{ runner.os }}-
|
|
||||||
purge-created: 60
|
|
||||||
purge-primary-key: never
|
|
||||||
|
|
||||||
- name: Build for release
|
|
||||||
run: nix build --print-out-paths --print-build-logs .#dist
|
|
||||||
|
|
||||||
- name: Release
|
|
||||||
uses: softprops/action-gh-release@v2
|
|
||||||
with:
|
|
||||||
files: |-
|
|
||||||
result/hakurei-**
|
|
||||||
48
.github/workflows/test.yml
vendored
48
.github/workflows/test.yml
vendored
@@ -1,48 +0,0 @@
|
|||||||
name: Test
|
|
||||||
|
|
||||||
on:
|
|
||||||
- push
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
dist:
|
|
||||||
name: Create distribution
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
permissions:
|
|
||||||
actions: write
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Install Nix
|
|
||||||
uses: nixbuild/nix-quick-install-action@v32
|
|
||||||
with:
|
|
||||||
nix_conf: |
|
|
||||||
keep-env-derivations = true
|
|
||||||
keep-outputs = true
|
|
||||||
|
|
||||||
- name: Restore and cache Nix store
|
|
||||||
uses: nix-community/cache-nix-action@v6
|
|
||||||
with:
|
|
||||||
primary-key: build-${{ runner.os }}-${{ hashFiles('**/*.nix') }}
|
|
||||||
restore-prefixes-first-match: build-${{ runner.os }}-
|
|
||||||
gc-max-store-size-linux: 1G
|
|
||||||
purge: true
|
|
||||||
purge-prefixes: build-${{ runner.os }}-
|
|
||||||
purge-created: 60
|
|
||||||
purge-primary-key: never
|
|
||||||
|
|
||||||
- name: Build for test
|
|
||||||
id: build-test
|
|
||||||
run: >-
|
|
||||||
export HAKUREI_REV="$(git rev-parse --short HEAD)" &&
|
|
||||||
sed -i.old 's/version = /version = "0.0.0-'$HAKUREI_REV'"; # version = /' package.nix &&
|
|
||||||
nix build --print-out-paths --print-build-logs .#dist &&
|
|
||||||
mv package.nix.old package.nix &&
|
|
||||||
echo "rev=$HAKUREI_REV" >> $GITHUB_OUTPUT
|
|
||||||
|
|
||||||
- name: Upload test build
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: "hakurei-${{ steps.build-test.outputs.rev }}"
|
|
||||||
path: result/*
|
|
||||||
retention-days: 1
|
|
||||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -28,6 +28,7 @@ go.work.sum
|
|||||||
# go generate
|
# go generate
|
||||||
/cmd/hakurei/LICENSE
|
/cmd/hakurei/LICENSE
|
||||||
/internal/pkg/testdata/testtool
|
/internal/pkg/testdata/testtool
|
||||||
|
/internal/rosa/hakurei_current.tar.gz
|
||||||
|
|
||||||
# release
|
# release
|
||||||
/dist/hakurei-*
|
/dist/hakurei-*
|
||||||
|
|||||||
181
README.md
181
README.md
@@ -15,164 +15,51 @@
|
|||||||
<a href="https://hakurei.app"><img src="https://img.shields.io/website?url=https%3A%2F%2Fhakurei.app" alt="Website" /></a>
|
<a href="https://hakurei.app"><img src="https://img.shields.io/website?url=https%3A%2F%2Fhakurei.app" alt="Website" /></a>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
Hakurei is a tool for running sandboxed graphical applications as dedicated subordinate users on the Linux kernel.
|
Hakurei is a tool for running sandboxed desktop applications as dedicated
|
||||||
It implements the application container of [planterette (WIP)](https://git.gensokyo.uk/security/planterette),
|
subordinate users on the Linux kernel. It implements the application container
|
||||||
a self-contained Android-like package manager with modern security features.
|
of [planterette (WIP)](https://git.gensokyo.uk/security/planterette), a
|
||||||
|
self-contained Android-like package manager with modern security features.
|
||||||
|
|
||||||
## NixOS Module usage
|
Interaction with hakurei happens entirely through structures described by
|
||||||
|
package [hst](https://pkg.go.dev/hakurei.app/hst). No native API is available
|
||||||
|
due to internal details of uid isolation.
|
||||||
|
|
||||||
The NixOS module currently requires home-manager to configure subordinate users. Full module documentation can be found [here](options.md).
|
## Notable Packages
|
||||||
|
|
||||||
To use the module, import it into your configuration with
|
Package [container](https://pkg.go.dev/hakurei.app/container) is general purpose
|
||||||
|
container tooling. It is used by the hakurei shim process running as the target
|
||||||
|
subordinate user to set up the application container. It has a single dependency,
|
||||||
|
[libseccomp](https://github.com/seccomp/libseccomp), to create BPF programs
|
||||||
|
for the [system call filter](https://www.kernel.org/doc/html/latest/userspace-api/seccomp_filter.html).
|
||||||
|
|
||||||
```nix
|
Package [internal/pkg](https://pkg.go.dev/hakurei.app/internal/pkg) provides
|
||||||
{
|
infrastructure for hermetic builds. This replaces the legacy nix-based testing
|
||||||
inputs = {
|
framework and serves as the build system of Rosa OS, currently developed under
|
||||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
|
package [internal/rosa](https://pkg.go.dev/hakurei.app/internal/rosa).
|
||||||
|
|
||||||
hakurei = {
|
## Dependencies
|
||||||
url = "git+https://git.gensokyo.uk/security/hakurei";
|
|
||||||
|
|
||||||
# Optional but recommended to limit the size of your system closure.
|
`container` depends on:
|
||||||
inputs.nixpkgs.follows = "nixpkgs";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
outputs = { self, nixpkgs, hakurei, ... }:
|
- [libseccomp](https://github.com/seccomp/libseccomp) to generate BPF programs.
|
||||||
{
|
|
||||||
nixosConfigurations.hakurei = nixpkgs.lib.nixosSystem {
|
|
||||||
system = "x86_64-linux";
|
|
||||||
modules = [
|
|
||||||
hakurei.nixosModules.hakurei
|
|
||||||
];
|
|
||||||
};
|
|
||||||
};
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
This adds the `environment.hakurei` option:
|
`cmd/hakurei` depends on:
|
||||||
|
|
||||||
```nix
|
- [acl](https://savannah.nongnu.org/projects/acl/) to export sockets to
|
||||||
{ pkgs, ... }:
|
subordinate users.
|
||||||
|
- [wayland](https://gitlab.freedesktop.org/wayland/wayland) to set up
|
||||||
|
[security-context-v1](https://wayland.app/protocols/security-context-v1).
|
||||||
|
- [xcb](https://xcb.freedesktop.org/) to grant and revoke subordinate users
|
||||||
|
access to the X server.
|
||||||
|
|
||||||
{
|
`cmd/sharefs` depends on:
|
||||||
environment.hakurei = {
|
|
||||||
enable = true;
|
|
||||||
stateDir = "/var/lib/hakurei";
|
|
||||||
users = {
|
|
||||||
alice = 0;
|
|
||||||
nixos = 10;
|
|
||||||
};
|
|
||||||
|
|
||||||
commonPaths = [
|
- [fuse](https://github.com/libfuse/libfuse) to implement the filesystem.
|
||||||
{
|
|
||||||
src = "/sdcard";
|
|
||||||
write = true;
|
|
||||||
}
|
|
||||||
];
|
|
||||||
|
|
||||||
extraHomeConfig = {
|
New dependencies will generally not be added. Patches adding new dependencies
|
||||||
home.stateVersion = "23.05";
|
are very likely to be rejected.
|
||||||
};
|
|
||||||
|
|
||||||
apps = {
|
## NixOS Module (deprecated)
|
||||||
"org.chromium.Chromium" = {
|
|
||||||
name = "chromium";
|
|
||||||
identity = 1;
|
|
||||||
packages = [ pkgs.chromium ];
|
|
||||||
userns = true;
|
|
||||||
mapRealUid = true;
|
|
||||||
dbus = {
|
|
||||||
system = {
|
|
||||||
filter = true;
|
|
||||||
talk = [
|
|
||||||
"org.bluez"
|
|
||||||
"org.freedesktop.Avahi"
|
|
||||||
"org.freedesktop.UPower"
|
|
||||||
];
|
|
||||||
};
|
|
||||||
session =
|
|
||||||
f:
|
|
||||||
f {
|
|
||||||
talk = [
|
|
||||||
"org.freedesktop.FileManager1"
|
|
||||||
"org.freedesktop.Notifications"
|
|
||||||
"org.freedesktop.ScreenSaver"
|
|
||||||
"org.freedesktop.secrets"
|
|
||||||
"org.kde.kwalletd5"
|
|
||||||
"org.kde.kwalletd6"
|
|
||||||
];
|
|
||||||
own = [
|
|
||||||
"org.chromium.Chromium.*"
|
|
||||||
"org.mpris.MediaPlayer2.org.chromium.Chromium.*"
|
|
||||||
"org.mpris.MediaPlayer2.chromium.*"
|
|
||||||
];
|
|
||||||
call = { };
|
|
||||||
broadcast = { };
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
"org.claws_mail.Claws-Mail" = {
|
The NixOS module is in maintenance mode and will be removed once planterette is
|
||||||
name = "claws-mail";
|
feature-complete. Full module documentation can be found [here](options.md).
|
||||||
identity = 2;
|
|
||||||
packages = [ pkgs.claws-mail ];
|
|
||||||
gpu = false;
|
|
||||||
capability.pulse = false;
|
|
||||||
};
|
|
||||||
|
|
||||||
"org.weechat" = {
|
|
||||||
name = "weechat";
|
|
||||||
identity = 3;
|
|
||||||
shareUid = true;
|
|
||||||
packages = [ pkgs.weechat ];
|
|
||||||
capability = {
|
|
||||||
wayland = false;
|
|
||||||
x11 = false;
|
|
||||||
dbus = true;
|
|
||||||
pulse = false;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
"dev.vencord.Vesktop" = {
|
|
||||||
name = "discord";
|
|
||||||
identity = 3;
|
|
||||||
shareUid = true;
|
|
||||||
packages = [ pkgs.vesktop ];
|
|
||||||
share = pkgs.vesktop;
|
|
||||||
command = "vesktop --ozone-platform-hint=wayland";
|
|
||||||
userns = true;
|
|
||||||
mapRealUid = true;
|
|
||||||
capability.x11 = true;
|
|
||||||
dbus = {
|
|
||||||
session =
|
|
||||||
f:
|
|
||||||
f {
|
|
||||||
talk = [ "org.kde.StatusNotifierWatcher" ];
|
|
||||||
own = [ ];
|
|
||||||
call = { };
|
|
||||||
broadcast = { };
|
|
||||||
};
|
|
||||||
system.filter = true;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
"io.looking-glass" = {
|
|
||||||
name = "looking-glass-client";
|
|
||||||
identity = 4;
|
|
||||||
useCommonPaths = false;
|
|
||||||
groups = [ "plugdev" ];
|
|
||||||
extraPaths = [
|
|
||||||
{
|
|
||||||
src = "/dev/shm/looking-glass";
|
|
||||||
write = true;
|
|
||||||
}
|
|
||||||
];
|
|
||||||
extraConfig = {
|
|
||||||
programs.looking-glass-client.enable = true;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
}
|
|
||||||
```
|
|
||||||
58
cmd/earlyinit/main.go
Normal file
58
cmd/earlyinit/main.go
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
|
. "syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
runtime.LockOSThread()
|
||||||
|
log.SetFlags(0)
|
||||||
|
log.SetPrefix("earlyinit: ")
|
||||||
|
|
||||||
|
if err := Mount(
|
||||||
|
"devtmpfs",
|
||||||
|
"/dev/",
|
||||||
|
"devtmpfs",
|
||||||
|
MS_NOSUID|MS_NOEXEC,
|
||||||
|
"",
|
||||||
|
); err != nil {
|
||||||
|
log.Fatalf("cannot mount devtmpfs: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The kernel might be unable to set up the console. When that happens,
|
||||||
|
// printk is called with "Warning: unable to open an initial console."
|
||||||
|
// and the init runs with no files. The checkfds runtime function
|
||||||
|
// populates 0-2 by opening /dev/null for them.
|
||||||
|
//
|
||||||
|
// This check replaces 1 and 2 with /dev/kmsg to improve the chance
|
||||||
|
// of output being visible to the user.
|
||||||
|
if fi, err := os.Stdout.Stat(); err == nil {
|
||||||
|
if stat, ok := fi.Sys().(*Stat_t); ok {
|
||||||
|
if stat.Rdev == 0x103 {
|
||||||
|
var fd int
|
||||||
|
if fd, err = Open(
|
||||||
|
"/dev/kmsg",
|
||||||
|
O_WRONLY|O_CLOEXEC,
|
||||||
|
0,
|
||||||
|
); err != nil {
|
||||||
|
log.Fatalf("cannot open kmsg: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = Dup3(fd, Stdout, 0); err != nil {
|
||||||
|
log.Fatalf("cannot open stdout: %v", err)
|
||||||
|
}
|
||||||
|
if err = Dup3(fd, Stderr, 0); err != nil {
|
||||||
|
log.Fatalf("cannot open stderr: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = Close(fd); err != nil {
|
||||||
|
log.Printf("cannot close kmsg: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
This program is a proof of concept and is now deprecated. It is only kept
|
|
||||||
around for API demonstration purposes and to make the most out of the test
|
|
||||||
suite.
|
|
||||||
|
|
||||||
This program is replaced by planterette, which can be found at
|
|
||||||
https://git.gensokyo.uk/security/planterette. Development effort should be
|
|
||||||
focused there instead.
|
|
||||||
173
cmd/hpkg/app.go
173
cmd/hpkg/app.go
@@ -1,173 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"hakurei.app/container/check"
|
|
||||||
"hakurei.app/container/fhs"
|
|
||||||
"hakurei.app/hst"
|
|
||||||
)
|
|
||||||
|
|
||||||
type appInfo struct {
|
|
||||||
Name string `json:"name"`
|
|
||||||
Version string `json:"version"`
|
|
||||||
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
ID string `json:"id"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
Identity int `json:"identity"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
Groups []string `json:"groups,omitempty"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
Devel bool `json:"devel,omitempty"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
Userns bool `json:"userns,omitempty"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
HostNet bool `json:"net,omitempty"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
HostAbstract bool `json:"abstract,omitempty"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
Device bool `json:"dev,omitempty"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
Tty bool `json:"tty,omitempty"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
MapRealUID bool `json:"map_real_uid,omitempty"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
DirectWayland bool `json:"direct_wayland,omitempty"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
SystemBus *hst.BusConfig `json:"system_bus,omitempty"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
SessionBus *hst.BusConfig `json:"session_bus,omitempty"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
Enablements *hst.Enablements `json:"enablements,omitempty"`
|
|
||||||
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
Multiarch bool `json:"multiarch,omitempty"`
|
|
||||||
// passed through to [hst.Config]
|
|
||||||
Bluetooth bool `json:"bluetooth,omitempty"`
|
|
||||||
|
|
||||||
// allow gpu access within sandbox
|
|
||||||
GPU bool `json:"gpu"`
|
|
||||||
// store path to nixGL mesa wrappers
|
|
||||||
Mesa string `json:"mesa,omitempty"`
|
|
||||||
// store path to nixGL source
|
|
||||||
NixGL string `json:"nix_gl,omitempty"`
|
|
||||||
// store path to activate-and-exec script
|
|
||||||
Launcher *check.Absolute `json:"launcher"`
|
|
||||||
// store path to /run/current-system
|
|
||||||
CurrentSystem *check.Absolute `json:"current_system"`
|
|
||||||
// store path to home-manager activation package
|
|
||||||
ActivationPackage string `json:"activation_package"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (app *appInfo) toHst(pathSet *appPathSet, pathname *check.Absolute, argv []string, flagDropShell bool) *hst.Config {
|
|
||||||
config := &hst.Config{
|
|
||||||
ID: app.ID,
|
|
||||||
|
|
||||||
Enablements: app.Enablements,
|
|
||||||
|
|
||||||
SystemBus: app.SystemBus,
|
|
||||||
SessionBus: app.SessionBus,
|
|
||||||
DirectWayland: app.DirectWayland,
|
|
||||||
|
|
||||||
Identity: app.Identity,
|
|
||||||
Groups: app.Groups,
|
|
||||||
|
|
||||||
Container: &hst.ContainerConfig{
|
|
||||||
Hostname: formatHostname(app.Name),
|
|
||||||
Filesystem: []hst.FilesystemConfigJSON{
|
|
||||||
{FilesystemConfig: &hst.FSBind{Target: fhs.AbsEtc, Source: pathSet.cacheDir.Append("etc"), Special: true}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: pathSet.nixPath.Append("store"), Target: pathNixStore}},
|
|
||||||
{FilesystemConfig: &hst.FSLink{Target: pathCurrentSystem, Linkname: app.CurrentSystem.String()}},
|
|
||||||
{FilesystemConfig: &hst.FSLink{Target: pathBin, Linkname: pathSwBin.String()}},
|
|
||||||
{FilesystemConfig: &hst.FSLink{Target: fhs.AbsUsrBin, Linkname: pathSwBin.String()}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: pathSet.metaPath, Target: hst.AbsPrivateTmp.Append("app")}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsEtc.Append("resolv.conf"), Optional: true}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("block"), Optional: true}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("bus"), Optional: true}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("class"), Optional: true}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("dev"), Optional: true}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("devices"), Optional: true}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Target: pathDataData.Append(app.ID), Source: pathSet.homeDir, Write: true, Ensure: true}},
|
|
||||||
},
|
|
||||||
|
|
||||||
Username: "hakurei",
|
|
||||||
Shell: pathShell,
|
|
||||||
Home: pathDataData.Append(app.ID),
|
|
||||||
|
|
||||||
Path: pathname,
|
|
||||||
Args: argv,
|
|
||||||
},
|
|
||||||
ExtraPerms: []hst.ExtraPermConfig{
|
|
||||||
{Path: dataHome, Execute: true},
|
|
||||||
{Ensure: true, Path: pathSet.baseDir, Read: true, Write: true, Execute: true},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
if app.Devel {
|
|
||||||
config.Container.Flags |= hst.FDevel
|
|
||||||
}
|
|
||||||
if app.Userns {
|
|
||||||
config.Container.Flags |= hst.FUserns
|
|
||||||
}
|
|
||||||
if app.HostNet {
|
|
||||||
config.Container.Flags |= hst.FHostNet
|
|
||||||
}
|
|
||||||
if app.HostAbstract {
|
|
||||||
config.Container.Flags |= hst.FHostAbstract
|
|
||||||
}
|
|
||||||
if app.Device {
|
|
||||||
config.Container.Flags |= hst.FDevice
|
|
||||||
}
|
|
||||||
if app.Tty || flagDropShell {
|
|
||||||
config.Container.Flags |= hst.FTty
|
|
||||||
}
|
|
||||||
if app.MapRealUID {
|
|
||||||
config.Container.Flags |= hst.FMapRealUID
|
|
||||||
}
|
|
||||||
if app.Multiarch {
|
|
||||||
config.Container.Flags |= hst.FMultiarch
|
|
||||||
}
|
|
||||||
config.Container.Flags |= hst.FShareRuntime | hst.FShareTmpdir
|
|
||||||
return config
|
|
||||||
}
|
|
||||||
|
|
||||||
func loadAppInfo(name string, beforeFail func()) *appInfo {
|
|
||||||
bundle := new(appInfo)
|
|
||||||
if f, err := os.Open(name); err != nil {
|
|
||||||
beforeFail()
|
|
||||||
log.Fatalf("cannot open bundle: %v", err)
|
|
||||||
} else if err = json.NewDecoder(f).Decode(&bundle); err != nil {
|
|
||||||
beforeFail()
|
|
||||||
log.Fatalf("cannot parse bundle metadata: %v", err)
|
|
||||||
} else if err = f.Close(); err != nil {
|
|
||||||
log.Printf("cannot close bundle metadata: %v", err)
|
|
||||||
// not fatal
|
|
||||||
}
|
|
||||||
|
|
||||||
if bundle.ID == "" {
|
|
||||||
beforeFail()
|
|
||||||
log.Fatal("application identifier must not be empty")
|
|
||||||
}
|
|
||||||
if bundle.Launcher == nil {
|
|
||||||
beforeFail()
|
|
||||||
log.Fatal("launcher must not be empty")
|
|
||||||
}
|
|
||||||
if bundle.CurrentSystem == nil {
|
|
||||||
beforeFail()
|
|
||||||
log.Fatal("current-system must not be empty")
|
|
||||||
}
|
|
||||||
|
|
||||||
return bundle
|
|
||||||
}
|
|
||||||
|
|
||||||
func formatHostname(name string) string {
|
|
||||||
if h, err := os.Hostname(); err != nil {
|
|
||||||
log.Printf("cannot get hostname: %v", err)
|
|
||||||
return "hakurei-" + name
|
|
||||||
} else {
|
|
||||||
return h + "-" + name
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,256 +0,0 @@
|
|||||||
{
|
|
||||||
nixpkgsFor,
|
|
||||||
system,
|
|
||||||
nixpkgs,
|
|
||||||
home-manager,
|
|
||||||
}:
|
|
||||||
|
|
||||||
{
|
|
||||||
lib,
|
|
||||||
stdenv,
|
|
||||||
closureInfo,
|
|
||||||
writeScript,
|
|
||||||
runtimeShell,
|
|
||||||
writeText,
|
|
||||||
symlinkJoin,
|
|
||||||
vmTools,
|
|
||||||
runCommand,
|
|
||||||
fetchFromGitHub,
|
|
||||||
|
|
||||||
zstd,
|
|
||||||
nix,
|
|
||||||
sqlite,
|
|
||||||
|
|
||||||
name ? throw "name is required",
|
|
||||||
version ? throw "version is required",
|
|
||||||
pname ? "${name}-${version}",
|
|
||||||
modules ? [ ],
|
|
||||||
nixosModules ? [ ],
|
|
||||||
script ? ''
|
|
||||||
exec "$SHELL" "$@"
|
|
||||||
'',
|
|
||||||
|
|
||||||
id ? name,
|
|
||||||
identity ? throw "identity is required",
|
|
||||||
groups ? [ ],
|
|
||||||
userns ? false,
|
|
||||||
net ? true,
|
|
||||||
dev ? false,
|
|
||||||
no_new_session ? false,
|
|
||||||
map_real_uid ? false,
|
|
||||||
direct_wayland ? false,
|
|
||||||
system_bus ? null,
|
|
||||||
session_bus ? null,
|
|
||||||
|
|
||||||
allow_wayland ? true,
|
|
||||||
allow_x11 ? false,
|
|
||||||
allow_dbus ? true,
|
|
||||||
allow_audio ? true,
|
|
||||||
gpu ? allow_wayland || allow_x11,
|
|
||||||
}:
|
|
||||||
|
|
||||||
let
|
|
||||||
inherit (lib) optionals;
|
|
||||||
|
|
||||||
homeManagerConfiguration = home-manager.lib.homeManagerConfiguration {
|
|
||||||
pkgs = nixpkgsFor.${system};
|
|
||||||
modules = modules ++ [
|
|
||||||
{
|
|
||||||
home = {
|
|
||||||
username = "hakurei";
|
|
||||||
homeDirectory = "/data/data/${id}";
|
|
||||||
stateVersion = "22.11";
|
|
||||||
};
|
|
||||||
}
|
|
||||||
];
|
|
||||||
};
|
|
||||||
|
|
||||||
launcher = writeScript "hakurei-${pname}" ''
|
|
||||||
#!${runtimeShell} -el
|
|
||||||
${script}
|
|
||||||
'';
|
|
||||||
|
|
||||||
extraNixOSConfig =
|
|
||||||
{ pkgs, ... }:
|
|
||||||
{
|
|
||||||
environment = {
|
|
||||||
etc.nixpkgs.source = nixpkgs.outPath;
|
|
||||||
systemPackages = [ pkgs.nix ];
|
|
||||||
};
|
|
||||||
|
|
||||||
imports = nixosModules;
|
|
||||||
};
|
|
||||||
nixos = nixpkgs.lib.nixosSystem {
|
|
||||||
inherit system;
|
|
||||||
modules = [
|
|
||||||
extraNixOSConfig
|
|
||||||
{ nix.settings.experimental-features = [ "flakes" ]; }
|
|
||||||
{ nix.settings.experimental-features = [ "nix-command" ]; }
|
|
||||||
{ boot.isContainer = true; }
|
|
||||||
{ system.stateVersion = "22.11"; }
|
|
||||||
];
|
|
||||||
};
|
|
||||||
|
|
||||||
etc = vmTools.runInLinuxVM (
|
|
||||||
runCommand "etc" { } ''
|
|
||||||
mkdir -p /etc
|
|
||||||
${nixos.config.system.build.etcActivationCommands}
|
|
||||||
|
|
||||||
# remove unused files
|
|
||||||
rm -rf /etc/sudoers
|
|
||||||
|
|
||||||
mkdir -p $out
|
|
||||||
tar -C /etc -cf "$out/etc.tar" .
|
|
||||||
''
|
|
||||||
);
|
|
||||||
|
|
||||||
extendSessionDefault = id: ext: {
|
|
||||||
filter = true;
|
|
||||||
|
|
||||||
talk = [ "org.freedesktop.Notifications" ] ++ ext.talk;
|
|
||||||
own =
|
|
||||||
(optionals (id != null) [
|
|
||||||
"${id}.*"
|
|
||||||
"org.mpris.MediaPlayer2.${id}.*"
|
|
||||||
])
|
|
||||||
++ ext.own;
|
|
||||||
|
|
||||||
inherit (ext) call broadcast;
|
|
||||||
};
|
|
||||||
|
|
||||||
nixGL = fetchFromGitHub {
|
|
||||||
owner = "nix-community";
|
|
||||||
repo = "nixGL";
|
|
||||||
rev = "310f8e49a149e4c9ea52f1adf70cdc768ec53f8a";
|
|
||||||
hash = "sha256-lnzZQYG0+EXl/6NkGpyIz+FEOc/DSEG57AP1VsdeNrM=";
|
|
||||||
};
|
|
||||||
|
|
||||||
mesaWrappers =
|
|
||||||
let
|
|
||||||
isIntelX86Platform = system == "x86_64-linux";
|
|
||||||
nixGLPackages = import (nixGL + "/default.nix") {
|
|
||||||
pkgs = nixpkgs.legacyPackages.${system};
|
|
||||||
enable32bits = isIntelX86Platform;
|
|
||||||
enableIntelX86Extensions = isIntelX86Platform;
|
|
||||||
};
|
|
||||||
in
|
|
||||||
symlinkJoin {
|
|
||||||
name = "nixGL-mesa";
|
|
||||||
paths = with nixGLPackages; [
|
|
||||||
nixGLIntel
|
|
||||||
nixVulkanIntel
|
|
||||||
];
|
|
||||||
};
|
|
||||||
|
|
||||||
info = builtins.toJSON {
|
|
||||||
inherit
|
|
||||||
name
|
|
||||||
version
|
|
||||||
id
|
|
||||||
identity
|
|
||||||
launcher
|
|
||||||
groups
|
|
||||||
userns
|
|
||||||
net
|
|
||||||
dev
|
|
||||||
no_new_session
|
|
||||||
map_real_uid
|
|
||||||
direct_wayland
|
|
||||||
system_bus
|
|
||||||
gpu
|
|
||||||
;
|
|
||||||
|
|
||||||
session_bus =
|
|
||||||
if session_bus != null then
|
|
||||||
(session_bus (extendSessionDefault id))
|
|
||||||
else
|
|
||||||
(extendSessionDefault id {
|
|
||||||
talk = [ ];
|
|
||||||
own = [ ];
|
|
||||||
call = { };
|
|
||||||
broadcast = { };
|
|
||||||
});
|
|
||||||
|
|
||||||
enablements = {
|
|
||||||
wayland = allow_wayland;
|
|
||||||
x11 = allow_x11;
|
|
||||||
dbus = allow_dbus;
|
|
||||||
pipewire = allow_audio;
|
|
||||||
};
|
|
||||||
|
|
||||||
mesa = if gpu then mesaWrappers else null;
|
|
||||||
nix_gl = if gpu then nixGL else null;
|
|
||||||
current_system = nixos.config.system.build.toplevel;
|
|
||||||
activation_package = homeManagerConfiguration.activationPackage;
|
|
||||||
};
|
|
||||||
in
|
|
||||||
|
|
||||||
# Assemble the final ${pname}.pkg: a zstd-compressed tarball containing
# a bootstrap /nix store, a binary cache under /res, an /etc tree and
# the bundle.json metadata document.
stdenv.mkDerivation {
  name = "${pname}.pkg";
  inherit version;
  __structuredAttrs = true;

  nativeBuildInputs = [
    zstd
    nix
    sqlite
  ];

  buildCommand = ''
    NIX_ROOT="$(mktemp -d)"
    export USER="nobody"

    # create bootstrap store
    bootstrapClosureInfo="${
      closureInfo {
        rootPaths = [
          nix
          nixos.config.system.build.toplevel
        ];
      }
    }"
    echo "copying bootstrap store paths..."
    mkdir -p "$NIX_ROOT/nix/store"
    xargs -n 1 -a "$bootstrapClosureInfo/store-paths" cp -at "$NIX_ROOT/nix/store/"
    NIX_REMOTE="local?root=$NIX_ROOT" nix-store --load-db < "$bootstrapClosureInfo/registration"
    NIX_REMOTE="local?root=$NIX_ROOT" nix-store --optimise
    # pin registration time for reproducible output
    sqlite3 "$NIX_ROOT/nix/var/nix/db/db.sqlite" "UPDATE ValidPaths SET registrationTime = ''${SOURCE_DATE_EPOCH}"
    chmod -R +r "$NIX_ROOT/nix/var"

    # create binary cache
    closureInfo="${
      closureInfo {
        rootPaths = [
          homeManagerConfiguration.activationPackage
          launcher
        ]
        ++ optionals gpu [
          mesaWrappers
          nixGL
        ];
      }
    }"
    echo "copying application paths..."
    TMP_STORE="$(mktemp -d)"
    mkdir -p "$TMP_STORE/nix/store"
    xargs -n 1 -a "$closureInfo/store-paths" cp -at "$TMP_STORE/nix/store/"
    NIX_REMOTE="local?root=$TMP_STORE" nix-store --load-db < "$closureInfo/registration"
    sqlite3 "$TMP_STORE/nix/var/nix/db/db.sqlite" "UPDATE ValidPaths SET registrationTime = ''${SOURCE_DATE_EPOCH}"
    NIX_REMOTE="local?root=$TMP_STORE" nix --offline --extra-experimental-features nix-command \
      --verbose --log-format raw-with-logs \
      copy --all --no-check-sigs --to \
      "file://$NIX_ROOT/res?compression=zstd&compression-level=19&parallel-compression=true"

    # package /etc
    mkdir -p "$NIX_ROOT/etc"
    tar -C "$NIX_ROOT/etc" -xf "${etc}/etc.tar"

    # write metadata
    cp "${writeText "bundle.json" info}" "$NIX_ROOT/bundle.json"

    # create an intermediate file to improve zstd performance
    INTER="$(mktemp)"
    tar -C "$NIX_ROOT" -cf "$INTER" .
    zstd -T0 -19 -fo "$out" "$INTER"
  '';
}
|
|
||||||
335
cmd/hpkg/main.go
335
cmd/hpkg/main.go
@@ -1,335 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"os/signal"
|
|
||||||
"path"
|
|
||||||
"syscall"
|
|
||||||
|
|
||||||
"hakurei.app/command"
|
|
||||||
"hakurei.app/container/check"
|
|
||||||
"hakurei.app/container/fhs"
|
|
||||||
"hakurei.app/hst"
|
|
||||||
"hakurei.app/message"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
errSuccess = errors.New("success")
|
|
||||||
)
|
|
||||||
|
|
||||||
// main implements the hpkg command line. The "install" subcommand
// unpacks a .pkg bundle into the per-application state directory and
// activates it; "start" launches an installed application via hakurei.
// Subcommand handlers signal successful completion with errSuccess.
func main() {
	log.SetPrefix("hpkg: ")
	log.SetFlags(0)
	msg := message.New(log.Default())

	if err := os.Setenv("SHELL", pathShell.String()); err != nil {
		log.Fatalf("cannot set $SHELL: %v", err)
	}

	if os.Geteuid() == 0 {
		log.Fatal("this program must not run as root")
	}

	ctx, stop := signal.NotifyContext(context.Background(),
		syscall.SIGINT, syscall.SIGTERM)
	defer stop() // unreachable

	var (
		flagVerbose   bool
		flagDropShell bool
	)
	c := command.New(os.Stderr, log.Printf, "hpkg", func([]string) error { msg.SwapVerbose(flagVerbose); return nil }).
		Flag(&flagVerbose, "v", command.BoolFlag(false), "Print debug messages to the console").
		Flag(&flagDropShell, "s", command.BoolFlag(false), "Drop to a shell in place of next hakurei action")

	{
		var (
			flagDropShellActivate bool
		)
		c.NewCommand("install", "Install an application from its package", func(args []string) error {
			if len(args) != 1 {
				log.Println("invalid argument")
				return syscall.EINVAL
			}
			// resolve the package path against the working directory
			pkgPath := args[0]
			if !path.IsAbs(pkgPath) {
				if dir, err := os.Getwd(); err != nil {
					log.Printf("cannot get current directory: %v", err)
					return err
				} else {
					pkgPath = path.Join(dir, pkgPath)
				}
			}

			/*
				Look up paths to programs started by hpkg.
				This is done here to ease error handling as cleanup is not yet required.
			*/

			var (
				_     = lookPath("zstd")
				tar   = lookPath("tar")
				chmod = lookPath("chmod")
				rm    = lookPath("rm")
			)

			/*
				Extract package and set up for cleanup.
			*/

			var workDir *check.Absolute
			if p, err := os.MkdirTemp("", "hpkg.*"); err != nil {
				log.Printf("cannot create temporary directory: %v", err)
				return err
			} else if workDir, err = check.NewAbs(p); err != nil {
				log.Printf("invalid temporary directory: %v", err)
				return err
			}
			cleanup := func() {
				// should be faster than a native implementation
				mustRun(msg, chmod, "-R", "+w", workDir.String())
				mustRun(msg, rm, "-rf", workDir.String())
			}
			// registered so mustRun failures also remove workDir
			beforeRunFail.Store(&cleanup)

			mustRun(msg, tar, "-C", workDir.String(), "-xf", pkgPath)

			/*
				Parse bundle and app metadata, do pre-install checks.
			*/

			bundle := loadAppInfo(path.Join(workDir.String(), "bundle.json"), cleanup)
			pathSet := pathSetByApp(bundle.ID)

			// a holds the currently installed metadata; equal to bundle
			// when this is a clean installation
			a := bundle
			if s, err := os.Stat(pathSet.metaPath.String()); err != nil {
				if !os.IsNotExist(err) {
					cleanup()
					log.Printf("cannot access %q: %v", pathSet.metaPath, err)
					return err
				}
				// did not modify app, clean installation condition met later
			} else if s.IsDir() {
				cleanup()
				log.Printf("metadata path %q is not a file", pathSet.metaPath)
				return syscall.EBADMSG
			} else {
				a = loadAppInfo(pathSet.metaPath.String(), cleanup)
				if a.ID != bundle.ID {
					cleanup()
					log.Printf("app %q claims to have identifier %q",
						bundle.ID, a.ID)
					return syscall.EBADE
				}
				// sec: should verify credentials
			}

			if a != bundle {
				// do not try to re-install
				if a.NixGL == bundle.NixGL &&
					a.CurrentSystem == bundle.CurrentSystem &&
					a.Launcher == bundle.Launcher &&
					a.ActivationPackage == bundle.ActivationPackage {
					cleanup()
					log.Printf("package %q is identical to local application %q",
						pkgPath, a.ID)
					return errSuccess
				}

				// identity determines uid
				if a.Identity != bundle.Identity {
					cleanup()
					log.Printf("package %q identity %d differs from installed %d",
						pkgPath, bundle.Identity, a.Identity)
					return syscall.EBADE
				}

				// sec: should compare version string
				msg.Verbosef("installing application %q version %q over local %q",
					bundle.ID, bundle.Version, a.Version)
			} else {
				msg.Verbosef("application %q clean installation", bundle.ID)
				// sec: should install credentials
			}

			/*
				Setup steps for files owned by the target user.
			*/

			withCacheDir(ctx, msg, "install", []string{
				// export inner bundle path in the environment
				"export BUNDLE=" + hst.PrivateTmp + "/bundle",
				// replace inner /etc
				"mkdir -p etc",
				"chmod -R +w etc",
				"rm -rf etc",
				"cp -dRf $BUNDLE/etc etc",
				// replace inner /nix
				"mkdir -p nix",
				"chmod -R +w nix",
				"rm -rf nix",
				"cp -dRf /nix nix",
				// copy from binary cache
				"nix copy --offline --no-check-sigs --all --from file://$BUNDLE/res --to $PWD",
				// deduplicate nix store
				"nix store --offline --store $PWD optimise",
				// make cache directory world-readable for autoetc
				"chmod 0755 .",
			}, workDir, bundle, pathSet, flagDropShell, cleanup)

			if bundle.GPU {
				withCacheDir(ctx, msg, "mesa-wrappers", []string{
					// link nixGL mesa wrappers
					"mkdir -p nix/.nixGL",
					"ln -s " + bundle.Mesa + "/bin/nixGLIntel nix/.nixGL/nixGL",
					"ln -s " + bundle.Mesa + "/bin/nixVulkanIntel nix/.nixGL/nixVulkan",
				}, workDir, bundle, pathSet, false, cleanup)
			}

			/*
				Activate home-manager generation.
			*/

			withNixDaemon(ctx, msg, "activate", []string{
				// clean up broken links
				"mkdir -p .local/state/{nix,home-manager}",
				"chmod -R +w .local/state/{nix,home-manager}",
				"rm -rf .local/state/{nix,home-manager}",
				// run activation script
				bundle.ActivationPackage + "/activate",
			}, false, func(config *hst.Config) *hst.Config { return config },
				bundle, pathSet, flagDropShellActivate, cleanup)

			/*
				Installation complete. Write metadata to block re-installs or downgrades.
			*/

			// serialise metadata to ensure consistency
			if f, err := os.OpenFile(pathSet.metaPath.String()+"~", os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644); err != nil {
				cleanup()
				log.Printf("cannot create metadata file: %v", err)
				return err
			} else if err = json.NewEncoder(f).Encode(bundle); err != nil {
				cleanup()
				log.Printf("cannot write metadata: %v", err)
				return err
			} else if err = f.Close(); err != nil {
				log.Printf("cannot close metadata file: %v", err)
				// not fatal
			}

			// atomically move the temporary metadata file into place
			if err := os.Rename(pathSet.metaPath.String()+"~", pathSet.metaPath.String()); err != nil {
				cleanup()
				log.Printf("cannot rename metadata file: %v", err)
				return err
			}

			cleanup()
			return errSuccess
		}).
			Flag(&flagDropShellActivate, "s", command.BoolFlag(false), "Drop to a shell on activation")
	}

	{
		var (
			flagDropShellNixGL bool
			flagAutoDrivers    bool
		)
		c.NewCommand("start", "Start an application", func(args []string) error {
			if len(args) < 1 {
				log.Println("invalid argument")
				return syscall.EINVAL
			}

			/*
				Parse app metadata.
			*/

			id := args[0]
			pathSet := pathSetByApp(id)
			a := loadAppInfo(pathSet.metaPath.String(), func() {})
			if a.ID != id {
				log.Printf("app %q claims to have identifier %q", id, a.ID)
				return syscall.EBADE
			}

			/*
				Prepare nixGL.
			*/

			if a.GPU && flagAutoDrivers {
				withNixDaemon(ctx, msg, "nix-gl", []string{
					"mkdir -p /nix/.nixGL/auto",
					"rm -rf /nix/.nixGL/auto",
					"export NIXPKGS_ALLOW_UNFREE=1",
					"nix build --impure " +
						"--out-link /nix/.nixGL/auto/opengl " +
						"--override-input nixpkgs path:/etc/nixpkgs " +
						"path:" + a.NixGL,
					"nix build --impure " +
						"--out-link /nix/.nixGL/auto/vulkan " +
						"--override-input nixpkgs path:/etc/nixpkgs " +
						"path:" + a.NixGL + "#nixVulkanNvidia",
				}, true, func(config *hst.Config) *hst.Config {
					// driver detection needs host network and sysfs views
					config.Container.Filesystem = append(config.Container.Filesystem, []hst.FilesystemConfigJSON{
						{FilesystemConfig: &hst.FSBind{Source: fhs.AbsEtc.Append("resolv.conf"), Optional: true}},
						{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("block"), Optional: true}},
						{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("bus"), Optional: true}},
						{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("class"), Optional: true}},
						{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("dev"), Optional: true}},
						{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("devices"), Optional: true}},
					}...)
					appendGPUFilesystem(config)
					return config
				}, a, pathSet, flagDropShellNixGL, func() {})
			}

			/*
				Create app configuration.
			*/

			pathname := a.Launcher
			argv := make([]string, 1, len(args))
			if flagDropShell {
				pathname = pathShell
				argv[0] = bash
			} else {
				argv[0] = a.Launcher.String()
			}
			argv = append(argv, args[1:]...)
			config := a.toHst(pathSet, pathname, argv, flagDropShell)

			/*
				Expose GPU devices.
			*/

			if a.GPU {
				config.Container.Filesystem = append(config.Container.Filesystem,
					hst.FilesystemConfigJSON{FilesystemConfig: &hst.FSBind{Source: pathSet.nixPath.Append(".nixGL"), Target: hst.AbsPrivateTmp.Append("nixGL")}})
				appendGPUFilesystem(config)
			}

			/*
				Spawn app.
			*/

			mustRunApp(ctx, msg, config, func() {})
			return errSuccess
		}).
			Flag(&flagDropShellNixGL, "s", command.BoolFlag(false), "Drop to a shell on nixGL build").
			Flag(&flagAutoDrivers, "auto-drivers", command.BoolFlag(false), "Attempt automatic opengl driver detection")
	}

	c.MustParse(os.Args[1:], func(err error) {
		msg.Verbosef("command returned %v", err)
		if errors.Is(err, errSuccess) {
			msg.BeforeExit()
			os.Exit(0)
		}
	})
	log.Fatal("unreachable")
}
|
|
||||||
@@ -1,117 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"strconv"
|
|
||||||
"sync/atomic"
|
|
||||||
|
|
||||||
"hakurei.app/container/check"
|
|
||||||
"hakurei.app/container/fhs"
|
|
||||||
"hakurei.app/hst"
|
|
||||||
"hakurei.app/message"
|
|
||||||
)
|
|
||||||
|
|
||||||
const bash = "bash"
|
|
||||||
|
|
||||||
var (
|
|
||||||
dataHome *check.Absolute
|
|
||||||
)
|
|
||||||
|
|
||||||
// init resolves dataHome: $HAKUREI_DATA_HOME when it holds a valid
// absolute path, otherwise /var/lib/hakurei/<uid> of the current user.
func init() {
	// dataHome
	if a, err := check.NewAbs(os.Getenv("HAKUREI_DATA_HOME")); err == nil {
		dataHome = a
	} else {
		dataHome = fhs.AbsVarLib.Append("hakurei/" + strconv.Itoa(os.Getuid()))
	}
}
|
|
||||||
|
|
||||||
// Well-known paths used when assembling container configurations.
var (
	pathBin = fhs.AbsRoot.Append("bin")

	pathNix           = check.MustAbs("/nix/")
	pathNixStore      = pathNix.Append("store/")
	pathCurrentSystem = fhs.AbsRun.Append("current-system")
	pathSwBin         = pathCurrentSystem.Append("sw/bin/")
	// pathShell is bash from the current system software closure
	pathShell = pathSwBin.Append(bash)

	pathData     = check.MustAbs("/data")
	pathDataData = pathData.Append("data")
)
|
|
||||||
|
|
||||||
// lookPath resolves file in $PATH and terminates the process if
// resolution fails. The returned path is always non-empty.
func lookPath(file string) string {
	p, err := exec.LookPath(file)
	if err != nil {
		// include the underlying error: LookPath can fail for reasons
		// other than the file being absent (e.g. missing execute bit)
		log.Fatalf("%s: %v", file, err)
	}
	return p
}
|
|
||||||
|
|
||||||
var beforeRunFail = new(atomic.Pointer[func()])
|
|
||||||
|
|
||||||
func mustRun(msg message.Msg, name string, arg ...string) {
|
|
||||||
msg.Verbosef("spawning process: %q %q", name, arg)
|
|
||||||
cmd := exec.Command(name, arg...)
|
|
||||||
cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
if f := beforeRunFail.Swap(nil); f != nil {
|
|
||||||
(*f)()
|
|
||||||
}
|
|
||||||
log.Fatalf("%s: %v", name, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// appPathSet holds the filesystem layout for one installed
// application, all derived from dataHome and the application id.
type appPathSet struct {
	// ${dataHome}/${id}
	baseDir *check.Absolute
	// ${baseDir}/app; serialised application metadata
	metaPath *check.Absolute
	// ${baseDir}/files; bound as the app home directory in the container
	homeDir *check.Absolute
	// ${baseDir}/cache
	cacheDir *check.Absolute
	// ${baseDir}/cache/nix
	nixPath *check.Absolute
}
|
|
||||||
|
|
||||||
func pathSetByApp(id string) *appPathSet {
|
|
||||||
pathSet := new(appPathSet)
|
|
||||||
pathSet.baseDir = dataHome.Append(id)
|
|
||||||
pathSet.metaPath = pathSet.baseDir.Append("app")
|
|
||||||
pathSet.homeDir = pathSet.baseDir.Append("files")
|
|
||||||
pathSet.cacheDir = pathSet.baseDir.Append("cache")
|
|
||||||
pathSet.nixPath = pathSet.cacheDir.Append("nix")
|
|
||||||
return pathSet
|
|
||||||
}
|
|
||||||
|
|
||||||
// appendGPUFilesystem binds the known GPU device nodes into the
// container filesystem. Every bind is optional, so device nodes absent
// on the host are skipped rather than failing the container setup.
func appendGPUFilesystem(config *hst.Config) {
	config.Container.Filesystem = append(config.Container.Filesystem, []hst.FilesystemConfigJSON{
		// flatpak commit 763a686d874dd668f0236f911de00b80766ffe79
		{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("dri"), Device: true, Optional: true}},
		// mali
		{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("mali"), Device: true, Optional: true}},
		{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("mali0"), Device: true, Optional: true}},
		{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("umplock"), Device: true, Optional: true}},
		// nvidia
		{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidiactl"), Device: true, Optional: true}},
		{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia-modeset"), Device: true, Optional: true}},
		// nvidia OpenCL/CUDA
		{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia-uvm"), Device: true, Optional: true}},
		{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia-uvm-tools"), Device: true, Optional: true}},

		// flatpak commit d2dff2875bb3b7e2cd92d8204088d743fd07f3ff
		{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia0"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia1"), Device: true, Optional: true}},
		{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia2"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia3"), Device: true, Optional: true}},
		{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia4"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia5"), Device: true, Optional: true}},
		{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia6"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia7"), Device: true, Optional: true}},
		{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia8"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia9"), Device: true, Optional: true}},
		{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia10"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia11"), Device: true, Optional: true}},
		{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia12"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia13"), Device: true, Optional: true}},
		{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia14"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia15"), Device: true, Optional: true}},
		{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia16"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia17"), Device: true, Optional: true}},
		{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia18"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia19"), Device: true, Optional: true}},
	}...)
}
|
|
||||||
@@ -1,61 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
|
|
||||||
"hakurei.app/hst"
|
|
||||||
"hakurei.app/internal/info"
|
|
||||||
"hakurei.app/message"
|
|
||||||
)
|
|
||||||
|
|
||||||
// hakureiPathVal is the absolute path to the hakurei executable,
// resolved once at program startup.
var hakureiPathVal = info.MustHakureiPath().String()

// mustRunApp spawns hakurei with config streamed as JSON over fd 3 and
// blocks until it exits. On any failure beforeFail runs before the
// process terminates; when the child exits non-zero this process exits
// with the same status code.
func mustRunApp(ctx context.Context, msg message.Msg, config *hst.Config, beforeFail func()) {
	var (
		cmd *exec.Cmd
		st  io.WriteCloser
	)

	if r, w, err := os.Pipe(); err != nil {
		beforeFail()
		log.Fatalf("cannot pipe: %v", err)
	} else {
		if msg.IsVerbose() {
			cmd = exec.CommandContext(ctx, hakureiPathVal, "-v", "app", "3")
		} else {
			cmd = exec.CommandContext(ctx, hakureiPathVal, "app", "3")
		}
		cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
		// the pipe read end becomes fd 3 in the child
		cmd.ExtraFiles = []*os.File{r}
		st = w
	}

	// encode concurrently so a large config cannot deadlock against the
	// child draining the pipe
	// NOTE(review): the write end is never closed after Encode;
	// presumably the child stops reading once the JSON document is
	// complete — confirm, otherwise it could block waiting for EOF.
	go func() {
		if err := json.NewEncoder(st).Encode(config); err != nil {
			beforeFail()
			log.Fatalf("cannot send configuration: %v", err)
		}
	}()

	if err := cmd.Start(); err != nil {
		beforeFail()
		log.Fatalf("cannot start hakurei: %v", err)
	}
	if err := cmd.Wait(); err != nil {
		var exitError *exec.ExitError
		if errors.As(err, &exitError) {
			// propagate the child's exit status
			beforeFail()
			msg.BeforeExit()
			os.Exit(exitError.ExitCode())
		} else {
			beforeFail()
			log.Fatalf("cannot wait: %v", err)
		}
	}
}
|
|
||||||
@@ -1,62 +0,0 @@
|
|||||||
# NixOS configuration for the hpkg integration test VM: a single
# auto-login user running Sway, with the hakurei module enabled.
{ pkgs, ... }:
{
  users.users = {
    alice = {
      isNormalUser = true;
      description = "Alice Foobar";
      password = "foobar";
      uid = 1000;
    };
  };

  home-manager.users.alice.home.stateVersion = "24.11";

  # Automatically login on tty1 as a normal user:
  services.getty.autologinUser = "alice";

  environment = {
    variables = {
      # fixed socket path so the test driver can wait for Sway startup
      SWAYSOCK = "/tmp/sway-ipc.sock";
      WLR_RENDERER = "pixman";
    };
  };

  # Automatically configure and start Sway when logging in on tty1:
  programs.bash.loginShellInit = ''
    if [ "$(tty)" = "/dev/tty1" ]; then
      set -e

      mkdir -p ~/.config/sway
      (sed s/Mod4/Mod1/ /etc/sway/config &&
      echo 'output * bg ${pkgs.nixos-artwork.wallpapers.simple-light-gray.gnomeFilePath} fill' &&
      echo 'output Virtual-1 res 1680x1050') > ~/.config/sway/config

      sway --validate
      systemd-cat --identifier=session sway && touch /tmp/sway-exit-ok
    fi
  '';

  programs.sway.enable = true;

  virtualisation = {
    diskSize = 6 * 1024;

    qemu.options = [
      # Need to switch to a different GPU driver than the default one (-vga std) so that Sway can launch:
      "-vga none -device virtio-gpu-pci"

      # Increase zstd performance:
      "-smp 8"
    ];
  };

  environment.hakurei = {
    enable = true;
    stateDir = "/var/lib/hakurei";
    users.alice = 0;

    extraHomeConfig = {
      home.stateVersion = "23.05";
    };
  };
}
|
|
||||||
@@ -1,34 +0,0 @@
|
|||||||
# NixOS VM test exercising the hpkg install/start flow; the script in
# ./test.py drives the machine defined here.
{
  testers,
  callPackage,

  system,
  self,
}:
let
  buildPackage = self.buildPackage.${system};
in
testers.nixosTest {
  name = "hpkg";
  nodes.machine = {
    environment.etc = {
      # the package under test, installed from inside the VM
      "foot.pkg".source = callPackage ./foot.nix { inherit buildPackage; };
    };

    imports = [
      ./configuration.nix

      self.nixosModules.hakurei
      self.inputs.home-manager.nixosModules.home-manager
    ];
  };

  # adapted from nixos sway integration tests

  # testScriptWithTypes:49: error: Cannot call function of unknown type
  #     (machine.succeed if succeed else machine.execute)(
  #     ^
  # Found 1 error in 1 file (checked 1 source file)
  skipTypeCheck = true;
  testScript = builtins.readFile ./test.py;
}
|
|
||||||
@@ -1,48 +0,0 @@
|
|||||||
# hpkg package definition for the foot terminal, used by the VM test.
{
  lib,
  buildPackage,
  foot,
  wayland-utils,
  inconsolata,
}:

buildPackage {
  name = "foot";
  inherit (foot) version;

  identity = 2;
  id = "org.codeberg.dnkl.foot";

  modules = [
    {
      home.packages = [
        foot

        # For wayland-info:
        wayland-utils
      ];
    }
  ];

  nixosModules = [
    {
      # To help with OCR:
      environment.etc."xdg/foot/foot.ini".text = lib.generators.toINI { } {
        main = {
          font = "inconsolata:size=14";
        };
        colors = rec {
          foreground = "000000";
          background = "ffffff";
          regular2 = foreground;
        };
      };

      fonts.packages = [ inconsolata ];
    }
  ];

  # launcher script executed inside the container
  script = ''
    exec foot "$@"
  '';
}
|
|
||||||
@@ -1,110 +0,0 @@
|
|||||||
import json
|
|
||||||
import shlex
|
|
||||||
|
|
||||||
q = shlex.quote
|
|
||||||
NODE_GROUPS = ["nodes", "floating_nodes"]
|
|
||||||
|
|
||||||
|
|
||||||
def swaymsg(command: str = "", succeed=True, type="command"):
    """Run swaymsg as alice inside the VM and return its parsed JSON output.

    When succeed is False the command is allowed to fail, and None is
    returned if it produced no output.
    """
    assert command != "" or type != "command", "Must specify command or type"
    shell = q(f"swaymsg -t {q(type)} -- {q(command)}")
    with machine.nested(
        f"sending swaymsg {shell!r}" + " (allowed to fail)" * (not succeed)
    ):
        ret = (machine.succeed if succeed else machine.execute)(
            f"su - alice -c {shell}"
        )

    # execute also returns a status code, but disregard.
    if not succeed:
        _, ret = ret

    if not succeed and not ret:
        return None

    parsed = json.loads(ret)
    return parsed
|
|
||||||
|
|
||||||
|
|
||||||
def walk(tree):
    """Yield tree itself, then every descendant reachable through the
    sway node groups, in pre-order."""
    yield tree
    children = (
        node for group in NODE_GROUPS for node in tree.get(group, [])
    )
    for child in children:
        yield from walk(child)
|
|
||||||
|
|
||||||
|
|
||||||
def wait_for_window(pattern):
    """Retry until some sway window name contains pattern."""
    def func(last_chance):
        nodes = (node["name"] for node in walk(swaymsg(type="get_tree")))

        if last_chance:
            nodes = list(nodes)
            machine.log(f"Last call! Current list of windows: {nodes}")

        return any(pattern in name for name in nodes)

    retry(func)
|
|
||||||
|
|
||||||
|
|
||||||
def collect_state_ui(name):
    """Capture hakurei ps output (text and JSON) plus a screenshot,
    copying the files out of the VM for inspection."""
    swaymsg(f"exec hakurei ps > '/tmp/{name}.ps'")
    machine.copy_from_vm(f"/tmp/{name}.ps", "")
    swaymsg(f"exec hakurei --json ps > '/tmp/{name}.json'")
    machine.copy_from_vm(f"/tmp/{name}.json", "")
    machine.screenshot(name)
|
|
||||||
|
|
||||||
|
|
||||||
def check_state(name, enablements):
    """Assert exactly one hakurei instance is running, launched via a
    store-path script matching name, with the expected enablements."""
    instances = json.loads(machine.succeed("sudo -u alice -i XDG_RUNTIME_DIR=/run/user/1000 hakurei --json ps"))
    if len(instances) != 1:
        raise Exception(f"unexpected state length {len(instances)}")
    instance = instances[0]

    # the single argument must be a /nix/store launcher for this app
    if len(instance['container']['args']) != 1 or not (instance['container']['args'][0].startswith("/nix/store/")) or f"hakurei-{name}-" not in (instance['container']['args'][0]):
        raise Exception(f"unexpected args {instance['container']['args']}")

    if instance['enablements'] != enablements:
        raise Exception(f"unexpected enablements {instance['enablements']}")
|
|
||||||
|
|
||||||
|
|
||||||
# End-to-end flow: boot, install the foot package with hpkg, start it,
# verify enablements and runtime ACLs, then shut Sway down cleanly.
start_all()
machine.wait_for_unit("multi-user.target")

# To check hakurei's version:
print(machine.succeed("sudo -u alice -i hakurei version"))

# Wait for Sway to complete startup:
machine.wait_for_file("/run/user/1000/wayland-1")
machine.wait_for_file("/tmp/sway-ipc.sock")

# Prepare hpkg directory:
machine.succeed("install -dm 0700 -o alice -g users /var/lib/hakurei/1000")

# Install hpkg app:
swaymsg("exec hpkg -v install /etc/foot.pkg && touch /tmp/hpkg-install-ok")
machine.wait_for_file("/tmp/hpkg-install-ok")

# Start app (foot) with Wayland enablement:
swaymsg("exec hpkg -v start org.codeberg.dnkl.foot")
wait_for_window("hakurei@machine-foot")
machine.send_chars("clear; wayland-info && touch /tmp/success-client\n")
machine.wait_for_file("/tmp/hakurei.0/tmpdir/2/success-client")
collect_state_ui("app_wayland")
check_state("foot", {"wayland": True, "dbus": True, "pipewire": True})
# Verify acl on XDG_RUNTIME_DIR:
print(machine.succeed("getfacl --absolute-names --omit-header --numeric /tmp/hakurei.0/runtime | grep 10002"))
machine.send_chars("exit\n")
machine.wait_until_fails("pgrep foot")
# Verify acl cleanup on XDG_RUNTIME_DIR:
machine.wait_until_fails("getfacl --absolute-names --omit-header --numeric /tmp/hakurei.0/runtime | grep 10002")

# Exit Sway and verify process exit status 0:
swaymsg("exit", succeed=False)
machine.wait_for_file("/tmp/sway-exit-ok")

# Print hakurei share and rundir contents:
print(machine.succeed("find /tmp/hakurei.0 "
    + "-path '/tmp/hakurei.0/runtime/*/*' -prune -o "
    + "-path '/tmp/hakurei.0/tmpdir/*/*' -prune -o "
    + "-print"))
print(machine.fail("ls /run/user/1000/hakurei"))
|
|
||||||
130
cmd/hpkg/with.go
130
cmd/hpkg/with.go
@@ -1,130 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"hakurei.app/container/check"
|
|
||||||
"hakurei.app/container/fhs"
|
|
||||||
"hakurei.app/hst"
|
|
||||||
"hakurei.app/message"
|
|
||||||
)
|
|
||||||
|
|
||||||
func withNixDaemon(
|
|
||||||
ctx context.Context,
|
|
||||||
msg message.Msg,
|
|
||||||
action string, command []string, net bool, updateConfig func(config *hst.Config) *hst.Config,
|
|
||||||
app *appInfo, pathSet *appPathSet, dropShell bool, beforeFail func(),
|
|
||||||
) {
|
|
||||||
flags := hst.FMultiarch | hst.FUserns // nix sandbox requires userns
|
|
||||||
if net {
|
|
||||||
flags |= hst.FHostNet
|
|
||||||
}
|
|
||||||
if dropShell {
|
|
||||||
flags |= hst.FTty
|
|
||||||
}
|
|
||||||
|
|
||||||
mustRunAppDropShell(ctx, msg, updateConfig(&hst.Config{
|
|
||||||
ID: app.ID,
|
|
||||||
|
|
||||||
ExtraPerms: []hst.ExtraPermConfig{
|
|
||||||
{Path: dataHome, Execute: true},
|
|
||||||
{Ensure: true, Path: pathSet.baseDir, Read: true, Write: true, Execute: true},
|
|
||||||
},
|
|
||||||
|
|
||||||
Identity: app.Identity,
|
|
||||||
|
|
||||||
Container: &hst.ContainerConfig{
|
|
||||||
Hostname: formatHostname(app.Name) + "-" + action,
|
|
||||||
|
|
||||||
Filesystem: []hst.FilesystemConfigJSON{
|
|
||||||
{FilesystemConfig: &hst.FSBind{Target: fhs.AbsEtc, Source: pathSet.cacheDir.Append("etc"), Special: true}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: pathSet.nixPath, Target: pathNix, Write: true}},
|
|
||||||
{FilesystemConfig: &hst.FSLink{Target: pathCurrentSystem, Linkname: app.CurrentSystem.String()}},
|
|
||||||
{FilesystemConfig: &hst.FSLink{Target: pathBin, Linkname: pathSwBin.String()}},
|
|
||||||
{FilesystemConfig: &hst.FSLink{Target: fhs.AbsUsrBin, Linkname: pathSwBin.String()}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Target: pathDataData.Append(app.ID), Source: pathSet.homeDir, Write: true, Ensure: true}},
|
|
||||||
},
|
|
||||||
|
|
||||||
Username: "hakurei",
|
|
||||||
Shell: pathShell,
|
|
||||||
Home: pathDataData.Append(app.ID),
|
|
||||||
|
|
||||||
Path: pathShell,
|
|
||||||
Args: []string{bash, "-lc", "rm -f /nix/var/nix/daemon-socket/socket && " +
|
|
||||||
// start nix-daemon
|
|
||||||
"nix-daemon --store / & " +
|
|
||||||
// wait for socket to appear
|
|
||||||
"(while [ ! -S /nix/var/nix/daemon-socket/socket ]; do sleep 0.01; done) && " +
|
|
||||||
// create directory so nix stops complaining
|
|
||||||
"mkdir -p /nix/var/nix/profiles/per-user/root/channels && " +
|
|
||||||
strings.Join(command, " && ") +
|
|
||||||
// terminate nix-daemon
|
|
||||||
" && pkill nix-daemon",
|
|
||||||
},
|
|
||||||
|
|
||||||
Flags: flags,
|
|
||||||
},
|
|
||||||
}), dropShell, beforeFail)
|
|
||||||
}
|
|
||||||
|
|
||||||
func withCacheDir(
|
|
||||||
ctx context.Context,
|
|
||||||
msg message.Msg,
|
|
||||||
action string, command []string, workDir *check.Absolute,
|
|
||||||
app *appInfo, pathSet *appPathSet, dropShell bool, beforeFail func(),
|
|
||||||
) {
|
|
||||||
flags := hst.FMultiarch
|
|
||||||
if dropShell {
|
|
||||||
flags |= hst.FTty
|
|
||||||
}
|
|
||||||
|
|
||||||
mustRunAppDropShell(ctx, msg, &hst.Config{
|
|
||||||
ID: app.ID,
|
|
||||||
|
|
||||||
ExtraPerms: []hst.ExtraPermConfig{
|
|
||||||
{Path: dataHome, Execute: true},
|
|
||||||
{Ensure: true, Path: pathSet.baseDir, Read: true, Write: true, Execute: true},
|
|
||||||
{Path: workDir, Execute: true},
|
|
||||||
},
|
|
||||||
|
|
||||||
Identity: app.Identity,
|
|
||||||
|
|
||||||
Container: &hst.ContainerConfig{
|
|
||||||
Hostname: formatHostname(app.Name) + "-" + action,
|
|
||||||
|
|
||||||
Filesystem: []hst.FilesystemConfigJSON{
|
|
||||||
{FilesystemConfig: &hst.FSBind{Target: fhs.AbsEtc, Source: workDir.Append(fhs.Etc), Special: true}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: workDir.Append("nix"), Target: pathNix}},
|
|
||||||
{FilesystemConfig: &hst.FSLink{Target: pathCurrentSystem, Linkname: app.CurrentSystem.String()}},
|
|
||||||
{FilesystemConfig: &hst.FSLink{Target: pathBin, Linkname: pathSwBin.String()}},
|
|
||||||
{FilesystemConfig: &hst.FSLink{Target: fhs.AbsUsrBin, Linkname: pathSwBin.String()}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Source: workDir, Target: hst.AbsPrivateTmp.Append("bundle")}},
|
|
||||||
{FilesystemConfig: &hst.FSBind{Target: pathDataData.Append(app.ID, "cache"), Source: pathSet.cacheDir, Write: true, Ensure: true}},
|
|
||||||
},
|
|
||||||
|
|
||||||
Username: "nixos",
|
|
||||||
Shell: pathShell,
|
|
||||||
Home: pathDataData.Append(app.ID, "cache"),
|
|
||||||
|
|
||||||
Path: pathShell,
|
|
||||||
Args: []string{bash, "-lc", strings.Join(command, " && ")},
|
|
||||||
|
|
||||||
Flags: flags,
|
|
||||||
},
|
|
||||||
}, dropShell, beforeFail)
|
|
||||||
}
|
|
||||||
|
|
||||||
func mustRunAppDropShell(ctx context.Context, msg message.Msg, config *hst.Config, dropShell bool, beforeFail func()) {
|
|
||||||
if dropShell {
|
|
||||||
if config.Container != nil {
|
|
||||||
config.Container.Args = []string{bash, "-l"}
|
|
||||||
}
|
|
||||||
mustRunApp(ctx, msg, config, beforeFail)
|
|
||||||
beforeFail()
|
|
||||||
msg.BeforeExit()
|
|
||||||
os.Exit(0)
|
|
||||||
}
|
|
||||||
mustRunApp(ctx, msg, config, beforeFail)
|
|
||||||
}
|
|
||||||
352
cmd/mbf/main.go
352
cmd/mbf/main.go
@@ -4,17 +4,22 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
"syscall"
|
"syscall"
|
||||||
|
"time"
|
||||||
"unique"
|
"unique"
|
||||||
|
|
||||||
"hakurei.app/command"
|
"hakurei.app/command"
|
||||||
"hakurei.app/container"
|
"hakurei.app/container"
|
||||||
"hakurei.app/container/check"
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/container/fhs"
|
||||||
|
"hakurei.app/container/seccomp"
|
||||||
|
"hakurei.app/container/std"
|
||||||
"hakurei.app/internal/pkg"
|
"hakurei.app/internal/pkg"
|
||||||
"hakurei.app/internal/rosa"
|
"hakurei.app/internal/rosa"
|
||||||
"hakurei.app/message"
|
"hakurei.app/message"
|
||||||
@@ -51,10 +56,16 @@ func main() {
|
|||||||
flagCures int
|
flagCures int
|
||||||
flagBase string
|
flagBase string
|
||||||
flagTShift int
|
flagTShift int
|
||||||
|
flagIdle bool
|
||||||
)
|
)
|
||||||
c := command.New(os.Stderr, log.Printf, "mbf", func([]string) (err error) {
|
c := command.New(os.Stderr, log.Printf, "mbf", func([]string) (err error) {
|
||||||
msg.SwapVerbose(!flagQuiet)
|
msg.SwapVerbose(!flagQuiet)
|
||||||
|
|
||||||
|
flagBase = os.ExpandEnv(flagBase)
|
||||||
|
if flagBase == "" {
|
||||||
|
flagBase = "cache"
|
||||||
|
}
|
||||||
|
|
||||||
var base *check.Absolute
|
var base *check.Absolute
|
||||||
if flagBase, err = filepath.Abs(flagBase); err != nil {
|
if flagBase, err = filepath.Abs(flagBase); err != nil {
|
||||||
return
|
return
|
||||||
@@ -70,6 +81,11 @@ func main() {
|
|||||||
cache.SetThreshold(1 << flagTShift)
|
cache.SetThreshold(1 << flagTShift)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if flagIdle {
|
||||||
|
pkg.SchedPolicy = container.SCHED_IDLE
|
||||||
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
}).Flag(
|
}).Flag(
|
||||||
&flagQuiet,
|
&flagQuiet,
|
||||||
@@ -81,12 +97,16 @@ func main() {
|
|||||||
"Maximum number of dependencies to cure at any given time",
|
"Maximum number of dependencies to cure at any given time",
|
||||||
).Flag(
|
).Flag(
|
||||||
&flagBase,
|
&flagBase,
|
||||||
"d", command.StringFlag("cache"),
|
"d", command.StringFlag("$MBF_CACHE_DIR"),
|
||||||
"Directory to store cured artifacts",
|
"Directory to store cured artifacts",
|
||||||
).Flag(
|
).Flag(
|
||||||
&flagTShift,
|
&flagTShift,
|
||||||
"tshift", command.IntFlag(-1),
|
"tshift", command.IntFlag(-1),
|
||||||
"Dependency graph size exponent, to the power of 2",
|
"Dependency graph size exponent, to the power of 2",
|
||||||
|
).Flag(
|
||||||
|
&flagIdle,
|
||||||
|
"sched-idle", command.BoolFlag(false),
|
||||||
|
"Set SCHED_IDLE scheduling policy",
|
||||||
)
|
)
|
||||||
|
|
||||||
{
|
{
|
||||||
@@ -109,17 +129,43 @@ func main() {
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
var (
|
||||||
|
flagGentoo string
|
||||||
|
flagChecksum string
|
||||||
|
|
||||||
|
flagStage0 bool
|
||||||
|
)
|
||||||
c.NewCommand(
|
c.NewCommand(
|
||||||
"stage3",
|
"stage3",
|
||||||
"Check for toolchain 3-stage non-determinism",
|
"Check for toolchain 3-stage non-determinism",
|
||||||
func(args []string) (err error) {
|
func(args []string) (err error) {
|
||||||
_, _, _, stage2 := (rosa.Std - 1).NewLLVM()
|
t := rosa.Std
|
||||||
_, _, _, stage3 := rosa.Std.NewLLVM()
|
if flagGentoo != "" {
|
||||||
|
t -= 3 // magic number to discourage misuse
|
||||||
|
|
||||||
|
var checksum pkg.Checksum
|
||||||
|
if len(flagChecksum) != 0 {
|
||||||
|
if err = pkg.Decode(&checksum, flagChecksum); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
rosa.SetGentooStage3(flagGentoo, checksum)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, _, _, stage1 := (t - 2).NewLLVM()
|
||||||
|
_, _, _, stage2 := (t - 1).NewLLVM()
|
||||||
|
_, _, _, stage3 := t.NewLLVM()
|
||||||
var (
|
var (
|
||||||
pathname *check.Absolute
|
pathname *check.Absolute
|
||||||
checksum [2]unique.Handle[pkg.Checksum]
|
checksum [2]unique.Handle[pkg.Checksum]
|
||||||
)
|
)
|
||||||
|
|
||||||
|
if pathname, _, err = cache.Cure(stage1); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
log.Println("stage1:", pathname)
|
||||||
|
|
||||||
if pathname, checksum[0], err = cache.Cure(stage2); err != nil {
|
if pathname, checksum[0], err = cache.Cure(stage2); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -134,11 +180,46 @@ func main() {
|
|||||||
Got: checksum[0].Value(),
|
Got: checksum[0].Value(),
|
||||||
Want: checksum[1].Value(),
|
Want: checksum[1].Value(),
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
log.Println(
|
||||||
|
"stage2 is identical to stage3",
|
||||||
|
"("+pkg.Encode(checksum[0].Value())+")",
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if flagStage0 {
|
||||||
|
if pathname, _, err = cache.Cure(
|
||||||
|
t.Load(rosa.Stage0),
|
||||||
|
); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
log.Println(pathname)
|
||||||
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
},
|
},
|
||||||
|
).
|
||||||
|
Flag(
|
||||||
|
&flagGentoo,
|
||||||
|
"gentoo", command.StringFlag(""),
|
||||||
|
"Bootstrap from a Gentoo stage3 tarball",
|
||||||
|
).
|
||||||
|
Flag(
|
||||||
|
&flagChecksum,
|
||||||
|
"checksum", command.StringFlag(""),
|
||||||
|
"Checksum of Gentoo stage3 tarball",
|
||||||
|
).
|
||||||
|
Flag(
|
||||||
|
&flagStage0,
|
||||||
|
"stage0", command.BoolFlag(false),
|
||||||
|
"Create bootstrap stage0 tarball",
|
||||||
)
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
var (
|
||||||
|
flagDump string
|
||||||
|
)
|
||||||
c.NewCommand(
|
c.NewCommand(
|
||||||
"cure",
|
"cure",
|
||||||
"Cure the named artifact and show its path",
|
"Cure the named artifact and show its path",
|
||||||
@@ -146,92 +227,199 @@ func main() {
|
|||||||
if len(args) != 1 {
|
if len(args) != 1 {
|
||||||
return errors.New("cure requires 1 argument")
|
return errors.New("cure requires 1 argument")
|
||||||
}
|
}
|
||||||
var p rosa.PArtifact
|
if p, ok := rosa.ResolveName(args[0]); !ok {
|
||||||
switch args[0] {
|
return fmt.Errorf("unknown artifact %q", args[0])
|
||||||
case "acl":
|
} else if flagDump == "" {
|
||||||
p = rosa.ACL
|
|
||||||
case "attr":
|
|
||||||
p = rosa.Attr
|
|
||||||
case "autoconf":
|
|
||||||
p = rosa.Autoconf
|
|
||||||
case "bash":
|
|
||||||
p = rosa.Bash
|
|
||||||
case "busybox":
|
|
||||||
p = rosa.Busybox
|
|
||||||
case "cmake":
|
|
||||||
p = rosa.CMake
|
|
||||||
case "coreutils":
|
|
||||||
p = rosa.Coreutils
|
|
||||||
case "diffutils":
|
|
||||||
p = rosa.Diffutils
|
|
||||||
case "gettext":
|
|
||||||
p = rosa.Gettext
|
|
||||||
case "git":
|
|
||||||
p = rosa.Git
|
|
||||||
case "go":
|
|
||||||
p = rosa.Go
|
|
||||||
case "gperf":
|
|
||||||
p = rosa.Gperf
|
|
||||||
case "hakurei":
|
|
||||||
p = rosa.Hakurei
|
|
||||||
case "kernel-headers":
|
|
||||||
p = rosa.KernelHeaders
|
|
||||||
case "libXau":
|
|
||||||
p = rosa.LibXau
|
|
||||||
case "libexpat":
|
|
||||||
p = rosa.Libexpat
|
|
||||||
case "libseccomp":
|
|
||||||
p = rosa.Libseccomp
|
|
||||||
case "libxml2":
|
|
||||||
p = rosa.Libxml2
|
|
||||||
case "libffi":
|
|
||||||
p = rosa.Libffi
|
|
||||||
case "libgd":
|
|
||||||
p = rosa.Libgd
|
|
||||||
case "m4":
|
|
||||||
p = rosa.M4
|
|
||||||
case "make":
|
|
||||||
p = rosa.Make
|
|
||||||
case "meson":
|
|
||||||
p = rosa.Meson
|
|
||||||
case "ninja":
|
|
||||||
p = rosa.Ninja
|
|
||||||
case "patch":
|
|
||||||
p = rosa.Patch
|
|
||||||
case "perl":
|
|
||||||
p = rosa.Perl
|
|
||||||
case "pkg-config":
|
|
||||||
p = rosa.PkgConfig
|
|
||||||
case "python":
|
|
||||||
p = rosa.Python
|
|
||||||
case "rsync":
|
|
||||||
p = rosa.Rsync
|
|
||||||
case "setuptools":
|
|
||||||
p = rosa.Setuptools
|
|
||||||
case "wayland":
|
|
||||||
p = rosa.Wayland
|
|
||||||
case "wayland-protocols":
|
|
||||||
p = rosa.WaylandProtocols
|
|
||||||
case "xcb":
|
|
||||||
p = rosa.XCB
|
|
||||||
case "xcb-proto":
|
|
||||||
p = rosa.XCBProto
|
|
||||||
case "xproto":
|
|
||||||
p = rosa.Xproto
|
|
||||||
case "zlib":
|
|
||||||
p = rosa.Zlib
|
|
||||||
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("unsupported artifact %q", args[0])
|
|
||||||
}
|
|
||||||
|
|
||||||
pathname, _, err := cache.Cure(rosa.Std.Load(p))
|
pathname, _, err := cache.Cure(rosa.Std.Load(p))
|
||||||
if err == nil {
|
if err == nil {
|
||||||
log.Println(pathname)
|
log.Println(pathname)
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
|
} else {
|
||||||
|
f, err := os.OpenFile(
|
||||||
|
flagDump,
|
||||||
|
os.O_WRONLY|os.O_CREATE|os.O_EXCL,
|
||||||
|
0644,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = cache.EncodeAll(f, rosa.Std.Load(p)); err != nil {
|
||||||
|
_ = f.Close()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return f.Close()
|
||||||
|
}
|
||||||
},
|
},
|
||||||
|
).
|
||||||
|
Flag(
|
||||||
|
&flagDump,
|
||||||
|
"dump", command.StringFlag(""),
|
||||||
|
"Write IR to specified pathname and terminate",
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.NewCommand(
|
||||||
|
"status",
|
||||||
|
"Display the status file of an artifact",
|
||||||
|
func(args []string) error {
|
||||||
|
if len(args) != 1 {
|
||||||
|
return errors.New("status requires 1 argument")
|
||||||
|
}
|
||||||
|
if p, ok := rosa.ResolveName(args[0]); !ok {
|
||||||
|
return fmt.Errorf("unknown artifact %q", args[0])
|
||||||
|
} else {
|
||||||
|
r, err := cache.OpenStatus(rosa.Std.Load(p))
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, os.ErrNotExist) {
|
||||||
|
return errors.New(args[0] + " was never cured")
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = io.Copy(os.Stdout, r)
|
||||||
|
return errors.Join(err, r.Close())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
{
|
||||||
|
var (
|
||||||
|
flagNet bool
|
||||||
|
flagSession bool
|
||||||
|
|
||||||
|
flagWithToolchain bool
|
||||||
|
)
|
||||||
|
c.NewCommand(
|
||||||
|
"shell",
|
||||||
|
"Interactive shell in the specified Rosa OS environment",
|
||||||
|
func(args []string) error {
|
||||||
|
root := make([]pkg.Artifact, 0, 6+len(args))
|
||||||
|
for _, arg := range args {
|
||||||
|
p, ok := rosa.ResolveName(arg)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unknown artifact %q", arg)
|
||||||
|
}
|
||||||
|
root = append(root, rosa.Std.Load(p))
|
||||||
|
}
|
||||||
|
|
||||||
|
if flagWithToolchain {
|
||||||
|
musl, compilerRT, runtimes, clang := rosa.Std.NewLLVM()
|
||||||
|
root = append(root, musl, compilerRT, runtimes, clang)
|
||||||
|
} else {
|
||||||
|
root = append(root, rosa.Std.Load(rosa.Musl))
|
||||||
|
}
|
||||||
|
root = append(root,
|
||||||
|
rosa.Std.Load(rosa.Mksh),
|
||||||
|
rosa.Std.Load(rosa.Toybox),
|
||||||
|
)
|
||||||
|
|
||||||
|
type cureRes struct {
|
||||||
|
pathname *check.Absolute
|
||||||
|
checksum unique.Handle[pkg.Checksum]
|
||||||
|
}
|
||||||
|
cured := make(map[pkg.Artifact]cureRes)
|
||||||
|
for _, a := range root {
|
||||||
|
pathname, checksum, err := cache.Cure(a)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
cured[a] = cureRes{pathname, checksum}
|
||||||
|
}
|
||||||
|
|
||||||
|
layers := pkg.PromoteLayers(root, func(a pkg.Artifact) (
|
||||||
|
*check.Absolute,
|
||||||
|
unique.Handle[pkg.Checksum],
|
||||||
|
) {
|
||||||
|
res := cured[a]
|
||||||
|
return res.pathname, res.checksum
|
||||||
|
}, func(i int, d pkg.Artifact) {
|
||||||
|
r := pkg.Encode(cache.Ident(d).Value())
|
||||||
|
if s, ok := d.(fmt.Stringer); ok {
|
||||||
|
if name := s.String(); name != "" {
|
||||||
|
r += "-" + name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
msg.Verbosef("promoted layer %d as %s", i, r)
|
||||||
|
})
|
||||||
|
|
||||||
|
z := container.New(ctx, msg)
|
||||||
|
z.WaitDelay = 3 * time.Second
|
||||||
|
z.SeccompPresets = pkg.SeccompPresets
|
||||||
|
z.SeccompFlags |= seccomp.AllowMultiarch
|
||||||
|
z.ParentPerm = 0700
|
||||||
|
z.HostNet = flagNet
|
||||||
|
z.RetainSession = flagSession
|
||||||
|
z.Hostname = "localhost"
|
||||||
|
z.Uid, z.Gid = (1<<10)-1, (1<<10)-1
|
||||||
|
z.Stdin, z.Stdout, z.Stderr = os.Stdin, os.Stdout, os.Stderr
|
||||||
|
|
||||||
|
var tempdir *check.Absolute
|
||||||
|
if s, err := filepath.Abs(os.TempDir()); err != nil {
|
||||||
|
return err
|
||||||
|
} else if tempdir, err = check.NewAbs(s); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
z.Dir = fhs.AbsRoot
|
||||||
|
z.Env = []string{
|
||||||
|
"SHELL=/system/bin/mksh",
|
||||||
|
"PATH=/system/bin",
|
||||||
|
"HOME=/",
|
||||||
|
}
|
||||||
|
z.Path = rosa.AbsSystem.Append("bin", "mksh")
|
||||||
|
z.Args = []string{"mksh"}
|
||||||
|
z.
|
||||||
|
OverlayEphemeral(fhs.AbsRoot, layers...).
|
||||||
|
Place(
|
||||||
|
fhs.AbsEtc.Append("hosts"),
|
||||||
|
[]byte("127.0.0.1 localhost\n"),
|
||||||
|
).
|
||||||
|
Place(
|
||||||
|
fhs.AbsEtc.Append("passwd"),
|
||||||
|
[]byte("media_rw:x:1023:1023::/:/system/bin/sh\n"+
|
||||||
|
"nobody:x:65534:65534::/proc/nonexistent:/system/bin/false\n"),
|
||||||
|
).
|
||||||
|
Place(
|
||||||
|
fhs.AbsEtc.Append("group"),
|
||||||
|
[]byte("media_rw:x:1023:\nnobody:x:65534:\n"),
|
||||||
|
).
|
||||||
|
Bind(tempdir, fhs.AbsTmp, std.BindWritable).
|
||||||
|
Proc(fhs.AbsProc).Dev(fhs.AbsDev, true)
|
||||||
|
|
||||||
|
if err := z.Start(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := z.Serve(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return z.Wait()
|
||||||
|
},
|
||||||
|
).
|
||||||
|
Flag(
|
||||||
|
&flagNet,
|
||||||
|
"net", command.BoolFlag(false),
|
||||||
|
"Share host net namespace",
|
||||||
|
).
|
||||||
|
Flag(
|
||||||
|
&flagSession,
|
||||||
|
"session", command.BoolFlag(false),
|
||||||
|
"Retain session",
|
||||||
|
).
|
||||||
|
Flag(
|
||||||
|
&flagWithToolchain,
|
||||||
|
"with-toolchain", command.BoolFlag(false),
|
||||||
|
"Include the stage3 LLVM toolchain",
|
||||||
|
)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Command(
|
||||||
|
"help",
|
||||||
|
"Show this help message",
|
||||||
|
func([]string) error { c.PrintHelp(); return nil },
|
||||||
)
|
)
|
||||||
|
|
||||||
c.MustParse(os.Args[1:], func(err error) {
|
c.MustParse(os.Args[1:], func(err error) {
|
||||||
|
|||||||
55
cmd/pkgserver/main.go
Normal file
55
cmd/pkgserver/main.go
Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"embed"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
//go:generate sh -c "sass ui/static/dark.scss ui/static/dark.css && sass ui/static/light.scss ui/static/light.css && tsc ui/static/index.ts"
|
||||||
|
//go:embed ui/*
|
||||||
|
var content embed.FS
|
||||||
|
|
||||||
|
func serveWebUI(w http.ResponseWriter, r *http.Request) {
|
||||||
|
fmt.Printf("serveWebUI: %s\n", r.URL.Path)
|
||||||
|
w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
|
||||||
|
w.Header().Set("Pragma", "no-cache")
|
||||||
|
w.Header().Set("Expires", "0")
|
||||||
|
w.Header().Set("X-Content-Type-Options", "nosniff")
|
||||||
|
w.Header().Set("X-XSS-Protection", "1")
|
||||||
|
w.Header().Set("X-Frame-Options", "DENY")
|
||||||
|
|
||||||
|
http.ServeFileFS(w, r, content, "ui/index.html")
|
||||||
|
}
|
||||||
|
func serveStaticContent(w http.ResponseWriter, r *http.Request) {
|
||||||
|
fmt.Printf("serveStaticContent: %s\n", r.URL.Path)
|
||||||
|
switch r.URL.Path {
|
||||||
|
case "/static/style.css":
|
||||||
|
darkTheme := r.CookiesNamed("dark_theme")
|
||||||
|
if len(darkTheme) > 0 && darkTheme[0].Value == "true" {
|
||||||
|
http.ServeFileFS(w, r, content, "ui/static/dark.css")
|
||||||
|
} else {
|
||||||
|
http.ServeFileFS(w, r, content, "ui/static/light.css")
|
||||||
|
}
|
||||||
|
break
|
||||||
|
case "/favicon.ico":
|
||||||
|
http.ServeFileFS(w, r, content, "ui/static/favicon.ico")
|
||||||
|
break
|
||||||
|
case "/static/index.js":
|
||||||
|
http.ServeFileFS(w, r, content, "ui/static/index.js")
|
||||||
|
break
|
||||||
|
default:
|
||||||
|
http.NotFound(w, r)
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func serveAPI(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
|
}
|
||||||
|
func main() {
|
||||||
|
http.HandleFunc("GET /{$}", serveWebUI)
|
||||||
|
http.HandleFunc("GET /favicon.ico", serveStaticContent)
|
||||||
|
http.HandleFunc("GET /static/", serveStaticContent)
|
||||||
|
http.HandleFunc("GET /api/", serveAPI)
|
||||||
|
http.ListenAndServe(":8067", nil)
|
||||||
|
}
|
||||||
26
cmd/pkgserver/ui/index.html
Normal file
26
cmd/pkgserver/ui/index.html
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
<!DOCTYPE html>
|
||||||
|
<html lang="en">
|
||||||
|
<head>
|
||||||
|
<meta charset="UTF-8">
|
||||||
|
<link rel="stylesheet" href="static/style.css">
|
||||||
|
<title>Hakurei PkgServer</title>
|
||||||
|
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.7.1/jquery.min.js"></script>
|
||||||
|
<script src="static/index.js"></script>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<h1>Hakurei PkgServer</h1>
|
||||||
|
|
||||||
|
<table id="pkg-list">
|
||||||
|
<tr><th>Status</th><th>Name</th><th>Version</th></tr>
|
||||||
|
</table>
|
||||||
|
<p>Showing entries <span id="entry-counter"></span>.</p>
|
||||||
|
<span class="bottom-nav"><a href="javascript:prevPage()">« Previous</a> <span id="page-number">1</span> <a href="javascript:nextPage()">Next »</a></span>
|
||||||
|
<span><label for="count">Entries per page:</label><select name="count" id="count">
|
||||||
|
<option value="10">10</option>
|
||||||
|
<option value="25">25</option>
|
||||||
|
<option value="50">50</option>
|
||||||
|
<option value="100">100</option>
|
||||||
|
</select></span>
|
||||||
|
</body>
|
||||||
|
<footer>© <a href="https://hakurei.app/">Hakurei</a>. Licensed under the MIT license.</footer>
|
||||||
|
</html>
|
||||||
0
cmd/pkgserver/ui/static/_common.scss
Normal file
0
cmd/pkgserver/ui/static/_common.scss
Normal file
6
cmd/pkgserver/ui/static/dark.css
Normal file
6
cmd/pkgserver/ui/static/dark.css
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
@use 'common';
|
||||||
|
html {
|
||||||
|
background-color: #2c2c2c;
|
||||||
|
color: ghostwhite; }
|
||||||
|
|
||||||
|
/*# sourceMappingURL=dark.css.map */
|
||||||
7
cmd/pkgserver/ui/static/dark.css.map
Normal file
7
cmd/pkgserver/ui/static/dark.css.map
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
{
|
||||||
|
"version": 3,
|
||||||
|
"mappings": "AAAA,aAAa;AAEb,IAAK;EACH,gBAAgB,EAAE,OAAO;EACzB,KAAK,EAAE,UAAU",
|
||||||
|
"sources": ["dark.scss"],
|
||||||
|
"names": [],
|
||||||
|
"file": "dark.css"
|
||||||
|
}
|
||||||
6
cmd/pkgserver/ui/static/dark.scss
Normal file
6
cmd/pkgserver/ui/static/dark.scss
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
@use 'common';
|
||||||
|
|
||||||
|
html {
|
||||||
|
background-color: #2c2c2c;
|
||||||
|
color: ghostwhite;
|
||||||
|
}
|
||||||
BIN
cmd/pkgserver/ui/static/favicon.ico
Normal file
BIN
cmd/pkgserver/ui/static/favicon.ico
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 17 KiB |
67
cmd/pkgserver/ui/static/index.js
Normal file
67
cmd/pkgserver/ui/static/index.js
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
"use strict";
|
||||||
|
var PackageEntry = /** @class */ (function () {
|
||||||
|
function PackageEntry() {
|
||||||
|
}
|
||||||
|
return PackageEntry;
|
||||||
|
}());
|
||||||
|
var State = /** @class */ (function () {
|
||||||
|
function State() {
|
||||||
|
this.entriesPerPage = 10;
|
||||||
|
this.currentPage = 1;
|
||||||
|
this.entryIndex = 0;
|
||||||
|
this.loadedEntries = [];
|
||||||
|
}
|
||||||
|
State.prototype.getEntriesPerPage = function () {
|
||||||
|
return this.entriesPerPage;
|
||||||
|
};
|
||||||
|
State.prototype.setEntriesPerPage = function (entriesPerPage) {
|
||||||
|
this.entriesPerPage = entriesPerPage;
|
||||||
|
this.updateRange();
|
||||||
|
};
|
||||||
|
State.prototype.getCurrentPage = function () {
|
||||||
|
return this.currentPage;
|
||||||
|
};
|
||||||
|
State.prototype.setCurrentPage = function (page) {
|
||||||
|
this.currentPage = page;
|
||||||
|
document.getElementById("page-number").innerText = String(this.currentPage);
|
||||||
|
this.updateRange();
|
||||||
|
};
|
||||||
|
State.prototype.getEntryIndex = function () {
|
||||||
|
return this.entryIndex;
|
||||||
|
};
|
||||||
|
State.prototype.setEntryIndex = function (entryIndex) {
|
||||||
|
this.entryIndex = entryIndex;
|
||||||
|
this.updateRange();
|
||||||
|
};
|
||||||
|
State.prototype.getLoadedEntries = function () {
|
||||||
|
return this.loadedEntries;
|
||||||
|
};
|
||||||
|
State.prototype.getMaxPage = function () {
|
||||||
|
return this.loadedEntries.length / this.entriesPerPage;
|
||||||
|
};
|
||||||
|
State.prototype.updateRange = function () {
|
||||||
|
var max = Math.min(this.entryIndex + this.entriesPerPage, this.loadedEntries.length);
|
||||||
|
document.getElementById("entry-counter").innerText = "".concat(this.entryIndex, "-").concat(max, " of ").concat(this.loadedEntries.length);
|
||||||
|
};
|
||||||
|
return State;
|
||||||
|
}());
|
||||||
|
var STATE;
|
||||||
|
function prevPage() {
|
||||||
|
var current = STATE.getCurrentPage();
|
||||||
|
if (current > 1) {
|
||||||
|
STATE.setCurrentPage(STATE.getCurrentPage() - 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
function nextPage() {
|
||||||
|
var current = STATE.getCurrentPage();
|
||||||
|
if (current < STATE.getMaxPage()) {
|
||||||
|
STATE.setCurrentPage(STATE.getCurrentPage() + 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
document.addEventListener("DOMContentLoaded", function () {
|
||||||
|
STATE = new State();
|
||||||
|
STATE.updateRange();
|
||||||
|
document.getElementById("count").addEventListener("change", function (event) {
|
||||||
|
STATE.setEntriesPerPage(parseInt(event.target.value));
|
||||||
|
});
|
||||||
|
});
|
||||||
66
cmd/pkgserver/ui/static/index.ts
Normal file
66
cmd/pkgserver/ui/static/index.ts
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
"use strict"
|
||||||
|
|
||||||
|
class PackageEntry {
|
||||||
|
|
||||||
|
}
|
||||||
|
class State {
|
||||||
|
entriesPerPage: number = 10
|
||||||
|
currentPage: number = 1
|
||||||
|
entryIndex: number = 0
|
||||||
|
loadedEntries: PackageEntry[] = []
|
||||||
|
getEntriesPerPage(): number {
|
||||||
|
return this.entriesPerPage
|
||||||
|
}
|
||||||
|
setEntriesPerPage(entriesPerPage: number) {
|
||||||
|
this.entriesPerPage = entriesPerPage
|
||||||
|
this.updateRange()
|
||||||
|
}
|
||||||
|
getCurrentPage(): number {
|
||||||
|
return this.currentPage
|
||||||
|
}
|
||||||
|
setCurrentPage(page: number) {
|
||||||
|
this.currentPage = page
|
||||||
|
document.getElementById("page-number").innerText = String(this.currentPage)
|
||||||
|
this.updateRange()
|
||||||
|
}
|
||||||
|
getEntryIndex(): number {
|
||||||
|
return this.entryIndex
|
||||||
|
}
|
||||||
|
setEntryIndex(entryIndex: number) {
|
||||||
|
this.entryIndex = entryIndex
|
||||||
|
this.updateRange()
|
||||||
|
}
|
||||||
|
getLoadedEntries(): PackageEntry[] {
|
||||||
|
return this.loadedEntries
|
||||||
|
}
|
||||||
|
getMaxPage(): number {
|
||||||
|
return this.loadedEntries.length / this.entriesPerPage
|
||||||
|
}
|
||||||
|
updateRange() {
|
||||||
|
let max = Math.min(this.entryIndex + this.entriesPerPage, this.loadedEntries.length)
|
||||||
|
document.getElementById("entry-counter").innerText = `${this.entryIndex}-${max} of ${this.loadedEntries.length}`
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let STATE: State
|
||||||
|
|
||||||
|
function prevPage() {
|
||||||
|
let current = STATE.getCurrentPage()
|
||||||
|
if (current > 1) {
|
||||||
|
STATE.setCurrentPage(STATE.getCurrentPage() - 1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
function nextPage() {
|
||||||
|
let current = STATE.getCurrentPage()
|
||||||
|
if (current < STATE.getMaxPage()) {
|
||||||
|
STATE.setCurrentPage(STATE.getCurrentPage() + 1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
document.addEventListener("DOMContentLoaded", () => {
|
||||||
|
STATE = new State()
|
||||||
|
STATE.updateRange()
|
||||||
|
document.getElementById("count").addEventListener("change", (event) => {
|
||||||
|
STATE.setEntriesPerPage(parseInt((event.target as HTMLSelectElement).value))
|
||||||
|
})
|
||||||
|
})
|
||||||
6
cmd/pkgserver/ui/static/light.css
Normal file
6
cmd/pkgserver/ui/static/light.css
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
@use 'common';
|
||||||
|
html {
|
||||||
|
background-color: #d3d3d3;
|
||||||
|
color: black; }
|
||||||
|
|
||||||
|
/*# sourceMappingURL=light.css.map */
|
||||||
7
cmd/pkgserver/ui/static/light.css.map
Normal file
7
cmd/pkgserver/ui/static/light.css.map
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
{
|
||||||
|
"version": 3,
|
||||||
|
"mappings": "AAAA,aAAa;AAEb,IAAK;EACH,gBAAgB,EAAE,OAAO;EACzB,KAAK,EAAE,KAAK",
|
||||||
|
"sources": ["light.scss"],
|
||||||
|
"names": [],
|
||||||
|
"file": "light.css"
|
||||||
|
}
|
||||||
6
cmd/pkgserver/ui/static/light.scss
Normal file
6
cmd/pkgserver/ui/static/light.scss
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
@use 'common';
|
||||||
|
|
||||||
|
html {
|
||||||
|
background-color: #d3d3d3;
|
||||||
|
color: black;
|
||||||
|
}
|
||||||
@@ -33,6 +33,7 @@ import (
|
|||||||
|
|
||||||
"hakurei.app/container"
|
"hakurei.app/container"
|
||||||
"hakurei.app/container/check"
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/container/fhs"
|
||||||
"hakurei.app/container/std"
|
"hakurei.app/container/std"
|
||||||
"hakurei.app/hst"
|
"hakurei.app/hst"
|
||||||
"hakurei.app/internal/helper/proc"
|
"hakurei.app/internal/helper/proc"
|
||||||
@@ -441,12 +442,7 @@ func _main(s ...string) (exitCode int) {
|
|||||||
// keep fuse_parse_cmdline happy in the container
|
// keep fuse_parse_cmdline happy in the container
|
||||||
z.Tmpfs(check.MustAbs(container.Nonexistent), 1<<10, 0755)
|
z.Tmpfs(check.MustAbs(container.Nonexistent), 1<<10, 0755)
|
||||||
|
|
||||||
if a, err := check.NewAbs(container.MustExecutable(msg)); err != nil {
|
z.Path = fhs.AbsProcSelfExe
|
||||||
log.Println(err)
|
|
||||||
return 5
|
|
||||||
} else {
|
|
||||||
z.Path = a
|
|
||||||
}
|
|
||||||
z.Args = s
|
z.Args = s
|
||||||
z.ForwardCancel = true
|
z.ForwardCancel = true
|
||||||
z.SeccompPresets |= std.PresetStrict
|
z.SeccompPresets |= std.PresetStrict
|
||||||
|
|||||||
@@ -10,8 +10,7 @@ import (
|
|||||||
|
|
||||||
func init() { gob.Register(new(AutoEtcOp)) }
|
func init() { gob.Register(new(AutoEtcOp)) }
|
||||||
|
|
||||||
// Etc appends an [Op] that expands host /etc into a toplevel symlink mirror with /etc semantics.
|
// Etc is a helper for appending [AutoEtcOp] to [Ops].
|
||||||
// This is not a generic setup op. It is implemented here to reduce ipc overhead.
|
|
||||||
func (f *Ops) Etc(host *check.Absolute, prefix string) *Ops {
|
func (f *Ops) Etc(host *check.Absolute, prefix string) *Ops {
|
||||||
e := &AutoEtcOp{prefix}
|
e := &AutoEtcOp{prefix}
|
||||||
f.Mkdir(fhs.AbsEtc, 0755)
|
f.Mkdir(fhs.AbsEtc, 0755)
|
||||||
@@ -20,6 +19,9 @@ func (f *Ops) Etc(host *check.Absolute, prefix string) *Ops {
|
|||||||
return f
|
return f
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AutoEtcOp expands host /etc into a toplevel symlink mirror with /etc semantics.
|
||||||
|
//
|
||||||
|
// This is not a generic setup op. It is implemented here to reduce ipc overhead.
|
||||||
type AutoEtcOp struct{ Prefix string }
|
type AutoEtcOp struct{ Prefix string }
|
||||||
|
|
||||||
func (e *AutoEtcOp) Valid() bool { return e != nil }
|
func (e *AutoEtcOp) Valid() bool { return e != nil }
|
||||||
|
|||||||
@@ -11,13 +11,15 @@ import (
|
|||||||
|
|
||||||
func init() { gob.Register(new(AutoRootOp)) }
|
func init() { gob.Register(new(AutoRootOp)) }
|
||||||
|
|
||||||
// Root appends an [Op] that expands a directory into a toplevel bind mount mirror on container root.
|
// Root is a helper for appending [AutoRootOp] to [Ops].
|
||||||
// This is not a generic setup op. It is implemented here to reduce ipc overhead.
|
|
||||||
func (f *Ops) Root(host *check.Absolute, flags int) *Ops {
|
func (f *Ops) Root(host *check.Absolute, flags int) *Ops {
|
||||||
*f = append(*f, &AutoRootOp{host, flags, nil})
|
*f = append(*f, &AutoRootOp{host, flags, nil})
|
||||||
return f
|
return f
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AutoRootOp expands a directory into a toplevel bind mount mirror on container root.
|
||||||
|
//
|
||||||
|
// This is not a generic setup op. It is implemented here to reduce ipc overhead.
|
||||||
type AutoRootOp struct {
|
type AutoRootOp struct {
|
||||||
Host *check.Absolute
|
Host *check.Absolute
|
||||||
// passed through to bindMount
|
// passed through to bindMount
|
||||||
|
|||||||
@@ -50,10 +50,16 @@ func capset(hdrp *capHeader, datap *[2]capData) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// capBoundingSetDrop drops a capability from the calling thread's capability bounding set.
|
// capBoundingSetDrop drops a capability from the calling thread's capability bounding set.
|
||||||
func capBoundingSetDrop(cap uintptr) error { return Prctl(syscall.PR_CAPBSET_DROP, cap, 0) }
|
func capBoundingSetDrop(cap uintptr) error {
|
||||||
|
return Prctl(syscall.PR_CAPBSET_DROP, cap, 0)
|
||||||
|
}
|
||||||
|
|
||||||
// capAmbientClearAll clears the ambient capability set of the calling thread.
|
// capAmbientClearAll clears the ambient capability set of the calling thread.
|
||||||
func capAmbientClearAll() error { return Prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_CLEAR_ALL, 0) }
|
func capAmbientClearAll() error {
|
||||||
|
return Prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_CLEAR_ALL, 0)
|
||||||
|
}
|
||||||
|
|
||||||
// capAmbientRaise adds to the ambient capability set of the calling thread.
|
// capAmbientRaise adds to the ambient capability set of the calling thread.
|
||||||
func capAmbientRaise(cap uintptr) error { return Prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, cap) }
|
func capAmbientRaise(cap uintptr) error {
|
||||||
|
return Prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, cap)
|
||||||
|
}
|
||||||
|
|||||||
@@ -11,7 +11,8 @@ const (
|
|||||||
SpecialOverlayPath = ":"
|
SpecialOverlayPath = ":"
|
||||||
)
|
)
|
||||||
|
|
||||||
// EscapeOverlayDataSegment escapes a string for formatting into the data argument of an overlay mount call.
|
// EscapeOverlayDataSegment escapes a string for formatting into the data
|
||||||
|
// argument of an overlay mount system call.
|
||||||
func EscapeOverlayDataSegment(s string) string {
|
func EscapeOverlayDataSegment(s string) string {
|
||||||
if s == "" {
|
if s == "" {
|
||||||
return ""
|
return ""
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
// Package container implements unprivileged Linux containers with built-in support for syscall filtering.
|
// Package container implements unprivileged Linux containers with built-in
|
||||||
|
// support for syscall filtering.
|
||||||
package container
|
package container
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -37,24 +38,30 @@ type (
|
|||||||
Container struct {
|
Container struct {
|
||||||
// Whether the container init should stay alive after its parent terminates.
|
// Whether the container init should stay alive after its parent terminates.
|
||||||
AllowOrphan bool
|
AllowOrphan bool
|
||||||
|
// Scheduling policy to set via sched_setscheduler(2). The zero value
|
||||||
|
// skips this call. Supported policies are [SCHED_BATCH], [SCHED_IDLE].
|
||||||
|
SchedPolicy int
|
||||||
// Cgroup fd, nil to disable.
|
// Cgroup fd, nil to disable.
|
||||||
Cgroup *int
|
Cgroup *int
|
||||||
// ExtraFiles passed through to initial process in the container,
|
// ExtraFiles passed through to initial process in the container, with
|
||||||
// with behaviour identical to its [exec.Cmd] counterpart.
|
// behaviour identical to its [exec.Cmd] counterpart.
|
||||||
ExtraFiles []*os.File
|
ExtraFiles []*os.File
|
||||||
|
|
||||||
// param pipe for shim and init
|
// Write end of a pipe connected to the init to deliver [Params].
|
||||||
setup *os.File
|
setup *os.File
|
||||||
// cancels cmd
|
// Cancels the context passed to the underlying cmd.
|
||||||
cancel context.CancelFunc
|
cancel context.CancelFunc
|
||||||
// closed after Wait returns
|
// Closed after Wait returns. Keeps the spawning thread alive.
|
||||||
wait chan struct{}
|
wait chan struct{}
|
||||||
|
|
||||||
Stdin io.Reader
|
Stdin io.Reader
|
||||||
Stdout io.Writer
|
Stdout io.Writer
|
||||||
Stderr io.Writer
|
Stderr io.Writer
|
||||||
|
|
||||||
|
// Custom cancellation behaviour for the underlying [exec.Cmd]. Must
|
||||||
|
// deliver [CancelSignal] before returning.
|
||||||
Cancel func(cmd *exec.Cmd) error
|
Cancel func(cmd *exec.Cmd) error
|
||||||
|
// Copied to the underlying [exec.Cmd].
|
||||||
WaitDelay time.Duration
|
WaitDelay time.Duration
|
||||||
|
|
||||||
cmd *exec.Cmd
|
cmd *exec.Cmd
|
||||||
@@ -283,7 +290,11 @@ func (p *Container) Start() error {
|
|||||||
|
|
||||||
// place setup pipe before user supplied extra files, this is later restored by init
|
// place setup pipe before user supplied extra files, this is later restored by init
|
||||||
if fd, f, err := Setup(&p.cmd.ExtraFiles); err != nil {
|
if fd, f, err := Setup(&p.cmd.ExtraFiles); err != nil {
|
||||||
return &StartError{true, "set up params stream", err, false, false}
|
return &StartError{
|
||||||
|
Fatal: true,
|
||||||
|
Step: "set up params stream",
|
||||||
|
Err: err,
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
p.setup = f
|
p.setup = f
|
||||||
p.cmd.Env = []string{setupEnv + "=" + strconv.Itoa(fd)}
|
p.cmd.Env = []string{setupEnv + "=" + strconv.Itoa(fd)}
|
||||||
@@ -295,10 +306,16 @@ func (p *Container) Start() error {
|
|||||||
runtime.LockOSThread()
|
runtime.LockOSThread()
|
||||||
p.wait = make(chan struct{})
|
p.wait = make(chan struct{})
|
||||||
|
|
||||||
done <- func() error { // setup depending on per-thread state must happen here
|
// setup depending on per-thread state must happen here
|
||||||
// PR_SET_NO_NEW_PRIVS: depends on per-thread state but acts on all processes created from that thread
|
done <- func() error {
|
||||||
|
// PR_SET_NO_NEW_PRIVS: thread-directed but acts on all processes
|
||||||
|
// created from the calling thread
|
||||||
if err := SetNoNewPrivs(); err != nil {
|
if err := SetNoNewPrivs(); err != nil {
|
||||||
return &StartError{true, "prctl(PR_SET_NO_NEW_PRIVS)", err, false, false}
|
return &StartError{
|
||||||
|
Fatal: true,
|
||||||
|
Step: "prctl(PR_SET_NO_NEW_PRIVS)",
|
||||||
|
Err: err,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// landlock: depends on per-thread state but acts on a process group
|
// landlock: depends on per-thread state but acts on a process group
|
||||||
@@ -310,28 +327,40 @@ func (p *Container) Start() error {
|
|||||||
|
|
||||||
if abi, err := LandlockGetABI(); err != nil {
|
if abi, err := LandlockGetABI(); err != nil {
|
||||||
if p.HostAbstract {
|
if p.HostAbstract {
|
||||||
// landlock can be skipped here as it restricts access to resources
|
// landlock can be skipped here as it restricts access
|
||||||
// already covered by namespaces (pid)
|
// to resources already covered by namespaces (pid)
|
||||||
goto landlockOut
|
goto landlockOut
|
||||||
}
|
}
|
||||||
return &StartError{false, "get landlock ABI", err, false, false}
|
return &StartError{Step: "get landlock ABI", Err: err}
|
||||||
} else if abi < 6 {
|
} else if abi < 6 {
|
||||||
if p.HostAbstract {
|
if p.HostAbstract {
|
||||||
// see above comment
|
// see above comment
|
||||||
goto landlockOut
|
goto landlockOut
|
||||||
}
|
}
|
||||||
return &StartError{false, "kernel version too old for LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET", ENOSYS, true, false}
|
return &StartError{
|
||||||
|
Step: "kernel too old for LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET",
|
||||||
|
Err: ENOSYS,
|
||||||
|
Origin: true,
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
p.msg.Verbosef("landlock abi version %d", abi)
|
p.msg.Verbosef("landlock abi version %d", abi)
|
||||||
}
|
}
|
||||||
|
|
||||||
if rulesetFd, err := rulesetAttr.Create(0); err != nil {
|
if rulesetFd, err := rulesetAttr.Create(0); err != nil {
|
||||||
return &StartError{true, "create landlock ruleset", err, false, false}
|
return &StartError{
|
||||||
|
Fatal: true,
|
||||||
|
Step: "create landlock ruleset",
|
||||||
|
Err: err,
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
p.msg.Verbosef("enforcing landlock ruleset %s", rulesetAttr)
|
p.msg.Verbosef("enforcing landlock ruleset %s", rulesetAttr)
|
||||||
if err = LandlockRestrictSelf(rulesetFd, 0); err != nil {
|
if err = LandlockRestrictSelf(rulesetFd, 0); err != nil {
|
||||||
_ = Close(rulesetFd)
|
_ = Close(rulesetFd)
|
||||||
return &StartError{true, "enforce landlock ruleset", err, false, false}
|
return &StartError{
|
||||||
|
Fatal: true,
|
||||||
|
Step: "enforce landlock ruleset",
|
||||||
|
Err: err,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if err = Close(rulesetFd); err != nil {
|
if err = Close(rulesetFd); err != nil {
|
||||||
p.msg.Verbosef("cannot close landlock ruleset: %v", err)
|
p.msg.Verbosef("cannot close landlock ruleset: %v", err)
|
||||||
@@ -342,9 +371,30 @@ func (p *Container) Start() error {
|
|||||||
landlockOut:
|
landlockOut:
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// sched_setscheduler: thread-directed but acts on all processes
|
||||||
|
// created from the calling thread
|
||||||
|
if p.SchedPolicy > 0 {
|
||||||
|
p.msg.Verbosef("setting scheduling policy %d", p.SchedPolicy)
|
||||||
|
if err := schedSetscheduler(
|
||||||
|
0, // calling thread
|
||||||
|
p.SchedPolicy,
|
||||||
|
&schedParam{0},
|
||||||
|
); err != nil {
|
||||||
|
return &StartError{
|
||||||
|
Fatal: true,
|
||||||
|
Step: "enforce landlock ruleset",
|
||||||
|
Err: err,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
p.msg.Verbose("starting container init")
|
p.msg.Verbose("starting container init")
|
||||||
if err := p.cmd.Start(); err != nil {
|
if err := p.cmd.Start(); err != nil {
|
||||||
return &StartError{false, "start container init", err, false, true}
|
return &StartError{
|
||||||
|
Step: "start container init",
|
||||||
|
Err: err,
|
||||||
|
Passthrough: true,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}()
|
}()
|
||||||
@@ -356,6 +406,7 @@ func (p *Container) Start() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Serve serves [Container.Params] to the container init.
|
// Serve serves [Container.Params] to the container init.
|
||||||
|
//
|
||||||
// Serve must only be called once.
|
// Serve must only be called once.
|
||||||
func (p *Container) Serve() error {
|
func (p *Container) Serve() error {
|
||||||
if p.setup == nil {
|
if p.setup == nil {
|
||||||
@@ -365,12 +416,21 @@ func (p *Container) Serve() error {
|
|||||||
setup := p.setup
|
setup := p.setup
|
||||||
p.setup = nil
|
p.setup = nil
|
||||||
if err := setup.SetDeadline(time.Now().Add(initSetupTimeout)); err != nil {
|
if err := setup.SetDeadline(time.Now().Add(initSetupTimeout)); err != nil {
|
||||||
return &StartError{true, "set init pipe deadline", err, false, true}
|
return &StartError{
|
||||||
|
Fatal: true,
|
||||||
|
Step: "set init pipe deadline",
|
||||||
|
Err: err,
|
||||||
|
Passthrough: true,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if p.Path == nil {
|
if p.Path == nil {
|
||||||
p.cancel()
|
p.cancel()
|
||||||
return &StartError{false, "invalid executable pathname", EINVAL, true, false}
|
return &StartError{
|
||||||
|
Step: "invalid executable pathname",
|
||||||
|
Err: EINVAL,
|
||||||
|
Origin: true,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// do not transmit nil
|
// do not transmit nil
|
||||||
@@ -395,7 +455,8 @@ func (p *Container) Serve() error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wait waits for the container init process to exit and releases any resources associated with the [Container].
|
// Wait blocks until the container init process to exit and releases any
|
||||||
|
// resources associated with the [Container].
|
||||||
func (p *Container) Wait() error {
|
func (p *Container) Wait() error {
|
||||||
if p.cmd == nil || p.cmd.Process == nil {
|
if p.cmd == nil || p.cmd.Process == nil {
|
||||||
return EINVAL
|
return EINVAL
|
||||||
@@ -440,11 +501,13 @@ func (p *Container) StderrPipe() (r io.ReadCloser, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (p *Container) String() string {
|
func (p *Container) String() string {
|
||||||
return fmt.Sprintf("argv: %q, filter: %v, rules: %d, flags: %#x, presets: %#x",
|
return fmt.Sprintf(
|
||||||
p.Args, !p.SeccompDisable, len(p.SeccompRules), int(p.SeccompFlags), int(p.SeccompPresets))
|
"argv: %q, filter: %v, rules: %d, flags: %#x, presets: %#x",
|
||||||
|
p.Args, !p.SeccompDisable, len(p.SeccompRules), int(p.SeccompFlags), int(p.SeccompPresets),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ProcessState returns the address to os.ProcessState held by the underlying [exec.Cmd].
|
// ProcessState returns the address of os.ProcessState held by the underlying [exec.Cmd].
|
||||||
func (p *Container) ProcessState() *os.ProcessState {
|
func (p *Container) ProcessState() *os.ProcessState {
|
||||||
if p.cmd == nil {
|
if p.cmd == nil {
|
||||||
return nil
|
return nil
|
||||||
@@ -452,7 +515,8 @@ func (p *Container) ProcessState() *os.ProcessState {
|
|||||||
return p.cmd.ProcessState
|
return p.cmd.ProcessState
|
||||||
}
|
}
|
||||||
|
|
||||||
// New returns the address to a new instance of [Container] that requires further initialisation before use.
|
// New returns the address to a new instance of [Container]. This value requires
|
||||||
|
// further initialisation before use.
|
||||||
func New(ctx context.Context, msg message.Msg) *Container {
|
func New(ctx context.Context, msg message.Msg) *Container {
|
||||||
if msg == nil {
|
if msg == nil {
|
||||||
msg = message.New(nil)
|
msg = message.New(nil)
|
||||||
@@ -461,12 +525,18 @@ func New(ctx context.Context, msg message.Msg) *Container {
|
|||||||
p := &Container{ctx: ctx, msg: msg, Params: Params{Ops: new(Ops)}}
|
p := &Container{ctx: ctx, msg: msg, Params: Params{Ops: new(Ops)}}
|
||||||
c, cancel := context.WithCancel(ctx)
|
c, cancel := context.WithCancel(ctx)
|
||||||
p.cancel = cancel
|
p.cancel = cancel
|
||||||
p.cmd = exec.CommandContext(c, MustExecutable(msg))
|
p.cmd = exec.CommandContext(c, fhs.ProcSelfExe)
|
||||||
return p
|
return p
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewCommand calls [New] and initialises the [Params.Path] and [Params.Args] fields.
|
// NewCommand calls [New] and initialises the [Params.Path] and [Params.Args] fields.
|
||||||
func NewCommand(ctx context.Context, msg message.Msg, pathname *check.Absolute, name string, args ...string) *Container {
|
func NewCommand(
|
||||||
|
ctx context.Context,
|
||||||
|
msg message.Msg,
|
||||||
|
pathname *check.Absolute,
|
||||||
|
name string,
|
||||||
|
args ...string,
|
||||||
|
) *Container {
|
||||||
z := New(ctx, msg)
|
z := New(ctx, msg)
|
||||||
z.Path = pathname
|
z.Path = pathname
|
||||||
z.Args = append([]string{name}, args...)
|
z.Args = append([]string{name}, args...)
|
||||||
|
|||||||
@@ -274,13 +274,13 @@ var containerTestCases = []struct {
|
|||||||
Dev(check.MustAbs("/dev"), true),
|
Dev(check.MustAbs("/dev"), true),
|
||||||
),
|
),
|
||||||
earlyMnt(
|
earlyMnt(
|
||||||
ent("/", "/dev", "ro,nosuid,nodev,relatime", "tmpfs", "devtmpfs", ignore),
|
ent("/", "/dev", "ro,nosuid,nodev,relatime", "tmpfs", ignore, ignore),
|
||||||
ent("/null", "/dev/null", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
|
ent("/null", "/dev/null", ignore, "devtmpfs", ignore, ignore),
|
||||||
ent("/zero", "/dev/zero", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
|
ent("/zero", "/dev/zero", ignore, "devtmpfs", ignore, ignore),
|
||||||
ent("/full", "/dev/full", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
|
ent("/full", "/dev/full", ignore, "devtmpfs", ignore, ignore),
|
||||||
ent("/random", "/dev/random", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
|
ent("/random", "/dev/random", ignore, "devtmpfs", ignore, ignore),
|
||||||
ent("/urandom", "/dev/urandom", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
|
ent("/urandom", "/dev/urandom", ignore, "devtmpfs", ignore, ignore),
|
||||||
ent("/tty", "/dev/tty", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
|
ent("/tty", "/dev/tty", ignore, "devtmpfs", ignore, ignore),
|
||||||
ent("/", "/dev/pts", "rw,nosuid,noexec,relatime", "devpts", "devpts", "rw,mode=620,ptmxmode=666"),
|
ent("/", "/dev/pts", "rw,nosuid,noexec,relatime", "devpts", "devpts", "rw,mode=620,ptmxmode=666"),
|
||||||
ent("/", "/dev/mqueue", "rw,nosuid,nodev,noexec,relatime", "mqueue", "mqueue", "rw"),
|
ent("/", "/dev/mqueue", "rw,nosuid,nodev,noexec,relatime", "mqueue", "mqueue", "rw"),
|
||||||
ent("/", "/dev/shm", "rw,nosuid,nodev,relatime", "tmpfs", "tmpfs", ignore),
|
ent("/", "/dev/shm", "rw,nosuid,nodev,relatime", "tmpfs", "tmpfs", ignore),
|
||||||
@@ -292,13 +292,13 @@ var containerTestCases = []struct {
|
|||||||
Dev(check.MustAbs("/dev"), false),
|
Dev(check.MustAbs("/dev"), false),
|
||||||
),
|
),
|
||||||
earlyMnt(
|
earlyMnt(
|
||||||
ent("/", "/dev", "ro,nosuid,nodev,relatime", "tmpfs", "devtmpfs", ignore),
|
ent("/", "/dev", "ro,nosuid,nodev,relatime", "tmpfs", ignore, ignore),
|
||||||
ent("/null", "/dev/null", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
|
ent("/null", "/dev/null", ignore, "devtmpfs", ignore, ignore),
|
||||||
ent("/zero", "/dev/zero", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
|
ent("/zero", "/dev/zero", ignore, "devtmpfs", ignore, ignore),
|
||||||
ent("/full", "/dev/full", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
|
ent("/full", "/dev/full", ignore, "devtmpfs", ignore, ignore),
|
||||||
ent("/random", "/dev/random", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
|
ent("/random", "/dev/random", ignore, "devtmpfs", ignore, ignore),
|
||||||
ent("/urandom", "/dev/urandom", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
|
ent("/urandom", "/dev/urandom", ignore, "devtmpfs", ignore, ignore),
|
||||||
ent("/tty", "/dev/tty", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
|
ent("/tty", "/dev/tty", ignore, "devtmpfs", ignore, ignore),
|
||||||
ent("/", "/dev/pts", "rw,nosuid,noexec,relatime", "devpts", "devpts", "rw,mode=620,ptmxmode=666"),
|
ent("/", "/dev/pts", "rw,nosuid,noexec,relatime", "devpts", "devpts", "rw,mode=620,ptmxmode=666"),
|
||||||
ent("/", "/dev/shm", "rw,nosuid,nodev,relatime", "tmpfs", "tmpfs", ignore),
|
ent("/", "/dev/shm", "rw,nosuid,nodev,relatime", "tmpfs", "tmpfs", ignore),
|
||||||
),
|
),
|
||||||
@@ -690,11 +690,22 @@ func init() {
|
|||||||
return fmt.Errorf("got more than %d entries", len(mnt))
|
return fmt.Errorf("got more than %d entries", len(mnt))
|
||||||
}
|
}
|
||||||
|
|
||||||
// ugly hack but should be reliable and is less likely to false negative than comparing by parsed flags
|
// ugly hack but should be reliable and is less likely to
|
||||||
cur.VfsOptstr = strings.TrimSuffix(cur.VfsOptstr, ",relatime")
|
//false negative than comparing by parsed flags
|
||||||
cur.VfsOptstr = strings.TrimSuffix(cur.VfsOptstr, ",noatime")
|
for _, s := range []string{
|
||||||
mnt[i].VfsOptstr = strings.TrimSuffix(mnt[i].VfsOptstr, ",relatime")
|
"relatime",
|
||||||
mnt[i].VfsOptstr = strings.TrimSuffix(mnt[i].VfsOptstr, ",noatime")
|
"noatime",
|
||||||
|
} {
|
||||||
|
cur.VfsOptstr = strings.TrimSuffix(cur.VfsOptstr, ","+s)
|
||||||
|
mnt[i].VfsOptstr = strings.TrimSuffix(mnt[i].VfsOptstr, ","+s)
|
||||||
|
}
|
||||||
|
for _, s := range []string{
|
||||||
|
"seclabel",
|
||||||
|
"inode64",
|
||||||
|
} {
|
||||||
|
cur.FsOptstr = strings.Replace(cur.FsOptstr, ","+s, "", 1)
|
||||||
|
mnt[i].FsOptstr = strings.Replace(mnt[i].FsOptstr, ","+s, "", 1)
|
||||||
|
}
|
||||||
|
|
||||||
if !cur.EqualWithIgnore(mnt[i], "\x00") {
|
if !cur.EqualWithIgnore(mnt[i], "\x00") {
|
||||||
fail = true
|
fail = true
|
||||||
@@ -762,14 +773,13 @@ func TestMain(m *testing.M) {
|
|||||||
func helperNewContainerLibPaths(ctx context.Context, libPaths *[]*check.Absolute, args ...string) (c *container.Container) {
|
func helperNewContainerLibPaths(ctx context.Context, libPaths *[]*check.Absolute, args ...string) (c *container.Container) {
|
||||||
msg := message.New(nil)
|
msg := message.New(nil)
|
||||||
msg.SwapVerbose(testing.Verbose())
|
msg.SwapVerbose(testing.Verbose())
|
||||||
executable := check.MustAbs(container.MustExecutable(msg))
|
|
||||||
|
|
||||||
c = container.NewCommand(ctx, msg, absHelperInnerPath, "helper", args...)
|
c = container.NewCommand(ctx, msg, absHelperInnerPath, "helper", args...)
|
||||||
c.Env = append(c.Env, envDoCheck+"=1")
|
c.Env = append(c.Env, envDoCheck+"=1")
|
||||||
c.Bind(executable, absHelperInnerPath, 0)
|
c.Bind(fhs.AbsProcSelfExe, absHelperInnerPath, 0)
|
||||||
|
|
||||||
// in case test has cgo enabled
|
// in case test has cgo enabled
|
||||||
if entries, err := ldd.Resolve(ctx, msg, executable); err != nil {
|
if entries, err := ldd.Resolve(ctx, msg, nil); err != nil {
|
||||||
log.Fatalf("ldd: %v", err)
|
log.Fatalf("ldd: %v", err)
|
||||||
} else {
|
} else {
|
||||||
*libPaths = ldd.Path(entries)
|
*libPaths = ldd.Path(entries)
|
||||||
|
|||||||
@@ -21,7 +21,8 @@ type osFile interface {
|
|||||||
fs.File
|
fs.File
|
||||||
}
|
}
|
||||||
|
|
||||||
// syscallDispatcher provides methods that make state-dependent system calls as part of their behaviour.
|
// syscallDispatcher provides methods that make state-dependent system calls as
|
||||||
|
// part of their behaviour.
|
||||||
type syscallDispatcher interface {
|
type syscallDispatcher interface {
|
||||||
// new starts a goroutine with a new instance of syscallDispatcher.
|
// new starts a goroutine with a new instance of syscallDispatcher.
|
||||||
// A syscallDispatcher must never be used in any goroutine other than the one owning it,
|
// A syscallDispatcher must never be used in any goroutine other than the one owning it,
|
||||||
|
|||||||
@@ -238,8 +238,11 @@ func sliceAddr[S any](s []S) *[]S { return &s }
|
|||||||
|
|
||||||
func newCheckedFile(t *testing.T, name, wantData string, closeErr error) osFile {
|
func newCheckedFile(t *testing.T, name, wantData string, closeErr error) osFile {
|
||||||
f := &checkedOsFile{t: t, name: name, want: wantData, closeErr: closeErr}
|
f := &checkedOsFile{t: t, name: name, want: wantData, closeErr: closeErr}
|
||||||
// check happens in Close, and cleanup is not guaranteed to run, so relying on it for sloppy implementations will cause sporadic test results
|
// check happens in Close, and cleanup is not guaranteed to run, so relying
|
||||||
f.cleanup = runtime.AddCleanup(f, func(name string) { f.t.Fatalf("checkedOsFile %s became unreachable without a call to Close", name) }, f.name)
|
// on it for sloppy implementations will cause sporadic test results
|
||||||
|
f.cleanup = runtime.AddCleanup(f, func(name string) {
|
||||||
|
panic("checkedOsFile " + name + " became unreachable without a call to Close")
|
||||||
|
}, name)
|
||||||
return f
|
return f
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -43,7 +43,8 @@ func messageFromError(err error) (m string, ok bool) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// messagePrefix checks and prefixes the error message of a non-pointer error.
|
// messagePrefix checks and prefixes the error message of a non-pointer error.
|
||||||
// While this is usable for pointer errors, such use should be avoided as nil check is omitted.
|
// While this is usable for pointer errors, such use should be avoided as nil
|
||||||
|
// check is omitted.
|
||||||
func messagePrefix[T error](prefix string, err error) (string, bool) {
|
func messagePrefix[T error](prefix string, err error) (string, bool) {
|
||||||
var targetError T
|
var targetError T
|
||||||
if errors.As(err, &targetError) {
|
if errors.As(err, &targetError) {
|
||||||
|
|||||||
@@ -28,6 +28,9 @@ func copyExecutable(msg message.Msg) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MustExecutable calls [os.Executable] and terminates the process on error.
|
||||||
|
//
|
||||||
|
// Deprecated: This is no longer used and will be removed in 0.4.
|
||||||
func MustExecutable(msg message.Msg) string {
|
func MustExecutable(msg message.Msg) string {
|
||||||
executableOnce.Do(func() { copyExecutable(msg) })
|
executableOnce.Do(func() { copyExecutable(msg) })
|
||||||
return executable
|
return executable
|
||||||
|
|||||||
@@ -42,6 +42,8 @@ var (
|
|||||||
AbsDevShm = unsafeAbs(DevShm)
|
AbsDevShm = unsafeAbs(DevShm)
|
||||||
// AbsProc is [Proc] as [check.Absolute].
|
// AbsProc is [Proc] as [check.Absolute].
|
||||||
AbsProc = unsafeAbs(Proc)
|
AbsProc = unsafeAbs(Proc)
|
||||||
|
// AbsProcSelfExe is [ProcSelfExe] as [check.Absolute].
|
||||||
|
AbsProcSelfExe = unsafeAbs(ProcSelfExe)
|
||||||
// AbsSys is [Sys] as [check.Absolute].
|
// AbsSys is [Sys] as [check.Absolute].
|
||||||
AbsSys = unsafeAbs(Sys)
|
AbsSys = unsafeAbs(Sys)
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -9,7 +9,8 @@ const (
|
|||||||
// Tmp points to the place for small temporary files.
|
// Tmp points to the place for small temporary files.
|
||||||
Tmp = "/tmp/"
|
Tmp = "/tmp/"
|
||||||
|
|
||||||
// Run points to a "tmpfs" file system for system packages to place runtime data, socket files, and similar.
|
// Run points to a "tmpfs" file system for system packages to place runtime
|
||||||
|
// data, socket files, and similar.
|
||||||
Run = "/run/"
|
Run = "/run/"
|
||||||
// RunUser points to a directory containing per-user runtime directories,
|
// RunUser points to a directory containing per-user runtime directories,
|
||||||
// each usually individually mounted "tmpfs" instances.
|
// each usually individually mounted "tmpfs" instances.
|
||||||
@@ -17,10 +18,12 @@ const (
|
|||||||
|
|
||||||
// Usr points to vendor-supplied operating system resources.
|
// Usr points to vendor-supplied operating system resources.
|
||||||
Usr = "/usr/"
|
Usr = "/usr/"
|
||||||
// UsrBin points to binaries and executables for user commands that shall appear in the $PATH search path.
|
// UsrBin points to binaries and executables for user commands that shall
|
||||||
|
// appear in the $PATH search path.
|
||||||
UsrBin = Usr + "bin/"
|
UsrBin = Usr + "bin/"
|
||||||
|
|
||||||
// Var points to persistent, variable system data. Writable during normal system operation.
|
// Var points to persistent, variable system data. Writable during normal
|
||||||
|
// system operation.
|
||||||
Var = "/var/"
|
Var = "/var/"
|
||||||
// VarLib points to persistent system data.
|
// VarLib points to persistent system data.
|
||||||
VarLib = Var + "lib/"
|
VarLib = Var + "lib/"
|
||||||
@@ -29,12 +32,20 @@ const (
|
|||||||
|
|
||||||
// Dev points to the root directory for device nodes.
|
// Dev points to the root directory for device nodes.
|
||||||
Dev = "/dev/"
|
Dev = "/dev/"
|
||||||
// DevShm is the place for POSIX shared memory segments, as created via shm_open(3).
|
// DevShm is the place for POSIX shared memory segments, as created via
|
||||||
|
// shm_open(3).
|
||||||
DevShm = "/dev/shm/"
|
DevShm = "/dev/shm/"
|
||||||
// Proc points to a virtual kernel file system exposing the process list and other functionality.
|
// Proc points to a virtual kernel file system exposing the process list and
|
||||||
|
// other functionality.
|
||||||
Proc = "/proc/"
|
Proc = "/proc/"
|
||||||
// ProcSys points to a hierarchy below /proc/ that exposes a number of kernel tunables.
|
// ProcSys points to a hierarchy below /proc/ that exposes a number of
|
||||||
|
// kernel tunables.
|
||||||
ProcSys = Proc + "sys/"
|
ProcSys = Proc + "sys/"
|
||||||
// Sys points to a virtual kernel file system exposing discovered devices and other functionality.
|
// ProcSelf resolves to the process's own /proc/pid directory.
|
||||||
|
ProcSelf = Proc + "self/"
|
||||||
|
// ProcSelfExe is a symbolic link to program pathname.
|
||||||
|
ProcSelfExe = ProcSelf + "exe"
|
||||||
|
// Sys points to a virtual kernel file system exposing discovered devices
|
||||||
|
// and other functionality.
|
||||||
Sys = "/sys/"
|
Sys = "/sys/"
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -33,12 +33,12 @@ const (
|
|||||||
- This path is only accessible by init and root:
|
- This path is only accessible by init and root:
|
||||||
The container init sets SUID_DUMP_DISABLE and terminates if that fails.
|
The container init sets SUID_DUMP_DISABLE and terminates if that fails.
|
||||||
|
|
||||||
It should be noted that none of this should become relevant at any point since the resulting
|
It should be noted that none of this should become relevant at any point
|
||||||
intermediate root tmpfs should be effectively anonymous. */
|
since the resulting intermediate root tmpfs should be effectively anonymous. */
|
||||||
intermediateHostPath = fhs.Proc + "self/fd"
|
intermediateHostPath = fhs.Proc + "self/fd"
|
||||||
|
|
||||||
// setupEnv is the name of the environment variable holding the string representation of
|
// setupEnv is the name of the environment variable holding the string
|
||||||
// the read end file descriptor of the setup params pipe.
|
// representation of the read end file descriptor of the setup params pipe.
|
||||||
setupEnv = "HAKUREI_SETUP"
|
setupEnv = "HAKUREI_SETUP"
|
||||||
|
|
||||||
// exitUnexpectedWait4 is the exit code if wait4 returns an unexpected errno.
|
// exitUnexpectedWait4 is the exit code if wait4 returns an unexpected errno.
|
||||||
@@ -59,7 +59,8 @@ type (
|
|||||||
// late is called right before starting the initial process.
|
// late is called right before starting the initial process.
|
||||||
late(state *setupState, k syscallDispatcher) error
|
late(state *setupState, k syscallDispatcher) error
|
||||||
|
|
||||||
// prefix returns a log message prefix, and whether this Op prints no identifying message on its own.
|
// prefix returns a log message prefix, and whether this Op prints no
|
||||||
|
// identifying message on its own.
|
||||||
prefix() (string, bool)
|
prefix() (string, bool)
|
||||||
|
|
||||||
Is(op Op) bool
|
Is(op Op) bool
|
||||||
@@ -71,9 +72,11 @@ type (
|
|||||||
setupState struct {
|
setupState struct {
|
||||||
nonrepeatable uintptr
|
nonrepeatable uintptr
|
||||||
|
|
||||||
// Whether early reaping has concluded. Must only be accessed in the wait4 loop.
|
// Whether early reaping has concluded. Must only be accessed in the
|
||||||
|
// wait4 loop.
|
||||||
processConcluded bool
|
processConcluded bool
|
||||||
// Process to syscall.WaitStatus populated in the wait4 loop. Freed after early reaping concludes.
|
// Process to syscall.WaitStatus populated in the wait4 loop. Freed
|
||||||
|
// after early reaping concludes.
|
||||||
process map[int]WaitStatus
|
process map[int]WaitStatus
|
||||||
// Synchronises access to process.
|
// Synchronises access to process.
|
||||||
processMu sync.RWMutex
|
processMu sync.RWMutex
|
||||||
@@ -216,9 +219,10 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
|
|||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
/* early is called right before pivot_root into intermediate root;
|
/* early is called right before pivot_root into intermediate root;
|
||||||
this step is mostly for gathering information that would otherwise be difficult to obtain
|
this step is mostly for gathering information that would otherwise be
|
||||||
via library functions after pivot_root, and implementations are expected to avoid changing
|
difficult to obtain via library functions after pivot_root, and
|
||||||
the state of the mount namespace */
|
implementations are expected to avoid changing the state of the mount
|
||||||
|
namespace */
|
||||||
for i, op := range *params.Ops {
|
for i, op := range *params.Ops {
|
||||||
if op == nil || !op.Valid() {
|
if op == nil || !op.Valid() {
|
||||||
k.fatalf(msg, "invalid op at index %d", i)
|
k.fatalf(msg, "invalid op at index %d", i)
|
||||||
@@ -258,10 +262,10 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
|
|||||||
k.fatalf(msg, "cannot enter intermediate root: %v", err)
|
k.fatalf(msg, "cannot enter intermediate root: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* apply is called right after pivot_root and entering the new root;
|
/* apply is called right after pivot_root and entering the new root. This
|
||||||
this step sets up the container filesystem, and implementations are expected to keep the host root
|
step sets up the container filesystem, and implementations are expected to
|
||||||
and sysroot mount points intact but otherwise can do whatever they need to;
|
keep the host root and sysroot mount points intact but otherwise can do
|
||||||
chdir is allowed but discouraged */
|
whatever they need to. Calling chdir is allowed but discouraged. */
|
||||||
for i, op := range *params.Ops {
|
for i, op := range *params.Ops {
|
||||||
// ops already checked during early setup
|
// ops already checked during early setup
|
||||||
if prefix, ok := op.prefix(); ok {
|
if prefix, ok := op.prefix(); ok {
|
||||||
|
|||||||
@@ -12,14 +12,16 @@ import (
|
|||||||
|
|
||||||
func init() { gob.Register(new(BindMountOp)) }
|
func init() { gob.Register(new(BindMountOp)) }
|
||||||
|
|
||||||
// Bind appends an [Op] that bind mounts host path [BindMountOp.Source] on container path [BindMountOp.Target].
|
// Bind is a helper for appending [BindMountOp] to [Ops].
|
||||||
func (f *Ops) Bind(source, target *check.Absolute, flags int) *Ops {
|
func (f *Ops) Bind(source, target *check.Absolute, flags int) *Ops {
|
||||||
*f = append(*f, &BindMountOp{nil, source, target, flags})
|
*f = append(*f, &BindMountOp{nil, source, target, flags})
|
||||||
return f
|
return f
|
||||||
}
|
}
|
||||||
|
|
||||||
// BindMountOp bind mounts host path Source on container path Target.
|
// BindMountOp creates a bind mount from host path Source to container path Target.
|
||||||
// Note that Flags uses bits declared in this package and should not be set with constants in [syscall].
|
//
|
||||||
|
// Note that Flags uses bits declared in the [std] package and should not be set
|
||||||
|
// with constants in [syscall].
|
||||||
type BindMountOp struct {
|
type BindMountOp struct {
|
||||||
sourceFinal, Source, Target *check.Absolute
|
sourceFinal, Source, Target *check.Absolute
|
||||||
|
|
||||||
|
|||||||
@@ -24,8 +24,7 @@ const (
|
|||||||
daemonTimeout = 5 * time.Second
|
daemonTimeout = 5 * time.Second
|
||||||
)
|
)
|
||||||
|
|
||||||
// Daemon appends an [Op] that starts a daemon in the container and blocks until
|
// Daemon is a helper for appending [DaemonOp] to [Ops].
|
||||||
// [DaemonOp.Target] appears.
|
|
||||||
func (f *Ops) Daemon(target, path *check.Absolute, args ...string) *Ops {
|
func (f *Ops) Daemon(target, path *check.Absolute, args ...string) *Ops {
|
||||||
*f = append(*f, &DaemonOp{target, path, args})
|
*f = append(*f, &DaemonOp{target, path, args})
|
||||||
return f
|
return f
|
||||||
|
|||||||
@@ -19,7 +19,9 @@ func (f *Ops) Dev(target *check.Absolute, mqueue bool) *Ops {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// DevWritable appends an [Op] that mounts a writable subset of host /dev.
|
// DevWritable appends an [Op] that mounts a writable subset of host /dev.
|
||||||
// There is usually no good reason to write to /dev, so this should always be followed by a [RemountOp].
|
//
|
||||||
|
// There is usually no good reason to write to /dev, so this should always be
|
||||||
|
// followed by a [RemountOp].
|
||||||
func (f *Ops) DevWritable(target *check.Absolute, mqueue bool) *Ops {
|
func (f *Ops) DevWritable(target *check.Absolute, mqueue bool) *Ops {
|
||||||
*f = append(*f, &MountDevOp{target, mqueue, true})
|
*f = append(*f, &MountDevOp{target, mqueue, true})
|
||||||
return f
|
return f
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ import (
|
|||||||
|
|
||||||
func init() { gob.Register(new(MkdirOp)) }
|
func init() { gob.Register(new(MkdirOp)) }
|
||||||
|
|
||||||
// Mkdir appends an [Op] that creates a directory in the container filesystem.
|
// Mkdir is a helper for appending [MkdirOp] to [Ops].
|
||||||
func (f *Ops) Mkdir(name *check.Absolute, perm os.FileMode) *Ops {
|
func (f *Ops) Mkdir(name *check.Absolute, perm os.FileMode) *Ops {
|
||||||
*f = append(*f, &MkdirOp{name, perm})
|
*f = append(*f, &MkdirOp{name, perm})
|
||||||
return f
|
return f
|
||||||
|
|||||||
@@ -54,8 +54,11 @@ func (e *OverlayArgumentError) Error() string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Overlay appends an [Op] that mounts the overlay pseudo filesystem on [MountOverlayOp.Target].
|
// Overlay is a helper for appending [MountOverlayOp] to [Ops].
|
||||||
func (f *Ops) Overlay(target, state, work *check.Absolute, layers ...*check.Absolute) *Ops {
|
func (f *Ops) Overlay(
|
||||||
|
target, state, work *check.Absolute,
|
||||||
|
layers ...*check.Absolute,
|
||||||
|
) *Ops {
|
||||||
*f = append(*f, &MountOverlayOp{
|
*f = append(*f, &MountOverlayOp{
|
||||||
Target: target,
|
Target: target,
|
||||||
Lower: layers,
|
Lower: layers,
|
||||||
@@ -65,13 +68,12 @@ func (f *Ops) Overlay(target, state, work *check.Absolute, layers ...*check.Abso
|
|||||||
return f
|
return f
|
||||||
}
|
}
|
||||||
|
|
||||||
// OverlayEphemeral appends an [Op] that mounts the overlay pseudo filesystem on [MountOverlayOp.Target]
|
// OverlayEphemeral appends a [MountOverlayOp] with an ephemeral upperdir and workdir.
|
||||||
// with an ephemeral upperdir and workdir.
|
|
||||||
func (f *Ops) OverlayEphemeral(target *check.Absolute, layers ...*check.Absolute) *Ops {
|
func (f *Ops) OverlayEphemeral(target *check.Absolute, layers ...*check.Absolute) *Ops {
|
||||||
return f.Overlay(target, fhs.AbsRoot, nil, layers...)
|
return f.Overlay(target, fhs.AbsRoot, nil, layers...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// OverlayReadonly appends an [Op] that mounts the overlay pseudo filesystem readonly on [MountOverlayOp.Target]
|
// OverlayReadonly appends a readonly [MountOverlayOp].
|
||||||
func (f *Ops) OverlayReadonly(target *check.Absolute, layers ...*check.Absolute) *Ops {
|
func (f *Ops) OverlayReadonly(target *check.Absolute, layers ...*check.Absolute) *Ops {
|
||||||
return f.Overlay(target, nil, nil, layers...)
|
return f.Overlay(target, nil, nil, layers...)
|
||||||
}
|
}
|
||||||
@@ -82,25 +84,34 @@ type MountOverlayOp struct {
|
|||||||
|
|
||||||
// Any filesystem, does not need to be on a writable filesystem.
|
// Any filesystem, does not need to be on a writable filesystem.
|
||||||
Lower []*check.Absolute
|
Lower []*check.Absolute
|
||||||
// formatted for [OptionOverlayLowerdir], resolved, prefixed and escaped during early
|
// Formatted for [OptionOverlayLowerdir].
|
||||||
|
//
|
||||||
|
// Resolved, prefixed and escaped during early.
|
||||||
lower []string
|
lower []string
|
||||||
|
|
||||||
// The upperdir is normally on a writable filesystem.
|
// The upperdir is normally on a writable filesystem.
|
||||||
//
|
//
|
||||||
// If Work is nil and Upper holds the special value [fhs.AbsRoot],
|
// If Work is nil and Upper holds the special value [fhs.AbsRoot], an
|
||||||
// an ephemeral upperdir and workdir will be set up.
|
// ephemeral upperdir and workdir will be set up.
|
||||||
//
|
//
|
||||||
// If both Work and Upper are nil, upperdir and workdir is omitted and the overlay is mounted readonly.
|
// If both Work and Upper are nil, upperdir and workdir is omitted and the
|
||||||
|
// overlay is mounted readonly.
|
||||||
Upper *check.Absolute
|
Upper *check.Absolute
|
||||||
// formatted for [OptionOverlayUpperdir], resolved, prefixed and escaped during early
|
// Formatted for [OptionOverlayUpperdir].
|
||||||
|
//
|
||||||
|
// Resolved, prefixed and escaped during early.
|
||||||
upper string
|
upper string
|
||||||
|
|
||||||
// The workdir needs to be an empty directory on the same filesystem as upperdir.
|
// The workdir needs to be an empty directory on the same filesystem as upperdir.
|
||||||
Work *check.Absolute
|
Work *check.Absolute
|
||||||
// formatted for [OptionOverlayWorkdir], resolved, prefixed and escaped during early
|
// Formatted for [OptionOverlayWorkdir].
|
||||||
|
//
|
||||||
|
// Resolved, prefixed and escaped during early.
|
||||||
work string
|
work string
|
||||||
|
|
||||||
ephemeral bool
|
ephemeral bool
|
||||||
|
|
||||||
// used internally for mounting to the intermediate root
|
// Used internally for mounting to the intermediate root.
|
||||||
noPrefix bool
|
noPrefix bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ const (
|
|||||||
|
|
||||||
func init() { gob.Register(new(TmpfileOp)) }
|
func init() { gob.Register(new(TmpfileOp)) }
|
||||||
|
|
||||||
// Place appends an [Op] that places a file in container path [TmpfileOp.Path] containing [TmpfileOp.Data].
|
// Place is a helper for appending [TmpfileOp] to [Ops].
|
||||||
func (f *Ops) Place(name *check.Absolute, data []byte) *Ops {
|
func (f *Ops) Place(name *check.Absolute, data []byte) *Ops {
|
||||||
*f = append(*f, &TmpfileOp{name, data})
|
*f = append(*f, &TmpfileOp{name, data})
|
||||||
return f
|
return f
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ func TestTmpfileOp(t *testing.T) {
|
|||||||
Path: samplePath,
|
Path: samplePath,
|
||||||
Data: sampleData,
|
Data: sampleData,
|
||||||
}, nil, nil, []stub.Call{
|
}, nil, nil, []stub.Call{
|
||||||
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.32768", sampleDataString, nil), stub.UniqueError(5)),
|
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, (*checkedOsFile)(nil), stub.UniqueError(5)),
|
||||||
}, stub.UniqueError(5)},
|
}, stub.UniqueError(5)},
|
||||||
|
|
||||||
{"Write", &Params{ParentPerm: 0700}, &TmpfileOp{
|
{"Write", &Params{ParentPerm: 0700}, &TmpfileOp{
|
||||||
@@ -35,14 +35,14 @@ func TestTmpfileOp(t *testing.T) {
|
|||||||
Path: samplePath,
|
Path: samplePath,
|
||||||
Data: sampleData,
|
Data: sampleData,
|
||||||
}, nil, nil, []stub.Call{
|
}, nil, nil, []stub.Call{
|
||||||
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.32768", sampleDataString, stub.UniqueError(3)), nil),
|
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.Close", sampleDataString, stub.UniqueError(3)), nil),
|
||||||
}, stub.UniqueError(3)},
|
}, stub.UniqueError(3)},
|
||||||
|
|
||||||
{"ensureFile", &Params{ParentPerm: 0700}, &TmpfileOp{
|
{"ensureFile", &Params{ParentPerm: 0700}, &TmpfileOp{
|
||||||
Path: samplePath,
|
Path: samplePath,
|
||||||
Data: sampleData,
|
Data: sampleData,
|
||||||
}, nil, nil, []stub.Call{
|
}, nil, nil, []stub.Call{
|
||||||
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.32768", sampleDataString, nil), nil),
|
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.ensureFile", sampleDataString, nil), nil),
|
||||||
call("ensureFile", stub.ExpectArgs{"/sysroot/etc/passwd", os.FileMode(0444), os.FileMode(0700)}, nil, stub.UniqueError(2)),
|
call("ensureFile", stub.ExpectArgs{"/sysroot/etc/passwd", os.FileMode(0444), os.FileMode(0700)}, nil, stub.UniqueError(2)),
|
||||||
}, stub.UniqueError(2)},
|
}, stub.UniqueError(2)},
|
||||||
|
|
||||||
@@ -50,29 +50,29 @@ func TestTmpfileOp(t *testing.T) {
|
|||||||
Path: samplePath,
|
Path: samplePath,
|
||||||
Data: sampleData,
|
Data: sampleData,
|
||||||
}, nil, nil, []stub.Call{
|
}, nil, nil, []stub.Call{
|
||||||
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.32768", sampleDataString, nil), nil),
|
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.bindMount", sampleDataString, nil), nil),
|
||||||
call("ensureFile", stub.ExpectArgs{"/sysroot/etc/passwd", os.FileMode(0444), os.FileMode(0700)}, nil, nil),
|
call("ensureFile", stub.ExpectArgs{"/sysroot/etc/passwd", os.FileMode(0444), os.FileMode(0700)}, nil, nil),
|
||||||
call("bindMount", stub.ExpectArgs{"tmp.32768", "/sysroot/etc/passwd", uintptr(0x5), false}, nil, stub.UniqueError(1)),
|
call("bindMount", stub.ExpectArgs{"tmp.bindMount", "/sysroot/etc/passwd", uintptr(0x5), false}, nil, stub.UniqueError(1)),
|
||||||
}, stub.UniqueError(1)},
|
}, stub.UniqueError(1)},
|
||||||
|
|
||||||
{"remove", &Params{ParentPerm: 0700}, &TmpfileOp{
|
{"remove", &Params{ParentPerm: 0700}, &TmpfileOp{
|
||||||
Path: samplePath,
|
Path: samplePath,
|
||||||
Data: sampleData,
|
Data: sampleData,
|
||||||
}, nil, nil, []stub.Call{
|
}, nil, nil, []stub.Call{
|
||||||
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.32768", sampleDataString, nil), nil),
|
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.remove", sampleDataString, nil), nil),
|
||||||
call("ensureFile", stub.ExpectArgs{"/sysroot/etc/passwd", os.FileMode(0444), os.FileMode(0700)}, nil, nil),
|
call("ensureFile", stub.ExpectArgs{"/sysroot/etc/passwd", os.FileMode(0444), os.FileMode(0700)}, nil, nil),
|
||||||
call("bindMount", stub.ExpectArgs{"tmp.32768", "/sysroot/etc/passwd", uintptr(0x5), false}, nil, nil),
|
call("bindMount", stub.ExpectArgs{"tmp.remove", "/sysroot/etc/passwd", uintptr(0x5), false}, nil, nil),
|
||||||
call("remove", stub.ExpectArgs{"tmp.32768"}, nil, stub.UniqueError(0)),
|
call("remove", stub.ExpectArgs{"tmp.remove"}, nil, stub.UniqueError(0)),
|
||||||
}, stub.UniqueError(0)},
|
}, stub.UniqueError(0)},
|
||||||
|
|
||||||
{"success", &Params{ParentPerm: 0700}, &TmpfileOp{
|
{"success", &Params{ParentPerm: 0700}, &TmpfileOp{
|
||||||
Path: samplePath,
|
Path: samplePath,
|
||||||
Data: sampleData,
|
Data: sampleData,
|
||||||
}, nil, nil, []stub.Call{
|
}, nil, nil, []stub.Call{
|
||||||
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.32768", sampleDataString, nil), nil),
|
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.success", sampleDataString, nil), nil),
|
||||||
call("ensureFile", stub.ExpectArgs{"/sysroot/etc/passwd", os.FileMode(0444), os.FileMode(0700)}, nil, nil),
|
call("ensureFile", stub.ExpectArgs{"/sysroot/etc/passwd", os.FileMode(0444), os.FileMode(0700)}, nil, nil),
|
||||||
call("bindMount", stub.ExpectArgs{"tmp.32768", "/sysroot/etc/passwd", uintptr(0x5), false}, nil, nil),
|
call("bindMount", stub.ExpectArgs{"tmp.success", "/sysroot/etc/passwd", uintptr(0x5), false}, nil, nil),
|
||||||
call("remove", stub.ExpectArgs{"tmp.32768"}, nil, nil),
|
call("remove", stub.ExpectArgs{"tmp.success"}, nil, nil),
|
||||||
}, nil},
|
}, nil},
|
||||||
})
|
})
|
||||||
|
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ import (
|
|||||||
|
|
||||||
func init() { gob.Register(new(MountProcOp)) }
|
func init() { gob.Register(new(MountProcOp)) }
|
||||||
|
|
||||||
// Proc appends an [Op] that mounts a private instance of proc.
|
// Proc is a helper for appending [MountProcOp] to [Ops].
|
||||||
func (f *Ops) Proc(target *check.Absolute) *Ops {
|
func (f *Ops) Proc(target *check.Absolute) *Ops {
|
||||||
*f = append(*f, &MountProcOp{target})
|
*f = append(*f, &MountProcOp{target})
|
||||||
return f
|
return f
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ import (
|
|||||||
|
|
||||||
func init() { gob.Register(new(RemountOp)) }
|
func init() { gob.Register(new(RemountOp)) }
|
||||||
|
|
||||||
// Remount appends an [Op] that applies [RemountOp.Flags] on container path [RemountOp.Target].
|
// Remount is a helper for appending [RemountOp] to [Ops].
|
||||||
func (f *Ops) Remount(target *check.Absolute, flags uintptr) *Ops {
|
func (f *Ops) Remount(target *check.Absolute, flags uintptr) *Ops {
|
||||||
*f = append(*f, &RemountOp{target, flags})
|
*f = append(*f, &RemountOp{target, flags})
|
||||||
return f
|
return f
|
||||||
|
|||||||
@@ -38,6 +38,7 @@ const (
|
|||||||
_LANDLOCK_ACCESS_FS_DELIM
|
_LANDLOCK_ACCESS_FS_DELIM
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// String returns a space-separated string of [LandlockAccessFS] flags.
|
||||||
func (f LandlockAccessFS) String() string {
|
func (f LandlockAccessFS) String() string {
|
||||||
switch f {
|
switch f {
|
||||||
case LANDLOCK_ACCESS_FS_EXECUTE:
|
case LANDLOCK_ACCESS_FS_EXECUTE:
|
||||||
@@ -116,6 +117,7 @@ const (
|
|||||||
_LANDLOCK_ACCESS_NET_DELIM
|
_LANDLOCK_ACCESS_NET_DELIM
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// String returns a space-separated string of [LandlockAccessNet] flags.
|
||||||
func (f LandlockAccessNet) String() string {
|
func (f LandlockAccessNet) String() string {
|
||||||
switch f {
|
switch f {
|
||||||
case LANDLOCK_ACCESS_NET_BIND_TCP:
|
case LANDLOCK_ACCESS_NET_BIND_TCP:
|
||||||
@@ -152,6 +154,7 @@ const (
|
|||||||
_LANDLOCK_SCOPE_DELIM
|
_LANDLOCK_SCOPE_DELIM
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// String returns a space-separated string of [LandlockScope] flags.
|
||||||
func (f LandlockScope) String() string {
|
func (f LandlockScope) String() string {
|
||||||
switch f {
|
switch f {
|
||||||
case LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET:
|
case LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET:
|
||||||
@@ -184,10 +187,12 @@ type RulesetAttr struct {
|
|||||||
HandledAccessFS LandlockAccessFS
|
HandledAccessFS LandlockAccessFS
|
||||||
// Bitmask of handled network actions.
|
// Bitmask of handled network actions.
|
||||||
HandledAccessNet LandlockAccessNet
|
HandledAccessNet LandlockAccessNet
|
||||||
// Bitmask of scopes restricting a Landlock domain from accessing outside resources (e.g. IPCs).
|
// Bitmask of scopes restricting a Landlock domain from accessing outside
|
||||||
|
// resources (e.g. IPCs).
|
||||||
Scoped LandlockScope
|
Scoped LandlockScope
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// String returns a user-facing description of [RulesetAttr].
|
||||||
func (rulesetAttr *RulesetAttr) String() string {
|
func (rulesetAttr *RulesetAttr) String() string {
|
||||||
if rulesetAttr == nil {
|
if rulesetAttr == nil {
|
||||||
return "NULL"
|
return "NULL"
|
||||||
@@ -208,6 +213,7 @@ func (rulesetAttr *RulesetAttr) String() string {
|
|||||||
return strings.Join(elems, ", ")
|
return strings.Join(elems, ", ")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Create loads the ruleset into the kernel.
|
||||||
func (rulesetAttr *RulesetAttr) Create(flags uintptr) (fd int, err error) {
|
func (rulesetAttr *RulesetAttr) Create(flags uintptr) (fd int, err error) {
|
||||||
var pointer, size uintptr
|
var pointer, size uintptr
|
||||||
// NULL needed for abi version
|
// NULL needed for abi version
|
||||||
@@ -216,10 +222,13 @@ func (rulesetAttr *RulesetAttr) Create(flags uintptr) (fd int, err error) {
|
|||||||
size = unsafe.Sizeof(*rulesetAttr)
|
size = unsafe.Sizeof(*rulesetAttr)
|
||||||
}
|
}
|
||||||
|
|
||||||
rulesetFd, _, errno := syscall.Syscall(std.SYS_LANDLOCK_CREATE_RULESET, pointer, size, flags)
|
rulesetFd, _, errno := syscall.Syscall(
|
||||||
|
std.SYS_LANDLOCK_CREATE_RULESET,
|
||||||
|
pointer, size,
|
||||||
|
flags,
|
||||||
|
)
|
||||||
fd = int(rulesetFd)
|
fd = int(rulesetFd)
|
||||||
err = errno
|
err = errno
|
||||||
|
|
||||||
if fd < 0 {
|
if fd < 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -230,12 +239,19 @@ func (rulesetAttr *RulesetAttr) Create(flags uintptr) (fd int, err error) {
|
|||||||
return fd, nil
|
return fd, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// LandlockGetABI returns the ABI version supported by the kernel.
|
||||||
func LandlockGetABI() (int, error) {
|
func LandlockGetABI() (int, error) {
|
||||||
return (*RulesetAttr)(nil).Create(LANDLOCK_CREATE_RULESET_VERSION)
|
return (*RulesetAttr)(nil).Create(LANDLOCK_CREATE_RULESET_VERSION)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// LandlockRestrictSelf applies a loaded ruleset to the calling thread.
|
||||||
func LandlockRestrictSelf(rulesetFd int, flags uintptr) error {
|
func LandlockRestrictSelf(rulesetFd int, flags uintptr) error {
|
||||||
r, _, errno := syscall.Syscall(std.SYS_LANDLOCK_RESTRICT_SELF, uintptr(rulesetFd), flags, 0)
|
r, _, errno := syscall.Syscall(
|
||||||
|
std.SYS_LANDLOCK_RESTRICT_SELF,
|
||||||
|
uintptr(rulesetFd),
|
||||||
|
flags,
|
||||||
|
0,
|
||||||
|
)
|
||||||
if r != 0 {
|
if r != 0 {
|
||||||
return errno
|
return errno
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -99,7 +99,7 @@ done:
|
|||||||
}
|
}
|
||||||
if m.Header.Type == NLMSG_ERROR {
|
if m.Header.Type == NLMSG_ERROR {
|
||||||
if len(m.Data) >= 4 {
|
if len(m.Data) >= 4 {
|
||||||
errno := Errno(-std.ScmpInt(binary.NativeEndian.Uint32(m.Data)))
|
errno := Errno(-std.Int(binary.NativeEndian.Uint32(m.Data)))
|
||||||
if errno == 0 {
|
if errno == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -15,7 +15,10 @@ import (
|
|||||||
|
|
||||||
const (
|
const (
|
||||||
// Nonexistent is a path that cannot exist.
|
// Nonexistent is a path that cannot exist.
|
||||||
// /proc is chosen because a system with covered /proc is unsupported by this package.
|
//
|
||||||
|
// This path can never be presented by the kernel if proc is mounted on
|
||||||
|
// /proc/. This can only exist if parts of /proc/ is covered, or proc is not
|
||||||
|
// mounted at all. Neither configuration is supported by this package.
|
||||||
Nonexistent = fhs.Proc + "nonexistent"
|
Nonexistent = fhs.Proc + "nonexistent"
|
||||||
|
|
||||||
hostPath = fhs.Root + hostDir
|
hostPath = fhs.Root + hostDir
|
||||||
|
|||||||
@@ -88,18 +88,22 @@ var resPrefix = [...]string{
|
|||||||
7: "seccomp_load failed",
|
7: "seccomp_load failed",
|
||||||
}
|
}
|
||||||
|
|
||||||
// cbAllocateBuffer is the function signature for the function handle passed to hakurei_export_filter
|
// cbAllocateBuffer is the function signature for the function handle passed to
|
||||||
// which allocates the buffer that the resulting bpf program is copied into, and writes its slice header
|
// hakurei_scmp_make_filter which allocates the buffer that the resulting bpf
|
||||||
// to a value held by the caller.
|
// program is copied into, and writes its slice header to a value held by the caller.
|
||||||
type cbAllocateBuffer = func(len C.size_t) (buf unsafe.Pointer)
|
type cbAllocateBuffer = func(len C.size_t) (buf unsafe.Pointer)
|
||||||
|
|
||||||
|
// hakurei_scmp_allocate allocates a buffer of specified size known to the
|
||||||
|
// runtime through a callback passed in a [cgo.Handle].
|
||||||
|
//
|
||||||
//export hakurei_scmp_allocate
|
//export hakurei_scmp_allocate
|
||||||
func hakurei_scmp_allocate(f C.uintptr_t, len C.size_t) (buf unsafe.Pointer) {
|
func hakurei_scmp_allocate(f C.uintptr_t, len C.size_t) (buf unsafe.Pointer) {
|
||||||
return cgo.Handle(f).Value().(cbAllocateBuffer)(len)
|
return cgo.Handle(f).Value().(cbAllocateBuffer)(len)
|
||||||
}
|
}
|
||||||
|
|
||||||
// makeFilter generates a bpf program from a slice of [std.NativeRule] and writes the resulting byte slice to p.
|
// makeFilter generates a bpf program from a slice of [std.NativeRule] and
|
||||||
// The filter is installed to the current process if p is nil.
|
// writes the resulting byte slice to p. The filter is installed to the current
|
||||||
|
// process if p is nil.
|
||||||
func makeFilter(rules []std.NativeRule, flags ExportFlag, p *[]byte) error {
|
func makeFilter(rules []std.NativeRule, flags ExportFlag, p *[]byte) error {
|
||||||
if len(rules) == 0 {
|
if len(rules) == 0 {
|
||||||
return ErrInvalidRules
|
return ErrInvalidRules
|
||||||
@@ -170,8 +174,8 @@ func Export(rules []std.NativeRule, flags ExportFlag) (data []byte, err error) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Load generates a bpf program from a slice of [std.NativeRule] and enforces it on the current process.
|
// Load generates a bpf program from a slice of [std.NativeRule] and enforces it
|
||||||
// Errors returned by libseccomp is wrapped in [LibraryError].
|
// on the current process. Errors returned by libseccomp is wrapped in [LibraryError].
|
||||||
func Load(rules []std.NativeRule, flags ExportFlag) error { return makeFilter(rules, flags, nil) }
|
func Load(rules []std.NativeRule, flags ExportFlag) error { return makeFilter(rules, flags, nil) }
|
||||||
|
|
||||||
type (
|
type (
|
||||||
|
|||||||
27
container/seccomp/presets_riscv64_test.go
Normal file
27
container/seccomp/presets_riscv64_test.go
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
package seccomp_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
. "hakurei.app/container/seccomp"
|
||||||
|
. "hakurei.app/container/std"
|
||||||
|
)
|
||||||
|
|
||||||
|
var bpfExpected = bpfLookup{
|
||||||
|
{AllowMultiarch | AllowCAN |
|
||||||
|
AllowBluetooth, PresetExt |
|
||||||
|
PresetDenyNS | PresetDenyTTY | PresetDenyDevel |
|
||||||
|
PresetLinux32}: toHash(
|
||||||
|
"a1c4ffa35f4bfbf38061184760b9a09edfcb4964c3b534395e47327b83f3fb61f2f9573ddfcc4772424cc2f5dd12fd32471e6531dbe10e85eda3797dd4fa179f"),
|
||||||
|
|
||||||
|
{0, 0}: toHash(
|
||||||
|
"f3910fd727d087def593e3876c2c6ab9ace71d82ec8cbc992a26223e7bba85e1d7a0b56c5fc6303703f24595825dad8561637edaedd5384b34a6cd080946633c"),
|
||||||
|
{0, PresetExt}: toHash(
|
||||||
|
"741438c5e3f11c36c92ae8c5934f13440675c6e719541c2dbffeda79a10081bcfd9ad8314a60c1d1f53db86c8080c13fffa3bbcf7fe753935679b4b902737286"),
|
||||||
|
{0, PresetStrict}: toHash(
|
||||||
|
"79e9e464d02405c6d74fd2c771bd72a1311e488221c73a9c32db9270219837c54fccec2f36fe2474895547e60c311514567e2e6cf4e7a7fcf909c1ecd1e254a7"),
|
||||||
|
{0, PresetDenyNS | PresetDenyTTY | PresetDenyDevel}: toHash(
|
||||||
|
"3c443715a6c1e557a284862ea8efb70a5d4ecbe67d1226627323e861cd3646fb3e7768ec5b94b93760b7f652cf6916f66e317a4fbf8716d10c3673aa4fc3ae58"),
|
||||||
|
{0, PresetExt | PresetDenyDevel}: toHash(
|
||||||
|
"4448a74e8cc75a4ab63799c4f2cc2a5af63e5f4e8e9b8ac15a1873d647dfa67a4c67b39ed466d8dd32abc64136d401879fc6185c9ab00feeaf59ccf4305f8201"),
|
||||||
|
{0, PresetExt | PresetDenyNS | PresetDenyDevel}: toHash(
|
||||||
|
"c7c86e793cb7192f5f6c735f372cda27eb43ae1045e587f8eadb64c849520a3280b6570a3d7b601d32cddb38021585a2234db38e506cebfd10aa3d6c75440f17"),
|
||||||
|
}
|
||||||
@@ -24,8 +24,8 @@ func TestSyscallResolveName(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestRuleType(t *testing.T) {
|
func TestRuleType(t *testing.T) {
|
||||||
assertKind[std.ScmpUint, scmpUint](t)
|
assertKind[std.Uint, scmpUint](t)
|
||||||
assertKind[std.ScmpInt, scmpInt](t)
|
assertKind[std.Int, scmpInt](t)
|
||||||
|
|
||||||
assertSize[std.NativeRule, syscallRule](t)
|
assertSize[std.NativeRule, syscallRule](t)
|
||||||
assertKind[std.ScmpDatum, scmpDatum](t)
|
assertKind[std.ScmpDatum, scmpDatum](t)
|
||||||
|
|||||||
@@ -12,6 +12,7 @@ my %syscall_cutoff_arch = (
|
|||||||
"x86" => 340,
|
"x86" => 340,
|
||||||
"x86_64" => 302,
|
"x86_64" => 302,
|
||||||
"aarch64" => 281,
|
"aarch64" => 281,
|
||||||
|
"riscv64" => 281,
|
||||||
);
|
);
|
||||||
|
|
||||||
print <<EOF;
|
print <<EOF;
|
||||||
|
|||||||
@@ -7,24 +7,28 @@ import (
|
|||||||
|
|
||||||
type (
|
type (
|
||||||
// ScmpUint is equivalent to C.uint.
|
// ScmpUint is equivalent to C.uint.
|
||||||
ScmpUint uint32
|
//
|
||||||
|
// Deprecated: This type has been renamed to Uint and will be removed in 0.4.
|
||||||
|
ScmpUint = Uint
|
||||||
// ScmpInt is equivalent to C.int.
|
// ScmpInt is equivalent to C.int.
|
||||||
ScmpInt int32
|
//
|
||||||
|
// Deprecated: This type has been renamed to Int and will be removed in 0.4.
|
||||||
|
ScmpInt = Int
|
||||||
|
|
||||||
// ScmpSyscall represents a syscall number passed to libseccomp via [NativeRule.Syscall].
|
// ScmpSyscall represents a syscall number passed to libseccomp via [NativeRule.Syscall].
|
||||||
ScmpSyscall ScmpInt
|
ScmpSyscall Int
|
||||||
// ScmpErrno represents an errno value passed to libseccomp via [NativeRule.Errno].
|
// ScmpErrno represents an errno value passed to libseccomp via [NativeRule.Errno].
|
||||||
ScmpErrno ScmpInt
|
ScmpErrno Int
|
||||||
|
|
||||||
// ScmpCompare is equivalent to enum scmp_compare;
|
// ScmpCompare is equivalent to enum scmp_compare;
|
||||||
ScmpCompare ScmpUint
|
ScmpCompare Uint
|
||||||
// ScmpDatum is equivalent to scmp_datum_t.
|
// ScmpDatum is equivalent to scmp_datum_t.
|
||||||
ScmpDatum uint64
|
ScmpDatum uint64
|
||||||
|
|
||||||
// ScmpArgCmp is equivalent to struct scmp_arg_cmp.
|
// ScmpArgCmp is equivalent to struct scmp_arg_cmp.
|
||||||
ScmpArgCmp struct {
|
ScmpArgCmp struct {
|
||||||
// argument number, starting at 0
|
// argument number, starting at 0
|
||||||
Arg ScmpUint `json:"arg"`
|
Arg Uint `json:"arg"`
|
||||||
// the comparison op, e.g. SCMP_CMP_*
|
// the comparison op, e.g. SCMP_CMP_*
|
||||||
Op ScmpCompare `json:"op"`
|
Op ScmpCompare `json:"op"`
|
||||||
|
|
||||||
|
|||||||
55
container/std/syscall_extra_linux_riscv64.go
Normal file
55
container/std/syscall_extra_linux_riscv64.go
Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
package std
|
||||||
|
|
||||||
|
import "syscall"
|
||||||
|
|
||||||
|
const (
|
||||||
|
SYS_NEWFSTATAT = syscall.SYS_FSTATAT
|
||||||
|
)
|
||||||
|
|
||||||
|
var syscallNumExtra = map[string]ScmpSyscall{
|
||||||
|
"uselib": SNR_USELIB,
|
||||||
|
"clock_adjtime64": SNR_CLOCK_ADJTIME64,
|
||||||
|
"clock_settime64": SNR_CLOCK_SETTIME64,
|
||||||
|
"umount": SNR_UMOUNT,
|
||||||
|
"chown": SNR_CHOWN,
|
||||||
|
"chown32": SNR_CHOWN32,
|
||||||
|
"fchown32": SNR_FCHOWN32,
|
||||||
|
"lchown": SNR_LCHOWN,
|
||||||
|
"lchown32": SNR_LCHOWN32,
|
||||||
|
"setgid32": SNR_SETGID32,
|
||||||
|
"setgroups32": SNR_SETGROUPS32,
|
||||||
|
"setregid32": SNR_SETREGID32,
|
||||||
|
"setresgid32": SNR_SETRESGID32,
|
||||||
|
"setresuid32": SNR_SETRESUID32,
|
||||||
|
"setreuid32": SNR_SETREUID32,
|
||||||
|
"setuid32": SNR_SETUID32,
|
||||||
|
"modify_ldt": SNR_MODIFY_LDT,
|
||||||
|
"subpage_prot": SNR_SUBPAGE_PROT,
|
||||||
|
"switch_endian": SNR_SWITCH_ENDIAN,
|
||||||
|
"vm86": SNR_VM86,
|
||||||
|
"vm86old": SNR_VM86OLD,
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
SNR_USELIB ScmpSyscall = __PNR_uselib
|
||||||
|
SNR_CLOCK_ADJTIME64 ScmpSyscall = __PNR_clock_adjtime64
|
||||||
|
SNR_CLOCK_SETTIME64 ScmpSyscall = __PNR_clock_settime64
|
||||||
|
SNR_UMOUNT ScmpSyscall = __PNR_umount
|
||||||
|
SNR_CHOWN ScmpSyscall = __PNR_chown
|
||||||
|
SNR_CHOWN32 ScmpSyscall = __PNR_chown32
|
||||||
|
SNR_FCHOWN32 ScmpSyscall = __PNR_fchown32
|
||||||
|
SNR_LCHOWN ScmpSyscall = __PNR_lchown
|
||||||
|
SNR_LCHOWN32 ScmpSyscall = __PNR_lchown32
|
||||||
|
SNR_SETGID32 ScmpSyscall = __PNR_setgid32
|
||||||
|
SNR_SETGROUPS32 ScmpSyscall = __PNR_setgroups32
|
||||||
|
SNR_SETREGID32 ScmpSyscall = __PNR_setregid32
|
||||||
|
SNR_SETRESGID32 ScmpSyscall = __PNR_setresgid32
|
||||||
|
SNR_SETRESUID32 ScmpSyscall = __PNR_setresuid32
|
||||||
|
SNR_SETREUID32 ScmpSyscall = __PNR_setreuid32
|
||||||
|
SNR_SETUID32 ScmpSyscall = __PNR_setuid32
|
||||||
|
SNR_MODIFY_LDT ScmpSyscall = __PNR_modify_ldt
|
||||||
|
SNR_SUBPAGE_PROT ScmpSyscall = __PNR_subpage_prot
|
||||||
|
SNR_SWITCH_ENDIAN ScmpSyscall = __PNR_switch_endian
|
||||||
|
SNR_VM86 ScmpSyscall = __PNR_vm86
|
||||||
|
SNR_VM86OLD ScmpSyscall = __PNR_vm86old
|
||||||
|
)
|
||||||
719
container/std/syscall_linux_riscv64.go
Normal file
719
container/std/syscall_linux_riscv64.go
Normal file
@@ -0,0 +1,719 @@
|
|||||||
|
// mksysnum_linux.pl /usr/include/riscv64-linux-gnu/asm/unistd.h
|
||||||
|
// Code generated by the command above; DO NOT EDIT.
|
||||||
|
|
||||||
|
package std
|
||||||
|
|
||||||
|
import . "syscall"
|
||||||
|
|
||||||
|
var syscallNum = map[string]ScmpSyscall{
|
||||||
|
"io_setup": SNR_IO_SETUP,
|
||||||
|
"io_destroy": SNR_IO_DESTROY,
|
||||||
|
"io_submit": SNR_IO_SUBMIT,
|
||||||
|
"io_cancel": SNR_IO_CANCEL,
|
||||||
|
"io_getevents": SNR_IO_GETEVENTS,
|
||||||
|
"setxattr": SNR_SETXATTR,
|
||||||
|
"lsetxattr": SNR_LSETXATTR,
|
||||||
|
"fsetxattr": SNR_FSETXATTR,
|
||||||
|
"getxattr": SNR_GETXATTR,
|
||||||
|
"lgetxattr": SNR_LGETXATTR,
|
||||||
|
"fgetxattr": SNR_FGETXATTR,
|
||||||
|
"listxattr": SNR_LISTXATTR,
|
||||||
|
"llistxattr": SNR_LLISTXATTR,
|
||||||
|
"flistxattr": SNR_FLISTXATTR,
|
||||||
|
"removexattr": SNR_REMOVEXATTR,
|
||||||
|
"lremovexattr": SNR_LREMOVEXATTR,
|
||||||
|
"fremovexattr": SNR_FREMOVEXATTR,
|
||||||
|
"getcwd": SNR_GETCWD,
|
||||||
|
"lookup_dcookie": SNR_LOOKUP_DCOOKIE,
|
||||||
|
"eventfd2": SNR_EVENTFD2,
|
||||||
|
"epoll_create1": SNR_EPOLL_CREATE1,
|
||||||
|
"epoll_ctl": SNR_EPOLL_CTL,
|
||||||
|
"epoll_pwait": SNR_EPOLL_PWAIT,
|
||||||
|
"dup": SNR_DUP,
|
||||||
|
"dup3": SNR_DUP3,
|
||||||
|
"fcntl": SNR_FCNTL,
|
||||||
|
"inotify_init1": SNR_INOTIFY_INIT1,
|
||||||
|
"inotify_add_watch": SNR_INOTIFY_ADD_WATCH,
|
||||||
|
"inotify_rm_watch": SNR_INOTIFY_RM_WATCH,
|
||||||
|
"ioctl": SNR_IOCTL,
|
||||||
|
"ioprio_set": SNR_IOPRIO_SET,
|
||||||
|
"ioprio_get": SNR_IOPRIO_GET,
|
||||||
|
"flock": SNR_FLOCK,
|
||||||
|
"mknodat": SNR_MKNODAT,
|
||||||
|
"mkdirat": SNR_MKDIRAT,
|
||||||
|
"unlinkat": SNR_UNLINKAT,
|
||||||
|
"symlinkat": SNR_SYMLINKAT,
|
||||||
|
"linkat": SNR_LINKAT,
|
||||||
|
"umount2": SNR_UMOUNT2,
|
||||||
|
"mount": SNR_MOUNT,
|
||||||
|
"pivot_root": SNR_PIVOT_ROOT,
|
||||||
|
"nfsservctl": SNR_NFSSERVCTL,
|
||||||
|
"statfs": SNR_STATFS,
|
||||||
|
"fstatfs": SNR_FSTATFS,
|
||||||
|
"truncate": SNR_TRUNCATE,
|
||||||
|
"ftruncate": SNR_FTRUNCATE,
|
||||||
|
"fallocate": SNR_FALLOCATE,
|
||||||
|
"faccessat": SNR_FACCESSAT,
|
||||||
|
"chdir": SNR_CHDIR,
|
||||||
|
"fchdir": SNR_FCHDIR,
|
||||||
|
"chroot": SNR_CHROOT,
|
||||||
|
"fchmod": SNR_FCHMOD,
|
||||||
|
"fchmodat": SNR_FCHMODAT,
|
||||||
|
"fchownat": SNR_FCHOWNAT,
|
||||||
|
"fchown": SNR_FCHOWN,
|
||||||
|
"openat": SNR_OPENAT,
|
||||||
|
"close": SNR_CLOSE,
|
||||||
|
"vhangup": SNR_VHANGUP,
|
||||||
|
"pipe2": SNR_PIPE2,
|
||||||
|
"quotactl": SNR_QUOTACTL,
|
||||||
|
"getdents64": SNR_GETDENTS64,
|
||||||
|
"lseek": SNR_LSEEK,
|
||||||
|
"read": SNR_READ,
|
||||||
|
"write": SNR_WRITE,
|
||||||
|
"readv": SNR_READV,
|
||||||
|
"writev": SNR_WRITEV,
|
||||||
|
"pread64": SNR_PREAD64,
|
||||||
|
"pwrite64": SNR_PWRITE64,
|
||||||
|
"preadv": SNR_PREADV,
|
||||||
|
"pwritev": SNR_PWRITEV,
|
||||||
|
"sendfile": SNR_SENDFILE,
|
||||||
|
"pselect6": SNR_PSELECT6,
|
||||||
|
"ppoll": SNR_PPOLL,
|
||||||
|
"signalfd4": SNR_SIGNALFD4,
|
||||||
|
"vmsplice": SNR_VMSPLICE,
|
||||||
|
"splice": SNR_SPLICE,
|
||||||
|
"tee": SNR_TEE,
|
||||||
|
"readlinkat": SNR_READLINKAT,
|
||||||
|
"newfstatat": SNR_NEWFSTATAT,
|
||||||
|
"fstat": SNR_FSTAT,
|
||||||
|
"sync": SNR_SYNC,
|
||||||
|
"fsync": SNR_FSYNC,
|
||||||
|
"fdatasync": SNR_FDATASYNC,
|
||||||
|
"sync_file_range": SNR_SYNC_FILE_RANGE,
|
||||||
|
"timerfd_create": SNR_TIMERFD_CREATE,
|
||||||
|
"timerfd_settime": SNR_TIMERFD_SETTIME,
|
||||||
|
"timerfd_gettime": SNR_TIMERFD_GETTIME,
|
||||||
|
"utimensat": SNR_UTIMENSAT,
|
||||||
|
"acct": SNR_ACCT,
|
||||||
|
"capget": SNR_CAPGET,
|
||||||
|
"capset": SNR_CAPSET,
|
||||||
|
"personality": SNR_PERSONALITY,
|
||||||
|
"exit": SNR_EXIT,
|
||||||
|
"exit_group": SNR_EXIT_GROUP,
|
||||||
|
"waitid": SNR_WAITID,
|
||||||
|
"set_tid_address": SNR_SET_TID_ADDRESS,
|
||||||
|
"unshare": SNR_UNSHARE,
|
||||||
|
"futex": SNR_FUTEX,
|
||||||
|
"set_robust_list": SNR_SET_ROBUST_LIST,
|
||||||
|
"get_robust_list": SNR_GET_ROBUST_LIST,
|
||||||
|
"nanosleep": SNR_NANOSLEEP,
|
||||||
|
"getitimer": SNR_GETITIMER,
|
||||||
|
"setitimer": SNR_SETITIMER,
|
||||||
|
"kexec_load": SNR_KEXEC_LOAD,
|
||||||
|
"init_module": SNR_INIT_MODULE,
|
||||||
|
"delete_module": SNR_DELETE_MODULE,
|
||||||
|
"timer_create": SNR_TIMER_CREATE,
|
||||||
|
"timer_gettime": SNR_TIMER_GETTIME,
|
||||||
|
"timer_getoverrun": SNR_TIMER_GETOVERRUN,
|
||||||
|
"timer_settime": SNR_TIMER_SETTIME,
|
||||||
|
"timer_delete": SNR_TIMER_DELETE,
|
||||||
|
"clock_settime": SNR_CLOCK_SETTIME,
|
||||||
|
"clock_gettime": SNR_CLOCK_GETTIME,
|
||||||
|
"clock_getres": SNR_CLOCK_GETRES,
|
||||||
|
"clock_nanosleep": SNR_CLOCK_NANOSLEEP,
|
||||||
|
"syslog": SNR_SYSLOG,
|
||||||
|
"ptrace": SNR_PTRACE,
|
||||||
|
"sched_setparam": SNR_SCHED_SETPARAM,
|
||||||
|
"sched_setscheduler": SNR_SCHED_SETSCHEDULER,
|
||||||
|
"sched_getscheduler": SNR_SCHED_GETSCHEDULER,
|
||||||
|
"sched_getparam": SNR_SCHED_GETPARAM,
|
||||||
|
"sched_setaffinity": SNR_SCHED_SETAFFINITY,
|
||||||
|
"sched_getaffinity": SNR_SCHED_GETAFFINITY,
|
||||||
|
"sched_yield": SNR_SCHED_YIELD,
|
||||||
|
"sched_get_priority_max": SNR_SCHED_GET_PRIORITY_MAX,
|
||||||
|
"sched_get_priority_min": SNR_SCHED_GET_PRIORITY_MIN,
|
||||||
|
"sched_rr_get_interval": SNR_SCHED_RR_GET_INTERVAL,
|
||||||
|
"restart_syscall": SNR_RESTART_SYSCALL,
|
||||||
|
"kill": SNR_KILL,
|
||||||
|
"tkill": SNR_TKILL,
|
||||||
|
"tgkill": SNR_TGKILL,
|
||||||
|
"sigaltstack": SNR_SIGALTSTACK,
|
||||||
|
"rt_sigsuspend": SNR_RT_SIGSUSPEND,
|
||||||
|
"rt_sigaction": SNR_RT_SIGACTION,
|
||||||
|
"rt_sigprocmask": SNR_RT_SIGPROCMASK,
|
||||||
|
"rt_sigpending": SNR_RT_SIGPENDING,
|
||||||
|
"rt_sigtimedwait": SNR_RT_SIGTIMEDWAIT,
|
||||||
|
"rt_sigqueueinfo": SNR_RT_SIGQUEUEINFO,
|
||||||
|
"rt_sigreturn": SNR_RT_SIGRETURN,
|
||||||
|
"setpriority": SNR_SETPRIORITY,
|
||||||
|
"getpriority": SNR_GETPRIORITY,
|
||||||
|
"reboot": SNR_REBOOT,
|
||||||
|
"setregid": SNR_SETREGID,
|
||||||
|
"setgid": SNR_SETGID,
|
||||||
|
"setreuid": SNR_SETREUID,
|
||||||
|
"setuid": SNR_SETUID,
|
||||||
|
"setresuid": SNR_SETRESUID,
|
||||||
|
"getresuid": SNR_GETRESUID,
|
||||||
|
"setresgid": SNR_SETRESGID,
|
||||||
|
"getresgid": SNR_GETRESGID,
|
||||||
|
"setfsuid": SNR_SETFSUID,
|
||||||
|
"setfsgid": SNR_SETFSGID,
|
||||||
|
"times": SNR_TIMES,
|
||||||
|
"setpgid": SNR_SETPGID,
|
||||||
|
"getpgid": SNR_GETPGID,
|
||||||
|
"getsid": SNR_GETSID,
|
||||||
|
"setsid": SNR_SETSID,
|
||||||
|
"getgroups": SNR_GETGROUPS,
|
||||||
|
"setgroups": SNR_SETGROUPS,
|
||||||
|
"uname": SNR_UNAME,
|
||||||
|
"sethostname": SNR_SETHOSTNAME,
|
||||||
|
"setdomainname": SNR_SETDOMAINNAME,
|
||||||
|
"getrlimit": SNR_GETRLIMIT,
|
||||||
|
"setrlimit": SNR_SETRLIMIT,
|
||||||
|
"getrusage": SNR_GETRUSAGE,
|
||||||
|
"umask": SNR_UMASK,
|
||||||
|
"prctl": SNR_PRCTL,
|
||||||
|
"getcpu": SNR_GETCPU,
|
||||||
|
"gettimeofday": SNR_GETTIMEOFDAY,
|
||||||
|
"settimeofday": SNR_SETTIMEOFDAY,
|
||||||
|
"adjtimex": SNR_ADJTIMEX,
|
||||||
|
"getpid": SNR_GETPID,
|
||||||
|
"getppid": SNR_GETPPID,
|
||||||
|
"getuid": SNR_GETUID,
|
||||||
|
"geteuid": SNR_GETEUID,
|
||||||
|
"getgid": SNR_GETGID,
|
||||||
|
"getegid": SNR_GETEGID,
|
||||||
|
"gettid": SNR_GETTID,
|
||||||
|
"sysinfo": SNR_SYSINFO,
|
||||||
|
"mq_open": SNR_MQ_OPEN,
|
||||||
|
"mq_unlink": SNR_MQ_UNLINK,
|
||||||
|
"mq_timedsend": SNR_MQ_TIMEDSEND,
|
||||||
|
"mq_timedreceive": SNR_MQ_TIMEDRECEIVE,
|
||||||
|
"mq_notify": SNR_MQ_NOTIFY,
|
||||||
|
"mq_getsetattr": SNR_MQ_GETSETATTR,
|
||||||
|
"msgget": SNR_MSGGET,
|
||||||
|
"msgctl": SNR_MSGCTL,
|
||||||
|
"msgrcv": SNR_MSGRCV,
|
||||||
|
"msgsnd": SNR_MSGSND,
|
||||||
|
"semget": SNR_SEMGET,
|
||||||
|
"semctl": SNR_SEMCTL,
|
||||||
|
"semtimedop": SNR_SEMTIMEDOP,
|
||||||
|
"semop": SNR_SEMOP,
|
||||||
|
"shmget": SNR_SHMGET,
|
||||||
|
"shmctl": SNR_SHMCTL,
|
||||||
|
"shmat": SNR_SHMAT,
|
||||||
|
"shmdt": SNR_SHMDT,
|
||||||
|
"socket": SNR_SOCKET,
|
||||||
|
"socketpair": SNR_SOCKETPAIR,
|
||||||
|
"bind": SNR_BIND,
|
||||||
|
"listen": SNR_LISTEN,
|
||||||
|
"accept": SNR_ACCEPT,
|
||||||
|
"connect": SNR_CONNECT,
|
||||||
|
"getsockname": SNR_GETSOCKNAME,
|
||||||
|
"getpeername": SNR_GETPEERNAME,
|
||||||
|
"sendto": SNR_SENDTO,
|
||||||
|
"recvfrom": SNR_RECVFROM,
|
||||||
|
"setsockopt": SNR_SETSOCKOPT,
|
||||||
|
"getsockopt": SNR_GETSOCKOPT,
|
||||||
|
"shutdown": SNR_SHUTDOWN,
|
||||||
|
"sendmsg": SNR_SENDMSG,
|
||||||
|
"recvmsg": SNR_RECVMSG,
|
||||||
|
"readahead": SNR_READAHEAD,
|
||||||
|
"brk": SNR_BRK,
|
||||||
|
"munmap": SNR_MUNMAP,
|
||||||
|
"mremap": SNR_MREMAP,
|
||||||
|
"add_key": SNR_ADD_KEY,
|
||||||
|
"request_key": SNR_REQUEST_KEY,
|
||||||
|
"keyctl": SNR_KEYCTL,
|
||||||
|
"clone": SNR_CLONE,
|
||||||
|
"execve": SNR_EXECVE,
|
||||||
|
"mmap": SNR_MMAP,
|
||||||
|
"fadvise64": SNR_FADVISE64,
|
||||||
|
"swapon": SNR_SWAPON,
|
||||||
|
"swapoff": SNR_SWAPOFF,
|
||||||
|
"mprotect": SNR_MPROTECT,
|
||||||
|
"msync": SNR_MSYNC,
|
||||||
|
"mlock": SNR_MLOCK,
|
||||||
|
"munlock": SNR_MUNLOCK,
|
||||||
|
"mlockall": SNR_MLOCKALL,
|
||||||
|
"munlockall": SNR_MUNLOCKALL,
|
||||||
|
"mincore": SNR_MINCORE,
|
||||||
|
"madvise": SNR_MADVISE,
|
||||||
|
"remap_file_pages": SNR_REMAP_FILE_PAGES,
|
||||||
|
"mbind": SNR_MBIND,
|
||||||
|
"get_mempolicy": SNR_GET_MEMPOLICY,
|
||||||
|
"set_mempolicy": SNR_SET_MEMPOLICY,
|
||||||
|
"migrate_pages": SNR_MIGRATE_PAGES,
|
||||||
|
"move_pages": SNR_MOVE_PAGES,
|
||||||
|
"rt_tgsigqueueinfo": SNR_RT_TGSIGQUEUEINFO,
|
||||||
|
"perf_event_open": SNR_PERF_EVENT_OPEN,
|
||||||
|
"accept4": SNR_ACCEPT4,
|
||||||
|
"recvmmsg": SNR_RECVMMSG,
|
||||||
|
"wait4": SNR_WAIT4,
|
||||||
|
"prlimit64": SNR_PRLIMIT64,
|
||||||
|
"fanotify_init": SNR_FANOTIFY_INIT,
|
||||||
|
"fanotify_mark": SNR_FANOTIFY_MARK,
|
||||||
|
"name_to_handle_at": SNR_NAME_TO_HANDLE_AT,
|
||||||
|
"open_by_handle_at": SNR_OPEN_BY_HANDLE_AT,
|
||||||
|
"clock_adjtime": SNR_CLOCK_ADJTIME,
|
||||||
|
"syncfs": SNR_SYNCFS,
|
||||||
|
"setns": SNR_SETNS,
|
||||||
|
"sendmmsg": SNR_SENDMMSG,
|
||||||
|
"process_vm_readv": SNR_PROCESS_VM_READV,
|
||||||
|
"process_vm_writev": SNR_PROCESS_VM_WRITEV,
|
||||||
|
"kcmp": SNR_KCMP,
|
||||||
|
"finit_module": SNR_FINIT_MODULE,
|
||||||
|
"sched_setattr": SNR_SCHED_SETATTR,
|
||||||
|
"sched_getattr": SNR_SCHED_GETATTR,
|
||||||
|
"renameat2": SNR_RENAMEAT2,
|
||||||
|
"seccomp": SNR_SECCOMP,
|
||||||
|
"getrandom": SNR_GETRANDOM,
|
||||||
|
"memfd_create": SNR_MEMFD_CREATE,
|
||||||
|
"bpf": SNR_BPF,
|
||||||
|
"execveat": SNR_EXECVEAT,
|
||||||
|
"userfaultfd": SNR_USERFAULTFD,
|
||||||
|
"membarrier": SNR_MEMBARRIER,
|
||||||
|
"mlock2": SNR_MLOCK2,
|
||||||
|
"copy_file_range": SNR_COPY_FILE_RANGE,
|
||||||
|
"preadv2": SNR_PREADV2,
|
||||||
|
"pwritev2": SNR_PWRITEV2,
|
||||||
|
"pkey_mprotect": SNR_PKEY_MPROTECT,
|
||||||
|
"pkey_alloc": SNR_PKEY_ALLOC,
|
||||||
|
"pkey_free": SNR_PKEY_FREE,
|
||||||
|
"statx": SNR_STATX,
|
||||||
|
"io_pgetevents": SNR_IO_PGETEVENTS,
|
||||||
|
"rseq": SNR_RSEQ,
|
||||||
|
"kexec_file_load": SNR_KEXEC_FILE_LOAD,
|
||||||
|
"pidfd_send_signal": SNR_PIDFD_SEND_SIGNAL,
|
||||||
|
"io_uring_setup": SNR_IO_URING_SETUP,
|
||||||
|
"io_uring_enter": SNR_IO_URING_ENTER,
|
||||||
|
"io_uring_register": SNR_IO_URING_REGISTER,
|
||||||
|
"open_tree": SNR_OPEN_TREE,
|
||||||
|
"move_mount": SNR_MOVE_MOUNT,
|
||||||
|
"fsopen": SNR_FSOPEN,
|
||||||
|
"fsconfig": SNR_FSCONFIG,
|
||||||
|
"fsmount": SNR_FSMOUNT,
|
||||||
|
"fspick": SNR_FSPICK,
|
||||||
|
"pidfd_open": SNR_PIDFD_OPEN,
|
||||||
|
"clone3": SNR_CLONE3,
|
||||||
|
"close_range": SNR_CLOSE_RANGE,
|
||||||
|
"openat2": SNR_OPENAT2,
|
||||||
|
"pidfd_getfd": SNR_PIDFD_GETFD,
|
||||||
|
"faccessat2": SNR_FACCESSAT2,
|
||||||
|
"process_madvise": SNR_PROCESS_MADVISE,
|
||||||
|
"epoll_pwait2": SNR_EPOLL_PWAIT2,
|
||||||
|
"mount_setattr": SNR_MOUNT_SETATTR,
|
||||||
|
"quotactl_fd": SNR_QUOTACTL_FD,
|
||||||
|
"landlock_create_ruleset": SNR_LANDLOCK_CREATE_RULESET,
|
||||||
|
"landlock_add_rule": SNR_LANDLOCK_ADD_RULE,
|
||||||
|
"landlock_restrict_self": SNR_LANDLOCK_RESTRICT_SELF,
|
||||||
|
"memfd_secret": SNR_MEMFD_SECRET,
|
||||||
|
"process_mrelease": SNR_PROCESS_MRELEASE,
|
||||||
|
"futex_waitv": SNR_FUTEX_WAITV,
|
||||||
|
"set_mempolicy_home_node": SNR_SET_MEMPOLICY_HOME_NODE,
|
||||||
|
"cachestat": SNR_CACHESTAT,
|
||||||
|
"fchmodat2": SNR_FCHMODAT2,
|
||||||
|
"map_shadow_stack": SNR_MAP_SHADOW_STACK,
|
||||||
|
"futex_wake": SNR_FUTEX_WAKE,
|
||||||
|
"futex_wait": SNR_FUTEX_WAIT,
|
||||||
|
"futex_requeue": SNR_FUTEX_REQUEUE,
|
||||||
|
"statmount": SNR_STATMOUNT,
|
||||||
|
"listmount": SNR_LISTMOUNT,
|
||||||
|
"lsm_get_self_attr": SNR_LSM_GET_SELF_ATTR,
|
||||||
|
"lsm_set_self_attr": SNR_LSM_SET_SELF_ATTR,
|
||||||
|
"lsm_list_modules": SNR_LSM_LIST_MODULES,
|
||||||
|
"mseal": SNR_MSEAL,
|
||||||
|
"setxattrat": SNR_SETXATTRAT,
|
||||||
|
"getxattrat": SNR_GETXATTRAT,
|
||||||
|
"listxattrat": SNR_LISTXATTRAT,
|
||||||
|
"removexattrat": SNR_REMOVEXATTRAT,
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
SYS_USERFAULTFD = 282
|
||||||
|
SYS_MEMBARRIER = 283
|
||||||
|
SYS_MLOCK2 = 284
|
||||||
|
SYS_COPY_FILE_RANGE = 285
|
||||||
|
SYS_PREADV2 = 286
|
||||||
|
SYS_PWRITEV2 = 287
|
||||||
|
SYS_PKEY_MPROTECT = 288
|
||||||
|
SYS_PKEY_ALLOC = 289
|
||||||
|
SYS_PKEY_FREE = 290
|
||||||
|
SYS_STATX = 291
|
||||||
|
SYS_IO_PGETEVENTS = 292
|
||||||
|
SYS_RSEQ = 293
|
||||||
|
SYS_KEXEC_FILE_LOAD = 294
|
||||||
|
SYS_PIDFD_SEND_SIGNAL = 424
|
||||||
|
SYS_IO_URING_SETUP = 425
|
||||||
|
SYS_IO_URING_ENTER = 426
|
||||||
|
SYS_IO_URING_REGISTER = 427
|
||||||
|
SYS_OPEN_TREE = 428
|
||||||
|
SYS_MOVE_MOUNT = 429
|
||||||
|
SYS_FSOPEN = 430
|
||||||
|
SYS_FSCONFIG = 431
|
||||||
|
SYS_FSMOUNT = 432
|
||||||
|
SYS_FSPICK = 433
|
||||||
|
SYS_PIDFD_OPEN = 434
|
||||||
|
SYS_CLONE3 = 435
|
||||||
|
SYS_CLOSE_RANGE = 436
|
||||||
|
SYS_OPENAT2 = 437
|
||||||
|
SYS_PIDFD_GETFD = 438
|
||||||
|
SYS_FACCESSAT2 = 439
|
||||||
|
SYS_PROCESS_MADVISE = 440
|
||||||
|
SYS_EPOLL_PWAIT2 = 441
|
||||||
|
SYS_MOUNT_SETATTR = 442
|
||||||
|
SYS_QUOTACTL_FD = 443
|
||||||
|
SYS_LANDLOCK_CREATE_RULESET = 444
|
||||||
|
SYS_LANDLOCK_ADD_RULE = 445
|
||||||
|
SYS_LANDLOCK_RESTRICT_SELF = 446
|
||||||
|
SYS_MEMFD_SECRET = 447
|
||||||
|
SYS_PROCESS_MRELEASE = 448
|
||||||
|
SYS_FUTEX_WAITV = 449
|
||||||
|
SYS_SET_MEMPOLICY_HOME_NODE = 450
|
||||||
|
SYS_CACHESTAT = 451
|
||||||
|
SYS_FCHMODAT2 = 452
|
||||||
|
SYS_MAP_SHADOW_STACK = 453
|
||||||
|
SYS_FUTEX_WAKE = 454
|
||||||
|
SYS_FUTEX_WAIT = 455
|
||||||
|
SYS_FUTEX_REQUEUE = 456
|
||||||
|
SYS_STATMOUNT = 457
|
||||||
|
SYS_LISTMOUNT = 458
|
||||||
|
SYS_LSM_GET_SELF_ATTR = 459
|
||||||
|
SYS_LSM_SET_SELF_ATTR = 460
|
||||||
|
SYS_LSM_LIST_MODULES = 461
|
||||||
|
SYS_MSEAL = 462
|
||||||
|
SYS_SETXATTRAT = 463
|
||||||
|
SYS_GETXATTRAT = 464
|
||||||
|
SYS_LISTXATTRAT = 465
|
||||||
|
SYS_REMOVEXATTRAT = 466
|
||||||
|
SYS_OPEN_TREE_ATTR = 467
|
||||||
|
SYS_FILE_GETATTR = 468
|
||||||
|
SYS_FILE_SETATTR = 469
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
SNR_IO_SETUP ScmpSyscall = SYS_IO_SETUP
|
||||||
|
SNR_IO_DESTROY ScmpSyscall = SYS_IO_DESTROY
|
||||||
|
SNR_IO_SUBMIT ScmpSyscall = SYS_IO_SUBMIT
|
||||||
|
SNR_IO_CANCEL ScmpSyscall = SYS_IO_CANCEL
|
||||||
|
SNR_IO_GETEVENTS ScmpSyscall = SYS_IO_GETEVENTS
|
||||||
|
SNR_SETXATTR ScmpSyscall = SYS_SETXATTR
|
||||||
|
SNR_LSETXATTR ScmpSyscall = SYS_LSETXATTR
|
||||||
|
SNR_FSETXATTR ScmpSyscall = SYS_FSETXATTR
|
||||||
|
SNR_GETXATTR ScmpSyscall = SYS_GETXATTR
|
||||||
|
SNR_LGETXATTR ScmpSyscall = SYS_LGETXATTR
|
||||||
|
SNR_FGETXATTR ScmpSyscall = SYS_FGETXATTR
|
||||||
|
SNR_LISTXATTR ScmpSyscall = SYS_LISTXATTR
|
||||||
|
SNR_LLISTXATTR ScmpSyscall = SYS_LLISTXATTR
|
||||||
|
SNR_FLISTXATTR ScmpSyscall = SYS_FLISTXATTR
|
||||||
|
SNR_REMOVEXATTR ScmpSyscall = SYS_REMOVEXATTR
|
||||||
|
SNR_LREMOVEXATTR ScmpSyscall = SYS_LREMOVEXATTR
|
||||||
|
SNR_FREMOVEXATTR ScmpSyscall = SYS_FREMOVEXATTR
|
||||||
|
SNR_GETCWD ScmpSyscall = SYS_GETCWD
|
||||||
|
SNR_LOOKUP_DCOOKIE ScmpSyscall = SYS_LOOKUP_DCOOKIE
|
||||||
|
SNR_EVENTFD2 ScmpSyscall = SYS_EVENTFD2
|
||||||
|
SNR_EPOLL_CREATE1 ScmpSyscall = SYS_EPOLL_CREATE1
|
||||||
|
SNR_EPOLL_CTL ScmpSyscall = SYS_EPOLL_CTL
|
||||||
|
SNR_EPOLL_PWAIT ScmpSyscall = SYS_EPOLL_PWAIT
|
||||||
|
SNR_DUP ScmpSyscall = SYS_DUP
|
||||||
|
SNR_DUP3 ScmpSyscall = SYS_DUP3
|
||||||
|
SNR_FCNTL ScmpSyscall = SYS_FCNTL
|
||||||
|
SNR_INOTIFY_INIT1 ScmpSyscall = SYS_INOTIFY_INIT1
|
||||||
|
SNR_INOTIFY_ADD_WATCH ScmpSyscall = SYS_INOTIFY_ADD_WATCH
|
||||||
|
SNR_INOTIFY_RM_WATCH ScmpSyscall = SYS_INOTIFY_RM_WATCH
|
||||||
|
SNR_IOCTL ScmpSyscall = SYS_IOCTL
|
||||||
|
SNR_IOPRIO_SET ScmpSyscall = SYS_IOPRIO_SET
|
||||||
|
SNR_IOPRIO_GET ScmpSyscall = SYS_IOPRIO_GET
|
||||||
|
SNR_FLOCK ScmpSyscall = SYS_FLOCK
|
||||||
|
SNR_MKNODAT ScmpSyscall = SYS_MKNODAT
|
||||||
|
SNR_MKDIRAT ScmpSyscall = SYS_MKDIRAT
|
||||||
|
SNR_UNLINKAT ScmpSyscall = SYS_UNLINKAT
|
||||||
|
SNR_SYMLINKAT ScmpSyscall = SYS_SYMLINKAT
|
||||||
|
SNR_LINKAT ScmpSyscall = SYS_LINKAT
|
||||||
|
SNR_UMOUNT2 ScmpSyscall = SYS_UMOUNT2
|
||||||
|
SNR_MOUNT ScmpSyscall = SYS_MOUNT
|
||||||
|
SNR_PIVOT_ROOT ScmpSyscall = SYS_PIVOT_ROOT
|
||||||
|
SNR_NFSSERVCTL ScmpSyscall = SYS_NFSSERVCTL
|
||||||
|
SNR_STATFS ScmpSyscall = SYS_STATFS
|
||||||
|
SNR_FSTATFS ScmpSyscall = SYS_FSTATFS
|
||||||
|
SNR_TRUNCATE ScmpSyscall = SYS_TRUNCATE
|
||||||
|
SNR_FTRUNCATE ScmpSyscall = SYS_FTRUNCATE
|
||||||
|
SNR_FALLOCATE ScmpSyscall = SYS_FALLOCATE
|
||||||
|
SNR_FACCESSAT ScmpSyscall = SYS_FACCESSAT
|
||||||
|
SNR_CHDIR ScmpSyscall = SYS_CHDIR
|
||||||
|
SNR_FCHDIR ScmpSyscall = SYS_FCHDIR
|
||||||
|
SNR_CHROOT ScmpSyscall = SYS_CHROOT
|
||||||
|
SNR_FCHMOD ScmpSyscall = SYS_FCHMOD
|
||||||
|
SNR_FCHMODAT ScmpSyscall = SYS_FCHMODAT
|
||||||
|
SNR_FCHOWNAT ScmpSyscall = SYS_FCHOWNAT
|
||||||
|
SNR_FCHOWN ScmpSyscall = SYS_FCHOWN
|
||||||
|
SNR_OPENAT ScmpSyscall = SYS_OPENAT
|
||||||
|
SNR_CLOSE ScmpSyscall = SYS_CLOSE
|
||||||
|
SNR_VHANGUP ScmpSyscall = SYS_VHANGUP
|
||||||
|
SNR_PIPE2 ScmpSyscall = SYS_PIPE2
|
||||||
|
SNR_QUOTACTL ScmpSyscall = SYS_QUOTACTL
|
||||||
|
SNR_GETDENTS64 ScmpSyscall = SYS_GETDENTS64
|
||||||
|
SNR_LSEEK ScmpSyscall = SYS_LSEEK
|
||||||
|
SNR_READ ScmpSyscall = SYS_READ
|
||||||
|
SNR_WRITE ScmpSyscall = SYS_WRITE
|
||||||
|
SNR_READV ScmpSyscall = SYS_READV
|
||||||
|
SNR_WRITEV ScmpSyscall = SYS_WRITEV
|
||||||
|
SNR_PREAD64 ScmpSyscall = SYS_PREAD64
|
||||||
|
SNR_PWRITE64 ScmpSyscall = SYS_PWRITE64
|
||||||
|
SNR_PREADV ScmpSyscall = SYS_PREADV
|
||||||
|
SNR_PWRITEV ScmpSyscall = SYS_PWRITEV
|
||||||
|
SNR_SENDFILE ScmpSyscall = SYS_SENDFILE
|
||||||
|
SNR_PSELECT6 ScmpSyscall = SYS_PSELECT6
|
||||||
|
SNR_PPOLL ScmpSyscall = SYS_PPOLL
|
||||||
|
SNR_SIGNALFD4 ScmpSyscall = SYS_SIGNALFD4
|
||||||
|
SNR_VMSPLICE ScmpSyscall = SYS_VMSPLICE
|
||||||
|
SNR_SPLICE ScmpSyscall = SYS_SPLICE
|
||||||
|
SNR_TEE ScmpSyscall = SYS_TEE
|
||||||
|
SNR_READLINKAT ScmpSyscall = SYS_READLINKAT
|
||||||
|
SNR_NEWFSTATAT ScmpSyscall = SYS_NEWFSTATAT
|
||||||
|
SNR_FSTAT ScmpSyscall = SYS_FSTAT
|
||||||
|
SNR_SYNC ScmpSyscall = SYS_SYNC
|
||||||
|
SNR_FSYNC ScmpSyscall = SYS_FSYNC
|
||||||
|
SNR_FDATASYNC ScmpSyscall = SYS_FDATASYNC
|
||||||
|
SNR_SYNC_FILE_RANGE ScmpSyscall = SYS_SYNC_FILE_RANGE
|
||||||
|
SNR_TIMERFD_CREATE ScmpSyscall = SYS_TIMERFD_CREATE
|
||||||
|
SNR_TIMERFD_SETTIME ScmpSyscall = SYS_TIMERFD_SETTIME
|
||||||
|
SNR_TIMERFD_GETTIME ScmpSyscall = SYS_TIMERFD_GETTIME
|
||||||
|
SNR_UTIMENSAT ScmpSyscall = SYS_UTIMENSAT
|
||||||
|
SNR_ACCT ScmpSyscall = SYS_ACCT
|
||||||
|
SNR_CAPGET ScmpSyscall = SYS_CAPGET
|
||||||
|
SNR_CAPSET ScmpSyscall = SYS_CAPSET
|
||||||
|
SNR_PERSONALITY ScmpSyscall = SYS_PERSONALITY
|
||||||
|
SNR_EXIT ScmpSyscall = SYS_EXIT
|
||||||
|
SNR_EXIT_GROUP ScmpSyscall = SYS_EXIT_GROUP
|
||||||
|
SNR_WAITID ScmpSyscall = SYS_WAITID
|
||||||
|
SNR_SET_TID_ADDRESS ScmpSyscall = SYS_SET_TID_ADDRESS
|
||||||
|
SNR_UNSHARE ScmpSyscall = SYS_UNSHARE
|
||||||
|
SNR_FUTEX ScmpSyscall = SYS_FUTEX
|
||||||
|
SNR_SET_ROBUST_LIST ScmpSyscall = SYS_SET_ROBUST_LIST
|
||||||
|
SNR_GET_ROBUST_LIST ScmpSyscall = SYS_GET_ROBUST_LIST
|
||||||
|
SNR_NANOSLEEP ScmpSyscall = SYS_NANOSLEEP
|
||||||
|
SNR_GETITIMER ScmpSyscall = SYS_GETITIMER
|
||||||
|
SNR_SETITIMER ScmpSyscall = SYS_SETITIMER
|
||||||
|
SNR_KEXEC_LOAD ScmpSyscall = SYS_KEXEC_LOAD
|
||||||
|
SNR_INIT_MODULE ScmpSyscall = SYS_INIT_MODULE
|
||||||
|
SNR_DELETE_MODULE ScmpSyscall = SYS_DELETE_MODULE
|
||||||
|
SNR_TIMER_CREATE ScmpSyscall = SYS_TIMER_CREATE
|
||||||
|
SNR_TIMER_GETTIME ScmpSyscall = SYS_TIMER_GETTIME
|
||||||
|
SNR_TIMER_GETOVERRUN ScmpSyscall = SYS_TIMER_GETOVERRUN
|
||||||
|
SNR_TIMER_SETTIME ScmpSyscall = SYS_TIMER_SETTIME
|
||||||
|
SNR_TIMER_DELETE ScmpSyscall = SYS_TIMER_DELETE
|
||||||
|
SNR_CLOCK_SETTIME ScmpSyscall = SYS_CLOCK_SETTIME
|
||||||
|
SNR_CLOCK_GETTIME ScmpSyscall = SYS_CLOCK_GETTIME
|
||||||
|
SNR_CLOCK_GETRES ScmpSyscall = SYS_CLOCK_GETRES
|
||||||
|
SNR_CLOCK_NANOSLEEP ScmpSyscall = SYS_CLOCK_NANOSLEEP
|
||||||
|
SNR_SYSLOG ScmpSyscall = SYS_SYSLOG
|
||||||
|
SNR_PTRACE ScmpSyscall = SYS_PTRACE
|
||||||
|
SNR_SCHED_SETPARAM ScmpSyscall = SYS_SCHED_SETPARAM
|
||||||
|
SNR_SCHED_SETSCHEDULER ScmpSyscall = SYS_SCHED_SETSCHEDULER
|
||||||
|
SNR_SCHED_GETSCHEDULER ScmpSyscall = SYS_SCHED_GETSCHEDULER
|
||||||
|
SNR_SCHED_GETPARAM ScmpSyscall = SYS_SCHED_GETPARAM
|
||||||
|
SNR_SCHED_SETAFFINITY ScmpSyscall = SYS_SCHED_SETAFFINITY
|
||||||
|
SNR_SCHED_GETAFFINITY ScmpSyscall = SYS_SCHED_GETAFFINITY
|
||||||
|
SNR_SCHED_YIELD ScmpSyscall = SYS_SCHED_YIELD
|
||||||
|
SNR_SCHED_GET_PRIORITY_MAX ScmpSyscall = SYS_SCHED_GET_PRIORITY_MAX
|
||||||
|
SNR_SCHED_GET_PRIORITY_MIN ScmpSyscall = SYS_SCHED_GET_PRIORITY_MIN
|
||||||
|
SNR_SCHED_RR_GET_INTERVAL ScmpSyscall = SYS_SCHED_RR_GET_INTERVAL
|
||||||
|
SNR_RESTART_SYSCALL ScmpSyscall = SYS_RESTART_SYSCALL
|
||||||
|
SNR_KILL ScmpSyscall = SYS_KILL
|
||||||
|
SNR_TKILL ScmpSyscall = SYS_TKILL
|
||||||
|
SNR_TGKILL ScmpSyscall = SYS_TGKILL
|
||||||
|
SNR_SIGALTSTACK ScmpSyscall = SYS_SIGALTSTACK
|
||||||
|
SNR_RT_SIGSUSPEND ScmpSyscall = SYS_RT_SIGSUSPEND
|
||||||
|
SNR_RT_SIGACTION ScmpSyscall = SYS_RT_SIGACTION
|
||||||
|
SNR_RT_SIGPROCMASK ScmpSyscall = SYS_RT_SIGPROCMASK
|
||||||
|
SNR_RT_SIGPENDING ScmpSyscall = SYS_RT_SIGPENDING
|
||||||
|
SNR_RT_SIGTIMEDWAIT ScmpSyscall = SYS_RT_SIGTIMEDWAIT
|
||||||
|
SNR_RT_SIGQUEUEINFO ScmpSyscall = SYS_RT_SIGQUEUEINFO
|
||||||
|
SNR_RT_SIGRETURN ScmpSyscall = SYS_RT_SIGRETURN
|
||||||
|
SNR_SETPRIORITY ScmpSyscall = SYS_SETPRIORITY
|
||||||
|
SNR_GETPRIORITY ScmpSyscall = SYS_GETPRIORITY
|
||||||
|
SNR_REBOOT ScmpSyscall = SYS_REBOOT
|
||||||
|
SNR_SETREGID ScmpSyscall = SYS_SETREGID
|
||||||
|
SNR_SETGID ScmpSyscall = SYS_SETGID
|
||||||
|
SNR_SETREUID ScmpSyscall = SYS_SETREUID
|
||||||
|
SNR_SETUID ScmpSyscall = SYS_SETUID
|
||||||
|
SNR_SETRESUID ScmpSyscall = SYS_SETRESUID
|
||||||
|
SNR_GETRESUID ScmpSyscall = SYS_GETRESUID
|
||||||
|
SNR_SETRESGID ScmpSyscall = SYS_SETRESGID
|
||||||
|
SNR_GETRESGID ScmpSyscall = SYS_GETRESGID
|
||||||
|
SNR_SETFSUID ScmpSyscall = SYS_SETFSUID
|
||||||
|
SNR_SETFSGID ScmpSyscall = SYS_SETFSGID
|
||||||
|
SNR_TIMES ScmpSyscall = SYS_TIMES
|
||||||
|
SNR_SETPGID ScmpSyscall = SYS_SETPGID
|
||||||
|
SNR_GETPGID ScmpSyscall = SYS_GETPGID
|
||||||
|
SNR_GETSID ScmpSyscall = SYS_GETSID
|
||||||
|
SNR_SETSID ScmpSyscall = SYS_SETSID
|
||||||
|
SNR_GETGROUPS ScmpSyscall = SYS_GETGROUPS
|
||||||
|
SNR_SETGROUPS ScmpSyscall = SYS_SETGROUPS
|
||||||
|
SNR_UNAME ScmpSyscall = SYS_UNAME
|
||||||
|
SNR_SETHOSTNAME ScmpSyscall = SYS_SETHOSTNAME
|
||||||
|
SNR_SETDOMAINNAME ScmpSyscall = SYS_SETDOMAINNAME
|
||||||
|
SNR_GETRLIMIT ScmpSyscall = SYS_GETRLIMIT
|
||||||
|
SNR_SETRLIMIT ScmpSyscall = SYS_SETRLIMIT
|
||||||
|
SNR_GETRUSAGE ScmpSyscall = SYS_GETRUSAGE
|
||||||
|
SNR_UMASK ScmpSyscall = SYS_UMASK
|
||||||
|
SNR_PRCTL ScmpSyscall = SYS_PRCTL
|
||||||
|
SNR_GETCPU ScmpSyscall = SYS_GETCPU
|
||||||
|
SNR_GETTIMEOFDAY ScmpSyscall = SYS_GETTIMEOFDAY
|
||||||
|
SNR_SETTIMEOFDAY ScmpSyscall = SYS_SETTIMEOFDAY
|
||||||
|
SNR_ADJTIMEX ScmpSyscall = SYS_ADJTIMEX
|
||||||
|
SNR_GETPID ScmpSyscall = SYS_GETPID
|
||||||
|
SNR_GETPPID ScmpSyscall = SYS_GETPPID
|
||||||
|
SNR_GETUID ScmpSyscall = SYS_GETUID
|
||||||
|
SNR_GETEUID ScmpSyscall = SYS_GETEUID
|
||||||
|
SNR_GETGID ScmpSyscall = SYS_GETGID
|
||||||
|
SNR_GETEGID ScmpSyscall = SYS_GETEGID
|
||||||
|
SNR_GETTID ScmpSyscall = SYS_GETTID
|
||||||
|
SNR_SYSINFO ScmpSyscall = SYS_SYSINFO
|
||||||
|
SNR_MQ_OPEN ScmpSyscall = SYS_MQ_OPEN
|
||||||
|
SNR_MQ_UNLINK ScmpSyscall = SYS_MQ_UNLINK
|
||||||
|
SNR_MQ_TIMEDSEND ScmpSyscall = SYS_MQ_TIMEDSEND
|
||||||
|
SNR_MQ_TIMEDRECEIVE ScmpSyscall = SYS_MQ_TIMEDRECEIVE
|
||||||
|
SNR_MQ_NOTIFY ScmpSyscall = SYS_MQ_NOTIFY
|
||||||
|
SNR_MQ_GETSETATTR ScmpSyscall = SYS_MQ_GETSETATTR
|
||||||
|
SNR_MSGGET ScmpSyscall = SYS_MSGGET
|
||||||
|
SNR_MSGCTL ScmpSyscall = SYS_MSGCTL
|
||||||
|
SNR_MSGRCV ScmpSyscall = SYS_MSGRCV
|
||||||
|
SNR_MSGSND ScmpSyscall = SYS_MSGSND
|
||||||
|
SNR_SEMGET ScmpSyscall = SYS_SEMGET
|
||||||
|
SNR_SEMCTL ScmpSyscall = SYS_SEMCTL
|
||||||
|
SNR_SEMTIMEDOP ScmpSyscall = SYS_SEMTIMEDOP
|
||||||
|
SNR_SEMOP ScmpSyscall = SYS_SEMOP
|
||||||
|
SNR_SHMGET ScmpSyscall = SYS_SHMGET
|
||||||
|
SNR_SHMCTL ScmpSyscall = SYS_SHMCTL
|
||||||
|
SNR_SHMAT ScmpSyscall = SYS_SHMAT
|
||||||
|
SNR_SHMDT ScmpSyscall = SYS_SHMDT
|
||||||
|
SNR_SOCKET ScmpSyscall = SYS_SOCKET
|
||||||
|
SNR_SOCKETPAIR ScmpSyscall = SYS_SOCKETPAIR
|
||||||
|
SNR_BIND ScmpSyscall = SYS_BIND
|
||||||
|
SNR_LISTEN ScmpSyscall = SYS_LISTEN
|
||||||
|
SNR_ACCEPT ScmpSyscall = SYS_ACCEPT
|
||||||
|
SNR_CONNECT ScmpSyscall = SYS_CONNECT
|
||||||
|
SNR_GETSOCKNAME ScmpSyscall = SYS_GETSOCKNAME
|
||||||
|
SNR_GETPEERNAME ScmpSyscall = SYS_GETPEERNAME
|
||||||
|
SNR_SENDTO ScmpSyscall = SYS_SENDTO
|
||||||
|
SNR_RECVFROM ScmpSyscall = SYS_RECVFROM
|
||||||
|
SNR_SETSOCKOPT ScmpSyscall = SYS_SETSOCKOPT
|
||||||
|
SNR_GETSOCKOPT ScmpSyscall = SYS_GETSOCKOPT
|
||||||
|
SNR_SHUTDOWN ScmpSyscall = SYS_SHUTDOWN
|
||||||
|
SNR_SENDMSG ScmpSyscall = SYS_SENDMSG
|
||||||
|
SNR_RECVMSG ScmpSyscall = SYS_RECVMSG
|
||||||
|
SNR_READAHEAD ScmpSyscall = SYS_READAHEAD
|
||||||
|
SNR_BRK ScmpSyscall = SYS_BRK
|
||||||
|
SNR_MUNMAP ScmpSyscall = SYS_MUNMAP
|
||||||
|
SNR_MREMAP ScmpSyscall = SYS_MREMAP
|
||||||
|
SNR_ADD_KEY ScmpSyscall = SYS_ADD_KEY
|
||||||
|
SNR_REQUEST_KEY ScmpSyscall = SYS_REQUEST_KEY
|
||||||
|
SNR_KEYCTL ScmpSyscall = SYS_KEYCTL
|
||||||
|
SNR_CLONE ScmpSyscall = SYS_CLONE
|
||||||
|
SNR_EXECVE ScmpSyscall = SYS_EXECVE
|
||||||
|
SNR_MMAP ScmpSyscall = SYS_MMAP
|
||||||
|
SNR_FADVISE64 ScmpSyscall = SYS_FADVISE64
|
||||||
|
SNR_SWAPON ScmpSyscall = SYS_SWAPON
|
||||||
|
SNR_SWAPOFF ScmpSyscall = SYS_SWAPOFF
|
||||||
|
SNR_MPROTECT ScmpSyscall = SYS_MPROTECT
|
||||||
|
SNR_MSYNC ScmpSyscall = SYS_MSYNC
|
||||||
|
SNR_MLOCK ScmpSyscall = SYS_MLOCK
|
||||||
|
SNR_MUNLOCK ScmpSyscall = SYS_MUNLOCK
|
||||||
|
SNR_MLOCKALL ScmpSyscall = SYS_MLOCKALL
|
||||||
|
SNR_MUNLOCKALL ScmpSyscall = SYS_MUNLOCKALL
|
||||||
|
SNR_MINCORE ScmpSyscall = SYS_MINCORE
|
||||||
|
SNR_MADVISE ScmpSyscall = SYS_MADVISE
|
||||||
|
SNR_REMAP_FILE_PAGES ScmpSyscall = SYS_REMAP_FILE_PAGES
|
||||||
|
SNR_MBIND ScmpSyscall = SYS_MBIND
|
||||||
|
SNR_GET_MEMPOLICY ScmpSyscall = SYS_GET_MEMPOLICY
|
||||||
|
SNR_SET_MEMPOLICY ScmpSyscall = SYS_SET_MEMPOLICY
|
||||||
|
SNR_MIGRATE_PAGES ScmpSyscall = SYS_MIGRATE_PAGES
|
||||||
|
SNR_MOVE_PAGES ScmpSyscall = SYS_MOVE_PAGES
|
||||||
|
SNR_RT_TGSIGQUEUEINFO ScmpSyscall = SYS_RT_TGSIGQUEUEINFO
|
||||||
|
SNR_PERF_EVENT_OPEN ScmpSyscall = SYS_PERF_EVENT_OPEN
|
||||||
|
SNR_ACCEPT4 ScmpSyscall = SYS_ACCEPT4
|
||||||
|
SNR_RECVMMSG ScmpSyscall = SYS_RECVMMSG
|
||||||
|
SNR_WAIT4 ScmpSyscall = SYS_WAIT4
|
||||||
|
SNR_PRLIMIT64 ScmpSyscall = SYS_PRLIMIT64
|
||||||
|
SNR_FANOTIFY_INIT ScmpSyscall = SYS_FANOTIFY_INIT
|
||||||
|
SNR_FANOTIFY_MARK ScmpSyscall = SYS_FANOTIFY_MARK
|
||||||
|
SNR_NAME_TO_HANDLE_AT ScmpSyscall = SYS_NAME_TO_HANDLE_AT
|
||||||
|
SNR_OPEN_BY_HANDLE_AT ScmpSyscall = SYS_OPEN_BY_HANDLE_AT
|
||||||
|
SNR_CLOCK_ADJTIME ScmpSyscall = SYS_CLOCK_ADJTIME
|
||||||
|
SNR_SYNCFS ScmpSyscall = SYS_SYNCFS
|
||||||
|
SNR_SETNS ScmpSyscall = SYS_SETNS
|
||||||
|
SNR_SENDMMSG ScmpSyscall = SYS_SENDMMSG
|
||||||
|
SNR_PROCESS_VM_READV ScmpSyscall = SYS_PROCESS_VM_READV
|
||||||
|
SNR_PROCESS_VM_WRITEV ScmpSyscall = SYS_PROCESS_VM_WRITEV
|
||||||
|
SNR_KCMP ScmpSyscall = SYS_KCMP
|
||||||
|
SNR_FINIT_MODULE ScmpSyscall = SYS_FINIT_MODULE
|
||||||
|
SNR_SCHED_SETATTR ScmpSyscall = SYS_SCHED_SETATTR
|
||||||
|
SNR_SCHED_GETATTR ScmpSyscall = SYS_SCHED_GETATTR
|
||||||
|
SNR_RENAMEAT2 ScmpSyscall = SYS_RENAMEAT2
|
||||||
|
SNR_SECCOMP ScmpSyscall = SYS_SECCOMP
|
||||||
|
SNR_GETRANDOM ScmpSyscall = SYS_GETRANDOM
|
||||||
|
SNR_MEMFD_CREATE ScmpSyscall = SYS_MEMFD_CREATE
|
||||||
|
SNR_BPF ScmpSyscall = SYS_BPF
|
||||||
|
SNR_EXECVEAT ScmpSyscall = SYS_EXECVEAT
|
||||||
|
SNR_USERFAULTFD ScmpSyscall = SYS_USERFAULTFD
|
||||||
|
SNR_MEMBARRIER ScmpSyscall = SYS_MEMBARRIER
|
||||||
|
SNR_MLOCK2 ScmpSyscall = SYS_MLOCK2
|
||||||
|
SNR_COPY_FILE_RANGE ScmpSyscall = SYS_COPY_FILE_RANGE
|
||||||
|
SNR_PREADV2 ScmpSyscall = SYS_PREADV2
|
||||||
|
SNR_PWRITEV2 ScmpSyscall = SYS_PWRITEV2
|
||||||
|
SNR_PKEY_MPROTECT ScmpSyscall = SYS_PKEY_MPROTECT
|
||||||
|
SNR_PKEY_ALLOC ScmpSyscall = SYS_PKEY_ALLOC
|
||||||
|
SNR_PKEY_FREE ScmpSyscall = SYS_PKEY_FREE
|
||||||
|
SNR_STATX ScmpSyscall = SYS_STATX
|
||||||
|
SNR_IO_PGETEVENTS ScmpSyscall = SYS_IO_PGETEVENTS
|
||||||
|
SNR_RSEQ ScmpSyscall = SYS_RSEQ
|
||||||
|
SNR_KEXEC_FILE_LOAD ScmpSyscall = SYS_KEXEC_FILE_LOAD
|
||||||
|
SNR_PIDFD_SEND_SIGNAL ScmpSyscall = SYS_PIDFD_SEND_SIGNAL
|
||||||
|
SNR_IO_URING_SETUP ScmpSyscall = SYS_IO_URING_SETUP
|
||||||
|
SNR_IO_URING_ENTER ScmpSyscall = SYS_IO_URING_ENTER
|
||||||
|
SNR_IO_URING_REGISTER ScmpSyscall = SYS_IO_URING_REGISTER
|
||||||
|
SNR_OPEN_TREE ScmpSyscall = SYS_OPEN_TREE
|
||||||
|
SNR_MOVE_MOUNT ScmpSyscall = SYS_MOVE_MOUNT
|
||||||
|
SNR_FSOPEN ScmpSyscall = SYS_FSOPEN
|
||||||
|
SNR_FSCONFIG ScmpSyscall = SYS_FSCONFIG
|
||||||
|
SNR_FSMOUNT ScmpSyscall = SYS_FSMOUNT
|
||||||
|
SNR_FSPICK ScmpSyscall = SYS_FSPICK
|
||||||
|
SNR_PIDFD_OPEN ScmpSyscall = SYS_PIDFD_OPEN
|
||||||
|
SNR_CLONE3 ScmpSyscall = SYS_CLONE3
|
||||||
|
SNR_CLOSE_RANGE ScmpSyscall = SYS_CLOSE_RANGE
|
||||||
|
SNR_OPENAT2 ScmpSyscall = SYS_OPENAT2
|
||||||
|
SNR_PIDFD_GETFD ScmpSyscall = SYS_PIDFD_GETFD
|
||||||
|
SNR_FACCESSAT2 ScmpSyscall = SYS_FACCESSAT2
|
||||||
|
SNR_PROCESS_MADVISE ScmpSyscall = SYS_PROCESS_MADVISE
|
||||||
|
SNR_EPOLL_PWAIT2 ScmpSyscall = SYS_EPOLL_PWAIT2
|
||||||
|
SNR_MOUNT_SETATTR ScmpSyscall = SYS_MOUNT_SETATTR
|
||||||
|
SNR_QUOTACTL_FD ScmpSyscall = SYS_QUOTACTL_FD
|
||||||
|
SNR_LANDLOCK_CREATE_RULESET ScmpSyscall = SYS_LANDLOCK_CREATE_RULESET
|
||||||
|
SNR_LANDLOCK_ADD_RULE ScmpSyscall = SYS_LANDLOCK_ADD_RULE
|
||||||
|
SNR_LANDLOCK_RESTRICT_SELF ScmpSyscall = SYS_LANDLOCK_RESTRICT_SELF
|
||||||
|
SNR_MEMFD_SECRET ScmpSyscall = SYS_MEMFD_SECRET
|
||||||
|
SNR_PROCESS_MRELEASE ScmpSyscall = SYS_PROCESS_MRELEASE
|
||||||
|
SNR_FUTEX_WAITV ScmpSyscall = SYS_FUTEX_WAITV
|
||||||
|
SNR_SET_MEMPOLICY_HOME_NODE ScmpSyscall = SYS_SET_MEMPOLICY_HOME_NODE
|
||||||
|
SNR_CACHESTAT ScmpSyscall = SYS_CACHESTAT
|
||||||
|
SNR_FCHMODAT2 ScmpSyscall = SYS_FCHMODAT2
|
||||||
|
SNR_MAP_SHADOW_STACK ScmpSyscall = SYS_MAP_SHADOW_STACK
|
||||||
|
SNR_FUTEX_WAKE ScmpSyscall = SYS_FUTEX_WAKE
|
||||||
|
SNR_FUTEX_WAIT ScmpSyscall = SYS_FUTEX_WAIT
|
||||||
|
SNR_FUTEX_REQUEUE ScmpSyscall = SYS_FUTEX_REQUEUE
|
||||||
|
SNR_STATMOUNT ScmpSyscall = SYS_STATMOUNT
|
||||||
|
SNR_LISTMOUNT ScmpSyscall = SYS_LISTMOUNT
|
||||||
|
SNR_LSM_GET_SELF_ATTR ScmpSyscall = SYS_LSM_GET_SELF_ATTR
|
||||||
|
SNR_LSM_SET_SELF_ATTR ScmpSyscall = SYS_LSM_SET_SELF_ATTR
|
||||||
|
SNR_LSM_LIST_MODULES ScmpSyscall = SYS_LSM_LIST_MODULES
|
||||||
|
SNR_MSEAL ScmpSyscall = SYS_MSEAL
|
||||||
|
SNR_SETXATTRAT ScmpSyscall = SYS_SETXATTRAT
|
||||||
|
SNR_GETXATTRAT ScmpSyscall = SYS_GETXATTRAT
|
||||||
|
SNR_LISTXATTRAT ScmpSyscall = SYS_LISTXATTRAT
|
||||||
|
SNR_REMOVEXATTRAT ScmpSyscall = SYS_REMOVEXATTRAT
|
||||||
|
SNR_OPEN_TREE_ATTR ScmpSyscall = SYS_OPEN_TREE_ATTR
|
||||||
|
SNR_FILE_GETATTR ScmpSyscall = SYS_FILE_GETATTR
|
||||||
|
SNR_FILE_SETATTR ScmpSyscall = SYS_FILE_SETATTR
|
||||||
|
)
|
||||||
8
container/std/types.go
Normal file
8
container/std/types.go
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
package std
|
||||||
|
|
||||||
|
type (
|
||||||
|
// Uint is equivalent to C.uint.
|
||||||
|
Uint uint32
|
||||||
|
// Int is equivalent to C.int.
|
||||||
|
Int int32
|
||||||
|
)
|
||||||
@@ -3,6 +3,8 @@ package container
|
|||||||
import (
|
import (
|
||||||
. "syscall"
|
. "syscall"
|
||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
|
"hakurei.app/container/std"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Prctl manipulates various aspects of the behavior of the calling thread or process.
|
// Prctl manipulates various aspects of the behavior of the calling thread or process.
|
||||||
@@ -41,6 +43,49 @@ func Isatty(fd int) bool {
|
|||||||
return r == 0
|
return r == 0
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// include/uapi/linux/sched.h
|
||||||
|
const (
|
||||||
|
SCHED_NORMAL = iota
|
||||||
|
SCHED_FIFO
|
||||||
|
SCHED_RR
|
||||||
|
SCHED_BATCH
|
||||||
|
_ // SCHED_ISO: reserved but not implemented yet
|
||||||
|
SCHED_IDLE
|
||||||
|
SCHED_DEADLINE
|
||||||
|
SCHED_EXT
|
||||||
|
)
|
||||||
|
|
||||||
|
// schedParam is equivalent to struct sched_param from include/linux/sched.h.
|
||||||
|
type schedParam struct {
|
||||||
|
// sched_priority
|
||||||
|
priority std.Int
|
||||||
|
}
|
||||||
|
|
||||||
|
// schedSetscheduler sets both the scheduling policy and parameters for the
|
||||||
|
// thread whose ID is specified in tid. If tid equals zero, the scheduling
|
||||||
|
// policy and parameters of the calling thread will be set.
|
||||||
|
//
|
||||||
|
// This function is unexported because it is [very subtle to use correctly]. The
|
||||||
|
// function signature in libc is misleading: pid actually refers to a thread ID.
|
||||||
|
// The glibc wrapper for this system call ignores this semantic and exposes
|
||||||
|
// this counterintuitive behaviour.
|
||||||
|
//
|
||||||
|
// This function is only called from the container setup thread. Do not reuse
|
||||||
|
// this if you do not have something similar in place!
|
||||||
|
//
|
||||||
|
// [very subtle to use correctly]: https://www.openwall.com/lists/musl/2016/03/01/4
|
||||||
|
func schedSetscheduler(tid, policy int, param *schedParam) error {
|
||||||
|
if r, _, errno := Syscall(
|
||||||
|
SYS_SCHED_SETSCHEDULER,
|
||||||
|
uintptr(tid),
|
||||||
|
uintptr(policy),
|
||||||
|
uintptr(unsafe.Pointer(param)),
|
||||||
|
); r < 0 {
|
||||||
|
return errno
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// IgnoringEINTR makes a function call and repeats it if it returns an
|
// IgnoringEINTR makes a function call and repeats it if it returns an
|
||||||
// EINTR error. This appears to be required even though we install all
|
// EINTR error. This appears to be required even though we install all
|
||||||
// signal handlers with SA_RESTART: see #22838, #38033, #38836, #40846.
|
// signal handlers with SA_RESTART: see #22838, #38033, #38836, #40846.
|
||||||
|
|||||||
@@ -2,6 +2,8 @@ package vfs
|
|||||||
|
|
||||||
import "strings"
|
import "strings"
|
||||||
|
|
||||||
|
// Unmangle reverses mangling of strings done by the kernel. Its behaviour is
|
||||||
|
// consistent with the equivalent function in util-linux.
|
||||||
func Unmangle(s string) string {
|
func Unmangle(s string) string {
|
||||||
if !strings.ContainsRune(s, '\\') {
|
if !strings.ContainsRune(s, '\\') {
|
||||||
return s
|
return s
|
||||||
|
|||||||
@@ -24,6 +24,7 @@ var (
|
|||||||
ErrMountInfoSep = errors.New("bad optional fields separator")
|
ErrMountInfoSep = errors.New("bad optional fields separator")
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// A DecoderError describes a nonrecoverable error decoding a mountinfo stream.
|
||||||
type DecoderError struct {
|
type DecoderError struct {
|
||||||
Op string
|
Op string
|
||||||
Line int
|
Line int
|
||||||
@@ -51,7 +52,8 @@ func (e *DecoderError) Error() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type (
|
type (
|
||||||
// A MountInfoDecoder reads and decodes proc_pid_mountinfo(5) entries from an input stream.
|
// A MountInfoDecoder reads and decodes proc_pid_mountinfo(5) entries from
|
||||||
|
// an input stream.
|
||||||
MountInfoDecoder struct {
|
MountInfoDecoder struct {
|
||||||
s *bufio.Scanner
|
s *bufio.Scanner
|
||||||
m *MountInfo
|
m *MountInfo
|
||||||
@@ -72,13 +74,16 @@ type (
|
|||||||
MountInfoEntry struct {
|
MountInfoEntry struct {
|
||||||
// mount ID: a unique ID for the mount (may be reused after umount(2)).
|
// mount ID: a unique ID for the mount (may be reused after umount(2)).
|
||||||
ID int `json:"id"`
|
ID int `json:"id"`
|
||||||
// parent ID: the ID of the parent mount (or of self for the root of this mount namespace's mount tree).
|
// parent ID: the ID of the parent mount (or of self for the root of
|
||||||
|
// this mount namespace's mount tree).
|
||||||
Parent int `json:"parent"`
|
Parent int `json:"parent"`
|
||||||
// major:minor: the value of st_dev for files on this filesystem (see stat(2)).
|
// major:minor: the value of st_dev for files on this filesystem (see stat(2)).
|
||||||
Devno DevT `json:"devno"`
|
Devno DevT `json:"devno"`
|
||||||
// root: the pathname of the directory in the filesystem which forms the root of this mount.
|
// root: the pathname of the directory in the filesystem which forms the
|
||||||
|
// root of this mount.
|
||||||
Root string `json:"root"`
|
Root string `json:"root"`
|
||||||
// mount point: the pathname of the mount point relative to the process's root directory.
|
// mount point: the pathname of the mount point relative to the
|
||||||
|
// process's root directory.
|
||||||
Target string `json:"target"`
|
Target string `json:"target"`
|
||||||
// mount options: per-mount options (see mount(2)).
|
// mount options: per-mount options (see mount(2)).
|
||||||
VfsOptstr string `json:"vfs_optstr"`
|
VfsOptstr string `json:"vfs_optstr"`
|
||||||
@@ -126,7 +131,8 @@ func (e *MountInfoEntry) Flags() (flags uintptr, unmatched []string) {
|
|||||||
|
|
||||||
// NewMountInfoDecoder returns a new decoder that reads from r.
|
// NewMountInfoDecoder returns a new decoder that reads from r.
|
||||||
//
|
//
|
||||||
// The decoder introduces its own buffering and may read data from r beyond the mountinfo entries requested.
|
// The decoder introduces its own buffering and may read data from r beyond the
|
||||||
|
// mountinfo entries requested.
|
||||||
func NewMountInfoDecoder(r io.Reader) *MountInfoDecoder {
|
func NewMountInfoDecoder(r io.Reader) *MountInfoDecoder {
|
||||||
return &MountInfoDecoder{s: bufio.NewScanner(r)}
|
return &MountInfoDecoder{s: bufio.NewScanner(r)}
|
||||||
}
|
}
|
||||||
@@ -271,6 +277,8 @@ func parseMountInfoLine(s string, ent *MountInfoEntry) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// EqualWithIgnore compares to [MountInfoEntry] values, ignoring fields that
|
||||||
|
// compare equal to ignore.
|
||||||
func (e *MountInfoEntry) EqualWithIgnore(want *MountInfoEntry, ignore string) bool {
|
func (e *MountInfoEntry) EqualWithIgnore(want *MountInfoEntry, ignore string) bool {
|
||||||
return (e.ID == want.ID || want.ID == -1) &&
|
return (e.ID == want.ID || want.ID == -1) &&
|
||||||
(e.Parent == want.Parent || want.Parent == -1) &&
|
(e.Parent == want.Parent || want.Parent == -1) &&
|
||||||
@@ -284,6 +292,8 @@ func (e *MountInfoEntry) EqualWithIgnore(want *MountInfoEntry, ignore string) bo
|
|||||||
(e.FsOptstr == want.FsOptstr || want.FsOptstr == ignore)
|
(e.FsOptstr == want.FsOptstr || want.FsOptstr == ignore)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// String returns a user-facing representation of a [MountInfoEntry]. It fits
|
||||||
|
// roughly into the mountinfo format, but without mangling.
|
||||||
func (e *MountInfoEntry) String() string {
|
func (e *MountInfoEntry) String() string {
|
||||||
return fmt.Sprintf("%d %d %d:%d %s %s %s %s %s %s %s",
|
return fmt.Sprintf("%d %d %d:%d %s %s %s %s %s %s %s",
|
||||||
e.ID, e.Parent, e.Devno[0], e.Devno[1], e.Root, e.Target, e.VfsOptstr,
|
e.ID, e.Parent, e.Devno[0], e.Devno[1], e.Root, e.Target, e.VfsOptstr,
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// UnfoldTargetError is a pathname that never appeared in a mount hierarchy.
|
||||||
type UnfoldTargetError string
|
type UnfoldTargetError string
|
||||||
|
|
||||||
func (e UnfoldTargetError) Error() string {
|
func (e UnfoldTargetError) Error() string {
|
||||||
@@ -27,6 +28,7 @@ func (n *MountInfoNode) Collective() iter.Seq[*MountInfoNode] {
|
|||||||
return func(yield func(*MountInfoNode) bool) { n.visit(yield) }
|
return func(yield func(*MountInfoNode) bool) { n.visit(yield) }
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// visit recursively visits all visible mountinfo nodes.
|
||||||
func (n *MountInfoNode) visit(yield func(*MountInfoNode) bool) bool {
|
func (n *MountInfoNode) visit(yield func(*MountInfoNode) bool) bool {
|
||||||
if !n.Covered && !yield(n) {
|
if !n.Covered && !yield(n) {
|
||||||
return false
|
return false
|
||||||
|
|||||||
23
dist/release.sh
vendored
23
dist/release.sh
vendored
@@ -1,20 +1,31 @@
|
|||||||
#!/bin/sh -e
|
#!/bin/sh -e
|
||||||
cd "$(dirname -- "$0")/.."
|
cd "$(dirname -- "$0")/.."
|
||||||
VERSION="${HAKUREI_VERSION:-untagged}"
|
VERSION="${HAKUREI_VERSION:-untagged}"
|
||||||
pname="hakurei-${VERSION}"
|
pname="hakurei-${VERSION}-$(go env GOARCH)"
|
||||||
out="dist/${pname}"
|
out="${DESTDIR:-dist}/${pname}"
|
||||||
|
|
||||||
|
echo '# Preparing distribution files.'
|
||||||
mkdir -p "${out}"
|
mkdir -p "${out}"
|
||||||
cp -v "README.md" "dist/hsurc.default" "dist/install.sh" "${out}"
|
cp -v "README.md" "dist/hsurc.default" "dist/install.sh" "${out}"
|
||||||
cp -rv "dist/comp" "${out}"
|
cp -rv "dist/comp" "${out}"
|
||||||
|
echo
|
||||||
|
|
||||||
|
echo '# Building hakurei.'
|
||||||
go generate ./...
|
go generate ./...
|
||||||
go build -trimpath -v -o "${out}/bin/" -ldflags "-s -w -buildid= -extldflags '-static'
|
go build -trimpath -v -o "${out}/bin/" -ldflags "-s -w
|
||||||
|
-buildid= -linkmode external -extldflags=-static
|
||||||
-X hakurei.app/internal/info.buildVersion=${VERSION}
|
-X hakurei.app/internal/info.buildVersion=${VERSION}
|
||||||
-X hakurei.app/internal/info.hakureiPath=/usr/bin/hakurei
|
-X hakurei.app/internal/info.hakureiPath=/usr/bin/hakurei
|
||||||
-X hakurei.app/internal/info.hsuPath=/usr/bin/hsu
|
-X hakurei.app/internal/info.hsuPath=/usr/bin/hsu
|
||||||
-X main.hakureiPath=/usr/bin/hakurei" ./...
|
-X main.hakureiPath=/usr/bin/hakurei" ./...
|
||||||
|
echo
|
||||||
|
|
||||||
rm -f "./${out}.tar.gz" && tar -C dist -czf "${out}.tar.gz" "${pname}"
|
echo '# Testing hakurei.'
|
||||||
rm -rf "./${out}"
|
go test -ldflags='-buildid= -linkmode external -extldflags=-static' ./...
|
||||||
(cd dist && sha512sum "${pname}.tar.gz" > "${pname}.tar.gz.sha512")
|
echo
|
||||||
|
|
||||||
|
echo '# Creating distribution.'
|
||||||
|
rm -f "${out}.tar.gz" && tar -C "${out}/.." -vczf "${out}.tar.gz" "${pname}"
|
||||||
|
rm -rf "${out}"
|
||||||
|
(cd "${out}/.." && sha512sum "${pname}.tar.gz" > "${pname}.tar.gz.sha512")
|
||||||
|
echo
|
||||||
|
|||||||
45
flake.nix
45
flake.nix
@@ -29,20 +29,6 @@
|
|||||||
{
|
{
|
||||||
nixosModules.hakurei = import ./nixos.nix self.packages;
|
nixosModules.hakurei = import ./nixos.nix self.packages;
|
||||||
|
|
||||||
buildPackage = forAllSystems (
|
|
||||||
system:
|
|
||||||
nixpkgsFor.${system}.callPackage (
|
|
||||||
import ./cmd/hpkg/build.nix {
|
|
||||||
inherit
|
|
||||||
nixpkgsFor
|
|
||||||
system
|
|
||||||
nixpkgs
|
|
||||||
home-manager
|
|
||||||
;
|
|
||||||
}
|
|
||||||
)
|
|
||||||
);
|
|
||||||
|
|
||||||
checks = forAllSystems (
|
checks = forAllSystems (
|
||||||
system:
|
system:
|
||||||
let
|
let
|
||||||
@@ -71,8 +57,6 @@
|
|||||||
|
|
||||||
sharefs = callPackage ./cmd/sharefs/test { inherit system self; };
|
sharefs = callPackage ./cmd/sharefs/test { inherit system self; };
|
||||||
|
|
||||||
hpkg = callPackage ./cmd/hpkg/test { inherit system self; };
|
|
||||||
|
|
||||||
formatting = runCommandLocal "check-formatting" { nativeBuildInputs = [ nixfmt-rfc-style ]; } ''
|
formatting = runCommandLocal "check-formatting" { nativeBuildInputs = [ nixfmt-rfc-style ]; } ''
|
||||||
cd ${./.}
|
cd ${./.}
|
||||||
|
|
||||||
@@ -127,11 +111,6 @@
|
|||||||
glibc
|
glibc
|
||||||
xdg-dbus-proxy
|
xdg-dbus-proxy
|
||||||
|
|
||||||
# hpkg
|
|
||||||
zstd
|
|
||||||
gnutar
|
|
||||||
coreutils
|
|
||||||
|
|
||||||
# for check
|
# for check
|
||||||
util-linux
|
util-linux
|
||||||
nettools
|
nettools
|
||||||
@@ -143,18 +122,26 @@
|
|||||||
"bin/mount.fuse.sharefs" = "${hakurei}/libexec/sharefs";
|
"bin/mount.fuse.sharefs" = "${hakurei}/libexec/sharefs";
|
||||||
};
|
};
|
||||||
|
|
||||||
dist = pkgs.runCommand "${hakurei.name}-dist" { buildInputs = hakurei.targetPkgs ++ [ pkgs.pkgsStatic.musl ]; } ''
|
dist =
|
||||||
# go requires XDG_CACHE_HOME for the build cache
|
pkgs.runCommand "${hakurei.name}-dist"
|
||||||
export XDG_CACHE_HOME="$(mktemp -d)"
|
{
|
||||||
|
buildInputs = hakurei.targetPkgs ++ [
|
||||||
# get a different workdir as go does not like /build
|
pkgs.pkgsStatic.musl
|
||||||
|
];
|
||||||
|
}
|
||||||
|
''
|
||||||
cd $(mktemp -d) \
|
cd $(mktemp -d) \
|
||||||
&& cp -r ${hakurei.src}/. . \
|
&& cp -r ${hakurei.src}/. . \
|
||||||
&& chmod +w cmd && cp -r ${hsu.src}/. cmd/hsu/ \
|
&& chmod +w cmd && cp -r ${hsu.src}/. cmd/hsu/ \
|
||||||
&& chmod -R +w .
|
&& chmod -R +w .
|
||||||
|
|
||||||
export HAKUREI_VERSION="v${hakurei.version}"
|
CC="musl-clang -O3 -Werror -Qunused-arguments" \
|
||||||
CC="clang -O3 -Werror" ./dist/release.sh && mkdir $out && cp -v "dist/hakurei-$HAKUREI_VERSION.tar.gz"* $out
|
GOCACHE="$(mktemp -d)" \
|
||||||
|
HAKUREI_TEST_SKIP_ACL=1 \
|
||||||
|
PATH="${pkgs.pkgsStatic.musl.bin}/bin:$PATH" \
|
||||||
|
DESTDIR="$out" \
|
||||||
|
HAKUREI_VERSION="v${hakurei.version}" \
|
||||||
|
./dist/release.sh
|
||||||
'';
|
'';
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
@@ -211,7 +198,7 @@
|
|||||||
./test/interactive/trace.nix
|
./test/interactive/trace.nix
|
||||||
|
|
||||||
self.nixosModules.hakurei
|
self.nixosModules.hakurei
|
||||||
self.inputs.home-manager.nixosModules.home-manager
|
home-manager.nixosModules.home-manager
|
||||||
];
|
];
|
||||||
};
|
};
|
||||||
in
|
in
|
||||||
|
|||||||
@@ -24,7 +24,7 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestUpdate(t *testing.T) {
|
func TestUpdate(t *testing.T) {
|
||||||
if os.Getenv("GO_TEST_SKIP_ACL") == "1" {
|
if os.Getenv("HAKUREI_TEST_SKIP_ACL") == "1" {
|
||||||
t.Skip("acl test skipped")
|
t.Skip("acl test skipped")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
0
internal/azalea/azalea.bnf
Normal file
0
internal/azalea/azalea.bnf
Normal file
69
internal/azalea/azalea.go
Normal file
69
internal/azalea/azalea.go
Normal file
@@ -0,0 +1,69 @@
|
|||||||
|
//go:generate gocc -a azalea.bnf
|
||||||
|
package azalea
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"io/fs"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"hakurei.app/container/check"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Parser struct {
|
||||||
|
Generator
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewParser(gen Generator) *Parser {
|
||||||
|
return &Parser{
|
||||||
|
Generator: gen,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func (p Parser) Initialise() {
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p Parser) Consume(ns string, file io.Reader) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConsumeDir walks a directory and consumes all Azalea source files within it and all its subdirectories, as long as they end with the .az extension.
|
||||||
|
func (p Parser) ConsumeDir(dir *check.Absolute) error {
|
||||||
|
ds := dir.String()
|
||||||
|
return filepath.WalkDir(ds, func(path string, d fs.DirEntry, err error) (e error) {
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if d.IsDir() || !strings.HasSuffix(d.Name(), ".az") {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
rel, e := filepath.Rel(ds, path)
|
||||||
|
ns := strings.TrimSuffix(rel, ".az")
|
||||||
|
f, e := os.Open(path)
|
||||||
|
return p.Consume(ns, f)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConsumeAll consumes all provided readers as Azalea source code, each given the namespace `r%d` where `%d` is the index of the reader in the provided arguments.
|
||||||
|
func (p Parser) ConsumeAll(in ...io.Reader) error {
|
||||||
|
for i, r := range in {
|
||||||
|
err := p.Consume("r"+strconv.FormatInt(int64(i), 10), r)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConsumeStrings consumes all provided strings as Azalea source code, each given the namespace `s%d` where `%d` is the index of the string in the provided arugments.
|
||||||
|
func (p Parser) ConsumeStrings(in ...string) error {
|
||||||
|
for i, s := range in {
|
||||||
|
err := p.Consume("s"+strconv.FormatInt(int64(i), 10), strings.NewReader(s))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
36
internal/azalea/generator.go
Normal file
36
internal/azalea/generator.go
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
package azalea
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Generator interface {
|
||||||
|
Finalise() (error, io.Writer)
|
||||||
|
}
|
||||||
|
|
||||||
|
type JsonGenerator struct {
|
||||||
|
t any
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewJsonGenerator[T any]() JsonGenerator {
|
||||||
|
t := new(T)
|
||||||
|
|
||||||
|
return JsonGenerator{
|
||||||
|
t,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (j *JsonGenerator) Finalise() (error, io.Writer) {
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
type PkgIRGenerator struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewPkgIRGenerator() PkgIRGenerator {
|
||||||
|
return PkgIRGenerator{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PkgIRGenerator) Finalise() (error, io.Writer) {
|
||||||
|
|
||||||
|
}
|
||||||
@@ -8,7 +8,6 @@
|
|||||||
package filelock
|
package filelock
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"io/fs"
|
"io/fs"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -74,10 +73,3 @@ func (lt lockType) String() string {
|
|||||||
return "Unlock"
|
return "Unlock"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsNotSupported returns a boolean indicating whether the error is known to
|
|
||||||
// report that a function is not supported (possibly for a specific input).
|
|
||||||
// It is satisfied by errors.ErrUnsupported as well as some syscall errors.
|
|
||||||
func IsNotSupported(err error) bool {
|
|
||||||
return errors.Is(err, errors.ErrUnsupported)
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -14,7 +14,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"hakurei.app/container"
|
"hakurei.app/container/fhs"
|
||||||
"hakurei.app/internal/lockedfile/internal/filelock"
|
"hakurei.app/internal/lockedfile/internal/filelock"
|
||||||
"hakurei.app/internal/lockedfile/internal/testexec"
|
"hakurei.app/internal/lockedfile/internal/testexec"
|
||||||
)
|
)
|
||||||
@@ -197,7 +197,7 @@ func TestLockNotDroppedByExecCommand(t *testing.T) {
|
|||||||
// Some kinds of file locks are dropped when a duplicated or forked file
|
// Some kinds of file locks are dropped when a duplicated or forked file
|
||||||
// descriptor is unlocked. Double-check that the approach used by os/exec does
|
// descriptor is unlocked. Double-check that the approach used by os/exec does
|
||||||
// not accidentally drop locks.
|
// not accidentally drop locks.
|
||||||
cmd := testexec.CommandContext(t, t.Context(), container.MustExecutable(nil), "-test.run=^$")
|
cmd := testexec.CommandContext(t, t.Context(), fhs.ProcSelfExe, "-test.run=^$")
|
||||||
if err := cmd.Run(); err != nil {
|
if err := cmd.Run(); err != nil {
|
||||||
t.Fatalf("exec failed: %v", err)
|
t.Fatalf("exec failed: %v", err)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -94,6 +94,11 @@ func (f *File) Close() error {
|
|||||||
|
|
||||||
err := closeFile(f.osFile.File)
|
err := closeFile(f.osFile.File)
|
||||||
f.cleanup.Stop()
|
f.cleanup.Stop()
|
||||||
|
// f may be dead at the moment after we access f.cleanup,
|
||||||
|
// so the cleanup can fire before Stop completes. Keep f
|
||||||
|
// alive while we call Stop. See the documentation for
|
||||||
|
// runtime.Cleanup.Stop.
|
||||||
|
runtime.KeepAlive(f)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"hakurei.app/container"
|
"hakurei.app/container/fhs"
|
||||||
"hakurei.app/internal/lockedfile"
|
"hakurei.app/internal/lockedfile"
|
||||||
"hakurei.app/internal/lockedfile/internal/testexec"
|
"hakurei.app/internal/lockedfile/internal/testexec"
|
||||||
)
|
)
|
||||||
@@ -215,7 +215,7 @@ func TestSpuriousEDEADLK(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
cmd := testexec.CommandContext(t, t.Context(), container.MustExecutable(nil), "-test.run=^"+t.Name()+"$")
|
cmd := testexec.CommandContext(t, t.Context(), fhs.ProcSelfExe, "-test.run=^"+t.Name()+"$")
|
||||||
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=%s", dirVar, dir))
|
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=%s", dirVar, dir))
|
||||||
|
|
||||||
qDone := make(chan struct{})
|
qDone := make(chan struct{})
|
||||||
|
|||||||
@@ -28,14 +28,6 @@ type FlatEntry struct {
|
|||||||
| data []byte |
|
| data []byte |
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// wordSize is the boundary which binary segments are always aligned to.
|
|
||||||
const wordSize = 8
|
|
||||||
|
|
||||||
// alignSize returns the padded size for aligning sz.
|
|
||||||
func alignSize(sz int) int {
|
|
||||||
return sz + (wordSize-(sz)%wordSize)%wordSize
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encode encodes the entry for transmission or hashing.
|
// Encode encodes the entry for transmission or hashing.
|
||||||
func (ent *FlatEntry) Encode(w io.Writer) (n int, err error) {
|
func (ent *FlatEntry) Encode(w io.Writer) (n int, err error) {
|
||||||
pPathSize := alignSize(len(ent.Path))
|
pPathSize := alignSize(len(ent.Path))
|
||||||
|
|||||||
@@ -76,7 +76,7 @@ func TestFlatten(t *testing.T) {
|
|||||||
"checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU": {Mode: 0400, Data: []byte("\x7f\xe1\x69\xa2\xdd\x63\x96\x26\x83\x79\x61\x8b\xf0\x3f\xd5\x16\x9a\x39\x3a\xdb\xcf\xb1\xbc\x8d\x33\xff\x75\xee\x62\x56\xa9\xf0\x27\xac\x13\x94\x69")},
|
"checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU": {Mode: 0400, Data: []byte("\x7f\xe1\x69\xa2\xdd\x63\x96\x26\x83\x79\x61\x8b\xf0\x3f\xd5\x16\x9a\x39\x3a\xdb\xcf\xb1\xbc\x8d\x33\xff\x75\xee\x62\x56\xa9\xf0\x27\xac\x13\x94\x69")},
|
||||||
|
|
||||||
"identifier": {Mode: fs.ModeDir | 0700},
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
"identifier/NqVORkT6L9HX6Za7kT2zcibY10qFqBaxEjPiYFrBQX-ZFr3yxCzJxbKOP0zVjeWb": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU")},
|
"identifier/oM-2pUlk-mOxK1t3aMWZer69UdOQlAXiAgMrpZ1476VoOqpYVP1aGFS9_HYy-D8_": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU")},
|
||||||
|
|
||||||
"work": {Mode: fs.ModeDir | 0700},
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
}, []pkg.FlatEntry{
|
}, []pkg.FlatEntry{
|
||||||
@@ -86,10 +86,10 @@ func TestFlatten(t *testing.T) {
|
|||||||
{Mode: 0400, Path: "checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU", Data: []byte("\x7f\xe1\x69\xa2\xdd\x63\x96\x26\x83\x79\x61\x8b\xf0\x3f\xd5\x16\x9a\x39\x3a\xdb\xcf\xb1\xbc\x8d\x33\xff\x75\xee\x62\x56\xa9\xf0\x27\xac\x13\x94\x69")},
|
{Mode: 0400, Path: "checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU", Data: []byte("\x7f\xe1\x69\xa2\xdd\x63\x96\x26\x83\x79\x61\x8b\xf0\x3f\xd5\x16\x9a\x39\x3a\xdb\xcf\xb1\xbc\x8d\x33\xff\x75\xee\x62\x56\xa9\xf0\x27\xac\x13\x94\x69")},
|
||||||
|
|
||||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/NqVORkT6L9HX6Za7kT2zcibY10qFqBaxEjPiYFrBQX-ZFr3yxCzJxbKOP0zVjeWb", Data: []byte("../checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU")},
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/oM-2pUlk-mOxK1t3aMWZer69UdOQlAXiAgMrpZ1476VoOqpYVP1aGFS9_HYy-D8_", Data: []byte("../checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU")},
|
||||||
|
|
||||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
}, pkg.MustDecode("bqtn69RkV5E7V7GhhgCFjcvbxmaqrO8DywamM4Tyjf10F6EJBHjXiIa_tFRtF4iN"), nil},
|
}, pkg.MustDecode("L_0RFHpr9JUS4Zp14rz2dESSRvfLzpvqsLhR1-YjQt8hYlmEdVl7vI3_-v8UNPKs"), nil},
|
||||||
|
|
||||||
{"sample directory step simple", fstest.MapFS{
|
{"sample directory step simple", fstest.MapFS{
|
||||||
".": {Mode: fs.ModeDir | 0500},
|
".": {Mode: fs.ModeDir | 0500},
|
||||||
@@ -208,8 +208,8 @@ func TestFlatten(t *testing.T) {
|
|||||||
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/work": {Mode: fs.ModeDir | 0500},
|
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/work": {Mode: fs.ModeDir | 0500},
|
||||||
|
|
||||||
"identifier": {Mode: fs.ModeDir | 0700},
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
"identifier/-P_1iw6yVq_letMHncqcExSE0bYcDhYI5OdY6b1wKASf-Corufvj__XTBUq2Qd2a": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
|
"identifier/W5S65DEhawz_WKaok5NjUKLmnD9dNl5RPauNJjcOVcB3VM4eGhSaLGmXbL8vZpiw": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
|
||||||
"identifier/0_rRxIqbX9LK9L_KDbuafotFz6HFkonNgO9gXhK1asM_Y1Pxn0amg756vRTo6m74": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
|
"identifier/rg7F1D5hwv6o4xctjD5zDq4i5MD0mArTsUIWfhUbik8xC6Bsyt3mjXXOm3goojTz": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
|
||||||
|
|
||||||
"temp": {Mode: fs.ModeDir | 0700},
|
"temp": {Mode: fs.ModeDir | 0700},
|
||||||
"work": {Mode: fs.ModeDir | 0700},
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
@@ -230,12 +230,12 @@ func TestFlatten(t *testing.T) {
|
|||||||
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/work"},
|
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/work"},
|
||||||
|
|
||||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/-P_1iw6yVq_letMHncqcExSE0bYcDhYI5OdY6b1wKASf-Corufvj__XTBUq2Qd2a", Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/W5S65DEhawz_WKaok5NjUKLmnD9dNl5RPauNJjcOVcB3VM4eGhSaLGmXbL8vZpiw", Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
|
||||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/0_rRxIqbX9LK9L_KDbuafotFz6HFkonNgO9gXhK1asM_Y1Pxn0amg756vRTo6m74", Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/rg7F1D5hwv6o4xctjD5zDq4i5MD0mArTsUIWfhUbik8xC6Bsyt3mjXXOm3goojTz", Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
|
||||||
|
|
||||||
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
}, pkg.MustDecode("sxbgyX-bPoezbha214n2lbQhiVfTUBkhZ0EX6zI7mmkMdrCdwuMwhMBJphLQsy94"), nil},
|
}, pkg.MustDecode("NQTlc466JmSVLIyWklm_u8_g95jEEb98PxJU-kjwxLpfdjwMWJq0G8ze9R4Vo1Vu"), nil},
|
||||||
|
|
||||||
{"sample tar expand step unpack", fstest.MapFS{
|
{"sample tar expand step unpack", fstest.MapFS{
|
||||||
".": {Mode: fs.ModeDir | 0500},
|
".": {Mode: fs.ModeDir | 0500},
|
||||||
@@ -255,8 +255,8 @@ func TestFlatten(t *testing.T) {
|
|||||||
"checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
"checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
|
||||||
"identifier": {Mode: fs.ModeDir | 0700},
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
"identifier/-P_1iw6yVq_letMHncqcExSE0bYcDhYI5OdY6b1wKASf-Corufvj__XTBUq2Qd2a": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
"identifier/W5S65DEhawz_WKaok5NjUKLmnD9dNl5RPauNJjcOVcB3VM4eGhSaLGmXbL8vZpiw": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
||||||
"identifier/0_rRxIqbX9LK9L_KDbuafotFz6HFkonNgO9gXhK1asM_Y1Pxn0amg756vRTo6m74": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
"identifier/_v1blm2h-_KA-dVaawdpLas6MjHc6rbhhFS8JWwx8iJxZGUu8EBbRrhr5AaZ9PJL": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
||||||
|
|
||||||
"temp": {Mode: fs.ModeDir | 0700},
|
"temp": {Mode: fs.ModeDir | 0700},
|
||||||
"work": {Mode: fs.ModeDir | 0700},
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
@@ -268,12 +268,12 @@ func TestFlatten(t *testing.T) {
|
|||||||
{Mode: fs.ModeSymlink | 0777, Path: "checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
|
{Mode: fs.ModeSymlink | 0777, Path: "checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
|
||||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/-P_1iw6yVq_letMHncqcExSE0bYcDhYI5OdY6b1wKASf-Corufvj__XTBUq2Qd2a", Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/W5S65DEhawz_WKaok5NjUKLmnD9dNl5RPauNJjcOVcB3VM4eGhSaLGmXbL8vZpiw", Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
||||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/0_rRxIqbX9LK9L_KDbuafotFz6HFkonNgO9gXhK1asM_Y1Pxn0amg756vRTo6m74", Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_v1blm2h-_KA-dVaawdpLas6MjHc6rbhhFS8JWwx8iJxZGUu8EBbRrhr5AaZ9PJL", Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
||||||
|
|
||||||
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
}, pkg.MustDecode("4I8wx_h7NSJTlG5lbuz-GGEXrOg0GYC3M_503LYEBhv5XGWXfNIdIY9Q3eVSYldX"), nil},
|
}, pkg.MustDecode("hSoSSgCYTNonX3Q8FjvjD1fBl-E-BQyA6OTXro2OadXqbST4tZ-akGXszdeqphRe"), nil},
|
||||||
|
|
||||||
{"testtool", fstest.MapFS{
|
{"testtool", fstest.MapFS{
|
||||||
".": {Mode: fs.ModeDir | 0500},
|
".": {Mode: fs.ModeDir | 0500},
|
||||||
@@ -295,9 +295,9 @@ func TestFlatten(t *testing.T) {
|
|||||||
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
|
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
|
||||||
|
|
||||||
"identifier": {Mode: fs.ModeDir | 0700},
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
"identifier/U2cbgVgEtjfRuvHfE1cQnZ3t8yoexULQyo_VLgvxAVJSsobMcNaFIsuDWtmt7kzK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
|
||||||
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||||
"identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
"identifier/dztPS6jRjiZtCF4_p8AzfnxGp6obkhrgFVsxdodbKWUoAEVtDz3MykepJB4kI_ks": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
|
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
|
||||||
"temp": {Mode: fs.ModeDir | 0700},
|
"temp": {Mode: fs.ModeDir | 0700},
|
||||||
"work": {Mode: fs.ModeDir | 0700},
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
@@ -311,13 +311,13 @@ func TestFlatten(t *testing.T) {
|
|||||||
{Mode: 0400, Path: "checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb", Data: []byte{}},
|
{Mode: 0400, Path: "checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb", Data: []byte{}},
|
||||||
|
|
||||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/U2cbgVgEtjfRuvHfE1cQnZ3t8yoexULQyo_VLgvxAVJSsobMcNaFIsuDWtmt7kzK", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
|
||||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/dztPS6jRjiZtCF4_p8AzfnxGp6obkhrgFVsxdodbKWUoAEVtDz3MykepJB4kI_ks", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
|
||||||
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
}, pkg.MustDecode("UiV6kMz7KrTsc_yphiyQzFLqjRanHxUOwrBMtkKuWo4mOO6WgPFAcoUEeSp7eVIW"), nil},
|
}, pkg.MustDecode("Q5DluWQCAeohLoiGRImurwFp3vdz9IfQCoj7Fuhh73s4KQPRHpEQEnHTdNHmB8Fx"), nil},
|
||||||
|
|
||||||
{"testtool net", fstest.MapFS{
|
{"testtool net", fstest.MapFS{
|
||||||
".": {Mode: fs.ModeDir | 0500},
|
".": {Mode: fs.ModeDir | 0500},
|
||||||
@@ -339,9 +339,9 @@ func TestFlatten(t *testing.T) {
|
|||||||
"checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W/check": {Mode: 0400, Data: []byte("net")},
|
"checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W/check": {Mode: 0400, Data: []byte("net")},
|
||||||
|
|
||||||
"identifier": {Mode: fs.ModeDir | 0700},
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
|
"identifier/G8qPxD9puvvoOVV7lrT80eyDeIl3G_CCFoKw12c8mCjMdG1zF7NEPkwYpNubClK3": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W")},
|
||||||
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||||
"identifier/QdsJhGgnk5N2xdUNGcndXQxFKifxf1V_2t9X8CQ-pDcg24x6mGJC_BiLfGbs6Qml": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W")},
|
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
"identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
|
||||||
|
|
||||||
"temp": {Mode: fs.ModeDir | 0700},
|
"temp": {Mode: fs.ModeDir | 0700},
|
||||||
"work": {Mode: fs.ModeDir | 0700},
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
@@ -355,13 +355,13 @@ func TestFlatten(t *testing.T) {
|
|||||||
{Mode: 0400, Path: "checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W/check", Data: []byte("net")},
|
{Mode: 0400, Path: "checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W/check", Data: []byte("net")},
|
||||||
|
|
||||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/QdsJhGgnk5N2xdUNGcndXQxFKifxf1V_2t9X8CQ-pDcg24x6mGJC_BiLfGbs6Qml", Data: []byte("../checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W")},
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/G8qPxD9puvvoOVV7lrT80eyDeIl3G_CCFoKw12c8mCjMdG1zF7NEPkwYpNubClK3", Data: []byte("../checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W")},
|
||||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
|
||||||
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
}, pkg.MustDecode("ek4K-0d4iRSArkY2TCs3WK34DbiYeOmhE_4vsJTSu_6roY4ZF3YG6eKRooal-i1o"), nil},
|
}, pkg.MustDecode("bPYvvqxpfV7xcC1EptqyKNK1klLJgYHMDkzBcoOyK6j_Aj5hb0mXNPwTwPSK5F6Z"), nil},
|
||||||
|
|
||||||
{"sample exec container overlay root", fstest.MapFS{
|
{"sample exec container overlay root", fstest.MapFS{
|
||||||
".": {Mode: fs.ModeDir | 0700},
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
@@ -372,8 +372,8 @@ func TestFlatten(t *testing.T) {
|
|||||||
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
||||||
|
|
||||||
"identifier": {Mode: fs.ModeDir | 0700},
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
"identifier/5ey2wpmMpj483YYa7ZZQciYLA2cx3_l167JCqWW4Pd-5DVp81dj9EsBtVTwYptF6": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
"identifier/RdMA-mubnrHuu3Ky1wWyxauSYCO0ZH_zCPUj3uDHqkfwv5sGcByoF_g5PjlGiClb": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
"identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
|
||||||
"temp": {Mode: fs.ModeDir | 0700},
|
"temp": {Mode: fs.ModeDir | 0700},
|
||||||
"work": {Mode: fs.ModeDir | 0700},
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
@@ -386,12 +386,12 @@ func TestFlatten(t *testing.T) {
|
|||||||
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
||||||
|
|
||||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/5ey2wpmMpj483YYa7ZZQciYLA2cx3_l167JCqWW4Pd-5DVp81dj9EsBtVTwYptF6", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/RdMA-mubnrHuu3Ky1wWyxauSYCO0ZH_zCPUj3uDHqkfwv5sGcByoF_g5PjlGiClb", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
|
||||||
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
}, pkg.MustDecode("VIqqpf0ip9jcyw63i6E8lCMGUcLivQBe4Bevt3WusNac-1MSy5bzB647qGUBzl-W"), nil},
|
}, pkg.MustDecode("PO2DSSCa4yoSgEYRcCSZfQfwow1yRigL3Ry-hI0RDI4aGuFBha-EfXeSJnG_5_Rl"), nil},
|
||||||
|
|
||||||
{"sample exec container overlay work", fstest.MapFS{
|
{"sample exec container overlay work", fstest.MapFS{
|
||||||
".": {Mode: fs.ModeDir | 0700},
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
@@ -402,8 +402,8 @@ func TestFlatten(t *testing.T) {
|
|||||||
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
||||||
|
|
||||||
"identifier": {Mode: fs.ModeDir | 0700},
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
"identifier/acaDzHZv40dZaz4cGAXayqbRMgbEOuiuiUijZL8IgDQvyeCNMFE3onBMYfny-kXA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
"identifier/5hlaukCirnXE4W_RSLJFOZN47Z5RiHnacXzdFp_70cLgiJUGR6cSb_HaFftkzi0-": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
"identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
|
||||||
"temp": {Mode: fs.ModeDir | 0700},
|
"temp": {Mode: fs.ModeDir | 0700},
|
||||||
"work": {Mode: fs.ModeDir | 0700},
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
@@ -416,12 +416,12 @@ func TestFlatten(t *testing.T) {
|
|||||||
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
||||||
|
|
||||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/acaDzHZv40dZaz4cGAXayqbRMgbEOuiuiUijZL8IgDQvyeCNMFE3onBMYfny-kXA", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/5hlaukCirnXE4W_RSLJFOZN47Z5RiHnacXzdFp_70cLgiJUGR6cSb_HaFftkzi0-", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
|
||||||
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
}, pkg.MustDecode("q8x2zQg4YZbKpPqKlEBj_uxXD9vOBaZ852qOuIsl9QdO73I_UMNpuUoPLtunxUYl"), nil},
|
}, pkg.MustDecode("iaRt6l_Wm2n-h5UsDewZxQkCmjZjyL8r7wv32QT2kyV55-Lx09Dq4gfg9BiwPnKs"), nil},
|
||||||
|
|
||||||
{"sample exec container multiple layers", fstest.MapFS{
|
{"sample exec container multiple layers", fstest.MapFS{
|
||||||
".": {Mode: fs.ModeDir | 0700},
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
@@ -436,9 +436,9 @@ func TestFlatten(t *testing.T) {
|
|||||||
|
|
||||||
"identifier": {Mode: fs.ModeDir | 0700},
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||||
"identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
"identifier/B-kc5iJMx8GtlCua4dz6BiJHnDAOUfPjgpbKq4e-QEn0_CZkSYs3fOA1ve06qMs2": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK")},
|
||||||
"identifier/rXLKjjYfGSyoWmuvEJooHkvGJIZaC0IAWnKGvtPZkM15gBxAgW7mIXcxRVNOXAr4": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK")},
|
"identifier/p1t_drXr34i-jZNuxDMLaMOdL6tZvQqhavNafGynGqxOZoXAUTSn7kqNh3Ovv3DT": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
"identifier/tfjrsVuBuFgzWgwz-yPppFtylYuC1VFWnKhyBiHbWTGkyz8lt7Ee9QXWaIHPXs4x": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
|
||||||
"temp": {Mode: fs.ModeDir | 0700},
|
"temp": {Mode: fs.ModeDir | 0700},
|
||||||
"work": {Mode: fs.ModeDir | 0700},
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
@@ -454,14 +454,14 @@ func TestFlatten(t *testing.T) {
|
|||||||
{Mode: 0400, Path: "checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK/check", Data: []byte("layers")},
|
{Mode: 0400, Path: "checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK/check", Data: []byte("layers")},
|
||||||
|
|
||||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/B-kc5iJMx8GtlCua4dz6BiJHnDAOUfPjgpbKq4e-QEn0_CZkSYs3fOA1ve06qMs2", Data: []byte("../checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK")},
|
||||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/p1t_drXr34i-jZNuxDMLaMOdL6tZvQqhavNafGynGqxOZoXAUTSn7kqNh3Ovv3DT", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/rXLKjjYfGSyoWmuvEJooHkvGJIZaC0IAWnKGvtPZkM15gBxAgW7mIXcxRVNOXAr4", Data: []byte("../checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK")},
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/tfjrsVuBuFgzWgwz-yPppFtylYuC1VFWnKhyBiHbWTGkyz8lt7Ee9QXWaIHPXs4x", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
|
||||||
|
|
||||||
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
}, pkg.MustDecode("SITnQ6PTV12PAQQjIuLUxkvsXQiC9Gq_HJQlcb4BPL5YnRHnx8lsW7PRM9YMLBsx"), nil},
|
}, pkg.MustDecode("O2YzyR7IUGU5J2CADy0hUZ3A5NkP_Vwzs4UadEdn2oMZZVWRtH0xZGJ3HXiimTnZ"), nil},
|
||||||
|
|
||||||
{"sample exec container layer promotion", fstest.MapFS{
|
{"sample exec container layer promotion", fstest.MapFS{
|
||||||
".": {Mode: fs.ModeDir | 0700},
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
@@ -472,9 +472,9 @@ func TestFlatten(t *testing.T) {
|
|||||||
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
||||||
|
|
||||||
"identifier": {Mode: fs.ModeDir | 0700},
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
"identifier/1tQZOGmVk_JkpyiG84AKW_BXmlK_MvHUbh5WtMuthGbHUq7i7nL1bvdF-LoJbqNh": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
"identifier/kvJIqZo5DKFOxC2ZQ-8_nPaQzEAz9cIm3p6guO-uLqm-xaiPu7oRkSnsu411jd_U": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
"identifier/O-6VjlIUxc4PYLf5v35uhIeL8kkYCbHYklqlmDjFPXe0m4j6GkUDg5qwTzBRESnf": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
"identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
"identifier/xXTIYcXmgJWNLC91c417RRrNM9cjELwEZHpGvf8Fk_GNP5agRJp_SicD0w9aMeLJ": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
|
|
||||||
"temp": {Mode: fs.ModeDir | 0700},
|
"temp": {Mode: fs.ModeDir | 0700},
|
||||||
"work": {Mode: fs.ModeDir | 0700},
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
@@ -487,13 +487,13 @@ func TestFlatten(t *testing.T) {
|
|||||||
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
||||||
|
|
||||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/1tQZOGmVk_JkpyiG84AKW_BXmlK_MvHUbh5WtMuthGbHUq7i7nL1bvdF-LoJbqNh", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/kvJIqZo5DKFOxC2ZQ-8_nPaQzEAz9cIm3p6guO-uLqm-xaiPu7oRkSnsu411jd_U", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/O-6VjlIUxc4PYLf5v35uhIeL8kkYCbHYklqlmDjFPXe0m4j6GkUDg5qwTzBRESnf", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/xXTIYcXmgJWNLC91c417RRrNM9cjELwEZHpGvf8Fk_GNP5agRJp_SicD0w9aMeLJ", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
|
|
||||||
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
}, pkg.MustDecode("fuC20BhMKr86TYzNPP2A-9P7mGLvdcOiG10exlhRvZm8ySI7csf0LhW3im_26l1N"), nil},
|
}, pkg.MustDecode("3EaW6WibLi9gl03_UieiFPaFcPy5p4x3JPxrnLJxGaTI-bh3HU9DK9IMx7c3rrNm"), nil},
|
||||||
|
|
||||||
{"sample file short", fstest.MapFS{
|
{"sample file short", fstest.MapFS{
|
||||||
".": {Mode: fs.ModeDir | 0700},
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
@@ -502,7 +502,7 @@ func TestFlatten(t *testing.T) {
|
|||||||
"checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX": {Mode: 0400, Data: []byte{0}},
|
"checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX": {Mode: 0400, Data: []byte{0}},
|
||||||
|
|
||||||
"identifier": {Mode: fs.ModeDir | 0700},
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
"identifier/lIx_W4M7tVOcQ8jh08EJOfXf4brRmkEEjvUa7c17vVUzlmtUxlhhrgqmc9aZhjbn": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
|
"identifier/3376ALA7hIUm2LbzH2fDvRezgzod1eTK_G6XjyOgbM2u-6swvkFaF0BOwSl_juBi": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
|
||||||
|
|
||||||
"work": {Mode: fs.ModeDir | 0700},
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
}, []pkg.FlatEntry{
|
}, []pkg.FlatEntry{
|
||||||
@@ -511,10 +511,10 @@ func TestFlatten(t *testing.T) {
|
|||||||
{Mode: 0400, Path: "checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX", Data: []byte{0}},
|
{Mode: 0400, Path: "checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX", Data: []byte{0}},
|
||||||
|
|
||||||
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
{Mode: fs.ModeSymlink | 0777, Path: "identifier/lIx_W4M7tVOcQ8jh08EJOfXf4brRmkEEjvUa7c17vVUzlmtUxlhhrgqmc9aZhjbn", Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/3376ALA7hIUm2LbzH2fDvRezgzod1eTK_G6XjyOgbM2u-6swvkFaF0BOwSl_juBi", Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
|
||||||
|
|
||||||
{Mode: fs.ModeDir | 0700, Path: "work"},
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
}, pkg.MustDecode("hnrfmJtivNKcgtETsKnU9gP_OwPgpNY3DSUJnmxnmeOODSO-YBvEBiTgieY4AAd7"), nil},
|
}, pkg.MustDecode("iR6H5OIsyOW4EwEgtm9rGzGF6DVtyHLySEtwnFE8bnus9VJcoCbR4JIek7Lw-vwT"), nil},
|
||||||
}
|
}
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
|||||||
@@ -18,11 +18,12 @@ import (
|
|||||||
"hakurei.app/container"
|
"hakurei.app/container"
|
||||||
"hakurei.app/container/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/container/fhs"
|
"hakurei.app/container/fhs"
|
||||||
|
"hakurei.app/container/seccomp"
|
||||||
"hakurei.app/container/std"
|
"hakurei.app/container/std"
|
||||||
"hakurei.app/message"
|
"hakurei.app/message"
|
||||||
)
|
)
|
||||||
|
|
||||||
// AbsWork is the container pathname [CureContext.GetWorkDir] is mounted on.
|
// AbsWork is the container pathname [TContext.GetWorkDir] is mounted on.
|
||||||
var AbsWork = fhs.AbsRoot.Append("work/")
|
var AbsWork = fhs.AbsRoot.Append("work/")
|
||||||
|
|
||||||
// ExecPath is a slice of [Artifact] and the [check.Absolute] pathname to make
|
// ExecPath is a slice of [Artifact] and the [check.Absolute] pathname to make
|
||||||
@@ -38,22 +39,23 @@ type ExecPath struct {
|
|||||||
W bool
|
W bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// layers returns pathnames collected from A deduplicated by checksum.
|
// SchedPolicy is the [container] scheduling policy.
|
||||||
func (p *ExecPath) layers(f *FContext) []*check.Absolute {
|
var SchedPolicy int
|
||||||
msg := f.GetMessage()
|
|
||||||
|
|
||||||
layers := make([]*check.Absolute, 0, len(p.A))
|
// PromoteLayers returns artifacts with identical-by-content layers promoted to
|
||||||
checksums := make(map[unique.Handle[Checksum]]struct{}, len(p.A))
|
// the highest priority instance, as if mounted via [ExecPath].
|
||||||
for i := range p.A {
|
func PromoteLayers(
|
||||||
d := p.A[len(p.A)-1-i]
|
artifacts []Artifact,
|
||||||
pathname, checksum := f.GetArtifact(d)
|
getArtifact func(Artifact) (*check.Absolute, unique.Handle[Checksum]),
|
||||||
|
report func(i int, d Artifact),
|
||||||
|
) []*check.Absolute {
|
||||||
|
layers := make([]*check.Absolute, 0, len(artifacts))
|
||||||
|
checksums := make(map[unique.Handle[Checksum]]struct{}, len(artifacts))
|
||||||
|
for i := range artifacts {
|
||||||
|
d := artifacts[len(artifacts)-1-i]
|
||||||
|
pathname, checksum := getArtifact(d)
|
||||||
if _, ok := checksums[checksum]; ok {
|
if _, ok := checksums[checksum]; ok {
|
||||||
if msg.IsVerbose() {
|
report(len(artifacts)-1-i, d)
|
||||||
msg.Verbosef(
|
|
||||||
"promoted layer %d as %s",
|
|
||||||
len(p.A)-1-i, reportName(d, f.cache.Ident(d)),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
checksums[checksum] = struct{}{}
|
checksums[checksum] = struct{}{}
|
||||||
@@ -63,6 +65,19 @@ func (p *ExecPath) layers(f *FContext) []*check.Absolute {
|
|||||||
return layers
|
return layers
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// layers returns pathnames collected from A deduplicated via [PromoteLayers].
|
||||||
|
func (p *ExecPath) layers(f *FContext) []*check.Absolute {
|
||||||
|
msg := f.GetMessage()
|
||||||
|
return PromoteLayers(p.A, f.GetArtifact, func(i int, d Artifact) {
|
||||||
|
if msg.IsVerbose() {
|
||||||
|
msg.Verbosef(
|
||||||
|
"promoted layer %d as %s",
|
||||||
|
i, reportName(d, f.cache.Ident(d)),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// Path returns a populated [ExecPath].
|
// Path returns a populated [ExecPath].
|
||||||
func Path(pathname *check.Absolute, writable bool, a ...Artifact) ExecPath {
|
func Path(pathname *check.Absolute, writable bool, a ...Artifact) ExecPath {
|
||||||
return ExecPath{pathname, a, writable}
|
return ExecPath{pathname, a, writable}
|
||||||
@@ -102,8 +117,7 @@ type execArtifact struct {
|
|||||||
args []string
|
args []string
|
||||||
|
|
||||||
// Duration the initial process is allowed to run. The zero value is
|
// Duration the initial process is allowed to run. The zero value is
|
||||||
// equivalent to execTimeoutDefault. This value is never encoded in Params
|
// equivalent to [ExecTimeoutDefault].
|
||||||
// because it cannot affect outcome.
|
|
||||||
timeout time.Duration
|
timeout time.Duration
|
||||||
|
|
||||||
// Caller-supplied exclusivity value, returned as is by IsExclusive.
|
// Caller-supplied exclusivity value, returned as is by IsExclusive.
|
||||||
@@ -128,12 +142,6 @@ func (a *execNetArtifact) Checksum() Checksum { return a.checksum }
|
|||||||
// Kind returns the hardcoded [Kind] constant.
|
// Kind returns the hardcoded [Kind] constant.
|
||||||
func (*execNetArtifact) Kind() Kind { return KindExecNet }
|
func (*execNetArtifact) Kind() Kind { return KindExecNet }
|
||||||
|
|
||||||
// Params is [Checksum] concatenated with [KindExec] params.
|
|
||||||
func (a *execNetArtifact) Params(ctx *IContext) {
|
|
||||||
ctx.GetHash().Write(a.checksum[:])
|
|
||||||
a.execArtifact.Params(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Cure cures the [Artifact] in the container described by the caller. The
|
// Cure cures the [Artifact] in the container described by the caller. The
|
||||||
// container retains host networking.
|
// container retains host networking.
|
||||||
func (a *execNetArtifact) Cure(f *FContext) error {
|
func (a *execNetArtifact) Cure(f *FContext) error {
|
||||||
@@ -197,38 +205,131 @@ func (*execArtifact) Kind() Kind { return KindExec }
|
|||||||
|
|
||||||
// Params writes paths, executable pathname and args.
|
// Params writes paths, executable pathname and args.
|
||||||
func (a *execArtifact) Params(ctx *IContext) {
|
func (a *execArtifact) Params(ctx *IContext) {
|
||||||
h := ctx.GetHash()
|
ctx.WriteString(a.name)
|
||||||
|
|
||||||
_0, _1 := []byte{0}, []byte{1}
|
ctx.WriteUint32(uint32(len(a.paths)))
|
||||||
for _, p := range a.paths {
|
for _, p := range a.paths {
|
||||||
if p.W {
|
|
||||||
h.Write(_1)
|
|
||||||
} else {
|
|
||||||
h.Write(_0)
|
|
||||||
}
|
|
||||||
if p.P != nil {
|
if p.P != nil {
|
||||||
h.Write([]byte(p.P.String()))
|
ctx.WriteString(p.P.String())
|
||||||
} else {
|
} else {
|
||||||
h.Write([]byte("invalid P\x00"))
|
ctx.WriteString("invalid P\x00")
|
||||||
}
|
}
|
||||||
h.Write(_0)
|
|
||||||
|
ctx.WriteUint32(uint32(len(p.A)))
|
||||||
for _, d := range p.A {
|
for _, d := range p.A {
|
||||||
ctx.WriteIdent(d)
|
ctx.WriteIdent(d)
|
||||||
}
|
}
|
||||||
h.Write(_0)
|
|
||||||
|
if p.W {
|
||||||
|
ctx.WriteUint32(1)
|
||||||
|
} else {
|
||||||
|
ctx.WriteUint32(0)
|
||||||
}
|
}
|
||||||
h.Write(_0)
|
}
|
||||||
h.Write([]byte(a.dir.String()))
|
|
||||||
h.Write(_0)
|
ctx.WriteString(a.dir.String())
|
||||||
|
|
||||||
|
ctx.WriteUint32(uint32(len(a.env)))
|
||||||
for _, e := range a.env {
|
for _, e := range a.env {
|
||||||
h.Write([]byte(e))
|
ctx.WriteString(e)
|
||||||
}
|
}
|
||||||
h.Write(_0)
|
|
||||||
h.Write([]byte(a.path.String()))
|
ctx.WriteString(a.path.String())
|
||||||
h.Write(_0)
|
|
||||||
|
ctx.WriteUint32(uint32(len(a.args)))
|
||||||
for _, arg := range a.args {
|
for _, arg := range a.args {
|
||||||
h.Write([]byte(arg))
|
ctx.WriteString(arg)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ctx.WriteUint32(uint32(a.timeout & 0xffffffff))
|
||||||
|
ctx.WriteUint32(uint32(a.timeout >> 32))
|
||||||
|
|
||||||
|
if a.exclusive {
|
||||||
|
ctx.WriteUint32(1)
|
||||||
|
} else {
|
||||||
|
ctx.WriteUint32(0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// readExecArtifact interprets IR values and returns the address of execArtifact
|
||||||
|
// or execNetArtifact.
|
||||||
|
func readExecArtifact(r *IRReader, net bool) Artifact {
|
||||||
|
r.DiscardAll()
|
||||||
|
|
||||||
|
name := r.ReadString()
|
||||||
|
|
||||||
|
sz := r.ReadUint32()
|
||||||
|
if sz > irMaxDeps {
|
||||||
|
panic(ErrIRDepend)
|
||||||
|
}
|
||||||
|
paths := make([]ExecPath, sz)
|
||||||
|
for i := range paths {
|
||||||
|
paths[i].P = check.MustAbs(r.ReadString())
|
||||||
|
|
||||||
|
sz = r.ReadUint32()
|
||||||
|
if sz > irMaxDeps {
|
||||||
|
panic(ErrIRDepend)
|
||||||
|
}
|
||||||
|
paths[i].A = make([]Artifact, sz)
|
||||||
|
for j := range paths[i].A {
|
||||||
|
paths[i].A[j] = r.ReadIdent()
|
||||||
|
}
|
||||||
|
|
||||||
|
paths[i].W = r.ReadUint32() != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
dir := check.MustAbs(r.ReadString())
|
||||||
|
|
||||||
|
sz = r.ReadUint32()
|
||||||
|
if sz > irMaxValues {
|
||||||
|
panic(ErrIRValues)
|
||||||
|
}
|
||||||
|
env := make([]string, sz)
|
||||||
|
for i := range env {
|
||||||
|
env[i] = r.ReadString()
|
||||||
|
}
|
||||||
|
|
||||||
|
pathname := check.MustAbs(r.ReadString())
|
||||||
|
|
||||||
|
sz = r.ReadUint32()
|
||||||
|
if sz > irMaxValues {
|
||||||
|
panic(ErrIRValues)
|
||||||
|
}
|
||||||
|
args := make([]string, sz)
|
||||||
|
for i := range args {
|
||||||
|
args[i] = r.ReadString()
|
||||||
|
}
|
||||||
|
|
||||||
|
timeout := time.Duration(r.ReadUint32())
|
||||||
|
timeout |= time.Duration(r.ReadUint32()) << 32
|
||||||
|
|
||||||
|
exclusive := r.ReadUint32() != 0
|
||||||
|
|
||||||
|
checksum, ok := r.Finalise()
|
||||||
|
|
||||||
|
var checksumP *Checksum
|
||||||
|
if net {
|
||||||
|
if !ok {
|
||||||
|
panic(ErrExpectedChecksum)
|
||||||
|
}
|
||||||
|
checksumVal := checksum.Value()
|
||||||
|
checksumP = &checksumVal
|
||||||
|
} else {
|
||||||
|
if ok {
|
||||||
|
panic(ErrUnexpectedChecksum)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return NewExec(
|
||||||
|
name, checksumP, timeout, exclusive, dir, env, pathname, args, paths...,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
register(KindExec,
|
||||||
|
func(r *IRReader) Artifact { return readExecArtifact(r, false) })
|
||||||
|
register(KindExecNet,
|
||||||
|
func(r *IRReader) Artifact { return readExecArtifact(r, true) })
|
||||||
}
|
}
|
||||||
|
|
||||||
// Dependencies returns a slice of all artifacts collected from caller-supplied
|
// Dependencies returns a slice of all artifacts collected from caller-supplied
|
||||||
@@ -260,6 +361,7 @@ const (
|
|||||||
// scanVerbose prefixes program output for a verbose [message.Msg].
|
// scanVerbose prefixes program output for a verbose [message.Msg].
|
||||||
func scanVerbose(
|
func scanVerbose(
|
||||||
msg message.Msg,
|
msg message.Msg,
|
||||||
|
cancel context.CancelFunc,
|
||||||
done chan<- struct{},
|
done chan<- struct{},
|
||||||
prefix string,
|
prefix string,
|
||||||
r io.Reader,
|
r io.Reader,
|
||||||
@@ -274,10 +376,15 @@ func scanVerbose(
|
|||||||
msg.Verbose(prefix, s.Text())
|
msg.Verbose(prefix, s.Text())
|
||||||
}
|
}
|
||||||
if err := s.Err(); err != nil && !errors.Is(err, os.ErrClosed) {
|
if err := s.Err(); err != nil && !errors.Is(err, os.ErrClosed) {
|
||||||
|
cancel()
|
||||||
msg.Verbose("*"+prefix, err)
|
msg.Verbose("*"+prefix, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SeccompPresets is the [seccomp] presets used by exec artifacts.
|
||||||
|
const SeccompPresets = std.PresetStrict &
|
||||||
|
^(std.PresetDenyNS | std.PresetDenyDevel)
|
||||||
|
|
||||||
// cure is like Cure but allows optional host net namespace. This is used for
|
// cure is like Cure but allows optional host net namespace. This is used for
|
||||||
// the [KnownChecksum] variant where networking is allowed.
|
// the [KnownChecksum] variant where networking is allowed.
|
||||||
func (a *execArtifact) cure(f *FContext, hostNet bool) (err error) {
|
func (a *execArtifact) cure(f *FContext, hostNet bool) (err error) {
|
||||||
@@ -301,14 +408,22 @@ func (a *execArtifact) cure(f *FContext, hostNet bool) (err error) {
|
|||||||
|
|
||||||
z := container.New(ctx, f.GetMessage())
|
z := container.New(ctx, f.GetMessage())
|
||||||
z.WaitDelay = execWaitDelay
|
z.WaitDelay = execWaitDelay
|
||||||
z.SeccompPresets |= std.PresetStrict & ^std.PresetDenyNS
|
z.SeccompPresets = SeccompPresets
|
||||||
|
z.SeccompFlags |= seccomp.AllowMultiarch
|
||||||
z.ParentPerm = 0700
|
z.ParentPerm = 0700
|
||||||
z.HostNet = hostNet
|
z.HostNet = hostNet
|
||||||
z.Hostname = "cure"
|
z.Hostname = "cure"
|
||||||
|
z.SchedPolicy = SchedPolicy
|
||||||
if z.HostNet {
|
if z.HostNet {
|
||||||
z.Hostname = "cure-net"
|
z.Hostname = "cure-net"
|
||||||
}
|
}
|
||||||
z.Uid, z.Gid = (1<<10)-1, (1<<10)-1
|
z.Uid, z.Gid = (1<<10)-1, (1<<10)-1
|
||||||
|
|
||||||
|
var status io.Writer
|
||||||
|
if status, err = f.GetStatusWriter(); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
if msg := f.GetMessage(); msg.IsVerbose() {
|
if msg := f.GetMessage(); msg.IsVerbose() {
|
||||||
var stdout, stderr io.ReadCloser
|
var stdout, stderr io.ReadCloser
|
||||||
if stdout, err = z.StdoutPipe(); err != nil {
|
if stdout, err = z.StdoutPipe(); err != nil {
|
||||||
@@ -325,10 +440,31 @@ func (a *execArtifact) cure(f *FContext, hostNet bool) (err error) {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
bw := f.cache.getWriter(status)
|
||||||
stdoutDone, stderrDone := make(chan struct{}), make(chan struct{})
|
stdoutDone, stderrDone := make(chan struct{}), make(chan struct{})
|
||||||
go scanVerbose(msg, stdoutDone, "("+a.name+":1)", stdout)
|
go scanVerbose(
|
||||||
go scanVerbose(msg, stderrDone, "("+a.name+":2)", stderr)
|
msg, cancel, stdoutDone,
|
||||||
defer func() { <-stdoutDone; <-stderrDone }()
|
"("+a.name+":1)",
|
||||||
|
io.TeeReader(stdout, bw),
|
||||||
|
)
|
||||||
|
go scanVerbose(
|
||||||
|
msg, cancel, stderrDone,
|
||||||
|
"("+a.name+":2)",
|
||||||
|
io.TeeReader(stderr, bw),
|
||||||
|
)
|
||||||
|
defer func() {
|
||||||
|
<-stdoutDone
|
||||||
|
<-stderrDone
|
||||||
|
|
||||||
|
flushErr := bw.Flush()
|
||||||
|
if err == nil {
|
||||||
|
err = flushErr
|
||||||
|
}
|
||||||
|
f.cache.putWriter(bw)
|
||||||
|
|
||||||
|
}()
|
||||||
|
} else {
|
||||||
|
z.Stdout, z.Stderr = status, status
|
||||||
}
|
}
|
||||||
|
|
||||||
z.Dir, z.Env, z.Path, z.Args = a.dir, a.env, a.path, a.args
|
z.Dir, z.Env, z.Path, z.Args = a.dir, a.env, a.path, a.args
|
||||||
|
|||||||
@@ -78,7 +78,7 @@ func TestExec(t *testing.T) {
|
|||||||
), nil, pkg.Checksum{}, &pkg.DependencyCureError{
|
), nil, pkg.Checksum{}, &pkg.DependencyCureError{
|
||||||
{
|
{
|
||||||
Ident: unique.Make(pkg.ID(pkg.MustDecode(
|
Ident: unique.Make(pkg.ID(pkg.MustDecode(
|
||||||
"CWEoJqnSBpWf8uryC2qnIe3O1a_FZWUWZGbiVPsQFGW7pvDHiSwoK3QCU9-uxN87",
|
"Sowo6oZRmG6xVtUaxB6bDWZhVsqAJsIJWUp0OPKlE103cY0lodx7dem8J-qQF0Z1",
|
||||||
))),
|
))),
|
||||||
Err: stub.UniqueError(0xcafe),
|
Err: stub.UniqueError(0xcafe),
|
||||||
},
|
},
|
||||||
@@ -109,7 +109,7 @@ func TestExec(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
testtoolDestroy(t, base, c)
|
testtoolDestroy(t, base, c)
|
||||||
}, pkg.MustDecode("UiV6kMz7KrTsc_yphiyQzFLqjRanHxUOwrBMtkKuWo4mOO6WgPFAcoUEeSp7eVIW")},
|
}, pkg.MustDecode("Q5DluWQCAeohLoiGRImurwFp3vdz9IfQCoj7Fuhh73s4KQPRHpEQEnHTdNHmB8Fx")},
|
||||||
|
|
||||||
{"net", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
{"net", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
c.SetStrict(true)
|
c.SetStrict(true)
|
||||||
@@ -144,7 +144,7 @@ func TestExec(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
testtoolDestroy(t, base, c)
|
testtoolDestroy(t, base, c)
|
||||||
}, pkg.MustDecode("ek4K-0d4iRSArkY2TCs3WK34DbiYeOmhE_4vsJTSu_6roY4ZF3YG6eKRooal-i1o")},
|
}, pkg.MustDecode("bPYvvqxpfV7xcC1EptqyKNK1klLJgYHMDkzBcoOyK6j_Aj5hb0mXNPwTwPSK5F6Z")},
|
||||||
|
|
||||||
{"overlay root", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
{"overlay root", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
c.SetStrict(true)
|
c.SetStrict(true)
|
||||||
@@ -170,7 +170,7 @@ func TestExec(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
testtoolDestroy(t, base, c)
|
testtoolDestroy(t, base, c)
|
||||||
}, pkg.MustDecode("VIqqpf0ip9jcyw63i6E8lCMGUcLivQBe4Bevt3WusNac-1MSy5bzB647qGUBzl-W")},
|
}, pkg.MustDecode("PO2DSSCa4yoSgEYRcCSZfQfwow1yRigL3Ry-hI0RDI4aGuFBha-EfXeSJnG_5_Rl")},
|
||||||
|
|
||||||
{"overlay work", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
{"overlay work", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
c.SetStrict(true)
|
c.SetStrict(true)
|
||||||
@@ -201,7 +201,7 @@ func TestExec(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
testtoolDestroy(t, base, c)
|
testtoolDestroy(t, base, c)
|
||||||
}, pkg.MustDecode("q8x2zQg4YZbKpPqKlEBj_uxXD9vOBaZ852qOuIsl9QdO73I_UMNpuUoPLtunxUYl")},
|
}, pkg.MustDecode("iaRt6l_Wm2n-h5UsDewZxQkCmjZjyL8r7wv32QT2kyV55-Lx09Dq4gfg9BiwPnKs")},
|
||||||
|
|
||||||
{"multiple layers", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
{"multiple layers", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
c.SetStrict(true)
|
c.SetStrict(true)
|
||||||
@@ -254,7 +254,7 @@ func TestExec(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
testtoolDestroy(t, base, c)
|
testtoolDestroy(t, base, c)
|
||||||
}, pkg.MustDecode("SITnQ6PTV12PAQQjIuLUxkvsXQiC9Gq_HJQlcb4BPL5YnRHnx8lsW7PRM9YMLBsx")},
|
}, pkg.MustDecode("O2YzyR7IUGU5J2CADy0hUZ3A5NkP_Vwzs4UadEdn2oMZZVWRtH0xZGJ3HXiimTnZ")},
|
||||||
|
|
||||||
{"overlay layer promotion", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
{"overlay layer promotion", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
c.SetStrict(true)
|
c.SetStrict(true)
|
||||||
@@ -286,7 +286,7 @@ func TestExec(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
testtoolDestroy(t, base, c)
|
testtoolDestroy(t, base, c)
|
||||||
}, pkg.MustDecode("fuC20BhMKr86TYzNPP2A-9P7mGLvdcOiG10exlhRvZm8ySI7csf0LhW3im_26l1N")},
|
}, pkg.MustDecode("3EaW6WibLi9gl03_UieiFPaFcPy5p4x3JPxrnLJxGaTI-bh3HU9DK9IMx7c3rrNm")},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -25,6 +25,12 @@ var _ KnownChecksum = new(fileArtifactNamed)
|
|||||||
// String returns the caller-supplied reporting name.
|
// String returns the caller-supplied reporting name.
|
||||||
func (a *fileArtifactNamed) String() string { return a.name }
|
func (a *fileArtifactNamed) String() string { return a.name }
|
||||||
|
|
||||||
|
// Params writes the caller-supplied reporting name and the file body.
|
||||||
|
func (a *fileArtifactNamed) Params(ctx *IContext) {
|
||||||
|
ctx.WriteString(a.name)
|
||||||
|
ctx.Write(a.fileArtifact)
|
||||||
|
}
|
||||||
|
|
||||||
// NewFile returns a [FileArtifact] that cures into a caller-supplied byte slice.
|
// NewFile returns a [FileArtifact] that cures into a caller-supplied byte slice.
|
||||||
//
|
//
|
||||||
// Caller must not modify data after NewFile returns.
|
// Caller must not modify data after NewFile returns.
|
||||||
@@ -39,8 +45,22 @@ func NewFile(name string, data []byte) FileArtifact {
|
|||||||
// Kind returns the hardcoded [Kind] constant.
|
// Kind returns the hardcoded [Kind] constant.
|
||||||
func (*fileArtifact) Kind() Kind { return KindFile }
|
func (*fileArtifact) Kind() Kind { return KindFile }
|
||||||
|
|
||||||
// Params writes the result of Cure.
|
// Params writes an empty string and the file body.
|
||||||
func (a *fileArtifact) Params(ctx *IContext) { ctx.GetHash().Write(*a) }
|
func (a *fileArtifact) Params(ctx *IContext) {
|
||||||
|
ctx.WriteString("")
|
||||||
|
ctx.Write(*a)
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
register(KindFile, func(r *IRReader) Artifact {
|
||||||
|
name := r.ReadString()
|
||||||
|
data := r.ReadStringBytes()
|
||||||
|
if _, ok := r.Finalise(); !ok {
|
||||||
|
panic(ErrExpectedChecksum)
|
||||||
|
}
|
||||||
|
return NewFile(name, data)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// Dependencies returns a nil slice.
|
// Dependencies returns a nil slice.
|
||||||
func (*fileArtifact) Dependencies() []Artifact { return nil }
|
func (*fileArtifact) Dependencies() []Artifact { return nil }
|
||||||
|
|||||||
@@ -17,13 +17,13 @@ func TestFile(t *testing.T) {
|
|||||||
cureMany(t, c, []cureStep{
|
cureMany(t, c, []cureStep{
|
||||||
{"short", pkg.NewFile("null", []byte{0}), base.Append(
|
{"short", pkg.NewFile("null", []byte{0}), base.Append(
|
||||||
"identifier",
|
"identifier",
|
||||||
"lIx_W4M7tVOcQ8jh08EJOfXf4brRmkEEjvUa7c17vVUzlmtUxlhhrgqmc9aZhjbn",
|
"3376ALA7hIUm2LbzH2fDvRezgzod1eTK_G6XjyOgbM2u-6swvkFaF0BOwSl_juBi",
|
||||||
), pkg.MustDecode(
|
), pkg.MustDecode(
|
||||||
"vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX",
|
"vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX",
|
||||||
), nil},
|
), nil},
|
||||||
})
|
})
|
||||||
}, pkg.MustDecode(
|
}, pkg.MustDecode(
|
||||||
"hnrfmJtivNKcgtETsKnU9gP_OwPgpNY3DSUJnmxnmeOODSO-YBvEBiTgieY4AAd7",
|
"iR6H5OIsyOW4EwEgtm9rGzGF6DVtyHLySEtwnFE8bnus9VJcoCbR4JIek7Lw-vwT",
|
||||||
)},
|
)},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
769
internal/pkg/ir.go
Normal file
769
internal/pkg/ir.go
Normal file
@@ -0,0 +1,769 @@
|
|||||||
|
package pkg
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"crypto/sha512"
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"slices"
|
||||||
|
"strconv"
|
||||||
|
"syscall"
|
||||||
|
"unique"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// wordSize is the boundary which binary segments are always aligned to.
|
||||||
|
const wordSize = 8
|
||||||
|
|
||||||
|
// alignSize returns the padded size for aligning sz.
|
||||||
|
func alignSize(sz int) int {
|
||||||
|
return sz + (wordSize-(sz)%wordSize)%wordSize
|
||||||
|
}
|
||||||
|
|
||||||
|
// panicToError recovers from a panic and replaces a nil error with the panicked
|
||||||
|
// error value. If the value does not implement error, it is re-panicked.
|
||||||
|
func panicToError(errP *error) {
|
||||||
|
r := recover()
|
||||||
|
if r == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err, ok := r.(error); !ok {
|
||||||
|
panic(r)
|
||||||
|
} else if *errP == nil {
|
||||||
|
*errP = err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IContext is passed to [Artifact.Params] and provides methods for writing
|
||||||
|
// values to the IR writer. It does not expose the underlying [io.Writer].
|
||||||
|
//
|
||||||
|
// IContext is valid until [Artifact.Params] returns.
|
||||||
|
type IContext struct {
|
||||||
|
// Address of underlying [Cache], should be zeroed or made unusable after
|
||||||
|
// [Artifact.Params] returns and must not be exposed directly.
|
||||||
|
cache *Cache
|
||||||
|
// Written to by various methods, should be zeroed after [Artifact.Params]
|
||||||
|
// returns and must not be exposed directly.
|
||||||
|
w io.Writer
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unwrap returns the underlying [context.Context].
|
||||||
|
func (i *IContext) Unwrap() context.Context { return i.cache.ctx }
|
||||||
|
|
||||||
|
// irZero is a zero IR word.
|
||||||
|
var irZero [wordSize]byte
|
||||||
|
|
||||||
|
// IRValueKind denotes the kind of encoded value.
|
||||||
|
type IRValueKind uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
// IRKindEnd denotes the end of the current parameters stream. The ancillary
|
||||||
|
// value is interpreted as [IREndFlag].
|
||||||
|
IRKindEnd IRValueKind = iota
|
||||||
|
// IRKindIdent denotes the identifier of a dependency [Artifact]. The
|
||||||
|
// ancillary value is reserved for future use.
|
||||||
|
IRKindIdent
|
||||||
|
// IRKindUint32 denotes an inlined uint32 value.
|
||||||
|
IRKindUint32
|
||||||
|
// IRKindString denotes a string with its true length encoded in header
|
||||||
|
// ancillary data. Its wire length is always aligned to 8 byte boundary.
|
||||||
|
IRKindString
|
||||||
|
|
||||||
|
irHeaderShift = 32
|
||||||
|
irHeaderMask = 0xffffffff
|
||||||
|
)
|
||||||
|
|
||||||
|
// String returns a user-facing name of k.
|
||||||
|
func (k IRValueKind) String() string {
|
||||||
|
switch k {
|
||||||
|
case IRKindEnd:
|
||||||
|
return "terminator"
|
||||||
|
case IRKindIdent:
|
||||||
|
return "ident"
|
||||||
|
case IRKindUint32:
|
||||||
|
return "uint32"
|
||||||
|
case IRKindString:
|
||||||
|
return "string"
|
||||||
|
default:
|
||||||
|
return "invalid kind " + strconv.Itoa(int(k))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// irValueHeader encodes [IRValueKind] and a 32-bit ancillary value.
|
||||||
|
type irValueHeader uint64
|
||||||
|
|
||||||
|
// encodeHeader returns irValueHeader encoding [IRValueKind] and ancillary data.
|
||||||
|
func (k IRValueKind) encodeHeader(v uint32) irValueHeader {
|
||||||
|
return irValueHeader(v)<<irHeaderShift | irValueHeader(k)
|
||||||
|
}
|
||||||
|
|
||||||
|
// put stores h in b[0:8].
|
||||||
|
func (h irValueHeader) put(b []byte) {
|
||||||
|
binary.LittleEndian.PutUint64(b[:], uint64(h))
|
||||||
|
}
|
||||||
|
|
||||||
|
// append appends the bytes of h to b and returns the appended slice.
|
||||||
|
func (h irValueHeader) append(b []byte) []byte {
|
||||||
|
return binary.LittleEndian.AppendUint64(b, uint64(h))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IREndFlag is ancillary data encoded in the header of an [IRKindEnd] value and
|
||||||
|
// specifies the presence of optional fields in the remaining [IRKindEnd] data.
|
||||||
|
// Order of present fields is the order of their corresponding constants defined
|
||||||
|
// below.
|
||||||
|
type IREndFlag uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
// IREndKnownChecksum denotes a [KnownChecksum] artifact. For an [IRKindEnd]
|
||||||
|
// value with this flag set, the remaining data contains the [Checksum].
|
||||||
|
IREndKnownChecksum IREndFlag = 1 << iota
|
||||||
|
)
|
||||||
|
|
||||||
|
// mustWrite writes to IContext.w and panics on error. The panic is recovered
|
||||||
|
// from by the caller and used as the return value.
|
||||||
|
func (i *IContext) mustWrite(p []byte) {
|
||||||
|
if _, err := i.w.Write(p); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteIdent writes the identifier of [Artifact] to the IR. The behaviour of
|
||||||
|
// WriteIdent is not defined for an [Artifact] not part of the slice returned by
|
||||||
|
// [Artifact.Dependencies].
|
||||||
|
func (i *IContext) WriteIdent(a Artifact) {
|
||||||
|
buf := i.cache.getIdentBuf()
|
||||||
|
defer i.cache.putIdentBuf(buf)
|
||||||
|
|
||||||
|
IRKindIdent.encodeHeader(0).put(buf[:])
|
||||||
|
*(*ID)(buf[wordSize:]) = i.cache.Ident(a).Value()
|
||||||
|
i.mustWrite(buf[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteUint32 writes a uint32 value to the IR.
|
||||||
|
func (i *IContext) WriteUint32(v uint32) {
|
||||||
|
i.mustWrite(IRKindUint32.encodeHeader(v).append(nil))
|
||||||
|
}
|
||||||
|
|
||||||
|
// irMaxStringLength is the maximum acceptable wire size of [IRKindString].
|
||||||
|
const irMaxStringLength = 1 << 24
|
||||||
|
|
||||||
|
// IRStringError is a string value too big to encode in IR.
|
||||||
|
type IRStringError string
|
||||||
|
|
||||||
|
func (IRStringError) Error() string {
|
||||||
|
return "params value too big to encode in IR"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write writes p as a string value to the IR.
|
||||||
|
func (i *IContext) Write(p []byte) {
|
||||||
|
sz := alignSize(len(p))
|
||||||
|
if len(p) > irMaxStringLength || sz > irMaxStringLength {
|
||||||
|
panic(IRStringError(p))
|
||||||
|
}
|
||||||
|
|
||||||
|
i.mustWrite(IRKindString.encodeHeader(uint32(len(p))).append(nil))
|
||||||
|
i.mustWrite(p)
|
||||||
|
|
||||||
|
psz := sz - len(p)
|
||||||
|
if psz > 0 {
|
||||||
|
i.mustWrite(irZero[:psz])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteString writes s as a string value to the IR.
|
||||||
|
func (i *IContext) WriteString(s string) {
|
||||||
|
p := unsafe.Slice(unsafe.StringData(s), len(s))
|
||||||
|
i.Write(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encode writes a deterministic, efficient representation of a to w and returns
|
||||||
|
// the first non-nil error encountered while writing to w.
|
||||||
|
func (c *Cache) Encode(w io.Writer, a Artifact) (err error) {
|
||||||
|
deps := a.Dependencies()
|
||||||
|
idents := make([]*extIdent, len(deps))
|
||||||
|
for i, d := range deps {
|
||||||
|
dbuf, did := c.unsafeIdent(d, true)
|
||||||
|
if dbuf == nil {
|
||||||
|
dbuf = c.getIdentBuf()
|
||||||
|
binary.LittleEndian.PutUint64(dbuf[:], uint64(d.Kind()))
|
||||||
|
*(*ID)(dbuf[wordSize:]) = did.Value()
|
||||||
|
} else {
|
||||||
|
c.storeIdent(d, dbuf)
|
||||||
|
}
|
||||||
|
defer c.putIdentBuf(dbuf)
|
||||||
|
idents[i] = dbuf
|
||||||
|
}
|
||||||
|
slices.SortFunc(idents, func(a, b *extIdent) int {
|
||||||
|
return bytes.Compare(a[:], b[:])
|
||||||
|
})
|
||||||
|
idents = slices.CompactFunc(idents, func(a, b *extIdent) bool {
|
||||||
|
return *a == *b
|
||||||
|
})
|
||||||
|
|
||||||
|
// kind uint64 | deps_sz uint64
|
||||||
|
var buf [wordSize * 2]byte
|
||||||
|
binary.LittleEndian.PutUint64(buf[:], uint64(a.Kind()))
|
||||||
|
binary.LittleEndian.PutUint64(buf[wordSize:], uint64(len(idents)))
|
||||||
|
if _, err = w.Write(buf[:]); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, dn := range idents {
|
||||||
|
// kind uint64 | ident ID
|
||||||
|
if _, err = w.Write(dn[:]); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func() {
|
||||||
|
i := IContext{c, w}
|
||||||
|
|
||||||
|
defer panicToError(&err)
|
||||||
|
defer func() { i.cache, i.w = nil, nil }()
|
||||||
|
|
||||||
|
a.Params(&i)
|
||||||
|
}()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var f IREndFlag
|
||||||
|
kcBuf := c.getIdentBuf()
|
||||||
|
sz := wordSize
|
||||||
|
if kc, ok := a.(KnownChecksum); ok {
|
||||||
|
f |= IREndKnownChecksum
|
||||||
|
*(*Checksum)(kcBuf[wordSize:]) = kc.Checksum()
|
||||||
|
sz += len(Checksum{})
|
||||||
|
}
|
||||||
|
IRKindEnd.encodeHeader(uint32(f)).put(kcBuf[:])
|
||||||
|
|
||||||
|
_, err = w.Write(kcBuf[:sz])
|
||||||
|
c.putIdentBuf(kcBuf)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// encodeAll implements EncodeAll by recursively encoding dependencies and
|
||||||
|
// performs deduplication by value via the encoded map.
|
||||||
|
func (c *Cache) encodeAll(
|
||||||
|
w io.Writer,
|
||||||
|
a Artifact,
|
||||||
|
encoded map[Artifact]struct{},
|
||||||
|
) (err error) {
|
||||||
|
if _, ok := encoded[a]; ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, d := range a.Dependencies() {
|
||||||
|
if err = c.encodeAll(w, d, encoded); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
encoded[a] = struct{}{}
|
||||||
|
return c.Encode(w, a)
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeAll writes a self-describing IR stream of a to w and returns the first
|
||||||
|
// non-nil error encountered while writing to w.
|
||||||
|
//
|
||||||
|
// EncodeAll tries to avoid encoding the same [Artifact] more than once, however
|
||||||
|
// it will fail to do so if they do not compare equal by value, as that will
|
||||||
|
// require buffering and greatly reduce performance. It is therefore up to the
|
||||||
|
// caller to avoid causing dependencies to be represented in a way such that
|
||||||
|
// two equivalent artifacts do not compare equal. While an IR stream with
|
||||||
|
// repeated artifacts is valid, it is somewhat inefficient, and the reference
|
||||||
|
// [IRDecoder] implementation produces a warning for it.
|
||||||
|
//
|
||||||
|
// Note that while EncodeAll makes use of the ident free list, it does not use
|
||||||
|
// the ident cache, nor does it contribute identifiers it computes back to the
|
||||||
|
// ident cache. Because of this, multiple invocations of EncodeAll will have
|
||||||
|
// similar cost and does not amortise when combined with a call to Cure.
|
||||||
|
func (c *Cache) EncodeAll(w io.Writer, a Artifact) error {
|
||||||
|
return c.encodeAll(w, a, make(map[Artifact]struct{}))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrRemainingIR is returned for a [IRReadFunc] that failed to call
|
||||||
|
// [IRReader.Finalise] before returning.
|
||||||
|
var ErrRemainingIR = errors.New("implementation did not consume final value")
|
||||||
|
|
||||||
|
// DanglingIdentError is an identifier in a [IRKindIdent] value that was never
|
||||||
|
// described in the IR stream before it was encountered.
|
||||||
|
type DanglingIdentError unique.Handle[ID]
|
||||||
|
|
||||||
|
func (e DanglingIdentError) Error() string {
|
||||||
|
return "artifact " + Encode(unique.Handle[ID](e).Value()) +
|
||||||
|
" was never described"
|
||||||
|
}
|
||||||
|
|
||||||
|
type (
|
||||||
|
// IRDecoder decodes [Artifact] from an IR stream. The stream is read to
|
||||||
|
// EOF and the final [Artifact] is returned. Previous artifacts may be
|
||||||
|
// looked up by their identifier.
|
||||||
|
//
|
||||||
|
// An [Artifact] may appear more than once in the same IR stream. A
|
||||||
|
// repeating [Artifact] generates a warning via [Cache] and will appear if
|
||||||
|
// verbose logging is enabled. Artifacts may only depend on artifacts
|
||||||
|
// previously described in the IR stream.
|
||||||
|
//
|
||||||
|
// IRDecoder rejects an IR stream on the first decoding error, it does not
|
||||||
|
// check against nonzero reserved ancillary data or incorrectly ordered or
|
||||||
|
// redundant unstructured dependencies. An invalid IR stream as such will
|
||||||
|
// yield [Artifact] values with identifiers disagreeing with those computed
|
||||||
|
// by IRDecoder. For this reason, IRDecoder does not access the ident cache
|
||||||
|
// to avoid putting [Cache] into an inconsistent state.
|
||||||
|
//
|
||||||
|
// Methods of IRDecoder are not safe for concurrent use.
|
||||||
|
IRDecoder struct {
|
||||||
|
// Address of underlying [Cache], must not be exposed directly.
|
||||||
|
c *Cache
|
||||||
|
|
||||||
|
// Underlying IR reader. Methods of [IRReader] must not use this as it
|
||||||
|
// bypasses ident measurement.
|
||||||
|
r io.Reader
|
||||||
|
// Artifacts already seen in the IR stream.
|
||||||
|
ident map[unique.Handle[ID]]Artifact
|
||||||
|
|
||||||
|
// Whether Decode returned, and the entire IR stream was decoded.
|
||||||
|
done, ok bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// IRReader provides methods to decode the IR wire format and read values
|
||||||
|
// from the reader embedded in the underlying [IRDecoder]. It is
|
||||||
|
// deliberately impossible to obtain the [IRValueKind] of the next value,
|
||||||
|
// and callers must never recover from panics in any read method.
|
||||||
|
//
|
||||||
|
// It is the responsibility of the caller to call Finalise after all IR
|
||||||
|
// values have been read. Failure to call Finalise causes the resulting
|
||||||
|
// [Artifact] to be rejected with [ErrRemainingIR].
|
||||||
|
//
|
||||||
|
// For an [Artifact] expected to have dependencies, the caller must consume
|
||||||
|
// all dependencies by calling Next until all dependencies are depleted, or
|
||||||
|
// call DiscardAll to explicitly discard them and rely on values encoded as
|
||||||
|
// [IRKindIdent] instead. Failure to consume all unstructured dependencies
|
||||||
|
// causes the resulting [Artifact] to be rejected with [MissedDependencyError].
|
||||||
|
//
|
||||||
|
// Requesting the value of an unstructured dependency not yet described in
|
||||||
|
// the IR stream via Next, or reading an [IRKindIdent] value not part of
|
||||||
|
// unstructured dependencies via ReadIdent may cause the resulting
|
||||||
|
// [Artifact] to be rejected with [DanglingIdentError], however either
|
||||||
|
// method may return a non-nil [Artifact] implementation of unspecified
|
||||||
|
// value.
|
||||||
|
IRReader struct {
|
||||||
|
// Address of underlying [IRDecoder], should be zeroed or made unusable
|
||||||
|
// after finalisation and must not be exposed directly.
|
||||||
|
d *IRDecoder
|
||||||
|
// Common buffer for word-sized reads.
|
||||||
|
buf [wordSize]byte
|
||||||
|
|
||||||
|
// Dependencies sent before params, sorted by identifier. Resliced on
|
||||||
|
// each call to Next and checked to be depleted during Finalise.
|
||||||
|
deps []*extIdent
|
||||||
|
|
||||||
|
// Number of values already read, -1 denotes a finalised IRReader.
|
||||||
|
count int
|
||||||
|
// Header of value currently being read.
|
||||||
|
h irValueHeader
|
||||||
|
|
||||||
|
// Measured IR reader. All reads for the current [Artifact] must go
|
||||||
|
// through this to produce a correct ident.
|
||||||
|
r io.Reader
|
||||||
|
// Buffers measure writes. Flushed and returned to d during Finalise.
|
||||||
|
ibw *bufio.Writer
|
||||||
|
}
|
||||||
|
|
||||||
|
// IRReadFunc reads IR values written by [Artifact.Params] to produce an
|
||||||
|
// instance of [Artifact] identical to the one to produce these values.
|
||||||
|
IRReadFunc func(r *IRReader) Artifact
|
||||||
|
)
|
||||||
|
|
||||||
|
// kind returns the [IRValueKind] encoded in h.
|
||||||
|
func (h irValueHeader) kind() IRValueKind {
|
||||||
|
return IRValueKind(h & irHeaderMask)
|
||||||
|
}
|
||||||
|
|
||||||
|
// value returns ancillary data encoded in h.
|
||||||
|
func (h irValueHeader) value() uint32 {
|
||||||
|
return uint32(h >> irHeaderShift)
|
||||||
|
}
|
||||||
|
|
||||||
|
// irArtifact refers to artifact IR interpretation functions and must not be
|
||||||
|
// written to directly.
|
||||||
|
var irArtifact = make(map[Kind]IRReadFunc)
|
||||||
|
|
||||||
|
// InvalidKindError is an unregistered [Kind] value.
|
||||||
|
type InvalidKindError Kind
|
||||||
|
|
||||||
|
func (e InvalidKindError) Error() string {
|
||||||
|
return "invalid artifact kind " + strconv.Itoa(int(e))
|
||||||
|
}
|
||||||
|
|
||||||
|
// register records the [IRReadFunc] of an implementation of [Artifact] under
|
||||||
|
// the specified [Kind]. Expecting to be used only during initialization, it
|
||||||
|
// panics if the mapping between [Kind] and [IRReadFunc] is not a bijection.
|
||||||
|
//
|
||||||
|
// register is not safe for concurrent use. register must not be called after
|
||||||
|
// the first instance of [Cache] has been opened.
|
||||||
|
func register(k Kind, f IRReadFunc) {
|
||||||
|
if _, ok := irArtifact[k]; ok {
|
||||||
|
panic("attempting to register " + strconv.Itoa(int(k)) + " twice")
|
||||||
|
}
|
||||||
|
irArtifact[k] = f
|
||||||
|
}
|
||||||
|
|
||||||
|
// Register records the [IRReadFunc] of a custom implementation of [Artifact]
|
||||||
|
// under the specified [Kind]. Expecting to be used only during initialization,
|
||||||
|
// it panics if the mapping between [Kind] and [IRReadFunc] is not a bijection,
|
||||||
|
// or the specified [Kind] is below [KindCustomOffset].
|
||||||
|
//
|
||||||
|
// Register is not safe for concurrent use. Register must not be called after
|
||||||
|
// the first instance of [Cache] has been opened.
|
||||||
|
func Register(k Kind, f IRReadFunc) {
|
||||||
|
if k < KindCustomOffset {
|
||||||
|
panic("attempting to register within internal kind range")
|
||||||
|
}
|
||||||
|
register(k, f)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDecoder returns a new [IRDecoder] that reads from the [io.Reader].
|
||||||
|
func (c *Cache) NewDecoder(r io.Reader) *IRDecoder {
|
||||||
|
return &IRDecoder{c, r, make(map[unique.Handle[ID]]Artifact), false, false}
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
	// irMaxValues is the arbitrary maximum number of values allowed to be
	// written by [Artifact.Params] and subsequently read via [IRReader].
	// Enforced in enterReader.
	irMaxValues = 1 << 12

	// irMaxDeps is the arbitrary maximum number of direct dependencies allowed
	// to be returned by [Artifact.Dependencies] and subsequently decoded by
	// [IRDecoder]. Enforced against the dependency count word in decode.
	irMaxDeps = 1 << 10
)
|
||||||
|
|
||||||
|
var (
	// ErrIRValues is returned for an [Artifact] with too many parameter values,
	// i.e. more than irMaxValues.
	ErrIRValues = errors.New("artifact has too many IR parameter values")

	// ErrIRDepend is returned for an [Artifact] with too many dependencies,
	// i.e. more than irMaxDeps.
	ErrIRDepend = errors.New("artifact has too many dependencies")

	// ErrAlreadyFinalised is returned when attempting to use an [IRReader] that
	// has already been finalised.
	ErrAlreadyFinalised = errors.New("reader has already finalised")
)
|
||||||
|
|
||||||
|
// enterReader panics with an appropriate error for an out-of-bounds count and
|
||||||
|
// must be called at some point in any exported method.
|
||||||
|
func (ir *IRReader) enterReader(read bool) {
|
||||||
|
if ir.count < 0 {
|
||||||
|
panic(ErrAlreadyFinalised)
|
||||||
|
}
|
||||||
|
if ir.count >= irMaxValues {
|
||||||
|
panic(ErrIRValues)
|
||||||
|
}
|
||||||
|
|
||||||
|
if read {
|
||||||
|
ir.count++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IRKindError describes an attempt to read an IR value of unexpected kind.
|
||||||
|
type IRKindError struct {
|
||||||
|
Got, Want IRValueKind
|
||||||
|
Ancillary uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *IRKindError) Error() string {
|
||||||
|
return fmt.Sprintf(
|
||||||
|
"got %s IR value (%#x) instead of %s",
|
||||||
|
e.Got, e.Ancillary, e.Want,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// readFull reads until either p is filled or an error is encountered.
|
||||||
|
func (ir *IRReader) readFull(p []byte) (n int, err error) {
|
||||||
|
for n < len(p) && err == nil {
|
||||||
|
var nn int
|
||||||
|
nn, err = ir.r.Read(p[n:])
|
||||||
|
n += nn
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// mustRead reads from the underlying measured reader and panics on error. If
|
||||||
|
// an [io.EOF] is encountered and n != len(p), the error is promoted to a
|
||||||
|
// [io.ErrUnexpectedEOF], if n == 0, [io.EOF] is kept as is, otherwise it is
|
||||||
|
// zeroed.
|
||||||
|
func (ir *IRReader) mustRead(p []byte) {
|
||||||
|
n, err := ir.readFull(p)
|
||||||
|
if err == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if errors.Is(err, io.EOF) {
|
||||||
|
if n == len(p) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err = io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// mustReadHeader reads the next header via d and checks its kind.
|
||||||
|
func (ir *IRReader) mustReadHeader(k IRValueKind) {
|
||||||
|
ir.mustRead(ir.buf[:])
|
||||||
|
ir.h = irValueHeader(binary.LittleEndian.Uint64(ir.buf[:]))
|
||||||
|
if wk := ir.h.kind(); wk != k {
|
||||||
|
panic(&IRKindError{wk, k, ir.h.value()})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// putAll returns all dependency buffers to the underlying [Cache].
|
||||||
|
func (ir *IRReader) putAll() {
|
||||||
|
for _, buf := range ir.deps {
|
||||||
|
ir.d.c.putIdentBuf(buf)
|
||||||
|
}
|
||||||
|
ir.deps = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DiscardAll discards all unstructured dependencies. This is useful to
|
||||||
|
// implementations that encode dependencies as [IRKindIdent] which are read back
|
||||||
|
// via ReadIdent.
|
||||||
|
func (ir *IRReader) DiscardAll() {
|
||||||
|
if ir.deps == nil {
|
||||||
|
panic("attempting to discard dependencies twice")
|
||||||
|
}
|
||||||
|
ir.putAll()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrDependencyDepleted is returned when attempting to advance to the next
// unstructured dependency when there are none left. It is raised via panic
// by [IRReader.Next].
var ErrDependencyDepleted = errors.New("reading past end of dependencies")
|
||||||
|
|
||||||
|
// Next returns the next unstructured dependency.
|
||||||
|
func (ir *IRReader) Next() Artifact {
|
||||||
|
if len(ir.deps) == 0 {
|
||||||
|
panic(ErrDependencyDepleted)
|
||||||
|
}
|
||||||
|
|
||||||
|
id := unique.Make(ID(ir.deps[0][wordSize:]))
|
||||||
|
ir.d.c.putIdentBuf(ir.deps[0])
|
||||||
|
ir.deps = ir.deps[1:]
|
||||||
|
|
||||||
|
if a, ok := ir.d.ident[id]; !ok {
|
||||||
|
ir.putAll()
|
||||||
|
panic(DanglingIdentError(id))
|
||||||
|
} else {
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MissedDependencyError is the number of unstructured dependencies remaining
// in [IRReader] that was never requested or explicitly discarded before
// finalisation.
type MissedDependencyError int

// Error formats the count of missed dependencies.
func (e MissedDependencyError) Error() string {
	count := strconv.Itoa(int(e))
	return "missed " + count + " unstructured dependencies"
}
|
||||||
|
|
||||||
|
var (
	// ErrUnexpectedChecksum is returned by a [IRReadFunc] that does not expect
	// a checksum but received one in [IRKindEnd] anyway.
	ErrUnexpectedChecksum = errors.New("checksum specified on unsupported artifact")

	// ErrExpectedChecksum is returned by a [IRReadFunc] that expects a checksum
	// but did not receive one in [IRKindEnd].
	ErrExpectedChecksum = errors.New("checksum required but not specified")
)
|
||||||
|
|
||||||
|
// Finalise reads the final [IRKindEnd] value and marks r as finalised. Methods
|
||||||
|
// of r are invalid upon entry into Finalise. If a [Checksum] is available via
|
||||||
|
// [IREndKnownChecksum], its handle is returned and the caller must store its
|
||||||
|
// value in the resulting [Artifact].
|
||||||
|
func (ir *IRReader) Finalise() (checksum unique.Handle[Checksum], ok bool) {
|
||||||
|
ir.enterReader(true)
|
||||||
|
ir.count = -1
|
||||||
|
|
||||||
|
ir.mustReadHeader(IRKindEnd)
|
||||||
|
f := IREndFlag(ir.h.value())
|
||||||
|
|
||||||
|
if f&IREndKnownChecksum != 0 {
|
||||||
|
buf := ir.d.c.getIdentBuf()
|
||||||
|
defer ir.d.c.putIdentBuf(buf)
|
||||||
|
|
||||||
|
ir.mustRead(buf[wordSize:])
|
||||||
|
checksum = unique.Make(Checksum(buf[wordSize:]))
|
||||||
|
ok = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := ir.ibw.Flush(); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
ir.r, ir.ibw = nil, nil
|
||||||
|
|
||||||
|
if len(ir.deps) != 0 {
|
||||||
|
panic(MissedDependencyError(len(ir.deps)))
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadIdent reads the next value as [IRKindIdent].
|
||||||
|
func (ir *IRReader) ReadIdent() Artifact {
|
||||||
|
ir.enterReader(true)
|
||||||
|
ir.mustReadHeader(IRKindIdent)
|
||||||
|
|
||||||
|
buf := ir.d.c.getIdentBuf()
|
||||||
|
defer ir.d.c.putIdentBuf(buf)
|
||||||
|
|
||||||
|
ir.mustRead(buf[wordSize:])
|
||||||
|
id := unique.Make(ID(buf[wordSize:]))
|
||||||
|
|
||||||
|
if a, ok := ir.d.ident[id]; !ok {
|
||||||
|
panic(DanglingIdentError(id))
|
||||||
|
} else {
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadUint32 reads the next value as [IRKindUint32].
|
||||||
|
func (ir *IRReader) ReadUint32() uint32 {
|
||||||
|
ir.enterReader(true)
|
||||||
|
ir.mustReadHeader(IRKindUint32)
|
||||||
|
return ir.h.value()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadStringBytes reads the next value as [IRKindString] but returns it as a
|
||||||
|
// byte slice instead.
|
||||||
|
func (ir *IRReader) ReadStringBytes() []byte {
|
||||||
|
ir.enterReader(true)
|
||||||
|
ir.mustReadHeader(IRKindString)
|
||||||
|
|
||||||
|
sz := int(ir.h.value())
|
||||||
|
szWire := alignSize(sz)
|
||||||
|
if szWire > irMaxStringLength {
|
||||||
|
panic(IRStringError("\x00"))
|
||||||
|
}
|
||||||
|
|
||||||
|
p := make([]byte, szWire)
|
||||||
|
ir.mustRead(p)
|
||||||
|
return p[:sz]
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadString reads the next value as [IRKindString].
|
||||||
|
func (ir *IRReader) ReadString() string {
|
||||||
|
p := ir.ReadStringBytes()
|
||||||
|
return unsafe.String(unsafe.SliceData(p), len(p))
|
||||||
|
}
|
||||||
|
|
||||||
|
// decode decodes the next [Artifact] in the IR stream and returns any buffer
// originating from [Cache] before returning. decode returns [io.EOF] if and
// only if the underlying [io.Reader] is already read to EOF.
func (d *IRDecoder) decode() (a Artifact, err error) {
	// panics raised by IRReader methods are converted back into err here
	defer panicToError(&err)
	var ir IRReader

	defer func() { ir.d = nil }()
	ir.d = d

	// all reads are teed into a buffered hash writer; the resulting SHA-384
	// over the measured bytes becomes the artifact identifier below
	h := sha512.New384()
	ir.ibw = d.c.getWriter(h)
	defer d.c.putWriter(ir.ibw)
	ir.r = io.TeeReader(d.r, ir.ibw)

	// the leading kind word is read directly (not via mustRead) so that a
	// clean EOF at the very start is reported as io.EOF, upholding the iff
	// contract above; a partial word is an io.ErrUnexpectedEOF instead
	if n, _err := ir.readFull(ir.buf[:]); _err != nil {
		if errors.Is(_err, io.EOF) {
			if n != 0 {
				_err = io.ErrUnexpectedEOF
			}
		}

		err = _err
		return
	}
	ak := Kind(binary.LittleEndian.Uint64(ir.buf[:]))
	f, ok := irArtifact[ak]
	if !ok {
		err = InvalidKindError(ak)
		return
	}

	// read the dependency count word and all dependency identifiers up front;
	// putAll returns any buffers not consumed by the IRReadFunc
	defer ir.putAll()
	ir.mustRead(ir.buf[:])
	sz := binary.LittleEndian.Uint64(ir.buf[:])
	if sz > irMaxDeps {
		err = ErrIRDepend
		return
	}
	ir.deps = make([]*extIdent, sz)
	for i := range ir.deps {
		ir.deps[i] = d.c.getIdentBuf()
	}
	for _, buf := range ir.deps {
		ir.mustRead(buf[:])
	}

	// hand off to the registered interpretation function
	a = f(&ir)
	if a == nil {
		// IRReadFunc returned nil without panicking; unrecoverable misuse
		err = syscall.ENOTRECOVERABLE
		return
	}

	// Finalise sets count to -1; any other value means the IRReadFunc did not
	// consume the stream through IRKindEnd
	if ir.count != -1 {
		err = ErrRemainingIR
		return
	}

	// derive the identifier from the measured bytes and record the artifact;
	// h.Sum appends into the buffer's backing array past wordSize
	buf := d.c.getIdentBuf()
	h.Sum(buf[wordSize:wordSize])
	id := unique.Make(ID(buf[wordSize:]))
	d.c.putIdentBuf(buf)
	if _, ok = d.ident[id]; !ok {
		d.ident[id] = a
	} else {
		// duplicates are tolerated but logged; the first instance wins
		d.c.msg.Verbosef(
			"artifact %s appeared more than once in IR stream",
			Encode(id.Value()),
		)
	}

	return
}
|
||||||
|
|
||||||
|
// Decode consumes the IR stream to EOF and returns the final [Artifact]. After
|
||||||
|
// Decode returns, Lookup is available and Decode must not be called again.
|
||||||
|
func (d *IRDecoder) Decode() (a Artifact, err error) {
|
||||||
|
if d.done {
|
||||||
|
panic("attempting to decode an IR stream twice")
|
||||||
|
}
|
||||||
|
defer func() { d.done = true }()
|
||||||
|
|
||||||
|
var cur Artifact
|
||||||
|
next:
|
||||||
|
a, err = d.decode()
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
cur = a
|
||||||
|
goto next
|
||||||
|
}
|
||||||
|
|
||||||
|
if errors.Is(err, io.EOF) {
|
||||||
|
a, err = cur, nil
|
||||||
|
d.ok = true
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lookup looks up an [Artifact] described by the IR stream by its identifier.
|
||||||
|
func (d *IRDecoder) Lookup(id unique.Handle[ID]) (a Artifact, ok bool) {
|
||||||
|
if !d.ok {
|
||||||
|
panic("attempting to look up artifact without full IR stream")
|
||||||
|
}
|
||||||
|
a, ok = d.ident[id]
|
||||||
|
return
|
||||||
|
}
|
||||||
114
internal/pkg/ir_test.go
Normal file
114
internal/pkg/ir_test.go
Normal file
@@ -0,0 +1,114 @@
|
|||||||
|
package pkg_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestIRRoundtrip round-trips a range of artifact fixtures through
// Cache.EncodeAll and IRDecoder.Decode over an in-memory pipe, checking that
// the decoded artifact is deeply equal to the encoded one.
func TestIRRoundtrip(t *testing.T) {
	t.Parallel()

	testCases := []struct {
		name string
		a    pkg.Artifact
	}{
		{"http get aligned", pkg.NewHTTPGet(
			nil, "file:///testdata",
			pkg.Checksum(bytes.Repeat([]byte{0xfd}, len(pkg.Checksum{}))),
		)},
		{"http get unaligned", pkg.NewHTTPGet(
			nil, "https://hakurei.app",
			pkg.Checksum(bytes.Repeat([]byte{0xfc}, len(pkg.Checksum{}))),
		)},

		{"http get tar", pkg.NewHTTPGetTar(
			nil, "file:///testdata",
			pkg.Checksum(bytes.Repeat([]byte{0xff}, len(pkg.Checksum{}))),
			pkg.TarBzip2,
		)},
		{"http get tar unaligned", pkg.NewHTTPGetTar(
			nil, "https://hakurei.app",
			pkg.Checksum(bytes.Repeat([]byte{0xfe}, len(pkg.Checksum{}))),
			pkg.TarUncompressed,
		)},

		// exec artifacts carry nested dependencies to exercise the
		// unstructured dependency path of the IR stream
		{"exec offline", pkg.NewExec(
			"exec-offline", nil, 0, false,
			pkg.AbsWork,
			[]string{"HAKUREI_TEST=1"},
			check.MustAbs("/opt/bin/testtool"),
			[]string{"testtool"},

			pkg.MustPath("/file", false, pkg.NewFile("file", []byte(
				"stub file",
			))), pkg.MustPath("/.hakurei", false, pkg.NewHTTPGetTar(
				nil, "file:///hakurei.tar",
				pkg.Checksum(bytes.Repeat([]byte{0xfc}, len(pkg.Checksum{}))),
				pkg.TarUncompressed,
			)), pkg.MustPath("/opt", false, pkg.NewHTTPGetTar(
				nil, "file:///testtool.tar.gz",
				pkg.Checksum(bytes.Repeat([]byte{0xfc}, len(pkg.Checksum{}))),
				pkg.TarGzip,
			)),
		)},

		{"exec net", pkg.NewExec(
			"exec-net",
			(*pkg.Checksum)(bytes.Repeat([]byte{0xfc}, len(pkg.Checksum{}))),
			0, false,
			pkg.AbsWork,
			[]string{"HAKUREI_TEST=1"},
			check.MustAbs("/opt/bin/testtool"),
			[]string{"testtool", "net"},

			pkg.MustPath("/file", false, pkg.NewFile("file", []byte(
				"stub file",
			))), pkg.MustPath("/.hakurei", false, pkg.NewHTTPGetTar(
				nil, "file:///hakurei.tar",
				pkg.Checksum(bytes.Repeat([]byte{0xfc}, len(pkg.Checksum{}))),
				pkg.TarUncompressed,
			)), pkg.MustPath("/opt", false, pkg.NewHTTPGetTar(
				nil, "file:///testtool.tar.gz",
				pkg.Checksum(bytes.Repeat([]byte{0xfc}, len(pkg.Checksum{}))),
				pkg.TarGzip,
			)),
		)},

		{"file anonymous", pkg.NewFile("", []byte{0})},
		{"file", pkg.NewFile("stub", []byte("stub"))},
	}
	testCasesCache := make([]cacheTestCase, len(testCases))
	for i, tc := range testCases {
		want := tc.a
		testCasesCache[i] = cacheTestCase{tc.name, nil,
			func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
				// encode into a pipe concurrently with decoding
				r, w := io.Pipe()

				done := make(chan error, 1)
				go func() {
					t.Helper()
					done <- c.EncodeAll(w, want)
					_ = w.Close()
				}()

				if got, err := c.NewDecoder(r).Decode(); err != nil {
					t.Fatalf("Decode: error = %v", err)
				} else if !reflect.DeepEqual(got, want) {
					t.Fatalf("Decode: %#v, want %#v", got, want)
				}

				// surface any error from the encoding goroutine
				if err := <-done; err != nil {
					t.Fatalf("EncodeAll: error = %v", err)
				}
			}, pkg.MustDecode(
				"E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C",
			),
		}
	}
	checkWithCache(t, testCasesCache)
}
|
||||||
@@ -19,8 +19,8 @@ type httpArtifact struct {
|
|||||||
// closing the [io.ReadCloser] returned by Cure.
|
// closing the [io.ReadCloser] returned by Cure.
|
||||||
checksum unique.Handle[Checksum]
|
checksum unique.Handle[Checksum]
|
||||||
|
|
||||||
// doFunc is the Do method of [http.Client] supplied by the caller.
|
// client is the address of the caller-supplied [http.Client].
|
||||||
doFunc func(req *http.Request) (*http.Response, error)
|
client *http.Client
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ KnownChecksum = new(httpArtifact)
|
var _ KnownChecksum = new(httpArtifact)
|
||||||
@@ -33,10 +33,7 @@ func NewHTTPGet(
|
|||||||
url string,
|
url string,
|
||||||
checksum Checksum,
|
checksum Checksum,
|
||||||
) FileArtifact {
|
) FileArtifact {
|
||||||
if c == nil {
|
return &httpArtifact{url: url, checksum: unique.Make(checksum), client: c}
|
||||||
c = http.DefaultClient
|
|
||||||
}
|
|
||||||
return &httpArtifact{url: url, checksum: unique.Make(checksum), doFunc: c.Do}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Kind returns the hardcoded [Kind] constant.
|
// Kind returns the hardcoded [Kind] constant.
|
||||||
@@ -44,8 +41,17 @@ func (*httpArtifact) Kind() Kind { return KindHTTPGet }
|
|||||||
|
|
||||||
// Params writes the backing url string. Client is not represented as it does
|
// Params writes the backing url string. Client is not represented as it does
|
||||||
// not affect [Cache.Cure] outcome.
|
// not affect [Cache.Cure] outcome.
|
||||||
func (a *httpArtifact) Params(ctx *IContext) {
|
func (a *httpArtifact) Params(ctx *IContext) { ctx.WriteString(a.url) }
|
||||||
ctx.GetHash().Write([]byte(a.url))
|
|
||||||
|
func init() {
|
||||||
|
register(KindHTTPGet, func(r *IRReader) Artifact {
|
||||||
|
url := r.ReadString()
|
||||||
|
checksum, ok := r.Finalise()
|
||||||
|
if !ok {
|
||||||
|
panic(ErrExpectedChecksum)
|
||||||
|
}
|
||||||
|
return NewHTTPGet(nil, url, checksum.Value())
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Dependencies returns a nil slice.
|
// Dependencies returns a nil slice.
|
||||||
@@ -78,9 +84,15 @@ func (a *httpArtifact) Cure(r *RContext) (rc io.ReadCloser, err error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
req.Header.Set("User-Agent", "Hakurei/1.1")
|
||||||
|
|
||||||
|
c := a.client
|
||||||
|
if c == nil {
|
||||||
|
c = http.DefaultClient
|
||||||
|
}
|
||||||
|
|
||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
if resp, err = a.doFunc(req); err != nil {
|
if resp, err = c.Do(req); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -109,7 +109,7 @@ func TestHTTPGet(t *testing.T) {
|
|||||||
)
|
)
|
||||||
wantPathname := base.Append(
|
wantPathname := base.Append(
|
||||||
"identifier",
|
"identifier",
|
||||||
"NqVORkT6L9HX6Za7kT2zcibY10qFqBaxEjPiYFrBQX-ZFr3yxCzJxbKOP0zVjeWb",
|
"oM-2pUlk-mOxK1t3aMWZer69UdOQlAXiAgMrpZ1476VoOqpYVP1aGFS9_HYy-D8_",
|
||||||
)
|
)
|
||||||
if pathname, checksum, err := c.Cure(f); err != nil {
|
if pathname, checksum, err := c.Cure(f); err != nil {
|
||||||
t.Fatalf("Cure: error = %v", err)
|
t.Fatalf("Cure: error = %v", err)
|
||||||
@@ -156,6 +156,6 @@ func TestHTTPGet(t *testing.T) {
|
|||||||
if _, _, err := c.Cure(f); !reflect.DeepEqual(err, wantErrNotFound) {
|
if _, _, err := c.Cure(f); !reflect.DeepEqual(err, wantErrNotFound) {
|
||||||
t.Fatalf("Pathname: error = %#v, want %#v", err, wantErrNotFound)
|
t.Fatalf("Pathname: error = %#v, want %#v", err, wantErrNotFound)
|
||||||
}
|
}
|
||||||
}, pkg.MustDecode("bqtn69RkV5E7V7GhhgCFjcvbxmaqrO8DywamM4Tyjf10F6EJBHjXiIa_tFRtF4iN")},
|
}, pkg.MustDecode("L_0RFHpr9JUS4Zp14rz2dESSRvfLzpvqsLhR1-YjQt8hYlmEdVl7vI3_-v8UNPKs")},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -28,15 +28,21 @@ import (
|
|||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
"hakurei.app/container/check"
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/internal/info"
|
||||||
"hakurei.app/internal/lockedfile"
|
"hakurei.app/internal/lockedfile"
|
||||||
"hakurei.app/message"
|
"hakurei.app/message"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// programName is the string identifying this build system.
|
||||||
|
programName = "internal/pkg"
|
||||||
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
// A Checksum is a SHA-384 checksum computed for a cured [Artifact].
|
// A Checksum is a SHA-384 checksum computed for a cured [Artifact].
|
||||||
Checksum = [sha512.Size384]byte
|
Checksum = [sha512.Size384]byte
|
||||||
|
|
||||||
// An ID is a unique identifier returned by [Artifact.ID]. This value must
|
// An ID is a unique identifier returned by [KnownIdent.ID]. This value must
|
||||||
// be deterministically determined ahead of time.
|
// be deterministically determined ahead of time.
|
||||||
ID Checksum
|
ID Checksum
|
||||||
)
|
)
|
||||||
@@ -65,33 +71,11 @@ func MustDecode(s string) (checksum Checksum) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// IContext is passed to [Artifact.Params] and provides identifier information
|
// common holds elements and receives methods shared between different contexts.
|
||||||
// and the target [hash.Hash] for writing params into.
|
type common struct {
|
||||||
//
|
|
||||||
// Methods of IContext are safe for concurrent use. IContext is valid
|
|
||||||
// until [Artifact.Params] returns.
|
|
||||||
type IContext struct {
|
|
||||||
// Address of underlying [Cache], should be zeroed or made unusable after
|
// Address of underlying [Cache], should be zeroed or made unusable after
|
||||||
// [Artifact.Params] returns and must not be exposed directly.
|
// Cure returns and must not be exposed directly.
|
||||||
cache *Cache
|
cache *Cache
|
||||||
// Made available for writing, should be zeroed after [Artifact.Params]
|
|
||||||
// returns. Internal state must not be inspected.
|
|
||||||
h hash.Hash
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unwrap returns the underlying [context.Context].
|
|
||||||
func (i *IContext) Unwrap() context.Context { return i.cache.ctx }
|
|
||||||
|
|
||||||
// GetHash returns the underlying [hash.Hash] for writing. Callers must not
|
|
||||||
// attempt to inspect its internal state.
|
|
||||||
func (i *IContext) GetHash() hash.Hash { return i.h }
|
|
||||||
|
|
||||||
// WriteIdent writes the identifier of [Artifact] to the underlying [hash.Hash].
|
|
||||||
func (i *IContext) WriteIdent(a Artifact) {
|
|
||||||
buf := i.cache.getIdentBuf()
|
|
||||||
*(*ID)(buf[wordSize:]) = i.cache.Ident(a).Value()
|
|
||||||
i.h.Write(buf[wordSize:])
|
|
||||||
i.cache.putIdentBuf(buf)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// TContext is passed to [TrivialArtifact.Cure] and provides information and
|
// TContext is passed to [TrivialArtifact.Cure] and provides information and
|
||||||
@@ -100,12 +84,62 @@ func (i *IContext) WriteIdent(a Artifact) {
|
|||||||
// Methods of TContext are safe for concurrent use. TContext is valid
|
// Methods of TContext are safe for concurrent use. TContext is valid
|
||||||
// until [TrivialArtifact.Cure] returns.
|
// until [TrivialArtifact.Cure] returns.
|
||||||
type TContext struct {
|
type TContext struct {
|
||||||
// Address of underlying [Cache], should be zeroed or made unusable after
|
|
||||||
// [TrivialArtifact.Cure] returns and must not be exposed directly.
|
|
||||||
cache *Cache
|
|
||||||
|
|
||||||
// Populated during [Cache.Cure].
|
// Populated during [Cache.Cure].
|
||||||
work, temp *check.Absolute
|
work, temp *check.Absolute
|
||||||
|
|
||||||
|
// Target [Artifact] encoded identifier.
|
||||||
|
ids string
|
||||||
|
// Pathname status was created at.
|
||||||
|
statusPath *check.Absolute
|
||||||
|
// File statusHeader and logs are written to.
|
||||||
|
status *os.File
|
||||||
|
// Error value during prepareStatus.
|
||||||
|
statusErr error
|
||||||
|
|
||||||
|
common
|
||||||
|
}
|
||||||
|
|
||||||
|
// statusHeader is the header written to all status files in dirStatus.
|
||||||
|
var statusHeader = func() string {
|
||||||
|
s := programName
|
||||||
|
if v := info.Version(); v != info.FallbackVersion {
|
||||||
|
s += " " + v
|
||||||
|
}
|
||||||
|
s += " (" + runtime.GOARCH + ")"
|
||||||
|
if name, err := os.Hostname(); err == nil {
|
||||||
|
s += " on " + name
|
||||||
|
}
|
||||||
|
s += "\n\n"
|
||||||
|
return s
|
||||||
|
}()
|
||||||
|
|
||||||
|
// prepareStatus initialises the status file once.
|
||||||
|
func (t *TContext) prepareStatus() error {
|
||||||
|
if t.statusPath != nil || t.status != nil {
|
||||||
|
return t.statusErr
|
||||||
|
}
|
||||||
|
|
||||||
|
t.statusPath = t.cache.base.Append(
|
||||||
|
dirStatus,
|
||||||
|
t.ids,
|
||||||
|
)
|
||||||
|
if t.status, t.statusErr = os.OpenFile(
|
||||||
|
t.statusPath.String(),
|
||||||
|
syscall.O_CREAT|syscall.O_EXCL|syscall.O_WRONLY,
|
||||||
|
0400,
|
||||||
|
); t.statusErr != nil {
|
||||||
|
return t.statusErr
|
||||||
|
}
|
||||||
|
|
||||||
|
_, t.statusErr = t.status.WriteString(statusHeader)
|
||||||
|
return t.statusErr
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetStatusWriter returns a [io.Writer] for build logs. The caller must not
|
||||||
|
// seek this writer before the position it was first returned in.
|
||||||
|
func (t *TContext) GetStatusWriter() (io.Writer, error) {
|
||||||
|
err := t.prepareStatus()
|
||||||
|
return t.status, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// destroy destroys the temporary directory and joins its errors with the error
|
// destroy destroys the temporary directory and joins its errors with the error
|
||||||
@@ -113,12 +147,15 @@ type TContext struct {
|
|||||||
// directory is removed similarly. [Cache] is responsible for making sure work
|
// directory is removed similarly. [Cache] is responsible for making sure work
|
||||||
// is never left behind for a successful [Cache.Cure].
|
// is never left behind for a successful [Cache.Cure].
|
||||||
//
|
//
|
||||||
|
// If implementation had requested status, it is closed with error joined with
|
||||||
|
// the error referred to by errP. If the error referred to by errP is non-nil,
|
||||||
|
// the status file is removed from the filesystem.
|
||||||
|
//
|
||||||
// destroy must be deferred by [Cache.Cure] if [TContext] is passed to any Cure
|
// destroy must be deferred by [Cache.Cure] if [TContext] is passed to any Cure
|
||||||
// implementation. It should not be called prior to that point.
|
// implementation. It should not be called prior to that point.
|
||||||
func (t *TContext) destroy(errP *error) {
|
func (t *TContext) destroy(errP *error) {
|
||||||
if chmodErr, removeErr := removeAll(t.temp); chmodErr != nil || removeErr != nil {
|
if chmodErr, removeErr := removeAll(t.temp); chmodErr != nil || removeErr != nil {
|
||||||
*errP = errors.Join(*errP, chmodErr, removeErr)
|
*errP = errors.Join(*errP, chmodErr, removeErr)
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if *errP != nil {
|
if *errP != nil {
|
||||||
@@ -126,17 +163,31 @@ func (t *TContext) destroy(errP *error) {
|
|||||||
if chmodErr != nil || removeErr != nil {
|
if chmodErr != nil || removeErr != nil {
|
||||||
*errP = errors.Join(*errP, chmodErr, removeErr)
|
*errP = errors.Join(*errP, chmodErr, removeErr)
|
||||||
} else if errors.Is(*errP, os.ErrExist) {
|
} else if errors.Is(*errP, os.ErrExist) {
|
||||||
|
var linkError *os.LinkError
|
||||||
|
if errors.As(*errP, &linkError) && linkError != nil &&
|
||||||
|
linkError.Op == "rename" {
|
||||||
// two artifacts may be backed by the same file
|
// two artifacts may be backed by the same file
|
||||||
*errP = nil
|
*errP = nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if t.status != nil {
|
||||||
|
if err := t.status.Close(); err != nil {
|
||||||
|
*errP = errors.Join(*errP, err)
|
||||||
|
}
|
||||||
|
if *errP != nil {
|
||||||
|
*errP = errors.Join(*errP, os.Remove(t.statusPath.String()))
|
||||||
|
}
|
||||||
|
t.status = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Unwrap returns the underlying [context.Context].
|
// Unwrap returns the underlying [context.Context].
|
||||||
func (t *TContext) Unwrap() context.Context { return t.cache.ctx }
|
func (c *common) Unwrap() context.Context { return c.cache.ctx }
|
||||||
|
|
||||||
// GetMessage returns [message.Msg] held by the underlying [Cache].
|
// GetMessage returns [message.Msg] held by the underlying [Cache].
|
||||||
func (t *TContext) GetMessage() message.Msg { return t.cache.msg }
|
func (c *common) GetMessage() message.Msg { return c.cache.msg }
|
||||||
|
|
||||||
// GetWorkDir returns a pathname to a directory which [Artifact] is expected to
|
// GetWorkDir returns a pathname to a directory which [Artifact] is expected to
|
||||||
// write its output to. This is not the final resting place of the [Artifact]
|
// write its output to. This is not the final resting place of the [Artifact]
|
||||||
@@ -155,13 +206,13 @@ func (t *TContext) GetTempDir() *check.Absolute { return t.temp }
|
|||||||
// If err is nil, the caller must close the resulting [io.ReadCloser] and return
|
// If err is nil, the caller must close the resulting [io.ReadCloser] and return
|
||||||
// its error, if any. Failure to read r to EOF may result in a spurious
|
// its error, if any. Failure to read r to EOF may result in a spurious
|
||||||
// [ChecksumMismatchError], or the underlying implementation may block on Close.
|
// [ChecksumMismatchError], or the underlying implementation may block on Close.
|
||||||
func (t *TContext) Open(a Artifact) (r io.ReadCloser, err error) {
|
func (c *common) Open(a Artifact) (r io.ReadCloser, err error) {
|
||||||
if f, ok := a.(FileArtifact); ok {
|
if f, ok := a.(FileArtifact); ok {
|
||||||
return t.cache.openFile(f)
|
return c.cache.openFile(f)
|
||||||
}
|
}
|
||||||
|
|
||||||
var pathname *check.Absolute
|
var pathname *check.Absolute
|
||||||
if pathname, _, err = t.cache.Cure(a); err != nil {
|
if pathname, _, err = c.cache.Cure(a); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -193,7 +244,7 @@ type FContext struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// InvalidLookupError is the identifier of non-dependency [Artifact] looked up
|
// InvalidLookupError is the identifier of non-dependency [Artifact] looked up
|
||||||
// via [FContext.Pathname] by a misbehaving [Artifact] implementation.
|
// via [FContext.GetArtifact] by a misbehaving [Artifact] implementation.
|
||||||
type InvalidLookupError ID
|
type InvalidLookupError ID
|
||||||
|
|
||||||
func (e InvalidLookupError) Error() string {
|
func (e InvalidLookupError) Error() string {
|
||||||
@@ -220,14 +271,7 @@ func (f *FContext) GetArtifact(a Artifact) (
|
|||||||
//
|
//
|
||||||
// Methods of RContext are safe for concurrent use. RContext is valid
|
// Methods of RContext are safe for concurrent use. RContext is valid
|
||||||
// until [FileArtifact.Cure] returns.
|
// until [FileArtifact.Cure] returns.
|
||||||
type RContext struct {
|
type RContext struct{ common }
|
||||||
// Address of underlying [Cache], should be zeroed or made unusable after
|
|
||||||
// [FileArtifact.Cure] returns and must not be exposed directly.
|
|
||||||
cache *Cache
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unwrap returns the underlying [context.Context].
|
|
||||||
func (r *RContext) Unwrap() context.Context { return r.cache.ctx }
|
|
||||||
|
|
||||||
// An Artifact is a read-only reference to a piece of data that may be created
|
// An Artifact is a read-only reference to a piece of data that may be created
|
||||||
// deterministically but might not currently be available in memory or on the
|
// deterministically but might not currently be available in memory or on the
|
||||||
@@ -238,10 +282,12 @@ type Artifact interface {
|
|||||||
// [Artifact] is allowed to return the same [Kind] value.
|
// [Artifact] is allowed to return the same [Kind] value.
|
||||||
Kind() Kind
|
Kind() Kind
|
||||||
|
|
||||||
// Params writes opaque bytes that describes [Artifact]. Implementations
|
// Params writes deterministic values describing [Artifact]. Implementations
|
||||||
// must guarantee that these values are unique among differing instances
|
// must guarantee that these values are unique among differing instances
|
||||||
// of the same implementation with the same dependencies. Callers must not
|
// of the same implementation with identical dependencies and conveys enough
|
||||||
// attempt to interpret these params.
|
// information to create another instance of [Artifact] identical to the
|
||||||
|
// instance emitting these values. The new instance created via [IRReadFunc]
|
||||||
|
// from these values must then produce identical IR values.
|
||||||
//
|
//
|
||||||
// Result must remain identical across multiple invocations.
|
// Result must remain identical across multiple invocations.
|
||||||
Params(ctx *IContext)
|
Params(ctx *IContext)
|
||||||
@@ -404,6 +450,9 @@ const (
|
|||||||
// dirChecksum is the directory name appended to Cache.base for storing
|
// dirChecksum is the directory name appended to Cache.base for storing
|
||||||
// artifacts named after their [Checksum].
|
// artifacts named after their [Checksum].
|
||||||
dirChecksum = "checksum"
|
dirChecksum = "checksum"
|
||||||
|
// dirStatus is the directory name appended to Cache.base for storing
|
||||||
|
// artifact metadata and logs named after their [ID].
|
||||||
|
dirStatus = "status"
|
||||||
|
|
||||||
// dirWork is the directory name appended to Cache.base for working
|
// dirWork is the directory name appended to Cache.base for working
|
||||||
// pathnames set up during [Cache.Cure].
|
// pathnames set up during [Cache.Cure].
|
||||||
@@ -493,7 +542,7 @@ type Cache struct {
|
|||||||
// Synchronises entry into exclusive artifacts for the cure method.
|
// Synchronises entry into exclusive artifacts for the cure method.
|
||||||
exclMu sync.Mutex
|
exclMu sync.Mutex
|
||||||
// Buffered I/O free list, must not be accessed directly.
|
// Buffered I/O free list, must not be accessed directly.
|
||||||
bufioPool sync.Pool
|
brPool, bwPool sync.Pool
|
||||||
|
|
||||||
// Unlocks the on-filesystem cache. Must only be called from Close.
|
// Unlocks the on-filesystem cache. Must only be called from Close.
|
||||||
unlock func()
|
unlock func()
|
||||||
@@ -564,42 +613,37 @@ func (c *Cache) unsafeIdent(a Artifact, encodeKind bool) (
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
deps := a.Dependencies()
|
|
||||||
idents := make([]*extIdent, len(deps))
|
|
||||||
for i, d := range deps {
|
|
||||||
dbuf, did := c.unsafeIdent(d, true)
|
|
||||||
if dbuf == nil {
|
|
||||||
dbuf = c.getIdentBuf()
|
|
||||||
binary.LittleEndian.PutUint64(dbuf[:], uint64(d.Kind()))
|
|
||||||
*(*ID)(dbuf[wordSize:]) = did.Value()
|
|
||||||
} else {
|
|
||||||
c.storeIdent(d, dbuf)
|
|
||||||
}
|
|
||||||
defer c.putIdentBuf(dbuf)
|
|
||||||
idents[i] = dbuf
|
|
||||||
}
|
|
||||||
slices.SortFunc(idents, func(a, b *extIdent) int {
|
|
||||||
return bytes.Compare(a[:], b[:])
|
|
||||||
})
|
|
||||||
idents = slices.CompactFunc(idents, func(a, b *extIdent) bool {
|
|
||||||
return *a == *b
|
|
||||||
})
|
|
||||||
|
|
||||||
buf = c.getIdentBuf()
|
buf = c.getIdentBuf()
|
||||||
h := sha512.New384()
|
h := sha512.New384()
|
||||||
binary.LittleEndian.PutUint64(buf[:], uint64(a.Kind()))
|
if err := c.Encode(h, a); err != nil {
|
||||||
h.Write(buf[:wordSize])
|
// unreachable
|
||||||
i := IContext{c, h}
|
panic(err)
|
||||||
a.Params(&i)
|
|
||||||
i.cache, i.h = nil, nil
|
|
||||||
for _, dn := range idents {
|
|
||||||
h.Write(dn[:])
|
|
||||||
}
|
}
|
||||||
|
binary.LittleEndian.PutUint64(buf[:], uint64(a.Kind()))
|
||||||
h.Sum(buf[wordSize:wordSize])
|
h.Sum(buf[wordSize:wordSize])
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// getReader is like [bufio.NewReader] but for brPool.
|
||||||
|
func (c *Cache) getReader(r io.Reader) *bufio.Reader {
|
||||||
|
br := c.brPool.Get().(*bufio.Reader)
|
||||||
|
br.Reset(r)
|
||||||
|
return br
|
||||||
|
}
|
||||||
|
|
||||||
|
// putReader adds br to brPool.
|
||||||
|
func (c *Cache) putReader(br *bufio.Reader) { c.brPool.Put(br) }
|
||||||
|
|
||||||
|
// getWriter is like [bufio.NewWriter] but for bwPool.
|
||||||
|
func (c *Cache) getWriter(w io.Writer) *bufio.Writer {
|
||||||
|
bw := c.bwPool.Get().(*bufio.Writer)
|
||||||
|
bw.Reset(w)
|
||||||
|
return bw
|
||||||
|
}
|
||||||
|
|
||||||
|
// putWriter adds bw to bwPool.
|
||||||
|
func (c *Cache) putWriter(bw *bufio.Writer) { c.bwPool.Put(bw) }
|
||||||
|
|
||||||
// A ChecksumMismatchError describes an [Artifact] with unexpected content.
|
// A ChecksumMismatchError describes an [Artifact] with unexpected content.
|
||||||
type ChecksumMismatchError struct {
|
type ChecksumMismatchError struct {
|
||||||
// Actual and expected checksums.
|
// Actual and expected checksums.
|
||||||
@@ -621,6 +665,9 @@ type ScrubError struct {
|
|||||||
// Dangling identifier symlinks. This can happen if the content-addressed
|
// Dangling identifier symlinks. This can happen if the content-addressed
|
||||||
// entry was removed while scrubbing due to a checksum mismatch.
|
// entry was removed while scrubbing due to a checksum mismatch.
|
||||||
DanglingIdentifiers []ID
|
DanglingIdentifiers []ID
|
||||||
|
// Dangling status files. This can happen if a dangling status symlink was
|
||||||
|
// removed while scrubbing.
|
||||||
|
DanglingStatus []ID
|
||||||
// Miscellaneous errors, including [os.ReadDir] on checksum and identifier
|
// Miscellaneous errors, including [os.ReadDir] on checksum and identifier
|
||||||
// directories, [Decode] on entry names and [os.RemoveAll] on inconsistent
|
// directories, [Decode] on entry names and [os.RemoveAll] on inconsistent
|
||||||
// entries.
|
// entries.
|
||||||
@@ -672,6 +719,13 @@ func (e *ScrubError) Error() string {
|
|||||||
}
|
}
|
||||||
segments = append(segments, s)
|
segments = append(segments, s)
|
||||||
}
|
}
|
||||||
|
if len(e.DanglingStatus) > 0 {
|
||||||
|
s := "dangling status:\n"
|
||||||
|
for _, id := range e.DanglingStatus {
|
||||||
|
s += Encode(id) + "\n"
|
||||||
|
}
|
||||||
|
segments = append(segments, s)
|
||||||
|
}
|
||||||
if len(e.Errs) > 0 {
|
if len(e.Errs) > 0 {
|
||||||
s := "errors during scrub:\n"
|
s := "errors during scrub:\n"
|
||||||
for pathname, errs := range e.errs {
|
for pathname, errs := range e.errs {
|
||||||
@@ -850,6 +904,36 @@ func (c *Cache) Scrub(checks int) error {
|
|||||||
wg.Wait()
|
wg.Wait()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
dir = c.base.Append(dirStatus)
|
||||||
|
if entries, readdirErr := os.ReadDir(dir.String()); readdirErr != nil {
|
||||||
|
if !errors.Is(readdirErr, os.ErrNotExist) {
|
||||||
|
addErr(dir, readdirErr)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
wg.Add(len(entries))
|
||||||
|
for _, ent := range entries {
|
||||||
|
w <- checkEntry{ent, func(ent os.DirEntry, want *Checksum) bool {
|
||||||
|
got := p.Get().(*Checksum)
|
||||||
|
defer p.Put(got)
|
||||||
|
|
||||||
|
if _, err := os.Stat(c.base.Append(
|
||||||
|
dirIdentifier,
|
||||||
|
ent.Name(),
|
||||||
|
).String()); err != nil {
|
||||||
|
if !errors.Is(err, os.ErrNotExist) {
|
||||||
|
addErr(dir.Append(ent.Name()), err)
|
||||||
|
}
|
||||||
|
seMu.Lock()
|
||||||
|
se.DanglingStatus = append(se.DanglingStatus, *want)
|
||||||
|
seMu.Unlock()
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
if len(c.identPending) > 0 {
|
if len(c.identPending) > 0 {
|
||||||
addErr(c.base, errors.New(
|
addErr(c.base, errors.New(
|
||||||
"scrub began with pending artifacts",
|
"scrub began with pending artifacts",
|
||||||
@@ -880,6 +964,7 @@ func (c *Cache) Scrub(checks int) error {
|
|||||||
|
|
||||||
if len(se.ChecksumMismatches) > 0 ||
|
if len(se.ChecksumMismatches) > 0 ||
|
||||||
len(se.DanglingIdentifiers) > 0 ||
|
len(se.DanglingIdentifiers) > 0 ||
|
||||||
|
len(se.DanglingStatus) > 0 ||
|
||||||
len(se.Errs) > 0 {
|
len(se.Errs) > 0 {
|
||||||
slices.SortFunc(se.ChecksumMismatches, func(a, b ChecksumMismatchError) int {
|
slices.SortFunc(se.ChecksumMismatches, func(a, b ChecksumMismatchError) int {
|
||||||
return bytes.Compare(a.Want[:], b.Want[:])
|
return bytes.Compare(a.Want[:], b.Want[:])
|
||||||
@@ -887,6 +972,9 @@ func (c *Cache) Scrub(checks int) error {
|
|||||||
slices.SortFunc(se.DanglingIdentifiers, func(a, b ID) int {
|
slices.SortFunc(se.DanglingIdentifiers, func(a, b ID) int {
|
||||||
return bytes.Compare(a[:], b[:])
|
return bytes.Compare(a[:], b[:])
|
||||||
})
|
})
|
||||||
|
slices.SortFunc(se.DanglingStatus, func(a, b ID) int {
|
||||||
|
return bytes.Compare(a[:], b[:])
|
||||||
|
})
|
||||||
return &se
|
return &se
|
||||||
} else {
|
} else {
|
||||||
return nil
|
return nil
|
||||||
@@ -977,22 +1065,24 @@ func (c *Cache) openFile(f FileArtifact) (r io.ReadCloser, err error) {
|
|||||||
if !errors.Is(err, os.ErrNotExist) {
|
if !errors.Is(err, os.ErrNotExist) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
id := c.Ident(f)
|
||||||
if c.msg.IsVerbose() {
|
if c.msg.IsVerbose() {
|
||||||
rn := reportName(f, c.Ident(f))
|
rn := reportName(f, id)
|
||||||
c.msg.Verbosef("curing %s to memory...", rn)
|
c.msg.Verbosef("curing %s in memory...", rn)
|
||||||
defer func() {
|
defer func() {
|
||||||
if err == nil {
|
if err == nil {
|
||||||
c.msg.Verbosef("cured %s to memory", rn)
|
c.msg.Verbosef("opened %s for reading", rn)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
return f.Cure(&RContext{c})
|
return f.Cure(&RContext{common{c}})
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// InvalidFileModeError describes an [Artifact.Cure] that did not result in
|
// InvalidFileModeError describes a [FloodArtifact.Cure] or
|
||||||
// a regular file or directory located at the work pathname.
|
// [TrivialArtifact.Cure] that did not result in a regular file or directory
|
||||||
|
// located at the work pathname.
|
||||||
type InvalidFileModeError fs.FileMode
|
type InvalidFileModeError fs.FileMode
|
||||||
|
|
||||||
// Error returns a constant string.
|
// Error returns a constant string.
|
||||||
@@ -1000,8 +1090,8 @@ func (e InvalidFileModeError) Error() string {
|
|||||||
return "artifact did not produce a regular file or directory"
|
return "artifact did not produce a regular file or directory"
|
||||||
}
|
}
|
||||||
|
|
||||||
// NoOutputError describes an [Artifact.Cure] that did not populate its
|
// NoOutputError describes a [FloodArtifact.Cure] or [TrivialArtifact.Cure]
|
||||||
// work pathname despite completing successfully.
|
// that did not populate its work pathname despite completing successfully.
|
||||||
type NoOutputError struct{}
|
type NoOutputError struct{}
|
||||||
|
|
||||||
// Unwrap returns [os.ErrNotExist].
|
// Unwrap returns [os.ErrNotExist].
|
||||||
@@ -1179,39 +1269,41 @@ func (e *CureError) Error() string { return e.Err.Error() }
|
|||||||
// A DependencyCureError wraps errors returned while curing dependencies.
|
// A DependencyCureError wraps errors returned while curing dependencies.
|
||||||
type DependencyCureError []*CureError
|
type DependencyCureError []*CureError
|
||||||
|
|
||||||
// sort sorts underlying errors by their identifier.
|
// unwrapM recursively expands underlying errors into a caller-supplied map.
|
||||||
func (e *DependencyCureError) sort() {
|
func (e *DependencyCureError) unwrapM(me map[unique.Handle[ID]]*CureError) {
|
||||||
var identBuf [2]ID
|
for _, err := range *e {
|
||||||
slices.SortFunc(*e, func(a, b *CureError) int {
|
if _, ok := me[err.Ident]; ok {
|
||||||
identBuf[0], identBuf[1] = a.Ident.Value(), b.Ident.Value()
|
continue
|
||||||
return slices.Compare(identBuf[0][:], identBuf[1][:])
|
}
|
||||||
})
|
if _e, ok := err.Err.(*DependencyCureError); ok {
|
||||||
|
_e.unwrapM(me)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
me[err.Ident] = err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// unwrap recursively expands and deduplicates underlying errors.
|
// unwrap recursively expands and deduplicates underlying errors.
|
||||||
func (e *DependencyCureError) unwrap() DependencyCureError {
|
func (e *DependencyCureError) unwrap() DependencyCureError {
|
||||||
errs := make(DependencyCureError, 0, len(*e))
|
me := make(map[unique.Handle[ID]]*CureError)
|
||||||
for _, err := range *e {
|
e.unwrapM(me)
|
||||||
if _e, ok := err.Err.(*DependencyCureError); ok {
|
errs := slices.AppendSeq(
|
||||||
errs = append(errs, _e.unwrap()...)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
errs = append(errs, err)
|
|
||||||
}
|
|
||||||
me := make(map[unique.Handle[ID]]*CureError, len(errs))
|
|
||||||
for _, err := range errs {
|
|
||||||
me[err.Ident] = err
|
|
||||||
}
|
|
||||||
return slices.AppendSeq(
|
|
||||||
make(DependencyCureError, 0, len(me)),
|
make(DependencyCureError, 0, len(me)),
|
||||||
maps.Values(me),
|
maps.Values(me),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var identBuf [2]ID
|
||||||
|
slices.SortFunc(errs, func(a, b *CureError) int {
|
||||||
|
identBuf[0], identBuf[1] = a.Ident.Value(), b.Ident.Value()
|
||||||
|
return slices.Compare(identBuf[0][:], identBuf[1][:])
|
||||||
|
})
|
||||||
|
|
||||||
|
return errs
|
||||||
}
|
}
|
||||||
|
|
||||||
// Unwrap returns a deduplicated slice of underlying errors.
|
// Unwrap returns a deduplicated slice of underlying errors.
|
||||||
func (e *DependencyCureError) Unwrap() []error {
|
func (e *DependencyCureError) Unwrap() []error {
|
||||||
errs := e.unwrap()
|
errs := e.unwrap()
|
||||||
errs.sort()
|
|
||||||
_errs := make([]error, len(errs))
|
_errs := make([]error, len(errs))
|
||||||
for i, err := range errs {
|
for i, err := range errs {
|
||||||
_errs[i] = err
|
_errs[i] = err
|
||||||
@@ -1222,7 +1314,6 @@ func (e *DependencyCureError) Unwrap() []error {
|
|||||||
// Error returns a user-facing multiline error message.
|
// Error returns a user-facing multiline error message.
|
||||||
func (e *DependencyCureError) Error() string {
|
func (e *DependencyCureError) Error() string {
|
||||||
errs := e.unwrap()
|
errs := e.unwrap()
|
||||||
errs.sort()
|
|
||||||
if len(errs) == 0 {
|
if len(errs) == 0 {
|
||||||
return "invalid dependency cure outcome"
|
return "invalid dependency cure outcome"
|
||||||
}
|
}
|
||||||
@@ -1267,13 +1358,6 @@ func (c *Cache) exitCure(a Artifact, curesExempt bool) {
|
|||||||
<-c.cures
|
<-c.cures
|
||||||
}
|
}
|
||||||
|
|
||||||
// getWriter is like [bufio.NewWriter] but for bufioPool.
|
|
||||||
func (c *Cache) getWriter(w io.Writer) *bufio.Writer {
|
|
||||||
bw := c.bufioPool.Get().(*bufio.Writer)
|
|
||||||
bw.Reset(w)
|
|
||||||
return bw
|
|
||||||
}
|
|
||||||
|
|
||||||
// measuredReader implements [io.ReadCloser] and measures the checksum during
|
// measuredReader implements [io.ReadCloser] and measures the checksum during
|
||||||
// Close. If the underlying reader is not read to EOF, Close blocks until all
|
// Close. If the underlying reader is not read to EOF, Close blocks until all
|
||||||
// remaining data is consumed and validated.
|
// remaining data is consumed and validated.
|
||||||
@@ -1356,9 +1440,6 @@ func (r *RContext) NewMeasuredReader(
|
|||||||
return r.cache.newMeasuredReader(rc, checksum)
|
return r.cache.newMeasuredReader(rc, checksum)
|
||||||
}
|
}
|
||||||
|
|
||||||
// putWriter adds bw to bufioPool.
|
|
||||||
func (c *Cache) putWriter(bw *bufio.Writer) { c.bufioPool.Put(bw) }
|
|
||||||
|
|
||||||
// cure implements Cure without checking the full dependency graph.
|
// cure implements Cure without checking the full dependency graph.
|
||||||
func (c *Cache) cure(a Artifact, curesExempt bool) (
|
func (c *Cache) cure(a Artifact, curesExempt bool) (
|
||||||
pathname *check.Absolute,
|
pathname *check.Absolute,
|
||||||
@@ -1490,7 +1571,7 @@ func (c *Cache) cure(a Artifact, curesExempt bool) (
|
|||||||
if err = c.enterCure(a, curesExempt); err != nil {
|
if err = c.enterCure(a, curesExempt); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
r, err = f.Cure(&RContext{c})
|
r, err = f.Cure(&RContext{common{c}})
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if checksumPathname == nil || c.IsStrict() {
|
if checksumPathname == nil || c.IsStrict() {
|
||||||
h := sha512.New384()
|
h := sha512.New384()
|
||||||
@@ -1566,7 +1647,12 @@ func (c *Cache) cure(a Artifact, curesExempt bool) (
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
t := TContext{c, c.base.Append(dirWork, ids), c.base.Append(dirTemp, ids)}
|
t := TContext{
|
||||||
|
c.base.Append(dirWork, ids),
|
||||||
|
c.base.Append(dirTemp, ids),
|
||||||
|
ids, nil, nil, nil,
|
||||||
|
common{c},
|
||||||
|
}
|
||||||
switch ca := a.(type) {
|
switch ca := a.(type) {
|
||||||
case TrivialArtifact:
|
case TrivialArtifact:
|
||||||
defer t.destroy(&err)
|
defer t.destroy(&err)
|
||||||
@@ -1704,6 +1790,18 @@ func (pending *pendingArtifactDep) cure(c *Cache) {
|
|||||||
pending.errsMu.Unlock()
|
pending.errsMu.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// OpenStatus attempts to open the status file associated to an [Artifact]. If
|
||||||
|
// err is nil, the caller must close the resulting reader.
|
||||||
|
func (c *Cache) OpenStatus(a Artifact) (r io.ReadSeekCloser, err error) {
|
||||||
|
c.identMu.RLock()
|
||||||
|
r, err = os.Open(c.base.Append(
|
||||||
|
dirStatus,
|
||||||
|
Encode(c.Ident(a).Value())).String(),
|
||||||
|
)
|
||||||
|
c.identMu.RUnlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
// Close cancels all pending cures and waits for them to clean up.
|
// Close cancels all pending cures and waits for them to clean up.
|
||||||
func (c *Cache) Close() {
|
func (c *Cache) Close() {
|
||||||
c.closeOnce.Do(func() {
|
c.closeOnce.Do(func() {
|
||||||
@@ -1752,6 +1850,7 @@ func open(
|
|||||||
for _, name := range []string{
|
for _, name := range []string{
|
||||||
dirIdentifier,
|
dirIdentifier,
|
||||||
dirChecksum,
|
dirChecksum,
|
||||||
|
dirStatus,
|
||||||
dirWork,
|
dirWork,
|
||||||
} {
|
} {
|
||||||
if err := os.MkdirAll(base.Append(name).String(), 0700); err != nil &&
|
if err := os.MkdirAll(base.Append(name).String(), 0700); err != nil &&
|
||||||
@@ -1766,13 +1865,16 @@ func open(
|
|||||||
msg: msg,
|
msg: msg,
|
||||||
base: base,
|
base: base,
|
||||||
|
|
||||||
|
identPool: sync.Pool{New: func() any { return new(extIdent) }},
|
||||||
|
|
||||||
ident: make(map[unique.Handle[ID]]unique.Handle[Checksum]),
|
ident: make(map[unique.Handle[ID]]unique.Handle[Checksum]),
|
||||||
identErr: make(map[unique.Handle[ID]]error),
|
identErr: make(map[unique.Handle[ID]]error),
|
||||||
identPending: make(map[unique.Handle[ID]]<-chan struct{}),
|
identPending: make(map[unique.Handle[ID]]<-chan struct{}),
|
||||||
|
|
||||||
|
brPool: sync.Pool{New: func() any { return new(bufio.Reader) }},
|
||||||
|
bwPool: sync.Pool{New: func() any { return new(bufio.Writer) }},
|
||||||
}
|
}
|
||||||
c.ctx, c.cancel = context.WithCancel(ctx)
|
c.ctx, c.cancel = context.WithCancel(ctx)
|
||||||
c.identPool.New = func() any { return new(extIdent) }
|
|
||||||
c.bufioPool.New = func() any { return new(bufio.Writer) }
|
|
||||||
|
|
||||||
if lock || !testing.Testing() {
|
if lock || !testing.Testing() {
|
||||||
if unlock, err := lockedfile.MutexAt(
|
if unlock, err := lockedfile.MutexAt(
|
||||||
|
|||||||
@@ -15,6 +15,7 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"reflect"
|
"reflect"
|
||||||
|
"strconv"
|
||||||
"syscall"
|
"syscall"
|
||||||
"testing"
|
"testing"
|
||||||
"unique"
|
"unique"
|
||||||
@@ -93,7 +94,7 @@ type stubArtifact struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (a *stubArtifact) Kind() pkg.Kind { return a.kind }
|
func (a *stubArtifact) Kind() pkg.Kind { return a.kind }
|
||||||
func (a *stubArtifact) Params(ctx *pkg.IContext) { ctx.GetHash().Write(a.params) }
|
func (a *stubArtifact) Params(ctx *pkg.IContext) { ctx.Write(a.params) }
|
||||||
func (a *stubArtifact) Dependencies() []pkg.Artifact { return a.deps }
|
func (a *stubArtifact) Dependencies() []pkg.Artifact { return a.deps }
|
||||||
func (a *stubArtifact) Cure(t *pkg.TContext) error { return a.cure(t) }
|
func (a *stubArtifact) Cure(t *pkg.TContext) error { return a.cure(t) }
|
||||||
func (*stubArtifact) IsExclusive() bool { return false }
|
func (*stubArtifact) IsExclusive() bool { return false }
|
||||||
@@ -109,7 +110,7 @@ type stubArtifactF struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (a *stubArtifactF) Kind() pkg.Kind { return a.kind }
|
func (a *stubArtifactF) Kind() pkg.Kind { return a.kind }
|
||||||
func (a *stubArtifactF) Params(ctx *pkg.IContext) { ctx.GetHash().Write(a.params) }
|
func (a *stubArtifactF) Params(ctx *pkg.IContext) { ctx.Write(a.params) }
|
||||||
func (a *stubArtifactF) Dependencies() []pkg.Artifact { return a.deps }
|
func (a *stubArtifactF) Dependencies() []pkg.Artifact { return a.deps }
|
||||||
func (a *stubArtifactF) Cure(f *pkg.FContext) error { return a.cure(f) }
|
func (a *stubArtifactF) Cure(f *pkg.FContext) error { return a.cure(f) }
|
||||||
func (a *stubArtifactF) IsExclusive() bool { return a.excl }
|
func (a *stubArtifactF) IsExclusive() bool { return a.excl }
|
||||||
@@ -218,7 +219,7 @@ func TestIdent(t *testing.T) {
|
|||||||
},
|
},
|
||||||
nil,
|
nil,
|
||||||
}, unique.Make[pkg.ID](pkg.MustDecode(
|
}, unique.Make[pkg.ID](pkg.MustDecode(
|
||||||
"HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY",
|
"WKErnjTOVbuH2P9a0gM4OcAAO4p-CoX2HQu7CbZrg8ZOzApvWoO3-ISzPw6av_rN",
|
||||||
))},
|
))},
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -313,6 +314,11 @@ func checkWithCache(t *testing.T, testCases []cacheTestCase) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// destroy non-deterministic status files
|
||||||
|
if err := os.RemoveAll(base.Append("status").String()); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
var checksum pkg.Checksum
|
var checksum pkg.Checksum
|
||||||
if err := pkg.HashDir(&checksum, base); err != nil {
|
if err := pkg.HashDir(&checksum, base); err != nil {
|
||||||
t.Fatalf("HashDir: error = %v", err)
|
t.Fatalf("HashDir: error = %v", err)
|
||||||
@@ -381,6 +387,9 @@ func cureMany(t *testing.T, c *pkg.Cache, steps []cureStep) {
|
|||||||
} else if step.pathname != ignorePathname && !pathname.Is(step.pathname) {
|
} else if step.pathname != ignorePathname && !pathname.Is(step.pathname) {
|
||||||
t.Fatalf("Cure: pathname = %q, want %q", pathname, step.pathname)
|
t.Fatalf("Cure: pathname = %q, want %q", pathname, step.pathname)
|
||||||
} else if checksum != makeChecksumH(step.checksum) {
|
} else if checksum != makeChecksumH(step.checksum) {
|
||||||
|
if checksum == (unique.Handle[pkg.Checksum]{}) {
|
||||||
|
checksum = unique.Make(pkg.Checksum{})
|
||||||
|
}
|
||||||
t.Fatalf(
|
t.Fatalf(
|
||||||
"Cure: checksum = %s, want %s",
|
"Cure: checksum = %s, want %s",
|
||||||
pkg.Encode(checksum.Value()), pkg.Encode(step.checksum),
|
pkg.Encode(checksum.Value()), pkg.Encode(step.checksum),
|
||||||
@@ -531,7 +540,7 @@ func TestCache(t *testing.T) {
|
|||||||
kind: pkg.KindExec,
|
kind: pkg.KindExec,
|
||||||
params: []byte("artifact overridden to be incomplete"),
|
params: []byte("artifact overridden to be incomplete"),
|
||||||
}}, nil, pkg.Checksum{}, pkg.InvalidArtifactError(pkg.MustDecode(
|
}}, nil, pkg.Checksum{}, pkg.InvalidArtifactError(pkg.MustDecode(
|
||||||
"da4kLKa94g1wN2M0qcKflqgf2-Y2UL36iehhczqsIIW8G0LGvM7S8jjtnBc0ftB0",
|
"E__uZ1sLIvb84vzSm5Uezb03RogsiaeTt1nfIVv8TKnnf4LqwtSi-smdHhlkZrUJ",
|
||||||
))},
|
))},
|
||||||
|
|
||||||
{"error passthrough", newStubFile(
|
{"error passthrough", newStubFile(
|
||||||
@@ -953,6 +962,17 @@ func TestErrors(t *testing.T) {
|
|||||||
{"NoOutputError", pkg.NoOutputError{
|
{"NoOutputError", pkg.NoOutputError{
|
||||||
// empty struct
|
// empty struct
|
||||||
}, "artifact cured successfully but did not produce any output"},
|
}, "artifact cured successfully but did not produce any output"},
|
||||||
|
|
||||||
|
{"IRKindError", &pkg.IRKindError{
|
||||||
|
Got: pkg.IRKindEnd,
|
||||||
|
Want: pkg.IRKindIdent,
|
||||||
|
Ancillary: 0xcafebabe,
|
||||||
|
}, "got terminator IR value (0xcafebabe) instead of ident"},
|
||||||
|
{"IRKindError invalid", &pkg.IRKindError{
|
||||||
|
Got: 0xbeef,
|
||||||
|
Want: pkg.IRKindIdent,
|
||||||
|
Ancillary: 0xcafe,
|
||||||
|
}, "got invalid kind 48879 IR value (0xcafe) instead of ident"},
|
||||||
}
|
}
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
@@ -1137,6 +1157,40 @@ func TestDependencyCureError(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// earlyFailureF is a [FloodArtifact] with a large dependency graph resulting in
|
||||||
|
// a large [DependencyCureError].
|
||||||
|
type earlyFailureF int
|
||||||
|
|
||||||
|
func (earlyFailureF) Kind() pkg.Kind { return pkg.KindExec }
|
||||||
|
func (earlyFailureF) Params(*pkg.IContext) {}
|
||||||
|
func (earlyFailureF) IsExclusive() bool { return false }
|
||||||
|
|
||||||
|
func (a earlyFailureF) Dependencies() []pkg.Artifact {
|
||||||
|
deps := make([]pkg.Artifact, a)
|
||||||
|
for i := range deps {
|
||||||
|
deps[i] = a - 1
|
||||||
|
}
|
||||||
|
return deps
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a earlyFailureF) Cure(*pkg.FContext) error {
|
||||||
|
if a != 0 {
|
||||||
|
panic("unexpected cure on " + strconv.Itoa(int(a)))
|
||||||
|
}
|
||||||
|
return stub.UniqueError(0xcafe)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDependencyCureErrorEarly(t *testing.T) {
|
||||||
|
checkWithCache(t, []cacheTestCase{
|
||||||
|
{"early", nil, func(t *testing.T, _ *check.Absolute, c *pkg.Cache) {
|
||||||
|
_, _, err := c.Cure(earlyFailureF(8))
|
||||||
|
if !errors.Is(err, stub.UniqueError(0xcafe)) {
|
||||||
|
t.Fatalf("Cure: error = %v", err)
|
||||||
|
}
|
||||||
|
}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C")},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestNew(t *testing.T) {
|
func TestNew(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
|
|||||||
@@ -4,15 +4,13 @@ import (
|
|||||||
"archive/tar"
|
"archive/tar"
|
||||||
"compress/bzip2"
|
"compress/bzip2"
|
||||||
"compress/gzip"
|
"compress/gzip"
|
||||||
"encoding/binary"
|
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/fs"
|
"io/fs"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
|
"path"
|
||||||
"hakurei.app/container/check"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -29,7 +27,7 @@ type tarArtifact struct {
|
|||||||
// Caller-supplied backing tarball.
|
// Caller-supplied backing tarball.
|
||||||
f Artifact
|
f Artifact
|
||||||
// Compression on top of the tarball.
|
// Compression on top of the tarball.
|
||||||
compression uint64
|
compression uint32
|
||||||
}
|
}
|
||||||
|
|
||||||
// tarArtifactNamed embeds tarArtifact for a [fmt.Stringer] tarball.
|
// tarArtifactNamed embeds tarArtifact for a [fmt.Stringer] tarball.
|
||||||
@@ -47,7 +45,7 @@ func (a *tarArtifactNamed) String() string { return a.name + "-unpack" }
|
|||||||
// NewTar returns a new [Artifact] backed by the supplied [Artifact] and
|
// NewTar returns a new [Artifact] backed by the supplied [Artifact] and
|
||||||
// compression method. The source [Artifact] must be compatible with
|
// compression method. The source [Artifact] must be compatible with
|
||||||
// [TContext.Open].
|
// [TContext.Open].
|
||||||
func NewTar(a Artifact, compression uint64) Artifact {
|
func NewTar(a Artifact, compression uint32) Artifact {
|
||||||
ta := tarArtifact{a, compression}
|
ta := tarArtifact{a, compression}
|
||||||
if s, ok := a.(fmt.Stringer); ok {
|
if s, ok := a.(fmt.Stringer); ok {
|
||||||
if name := s.String(); name != "" {
|
if name := s.String(); name != "" {
|
||||||
@@ -62,7 +60,7 @@ func NewHTTPGetTar(
|
|||||||
hc *http.Client,
|
hc *http.Client,
|
||||||
url string,
|
url string,
|
||||||
checksum Checksum,
|
checksum Checksum,
|
||||||
compression uint64,
|
compression uint32,
|
||||||
) Artifact {
|
) Artifact {
|
||||||
return NewTar(NewHTTPGet(hc, url, checksum), compression)
|
return NewTar(NewHTTPGet(hc, url, checksum), compression)
|
||||||
}
|
}
|
||||||
@@ -71,8 +69,16 @@ func NewHTTPGetTar(
|
|||||||
func (a *tarArtifact) Kind() Kind { return KindTar }
|
func (a *tarArtifact) Kind() Kind { return KindTar }
|
||||||
|
|
||||||
// Params writes compression encoded in little endian.
|
// Params writes compression encoded in little endian.
|
||||||
func (a *tarArtifact) Params(ctx *IContext) {
|
func (a *tarArtifact) Params(ctx *IContext) { ctx.WriteUint32(a.compression) }
|
||||||
ctx.GetHash().Write(binary.LittleEndian.AppendUint64(nil, a.compression))
|
|
||||||
|
func init() {
|
||||||
|
register(KindTar, func(r *IRReader) Artifact {
|
||||||
|
a := NewTar(r.Next(), r.ReadUint32())
|
||||||
|
if _, ok := r.Finalise(); ok {
|
||||||
|
panic(ErrUnexpectedChecksum)
|
||||||
|
}
|
||||||
|
return a
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Dependencies returns a slice containing the backing file.
|
// Dependencies returns a slice containing the backing file.
|
||||||
@@ -93,7 +99,6 @@ func (e DisallowedTypeflagError) Error() string {
|
|||||||
|
|
||||||
// Cure cures the [Artifact], producing a directory located at work.
|
// Cure cures the [Artifact], producing a directory located at work.
|
||||||
func (a *tarArtifact) Cure(t *TContext) (err error) {
|
func (a *tarArtifact) Cure(t *TContext) (err error) {
|
||||||
temp := t.GetTempDir()
|
|
||||||
var tr io.ReadCloser
|
var tr io.ReadCloser
|
||||||
if tr, err = t.Open(a.f); err != nil {
|
if tr, err = t.Open(a.f); err != nil {
|
||||||
return
|
return
|
||||||
@@ -109,7 +114,9 @@ func (a *tarArtifact) Cure(t *TContext) (err error) {
|
|||||||
err = closeErr
|
err = closeErr
|
||||||
}
|
}
|
||||||
}(tr)
|
}(tr)
|
||||||
tr = io.NopCloser(tr)
|
br := t.cache.getReader(tr)
|
||||||
|
defer t.cache.putReader(br)
|
||||||
|
tr = io.NopCloser(br)
|
||||||
|
|
||||||
switch a.compression {
|
switch a.compression {
|
||||||
case TarUncompressed:
|
case TarUncompressed:
|
||||||
@@ -130,14 +137,24 @@ func (a *tarArtifact) Cure(t *TContext) (err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type dirTargetPerm struct {
|
type dirTargetPerm struct {
|
||||||
path *check.Absolute
|
path string
|
||||||
mode fs.FileMode
|
mode fs.FileMode
|
||||||
}
|
}
|
||||||
var madeDirectories []dirTargetPerm
|
var madeDirectories []dirTargetPerm
|
||||||
|
|
||||||
if err = os.MkdirAll(temp.String(), 0700); err != nil {
|
if err = os.MkdirAll(t.GetTempDir().String(), 0700); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
var root *os.Root
|
||||||
|
if root, err = os.OpenRoot(t.GetTempDir().String()); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
closeErr := root.Close()
|
||||||
|
if err == nil {
|
||||||
|
err = closeErr
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
var header *tar.Header
|
var header *tar.Header
|
||||||
r := tar.NewReader(tr)
|
r := tar.NewReader(tr)
|
||||||
@@ -151,9 +168,8 @@ func (a *tarArtifact) Cure(t *TContext) (err error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pathname := temp.Append(header.Name)
|
|
||||||
if typeflag >= '0' && typeflag <= '9' && typeflag != tar.TypeDir {
|
if typeflag >= '0' && typeflag <= '9' && typeflag != tar.TypeDir {
|
||||||
if err = os.MkdirAll(pathname.Dir().String(), 0700); err != nil {
|
if err = root.MkdirAll(path.Dir(header.Name), 0700); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -161,8 +177,8 @@ func (a *tarArtifact) Cure(t *TContext) (err error) {
|
|||||||
switch typeflag {
|
switch typeflag {
|
||||||
case tar.TypeReg:
|
case tar.TypeReg:
|
||||||
var f *os.File
|
var f *os.File
|
||||||
if f, err = os.OpenFile(
|
if f, err = root.OpenFile(
|
||||||
pathname.String(),
|
header.Name,
|
||||||
os.O_CREATE|os.O_EXCL|os.O_WRONLY,
|
os.O_CREATE|os.O_EXCL|os.O_WRONLY,
|
||||||
header.FileInfo().Mode()&0500,
|
header.FileInfo().Mode()&0500,
|
||||||
); err != nil {
|
); err != nil {
|
||||||
@@ -177,26 +193,29 @@ func (a *tarArtifact) Cure(t *TContext) (err error) {
|
|||||||
break
|
break
|
||||||
|
|
||||||
case tar.TypeLink:
|
case tar.TypeLink:
|
||||||
if err = os.Link(
|
if err = root.Link(
|
||||||
temp.Append(header.Linkname).String(),
|
header.Linkname,
|
||||||
pathname.String(),
|
header.Name,
|
||||||
); err != nil {
|
); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
break
|
break
|
||||||
|
|
||||||
case tar.TypeSymlink:
|
case tar.TypeSymlink:
|
||||||
if err = os.Symlink(header.Linkname, pathname.String()); err != nil {
|
if err = root.Symlink(
|
||||||
|
header.Linkname,
|
||||||
|
header.Name,
|
||||||
|
); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
break
|
break
|
||||||
|
|
||||||
case tar.TypeDir:
|
case tar.TypeDir:
|
||||||
madeDirectories = append(madeDirectories, dirTargetPerm{
|
madeDirectories = append(madeDirectories, dirTargetPerm{
|
||||||
path: pathname,
|
path: header.Name,
|
||||||
mode: header.FileInfo().Mode(),
|
mode: header.FileInfo().Mode(),
|
||||||
})
|
})
|
||||||
if err = os.MkdirAll(pathname.String(), 0700); err != nil {
|
if err = root.MkdirAll(header.Name, 0700); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
break
|
break
|
||||||
@@ -213,7 +232,7 @@ func (a *tarArtifact) Cure(t *TContext) (err error) {
|
|||||||
}
|
}
|
||||||
if err == nil {
|
if err == nil {
|
||||||
for _, e := range madeDirectories {
|
for _, e := range madeDirectories {
|
||||||
if err = os.Chmod(e.path.String(), e.mode&0500); err != nil {
|
if err = root.Chmod(e.path, e.mode&0500); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -221,6 +240,7 @@ func (a *tarArtifact) Cure(t *TContext) (err error) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
temp := t.GetTempDir()
|
||||||
if err = os.Chmod(temp.String(), 0700); err != nil {
|
if err = os.Chmod(temp.String(), 0700); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -40,7 +40,7 @@ func TestTar(t *testing.T) {
|
|||||||
}, pkg.MustDecode(
|
}, pkg.MustDecode(
|
||||||
"cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM",
|
"cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM",
|
||||||
))
|
))
|
||||||
}, pkg.MustDecode("sxbgyX-bPoezbha214n2lbQhiVfTUBkhZ0EX6zI7mmkMdrCdwuMwhMBJphLQsy94")},
|
}, pkg.MustDecode("NQTlc466JmSVLIyWklm_u8_g95jEEb98PxJU-kjwxLpfdjwMWJq0G8ze9R4Vo1Vu")},
|
||||||
|
|
||||||
{"http expand", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
{"http expand", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
checkTarHTTP(t, base, c, fstest.MapFS{
|
checkTarHTTP(t, base, c, fstest.MapFS{
|
||||||
@@ -51,7 +51,7 @@ func TestTar(t *testing.T) {
|
|||||||
}, pkg.MustDecode(
|
}, pkg.MustDecode(
|
||||||
"CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN",
|
"CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN",
|
||||||
))
|
))
|
||||||
}, pkg.MustDecode("4I8wx_h7NSJTlG5lbuz-GGEXrOg0GYC3M_503LYEBhv5XGWXfNIdIY9Q3eVSYldX")},
|
}, pkg.MustDecode("hSoSSgCYTNonX3Q8FjvjD1fBl-E-BQyA6OTXro2OadXqbST4tZ-akGXszdeqphRe")},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -98,14 +98,37 @@ func checkTarHTTP(
|
|||||||
|
|
||||||
wantIdent := func() pkg.ID {
|
wantIdent := func() pkg.ID {
|
||||||
h := sha512.New384()
|
h := sha512.New384()
|
||||||
h.Write([]byte{byte(pkg.KindTar), 0, 0, 0, 0, 0, 0, 0})
|
|
||||||
h.Write([]byte{pkg.TarGzip, 0, 0, 0, 0, 0, 0, 0})
|
|
||||||
h.Write([]byte{byte(pkg.KindHTTPGet), 0, 0, 0, 0, 0, 0, 0})
|
|
||||||
|
|
||||||
|
// kind uint64
|
||||||
|
h.Write([]byte{byte(pkg.KindTar), 0, 0, 0, 0, 0, 0, 0})
|
||||||
|
// deps_sz uint64
|
||||||
|
h.Write([]byte{1, 0, 0, 0, 0, 0, 0, 0})
|
||||||
|
|
||||||
|
// kind uint64
|
||||||
|
h.Write([]byte{byte(pkg.KindHTTPGet), 0, 0, 0, 0, 0, 0, 0})
|
||||||
|
// ident ID
|
||||||
h0 := sha512.New384()
|
h0 := sha512.New384()
|
||||||
|
// kind uint64
|
||||||
h0.Write([]byte{byte(pkg.KindHTTPGet), 0, 0, 0, 0, 0, 0, 0})
|
h0.Write([]byte{byte(pkg.KindHTTPGet), 0, 0, 0, 0, 0, 0, 0})
|
||||||
|
// deps_sz uint64
|
||||||
|
h0.Write([]byte{0, 0, 0, 0, 0, 0, 0, 0})
|
||||||
|
// url string
|
||||||
|
h0.Write([]byte{byte(pkg.IRKindString), 0, 0, 0})
|
||||||
|
h0.Write([]byte{0x10, 0, 0, 0})
|
||||||
h0.Write([]byte("file:///testdata"))
|
h0.Write([]byte("file:///testdata"))
|
||||||
|
// end(KnownChecksum)
|
||||||
|
h0.Write([]byte{byte(pkg.IRKindEnd), 0, 0, 0})
|
||||||
|
h0.Write([]byte{byte(pkg.IREndKnownChecksum), 0, 0, 0})
|
||||||
|
// checksum Checksum
|
||||||
|
h0.Write(testdataChecksum[:])
|
||||||
h.Write(h0.Sum(nil))
|
h.Write(h0.Sum(nil))
|
||||||
|
// compression uint32
|
||||||
|
h.Write([]byte{byte(pkg.IRKindUint32), 0, 0, 0})
|
||||||
|
h.Write([]byte{pkg.TarGzip, 0, 0, 0})
|
||||||
|
// end
|
||||||
|
h.Write([]byte{byte(pkg.IRKindEnd), 0, 0, 0})
|
||||||
|
h.Write([]byte{0, 0, 0, 0})
|
||||||
|
|
||||||
return pkg.ID(h.Sum(nil))
|
return pkg.ID(h.Sum(nil))
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
|||||||
12
internal/pkg/testdata/main.go
vendored
12
internal/pkg/testdata/main.go
vendored
@@ -142,12 +142,12 @@ func main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
const checksumEmptyDir = "MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"
|
const checksumEmptyDir = "MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"
|
||||||
ident := "U2cbgVgEtjfRuvHfE1cQnZ3t8yoexULQyo_VLgvxAVJSsobMcNaFIsuDWtmt7kzK"
|
ident := "dztPS6jRjiZtCF4_p8AzfnxGp6obkhrgFVsxdodbKWUoAEVtDz3MykepJB4kI_ks"
|
||||||
log.Println(m)
|
log.Println(m)
|
||||||
next := func() { m = m.Next; log.Println(m) }
|
next := func() { m = m.Next; log.Println(m) }
|
||||||
|
|
||||||
if overlayRoot {
|
if overlayRoot {
|
||||||
ident = "5ey2wpmMpj483YYa7ZZQciYLA2cx3_l167JCqWW4Pd-5DVp81dj9EsBtVTwYptF6"
|
ident = "RdMA-mubnrHuu3Ky1wWyxauSYCO0ZH_zCPUj3uDHqkfwv5sGcByoF_g5PjlGiClb"
|
||||||
|
|
||||||
if m.Root != "/" || m.Target != "/" ||
|
if m.Root != "/" || m.Target != "/" ||
|
||||||
m.Source != "overlay" || m.FsType != "overlay" {
|
m.Source != "overlay" || m.FsType != "overlay" {
|
||||||
@@ -165,7 +165,7 @@ func main() {
|
|||||||
log.Fatal("unexpected artifact checksum")
|
log.Fatal("unexpected artifact checksum")
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
ident = "tfjrsVuBuFgzWgwz-yPppFtylYuC1VFWnKhyBiHbWTGkyz8lt7Ee9QXWaIHPXs4x"
|
ident = "p1t_drXr34i-jZNuxDMLaMOdL6tZvQqhavNafGynGqxOZoXAUTSn7kqNh3Ovv3DT"
|
||||||
|
|
||||||
lowerdirsEscaped := strings.Split(lowerdir, ":")
|
lowerdirsEscaped := strings.Split(lowerdir, ":")
|
||||||
lowerdirs := lowerdirsEscaped[:0]
|
lowerdirs := lowerdirsEscaped[:0]
|
||||||
@@ -194,7 +194,7 @@ func main() {
|
|||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
if hostNet {
|
if hostNet {
|
||||||
ident = "QdsJhGgnk5N2xdUNGcndXQxFKifxf1V_2t9X8CQ-pDcg24x6mGJC_BiLfGbs6Qml"
|
ident = "G8qPxD9puvvoOVV7lrT80eyDeIl3G_CCFoKw12c8mCjMdG1zF7NEPkwYpNubClK3"
|
||||||
}
|
}
|
||||||
|
|
||||||
if m.Root != "/sysroot" || m.Target != "/" {
|
if m.Root != "/sysroot" || m.Target != "/" {
|
||||||
@@ -213,14 +213,14 @@ func main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if promote {
|
if promote {
|
||||||
ident = "O-6VjlIUxc4PYLf5v35uhIeL8kkYCbHYklqlmDjFPXe0m4j6GkUDg5qwTzBRESnf"
|
ident = "xXTIYcXmgJWNLC91c417RRrNM9cjELwEZHpGvf8Fk_GNP5agRJp_SicD0w9aMeLJ"
|
||||||
}
|
}
|
||||||
|
|
||||||
next() // testtool artifact
|
next() // testtool artifact
|
||||||
|
|
||||||
next()
|
next()
|
||||||
if overlayWork {
|
if overlayWork {
|
||||||
ident = "acaDzHZv40dZaz4cGAXayqbRMgbEOuiuiUijZL8IgDQvyeCNMFE3onBMYfny-kXA"
|
ident = "5hlaukCirnXE4W_RSLJFOZN47Z5RiHnacXzdFp_70cLgiJUGR6cSb_HaFftkzi0-"
|
||||||
if m.Root != "/" || m.Target != "/work" ||
|
if m.Root != "/" || m.Target != "/work" ||
|
||||||
m.Source != "overlay" || m.FsType != "overlay" {
|
m.Source != "overlay" || m.FsType != "overlay" {
|
||||||
log.Fatal("unexpected work mount entry")
|
log.Fatal("unexpected work mount entry")
|
||||||
|
|||||||
@@ -2,32 +2,19 @@ package rosa
|
|||||||
|
|
||||||
import "hakurei.app/internal/pkg"
|
import "hakurei.app/internal/pkg"
|
||||||
|
|
||||||
func (t Toolchain) newAttr() pkg.Artifact {
|
func (t Toolchain) newAttr() (pkg.Artifact, string) {
|
||||||
const (
|
const (
|
||||||
version = "2.5.2"
|
version = "2.5.2"
|
||||||
checksum = "YWEphrz6vg1sUMmHHVr1CRo53pFXRhq_pjN-AlG8UgwZK1y6m7zuDhxqJhD0SV0l"
|
checksum = "YWEphrz6vg1sUMmHHVr1CRo53pFXRhq_pjN-AlG8UgwZK1y6m7zuDhxqJhD0SV0l"
|
||||||
)
|
)
|
||||||
return t.New("attr-"+version, false, []pkg.Artifact{
|
return t.NewPackage("attr", version, pkg.NewHTTPGetTar(
|
||||||
t.Load(Make),
|
nil, "https://download.savannah.nongnu.org/releases/attr/"+
|
||||||
t.Load(Perl),
|
|
||||||
}, nil, nil, `
|
|
||||||
ln -s ../../system/bin/perl /usr/bin
|
|
||||||
|
|
||||||
cd "$(mktemp -d)"
|
|
||||||
/usr/src/attr/configure \
|
|
||||||
--prefix=/system \
|
|
||||||
--build="${ROSA_TRIPLE}" \
|
|
||||||
--enable-static
|
|
||||||
make "-j$(nproc)" check
|
|
||||||
make DESTDIR=/work install
|
|
||||||
`, pkg.Path(AbsUsrSrc.Append("attr"), true, t.NewPatchedSource(
|
|
||||||
"attr", version, pkg.NewHTTPGetTar(
|
|
||||||
nil,
|
|
||||||
"https://download.savannah.nongnu.org/releases/attr/"+
|
|
||||||
"attr-"+version+".tar.gz",
|
"attr-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), true, [2]string{"libgen-basename", `From 8a80d895dfd779373363c3a4b62ecce5a549efb2 Mon Sep 17 00:00:00 2001
|
), &PackageAttr{
|
||||||
|
Patches: [][2]string{
|
||||||
|
{"libgen-basename", `From 8a80d895dfd779373363c3a4b62ecce5a549efb2 Mon Sep 17 00:00:00 2001
|
||||||
From: "Haelwenn (lanodan) Monnier" <contact@hacktivis.me>
|
From: "Haelwenn (lanodan) Monnier" <contact@hacktivis.me>
|
||||||
Date: Sat, 30 Mar 2024 10:17:10 +0100
|
Date: Sat, 30 Mar 2024 10:17:10 +0100
|
||||||
Subject: tools/attr.c: Add missing libgen.h include for basename(3)
|
Subject: tools/attr.c: Add missing libgen.h include for basename(3)
|
||||||
@@ -52,7 +39,9 @@ index f12e4af..6a3c1e9 100644
|
|||||||
#include <attr/attributes.h>
|
#include <attr/attributes.h>
|
||||||
|
|
||||||
--
|
--
|
||||||
cgit v1.1`}, [2]string{"musl-errno", `diff --git a/test/attr.test b/test/attr.test
|
cgit v1.1`},
|
||||||
|
|
||||||
|
{"musl-errno", `diff --git a/test/attr.test b/test/attr.test
|
||||||
index 6ce2f9b..e9bde92 100644
|
index 6ce2f9b..e9bde92 100644
|
||||||
--- a/test/attr.test
|
--- a/test/attr.test
|
||||||
+++ b/test/attr.test
|
+++ b/test/attr.test
|
||||||
@@ -66,33 +55,48 @@ index 6ce2f9b..e9bde92 100644
|
|||||||
$ setfattr -n user. -v value f
|
$ setfattr -n user. -v value f
|
||||||
> setfattr: f: Invalid argument
|
> setfattr: f: Invalid argument
|
||||||
`},
|
`},
|
||||||
)))
|
},
|
||||||
}
|
|
||||||
func init() { artifactsF[Attr] = Toolchain.newAttr }
|
|
||||||
|
|
||||||
func (t Toolchain) newACL() pkg.Artifact {
|
ScriptEarly: `
|
||||||
|
ln -s ../../system/bin/perl /usr/bin
|
||||||
|
`,
|
||||||
|
}, (*MakeHelper)(nil),
|
||||||
|
Perl,
|
||||||
|
), version
|
||||||
|
}
|
||||||
|
func init() {
|
||||||
|
artifactsM[Attr] = Metadata{
|
||||||
|
f: Toolchain.newAttr,
|
||||||
|
|
||||||
|
Name: "attr",
|
||||||
|
Description: "Commands for Manipulating Filesystem Extended Attributes",
|
||||||
|
Website: "https://savannah.nongnu.org/projects/attr/",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t Toolchain) newACL() (pkg.Artifact, string) {
|
||||||
const (
|
const (
|
||||||
version = "2.3.2"
|
version = "2.3.2"
|
||||||
checksum = "-fY5nwH4K8ZHBCRXrzLdguPkqjKI6WIiGu4dBtrZ1o0t6AIU73w8wwJz_UyjIS0P"
|
checksum = "-fY5nwH4K8ZHBCRXrzLdguPkqjKI6WIiGu4dBtrZ1o0t6AIU73w8wwJz_UyjIS0P"
|
||||||
)
|
)
|
||||||
return t.New("acl-"+version, false, []pkg.Artifact{
|
return t.NewPackage("acl", version, pkg.NewHTTPGetTar(
|
||||||
t.Load(Make),
|
nil, "https://download.savannah.nongnu.org/releases/acl/"+
|
||||||
|
|
||||||
t.Load(Attr),
|
|
||||||
}, nil, nil, `
|
|
||||||
cd "$(mktemp -d)"
|
|
||||||
/usr/src/acl/configure \
|
|
||||||
--prefix=/system \
|
|
||||||
--build="${ROSA_TRIPLE}" \
|
|
||||||
--enable-static
|
|
||||||
make "-j$(nproc)"
|
|
||||||
make DESTDIR=/work install
|
|
||||||
`, pkg.Path(AbsUsrSrc.Append("acl"), true, pkg.NewHTTPGetTar(
|
|
||||||
nil,
|
|
||||||
"https://download.savannah.nongnu.org/releases/acl/"+
|
|
||||||
"acl-"+version+".tar.gz",
|
"acl-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
)))
|
), nil, &MakeHelper{
|
||||||
|
// makes assumptions about uid_map/gid_map
|
||||||
|
SkipCheck: true,
|
||||||
|
},
|
||||||
|
Attr,
|
||||||
|
), version
|
||||||
|
}
|
||||||
|
func init() {
|
||||||
|
artifactsM[ACL] = Metadata{
|
||||||
|
f: Toolchain.newACL,
|
||||||
|
|
||||||
|
Name: "acl",
|
||||||
|
Description: "Commands for Manipulating POSIX Access Control Lists",
|
||||||
|
Website: "https://savannah.nongnu.org/projects/acl/",
|
||||||
|
}
|
||||||
}
|
}
|
||||||
func init() { artifactsF[ACL] = Toolchain.newACL }
|
|
||||||
|
|||||||
@@ -10,61 +10,190 @@ import (
|
|||||||
type PArtifact int
|
type PArtifact int
|
||||||
|
|
||||||
const (
|
const (
|
||||||
ACL PArtifact = iota
|
// ImageInitramfs is the Rosa OS initramfs archive.
|
||||||
|
ImageInitramfs PArtifact = iota
|
||||||
|
|
||||||
|
// Kernel is the generic Rosa OS Linux kernel.
|
||||||
|
Kernel
|
||||||
|
// KernelHeaders is an installation of kernel headers for [Kernel].
|
||||||
|
KernelHeaders
|
||||||
|
// KernelSource is a writable kernel source tree installed to [AbsUsrSrc].
|
||||||
|
KernelSource
|
||||||
|
|
||||||
|
ACL
|
||||||
|
ArgpStandalone
|
||||||
Attr
|
Attr
|
||||||
Autoconf
|
Autoconf
|
||||||
|
Automake
|
||||||
|
BC
|
||||||
Bash
|
Bash
|
||||||
Busybox
|
Binutils
|
||||||
|
Bison
|
||||||
|
Bzip2
|
||||||
CMake
|
CMake
|
||||||
Coreutils
|
Coreutils
|
||||||
|
Curl
|
||||||
|
DTC
|
||||||
Diffutils
|
Diffutils
|
||||||
|
Elfutils
|
||||||
|
Fakeroot
|
||||||
|
Findutils
|
||||||
|
Flex
|
||||||
|
Fuse
|
||||||
|
GMP
|
||||||
|
GLib
|
||||||
|
Gawk
|
||||||
|
GenInitCPIO
|
||||||
Gettext
|
Gettext
|
||||||
Git
|
Git
|
||||||
Go
|
Go
|
||||||
Gperf
|
Gperf
|
||||||
|
Grep
|
||||||
|
Gzip
|
||||||
Hakurei
|
Hakurei
|
||||||
KernelHeaders
|
HakureiDist
|
||||||
|
IniConfig
|
||||||
|
Kmod
|
||||||
LibXau
|
LibXau
|
||||||
|
Libcap
|
||||||
Libexpat
|
Libexpat
|
||||||
|
Libiconv
|
||||||
|
Libpsl
|
||||||
Libffi
|
Libffi
|
||||||
Libgd
|
Libgd
|
||||||
|
Libtool
|
||||||
Libseccomp
|
Libseccomp
|
||||||
|
Libucontext
|
||||||
Libxml2
|
Libxml2
|
||||||
|
Libxslt
|
||||||
M4
|
M4
|
||||||
|
MPC
|
||||||
|
MPFR
|
||||||
Make
|
Make
|
||||||
Meson
|
Meson
|
||||||
|
Mksh
|
||||||
|
MuslFts
|
||||||
|
MuslObstack
|
||||||
|
NSS
|
||||||
|
NSSCACert
|
||||||
|
Ncurses
|
||||||
Ninja
|
Ninja
|
||||||
|
OpenSSL
|
||||||
|
PCRE2
|
||||||
|
Packaging
|
||||||
Patch
|
Patch
|
||||||
Perl
|
Perl
|
||||||
|
PerlLocaleGettext
|
||||||
|
PerlMIMECharset
|
||||||
|
PerlModuleBuild
|
||||||
|
PerlPodParser
|
||||||
|
PerlSGMLS
|
||||||
|
PerlTermReadKey
|
||||||
|
PerlTextCharWidth
|
||||||
|
PerlTextWrapI18N
|
||||||
|
PerlUnicodeGCString
|
||||||
|
PerlYAMLTiny
|
||||||
PkgConfig
|
PkgConfig
|
||||||
|
Pluggy
|
||||||
|
Procps
|
||||||
|
PyTest
|
||||||
|
Pygments
|
||||||
Python
|
Python
|
||||||
|
QEMU
|
||||||
Rsync
|
Rsync
|
||||||
|
Sed
|
||||||
Setuptools
|
Setuptools
|
||||||
|
SquashfsTools
|
||||||
|
TamaGo
|
||||||
|
Tar
|
||||||
|
Texinfo
|
||||||
|
Toybox
|
||||||
|
toyboxEarly
|
||||||
|
Unzip
|
||||||
|
UtilLinux
|
||||||
Wayland
|
Wayland
|
||||||
WaylandProtocols
|
WaylandProtocols
|
||||||
XCB
|
XCB
|
||||||
XCBProto
|
XCBProto
|
||||||
Xproto
|
Xproto
|
||||||
|
XZ
|
||||||
Zlib
|
Zlib
|
||||||
|
Zstd
|
||||||
|
|
||||||
// _presetEnd is the total number of presets and does not denote a preset.
|
// PresetUnexportedStart is the first unexported preset.
|
||||||
_presetEnd
|
PresetUnexportedStart
|
||||||
|
|
||||||
|
buildcatrust = iota - 1
|
||||||
|
utilMacros
|
||||||
|
|
||||||
|
// Musl is a standalone libc that does not depend on the toolchain.
|
||||||
|
Musl
|
||||||
|
|
||||||
|
// gcc is a hacked-to-pieces GCC toolchain meant for use in intermediate
|
||||||
|
// stages only. This preset and its direct output must never be exposed.
|
||||||
|
gcc
|
||||||
|
|
||||||
|
// Stage0 is a tarball containing all compile-time dependencies of artifacts
|
||||||
|
// part of the [Std] toolchain.
|
||||||
|
Stage0
|
||||||
|
|
||||||
|
// PresetEnd is the total number of presets and does not denote a preset.
|
||||||
|
PresetEnd
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Metadata is stage-agnostic information of a [PArtifact] not directly
|
||||||
|
// representable in the resulting [pkg.Artifact].
|
||||||
|
type Metadata struct {
|
||||||
|
f func(t Toolchain) (a pkg.Artifact, version string)
|
||||||
|
|
||||||
|
// Unique package name.
|
||||||
|
Name string `json:"name"`
|
||||||
|
// Short user-facing description.
|
||||||
|
Description string `json:"description"`
|
||||||
|
// Project home page.
|
||||||
|
Website string `json:"website,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unversioned denotes an unversioned [PArtifact].
|
||||||
|
const Unversioned = "\x00"
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// artifactsF is an array of functions for the result of [PArtifact].
|
// artifactsM is an array of [PArtifact] metadata.
|
||||||
artifactsF [_presetEnd]func(t Toolchain) pkg.Artifact
|
artifactsM [PresetEnd]Metadata
|
||||||
|
|
||||||
// artifacts stores the result of artifactsF.
|
// artifacts stores the result of Metadata.f.
|
||||||
artifacts [_toolchainEnd][len(artifactsF)]pkg.Artifact
|
artifacts [_toolchainEnd][len(artifactsM)]pkg.Artifact
|
||||||
|
// versions stores the version of [PArtifact].
|
||||||
|
versions [_toolchainEnd][len(artifactsM)]string
|
||||||
// artifactsOnce is for lazy initialisation of artifacts.
|
// artifactsOnce is for lazy initialisation of artifacts.
|
||||||
artifactsOnce [_toolchainEnd][len(artifactsF)]sync.Once
|
artifactsOnce [_toolchainEnd][len(artifactsM)]sync.Once
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// GetMetadata returns [Metadata] of a [PArtifact].
|
||||||
|
func GetMetadata(p PArtifact) *Metadata { return &artifactsM[p] }
|
||||||
|
|
||||||
// Load returns the resulting [pkg.Artifact] of [PArtifact].
|
// Load returns the resulting [pkg.Artifact] of [PArtifact].
|
||||||
func (t Toolchain) Load(p PArtifact) pkg.Artifact {
|
func (t Toolchain) Load(p PArtifact) pkg.Artifact {
|
||||||
artifactsOnce[t][p].Do(func() {
|
artifactsOnce[t][p].Do(func() {
|
||||||
artifacts[t][p] = artifactsF[p](t)
|
artifacts[t][p], versions[t][p] = artifactsM[p].f(t)
|
||||||
})
|
})
|
||||||
return artifacts[t][p]
|
return artifacts[t][p]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Version returns the version string of [PArtifact].
|
||||||
|
func (t Toolchain) Version(p PArtifact) string {
|
||||||
|
artifactsOnce[t][p].Do(func() {
|
||||||
|
artifacts[t][p], versions[t][p] = artifactsM[p].f(t)
|
||||||
|
})
|
||||||
|
return versions[t][p]
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResolveName returns a [PArtifact] by name.
|
||||||
|
func ResolveName(name string) (p PArtifact, ok bool) {
|
||||||
|
for i := range PresetUnexportedStart {
|
||||||
|
if name == artifactsM[i].Name {
|
||||||
|
return i, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
|||||||
53
internal/rosa/all_test.go
Normal file
53
internal/rosa/all_test.go
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
package rosa_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"hakurei.app/internal/rosa"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestLoad(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
for i := range rosa.PresetEnd {
|
||||||
|
p := rosa.PArtifact(i)
|
||||||
|
t.Run(rosa.GetMetadata(p).Name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
rosa.Std.Load(p)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestResolveName(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
for i := range rosa.PresetUnexportedStart {
|
||||||
|
p := i
|
||||||
|
name := rosa.GetMetadata(p).Name
|
||||||
|
t.Run(name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
if got, ok := rosa.ResolveName(name); !ok {
|
||||||
|
t.Fatal("ResolveName: ok = false")
|
||||||
|
} else if got != p {
|
||||||
|
t.Fatalf("ResolveName: %d, want %d", got, p)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func TestResolveNameUnexported(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
for i := rosa.PresetUnexportedStart; i < rosa.PresetEnd; i++ {
|
||||||
|
p := i
|
||||||
|
name := rosa.GetMetadata(p).Name
|
||||||
|
t.Run(name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
if got, ok := rosa.ResolveName(name); ok {
|
||||||
|
t.Fatalf("ResolveName: resolved unexported preset %d", got)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
36
internal/rosa/argp-standalone.go
Normal file
36
internal/rosa/argp-standalone.go
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
package rosa
|
||||||
|
|
||||||
|
import "hakurei.app/internal/pkg"
|
||||||
|
|
||||||
|
func (t Toolchain) newArgpStandalone() (pkg.Artifact, string) {
|
||||||
|
const (
|
||||||
|
version = "1.3"
|
||||||
|
checksum = "vtW0VyO2pJ-hPyYmDI2zwSLS8QL0sPAUKC1t3zNYbwN2TmsaE-fADhaVtNd3eNFl"
|
||||||
|
)
|
||||||
|
return t.NewPackage("argp-standalone", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "http://www.lysator.liu.se/~nisse/misc/"+
|
||||||
|
"argp-standalone-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
), &PackageAttr{
|
||||||
|
Env: []string{
|
||||||
|
"CC=cc -std=gnu89 -fPIC",
|
||||||
|
},
|
||||||
|
}, &MakeHelper{
|
||||||
|
Install: `
|
||||||
|
install -D -m644 /usr/src/argp-standalone/argp.h /work/system/include/argp.h
|
||||||
|
install -D -m755 libargp.a /work/system/lib/libargp.a
|
||||||
|
`,
|
||||||
|
},
|
||||||
|
Diffutils,
|
||||||
|
), version
|
||||||
|
}
|
||||||
|
func init() {
|
||||||
|
artifactsM[ArgpStandalone] = Metadata{
|
||||||
|
f: Toolchain.newArgpStandalone,
|
||||||
|
|
||||||
|
Name: "argp-standalone",
|
||||||
|
Description: "hierarchical argument parsing library broken out from glibc",
|
||||||
|
Website: "http://www.lysator.liu.se/~nisse/misc/",
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -5,8 +5,7 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"slices"
|
"runtime"
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"hakurei.app/container/fhs"
|
"hakurei.app/container/fhs"
|
||||||
@@ -33,6 +32,16 @@ func (a busyboxBin) Dependencies() []pkg.Artifact {
|
|||||||
return []pkg.Artifact{a.bin}
|
return []pkg.Artifact{a.bin}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
pkg.Register(kindBusyboxBin, func(r *pkg.IRReader) pkg.Artifact {
|
||||||
|
a := busyboxBin{r.Next().(pkg.FileArtifact)}
|
||||||
|
if _, ok := r.Finalise(); ok {
|
||||||
|
panic(pkg.ErrUnexpectedChecksum)
|
||||||
|
}
|
||||||
|
return a
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// String returns the reporting name of the underlying file prefixed with expand.
|
// String returns the reporting name of the underlying file prefixed with expand.
|
||||||
func (a busyboxBin) String() string {
|
func (a busyboxBin) String() string {
|
||||||
return "expand-" + a.bin.(fmt.Stringer).String()
|
return "expand-" + a.bin.(fmt.Stringer).String()
|
||||||
@@ -78,10 +87,23 @@ func (a busyboxBin) Cure(t *pkg.TContext) (err error) {
|
|||||||
// newBusyboxBin returns a [pkg.Artifact] containing a busybox installation from
|
// newBusyboxBin returns a [pkg.Artifact] containing a busybox installation from
|
||||||
// the https://busybox.net/downloads/binaries/ binary release.
|
// the https://busybox.net/downloads/binaries/ binary release.
|
||||||
func newBusyboxBin() pkg.Artifact {
|
func newBusyboxBin() pkg.Artifact {
|
||||||
const (
|
var version, url, checksum string
|
||||||
|
switch runtime.GOARCH {
|
||||||
|
case "amd64":
|
||||||
version = "1.35.0"
|
version = "1.35.0"
|
||||||
|
url = "https://busybox.net/downloads/binaries/" +
|
||||||
|
version + "-" + linuxArch() + "-linux-musl/busybox"
|
||||||
checksum = "L7OBIsPu9enNHn7FqpBT1kOg_mCLNmetSeNMA3i4Y60Z5jTgnlX3qX3zcQtLx5AB"
|
checksum = "L7OBIsPu9enNHn7FqpBT1kOg_mCLNmetSeNMA3i4Y60Z5jTgnlX3qX3zcQtLx5AB"
|
||||||
)
|
case "arm64":
|
||||||
|
version = "1.31.0"
|
||||||
|
url = "https://busybox.net/downloads/binaries/" +
|
||||||
|
version + "-defconfig-multiarch-musl/busybox-armv8l"
|
||||||
|
checksum = "npJjBO7iwhjW6Kx2aXeSxf8kXhVgTCDChOZTTsI8ZfFfa3tbsklxRiidZQdrVERg"
|
||||||
|
|
||||||
|
default:
|
||||||
|
panic("unsupported target " + runtime.GOARCH)
|
||||||
|
}
|
||||||
|
|
||||||
return pkg.NewExec(
|
return pkg.NewExec(
|
||||||
"busybox-bin-"+version, nil, pkg.ExecTimeoutMax, false,
|
"busybox-bin-"+version, nil, pkg.ExecTimeoutMax, false,
|
||||||
fhs.AbsRoot, []string{
|
fhs.AbsRoot, []string{
|
||||||
@@ -96,264 +118,8 @@ func newBusyboxBin() pkg.Artifact {
|
|||||||
&http.Client{Transport: &http.Transport{
|
&http.Client{Transport: &http.Transport{
|
||||||
// busybox website is really slow to respond
|
// busybox website is really slow to respond
|
||||||
TLSHandshakeTimeout: 2 * time.Minute,
|
TLSHandshakeTimeout: 2 * time.Minute,
|
||||||
}},
|
}}, url,
|
||||||
"https://busybox.net/downloads/binaries/"+
|
|
||||||
version+"-"+linuxArch()+"-linux-musl/busybox",
|
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
)}),
|
)}),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t Toolchain) newBusybox() pkg.Artifact {
|
|
||||||
const (
|
|
||||||
version = "1.37.0"
|
|
||||||
checksum = "Ial94Tnt7esJ_YEeb0AxunVL6MGYFyOw7Rtu2o87CXCi1TLrc6rlznVsN1rZk7it"
|
|
||||||
)
|
|
||||||
|
|
||||||
var env []string
|
|
||||||
if t == toolchainStage3 {
|
|
||||||
env = append(env, "EXTRA_LDFLAGS=-static")
|
|
||||||
}
|
|
||||||
|
|
||||||
return t.New("busybox-"+version, false, stage3Concat(t, []pkg.Artifact{},
|
|
||||||
t.Load(Make),
|
|
||||||
t.Load(KernelHeaders),
|
|
||||||
), nil, slices.Concat([]string{
|
|
||||||
"ROSA_BUSYBOX_ENABLE=" + strings.Join([]string{
|
|
||||||
"STATIC",
|
|
||||||
"PIE",
|
|
||||||
}, " "),
|
|
||||||
"ROSA_BUSYBOX_DISABLE=" + strings.Join([]string{
|
|
||||||
"FEATURE_IPV6",
|
|
||||||
"FEATURE_PREFER_IPV4_ADDRESS",
|
|
||||||
"FEATURE_HWIB",
|
|
||||||
"ARP",
|
|
||||||
"ARPING",
|
|
||||||
"BRCTL",
|
|
||||||
"FEATURE_BRCTL_FANCY",
|
|
||||||
"FEATURE_BRCTL_SHOW",
|
|
||||||
"DNSD",
|
|
||||||
"ETHER_WAKE",
|
|
||||||
"FTPD",
|
|
||||||
"FEATURE_FTPD_WRITE",
|
|
||||||
"FEATURE_FTPD_ACCEPT_BROKEN_LIST",
|
|
||||||
"FEATURE_FTPD_AUTHENTICATION",
|
|
||||||
"FTPGET",
|
|
||||||
"FTPPUT",
|
|
||||||
"FEATURE_FTPGETPUT_LONG_OPTIONS",
|
|
||||||
"HOSTNAME",
|
|
||||||
"DNSDOMAINNAME",
|
|
||||||
"HTTPD",
|
|
||||||
"FEATURE_HTTPD_PORT_DEFAULT",
|
|
||||||
"FEATURE_HTTPD_RANGES",
|
|
||||||
"FEATURE_HTTPD_SETUID",
|
|
||||||
"FEATURE_HTTPD_BASIC_AUTH",
|
|
||||||
"FEATURE_HTTPD_AUTH_MD5",
|
|
||||||
"FEATURE_HTTPD_CGI",
|
|
||||||
"FEATURE_HTTPD_CONFIG_WITH_SCRIPT_INTERPR",
|
|
||||||
"FEATURE_HTTPD_SET_REMOTE_PORT_TO_ENV",
|
|
||||||
"FEATURE_HTTPD_ENCODE_URL_STR",
|
|
||||||
"FEATURE_HTTPD_ERROR_PAGES",
|
|
||||||
"FEATURE_HTTPD_PROXY",
|
|
||||||
"FEATURE_HTTPD_GZIP",
|
|
||||||
"FEATURE_HTTPD_ETAG",
|
|
||||||
"FEATURE_HTTPD_LAST_MODIFIED",
|
|
||||||
"FEATURE_HTTPD_DATE",
|
|
||||||
"FEATURE_HTTPD_ACL_IP",
|
|
||||||
"IFCONFIG",
|
|
||||||
"FEATURE_IFCONFIG_STATUS",
|
|
||||||
"FEATURE_IFCONFIG_SLIP",
|
|
||||||
"FEATURE_IFCONFIG_MEMSTART_IOADDR_IRQ",
|
|
||||||
"FEATURE_IFCONFIG_HW",
|
|
||||||
"FEATURE_IFCONFIG_BROADCAST_PLUS",
|
|
||||||
"IFENSLAVE",
|
|
||||||
"IFPLUGD",
|
|
||||||
"IFUP",
|
|
||||||
"IFDOWN",
|
|
||||||
"IFUPDOWN_IFSTATE_PATH",
|
|
||||||
"FEATURE_IFUPDOWN_IP",
|
|
||||||
"FEATURE_IFUPDOWN_IPV4",
|
|
||||||
"FEATURE_IFUPDOWN_IPV6",
|
|
||||||
"FEATURE_IFUPDOWN_MAPPING",
|
|
||||||
"INETD",
|
|
||||||
"FEATURE_INETD_SUPPORT_BUILTIN_ECHO",
|
|
||||||
"FEATURE_INETD_SUPPORT_BUILTIN_DISCARD",
|
|
||||||
"FEATURE_INETD_SUPPORT_BUILTIN_TIME",
|
|
||||||
"FEATURE_INETD_SUPPORT_BUILTIN_DAYTIME",
|
|
||||||
"FEATURE_INETD_SUPPORT_BUILTIN_CHARGEN",
|
|
||||||
"IP",
|
|
||||||
"IPADDR",
|
|
||||||
"IPLINK",
|
|
||||||
"IPROUTE",
|
|
||||||
"IPTUNNEL",
|
|
||||||
"IPRULE",
|
|
||||||
"IPNEIGH",
|
|
||||||
"FEATURE_IP_ADDRESS",
|
|
||||||
"FEATURE_IP_LINK",
|
|
||||||
"FEATURE_IP_LINK_CAN",
|
|
||||||
"FEATURE_IP_ROUTE",
|
|
||||||
"FEATURE_IP_ROUTE_DIR",
|
|
||||||
"FEATURE_IP_TUNNEL",
|
|
||||||
"FEATURE_IP_RULE",
|
|
||||||
"FEATURE_IP_NEIGH",
|
|
||||||
"IPCALC",
|
|
||||||
"FEATURE_IPCALC_LONG_OPTIONS",
|
|
||||||
"FEATURE_IPCALC_FANCY",
|
|
||||||
"FAKEIDENTD",
|
|
||||||
"NAMEIF",
|
|
||||||
"FEATURE_NAMEIF_EXTENDED",
|
|
||||||
"NBDCLIENT",
|
|
||||||
"NC",
|
|
||||||
"NC_SERVER",
|
|
||||||
"NC_EXTRA",
|
|
||||||
"NC_110_COMPAT",
|
|
||||||
"NETSTAT",
|
|
||||||
"FEATURE_NETSTAT_WIDE",
|
|
||||||
"FEATURE_NETSTAT_PRG",
|
|
||||||
"NSLOOKUP",
|
|
||||||
"FEATURE_NSLOOKUP_BIG",
|
|
||||||
"FEATURE_NSLOOKUP_LONG_OPTIONS",
|
|
||||||
"NTPD",
|
|
||||||
"FEATURE_NTPD_SERVER",
|
|
||||||
"FEATURE_NTPD_CONF",
|
|
||||||
"FEATURE_NTP_AUTH",
|
|
||||||
"PING",
|
|
||||||
"PING6",
|
|
||||||
"FEATURE_FANCY_PING",
|
|
||||||
"PSCAN",
|
|
||||||
"ROUTE",
|
|
||||||
"SLATTACH",
|
|
||||||
"SSL_CLIENT",
|
|
||||||
"TC",
|
|
||||||
"FEATURE_TC_INGRESS",
|
|
||||||
"TCPSVD",
|
|
||||||
"UDPSVD",
|
|
||||||
"TELNET",
|
|
||||||
"FEATURE_TELNET_TTYPE",
|
|
||||||
"FEATURE_TELNET_AUTOLOGIN",
|
|
||||||
"FEATURE_TELNET_WIDTH",
|
|
||||||
"TELNETD",
|
|
||||||
"FEATURE_TELNETD_STANDALONE",
|
|
||||||
"FEATURE_TELNETD_PORT_DEFAULT",
|
|
||||||
"FEATURE_TELNETD_INETD_WAIT",
|
|
||||||
"TFTP",
|
|
||||||
"FEATURE_TFTP_PROGRESS_BAR",
|
|
||||||
"FEATURE_TFTP_HPA_COMPAT",
|
|
||||||
"TFTPD",
|
|
||||||
"FEATURE_TFTP_GET",
|
|
||||||
"FEATURE_TFTP_PUT",
|
|
||||||
"FEATURE_TFTP_BLOCKSIZE",
|
|
||||||
"TLS",
|
|
||||||
"TRACEROUTE",
|
|
||||||
"TRACEROUTE6",
|
|
||||||
"FEATURE_TRACEROUTE_VERBOSE",
|
|
||||||
"FEATURE_TRACEROUTE_USE_ICMP",
|
|
||||||
"TUNCTL",
|
|
||||||
"FEATURE_TUNCTL_UG",
|
|
||||||
"VCONFIG",
|
|
||||||
"WGET",
|
|
||||||
"FEATURE_WGET_LONG_OPTIONS",
|
|
||||||
"FEATURE_WGET_STATUSBAR",
|
|
||||||
"FEATURE_WGET_FTP",
|
|
||||||
"FEATURE_WGET_AUTHENTICATION",
|
|
||||||
"FEATURE_WGET_TIMEOUT",
|
|
||||||
"FEATURE_WGET_HTTPS",
|
|
||||||
"FEATURE_WGET_OPENSSL",
|
|
||||||
"WHOIS",
|
|
||||||
"ZCIP",
|
|
||||||
"UDHCPD",
|
|
||||||
"FEATURE_UDHCPD_BOOTP",
|
|
||||||
"FEATURE_UDHCPD_WRITE_LEASES_EARLY",
|
|
||||||
"DHCPD_LEASES_FILE",
|
|
||||||
"DUMPLEASES",
|
|
||||||
"DHCPRELAY",
|
|
||||||
"UDHCPC",
|
|
||||||
"FEATURE_UDHCPC_ARPING",
|
|
||||||
"FEATURE_UDHCPC_SANITIZEOPT",
|
|
||||||
"UDHCPC_DEFAULT_SCRIPT",
|
|
||||||
"UDHCPC6_DEFAULT_SCRIPT",
|
|
||||||
"UDHCPC6",
|
|
||||||
"FEATURE_UDHCPC6_RFC3646",
|
|
||||||
"FEATURE_UDHCPC6_RFC4704",
|
|
||||||
"FEATURE_UDHCPC6_RFC4833",
|
|
||||||
"FEATURE_UDHCPC6_RFC5970",
|
|
||||||
}, " "),
|
|
||||||
}, env), `
|
|
||||||
config_enable() {
|
|
||||||
for ent in "$@"; do
|
|
||||||
sed "s/^# CONFIG_${ent}.*/CONFIG_${ent}=y/" -i .config
|
|
||||||
shift
|
|
||||||
done
|
|
||||||
}
|
|
||||||
|
|
||||||
config_disable() {
|
|
||||||
for ent in "$@"; do
|
|
||||||
sed "s/^CONFIG_${ent}=y/# CONFIG_${ent} is not set/" -i .config
|
|
||||||
shift
|
|
||||||
done
|
|
||||||
}
|
|
||||||
|
|
||||||
cat > /bin/gcc << EOF
|
|
||||||
exec clang \
|
|
||||||
-Wno-ignored-optimization-argument \
|
|
||||||
${LDFLAGS} \
|
|
||||||
\$@
|
|
||||||
EOF
|
|
||||||
chmod +x /bin/gcc
|
|
||||||
|
|
||||||
cd /usr/src/busybox
|
|
||||||
chmod +w editors editors/awk.c
|
|
||||||
patch -p 1 < /usr/src/patches/awk-fix-literal-backslash.patch
|
|
||||||
|
|
||||||
cd "$(mktemp -d)"
|
|
||||||
make \
|
|
||||||
KBUILD_SRC=/usr/src/busybox \
|
|
||||||
-f /usr/src/busybox/Makefile \
|
|
||||||
defconfig
|
|
||||||
|
|
||||||
config_enable $ROSA_BUSYBOX_ENABLE
|
|
||||||
config_disable $ROSA_BUSYBOX_DISABLE
|
|
||||||
ln -s ../system/bin/pwd /bin/pwd || true
|
|
||||||
make CFLAGS_busybox="${LDFLAGS} ${EXTRA_LDFLAGS}" "-j$(nproc)"
|
|
||||||
|
|
||||||
mkdir -p /system/bin/ /work/bin/
|
|
||||||
cp busybox /system/bin/
|
|
||||||
|
|
||||||
mkdir -pv /work/system/bin/
|
|
||||||
busybox --install -s /work/system/bin/
|
|
||||||
cp -v busybox /work/system/bin/
|
|
||||||
ln -vs ../system/bin/hush /work/bin/sh
|
|
||||||
mkdir -vp /work/usr/bin/
|
|
||||||
ln -vs ../../system/bin/busybox /work/usr/bin/env
|
|
||||||
`, pkg.Path(AbsUsrSrc.Append("busybox"), true, pkg.NewHTTPGetTar(
|
|
||||||
&http.Client{Transport: &http.Transport{
|
|
||||||
// busybox website is really slow to respond
|
|
||||||
TLSHandshakeTimeout: 2 * time.Minute,
|
|
||||||
}},
|
|
||||||
"https://busybox.net/downloads/busybox-"+version+".tar.bz2",
|
|
||||||
mustDecode(checksum),
|
|
||||||
pkg.TarBzip2,
|
|
||||||
)), pkg.Path(
|
|
||||||
AbsUsrSrc.Append("patches", "awk-fix-literal-backslash.patch"), false,
|
|
||||||
pkg.NewFile("awk-fix-literal-backslash.patch", []byte(`diff --git a/editors/awk.c b/editors/awk.c
|
|
||||||
index 64e752f4b..40f5ba7f7 100644
|
|
||||||
--- a/editors/awk.c
|
|
||||||
+++ b/editors/awk.c
|
|
||||||
@@ -2636,8 +2636,13 @@ static int awk_sub(node *rn, const char *repl, int nm, var *src, var *dest /*,in
|
|
||||||
resbuf = qrealloc(resbuf, residx + replen + n, &resbufsize);
|
|
||||||
memcpy(resbuf + residx, sp + pmatch[j].rm_so - start_ofs, n);
|
|
||||||
residx += n;
|
|
||||||
- } else
|
|
||||||
+ } else {
|
|
||||||
+/* '\\' and '&' following a backslash keep its original meaning, any other
|
|
||||||
+ * occurrence of a '\\' should be treated as literal */
|
|
||||||
+ if (bslash && c != '\\' && c != '&')
|
|
||||||
+ resbuf[residx++] = '\\';
|
|
||||||
resbuf[residx++] = c;
|
|
||||||
+ }
|
|
||||||
bslash = 0;
|
|
||||||
}
|
|
||||||
}`)),
|
|
||||||
))
|
|
||||||
}
|
|
||||||
// init registers the busybox artifact constructor in the global
// constructor table so it can be looked up by artifact id.
func init() {
	artifactsF[Busybox] = Toolchain.newBusybox
}
|
|
||||||
|
|||||||
36
internal/rosa/bzip2.go
Normal file
36
internal/rosa/bzip2.go
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
package rosa
|
||||||
|
|
||||||
|
import "hakurei.app/internal/pkg"
|
||||||
|
|
||||||
|
func (t Toolchain) newBzip2() (pkg.Artifact, string) {
|
||||||
|
const (
|
||||||
|
version = "1.0.8"
|
||||||
|
checksum = "cTLykcco7boom-s05H1JVsQi1AtChYL84nXkg_92Dm1Xt94Ob_qlMg_-NSguIK-c"
|
||||||
|
)
|
||||||
|
return t.NewPackage("bzip2", version, pkg.NewHTTPGetTar(
|
||||||
|
nil, "https://sourceware.org/pub/bzip2/bzip2-"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
), &PackageAttr{
|
||||||
|
Writable: true,
|
||||||
|
EnterSource: true,
|
||||||
|
}, &MakeHelper{
|
||||||
|
// uses source tree as scratch space
|
||||||
|
SkipConfigure: true,
|
||||||
|
SkipCheck: true,
|
||||||
|
InPlace: true,
|
||||||
|
Make: []string{
|
||||||
|
"CC=cc",
|
||||||
|
},
|
||||||
|
Install: "make PREFIX=/work/system install",
|
||||||
|
}), version
|
||||||
|
}
|
||||||
|
func init() {
|
||||||
|
artifactsM[Bzip2] = Metadata{
|
||||||
|
f: Toolchain.newBzip2,
|
||||||
|
|
||||||
|
Name: "bzip2",
|
||||||
|
Description: "a freely available, patent free, high-quality data compressor",
|
||||||
|
Website: "https://sourceware.org/bzip2/",
|
||||||
|
}
|
||||||
|
}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user