forked from security/hakurei
Compare commits
3 Commits
pkgserver
...
wip-irdump
| Author | SHA1 | Date | |
|---|---|---|---|
|
d89715c20b
|
|||
|
c7ba7f2a31
|
|||
|
5db302110f
|
@@ -89,6 +89,23 @@ jobs:
|
|||||||
path: result/*
|
path: result/*
|
||||||
retention-days: 1
|
retention-days: 1
|
||||||
|
|
||||||
|
hpkg:
|
||||||
|
name: Hpkg
|
||||||
|
runs-on: nix
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Run NixOS test
|
||||||
|
run: nix build --out-link "result" --print-out-paths --print-build-logs .#checks.x86_64-linux.hpkg
|
||||||
|
|
||||||
|
- name: Upload test output
|
||||||
|
uses: actions/upload-artifact@v3
|
||||||
|
with:
|
||||||
|
name: "hpkg-vm-output"
|
||||||
|
path: result/*
|
||||||
|
retention-days: 1
|
||||||
|
|
||||||
check:
|
check:
|
||||||
name: Flake checks
|
name: Flake checks
|
||||||
needs:
|
needs:
|
||||||
@@ -97,6 +114,7 @@ jobs:
|
|||||||
- sandbox
|
- sandbox
|
||||||
- sandbox-race
|
- sandbox-race
|
||||||
- sharefs
|
- sharefs
|
||||||
|
- hpkg
|
||||||
runs-on: nix
|
runs-on: nix
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
|
|||||||
5
.github/workflows/README
vendored
Normal file
5
.github/workflows/README
vendored
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
DO NOT ADD NEW ACTIONS HERE
|
||||||
|
|
||||||
|
This port is solely for releasing to the github mirror and serves no purpose during development.
|
||||||
|
All development happens at https://git.gensokyo.uk/security/hakurei. If you wish to contribute,
|
||||||
|
request for an account on git.gensokyo.uk.
|
||||||
46
.github/workflows/release.yml
vendored
Normal file
46
.github/workflows/release.yml
vendored
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
name: Release
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
tags:
|
||||||
|
- 'v*'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
release:
|
||||||
|
name: Create release
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
packages: write
|
||||||
|
contents: write
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Install Nix
|
||||||
|
uses: nixbuild/nix-quick-install-action@v32
|
||||||
|
with:
|
||||||
|
nix_conf: |
|
||||||
|
keep-env-derivations = true
|
||||||
|
keep-outputs = true
|
||||||
|
|
||||||
|
- name: Restore and cache Nix store
|
||||||
|
uses: nix-community/cache-nix-action@v6
|
||||||
|
with:
|
||||||
|
primary-key: build-${{ runner.os }}-${{ hashFiles('**/*.nix') }}
|
||||||
|
restore-prefixes-first-match: build-${{ runner.os }}-
|
||||||
|
gc-max-store-size-linux: 1G
|
||||||
|
purge: true
|
||||||
|
purge-prefixes: build-${{ runner.os }}-
|
||||||
|
purge-created: 60
|
||||||
|
purge-primary-key: never
|
||||||
|
|
||||||
|
- name: Build for release
|
||||||
|
run: nix build --print-out-paths --print-build-logs .#dist
|
||||||
|
|
||||||
|
- name: Release
|
||||||
|
uses: softprops/action-gh-release@v2
|
||||||
|
with:
|
||||||
|
files: |-
|
||||||
|
result/hakurei-**
|
||||||
48
.github/workflows/test.yml
vendored
Normal file
48
.github/workflows/test.yml
vendored
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
name: Test
|
||||||
|
|
||||||
|
on:
|
||||||
|
- push
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
dist:
|
||||||
|
name: Create distribution
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
actions: write
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Install Nix
|
||||||
|
uses: nixbuild/nix-quick-install-action@v32
|
||||||
|
with:
|
||||||
|
nix_conf: |
|
||||||
|
keep-env-derivations = true
|
||||||
|
keep-outputs = true
|
||||||
|
|
||||||
|
- name: Restore and cache Nix store
|
||||||
|
uses: nix-community/cache-nix-action@v6
|
||||||
|
with:
|
||||||
|
primary-key: build-${{ runner.os }}-${{ hashFiles('**/*.nix') }}
|
||||||
|
restore-prefixes-first-match: build-${{ runner.os }}-
|
||||||
|
gc-max-store-size-linux: 1G
|
||||||
|
purge: true
|
||||||
|
purge-prefixes: build-${{ runner.os }}-
|
||||||
|
purge-created: 60
|
||||||
|
purge-primary-key: never
|
||||||
|
|
||||||
|
- name: Build for test
|
||||||
|
id: build-test
|
||||||
|
run: >-
|
||||||
|
export HAKUREI_REV="$(git rev-parse --short HEAD)" &&
|
||||||
|
sed -i.old 's/version = /version = "0.0.0-'$HAKUREI_REV'"; # version = /' package.nix &&
|
||||||
|
nix build --print-out-paths --print-build-logs .#dist &&
|
||||||
|
mv package.nix.old package.nix &&
|
||||||
|
echo "rev=$HAKUREI_REV" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
- name: Upload test build
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: "hakurei-${{ steps.build-test.outputs.rev }}"
|
||||||
|
path: result/*
|
||||||
|
retention-days: 1
|
||||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -28,7 +28,6 @@ go.work.sum
|
|||||||
# go generate
|
# go generate
|
||||||
/cmd/hakurei/LICENSE
|
/cmd/hakurei/LICENSE
|
||||||
/internal/pkg/testdata/testtool
|
/internal/pkg/testdata/testtool
|
||||||
/internal/rosa/hakurei_current.tar.gz
|
|
||||||
|
|
||||||
# release
|
# release
|
||||||
/dist/hakurei-*
|
/dist/hakurei-*
|
||||||
|
|||||||
181
README.md
181
README.md
@@ -15,51 +15,164 @@
|
|||||||
<a href="https://hakurei.app"><img src="https://img.shields.io/website?url=https%3A%2F%2Fhakurei.app" alt="Website" /></a>
|
<a href="https://hakurei.app"><img src="https://img.shields.io/website?url=https%3A%2F%2Fhakurei.app" alt="Website" /></a>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
Hakurei is a tool for running sandboxed desktop applications as dedicated
|
Hakurei is a tool for running sandboxed graphical applications as dedicated subordinate users on the Linux kernel.
|
||||||
subordinate users on the Linux kernel. It implements the application container
|
It implements the application container of [planterette (WIP)](https://git.gensokyo.uk/security/planterette),
|
||||||
of [planterette (WIP)](https://git.gensokyo.uk/security/planterette), a
|
a self-contained Android-like package manager with modern security features.
|
||||||
self-contained Android-like package manager with modern security features.
|
|
||||||
|
|
||||||
Interaction with hakurei happens entirely through structures described by
|
## NixOS Module usage
|
||||||
package [hst](https://pkg.go.dev/hakurei.app/hst). No native API is available
|
|
||||||
due to internal details of uid isolation.
|
|
||||||
|
|
||||||
## Notable Packages
|
The NixOS module currently requires home-manager to configure subordinate users. Full module documentation can be found [here](options.md).
|
||||||
|
|
||||||
Package [container](https://pkg.go.dev/hakurei.app/container) is general purpose
|
To use the module, import it into your configuration with
|
||||||
container tooling. It is used by the hakurei shim process running as the target
|
|
||||||
subordinate user to set up the application container. It has a single dependency,
|
|
||||||
[libseccomp](https://github.com/seccomp/libseccomp), to create BPF programs
|
|
||||||
for the [system call filter](https://www.kernel.org/doc/html/latest/userspace-api/seccomp_filter.html).
|
|
||||||
|
|
||||||
Package [internal/pkg](https://pkg.go.dev/hakurei.app/internal/pkg) provides
|
```nix
|
||||||
infrastructure for hermetic builds. This replaces the legacy nix-based testing
|
{
|
||||||
framework and serves as the build system of Rosa OS, currently developed under
|
inputs = {
|
||||||
package [internal/rosa](https://pkg.go.dev/hakurei.app/internal/rosa).
|
nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
|
||||||
|
|
||||||
## Dependencies
|
hakurei = {
|
||||||
|
url = "git+https://git.gensokyo.uk/security/hakurei";
|
||||||
|
|
||||||
`container` depends on:
|
# Optional but recommended to limit the size of your system closure.
|
||||||
|
inputs.nixpkgs.follows = "nixpkgs";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
- [libseccomp](https://github.com/seccomp/libseccomp) to generate BPF programs.
|
outputs = { self, nixpkgs, hakurei, ... }:
|
||||||
|
{
|
||||||
|
nixosConfigurations.hakurei = nixpkgs.lib.nixosSystem {
|
||||||
|
system = "x86_64-linux";
|
||||||
|
modules = [
|
||||||
|
hakurei.nixosModules.hakurei
|
||||||
|
];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
`cmd/hakurei` depends on:
|
This adds the `environment.hakurei` option:
|
||||||
|
|
||||||
- [acl](https://savannah.nongnu.org/projects/acl/) to export sockets to
|
```nix
|
||||||
subordinate users.
|
{ pkgs, ... }:
|
||||||
- [wayland](https://gitlab.freedesktop.org/wayland/wayland) to set up
|
|
||||||
[security-context-v1](https://wayland.app/protocols/security-context-v1).
|
|
||||||
- [xcb](https://xcb.freedesktop.org/) to grant and revoke subordinate users
|
|
||||||
access to the X server.
|
|
||||||
|
|
||||||
`cmd/sharefs` depends on:
|
{
|
||||||
|
environment.hakurei = {
|
||||||
|
enable = true;
|
||||||
|
stateDir = "/var/lib/hakurei";
|
||||||
|
users = {
|
||||||
|
alice = 0;
|
||||||
|
nixos = 10;
|
||||||
|
};
|
||||||
|
|
||||||
- [fuse](https://github.com/libfuse/libfuse) to implement the filesystem.
|
commonPaths = [
|
||||||
|
{
|
||||||
|
src = "/sdcard";
|
||||||
|
write = true;
|
||||||
|
}
|
||||||
|
];
|
||||||
|
|
||||||
New dependencies will generally not be added. Patches adding new dependencies
|
extraHomeConfig = {
|
||||||
are very likely to be rejected.
|
home.stateVersion = "23.05";
|
||||||
|
};
|
||||||
|
|
||||||
## NixOS Module (deprecated)
|
apps = {
|
||||||
|
"org.chromium.Chromium" = {
|
||||||
|
name = "chromium";
|
||||||
|
identity = 1;
|
||||||
|
packages = [ pkgs.chromium ];
|
||||||
|
userns = true;
|
||||||
|
mapRealUid = true;
|
||||||
|
dbus = {
|
||||||
|
system = {
|
||||||
|
filter = true;
|
||||||
|
talk = [
|
||||||
|
"org.bluez"
|
||||||
|
"org.freedesktop.Avahi"
|
||||||
|
"org.freedesktop.UPower"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
session =
|
||||||
|
f:
|
||||||
|
f {
|
||||||
|
talk = [
|
||||||
|
"org.freedesktop.FileManager1"
|
||||||
|
"org.freedesktop.Notifications"
|
||||||
|
"org.freedesktop.ScreenSaver"
|
||||||
|
"org.freedesktop.secrets"
|
||||||
|
"org.kde.kwalletd5"
|
||||||
|
"org.kde.kwalletd6"
|
||||||
|
];
|
||||||
|
own = [
|
||||||
|
"org.chromium.Chromium.*"
|
||||||
|
"org.mpris.MediaPlayer2.org.chromium.Chromium.*"
|
||||||
|
"org.mpris.MediaPlayer2.chromium.*"
|
||||||
|
];
|
||||||
|
call = { };
|
||||||
|
broadcast = { };
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
The NixOS module is in maintenance mode and will be removed once planterette is
|
"org.claws_mail.Claws-Mail" = {
|
||||||
feature-complete. Full module documentation can be found [here](options.md).
|
name = "claws-mail";
|
||||||
|
identity = 2;
|
||||||
|
packages = [ pkgs.claws-mail ];
|
||||||
|
gpu = false;
|
||||||
|
capability.pulse = false;
|
||||||
|
};
|
||||||
|
|
||||||
|
"org.weechat" = {
|
||||||
|
name = "weechat";
|
||||||
|
identity = 3;
|
||||||
|
shareUid = true;
|
||||||
|
packages = [ pkgs.weechat ];
|
||||||
|
capability = {
|
||||||
|
wayland = false;
|
||||||
|
x11 = false;
|
||||||
|
dbus = true;
|
||||||
|
pulse = false;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
"dev.vencord.Vesktop" = {
|
||||||
|
name = "discord";
|
||||||
|
identity = 3;
|
||||||
|
shareUid = true;
|
||||||
|
packages = [ pkgs.vesktop ];
|
||||||
|
share = pkgs.vesktop;
|
||||||
|
command = "vesktop --ozone-platform-hint=wayland";
|
||||||
|
userns = true;
|
||||||
|
mapRealUid = true;
|
||||||
|
capability.x11 = true;
|
||||||
|
dbus = {
|
||||||
|
session =
|
||||||
|
f:
|
||||||
|
f {
|
||||||
|
talk = [ "org.kde.StatusNotifierWatcher" ];
|
||||||
|
own = [ ];
|
||||||
|
call = { };
|
||||||
|
broadcast = { };
|
||||||
|
};
|
||||||
|
system.filter = true;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
"io.looking-glass" = {
|
||||||
|
name = "looking-glass-client";
|
||||||
|
identity = 4;
|
||||||
|
useCommonPaths = false;
|
||||||
|
groups = [ "plugdev" ];
|
||||||
|
extraPaths = [
|
||||||
|
{
|
||||||
|
src = "/dev/shm/looking-glass";
|
||||||
|
write = true;
|
||||||
|
}
|
||||||
|
];
|
||||||
|
extraConfig = {
|
||||||
|
programs.looking-glass-client.enable = true;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|||||||
@@ -1,58 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"runtime"
|
|
||||||
. "syscall"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
runtime.LockOSThread()
|
|
||||||
log.SetFlags(0)
|
|
||||||
log.SetPrefix("earlyinit: ")
|
|
||||||
|
|
||||||
if err := Mount(
|
|
||||||
"devtmpfs",
|
|
||||||
"/dev/",
|
|
||||||
"devtmpfs",
|
|
||||||
MS_NOSUID|MS_NOEXEC,
|
|
||||||
"",
|
|
||||||
); err != nil {
|
|
||||||
log.Fatalf("cannot mount devtmpfs: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// The kernel might be unable to set up the console. When that happens,
|
|
||||||
// printk is called with "Warning: unable to open an initial console."
|
|
||||||
// and the init runs with no files. The checkfds runtime function
|
|
||||||
// populates 0-2 by opening /dev/null for them.
|
|
||||||
//
|
|
||||||
// This check replaces 1 and 2 with /dev/kmsg to improve the chance
|
|
||||||
// of output being visible to the user.
|
|
||||||
if fi, err := os.Stdout.Stat(); err == nil {
|
|
||||||
if stat, ok := fi.Sys().(*Stat_t); ok {
|
|
||||||
if stat.Rdev == 0x103 {
|
|
||||||
var fd int
|
|
||||||
if fd, err = Open(
|
|
||||||
"/dev/kmsg",
|
|
||||||
O_WRONLY|O_CLOEXEC,
|
|
||||||
0,
|
|
||||||
); err != nil {
|
|
||||||
log.Fatalf("cannot open kmsg: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = Dup3(fd, Stdout, 0); err != nil {
|
|
||||||
log.Fatalf("cannot open stdout: %v", err)
|
|
||||||
}
|
|
||||||
if err = Dup3(fd, Stderr, 0); err != nil {
|
|
||||||
log.Fatalf("cannot open stderr: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = Close(fd); err != nil {
|
|
||||||
log.Printf("cannot close kmsg: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
7
cmd/hpkg/README
Normal file
7
cmd/hpkg/README
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
This program is a proof of concept and is now deprecated. It is only kept
|
||||||
|
around for API demonstration purposes and to make the most out of the test
|
||||||
|
suite.
|
||||||
|
|
||||||
|
This program is replaced by planterette, which can be found at
|
||||||
|
https://git.gensokyo.uk/security/planterette. Development effort should be
|
||||||
|
focused there instead.
|
||||||
173
cmd/hpkg/app.go
Normal file
173
cmd/hpkg/app.go
Normal file
@@ -0,0 +1,173 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/container/fhs"
|
||||||
|
"hakurei.app/hst"
|
||||||
|
)
|
||||||
|
|
||||||
|
type appInfo struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Version string `json:"version"`
|
||||||
|
|
||||||
|
// passed through to [hst.Config]
|
||||||
|
ID string `json:"id"`
|
||||||
|
// passed through to [hst.Config]
|
||||||
|
Identity int `json:"identity"`
|
||||||
|
// passed through to [hst.Config]
|
||||||
|
Groups []string `json:"groups,omitempty"`
|
||||||
|
// passed through to [hst.Config]
|
||||||
|
Devel bool `json:"devel,omitempty"`
|
||||||
|
// passed through to [hst.Config]
|
||||||
|
Userns bool `json:"userns,omitempty"`
|
||||||
|
// passed through to [hst.Config]
|
||||||
|
HostNet bool `json:"net,omitempty"`
|
||||||
|
// passed through to [hst.Config]
|
||||||
|
HostAbstract bool `json:"abstract,omitempty"`
|
||||||
|
// passed through to [hst.Config]
|
||||||
|
Device bool `json:"dev,omitempty"`
|
||||||
|
// passed through to [hst.Config]
|
||||||
|
Tty bool `json:"tty,omitempty"`
|
||||||
|
// passed through to [hst.Config]
|
||||||
|
MapRealUID bool `json:"map_real_uid,omitempty"`
|
||||||
|
// passed through to [hst.Config]
|
||||||
|
DirectWayland bool `json:"direct_wayland,omitempty"`
|
||||||
|
// passed through to [hst.Config]
|
||||||
|
SystemBus *hst.BusConfig `json:"system_bus,omitempty"`
|
||||||
|
// passed through to [hst.Config]
|
||||||
|
SessionBus *hst.BusConfig `json:"session_bus,omitempty"`
|
||||||
|
// passed through to [hst.Config]
|
||||||
|
Enablements *hst.Enablements `json:"enablements,omitempty"`
|
||||||
|
|
||||||
|
// passed through to [hst.Config]
|
||||||
|
Multiarch bool `json:"multiarch,omitempty"`
|
||||||
|
// passed through to [hst.Config]
|
||||||
|
Bluetooth bool `json:"bluetooth,omitempty"`
|
||||||
|
|
||||||
|
// allow gpu access within sandbox
|
||||||
|
GPU bool `json:"gpu"`
|
||||||
|
// store path to nixGL mesa wrappers
|
||||||
|
Mesa string `json:"mesa,omitempty"`
|
||||||
|
// store path to nixGL source
|
||||||
|
NixGL string `json:"nix_gl,omitempty"`
|
||||||
|
// store path to activate-and-exec script
|
||||||
|
Launcher *check.Absolute `json:"launcher"`
|
||||||
|
// store path to /run/current-system
|
||||||
|
CurrentSystem *check.Absolute `json:"current_system"`
|
||||||
|
// store path to home-manager activation package
|
||||||
|
ActivationPackage string `json:"activation_package"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *appInfo) toHst(pathSet *appPathSet, pathname *check.Absolute, argv []string, flagDropShell bool) *hst.Config {
|
||||||
|
config := &hst.Config{
|
||||||
|
ID: app.ID,
|
||||||
|
|
||||||
|
Enablements: app.Enablements,
|
||||||
|
|
||||||
|
SystemBus: app.SystemBus,
|
||||||
|
SessionBus: app.SessionBus,
|
||||||
|
DirectWayland: app.DirectWayland,
|
||||||
|
|
||||||
|
Identity: app.Identity,
|
||||||
|
Groups: app.Groups,
|
||||||
|
|
||||||
|
Container: &hst.ContainerConfig{
|
||||||
|
Hostname: formatHostname(app.Name),
|
||||||
|
Filesystem: []hst.FilesystemConfigJSON{
|
||||||
|
{FilesystemConfig: &hst.FSBind{Target: fhs.AbsEtc, Source: pathSet.cacheDir.Append("etc"), Special: true}},
|
||||||
|
{FilesystemConfig: &hst.FSBind{Source: pathSet.nixPath.Append("store"), Target: pathNixStore}},
|
||||||
|
{FilesystemConfig: &hst.FSLink{Target: pathCurrentSystem, Linkname: app.CurrentSystem.String()}},
|
||||||
|
{FilesystemConfig: &hst.FSLink{Target: pathBin, Linkname: pathSwBin.String()}},
|
||||||
|
{FilesystemConfig: &hst.FSLink{Target: fhs.AbsUsrBin, Linkname: pathSwBin.String()}},
|
||||||
|
{FilesystemConfig: &hst.FSBind{Source: pathSet.metaPath, Target: hst.AbsPrivateTmp.Append("app")}},
|
||||||
|
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsEtc.Append("resolv.conf"), Optional: true}},
|
||||||
|
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("block"), Optional: true}},
|
||||||
|
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("bus"), Optional: true}},
|
||||||
|
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("class"), Optional: true}},
|
||||||
|
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("dev"), Optional: true}},
|
||||||
|
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("devices"), Optional: true}},
|
||||||
|
{FilesystemConfig: &hst.FSBind{Target: pathDataData.Append(app.ID), Source: pathSet.homeDir, Write: true, Ensure: true}},
|
||||||
|
},
|
||||||
|
|
||||||
|
Username: "hakurei",
|
||||||
|
Shell: pathShell,
|
||||||
|
Home: pathDataData.Append(app.ID),
|
||||||
|
|
||||||
|
Path: pathname,
|
||||||
|
Args: argv,
|
||||||
|
},
|
||||||
|
ExtraPerms: []hst.ExtraPermConfig{
|
||||||
|
{Path: dataHome, Execute: true},
|
||||||
|
{Ensure: true, Path: pathSet.baseDir, Read: true, Write: true, Execute: true},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
if app.Devel {
|
||||||
|
config.Container.Flags |= hst.FDevel
|
||||||
|
}
|
||||||
|
if app.Userns {
|
||||||
|
config.Container.Flags |= hst.FUserns
|
||||||
|
}
|
||||||
|
if app.HostNet {
|
||||||
|
config.Container.Flags |= hst.FHostNet
|
||||||
|
}
|
||||||
|
if app.HostAbstract {
|
||||||
|
config.Container.Flags |= hst.FHostAbstract
|
||||||
|
}
|
||||||
|
if app.Device {
|
||||||
|
config.Container.Flags |= hst.FDevice
|
||||||
|
}
|
||||||
|
if app.Tty || flagDropShell {
|
||||||
|
config.Container.Flags |= hst.FTty
|
||||||
|
}
|
||||||
|
if app.MapRealUID {
|
||||||
|
config.Container.Flags |= hst.FMapRealUID
|
||||||
|
}
|
||||||
|
if app.Multiarch {
|
||||||
|
config.Container.Flags |= hst.FMultiarch
|
||||||
|
}
|
||||||
|
config.Container.Flags |= hst.FShareRuntime | hst.FShareTmpdir
|
||||||
|
return config
|
||||||
|
}
|
||||||
|
|
||||||
|
func loadAppInfo(name string, beforeFail func()) *appInfo {
|
||||||
|
bundle := new(appInfo)
|
||||||
|
if f, err := os.Open(name); err != nil {
|
||||||
|
beforeFail()
|
||||||
|
log.Fatalf("cannot open bundle: %v", err)
|
||||||
|
} else if err = json.NewDecoder(f).Decode(&bundle); err != nil {
|
||||||
|
beforeFail()
|
||||||
|
log.Fatalf("cannot parse bundle metadata: %v", err)
|
||||||
|
} else if err = f.Close(); err != nil {
|
||||||
|
log.Printf("cannot close bundle metadata: %v", err)
|
||||||
|
// not fatal
|
||||||
|
}
|
||||||
|
|
||||||
|
if bundle.ID == "" {
|
||||||
|
beforeFail()
|
||||||
|
log.Fatal("application identifier must not be empty")
|
||||||
|
}
|
||||||
|
if bundle.Launcher == nil {
|
||||||
|
beforeFail()
|
||||||
|
log.Fatal("launcher must not be empty")
|
||||||
|
}
|
||||||
|
if bundle.CurrentSystem == nil {
|
||||||
|
beforeFail()
|
||||||
|
log.Fatal("current-system must not be empty")
|
||||||
|
}
|
||||||
|
|
||||||
|
return bundle
|
||||||
|
}
|
||||||
|
|
||||||
|
func formatHostname(name string) string {
|
||||||
|
if h, err := os.Hostname(); err != nil {
|
||||||
|
log.Printf("cannot get hostname: %v", err)
|
||||||
|
return "hakurei-" + name
|
||||||
|
} else {
|
||||||
|
return h + "-" + name
|
||||||
|
}
|
||||||
|
}
|
||||||
256
cmd/hpkg/build.nix
Normal file
256
cmd/hpkg/build.nix
Normal file
@@ -0,0 +1,256 @@
|
|||||||
|
{
|
||||||
|
nixpkgsFor,
|
||||||
|
system,
|
||||||
|
nixpkgs,
|
||||||
|
home-manager,
|
||||||
|
}:
|
||||||
|
|
||||||
|
{
|
||||||
|
lib,
|
||||||
|
stdenv,
|
||||||
|
closureInfo,
|
||||||
|
writeScript,
|
||||||
|
runtimeShell,
|
||||||
|
writeText,
|
||||||
|
symlinkJoin,
|
||||||
|
vmTools,
|
||||||
|
runCommand,
|
||||||
|
fetchFromGitHub,
|
||||||
|
|
||||||
|
zstd,
|
||||||
|
nix,
|
||||||
|
sqlite,
|
||||||
|
|
||||||
|
name ? throw "name is required",
|
||||||
|
version ? throw "version is required",
|
||||||
|
pname ? "${name}-${version}",
|
||||||
|
modules ? [ ],
|
||||||
|
nixosModules ? [ ],
|
||||||
|
script ? ''
|
||||||
|
exec "$SHELL" "$@"
|
||||||
|
'',
|
||||||
|
|
||||||
|
id ? name,
|
||||||
|
identity ? throw "identity is required",
|
||||||
|
groups ? [ ],
|
||||||
|
userns ? false,
|
||||||
|
net ? true,
|
||||||
|
dev ? false,
|
||||||
|
no_new_session ? false,
|
||||||
|
map_real_uid ? false,
|
||||||
|
direct_wayland ? false,
|
||||||
|
system_bus ? null,
|
||||||
|
session_bus ? null,
|
||||||
|
|
||||||
|
allow_wayland ? true,
|
||||||
|
allow_x11 ? false,
|
||||||
|
allow_dbus ? true,
|
||||||
|
allow_audio ? true,
|
||||||
|
gpu ? allow_wayland || allow_x11,
|
||||||
|
}:
|
||||||
|
|
||||||
|
let
|
||||||
|
inherit (lib) optionals;
|
||||||
|
|
||||||
|
homeManagerConfiguration = home-manager.lib.homeManagerConfiguration {
|
||||||
|
pkgs = nixpkgsFor.${system};
|
||||||
|
modules = modules ++ [
|
||||||
|
{
|
||||||
|
home = {
|
||||||
|
username = "hakurei";
|
||||||
|
homeDirectory = "/data/data/${id}";
|
||||||
|
stateVersion = "22.11";
|
||||||
|
};
|
||||||
|
}
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
launcher = writeScript "hakurei-${pname}" ''
|
||||||
|
#!${runtimeShell} -el
|
||||||
|
${script}
|
||||||
|
'';
|
||||||
|
|
||||||
|
extraNixOSConfig =
|
||||||
|
{ pkgs, ... }:
|
||||||
|
{
|
||||||
|
environment = {
|
||||||
|
etc.nixpkgs.source = nixpkgs.outPath;
|
||||||
|
systemPackages = [ pkgs.nix ];
|
||||||
|
};
|
||||||
|
|
||||||
|
imports = nixosModules;
|
||||||
|
};
|
||||||
|
nixos = nixpkgs.lib.nixosSystem {
|
||||||
|
inherit system;
|
||||||
|
modules = [
|
||||||
|
extraNixOSConfig
|
||||||
|
{ nix.settings.experimental-features = [ "flakes" ]; }
|
||||||
|
{ nix.settings.experimental-features = [ "nix-command" ]; }
|
||||||
|
{ boot.isContainer = true; }
|
||||||
|
{ system.stateVersion = "22.11"; }
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
etc = vmTools.runInLinuxVM (
|
||||||
|
runCommand "etc" { } ''
|
||||||
|
mkdir -p /etc
|
||||||
|
${nixos.config.system.build.etcActivationCommands}
|
||||||
|
|
||||||
|
# remove unused files
|
||||||
|
rm -rf /etc/sudoers
|
||||||
|
|
||||||
|
mkdir -p $out
|
||||||
|
tar -C /etc -cf "$out/etc.tar" .
|
||||||
|
''
|
||||||
|
);
|
||||||
|
|
||||||
|
extendSessionDefault = id: ext: {
|
||||||
|
filter = true;
|
||||||
|
|
||||||
|
talk = [ "org.freedesktop.Notifications" ] ++ ext.talk;
|
||||||
|
own =
|
||||||
|
(optionals (id != null) [
|
||||||
|
"${id}.*"
|
||||||
|
"org.mpris.MediaPlayer2.${id}.*"
|
||||||
|
])
|
||||||
|
++ ext.own;
|
||||||
|
|
||||||
|
inherit (ext) call broadcast;
|
||||||
|
};
|
||||||
|
|
||||||
|
nixGL = fetchFromGitHub {
|
||||||
|
owner = "nix-community";
|
||||||
|
repo = "nixGL";
|
||||||
|
rev = "310f8e49a149e4c9ea52f1adf70cdc768ec53f8a";
|
||||||
|
hash = "sha256-lnzZQYG0+EXl/6NkGpyIz+FEOc/DSEG57AP1VsdeNrM=";
|
||||||
|
};
|
||||||
|
|
||||||
|
mesaWrappers =
|
||||||
|
let
|
||||||
|
isIntelX86Platform = system == "x86_64-linux";
|
||||||
|
nixGLPackages = import (nixGL + "/default.nix") {
|
||||||
|
pkgs = nixpkgs.legacyPackages.${system};
|
||||||
|
enable32bits = isIntelX86Platform;
|
||||||
|
enableIntelX86Extensions = isIntelX86Platform;
|
||||||
|
};
|
||||||
|
in
|
||||||
|
symlinkJoin {
|
||||||
|
name = "nixGL-mesa";
|
||||||
|
paths = with nixGLPackages; [
|
||||||
|
nixGLIntel
|
||||||
|
nixVulkanIntel
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
info = builtins.toJSON {
|
||||||
|
inherit
|
||||||
|
name
|
||||||
|
version
|
||||||
|
id
|
||||||
|
identity
|
||||||
|
launcher
|
||||||
|
groups
|
||||||
|
userns
|
||||||
|
net
|
||||||
|
dev
|
||||||
|
no_new_session
|
||||||
|
map_real_uid
|
||||||
|
direct_wayland
|
||||||
|
system_bus
|
||||||
|
gpu
|
||||||
|
;
|
||||||
|
|
||||||
|
session_bus =
|
||||||
|
if session_bus != null then
|
||||||
|
(session_bus (extendSessionDefault id))
|
||||||
|
else
|
||||||
|
(extendSessionDefault id {
|
||||||
|
talk = [ ];
|
||||||
|
own = [ ];
|
||||||
|
call = { };
|
||||||
|
broadcast = { };
|
||||||
|
});
|
||||||
|
|
||||||
|
enablements = {
|
||||||
|
wayland = allow_wayland;
|
||||||
|
x11 = allow_x11;
|
||||||
|
dbus = allow_dbus;
|
||||||
|
pipewire = allow_audio;
|
||||||
|
};
|
||||||
|
|
||||||
|
mesa = if gpu then mesaWrappers else null;
|
||||||
|
nix_gl = if gpu then nixGL else null;
|
||||||
|
current_system = nixos.config.system.build.toplevel;
|
||||||
|
activation_package = homeManagerConfiguration.activationPackage;
|
||||||
|
};
|
||||||
|
in
|
||||||
|
|
||||||
|
stdenv.mkDerivation {
|
||||||
|
name = "${pname}.pkg";
|
||||||
|
inherit version;
|
||||||
|
__structuredAttrs = true;
|
||||||
|
|
||||||
|
nativeBuildInputs = [
|
||||||
|
zstd
|
||||||
|
nix
|
||||||
|
sqlite
|
||||||
|
];
|
||||||
|
|
||||||
|
buildCommand = ''
|
||||||
|
NIX_ROOT="$(mktemp -d)"
|
||||||
|
export USER="nobody"
|
||||||
|
|
||||||
|
# create bootstrap store
|
||||||
|
bootstrapClosureInfo="${
|
||||||
|
closureInfo {
|
||||||
|
rootPaths = [
|
||||||
|
nix
|
||||||
|
nixos.config.system.build.toplevel
|
||||||
|
];
|
||||||
|
}
|
||||||
|
}"
|
||||||
|
echo "copying bootstrap store paths..."
|
||||||
|
mkdir -p "$NIX_ROOT/nix/store"
|
||||||
|
xargs -n 1 -a "$bootstrapClosureInfo/store-paths" cp -at "$NIX_ROOT/nix/store/"
|
||||||
|
NIX_REMOTE="local?root=$NIX_ROOT" nix-store --load-db < "$bootstrapClosureInfo/registration"
|
||||||
|
NIX_REMOTE="local?root=$NIX_ROOT" nix-store --optimise
|
||||||
|
sqlite3 "$NIX_ROOT/nix/var/nix/db/db.sqlite" "UPDATE ValidPaths SET registrationTime = ''${SOURCE_DATE_EPOCH}"
|
||||||
|
chmod -R +r "$NIX_ROOT/nix/var"
|
||||||
|
|
||||||
|
# create binary cache
|
||||||
|
closureInfo="${
|
||||||
|
closureInfo {
|
||||||
|
rootPaths = [
|
||||||
|
homeManagerConfiguration.activationPackage
|
||||||
|
launcher
|
||||||
|
]
|
||||||
|
++ optionals gpu [
|
||||||
|
mesaWrappers
|
||||||
|
nixGL
|
||||||
|
];
|
||||||
|
}
|
||||||
|
}"
|
||||||
|
echo "copying application paths..."
|
||||||
|
TMP_STORE="$(mktemp -d)"
|
||||||
|
mkdir -p "$TMP_STORE/nix/store"
|
||||||
|
xargs -n 1 -a "$closureInfo/store-paths" cp -at "$TMP_STORE/nix/store/"
|
||||||
|
NIX_REMOTE="local?root=$TMP_STORE" nix-store --load-db < "$closureInfo/registration"
|
||||||
|
sqlite3 "$TMP_STORE/nix/var/nix/db/db.sqlite" "UPDATE ValidPaths SET registrationTime = ''${SOURCE_DATE_EPOCH}"
|
||||||
|
NIX_REMOTE="local?root=$TMP_STORE" nix --offline --extra-experimental-features nix-command \
|
||||||
|
--verbose --log-format raw-with-logs \
|
||||||
|
copy --all --no-check-sigs --to \
|
||||||
|
"file://$NIX_ROOT/res?compression=zstd&compression-level=19¶llel-compression=true"
|
||||||
|
|
||||||
|
# package /etc
|
||||||
|
mkdir -p "$NIX_ROOT/etc"
|
||||||
|
tar -C "$NIX_ROOT/etc" -xf "${etc}/etc.tar"
|
||||||
|
|
||||||
|
# write metadata
|
||||||
|
cp "${writeText "bundle.json" info}" "$NIX_ROOT/bundle.json"
|
||||||
|
|
||||||
|
# create an intermediate file to improve zstd performance
|
||||||
|
INTER="$(mktemp)"
|
||||||
|
tar -C "$NIX_ROOT" -cf "$INTER" .
|
||||||
|
zstd -T0 -19 -fo "$out" "$INTER"
|
||||||
|
'';
|
||||||
|
}
|
||||||
335
cmd/hpkg/main.go
Normal file
335
cmd/hpkg/main.go
Normal file
@@ -0,0 +1,335 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"path"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"hakurei.app/command"
|
||||||
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/container/fhs"
|
||||||
|
"hakurei.app/hst"
|
||||||
|
"hakurei.app/message"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
errSuccess = errors.New("success")
|
||||||
|
)
|
||||||
|
|
||||||
|
// main implements the hpkg command line: "install" unpacks an application
// bundle and activates it through hakurei containers, "start" launches a
// previously installed application. All fatal paths go through log.Fatal*;
// recoverable handler errors are returned to command.MustParse.
func main() {
	log.SetPrefix("hpkg: ")
	log.SetFlags(0)
	msg := message.New(log.Default())

	// containers inherit $SHELL; point it at the system profile bash
	if err := os.Setenv("SHELL", pathShell.String()); err != nil {
		log.Fatalf("cannot set $SHELL: %v", err)
	}

	if os.Geteuid() == 0 {
		log.Fatal("this program must not run as root")
	}

	ctx, stop := signal.NotifyContext(context.Background(),
		syscall.SIGINT, syscall.SIGTERM)
	defer stop() // unreachable

	var (
		flagVerbose   bool
		flagDropShell bool
	)
	c := command.New(os.Stderr, log.Printf, "hpkg", func([]string) error { msg.SwapVerbose(flagVerbose); return nil }).
		Flag(&flagVerbose, "v", command.BoolFlag(false), "Print debug messages to the console").
		Flag(&flagDropShell, "s", command.BoolFlag(false), "Drop to a shell in place of next hakurei action")

	{
		var (
			flagDropShellActivate bool
		)
		c.NewCommand("install", "Install an application from its package", func(args []string) error {
			if len(args) != 1 {
				log.Println("invalid argument")
				return syscall.EINVAL
			}
			// resolve the package path against the working directory
			pkgPath := args[0]
			if !path.IsAbs(pkgPath) {
				if dir, err := os.Getwd(); err != nil {
					log.Printf("cannot get current directory: %v", err)
					return err
				} else {
					pkgPath = path.Join(dir, pkgPath)
				}
			}

			/*
				Look up paths to programs started by hpkg.
				This is done here to ease error handling as cleanup is not yet required.
			*/

			var (
				// presumably required by tar for zstd decompression — TODO confirm
				_     = lookPath("zstd")
				tar   = lookPath("tar")
				chmod = lookPath("chmod")
				rm    = lookPath("rm")
			)

			/*
				Extract package and set up for cleanup.
			*/

			var workDir *check.Absolute
			if p, err := os.MkdirTemp("", "hpkg.*"); err != nil {
				log.Printf("cannot create temporary directory: %v", err)
				return err
			} else if workDir, err = check.NewAbs(p); err != nil {
				log.Printf("invalid temporary directory: %v", err)
				return err
			}
			// cleanup removes workDir; extracted store paths are read-only,
			// so write permission is restored first.
			cleanup := func() {
				// should be faster than a native implementation
				mustRun(msg, chmod, "-R", "+w", workDir.String())
				mustRun(msg, rm, "-rf", workDir.String())
			}
			beforeRunFail.Store(&cleanup)

			mustRun(msg, tar, "-C", workDir.String(), "-xf", pkgPath)

			/*
				Parse bundle and app metadata, do pre-install checks.
			*/

			bundle := loadAppInfo(path.Join(workDir.String(), "bundle.json"), cleanup)
			pathSet := pathSetByApp(bundle.ID)

			// a is the currently installed metadata, or bundle itself for a
			// clean installation (compared by pointer below).
			a := bundle
			if s, err := os.Stat(pathSet.metaPath.String()); err != nil {
				if !os.IsNotExist(err) {
					cleanup()
					log.Printf("cannot access %q: %v", pathSet.metaPath, err)
					return err
				}
				// did not modify app, clean installation condition met later
			} else if s.IsDir() {
				cleanup()
				log.Printf("metadata path %q is not a file", pathSet.metaPath)
				return syscall.EBADMSG
			} else {
				a = loadAppInfo(pathSet.metaPath.String(), cleanup)
				if a.ID != bundle.ID {
					cleanup()
					log.Printf("app %q claims to have identifier %q",
						bundle.ID, a.ID)
					return syscall.EBADE
				}
				// sec: should verify credentials
			}

			if a != bundle {
				// do not try to re-install
				if a.NixGL == bundle.NixGL &&
					a.CurrentSystem == bundle.CurrentSystem &&
					a.Launcher == bundle.Launcher &&
					a.ActivationPackage == bundle.ActivationPackage {
					cleanup()
					log.Printf("package %q is identical to local application %q",
						pkgPath, a.ID)
					return errSuccess
				}

				// identity determines uid
				if a.Identity != bundle.Identity {
					cleanup()
					log.Printf("package %q identity %d differs from installed %d",
						pkgPath, bundle.Identity, a.Identity)
					return syscall.EBADE
				}

				// sec: should compare version string
				msg.Verbosef("installing application %q version %q over local %q",
					bundle.ID, bundle.Version, a.Version)
			} else {
				msg.Verbosef("application %q clean installation", bundle.ID)
				// sec: should install credentials
			}

			/*
				Setup steps for files owned by the target user.
			*/

			withCacheDir(ctx, msg, "install", []string{
				// export inner bundle path in the environment
				"export BUNDLE=" + hst.PrivateTmp + "/bundle",
				// replace inner /etc
				"mkdir -p etc",
				"chmod -R +w etc",
				"rm -rf etc",
				"cp -dRf $BUNDLE/etc etc",
				// replace inner /nix
				"mkdir -p nix",
				"chmod -R +w nix",
				"rm -rf nix",
				"cp -dRf /nix nix",
				// copy from binary cache
				"nix copy --offline --no-check-sigs --all --from file://$BUNDLE/res --to $PWD",
				// deduplicate nix store
				"nix store --offline --store $PWD optimise",
				// make cache directory world-readable for autoetc
				"chmod 0755 .",
			}, workDir, bundle, pathSet, flagDropShell, cleanup)

			if bundle.GPU {
				withCacheDir(ctx, msg, "mesa-wrappers", []string{
					// link nixGL mesa wrappers
					"mkdir -p nix/.nixGL",
					"ln -s " + bundle.Mesa + "/bin/nixGLIntel nix/.nixGL/nixGL",
					"ln -s " + bundle.Mesa + "/bin/nixVulkanIntel nix/.nixGL/nixVulkan",
				}, workDir, bundle, pathSet, false, cleanup)
			}

			/*
				Activate home-manager generation.
			*/

			withNixDaemon(ctx, msg, "activate", []string{
				// clean up broken links
				"mkdir -p .local/state/{nix,home-manager}",
				"chmod -R +w .local/state/{nix,home-manager}",
				"rm -rf .local/state/{nix,home-manager}",
				// run activation script
				bundle.ActivationPackage + "/activate",
			}, false, func(config *hst.Config) *hst.Config { return config },
				bundle, pathSet, flagDropShellActivate, cleanup)

			/*
				Installation complete. Write metadata to block re-installs or downgrades.
			*/

			// serialise metadata to ensure consistency
			// NOTE(review): when Encode fails the file handle is leaked
			// (never closed) before returning — confirm whether this is
			// acceptable for a process that exits shortly after.
			if f, err := os.OpenFile(pathSet.metaPath.String()+"~", os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644); err != nil {
				cleanup()
				log.Printf("cannot create metadata file: %v", err)
				return err
			} else if err = json.NewEncoder(f).Encode(bundle); err != nil {
				cleanup()
				log.Printf("cannot write metadata: %v", err)
				return err
			} else if err = f.Close(); err != nil {
				log.Printf("cannot close metadata file: %v", err)
				// not fatal
			}

			// atomically move the temporary "~" file into place
			if err := os.Rename(pathSet.metaPath.String()+"~", pathSet.metaPath.String()); err != nil {
				cleanup()
				log.Printf("cannot rename metadata file: %v", err)
				return err
			}

			cleanup()
			return errSuccess
		}).
			Flag(&flagDropShellActivate, "s", command.BoolFlag(false), "Drop to a shell on activation")
	}

	{
		var (
			flagDropShellNixGL bool
			flagAutoDrivers    bool
		)
		c.NewCommand("start", "Start an application", func(args []string) error {
			if len(args) < 1 {
				log.Println("invalid argument")
				return syscall.EINVAL
			}

			/*
				Parse app metadata.
			*/

			id := args[0]
			pathSet := pathSetByApp(id)
			a := loadAppInfo(pathSet.metaPath.String(), func() {})
			if a.ID != id {
				log.Printf("app %q claims to have identifier %q", id, a.ID)
				return syscall.EBADE
			}

			/*
				Prepare nixGL.
			*/

			if a.GPU && flagAutoDrivers {
				// build host-matching driver wrappers inside the container;
				// requires network, hence net=true below
				withNixDaemon(ctx, msg, "nix-gl", []string{
					"mkdir -p /nix/.nixGL/auto",
					"rm -rf /nix/.nixGL/auto",
					"export NIXPKGS_ALLOW_UNFREE=1",
					"nix build --impure " +
						"--out-link /nix/.nixGL/auto/opengl " +
						"--override-input nixpkgs path:/etc/nixpkgs " +
						"path:" + a.NixGL,
					"nix build --impure " +
						"--out-link /nix/.nixGL/auto/vulkan " +
						"--override-input nixpkgs path:/etc/nixpkgs " +
						"path:" + a.NixGL + "#nixVulkanNvidia",
				}, true, func(config *hst.Config) *hst.Config {
					// expose host device topology needed for driver detection
					config.Container.Filesystem = append(config.Container.Filesystem, []hst.FilesystemConfigJSON{
						{FilesystemConfig: &hst.FSBind{Source: fhs.AbsEtc.Append("resolv.conf"), Optional: true}},
						{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("block"), Optional: true}},
						{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("bus"), Optional: true}},
						{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("class"), Optional: true}},
						{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("dev"), Optional: true}},
						{FilesystemConfig: &hst.FSBind{Source: fhs.AbsSys.Append("devices"), Optional: true}},
					}...)
					appendGPUFilesystem(config)
					return config
				}, a, pathSet, flagDropShellNixGL, func() {})
			}

			/*
				Create app configuration.
			*/

			pathname := a.Launcher
			argv := make([]string, 1, len(args))
			if flagDropShell {
				// replace the launcher with an interactive shell
				pathname = pathShell
				argv[0] = bash
			} else {
				argv[0] = a.Launcher.String()
			}
			argv = append(argv, args[1:]...)
			config := a.toHst(pathSet, pathname, argv, flagDropShell)

			/*
				Expose GPU devices.
			*/

			if a.GPU {
				config.Container.Filesystem = append(config.Container.Filesystem,
					hst.FilesystemConfigJSON{FilesystemConfig: &hst.FSBind{Source: pathSet.nixPath.Append(".nixGL"), Target: hst.AbsPrivateTmp.Append("nixGL")}})
				appendGPUFilesystem(config)
			}

			/*
				Spawn app.
			*/

			mustRunApp(ctx, msg, config, func() {})
			return errSuccess
		}).
			Flag(&flagDropShellNixGL, "s", command.BoolFlag(false), "Drop to a shell on nixGL build").
			Flag(&flagAutoDrivers, "auto-drivers", command.BoolFlag(false), "Attempt automatic opengl driver detection")
	}

	c.MustParse(os.Args[1:], func(err error) {
		msg.Verbosef("command returned %v", err)
		if errors.Is(err, errSuccess) {
			msg.BeforeExit()
			os.Exit(0)
		}
	})
	log.Fatal("unreachable")
}
|
||||||
117
cmd/hpkg/paths.go
Normal file
117
cmd/hpkg/paths.go
Normal file
@@ -0,0 +1,117 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"strconv"
|
||||||
|
"sync/atomic"
|
||||||
|
|
||||||
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/container/fhs"
|
||||||
|
"hakurei.app/hst"
|
||||||
|
"hakurei.app/message"
|
||||||
|
)
|
||||||
|
|
||||||
|
// bash is the basename of the shell spawned inside containers.
const bash = "bash"

var (
	// dataHome is the root directory under which per-application state
	// is stored; resolved in init.
	dataHome *check.Absolute
)
|
||||||
|
|
||||||
|
// init resolves dataHome: the HAKUREI_DATA_HOME environment variable is
// used when it holds a valid absolute path, otherwise the default
// /var/lib/hakurei/<uid> is used.
func init() {
	// dataHome
	if a, err := check.NewAbs(os.Getenv("HAKUREI_DATA_HOME")); err == nil {
		dataHome = a
	} else {
		dataHome = fhs.AbsVarLib.Append("hakurei/" + strconv.Itoa(os.Getuid()))
	}
}
|
||||||
|
|
||||||
|
var (
	// pathBin is the absolute /bin path linked inside containers.
	pathBin = fhs.AbsRoot.Append("bin")

	// pathNix is the nix store root; pathNixStore is the store itself.
	pathNix      = check.MustAbs("/nix/")
	pathNixStore = pathNix.Append("store/")
	// pathCurrentSystem is the NixOS current system profile symlink,
	// pathSwBin its merged bin directory, pathShell its bash.
	pathCurrentSystem = fhs.AbsRun.Append("current-system")
	pathSwBin         = pathCurrentSystem.Append("sw/bin/")
	pathShell         = pathSwBin.Append(bash)

	// pathData is the in-container data root; application homes are
	// mounted under pathDataData.
	pathData     = check.MustAbs("/data")
	pathDataData = pathData.Append("data")
)
|
||||||
|
|
||||||
|
// lookPath resolves file in $PATH and returns its resolved path,
// terminating the program via log.Fatalf if it cannot be found. The
// returned value is therefore always usable by callers.
func lookPath(file string) string {
	p, err := exec.LookPath(file)
	if err != nil {
		// Fatalf exits; the previous trailing `return ""` was dead code.
		log.Fatalf("%s: command not found", file)
	}
	return p
}
|
||||||
|
|
||||||
|
// beforeRunFail optionally holds a cleanup callback that mustRun invokes
// (at most once, via Swap) before terminating the process on spawn failure.
var beforeRunFail = new(atomic.Pointer[func()])
|
||||||
|
|
||||||
|
func mustRun(msg message.Msg, name string, arg ...string) {
|
||||||
|
msg.Verbosef("spawning process: %q %q", name, arg)
|
||||||
|
cmd := exec.Command(name, arg...)
|
||||||
|
cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
if f := beforeRunFail.Swap(nil); f != nil {
|
||||||
|
(*f)()
|
||||||
|
}
|
||||||
|
log.Fatalf("%s: %v", name, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// appPathSet groups the filesystem locations associated with a single
// installed application identity under dataHome.
type appPathSet struct {
	// ${dataHome}/${id}
	baseDir *check.Absolute
	// ${baseDir}/app — serialized application metadata
	metaPath *check.Absolute
	// ${baseDir}/files — the application's home directory
	homeDir *check.Absolute
	// ${baseDir}/cache
	cacheDir *check.Absolute
	// ${baseDir}/cache/nix — the application's private nix store
	nixPath *check.Absolute
}
|
||||||
|
|
||||||
|
func pathSetByApp(id string) *appPathSet {
|
||||||
|
pathSet := new(appPathSet)
|
||||||
|
pathSet.baseDir = dataHome.Append(id)
|
||||||
|
pathSet.metaPath = pathSet.baseDir.Append("app")
|
||||||
|
pathSet.homeDir = pathSet.baseDir.Append("files")
|
||||||
|
pathSet.cacheDir = pathSet.baseDir.Append("cache")
|
||||||
|
pathSet.nixPath = pathSet.cacheDir.Append("nix")
|
||||||
|
return pathSet
|
||||||
|
}
|
||||||
|
|
||||||
|
func appendGPUFilesystem(config *hst.Config) {
|
||||||
|
config.Container.Filesystem = append(config.Container.Filesystem, []hst.FilesystemConfigJSON{
|
||||||
|
// flatpak commit 763a686d874dd668f0236f911de00b80766ffe79
|
||||||
|
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("dri"), Device: true, Optional: true}},
|
||||||
|
// mali
|
||||||
|
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("mali"), Device: true, Optional: true}},
|
||||||
|
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("mali0"), Device: true, Optional: true}},
|
||||||
|
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("umplock"), Device: true, Optional: true}},
|
||||||
|
// nvidia
|
||||||
|
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidiactl"), Device: true, Optional: true}},
|
||||||
|
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia-modeset"), Device: true, Optional: true}},
|
||||||
|
// nvidia OpenCL/CUDA
|
||||||
|
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia-uvm"), Device: true, Optional: true}},
|
||||||
|
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia-uvm-tools"), Device: true, Optional: true}},
|
||||||
|
|
||||||
|
// flatpak commit d2dff2875bb3b7e2cd92d8204088d743fd07f3ff
|
||||||
|
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia0"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia1"), Device: true, Optional: true}},
|
||||||
|
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia2"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia3"), Device: true, Optional: true}},
|
||||||
|
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia4"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia5"), Device: true, Optional: true}},
|
||||||
|
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia6"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia7"), Device: true, Optional: true}},
|
||||||
|
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia8"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia9"), Device: true, Optional: true}},
|
||||||
|
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia10"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia11"), Device: true, Optional: true}},
|
||||||
|
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia12"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia13"), Device: true, Optional: true}},
|
||||||
|
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia14"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia15"), Device: true, Optional: true}},
|
||||||
|
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia16"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia17"), Device: true, Optional: true}},
|
||||||
|
{FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia18"), Device: true, Optional: true}}, {FilesystemConfig: &hst.FSBind{Source: fhs.AbsDev.Append("nvidia19"), Device: true, Optional: true}},
|
||||||
|
}...)
|
||||||
|
}
|
||||||
61
cmd/hpkg/proc.go
Normal file
61
cmd/hpkg/proc.go
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
|
||||||
|
"hakurei.app/hst"
|
||||||
|
"hakurei.app/internal/info"
|
||||||
|
"hakurei.app/message"
|
||||||
|
)
|
||||||
|
|
||||||
|
// hakureiPathVal is the path to the hakurei executable, resolved once at
// startup; MustHakureiPath terminates the program if it is unavailable.
var hakureiPathVal = info.MustHakureiPath().String()
|
||||||
|
|
||||||
|
// mustRunApp spawns hakurei with the "app 3" subcommand, streaming config
// as JSON over a pipe passed as fd 3, and terminates hpkg on any failure.
// beforeFail is invoked before every fatal exit path. If hakurei exits
// with a nonzero status, hpkg exits with the same status.
func mustRunApp(ctx context.Context, msg message.Msg, config *hst.Config, beforeFail func()) {
	var (
		cmd *exec.Cmd
		st  io.WriteCloser
	)

	if r, w, err := os.Pipe(); err != nil {
		beforeFail()
		log.Fatalf("cannot pipe: %v", err)
	} else {
		// propagate verbosity to the child
		if msg.IsVerbose() {
			cmd = exec.CommandContext(ctx, hakureiPathVal, "-v", "app", "3")
		} else {
			cmd = exec.CommandContext(ctx, hakureiPathVal, "app", "3")
		}
		cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
		// read end becomes fd 3 in the child
		cmd.ExtraFiles = []*os.File{r}
		st = w
	}

	// send the configuration concurrently so a large payload cannot
	// deadlock against the child's startup
	// NOTE(review): the write end is never closed after Encode; this
	// presumably relies on the child decoding a single JSON value —
	// confirm against hakurei's reader.
	go func() {
		if err := json.NewEncoder(st).Encode(config); err != nil {
			beforeFail()
			log.Fatalf("cannot send configuration: %v", err)
		}
	}()

	if err := cmd.Start(); err != nil {
		beforeFail()
		log.Fatalf("cannot start hakurei: %v", err)
	}
	if err := cmd.Wait(); err != nil {
		var exitError *exec.ExitError
		if errors.As(err, &exitError) {
			// mirror the child's exit status
			beforeFail()
			msg.BeforeExit()
			os.Exit(exitError.ExitCode())
		} else {
			beforeFail()
			log.Fatalf("cannot wait: %v", err)
		}
	}
}
|
||||||
62
cmd/hpkg/test/configuration.nix
Normal file
62
cmd/hpkg/test/configuration.nix
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
# NixOS VM configuration for the hpkg integration test: a single
# auto-login user running Sway, with hakurei enabled.
{ pkgs, ... }:
{
  users.users = {
    alice = {
      isNormalUser = true;
      description = "Alice Foobar";
      password = "foobar";
      uid = 1000;
    };
  };

  home-manager.users.alice.home.stateVersion = "24.11";

  # Automatically login on tty1 as a normal user:
  services.getty.autologinUser = "alice";

  environment = {
    variables = {
      SWAYSOCK = "/tmp/sway-ipc.sock";
      WLR_RENDERER = "pixman";
    };
  };

  # Automatically configure and start Sway when logging in on tty1:
  programs.bash.loginShellInit = ''
    if [ "$(tty)" = "/dev/tty1" ]; then
      set -e

      mkdir -p ~/.config/sway
      (sed s/Mod4/Mod1/ /etc/sway/config &&
      echo 'output * bg ${pkgs.nixos-artwork.wallpapers.simple-light-gray.gnomeFilePath} fill' &&
      echo 'output Virtual-1 res 1680x1050') > ~/.config/sway/config

      sway --validate
      systemd-cat --identifier=session sway && touch /tmp/sway-exit-ok
    fi
  '';

  programs.sway.enable = true;

  virtualisation = {
    diskSize = 6 * 1024;

    qemu.options = [
      # Need to switch to a different GPU driver than the default one (-vga std) so that Sway can launch:
      "-vga none -device virtio-gpu-pci"

      # Increase zstd performance:
      "-smp 8"
    ];
  };

  environment.hakurei = {
    enable = true;
    stateDir = "/var/lib/hakurei";
    users.alice = 0;

    extraHomeConfig = {
      home.stateVersion = "23.05";
    };
  };
}
|
||||||
34
cmd/hpkg/test/default.nix
Normal file
34
cmd/hpkg/test/default.nix
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
# NixOS VM test driver for hpkg; builds the foot package bundle and
# exposes it inside the machine as /etc/foot.pkg.
{
  testers,
  callPackage,

  system,
  self,
}:
let
  buildPackage = self.buildPackage.${system};
in
testers.nixosTest {
  name = "hpkg";
  nodes.machine = {
    environment.etc = {
      "foot.pkg".source = callPackage ./foot.nix { inherit buildPackage; };
    };

    imports = [
      ./configuration.nix

      self.nixosModules.hakurei
      self.inputs.home-manager.nixosModules.home-manager
    ];
  };

  # adapted from nixos sway integration tests

  # testScriptWithTypes:49: error: Cannot call function of unknown type
  #           (machine.succeed if succeed else machine.execute)(
  #           ^
  # Found 1 error in 1 file (checked 1 source file)
  skipTypeCheck = true;
  testScript = builtins.readFile ./test.py;
}
|
||||||
48
cmd/hpkg/test/foot.nix
Normal file
48
cmd/hpkg/test/foot.nix
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
# hpkg bundle definition for the foot terminal emulator, used by the
# integration test.
{
  lib,
  buildPackage,
  foot,
  wayland-utils,
  inconsolata,
}:

buildPackage {
  name = "foot";
  inherit (foot) version;

  identity = 2;
  id = "org.codeberg.dnkl.foot";

  modules = [
    {
      home.packages = [
        foot

        # For wayland-info:
        wayland-utils
      ];
    }
  ];

  nixosModules = [
    {
      # To help with OCR:
      environment.etc."xdg/foot/foot.ini".text = lib.generators.toINI { } {
        main = {
          font = "inconsolata:size=14";
        };
        colors = rec {
          foreground = "000000";
          background = "ffffff";
          regular2 = foreground;
        };
      };

      fonts.packages = [ inconsolata ];
    }
  ];

  # entry point executed when the installed app is started
  script = ''
    exec foot "$@"
  '';
}
|
||||||
110
cmd/hpkg/test/test.py
Normal file
110
cmd/hpkg/test/test.py
Normal file
@@ -0,0 +1,110 @@
|
|||||||
|
import json
|
||||||
|
import shlex
|
||||||
|
|
||||||
|
# Shorthand for safely quoting shell arguments.
q = shlex.quote
# Keys under which sway's get_tree output nests child nodes.
NODE_GROUPS = ["nodes", "floating_nodes"]
|
||||||
|
|
||||||
|
|
||||||
|
def swaymsg(command: str = "", succeed=True, type="command"):
    """Run swaymsg as alice inside the test VM and return the parsed JSON reply.

    command: IPC payload; may be empty for query message types.
    succeed: when True use machine.succeed (raises on failure); otherwise
        machine.execute, and a failed call returns None.
    type: swaymsg -t message type, e.g. "command" or "get_tree".
    """
    assert command != "" or type != "command", "Must specify command or type"
    shell = q(f"swaymsg -t {q(type)} -- {q(command)}")
    with machine.nested(
        f"sending swaymsg {shell!r}" + " (allowed to fail)" * (not succeed)
    ):
        ret = (machine.succeed if succeed else machine.execute)(
            f"su - alice -c {shell}"
        )

    # execute also returns a status code, but disregard.
    if not succeed:
        _, ret = ret

    if not succeed and not ret:
        return None

    parsed = json.loads(ret)
    return parsed
|
||||||
|
|
||||||
|
|
||||||
|
def walk(tree):
    """Yield tree, then every node nested under it via NODE_GROUPS (preorder)."""
    yield tree
    children = (child for group in NODE_GROUPS for child in tree.get(group, []))
    for child in children:
        yield from walk(child)
|
||||||
|
|
||||||
|
|
||||||
|
def wait_for_window(pattern):
    """Block (via retry) until a sway window whose name contains pattern exists."""
    def func(last_chance):
        # Names of every window currently in the sway tree.
        nodes = (node["name"] for node in walk(swaymsg(type="get_tree")))

        if last_chance:
            nodes = list(nodes)
            machine.log(f"Last call! Current list of windows: {nodes}")

        return any(pattern in name for name in nodes)

    retry(func)
|
||||||
|
|
||||||
|
|
||||||
|
def collect_state_ui(name):
    """Capture hakurei ps output (text and JSON) and a screenshot, labelled name."""
    swaymsg(f"exec hakurei ps > '/tmp/{name}.ps'")
    machine.copy_from_vm(f"/tmp/{name}.ps", "")
    swaymsg(f"exec hakurei --json ps > '/tmp/{name}.json'")
    machine.copy_from_vm(f"/tmp/{name}.json", "")
    machine.screenshot(name)
|
||||||
|
|
||||||
|
|
||||||
|
def check_state(name, enablements):
    """Assert exactly one hakurei instance is running app name with the given enablements."""
    instances = json.loads(machine.succeed("sudo -u alice -i XDG_RUNTIME_DIR=/run/user/1000 hakurei --json ps"))
    if len(instances) != 1:
        raise Exception(f"unexpected state length {len(instances)}")
    instance = instances[0]

    # The container argv must be a single launcher script in the nix store
    # whose name embeds the app name.
    if len(instance['container']['args']) != 1 or not (instance['container']['args'][0].startswith("/nix/store/")) or f"hakurei-{name}-" not in (instance['container']['args'][0]):
        raise Exception(f"unexpected args {instance['container']['args']}")

    if instance['enablements'] != enablements:
        raise Exception(f"unexpected enablements {instance['enablements']}")
|
||||||
|
|
||||||
|
|
||||||
|
# Integration test body: boot the VM, install the foot bundle via hpkg,
# start it under hakurei, and verify enablements and ACL lifecycle.
start_all()
machine.wait_for_unit("multi-user.target")

# To check hakurei's version:
print(machine.succeed("sudo -u alice -i hakurei version"))

# Wait for Sway to complete startup:
machine.wait_for_file("/run/user/1000/wayland-1")
machine.wait_for_file("/tmp/sway-ipc.sock")

# Prepare hpkg directory:
machine.succeed("install -dm 0700 -o alice -g users /var/lib/hakurei/1000")

# Install hpkg app:
swaymsg("exec hpkg -v install /etc/foot.pkg && touch /tmp/hpkg-install-ok")
machine.wait_for_file("/tmp/hpkg-install-ok")

# Start app (foot) with Wayland enablement:
swaymsg("exec hpkg -v start org.codeberg.dnkl.foot")
wait_for_window("hakurei@machine-foot")
machine.send_chars("clear; wayland-info && touch /tmp/success-client\n")
machine.wait_for_file("/tmp/hakurei.0/tmpdir/2/success-client")
collect_state_ui("app_wayland")
check_state("foot", {"wayland": True, "dbus": True, "pipewire": True})
# Verify acl on XDG_RUNTIME_DIR:
print(machine.succeed("getfacl --absolute-names --omit-header --numeric /tmp/hakurei.0/runtime | grep 10002"))
machine.send_chars("exit\n")
machine.wait_until_fails("pgrep foot")
# Verify acl cleanup on XDG_RUNTIME_DIR:
machine.wait_until_fails("getfacl --absolute-names --omit-header --numeric /tmp/hakurei.0/runtime | grep 10002")

# Exit Sway and verify process exit status 0:
swaymsg("exit", succeed=False)
machine.wait_for_file("/tmp/sway-exit-ok")

# Print hakurei share and rundir contents:
print(machine.succeed("find /tmp/hakurei.0 "
                      + "-path '/tmp/hakurei.0/runtime/*/*' -prune -o "
                      + "-path '/tmp/hakurei.0/tmpdir/*/*' -prune -o "
                      + "-print"))
print(machine.fail("ls /run/user/1000/hakurei"))
|
||||||
130
cmd/hpkg/with.go
Normal file
130
cmd/hpkg/with.go
Normal file
@@ -0,0 +1,130 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/container/fhs"
|
||||||
|
"hakurei.app/hst"
|
||||||
|
"hakurei.app/message"
|
||||||
|
)
|
||||||
|
|
||||||
|
// withNixDaemon runs command inside the application's persistent
// container with a transient nix-daemon. The daemon is started in the
// background, the shell waits for its socket, runs command joined with
// "&&", then terminates the daemon. updateConfig may mutate the container
// configuration before launch; net enables host networking (required for
// builds that fetch); dropShell replaces the action with an interactive
// shell; beforeFail runs before any fatal exit.
func withNixDaemon(
	ctx context.Context,
	msg message.Msg,
	action string, command []string, net bool, updateConfig func(config *hst.Config) *hst.Config,
	app *appInfo, pathSet *appPathSet, dropShell bool, beforeFail func(),
) {
	flags := hst.FMultiarch | hst.FUserns // nix sandbox requires userns
	if net {
		flags |= hst.FHostNet
	}
	if dropShell {
		flags |= hst.FTty
	}

	mustRunAppDropShell(ctx, msg, updateConfig(&hst.Config{
		ID: app.ID,

		ExtraPerms: []hst.ExtraPermConfig{
			{Path: dataHome, Execute: true},
			{Ensure: true, Path: pathSet.baseDir, Read: true, Write: true, Execute: true},
		},

		Identity: app.Identity,

		Container: &hst.ContainerConfig{
			Hostname: formatHostname(app.Name) + "-" + action,

			Filesystem: []hst.FilesystemConfigJSON{
				// application's private /etc and writable nix store
				{FilesystemConfig: &hst.FSBind{Target: fhs.AbsEtc, Source: pathSet.cacheDir.Append("etc"), Special: true}},
				{FilesystemConfig: &hst.FSBind{Source: pathSet.nixPath, Target: pathNix, Write: true}},
				{FilesystemConfig: &hst.FSLink{Target: pathCurrentSystem, Linkname: app.CurrentSystem.String()}},
				{FilesystemConfig: &hst.FSLink{Target: pathBin, Linkname: pathSwBin.String()}},
				{FilesystemConfig: &hst.FSLink{Target: fhs.AbsUsrBin, Linkname: pathSwBin.String()}},
				{FilesystemConfig: &hst.FSBind{Target: pathDataData.Append(app.ID), Source: pathSet.homeDir, Write: true, Ensure: true}},
			},

			Username: "hakurei",
			Shell:    pathShell,
			Home:     pathDataData.Append(app.ID),

			Path: pathShell,
			Args: []string{bash, "-lc", "rm -f /nix/var/nix/daemon-socket/socket && " +
				// start nix-daemon
				"nix-daemon --store / & " +
				// wait for socket to appear
				"(while [ ! -S /nix/var/nix/daemon-socket/socket ]; do sleep 0.01; done) && " +
				// create directory so nix stops complaining
				"mkdir -p /nix/var/nix/profiles/per-user/root/channels && " +
				strings.Join(command, " && ") +
				// terminate nix-daemon
				" && pkill nix-daemon",
			},

			Flags: flags,
		},
	}), dropShell, beforeFail)
}
|
||||||
|
|
||||||
|
// withCacheDir runs command inside a container whose home is the
// application's cache directory, with the extracted bundle (workDir)
// mounted read-only at the private tmp "bundle" path and its etc/nix
// contents overlaid. Used for installation steps executed as the target
// user. dropShell replaces the action with an interactive shell;
// beforeFail runs before any fatal exit.
func withCacheDir(
	ctx context.Context,
	msg message.Msg,
	action string, command []string, workDir *check.Absolute,
	app *appInfo, pathSet *appPathSet, dropShell bool, beforeFail func(),
) {
	flags := hst.FMultiarch
	if dropShell {
		flags |= hst.FTty
	}

	mustRunAppDropShell(ctx, msg, &hst.Config{
		ID: app.ID,

		ExtraPerms: []hst.ExtraPermConfig{
			{Path: dataHome, Execute: true},
			{Ensure: true, Path: pathSet.baseDir, Read: true, Write: true, Execute: true},
			{Path: workDir, Execute: true},
		},

		Identity: app.Identity,

		Container: &hst.ContainerConfig{
			Hostname: formatHostname(app.Name) + "-" + action,

			Filesystem: []hst.FilesystemConfigJSON{
				// bundle-provided /etc and /nix
				{FilesystemConfig: &hst.FSBind{Target: fhs.AbsEtc, Source: workDir.Append(fhs.Etc), Special: true}},
				{FilesystemConfig: &hst.FSBind{Source: workDir.Append("nix"), Target: pathNix}},
				{FilesystemConfig: &hst.FSLink{Target: pathCurrentSystem, Linkname: app.CurrentSystem.String()}},
				{FilesystemConfig: &hst.FSLink{Target: pathBin, Linkname: pathSwBin.String()}},
				{FilesystemConfig: &hst.FSLink{Target: fhs.AbsUsrBin, Linkname: pathSwBin.String()}},
				{FilesystemConfig: &hst.FSBind{Source: workDir, Target: hst.AbsPrivateTmp.Append("bundle")}},
				// writable per-app cache doubles as $HOME for this step
				{FilesystemConfig: &hst.FSBind{Target: pathDataData.Append(app.ID, "cache"), Source: pathSet.cacheDir, Write: true, Ensure: true}},
			},

			Username: "nixos",
			Shell:    pathShell,
			Home:     pathDataData.Append(app.ID, "cache"),

			Path: pathShell,
			Args: []string{bash, "-lc", strings.Join(command, " && ")},

			Flags: flags,
		},
	}, dropShell, beforeFail)
}
|
||||||
|
|
||||||
|
func mustRunAppDropShell(ctx context.Context, msg message.Msg, config *hst.Config, dropShell bool, beforeFail func()) {
|
||||||
|
if dropShell {
|
||||||
|
if config.Container != nil {
|
||||||
|
config.Container.Args = []string{bash, "-l"}
|
||||||
|
}
|
||||||
|
mustRunApp(ctx, msg, config, beforeFail)
|
||||||
|
beforeFail()
|
||||||
|
msg.BeforeExit()
|
||||||
|
os.Exit(0)
|
||||||
|
}
|
||||||
|
mustRunApp(ctx, msg, config, beforeFail)
|
||||||
|
}
|
||||||
76
cmd/irdump/main.go
Normal file
76
cmd/irdump/main.go
Normal file
@@ -0,0 +1,76 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"hakurei.app/command"
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
log.SetFlags(0)
|
||||||
|
log.SetPrefix("irdump: ")
|
||||||
|
|
||||||
|
var (
|
||||||
|
flagOutput string
|
||||||
|
flagReal bool
|
||||||
|
flagHeader bool
|
||||||
|
flagForce bool
|
||||||
|
flagRaw bool
|
||||||
|
)
|
||||||
|
c := command.New(os.Stderr, log.Printf, "irdump", func(args []string) (err error) {
|
||||||
|
var input *os.File
|
||||||
|
if len(args) != 1 {
|
||||||
|
return errors.New("irdump requires 1 argument")
|
||||||
|
}
|
||||||
|
if input, err = os.Open(args[0]); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer input.Close()
|
||||||
|
|
||||||
|
var output *os.File
|
||||||
|
if flagOutput == "" {
|
||||||
|
output = os.Stdout
|
||||||
|
} else {
|
||||||
|
defer output.Close()
|
||||||
|
if output, err = os.Create(flagOutput); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var out string
|
||||||
|
if out, err = pkg.Disassemble(input, flagReal, flagHeader, flagForce, flagRaw); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if _, err = output.WriteString(out); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}).Flag(
|
||||||
|
&flagOutput,
|
||||||
|
"o", command.StringFlag(""),
|
||||||
|
"Output file for asm (leave empty for stdout)",
|
||||||
|
).Flag(
|
||||||
|
&flagReal,
|
||||||
|
"r", command.BoolFlag(false),
|
||||||
|
"skip label generation; idents print real value",
|
||||||
|
).Flag(
|
||||||
|
&flagHeader,
|
||||||
|
"H", command.BoolFlag(false),
|
||||||
|
"display artifact headers",
|
||||||
|
).Flag(
|
||||||
|
&flagForce,
|
||||||
|
"f", command.BoolFlag(false),
|
||||||
|
"force display (skip validations)",
|
||||||
|
).Flag(
|
||||||
|
&flagRaw,
|
||||||
|
"R", command.BoolFlag(false),
|
||||||
|
"don't format output",
|
||||||
|
)
|
||||||
|
|
||||||
|
c.MustParse(os.Args[1:], func(err error) {
|
||||||
|
log.Fatal(err)
|
||||||
|
})
|
||||||
|
}
|
||||||
237
cmd/mbf/main.go
237
cmd/mbf/main.go
@@ -4,22 +4,17 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
|
||||||
"unique"
|
"unique"
|
||||||
|
|
||||||
"hakurei.app/command"
|
"hakurei.app/command"
|
||||||
"hakurei.app/container"
|
"hakurei.app/container"
|
||||||
"hakurei.app/container/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/container/fhs"
|
|
||||||
"hakurei.app/container/seccomp"
|
|
||||||
"hakurei.app/container/std"
|
|
||||||
"hakurei.app/internal/pkg"
|
"hakurei.app/internal/pkg"
|
||||||
"hakurei.app/internal/rosa"
|
"hakurei.app/internal/rosa"
|
||||||
"hakurei.app/message"
|
"hakurei.app/message"
|
||||||
@@ -56,16 +51,10 @@ func main() {
|
|||||||
flagCures int
|
flagCures int
|
||||||
flagBase string
|
flagBase string
|
||||||
flagTShift int
|
flagTShift int
|
||||||
flagIdle bool
|
|
||||||
)
|
)
|
||||||
c := command.New(os.Stderr, log.Printf, "mbf", func([]string) (err error) {
|
c := command.New(os.Stderr, log.Printf, "mbf", func([]string) (err error) {
|
||||||
msg.SwapVerbose(!flagQuiet)
|
msg.SwapVerbose(!flagQuiet)
|
||||||
|
|
||||||
flagBase = os.ExpandEnv(flagBase)
|
|
||||||
if flagBase == "" {
|
|
||||||
flagBase = "cache"
|
|
||||||
}
|
|
||||||
|
|
||||||
var base *check.Absolute
|
var base *check.Absolute
|
||||||
if flagBase, err = filepath.Abs(flagBase); err != nil {
|
if flagBase, err = filepath.Abs(flagBase); err != nil {
|
||||||
return
|
return
|
||||||
@@ -81,11 +70,6 @@ func main() {
|
|||||||
cache.SetThreshold(1 << flagTShift)
|
cache.SetThreshold(1 << flagTShift)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if flagIdle {
|
|
||||||
pkg.SchedPolicy = container.SCHED_IDLE
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
return
|
||||||
}).Flag(
|
}).Flag(
|
||||||
&flagQuiet,
|
&flagQuiet,
|
||||||
@@ -97,16 +81,12 @@ func main() {
|
|||||||
"Maximum number of dependencies to cure at any given time",
|
"Maximum number of dependencies to cure at any given time",
|
||||||
).Flag(
|
).Flag(
|
||||||
&flagBase,
|
&flagBase,
|
||||||
"d", command.StringFlag("$MBF_CACHE_DIR"),
|
"d", command.StringFlag("cache"),
|
||||||
"Directory to store cured artifacts",
|
"Directory to store cured artifacts",
|
||||||
).Flag(
|
).Flag(
|
||||||
&flagTShift,
|
&flagTShift,
|
||||||
"tshift", command.IntFlag(-1),
|
"tshift", command.IntFlag(-1),
|
||||||
"Dependency graph size exponent, to the power of 2",
|
"Dependency graph size exponent, to the power of 2",
|
||||||
).Flag(
|
|
||||||
&flagIdle,
|
|
||||||
"sched-idle", command.BoolFlag(false),
|
|
||||||
"Set SCHED_IDLE scheduling policy",
|
|
||||||
)
|
)
|
||||||
|
|
||||||
{
|
{
|
||||||
@@ -129,33 +109,13 @@ func main() {
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
|
||||||
var (
|
|
||||||
flagGentoo string
|
|
||||||
flagChecksum string
|
|
||||||
|
|
||||||
flagStage0 bool
|
|
||||||
)
|
|
||||||
c.NewCommand(
|
c.NewCommand(
|
||||||
"stage3",
|
"stage3",
|
||||||
"Check for toolchain 3-stage non-determinism",
|
"Check for toolchain 3-stage non-determinism",
|
||||||
func(args []string) (err error) {
|
func(args []string) (err error) {
|
||||||
t := rosa.Std
|
_, _, _, stage1 := (rosa.Std - 2).NewLLVM()
|
||||||
if flagGentoo != "" {
|
_, _, _, stage2 := (rosa.Std - 1).NewLLVM()
|
||||||
t -= 3 // magic number to discourage misuse
|
_, _, _, stage3 := rosa.Std.NewLLVM()
|
||||||
|
|
||||||
var checksum pkg.Checksum
|
|
||||||
if len(flagChecksum) != 0 {
|
|
||||||
if err = pkg.Decode(&checksum, flagChecksum); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
rosa.SetGentooStage3(flagGentoo, checksum)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, _, _, stage1 := (t - 2).NewLLVM()
|
|
||||||
_, _, _, stage2 := (t - 1).NewLLVM()
|
|
||||||
_, _, _, stage3 := t.NewLLVM()
|
|
||||||
var (
|
var (
|
||||||
pathname *check.Absolute
|
pathname *check.Absolute
|
||||||
checksum [2]unique.Handle[pkg.Checksum]
|
checksum [2]unique.Handle[pkg.Checksum]
|
||||||
@@ -186,35 +146,9 @@ func main() {
|
|||||||
"("+pkg.Encode(checksum[0].Value())+")",
|
"("+pkg.Encode(checksum[0].Value())+")",
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
if flagStage0 {
|
|
||||||
if pathname, _, err = cache.Cure(
|
|
||||||
t.Load(rosa.Stage0),
|
|
||||||
); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
log.Println(pathname)
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
return
|
||||||
},
|
},
|
||||||
).
|
|
||||||
Flag(
|
|
||||||
&flagGentoo,
|
|
||||||
"gentoo", command.StringFlag(""),
|
|
||||||
"Bootstrap from a Gentoo stage3 tarball",
|
|
||||||
).
|
|
||||||
Flag(
|
|
||||||
&flagChecksum,
|
|
||||||
"checksum", command.StringFlag(""),
|
|
||||||
"Checksum of Gentoo stage3 tarball",
|
|
||||||
).
|
|
||||||
Flag(
|
|
||||||
&flagStage0,
|
|
||||||
"stage0", command.BoolFlag(false),
|
|
||||||
"Create bootstrap stage0 tarball",
|
|
||||||
)
|
)
|
||||||
}
|
|
||||||
|
|
||||||
{
|
{
|
||||||
var (
|
var (
|
||||||
@@ -228,7 +162,7 @@ func main() {
|
|||||||
return errors.New("cure requires 1 argument")
|
return errors.New("cure requires 1 argument")
|
||||||
}
|
}
|
||||||
if p, ok := rosa.ResolveName(args[0]); !ok {
|
if p, ok := rosa.ResolveName(args[0]); !ok {
|
||||||
return fmt.Errorf("unknown artifact %q", args[0])
|
return fmt.Errorf("unsupported artifact %q", args[0])
|
||||||
} else if flagDump == "" {
|
} else if flagDump == "" {
|
||||||
pathname, _, err := cache.Cure(rosa.Std.Load(p))
|
pathname, _, err := cache.Cure(rosa.Std.Load(p))
|
||||||
if err == nil {
|
if err == nil {
|
||||||
@@ -261,167 +195,6 @@ func main() {
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
c.NewCommand(
|
|
||||||
"status",
|
|
||||||
"Display the status file of an artifact",
|
|
||||||
func(args []string) error {
|
|
||||||
if len(args) != 1 {
|
|
||||||
return errors.New("status requires 1 argument")
|
|
||||||
}
|
|
||||||
if p, ok := rosa.ResolveName(args[0]); !ok {
|
|
||||||
return fmt.Errorf("unknown artifact %q", args[0])
|
|
||||||
} else {
|
|
||||||
r, err := cache.OpenStatus(rosa.Std.Load(p))
|
|
||||||
if err != nil {
|
|
||||||
if errors.Is(err, os.ErrNotExist) {
|
|
||||||
return errors.New(args[0] + " was never cured")
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = io.Copy(os.Stdout, r)
|
|
||||||
return errors.Join(err, r.Close())
|
|
||||||
}
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
{
|
|
||||||
var (
|
|
||||||
flagNet bool
|
|
||||||
flagSession bool
|
|
||||||
|
|
||||||
flagWithToolchain bool
|
|
||||||
)
|
|
||||||
c.NewCommand(
|
|
||||||
"shell",
|
|
||||||
"Interactive shell in the specified Rosa OS environment",
|
|
||||||
func(args []string) error {
|
|
||||||
root := make([]pkg.Artifact, 0, 6+len(args))
|
|
||||||
for _, arg := range args {
|
|
||||||
p, ok := rosa.ResolveName(arg)
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("unknown artifact %q", arg)
|
|
||||||
}
|
|
||||||
root = append(root, rosa.Std.Load(p))
|
|
||||||
}
|
|
||||||
|
|
||||||
if flagWithToolchain {
|
|
||||||
musl, compilerRT, runtimes, clang := rosa.Std.NewLLVM()
|
|
||||||
root = append(root, musl, compilerRT, runtimes, clang)
|
|
||||||
} else {
|
|
||||||
root = append(root, rosa.Std.Load(rosa.Musl))
|
|
||||||
}
|
|
||||||
root = append(root,
|
|
||||||
rosa.Std.Load(rosa.Mksh),
|
|
||||||
rosa.Std.Load(rosa.Toybox),
|
|
||||||
)
|
|
||||||
|
|
||||||
type cureRes struct {
|
|
||||||
pathname *check.Absolute
|
|
||||||
checksum unique.Handle[pkg.Checksum]
|
|
||||||
}
|
|
||||||
cured := make(map[pkg.Artifact]cureRes)
|
|
||||||
for _, a := range root {
|
|
||||||
pathname, checksum, err := cache.Cure(a)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
cured[a] = cureRes{pathname, checksum}
|
|
||||||
}
|
|
||||||
|
|
||||||
layers := pkg.PromoteLayers(root, func(a pkg.Artifact) (
|
|
||||||
*check.Absolute,
|
|
||||||
unique.Handle[pkg.Checksum],
|
|
||||||
) {
|
|
||||||
res := cured[a]
|
|
||||||
return res.pathname, res.checksum
|
|
||||||
}, func(i int, d pkg.Artifact) {
|
|
||||||
r := pkg.Encode(cache.Ident(d).Value())
|
|
||||||
if s, ok := d.(fmt.Stringer); ok {
|
|
||||||
if name := s.String(); name != "" {
|
|
||||||
r += "-" + name
|
|
||||||
}
|
|
||||||
}
|
|
||||||
msg.Verbosef("promoted layer %d as %s", i, r)
|
|
||||||
})
|
|
||||||
|
|
||||||
z := container.New(ctx, msg)
|
|
||||||
z.WaitDelay = 3 * time.Second
|
|
||||||
z.SeccompPresets = pkg.SeccompPresets
|
|
||||||
z.SeccompFlags |= seccomp.AllowMultiarch
|
|
||||||
z.ParentPerm = 0700
|
|
||||||
z.HostNet = flagNet
|
|
||||||
z.RetainSession = flagSession
|
|
||||||
z.Hostname = "localhost"
|
|
||||||
z.Uid, z.Gid = (1<<10)-1, (1<<10)-1
|
|
||||||
z.Stdin, z.Stdout, z.Stderr = os.Stdin, os.Stdout, os.Stderr
|
|
||||||
|
|
||||||
var tempdir *check.Absolute
|
|
||||||
if s, err := filepath.Abs(os.TempDir()); err != nil {
|
|
||||||
return err
|
|
||||||
} else if tempdir, err = check.NewAbs(s); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
z.Dir = fhs.AbsRoot
|
|
||||||
z.Env = []string{
|
|
||||||
"SHELL=/system/bin/mksh",
|
|
||||||
"PATH=/system/bin",
|
|
||||||
"HOME=/",
|
|
||||||
}
|
|
||||||
z.Path = rosa.AbsSystem.Append("bin", "mksh")
|
|
||||||
z.Args = []string{"mksh"}
|
|
||||||
z.
|
|
||||||
OverlayEphemeral(fhs.AbsRoot, layers...).
|
|
||||||
Place(
|
|
||||||
fhs.AbsEtc.Append("hosts"),
|
|
||||||
[]byte("127.0.0.1 localhost\n"),
|
|
||||||
).
|
|
||||||
Place(
|
|
||||||
fhs.AbsEtc.Append("passwd"),
|
|
||||||
[]byte("media_rw:x:1023:1023::/:/system/bin/sh\n"+
|
|
||||||
"nobody:x:65534:65534::/proc/nonexistent:/system/bin/false\n"),
|
|
||||||
).
|
|
||||||
Place(
|
|
||||||
fhs.AbsEtc.Append("group"),
|
|
||||||
[]byte("media_rw:x:1023:\nnobody:x:65534:\n"),
|
|
||||||
).
|
|
||||||
Bind(tempdir, fhs.AbsTmp, std.BindWritable).
|
|
||||||
Proc(fhs.AbsProc).Dev(fhs.AbsDev, true)
|
|
||||||
|
|
||||||
if err := z.Start(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := z.Serve(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return z.Wait()
|
|
||||||
},
|
|
||||||
).
|
|
||||||
Flag(
|
|
||||||
&flagNet,
|
|
||||||
"net", command.BoolFlag(false),
|
|
||||||
"Share host net namespace",
|
|
||||||
).
|
|
||||||
Flag(
|
|
||||||
&flagSession,
|
|
||||||
"session", command.BoolFlag(false),
|
|
||||||
"Retain session",
|
|
||||||
).
|
|
||||||
Flag(
|
|
||||||
&flagWithToolchain,
|
|
||||||
"with-toolchain", command.BoolFlag(false),
|
|
||||||
"Include the stage3 LLVM toolchain",
|
|
||||||
)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
c.Command(
|
|
||||||
"help",
|
|
||||||
"Show this help message",
|
|
||||||
func([]string) error { c.PrintHelp(); return nil },
|
|
||||||
)
|
|
||||||
|
|
||||||
c.MustParse(os.Args[1:], func(err error) {
|
c.MustParse(os.Args[1:], func(err error) {
|
||||||
if cache != nil {
|
if cache != nil {
|
||||||
cache.Close()
|
cache.Close()
|
||||||
|
|||||||
@@ -1,55 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"embed"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
//go:generate sh -c "sass ui/static/dark.scss ui/static/dark.css && sass ui/static/light.scss ui/static/light.css && tsc ui/static/index.ts"
|
|
||||||
//go:embed ui/*
|
|
||||||
var content embed.FS
|
|
||||||
|
|
||||||
func serveWebUI(w http.ResponseWriter, r *http.Request) {
|
|
||||||
fmt.Printf("serveWebUI: %s\n", r.URL.Path)
|
|
||||||
w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
|
|
||||||
w.Header().Set("Pragma", "no-cache")
|
|
||||||
w.Header().Set("Expires", "0")
|
|
||||||
w.Header().Set("X-Content-Type-Options", "nosniff")
|
|
||||||
w.Header().Set("X-XSS-Protection", "1")
|
|
||||||
w.Header().Set("X-Frame-Options", "DENY")
|
|
||||||
|
|
||||||
http.ServeFileFS(w, r, content, "ui/index.html")
|
|
||||||
}
|
|
||||||
func serveStaticContent(w http.ResponseWriter, r *http.Request) {
|
|
||||||
fmt.Printf("serveStaticContent: %s\n", r.URL.Path)
|
|
||||||
switch r.URL.Path {
|
|
||||||
case "/static/style.css":
|
|
||||||
darkTheme := r.CookiesNamed("dark_theme")
|
|
||||||
if len(darkTheme) > 0 && darkTheme[0].Value == "true" {
|
|
||||||
http.ServeFileFS(w, r, content, "ui/static/dark.css")
|
|
||||||
} else {
|
|
||||||
http.ServeFileFS(w, r, content, "ui/static/light.css")
|
|
||||||
}
|
|
||||||
break
|
|
||||||
case "/favicon.ico":
|
|
||||||
http.ServeFileFS(w, r, content, "ui/static/favicon.ico")
|
|
||||||
break
|
|
||||||
case "/static/index.js":
|
|
||||||
http.ServeFileFS(w, r, content, "ui/static/index.js")
|
|
||||||
break
|
|
||||||
default:
|
|
||||||
http.NotFound(w, r)
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
func serveAPI(w http.ResponseWriter, r *http.Request) {
|
|
||||||
|
|
||||||
}
|
|
||||||
func main() {
|
|
||||||
http.HandleFunc("GET /{$}", serveWebUI)
|
|
||||||
http.HandleFunc("GET /favicon.ico", serveStaticContent)
|
|
||||||
http.HandleFunc("GET /static/", serveStaticContent)
|
|
||||||
http.HandleFunc("GET /api/", serveAPI)
|
|
||||||
http.ListenAndServe(":8067", nil)
|
|
||||||
}
|
|
||||||
@@ -1,26 +0,0 @@
|
|||||||
<!DOCTYPE html>
|
|
||||||
<html lang="en">
|
|
||||||
<head>
|
|
||||||
<meta charset="UTF-8">
|
|
||||||
<link rel="stylesheet" href="static/style.css">
|
|
||||||
<title>Hakurei PkgServer</title>
|
|
||||||
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.7.1/jquery.min.js"></script>
|
|
||||||
<script src="static/index.js"></script>
|
|
||||||
</head>
|
|
||||||
<body>
|
|
||||||
<h1>Hakurei PkgServer</h1>
|
|
||||||
|
|
||||||
<table id="pkg-list">
|
|
||||||
<tr><th>Status</th><th>Name</th><th>Version</th></tr>
|
|
||||||
</table>
|
|
||||||
<p>Showing entries <span id="entry-counter"></span>.</p>
|
|
||||||
<span class="bottom-nav"><a href="javascript:prevPage()">« Previous</a> <span id="page-number">1</span> <a href="javascript:nextPage()">Next »</a></span>
|
|
||||||
<span><label for="count">Entries per page:</label><select name="count" id="count">
|
|
||||||
<option value="10">10</option>
|
|
||||||
<option value="25">25</option>
|
|
||||||
<option value="50">50</option>
|
|
||||||
<option value="100">100</option>
|
|
||||||
</select></span>
|
|
||||||
</body>
|
|
||||||
<footer>© <a href="https://hakurei.app/">Hakurei</a>. Licensed under the MIT license.</footer>
|
|
||||||
</html>
|
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
@use 'common';
|
|
||||||
html {
|
|
||||||
background-color: #2c2c2c;
|
|
||||||
color: ghostwhite; }
|
|
||||||
|
|
||||||
/*# sourceMappingURL=dark.css.map */
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
{
|
|
||||||
"version": 3,
|
|
||||||
"mappings": "AAAA,aAAa;AAEb,IAAK;EACH,gBAAgB,EAAE,OAAO;EACzB,KAAK,EAAE,UAAU",
|
|
||||||
"sources": ["dark.scss"],
|
|
||||||
"names": [],
|
|
||||||
"file": "dark.css"
|
|
||||||
}
|
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
@use 'common';
|
|
||||||
|
|
||||||
html {
|
|
||||||
background-color: #2c2c2c;
|
|
||||||
color: ghostwhite;
|
|
||||||
}
|
|
||||||
Binary file not shown.
|
Before Width: | Height: | Size: 17 KiB |
@@ -1,67 +0,0 @@
|
|||||||
"use strict";
|
|
||||||
var PackageEntry = /** @class */ (function () {
|
|
||||||
function PackageEntry() {
|
|
||||||
}
|
|
||||||
return PackageEntry;
|
|
||||||
}());
|
|
||||||
var State = /** @class */ (function () {
|
|
||||||
function State() {
|
|
||||||
this.entriesPerPage = 10;
|
|
||||||
this.currentPage = 1;
|
|
||||||
this.entryIndex = 0;
|
|
||||||
this.loadedEntries = [];
|
|
||||||
}
|
|
||||||
State.prototype.getEntriesPerPage = function () {
|
|
||||||
return this.entriesPerPage;
|
|
||||||
};
|
|
||||||
State.prototype.setEntriesPerPage = function (entriesPerPage) {
|
|
||||||
this.entriesPerPage = entriesPerPage;
|
|
||||||
this.updateRange();
|
|
||||||
};
|
|
||||||
State.prototype.getCurrentPage = function () {
|
|
||||||
return this.currentPage;
|
|
||||||
};
|
|
||||||
State.prototype.setCurrentPage = function (page) {
|
|
||||||
this.currentPage = page;
|
|
||||||
document.getElementById("page-number").innerText = String(this.currentPage);
|
|
||||||
this.updateRange();
|
|
||||||
};
|
|
||||||
State.prototype.getEntryIndex = function () {
|
|
||||||
return this.entryIndex;
|
|
||||||
};
|
|
||||||
State.prototype.setEntryIndex = function (entryIndex) {
|
|
||||||
this.entryIndex = entryIndex;
|
|
||||||
this.updateRange();
|
|
||||||
};
|
|
||||||
State.prototype.getLoadedEntries = function () {
|
|
||||||
return this.loadedEntries;
|
|
||||||
};
|
|
||||||
State.prototype.getMaxPage = function () {
|
|
||||||
return this.loadedEntries.length / this.entriesPerPage;
|
|
||||||
};
|
|
||||||
State.prototype.updateRange = function () {
|
|
||||||
var max = Math.min(this.entryIndex + this.entriesPerPage, this.loadedEntries.length);
|
|
||||||
document.getElementById("entry-counter").innerText = "".concat(this.entryIndex, "-").concat(max, " of ").concat(this.loadedEntries.length);
|
|
||||||
};
|
|
||||||
return State;
|
|
||||||
}());
|
|
||||||
var STATE;
|
|
||||||
function prevPage() {
|
|
||||||
var current = STATE.getCurrentPage();
|
|
||||||
if (current > 1) {
|
|
||||||
STATE.setCurrentPage(STATE.getCurrentPage() - 1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
function nextPage() {
|
|
||||||
var current = STATE.getCurrentPage();
|
|
||||||
if (current < STATE.getMaxPage()) {
|
|
||||||
STATE.setCurrentPage(STATE.getCurrentPage() + 1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
document.addEventListener("DOMContentLoaded", function () {
|
|
||||||
STATE = new State();
|
|
||||||
STATE.updateRange();
|
|
||||||
document.getElementById("count").addEventListener("change", function (event) {
|
|
||||||
STATE.setEntriesPerPage(parseInt(event.target.value));
|
|
||||||
});
|
|
||||||
});
|
|
||||||
@@ -1,66 +0,0 @@
|
|||||||
"use strict"
|
|
||||||
|
|
||||||
class PackageEntry {
|
|
||||||
|
|
||||||
}
|
|
||||||
class State {
|
|
||||||
entriesPerPage: number = 10
|
|
||||||
currentPage: number = 1
|
|
||||||
entryIndex: number = 0
|
|
||||||
loadedEntries: PackageEntry[] = []
|
|
||||||
getEntriesPerPage(): number {
|
|
||||||
return this.entriesPerPage
|
|
||||||
}
|
|
||||||
setEntriesPerPage(entriesPerPage: number) {
|
|
||||||
this.entriesPerPage = entriesPerPage
|
|
||||||
this.updateRange()
|
|
||||||
}
|
|
||||||
getCurrentPage(): number {
|
|
||||||
return this.currentPage
|
|
||||||
}
|
|
||||||
setCurrentPage(page: number) {
|
|
||||||
this.currentPage = page
|
|
||||||
document.getElementById("page-number").innerText = String(this.currentPage)
|
|
||||||
this.updateRange()
|
|
||||||
}
|
|
||||||
getEntryIndex(): number {
|
|
||||||
return this.entryIndex
|
|
||||||
}
|
|
||||||
setEntryIndex(entryIndex: number) {
|
|
||||||
this.entryIndex = entryIndex
|
|
||||||
this.updateRange()
|
|
||||||
}
|
|
||||||
getLoadedEntries(): PackageEntry[] {
|
|
||||||
return this.loadedEntries
|
|
||||||
}
|
|
||||||
getMaxPage(): number {
|
|
||||||
return this.loadedEntries.length / this.entriesPerPage
|
|
||||||
}
|
|
||||||
updateRange() {
|
|
||||||
let max = Math.min(this.entryIndex + this.entriesPerPage, this.loadedEntries.length)
|
|
||||||
document.getElementById("entry-counter").innerText = `${this.entryIndex}-${max} of ${this.loadedEntries.length}`
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let STATE: State
|
|
||||||
|
|
||||||
function prevPage() {
|
|
||||||
let current = STATE.getCurrentPage()
|
|
||||||
if (current > 1) {
|
|
||||||
STATE.setCurrentPage(STATE.getCurrentPage() - 1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
function nextPage() {
|
|
||||||
let current = STATE.getCurrentPage()
|
|
||||||
if (current < STATE.getMaxPage()) {
|
|
||||||
STATE.setCurrentPage(STATE.getCurrentPage() + 1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
document.addEventListener("DOMContentLoaded", () => {
|
|
||||||
STATE = new State()
|
|
||||||
STATE.updateRange()
|
|
||||||
document.getElementById("count").addEventListener("change", (event) => {
|
|
||||||
STATE.setEntriesPerPage(parseInt((event.target as HTMLSelectElement).value))
|
|
||||||
})
|
|
||||||
})
|
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
@use 'common';
|
|
||||||
html {
|
|
||||||
background-color: #d3d3d3;
|
|
||||||
color: black; }
|
|
||||||
|
|
||||||
/*# sourceMappingURL=light.css.map */
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
{
|
|
||||||
"version": 3,
|
|
||||||
"mappings": "AAAA,aAAa;AAEb,IAAK;EACH,gBAAgB,EAAE,OAAO;EACzB,KAAK,EAAE,KAAK",
|
|
||||||
"sources": ["light.scss"],
|
|
||||||
"names": [],
|
|
||||||
"file": "light.css"
|
|
||||||
}
|
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
@use 'common';
|
|
||||||
|
|
||||||
html {
|
|
||||||
background-color: #d3d3d3;
|
|
||||||
color: black;
|
|
||||||
}
|
|
||||||
@@ -33,7 +33,6 @@ import (
|
|||||||
|
|
||||||
"hakurei.app/container"
|
"hakurei.app/container"
|
||||||
"hakurei.app/container/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/container/fhs"
|
|
||||||
"hakurei.app/container/std"
|
"hakurei.app/container/std"
|
||||||
"hakurei.app/hst"
|
"hakurei.app/hst"
|
||||||
"hakurei.app/internal/helper/proc"
|
"hakurei.app/internal/helper/proc"
|
||||||
@@ -442,7 +441,12 @@ func _main(s ...string) (exitCode int) {
|
|||||||
// keep fuse_parse_cmdline happy in the container
|
// keep fuse_parse_cmdline happy in the container
|
||||||
z.Tmpfs(check.MustAbs(container.Nonexistent), 1<<10, 0755)
|
z.Tmpfs(check.MustAbs(container.Nonexistent), 1<<10, 0755)
|
||||||
|
|
||||||
z.Path = fhs.AbsProcSelfExe
|
if a, err := check.NewAbs(container.MustExecutable(msg)); err != nil {
|
||||||
|
log.Println(err)
|
||||||
|
return 5
|
||||||
|
} else {
|
||||||
|
z.Path = a
|
||||||
|
}
|
||||||
z.Args = s
|
z.Args = s
|
||||||
z.ForwardCancel = true
|
z.ForwardCancel = true
|
||||||
z.SeccompPresets |= std.PresetStrict
|
z.SeccompPresets |= std.PresetStrict
|
||||||
|
|||||||
@@ -10,7 +10,8 @@ import (
|
|||||||
|
|
||||||
func init() { gob.Register(new(AutoEtcOp)) }
|
func init() { gob.Register(new(AutoEtcOp)) }
|
||||||
|
|
||||||
// Etc is a helper for appending [AutoEtcOp] to [Ops].
|
// Etc appends an [Op] that expands host /etc into a toplevel symlink mirror with /etc semantics.
|
||||||
|
// This is not a generic setup op. It is implemented here to reduce ipc overhead.
|
||||||
func (f *Ops) Etc(host *check.Absolute, prefix string) *Ops {
|
func (f *Ops) Etc(host *check.Absolute, prefix string) *Ops {
|
||||||
e := &AutoEtcOp{prefix}
|
e := &AutoEtcOp{prefix}
|
||||||
f.Mkdir(fhs.AbsEtc, 0755)
|
f.Mkdir(fhs.AbsEtc, 0755)
|
||||||
@@ -19,9 +20,6 @@ func (f *Ops) Etc(host *check.Absolute, prefix string) *Ops {
|
|||||||
return f
|
return f
|
||||||
}
|
}
|
||||||
|
|
||||||
// AutoEtcOp expands host /etc into a toplevel symlink mirror with /etc semantics.
|
|
||||||
//
|
|
||||||
// This is not a generic setup op. It is implemented here to reduce ipc overhead.
|
|
||||||
type AutoEtcOp struct{ Prefix string }
|
type AutoEtcOp struct{ Prefix string }
|
||||||
|
|
||||||
func (e *AutoEtcOp) Valid() bool { return e != nil }
|
func (e *AutoEtcOp) Valid() bool { return e != nil }
|
||||||
|
|||||||
@@ -11,15 +11,13 @@ import (
|
|||||||
|
|
||||||
func init() { gob.Register(new(AutoRootOp)) }
|
func init() { gob.Register(new(AutoRootOp)) }
|
||||||
|
|
||||||
// Root is a helper for appending [AutoRootOp] to [Ops].
|
// Root appends an [Op] that expands a directory into a toplevel bind mount mirror on container root.
|
||||||
|
// This is not a generic setup op. It is implemented here to reduce ipc overhead.
|
||||||
func (f *Ops) Root(host *check.Absolute, flags int) *Ops {
|
func (f *Ops) Root(host *check.Absolute, flags int) *Ops {
|
||||||
*f = append(*f, &AutoRootOp{host, flags, nil})
|
*f = append(*f, &AutoRootOp{host, flags, nil})
|
||||||
return f
|
return f
|
||||||
}
|
}
|
||||||
|
|
||||||
// AutoRootOp expands a directory into a toplevel bind mount mirror on container root.
|
|
||||||
//
|
|
||||||
// This is not a generic setup op. It is implemented here to reduce ipc overhead.
|
|
||||||
type AutoRootOp struct {
|
type AutoRootOp struct {
|
||||||
Host *check.Absolute
|
Host *check.Absolute
|
||||||
// passed through to bindMount
|
// passed through to bindMount
|
||||||
|
|||||||
@@ -50,16 +50,10 @@ func capset(hdrp *capHeader, datap *[2]capData) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// capBoundingSetDrop drops a capability from the calling thread's capability bounding set.
|
// capBoundingSetDrop drops a capability from the calling thread's capability bounding set.
|
||||||
func capBoundingSetDrop(cap uintptr) error {
|
func capBoundingSetDrop(cap uintptr) error { return Prctl(syscall.PR_CAPBSET_DROP, cap, 0) }
|
||||||
return Prctl(syscall.PR_CAPBSET_DROP, cap, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// capAmbientClearAll clears the ambient capability set of the calling thread.
|
// capAmbientClearAll clears the ambient capability set of the calling thread.
|
||||||
func capAmbientClearAll() error {
|
func capAmbientClearAll() error { return Prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_CLEAR_ALL, 0) }
|
||||||
return Prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_CLEAR_ALL, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// capAmbientRaise adds to the ambient capability set of the calling thread.
|
// capAmbientRaise adds to the ambient capability set of the calling thread.
|
||||||
func capAmbientRaise(cap uintptr) error {
|
func capAmbientRaise(cap uintptr) error { return Prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, cap) }
|
||||||
return Prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, cap)
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -11,8 +11,7 @@ const (
|
|||||||
SpecialOverlayPath = ":"
|
SpecialOverlayPath = ":"
|
||||||
)
|
)
|
||||||
|
|
||||||
// EscapeOverlayDataSegment escapes a string for formatting into the data
|
// EscapeOverlayDataSegment escapes a string for formatting into the data argument of an overlay mount call.
|
||||||
// argument of an overlay mount system call.
|
|
||||||
func EscapeOverlayDataSegment(s string) string {
|
func EscapeOverlayDataSegment(s string) string {
|
||||||
if s == "" {
|
if s == "" {
|
||||||
return ""
|
return ""
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
// Package container implements unprivileged Linux containers with built-in
|
// Package container implements unprivileged Linux containers with built-in support for syscall filtering.
|
||||||
// support for syscall filtering.
|
|
||||||
package container
|
package container
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -38,30 +37,24 @@ type (
|
|||||||
Container struct {
|
Container struct {
|
||||||
// Whether the container init should stay alive after its parent terminates.
|
// Whether the container init should stay alive after its parent terminates.
|
||||||
AllowOrphan bool
|
AllowOrphan bool
|
||||||
// Scheduling policy to set via sched_setscheduler(2). The zero value
|
|
||||||
// skips this call. Supported policies are [SCHED_BATCH], [SCHED_IDLE].
|
|
||||||
SchedPolicy int
|
|
||||||
// Cgroup fd, nil to disable.
|
// Cgroup fd, nil to disable.
|
||||||
Cgroup *int
|
Cgroup *int
|
||||||
// ExtraFiles passed through to initial process in the container, with
|
// ExtraFiles passed through to initial process in the container,
|
||||||
// behaviour identical to its [exec.Cmd] counterpart.
|
// with behaviour identical to its [exec.Cmd] counterpart.
|
||||||
ExtraFiles []*os.File
|
ExtraFiles []*os.File
|
||||||
|
|
||||||
// Write end of a pipe connected to the init to deliver [Params].
|
// param pipe for shim and init
|
||||||
setup *os.File
|
setup *os.File
|
||||||
// Cancels the context passed to the underlying cmd.
|
// cancels cmd
|
||||||
cancel context.CancelFunc
|
cancel context.CancelFunc
|
||||||
// Closed after Wait returns. Keeps the spawning thread alive.
|
// closed after Wait returns
|
||||||
wait chan struct{}
|
wait chan struct{}
|
||||||
|
|
||||||
Stdin io.Reader
|
Stdin io.Reader
|
||||||
Stdout io.Writer
|
Stdout io.Writer
|
||||||
Stderr io.Writer
|
Stderr io.Writer
|
||||||
|
|
||||||
// Custom cancellation behaviour for the underlying [exec.Cmd]. Must
|
|
||||||
// deliver [CancelSignal] before returning.
|
|
||||||
Cancel func(cmd *exec.Cmd) error
|
Cancel func(cmd *exec.Cmd) error
|
||||||
// Copied to the underlying [exec.Cmd].
|
|
||||||
WaitDelay time.Duration
|
WaitDelay time.Duration
|
||||||
|
|
||||||
cmd *exec.Cmd
|
cmd *exec.Cmd
|
||||||
@@ -290,11 +283,7 @@ func (p *Container) Start() error {
|
|||||||
|
|
||||||
// place setup pipe before user supplied extra files, this is later restored by init
|
// place setup pipe before user supplied extra files, this is later restored by init
|
||||||
if fd, f, err := Setup(&p.cmd.ExtraFiles); err != nil {
|
if fd, f, err := Setup(&p.cmd.ExtraFiles); err != nil {
|
||||||
return &StartError{
|
return &StartError{true, "set up params stream", err, false, false}
|
||||||
Fatal: true,
|
|
||||||
Step: "set up params stream",
|
|
||||||
Err: err,
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
p.setup = f
|
p.setup = f
|
||||||
p.cmd.Env = []string{setupEnv + "=" + strconv.Itoa(fd)}
|
p.cmd.Env = []string{setupEnv + "=" + strconv.Itoa(fd)}
|
||||||
@@ -306,16 +295,10 @@ func (p *Container) Start() error {
|
|||||||
runtime.LockOSThread()
|
runtime.LockOSThread()
|
||||||
p.wait = make(chan struct{})
|
p.wait = make(chan struct{})
|
||||||
|
|
||||||
// setup depending on per-thread state must happen here
|
done <- func() error { // setup depending on per-thread state must happen here
|
||||||
done <- func() error {
|
// PR_SET_NO_NEW_PRIVS: depends on per-thread state but acts on all processes created from that thread
|
||||||
// PR_SET_NO_NEW_PRIVS: thread-directed but acts on all processes
|
|
||||||
// created from the calling thread
|
|
||||||
if err := SetNoNewPrivs(); err != nil {
|
if err := SetNoNewPrivs(); err != nil {
|
||||||
return &StartError{
|
return &StartError{true, "prctl(PR_SET_NO_NEW_PRIVS)", err, false, false}
|
||||||
Fatal: true,
|
|
||||||
Step: "prctl(PR_SET_NO_NEW_PRIVS)",
|
|
||||||
Err: err,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// landlock: depends on per-thread state but acts on a process group
|
// landlock: depends on per-thread state but acts on a process group
|
||||||
@@ -327,40 +310,28 @@ func (p *Container) Start() error {
|
|||||||
|
|
||||||
if abi, err := LandlockGetABI(); err != nil {
|
if abi, err := LandlockGetABI(); err != nil {
|
||||||
if p.HostAbstract {
|
if p.HostAbstract {
|
||||||
// landlock can be skipped here as it restricts access
|
// landlock can be skipped here as it restricts access to resources
|
||||||
// to resources already covered by namespaces (pid)
|
// already covered by namespaces (pid)
|
||||||
goto landlockOut
|
goto landlockOut
|
||||||
}
|
}
|
||||||
return &StartError{Step: "get landlock ABI", Err: err}
|
return &StartError{false, "get landlock ABI", err, false, false}
|
||||||
} else if abi < 6 {
|
} else if abi < 6 {
|
||||||
if p.HostAbstract {
|
if p.HostAbstract {
|
||||||
// see above comment
|
// see above comment
|
||||||
goto landlockOut
|
goto landlockOut
|
||||||
}
|
}
|
||||||
return &StartError{
|
return &StartError{false, "kernel version too old for LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET", ENOSYS, true, false}
|
||||||
Step: "kernel too old for LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET",
|
|
||||||
Err: ENOSYS,
|
|
||||||
Origin: true,
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
p.msg.Verbosef("landlock abi version %d", abi)
|
p.msg.Verbosef("landlock abi version %d", abi)
|
||||||
}
|
}
|
||||||
|
|
||||||
if rulesetFd, err := rulesetAttr.Create(0); err != nil {
|
if rulesetFd, err := rulesetAttr.Create(0); err != nil {
|
||||||
return &StartError{
|
return &StartError{true, "create landlock ruleset", err, false, false}
|
||||||
Fatal: true,
|
|
||||||
Step: "create landlock ruleset",
|
|
||||||
Err: err,
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
p.msg.Verbosef("enforcing landlock ruleset %s", rulesetAttr)
|
p.msg.Verbosef("enforcing landlock ruleset %s", rulesetAttr)
|
||||||
if err = LandlockRestrictSelf(rulesetFd, 0); err != nil {
|
if err = LandlockRestrictSelf(rulesetFd, 0); err != nil {
|
||||||
_ = Close(rulesetFd)
|
_ = Close(rulesetFd)
|
||||||
return &StartError{
|
return &StartError{true, "enforce landlock ruleset", err, false, false}
|
||||||
Fatal: true,
|
|
||||||
Step: "enforce landlock ruleset",
|
|
||||||
Err: err,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if err = Close(rulesetFd); err != nil {
|
if err = Close(rulesetFd); err != nil {
|
||||||
p.msg.Verbosef("cannot close landlock ruleset: %v", err)
|
p.msg.Verbosef("cannot close landlock ruleset: %v", err)
|
||||||
@@ -371,30 +342,9 @@ func (p *Container) Start() error {
|
|||||||
landlockOut:
|
landlockOut:
|
||||||
}
|
}
|
||||||
|
|
||||||
// sched_setscheduler: thread-directed but acts on all processes
|
|
||||||
// created from the calling thread
|
|
||||||
if p.SchedPolicy > 0 {
|
|
||||||
p.msg.Verbosef("setting scheduling policy %d", p.SchedPolicy)
|
|
||||||
if err := schedSetscheduler(
|
|
||||||
0, // calling thread
|
|
||||||
p.SchedPolicy,
|
|
||||||
&schedParam{0},
|
|
||||||
); err != nil {
|
|
||||||
return &StartError{
|
|
||||||
Fatal: true,
|
|
||||||
Step: "enforce landlock ruleset",
|
|
||||||
Err: err,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
p.msg.Verbose("starting container init")
|
p.msg.Verbose("starting container init")
|
||||||
if err := p.cmd.Start(); err != nil {
|
if err := p.cmd.Start(); err != nil {
|
||||||
return &StartError{
|
return &StartError{false, "start container init", err, false, true}
|
||||||
Step: "start container init",
|
|
||||||
Err: err,
|
|
||||||
Passthrough: true,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}()
|
}()
|
||||||
@@ -406,7 +356,6 @@ func (p *Container) Start() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Serve serves [Container.Params] to the container init.
|
// Serve serves [Container.Params] to the container init.
|
||||||
//
|
|
||||||
// Serve must only be called once.
|
// Serve must only be called once.
|
||||||
func (p *Container) Serve() error {
|
func (p *Container) Serve() error {
|
||||||
if p.setup == nil {
|
if p.setup == nil {
|
||||||
@@ -416,21 +365,12 @@ func (p *Container) Serve() error {
|
|||||||
setup := p.setup
|
setup := p.setup
|
||||||
p.setup = nil
|
p.setup = nil
|
||||||
if err := setup.SetDeadline(time.Now().Add(initSetupTimeout)); err != nil {
|
if err := setup.SetDeadline(time.Now().Add(initSetupTimeout)); err != nil {
|
||||||
return &StartError{
|
return &StartError{true, "set init pipe deadline", err, false, true}
|
||||||
Fatal: true,
|
|
||||||
Step: "set init pipe deadline",
|
|
||||||
Err: err,
|
|
||||||
Passthrough: true,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if p.Path == nil {
|
if p.Path == nil {
|
||||||
p.cancel()
|
p.cancel()
|
||||||
return &StartError{
|
return &StartError{false, "invalid executable pathname", EINVAL, true, false}
|
||||||
Step: "invalid executable pathname",
|
|
||||||
Err: EINVAL,
|
|
||||||
Origin: true,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// do not transmit nil
|
// do not transmit nil
|
||||||
@@ -455,8 +395,7 @@ func (p *Container) Serve() error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wait blocks until the container init process to exit and releases any
|
// Wait waits for the container init process to exit and releases any resources associated with the [Container].
|
||||||
// resources associated with the [Container].
|
|
||||||
func (p *Container) Wait() error {
|
func (p *Container) Wait() error {
|
||||||
if p.cmd == nil || p.cmd.Process == nil {
|
if p.cmd == nil || p.cmd.Process == nil {
|
||||||
return EINVAL
|
return EINVAL
|
||||||
@@ -501,13 +440,11 @@ func (p *Container) StderrPipe() (r io.ReadCloser, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (p *Container) String() string {
|
func (p *Container) String() string {
|
||||||
return fmt.Sprintf(
|
return fmt.Sprintf("argv: %q, filter: %v, rules: %d, flags: %#x, presets: %#x",
|
||||||
"argv: %q, filter: %v, rules: %d, flags: %#x, presets: %#x",
|
p.Args, !p.SeccompDisable, len(p.SeccompRules), int(p.SeccompFlags), int(p.SeccompPresets))
|
||||||
p.Args, !p.SeccompDisable, len(p.SeccompRules), int(p.SeccompFlags), int(p.SeccompPresets),
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ProcessState returns the address of os.ProcessState held by the underlying [exec.Cmd].
|
// ProcessState returns the address to os.ProcessState held by the underlying [exec.Cmd].
|
||||||
func (p *Container) ProcessState() *os.ProcessState {
|
func (p *Container) ProcessState() *os.ProcessState {
|
||||||
if p.cmd == nil {
|
if p.cmd == nil {
|
||||||
return nil
|
return nil
|
||||||
@@ -515,8 +452,7 @@ func (p *Container) ProcessState() *os.ProcessState {
|
|||||||
return p.cmd.ProcessState
|
return p.cmd.ProcessState
|
||||||
}
|
}
|
||||||
|
|
||||||
// New returns the address to a new instance of [Container]. This value requires
|
// New returns the address to a new instance of [Container] that requires further initialisation before use.
|
||||||
// further initialisation before use.
|
|
||||||
func New(ctx context.Context, msg message.Msg) *Container {
|
func New(ctx context.Context, msg message.Msg) *Container {
|
||||||
if msg == nil {
|
if msg == nil {
|
||||||
msg = message.New(nil)
|
msg = message.New(nil)
|
||||||
@@ -525,18 +461,12 @@ func New(ctx context.Context, msg message.Msg) *Container {
|
|||||||
p := &Container{ctx: ctx, msg: msg, Params: Params{Ops: new(Ops)}}
|
p := &Container{ctx: ctx, msg: msg, Params: Params{Ops: new(Ops)}}
|
||||||
c, cancel := context.WithCancel(ctx)
|
c, cancel := context.WithCancel(ctx)
|
||||||
p.cancel = cancel
|
p.cancel = cancel
|
||||||
p.cmd = exec.CommandContext(c, fhs.ProcSelfExe)
|
p.cmd = exec.CommandContext(c, MustExecutable(msg))
|
||||||
return p
|
return p
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewCommand calls [New] and initialises the [Params.Path] and [Params.Args] fields.
|
// NewCommand calls [New] and initialises the [Params.Path] and [Params.Args] fields.
|
||||||
func NewCommand(
|
func NewCommand(ctx context.Context, msg message.Msg, pathname *check.Absolute, name string, args ...string) *Container {
|
||||||
ctx context.Context,
|
|
||||||
msg message.Msg,
|
|
||||||
pathname *check.Absolute,
|
|
||||||
name string,
|
|
||||||
args ...string,
|
|
||||||
) *Container {
|
|
||||||
z := New(ctx, msg)
|
z := New(ctx, msg)
|
||||||
z.Path = pathname
|
z.Path = pathname
|
||||||
z.Args = append([]string{name}, args...)
|
z.Args = append([]string{name}, args...)
|
||||||
|
|||||||
@@ -773,13 +773,14 @@ func TestMain(m *testing.M) {
|
|||||||
func helperNewContainerLibPaths(ctx context.Context, libPaths *[]*check.Absolute, args ...string) (c *container.Container) {
|
func helperNewContainerLibPaths(ctx context.Context, libPaths *[]*check.Absolute, args ...string) (c *container.Container) {
|
||||||
msg := message.New(nil)
|
msg := message.New(nil)
|
||||||
msg.SwapVerbose(testing.Verbose())
|
msg.SwapVerbose(testing.Verbose())
|
||||||
|
executable := check.MustAbs(container.MustExecutable(msg))
|
||||||
|
|
||||||
c = container.NewCommand(ctx, msg, absHelperInnerPath, "helper", args...)
|
c = container.NewCommand(ctx, msg, absHelperInnerPath, "helper", args...)
|
||||||
c.Env = append(c.Env, envDoCheck+"=1")
|
c.Env = append(c.Env, envDoCheck+"=1")
|
||||||
c.Bind(fhs.AbsProcSelfExe, absHelperInnerPath, 0)
|
c.Bind(executable, absHelperInnerPath, 0)
|
||||||
|
|
||||||
// in case test has cgo enabled
|
// in case test has cgo enabled
|
||||||
if entries, err := ldd.Resolve(ctx, msg, nil); err != nil {
|
if entries, err := ldd.Resolve(ctx, msg, executable); err != nil {
|
||||||
log.Fatalf("ldd: %v", err)
|
log.Fatalf("ldd: %v", err)
|
||||||
} else {
|
} else {
|
||||||
*libPaths = ldd.Path(entries)
|
*libPaths = ldd.Path(entries)
|
||||||
|
|||||||
@@ -21,8 +21,7 @@ type osFile interface {
|
|||||||
fs.File
|
fs.File
|
||||||
}
|
}
|
||||||
|
|
||||||
// syscallDispatcher provides methods that make state-dependent system calls as
|
// syscallDispatcher provides methods that make state-dependent system calls as part of their behaviour.
|
||||||
// part of their behaviour.
|
|
||||||
type syscallDispatcher interface {
|
type syscallDispatcher interface {
|
||||||
// new starts a goroutine with a new instance of syscallDispatcher.
|
// new starts a goroutine with a new instance of syscallDispatcher.
|
||||||
// A syscallDispatcher must never be used in any goroutine other than the one owning it,
|
// A syscallDispatcher must never be used in any goroutine other than the one owning it,
|
||||||
|
|||||||
@@ -238,11 +238,8 @@ func sliceAddr[S any](s []S) *[]S { return &s }
|
|||||||
|
|
||||||
func newCheckedFile(t *testing.T, name, wantData string, closeErr error) osFile {
|
func newCheckedFile(t *testing.T, name, wantData string, closeErr error) osFile {
|
||||||
f := &checkedOsFile{t: t, name: name, want: wantData, closeErr: closeErr}
|
f := &checkedOsFile{t: t, name: name, want: wantData, closeErr: closeErr}
|
||||||
// check happens in Close, and cleanup is not guaranteed to run, so relying
|
// check happens in Close, and cleanup is not guaranteed to run, so relying on it for sloppy implementations will cause sporadic test results
|
||||||
// on it for sloppy implementations will cause sporadic test results
|
f.cleanup = runtime.AddCleanup(f, func(name string) { f.t.Fatalf("checkedOsFile %s became unreachable without a call to Close", name) }, f.name)
|
||||||
f.cleanup = runtime.AddCleanup(f, func(name string) {
|
|
||||||
panic("checkedOsFile " + name + " became unreachable without a call to Close")
|
|
||||||
}, name)
|
|
||||||
return f
|
return f
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -43,8 +43,7 @@ func messageFromError(err error) (m string, ok bool) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// messagePrefix checks and prefixes the error message of a non-pointer error.
|
// messagePrefix checks and prefixes the error message of a non-pointer error.
|
||||||
// While this is usable for pointer errors, such use should be avoided as nil
|
// While this is usable for pointer errors, such use should be avoided as nil check is omitted.
|
||||||
// check is omitted.
|
|
||||||
func messagePrefix[T error](prefix string, err error) (string, bool) {
|
func messagePrefix[T error](prefix string, err error) (string, bool) {
|
||||||
var targetError T
|
var targetError T
|
||||||
if errors.As(err, &targetError) {
|
if errors.As(err, &targetError) {
|
||||||
|
|||||||
@@ -28,9 +28,6 @@ func copyExecutable(msg message.Msg) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// MustExecutable calls [os.Executable] and terminates the process on error.
|
|
||||||
//
|
|
||||||
// Deprecated: This is no longer used and will be removed in 0.4.
|
|
||||||
func MustExecutable(msg message.Msg) string {
|
func MustExecutable(msg message.Msg) string {
|
||||||
executableOnce.Do(func() { copyExecutable(msg) })
|
executableOnce.Do(func() { copyExecutable(msg) })
|
||||||
return executable
|
return executable
|
||||||
|
|||||||
@@ -42,8 +42,6 @@ var (
|
|||||||
AbsDevShm = unsafeAbs(DevShm)
|
AbsDevShm = unsafeAbs(DevShm)
|
||||||
// AbsProc is [Proc] as [check.Absolute].
|
// AbsProc is [Proc] as [check.Absolute].
|
||||||
AbsProc = unsafeAbs(Proc)
|
AbsProc = unsafeAbs(Proc)
|
||||||
// AbsProcSelfExe is [ProcSelfExe] as [check.Absolute].
|
|
||||||
AbsProcSelfExe = unsafeAbs(ProcSelfExe)
|
|
||||||
// AbsSys is [Sys] as [check.Absolute].
|
// AbsSys is [Sys] as [check.Absolute].
|
||||||
AbsSys = unsafeAbs(Sys)
|
AbsSys = unsafeAbs(Sys)
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -9,8 +9,7 @@ const (
|
|||||||
// Tmp points to the place for small temporary files.
|
// Tmp points to the place for small temporary files.
|
||||||
Tmp = "/tmp/"
|
Tmp = "/tmp/"
|
||||||
|
|
||||||
// Run points to a "tmpfs" file system for system packages to place runtime
|
// Run points to a "tmpfs" file system for system packages to place runtime data, socket files, and similar.
|
||||||
// data, socket files, and similar.
|
|
||||||
Run = "/run/"
|
Run = "/run/"
|
||||||
// RunUser points to a directory containing per-user runtime directories,
|
// RunUser points to a directory containing per-user runtime directories,
|
||||||
// each usually individually mounted "tmpfs" instances.
|
// each usually individually mounted "tmpfs" instances.
|
||||||
@@ -18,12 +17,10 @@ const (
|
|||||||
|
|
||||||
// Usr points to vendor-supplied operating system resources.
|
// Usr points to vendor-supplied operating system resources.
|
||||||
Usr = "/usr/"
|
Usr = "/usr/"
|
||||||
// UsrBin points to binaries and executables for user commands that shall
|
// UsrBin points to binaries and executables for user commands that shall appear in the $PATH search path.
|
||||||
// appear in the $PATH search path.
|
|
||||||
UsrBin = Usr + "bin/"
|
UsrBin = Usr + "bin/"
|
||||||
|
|
||||||
// Var points to persistent, variable system data. Writable during normal
|
// Var points to persistent, variable system data. Writable during normal system operation.
|
||||||
// system operation.
|
|
||||||
Var = "/var/"
|
Var = "/var/"
|
||||||
// VarLib points to persistent system data.
|
// VarLib points to persistent system data.
|
||||||
VarLib = Var + "lib/"
|
VarLib = Var + "lib/"
|
||||||
@@ -32,20 +29,12 @@ const (
|
|||||||
|
|
||||||
// Dev points to the root directory for device nodes.
|
// Dev points to the root directory for device nodes.
|
||||||
Dev = "/dev/"
|
Dev = "/dev/"
|
||||||
// DevShm is the place for POSIX shared memory segments, as created via
|
// DevShm is the place for POSIX shared memory segments, as created via shm_open(3).
|
||||||
// shm_open(3).
|
|
||||||
DevShm = "/dev/shm/"
|
DevShm = "/dev/shm/"
|
||||||
// Proc points to a virtual kernel file system exposing the process list and
|
// Proc points to a virtual kernel file system exposing the process list and other functionality.
|
||||||
// other functionality.
|
|
||||||
Proc = "/proc/"
|
Proc = "/proc/"
|
||||||
// ProcSys points to a hierarchy below /proc/ that exposes a number of
|
// ProcSys points to a hierarchy below /proc/ that exposes a number of kernel tunables.
|
||||||
// kernel tunables.
|
|
||||||
ProcSys = Proc + "sys/"
|
ProcSys = Proc + "sys/"
|
||||||
// ProcSelf resolves to the process's own /proc/pid directory.
|
// Sys points to a virtual kernel file system exposing discovered devices and other functionality.
|
||||||
ProcSelf = Proc + "self/"
|
|
||||||
// ProcSelfExe is a symbolic link to program pathname.
|
|
||||||
ProcSelfExe = ProcSelf + "exe"
|
|
||||||
// Sys points to a virtual kernel file system exposing discovered devices
|
|
||||||
// and other functionality.
|
|
||||||
Sys = "/sys/"
|
Sys = "/sys/"
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -33,12 +33,12 @@ const (
|
|||||||
- This path is only accessible by init and root:
|
- This path is only accessible by init and root:
|
||||||
The container init sets SUID_DUMP_DISABLE and terminates if that fails.
|
The container init sets SUID_DUMP_DISABLE and terminates if that fails.
|
||||||
|
|
||||||
It should be noted that none of this should become relevant at any point
|
It should be noted that none of this should become relevant at any point since the resulting
|
||||||
since the resulting intermediate root tmpfs should be effectively anonymous. */
|
intermediate root tmpfs should be effectively anonymous. */
|
||||||
intermediateHostPath = fhs.Proc + "self/fd"
|
intermediateHostPath = fhs.Proc + "self/fd"
|
||||||
|
|
||||||
// setupEnv is the name of the environment variable holding the string
|
// setupEnv is the name of the environment variable holding the string representation of
|
||||||
// representation of the read end file descriptor of the setup params pipe.
|
// the read end file descriptor of the setup params pipe.
|
||||||
setupEnv = "HAKUREI_SETUP"
|
setupEnv = "HAKUREI_SETUP"
|
||||||
|
|
||||||
// exitUnexpectedWait4 is the exit code if wait4 returns an unexpected errno.
|
// exitUnexpectedWait4 is the exit code if wait4 returns an unexpected errno.
|
||||||
@@ -59,8 +59,7 @@ type (
|
|||||||
// late is called right before starting the initial process.
|
// late is called right before starting the initial process.
|
||||||
late(state *setupState, k syscallDispatcher) error
|
late(state *setupState, k syscallDispatcher) error
|
||||||
|
|
||||||
// prefix returns a log message prefix, and whether this Op prints no
|
// prefix returns a log message prefix, and whether this Op prints no identifying message on its own.
|
||||||
// identifying message on its own.
|
|
||||||
prefix() (string, bool)
|
prefix() (string, bool)
|
||||||
|
|
||||||
Is(op Op) bool
|
Is(op Op) bool
|
||||||
@@ -72,11 +71,9 @@ type (
|
|||||||
setupState struct {
|
setupState struct {
|
||||||
nonrepeatable uintptr
|
nonrepeatable uintptr
|
||||||
|
|
||||||
// Whether early reaping has concluded. Must only be accessed in the
|
// Whether early reaping has concluded. Must only be accessed in the wait4 loop.
|
||||||
// wait4 loop.
|
|
||||||
processConcluded bool
|
processConcluded bool
|
||||||
// Process to syscall.WaitStatus populated in the wait4 loop. Freed
|
// Process to syscall.WaitStatus populated in the wait4 loop. Freed after early reaping concludes.
|
||||||
// after early reaping concludes.
|
|
||||||
process map[int]WaitStatus
|
process map[int]WaitStatus
|
||||||
// Synchronises access to process.
|
// Synchronises access to process.
|
||||||
processMu sync.RWMutex
|
processMu sync.RWMutex
|
||||||
@@ -219,10 +216,9 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
|
|||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
/* early is called right before pivot_root into intermediate root;
|
/* early is called right before pivot_root into intermediate root;
|
||||||
this step is mostly for gathering information that would otherwise be
|
this step is mostly for gathering information that would otherwise be difficult to obtain
|
||||||
difficult to obtain via library functions after pivot_root, and
|
via library functions after pivot_root, and implementations are expected to avoid changing
|
||||||
implementations are expected to avoid changing the state of the mount
|
the state of the mount namespace */
|
||||||
namespace */
|
|
||||||
for i, op := range *params.Ops {
|
for i, op := range *params.Ops {
|
||||||
if op == nil || !op.Valid() {
|
if op == nil || !op.Valid() {
|
||||||
k.fatalf(msg, "invalid op at index %d", i)
|
k.fatalf(msg, "invalid op at index %d", i)
|
||||||
@@ -262,10 +258,10 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
|
|||||||
k.fatalf(msg, "cannot enter intermediate root: %v", err)
|
k.fatalf(msg, "cannot enter intermediate root: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* apply is called right after pivot_root and entering the new root. This
|
/* apply is called right after pivot_root and entering the new root;
|
||||||
step sets up the container filesystem, and implementations are expected to
|
this step sets up the container filesystem, and implementations are expected to keep the host root
|
||||||
keep the host root and sysroot mount points intact but otherwise can do
|
and sysroot mount points intact but otherwise can do whatever they need to;
|
||||||
whatever they need to. Calling chdir is allowed but discouraged. */
|
chdir is allowed but discouraged */
|
||||||
for i, op := range *params.Ops {
|
for i, op := range *params.Ops {
|
||||||
// ops already checked during early setup
|
// ops already checked during early setup
|
||||||
if prefix, ok := op.prefix(); ok {
|
if prefix, ok := op.prefix(); ok {
|
||||||
|
|||||||
@@ -12,16 +12,14 @@ import (
|
|||||||
|
|
||||||
func init() { gob.Register(new(BindMountOp)) }
|
func init() { gob.Register(new(BindMountOp)) }
|
||||||
|
|
||||||
// Bind is a helper for appending [BindMountOp] to [Ops].
|
// Bind appends an [Op] that bind mounts host path [BindMountOp.Source] on container path [BindMountOp.Target].
|
||||||
func (f *Ops) Bind(source, target *check.Absolute, flags int) *Ops {
|
func (f *Ops) Bind(source, target *check.Absolute, flags int) *Ops {
|
||||||
*f = append(*f, &BindMountOp{nil, source, target, flags})
|
*f = append(*f, &BindMountOp{nil, source, target, flags})
|
||||||
return f
|
return f
|
||||||
}
|
}
|
||||||
|
|
||||||
// BindMountOp creates a bind mount from host path Source to container path Target.
|
// BindMountOp bind mounts host path Source on container path Target.
|
||||||
//
|
// Note that Flags uses bits declared in this package and should not be set with constants in [syscall].
|
||||||
// Note that Flags uses bits declared in the [std] package and should not be set
|
|
||||||
// with constants in [syscall].
|
|
||||||
type BindMountOp struct {
|
type BindMountOp struct {
|
||||||
sourceFinal, Source, Target *check.Absolute
|
sourceFinal, Source, Target *check.Absolute
|
||||||
|
|
||||||
|
|||||||
@@ -24,7 +24,8 @@ const (
|
|||||||
daemonTimeout = 5 * time.Second
|
daemonTimeout = 5 * time.Second
|
||||||
)
|
)
|
||||||
|
|
||||||
// Daemon is a helper for appending [DaemonOp] to [Ops].
|
// Daemon appends an [Op] that starts a daemon in the container and blocks until
|
||||||
|
// [DaemonOp.Target] appears.
|
||||||
func (f *Ops) Daemon(target, path *check.Absolute, args ...string) *Ops {
|
func (f *Ops) Daemon(target, path *check.Absolute, args ...string) *Ops {
|
||||||
*f = append(*f, &DaemonOp{target, path, args})
|
*f = append(*f, &DaemonOp{target, path, args})
|
||||||
return f
|
return f
|
||||||
|
|||||||
@@ -19,9 +19,7 @@ func (f *Ops) Dev(target *check.Absolute, mqueue bool) *Ops {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// DevWritable appends an [Op] that mounts a writable subset of host /dev.
|
// DevWritable appends an [Op] that mounts a writable subset of host /dev.
|
||||||
//
|
// There is usually no good reason to write to /dev, so this should always be followed by a [RemountOp].
|
||||||
// There is usually no good reason to write to /dev, so this should always be
|
|
||||||
// followed by a [RemountOp].
|
|
||||||
func (f *Ops) DevWritable(target *check.Absolute, mqueue bool) *Ops {
|
func (f *Ops) DevWritable(target *check.Absolute, mqueue bool) *Ops {
|
||||||
*f = append(*f, &MountDevOp{target, mqueue, true})
|
*f = append(*f, &MountDevOp{target, mqueue, true})
|
||||||
return f
|
return f
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ import (
|
|||||||
|
|
||||||
func init() { gob.Register(new(MkdirOp)) }
|
func init() { gob.Register(new(MkdirOp)) }
|
||||||
|
|
||||||
// Mkdir is a helper for appending [MkdirOp] to [Ops].
|
// Mkdir appends an [Op] that creates a directory in the container filesystem.
|
||||||
func (f *Ops) Mkdir(name *check.Absolute, perm os.FileMode) *Ops {
|
func (f *Ops) Mkdir(name *check.Absolute, perm os.FileMode) *Ops {
|
||||||
*f = append(*f, &MkdirOp{name, perm})
|
*f = append(*f, &MkdirOp{name, perm})
|
||||||
return f
|
return f
|
||||||
|
|||||||
@@ -54,11 +54,8 @@ func (e *OverlayArgumentError) Error() string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Overlay is a helper for appending [MountOverlayOp] to [Ops].
|
// Overlay appends an [Op] that mounts the overlay pseudo filesystem on [MountOverlayOp.Target].
|
||||||
func (f *Ops) Overlay(
|
func (f *Ops) Overlay(target, state, work *check.Absolute, layers ...*check.Absolute) *Ops {
|
||||||
target, state, work *check.Absolute,
|
|
||||||
layers ...*check.Absolute,
|
|
||||||
) *Ops {
|
|
||||||
*f = append(*f, &MountOverlayOp{
|
*f = append(*f, &MountOverlayOp{
|
||||||
Target: target,
|
Target: target,
|
||||||
Lower: layers,
|
Lower: layers,
|
||||||
@@ -68,12 +65,13 @@ func (f *Ops) Overlay(
|
|||||||
return f
|
return f
|
||||||
}
|
}
|
||||||
|
|
||||||
// OverlayEphemeral appends a [MountOverlayOp] with an ephemeral upperdir and workdir.
|
// OverlayEphemeral appends an [Op] that mounts the overlay pseudo filesystem on [MountOverlayOp.Target]
|
||||||
|
// with an ephemeral upperdir and workdir.
|
||||||
func (f *Ops) OverlayEphemeral(target *check.Absolute, layers ...*check.Absolute) *Ops {
|
func (f *Ops) OverlayEphemeral(target *check.Absolute, layers ...*check.Absolute) *Ops {
|
||||||
return f.Overlay(target, fhs.AbsRoot, nil, layers...)
|
return f.Overlay(target, fhs.AbsRoot, nil, layers...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// OverlayReadonly appends a readonly [MountOverlayOp].
|
// OverlayReadonly appends an [Op] that mounts the overlay pseudo filesystem readonly on [MountOverlayOp.Target]
|
||||||
func (f *Ops) OverlayReadonly(target *check.Absolute, layers ...*check.Absolute) *Ops {
|
func (f *Ops) OverlayReadonly(target *check.Absolute, layers ...*check.Absolute) *Ops {
|
||||||
return f.Overlay(target, nil, nil, layers...)
|
return f.Overlay(target, nil, nil, layers...)
|
||||||
}
|
}
|
||||||
@@ -84,34 +82,25 @@ type MountOverlayOp struct {
|
|||||||
|
|
||||||
// Any filesystem, does not need to be on a writable filesystem.
|
// Any filesystem, does not need to be on a writable filesystem.
|
||||||
Lower []*check.Absolute
|
Lower []*check.Absolute
|
||||||
// Formatted for [OptionOverlayLowerdir].
|
// formatted for [OptionOverlayLowerdir], resolved, prefixed and escaped during early
|
||||||
//
|
|
||||||
// Resolved, prefixed and escaped during early.
|
|
||||||
lower []string
|
lower []string
|
||||||
|
|
||||||
// The upperdir is normally on a writable filesystem.
|
// The upperdir is normally on a writable filesystem.
|
||||||
//
|
//
|
||||||
// If Work is nil and Upper holds the special value [fhs.AbsRoot], an
|
// If Work is nil and Upper holds the special value [fhs.AbsRoot],
|
||||||
// ephemeral upperdir and workdir will be set up.
|
// an ephemeral upperdir and workdir will be set up.
|
||||||
//
|
//
|
||||||
// If both Work and Upper are nil, upperdir and workdir is omitted and the
|
// If both Work and Upper are nil, upperdir and workdir is omitted and the overlay is mounted readonly.
|
||||||
// overlay is mounted readonly.
|
|
||||||
Upper *check.Absolute
|
Upper *check.Absolute
|
||||||
// Formatted for [OptionOverlayUpperdir].
|
// formatted for [OptionOverlayUpperdir], resolved, prefixed and escaped during early
|
||||||
//
|
|
||||||
// Resolved, prefixed and escaped during early.
|
|
||||||
upper string
|
upper string
|
||||||
|
|
||||||
// The workdir needs to be an empty directory on the same filesystem as upperdir.
|
// The workdir needs to be an empty directory on the same filesystem as upperdir.
|
||||||
Work *check.Absolute
|
Work *check.Absolute
|
||||||
// Formatted for [OptionOverlayWorkdir].
|
// formatted for [OptionOverlayWorkdir], resolved, prefixed and escaped during early
|
||||||
//
|
|
||||||
// Resolved, prefixed and escaped during early.
|
|
||||||
work string
|
work string
|
||||||
|
|
||||||
ephemeral bool
|
ephemeral bool
|
||||||
|
|
||||||
// Used internally for mounting to the intermediate root.
|
// used internally for mounting to the intermediate root
|
||||||
noPrefix bool
|
noPrefix bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ const (
|
|||||||
|
|
||||||
func init() { gob.Register(new(TmpfileOp)) }
|
func init() { gob.Register(new(TmpfileOp)) }
|
||||||
|
|
||||||
// Place is a helper for appending [TmpfileOp] to [Ops].
|
// Place appends an [Op] that places a file in container path [TmpfileOp.Path] containing [TmpfileOp.Data].
|
||||||
func (f *Ops) Place(name *check.Absolute, data []byte) *Ops {
|
func (f *Ops) Place(name *check.Absolute, data []byte) *Ops {
|
||||||
*f = append(*f, &TmpfileOp{name, data})
|
*f = append(*f, &TmpfileOp{name, data})
|
||||||
return f
|
return f
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ func TestTmpfileOp(t *testing.T) {
|
|||||||
Path: samplePath,
|
Path: samplePath,
|
||||||
Data: sampleData,
|
Data: sampleData,
|
||||||
}, nil, nil, []stub.Call{
|
}, nil, nil, []stub.Call{
|
||||||
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, (*checkedOsFile)(nil), stub.UniqueError(5)),
|
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.32768", sampleDataString, nil), stub.UniqueError(5)),
|
||||||
}, stub.UniqueError(5)},
|
}, stub.UniqueError(5)},
|
||||||
|
|
||||||
{"Write", &Params{ParentPerm: 0700}, &TmpfileOp{
|
{"Write", &Params{ParentPerm: 0700}, &TmpfileOp{
|
||||||
@@ -35,14 +35,14 @@ func TestTmpfileOp(t *testing.T) {
|
|||||||
Path: samplePath,
|
Path: samplePath,
|
||||||
Data: sampleData,
|
Data: sampleData,
|
||||||
}, nil, nil, []stub.Call{
|
}, nil, nil, []stub.Call{
|
||||||
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.Close", sampleDataString, stub.UniqueError(3)), nil),
|
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.32768", sampleDataString, stub.UniqueError(3)), nil),
|
||||||
}, stub.UniqueError(3)},
|
}, stub.UniqueError(3)},
|
||||||
|
|
||||||
{"ensureFile", &Params{ParentPerm: 0700}, &TmpfileOp{
|
{"ensureFile", &Params{ParentPerm: 0700}, &TmpfileOp{
|
||||||
Path: samplePath,
|
Path: samplePath,
|
||||||
Data: sampleData,
|
Data: sampleData,
|
||||||
}, nil, nil, []stub.Call{
|
}, nil, nil, []stub.Call{
|
||||||
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.ensureFile", sampleDataString, nil), nil),
|
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.32768", sampleDataString, nil), nil),
|
||||||
call("ensureFile", stub.ExpectArgs{"/sysroot/etc/passwd", os.FileMode(0444), os.FileMode(0700)}, nil, stub.UniqueError(2)),
|
call("ensureFile", stub.ExpectArgs{"/sysroot/etc/passwd", os.FileMode(0444), os.FileMode(0700)}, nil, stub.UniqueError(2)),
|
||||||
}, stub.UniqueError(2)},
|
}, stub.UniqueError(2)},
|
||||||
|
|
||||||
@@ -50,29 +50,29 @@ func TestTmpfileOp(t *testing.T) {
|
|||||||
Path: samplePath,
|
Path: samplePath,
|
||||||
Data: sampleData,
|
Data: sampleData,
|
||||||
}, nil, nil, []stub.Call{
|
}, nil, nil, []stub.Call{
|
||||||
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.bindMount", sampleDataString, nil), nil),
|
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.32768", sampleDataString, nil), nil),
|
||||||
call("ensureFile", stub.ExpectArgs{"/sysroot/etc/passwd", os.FileMode(0444), os.FileMode(0700)}, nil, nil),
|
call("ensureFile", stub.ExpectArgs{"/sysroot/etc/passwd", os.FileMode(0444), os.FileMode(0700)}, nil, nil),
|
||||||
call("bindMount", stub.ExpectArgs{"tmp.bindMount", "/sysroot/etc/passwd", uintptr(0x5), false}, nil, stub.UniqueError(1)),
|
call("bindMount", stub.ExpectArgs{"tmp.32768", "/sysroot/etc/passwd", uintptr(0x5), false}, nil, stub.UniqueError(1)),
|
||||||
}, stub.UniqueError(1)},
|
}, stub.UniqueError(1)},
|
||||||
|
|
||||||
{"remove", &Params{ParentPerm: 0700}, &TmpfileOp{
|
{"remove", &Params{ParentPerm: 0700}, &TmpfileOp{
|
||||||
Path: samplePath,
|
Path: samplePath,
|
||||||
Data: sampleData,
|
Data: sampleData,
|
||||||
}, nil, nil, []stub.Call{
|
}, nil, nil, []stub.Call{
|
||||||
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.remove", sampleDataString, nil), nil),
|
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.32768", sampleDataString, nil), nil),
|
||||||
call("ensureFile", stub.ExpectArgs{"/sysroot/etc/passwd", os.FileMode(0444), os.FileMode(0700)}, nil, nil),
|
call("ensureFile", stub.ExpectArgs{"/sysroot/etc/passwd", os.FileMode(0444), os.FileMode(0700)}, nil, nil),
|
||||||
call("bindMount", stub.ExpectArgs{"tmp.remove", "/sysroot/etc/passwd", uintptr(0x5), false}, nil, nil),
|
call("bindMount", stub.ExpectArgs{"tmp.32768", "/sysroot/etc/passwd", uintptr(0x5), false}, nil, nil),
|
||||||
call("remove", stub.ExpectArgs{"tmp.remove"}, nil, stub.UniqueError(0)),
|
call("remove", stub.ExpectArgs{"tmp.32768"}, nil, stub.UniqueError(0)),
|
||||||
}, stub.UniqueError(0)},
|
}, stub.UniqueError(0)},
|
||||||
|
|
||||||
{"success", &Params{ParentPerm: 0700}, &TmpfileOp{
|
{"success", &Params{ParentPerm: 0700}, &TmpfileOp{
|
||||||
Path: samplePath,
|
Path: samplePath,
|
||||||
Data: sampleData,
|
Data: sampleData,
|
||||||
}, nil, nil, []stub.Call{
|
}, nil, nil, []stub.Call{
|
||||||
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.success", sampleDataString, nil), nil),
|
call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.32768", sampleDataString, nil), nil),
|
||||||
call("ensureFile", stub.ExpectArgs{"/sysroot/etc/passwd", os.FileMode(0444), os.FileMode(0700)}, nil, nil),
|
call("ensureFile", stub.ExpectArgs{"/sysroot/etc/passwd", os.FileMode(0444), os.FileMode(0700)}, nil, nil),
|
||||||
call("bindMount", stub.ExpectArgs{"tmp.success", "/sysroot/etc/passwd", uintptr(0x5), false}, nil, nil),
|
call("bindMount", stub.ExpectArgs{"tmp.32768", "/sysroot/etc/passwd", uintptr(0x5), false}, nil, nil),
|
||||||
call("remove", stub.ExpectArgs{"tmp.success"}, nil, nil),
|
call("remove", stub.ExpectArgs{"tmp.32768"}, nil, nil),
|
||||||
}, nil},
|
}, nil},
|
||||||
})
|
})
|
||||||
|
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ import (
|
|||||||
|
|
||||||
func init() { gob.Register(new(MountProcOp)) }
|
func init() { gob.Register(new(MountProcOp)) }
|
||||||
|
|
||||||
// Proc is a helper for appending [MountProcOp] to [Ops].
|
// Proc appends an [Op] that mounts a private instance of proc.
|
||||||
func (f *Ops) Proc(target *check.Absolute) *Ops {
|
func (f *Ops) Proc(target *check.Absolute) *Ops {
|
||||||
*f = append(*f, &MountProcOp{target})
|
*f = append(*f, &MountProcOp{target})
|
||||||
return f
|
return f
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ import (
|
|||||||
|
|
||||||
func init() { gob.Register(new(RemountOp)) }
|
func init() { gob.Register(new(RemountOp)) }
|
||||||
|
|
||||||
// Remount is a helper for appending [RemountOp] to [Ops].
|
// Remount appends an [Op] that applies [RemountOp.Flags] on container path [RemountOp.Target].
|
||||||
func (f *Ops) Remount(target *check.Absolute, flags uintptr) *Ops {
|
func (f *Ops) Remount(target *check.Absolute, flags uintptr) *Ops {
|
||||||
*f = append(*f, &RemountOp{target, flags})
|
*f = append(*f, &RemountOp{target, flags})
|
||||||
return f
|
return f
|
||||||
|
|||||||
@@ -38,7 +38,6 @@ const (
|
|||||||
_LANDLOCK_ACCESS_FS_DELIM
|
_LANDLOCK_ACCESS_FS_DELIM
|
||||||
)
|
)
|
||||||
|
|
||||||
// String returns a space-separated string of [LandlockAccessFS] flags.
|
|
||||||
func (f LandlockAccessFS) String() string {
|
func (f LandlockAccessFS) String() string {
|
||||||
switch f {
|
switch f {
|
||||||
case LANDLOCK_ACCESS_FS_EXECUTE:
|
case LANDLOCK_ACCESS_FS_EXECUTE:
|
||||||
@@ -117,7 +116,6 @@ const (
|
|||||||
_LANDLOCK_ACCESS_NET_DELIM
|
_LANDLOCK_ACCESS_NET_DELIM
|
||||||
)
|
)
|
||||||
|
|
||||||
// String returns a space-separated string of [LandlockAccessNet] flags.
|
|
||||||
func (f LandlockAccessNet) String() string {
|
func (f LandlockAccessNet) String() string {
|
||||||
switch f {
|
switch f {
|
||||||
case LANDLOCK_ACCESS_NET_BIND_TCP:
|
case LANDLOCK_ACCESS_NET_BIND_TCP:
|
||||||
@@ -154,7 +152,6 @@ const (
|
|||||||
_LANDLOCK_SCOPE_DELIM
|
_LANDLOCK_SCOPE_DELIM
|
||||||
)
|
)
|
||||||
|
|
||||||
// String returns a space-separated string of [LandlockScope] flags.
|
|
||||||
func (f LandlockScope) String() string {
|
func (f LandlockScope) String() string {
|
||||||
switch f {
|
switch f {
|
||||||
case LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET:
|
case LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET:
|
||||||
@@ -187,12 +184,10 @@ type RulesetAttr struct {
|
|||||||
HandledAccessFS LandlockAccessFS
|
HandledAccessFS LandlockAccessFS
|
||||||
// Bitmask of handled network actions.
|
// Bitmask of handled network actions.
|
||||||
HandledAccessNet LandlockAccessNet
|
HandledAccessNet LandlockAccessNet
|
||||||
// Bitmask of scopes restricting a Landlock domain from accessing outside
|
// Bitmask of scopes restricting a Landlock domain from accessing outside resources (e.g. IPCs).
|
||||||
// resources (e.g. IPCs).
|
|
||||||
Scoped LandlockScope
|
Scoped LandlockScope
|
||||||
}
|
}
|
||||||
|
|
||||||
// String returns a user-facing description of [RulesetAttr].
|
|
||||||
func (rulesetAttr *RulesetAttr) String() string {
|
func (rulesetAttr *RulesetAttr) String() string {
|
||||||
if rulesetAttr == nil {
|
if rulesetAttr == nil {
|
||||||
return "NULL"
|
return "NULL"
|
||||||
@@ -213,7 +208,6 @@ func (rulesetAttr *RulesetAttr) String() string {
|
|||||||
return strings.Join(elems, ", ")
|
return strings.Join(elems, ", ")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create loads the ruleset into the kernel.
|
|
||||||
func (rulesetAttr *RulesetAttr) Create(flags uintptr) (fd int, err error) {
|
func (rulesetAttr *RulesetAttr) Create(flags uintptr) (fd int, err error) {
|
||||||
var pointer, size uintptr
|
var pointer, size uintptr
|
||||||
// NULL needed for abi version
|
// NULL needed for abi version
|
||||||
@@ -222,13 +216,10 @@ func (rulesetAttr *RulesetAttr) Create(flags uintptr) (fd int, err error) {
|
|||||||
size = unsafe.Sizeof(*rulesetAttr)
|
size = unsafe.Sizeof(*rulesetAttr)
|
||||||
}
|
}
|
||||||
|
|
||||||
rulesetFd, _, errno := syscall.Syscall(
|
rulesetFd, _, errno := syscall.Syscall(std.SYS_LANDLOCK_CREATE_RULESET, pointer, size, flags)
|
||||||
std.SYS_LANDLOCK_CREATE_RULESET,
|
|
||||||
pointer, size,
|
|
||||||
flags,
|
|
||||||
)
|
|
||||||
fd = int(rulesetFd)
|
fd = int(rulesetFd)
|
||||||
err = errno
|
err = errno
|
||||||
|
|
||||||
if fd < 0 {
|
if fd < 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -239,19 +230,12 @@ func (rulesetAttr *RulesetAttr) Create(flags uintptr) (fd int, err error) {
|
|||||||
return fd, nil
|
return fd, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// LandlockGetABI returns the ABI version supported by the kernel.
|
|
||||||
func LandlockGetABI() (int, error) {
|
func LandlockGetABI() (int, error) {
|
||||||
return (*RulesetAttr)(nil).Create(LANDLOCK_CREATE_RULESET_VERSION)
|
return (*RulesetAttr)(nil).Create(LANDLOCK_CREATE_RULESET_VERSION)
|
||||||
}
|
}
|
||||||
|
|
||||||
// LandlockRestrictSelf applies a loaded ruleset to the calling thread.
|
|
||||||
func LandlockRestrictSelf(rulesetFd int, flags uintptr) error {
|
func LandlockRestrictSelf(rulesetFd int, flags uintptr) error {
|
||||||
r, _, errno := syscall.Syscall(
|
r, _, errno := syscall.Syscall(std.SYS_LANDLOCK_RESTRICT_SELF, uintptr(rulesetFd), flags, 0)
|
||||||
std.SYS_LANDLOCK_RESTRICT_SELF,
|
|
||||||
uintptr(rulesetFd),
|
|
||||||
flags,
|
|
||||||
0,
|
|
||||||
)
|
|
||||||
if r != 0 {
|
if r != 0 {
|
||||||
return errno
|
return errno
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -99,7 +99,7 @@ done:
|
|||||||
}
|
}
|
||||||
if m.Header.Type == NLMSG_ERROR {
|
if m.Header.Type == NLMSG_ERROR {
|
||||||
if len(m.Data) >= 4 {
|
if len(m.Data) >= 4 {
|
||||||
errno := Errno(-std.Int(binary.NativeEndian.Uint32(m.Data)))
|
errno := Errno(-std.ScmpInt(binary.NativeEndian.Uint32(m.Data)))
|
||||||
if errno == 0 {
|
if errno == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -15,10 +15,7 @@ import (
|
|||||||
|
|
||||||
const (
|
const (
|
||||||
// Nonexistent is a path that cannot exist.
|
// Nonexistent is a path that cannot exist.
|
||||||
//
|
// /proc is chosen because a system with covered /proc is unsupported by this package.
|
||||||
// This path can never be presented by the kernel if proc is mounted on
|
|
||||||
// /proc/. This can only exist if parts of /proc/ is covered, or proc is not
|
|
||||||
// mounted at all. Neither configuration is supported by this package.
|
|
||||||
Nonexistent = fhs.Proc + "nonexistent"
|
Nonexistent = fhs.Proc + "nonexistent"
|
||||||
|
|
||||||
hostPath = fhs.Root + hostDir
|
hostPath = fhs.Root + hostDir
|
||||||
|
|||||||
@@ -88,22 +88,18 @@ var resPrefix = [...]string{
|
|||||||
7: "seccomp_load failed",
|
7: "seccomp_load failed",
|
||||||
}
|
}
|
||||||
|
|
||||||
// cbAllocateBuffer is the function signature for the function handle passed to
|
// cbAllocateBuffer is the function signature for the function handle passed to hakurei_export_filter
|
||||||
// hakurei_scmp_make_filter which allocates the buffer that the resulting bpf
|
// which allocates the buffer that the resulting bpf program is copied into, and writes its slice header
|
||||||
// program is copied into, and writes its slice header to a value held by the caller.
|
// to a value held by the caller.
|
||||||
type cbAllocateBuffer = func(len C.size_t) (buf unsafe.Pointer)
|
type cbAllocateBuffer = func(len C.size_t) (buf unsafe.Pointer)
|
||||||
|
|
||||||
// hakurei_scmp_allocate allocates a buffer of specified size known to the
|
|
||||||
// runtime through a callback passed in a [cgo.Handle].
|
|
||||||
//
|
|
||||||
//export hakurei_scmp_allocate
|
//export hakurei_scmp_allocate
|
||||||
func hakurei_scmp_allocate(f C.uintptr_t, len C.size_t) (buf unsafe.Pointer) {
|
func hakurei_scmp_allocate(f C.uintptr_t, len C.size_t) (buf unsafe.Pointer) {
|
||||||
return cgo.Handle(f).Value().(cbAllocateBuffer)(len)
|
return cgo.Handle(f).Value().(cbAllocateBuffer)(len)
|
||||||
}
|
}
|
||||||
|
|
||||||
// makeFilter generates a bpf program from a slice of [std.NativeRule] and
|
// makeFilter generates a bpf program from a slice of [std.NativeRule] and writes the resulting byte slice to p.
|
||||||
// writes the resulting byte slice to p. The filter is installed to the current
|
// The filter is installed to the current process if p is nil.
|
||||||
// process if p is nil.
|
|
||||||
func makeFilter(rules []std.NativeRule, flags ExportFlag, p *[]byte) error {
|
func makeFilter(rules []std.NativeRule, flags ExportFlag, p *[]byte) error {
|
||||||
if len(rules) == 0 {
|
if len(rules) == 0 {
|
||||||
return ErrInvalidRules
|
return ErrInvalidRules
|
||||||
@@ -174,8 +170,8 @@ func Export(rules []std.NativeRule, flags ExportFlag) (data []byte, err error) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Load generates a bpf program from a slice of [std.NativeRule] and enforces it
|
// Load generates a bpf program from a slice of [std.NativeRule] and enforces it on the current process.
|
||||||
// on the current process. Errors returned by libseccomp is wrapped in [LibraryError].
|
// Errors returned by libseccomp is wrapped in [LibraryError].
|
||||||
func Load(rules []std.NativeRule, flags ExportFlag) error { return makeFilter(rules, flags, nil) }
|
func Load(rules []std.NativeRule, flags ExportFlag) error { return makeFilter(rules, flags, nil) }
|
||||||
|
|
||||||
type (
|
type (
|
||||||
|
|||||||
@@ -24,8 +24,8 @@ func TestSyscallResolveName(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestRuleType(t *testing.T) {
|
func TestRuleType(t *testing.T) {
|
||||||
assertKind[std.Uint, scmpUint](t)
|
assertKind[std.ScmpUint, scmpUint](t)
|
||||||
assertKind[std.Int, scmpInt](t)
|
assertKind[std.ScmpInt, scmpInt](t)
|
||||||
|
|
||||||
assertSize[std.NativeRule, syscallRule](t)
|
assertSize[std.NativeRule, syscallRule](t)
|
||||||
assertKind[std.ScmpDatum, scmpDatum](t)
|
assertKind[std.ScmpDatum, scmpDatum](t)
|
||||||
|
|||||||
@@ -7,28 +7,24 @@ import (
|
|||||||
|
|
||||||
type (
|
type (
|
||||||
// ScmpUint is equivalent to C.uint.
|
// ScmpUint is equivalent to C.uint.
|
||||||
//
|
ScmpUint uint32
|
||||||
// Deprecated: This type has been renamed to Uint and will be removed in 0.4.
|
|
||||||
ScmpUint = Uint
|
|
||||||
// ScmpInt is equivalent to C.int.
|
// ScmpInt is equivalent to C.int.
|
||||||
//
|
ScmpInt int32
|
||||||
// Deprecated: This type has been renamed to Int and will be removed in 0.4.
|
|
||||||
ScmpInt = Int
|
|
||||||
|
|
||||||
// ScmpSyscall represents a syscall number passed to libseccomp via [NativeRule.Syscall].
|
// ScmpSyscall represents a syscall number passed to libseccomp via [NativeRule.Syscall].
|
||||||
ScmpSyscall Int
|
ScmpSyscall ScmpInt
|
||||||
// ScmpErrno represents an errno value passed to libseccomp via [NativeRule.Errno].
|
// ScmpErrno represents an errno value passed to libseccomp via [NativeRule.Errno].
|
||||||
ScmpErrno Int
|
ScmpErrno ScmpInt
|
||||||
|
|
||||||
// ScmpCompare is equivalent to enum scmp_compare;
|
// ScmpCompare is equivalent to enum scmp_compare;
|
||||||
ScmpCompare Uint
|
ScmpCompare ScmpUint
|
||||||
// ScmpDatum is equivalent to scmp_datum_t.
|
// ScmpDatum is equivalent to scmp_datum_t.
|
||||||
ScmpDatum uint64
|
ScmpDatum uint64
|
||||||
|
|
||||||
// ScmpArgCmp is equivalent to struct scmp_arg_cmp.
|
// ScmpArgCmp is equivalent to struct scmp_arg_cmp.
|
||||||
ScmpArgCmp struct {
|
ScmpArgCmp struct {
|
||||||
// argument number, starting at 0
|
// argument number, starting at 0
|
||||||
Arg Uint `json:"arg"`
|
Arg ScmpUint `json:"arg"`
|
||||||
// the comparison op, e.g. SCMP_CMP_*
|
// the comparison op, e.g. SCMP_CMP_*
|
||||||
Op ScmpCompare `json:"op"`
|
Op ScmpCompare `json:"op"`
|
||||||
|
|
||||||
|
|||||||
@@ -1,8 +0,0 @@
|
|||||||
package std
|
|
||||||
|
|
||||||
type (
|
|
||||||
// Uint is equivalent to C.uint.
|
|
||||||
Uint uint32
|
|
||||||
// Int is equivalent to C.int.
|
|
||||||
Int int32
|
|
||||||
)
|
|
||||||
@@ -3,8 +3,6 @@ package container
|
|||||||
import (
|
import (
|
||||||
. "syscall"
|
. "syscall"
|
||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
"hakurei.app/container/std"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Prctl manipulates various aspects of the behavior of the calling thread or process.
|
// Prctl manipulates various aspects of the behavior of the calling thread or process.
|
||||||
@@ -43,49 +41,6 @@ func Isatty(fd int) bool {
|
|||||||
return r == 0
|
return r == 0
|
||||||
}
|
}
|
||||||
|
|
||||||
// include/uapi/linux/sched.h
|
|
||||||
const (
|
|
||||||
SCHED_NORMAL = iota
|
|
||||||
SCHED_FIFO
|
|
||||||
SCHED_RR
|
|
||||||
SCHED_BATCH
|
|
||||||
_ // SCHED_ISO: reserved but not implemented yet
|
|
||||||
SCHED_IDLE
|
|
||||||
SCHED_DEADLINE
|
|
||||||
SCHED_EXT
|
|
||||||
)
|
|
||||||
|
|
||||||
// schedParam is equivalent to struct sched_param from include/linux/sched.h.
|
|
||||||
type schedParam struct {
|
|
||||||
// sched_priority
|
|
||||||
priority std.Int
|
|
||||||
}
|
|
||||||
|
|
||||||
// schedSetscheduler sets both the scheduling policy and parameters for the
|
|
||||||
// thread whose ID is specified in tid. If tid equals zero, the scheduling
|
|
||||||
// policy and parameters of the calling thread will be set.
|
|
||||||
//
|
|
||||||
// This function is unexported because it is [very subtle to use correctly]. The
|
|
||||||
// function signature in libc is misleading: pid actually refers to a thread ID.
|
|
||||||
// The glibc wrapper for this system call ignores this semantic and exposes
|
|
||||||
// this counterintuitive behaviour.
|
|
||||||
//
|
|
||||||
// This function is only called from the container setup thread. Do not reuse
|
|
||||||
// this if you do not have something similar in place!
|
|
||||||
//
|
|
||||||
// [very subtle to use correctly]: https://www.openwall.com/lists/musl/2016/03/01/4
|
|
||||||
func schedSetscheduler(tid, policy int, param *schedParam) error {
|
|
||||||
if r, _, errno := Syscall(
|
|
||||||
SYS_SCHED_SETSCHEDULER,
|
|
||||||
uintptr(tid),
|
|
||||||
uintptr(policy),
|
|
||||||
uintptr(unsafe.Pointer(param)),
|
|
||||||
); r < 0 {
|
|
||||||
return errno
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// IgnoringEINTR makes a function call and repeats it if it returns an
|
// IgnoringEINTR makes a function call and repeats it if it returns an
|
||||||
// EINTR error. This appears to be required even though we install all
|
// EINTR error. This appears to be required even though we install all
|
||||||
// signal handlers with SA_RESTART: see #22838, #38033, #38836, #40846.
|
// signal handlers with SA_RESTART: see #22838, #38033, #38836, #40846.
|
||||||
|
|||||||
@@ -2,8 +2,6 @@ package vfs
|
|||||||
|
|
||||||
import "strings"
|
import "strings"
|
||||||
|
|
||||||
// Unmangle reverses mangling of strings done by the kernel. Its behaviour is
|
|
||||||
// consistent with the equivalent function in util-linux.
|
|
||||||
func Unmangle(s string) string {
|
func Unmangle(s string) string {
|
||||||
if !strings.ContainsRune(s, '\\') {
|
if !strings.ContainsRune(s, '\\') {
|
||||||
return s
|
return s
|
||||||
|
|||||||
@@ -24,7 +24,6 @@ var (
|
|||||||
ErrMountInfoSep = errors.New("bad optional fields separator")
|
ErrMountInfoSep = errors.New("bad optional fields separator")
|
||||||
)
|
)
|
||||||
|
|
||||||
// A DecoderError describes a nonrecoverable error decoding a mountinfo stream.
|
|
||||||
type DecoderError struct {
|
type DecoderError struct {
|
||||||
Op string
|
Op string
|
||||||
Line int
|
Line int
|
||||||
@@ -52,8 +51,7 @@ func (e *DecoderError) Error() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type (
|
type (
|
||||||
// A MountInfoDecoder reads and decodes proc_pid_mountinfo(5) entries from
|
// A MountInfoDecoder reads and decodes proc_pid_mountinfo(5) entries from an input stream.
|
||||||
// an input stream.
|
|
||||||
MountInfoDecoder struct {
|
MountInfoDecoder struct {
|
||||||
s *bufio.Scanner
|
s *bufio.Scanner
|
||||||
m *MountInfo
|
m *MountInfo
|
||||||
@@ -74,16 +72,13 @@ type (
|
|||||||
MountInfoEntry struct {
|
MountInfoEntry struct {
|
||||||
// mount ID: a unique ID for the mount (may be reused after umount(2)).
|
// mount ID: a unique ID for the mount (may be reused after umount(2)).
|
||||||
ID int `json:"id"`
|
ID int `json:"id"`
|
||||||
// parent ID: the ID of the parent mount (or of self for the root of
|
// parent ID: the ID of the parent mount (or of self for the root of this mount namespace's mount tree).
|
||||||
// this mount namespace's mount tree).
|
|
||||||
Parent int `json:"parent"`
|
Parent int `json:"parent"`
|
||||||
// major:minor: the value of st_dev for files on this filesystem (see stat(2)).
|
// major:minor: the value of st_dev for files on this filesystem (see stat(2)).
|
||||||
Devno DevT `json:"devno"`
|
Devno DevT `json:"devno"`
|
||||||
// root: the pathname of the directory in the filesystem which forms the
|
// root: the pathname of the directory in the filesystem which forms the root of this mount.
|
||||||
// root of this mount.
|
|
||||||
Root string `json:"root"`
|
Root string `json:"root"`
|
||||||
// mount point: the pathname of the mount point relative to the
|
// mount point: the pathname of the mount point relative to the process's root directory.
|
||||||
// process's root directory.
|
|
||||||
Target string `json:"target"`
|
Target string `json:"target"`
|
||||||
// mount options: per-mount options (see mount(2)).
|
// mount options: per-mount options (see mount(2)).
|
||||||
VfsOptstr string `json:"vfs_optstr"`
|
VfsOptstr string `json:"vfs_optstr"`
|
||||||
@@ -131,8 +126,7 @@ func (e *MountInfoEntry) Flags() (flags uintptr, unmatched []string) {
|
|||||||
|
|
||||||
// NewMountInfoDecoder returns a new decoder that reads from r.
|
// NewMountInfoDecoder returns a new decoder that reads from r.
|
||||||
//
|
//
|
||||||
// The decoder introduces its own buffering and may read data from r beyond the
|
// The decoder introduces its own buffering and may read data from r beyond the mountinfo entries requested.
|
||||||
// mountinfo entries requested.
|
|
||||||
func NewMountInfoDecoder(r io.Reader) *MountInfoDecoder {
|
func NewMountInfoDecoder(r io.Reader) *MountInfoDecoder {
|
||||||
return &MountInfoDecoder{s: bufio.NewScanner(r)}
|
return &MountInfoDecoder{s: bufio.NewScanner(r)}
|
||||||
}
|
}
|
||||||
@@ -277,8 +271,6 @@ func parseMountInfoLine(s string, ent *MountInfoEntry) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// EqualWithIgnore compares to [MountInfoEntry] values, ignoring fields that
|
|
||||||
// compare equal to ignore.
|
|
||||||
func (e *MountInfoEntry) EqualWithIgnore(want *MountInfoEntry, ignore string) bool {
|
func (e *MountInfoEntry) EqualWithIgnore(want *MountInfoEntry, ignore string) bool {
|
||||||
return (e.ID == want.ID || want.ID == -1) &&
|
return (e.ID == want.ID || want.ID == -1) &&
|
||||||
(e.Parent == want.Parent || want.Parent == -1) &&
|
(e.Parent == want.Parent || want.Parent == -1) &&
|
||||||
@@ -292,8 +284,6 @@ func (e *MountInfoEntry) EqualWithIgnore(want *MountInfoEntry, ignore string) bo
|
|||||||
(e.FsOptstr == want.FsOptstr || want.FsOptstr == ignore)
|
(e.FsOptstr == want.FsOptstr || want.FsOptstr == ignore)
|
||||||
}
|
}
|
||||||
|
|
||||||
// String returns a user-facing representation of a [MountInfoEntry]. It fits
|
|
||||||
// roughly into the mountinfo format, but without mangling.
|
|
||||||
func (e *MountInfoEntry) String() string {
|
func (e *MountInfoEntry) String() string {
|
||||||
return fmt.Sprintf("%d %d %d:%d %s %s %s %s %s %s %s",
|
return fmt.Sprintf("%d %d %d:%d %s %s %s %s %s %s %s",
|
||||||
e.ID, e.Parent, e.Devno[0], e.Devno[1], e.Root, e.Target, e.VfsOptstr,
|
e.ID, e.Parent, e.Devno[0], e.Devno[1], e.Root, e.Target, e.VfsOptstr,
|
||||||
|
|||||||
@@ -6,7 +6,6 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
// UnfoldTargetError is a pathname that never appeared in a mount hierarchy.
|
|
||||||
type UnfoldTargetError string
|
type UnfoldTargetError string
|
||||||
|
|
||||||
func (e UnfoldTargetError) Error() string {
|
func (e UnfoldTargetError) Error() string {
|
||||||
@@ -28,7 +27,6 @@ func (n *MountInfoNode) Collective() iter.Seq[*MountInfoNode] {
|
|||||||
return func(yield func(*MountInfoNode) bool) { n.visit(yield) }
|
return func(yield func(*MountInfoNode) bool) { n.visit(yield) }
|
||||||
}
|
}
|
||||||
|
|
||||||
// visit recursively visits all visible mountinfo nodes.
|
|
||||||
func (n *MountInfoNode) visit(yield func(*MountInfoNode) bool) bool {
|
func (n *MountInfoNode) visit(yield func(*MountInfoNode) bool) bool {
|
||||||
if !n.Covered && !yield(n) {
|
if !n.Covered && !yield(n) {
|
||||||
return false
|
return false
|
||||||
|
|||||||
4
dist/release.sh
vendored
4
dist/release.sh
vendored
@@ -13,7 +13,7 @@ echo
|
|||||||
echo '# Building hakurei.'
|
echo '# Building hakurei.'
|
||||||
go generate ./...
|
go generate ./...
|
||||||
go build -trimpath -v -o "${out}/bin/" -ldflags "-s -w
|
go build -trimpath -v -o "${out}/bin/" -ldflags "-s -w
|
||||||
-buildid= -linkmode external -extldflags=-static
|
-buildid= -extldflags '-static'
|
||||||
-X hakurei.app/internal/info.buildVersion=${VERSION}
|
-X hakurei.app/internal/info.buildVersion=${VERSION}
|
||||||
-X hakurei.app/internal/info.hakureiPath=/usr/bin/hakurei
|
-X hakurei.app/internal/info.hakureiPath=/usr/bin/hakurei
|
||||||
-X hakurei.app/internal/info.hsuPath=/usr/bin/hsu
|
-X hakurei.app/internal/info.hsuPath=/usr/bin/hsu
|
||||||
@@ -21,7 +21,7 @@ go build -trimpath -v -o "${out}/bin/" -ldflags "-s -w
|
|||||||
echo
|
echo
|
||||||
|
|
||||||
echo '# Testing hakurei.'
|
echo '# Testing hakurei.'
|
||||||
go test -ldflags='-buildid= -linkmode external -extldflags=-static' ./...
|
go test -ldflags='-buildid= -extldflags=-static' ./...
|
||||||
echo
|
echo
|
||||||
|
|
||||||
echo '# Creating distribution.'
|
echo '# Creating distribution.'
|
||||||
|
|||||||
23
flake.nix
23
flake.nix
@@ -29,6 +29,20 @@
|
|||||||
{
|
{
|
||||||
nixosModules.hakurei = import ./nixos.nix self.packages;
|
nixosModules.hakurei = import ./nixos.nix self.packages;
|
||||||
|
|
||||||
|
buildPackage = forAllSystems (
|
||||||
|
system:
|
||||||
|
nixpkgsFor.${system}.callPackage (
|
||||||
|
import ./cmd/hpkg/build.nix {
|
||||||
|
inherit
|
||||||
|
nixpkgsFor
|
||||||
|
system
|
||||||
|
nixpkgs
|
||||||
|
home-manager
|
||||||
|
;
|
||||||
|
}
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
checks = forAllSystems (
|
checks = forAllSystems (
|
||||||
system:
|
system:
|
||||||
let
|
let
|
||||||
@@ -57,6 +71,8 @@
|
|||||||
|
|
||||||
sharefs = callPackage ./cmd/sharefs/test { inherit system self; };
|
sharefs = callPackage ./cmd/sharefs/test { inherit system self; };
|
||||||
|
|
||||||
|
hpkg = callPackage ./cmd/hpkg/test { inherit system self; };
|
||||||
|
|
||||||
formatting = runCommandLocal "check-formatting" { nativeBuildInputs = [ nixfmt-rfc-style ]; } ''
|
formatting = runCommandLocal "check-formatting" { nativeBuildInputs = [ nixfmt-rfc-style ]; } ''
|
||||||
cd ${./.}
|
cd ${./.}
|
||||||
|
|
||||||
@@ -111,6 +127,11 @@
|
|||||||
glibc
|
glibc
|
||||||
xdg-dbus-proxy
|
xdg-dbus-proxy
|
||||||
|
|
||||||
|
# hpkg
|
||||||
|
zstd
|
||||||
|
gnutar
|
||||||
|
coreutils
|
||||||
|
|
||||||
# for check
|
# for check
|
||||||
util-linux
|
util-linux
|
||||||
nettools
|
nettools
|
||||||
@@ -198,7 +219,7 @@
|
|||||||
./test/interactive/trace.nix
|
./test/interactive/trace.nix
|
||||||
|
|
||||||
self.nixosModules.hakurei
|
self.nixosModules.hakurei
|
||||||
home-manager.nixosModules.home-manager
|
self.inputs.home-manager.nixosModules.home-manager
|
||||||
];
|
];
|
||||||
};
|
};
|
||||||
in
|
in
|
||||||
|
|||||||
@@ -1,69 +0,0 @@
|
|||||||
//go:generate gocc -a azalea.bnf
|
|
||||||
package azalea
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"io/fs"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"hakurei.app/container/check"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Parser struct {
|
|
||||||
Generator
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewParser(gen Generator) *Parser {
|
|
||||||
return &Parser{
|
|
||||||
Generator: gen,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
func (p Parser) Initialise() {
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p Parser) Consume(ns string, file io.Reader) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConsumeDir walks a directory and consumes all Azalea source files within it and all its subdirectories, as long as they end with the .az extension.
|
|
||||||
func (p Parser) ConsumeDir(dir *check.Absolute) error {
|
|
||||||
ds := dir.String()
|
|
||||||
return filepath.WalkDir(ds, func(path string, d fs.DirEntry, err error) (e error) {
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if d.IsDir() || !strings.HasSuffix(d.Name(), ".az") {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
rel, e := filepath.Rel(ds, path)
|
|
||||||
ns := strings.TrimSuffix(rel, ".az")
|
|
||||||
f, e := os.Open(path)
|
|
||||||
return p.Consume(ns, f)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConsumeAll consumes all provided readers as Azalea source code, each given the namespace `r%d` where `%d` is the index of the reader in the provided arguments.
|
|
||||||
func (p Parser) ConsumeAll(in ...io.Reader) error {
|
|
||||||
for i, r := range in {
|
|
||||||
err := p.Consume("r"+strconv.FormatInt(int64(i), 10), r)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConsumeStrings consumes all provided strings as Azalea source code, each given the namespace `s%d` where `%d` is the index of the string in the provided arugments.
|
|
||||||
func (p Parser) ConsumeStrings(in ...string) error {
|
|
||||||
for i, s := range in {
|
|
||||||
err := p.Consume("s"+strconv.FormatInt(int64(i), 10), strings.NewReader(s))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,36 +0,0 @@
|
|||||||
package azalea
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Generator interface {
|
|
||||||
Finalise() (error, io.Writer)
|
|
||||||
}
|
|
||||||
|
|
||||||
type JsonGenerator struct {
|
|
||||||
t any
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewJsonGenerator[T any]() JsonGenerator {
|
|
||||||
t := new(T)
|
|
||||||
|
|
||||||
return JsonGenerator{
|
|
||||||
t,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (j *JsonGenerator) Finalise() (error, io.Writer) {
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
type PkgIRGenerator struct {
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewPkgIRGenerator() PkgIRGenerator {
|
|
||||||
return PkgIRGenerator{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *PkgIRGenerator) Finalise() (error, io.Writer) {
|
|
||||||
|
|
||||||
}
|
|
||||||
@@ -8,6 +8,7 @@
|
|||||||
package filelock
|
package filelock
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"io/fs"
|
"io/fs"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -73,3 +74,10 @@ func (lt lockType) String() string {
|
|||||||
return "Unlock"
|
return "Unlock"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IsNotSupported returns a boolean indicating whether the error is known to
|
||||||
|
// report that a function is not supported (possibly for a specific input).
|
||||||
|
// It is satisfied by errors.ErrUnsupported as well as some syscall errors.
|
||||||
|
func IsNotSupported(err error) bool {
|
||||||
|
return errors.Is(err, errors.ErrUnsupported)
|
||||||
|
}
|
||||||
|
|||||||
@@ -14,7 +14,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"hakurei.app/container/fhs"
|
"hakurei.app/container"
|
||||||
"hakurei.app/internal/lockedfile/internal/filelock"
|
"hakurei.app/internal/lockedfile/internal/filelock"
|
||||||
"hakurei.app/internal/lockedfile/internal/testexec"
|
"hakurei.app/internal/lockedfile/internal/testexec"
|
||||||
)
|
)
|
||||||
@@ -197,7 +197,7 @@ func TestLockNotDroppedByExecCommand(t *testing.T) {
|
|||||||
// Some kinds of file locks are dropped when a duplicated or forked file
|
// Some kinds of file locks are dropped when a duplicated or forked file
|
||||||
// descriptor is unlocked. Double-check that the approach used by os/exec does
|
// descriptor is unlocked. Double-check that the approach used by os/exec does
|
||||||
// not accidentally drop locks.
|
// not accidentally drop locks.
|
||||||
cmd := testexec.CommandContext(t, t.Context(), fhs.ProcSelfExe, "-test.run=^$")
|
cmd := testexec.CommandContext(t, t.Context(), container.MustExecutable(nil), "-test.run=^$")
|
||||||
if err := cmd.Run(); err != nil {
|
if err := cmd.Run(); err != nil {
|
||||||
t.Fatalf("exec failed: %v", err)
|
t.Fatalf("exec failed: %v", err)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -94,11 +94,6 @@ func (f *File) Close() error {
|
|||||||
|
|
||||||
err := closeFile(f.osFile.File)
|
err := closeFile(f.osFile.File)
|
||||||
f.cleanup.Stop()
|
f.cleanup.Stop()
|
||||||
// f may be dead at the moment after we access f.cleanup,
|
|
||||||
// so the cleanup can fire before Stop completes. Keep f
|
|
||||||
// alive while we call Stop. See the documentation for
|
|
||||||
// runtime.Cleanup.Stop.
|
|
||||||
runtime.KeepAlive(f)
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"hakurei.app/container/fhs"
|
"hakurei.app/container"
|
||||||
"hakurei.app/internal/lockedfile"
|
"hakurei.app/internal/lockedfile"
|
||||||
"hakurei.app/internal/lockedfile/internal/testexec"
|
"hakurei.app/internal/lockedfile/internal/testexec"
|
||||||
)
|
)
|
||||||
@@ -215,7 +215,7 @@ func TestSpuriousEDEADLK(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
cmd := testexec.CommandContext(t, t.Context(), fhs.ProcSelfExe, "-test.run=^"+t.Name()+"$")
|
cmd := testexec.CommandContext(t, t.Context(), container.MustExecutable(nil), "-test.run=^"+t.Name()+"$")
|
||||||
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=%s", dirVar, dir))
|
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=%s", dirVar, dir))
|
||||||
|
|
||||||
qDone := make(chan struct{})
|
qDone := make(chan struct{})
|
||||||
|
|||||||
216
internal/pkg/asm.go
Normal file
216
internal/pkg/asm.go
Normal file
@@ -0,0 +1,216 @@
|
|||||||
|
package pkg
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type asmOutLine struct {
|
||||||
|
pos int
|
||||||
|
word int
|
||||||
|
kindData int64
|
||||||
|
valueData []byte
|
||||||
|
indent int
|
||||||
|
kind string
|
||||||
|
value string
|
||||||
|
}
|
||||||
|
|
||||||
|
var spacingLine = asmOutLine{
|
||||||
|
pos: -1,
|
||||||
|
kindData: -1,
|
||||||
|
valueData: nil,
|
||||||
|
indent: 0,
|
||||||
|
kind: "",
|
||||||
|
value: "",
|
||||||
|
}
|
||||||
|
|
||||||
|
func Disassemble(r io.Reader, real bool, showHeader bool, force bool, raw bool) (s string, err error) {
|
||||||
|
var lines []asmOutLine
|
||||||
|
sb := new(strings.Builder)
|
||||||
|
header := true
|
||||||
|
pos := new(int)
|
||||||
|
|
||||||
|
for err == nil {
|
||||||
|
if header {
|
||||||
|
var kind uint64
|
||||||
|
var size uint64
|
||||||
|
var bsize []byte
|
||||||
|
p := *pos
|
||||||
|
if _, kind, err = nextUint64(r, pos); err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if bsize, size, err = nextUint64(r, pos); err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if showHeader {
|
||||||
|
lines = append(lines, asmOutLine{p, 8, int64(kind), bsize, 0, "head " + intToKind(kind), ""})
|
||||||
|
}
|
||||||
|
for i := 0; uint64(i) < size; i++ {
|
||||||
|
var did Checksum
|
||||||
|
var dkind uint64
|
||||||
|
p := *pos
|
||||||
|
if _, dkind, err = nextUint64(r, pos); err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if _, did, err = nextIdent(r, pos); err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if showHeader {
|
||||||
|
lines = append(lines, asmOutLine{p, 8, int64(dkind), nil, 1, intToKind(dkind), Encode(did)})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
header = false
|
||||||
|
}
|
||||||
|
var k uint32
|
||||||
|
p := *pos
|
||||||
|
if _, k, err = nextUint32(r, pos); err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
kind := IRValueKind(k)
|
||||||
|
switch kind {
|
||||||
|
case IRKindEnd:
|
||||||
|
var a uint32
|
||||||
|
var ba []byte
|
||||||
|
if ba, a, err = nextUint32(r, pos); err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if a&1 != 0 {
|
||||||
|
var sum Checksum
|
||||||
|
if _, sum, err = nextIdent(r, pos); err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
lines = append(lines, asmOutLine{p, 4, int64(kind), ba, 1, "end ", Encode(sum)})
|
||||||
|
} else {
|
||||||
|
lines = append(lines, asmOutLine{p, 4, int64(kind), []byte{0, 0, 0, 0}, 1, "end ", ""})
|
||||||
|
}
|
||||||
|
lines = append(lines, spacingLine)
|
||||||
|
header = true
|
||||||
|
continue
|
||||||
|
|
||||||
|
case IRKindIdent:
|
||||||
|
var a []byte
|
||||||
|
// discard ancillary
|
||||||
|
if a, _, err = nextUint32(r, pos); err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
var sum Checksum
|
||||||
|
if _, sum, err = nextIdent(r, pos); err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
lines = append(lines, asmOutLine{p, 4, int64(kind), a, 1, "id ", Encode(sum)})
|
||||||
|
continue
|
||||||
|
case IRKindUint32:
|
||||||
|
var i uint32
|
||||||
|
var bi []byte
|
||||||
|
if bi, i, err = nextUint32(r, pos); err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
lines = append(lines, asmOutLine{p, 4, int64(kind), bi, 1, "int ", strconv.FormatUint(uint64(i), 10)})
|
||||||
|
case IRKindString:
|
||||||
|
var l uint32
|
||||||
|
var bl []byte
|
||||||
|
if bl, l, err = nextUint32(r, pos); err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
s := make([]byte, l+(wordSize-(l)%wordSize)%wordSize)
|
||||||
|
var n int
|
||||||
|
if n, err = r.Read(s); err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
*pos = *pos + n
|
||||||
|
|
||||||
|
lines = append(lines, asmOutLine{p, 4, int64(kind), bl, 1, "str ", strconv.Quote(string(s[:l]))})
|
||||||
|
continue
|
||||||
|
default:
|
||||||
|
var bi []byte
|
||||||
|
if bi, _, err = nextUint32(r, pos); err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
lines = append(lines, asmOutLine{p, 4, int64(kind), bi, 1, "????", ""})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != io.EOF {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err = nil
|
||||||
|
for _, line := range lines {
|
||||||
|
if raw {
|
||||||
|
if line.pos != -1 {
|
||||||
|
sb.WriteString(fmt.Sprintf("%s\t%s\n", line.kind, line.value))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if line.pos == -1 {
|
||||||
|
sb.WriteString("\n")
|
||||||
|
} else if line.word == 4 {
|
||||||
|
sb.WriteString(fmt.Sprintf("%06x: %04x %04x%s %s %s\n", line.pos, binary.LittleEndian.AppendUint32(nil, uint32(line.kindData)), line.valueData, headerSpacing(showHeader), line.kind, line.value))
|
||||||
|
} else {
|
||||||
|
kind := binary.LittleEndian.AppendUint64(nil, uint64(line.kindData))
|
||||||
|
value := line.valueData
|
||||||
|
if len(value) == 8 {
|
||||||
|
sb.WriteString(fmt.Sprintf("%06x: %04x %04x %04x %04x %s %s\n", line.pos, kind[:4], kind[4:], value[:4], value[4:], line.kind, line.value))
|
||||||
|
} else {
|
||||||
|
sb.WriteString(fmt.Sprintf("%06x: %04x %04x %s %s\n", line.pos, kind[:4], kind[4:], line.kind, line.value))
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
return sb.String(), err
|
||||||
|
}
|
||||||
|
func nextUint32(r io.Reader, pos *int) ([]byte, uint32, error) {
|
||||||
|
i := make([]byte, 4)
|
||||||
|
_, err := r.Read(i)
|
||||||
|
if err != nil {
|
||||||
|
return i, 0, err
|
||||||
|
}
|
||||||
|
p := *pos + 4
|
||||||
|
*pos = p
|
||||||
|
return i, binary.LittleEndian.Uint32(i), nil
|
||||||
|
}
|
||||||
|
func nextUint64(r io.Reader, pos *int) ([]byte, uint64, error) {
|
||||||
|
i := make([]byte, 8)
|
||||||
|
_, err := r.Read(i)
|
||||||
|
if err != nil {
|
||||||
|
return i, 0, err
|
||||||
|
}
|
||||||
|
p := *pos + 8
|
||||||
|
*pos = p
|
||||||
|
return i, binary.LittleEndian.Uint64(i), nil
|
||||||
|
}
|
||||||
|
func nextIdent(r io.Reader, pos *int) ([]byte, Checksum, error) {
|
||||||
|
i := make([]byte, 48)
|
||||||
|
if _, err := r.Read(i); err != nil {
|
||||||
|
return i, Checksum{}, err
|
||||||
|
}
|
||||||
|
p := *pos + 48
|
||||||
|
*pos = p
|
||||||
|
return i, Checksum(i), nil
|
||||||
|
}
|
||||||
|
func intToKind(i uint64) string {
|
||||||
|
switch Kind(i) {
|
||||||
|
case KindHTTPGet:
|
||||||
|
return "http"
|
||||||
|
case KindTar:
|
||||||
|
return "tar "
|
||||||
|
case KindExec:
|
||||||
|
return "exec"
|
||||||
|
case KindExecNet:
|
||||||
|
return "exen"
|
||||||
|
case KindFile:
|
||||||
|
return "file"
|
||||||
|
default:
|
||||||
|
return fmt.Sprintf("$%d ", i-KindCustomOffset)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func headerSpacing(showHeader bool) string {
|
||||||
|
if showHeader {
|
||||||
|
return " "
|
||||||
|
}
|
||||||
|
|
||||||
|
return ""
|
||||||
|
}
|
||||||
@@ -23,7 +23,7 @@ import (
|
|||||||
"hakurei.app/message"
|
"hakurei.app/message"
|
||||||
)
|
)
|
||||||
|
|
||||||
// AbsWork is the container pathname [TContext.GetWorkDir] is mounted on.
|
// AbsWork is the container pathname [CureContext.GetWorkDir] is mounted on.
|
||||||
var AbsWork = fhs.AbsRoot.Append("work/")
|
var AbsWork = fhs.AbsRoot.Append("work/")
|
||||||
|
|
||||||
// ExecPath is a slice of [Artifact] and the [check.Absolute] pathname to make
|
// ExecPath is a slice of [Artifact] and the [check.Absolute] pathname to make
|
||||||
@@ -39,23 +39,22 @@ type ExecPath struct {
|
|||||||
W bool
|
W bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// SchedPolicy is the [container] scheduling policy.
|
// layers returns pathnames collected from A deduplicated by checksum.
|
||||||
var SchedPolicy int
|
func (p *ExecPath) layers(f *FContext) []*check.Absolute {
|
||||||
|
msg := f.GetMessage()
|
||||||
|
|
||||||
// PromoteLayers returns artifacts with identical-by-content layers promoted to
|
layers := make([]*check.Absolute, 0, len(p.A))
|
||||||
// the highest priority instance, as if mounted via [ExecPath].
|
checksums := make(map[unique.Handle[Checksum]]struct{}, len(p.A))
|
||||||
func PromoteLayers(
|
for i := range p.A {
|
||||||
artifacts []Artifact,
|
d := p.A[len(p.A)-1-i]
|
||||||
getArtifact func(Artifact) (*check.Absolute, unique.Handle[Checksum]),
|
pathname, checksum := f.GetArtifact(d)
|
||||||
report func(i int, d Artifact),
|
|
||||||
) []*check.Absolute {
|
|
||||||
layers := make([]*check.Absolute, 0, len(artifacts))
|
|
||||||
checksums := make(map[unique.Handle[Checksum]]struct{}, len(artifacts))
|
|
||||||
for i := range artifacts {
|
|
||||||
d := artifacts[len(artifacts)-1-i]
|
|
||||||
pathname, checksum := getArtifact(d)
|
|
||||||
if _, ok := checksums[checksum]; ok {
|
if _, ok := checksums[checksum]; ok {
|
||||||
report(len(artifacts)-1-i, d)
|
if msg.IsVerbose() {
|
||||||
|
msg.Verbosef(
|
||||||
|
"promoted layer %d as %s",
|
||||||
|
len(p.A)-1-i, reportName(d, f.cache.Ident(d)),
|
||||||
|
)
|
||||||
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
checksums[checksum] = struct{}{}
|
checksums[checksum] = struct{}{}
|
||||||
@@ -65,19 +64,6 @@ func PromoteLayers(
|
|||||||
return layers
|
return layers
|
||||||
}
|
}
|
||||||
|
|
||||||
// layers returns pathnames collected from A deduplicated via [PromoteLayers].
|
|
||||||
func (p *ExecPath) layers(f *FContext) []*check.Absolute {
|
|
||||||
msg := f.GetMessage()
|
|
||||||
return PromoteLayers(p.A, f.GetArtifact, func(i int, d Artifact) {
|
|
||||||
if msg.IsVerbose() {
|
|
||||||
msg.Verbosef(
|
|
||||||
"promoted layer %d as %s",
|
|
||||||
i, reportName(d, f.cache.Ident(d)),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Path returns a populated [ExecPath].
|
// Path returns a populated [ExecPath].
|
||||||
func Path(pathname *check.Absolute, writable bool, a ...Artifact) ExecPath {
|
func Path(pathname *check.Absolute, writable bool, a ...Artifact) ExecPath {
|
||||||
return ExecPath{pathname, a, writable}
|
return ExecPath{pathname, a, writable}
|
||||||
@@ -361,7 +347,6 @@ const (
|
|||||||
// scanVerbose prefixes program output for a verbose [message.Msg].
|
// scanVerbose prefixes program output for a verbose [message.Msg].
|
||||||
func scanVerbose(
|
func scanVerbose(
|
||||||
msg message.Msg,
|
msg message.Msg,
|
||||||
cancel context.CancelFunc,
|
|
||||||
done chan<- struct{},
|
done chan<- struct{},
|
||||||
prefix string,
|
prefix string,
|
||||||
r io.Reader,
|
r io.Reader,
|
||||||
@@ -376,15 +361,10 @@ func scanVerbose(
|
|||||||
msg.Verbose(prefix, s.Text())
|
msg.Verbose(prefix, s.Text())
|
||||||
}
|
}
|
||||||
if err := s.Err(); err != nil && !errors.Is(err, os.ErrClosed) {
|
if err := s.Err(); err != nil && !errors.Is(err, os.ErrClosed) {
|
||||||
cancel()
|
|
||||||
msg.Verbose("*"+prefix, err)
|
msg.Verbose("*"+prefix, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// SeccompPresets is the [seccomp] presets used by exec artifacts.
|
|
||||||
const SeccompPresets = std.PresetStrict &
|
|
||||||
^(std.PresetDenyNS | std.PresetDenyDevel)
|
|
||||||
|
|
||||||
// cure is like Cure but allows optional host net namespace. This is used for
|
// cure is like Cure but allows optional host net namespace. This is used for
|
||||||
// the [KnownChecksum] variant where networking is allowed.
|
// the [KnownChecksum] variant where networking is allowed.
|
||||||
func (a *execArtifact) cure(f *FContext, hostNet bool) (err error) {
|
func (a *execArtifact) cure(f *FContext, hostNet bool) (err error) {
|
||||||
@@ -408,22 +388,15 @@ func (a *execArtifact) cure(f *FContext, hostNet bool) (err error) {
|
|||||||
|
|
||||||
z := container.New(ctx, f.GetMessage())
|
z := container.New(ctx, f.GetMessage())
|
||||||
z.WaitDelay = execWaitDelay
|
z.WaitDelay = execWaitDelay
|
||||||
z.SeccompPresets = SeccompPresets
|
z.SeccompPresets |= std.PresetStrict & ^std.PresetDenyNS
|
||||||
z.SeccompFlags |= seccomp.AllowMultiarch
|
z.SeccompFlags |= seccomp.AllowMultiarch
|
||||||
z.ParentPerm = 0700
|
z.ParentPerm = 0700
|
||||||
z.HostNet = hostNet
|
z.HostNet = hostNet
|
||||||
z.Hostname = "cure"
|
z.Hostname = "cure"
|
||||||
z.SchedPolicy = SchedPolicy
|
|
||||||
if z.HostNet {
|
if z.HostNet {
|
||||||
z.Hostname = "cure-net"
|
z.Hostname = "cure-net"
|
||||||
}
|
}
|
||||||
z.Uid, z.Gid = (1<<10)-1, (1<<10)-1
|
z.Uid, z.Gid = (1<<10)-1, (1<<10)-1
|
||||||
|
|
||||||
var status io.Writer
|
|
||||||
if status, err = f.GetStatusWriter(); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if msg := f.GetMessage(); msg.IsVerbose() {
|
if msg := f.GetMessage(); msg.IsVerbose() {
|
||||||
var stdout, stderr io.ReadCloser
|
var stdout, stderr io.ReadCloser
|
||||||
if stdout, err = z.StdoutPipe(); err != nil {
|
if stdout, err = z.StdoutPipe(); err != nil {
|
||||||
@@ -440,31 +413,10 @@ func (a *execArtifact) cure(f *FContext, hostNet bool) (err error) {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
bw := f.cache.getWriter(status)
|
|
||||||
stdoutDone, stderrDone := make(chan struct{}), make(chan struct{})
|
stdoutDone, stderrDone := make(chan struct{}), make(chan struct{})
|
||||||
go scanVerbose(
|
go scanVerbose(msg, stdoutDone, "("+a.name+":1)", stdout)
|
||||||
msg, cancel, stdoutDone,
|
go scanVerbose(msg, stderrDone, "("+a.name+":2)", stderr)
|
||||||
"("+a.name+":1)",
|
defer func() { <-stdoutDone; <-stderrDone }()
|
||||||
io.TeeReader(stdout, bw),
|
|
||||||
)
|
|
||||||
go scanVerbose(
|
|
||||||
msg, cancel, stderrDone,
|
|
||||||
"("+a.name+":2)",
|
|
||||||
io.TeeReader(stderr, bw),
|
|
||||||
)
|
|
||||||
defer func() {
|
|
||||||
<-stdoutDone
|
|
||||||
<-stderrDone
|
|
||||||
|
|
||||||
flushErr := bw.Flush()
|
|
||||||
if err == nil {
|
|
||||||
err = flushErr
|
|
||||||
}
|
|
||||||
f.cache.putWriter(bw)
|
|
||||||
|
|
||||||
}()
|
|
||||||
} else {
|
|
||||||
z.Stdout, z.Stderr = status, status
|
|
||||||
}
|
}
|
||||||
|
|
||||||
z.Dir, z.Env, z.Path, z.Args = a.dir, a.env, a.path, a.args
|
z.Dir, z.Env, z.Path, z.Args = a.dir, a.env, a.path, a.args
|
||||||
|
|||||||
@@ -150,7 +150,7 @@ func (i *IContext) WriteUint32(v uint32) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// irMaxStringLength is the maximum acceptable wire size of [IRKindString].
|
// irMaxStringLength is the maximum acceptable wire size of [IRKindString].
|
||||||
const irMaxStringLength = 1 << 24
|
const irMaxStringLength = 1 << 20
|
||||||
|
|
||||||
// IRStringError is a string value too big to encode in IR.
|
// IRStringError is a string value too big to encode in IR.
|
||||||
type IRStringError string
|
type IRStringError string
|
||||||
@@ -310,13 +310,6 @@ type (
|
|||||||
// verbose logging is enabled. Artifacts may only depend on artifacts
|
// verbose logging is enabled. Artifacts may only depend on artifacts
|
||||||
// previously described in the IR stream.
|
// previously described in the IR stream.
|
||||||
//
|
//
|
||||||
// IRDecoder rejects an IR stream on the first decoding error, it does not
|
|
||||||
// check against nonzero reserved ancillary data or incorrectly ordered or
|
|
||||||
// redundant unstructured dependencies. An invalid IR stream as such will
|
|
||||||
// yield [Artifact] values with identifiers disagreeing with those computed
|
|
||||||
// by IRDecoder. For this reason, IRDecoder does not access the ident cache
|
|
||||||
// to avoid putting [Cache] into an inconsistent state.
|
|
||||||
//
|
|
||||||
// Methods of IRDecoder are not safe for concurrent use.
|
// Methods of IRDecoder are not safe for concurrent use.
|
||||||
IRDecoder struct {
|
IRDecoder struct {
|
||||||
// Address of underlying [Cache], must not be exposed directly.
|
// Address of underlying [Cache], must not be exposed directly.
|
||||||
|
|||||||
@@ -28,21 +28,15 @@ import (
|
|||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
"hakurei.app/container/check"
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/internal/info"
|
|
||||||
"hakurei.app/internal/lockedfile"
|
"hakurei.app/internal/lockedfile"
|
||||||
"hakurei.app/message"
|
"hakurei.app/message"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
|
||||||
// programName is the string identifying this build system.
|
|
||||||
programName = "internal/pkg"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
|
type (
|
||||||
// A Checksum is a SHA-384 checksum computed for a cured [Artifact].
|
// A Checksum is a SHA-384 checksum computed for a cured [Artifact].
|
||||||
Checksum = [sha512.Size384]byte
|
Checksum = [sha512.Size384]byte
|
||||||
|
|
||||||
// An ID is a unique identifier returned by [KnownIdent.ID]. This value must
|
// An ID is a unique identifier returned by [Artifact.ID]. This value must
|
||||||
// be deterministically determined ahead of time.
|
// be deterministically determined ahead of time.
|
||||||
ID Checksum
|
ID Checksum
|
||||||
)
|
)
|
||||||
@@ -71,75 +65,18 @@ func MustDecode(s string) (checksum Checksum) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// common holds elements and receives methods shared between different contexts.
|
|
||||||
type common struct {
|
|
||||||
// Address of underlying [Cache], should be zeroed or made unusable after
|
|
||||||
// Cure returns and must not be exposed directly.
|
|
||||||
cache *Cache
|
|
||||||
}
|
|
||||||
|
|
||||||
// TContext is passed to [TrivialArtifact.Cure] and provides information and
|
// TContext is passed to [TrivialArtifact.Cure] and provides information and
|
||||||
// methods required for curing the [TrivialArtifact].
|
// methods required for curing the [TrivialArtifact].
|
||||||
//
|
//
|
||||||
// Methods of TContext are safe for concurrent use. TContext is valid
|
// Methods of TContext are safe for concurrent use. TContext is valid
|
||||||
// until [TrivialArtifact.Cure] returns.
|
// until [TrivialArtifact.Cure] returns.
|
||||||
type TContext struct {
|
type TContext struct {
|
||||||
|
// Address of underlying [Cache], should be zeroed or made unusable after
|
||||||
|
// [TrivialArtifact.Cure] returns and must not be exposed directly.
|
||||||
|
cache *Cache
|
||||||
|
|
||||||
// Populated during [Cache.Cure].
|
// Populated during [Cache.Cure].
|
||||||
work, temp *check.Absolute
|
work, temp *check.Absolute
|
||||||
|
|
||||||
// Target [Artifact] encoded identifier.
|
|
||||||
ids string
|
|
||||||
// Pathname status was created at.
|
|
||||||
statusPath *check.Absolute
|
|
||||||
// File statusHeader and logs are written to.
|
|
||||||
status *os.File
|
|
||||||
// Error value during prepareStatus.
|
|
||||||
statusErr error
|
|
||||||
|
|
||||||
common
|
|
||||||
}
|
|
||||||
|
|
||||||
// statusHeader is the header written to all status files in dirStatus.
|
|
||||||
var statusHeader = func() string {
|
|
||||||
s := programName
|
|
||||||
if v := info.Version(); v != info.FallbackVersion {
|
|
||||||
s += " " + v
|
|
||||||
}
|
|
||||||
s += " (" + runtime.GOARCH + ")"
|
|
||||||
if name, err := os.Hostname(); err == nil {
|
|
||||||
s += " on " + name
|
|
||||||
}
|
|
||||||
s += "\n\n"
|
|
||||||
return s
|
|
||||||
}()
|
|
||||||
|
|
||||||
// prepareStatus initialises the status file once.
|
|
||||||
func (t *TContext) prepareStatus() error {
|
|
||||||
if t.statusPath != nil || t.status != nil {
|
|
||||||
return t.statusErr
|
|
||||||
}
|
|
||||||
|
|
||||||
t.statusPath = t.cache.base.Append(
|
|
||||||
dirStatus,
|
|
||||||
t.ids,
|
|
||||||
)
|
|
||||||
if t.status, t.statusErr = os.OpenFile(
|
|
||||||
t.statusPath.String(),
|
|
||||||
syscall.O_CREAT|syscall.O_EXCL|syscall.O_WRONLY,
|
|
||||||
0400,
|
|
||||||
); t.statusErr != nil {
|
|
||||||
return t.statusErr
|
|
||||||
}
|
|
||||||
|
|
||||||
_, t.statusErr = t.status.WriteString(statusHeader)
|
|
||||||
return t.statusErr
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetStatusWriter returns a [io.Writer] for build logs. The caller must not
|
|
||||||
// seek this writer before the position it was first returned in.
|
|
||||||
func (t *TContext) GetStatusWriter() (io.Writer, error) {
|
|
||||||
err := t.prepareStatus()
|
|
||||||
return t.status, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// destroy destroys the temporary directory and joins its errors with the error
|
// destroy destroys the temporary directory and joins its errors with the error
|
||||||
@@ -147,15 +84,12 @@ func (t *TContext) GetStatusWriter() (io.Writer, error) {
|
|||||||
// directory is removed similarly. [Cache] is responsible for making sure work
|
// directory is removed similarly. [Cache] is responsible for making sure work
|
||||||
// is never left behind for a successful [Cache.Cure].
|
// is never left behind for a successful [Cache.Cure].
|
||||||
//
|
//
|
||||||
// If implementation had requested status, it is closed with error joined with
|
|
||||||
// the error referred to by errP. If the error referred to by errP is non-nil,
|
|
||||||
// the status file is removed from the filesystem.
|
|
||||||
//
|
|
||||||
// destroy must be deferred by [Cache.Cure] if [TContext] is passed to any Cure
|
// destroy must be deferred by [Cache.Cure] if [TContext] is passed to any Cure
|
||||||
// implementation. It should not be called prior to that point.
|
// implementation. It should not be called prior to that point.
|
||||||
func (t *TContext) destroy(errP *error) {
|
func (t *TContext) destroy(errP *error) {
|
||||||
if chmodErr, removeErr := removeAll(t.temp); chmodErr != nil || removeErr != nil {
|
if chmodErr, removeErr := removeAll(t.temp); chmodErr != nil || removeErr != nil {
|
||||||
*errP = errors.Join(*errP, chmodErr, removeErr)
|
*errP = errors.Join(*errP, chmodErr, removeErr)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if *errP != nil {
|
if *errP != nil {
|
||||||
@@ -163,31 +97,17 @@ func (t *TContext) destroy(errP *error) {
|
|||||||
if chmodErr != nil || removeErr != nil {
|
if chmodErr != nil || removeErr != nil {
|
||||||
*errP = errors.Join(*errP, chmodErr, removeErr)
|
*errP = errors.Join(*errP, chmodErr, removeErr)
|
||||||
} else if errors.Is(*errP, os.ErrExist) {
|
} else if errors.Is(*errP, os.ErrExist) {
|
||||||
var linkError *os.LinkError
|
|
||||||
if errors.As(*errP, &linkError) && linkError != nil &&
|
|
||||||
linkError.Op == "rename" {
|
|
||||||
// two artifacts may be backed by the same file
|
// two artifacts may be backed by the same file
|
||||||
*errP = nil
|
*errP = nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if t.status != nil {
|
|
||||||
if err := t.status.Close(); err != nil {
|
|
||||||
*errP = errors.Join(*errP, err)
|
|
||||||
}
|
|
||||||
if *errP != nil {
|
|
||||||
*errP = errors.Join(*errP, os.Remove(t.statusPath.String()))
|
|
||||||
}
|
|
||||||
t.status = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unwrap returns the underlying [context.Context].
|
// Unwrap returns the underlying [context.Context].
|
||||||
func (c *common) Unwrap() context.Context { return c.cache.ctx }
|
func (t *TContext) Unwrap() context.Context { return t.cache.ctx }
|
||||||
|
|
||||||
// GetMessage returns [message.Msg] held by the underlying [Cache].
|
// GetMessage returns [message.Msg] held by the underlying [Cache].
|
||||||
func (c *common) GetMessage() message.Msg { return c.cache.msg }
|
func (t *TContext) GetMessage() message.Msg { return t.cache.msg }
|
||||||
|
|
||||||
// GetWorkDir returns a pathname to a directory which [Artifact] is expected to
|
// GetWorkDir returns a pathname to a directory which [Artifact] is expected to
|
||||||
// write its output to. This is not the final resting place of the [Artifact]
|
// write its output to. This is not the final resting place of the [Artifact]
|
||||||
@@ -206,13 +126,13 @@ func (t *TContext) GetTempDir() *check.Absolute { return t.temp }
|
|||||||
// If err is nil, the caller must close the resulting [io.ReadCloser] and return
|
// If err is nil, the caller must close the resulting [io.ReadCloser] and return
|
||||||
// its error, if any. Failure to read r to EOF may result in a spurious
|
// its error, if any. Failure to read r to EOF may result in a spurious
|
||||||
// [ChecksumMismatchError], or the underlying implementation may block on Close.
|
// [ChecksumMismatchError], or the underlying implementation may block on Close.
|
||||||
func (c *common) Open(a Artifact) (r io.ReadCloser, err error) {
|
func (t *TContext) Open(a Artifact) (r io.ReadCloser, err error) {
|
||||||
if f, ok := a.(FileArtifact); ok {
|
if f, ok := a.(FileArtifact); ok {
|
||||||
return c.cache.openFile(f)
|
return t.cache.openFile(f)
|
||||||
}
|
}
|
||||||
|
|
||||||
var pathname *check.Absolute
|
var pathname *check.Absolute
|
||||||
if pathname, _, err = c.cache.Cure(a); err != nil {
|
if pathname, _, err = t.cache.Cure(a); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -244,7 +164,7 @@ type FContext struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// InvalidLookupError is the identifier of non-dependency [Artifact] looked up
|
// InvalidLookupError is the identifier of non-dependency [Artifact] looked up
|
||||||
// via [FContext.GetArtifact] by a misbehaving [Artifact] implementation.
|
// via [FContext.Pathname] by a misbehaving [Artifact] implementation.
|
||||||
type InvalidLookupError ID
|
type InvalidLookupError ID
|
||||||
|
|
||||||
func (e InvalidLookupError) Error() string {
|
func (e InvalidLookupError) Error() string {
|
||||||
@@ -271,7 +191,14 @@ func (f *FContext) GetArtifact(a Artifact) (
|
|||||||
//
|
//
|
||||||
// Methods of RContext are safe for concurrent use. RContext is valid
|
// Methods of RContext are safe for concurrent use. RContext is valid
|
||||||
// until [FileArtifact.Cure] returns.
|
// until [FileArtifact.Cure] returns.
|
||||||
type RContext struct{ common }
|
type RContext struct {
|
||||||
|
// Address of underlying [Cache], should be zeroed or made unusable after
|
||||||
|
// [FileArtifact.Cure] returns and must not be exposed directly.
|
||||||
|
cache *Cache
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unwrap returns the underlying [context.Context].
|
||||||
|
func (r *RContext) Unwrap() context.Context { return r.cache.ctx }
|
||||||
|
|
||||||
// An Artifact is a read-only reference to a piece of data that may be created
|
// An Artifact is a read-only reference to a piece of data that may be created
|
||||||
// deterministically but might not currently be available in memory or on the
|
// deterministically but might not currently be available in memory or on the
|
||||||
@@ -450,9 +377,6 @@ const (
|
|||||||
// dirChecksum is the directory name appended to Cache.base for storing
|
// dirChecksum is the directory name appended to Cache.base for storing
|
||||||
// artifacts named after their [Checksum].
|
// artifacts named after their [Checksum].
|
||||||
dirChecksum = "checksum"
|
dirChecksum = "checksum"
|
||||||
// dirStatus is the directory name appended to Cache.base for storing
|
|
||||||
// artifact metadata and logs named after their [ID].
|
|
||||||
dirStatus = "status"
|
|
||||||
|
|
||||||
// dirWork is the directory name appended to Cache.base for working
|
// dirWork is the directory name appended to Cache.base for working
|
||||||
// pathnames set up during [Cache.Cure].
|
// pathnames set up during [Cache.Cure].
|
||||||
@@ -542,7 +466,7 @@ type Cache struct {
|
|||||||
// Synchronises entry into exclusive artifacts for the cure method.
|
// Synchronises entry into exclusive artifacts for the cure method.
|
||||||
exclMu sync.Mutex
|
exclMu sync.Mutex
|
||||||
// Buffered I/O free list, must not be accessed directly.
|
// Buffered I/O free list, must not be accessed directly.
|
||||||
brPool, bwPool sync.Pool
|
bufioPool sync.Pool
|
||||||
|
|
||||||
// Unlocks the on-filesystem cache. Must only be called from Close.
|
// Unlocks the on-filesystem cache. Must only be called from Close.
|
||||||
unlock func()
|
unlock func()
|
||||||
@@ -624,26 +548,6 @@ func (c *Cache) unsafeIdent(a Artifact, encodeKind bool) (
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// getReader is like [bufio.NewReader] but for brPool.
|
|
||||||
func (c *Cache) getReader(r io.Reader) *bufio.Reader {
|
|
||||||
br := c.brPool.Get().(*bufio.Reader)
|
|
||||||
br.Reset(r)
|
|
||||||
return br
|
|
||||||
}
|
|
||||||
|
|
||||||
// putReader adds br to brPool.
|
|
||||||
func (c *Cache) putReader(br *bufio.Reader) { c.brPool.Put(br) }
|
|
||||||
|
|
||||||
// getWriter is like [bufio.NewWriter] but for bwPool.
|
|
||||||
func (c *Cache) getWriter(w io.Writer) *bufio.Writer {
|
|
||||||
bw := c.bwPool.Get().(*bufio.Writer)
|
|
||||||
bw.Reset(w)
|
|
||||||
return bw
|
|
||||||
}
|
|
||||||
|
|
||||||
// putWriter adds bw to bwPool.
|
|
||||||
func (c *Cache) putWriter(bw *bufio.Writer) { c.bwPool.Put(bw) }
|
|
||||||
|
|
||||||
// A ChecksumMismatchError describes an [Artifact] with unexpected content.
|
// A ChecksumMismatchError describes an [Artifact] with unexpected content.
|
||||||
type ChecksumMismatchError struct {
|
type ChecksumMismatchError struct {
|
||||||
// Actual and expected checksums.
|
// Actual and expected checksums.
|
||||||
@@ -665,9 +569,6 @@ type ScrubError struct {
|
|||||||
// Dangling identifier symlinks. This can happen if the content-addressed
|
// Dangling identifier symlinks. This can happen if the content-addressed
|
||||||
// entry was removed while scrubbing due to a checksum mismatch.
|
// entry was removed while scrubbing due to a checksum mismatch.
|
||||||
DanglingIdentifiers []ID
|
DanglingIdentifiers []ID
|
||||||
// Dangling status files. This can happen if a dangling status symlink was
|
|
||||||
// removed while scrubbing.
|
|
||||||
DanglingStatus []ID
|
|
||||||
// Miscellaneous errors, including [os.ReadDir] on checksum and identifier
|
// Miscellaneous errors, including [os.ReadDir] on checksum and identifier
|
||||||
// directories, [Decode] on entry names and [os.RemoveAll] on inconsistent
|
// directories, [Decode] on entry names and [os.RemoveAll] on inconsistent
|
||||||
// entries.
|
// entries.
|
||||||
@@ -719,13 +620,6 @@ func (e *ScrubError) Error() string {
|
|||||||
}
|
}
|
||||||
segments = append(segments, s)
|
segments = append(segments, s)
|
||||||
}
|
}
|
||||||
if len(e.DanglingStatus) > 0 {
|
|
||||||
s := "dangling status:\n"
|
|
||||||
for _, id := range e.DanglingStatus {
|
|
||||||
s += Encode(id) + "\n"
|
|
||||||
}
|
|
||||||
segments = append(segments, s)
|
|
||||||
}
|
|
||||||
if len(e.Errs) > 0 {
|
if len(e.Errs) > 0 {
|
||||||
s := "errors during scrub:\n"
|
s := "errors during scrub:\n"
|
||||||
for pathname, errs := range e.errs {
|
for pathname, errs := range e.errs {
|
||||||
@@ -904,36 +798,6 @@ func (c *Cache) Scrub(checks int) error {
|
|||||||
wg.Wait()
|
wg.Wait()
|
||||||
}
|
}
|
||||||
|
|
||||||
dir = c.base.Append(dirStatus)
|
|
||||||
if entries, readdirErr := os.ReadDir(dir.String()); readdirErr != nil {
|
|
||||||
if !errors.Is(readdirErr, os.ErrNotExist) {
|
|
||||||
addErr(dir, readdirErr)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
wg.Add(len(entries))
|
|
||||||
for _, ent := range entries {
|
|
||||||
w <- checkEntry{ent, func(ent os.DirEntry, want *Checksum) bool {
|
|
||||||
got := p.Get().(*Checksum)
|
|
||||||
defer p.Put(got)
|
|
||||||
|
|
||||||
if _, err := os.Stat(c.base.Append(
|
|
||||||
dirIdentifier,
|
|
||||||
ent.Name(),
|
|
||||||
).String()); err != nil {
|
|
||||||
if !errors.Is(err, os.ErrNotExist) {
|
|
||||||
addErr(dir.Append(ent.Name()), err)
|
|
||||||
}
|
|
||||||
seMu.Lock()
|
|
||||||
se.DanglingStatus = append(se.DanglingStatus, *want)
|
|
||||||
seMu.Unlock()
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}}
|
|
||||||
}
|
|
||||||
wg.Wait()
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(c.identPending) > 0 {
|
if len(c.identPending) > 0 {
|
||||||
addErr(c.base, errors.New(
|
addErr(c.base, errors.New(
|
||||||
"scrub began with pending artifacts",
|
"scrub began with pending artifacts",
|
||||||
@@ -964,7 +828,6 @@ func (c *Cache) Scrub(checks int) error {
|
|||||||
|
|
||||||
if len(se.ChecksumMismatches) > 0 ||
|
if len(se.ChecksumMismatches) > 0 ||
|
||||||
len(se.DanglingIdentifiers) > 0 ||
|
len(se.DanglingIdentifiers) > 0 ||
|
||||||
len(se.DanglingStatus) > 0 ||
|
|
||||||
len(se.Errs) > 0 {
|
len(se.Errs) > 0 {
|
||||||
slices.SortFunc(se.ChecksumMismatches, func(a, b ChecksumMismatchError) int {
|
slices.SortFunc(se.ChecksumMismatches, func(a, b ChecksumMismatchError) int {
|
||||||
return bytes.Compare(a.Want[:], b.Want[:])
|
return bytes.Compare(a.Want[:], b.Want[:])
|
||||||
@@ -972,9 +835,6 @@ func (c *Cache) Scrub(checks int) error {
|
|||||||
slices.SortFunc(se.DanglingIdentifiers, func(a, b ID) int {
|
slices.SortFunc(se.DanglingIdentifiers, func(a, b ID) int {
|
||||||
return bytes.Compare(a[:], b[:])
|
return bytes.Compare(a[:], b[:])
|
||||||
})
|
})
|
||||||
slices.SortFunc(se.DanglingStatus, func(a, b ID) int {
|
|
||||||
return bytes.Compare(a[:], b[:])
|
|
||||||
})
|
|
||||||
return &se
|
return &se
|
||||||
} else {
|
} else {
|
||||||
return nil
|
return nil
|
||||||
@@ -1065,17 +925,16 @@ func (c *Cache) openFile(f FileArtifact) (r io.ReadCloser, err error) {
|
|||||||
if !errors.Is(err, os.ErrNotExist) {
|
if !errors.Is(err, os.ErrNotExist) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
id := c.Ident(f)
|
|
||||||
if c.msg.IsVerbose() {
|
if c.msg.IsVerbose() {
|
||||||
rn := reportName(f, id)
|
rn := reportName(f, c.Ident(f))
|
||||||
c.msg.Verbosef("curing %s in memory...", rn)
|
c.msg.Verbosef("curing %s to memory...", rn)
|
||||||
defer func() {
|
defer func() {
|
||||||
if err == nil {
|
if err == nil {
|
||||||
c.msg.Verbosef("opened %s for reading", rn)
|
c.msg.Verbosef("cured %s to memory", rn)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
return f.Cure(&RContext{common{c}})
|
return f.Cure(&RContext{c})
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -1272,9 +1131,6 @@ type DependencyCureError []*CureError
|
|||||||
// unwrapM recursively expands underlying errors into a caller-supplied map.
|
// unwrapM recursively expands underlying errors into a caller-supplied map.
|
||||||
func (e *DependencyCureError) unwrapM(me map[unique.Handle[ID]]*CureError) {
|
func (e *DependencyCureError) unwrapM(me map[unique.Handle[ID]]*CureError) {
|
||||||
for _, err := range *e {
|
for _, err := range *e {
|
||||||
if _, ok := me[err.Ident]; ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if _e, ok := err.Err.(*DependencyCureError); ok {
|
if _e, ok := err.Err.(*DependencyCureError); ok {
|
||||||
_e.unwrapM(me)
|
_e.unwrapM(me)
|
||||||
continue
|
continue
|
||||||
@@ -1358,6 +1214,13 @@ func (c *Cache) exitCure(a Artifact, curesExempt bool) {
|
|||||||
<-c.cures
|
<-c.cures
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// getWriter is like [bufio.NewWriter] but for bufioPool.
|
||||||
|
func (c *Cache) getWriter(w io.Writer) *bufio.Writer {
|
||||||
|
bw := c.bufioPool.Get().(*bufio.Writer)
|
||||||
|
bw.Reset(w)
|
||||||
|
return bw
|
||||||
|
}
|
||||||
|
|
||||||
// measuredReader implements [io.ReadCloser] and measures the checksum during
|
// measuredReader implements [io.ReadCloser] and measures the checksum during
|
||||||
// Close. If the underlying reader is not read to EOF, Close blocks until all
|
// Close. If the underlying reader is not read to EOF, Close blocks until all
|
||||||
// remaining data is consumed and validated.
|
// remaining data is consumed and validated.
|
||||||
@@ -1440,6 +1303,9 @@ func (r *RContext) NewMeasuredReader(
|
|||||||
return r.cache.newMeasuredReader(rc, checksum)
|
return r.cache.newMeasuredReader(rc, checksum)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// putWriter adds bw to bufioPool.
|
||||||
|
func (c *Cache) putWriter(bw *bufio.Writer) { c.bufioPool.Put(bw) }
|
||||||
|
|
||||||
// cure implements Cure without checking the full dependency graph.
|
// cure implements Cure without checking the full dependency graph.
|
||||||
func (c *Cache) cure(a Artifact, curesExempt bool) (
|
func (c *Cache) cure(a Artifact, curesExempt bool) (
|
||||||
pathname *check.Absolute,
|
pathname *check.Absolute,
|
||||||
@@ -1571,7 +1437,7 @@ func (c *Cache) cure(a Artifact, curesExempt bool) (
|
|||||||
if err = c.enterCure(a, curesExempt); err != nil {
|
if err = c.enterCure(a, curesExempt); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
r, err = f.Cure(&RContext{common{c}})
|
r, err = f.Cure(&RContext{c})
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if checksumPathname == nil || c.IsStrict() {
|
if checksumPathname == nil || c.IsStrict() {
|
||||||
h := sha512.New384()
|
h := sha512.New384()
|
||||||
@@ -1647,12 +1513,7 @@ func (c *Cache) cure(a Artifact, curesExempt bool) (
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
t := TContext{
|
t := TContext{c, c.base.Append(dirWork, ids), c.base.Append(dirTemp, ids)}
|
||||||
c.base.Append(dirWork, ids),
|
|
||||||
c.base.Append(dirTemp, ids),
|
|
||||||
ids, nil, nil, nil,
|
|
||||||
common{c},
|
|
||||||
}
|
|
||||||
switch ca := a.(type) {
|
switch ca := a.(type) {
|
||||||
case TrivialArtifact:
|
case TrivialArtifact:
|
||||||
defer t.destroy(&err)
|
defer t.destroy(&err)
|
||||||
@@ -1790,18 +1651,6 @@ func (pending *pendingArtifactDep) cure(c *Cache) {
|
|||||||
pending.errsMu.Unlock()
|
pending.errsMu.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
// OpenStatus attempts to open the status file associated to an [Artifact]. If
|
|
||||||
// err is nil, the caller must close the resulting reader.
|
|
||||||
func (c *Cache) OpenStatus(a Artifact) (r io.ReadSeekCloser, err error) {
|
|
||||||
c.identMu.RLock()
|
|
||||||
r, err = os.Open(c.base.Append(
|
|
||||||
dirStatus,
|
|
||||||
Encode(c.Ident(a).Value())).String(),
|
|
||||||
)
|
|
||||||
c.identMu.RUnlock()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close cancels all pending cures and waits for them to clean up.
|
// Close cancels all pending cures and waits for them to clean up.
|
||||||
func (c *Cache) Close() {
|
func (c *Cache) Close() {
|
||||||
c.closeOnce.Do(func() {
|
c.closeOnce.Do(func() {
|
||||||
@@ -1850,7 +1699,6 @@ func open(
|
|||||||
for _, name := range []string{
|
for _, name := range []string{
|
||||||
dirIdentifier,
|
dirIdentifier,
|
||||||
dirChecksum,
|
dirChecksum,
|
||||||
dirStatus,
|
|
||||||
dirWork,
|
dirWork,
|
||||||
} {
|
} {
|
||||||
if err := os.MkdirAll(base.Append(name).String(), 0700); err != nil &&
|
if err := os.MkdirAll(base.Append(name).String(), 0700); err != nil &&
|
||||||
@@ -1865,16 +1713,13 @@ func open(
|
|||||||
msg: msg,
|
msg: msg,
|
||||||
base: base,
|
base: base,
|
||||||
|
|
||||||
identPool: sync.Pool{New: func() any { return new(extIdent) }},
|
|
||||||
|
|
||||||
ident: make(map[unique.Handle[ID]]unique.Handle[Checksum]),
|
ident: make(map[unique.Handle[ID]]unique.Handle[Checksum]),
|
||||||
identErr: make(map[unique.Handle[ID]]error),
|
identErr: make(map[unique.Handle[ID]]error),
|
||||||
identPending: make(map[unique.Handle[ID]]<-chan struct{}),
|
identPending: make(map[unique.Handle[ID]]<-chan struct{}),
|
||||||
|
|
||||||
brPool: sync.Pool{New: func() any { return new(bufio.Reader) }},
|
|
||||||
bwPool: sync.Pool{New: func() any { return new(bufio.Writer) }},
|
|
||||||
}
|
}
|
||||||
c.ctx, c.cancel = context.WithCancel(ctx)
|
c.ctx, c.cancel = context.WithCancel(ctx)
|
||||||
|
c.identPool.New = func() any { return new(extIdent) }
|
||||||
|
c.bufioPool.New = func() any { return new(bufio.Writer) }
|
||||||
|
|
||||||
if lock || !testing.Testing() {
|
if lock || !testing.Testing() {
|
||||||
if unlock, err := lockedfile.MutexAt(
|
if unlock, err := lockedfile.MutexAt(
|
||||||
|
|||||||
@@ -314,11 +314,6 @@ func checkWithCache(t *testing.T, testCases []cacheTestCase) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// destroy non-deterministic status files
|
|
||||||
if err := os.RemoveAll(base.Append("status").String()); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var checksum pkg.Checksum
|
var checksum pkg.Checksum
|
||||||
if err := pkg.HashDir(&checksum, base); err != nil {
|
if err := pkg.HashDir(&checksum, base); err != nil {
|
||||||
t.Fatalf("HashDir: error = %v", err)
|
t.Fatalf("HashDir: error = %v", err)
|
||||||
@@ -387,9 +382,6 @@ func cureMany(t *testing.T, c *pkg.Cache, steps []cureStep) {
|
|||||||
} else if step.pathname != ignorePathname && !pathname.Is(step.pathname) {
|
} else if step.pathname != ignorePathname && !pathname.Is(step.pathname) {
|
||||||
t.Fatalf("Cure: pathname = %q, want %q", pathname, step.pathname)
|
t.Fatalf("Cure: pathname = %q, want %q", pathname, step.pathname)
|
||||||
} else if checksum != makeChecksumH(step.checksum) {
|
} else if checksum != makeChecksumH(step.checksum) {
|
||||||
if checksum == (unique.Handle[pkg.Checksum]{}) {
|
|
||||||
checksum = unique.Make(pkg.Checksum{})
|
|
||||||
}
|
|
||||||
t.Fatalf(
|
t.Fatalf(
|
||||||
"Cure: checksum = %s, want %s",
|
"Cure: checksum = %s, want %s",
|
||||||
pkg.Encode(checksum.Value()), pkg.Encode(step.checksum),
|
pkg.Encode(checksum.Value()), pkg.Encode(step.checksum),
|
||||||
|
|||||||
@@ -10,7 +10,8 @@ import (
|
|||||||
"io/fs"
|
"io/fs"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
|
||||||
|
"hakurei.app/container/check"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -99,6 +100,7 @@ func (e DisallowedTypeflagError) Error() string {
|
|||||||
|
|
||||||
// Cure cures the [Artifact], producing a directory located at work.
|
// Cure cures the [Artifact], producing a directory located at work.
|
||||||
func (a *tarArtifact) Cure(t *TContext) (err error) {
|
func (a *tarArtifact) Cure(t *TContext) (err error) {
|
||||||
|
temp := t.GetTempDir()
|
||||||
var tr io.ReadCloser
|
var tr io.ReadCloser
|
||||||
if tr, err = t.Open(a.f); err != nil {
|
if tr, err = t.Open(a.f); err != nil {
|
||||||
return
|
return
|
||||||
@@ -114,9 +116,7 @@ func (a *tarArtifact) Cure(t *TContext) (err error) {
|
|||||||
err = closeErr
|
err = closeErr
|
||||||
}
|
}
|
||||||
}(tr)
|
}(tr)
|
||||||
br := t.cache.getReader(tr)
|
tr = io.NopCloser(tr)
|
||||||
defer t.cache.putReader(br)
|
|
||||||
tr = io.NopCloser(br)
|
|
||||||
|
|
||||||
switch a.compression {
|
switch a.compression {
|
||||||
case TarUncompressed:
|
case TarUncompressed:
|
||||||
@@ -137,24 +137,14 @@ func (a *tarArtifact) Cure(t *TContext) (err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type dirTargetPerm struct {
|
type dirTargetPerm struct {
|
||||||
path string
|
path *check.Absolute
|
||||||
mode fs.FileMode
|
mode fs.FileMode
|
||||||
}
|
}
|
||||||
var madeDirectories []dirTargetPerm
|
var madeDirectories []dirTargetPerm
|
||||||
|
|
||||||
if err = os.MkdirAll(t.GetTempDir().String(), 0700); err != nil {
|
if err = os.MkdirAll(temp.String(), 0700); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
var root *os.Root
|
|
||||||
if root, err = os.OpenRoot(t.GetTempDir().String()); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
closeErr := root.Close()
|
|
||||||
if err == nil {
|
|
||||||
err = closeErr
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
var header *tar.Header
|
var header *tar.Header
|
||||||
r := tar.NewReader(tr)
|
r := tar.NewReader(tr)
|
||||||
@@ -168,8 +158,9 @@ func (a *tarArtifact) Cure(t *TContext) (err error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pathname := temp.Append(header.Name)
|
||||||
if typeflag >= '0' && typeflag <= '9' && typeflag != tar.TypeDir {
|
if typeflag >= '0' && typeflag <= '9' && typeflag != tar.TypeDir {
|
||||||
if err = root.MkdirAll(path.Dir(header.Name), 0700); err != nil {
|
if err = os.MkdirAll(pathname.Dir().String(), 0700); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -177,8 +168,8 @@ func (a *tarArtifact) Cure(t *TContext) (err error) {
|
|||||||
switch typeflag {
|
switch typeflag {
|
||||||
case tar.TypeReg:
|
case tar.TypeReg:
|
||||||
var f *os.File
|
var f *os.File
|
||||||
if f, err = root.OpenFile(
|
if f, err = os.OpenFile(
|
||||||
header.Name,
|
pathname.String(),
|
||||||
os.O_CREATE|os.O_EXCL|os.O_WRONLY,
|
os.O_CREATE|os.O_EXCL|os.O_WRONLY,
|
||||||
header.FileInfo().Mode()&0500,
|
header.FileInfo().Mode()&0500,
|
||||||
); err != nil {
|
); err != nil {
|
||||||
@@ -193,29 +184,26 @@ func (a *tarArtifact) Cure(t *TContext) (err error) {
|
|||||||
break
|
break
|
||||||
|
|
||||||
case tar.TypeLink:
|
case tar.TypeLink:
|
||||||
if err = root.Link(
|
if err = os.Link(
|
||||||
header.Linkname,
|
temp.Append(header.Linkname).String(),
|
||||||
header.Name,
|
pathname.String(),
|
||||||
); err != nil {
|
); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
break
|
break
|
||||||
|
|
||||||
case tar.TypeSymlink:
|
case tar.TypeSymlink:
|
||||||
if err = root.Symlink(
|
if err = os.Symlink(header.Linkname, pathname.String()); err != nil {
|
||||||
header.Linkname,
|
|
||||||
header.Name,
|
|
||||||
); err != nil {
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
break
|
break
|
||||||
|
|
||||||
case tar.TypeDir:
|
case tar.TypeDir:
|
||||||
madeDirectories = append(madeDirectories, dirTargetPerm{
|
madeDirectories = append(madeDirectories, dirTargetPerm{
|
||||||
path: header.Name,
|
path: pathname,
|
||||||
mode: header.FileInfo().Mode(),
|
mode: header.FileInfo().Mode(),
|
||||||
})
|
})
|
||||||
if err = root.MkdirAll(header.Name, 0700); err != nil {
|
if err = os.MkdirAll(pathname.String(), 0700); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
break
|
break
|
||||||
@@ -232,7 +220,7 @@ func (a *tarArtifact) Cure(t *TContext) (err error) {
|
|||||||
}
|
}
|
||||||
if err == nil {
|
if err == nil {
|
||||||
for _, e := range madeDirectories {
|
for _, e := range madeDirectories {
|
||||||
if err = root.Chmod(e.path, e.mode&0500); err != nil {
|
if err = os.Chmod(e.path.String(), e.mode&0500); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -240,7 +228,6 @@ func (a *tarArtifact) Cure(t *TContext) (err error) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
temp := t.GetTempDir()
|
|
||||||
if err = os.Chmod(temp.String(), 0700); err != nil {
|
if err = os.Chmod(temp.String(), 0700); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,19 +2,18 @@ package rosa
|
|||||||
|
|
||||||
import "hakurei.app/internal/pkg"
|
import "hakurei.app/internal/pkg"
|
||||||
|
|
||||||
func (t Toolchain) newAttr() (pkg.Artifact, string) {
|
func (t Toolchain) newAttr() pkg.Artifact {
|
||||||
const (
|
const (
|
||||||
version = "2.5.2"
|
version = "2.5.2"
|
||||||
checksum = "YWEphrz6vg1sUMmHHVr1CRo53pFXRhq_pjN-AlG8UgwZK1y6m7zuDhxqJhD0SV0l"
|
checksum = "YWEphrz6vg1sUMmHHVr1CRo53pFXRhq_pjN-AlG8UgwZK1y6m7zuDhxqJhD0SV0l"
|
||||||
)
|
)
|
||||||
return t.NewPackage("attr", version, pkg.NewHTTPGetTar(
|
return t.NewViaMake("attr", version, t.NewPatchedSource(
|
||||||
|
"attr", version, pkg.NewHTTPGetTar(
|
||||||
nil, "https://download.savannah.nongnu.org/releases/attr/"+
|
nil, "https://download.savannah.nongnu.org/releases/attr/"+
|
||||||
"attr-"+version+".tar.gz",
|
"attr-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), true, [2]string{"libgen-basename", `From 8a80d895dfd779373363c3a4b62ecce5a549efb2 Mon Sep 17 00:00:00 2001
|
||||||
Patches: [][2]string{
|
|
||||||
{"libgen-basename", `From 8a80d895dfd779373363c3a4b62ecce5a549efb2 Mon Sep 17 00:00:00 2001
|
|
||||||
From: "Haelwenn (lanodan) Monnier" <contact@hacktivis.me>
|
From: "Haelwenn (lanodan) Monnier" <contact@hacktivis.me>
|
||||||
Date: Sat, 30 Mar 2024 10:17:10 +0100
|
Date: Sat, 30 Mar 2024 10:17:10 +0100
|
||||||
Subject: tools/attr.c: Add missing libgen.h include for basename(3)
|
Subject: tools/attr.c: Add missing libgen.h include for basename(3)
|
||||||
@@ -39,9 +38,7 @@ index f12e4af..6a3c1e9 100644
|
|||||||
#include <attr/attributes.h>
|
#include <attr/attributes.h>
|
||||||
|
|
||||||
--
|
--
|
||||||
cgit v1.1`},
|
cgit v1.1`}, [2]string{"musl-errno", `diff --git a/test/attr.test b/test/attr.test
|
||||||
|
|
||||||
{"musl-errno", `diff --git a/test/attr.test b/test/attr.test
|
|
||||||
index 6ce2f9b..e9bde92 100644
|
index 6ce2f9b..e9bde92 100644
|
||||||
--- a/test/attr.test
|
--- a/test/attr.test
|
||||||
+++ b/test/attr.test
|
+++ b/test/attr.test
|
||||||
@@ -55,48 +52,39 @@ index 6ce2f9b..e9bde92 100644
|
|||||||
$ setfattr -n user. -v value f
|
$ setfattr -n user. -v value f
|
||||||
> setfattr: f: Invalid argument
|
> setfattr: f: Invalid argument
|
||||||
`},
|
`},
|
||||||
},
|
), &MakeAttr{
|
||||||
|
|
||||||
ScriptEarly: `
|
ScriptEarly: `
|
||||||
ln -s ../../system/bin/perl /usr/bin
|
ln -s ../../system/bin/perl /usr/bin
|
||||||
`,
|
`,
|
||||||
}, (*MakeHelper)(nil),
|
Configure: [][2]string{
|
||||||
Perl,
|
{"enable-static"},
|
||||||
), version
|
},
|
||||||
|
},
|
||||||
|
t.Load(Perl),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
func init() {
|
func init() { artifactsF[Attr] = Toolchain.newAttr }
|
||||||
artifactsM[Attr] = Metadata{
|
|
||||||
f: Toolchain.newAttr,
|
|
||||||
|
|
||||||
Name: "attr",
|
func (t Toolchain) newACL() pkg.Artifact {
|
||||||
Description: "Commands for Manipulating Filesystem Extended Attributes",
|
|
||||||
Website: "https://savannah.nongnu.org/projects/attr/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Toolchain) newACL() (pkg.Artifact, string) {
|
|
||||||
const (
|
const (
|
||||||
version = "2.3.2"
|
version = "2.3.2"
|
||||||
checksum = "-fY5nwH4K8ZHBCRXrzLdguPkqjKI6WIiGu4dBtrZ1o0t6AIU73w8wwJz_UyjIS0P"
|
checksum = "-fY5nwH4K8ZHBCRXrzLdguPkqjKI6WIiGu4dBtrZ1o0t6AIU73w8wwJz_UyjIS0P"
|
||||||
)
|
)
|
||||||
return t.NewPackage("acl", version, pkg.NewHTTPGetTar(
|
return t.NewViaMake("acl", version, pkg.NewHTTPGetTar(
|
||||||
nil, "https://download.savannah.nongnu.org/releases/acl/"+
|
nil,
|
||||||
|
"https://download.savannah.nongnu.org/releases/acl/"+
|
||||||
"acl-"+version+".tar.gz",
|
"acl-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil, &MakeHelper{
|
), &MakeAttr{
|
||||||
|
Configure: [][2]string{
|
||||||
|
{"enable-static"},
|
||||||
|
},
|
||||||
|
|
||||||
// makes assumptions about uid_map/gid_map
|
// makes assumptions about uid_map/gid_map
|
||||||
SkipCheck: true,
|
SkipCheck: true,
|
||||||
},
|
},
|
||||||
Attr,
|
t.Load(Attr),
|
||||||
), version
|
)
|
||||||
}
|
|
||||||
func init() {
|
|
||||||
artifactsM[ACL] = Metadata{
|
|
||||||
f: Toolchain.newACL,
|
|
||||||
|
|
||||||
Name: "acl",
|
|
||||||
Description: "Commands for Manipulating POSIX Access Control Lists",
|
|
||||||
Website: "https://savannah.nongnu.org/projects/acl/",
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
func init() { artifactsF[ACL] = Toolchain.newACL }
|
||||||
|
|||||||
@@ -10,40 +10,20 @@ import (
|
|||||||
type PArtifact int
|
type PArtifact int
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// ImageInitramfs is the Rosa OS initramfs archive.
|
ACL PArtifact = iota
|
||||||
ImageInitramfs PArtifact = iota
|
|
||||||
|
|
||||||
// Kernel is the generic Rosa OS Linux kernel.
|
|
||||||
Kernel
|
|
||||||
// KernelHeaders is an installation of kernel headers for [Kernel].
|
|
||||||
KernelHeaders
|
|
||||||
// KernelSource is a writable kernel source tree installed to [AbsUsrSrc].
|
|
||||||
KernelSource
|
|
||||||
|
|
||||||
ACL
|
|
||||||
ArgpStandalone
|
|
||||||
Attr
|
Attr
|
||||||
Autoconf
|
Autoconf
|
||||||
Automake
|
Automake
|
||||||
BC
|
|
||||||
Bash
|
Bash
|
||||||
Binutils
|
Binutils
|
||||||
Bison
|
|
||||||
Bzip2
|
|
||||||
CMake
|
CMake
|
||||||
Coreutils
|
Coreutils
|
||||||
Curl
|
Curl
|
||||||
DTC
|
|
||||||
Diffutils
|
Diffutils
|
||||||
Elfutils
|
|
||||||
Fakeroot
|
|
||||||
Findutils
|
Findutils
|
||||||
Flex
|
|
||||||
Fuse
|
Fuse
|
||||||
GMP
|
|
||||||
GLib
|
|
||||||
Gawk
|
Gawk
|
||||||
GenInitCPIO
|
GMP
|
||||||
Gettext
|
Gettext
|
||||||
Git
|
Git
|
||||||
Go
|
Go
|
||||||
@@ -53,11 +33,9 @@ const (
|
|||||||
Hakurei
|
Hakurei
|
||||||
HakureiDist
|
HakureiDist
|
||||||
IniConfig
|
IniConfig
|
||||||
Kmod
|
KernelHeaders
|
||||||
LibXau
|
LibXau
|
||||||
Libcap
|
|
||||||
Libexpat
|
Libexpat
|
||||||
Libiconv
|
|
||||||
Libpsl
|
Libpsl
|
||||||
Libffi
|
Libffi
|
||||||
Libgd
|
Libgd
|
||||||
@@ -65,52 +43,31 @@ const (
|
|||||||
Libseccomp
|
Libseccomp
|
||||||
Libucontext
|
Libucontext
|
||||||
Libxml2
|
Libxml2
|
||||||
Libxslt
|
|
||||||
M4
|
M4
|
||||||
MPC
|
MPC
|
||||||
MPFR
|
MPFR
|
||||||
Make
|
Make
|
||||||
Meson
|
Meson
|
||||||
Mksh
|
Mksh
|
||||||
MuslFts
|
|
||||||
MuslObstack
|
|
||||||
NSS
|
NSS
|
||||||
NSSCACert
|
NSSCACert
|
||||||
Ncurses
|
|
||||||
Ninja
|
Ninja
|
||||||
OpenSSL
|
OpenSSL
|
||||||
PCRE2
|
|
||||||
Packaging
|
Packaging
|
||||||
Patch
|
Patch
|
||||||
Perl
|
Perl
|
||||||
PerlLocaleGettext
|
|
||||||
PerlMIMECharset
|
|
||||||
PerlModuleBuild
|
|
||||||
PerlPodParser
|
|
||||||
PerlSGMLS
|
|
||||||
PerlTermReadKey
|
|
||||||
PerlTextCharWidth
|
|
||||||
PerlTextWrapI18N
|
|
||||||
PerlUnicodeGCString
|
|
||||||
PerlYAMLTiny
|
|
||||||
PkgConfig
|
PkgConfig
|
||||||
Pluggy
|
Pluggy
|
||||||
Procps
|
|
||||||
PyTest
|
PyTest
|
||||||
Pygments
|
Pygments
|
||||||
Python
|
Python
|
||||||
QEMU
|
|
||||||
Rsync
|
Rsync
|
||||||
Sed
|
Sed
|
||||||
Setuptools
|
Setuptools
|
||||||
SquashfsTools
|
|
||||||
TamaGo
|
|
||||||
Tar
|
|
||||||
Texinfo
|
|
||||||
Toybox
|
Toybox
|
||||||
toyboxEarly
|
toyboxEarly
|
||||||
Unzip
|
Unzip
|
||||||
UtilLinux
|
utilMacros
|
||||||
Wayland
|
Wayland
|
||||||
WaylandProtocols
|
WaylandProtocols
|
||||||
XCB
|
XCB
|
||||||
@@ -118,82 +75,101 @@ const (
|
|||||||
Xproto
|
Xproto
|
||||||
XZ
|
XZ
|
||||||
Zlib
|
Zlib
|
||||||
Zstd
|
|
||||||
|
|
||||||
// PresetUnexportedStart is the first unexported preset.
|
buildcatrust
|
||||||
PresetUnexportedStart
|
|
||||||
|
|
||||||
buildcatrust = iota - 1
|
|
||||||
utilMacros
|
|
||||||
|
|
||||||
// Musl is a standalone libc that does not depend on the toolchain.
|
|
||||||
Musl
|
|
||||||
|
|
||||||
// gcc is a hacked-to-pieces GCC toolchain meant for use in intermediate
|
// gcc is a hacked-to-pieces GCC toolchain meant for use in intermediate
|
||||||
// stages only. This preset and its direct output must never be exposed.
|
// stages only. This preset and its direct output must never be exposed.
|
||||||
gcc
|
gcc
|
||||||
|
|
||||||
// Stage0 is a tarball containing all compile-time dependencies of artifacts
|
// _presetEnd is the total number of presets and does not denote a preset.
|
||||||
// part of the [Std] toolchain.
|
_presetEnd
|
||||||
Stage0
|
|
||||||
|
|
||||||
// PresetEnd is the total number of presets and does not denote a preset.
|
|
||||||
PresetEnd
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Metadata is stage-agnostic information of a [PArtifact] not directly
|
|
||||||
// representable in the resulting [pkg.Artifact].
|
|
||||||
type Metadata struct {
|
|
||||||
f func(t Toolchain) (a pkg.Artifact, version string)
|
|
||||||
|
|
||||||
// Unique package name.
|
|
||||||
Name string `json:"name"`
|
|
||||||
// Short user-facing description.
|
|
||||||
Description string `json:"description"`
|
|
||||||
// Project home page.
|
|
||||||
Website string `json:"website,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unversioned denotes an unversioned [PArtifact].
|
|
||||||
const Unversioned = "\x00"
|
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// artifactsM is an array of [PArtifact] metadata.
|
// artifactsF is an array of functions for the result of [PArtifact].
|
||||||
artifactsM [PresetEnd]Metadata
|
artifactsF [_presetEnd]func(t Toolchain) pkg.Artifact
|
||||||
|
|
||||||
// artifacts stores the result of Metadata.f.
|
// artifacts stores the result of artifactsF.
|
||||||
artifacts [_toolchainEnd][len(artifactsM)]pkg.Artifact
|
artifacts [_toolchainEnd][len(artifactsF)]pkg.Artifact
|
||||||
// versions stores the version of [PArtifact].
|
|
||||||
versions [_toolchainEnd][len(artifactsM)]string
|
|
||||||
// artifactsOnce is for lazy initialisation of artifacts.
|
// artifactsOnce is for lazy initialisation of artifacts.
|
||||||
artifactsOnce [_toolchainEnd][len(artifactsM)]sync.Once
|
artifactsOnce [_toolchainEnd][len(artifactsF)]sync.Once
|
||||||
)
|
)
|
||||||
|
|
||||||
// GetMetadata returns [Metadata] of a [PArtifact].
|
|
||||||
func GetMetadata(p PArtifact) *Metadata { return &artifactsM[p] }
|
|
||||||
|
|
||||||
// Load returns the resulting [pkg.Artifact] of [PArtifact].
|
// Load returns the resulting [pkg.Artifact] of [PArtifact].
|
||||||
func (t Toolchain) Load(p PArtifact) pkg.Artifact {
|
func (t Toolchain) Load(p PArtifact) pkg.Artifact {
|
||||||
artifactsOnce[t][p].Do(func() {
|
artifactsOnce[t][p].Do(func() {
|
||||||
artifacts[t][p], versions[t][p] = artifactsM[p].f(t)
|
artifacts[t][p] = artifactsF[p](t)
|
||||||
})
|
})
|
||||||
return artifacts[t][p]
|
return artifacts[t][p]
|
||||||
}
|
}
|
||||||
|
|
||||||
// Version returns the version string of [PArtifact].
|
|
||||||
func (t Toolchain) Version(p PArtifact) string {
|
|
||||||
artifactsOnce[t][p].Do(func() {
|
|
||||||
artifacts[t][p], versions[t][p] = artifactsM[p].f(t)
|
|
||||||
})
|
|
||||||
return versions[t][p]
|
|
||||||
}
|
|
||||||
|
|
||||||
// ResolveName returns a [PArtifact] by name.
|
// ResolveName returns a [PArtifact] by name.
|
||||||
func ResolveName(name string) (p PArtifact, ok bool) {
|
func ResolveName(name string) (p PArtifact, ok bool) {
|
||||||
for i := range PresetUnexportedStart {
|
p, ok = map[string]PArtifact{
|
||||||
if name == artifactsM[i].Name {
|
"acl": ACL,
|
||||||
return i, true
|
"attr": Attr,
|
||||||
}
|
"autoconf": Autoconf,
|
||||||
}
|
"automake": Automake,
|
||||||
return 0, false
|
"bash": Bash,
|
||||||
|
"binutils": Binutils,
|
||||||
|
"cmake": CMake,
|
||||||
|
"coreutils": Coreutils,
|
||||||
|
"curl": Curl,
|
||||||
|
"diffutils": Diffutils,
|
||||||
|
"findutils": Findutils,
|
||||||
|
"fuse": Fuse,
|
||||||
|
"gawk": Gawk,
|
||||||
|
"gmp": GMP,
|
||||||
|
"gettext": Gettext,
|
||||||
|
"git": Git,
|
||||||
|
"go": Go,
|
||||||
|
"gperf": Gperf,
|
||||||
|
"grep": Grep,
|
||||||
|
"gzip": Gzip,
|
||||||
|
"hakurei": Hakurei,
|
||||||
|
"hakurei-dist": HakureiDist,
|
||||||
|
"iniconfig": IniConfig,
|
||||||
|
"kernel-headers": KernelHeaders,
|
||||||
|
"libXau": LibXau,
|
||||||
|
"libexpat": Libexpat,
|
||||||
|
"libpsl": Libpsl,
|
||||||
|
"libseccomp": Libseccomp,
|
||||||
|
"libucontext": Libucontext,
|
||||||
|
"libxml2": Libxml2,
|
||||||
|
"libffi": Libffi,
|
||||||
|
"libgd": Libgd,
|
||||||
|
"libtool": Libtool,
|
||||||
|
"m4": M4,
|
||||||
|
"mpc": MPC,
|
||||||
|
"mpfr": MPFR,
|
||||||
|
"make": Make,
|
||||||
|
"meson": Meson,
|
||||||
|
"mksh": Mksh,
|
||||||
|
"nss": NSS,
|
||||||
|
"nss-cacert": NSSCACert,
|
||||||
|
"ninja": Ninja,
|
||||||
|
"openssl": OpenSSL,
|
||||||
|
"packaging": Packaging,
|
||||||
|
"patch": Patch,
|
||||||
|
"perl": Perl,
|
||||||
|
"pkg-config": PkgConfig,
|
||||||
|
"pluggy": Pluggy,
|
||||||
|
"pytest": PyTest,
|
||||||
|
"pygments": Pygments,
|
||||||
|
"python": Python,
|
||||||
|
"rsync": Rsync,
|
||||||
|
"sed": Sed,
|
||||||
|
"setuptools": Setuptools,
|
||||||
|
"toybox": Toybox,
|
||||||
|
"unzip": Unzip,
|
||||||
|
"wayland": Wayland,
|
||||||
|
"wayland-protocols": WaylandProtocols,
|
||||||
|
"xcb": XCB,
|
||||||
|
"xcb-proto": XCBProto,
|
||||||
|
"xproto": Xproto,
|
||||||
|
"xz": XZ,
|
||||||
|
"zlib": Zlib,
|
||||||
|
}[name]
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,53 +0,0 @@
|
|||||||
package rosa_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"hakurei.app/internal/rosa"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestLoad(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
for i := range rosa.PresetEnd {
|
|
||||||
p := rosa.PArtifact(i)
|
|
||||||
t.Run(rosa.GetMetadata(p).Name, func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
rosa.Std.Load(p)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestResolveName(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
for i := range rosa.PresetUnexportedStart {
|
|
||||||
p := i
|
|
||||||
name := rosa.GetMetadata(p).Name
|
|
||||||
t.Run(name, func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
if got, ok := rosa.ResolveName(name); !ok {
|
|
||||||
t.Fatal("ResolveName: ok = false")
|
|
||||||
} else if got != p {
|
|
||||||
t.Fatalf("ResolveName: %d, want %d", got, p)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
func TestResolveNameUnexported(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
for i := rosa.PresetUnexportedStart; i < rosa.PresetEnd; i++ {
|
|
||||||
p := i
|
|
||||||
name := rosa.GetMetadata(p).Name
|
|
||||||
t.Run(name, func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
if got, ok := rosa.ResolveName(name); ok {
|
|
||||||
t.Fatalf("ResolveName: resolved unexported preset %d", got)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,36 +0,0 @@
|
|||||||
package rosa
|
|
||||||
|
|
||||||
import "hakurei.app/internal/pkg"
|
|
||||||
|
|
||||||
func (t Toolchain) newArgpStandalone() (pkg.Artifact, string) {
|
|
||||||
const (
|
|
||||||
version = "1.3"
|
|
||||||
checksum = "vtW0VyO2pJ-hPyYmDI2zwSLS8QL0sPAUKC1t3zNYbwN2TmsaE-fADhaVtNd3eNFl"
|
|
||||||
)
|
|
||||||
return t.NewPackage("argp-standalone", version, pkg.NewHTTPGetTar(
|
|
||||||
nil, "http://www.lysator.liu.se/~nisse/misc/"+
|
|
||||||
"argp-standalone-"+version+".tar.gz",
|
|
||||||
mustDecode(checksum),
|
|
||||||
pkg.TarGzip,
|
|
||||||
), &PackageAttr{
|
|
||||||
Env: []string{
|
|
||||||
"CC=cc -std=gnu89 -fPIC",
|
|
||||||
},
|
|
||||||
}, &MakeHelper{
|
|
||||||
Install: `
|
|
||||||
install -D -m644 /usr/src/argp-standalone/argp.h /work/system/include/argp.h
|
|
||||||
install -D -m755 libargp.a /work/system/lib/libargp.a
|
|
||||||
`,
|
|
||||||
},
|
|
||||||
Diffutils,
|
|
||||||
), version
|
|
||||||
}
|
|
||||||
func init() {
|
|
||||||
artifactsM[ArgpStandalone] = Metadata{
|
|
||||||
f: Toolchain.newArgpStandalone,
|
|
||||||
|
|
||||||
Name: "argp-standalone",
|
|
||||||
Description: "hierarchical argument parsing library broken out from glibc",
|
|
||||||
Website: "http://www.lysator.liu.se/~nisse/misc/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,36 +0,0 @@
|
|||||||
package rosa
|
|
||||||
|
|
||||||
import "hakurei.app/internal/pkg"
|
|
||||||
|
|
||||||
func (t Toolchain) newBzip2() (pkg.Artifact, string) {
|
|
||||||
const (
|
|
||||||
version = "1.0.8"
|
|
||||||
checksum = "cTLykcco7boom-s05H1JVsQi1AtChYL84nXkg_92Dm1Xt94Ob_qlMg_-NSguIK-c"
|
|
||||||
)
|
|
||||||
return t.NewPackage("bzip2", version, pkg.NewHTTPGetTar(
|
|
||||||
nil, "https://sourceware.org/pub/bzip2/bzip2-"+version+".tar.gz",
|
|
||||||
mustDecode(checksum),
|
|
||||||
pkg.TarGzip,
|
|
||||||
), &PackageAttr{
|
|
||||||
Writable: true,
|
|
||||||
EnterSource: true,
|
|
||||||
}, &MakeHelper{
|
|
||||||
// uses source tree as scratch space
|
|
||||||
SkipConfigure: true,
|
|
||||||
SkipCheck: true,
|
|
||||||
InPlace: true,
|
|
||||||
Make: []string{
|
|
||||||
"CC=cc",
|
|
||||||
},
|
|
||||||
Install: "make PREFIX=/work/system install",
|
|
||||||
}), version
|
|
||||||
}
|
|
||||||
func init() {
|
|
||||||
artifactsM[Bzip2] = Metadata{
|
|
||||||
f: Toolchain.newBzip2,
|
|
||||||
|
|
||||||
Name: "bzip2",
|
|
||||||
Description: "a freely available, patent free, high-quality data compressor",
|
|
||||||
Website: "https://sourceware.org/bzip2/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,167 +1,82 @@
|
|||||||
package rosa
|
package rosa
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"path"
|
|
||||||
"slices"
|
"slices"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"hakurei.app/container/check"
|
||||||
"hakurei.app/internal/pkg"
|
"hakurei.app/internal/pkg"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (t Toolchain) newCMake() (pkg.Artifact, string) {
|
func (t Toolchain) newCMake() pkg.Artifact {
|
||||||
const (
|
const (
|
||||||
version = "4.2.3"
|
version = "4.2.1"
|
||||||
checksum = "Y4uYGnLrDQX78UdzH7fMzfok46Nt_1taDIHSmqgboU1yFi6f0iAXBDegMCu4eS-J"
|
checksum = "Y3OdbMsob6Xk2y1DCME6z4Fryb5_TkFD7knRT8dTNIRtSqbiCJyyDN9AxggN_I75"
|
||||||
)
|
)
|
||||||
return t.NewPackage("cmake", version, pkg.NewHTTPGetTar(
|
return t.New("cmake-"+version, 0, []pkg.Artifact{
|
||||||
|
t.Load(Make),
|
||||||
|
t.Load(KernelHeaders),
|
||||||
|
}, nil, nil, `
|
||||||
|
cd "$(mktemp -d)"
|
||||||
|
/usr/src/cmake/bootstrap \
|
||||||
|
--prefix=/system \
|
||||||
|
--parallel="$(nproc)" \
|
||||||
|
-- \
|
||||||
|
-DCMAKE_USE_OPENSSL=OFF
|
||||||
|
make "-j$(nproc)"
|
||||||
|
make DESTDIR=/work install
|
||||||
|
`, pkg.Path(AbsUsrSrc.Append("cmake"), true, t.NewPatchedSource(
|
||||||
|
// expected to be writable in the copy made during bootstrap
|
||||||
|
"cmake", version, pkg.NewHTTPGetTar(
|
||||||
nil, "https://github.com/Kitware/CMake/releases/download/"+
|
nil, "https://github.com/Kitware/CMake/releases/download/"+
|
||||||
"v"+version+"/cmake-"+version+".tar.gz",
|
"v"+version+"/cmake-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), false,
|
||||||
// test suite expects writable source tree
|
)))
|
||||||
Writable: true,
|
|
||||||
|
|
||||||
// expected to be writable in the copy made during bootstrap
|
|
||||||
Chmod: true,
|
|
||||||
|
|
||||||
Patches: [][2]string{
|
|
||||||
{"bootstrap-test-no-openssl", `diff --git a/Tests/BootstrapTest.cmake b/Tests/BootstrapTest.cmake
|
|
||||||
index 137de78bc1..b4da52e664 100644
|
|
||||||
--- a/Tests/BootstrapTest.cmake
|
|
||||||
+++ b/Tests/BootstrapTest.cmake
|
|
||||||
@@ -9,7 +9,7 @@ if(NOT nproc EQUAL 0)
|
|
||||||
endif()
|
|
||||||
message(STATUS "running bootstrap: ${bootstrap} ${ninja_arg} ${parallel_arg}")
|
|
||||||
execute_process(
|
|
||||||
- COMMAND ${bootstrap} ${ninja_arg} ${parallel_arg}
|
|
||||||
+ COMMAND ${bootstrap} ${ninja_arg} ${parallel_arg} -- -DCMAKE_USE_OPENSSL=OFF
|
|
||||||
WORKING_DIRECTORY "${bin_dir}"
|
|
||||||
RESULT_VARIABLE result
|
|
||||||
)
|
|
||||||
`},
|
|
||||||
|
|
||||||
{"disable-broken-tests-musl", `diff --git a/Tests/CMakeLists.txt b/Tests/CMakeLists.txt
|
|
||||||
index 2ead810437..f85cbb8b1c 100644
|
|
||||||
--- a/Tests/CMakeLists.txt
|
|
||||||
+++ b/Tests/CMakeLists.txt
|
|
||||||
@@ -384,7 +384,6 @@ if(BUILD_TESTING)
|
|
||||||
add_subdirectory(CMakeLib)
|
|
||||||
endif()
|
|
||||||
add_subdirectory(CMakeOnly)
|
|
||||||
- add_subdirectory(RunCMake)
|
|
||||||
|
|
||||||
add_subdirectory(FindPackageModeMakefileTest)
|
|
||||||
|
|
||||||
@@ -528,9 +527,6 @@ if(BUILD_TESTING)
|
|
||||||
-DCMake_TEST_CUDA:BOOL=${CMake_TEST_CUDA}
|
|
||||||
-DCMake_INSTALL_NAME_TOOL_BUG:BOOL=${CMake_INSTALL_NAME_TOOL_BUG}
|
|
||||||
)
|
|
||||||
- ADD_TEST_MACRO(ExportImport ExportImport)
|
|
||||||
- set_property(TEST ExportImport APPEND
|
|
||||||
- PROPERTY LABELS "CUDA")
|
|
||||||
ADD_TEST_MACRO(Unset Unset)
|
|
||||||
ADD_TEST_MACRO(PolicyScope PolicyScope)
|
|
||||||
ADD_TEST_MACRO(EmptyLibrary EmptyLibrary)
|
|
||||||
@@ -624,7 +620,6 @@ if(BUILD_TESTING)
|
|
||||||
# run test for BundleUtilities on supported platforms/compilers
|
|
||||||
if((MSVC OR
|
|
||||||
MINGW OR
|
|
||||||
- CMAKE_SYSTEM_NAME MATCHES "Linux" OR
|
|
||||||
CMAKE_SYSTEM_NAME MATCHES "Darwin")
|
|
||||||
AND NOT CMAKE_GENERATOR STREQUAL "Watcom WMake")
|
|
||||||
|
|
||||||
@@ -3095,10 +3090,6 @@ if(BUILD_TESTING)
|
|
||||||
"${CMake_SOURCE_DIR}/Tests/CTestTestFdSetSize/test.cmake.in"
|
|
||||||
"${CMake_BINARY_DIR}/Tests/CTestTestFdSetSize/test.cmake"
|
|
||||||
@ONLY ESCAPE_QUOTES)
|
|
||||||
- add_test(CTestTestFdSetSize ${CMAKE_CTEST_COMMAND}
|
|
||||||
- -S "${CMake_BINARY_DIR}/Tests/CTestTestFdSetSize/test.cmake" -j20 -V --timeout 120
|
|
||||||
- --output-log "${CMake_BINARY_DIR}/Tests/CTestTestFdSetSize/testOutput.log"
|
|
||||||
- )
|
|
||||||
|
|
||||||
if(CMAKE_TESTS_CDASH_SERVER)
|
|
||||||
set(regex "^([^:]+)://([^/]+)(.*)$")
|
|
||||||
`},
|
|
||||||
},
|
|
||||||
}, &MakeHelper{
|
|
||||||
OmitDefaults: true,
|
|
||||||
|
|
||||||
ConfigureName: "/usr/src/cmake/bootstrap",
|
|
||||||
Configure: [][2]string{
|
|
||||||
{"prefix", "/system"},
|
|
||||||
{"parallel", `"$(nproc)"`},
|
|
||||||
{"--"},
|
|
||||||
{"-DCMAKE_USE_OPENSSL", "OFF"},
|
|
||||||
{"-DCMake_TEST_NO_NETWORK", "ON"},
|
|
||||||
},
|
|
||||||
Check: []string{
|
|
||||||
"CTEST_OUTPUT_ON_FAILURE=1",
|
|
||||||
"CTEST_PARALLEL_LEVEL=128",
|
|
||||||
"test",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
KernelHeaders,
|
|
||||||
), version
|
|
||||||
}
|
}
|
||||||
func init() {
|
func init() { artifactsF[CMake] = Toolchain.newCMake }
|
||||||
artifactsM[CMake] = Metadata{
|
|
||||||
f: Toolchain.newCMake,
|
|
||||||
|
|
||||||
Name: "cmake",
|
|
||||||
Description: "cross-platform, open-source build system",
|
|
||||||
Website: "https://cmake.org/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// CMakeHelper is the [CMake] build system helper.
|
|
||||||
type CMakeHelper struct {
|
|
||||||
// Joined with name with a dash if non-empty.
|
|
||||||
Variant string
|
|
||||||
|
|
||||||
|
// CMakeAttr holds the project-specific attributes that will be applied to a new
|
||||||
|
// [pkg.Artifact] compiled via [CMake].
|
||||||
|
type CMakeAttr struct {
|
||||||
// Path elements joined with source.
|
// Path elements joined with source.
|
||||||
Append []string
|
Append []string
|
||||||
|
// Use source tree as scratch space.
|
||||||
|
Writable bool
|
||||||
|
|
||||||
// CMake CACHE entries.
|
// CMake CACHE entries.
|
||||||
Cache [][2]string
|
Cache [][2]string
|
||||||
|
// Additional environment variables.
|
||||||
|
Env []string
|
||||||
|
// Runs before cmake.
|
||||||
|
ScriptEarly string
|
||||||
|
// Runs after cmake, replaces default.
|
||||||
|
ScriptConfigured string
|
||||||
// Runs after install.
|
// Runs after install.
|
||||||
Script string
|
Script string
|
||||||
|
|
||||||
|
// Override the default installation prefix [AbsSystem].
|
||||||
|
Prefix *check.Absolute
|
||||||
|
|
||||||
|
// Passed through to [Toolchain.New].
|
||||||
|
Paths []pkg.ExecPath
|
||||||
|
// Passed through to [Toolchain.New].
|
||||||
|
Flag int
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ Helper = new(CMakeHelper)
|
// NewViaCMake returns a [pkg.Artifact] for compiling and installing via [CMake].
|
||||||
|
func (t Toolchain) NewViaCMake(
|
||||||
// name returns its arguments and an optional variant string joined with '-'.
|
name, version, variant string,
|
||||||
func (attr *CMakeHelper) name(name, version string) string {
|
source pkg.Artifact,
|
||||||
if attr != nil && attr.Variant != "" {
|
attr *CMakeAttr,
|
||||||
name += "-" + attr.Variant
|
extra ...pkg.Artifact,
|
||||||
|
) pkg.Artifact {
|
||||||
|
if name == "" || version == "" || variant == "" {
|
||||||
|
panic("names must be non-empty")
|
||||||
}
|
}
|
||||||
return name + "-" + version
|
|
||||||
}
|
|
||||||
|
|
||||||
// extra returns a hardcoded slice of [CMake] and [Ninja].
|
|
||||||
func (*CMakeHelper) extra(int) []PArtifact {
|
|
||||||
return []PArtifact{CMake, Ninja}
|
|
||||||
}
|
|
||||||
|
|
||||||
// wantsChmod returns false.
|
|
||||||
func (*CMakeHelper) wantsChmod() bool { return false }
|
|
||||||
|
|
||||||
// wantsWrite returns false.
|
|
||||||
func (*CMakeHelper) wantsWrite() bool { return false }
|
|
||||||
|
|
||||||
// scriptEarly returns the zero value.
|
|
||||||
func (*CMakeHelper) scriptEarly() string { return "" }
|
|
||||||
|
|
||||||
// createDir returns true.
|
|
||||||
func (*CMakeHelper) createDir() bool { return true }
|
|
||||||
|
|
||||||
// wantsDir returns a hardcoded, deterministic pathname.
|
|
||||||
func (*CMakeHelper) wantsDir() string { return "/cure/" }
|
|
||||||
|
|
||||||
// script generates the cure script.
|
|
||||||
func (attr *CMakeHelper) script(name string) string {
|
|
||||||
if attr == nil {
|
if attr == nil {
|
||||||
attr = &CMakeHelper{
|
attr = &CMakeAttr{
|
||||||
Cache: [][2]string{
|
Cache: [][2]string{
|
||||||
{"CMAKE_BUILD_TYPE", "Release"},
|
{"CMAKE_BUILD_TYPE", "Release"},
|
||||||
},
|
},
|
||||||
@@ -171,7 +86,26 @@ func (attr *CMakeHelper) script(name string) string {
|
|||||||
panic("CACHE must be non-empty")
|
panic("CACHE must be non-empty")
|
||||||
}
|
}
|
||||||
|
|
||||||
return `
|
scriptConfigured := "cmake --build .\ncmake --install .\n"
|
||||||
|
if attr.ScriptConfigured != "" {
|
||||||
|
scriptConfigured = attr.ScriptConfigured
|
||||||
|
}
|
||||||
|
|
||||||
|
prefix := attr.Prefix
|
||||||
|
if prefix == nil {
|
||||||
|
prefix = AbsSystem
|
||||||
|
}
|
||||||
|
|
||||||
|
sourcePath := AbsUsrSrc.Append(name)
|
||||||
|
return t.New(name+"-"+variant+"-"+version, attr.Flag, stage3Concat(t, extra,
|
||||||
|
t.Load(CMake),
|
||||||
|
t.Load(Ninja),
|
||||||
|
), nil, slices.Concat([]string{
|
||||||
|
"ROSA_SOURCE=" + sourcePath.String(),
|
||||||
|
"ROSA_CMAKE_SOURCE=" + sourcePath.Append(attr.Append...).String(),
|
||||||
|
"ROSA_INSTALL_PREFIX=/work" + prefix.String(),
|
||||||
|
}, attr.Env), attr.ScriptEarly+`
|
||||||
|
mkdir /cure && cd /cure
|
||||||
cmake -G Ninja \
|
cmake -G Ninja \
|
||||||
-DCMAKE_C_COMPILER_TARGET="${ROSA_TRIPLE}" \
|
-DCMAKE_C_COMPILER_TARGET="${ROSA_TRIPLE}" \
|
||||||
-DCMAKE_CXX_COMPILER_TARGET="${ROSA_TRIPLE}" \
|
-DCMAKE_CXX_COMPILER_TARGET="${ROSA_TRIPLE}" \
|
||||||
@@ -183,9 +117,9 @@ cmake -G Ninja \
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}), " \\\n\t")+` \
|
}), " \\\n\t")+` \
|
||||||
-DCMAKE_INSTALL_PREFIX=/work/system \
|
-DCMAKE_INSTALL_PREFIX="${ROSA_INSTALL_PREFIX}" \
|
||||||
'/usr/src/` + name + `/` + path.Join(attr.Append...) + `'
|
"${ROSA_CMAKE_SOURCE}"
|
||||||
cmake --build .
|
`+scriptConfigured+attr.Script, slices.Concat([]pkg.ExecPath{
|
||||||
cmake --install .
|
pkg.Path(sourcePath, attr.Writable, source),
|
||||||
` + attr.Script
|
}, attr.Paths)...)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,37 +2,31 @@ package rosa
|
|||||||
|
|
||||||
import "hakurei.app/internal/pkg"
|
import "hakurei.app/internal/pkg"
|
||||||
|
|
||||||
func (t Toolchain) newCurl() (pkg.Artifact, string) {
|
func (t Toolchain) newCurl() pkg.Artifact {
|
||||||
const (
|
const (
|
||||||
version = "8.18.0"
|
version = "8.18.0"
|
||||||
checksum = "YpOolP_sx1DIrCEJ3elgVAu0wTLDS-EZMZFvOP0eha7FaLueZUlEpuMwDzJNyi7i"
|
checksum = "YpOolP_sx1DIrCEJ3elgVAu0wTLDS-EZMZFvOP0eha7FaLueZUlEpuMwDzJNyi7i"
|
||||||
)
|
)
|
||||||
return t.NewPackage("curl", version, pkg.NewHTTPGetTar(
|
return t.NewViaMake("curl", version, pkg.NewHTTPGetTar(
|
||||||
nil, "https://curl.se/download/curl-"+version+".tar.bz2",
|
nil, "https://curl.se/download/curl-"+version+".tar.bz2",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarBzip2,
|
pkg.TarBzip2,
|
||||||
), nil, &MakeHelper{
|
), &MakeAttr{
|
||||||
|
Env: []string{
|
||||||
|
"TFLAGS=-j256",
|
||||||
|
},
|
||||||
Configure: [][2]string{
|
Configure: [][2]string{
|
||||||
{"with-openssl"},
|
{"with-openssl"},
|
||||||
{"with-ca-bundle", "/system/etc/ssl/certs/ca-bundle.crt"},
|
{"with-ca-bundle", "/system/etc/ssl/certs/ca-bundle.crt"},
|
||||||
},
|
},
|
||||||
Check: []string{
|
ScriptConfigured: `
|
||||||
"TFLAGS=-j256",
|
make "-j$(nproc)"
|
||||||
"check",
|
`,
|
||||||
},
|
},
|
||||||
},
|
t.Load(Perl),
|
||||||
Perl,
|
|
||||||
|
|
||||||
Libpsl,
|
t.Load(Libpsl),
|
||||||
OpenSSL,
|
t.Load(OpenSSL),
|
||||||
), version
|
)
|
||||||
}
|
|
||||||
func init() {
|
|
||||||
artifactsM[Curl] = Metadata{
|
|
||||||
f: Toolchain.newCurl,
|
|
||||||
|
|
||||||
Name: "curl",
|
|
||||||
Description: "command line tool and library for transferring data with URLs",
|
|
||||||
Website: "https://curl.se/",
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
func init() { artifactsF[Curl] = Toolchain.newCurl }
|
||||||
|
|||||||
@@ -1,41 +0,0 @@
|
|||||||
package rosa
|
|
||||||
|
|
||||||
import "hakurei.app/internal/pkg"
|
|
||||||
|
|
||||||
func (t Toolchain) newDTC() (pkg.Artifact, string) {
|
|
||||||
const (
|
|
||||||
version = "1.7.2"
|
|
||||||
checksum = "vUoiRynPyYRexTpS6USweT5p4SVHvvVJs8uqFkkVD-YnFjwf6v3elQ0-Etrh00Dt"
|
|
||||||
)
|
|
||||||
return t.NewPackage("dtc", version, pkg.NewHTTPGetTar(
|
|
||||||
nil, "https://git.kernel.org/pub/scm/utils/dtc/dtc.git/snapshot/"+
|
|
||||||
"dtc-v"+version+".tar.gz",
|
|
||||||
mustDecode(checksum),
|
|
||||||
pkg.TarGzip,
|
|
||||||
), &PackageAttr{
|
|
||||||
// works around buggy test:
|
|
||||||
// fdtdump-runtest.sh /usr/src/dtc/tests/fdtdump.dts
|
|
||||||
Writable: true,
|
|
||||||
Chmod: true,
|
|
||||||
}, &MesonHelper{
|
|
||||||
Setup: [][2]string{
|
|
||||||
{"Dyaml", "disabled"},
|
|
||||||
{"Dstatic-build", "true"},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Flex,
|
|
||||||
Bison,
|
|
||||||
M4,
|
|
||||||
Coreutils,
|
|
||||||
Diffutils,
|
|
||||||
), version
|
|
||||||
}
|
|
||||||
func init() {
|
|
||||||
artifactsM[DTC] = Metadata{
|
|
||||||
f: Toolchain.newDTC,
|
|
||||||
|
|
||||||
Name: "dtc",
|
|
||||||
Description: "The Device Tree Compiler",
|
|
||||||
Website: "https://git.kernel.org/pub/scm/utils/dtc/dtc.git/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,49 +0,0 @@
|
|||||||
package rosa
|
|
||||||
|
|
||||||
import "hakurei.app/internal/pkg"
|
|
||||||
|
|
||||||
func (t Toolchain) newElfutils() (pkg.Artifact, string) {
|
|
||||||
const (
|
|
||||||
version = "0.194"
|
|
||||||
checksum = "Q3XUygUPv9vR1TkWucwUsQ8Kb1_F6gzk-KMPELr3cC_4AcTrprhVPMvN0CKkiYRa"
|
|
||||||
)
|
|
||||||
return t.NewPackage("elfutils", version, pkg.NewHTTPGetTar(
|
|
||||||
nil, "https://sourceware.org/elfutils/ftp/"+
|
|
||||||
version+"/elfutils-"+version+".tar.bz2",
|
|
||||||
mustDecode(checksum),
|
|
||||||
pkg.TarBzip2,
|
|
||||||
), &PackageAttr{
|
|
||||||
Env: []string{
|
|
||||||
"CC=cc" +
|
|
||||||
// nonstandard glibc extension
|
|
||||||
" -DFNM_EXTMATCH=0",
|
|
||||||
},
|
|
||||||
}, &MakeHelper{
|
|
||||||
// nonstandard glibc extension
|
|
||||||
SkipCheck: true,
|
|
||||||
|
|
||||||
Configure: [][2]string{
|
|
||||||
{"enable-deterministic-archives"},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
M4,
|
|
||||||
PkgConfig,
|
|
||||||
|
|
||||||
Zlib,
|
|
||||||
Bzip2,
|
|
||||||
Zstd,
|
|
||||||
ArgpStandalone,
|
|
||||||
MuslFts,
|
|
||||||
MuslObstack,
|
|
||||||
KernelHeaders,
|
|
||||||
), version
|
|
||||||
}
|
|
||||||
func init() {
|
|
||||||
artifactsM[Elfutils] = Metadata{
|
|
||||||
f: Toolchain.newElfutils,
|
|
||||||
|
|
||||||
Name: "elfutils",
|
|
||||||
Description: "utilities and libraries to handle ELF files and DWARF data",
|
|
||||||
Website: "https://sourceware.org/elfutils/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,59 +0,0 @@
|
|||||||
package rosa
|
|
||||||
|
|
||||||
import "hakurei.app/internal/pkg"
|
|
||||||
|
|
||||||
func (t Toolchain) newFakeroot() (pkg.Artifact, string) {
|
|
||||||
const (
|
|
||||||
version = "1.37.2"
|
|
||||||
checksum = "4ve-eDqVspzQ6VWDhPS0NjW3aSenBJcPAJq_BFT7OOFgUdrQzoTBxZWipDAGWxF8"
|
|
||||||
)
|
|
||||||
return t.NewPackage("fakeroot", version, pkg.NewHTTPGetTar(
|
|
||||||
nil, "https://salsa.debian.org/clint/fakeroot/-/archive/upstream/"+
|
|
||||||
version+"/fakeroot-upstream-"+version+".tar.bz2",
|
|
||||||
mustDecode(checksum),
|
|
||||||
pkg.TarBzip2,
|
|
||||||
), &PackageAttr{
|
|
||||||
Patches: [][2]string{
|
|
||||||
{"remove-broken-docs", `diff --git a/doc/Makefile.am b/doc/Makefile.am
|
|
||||||
index f135ad9..85c784c 100644
|
|
||||||
--- a/doc/Makefile.am
|
|
||||||
+++ b/doc/Makefile.am
|
|
||||||
@@ -1,5 +1,4 @@
|
|
||||||
AUTOMAKE_OPTIONS=foreign
|
|
||||||
-SUBDIRS = de es fr nl pt ro sv
|
|
||||||
|
|
||||||
man_MANS = faked.1 fakeroot.1
|
|
||||||
|
|
||||||
`},
|
|
||||||
},
|
|
||||||
|
|
||||||
Env: []string{
|
|
||||||
"CONFIG_SHELL=/bin/sh",
|
|
||||||
},
|
|
||||||
}, &MakeHelper{
|
|
||||||
Generate: "./bootstrap",
|
|
||||||
|
|
||||||
// makes assumptions about /etc/passwd
|
|
||||||
SkipCheck: true,
|
|
||||||
},
|
|
||||||
M4,
|
|
||||||
Perl,
|
|
||||||
Autoconf,
|
|
||||||
Automake,
|
|
||||||
Libtool,
|
|
||||||
PkgConfig,
|
|
||||||
|
|
||||||
Attr,
|
|
||||||
Libcap,
|
|
||||||
KernelHeaders,
|
|
||||||
), version
|
|
||||||
}
|
|
||||||
func init() {
|
|
||||||
artifactsM[Fakeroot] = Metadata{
|
|
||||||
f: Toolchain.newFakeroot,
|
|
||||||
|
|
||||||
Name: "fakeroot",
|
|
||||||
Description: "tool for simulating superuser privileges",
|
|
||||||
Website: "https://salsa.debian.org/clint/fakeroot",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,29 +0,0 @@
|
|||||||
package rosa
|
|
||||||
|
|
||||||
import (
|
|
||||||
"hakurei.app/internal/pkg"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (t Toolchain) newFlex() (pkg.Artifact, string) {
|
|
||||||
const (
|
|
||||||
version = "2.6.4"
|
|
||||||
checksum = "p9POjQU7VhgOf3x5iFro8fjhy0NOanvA7CTeuWS_veSNgCixIJshTrWVkc5XLZkB"
|
|
||||||
)
|
|
||||||
return t.NewPackage("flex", version, pkg.NewHTTPGetTar(
|
|
||||||
nil, "https://github.com/westes/flex/releases/download/"+
|
|
||||||
"v"+version+"/flex-"+version+".tar.gz",
|
|
||||||
mustDecode(checksum),
|
|
||||||
pkg.TarGzip,
|
|
||||||
), nil, (*MakeHelper)(nil),
|
|
||||||
M4,
|
|
||||||
), version
|
|
||||||
}
|
|
||||||
func init() {
|
|
||||||
artifactsM[Flex] = Metadata{
|
|
||||||
f: Toolchain.newFlex,
|
|
||||||
|
|
||||||
Name: "flex",
|
|
||||||
Description: "scanner generator for lexing in C and C++",
|
|
||||||
Website: "https://github.com/westes/flex/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -2,43 +2,44 @@ package rosa
|
|||||||
|
|
||||||
import "hakurei.app/internal/pkg"
|
import "hakurei.app/internal/pkg"
|
||||||
|
|
||||||
func (t Toolchain) newFuse() (pkg.Artifact, string) {
|
func (t Toolchain) newFuse() pkg.Artifact {
|
||||||
const (
|
const (
|
||||||
version = "3.18.1"
|
version = "3.18.1"
|
||||||
checksum = "COb-BgJRWXLbt9XUkNeuiroQizpMifXqxgieE1SlkMXhs_WGSyJStrmyewAw2hd6"
|
checksum = "COb-BgJRWXLbt9XUkNeuiroQizpMifXqxgieE1SlkMXhs_WGSyJStrmyewAw2hd6"
|
||||||
)
|
)
|
||||||
return t.NewPackage("fuse", version, pkg.NewHTTPGetTar(
|
return t.New("fuse-"+version, 0, []pkg.Artifact{
|
||||||
|
t.Load(Python),
|
||||||
|
t.Load(Meson),
|
||||||
|
t.Load(Ninja),
|
||||||
|
|
||||||
|
t.Load(IniConfig),
|
||||||
|
t.Load(Packaging),
|
||||||
|
t.Load(Pluggy),
|
||||||
|
t.Load(Pygments),
|
||||||
|
t.Load(PyTest),
|
||||||
|
|
||||||
|
t.Load(KernelHeaders),
|
||||||
|
}, nil, nil, `
|
||||||
|
cd "$(mktemp -d)"
|
||||||
|
meson setup \
|
||||||
|
--reconfigure \
|
||||||
|
--buildtype=release \
|
||||||
|
--prefix=/system \
|
||||||
|
--prefer-static \
|
||||||
|
-Dtests=true \
|
||||||
|
-Duseroot=false \
|
||||||
|
-Dinitscriptdir=/system/init.d \
|
||||||
|
-Ddefault_library=both \
|
||||||
|
. /usr/src/fuse
|
||||||
|
meson compile
|
||||||
|
python3 -m pytest test/
|
||||||
|
meson install \
|
||||||
|
--destdir=/work
|
||||||
|
`, pkg.Path(AbsUsrSrc.Append("fuse"), false, pkg.NewHTTPGetTar(
|
||||||
nil, "https://github.com/libfuse/libfuse/releases/download/"+
|
nil, "https://github.com/libfuse/libfuse/releases/download/"+
|
||||||
"fuse-"+version+"/fuse-"+version+".tar.gz",
|
"fuse-"+version+"/fuse-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil, &MesonHelper{
|
)))
|
||||||
Setup: [][2]string{
|
|
||||||
{"Ddefault_library", "both"},
|
|
||||||
{"Dtests", "true"},
|
|
||||||
{"Duseroot", "false"},
|
|
||||||
{"Dinitscriptdir", "/system/etc"},
|
|
||||||
},
|
|
||||||
|
|
||||||
ScriptCompiled: "python3 -m pytest test/",
|
|
||||||
// this project uses pytest
|
|
||||||
SkipTest: true,
|
|
||||||
},
|
|
||||||
IniConfig,
|
|
||||||
Packaging,
|
|
||||||
Pluggy,
|
|
||||||
Pygments,
|
|
||||||
PyTest,
|
|
||||||
|
|
||||||
KernelHeaders,
|
|
||||||
), version
|
|
||||||
}
|
|
||||||
func init() {
|
|
||||||
artifactsM[Fuse] = Metadata{
|
|
||||||
f: Toolchain.newFuse,
|
|
||||||
|
|
||||||
Name: "fuse",
|
|
||||||
Description: "the reference implementation of the Linux FUSE interface",
|
|
||||||
Website: "https://github.com/libfuse/libfuse/",
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
func init() { artifactsF[Fuse] = Toolchain.newFuse }
|
||||||
|
|||||||
@@ -1,30 +1,40 @@
|
|||||||
package rosa
|
package rosa
|
||||||
|
|
||||||
import "hakurei.app/internal/pkg"
|
import (
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
)
|
||||||
|
|
||||||
func (t Toolchain) newGit() (pkg.Artifact, string) {
|
func (t Toolchain) newGit() pkg.Artifact {
|
||||||
const (
|
const (
|
||||||
version = "2.52.0"
|
version = "2.52.0"
|
||||||
checksum = "uH3J1HAN_c6PfGNJd2OBwW4zo36n71wmkdvityYnrh8Ak0D1IifiAvEWz9Vi9DmS"
|
checksum = "uH3J1HAN_c6PfGNJd2OBwW4zo36n71wmkdvityYnrh8Ak0D1IifiAvEWz9Vi9DmS"
|
||||||
)
|
)
|
||||||
return t.NewPackage("git", version, pkg.NewHTTPGetTar(
|
return t.NewViaMake("git", version, t.NewPatchedSource(
|
||||||
|
"git", version, pkg.NewHTTPGetTar(
|
||||||
nil, "https://www.kernel.org/pub/software/scm/git/"+
|
nil, "https://www.kernel.org/pub/software/scm/git/"+
|
||||||
"git-"+version+".tar.gz",
|
"git-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), false,
|
||||||
ScriptEarly: `
|
), &MakeAttr{
|
||||||
ln -s ../../system/bin/perl /usr/bin/ || true
|
|
||||||
`,
|
|
||||||
|
|
||||||
// uses source tree as scratch space
|
// uses source tree as scratch space
|
||||||
EnterSource: true,
|
Writable: true,
|
||||||
}, &MakeHelper{
|
|
||||||
InPlace: true,
|
InPlace: true,
|
||||||
Generate: "make configure",
|
|
||||||
ScriptMakeEarly: `
|
// test suite in subdirectory
|
||||||
|
SkipCheck: true,
|
||||||
|
|
||||||
|
Make: []string{"all"},
|
||||||
|
ScriptEarly: `
|
||||||
|
cd /usr/src/git
|
||||||
|
|
||||||
|
make configure
|
||||||
|
`,
|
||||||
|
Script: `
|
||||||
|
ln -s ../../system/bin/perl /usr/bin/ || true
|
||||||
|
|
||||||
function disable_test {
|
function disable_test {
|
||||||
local test=$1 pattern=${2:-''}
|
local test=$1 pattern=$2
|
||||||
if [ $# -eq 1 ]; then
|
if [ $# -eq 1 ]; then
|
||||||
rm "t/${test}.sh"
|
rm "t/${test}.sh"
|
||||||
else
|
else
|
||||||
@@ -46,34 +56,26 @@ disable_test t9300-fast-import
|
|||||||
disable_test t0211-trace2-perf
|
disable_test t0211-trace2-perf
|
||||||
disable_test t1517-outside-repo
|
disable_test t1517-outside-repo
|
||||||
disable_test t2200-add-update
|
disable_test t2200-add-update
|
||||||
|
|
||||||
|
make \
|
||||||
|
-C t \
|
||||||
|
GIT_PROVE_OPTS="--jobs 32 --failures" \
|
||||||
|
prove
|
||||||
`,
|
`,
|
||||||
Check: []string{
|
|
||||||
"-C t",
|
|
||||||
`GIT_PROVE_OPTS="--jobs 32 --failures"`,
|
|
||||||
"prove",
|
|
||||||
},
|
},
|
||||||
},
|
t.Load(Perl),
|
||||||
Perl,
|
t.Load(Diffutils),
|
||||||
Diffutils,
|
t.Load(M4),
|
||||||
M4,
|
t.Load(Autoconf),
|
||||||
Autoconf,
|
t.Load(Gettext),
|
||||||
Gettext,
|
|
||||||
|
|
||||||
Zlib,
|
t.Load(Zlib),
|
||||||
Curl,
|
t.Load(Curl),
|
||||||
OpenSSL,
|
t.Load(OpenSSL),
|
||||||
Libexpat,
|
t.Load(Libexpat),
|
||||||
), version
|
)
|
||||||
}
|
|
||||||
func init() {
|
|
||||||
artifactsM[Git] = Metadata{
|
|
||||||
f: Toolchain.newGit,
|
|
||||||
|
|
||||||
Name: "git",
|
|
||||||
Description: "distributed version control system",
|
|
||||||
Website: "https://www.git-scm.com/",
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
func init() { artifactsF[Git] = Toolchain.newGit }
|
||||||
|
|
||||||
// NewViaGit returns a [pkg.Artifact] for cloning a git repository.
|
// NewViaGit returns a [pkg.Artifact] for cloning a git repository.
|
||||||
func (t Toolchain) NewViaGit(
|
func (t Toolchain) NewViaGit(
|
||||||
|
|||||||
@@ -2,127 +2,79 @@ package rosa
|
|||||||
|
|
||||||
import "hakurei.app/internal/pkg"
|
import "hakurei.app/internal/pkg"
|
||||||
|
|
||||||
func (t Toolchain) newM4() (pkg.Artifact, string) {
|
func (t Toolchain) newM4() pkg.Artifact {
|
||||||
const (
|
const (
|
||||||
version = "1.4.20"
|
version = "1.4.20"
|
||||||
checksum = "RT0_L3m4Co86bVBY3lCFAEs040yI1WdeNmRylFpah8IZovTm6O4wI7qiHJN3qsW9"
|
checksum = "RT0_L3m4Co86bVBY3lCFAEs040yI1WdeNmRylFpah8IZovTm6O4wI7qiHJN3qsW9"
|
||||||
)
|
)
|
||||||
return t.NewPackage("m4", version, pkg.NewHTTPGetTar(
|
return t.NewViaMake("m4", version, pkg.NewHTTPGetTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/m4/m4-"+version+".tar.bz2",
|
nil, "https://ftpmirror.gnu.org/gnu/m4/m4-"+version+".tar.bz2",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarBzip2,
|
pkg.TarBzip2,
|
||||||
), &PackageAttr{
|
), &MakeAttr{
|
||||||
Writable: true,
|
Writable: true,
|
||||||
ScriptEarly: `
|
ScriptEarly: `
|
||||||
|
cd /usr/src/m4
|
||||||
chmod +w tests/test-c32ispunct.sh && echo '#!/bin/sh' > tests/test-c32ispunct.sh
|
chmod +w tests/test-c32ispunct.sh && echo '#!/bin/sh' > tests/test-c32ispunct.sh
|
||||||
`,
|
`,
|
||||||
}, (*MakeHelper)(nil),
|
},
|
||||||
Diffutils,
|
t.Load(Diffutils),
|
||||||
), version
|
|
||||||
}
|
|
||||||
func init() {
|
|
||||||
artifactsM[M4] = Metadata{
|
|
||||||
f: Toolchain.newM4,
|
|
||||||
|
|
||||||
Name: "m4",
|
|
||||||
Description: "a macro processor with GNU extensions",
|
|
||||||
Website: "https://www.gnu.org/software/m4/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Toolchain) newBison() (pkg.Artifact, string) {
|
|
||||||
const (
|
|
||||||
version = "3.8.2"
|
|
||||||
checksum = "BhRM6K7URj1LNOkIDCFDctSErLS-Xo5d9ba9seg10o6ACrgC1uNhED7CQPgIY29Y"
|
|
||||||
)
|
)
|
||||||
return t.NewPackage("bison", version, pkg.NewHTTPGetTar(
|
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/bison/bison-"+version+".tar.gz",
|
|
||||||
mustDecode(checksum),
|
|
||||||
pkg.TarGzip,
|
|
||||||
), nil, (*MakeHelper)(nil),
|
|
||||||
M4,
|
|
||||||
Diffutils,
|
|
||||||
Sed,
|
|
||||||
), version
|
|
||||||
}
|
}
|
||||||
func init() {
|
func init() { artifactsF[M4] = Toolchain.newM4 }
|
||||||
artifactsM[Bison] = Metadata{
|
|
||||||
f: Toolchain.newBison,
|
|
||||||
|
|
||||||
Name: "bison",
|
func (t Toolchain) newSed() pkg.Artifact {
|
||||||
Description: "a general-purpose parser generator",
|
|
||||||
Website: "https://www.gnu.org/software/bison/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Toolchain) newSed() (pkg.Artifact, string) {
|
|
||||||
const (
|
const (
|
||||||
version = "4.9"
|
version = "4.9"
|
||||||
checksum = "pe7HWH4PHNYrazOTlUoE1fXmhn2GOPFN_xE62i0llOr3kYGrH1g2_orDz0UtZ9Nt"
|
checksum = "pe7HWH4PHNYrazOTlUoE1fXmhn2GOPFN_xE62i0llOr3kYGrH1g2_orDz0UtZ9Nt"
|
||||||
)
|
)
|
||||||
return t.NewPackage("sed", version, pkg.NewHTTPGetTar(
|
return t.NewViaMake("sed", version, pkg.NewHTTPGetTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/sed/sed-"+version+".tar.gz",
|
nil, "https://ftpmirror.gnu.org/gnu/sed/sed-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil, (*MakeHelper)(nil),
|
), nil,
|
||||||
Diffutils,
|
t.Load(Diffutils),
|
||||||
), version
|
)
|
||||||
}
|
}
|
||||||
func init() {
|
func init() { artifactsF[Sed] = Toolchain.newSed }
|
||||||
artifactsM[Sed] = Metadata{
|
|
||||||
f: Toolchain.newSed,
|
|
||||||
|
|
||||||
Name: "sed",
|
func (t Toolchain) newAutoconf() pkg.Artifact {
|
||||||
Description: "a non-interactive command-line text editor",
|
|
||||||
Website: "https://www.gnu.org/software/sed/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Toolchain) newAutoconf() (pkg.Artifact, string) {
|
|
||||||
const (
|
const (
|
||||||
version = "2.72"
|
version = "2.72"
|
||||||
checksum = "-c5blYkC-xLDer3TWEqJTyh1RLbOd1c5dnRLKsDnIrg_wWNOLBpaqMY8FvmUFJ33"
|
checksum = "-c5blYkC-xLDer3TWEqJTyh1RLbOd1c5dnRLKsDnIrg_wWNOLBpaqMY8FvmUFJ33"
|
||||||
)
|
)
|
||||||
return t.NewPackage("autoconf", version, pkg.NewHTTPGetTar(
|
return t.NewViaMake("autoconf", version, pkg.NewHTTPGetTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/autoconf/autoconf-"+version+".tar.gz",
|
nil, "https://ftpmirror.gnu.org/gnu/autoconf/autoconf-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &MakeAttr{
|
||||||
Flag: TExclusive,
|
Make: []string{
|
||||||
}, &MakeHelper{
|
|
||||||
Check: []string{
|
|
||||||
`TESTSUITEFLAGS="-j$(nproc)"`,
|
`TESTSUITEFLAGS="-j$(nproc)"`,
|
||||||
"check",
|
|
||||||
},
|
},
|
||||||
|
Flag: TExclusive,
|
||||||
},
|
},
|
||||||
M4,
|
t.Load(M4),
|
||||||
Perl,
|
t.Load(Perl),
|
||||||
Bash,
|
t.Load(Bash),
|
||||||
Diffutils,
|
t.Load(Diffutils),
|
||||||
), version
|
)
|
||||||
}
|
}
|
||||||
func init() {
|
func init() { artifactsF[Autoconf] = Toolchain.newAutoconf }
|
||||||
artifactsM[Autoconf] = Metadata{
|
|
||||||
f: Toolchain.newAutoconf,
|
|
||||||
|
|
||||||
Name: "autoconf",
|
func (t Toolchain) newAutomake() pkg.Artifact {
|
||||||
Description: "M4 macros to produce self-contained configure script",
|
|
||||||
Website: "https://www.gnu.org/software/autoconf/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Toolchain) newAutomake() (pkg.Artifact, string) {
|
|
||||||
const (
|
const (
|
||||||
version = "1.18.1"
|
version = "1.18.1"
|
||||||
checksum = "FjvLG_GdQP7cThTZJLDMxYpRcKdpAVG-YDs1Fj1yaHlSdh_Kx6nRGN14E0r_BjcG"
|
checksum = "FjvLG_GdQP7cThTZJLDMxYpRcKdpAVG-YDs1Fj1yaHlSdh_Kx6nRGN14E0r_BjcG"
|
||||||
)
|
)
|
||||||
return t.NewPackage("automake", version, pkg.NewHTTPGetTar(
|
return t.NewViaMake("automake", version, pkg.NewHTTPGetTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/automake/automake-"+version+".tar.gz",
|
nil, "https://ftpmirror.gnu.org/gnu/automake/automake-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &MakeAttr{
|
||||||
Writable: true,
|
Writable: true,
|
||||||
ScriptEarly: `
|
ScriptEarly: `
|
||||||
|
cd /usr/src/automake
|
||||||
|
|
||||||
test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
|
test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
|
||||||
|
|
||||||
test_disable '#!/bin/sh' t/objcxx-minidemo.sh
|
test_disable '#!/bin/sh' t/objcxx-minidemo.sh
|
||||||
@@ -132,90 +84,66 @@ test_disable '#!/bin/sh' t/dist-no-built-sources.sh
|
|||||||
test_disable '#!/bin/sh' t/distname.sh
|
test_disable '#!/bin/sh' t/distname.sh
|
||||||
test_disable '#!/bin/sh' t/pr9.sh
|
test_disable '#!/bin/sh' t/pr9.sh
|
||||||
`,
|
`,
|
||||||
}, (*MakeHelper)(nil),
|
},
|
||||||
M4,
|
t.Load(M4),
|
||||||
Perl,
|
t.Load(Perl),
|
||||||
Grep,
|
t.Load(Grep),
|
||||||
Gzip,
|
t.Load(Gzip),
|
||||||
Autoconf,
|
t.Load(Autoconf),
|
||||||
Diffutils,
|
t.Load(Diffutils),
|
||||||
), version
|
)
|
||||||
}
|
}
|
||||||
func init() {
|
func init() { artifactsF[Automake] = Toolchain.newAutomake }
|
||||||
artifactsM[Automake] = Metadata{
|
|
||||||
f: Toolchain.newAutomake,
|
|
||||||
|
|
||||||
Name: "automake",
|
func (t Toolchain) newLibtool() pkg.Artifact {
|
||||||
Description: "a tool for automatically generating Makefile.in files",
|
|
||||||
Website: "https://www.gnu.org/software/automake/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Toolchain) newLibtool() (pkg.Artifact, string) {
|
|
||||||
const (
|
const (
|
||||||
version = "2.5.4"
|
version = "2.5.4"
|
||||||
checksum = "pa6LSrQggh8mSJHQfwGjysAApmZlGJt8wif2cCLzqAAa2jpsTY0jZ-6stS3BWZ2Q"
|
checksum = "pa6LSrQggh8mSJHQfwGjysAApmZlGJt8wif2cCLzqAAa2jpsTY0jZ-6stS3BWZ2Q"
|
||||||
)
|
)
|
||||||
return t.NewPackage("libtool", version, pkg.NewHTTPGetTar(
|
return t.NewViaMake("libtool", version, pkg.NewHTTPGetTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/libtool/libtool-"+version+".tar.gz",
|
nil, "https://ftpmirror.gnu.org/gnu/libtool/libtool-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil, &MakeHelper{
|
), &MakeAttr{
|
||||||
Check: []string{
|
Make: []string{
|
||||||
`TESTSUITEFLAGS="-j$(nproc)"`,
|
`TESTSUITEFLAGS=32`,
|
||||||
"check",
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
M4,
|
t.Load(M4),
|
||||||
Diffutils,
|
t.Load(Diffutils),
|
||||||
), version
|
)
|
||||||
}
|
}
|
||||||
func init() {
|
func init() { artifactsF[Libtool] = Toolchain.newLibtool }
|
||||||
artifactsM[Libtool] = Metadata{
|
|
||||||
f: Toolchain.newLibtool,
|
|
||||||
|
|
||||||
Name: "libtool",
|
func (t Toolchain) newGzip() pkg.Artifact {
|
||||||
Description: "a generic library support script",
|
|
||||||
Website: "https://www.gnu.org/software/libtool/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Toolchain) newGzip() (pkg.Artifact, string) {
|
|
||||||
const (
|
const (
|
||||||
version = "1.14"
|
version = "1.14"
|
||||||
checksum = "NWhjUavnNfTDFkZJyAUonL9aCOak8GVajWX2OMlzpFnuI0ErpBFyj88mz2xSjz0q"
|
checksum = "NWhjUavnNfTDFkZJyAUonL9aCOak8GVajWX2OMlzpFnuI0ErpBFyj88mz2xSjz0q"
|
||||||
)
|
)
|
||||||
return t.NewPackage("gzip", version, pkg.NewHTTPGetTar(
|
return t.NewViaMake("gzip", version, pkg.NewHTTPGetTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/gzip/gzip-"+version+".tar.gz",
|
nil, "https://ftpmirror.gnu.org/gnu/gzip/gzip-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil, &MakeHelper{
|
), &MakeAttr{
|
||||||
// dependency loop
|
// dependency loop
|
||||||
SkipCheck: true,
|
SkipCheck: true,
|
||||||
}), version
|
})
|
||||||
}
|
}
|
||||||
func init() {
|
func init() { artifactsF[Gzip] = Toolchain.newGzip }
|
||||||
artifactsM[Gzip] = Metadata{
|
|
||||||
f: Toolchain.newGzip,
|
|
||||||
|
|
||||||
Name: "gzip",
|
func (t Toolchain) newGettext() pkg.Artifact {
|
||||||
Description: "a popular data compression program",
|
|
||||||
Website: "https://www.gnu.org/software/gzip/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Toolchain) newGettext() (pkg.Artifact, string) {
|
|
||||||
const (
|
const (
|
||||||
version = "1.0"
|
version = "1.0"
|
||||||
checksum = "3MasKeEdPeFEgWgzsBKk7JqWqql1wEMbgPmzAfs-mluyokoW0N8oQVxPQoOnSdgC"
|
checksum = "3MasKeEdPeFEgWgzsBKk7JqWqql1wEMbgPmzAfs-mluyokoW0N8oQVxPQoOnSdgC"
|
||||||
)
|
)
|
||||||
return t.NewPackage("gettext", version, pkg.NewHTTPGetTar(
|
return t.NewViaMake("gettext", version, pkg.NewHTTPGetTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/gettext/gettext-"+version+".tar.gz",
|
nil, "https://ftpmirror.gnu.org/gnu/gettext/gettext-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &MakeAttr{
|
||||||
Writable: true,
|
Writable: true,
|
||||||
ScriptEarly: `
|
ScriptEarly: `
|
||||||
|
cd /usr/src/gettext
|
||||||
test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
|
test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
|
||||||
|
|
||||||
test_disable '#!/bin/sh' gettext-tools/tests/msgcat-22
|
test_disable '#!/bin/sh' gettext-tools/tests/msgcat-22
|
||||||
@@ -230,36 +158,29 @@ test_disable 'int main(){return 0;}' gettext-tools/gnulib-tests/test-stdcountof-
|
|||||||
|
|
||||||
touch gettext-tools/autotools/archive.dir.tar
|
touch gettext-tools/autotools/archive.dir.tar
|
||||||
`,
|
`,
|
||||||
}, (*MakeHelper)(nil),
|
},
|
||||||
Diffutils,
|
t.Load(Diffutils),
|
||||||
Gzip,
|
t.Load(Gzip),
|
||||||
Sed,
|
t.Load(Sed),
|
||||||
|
|
||||||
KernelHeaders,
|
t.Load(KernelHeaders),
|
||||||
), version
|
)
|
||||||
}
|
}
|
||||||
func init() {
|
func init() { artifactsF[Gettext] = Toolchain.newGettext }
|
||||||
artifactsM[Gettext] = Metadata{
|
|
||||||
f: Toolchain.newGettext,
|
|
||||||
|
|
||||||
Name: "gettext",
|
func (t Toolchain) newDiffutils() pkg.Artifact {
|
||||||
Description: "tools for producing multi-lingual messages",
|
|
||||||
Website: "https://www.gnu.org/software/gettext/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Toolchain) newDiffutils() (pkg.Artifact, string) {
|
|
||||||
const (
|
const (
|
||||||
version = "3.12"
|
version = "3.12"
|
||||||
checksum = "9J5VAq5oA7eqwzS1Yvw-l3G5o-TccUrNQR3PvyB_lgdryOFAfxtvQfKfhdpquE44"
|
checksum = "9J5VAq5oA7eqwzS1Yvw-l3G5o-TccUrNQR3PvyB_lgdryOFAfxtvQfKfhdpquE44"
|
||||||
)
|
)
|
||||||
return t.NewPackage("diffutils", version, pkg.NewHTTPGetTar(
|
return t.NewViaMake("diffutils", version, pkg.NewHTTPGetTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/diffutils/diffutils-"+version+".tar.gz",
|
nil, "https://ftpmirror.gnu.org/gnu/diffutils/diffutils-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &MakeAttr{
|
||||||
Writable: true,
|
Writable: true,
|
||||||
ScriptEarly: `
|
ScriptEarly: `
|
||||||
|
cd /usr/src/diffutils
|
||||||
test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
|
test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
|
||||||
|
|
||||||
test_disable '#!/bin/sh' gnulib-tests/test-c32ispunct.sh
|
test_disable '#!/bin/sh' gnulib-tests/test-c32ispunct.sh
|
||||||
@@ -267,454 +188,254 @@ test_disable 'int main(){return 0;}' gnulib-tests/test-c32ispunct.c
|
|||||||
test_disable '#!/bin/sh' tests/cmp
|
test_disable '#!/bin/sh' tests/cmp
|
||||||
`,
|
`,
|
||||||
Flag: TEarly,
|
Flag: TEarly,
|
||||||
}, (*MakeHelper)(nil)), version
|
})
|
||||||
}
|
}
|
||||||
func init() {
|
func init() { artifactsF[Diffutils] = Toolchain.newDiffutils }
|
||||||
artifactsM[Diffutils] = Metadata{
|
|
||||||
f: Toolchain.newDiffutils,
|
|
||||||
|
|
||||||
Name: "diffutils",
|
func (t Toolchain) newPatch() pkg.Artifact {
|
||||||
Description: "several programs related to finding differences between files",
|
|
||||||
Website: "https://www.gnu.org/software/diffutils/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Toolchain) newPatch() (pkg.Artifact, string) {
|
|
||||||
const (
|
const (
|
||||||
version = "2.8"
|
version = "2.8"
|
||||||
checksum = "MA0BQc662i8QYBD-DdGgyyfTwaeALZ1K0yusV9rAmNiIsQdX-69YC4t9JEGXZkeR"
|
checksum = "MA0BQc662i8QYBD-DdGgyyfTwaeALZ1K0yusV9rAmNiIsQdX-69YC4t9JEGXZkeR"
|
||||||
)
|
)
|
||||||
return t.NewPackage("patch", version, pkg.NewHTTPGetTar(
|
return t.NewViaMake("patch", version, pkg.NewHTTPGetTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/patch/patch-"+version+".tar.gz",
|
nil, "https://ftpmirror.gnu.org/gnu/patch/patch-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &MakeAttr{
|
||||||
Writable: true,
|
Writable: true,
|
||||||
ScriptEarly: `
|
ScriptEarly: `
|
||||||
|
cd /usr/src/patch
|
||||||
test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
|
test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
|
||||||
|
|
||||||
test_disable '#!/bin/sh' tests/ed-style
|
test_disable '#!/bin/sh' tests/ed-style
|
||||||
test_disable '#!/bin/sh' tests/need-filename
|
test_disable '#!/bin/sh' tests/need-filename
|
||||||
`,
|
`,
|
||||||
Flag: TEarly,
|
Flag: TEarly,
|
||||||
}, (*MakeHelper)(nil)), version
|
})
|
||||||
}
|
}
|
||||||
func init() {
|
func init() { artifactsF[Patch] = Toolchain.newPatch }
|
||||||
artifactsM[Patch] = Metadata{
|
|
||||||
f: Toolchain.newPatch,
|
|
||||||
|
|
||||||
Name: "patch",
|
func (t Toolchain) newBash() pkg.Artifact {
|
||||||
Description: "a program to apply diff output to files",
|
|
||||||
Website: "https://savannah.gnu.org/projects/patch/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Toolchain) newBash() (pkg.Artifact, string) {
|
|
||||||
const (
|
const (
|
||||||
version = "5.3"
|
version = "5.3"
|
||||||
checksum = "4LQ_GRoB_ko-Ih8QPf_xRKA02xAm_TOxQgcJLmFDT6udUPxTAWrsj-ZNeuTusyDq"
|
checksum = "4LQ_GRoB_ko-Ih8QPf_xRKA02xAm_TOxQgcJLmFDT6udUPxTAWrsj-ZNeuTusyDq"
|
||||||
)
|
)
|
||||||
return t.NewPackage("bash", version, pkg.NewHTTPGetTar(
|
return t.NewViaMake("bash", version, pkg.NewHTTPGetTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/bash/bash-"+version+".tar.gz",
|
nil, "https://ftpmirror.gnu.org/gnu/bash/bash-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &MakeAttr{
|
||||||
Flag: TEarly,
|
|
||||||
}, &MakeHelper{
|
|
||||||
Script: "ln -s bash /work/system/bin/sh\n",
|
Script: "ln -s bash /work/system/bin/sh\n",
|
||||||
Configure: [][2]string{
|
Configure: [][2]string{
|
||||||
{"without-bash-malloc"},
|
{"without-bash-malloc"},
|
||||||
},
|
},
|
||||||
}), version
|
Flag: TEarly,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
func init() {
|
func init() { artifactsF[Bash] = Toolchain.newBash }
|
||||||
artifactsM[Bash] = Metadata{
|
|
||||||
f: Toolchain.newBash,
|
|
||||||
|
|
||||||
Name: "bash",
|
func (t Toolchain) newCoreutils() pkg.Artifact {
|
||||||
Description: "the Bourne Again SHell",
|
|
||||||
Website: "https://www.gnu.org/software/bash/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Toolchain) newCoreutils() (pkg.Artifact, string) {
|
|
||||||
const (
|
const (
|
||||||
version = "9.9"
|
version = "9.9"
|
||||||
checksum = "B1_TaXj1j5aiVIcazLWu8Ix03wDV54uo2_iBry4qHG6Y-9bjDpUPlkNLmU_3Nvw6"
|
checksum = "B1_TaXj1j5aiVIcazLWu8Ix03wDV54uo2_iBry4qHG6Y-9bjDpUPlkNLmU_3Nvw6"
|
||||||
)
|
)
|
||||||
return t.NewPackage("coreutils", version, pkg.NewHTTPGetTar(
|
return t.NewViaMake("coreutils", version, pkg.NewHTTPGetTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/coreutils/coreutils-"+version+".tar.gz",
|
nil, "https://ftpmirror.gnu.org/gnu/coreutils/coreutils-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &MakeAttr{
|
||||||
Writable: true,
|
Writable: true,
|
||||||
ScriptEarly: `
|
ScriptEarly: `
|
||||||
|
cd /usr/src/coreutils
|
||||||
test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
|
test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
|
||||||
|
|
||||||
test_disable '#!/bin/sh' gnulib-tests/test-c32ispunct.sh
|
test_disable '#!/bin/sh' gnulib-tests/test-c32ispunct.sh
|
||||||
test_disable '#!/bin/sh' tests/split/line-bytes.sh
|
test_disable '#!/bin/sh' tests/split/line-bytes.sh
|
||||||
test_disable '#!/bin/sh' tests/dd/no-allocate.sh
|
test_disable '#!/bin/sh' tests/dd/no-allocate.sh
|
||||||
test_disable '#!/bin/sh' tests/env/env.sh
|
|
||||||
test_disable 'int main(){return 0;}' gnulib-tests/test-chown.c
|
test_disable 'int main(){return 0;}' gnulib-tests/test-chown.c
|
||||||
test_disable 'int main(){return 0;}' gnulib-tests/test-fchownat.c
|
test_disable 'int main(){return 0;}' gnulib-tests/test-fchownat.c
|
||||||
test_disable 'int main(){return 0;}' gnulib-tests/test-lchown.c
|
test_disable 'int main(){return 0;}' gnulib-tests/test-lchown.c
|
||||||
`,
|
`,
|
||||||
Flag: TEarly,
|
Flag: TEarly,
|
||||||
}, &MakeHelper{
|
|
||||||
Configure: [][2]string{
|
|
||||||
{"enable-single-binary", "symlinks"},
|
|
||||||
},
|
},
|
||||||
},
|
t.Load(Perl),
|
||||||
Perl,
|
t.Load(Bash),
|
||||||
Bash,
|
|
||||||
|
|
||||||
KernelHeaders,
|
t.Load(KernelHeaders),
|
||||||
), version
|
|
||||||
}
|
|
||||||
func init() {
|
|
||||||
artifactsM[Coreutils] = Metadata{
|
|
||||||
f: Toolchain.newCoreutils,
|
|
||||||
|
|
||||||
Name: "coreutils",
|
|
||||||
Description: "the basic file, shell and text manipulation utilities",
|
|
||||||
Website: "https://www.gnu.org/software/coreutils/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Toolchain) newTexinfo() (pkg.Artifact, string) {
|
|
||||||
const (
|
|
||||||
version = "7.2"
|
|
||||||
checksum = "9EelM5b7QGMAY5DKrAm_El8lofBGuFWlaBPSBhh7l_VQE8054MBmC0KBvGrABqjv"
|
|
||||||
)
|
)
|
||||||
return t.NewPackage("texinfo", version, pkg.NewHTTPGetTar(
|
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/texinfo/texinfo-"+version+".tar.gz",
|
|
||||||
mustDecode(checksum),
|
|
||||||
pkg.TarGzip,
|
|
||||||
), nil, &MakeHelper{
|
|
||||||
// nonstandard glibc extension
|
|
||||||
SkipCheck: true,
|
|
||||||
},
|
|
||||||
Perl,
|
|
||||||
), version
|
|
||||||
}
|
}
|
||||||
func init() {
|
func init() { artifactsF[Coreutils] = Toolchain.newCoreutils }
|
||||||
artifactsM[Texinfo] = Metadata{
|
|
||||||
f: Toolchain.newTexinfo,
|
|
||||||
|
|
||||||
Name: "texinfo",
|
func (t Toolchain) newGperf() pkg.Artifact {
|
||||||
Description: "the GNU square-wheel-reinvension of man pages",
|
|
||||||
Website: "https://www.gnu.org/software/texinfo/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Toolchain) newGperf() (pkg.Artifact, string) {
|
|
||||||
const (
|
const (
|
||||||
version = "3.3"
|
version = "3.3"
|
||||||
checksum = "RtIy9pPb_Bb8-31J2Nw-rRGso2JlS-lDlVhuNYhqR7Nt4xM_nObznxAlBMnarJv7"
|
checksum = "RtIy9pPb_Bb8-31J2Nw-rRGso2JlS-lDlVhuNYhqR7Nt4xM_nObznxAlBMnarJv7"
|
||||||
)
|
)
|
||||||
return t.NewPackage("gperf", version, pkg.NewHTTPGetTar(
|
return t.NewViaMake("gperf", version, pkg.NewHTTPGetTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gperf/gperf-"+version+".tar.gz",
|
nil, "https://ftpmirror.gnu.org/gperf/gperf-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil, (*MakeHelper)(nil),
|
), nil,
|
||||||
Diffutils,
|
t.Load(Diffutils),
|
||||||
), version
|
)
|
||||||
}
|
}
|
||||||
func init() {
|
func init() { artifactsF[Gperf] = Toolchain.newGperf }
|
||||||
artifactsM[Gperf] = Metadata{
|
|
||||||
f: Toolchain.newGperf,
|
|
||||||
|
|
||||||
Name: "gperf",
|
func (t Toolchain) newGawk() pkg.Artifact {
|
||||||
Description: "a perfect hash function generator",
|
|
||||||
Website: "https://www.gnu.org/software/gperf/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Toolchain) newGawk() (pkg.Artifact, string) {
|
|
||||||
const (
|
const (
|
||||||
version = "5.3.2"
|
version = "5.3.2"
|
||||||
checksum = "uIs0d14h_d2DgMGYwrPtegGNyt_bxzG3D6Fe-MmExx_pVoVkQaHzrtmiXVr6NHKk"
|
checksum = "uIs0d14h_d2DgMGYwrPtegGNyt_bxzG3D6Fe-MmExx_pVoVkQaHzrtmiXVr6NHKk"
|
||||||
)
|
)
|
||||||
return t.NewPackage("gawk", version, pkg.NewHTTPGetTar(
|
return t.NewViaMake("gawk", version, pkg.NewHTTPGetTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/gawk/gawk-"+version+".tar.gz",
|
nil, "https://ftpmirror.gnu.org/gnu/gawk/gawk-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &MakeAttr{
|
||||||
Flag: TEarly,
|
Flag: TEarly,
|
||||||
}, &MakeHelper{
|
|
||||||
// dependency loop
|
// dependency loop
|
||||||
SkipCheck: true,
|
SkipCheck: true,
|
||||||
}), version
|
})
|
||||||
}
|
}
|
||||||
func init() {
|
func init() { artifactsF[Gawk] = Toolchain.newGawk }
|
||||||
artifactsM[Gawk] = Metadata{
|
|
||||||
f: Toolchain.newGawk,
|
|
||||||
|
|
||||||
Name: "gawk",
|
func (t Toolchain) newGrep() pkg.Artifact {
|
||||||
Description: "an implementation of awk with GNU extensions",
|
|
||||||
Website: "https://www.gnu.org/software/gawk/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Toolchain) newGrep() (pkg.Artifact, string) {
|
|
||||||
const (
|
const (
|
||||||
version = "3.12"
|
version = "3.12"
|
||||||
checksum = "qMB4RjaPNRRYsxix6YOrjE8gyAT1zVSTy4nW4wKW9fqa0CHYAuWgPwDTirENzm_1"
|
checksum = "qMB4RjaPNRRYsxix6YOrjE8gyAT1zVSTy4nW4wKW9fqa0CHYAuWgPwDTirENzm_1"
|
||||||
)
|
)
|
||||||
return t.NewPackage("grep", version, pkg.NewHTTPGetTar(
|
return t.NewViaMake("grep", version, pkg.NewHTTPGetTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/grep/grep-"+version+".tar.gz",
|
nil, "https://ftpmirror.gnu.org/gnu/grep/grep-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &MakeAttr{
|
||||||
Writable: true,
|
Writable: true,
|
||||||
ScriptEarly: `
|
ScriptEarly: `
|
||||||
|
cd /usr/src/grep
|
||||||
|
|
||||||
test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
|
test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
|
||||||
|
|
||||||
test_disable '#!/bin/sh' gnulib-tests/test-c32ispunct.sh
|
test_disable '#!/bin/sh' gnulib-tests/test-c32ispunct.sh
|
||||||
test_disable 'int main(){return 0;}' gnulib-tests/test-c32ispunct.c
|
test_disable 'int main(){return 0;}' gnulib-tests/test-c32ispunct.c
|
||||||
`,
|
`,
|
||||||
}, (*MakeHelper)(nil),
|
},
|
||||||
Diffutils,
|
t.Load(Diffutils),
|
||||||
), version
|
)
|
||||||
}
|
}
|
||||||
func init() {
|
func init() { artifactsF[Grep] = Toolchain.newGrep }
|
||||||
artifactsM[Grep] = Metadata{
|
|
||||||
f: Toolchain.newGrep,
|
|
||||||
|
|
||||||
Name: "grep",
|
func (t Toolchain) newFindutils() pkg.Artifact {
|
||||||
Description: "searches input for lines containing a match to a pattern",
|
|
||||||
Website: "https://www.gnu.org/software/grep/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Toolchain) newFindutils() (pkg.Artifact, string) {
|
|
||||||
const (
|
const (
|
||||||
version = "4.10.0"
|
version = "4.10.0"
|
||||||
checksum = "ZXABdNBQXL7QjTygynRRTdXYWxQKZ0Wn5eMd3NUnxR0xaS0u0VfcKoTlbo50zxv6"
|
checksum = "ZXABdNBQXL7QjTygynRRTdXYWxQKZ0Wn5eMd3NUnxR0xaS0u0VfcKoTlbo50zxv6"
|
||||||
)
|
)
|
||||||
return t.NewPackage("findutils", version, pkg.NewHTTPGet(
|
return t.NewViaMake("findutils", version, pkg.NewHTTPGet(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/findutils/findutils-"+version+".tar.xz",
|
nil, "https://ftpmirror.gnu.org/gnu/findutils/findutils-"+version+".tar.xz",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
), &PackageAttr{
|
), &MakeAttr{
|
||||||
SourceKind: sourceTarXZ,
|
SourceSuffix: ".tar.xz",
|
||||||
ScriptEarly: `
|
ScriptEarly: `
|
||||||
|
cd /usr/src/
|
||||||
|
tar xf findutils.tar.xz
|
||||||
|
mv findutils-` + version + ` findutils
|
||||||
|
|
||||||
|
cd findutils
|
||||||
echo '#!/bin/sh' > gnulib-tests/test-c32ispunct.sh
|
echo '#!/bin/sh' > gnulib-tests/test-c32ispunct.sh
|
||||||
echo 'int main(){return 0;}' > tests/xargs/test-sigusr.c
|
echo 'int main(){return 0;}' > tests/xargs/test-sigusr.c
|
||||||
`,
|
`,
|
||||||
}, (*MakeHelper)(nil),
|
|
||||||
Diffutils,
|
|
||||||
XZ,
|
|
||||||
Sed,
|
|
||||||
), version
|
|
||||||
}
|
|
||||||
func init() {
|
|
||||||
artifactsM[Findutils] = Metadata{
|
|
||||||
f: Toolchain.newFindutils,
|
|
||||||
|
|
||||||
Name: "findutils",
|
|
||||||
Description: "the basic directory searching utilities",
|
|
||||||
Website: "https://www.gnu.org/software/findutils/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Toolchain) newBC() (pkg.Artifact, string) {
|
|
||||||
const (
|
|
||||||
version = "1.08.2"
|
|
||||||
checksum = "8h6f3hjV80XiFs6v9HOPF2KEyg1kuOgn5eeFdVspV05ODBVQss-ey5glc8AmneLy"
|
|
||||||
)
|
|
||||||
return t.NewPackage("bc", version, pkg.NewHTTPGetTar(
|
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/bc/bc-"+version+".tar.gz",
|
|
||||||
mustDecode(checksum),
|
|
||||||
pkg.TarGzip,
|
|
||||||
), &PackageAttr{
|
|
||||||
// source expected to be writable
|
|
||||||
Writable: true,
|
|
||||||
Chmod: true,
|
|
||||||
}, (*MakeHelper)(nil),
|
|
||||||
Perl,
|
|
||||||
Texinfo,
|
|
||||||
), version
|
|
||||||
}
|
|
||||||
func init() {
|
|
||||||
artifactsM[BC] = Metadata{
|
|
||||||
f: Toolchain.newBC,
|
|
||||||
|
|
||||||
Name: "bc",
|
|
||||||
Description: "an arbitrary precision numeric processing language",
|
|
||||||
Website: "https://www.gnu.org/software/bc/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Toolchain) newLibiconv() (pkg.Artifact, string) {
|
|
||||||
const (
|
|
||||||
version = "1.18"
|
|
||||||
checksum = "iV5q3VxP5VPdJ-X7O5OQI4fGm8VjeYb5viLd1L3eAHg26bbHb2_Qn63XPF3ucVZr"
|
|
||||||
)
|
|
||||||
return t.NewPackage("libiconv", version, pkg.NewHTTPGetTar(
|
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/libiconv/libiconv-"+version+".tar.gz",
|
|
||||||
mustDecode(checksum),
|
|
||||||
pkg.TarGzip,
|
|
||||||
), nil, (*MakeHelper)(nil)), version
|
|
||||||
}
|
|
||||||
func init() {
|
|
||||||
artifactsM[Libiconv] = Metadata{
|
|
||||||
f: Toolchain.newLibiconv,
|
|
||||||
|
|
||||||
Name: "libiconv",
|
|
||||||
Description: "iconv implementation independent of glibc",
|
|
||||||
Website: "https://www.gnu.org/software/libiconv/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Toolchain) newTar() (pkg.Artifact, string) {
|
|
||||||
const (
|
|
||||||
version = "1.35"
|
|
||||||
checksum = "zSaoSlVUDW0dSfm4sbL4FrXLFR8U40Fh3zY5DWhR5NCIJ6GjU6Kc4VZo2-ZqpBRA"
|
|
||||||
)
|
|
||||||
return t.NewPackage("tar", version, pkg.NewHTTPGetTar(
|
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/tar/tar-"+version+".tar.gz",
|
|
||||||
mustDecode(checksum),
|
|
||||||
pkg.TarGzip,
|
|
||||||
), nil, &MakeHelper{
|
|
||||||
Configure: [][2]string{
|
|
||||||
{"disable-acl"},
|
|
||||||
{"without-posix-acls"},
|
|
||||||
{"without-xattrs"},
|
|
||||||
},
|
},
|
||||||
Check: []string{
|
t.Load(Diffutils),
|
||||||
// very expensive
|
t.Load(XZ),
|
||||||
"TARTEST_SKIP_LARGE_FILES=1",
|
t.Load(Sed),
|
||||||
|
)
|
||||||
`TESTSUITEFLAGS="-j$(nproc)"`,
|
|
||||||
"check",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Diffutils,
|
|
||||||
|
|
||||||
Gzip,
|
|
||||||
Bzip2,
|
|
||||||
Zstd,
|
|
||||||
), version
|
|
||||||
}
|
}
|
||||||
func init() {
|
func init() { artifactsF[Findutils] = Toolchain.newFindutils }
|
||||||
artifactsM[Tar] = Metadata{
|
|
||||||
f: Toolchain.newTar,
|
|
||||||
|
|
||||||
Name: "tar",
|
func (t Toolchain) newBinutils() pkg.Artifact {
|
||||||
Description: "provides the ability to create tar archives",
|
|
||||||
Website: "https://www.gnu.org/software/tar/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Toolchain) newBinutils() (pkg.Artifact, string) {
|
|
||||||
const (
|
const (
|
||||||
version = "2.45"
|
version = "2.45"
|
||||||
checksum = "hlLtqqHDmzAT2OQVHaKEd_io2DGFvJkaeS-igBuK8bRRir7LUKGHgHYNkDVKaHTT"
|
checksum = "hlLtqqHDmzAT2OQVHaKEd_io2DGFvJkaeS-igBuK8bRRir7LUKGHgHYNkDVKaHTT"
|
||||||
)
|
)
|
||||||
return t.NewPackage("binutils", version, pkg.NewHTTPGetTar(
|
return t.NewViaMake("binutils", version, pkg.NewHTTPGetTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/binutils/binutils-"+version+".tar.bz2",
|
nil, "https://ftpmirror.gnu.org/gnu/binutils/binutils-"+version+".tar.bz2",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarBzip2,
|
pkg.TarBzip2,
|
||||||
), nil, (*MakeHelper)(nil),
|
), &MakeAttr{
|
||||||
Bash,
|
ScriptConfigured: `
|
||||||
), version
|
make "-j$(nproc)"
|
||||||
|
`,
|
||||||
|
},
|
||||||
|
t.Load(Bash),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
func init() {
|
func init() { artifactsF[Binutils] = Toolchain.newBinutils }
|
||||||
artifactsM[Binutils] = Metadata{
|
|
||||||
f: Toolchain.newBinutils,
|
|
||||||
|
|
||||||
Name: "binutils",
|
func (t Toolchain) newGMP() pkg.Artifact {
|
||||||
Description: "a collection of binary tools",
|
|
||||||
Website: "https://www.gnu.org/software/binutils/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Toolchain) newGMP() (pkg.Artifact, string) {
|
|
||||||
const (
|
const (
|
||||||
version = "6.3.0"
|
version = "6.3.0"
|
||||||
checksum = "yrgbgEDWKDdMWVHh7gPbVl56-sRtVVhfvv0M_LX7xMUUk_mvZ1QOJEAnt7g4i3k5"
|
checksum = "yrgbgEDWKDdMWVHh7gPbVl56-sRtVVhfvv0M_LX7xMUUk_mvZ1QOJEAnt7g4i3k5"
|
||||||
)
|
)
|
||||||
return t.NewPackage("gmp", version, pkg.NewHTTPGetTar(
|
return t.NewViaMake("gmp", version, pkg.NewHTTPGetTar(
|
||||||
nil, "https://gcc.gnu.org/pub/gcc/infrastructure/"+
|
nil, "https://gcc.gnu.org/pub/gcc/infrastructure/"+
|
||||||
"gmp-"+version+".tar.bz2",
|
"gmp-"+version+".tar.bz2",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarBzip2,
|
pkg.TarBzip2,
|
||||||
), nil, (*MakeHelper)(nil),
|
), &MakeAttr{
|
||||||
M4,
|
ScriptConfigured: `
|
||||||
), version
|
make "-j$(nproc)"
|
||||||
|
`,
|
||||||
|
},
|
||||||
|
t.Load(M4),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
func init() {
|
func init() { artifactsF[GMP] = Toolchain.newGMP }
|
||||||
artifactsM[GMP] = Metadata{
|
|
||||||
f: Toolchain.newGMP,
|
|
||||||
|
|
||||||
Name: "gmp",
|
func (t Toolchain) newMPFR() pkg.Artifact {
|
||||||
Description: "a free library for arbitrary precision arithmetic",
|
|
||||||
Website: "https://gmplib.org/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Toolchain) newMPFR() (pkg.Artifact, string) {
|
|
||||||
const (
|
const (
|
||||||
version = "4.2.2"
|
version = "4.2.2"
|
||||||
checksum = "wN3gx0zfIuCn9r3VAn_9bmfvAYILwrRfgBjYSD1IjLqyLrLojNN5vKyQuTE9kA-B"
|
checksum = "wN3gx0zfIuCn9r3VAn_9bmfvAYILwrRfgBjYSD1IjLqyLrLojNN5vKyQuTE9kA-B"
|
||||||
)
|
)
|
||||||
return t.NewPackage("mpfr", version, pkg.NewHTTPGetTar(
|
return t.NewViaMake("mpfr", version, pkg.NewHTTPGetTar(
|
||||||
nil, "https://gcc.gnu.org/pub/gcc/infrastructure/"+
|
nil, "https://gcc.gnu.org/pub/gcc/infrastructure/"+
|
||||||
"mpfr-"+version+".tar.bz2",
|
"mpfr-"+version+".tar.bz2",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarBzip2,
|
pkg.TarBzip2,
|
||||||
), nil, (*MakeHelper)(nil),
|
), nil,
|
||||||
GMP,
|
t.Load(GMP),
|
||||||
), version
|
)
|
||||||
}
|
}
|
||||||
func init() {
|
func init() { artifactsF[MPFR] = Toolchain.newMPFR }
|
||||||
artifactsM[MPFR] = Metadata{
|
|
||||||
f: Toolchain.newMPFR,
|
|
||||||
|
|
||||||
Name: "mpfr",
|
func (t Toolchain) newMPC() pkg.Artifact {
|
||||||
Description: "a C library for multiple-precision floating-point computations",
|
|
||||||
Website: "https://www.mpfr.org/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Toolchain) newMPC() (pkg.Artifact, string) {
|
|
||||||
const (
|
const (
|
||||||
version = "1.3.1"
|
version = "1.3.1"
|
||||||
checksum = "o8r8K9R4x7PuRx0-JE3-bC5jZQrtxGV2nkB773aqJ3uaxOiBDCID1gKjPaaDxX4V"
|
checksum = "o8r8K9R4x7PuRx0-JE3-bC5jZQrtxGV2nkB773aqJ3uaxOiBDCID1gKjPaaDxX4V"
|
||||||
)
|
)
|
||||||
return t.NewPackage("mpc", version, pkg.NewHTTPGetTar(
|
return t.NewViaMake("mpc", version, pkg.NewHTTPGetTar(
|
||||||
nil, "https://gcc.gnu.org/pub/gcc/infrastructure/"+
|
nil, "https://gcc.gnu.org/pub/gcc/infrastructure/"+
|
||||||
"mpc-"+version+".tar.gz",
|
"mpc-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil, (*MakeHelper)(nil),
|
), nil,
|
||||||
GMP,
|
t.Load(GMP),
|
||||||
MPFR,
|
t.Load(MPFR),
|
||||||
), version
|
)
|
||||||
}
|
}
|
||||||
func init() {
|
func init() { artifactsF[MPC] = Toolchain.newMPC }
|
||||||
artifactsM[MPC] = Metadata{
|
|
||||||
f: Toolchain.newMPC,
|
|
||||||
|
|
||||||
Name: "mpc",
|
func (t Toolchain) newGCC() pkg.Artifact {
|
||||||
Description: "a C library for the arithmetic of complex numbers",
|
|
||||||
Website: "https://www.multiprecision.org/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Toolchain) newGCC() (pkg.Artifact, string) {
|
|
||||||
const (
|
const (
|
||||||
version = "15.2.0"
|
version = "15.2.0"
|
||||||
checksum = "TXJ5WrbXlGLzy1swghQTr4qxgDCyIZFgJry51XEPTBZ8QYbVmFeB4lZbSMtPJ-a1"
|
checksum = "TXJ5WrbXlGLzy1swghQTr4qxgDCyIZFgJry51XEPTBZ8QYbVmFeB4lZbSMtPJ-a1"
|
||||||
)
|
)
|
||||||
return t.NewPackage("gcc", version, pkg.NewHTTPGetTar(
|
return t.NewViaMake("gcc", version, t.NewPatchedSource(
|
||||||
|
"gcc", version,
|
||||||
|
pkg.NewHTTPGetTar(
|
||||||
nil, "https://ftp.tsukuba.wide.ad.jp/software/gcc/releases/"+
|
nil, "https://ftp.tsukuba.wide.ad.jp/software/gcc/releases/"+
|
||||||
"gcc-"+version+"/gcc-"+version+".tar.gz",
|
"gcc-"+version+"/gcc-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), true, [2]string{"musl-off64_t-loff_t", `diff --git a/libgo/sysinfo.c b/libgo/sysinfo.c
|
||||||
Patches: [][2]string{
|
|
||||||
{"musl-off64_t-loff_t", `diff --git a/libgo/sysinfo.c b/libgo/sysinfo.c
|
|
||||||
index 180f5c31d74..44d7ea73f7d 100644
|
index 180f5c31d74..44d7ea73f7d 100644
|
||||||
--- a/libgo/sysinfo.c
|
--- a/libgo/sysinfo.c
|
||||||
+++ b/libgo/sysinfo.c
|
+++ b/libgo/sysinfo.c
|
||||||
@@ -730,9 +451,7 @@ index 180f5c31d74..44d7ea73f7d 100644
|
|||||||
|
|
||||||
// The following section introduces explicit references to types and
|
// The following section introduces explicit references to types and
|
||||||
// constants of interest to support bootstrapping libgo using a
|
// constants of interest to support bootstrapping libgo using a
|
||||||
`},
|
`}, [2]string{"musl-legacy-lfs", `diff --git a/libgo/go/internal/syscall/unix/at_largefile.go b/libgo/go/internal/syscall/unix/at_largefile.go
|
||||||
|
|
||||||
{"musl-legacy-lfs", `diff --git a/libgo/go/internal/syscall/unix/at_largefile.go b/libgo/go/internal/syscall/unix/at_largefile.go
|
|
||||||
index 82e0dcfd074..16151ecad1b 100644
|
index 82e0dcfd074..16151ecad1b 100644
|
||||||
--- a/libgo/go/internal/syscall/unix/at_largefile.go
|
--- a/libgo/go/internal/syscall/unix/at_largefile.go
|
||||||
+++ b/libgo/go/internal/syscall/unix/at_largefile.go
|
+++ b/libgo/go/internal/syscall/unix/at_largefile.go
|
||||||
@@ -860,18 +579,11 @@ index f84860891e6..7efc9615985 100644
|
|||||||
}
|
}
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
`},
|
`}), &MakeAttr{
|
||||||
},
|
|
||||||
|
|
||||||
ScriptEarly: `
|
ScriptEarly: `
|
||||||
ln -s system/lib /
|
ln -s system/lib /
|
||||||
ln -s system/lib /work/
|
ln -s system/lib /work/
|
||||||
`,
|
`,
|
||||||
|
|
||||||
// GCC spends most of its time in its many configure scripts, however
|
|
||||||
// it also saturates the CPU for a consequential amount of time.
|
|
||||||
Flag: TExclusive,
|
|
||||||
}, &MakeHelper{
|
|
||||||
Configure: [][2]string{
|
Configure: [][2]string{
|
||||||
{"disable-multilib"},
|
{"disable-multilib"},
|
||||||
{"with-multilib-list", `""`},
|
{"with-multilib-list", `""`},
|
||||||
@@ -892,24 +604,20 @@ ln -s system/lib /work/
|
|||||||
// well in its current state. That does not matter as long as the
|
// well in its current state. That does not matter as long as the
|
||||||
// toolchain it produces passes its own test suite.
|
// toolchain it produces passes its own test suite.
|
||||||
SkipCheck: true,
|
SkipCheck: true,
|
||||||
|
|
||||||
|
// GCC spends most of its time in its many configure scripts, however
|
||||||
|
// it also saturates the CPU for a consequential amount of time.
|
||||||
|
Flag: TExclusive,
|
||||||
},
|
},
|
||||||
Binutils,
|
t.Load(Binutils),
|
||||||
|
|
||||||
GMP,
|
t.Load(GMP),
|
||||||
MPFR,
|
t.Load(MPFR),
|
||||||
MPC,
|
t.Load(MPC),
|
||||||
|
|
||||||
Zlib,
|
t.Load(Zlib),
|
||||||
Libucontext,
|
t.Load(Libucontext),
|
||||||
KernelHeaders,
|
t.Load(KernelHeaders),
|
||||||
), version
|
)
|
||||||
}
|
|
||||||
func init() {
|
|
||||||
artifactsM[gcc] = Metadata{
|
|
||||||
f: Toolchain.newGCC,
|
|
||||||
|
|
||||||
Name: "gcc",
|
|
||||||
Description: "The GNU Compiler Collection",
|
|
||||||
Website: "https://www.gnu.org/software/gcc/",
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
func init() { artifactsF[gcc] = Toolchain.newGCC }
|
||||||
|
|||||||
@@ -62,7 +62,7 @@ ln -s \
|
|||||||
)))
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t Toolchain) newGoLatest() (pkg.Artifact, string) {
|
func (t Toolchain) newGoLatest() pkg.Artifact {
|
||||||
var (
|
var (
|
||||||
bootstrapEnv []string
|
bootstrapEnv []string
|
||||||
bootstrapExtra []pkg.Artifact
|
bootstrapExtra []pkg.Artifact
|
||||||
@@ -143,7 +143,7 @@ sed -i \
|
|||||||
go125 := t.newGo(
|
go125 := t.newGo(
|
||||||
"1.25.7",
|
"1.25.7",
|
||||||
"fyylHdBVRUobnBjYj3NKBaYPUw3kGmo2mEELiZonOYurPfbarNU1x77B99Fjut7Q",
|
"fyylHdBVRUobnBjYj3NKBaYPUw3kGmo2mEELiZonOYurPfbarNU1x77B99Fjut7Q",
|
||||||
[]string{"CGO_ENABLED=0"}, `
|
finalEnv, `
|
||||||
sed -i \
|
sed -i \
|
||||||
's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \
|
's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \
|
||||||
cmd/link/internal/`+runtime.GOARCH+`/obj.go
|
cmd/link/internal/`+runtime.GOARCH+`/obj.go
|
||||||
@@ -153,29 +153,6 @@ rm \
|
|||||||
`, go123,
|
`, go123,
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
return go125
|
||||||
version = "1.26.0"
|
|
||||||
checksum = "uHLcrgBc0NMcyTMDLRNAZIcOx0RyQlyekSl9xbWSwj3esEFWJysYLfLa3S8p39Nh"
|
|
||||||
)
|
|
||||||
return t.newGo(
|
|
||||||
version,
|
|
||||||
checksum,
|
|
||||||
finalEnv, `
|
|
||||||
sed -i \
|
|
||||||
's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \
|
|
||||||
cmd/link/internal/`+runtime.GOARCH+`/obj.go
|
|
||||||
|
|
||||||
rm \
|
|
||||||
os/root_unix_test.go
|
|
||||||
`, go125,
|
|
||||||
), version
|
|
||||||
}
|
|
||||||
func init() {
|
|
||||||
artifactsM[Go] = Metadata{
|
|
||||||
f: Toolchain.newGoLatest,
|
|
||||||
|
|
||||||
Name: "go",
|
|
||||||
Description: "the Go programming language toolchain",
|
|
||||||
Website: "https://go.dev/",
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
func init() { artifactsF[Go] = Toolchain.newGoLatest }
|
||||||
|
|||||||
@@ -1,59 +0,0 @@
|
|||||||
package rosa
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"hakurei.app/container/fhs"
|
|
||||||
"hakurei.app/internal/pkg"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (t Toolchain) newGLib() (pkg.Artifact, string) {
|
|
||||||
const (
|
|
||||||
version = "2.86.4"
|
|
||||||
checksum = "AfTjBrrxtXXPL6dFa1LfTe40PyPSth62CoIkM5m_VJTUngGLOFHw6I4XE7RGQE8G"
|
|
||||||
)
|
|
||||||
return t.NewPackage("glib", version, pkg.NewHTTPGet(
|
|
||||||
nil, "https://download.gnome.org/sources/glib/"+
|
|
||||||
strings.Join(strings.SplitN(version, ".", 3)[:2], ".")+
|
|
||||||
"/glib-"+version+".tar.xz",
|
|
||||||
mustDecode(checksum),
|
|
||||||
), &PackageAttr{
|
|
||||||
SourceKind: sourceTarXZ,
|
|
||||||
|
|
||||||
Paths: []pkg.ExecPath{
|
|
||||||
pkg.Path(fhs.AbsEtc.Append(
|
|
||||||
"machine-id",
|
|
||||||
), false, pkg.NewFile(
|
|
||||||
"glib-machine-id",
|
|
||||||
[]byte("ffffffffffffffffffffffffffffffff\n"),
|
|
||||||
)),
|
|
||||||
pkg.Path(AbsSystem.Append(
|
|
||||||
"var/lib/dbus/machine-id",
|
|
||||||
), false, pkg.NewFile(
|
|
||||||
"glib-machine-id",
|
|
||||||
[]byte("fefefefefefefefefefefefefefefefe\n"),
|
|
||||||
)),
|
|
||||||
},
|
|
||||||
}, &MesonHelper{
|
|
||||||
Setup: [][2]string{
|
|
||||||
{"Ddefault_library", "both"},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
XZ,
|
|
||||||
Packaging,
|
|
||||||
Bash,
|
|
||||||
|
|
||||||
PCRE2,
|
|
||||||
Libffi,
|
|
||||||
Zlib,
|
|
||||||
), version
|
|
||||||
}
|
|
||||||
func init() {
|
|
||||||
artifactsM[GLib] = Metadata{
|
|
||||||
f: Toolchain.newGLib,
|
|
||||||
|
|
||||||
Name: "glib",
|
|
||||||
Description: "the GNU library of miscellaneous stuff",
|
|
||||||
Website: "https://gitlab.gnome.org/GNOME/glib/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,9 +1,15 @@
|
|||||||
package rosa
|
package rosa
|
||||||
|
|
||||||
import "hakurei.app/internal/pkg"
|
import (
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
)
|
||||||
|
|
||||||
func (t Toolchain) newHakurei(suffix, script string) pkg.Artifact {
|
func (t Toolchain) newHakurei(suffix, script string) pkg.Artifact {
|
||||||
return t.New("hakurei"+suffix+"-"+hakureiVersion, 0, []pkg.Artifact{
|
const (
|
||||||
|
version = "0.3.4"
|
||||||
|
checksum = "wVwSLo75a2OnH5tgxNWXR_YhiOJUFnYM_9-sJtxAEOKhcPE0BJafs6PU8o5JzyCT"
|
||||||
|
)
|
||||||
|
return t.New("hakurei"+suffix+"-"+version, 0, []pkg.Artifact{
|
||||||
t.Load(Go),
|
t.Load(Go),
|
||||||
|
|
||||||
t.Load(Gzip),
|
t.Load(Gzip),
|
||||||
@@ -37,10 +43,214 @@ echo
|
|||||||
chmod -R +w /usr/src/hakurei
|
chmod -R +w /usr/src/hakurei
|
||||||
cd /usr/src/hakurei
|
cd /usr/src/hakurei
|
||||||
|
|
||||||
HAKUREI_VERSION='v`+hakureiVersion+`'
|
HAKUREI_VERSION='v`+version+`'
|
||||||
`+script, pkg.Path(AbsUsrSrc.Append("hakurei"), true, t.NewPatchedSource(
|
`+script, pkg.Path(AbsUsrSrc.Append("hakurei"), true, t.NewPatchedSource("hakurei", version, pkg.NewHTTPGetTar(
|
||||||
"hakurei", hakureiVersion, hakureiSource, true, hakureiPatches...,
|
nil, "https://git.gensokyo.uk/security/hakurei/archive/"+
|
||||||
)), pkg.Path(AbsUsrSrc.Append("hostname", "main.go"), false, pkg.NewFile(
|
"v"+version+".tar.gz",
|
||||||
|
mustDecode(checksum),
|
||||||
|
pkg.TarGzip,
|
||||||
|
), true, [2]string{"dist-00-tests", `From 67e453f5c4de915de23ecbe5980e595758f0f2fb Mon Sep 17 00:00:00 2001
|
||||||
|
From: Ophestra <cat@gensokyo.uk>
|
||||||
|
Date: Tue, 27 Jan 2026 06:49:48 +0900
|
||||||
|
Subject: [PATCH] dist: run tests
|
||||||
|
|
||||||
|
This used to be impossible due to nix jank which has been addressed.
|
||||||
|
|
||||||
|
Signed-off-by: Ophestra <cat@gensokyo.uk>
|
||||||
|
---
|
||||||
|
dist/release.sh | 21 ++++++++++++++++-----
|
||||||
|
flake.nix | 32 ++++++++++++++++++++------------
|
||||||
|
internal/acl/acl_test.go | 2 +-
|
||||||
|
package.nix | 2 +-
|
||||||
|
4 files changed, 38 insertions(+), 19 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/dist/release.sh b/dist/release.sh
|
||||||
|
index 4dcb278..0ba9104 100755
|
||||||
|
--- a/dist/release.sh
|
||||||
|
+++ b/dist/release.sh
|
||||||
|
@@ -2,19 +2,30 @@
|
||||||
|
cd "$(dirname -- "$0")/.."
|
||||||
|
VERSION="${HAKUREI_VERSION:-untagged}"
|
||||||
|
pname="hakurei-${VERSION}"
|
||||||
|
-out="dist/${pname}"
|
||||||
|
+out="${DESTDIR:-dist}/${pname}"
|
||||||
|
|
||||||
|
+echo '# Preparing distribution files.'
|
||||||
|
mkdir -p "${out}"
|
||||||
|
cp -v "README.md" "dist/hsurc.default" "dist/install.sh" "${out}"
|
||||||
|
cp -rv "dist/comp" "${out}"
|
||||||
|
+echo
|
||||||
|
|
||||||
|
+echo '# Building hakurei.'
|
||||||
|
go generate ./...
|
||||||
|
-go build -trimpath -v -o "${out}/bin/" -ldflags "-s -w -buildid= -extldflags '-static'
|
||||||
|
+go build -trimpath -v -o "${out}/bin/" -ldflags "-s -w
|
||||||
|
+ -buildid= -extldflags '-static'
|
||||||
|
-X hakurei.app/internal/info.buildVersion=${VERSION}
|
||||||
|
-X hakurei.app/internal/info.hakureiPath=/usr/bin/hakurei
|
||||||
|
-X hakurei.app/internal/info.hsuPath=/usr/bin/hsu
|
||||||
|
-X main.hakureiPath=/usr/bin/hakurei" ./...
|
||||||
|
+echo
|
||||||
|
|
||||||
|
-rm -f "./${out}.tar.gz" && tar -C dist -czf "${out}.tar.gz" "${pname}"
|
||||||
|
-rm -rf "./${out}"
|
||||||
|
-(cd dist && sha512sum "${pname}.tar.gz" > "${pname}.tar.gz.sha512")
|
||||||
|
+echo '# Testing hakurei.'
|
||||||
|
+go test -ldflags='-buildid= -extldflags=-static' ./...
|
||||||
|
+echo
|
||||||
|
+
|
||||||
|
+echo '# Creating distribution.'
|
||||||
|
+rm -f "${out}.tar.gz" && tar -C "${out}/.." -vczf "${out}.tar.gz" "${pname}"
|
||||||
|
+rm -rf "${out}"
|
||||||
|
+(cd "${out}/.." && sha512sum "${pname}.tar.gz" > "${pname}.tar.gz.sha512")
|
||||||
|
+echo
|
||||||
|
diff --git a/flake.nix b/flake.nix
|
||||||
|
index 9e09c61..2340b92 100644
|
||||||
|
--- a/flake.nix
|
||||||
|
+++ b/flake.nix
|
||||||
|
@@ -143,19 +143,27 @@
|
||||||
|
"bin/mount.fuse.sharefs" = "${hakurei}/libexec/sharefs";
|
||||||
|
};
|
||||||
|
|
||||||
|
- dist = pkgs.runCommand "${hakurei.name}-dist" { buildInputs = hakurei.targetPkgs ++ [ pkgs.pkgsStatic.musl ]; } ''
|
||||||
|
- # go requires XDG_CACHE_HOME for the build cache
|
||||||
|
- export XDG_CACHE_HOME="$(mktemp -d)"
|
||||||
|
+ dist =
|
||||||
|
+ pkgs.runCommand "${hakurei.name}-dist"
|
||||||
|
+ {
|
||||||
|
+ buildInputs = hakurei.targetPkgs ++ [
|
||||||
|
+ pkgs.pkgsStatic.musl
|
||||||
|
+ ];
|
||||||
|
+ }
|
||||||
|
+ ''
|
||||||
|
+ cd $(mktemp -d) \
|
||||||
|
+ && cp -r ${hakurei.src}/. . \
|
||||||
|
+ && chmod +w cmd && cp -r ${hsu.src}/. cmd/hsu/ \
|
||||||
|
+ && chmod -R +w .
|
||||||
|
|
||||||
|
- # get a different workdir as go does not like /build
|
||||||
|
- cd $(mktemp -d) \
|
||||||
|
- && cp -r ${hakurei.src}/. . \
|
||||||
|
- && chmod +w cmd && cp -r ${hsu.src}/. cmd/hsu/ \
|
||||||
|
- && chmod -R +w .
|
||||||
|
-
|
||||||
|
- export HAKUREI_VERSION="v${hakurei.version}"
|
||||||
|
- CC="clang -O3 -Werror" ./dist/release.sh && mkdir $out && cp -v "dist/hakurei-$HAKUREI_VERSION.tar.gz"* $out
|
||||||
|
- '';
|
||||||
|
+ CC="musl-clang -O3 -Werror -Qunused-arguments" \
|
||||||
|
+ GOCACHE="$(mktemp -d)" \
|
||||||
|
+ HAKUREI_TEST_SKIP_ACL=1 \
|
||||||
|
+ PATH="${pkgs.pkgsStatic.musl.bin}/bin:$PATH" \
|
||||||
|
+ DESTDIR="$out" \
|
||||||
|
+ HAKUREI_VERSION="v${hakurei.version}" \
|
||||||
|
+ ./dist/release.sh
|
||||||
|
+ '';
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
|
diff --git a/internal/acl/acl_test.go b/internal/acl/acl_test.go
|
||||||
|
index af6da55..19ce45a 100644
|
||||||
|
--- a/internal/acl/acl_test.go
|
||||||
|
+++ b/internal/acl/acl_test.go
|
||||||
|
@@ -24,7 +24,7 @@ var (
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestUpdate(t *testing.T) {
|
||||||
|
- if os.Getenv("GO_TEST_SKIP_ACL") == "1" {
|
||||||
|
+ if os.Getenv("HAKUREI_TEST_SKIP_ACL") == "1" {
|
||||||
|
t.Skip("acl test skipped")
|
||||||
|
}
|
||||||
|
|
||||||
|
diff --git a/package.nix b/package.nix
|
||||||
|
index 00c4401..2eaa2ec 100644
|
||||||
|
--- a/package.nix
|
||||||
|
+++ b/package.nix
|
||||||
|
@@ -89,7 +89,7 @@ buildGoModule rec {
|
||||||
|
CC = "clang -O3 -Werror";
|
||||||
|
|
||||||
|
# nix build environment does not allow acls
|
||||||
|
- GO_TEST_SKIP_ACL = 1;
|
||||||
|
+ HAKUREI_TEST_SKIP_ACL = 1;
|
||||||
|
};
|
||||||
|
|
||||||
|
buildInputs = [`}, [2]string{"container-tests", `From bf14a412e47344fff2681f4b24d1ecc7415bfcb0 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Ophestra <cat@gensokyo.uk>
|
||||||
|
Date: Sat, 31 Jan 2026 10:59:56 +0900
|
||||||
|
Subject: [PATCH] container: fix host-dependent test cases
|
||||||
|
|
||||||
|
These are not fully controlled by hakurei and may change depending on host configuration.
|
||||||
|
|
||||||
|
Signed-off-by: Ophestra <cat@gensokyo.uk>
|
||||||
|
---
|
||||||
|
container/container_test.go | 27 +++++++++++++++------------
|
||||||
|
1 file changed, 15 insertions(+), 12 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/container/container_test.go b/container/container_test.go
|
||||||
|
index d737a18..98713cb 100644
|
||||||
|
--- a/container/container_test.go
|
||||||
|
+++ b/container/container_test.go
|
||||||
|
@@ -275,12 +275,12 @@ var containerTestCases = []struct {
|
||||||
|
),
|
||||||
|
earlyMnt(
|
||||||
|
ent("/", "/dev", "ro,nosuid,nodev,relatime", "tmpfs", "devtmpfs", ignore),
|
||||||
|
- ent("/null", "/dev/null", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
|
||||||
|
- ent("/zero", "/dev/zero", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
|
||||||
|
- ent("/full", "/dev/full", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
|
||||||
|
- ent("/random", "/dev/random", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
|
||||||
|
- ent("/urandom", "/dev/urandom", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
|
||||||
|
- ent("/tty", "/dev/tty", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
|
||||||
|
+ ent("/null", "/dev/null", ignore, "devtmpfs", "devtmpfs", ignore),
|
||||||
|
+ ent("/zero", "/dev/zero", ignore, "devtmpfs", "devtmpfs", ignore),
|
||||||
|
+ ent("/full", "/dev/full", ignore, "devtmpfs", "devtmpfs", ignore),
|
||||||
|
+ ent("/random", "/dev/random", ignore, "devtmpfs", "devtmpfs", ignore),
|
||||||
|
+ ent("/urandom", "/dev/urandom", ignore, "devtmpfs", "devtmpfs", ignore),
|
||||||
|
+ ent("/tty", "/dev/tty", ignore, "devtmpfs", "devtmpfs", ignore),
|
||||||
|
ent("/", "/dev/pts", "rw,nosuid,noexec,relatime", "devpts", "devpts", "rw,mode=620,ptmxmode=666"),
|
||||||
|
ent("/", "/dev/mqueue", "rw,nosuid,nodev,noexec,relatime", "mqueue", "mqueue", "rw"),
|
||||||
|
ent("/", "/dev/shm", "rw,nosuid,nodev,relatime", "tmpfs", "tmpfs", ignore),
|
||||||
|
@@ -293,12 +293,12 @@ var containerTestCases = []struct {
|
||||||
|
),
|
||||||
|
earlyMnt(
|
||||||
|
ent("/", "/dev", "ro,nosuid,nodev,relatime", "tmpfs", "devtmpfs", ignore),
|
||||||
|
- ent("/null", "/dev/null", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
|
||||||
|
- ent("/zero", "/dev/zero", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
|
||||||
|
- ent("/full", "/dev/full", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
|
||||||
|
- ent("/random", "/dev/random", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
|
||||||
|
- ent("/urandom", "/dev/urandom", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
|
||||||
|
- ent("/tty", "/dev/tty", "rw,nosuid", "devtmpfs", "devtmpfs", ignore),
|
||||||
|
+ ent("/null", "/dev/null", ignore, "devtmpfs", "devtmpfs", ignore),
|
||||||
|
+ ent("/zero", "/dev/zero", ignore, "devtmpfs", "devtmpfs", ignore),
|
||||||
|
+ ent("/full", "/dev/full", ignore, "devtmpfs", "devtmpfs", ignore),
|
||||||
|
+ ent("/random", "/dev/random", ignore, "devtmpfs", "devtmpfs", ignore),
|
||||||
|
+ ent("/urandom", "/dev/urandom", ignore, "devtmpfs", "devtmpfs", ignore),
|
||||||
|
+ ent("/tty", "/dev/tty", ignore, "devtmpfs", "devtmpfs", ignore),
|
||||||
|
ent("/", "/dev/pts", "rw,nosuid,noexec,relatime", "devpts", "devpts", "rw,mode=620,ptmxmode=666"),
|
||||||
|
ent("/", "/dev/shm", "rw,nosuid,nodev,relatime", "tmpfs", "tmpfs", ignore),
|
||||||
|
),
|
||||||
|
@@ -696,6 +696,9 @@ func init() {
|
||||||
|
mnt[i].VfsOptstr = strings.TrimSuffix(mnt[i].VfsOptstr, ",relatime")
|
||||||
|
mnt[i].VfsOptstr = strings.TrimSuffix(mnt[i].VfsOptstr, ",noatime")
|
||||||
|
|
||||||
|
+ cur.FsOptstr = strings.Replace(cur.FsOptstr, ",seclabel", "", 1)
|
||||||
|
+ mnt[i].FsOptstr = strings.Replace(mnt[i].FsOptstr, ",seclabel", "", 1)
|
||||||
|
+
|
||||||
|
if !cur.EqualWithIgnore(mnt[i], "\x00") {
|
||||||
|
fail = true
|
||||||
|
log.Printf("[FAIL] %s", cur)`}, [2]string{"dist-01-tarball-name", `diff --git a/dist/release.sh b/dist/release.sh
|
||||||
|
index 0ba9104..2990ee1 100755
|
||||||
|
--- a/dist/release.sh
|
||||||
|
+++ b/dist/release.sh
|
||||||
|
@@ -1,7 +1,7 @@
|
||||||
|
#!/bin/sh -e
|
||||||
|
cd "$(dirname -- "$0")/.."
|
||||||
|
VERSION="${HAKUREI_VERSION:-untagged}"
|
||||||
|
-pname="hakurei-${VERSION}"
|
||||||
|
+pname="hakurei-${VERSION}-$(go env GOARCH)"
|
||||||
|
out="${DESTDIR:-dist}/${pname}"
|
||||||
|
|
||||||
|
echo '# Preparing distribution files.'
|
||||||
|
`}),
|
||||||
|
), pkg.Path(AbsUsrSrc.Append("hostname", "main.go"), false, pkg.NewFile(
|
||||||
"hostname.go",
|
"hostname.go",
|
||||||
[]byte(`
|
[]byte(`
|
||||||
package main
|
package main
|
||||||
@@ -58,8 +268,7 @@ func main() {
|
|||||||
)))
|
)))
|
||||||
}
|
}
|
||||||
func init() {
|
func init() {
|
||||||
artifactsM[Hakurei] = Metadata{
|
artifactsF[Hakurei] = func(t Toolchain) pkg.Artifact {
|
||||||
f: func(t Toolchain) (pkg.Artifact, string) {
|
|
||||||
return t.newHakurei("", `
|
return t.newHakurei("", `
|
||||||
mkdir -p /work/system/libexec/hakurei/
|
mkdir -p /work/system/libexec/hakurei/
|
||||||
|
|
||||||
@@ -67,7 +276,6 @@ echo '# Building hakurei.'
|
|||||||
go generate -v ./...
|
go generate -v ./...
|
||||||
go build -trimpath -v -o /work/system/libexec/hakurei -ldflags="-s -w
|
go build -trimpath -v -o /work/system/libexec/hakurei -ldflags="-s -w
|
||||||
-buildid=
|
-buildid=
|
||||||
-linkmode external
|
|
||||||
-extldflags=-static
|
-extldflags=-static
|
||||||
-X hakurei.app/internal/info.buildVersion="$HAKUREI_VERSION"
|
-X hakurei.app/internal/info.buildVersion="$HAKUREI_VERSION"
|
||||||
-X hakurei.app/internal/info.hakureiPath=/system/bin/hakurei
|
-X hakurei.app/internal/info.hakureiPath=/system/bin/hakurei
|
||||||
@@ -76,7 +284,7 @@ go build -trimpath -v -o /work/system/libexec/hakurei -ldflags="-s -w
|
|||||||
echo
|
echo
|
||||||
|
|
||||||
echo '# Testing hakurei.'
|
echo '# Testing hakurei.'
|
||||||
go test -ldflags='-buildid= -linkmode external -extldflags=-static' ./...
|
go test -ldflags='-buildid= -extldflags=-static' ./...
|
||||||
echo
|
echo
|
||||||
|
|
||||||
mkdir -p /work/system/bin/
|
mkdir -p /work/system/bin/
|
||||||
@@ -84,23 +292,12 @@ mkdir -p /work/system/bin/
|
|||||||
hakurei \
|
hakurei \
|
||||||
sharefs \
|
sharefs \
|
||||||
../../bin/)
|
../../bin/)
|
||||||
`), hakureiVersion
|
`)
|
||||||
},
|
|
||||||
|
|
||||||
Name: "hakurei",
|
|
||||||
Description: "low-level userspace tooling for Rosa OS",
|
|
||||||
Website: "https://hakurei.app/",
|
|
||||||
}
|
}
|
||||||
artifactsM[HakureiDist] = Metadata{
|
artifactsF[HakureiDist] = func(t Toolchain) pkg.Artifact {
|
||||||
f: func(t Toolchain) (pkg.Artifact, string) {
|
|
||||||
return t.newHakurei("-dist", `
|
return t.newHakurei("-dist", `
|
||||||
export HAKUREI_VERSION
|
export HAKUREI_VERSION
|
||||||
DESTDIR=/work /usr/src/hakurei/dist/release.sh
|
DESTDIR=/work /usr/src/hakurei/dist/release.sh
|
||||||
`), hakureiVersion
|
`)
|
||||||
},
|
|
||||||
|
|
||||||
Name: "hakurei-dist",
|
|
||||||
Description: "low-level userspace tooling for Rosa OS (distribution tarball)",
|
|
||||||
Website: "https://hakurei.app/",
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,26 +0,0 @@
|
|||||||
//go:build current
|
|
||||||
|
|
||||||
package rosa
|
|
||||||
|
|
||||||
import (
|
|
||||||
_ "embed"
|
|
||||||
|
|
||||||
"hakurei.app/internal/pkg"
|
|
||||||
)
|
|
||||||
|
|
||||||
const hakureiVersion = "1.0-current"
|
|
||||||
|
|
||||||
// hakureiSourceTarball is a compressed tarball of the hakurei source code.
|
|
||||||
//
|
|
||||||
//go:generate tar -zc -C ../.. --exclude .git --exclude *.tar.gz -f hakurei_current.tar.gz .
|
|
||||||
//go:embed hakurei_current.tar.gz
|
|
||||||
var hakureiSourceTarball []byte
|
|
||||||
|
|
||||||
// hakureiSource is the source code at the time this package is compiled.
|
|
||||||
var hakureiSource = pkg.NewTar(pkg.NewFile(
|
|
||||||
"hakurei-current.tar.gz",
|
|
||||||
hakureiSourceTarball,
|
|
||||||
), pkg.TarGzip)
|
|
||||||
|
|
||||||
// hakureiPatches are patches applied against the compile-time source tree.
|
|
||||||
var hakureiPatches [][2]string
|
|
||||||
@@ -1,51 +0,0 @@
|
|||||||
//go:build !current
|
|
||||||
|
|
||||||
package rosa
|
|
||||||
|
|
||||||
import "hakurei.app/internal/pkg"
|
|
||||||
|
|
||||||
const hakureiVersion = "0.3.5"
|
|
||||||
|
|
||||||
// hakureiSource is the source code of a hakurei release.
|
|
||||||
var hakureiSource = pkg.NewHTTPGetTar(
|
|
||||||
nil, "https://git.gensokyo.uk/security/hakurei/archive/"+
|
|
||||||
"v"+hakureiVersion+".tar.gz",
|
|
||||||
mustDecode("6Tn38NLezRD2d3aGdFg5qFfqn8_KvC6HwMKwJMPvaHmVw8xRgxn8B0PObswl2mOk"),
|
|
||||||
pkg.TarGzip,
|
|
||||||
)
|
|
||||||
|
|
||||||
// hakureiPatches are patches applied against a hakurei release.
|
|
||||||
var hakureiPatches = [][2]string{
|
|
||||||
{"createTemp-error-injection", `diff --git a/container/dispatcher_test.go b/container/dispatcher_test.go
|
|
||||||
index 5de37fc..fe0c4db 100644
|
|
||||||
--- a/container/dispatcher_test.go
|
|
||||||
+++ b/container/dispatcher_test.go
|
|
||||||
@@ -238,8 +238,11 @@ func sliceAddr[S any](s []S) *[]S { return &s }
|
|
||||||
|
|
||||||
func newCheckedFile(t *testing.T, name, wantData string, closeErr error) osFile {
|
|
||||||
f := &checkedOsFile{t: t, name: name, want: wantData, closeErr: closeErr}
|
|
||||||
- // check happens in Close, and cleanup is not guaranteed to run, so relying on it for sloppy implementations will cause sporadic test results
|
|
||||||
- f.cleanup = runtime.AddCleanup(f, func(name string) { f.t.Fatalf("checkedOsFile %s became unreachable without a call to Close", name) }, f.name)
|
|
||||||
+ // check happens in Close, and cleanup is not guaranteed to run, so relying
|
|
||||||
+ // on it for sloppy implementations will cause sporadic test results
|
|
||||||
+ f.cleanup = runtime.AddCleanup(f, func(name string) {
|
|
||||||
+ panic("checkedOsFile " + name + " became unreachable without a call to Close")
|
|
||||||
+ }, name)
|
|
||||||
return f
|
|
||||||
}
|
|
||||||
|
|
||||||
diff --git a/container/initplace_test.go b/container/initplace_test.go
|
|
||||||
index afeddbe..1c2f20b 100644
|
|
||||||
--- a/container/initplace_test.go
|
|
||||||
+++ b/container/initplace_test.go
|
|
||||||
@@ -21,7 +21,7 @@ func TestTmpfileOp(t *testing.T) {
|
|
||||||
Path: samplePath,
|
|
||||||
Data: sampleData,
|
|
||||||
}, nil, nil, []stub.Call{
|
|
||||||
- call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, newCheckedFile(t, "tmp.32768", sampleDataString, nil), stub.UniqueError(5)),
|
|
||||||
+ call("createTemp", stub.ExpectArgs{"/", "tmp.*"}, (*checkedOsFile)(nil), stub.UniqueError(5)),
|
|
||||||
}, stub.UniqueError(5)},
|
|
||||||
|
|
||||||
{"Write", &Params{ParentPerm: 0700}, &TmpfileOp{
|
|
||||||
`},
|
|
||||||
}
|
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user