Compare commits
173 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
337bf20f50
|
|||
|
1cb792cf6e
|
|||
|
b2b40b07e8
|
|||
|
da11b26ec1
|
|||
|
024489e800
|
|||
|
0f795712b0
|
|||
|
7e2210ff71
|
|||
|
a71a008f3c
|
|||
|
162265b47e
|
|||
|
3fa7ac04e4
|
|||
|
bf2867d653
|
|||
|
ec0f0f6507
|
|||
|
a77a802955
|
|||
|
4407e14dfc
|
|||
|
e024d3184a
|
|||
|
8e1bf00c2d
|
|||
|
b111e22050
|
|||
|
1fa458c0be
|
|||
|
2c7ae67a67
|
|||
|
3826621b21
|
|||
|
041b505c2e
|
|||
|
e6debce649
|
|||
|
aa26b86fce
|
|||
|
a57a8fd5d8
|
|||
|
1d5d063d6a
|
|||
|
e61628a34e
|
|||
|
5a18f14929
|
|||
|
f12880688d
|
|||
|
bb5bbfe16a
|
|||
|
427e1ca37c
|
|||
|
96fdd9ecc5
|
|||
|
02771b655b
|
|||
|
d1c8d2c39b
|
|||
|
0efd742e8a
|
|||
|
ae1fe638d5
|
|||
|
445d95023b
|
|||
|
fc66f0bb47
|
|||
|
2cd6b35bee
|
|||
|
09a216c6ec
|
|||
|
44d17325c2
|
|||
|
544ce77cbc
|
|||
|
63c3c30b23
|
|||
|
d23c4ecc7c
|
|||
|
a46656dff8
|
|||
|
77db153ff5
|
|||
|
520d95bc07
|
|||
|
451df3f4e7
|
|||
|
011fac15ed
|
|||
|
347682ad0b
|
|||
|
1a2b979add
|
|||
|
b1c90cc380
|
|||
|
3a66b8143a
|
|||
|
64bbd3aabd
|
|||
|
08799a13d0
|
|||
|
1aef9c3bbb
|
|||
|
1f38303747
|
|||
|
640777b00c
|
|||
|
1d657193cf
|
|||
|
bab5406295
|
|||
|
725ae7d64d
|
|||
|
37a0c3967e
|
|||
|
ea0692548f
|
|||
|
48ea23e648
|
|||
|
40320e4920
|
|||
|
3ca0f61632
|
|||
|
6ffaac96e3
|
|||
|
13c7713d0c
|
|||
|
42389f7ec5
|
|||
|
30f130c691
|
|||
|
ceb4d26087
|
|||
|
852f3a9b3d
|
|||
|
5e02dbdb0d
|
|||
|
6a3248d472
|
|||
|
67404c98d9
|
|||
|
b9bf69cfce
|
|||
|
4648f98272
|
|||
|
11d99439ac
|
|||
|
39e4c5b8ac
|
|||
|
e8f6db38b6
|
|||
|
20d5b71575
|
|||
|
e903e7f542
|
|||
|
1caa051f4d
|
|||
|
dcdc6f7f6d
|
|||
|
5ad6f26b46
|
|||
|
7ba75a79f4
|
|||
|
9ef84d3904
|
|||
|
3b7b6e51fb
|
|||
|
b1b4debb82
|
|||
|
021cbbc2a8
|
|||
|
a4a54a4a4d
|
|||
|
04a344aac6
|
|||
|
6b98156a3d
|
|||
|
753432cf09
|
|||
|
f8902e3679
|
|||
|
8ee53a5164
|
|||
|
3981d44757
|
|||
|
9fd67e47b4
|
|||
|
4dcec40156
|
|||
|
9a274c78a3
|
|||
|
5647c3a91f
|
|||
|
992139c75d
|
|||
|
57c69b533e
|
|||
|
6f0c2a80f2
|
|||
|
08dfefb28d
|
|||
|
b081629662
|
|||
|
fba541f301
|
|||
|
5f0da3d5c2
|
|||
|
4d5841dd62
|
|||
|
9e752b588a
|
|||
|
27b1aaae38
|
|||
|
9e18de1dc2
|
|||
|
b80ea91a42
|
|||
|
30a9dfa4b8
|
|||
|
8d657b6fdf
|
|||
|
ae9b9adfd2
|
|||
|
dd6a480a21
|
|||
|
3942272c30
|
|||
|
9036986156
|
|||
|
a394971dd7
|
|||
|
9daba60809
|
|||
|
bcd79a22ff
|
|||
|
0ff7ab915b
|
|||
|
823575acac
|
|||
|
136bc0917b
|
|||
|
d6b082dd0b
|
|||
|
89d6d9576b
|
|||
|
fafce04a5d
|
|||
|
5d760a1db9
|
|||
|
d197e40b2a
|
|||
|
2008902247
|
|||
|
30ac985fd2
|
|||
|
e9fec368f8
|
|||
|
46add42f58
|
|||
|
377b61e342
|
|||
|
520c36db6d
|
|||
|
3352bb975b
|
|||
|
f7f48d57e9
|
|||
|
5c2345128e
|
|||
|
78f9676b1f
|
|||
|
5b5b676132
|
|||
|
78383fb6e8
|
|||
|
e97f6a393f
|
|||
|
eeffefd22b
|
|||
|
ac825640ab
|
|||
|
a7f7ce1795
|
|||
|
38c639e35c
|
|||
|
b2cb13e94c
|
|||
|
46f98d12d6
|
|||
|
503c7f953c
|
|||
|
15c9f6545d
|
|||
|
83b0e32c55
|
|||
|
eeaf26e7a2
|
|||
|
b587caf2e8
|
|||
|
f1c2ca4928
|
|||
|
0ca301219f
|
|||
|
e2199e1276
|
|||
|
86eacb3208
|
|||
|
8541bdd858
|
|||
|
46be0b0dc8
|
|||
|
cbe37e87e7
|
|||
|
66d741fb07
|
|||
|
0d449011f6
|
|||
|
46428ed85d
|
|||
|
081d6b463c
|
|||
|
11b3171180
|
|||
|
adbb84c3dd
|
|||
|
1084e31d95
|
|||
|
27a1b8fe0a
|
|||
|
b2141a41d7
|
|||
|
c0dff5bc87
|
|||
|
04513c0510
|
|||
|
28ebf973d6
|
|||
|
41aeb404ec
|
1
.gitignore
vendored
1
.gitignore
vendored
@@ -7,6 +7,7 @@
|
|||||||
|
|
||||||
# go generate
|
# go generate
|
||||||
/cmd/hakurei/LICENSE
|
/cmd/hakurei/LICENSE
|
||||||
|
/cmd/mbf/internal/pkgserver/ui/static
|
||||||
/internal/pkg/testdata/testtool
|
/internal/pkg/testdata/testtool
|
||||||
/internal/rosa/hakurei_current.tar.gz
|
/internal/rosa/hakurei_current.tar.gz
|
||||||
|
|
||||||
|
|||||||
8
all.sh
8
all.sh
@@ -2,5 +2,9 @@
|
|||||||
|
|
||||||
TOOLCHAIN_VERSION="$(go version)"
|
TOOLCHAIN_VERSION="$(go version)"
|
||||||
cd "$(dirname -- "$0")/"
|
cd "$(dirname -- "$0")/"
|
||||||
echo "# Building cmd/dist using ${TOOLCHAIN_VERSION}."
|
echo "Building cmd/dist using ${TOOLCHAIN_VERSION}."
|
||||||
go run -v --tags=dist ./cmd/dist
|
FLAGS=''
|
||||||
|
if test -n "$VERBOSE"; then
|
||||||
|
FLAGS="$FLAGS -v"
|
||||||
|
fi
|
||||||
|
go run $FLAGS --tags=dist ./cmd/dist
|
||||||
|
|||||||
@@ -4,15 +4,23 @@ import "strings"
|
|||||||
|
|
||||||
const (
|
const (
|
||||||
// SpecialOverlayEscape is the escape string for overlay mount options.
|
// SpecialOverlayEscape is the escape string for overlay mount options.
|
||||||
|
//
|
||||||
|
// Deprecated: This is no longer used and will be removed in 0.5.
|
||||||
SpecialOverlayEscape = `\`
|
SpecialOverlayEscape = `\`
|
||||||
// SpecialOverlayOption is the separator string between overlay mount options.
|
// SpecialOverlayOption is the separator string between overlay mount options.
|
||||||
|
//
|
||||||
|
// Deprecated: This is no longer used and will be removed in 0.5.
|
||||||
SpecialOverlayOption = ","
|
SpecialOverlayOption = ","
|
||||||
// SpecialOverlayPath is the separator string between overlay paths.
|
// SpecialOverlayPath is the separator string between overlay paths.
|
||||||
|
//
|
||||||
|
// Deprecated: This is no longer used and will be removed in 0.5.
|
||||||
SpecialOverlayPath = ":"
|
SpecialOverlayPath = ":"
|
||||||
)
|
)
|
||||||
|
|
||||||
// EscapeOverlayDataSegment escapes a string for formatting into the data
|
// EscapeOverlayDataSegment escapes a string for formatting into the data
|
||||||
// argument of an overlay mount system call.
|
// argument of an overlay mount system call.
|
||||||
|
//
|
||||||
|
// Deprecated: This is no longer used and will be removed in 0.5.
|
||||||
func EscapeOverlayDataSegment(s string) string {
|
func EscapeOverlayDataSegment(s string) string {
|
||||||
if s == "" {
|
if s == "" {
|
||||||
return ""
|
return ""
|
||||||
|
|||||||
27
cmd/dist/main.go
vendored
27
cmd/dist/main.go
vendored
@@ -42,14 +42,18 @@ func mustRun(ctx context.Context, name string, arg ...string) {
|
|||||||
var comp []byte
|
var comp []byte
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
fmt.Println()
|
|
||||||
log.SetFlags(0)
|
log.SetFlags(0)
|
||||||
log.SetPrefix("# ")
|
log.SetPrefix("")
|
||||||
|
|
||||||
|
verbose := os.Getenv("VERBOSE") != ""
|
||||||
version := getenv("HAKUREI_VERSION", "untagged")
|
version := getenv("HAKUREI_VERSION", "untagged")
|
||||||
prefix := getenv("PREFIX", "/usr")
|
prefix := getenv("PREFIX", "/usr")
|
||||||
destdir := getenv("DESTDIR", "dist")
|
destdir := getenv("DESTDIR", "dist")
|
||||||
|
|
||||||
|
if verbose {
|
||||||
|
log.Println()
|
||||||
|
}
|
||||||
|
|
||||||
if err := os.MkdirAll(destdir, 0755); err != nil {
|
if err := os.MkdirAll(destdir, 0755); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -76,12 +80,17 @@ func main() {
|
|||||||
ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt)
|
ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
log.Println("Building hakurei.")
|
verboseFlag := "-v"
|
||||||
|
if !verbose {
|
||||||
|
verboseFlag = "-buildvcs=false"
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("Building hakurei for %s/%s.", runtime.GOOS, runtime.GOARCH)
|
||||||
mustRun(ctx, "go", "generate", "./...")
|
mustRun(ctx, "go", "generate", "./...")
|
||||||
mustRun(
|
mustRun(
|
||||||
ctx, "go", "build",
|
ctx, "go", "build",
|
||||||
"-trimpath",
|
"-trimpath",
|
||||||
"-v", "-o", s,
|
verboseFlag, "-o", s,
|
||||||
"-ldflags=-s -w "+
|
"-ldflags=-s -w "+
|
||||||
"-buildid= -linkmode external -extldflags=-static "+
|
"-buildid= -linkmode external -extldflags=-static "+
|
||||||
"-X hakurei.app/internal/info.buildVersion="+version+" "+
|
"-X hakurei.app/internal/info.buildVersion="+version+" "+
|
||||||
@@ -90,17 +99,17 @@ func main() {
|
|||||||
"-X main.hakureiPath="+prefix+"/bin/hakurei",
|
"-X main.hakureiPath="+prefix+"/bin/hakurei",
|
||||||
"./...",
|
"./...",
|
||||||
)
|
)
|
||||||
fmt.Println()
|
log.Println()
|
||||||
|
|
||||||
log.Println("Testing Hakurei.")
|
log.Println("##### Testing Hakurei.")
|
||||||
mustRun(
|
mustRun(
|
||||||
ctx, "go", "test",
|
ctx, "go", "test",
|
||||||
"-ldflags=-buildid= -linkmode external -extldflags=-static",
|
"-ldflags=-buildid= -linkmode external -extldflags=-static",
|
||||||
"./...",
|
"./...",
|
||||||
)
|
)
|
||||||
fmt.Println()
|
log.Println()
|
||||||
|
|
||||||
log.Println("Creating distribution.")
|
log.Println("##### Creating distribution.")
|
||||||
const suffix = ".tar.gz"
|
const suffix = ".tar.gz"
|
||||||
distName := "hakurei-" + version + "-" + runtime.GOARCH
|
distName := "hakurei-" + version + "-" + runtime.GOARCH
|
||||||
var f *os.File
|
var f *os.File
|
||||||
@@ -121,7 +130,7 @@ func main() {
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
h := sha512.New()
|
h := sha512.New()
|
||||||
gw := gzip.NewWriter(io.MultiWriter(f, h))
|
gw, _ := gzip.NewWriterLevel(io.MultiWriter(f, h), gzip.BestCompression)
|
||||||
tw := tar.NewWriter(gw)
|
tw := tar.NewWriter(gw)
|
||||||
|
|
||||||
mustWriteHeader := func(name string, size int64, mode os.FileMode) {
|
mustWriteHeader := func(name string, size int64, mode os.FileMode) {
|
||||||
|
|||||||
91
cmd/mbf/cache.go
Normal file
91
cmd/mbf/cache.go
Normal file
@@ -0,0 +1,91 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"hakurei.app/check"
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
"hakurei.app/message"
|
||||||
|
)
|
||||||
|
|
||||||
|
// cache refers to an instance of [pkg.Cache] that might be open.
|
||||||
|
type cache struct {
|
||||||
|
ctx context.Context
|
||||||
|
msg message.Msg
|
||||||
|
|
||||||
|
// Should generally not be used directly.
|
||||||
|
c *pkg.Cache
|
||||||
|
|
||||||
|
cures, jobs int
|
||||||
|
hostAbstract, idle bool
|
||||||
|
|
||||||
|
base string
|
||||||
|
}
|
||||||
|
|
||||||
|
// open opens the underlying [pkg.Cache].
|
||||||
|
func (cache *cache) open() (err error) {
|
||||||
|
if cache.c != nil {
|
||||||
|
return os.ErrInvalid
|
||||||
|
}
|
||||||
|
|
||||||
|
var base *check.Absolute
|
||||||
|
if cache.base, err = filepath.Abs(cache.base); err != nil {
|
||||||
|
return
|
||||||
|
} else if base, err = check.NewAbs(cache.base); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var flags int
|
||||||
|
if cache.idle {
|
||||||
|
flags |= pkg.CSchedIdle
|
||||||
|
}
|
||||||
|
if cache.hostAbstract {
|
||||||
|
flags |= pkg.CHostAbstract
|
||||||
|
}
|
||||||
|
|
||||||
|
done := make(chan struct{})
|
||||||
|
defer close(done)
|
||||||
|
go func() {
|
||||||
|
select {
|
||||||
|
case <-cache.ctx.Done():
|
||||||
|
if testing.Testing() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
os.Exit(2)
|
||||||
|
|
||||||
|
case <-done:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
cache.msg.Verbosef("opening cache at %s", base)
|
||||||
|
cache.c, err = pkg.Open(
|
||||||
|
cache.ctx,
|
||||||
|
cache.msg,
|
||||||
|
flags,
|
||||||
|
cache.cures,
|
||||||
|
cache.jobs,
|
||||||
|
base,
|
||||||
|
)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close closes the underlying [pkg.Cache] if it is open.
|
||||||
|
func (cache *cache) Close() {
|
||||||
|
if cache.c != nil {
|
||||||
|
cache.c.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do calls f on the underlying cache and returns its error value.
|
||||||
|
func (cache *cache) Do(f func(cache *pkg.Cache) error) error {
|
||||||
|
if cache.c == nil {
|
||||||
|
if err := cache.open(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return f(cache.c)
|
||||||
|
}
|
||||||
37
cmd/mbf/cache_test.go
Normal file
37
cmd/mbf/cache_test.go
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
"hakurei.app/message"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestCache(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
cm := cache{
|
||||||
|
ctx: t.Context(),
|
||||||
|
msg: message.New(log.New(os.Stderr, "check: ", 0)),
|
||||||
|
base: t.TempDir(),
|
||||||
|
|
||||||
|
hostAbstract: true, idle: true,
|
||||||
|
}
|
||||||
|
defer cm.Close()
|
||||||
|
cm.Close()
|
||||||
|
|
||||||
|
if err := cm.open(); err != nil {
|
||||||
|
t.Fatalf("open: error = %v", err)
|
||||||
|
}
|
||||||
|
if err := cm.open(); err != os.ErrInvalid {
|
||||||
|
t.Errorf("(duplicate) open: error = %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cm.Do(func(cache *pkg.Cache) error {
|
||||||
|
return cache.Scrub(0)
|
||||||
|
}); err != nil {
|
||||||
|
t.Errorf("Scrub: error = %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
354
cmd/mbf/daemon.go
Normal file
354
cmd/mbf/daemon.go
Normal file
@@ -0,0 +1,354 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"math"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
"unique"
|
||||||
|
|
||||||
|
"hakurei.app/check"
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
)
|
||||||
|
|
||||||
|
// daemonTimeout is the maximum amount of time cureFromIR will wait on I/O.
|
||||||
|
const daemonTimeout = 30 * time.Second
|
||||||
|
|
||||||
|
// daemonDeadline returns the deadline corresponding to daemonTimeout, or the
|
||||||
|
// zero value when running in a test.
|
||||||
|
func daemonDeadline() time.Time {
|
||||||
|
if testing.Testing() {
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
return time.Now().Add(daemonTimeout)
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// remoteNoReply notifies that the client will not receive a cure reply.
|
||||||
|
remoteNoReply = 1 << iota
|
||||||
|
)
|
||||||
|
|
||||||
|
// cureFromIR services an IR curing request.
|
||||||
|
func cureFromIR(
|
||||||
|
cache *pkg.Cache,
|
||||||
|
conn net.Conn,
|
||||||
|
flags uint64,
|
||||||
|
) (pkg.Artifact, error) {
|
||||||
|
a, decodeErr := cache.NewDecoder(conn).Decode()
|
||||||
|
if decodeErr != nil {
|
||||||
|
_, err := conn.Write([]byte("\x00" + decodeErr.Error()))
|
||||||
|
return nil, errors.Join(decodeErr, err, conn.Close())
|
||||||
|
}
|
||||||
|
|
||||||
|
pathname, _, cureErr := cache.Cure(a)
|
||||||
|
if flags&remoteNoReply != 0 {
|
||||||
|
return a, errors.Join(cureErr, conn.Close())
|
||||||
|
}
|
||||||
|
if err := conn.SetWriteDeadline(daemonDeadline()); err != nil {
|
||||||
|
return a, errors.Join(cureErr, err, conn.Close())
|
||||||
|
}
|
||||||
|
if cureErr != nil {
|
||||||
|
_, err := conn.Write([]byte("\x00" + cureErr.Error()))
|
||||||
|
return a, errors.Join(cureErr, err, conn.Close())
|
||||||
|
}
|
||||||
|
_, err := conn.Write([]byte(pathname.String()))
|
||||||
|
if testing.Testing() && errors.Is(err, io.ErrClosedPipe) {
|
||||||
|
return a, nil
|
||||||
|
}
|
||||||
|
return a, errors.Join(err, conn.Close())
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// specialCancel is a message consisting of a single identifier referring
|
||||||
|
// to a curing artifact to be cancelled.
|
||||||
|
specialCancel = iota
|
||||||
|
// specialAbort requests for all pending cures to be aborted. It has no
|
||||||
|
// message body.
|
||||||
|
specialAbort
|
||||||
|
|
||||||
|
// remoteSpecial denotes a special message with custom layout.
|
||||||
|
remoteSpecial = math.MaxUint64
|
||||||
|
)
|
||||||
|
|
||||||
|
// writeSpecialHeader writes the header of a remoteSpecial message.
|
||||||
|
func writeSpecialHeader(conn net.Conn, kind uint64) error {
|
||||||
|
var sh [16]byte
|
||||||
|
binary.LittleEndian.PutUint64(sh[:], remoteSpecial)
|
||||||
|
binary.LittleEndian.PutUint64(sh[8:], kind)
|
||||||
|
if n, err := conn.Write(sh[:]); err != nil {
|
||||||
|
return err
|
||||||
|
} else if n != len(sh) {
|
||||||
|
return io.ErrShortWrite
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// cancelIdent reads an identifier from conn and cancels the corresponding cure.
|
||||||
|
func cancelIdent(
|
||||||
|
cache *pkg.Cache,
|
||||||
|
conn net.Conn,
|
||||||
|
) (*pkg.ID, bool, error) {
|
||||||
|
var ident pkg.ID
|
||||||
|
if _, err := io.ReadFull(conn, ident[:]); err != nil {
|
||||||
|
return nil, false, errors.Join(err, conn.Close())
|
||||||
|
}
|
||||||
|
ok := cache.Cancel(unique.Make(ident))
|
||||||
|
return &ident, ok, conn.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// serve services connections from a [net.UnixListener].
|
||||||
|
func serve(
|
||||||
|
ctx context.Context,
|
||||||
|
log *log.Logger,
|
||||||
|
cm *cache,
|
||||||
|
ul *net.UnixListener,
|
||||||
|
) error {
|
||||||
|
ul.SetUnlinkOnClose(true)
|
||||||
|
if cm.c == nil {
|
||||||
|
if err := cm.open(); err != nil {
|
||||||
|
return errors.Join(err, ul.Close())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
defer wg.Wait()
|
||||||
|
|
||||||
|
wg.Go(func() {
|
||||||
|
for {
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
conn, err := ul.AcceptUnix()
|
||||||
|
if err != nil {
|
||||||
|
if !errors.Is(err, os.ErrDeadlineExceeded) {
|
||||||
|
log.Println(err)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
wg.Go(func() {
|
||||||
|
done := make(chan struct{})
|
||||||
|
defer close(done)
|
||||||
|
go func() {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
_ = conn.SetDeadline(time.Now())
|
||||||
|
|
||||||
|
case <-done:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
if _err := conn.SetReadDeadline(daemonDeadline()); _err != nil {
|
||||||
|
log.Println(_err)
|
||||||
|
if _err = conn.Close(); _err != nil {
|
||||||
|
log.Println(_err)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var word [8]byte
|
||||||
|
if _, _err := io.ReadFull(conn, word[:]); _err != nil {
|
||||||
|
log.Println(_err)
|
||||||
|
if _err = conn.Close(); _err != nil {
|
||||||
|
log.Println(_err)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
flags := binary.LittleEndian.Uint64(word[:])
|
||||||
|
|
||||||
|
if flags == remoteSpecial {
|
||||||
|
if _, _err := io.ReadFull(conn, word[:]); _err != nil {
|
||||||
|
log.Println(_err)
|
||||||
|
if _err = conn.Close(); _err != nil {
|
||||||
|
log.Println(_err)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch special := binary.LittleEndian.Uint64(word[:]); special {
|
||||||
|
default:
|
||||||
|
log.Printf("invalid special %d", special)
|
||||||
|
|
||||||
|
case specialCancel:
|
||||||
|
if id, ok, _err := cancelIdent(cm.c, conn); _err != nil {
|
||||||
|
log.Println(_err)
|
||||||
|
} else if !ok {
|
||||||
|
log.Println(
|
||||||
|
"attempting to cancel invalid artifact",
|
||||||
|
pkg.Encode(*id),
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
log.Println(
|
||||||
|
"cancelled artifact",
|
||||||
|
pkg.Encode(*id),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
case specialAbort:
|
||||||
|
log.Println("aborting all pending cures")
|
||||||
|
cm.c.Abort()
|
||||||
|
if _err := conn.Close(); _err != nil {
|
||||||
|
log.Println(_err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if a, _err := cureFromIR(cm.c, conn, flags); _err != nil {
|
||||||
|
log.Println(_err)
|
||||||
|
} else {
|
||||||
|
log.Printf(
|
||||||
|
"fulfilled artifact %s",
|
||||||
|
pkg.Encode(cm.c.Ident(a).Value()),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
<-ctx.Done()
|
||||||
|
if err := ul.SetDeadline(time.Now()); err != nil {
|
||||||
|
return errors.Join(err, ul.Close())
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
return ul.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// dial wraps [net.DialUnix] with a context.
|
||||||
|
func dial(ctx context.Context, addr *net.UnixAddr) (
|
||||||
|
done chan<- struct{},
|
||||||
|
conn *net.UnixConn,
|
||||||
|
err error,
|
||||||
|
) {
|
||||||
|
conn, err = net.DialUnix("unix", nil, addr)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
d := make(chan struct{})
|
||||||
|
done = d
|
||||||
|
go func() {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
_ = conn.SetDeadline(time.Now())
|
||||||
|
|
||||||
|
case <-d:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// cureRemote cures a [pkg.Artifact] on a daemon.
|
||||||
|
func cureRemote(
|
||||||
|
ctx context.Context,
|
||||||
|
addr *net.UnixAddr,
|
||||||
|
a pkg.Artifact,
|
||||||
|
flags uint64,
|
||||||
|
) (*check.Absolute, error) {
|
||||||
|
if flags == remoteSpecial {
|
||||||
|
return nil, syscall.EINVAL
|
||||||
|
}
|
||||||
|
|
||||||
|
done, conn, err := dial(ctx, addr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer close(done)
|
||||||
|
|
||||||
|
if n, flagErr := conn.Write(binary.LittleEndian.AppendUint64(nil, flags)); flagErr != nil {
|
||||||
|
return nil, errors.Join(flagErr, conn.Close())
|
||||||
|
} else if n != 8 {
|
||||||
|
return nil, errors.Join(io.ErrShortWrite, conn.Close())
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = pkg.NewIR().EncodeAll(conn, a); err != nil {
|
||||||
|
return nil, errors.Join(err, conn.Close())
|
||||||
|
} else if err = conn.CloseWrite(); err != nil {
|
||||||
|
return nil, errors.Join(err, conn.Close())
|
||||||
|
}
|
||||||
|
|
||||||
|
if flags&remoteNoReply != 0 {
|
||||||
|
return nil, conn.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
payload, recvErr := io.ReadAll(conn)
|
||||||
|
if err = errors.Join(recvErr, conn.Close()); err != nil {
|
||||||
|
if errors.Is(err, os.ErrDeadlineExceeded) {
|
||||||
|
if cancelErr := ctx.Err(); cancelErr != nil {
|
||||||
|
err = cancelErr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(payload) > 0 && payload[0] == 0 {
|
||||||
|
return nil, errors.New(string(payload[1:]))
|
||||||
|
}
|
||||||
|
|
||||||
|
var p *check.Absolute
|
||||||
|
p, err = check.NewAbs(string(payload))
|
||||||
|
return p, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// cancelRemote cancels a [pkg.Artifact] curing on a daemon.
|
||||||
|
func cancelRemote(
|
||||||
|
ctx context.Context,
|
||||||
|
addr *net.UnixAddr,
|
||||||
|
a pkg.Artifact,
|
||||||
|
wait bool,
|
||||||
|
) error {
|
||||||
|
done, conn, err := dial(ctx, addr)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer close(done)
|
||||||
|
|
||||||
|
if err = writeSpecialHeader(conn, specialCancel); err != nil {
|
||||||
|
return errors.Join(err, conn.Close())
|
||||||
|
}
|
||||||
|
|
||||||
|
var n int
|
||||||
|
id := pkg.NewIR().Ident(a).Value()
|
||||||
|
if n, err = conn.Write(id[:]); err != nil {
|
||||||
|
return errors.Join(err, conn.Close())
|
||||||
|
} else if n != len(id) {
|
||||||
|
return errors.Join(io.ErrShortWrite, conn.Close())
|
||||||
|
}
|
||||||
|
if wait {
|
||||||
|
if _, err = conn.Read(make([]byte, 1)); err == io.EOF {
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return errors.Join(err, conn.Close())
|
||||||
|
}
|
||||||
|
|
||||||
|
// abortRemote aborts all [pkg.Artifact] curing on a daemon.
|
||||||
|
func abortRemote(
|
||||||
|
ctx context.Context,
|
||||||
|
addr *net.UnixAddr,
|
||||||
|
wait bool,
|
||||||
|
) error {
|
||||||
|
done, conn, err := dial(ctx, addr)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer close(done)
|
||||||
|
|
||||||
|
err = writeSpecialHeader(conn, specialAbort)
|
||||||
|
if wait && err == nil {
|
||||||
|
if _, err = conn.Read(make([]byte, 1)); err == io.EOF {
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return errors.Join(err, conn.Close())
|
||||||
|
}
|
||||||
146
cmd/mbf/daemon_test.go
Normal file
146
cmd/mbf/daemon_test.go
Normal file
@@ -0,0 +1,146 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"slices"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"hakurei.app/check"
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
"hakurei.app/message"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestNoReply(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
if !daemonDeadline().IsZero() {
|
||||||
|
t.Fatal("daemonDeadline did not return the zero value")
|
||||||
|
}
|
||||||
|
|
||||||
|
c, err := pkg.Open(
|
||||||
|
t.Context(),
|
||||||
|
message.New(log.New(os.Stderr, "cir: ", 0)),
|
||||||
|
0, 0, 0,
|
||||||
|
check.MustAbs(t.TempDir()),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Open: error = %v", err)
|
||||||
|
}
|
||||||
|
defer c.Close()
|
||||||
|
|
||||||
|
client, server := net.Pipe()
|
||||||
|
done := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
defer close(done)
|
||||||
|
go func() {
|
||||||
|
<-t.Context().Done()
|
||||||
|
if _err := client.SetDeadline(time.Now()); _err != nil && !errors.Is(_err, io.ErrClosedPipe) {
|
||||||
|
panic(_err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
if _err := c.EncodeAll(
|
||||||
|
client,
|
||||||
|
pkg.NewFile("check", []byte{0}),
|
||||||
|
); _err != nil {
|
||||||
|
panic(_err)
|
||||||
|
} else if _err = client.Close(); _err != nil {
|
||||||
|
panic(_err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
a, cureErr := cureFromIR(c, server, remoteNoReply)
|
||||||
|
if cureErr != nil {
|
||||||
|
t.Fatalf("cureFromIR: error = %v", cureErr)
|
||||||
|
}
|
||||||
|
|
||||||
|
<-done
|
||||||
|
wantIdent := pkg.MustDecode("fiZf-ZY_Yq6qxJNrHbMiIPYCsGkUiKCRsZrcSELXTqZWtCnESlHmzV5ThhWWGGYG")
|
||||||
|
if gotIdent := c.Ident(a).Value(); gotIdent != wantIdent {
|
||||||
|
t.Errorf(
|
||||||
|
"cureFromIR: %s, want %s",
|
||||||
|
pkg.Encode(gotIdent), pkg.Encode(wantIdent),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDaemon(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
logger := log.New(&buf, "daemon: ", 0)
|
||||||
|
|
||||||
|
addr := net.UnixAddr{
|
||||||
|
Name: filepath.Join(t.TempDir(), "daemon"),
|
||||||
|
Net: "unix",
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(t.Context())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
cm := cache{
|
||||||
|
ctx: ctx,
|
||||||
|
msg: message.New(logger),
|
||||||
|
base: t.TempDir(),
|
||||||
|
}
|
||||||
|
defer cm.Close()
|
||||||
|
|
||||||
|
ul, err := net.ListenUnix("unix", &addr)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("ListenUnix: error = %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
done := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
defer close(done)
|
||||||
|
if _err := serve(ctx, logger, &cm, ul); _err != nil {
|
||||||
|
panic(_err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
if err = cancelRemote(ctx, &addr, pkg.NewFile("nonexistent", nil), true); err != nil {
|
||||||
|
t.Fatalf("cancelRemote: error = %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = abortRemote(ctx, &addr, true); err != nil {
|
||||||
|
t.Fatalf("abortRemote: error = %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// keep this last for synchronisation
|
||||||
|
var p *check.Absolute
|
||||||
|
p, err = cureRemote(ctx, &addr, pkg.NewFile("check", []byte{0}), 0)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("cureRemote: error = %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
cancel()
|
||||||
|
<-done
|
||||||
|
|
||||||
|
const want = "fiZf-ZY_Yq6qxJNrHbMiIPYCsGkUiKCRsZrcSELXTqZWtCnESlHmzV5ThhWWGGYG"
|
||||||
|
if got := filepath.Base(p.String()); got != want {
|
||||||
|
t.Errorf("cureRemote: %s, want %s", got, want)
|
||||||
|
}
|
||||||
|
|
||||||
|
wantLog := []string{
|
||||||
|
"",
|
||||||
|
"daemon: aborting all pending cures",
|
||||||
|
"daemon: attempting to cancel invalid artifact kQm9fmnCmXST1-MMmxzcau2oKZCXXrlZydo4PkeV5hO_2PKfeC8t98hrbV_ZZx_j",
|
||||||
|
"daemon: fulfilled artifact fiZf-ZY_Yq6qxJNrHbMiIPYCsGkUiKCRsZrcSELXTqZWtCnESlHmzV5ThhWWGGYG",
|
||||||
|
}
|
||||||
|
gotLog := strings.Split(buf.String(), "\n")
|
||||||
|
slices.Sort(gotLog)
|
||||||
|
if !slices.Equal(gotLog, wantLog) {
|
||||||
|
t.Errorf(
|
||||||
|
"serve: logged\n%s\nwant\n%s",
|
||||||
|
strings.Join(gotLog, "\n"), strings.Join(wantLog, "\n"),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
114
cmd/mbf/info.go
Normal file
114
cmd/mbf/info.go
Normal file
@@ -0,0 +1,114 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
"hakurei.app/internal/rosa"
|
||||||
|
)
|
||||||
|
|
||||||
|
// commandInfo implements the info subcommand.
|
||||||
|
func commandInfo(
|
||||||
|
cm *cache,
|
||||||
|
args []string,
|
||||||
|
w io.Writer,
|
||||||
|
writeStatus bool,
|
||||||
|
r *rosa.Report,
|
||||||
|
) (err error) {
|
||||||
|
if len(args) == 0 {
|
||||||
|
return errors.New("info requires at least 1 argument")
|
||||||
|
}
|
||||||
|
|
||||||
|
// recovered by HandleAccess
|
||||||
|
mustPrintln := func(a ...any) {
|
||||||
|
if _, _err := fmt.Fprintln(w, a...); _err != nil {
|
||||||
|
panic(_err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
mustPrint := func(a ...any) {
|
||||||
|
if _, _err := fmt.Fprint(w, a...); _err != nil {
|
||||||
|
panic(_err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, name := range args {
|
||||||
|
if p, ok := rosa.ResolveName(name); !ok {
|
||||||
|
return fmt.Errorf("unknown artifact %q", name)
|
||||||
|
} else {
|
||||||
|
var suffix string
|
||||||
|
if version := rosa.Std.Version(p); version != rosa.Unversioned {
|
||||||
|
suffix += "-" + version
|
||||||
|
}
|
||||||
|
mustPrintln("name : " + name + suffix)
|
||||||
|
|
||||||
|
meta := rosa.GetMetadata(p)
|
||||||
|
mustPrintln("description : " + meta.Description)
|
||||||
|
if meta.Website != "" {
|
||||||
|
mustPrintln("website : " +
|
||||||
|
strings.TrimSuffix(meta.Website, "/"))
|
||||||
|
}
|
||||||
|
if len(meta.Dependencies) > 0 {
|
||||||
|
mustPrint("depends on :")
|
||||||
|
for _, d := range meta.Dependencies {
|
||||||
|
s := rosa.GetMetadata(d).Name
|
||||||
|
if version := rosa.Std.Version(d); version != rosa.Unversioned {
|
||||||
|
s += "-" + version
|
||||||
|
}
|
||||||
|
mustPrint(" " + s)
|
||||||
|
}
|
||||||
|
mustPrintln()
|
||||||
|
}
|
||||||
|
|
||||||
|
const statusPrefix = "status : "
|
||||||
|
if writeStatus {
|
||||||
|
if r == nil {
|
||||||
|
var f io.ReadSeekCloser
|
||||||
|
err = cm.Do(func(cache *pkg.Cache) (err error) {
|
||||||
|
f, err = cache.OpenStatus(rosa.Std.Load(p))
|
||||||
|
return
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, os.ErrNotExist) {
|
||||||
|
mustPrintln(
|
||||||
|
statusPrefix + "not yet cured",
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
mustPrint(statusPrefix)
|
||||||
|
_, err = io.Copy(w, f)
|
||||||
|
if err = errors.Join(err, f.Close()); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if err = cm.Do(func(cache *pkg.Cache) (err error) {
|
||||||
|
status, n := r.ArtifactOf(cache.Ident(rosa.Std.Load(p)))
|
||||||
|
if status == nil {
|
||||||
|
mustPrintln(
|
||||||
|
statusPrefix + "not in report",
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
mustPrintln("size :", n)
|
||||||
|
mustPrint(statusPrefix)
|
||||||
|
if _, err = w.Write(status); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if i != len(args)-1 {
|
||||||
|
mustPrintln()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
181
cmd/mbf/info_test.go
Normal file
181
cmd/mbf/info_test.go
Normal file
@@ -0,0 +1,181 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
"testing"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
"hakurei.app/internal/rosa"
|
||||||
|
"hakurei.app/message"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestInfo exercises commandInfo end to end through a table of cases,
// checking both the rendered text output and the returned error.
func TestInfo(t *testing.T) {
	t.Parallel()

	testCases := []struct {
		// Subtest name.
		name string
		// Artifact names passed to commandInfo.
		args []string
		// Optional status files to pre-populate in the cache, keyed by
		// artifact name; the sentinel value "\x00" makes the file unreadable
		// (mode 0) to provoke a permission error.
		status map[string]string
		// Optional raw report file contents; non-empty enables report mode.
		report string
		// Expected output; a leading newline is trimmed before comparison.
		want string
		// Expected error: nil, a concrete error, or a func(*cache) error
		// for errors that depend on the per-test cache directory.
		wantErr any
	}{
		{"qemu", []string{"qemu"}, nil, "", `
name : qemu-` + rosa.Std.Version(rosa.QEMU) + `
description : a generic and open source machine emulator and virtualizer
website : https://www.qemu.org
depends on : glib-` + rosa.Std.Version(rosa.GLib) + ` zstd-` + rosa.Std.Version(rosa.Zstd) + `
`, nil},

		{"multi", []string{"hakurei", "hakurei-dist"}, nil, "", `
name : hakurei-` + rosa.Std.Version(rosa.Hakurei) + `
description : low-level userspace tooling for Rosa OS
website : https://hakurei.app

name : hakurei-dist-` + rosa.Std.Version(rosa.HakureiDist) + `
description : low-level userspace tooling for Rosa OS (distribution tarball)
website : https://hakurei.app
`, nil},

		{"nonexistent", []string{"zlib", "\x00"}, nil, "", `
name : zlib-` + rosa.Std.Version(rosa.Zlib) + `
description : lossless data-compression library
website : https://zlib.net

`, fmt.Errorf("unknown artifact %q", "\x00")},

		{"status cache", []string{"zlib", "zstd"}, map[string]string{
			"zstd":    "internal/pkg (amd64) on satori\n",
			"hakurei": "internal/pkg (amd64) on satori\n\n",
		}, "", `
name : zlib-` + rosa.Std.Version(rosa.Zlib) + `
description : lossless data-compression library
website : https://zlib.net
status : not yet cured

name : zstd-` + rosa.Std.Version(rosa.Zstd) + `
description : a fast compression algorithm
website : https://facebook.github.io/zstd
status : internal/pkg (amd64) on satori
`, nil},

		{"status cache perm", []string{"zlib"}, map[string]string{
			"zlib": "\x00",
		}, "", `
name : zlib-` + rosa.Std.Version(rosa.Zlib) + `
description : lossless data-compression library
website : https://zlib.net
`, func(cm *cache) error {
			// The expected error embeds the per-test cache path, so it is
			// computed lazily against the actual cache.
			return &os.PathError{
				Op:   "open",
				Path: filepath.Join(cm.base, "status", pkg.Encode(cm.c.Ident(rosa.Std.Load(rosa.Zlib)).Value())),
				Err:  syscall.EACCES,
			}
		}},

		{"status report", []string{"zlib"}, nil, strings.Repeat("\x00", len(pkg.Checksum{})+8), `
name : zlib-` + rosa.Std.Version(rosa.Zlib) + `
description : lossless data-compression library
website : https://zlib.net
status : not in report
`, nil},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()

			var (
				cm  *cache
				buf strings.Builder
				r   *rosa.Report
			)

			// A cache is only set up when the case needs status files or a
			// report; commandInfo is otherwise called with a nil cache.
			if tc.status != nil || tc.report != "" {
				cm = &cache{
					ctx:  context.Background(),
					msg:  message.New(log.New(os.Stderr, "info: ", 0)),
					base: t.TempDir(),
				}
				defer cm.Close()
			}

			if tc.report != "" {
				pathname := filepath.Join(t.TempDir(), "report")
				// unsafe.Slice avoids copying the string; the file is
				// written once and never mutated afterwards.
				err := os.WriteFile(
					pathname,
					unsafe.Slice(unsafe.StringData(tc.report), len(tc.report)),
					0400,
				)
				if err != nil {
					t.Fatal(err)
				}

				r, err = rosa.OpenReport(pathname)
				if err != nil {
					t.Fatal(err)
				}
				defer func() {
					if err = r.Close(); err != nil {
						t.Fatal(err)
					}
				}()
			}

			if tc.status != nil {
				// Write each configured status file under the cache's
				// encoded artifact identity.
				for name, status := range tc.status {
					p, ok := rosa.ResolveName(name)
					if !ok {
						t.Fatalf("invalid name %q", name)
					}
					perm := os.FileMode(0400)
					if status == "\x00" {
						// Unreadable file: provokes EACCES on open.
						perm = 0
					}
					if err := cm.Do(func(cache *pkg.Cache) error {
						return os.WriteFile(filepath.Join(
							cm.base,
							"status",
							pkg.Encode(cache.Ident(rosa.Std.Load(p)).Value()),
						), unsafe.Slice(unsafe.StringData(status), len(status)), perm)
					}); err != nil {
						t.Fatalf("Do: error = %v", err)
					}
				}
			}

			// Resolve wantErr: either a literal error or one computed from
			// the live cache.
			var wantErr error
			switch c := tc.wantErr.(type) {
			case error:
				wantErr = c
			case func(cm *cache) error:
				wantErr = c(cm)
			default:
				if tc.wantErr != nil {
					t.Fatalf("invalid wantErr %#v", tc.wantErr)
				}
			}

			if err := commandInfo(
				cm,
				tc.args,
				&buf,
				cm != nil,
				r,
			); !reflect.DeepEqual(err, wantErr) {
				t.Fatalf("commandInfo: error = %v, want %v", err, wantErr)
			}

			if got := buf.String(); got != strings.TrimPrefix(tc.want, "\n") {
				t.Errorf("commandInfo:\n%s\nwant\n%s", got, tc.want)
			}
		})
	}
}
|
||||||
202
cmd/mbf/internal/pkgserver/api.go
Normal file
202
cmd/mbf/internal/pkgserver/api.go
Normal file
@@ -0,0 +1,202 @@
|
|||||||
|
// Package pkgserver implements the package metadata service backend.
|
||||||
|
package pkgserver
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"path"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"hakurei.app/internal/info"
|
||||||
|
"hakurei.app/internal/rosa"
|
||||||
|
)
|
||||||
|
|
||||||
|
// for lazy initialisation of handleInfo
var (
	// infoPayload is the constant JSON payload served by handleInfo,
	// computed exactly once on first request.
	infoPayload struct {
		// Current package count.
		Count int `json:"count"`
		// Hakurei version, set at link time.
		HakureiVersion string `json:"hakurei_version"`
	}
	// infoPayloadOnce guards the one-time population of infoPayload.
	infoPayloadOnce sync.Once
)

// handleInfo writes constant system information.
func handleInfo(w http.ResponseWriter, _ *http.Request) {
	infoPayloadOnce.Do(func() {
		infoPayload.Count = int(rosa.PresetUnexportedStart)
		infoPayload.HakureiVersion = info.Version()
	})
	// TODO(mae): cache entire response if no additional fields are planned
	writeAPIPayload(w, infoPayload)
}
|
||||||
|
|
||||||
|
// newStatusHandler returns a [http.HandlerFunc] that offers status files for
// viewing or download, if available. With disposition set, the response is
// served as an attachment named after the package, its version, and its
// encoded ident; otherwise it is served inline as plain text.
func (index *packageIndex) newStatusHandler(disposition bool) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// The final path element selects the package; unknown names and
		// packages absent from the report both 404.
		m, ok := index.names[path.Base(r.URL.Path)]
		if !ok || !m.HasReport {
			http.NotFound(w, r)
			return
		}

		contentType := "text/plain; charset=utf-8"
		if disposition {
			contentType = "application/octet-stream"

			// quoting like this is unsound, but okay, because metadata is hardcoded
			contentDisposition := `attachment; filename="`
			contentDisposition += m.Name + "-"
			if m.Version != "" {
				contentDisposition += m.Version + "-"
			}
			contentDisposition += m.ids + `.log"`
			w.Header().Set("Content-Disposition", contentDisposition)
		}
		w.Header().Set("Content-Type", contentType)
		w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
		// m.status is backed by the report; handleAccess brackets the read
		// (set in populate only when a report is present, which HasReport
		// above guarantees).
		if err := func() (err error) {
			defer index.handleAccess(&err)()
			_, err = w.Write(m.status)
			return
		}(); err != nil {
			log.Println(err)
			// NOTE(review): if w.Write already sent part of the body, this
			// http.Error cannot change the status line — it is best-effort.
			http.Error(
				w, "cannot deliver status, contact maintainers",
				http.StatusInternalServerError,
			)
		}
	}
}
|
||||||
|
|
||||||
|
// handleGet writes a slice of metadata with specified order.
//
// Query parameters: limit (1..100), index (0..package count-1), and
// sort (0..sortOrderEnd, indexing packageIndex.sorts). Invalid values
// produce a 400 with a descriptive message.
func (index *packageIndex) handleGet(w http.ResponseWriter, r *http.Request) {
	q := r.URL.Query()
	limit, err := strconv.Atoi(q.Get("limit"))
	if err != nil || limit > 100 || limit < 1 {
		http.Error(
			w, "limit must be an integer between 1 and 100",
			http.StatusBadRequest,
		)
		return
	}
	i, err := strconv.Atoi(q.Get("index"))
	// len(index.sorts[0]) equals rosa.PresetUnexportedStart, matching the
	// bound quoted in the error message.
	if err != nil || i >= len(index.sorts[0]) || i < 0 {
		http.Error(
			w, "index must be an integer between 0 and "+
				strconv.Itoa(int(rosa.PresetUnexportedStart-1)),
			http.StatusBadRequest,
		)
		return
	}
	sort, err := strconv.Atoi(q.Get("sort"))
	if err != nil || sort >= len(index.sorts) || sort < 0 {
		http.Error(
			w, "sort must be an integer between 0 and "+
				strconv.Itoa(sortOrderEnd),
			http.StatusBadRequest,
		)
		return
	}
	// Page [i, i+limit), clamped to the end of the chosen sort order.
	values := index.sorts[sort][i:min(i+limit, len(index.sorts[sort]))]
	writeAPIPayload(w, &struct {
		Values []*metadata `json:"values"`
	}{values})
}
|
||||||
|
|
||||||
|
func (index *packageIndex) handleSearch(w http.ResponseWriter, r *http.Request) {
|
||||||
|
q := r.URL.Query()
|
||||||
|
limit, err := strconv.Atoi(q.Get("limit"))
|
||||||
|
if err != nil || limit > 100 || limit < 1 {
|
||||||
|
http.Error(
|
||||||
|
w, "limit must be an integer between 1 and 100",
|
||||||
|
http.StatusBadRequest,
|
||||||
|
)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
i, err := strconv.Atoi(q.Get("index"))
|
||||||
|
if err != nil || i >= len(index.sorts[0]) || i < 0 {
|
||||||
|
http.Error(
|
||||||
|
w, "index must be an integer between 0 and "+
|
||||||
|
strconv.Itoa(int(rosa.PresetUnexportedStart-1)),
|
||||||
|
http.StatusBadRequest,
|
||||||
|
)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
search, err := url.QueryUnescape(q.Get("search"))
|
||||||
|
if len(search) > 100 || err != nil {
|
||||||
|
http.Error(
|
||||||
|
w, "search must be a string between 0 and 100 characters long",
|
||||||
|
http.StatusBadRequest,
|
||||||
|
)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
desc := q.Get("desc") == "true"
|
||||||
|
n, res, err := index.performSearchQuery(limit, i, search, desc)
|
||||||
|
if err != nil {
|
||||||
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||||
|
}
|
||||||
|
writeAPIPayload(w, &struct {
|
||||||
|
Count int `json:"count"`
|
||||||
|
Values []searchResult `json:"values"`
|
||||||
|
}{n, res})
|
||||||
|
}
|
||||||
|
|
||||||
|
// apiVersion is the name of the current API revision, as part of the pattern.
const apiVersion = "v1"

// registerAPI registers API handler functions.
//
// Patterns use Go 1.22+ method-qualified ServeMux syntax; the trailing
// slashes on the status patterns make them prefix matches so the package
// name can follow as the final path element.
func (index *packageIndex) registerAPI(mux *http.ServeMux) {
	mux.HandleFunc("GET /api/"+apiVersion+"/info", handleInfo)
	mux.HandleFunc("GET /api/"+apiVersion+"/get", index.handleGet)
	mux.HandleFunc("GET /api/"+apiVersion+"/search", index.handleSearch)
	// Inline viewing under the API prefix, attachment download at the root.
	mux.HandleFunc("GET /api/"+apiVersion+"/status/", index.newStatusHandler(false))
	mux.HandleFunc("GET /status/", index.newStatusHandler(true))
}
|
||||||
|
|
||||||
|
// Register arranges for mux to service API requests.
//
// It populates the package index (optionally from report), starts a
// background goroutine that evicts expired search cache entries once a
// minute until ctx is cancelled, and installs the API handlers on mux.
//
// NOTE(review): the search cache map is written by request handlers and
// read/mutated by the cleanup goroutine without synchronisation — confirm
// this is guarded elsewhere, or add a mutex around searchCache access.
func Register(ctx context.Context, mux *http.ServeMux, report *rosa.Report) error {
	var index packageIndex
	index.search = make(searchCache)
	if err := index.populate(report); err != nil {
		return err
	}
	ticker := time.NewTicker(1 * time.Minute)
	go func() {
		for {
			select {
			case <-ctx.Done():
				// Goroutine lifetime is bound to ctx; stop the ticker so it
				// can be collected.
				ticker.Stop()
				return
			case <-ticker.C:
				index.search.clean()
			}
		}
	}()
	index.registerAPI(mux)
	return nil
}
|
||||||
|
|
||||||
|
// writeAPIPayload sets headers common to API responses and encodes payload as
// JSON for the response body. All caching is disabled on the response; an
// encoding failure is logged and reported as a 500 (best-effort, since part
// of the body may already have been written).
func writeAPIPayload(w http.ResponseWriter, payload any) {
	h := w.Header()
	h.Set("Content-Type", "application/json; charset=utf-8")
	h.Set("Cache-Control", "no-cache, no-store, must-revalidate")
	h.Set("Pragma", "no-cache")
	h.Set("Expires", "0")

	err := json.NewEncoder(w).Encode(payload)
	if err == nil {
		return
	}
	log.Println(err)
	http.Error(
		w, "cannot encode payload, contact maintainers",
		http.StatusInternalServerError,
	)
}
|
||||||
181
cmd/mbf/internal/pkgserver/api_test.go
Normal file
181
cmd/mbf/internal/pkgserver/api_test.go
Normal file
@@ -0,0 +1,181 @@
|
|||||||
|
package pkgserver
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"slices"
|
||||||
|
"strconv"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"hakurei.app/internal/info"
|
||||||
|
"hakurei.app/internal/rosa"
|
||||||
|
)
|
||||||
|
|
||||||
|
// prefix is prepended to every API path.
const prefix = "/api/" + apiVersion + "/"

// TestAPIInfo checks that the info endpoint serves the expected constant
// payload with the common API headers.
func TestAPIInfo(t *testing.T) {
	t.Parallel()

	w := httptest.NewRecorder()
	handleInfo(w, httptest.NewRequestWithContext(
		t.Context(),
		http.MethodGet,
		prefix+"info",
		nil,
	))

	resp := w.Result()
	checkStatus(t, resp, http.StatusOK)
	checkAPIHeader(t, w.Header())

	// Expected payload mirrors the infoPayload struct shape.
	checkPayload(t, resp, struct {
		Count          int    `json:"count"`
		HakureiVersion string `json:"hakurei_version"`
	}{int(rosa.PresetUnexportedStart), info.Version()})
}
|
||||||
|
|
||||||
|
// TestAPIGet checks parameter validation and paging/ordering behaviour of
// the get endpoint against an index populated without a report.
func TestAPIGet(t *testing.T) {
	t.Parallel()
	const target = prefix + "get"

	index := newIndex(t)
	// newRequest performs a GET against handleGet with the given query
	// suffix and returns the recorder.
	newRequest := func(suffix string) *httptest.ResponseRecorder {
		w := httptest.NewRecorder()
		index.handleGet(w, httptest.NewRequestWithContext(
			t.Context(),
			http.MethodGet,
			target+suffix,
			nil,
		))
		return w
	}

	// checkValidate probes one integer parameter: a non-numeric value and
	// both out-of-range boundary neighbours must 400 with wantErr, while the
	// boundary values themselves must succeed.
	checkValidate := func(t *testing.T, suffix string, vmin, vmax int, wantErr string) {
		t.Run("invalid", func(t *testing.T) {
			t.Parallel()

			w := newRequest("?" + suffix + "=invalid")
			resp := w.Result()
			checkError(t, resp, wantErr, http.StatusBadRequest)
		})

		t.Run("min", func(t *testing.T) {
			t.Parallel()

			w := newRequest("?" + suffix + "=" + strconv.Itoa(vmin-1))
			resp := w.Result()
			checkError(t, resp, wantErr, http.StatusBadRequest)

			w = newRequest("?" + suffix + "=" + strconv.Itoa(vmin))
			resp = w.Result()
			checkStatus(t, resp, http.StatusOK)
		})

		t.Run("max", func(t *testing.T) {
			t.Parallel()

			w := newRequest("?" + suffix + "=" + strconv.Itoa(vmax+1))
			resp := w.Result()
			checkError(t, resp, wantErr, http.StatusBadRequest)

			w = newRequest("?" + suffix + "=" + strconv.Itoa(vmax))
			resp = w.Result()
			checkStatus(t, resp, http.StatusOK)
		})
	}

	t.Run("limit", func(t *testing.T) {
		t.Parallel()
		checkValidate(
			t, "index=0&sort=0&limit", 1, 100,
			"limit must be an integer between 1 and 100",
		)
	})

	t.Run("index", func(t *testing.T) {
		t.Parallel()
		checkValidate(
			t, "limit=1&sort=0&index", 0, int(rosa.PresetUnexportedStart-1),
			"index must be an integer between 0 and "+strconv.Itoa(int(rosa.PresetUnexportedStart-1)),
		)
	})

	t.Run("sort", func(t *testing.T) {
		t.Parallel()
		checkValidate(
			t, "index=0&limit=1&sort", 0, int(sortOrderEnd),
			"sort must be an integer between 0 and "+strconv.Itoa(int(sortOrderEnd)),
		)
	})

	// checkWithSuffix compares a successful page against want, ignoring
	// fields not set in the literals; versions compare equal if either side
	// is rosa.Unversioned.
	checkWithSuffix := func(name, suffix string, want []*metadata) {
		t.Run(name, func(t *testing.T) {
			t.Parallel()

			w := newRequest(suffix)
			resp := w.Result()
			checkStatus(t, resp, http.StatusOK)
			checkAPIHeader(t, w.Header())
			checkPayloadFunc(t, resp, func(got *struct {
				Values []*metadata `json:"values"`
			}) bool {
				return slices.EqualFunc(got.Values, want, func(a, b *metadata) bool {
					return (a.Version == b.Version ||
						a.Version == rosa.Unversioned ||
						b.Version == rosa.Unversioned) &&
						a.HasReport == b.HasReport &&
						a.Name == b.Name &&
						a.Description == b.Description &&
						a.Website == b.Website
				})
			})

		})
	}

	checkWithSuffix("declarationAscending", "?limit=2&index=1&sort=0", []*metadata{
		{
			Metadata: rosa.GetMetadata(1),
			Version:  rosa.Std.Version(1),
		},
		{
			Metadata: rosa.GetMetadata(2),
			Version:  rosa.Std.Version(2),
		},
	})
	checkWithSuffix("declarationAscending offset", "?limit=3&index=5&sort=0", []*metadata{
		{
			Metadata: rosa.GetMetadata(5),
			Version:  rosa.Std.Version(5),
		},
		{
			Metadata: rosa.GetMetadata(6),
			Version:  rosa.Std.Version(6),
		},
		{
			Metadata: rosa.GetMetadata(7),
			Version:  rosa.Std.Version(7),
		},
	})
	checkWithSuffix("declarationDescending", "?limit=3&index=0&sort=1", []*metadata{
		{
			Metadata: rosa.GetMetadata(rosa.PresetUnexportedStart - 1),
			Version:  rosa.Std.Version(rosa.PresetUnexportedStart - 1),
		},
		{
			Metadata: rosa.GetMetadata(rosa.PresetUnexportedStart - 2),
			Version:  rosa.Std.Version(rosa.PresetUnexportedStart - 2),
		},
		{
			Metadata: rosa.GetMetadata(rosa.PresetUnexportedStart - 3),
			Version:  rosa.Std.Version(rosa.PresetUnexportedStart - 3),
		},
	})
	checkWithSuffix("declarationDescending offset", "?limit=1&index=37&sort=1", []*metadata{
		{
			Metadata: rosa.GetMetadata(rosa.PresetUnexportedStart - 38),
			Version:  rosa.Std.Version(rosa.PresetUnexportedStart - 38),
		},
	})
}
|
||||||
106
cmd/mbf/internal/pkgserver/index.go
Normal file
106
cmd/mbf/internal/pkgserver/index.go
Normal file
@@ -0,0 +1,106 @@
|
|||||||
|
package pkgserver
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cmp"
|
||||||
|
"errors"
|
||||||
|
"slices"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
"hakurei.app/internal/rosa"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Sort orders accepted by the "sort" query parameter; each value indexes
// packageIndex.sorts.
const (
	declarationAscending = iota
	declarationDescending
	nameAscending
	nameDescending
	sizeAscending
	sizeDescending

	// sortOrderEnd is the last valid sort order value (== sizeDescending).
	sortOrderEnd = iota - 1
)
|
||||||
|
|
||||||
|
// packageIndex refers to metadata by name and various sort orders.
type packageIndex struct {
	// One fixed-size array of metadata pointers per sort order; each entry
	// is an independent array value (see populate), so reordering one sort
	// does not affect the others.
	sorts [sortOrderEnd + 1][rosa.PresetUnexportedStart]*metadata
	// Package name to metadata; shares *metadata values with sorts.
	names map[string]*metadata
	// Per-query result cache; see performSearchQuery.
	search searchCache
	// Taken from [rosa.Report] if available.
	handleAccess func(*error) func()
}
|
||||||
|
|
||||||
|
// metadata holds [rosa.Metadata] extended with additional information.
type metadata struct {
	// Artifact this metadata describes.
	p rosa.PArtifact
	*rosa.Metadata

	// Populated via [rosa.Toolchain.Version], [rosa.Unversioned] is equivalent
	// to the zero value. Otherwise, the zero value is invalid.
	Version string `json:"version,omitempty"`
	// Output data size, available if present in report.
	Size int64 `json:"size,omitempty"`
	// Whether the underlying [pkg.Artifact] is present in the report.
	HasReport bool `json:"report"`

	// Ident string encoded ahead of time.
	ids string
	// Backed by [rosa.Report], access must be prepared by HandleAccess.
	status []byte
}
|
||||||
|
|
||||||
|
// populate deterministically populates packageIndex, optionally with a report.
//
// Every preset artifact gets a metadata entry, registered both in the name
// map and in each sort order. Note that index.sorts elements are arrays, so
// each assignment below copies the pointer array; the in-place sorts and
// reversals therefore never alias work or one another.
func (index *packageIndex) populate(report *rosa.Report) (err error) {
	if report != nil {
		// Bracket all report-backed reads performed in this function.
		defer report.HandleAccess(&err)()
		index.handleAccess = report.HandleAccess
	}

	var work [rosa.PresetUnexportedStart]*metadata
	index.names = make(map[string]*metadata)
	ir := pkg.NewIR()
	for p := range rosa.PresetUnexportedStart {
		m := metadata{
			p: p,

			Metadata: rosa.GetMetadata(p),
			Version:  rosa.Std.Version(p),
		}
		if m.Version == "" {
			return errors.New("invalid version from " + m.Name)
		}
		// Normalise the sentinel to the zero value so omitempty drops it
		// from JSON output.
		if m.Version == rosa.Unversioned {
			m.Version = ""
		}

		if report != nil {
			id := ir.Ident(rosa.Std.Load(p))
			m.ids = pkg.Encode(id.Value())
			m.status, m.Size = report.ArtifactOf(id)
			// presumably ArtifactOf reports a negative size for artifacts
			// absent from the report — TODO confirm against rosa.Report.
			m.HasReport = m.Size >= 0
		}

		work[p] = &m
		index.names[m.Name] = &m
	}

	// Declaration order: copy forward, then reverse a second copy.
	index.sorts[declarationAscending] = work
	index.sorts[declarationDescending] = work
	slices.Reverse(index.sorts[declarationDescending][:])

	// Name order: sort a copy, then reverse a copy of the sorted array.
	index.sorts[nameAscending] = work
	slices.SortFunc(index.sorts[nameAscending][:], func(a, b *metadata) int {
		return strings.Compare(a.Name, b.Name)
	})
	index.sorts[nameDescending] = index.sorts[nameAscending]
	slices.Reverse(index.sorts[nameDescending][:])

	// Size order: same pattern as name order.
	index.sorts[sizeAscending] = work
	slices.SortFunc(index.sorts[sizeAscending][:], func(a, b *metadata) int {
		return cmp.Compare(a.Size, b.Size)
	})
	index.sorts[sizeDescending] = index.sorts[sizeAscending]
	slices.Reverse(index.sorts[sizeDescending][:])

	return
}
|
||||||
96
cmd/mbf/internal/pkgserver/index_test.go
Normal file
96
cmd/mbf/internal/pkgserver/index_test.go
Normal file
@@ -0,0 +1,96 @@
|
|||||||
|
package pkgserver
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// newIndex returns the address of a newly populated packageIndex.
|
||||||
|
func newIndex(t *testing.T) *packageIndex {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
var index packageIndex
|
||||||
|
if err := index.populate(nil); err != nil {
|
||||||
|
t.Fatalf("populate: error = %v", err)
|
||||||
|
}
|
||||||
|
return &index
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkStatus checks response status code.
func checkStatus(t *testing.T, resp *http.Response, want int) {
	t.Helper()

	got := resp.StatusCode
	if got == want {
		return
	}
	// Report codes by their standard text for readability.
	t.Errorf(
		"StatusCode: %s, want %s",
		http.StatusText(got),
		http.StatusText(want),
	)
}
|
||||||
|
|
||||||
|
// checkHeader checks the value of a header entry.
func checkHeader(t *testing.T, h http.Header, key, want string) {
	t.Helper()

	got := h.Get(key)
	if got == want {
		return
	}
	t.Errorf("%s: %q, want %q", key, got, want)
}
|
||||||
|
|
||||||
|
// checkAPIHeader checks common entries set for API endpoints.
|
||||||
|
func checkAPIHeader(t *testing.T, h http.Header) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
checkHeader(t, h, "Content-Type", "application/json; charset=utf-8")
|
||||||
|
checkHeader(t, h, "Cache-Control", "no-cache, no-store, must-revalidate")
|
||||||
|
checkHeader(t, h, "Pragma", "no-cache")
|
||||||
|
checkHeader(t, h, "Expires", "0")
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkPayloadFunc checks the JSON response of an API endpoint by passing it to f.
// The body is decoded into a zero T; f returning false fails the check and
// dumps the decoded value. In verbose mode the raw body is logged as well.
func checkPayloadFunc[T any](
	t *testing.T,
	resp *http.Response,
	f func(got *T) bool,
) {
	t.Helper()

	var got T
	r := io.Reader(resp.Body)
	if testing.Verbose() {
		// Tee the body so the raw JSON can be logged after decoding.
		var buf bytes.Buffer
		r = io.TeeReader(r, &buf)
		defer func() { t.Helper(); t.Log(buf.String()) }()
	}
	if err := json.NewDecoder(r).Decode(&got); err != nil {
		t.Fatalf("Decode: error = %v", err)
	}

	if !f(&got) {
		t.Errorf("Body: %#v", got)
	}
}
|
||||||
|
|
||||||
|
// checkPayload checks the JSON response of an API endpoint.
|
||||||
|
func checkPayload[T any](t *testing.T, resp *http.Response, want T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
checkPayloadFunc(t, resp, func(got *T) bool {
|
||||||
|
return reflect.DeepEqual(got, &want)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkError(t *testing.T, resp *http.Response, error string, code int) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
checkStatus(t, resp, code)
|
||||||
|
if got, _ := io.ReadAll(resp.Body); string(got) != fmt.Sprintln(error) {
|
||||||
|
t.Errorf("Body: %q, want %q", string(got), error)
|
||||||
|
}
|
||||||
|
}
|
||||||
81
cmd/mbf/internal/pkgserver/search.go
Normal file
81
cmd/mbf/internal/pkgserver/search.go
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
package pkgserver
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cmp"
|
||||||
|
"maps"
|
||||||
|
"regexp"
|
||||||
|
"slices"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// searchCache maps a query key (pattern, plus ";withDesc" when descriptions
// are searched) to its cached results.
type searchCache map[string]searchCacheEntry

// searchResult is one matched package together with its match positions and
// ranking score.
type searchResult struct {
	// Match index pairs within the package name.
	NameIndices [][]int `json:"name_matches"`
	// Match index pairs within the description, if searched.
	DescIndices [][]int `json:"desc_matches,omitempty"`
	// Ranking score; results are ordered by descending score.
	Score float64 `json:"score"`
	*metadata
}

// searchCacheEntry is a cached, fully ranked result set for one query.
type searchCacheEntry struct {
	// Originating query string.
	query string
	// Results ordered by descending score.
	results []searchResult
	// Entries past this instant are dropped by clean.
	expiry time.Time
}
|
||||||
|
|
||||||
|
func (index *packageIndex) performSearchQuery(limit int, i int, search string, desc bool) (int, []searchResult, error) {
|
||||||
|
query := search
|
||||||
|
if desc {
|
||||||
|
query += ";withDesc"
|
||||||
|
}
|
||||||
|
entry, ok := index.search[query]
|
||||||
|
if ok && len(entry.results) > 0 {
|
||||||
|
return len(entry.results), entry.results[min(i, len(entry.results)-1):min(i+limit, len(entry.results))], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
regex, err := regexp.Compile(search)
|
||||||
|
if err != nil {
|
||||||
|
return 0, make([]searchResult, 0), err
|
||||||
|
}
|
||||||
|
res := make([]searchResult, 0)
|
||||||
|
for p := range maps.Values(index.names) {
|
||||||
|
nameIndices := regex.FindAllIndex([]byte(p.Name), -1)
|
||||||
|
var descIndices [][]int = nil
|
||||||
|
if desc {
|
||||||
|
descIndices = regex.FindAllIndex([]byte(p.Description), -1)
|
||||||
|
}
|
||||||
|
if nameIndices == nil && descIndices == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
score := float64(indexsum(nameIndices)) / (float64(len(nameIndices)) + 1)
|
||||||
|
if desc {
|
||||||
|
score += float64(indexsum(descIndices)) / (float64(len(descIndices)) + 1) / 10.0
|
||||||
|
}
|
||||||
|
res = append(res, searchResult{
|
||||||
|
NameIndices: nameIndices,
|
||||||
|
DescIndices: descIndices,
|
||||||
|
Score: score,
|
||||||
|
metadata: p,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
slices.SortFunc(res[:], func(a, b searchResult) int { return -cmp.Compare(a.Score, b.Score) })
|
||||||
|
expiry := time.Now().Add(1 * time.Minute)
|
||||||
|
entry = searchCacheEntry{
|
||||||
|
query: search,
|
||||||
|
results: res,
|
||||||
|
expiry: expiry,
|
||||||
|
}
|
||||||
|
index.search[query] = entry
|
||||||
|
|
||||||
|
return len(res), res[i:min(i+limit, len(entry.results))], nil
|
||||||
|
}
|
||||||
|
func (s *searchCache) clean() {
|
||||||
|
maps.DeleteFunc(*s, func(_ string, v searchCacheEntry) bool {
|
||||||
|
return v.expiry.Before(time.Now())
|
||||||
|
})
|
||||||
|
}
|
||||||
|
// indexsum returns the total length covered by in, where each element is a
// [start, end) index pair as produced by [regexp.Regexp.FindAllIndex].
func indexsum(in [][]int) int {
	var sum int
	for _, pair := range in {
		sum += pair[1] - pair[0]
	}
	return sum
}
|
||||||
57
cmd/mbf/internal/pkgserver/ui/index.html
Normal file
57
cmd/mbf/internal/pkgserver/ui/index.html
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <link rel="stylesheet" href="style.css">
    <title>Hakurei PkgServer</title>
    <script src="index.js"></script>
</head>
<body>
<h1>Hakurei PkgServer</h1>
<!-- Controls for regular browsing; swapped with the search controls below
     when a search is active. -->
<div class="top-controls" id="top-controls-regular">
    <p>Showing entries <span id="entry-counter"></span>.</p>
    <span id="search-bar">
        <label for="search">Search: </label>
        <input type="text" name="search" id="search"/>
        <button onclick="doSearch()">Find</button>
        <label for="include-desc">Include descriptions: </label>
        <input type="checkbox" name="include-desc" id="include-desc" checked/>
    </span>
    <div><label for="count">Entries per page: </label><select name="count" id="count">
        <option value="10">10</option>
        <option value="20">20</option>
        <option value="30">30</option>
        <option value="50">50</option>
    </select></div>
    <!-- Option values correspond to the server-side sort order constants. -->
    <div><label for="sort">Sort by: </label><select name="sort" id="sort">
        <option value="0">Definition (ascending)</option>
        <option value="1">Definition (descending)</option>
        <option value="2">Name (ascending)</option>
        <option value="3">Name (descending)</option>
        <option value="4">Size (ascending)</option>
        <option value="5">Size (descending)</option>
    </select></div>
</div>
<!-- Controls shown while a search is active. -->
<div class="top-controls" id="search-top-controls" hidden>
    <p>Showing search results <span id="search-entry-counter"></span> for query "<span id="search-query"></span>".</p>
    <button onclick="exitSearch()">Back</button>
    <div><label for="search-count">Entries per page: </label><select name="search-count" id="search-count">
        <option value="10">10</option>
        <option value="20">20</option>
        <option value="30">30</option>
        <option value="50">50</option>
    </select></div>
    <p>Sorted by best match</p>
</div>
<div class="page-controls"><a href="javascript:prevPage()">« Previous</a> <input type="text" class="page-number" value="1"/> <a href="javascript:nextPage()">Next »</a></div>
<!-- Package rows are rendered into this table by the script. -->
<table id="pkg-list">
    <tr><td>Loading...</td></tr>
</table>
<div class="page-controls"><a href="javascript:prevPage()">« Previous</a> <input type="text" class="page-number" value="1"/> <a href="javascript:nextPage()">Next »</a></div>
<footer>
    <p>©<a href="https://hakurei.app/">Hakurei</a> (<span id="hakurei-version">unknown</span>). Licensed under the MIT license.</p>
</footer>
<script>main();</script>
</body>
</html>
|
||||||
331
cmd/mbf/internal/pkgserver/ui/index.ts
Normal file
331
cmd/mbf/internal/pkgserver/ui/index.ts
Normal file
@@ -0,0 +1,331 @@
|
|||||||
|
// PackageIndexEntry mirrors one entry of the package index JSON payloads
// returned by the /get endpoint (and embedded in search results).
interface PackageIndexEntry {
	name: string
	size?: number // artifact size in bytes; absent or 0 means unknown (see entryToHTML)
	description?: string
	website?: string
	version?: string
	report?: boolean // whether a cure status/log is available under /status/<name>
}
|
||||||
|
|
||||||
|
function entryToHTML(entry: PackageIndexEntry | SearchResult): HTMLTableRowElement {
|
||||||
|
let v = entry.version != null ? `<span>${escapeHtml(entry.version)}</span>` : ""
|
||||||
|
let s = entry.size != null && entry.size > 0 ? `<p>Size: ${toByteSizeString(entry.size)} (${entry.size})</p>` : ""
|
||||||
|
let n: string
|
||||||
|
let d: string
|
||||||
|
if ('name_matches' in entry) {
|
||||||
|
n = `<h2>${nameMatches(entry as SearchResult)} ${v}</h2>`
|
||||||
|
} else {
|
||||||
|
n = `<h2>${escapeHtml(entry.name)} ${v}</h2>`
|
||||||
|
}
|
||||||
|
if ('desc_matches' in entry && STATE.getIncludeDescriptions()) {
|
||||||
|
d = descMatches(entry as SearchResult)
|
||||||
|
} else {
|
||||||
|
d = (entry as PackageIndexEntry).description != null ? `<p>${escapeHtml((entry as PackageIndexEntry).description)}</p>` : ""
|
||||||
|
}
|
||||||
|
let w = entry.website != null ? `<a href="${encodeURI(entry.website)}">Website</a>` : ""
|
||||||
|
let r = entry.report ? `Log (<a href=\"${encodeURI('/api/v1/status/' + entry.name)}\">View</a> | <a href=\"${encodeURI('/status/' + entry.name)}\">Download</a>)` : ""
|
||||||
|
let row = <HTMLTableRowElement>(document.createElement('tr'))
|
||||||
|
row.innerHTML = `<td>
|
||||||
|
${n}
|
||||||
|
${d}
|
||||||
|
${s}
|
||||||
|
${w}
|
||||||
|
${r}
|
||||||
|
</td>`
|
||||||
|
return row
|
||||||
|
}
|
||||||
|
|
||||||
|
function nameMatches(sr: SearchResult): string {
|
||||||
|
return markMatches(sr.name, sr.name_matches)
|
||||||
|
}
|
||||||
|
|
||||||
|
function descMatches(sr: SearchResult): string {
|
||||||
|
return markMatches(sr.description!, sr.desc_matches)
|
||||||
|
}
|
||||||
|
|
||||||
|
function markMatches(str: string, indices: [number, number][]): string {
|
||||||
|
if (indices == null) {
|
||||||
|
return str
|
||||||
|
}
|
||||||
|
let out: string = ""
|
||||||
|
let j = 0
|
||||||
|
for (let i = 0; i < str.length; i++) {
|
||||||
|
if (j < indices.length) {
|
||||||
|
if (i === indices[j][0]) {
|
||||||
|
out += `<mark>${escapeHtmlChar(str[i])}`
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if (i === indices[j][1]) {
|
||||||
|
out += `</mark>${escapeHtmlChar(str[i])}`
|
||||||
|
j++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
out += escapeHtmlChar(str[i])
|
||||||
|
}
|
||||||
|
if (indices[j] !== undefined) {
|
||||||
|
out += "</mark>"
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
function toByteSizeString(bytes: number): string {
|
||||||
|
if (bytes == null) return `unspecified`
|
||||||
|
if (bytes < 1024) return `${bytes}B`
|
||||||
|
if (bytes < Math.pow(1024, 2)) return `${(bytes / 1024).toFixed(2)}kiB`
|
||||||
|
if (bytes < Math.pow(1024, 3)) return `${(bytes / Math.pow(1024, 2)).toFixed(2)}MiB`
|
||||||
|
if (bytes < Math.pow(1024, 4)) return `${(bytes / Math.pow(1024, 3)).toFixed(2)}GiB`
|
||||||
|
if (bytes < Math.pow(1024, 5)) return `${(bytes / Math.pow(1024, 4)).toFixed(2)}TiB`
|
||||||
|
return "not only is it big, it's large"
|
||||||
|
}
|
||||||
|
|
||||||
|
const API_VERSION = 1
// ENDPOINT is the versioned base path prefixed to every backend request.
const ENDPOINT = `/api/v${API_VERSION}`


// InfoPayload mirrors the JSON response of GET /api/v1/info.
interface InfoPayload {
	count?: number // total number of entries in the package index
	hakurei_version?: string // version string displayed in the footer
}
|
||||||
|
|
||||||
|
async function infoRequest(): Promise<InfoPayload> {
|
||||||
|
const res = await fetch(`${ENDPOINT}/info`)
|
||||||
|
const payload = await res.json()
|
||||||
|
return payload as InfoPayload
|
||||||
|
}
|
||||||
|
|
||||||
|
interface GetPayload {
|
||||||
|
values?: PackageIndexEntry[]
|
||||||
|
}
|
||||||
|
|
||||||
|
enum SortOrders {
|
||||||
|
DeclarationAscending,
|
||||||
|
DeclarationDescending,
|
||||||
|
NameAscending,
|
||||||
|
NameDescending
|
||||||
|
}
|
||||||
|
|
||||||
|
async function getRequest(limit: number, index: number, sort: SortOrders): Promise<GetPayload> {
|
||||||
|
const res = await fetch(`${ENDPOINT}/get?limit=${limit}&index=${index}&sort=${sort.valueOf()}`)
|
||||||
|
const payload = await res.json()
|
||||||
|
return payload as GetPayload
|
||||||
|
}
|
||||||
|
|
||||||
|
// SearchResult extends an index entry with fuzzy-match metadata from /search.
interface SearchResult extends PackageIndexEntry {
	name_matches: [number, number][] // half-open [start, end) ranges matched in name (see markMatches)
	desc_matches: [number, number][] // half-open [start, end) ranges matched in description
	score: number // match quality; results arrive sorted by best match
}


// SearchPayload mirrors the JSON response of GET /api/v1/search.
interface SearchPayload {
	count?: number
	values?: SearchResult[]
}
|
||||||
|
|
||||||
|
async function searchRequest(limit: number, index: number, search: string, desc: boolean): Promise<SearchPayload> {
|
||||||
|
const res = await fetch(`${ENDPOINT}/search?limit=${limit}&index=${index}&search=${encodeURIComponent(search)}&desc=${desc}`)
|
||||||
|
if (!res.ok) {
|
||||||
|
exitSearch()
|
||||||
|
alert("invalid search query!")
|
||||||
|
return Promise.reject(res.statusText)
|
||||||
|
}
|
||||||
|
const payload = await res.json()
|
||||||
|
return payload as SearchPayload
|
||||||
|
}
|
||||||
|
|
||||||
|
class State {
|
||||||
|
entriesPerPage: number = 10
|
||||||
|
entryIndex: number = 0
|
||||||
|
maxTotal: number = 0
|
||||||
|
maxEntries: number = 0
|
||||||
|
sort: SortOrders = SortOrders.DeclarationAscending
|
||||||
|
search: boolean = false
|
||||||
|
|
||||||
|
getEntriesPerPage(): number {
|
||||||
|
return this.entriesPerPage
|
||||||
|
}
|
||||||
|
|
||||||
|
setEntriesPerPage(entriesPerPage: number) {
|
||||||
|
this.entriesPerPage = entriesPerPage
|
||||||
|
this.setEntryIndex(Math.floor(this.getEntryIndex() / entriesPerPage) * entriesPerPage)
|
||||||
|
}
|
||||||
|
|
||||||
|
getEntryIndex(): number {
|
||||||
|
return this.entryIndex
|
||||||
|
}
|
||||||
|
|
||||||
|
setEntryIndex(entryIndex: number) {
|
||||||
|
this.entryIndex = entryIndex
|
||||||
|
this.updatePage()
|
||||||
|
this.updateRange()
|
||||||
|
this.updateListings()
|
||||||
|
}
|
||||||
|
|
||||||
|
getMaxTotal(): number {
|
||||||
|
return this.maxTotal
|
||||||
|
}
|
||||||
|
|
||||||
|
setMaxTotal(max: number) {
|
||||||
|
this.maxTotal = max
|
||||||
|
}
|
||||||
|
|
||||||
|
getSortOrder(): SortOrders {
|
||||||
|
return this.sort
|
||||||
|
}
|
||||||
|
|
||||||
|
setSortOrder(sortOrder: SortOrders) {
|
||||||
|
this.sort = sortOrder
|
||||||
|
this.setEntryIndex(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
updatePage() {
|
||||||
|
let page = Math.ceil(((this.getEntryIndex() + this.getEntriesPerPage()) - 1) / this.getEntriesPerPage())
|
||||||
|
for (let e of document.getElementsByClassName("page-number")) {
|
||||||
|
(e as HTMLInputElement).value = String(page)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
updateRange() {
|
||||||
|
let max = Math.min(this.getEntryIndex() + this.getEntriesPerPage(), this.getMaxTotal())
|
||||||
|
document.getElementById("entry-counter")!.textContent = `${this.getEntryIndex() + 1}-${max} of ${this.getMaxTotal()}`
|
||||||
|
if (this.search) {
|
||||||
|
document.getElementById("search-entry-counter")!.textContent = `${this.getEntryIndex() + 1}-${max} of ${this.maxTotal}/${this.maxEntries}`
|
||||||
|
document.getElementById("search-query")!.innerHTML = `<code>${escapeHtml(this.getSearchQuery())}</code>`
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
getSearchQuery(): string {
|
||||||
|
let queryString = document.getElementById("search")!;
|
||||||
|
return (queryString as HTMLInputElement).value
|
||||||
|
}
|
||||||
|
|
||||||
|
getIncludeDescriptions(): boolean {
|
||||||
|
let includeDesc = document.getElementById("include-desc")!;
|
||||||
|
return (includeDesc as HTMLInputElement).checked
|
||||||
|
}
|
||||||
|
|
||||||
|
updateListings() {
|
||||||
|
if (this.search) {
|
||||||
|
searchRequest(this.getEntriesPerPage(), this.getEntryIndex(), this.getSearchQuery(), this.getIncludeDescriptions())
|
||||||
|
.then(res => {
|
||||||
|
let table = document.getElementById("pkg-list")!
|
||||||
|
table.innerHTML = ''
|
||||||
|
for (let row of res.values!) {
|
||||||
|
table.appendChild(entryToHTML(row))
|
||||||
|
}
|
||||||
|
STATE.maxTotal = res.count!
|
||||||
|
STATE.updateRange()
|
||||||
|
if(res.count! < 1) {
|
||||||
|
exitSearch()
|
||||||
|
alert("no results found!")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
getRequest(this.getEntriesPerPage(), this.getEntryIndex(), this.getSortOrder())
|
||||||
|
.then(res => {
|
||||||
|
let table = document.getElementById("pkg-list")!
|
||||||
|
table.innerHTML = ''
|
||||||
|
for (let row of res.values!) {
|
||||||
|
table.appendChild(entryToHTML(row))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let STATE: State
|
||||||
|
|
||||||
|
|
||||||
|
function lastPageIndex(): number {
|
||||||
|
return Math.floor(STATE.getMaxTotal() / STATE.getEntriesPerPage()) * STATE.getEntriesPerPage()
|
||||||
|
}
|
||||||
|
|
||||||
|
function setPage(page: number) {
|
||||||
|
STATE.setEntryIndex(Math.max(0, Math.min(STATE.getEntriesPerPage() * (page - 1), lastPageIndex())))
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
function escapeHtml(str?: string): string {
|
||||||
|
let out: string = ''
|
||||||
|
if (str == undefined) return ""
|
||||||
|
for (let i = 0; i < str.length; i++) {
|
||||||
|
out += escapeHtmlChar(str[i])
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
function escapeHtmlChar(char: string): string {
|
||||||
|
if (char.length != 1) return char
|
||||||
|
switch (char[0]) {
|
||||||
|
case '&':
|
||||||
|
return "&"
|
||||||
|
case '<':
|
||||||
|
return "<"
|
||||||
|
case '>':
|
||||||
|
return ">"
|
||||||
|
case '"':
|
||||||
|
return """
|
||||||
|
case "'":
|
||||||
|
return "'"
|
||||||
|
default:
|
||||||
|
return char
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function firstPage() {
|
||||||
|
STATE.setEntryIndex(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
function prevPage() {
|
||||||
|
let index = STATE.getEntryIndex()
|
||||||
|
STATE.setEntryIndex(Math.max(0, index - STATE.getEntriesPerPage()))
|
||||||
|
}
|
||||||
|
|
||||||
|
function lastPage() {
|
||||||
|
STATE.setEntryIndex(lastPageIndex())
|
||||||
|
}
|
||||||
|
|
||||||
|
function nextPage() {
|
||||||
|
let index = STATE.getEntryIndex()
|
||||||
|
STATE.setEntryIndex(Math.min(lastPageIndex(), index + STATE.getEntriesPerPage()))
|
||||||
|
}
|
||||||
|
|
||||||
|
function doSearch() {
|
||||||
|
document.getElementById("top-controls-regular")!.toggleAttribute("hidden");
|
||||||
|
document.getElementById("search-top-controls")!.toggleAttribute("hidden");
|
||||||
|
STATE.search = true;
|
||||||
|
STATE.setEntryIndex(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
function exitSearch() {
|
||||||
|
document.getElementById("top-controls-regular")!.toggleAttribute("hidden");
|
||||||
|
document.getElementById("search-top-controls")!.toggleAttribute("hidden");
|
||||||
|
STATE.search = false;
|
||||||
|
STATE.setMaxTotal(STATE.maxEntries)
|
||||||
|
STATE.setEntryIndex(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
function main() {
|
||||||
|
STATE = new State()
|
||||||
|
infoRequest()
|
||||||
|
.then(res => {
|
||||||
|
STATE.maxEntries = res.count!
|
||||||
|
STATE.setMaxTotal(STATE.maxEntries)
|
||||||
|
document.getElementById("hakurei-version")!.textContent = res.hakurei_version!
|
||||||
|
STATE.updateRange()
|
||||||
|
STATE.updateListings()
|
||||||
|
})
|
||||||
|
for (let e of document.getElementsByClassName("page-number")) {
|
||||||
|
e.addEventListener("change", (_) => {
|
||||||
|
setPage(parseInt((e as HTMLInputElement).value))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
document.getElementById("count")?.addEventListener("change", (event) => {
|
||||||
|
STATE.setEntriesPerPage(parseInt((event.target as HTMLSelectElement).value))
|
||||||
|
})
|
||||||
|
document.getElementById("sort")?.addEventListener("change", (event) => {
|
||||||
|
STATE.setSortOrder(parseInt((event.target as HTMLSelectElement).value))
|
||||||
|
})
|
||||||
|
document.getElementById("search")?.addEventListener("keyup", (event) => {
|
||||||
|
if (event.key === 'Enter') doSearch()
|
||||||
|
})
|
||||||
|
}
|
||||||
21
cmd/mbf/internal/pkgserver/ui/style.css
Normal file
21
cmd/mbf/internal/pkgserver/ui/style.css
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
/* Page-number input boxes above and below the listing table.
 * (An identical duplicate of this rule was removed.) */
.page-number {
	width: 2em;
	text-align: center;
}

@media (prefers-color-scheme: dark) {
	html {
		background-color: #2c2c2c;
		color: ghostwhite;
	}
}

@media (prefers-color-scheme: light) {
	html {
		background-color: #d3d3d3;
		color: black;
	}
}
|
||||||
8
cmd/mbf/internal/pkgserver/ui/tsconfig.json
Normal file
8
cmd/mbf/internal/pkgserver/ui/tsconfig.json
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
{
|
||||||
|
"compilerOptions": {
|
||||||
|
"target": "ES2024",
|
||||||
|
"strict": true,
|
||||||
|
"alwaysStrict": true,
|
||||||
|
"outDir": "static"
|
||||||
|
}
|
||||||
|
}
|
||||||
9
cmd/mbf/internal/pkgserver/ui/ui.go
Normal file
9
cmd/mbf/internal/pkgserver/ui/ui.go
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
// Package ui holds the static web UI.
package ui

import "net/http"

// Register arranges for mux to serve the embedded frontend.
func Register(mux *http.ServeMux) {
	fileServer := http.FileServer(http.FS(static))
	mux.Handle("GET /", fileServer)
}
|
||||||
21
cmd/mbf/internal/pkgserver/ui/ui_full.go
Normal file
21
cmd/mbf/internal/pkgserver/ui/ui_full.go
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
//go:build frontend

package ui

import (
	"embed"
	"io/fs"
)

//go:generate tsc
//go:generate cp index.html style.css static

// staticFS embeds the compiled frontend under the "static/" path prefix.
// (Renamed from _static: Go identifiers use MixedCaps, not underscores.)
//
//go:embed static
var staticFS embed.FS

// static exposes the embedded files rooted at the "static" subtree, so the
// file server maps "/" directly onto the directory contents.
var static = func() fs.FS {
	f, err := fs.Sub(staticFS, "static")
	if err != nil {
		// The subtree is compiled in; failure here is a programmer error.
		panic(err)
	}
	return f
}()
|
||||||
7
cmd/mbf/internal/pkgserver/ui/ui_stub.go
Normal file
7
cmd/mbf/internal/pkgserver/ui/ui_stub.go
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
//go:build !frontend

package ui

import "testing/fstest"

// static is an empty in-memory filesystem standing in for the embedded
// frontend when the build omits the "frontend" tag; Register then serves
// nothing but the package still compiles.
var static fstest.MapFS
|
||||||
525
cmd/mbf/main.go
525
cmd/mbf/main.go
@@ -14,16 +14,18 @@ package main
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"crypto/sha512"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"log"
|
"log"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"syscall"
|
"syscall"
|
||||||
@@ -40,6 +42,9 @@ import (
|
|||||||
"hakurei.app/internal/pkg"
|
"hakurei.app/internal/pkg"
|
||||||
"hakurei.app/internal/rosa"
|
"hakurei.app/internal/rosa"
|
||||||
"hakurei.app/message"
|
"hakurei.app/message"
|
||||||
|
|
||||||
|
"hakurei.app/cmd/mbf/internal/pkgserver"
|
||||||
|
"hakurei.app/cmd/mbf/internal/pkgserver/ui"
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
@@ -53,77 +58,106 @@ func main() {
|
|||||||
log.Fatal("this program must not run as root")
|
log.Fatal("this program must not run as root")
|
||||||
}
|
}
|
||||||
|
|
||||||
var cache *pkg.Cache
|
|
||||||
ctx, stop := signal.NotifyContext(context.Background(),
|
ctx, stop := signal.NotifyContext(context.Background(),
|
||||||
syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
|
syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
|
||||||
defer stop()
|
defer stop()
|
||||||
defer func() {
|
|
||||||
if cache != nil {
|
|
||||||
cache.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
if r := recover(); r != nil {
|
var cm cache
|
||||||
fmt.Println(r)
|
defer func() { cm.Close() }()
|
||||||
log.Fatal("consider scrubbing the on-disk cache")
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
var (
|
var (
|
||||||
flagQuiet bool
|
flagQuiet bool
|
||||||
flagCures int
|
flagCheck bool
|
||||||
flagBase string
|
flagLTO bool
|
||||||
flagIdle bool
|
|
||||||
|
|
||||||
flagHostAbstract bool
|
addr net.UnixAddr
|
||||||
)
|
)
|
||||||
c := command.New(os.Stderr, log.Printf, "mbf", func([]string) (err error) {
|
c := command.New(os.Stderr, log.Printf, "mbf", func([]string) error {
|
||||||
msg.SwapVerbose(!flagQuiet)
|
msg.SwapVerbose(!flagQuiet)
|
||||||
|
cm.ctx, cm.msg = ctx, msg
|
||||||
flagBase = os.ExpandEnv(flagBase)
|
cm.base = os.ExpandEnv(cm.base)
|
||||||
if flagBase == "" {
|
if cm.base == "" {
|
||||||
flagBase = "cache"
|
cm.base = "cache"
|
||||||
}
|
}
|
||||||
|
|
||||||
var base *check.Absolute
|
addr.Net = "unix"
|
||||||
if flagBase, err = filepath.Abs(flagBase); err != nil {
|
addr.Name = os.ExpandEnv(addr.Name)
|
||||||
return
|
if addr.Name == "" {
|
||||||
} else if base, err = check.NewAbs(flagBase); err != nil {
|
addr.Name = filepath.Join(cm.base, "daemon")
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var flags int
|
var flags int
|
||||||
if flagIdle {
|
if !flagCheck {
|
||||||
flags |= pkg.CSchedIdle
|
flags |= rosa.OptSkipCheck
|
||||||
}
|
}
|
||||||
if flagHostAbstract {
|
if !flagLTO {
|
||||||
flags |= pkg.CHostAbstract
|
flags |= rosa.OptLLVMNoLTO
|
||||||
}
|
}
|
||||||
cache, err = pkg.Open(ctx, msg, flags, flagCures, base)
|
rosa.DropCaches(flags)
|
||||||
|
|
||||||
return
|
return nil
|
||||||
}).Flag(
|
}).Flag(
|
||||||
&flagQuiet,
|
&flagQuiet,
|
||||||
"q", command.BoolFlag(false),
|
"q", command.BoolFlag(false),
|
||||||
"Do not print cure messages",
|
"Do not print cure messages",
|
||||||
).Flag(
|
).Flag(
|
||||||
&flagCures,
|
&flagLTO,
|
||||||
|
"lto", command.BoolFlag(false),
|
||||||
|
"Enable LTO in stage2 and stage3 LLVM toolchains",
|
||||||
|
).Flag(
|
||||||
|
&flagCheck,
|
||||||
|
"check", command.BoolFlag(true),
|
||||||
|
"Run test suites",
|
||||||
|
).Flag(
|
||||||
|
&cm.cures,
|
||||||
"cures", command.IntFlag(0),
|
"cures", command.IntFlag(0),
|
||||||
"Maximum number of dependencies to cure at any given time",
|
"Maximum number of dependencies to cure at any given time",
|
||||||
).Flag(
|
).Flag(
|
||||||
&flagBase,
|
&cm.jobs,
|
||||||
|
"jobs", command.IntFlag(0),
|
||||||
|
"Preferred number of jobs to run, when applicable",
|
||||||
|
).Flag(
|
||||||
|
&cm.base,
|
||||||
"d", command.StringFlag("$MBF_CACHE_DIR"),
|
"d", command.StringFlag("$MBF_CACHE_DIR"),
|
||||||
"Directory to store cured artifacts",
|
"Directory to store cured artifacts",
|
||||||
).Flag(
|
).Flag(
|
||||||
&flagIdle,
|
&cm.idle,
|
||||||
"sched-idle", command.BoolFlag(false),
|
"sched-idle", command.BoolFlag(false),
|
||||||
"Set SCHED_IDLE scheduling policy",
|
"Set SCHED_IDLE scheduling policy",
|
||||||
).Flag(
|
).Flag(
|
||||||
&flagHostAbstract,
|
&cm.hostAbstract,
|
||||||
"host-abstract", command.BoolFlag(
|
"host-abstract", command.BoolFlag(
|
||||||
os.Getenv("MBF_HOST_ABSTRACT") != "",
|
os.Getenv("MBF_HOST_ABSTRACT") != "",
|
||||||
),
|
),
|
||||||
"Do not restrict networked cure containers from connecting to host "+
|
"Do not restrict networked cure containers from connecting to host "+
|
||||||
"abstract UNIX sockets",
|
"abstract UNIX sockets",
|
||||||
|
).Flag(
|
||||||
|
&addr.Name,
|
||||||
|
"socket", command.StringFlag("$MBF_DAEMON_SOCKET"),
|
||||||
|
"Pathname of socket to bind to",
|
||||||
|
)
|
||||||
|
|
||||||
|
c.NewCommand(
|
||||||
|
"checksum", "Compute checksum of data read from standard input",
|
||||||
|
func([]string) error {
|
||||||
|
done := make(chan struct{})
|
||||||
|
defer close(done)
|
||||||
|
go func() {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
os.Exit(1)
|
||||||
|
case <-done:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
h := sha512.New384()
|
||||||
|
if _, err := io.Copy(h, os.Stdin); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
log.Println(pkg.Encode(pkg.Checksum(h.Sum(nil))))
|
||||||
|
return nil
|
||||||
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
{
|
{
|
||||||
@@ -137,7 +171,9 @@ func main() {
|
|||||||
if flagShifts < 0 || flagShifts > 31 {
|
if flagShifts < 0 || flagShifts > 31 {
|
||||||
flagShifts = 12
|
flagShifts = 12
|
||||||
}
|
}
|
||||||
return cache.Scrub(runtime.NumCPU() << flagShifts)
|
return cm.Do(func(cache *pkg.Cache) error {
|
||||||
|
return cache.Scrub(runtime.NumCPU() << flagShifts)
|
||||||
|
})
|
||||||
},
|
},
|
||||||
).Flag(
|
).Flag(
|
||||||
&flagShifts,
|
&flagShifts,
|
||||||
@@ -148,6 +184,7 @@ func main() {
|
|||||||
|
|
||||||
{
|
{
|
||||||
var (
|
var (
|
||||||
|
flagBind string
|
||||||
flagStatus bool
|
flagStatus bool
|
||||||
flagReport string
|
flagReport string
|
||||||
)
|
)
|
||||||
@@ -155,9 +192,7 @@ func main() {
|
|||||||
"info",
|
"info",
|
||||||
"Display out-of-band metadata of an artifact",
|
"Display out-of-band metadata of an artifact",
|
||||||
func(args []string) (err error) {
|
func(args []string) (err error) {
|
||||||
if len(args) == 0 {
|
const shutdownTimeout = 15 * time.Second
|
||||||
return errors.New("info requires at least 1 argument")
|
|
||||||
}
|
|
||||||
|
|
||||||
var r *rosa.Report
|
var r *rosa.Report
|
||||||
if flagReport != "" {
|
if flagReport != "" {
|
||||||
@@ -172,88 +207,46 @@ func main() {
|
|||||||
defer r.HandleAccess(&err)()
|
defer r.HandleAccess(&err)()
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, name := range args {
|
if flagBind == "" {
|
||||||
if p, ok := rosa.ResolveName(name); !ok {
|
return commandInfo(&cm, args, os.Stdout, flagStatus, r)
|
||||||
return fmt.Errorf("unknown artifact %q", name)
|
|
||||||
} else {
|
|
||||||
var suffix string
|
|
||||||
if version := rosa.Std.Version(p); version != rosa.Unversioned {
|
|
||||||
suffix += "-" + version
|
|
||||||
}
|
|
||||||
fmt.Println("name : " + name + suffix)
|
|
||||||
|
|
||||||
meta := rosa.GetMetadata(p)
|
|
||||||
fmt.Println("description : " + meta.Description)
|
|
||||||
if meta.Website != "" {
|
|
||||||
fmt.Println("website : " +
|
|
||||||
strings.TrimSuffix(meta.Website, "/"))
|
|
||||||
}
|
|
||||||
if len(meta.Dependencies) > 0 {
|
|
||||||
fmt.Print("depends on :")
|
|
||||||
for _, d := range meta.Dependencies {
|
|
||||||
s := rosa.GetMetadata(d).Name
|
|
||||||
if version := rosa.Std.Version(d); version != rosa.Unversioned {
|
|
||||||
s += "-" + version
|
|
||||||
}
|
|
||||||
fmt.Print(" " + s)
|
|
||||||
}
|
|
||||||
fmt.Println()
|
|
||||||
}
|
|
||||||
|
|
||||||
const statusPrefix = "status : "
|
|
||||||
if flagStatus {
|
|
||||||
if r == nil {
|
|
||||||
var f io.ReadSeekCloser
|
|
||||||
f, err = cache.OpenStatus(rosa.Std.Load(p))
|
|
||||||
if err != nil {
|
|
||||||
if errors.Is(err, os.ErrNotExist) {
|
|
||||||
fmt.Println(
|
|
||||||
statusPrefix + "not yet cured",
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
fmt.Print(statusPrefix)
|
|
||||||
_, err = io.Copy(os.Stdout, f)
|
|
||||||
if err = errors.Join(err, f.Close()); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
status, n := r.ArtifactOf(cache.Ident(rosa.Std.Load(p)))
|
|
||||||
if status == nil {
|
|
||||||
fmt.Println(
|
|
||||||
statusPrefix + "not in report",
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
fmt.Println("size :", n)
|
|
||||||
fmt.Print(statusPrefix)
|
|
||||||
if _, err = os.Stdout.Write(status); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if i != len(args)-1 {
|
|
||||||
fmt.Println()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return nil
|
|
||||||
|
var mux http.ServeMux
|
||||||
|
ui.Register(&mux)
|
||||||
|
if err = pkgserver.Register(ctx, &mux, r); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
server := http.Server{Addr: flagBind, Handler: &mux}
|
||||||
|
go func() {
|
||||||
|
<-ctx.Done()
|
||||||
|
cc, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
|
||||||
|
defer cancel()
|
||||||
|
if _err := server.Shutdown(cc); _err != nil {
|
||||||
|
log.Fatal(_err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
msg.Verbosef("listening on %q", flagBind)
|
||||||
|
err = server.ListenAndServe()
|
||||||
|
if errors.Is(err, http.ErrServerClosed) {
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
return
|
||||||
},
|
},
|
||||||
).
|
).Flag(
|
||||||
Flag(
|
&flagBind,
|
||||||
&flagStatus,
|
"bind", command.StringFlag(""),
|
||||||
"status", command.BoolFlag(false),
|
"TCP address for the server to listen on",
|
||||||
"Display cure status if available",
|
).Flag(
|
||||||
).
|
&flagStatus,
|
||||||
Flag(
|
"status", command.BoolFlag(false),
|
||||||
&flagReport,
|
"Display cure status if available",
|
||||||
"report", command.StringFlag(""),
|
).Flag(
|
||||||
"Load cure status from this report file instead of cache",
|
&flagReport,
|
||||||
)
|
"report", command.StringFlag(""),
|
||||||
|
"Load cure status from this report file instead of cache",
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
c.NewCommand(
|
c.NewCommand(
|
||||||
@@ -287,7 +280,9 @@ func main() {
|
|||||||
if ext.Isatty(int(w.Fd())) {
|
if ext.Isatty(int(w.Fd())) {
|
||||||
return errors.New("output appears to be a terminal")
|
return errors.New("output appears to be a terminal")
|
||||||
}
|
}
|
||||||
return rosa.WriteReport(msg, w, cache)
|
return cm.Do(func(cache *pkg.Cache) error {
|
||||||
|
return rosa.WriteReport(msg, w, cache)
|
||||||
|
})
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -350,14 +345,26 @@ func main() {
|
|||||||
" package(s) are out of date"))
|
" package(s) are out of date"))
|
||||||
}
|
}
|
||||||
return errors.Join(errs...)
|
return errors.Join(errs...)
|
||||||
}).
|
}).Flag(
|
||||||
Flag(
|
&flagJobs,
|
||||||
&flagJobs,
|
"j", command.IntFlag(32),
|
||||||
"j", command.IntFlag(32),
|
"Maximum number of simultaneous connections",
|
||||||
"Maximum number of simultaneous connections",
|
)
|
||||||
)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
c.NewCommand(
|
||||||
|
"daemon",
|
||||||
|
"Service artifact IR with Rosa OS extensions",
|
||||||
|
func(args []string) error {
|
||||||
|
ul, err := net.ListenUnix("unix", &addr)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
log.Printf("listening on pathname socket at %s", addr.Name)
|
||||||
|
return serve(ctx, log.Default(), &cm, ul)
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
{
|
{
|
||||||
var (
|
var (
|
||||||
flagGentoo string
|
flagGentoo string
|
||||||
@@ -382,25 +389,37 @@ func main() {
|
|||||||
rosa.SetGentooStage3(flagGentoo, checksum)
|
rosa.SetGentooStage3(flagGentoo, checksum)
|
||||||
}
|
}
|
||||||
|
|
||||||
_, _, _, stage1 := (t - 2).NewLLVM()
|
|
||||||
_, _, _, stage2 := (t - 1).NewLLVM()
|
|
||||||
_, _, _, stage3 := t.NewLLVM()
|
|
||||||
var (
|
var (
|
||||||
pathname *check.Absolute
|
pathname *check.Absolute
|
||||||
checksum [2]unique.Handle[pkg.Checksum]
|
checksum [2]unique.Handle[pkg.Checksum]
|
||||||
)
|
)
|
||||||
|
|
||||||
if pathname, _, err = cache.Cure(stage1); err != nil {
|
if err = cm.Do(func(cache *pkg.Cache) (err error) {
|
||||||
return err
|
pathname, _, err = cache.Cure(
|
||||||
|
(t - 2).Load(rosa.LLVM),
|
||||||
|
)
|
||||||
|
return
|
||||||
|
}); err != nil {
|
||||||
|
return
|
||||||
}
|
}
|
||||||
log.Println("stage1:", pathname)
|
log.Println("stage1:", pathname)
|
||||||
|
|
||||||
if pathname, checksum[0], err = cache.Cure(stage2); err != nil {
|
if err = cm.Do(func(cache *pkg.Cache) (err error) {
|
||||||
return err
|
pathname, checksum[0], err = cache.Cure(
|
||||||
|
(t - 1).Load(rosa.LLVM),
|
||||||
|
)
|
||||||
|
return
|
||||||
|
}); err != nil {
|
||||||
|
return
|
||||||
}
|
}
|
||||||
log.Println("stage2:", pathname)
|
log.Println("stage2:", pathname)
|
||||||
if pathname, checksum[1], err = cache.Cure(stage3); err != nil {
|
if err = cm.Do(func(cache *pkg.Cache) (err error) {
|
||||||
return err
|
pathname, checksum[1], err = cache.Cure(
|
||||||
|
t.Load(rosa.LLVM),
|
||||||
|
)
|
||||||
|
return
|
||||||
|
}); err != nil {
|
||||||
|
return
|
||||||
}
|
}
|
||||||
log.Println("stage3:", pathname)
|
log.Println("stage3:", pathname)
|
||||||
|
|
||||||
@@ -417,39 +436,44 @@ func main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if flagStage0 {
|
if flagStage0 {
|
||||||
if pathname, _, err = cache.Cure(
|
if err = cm.Do(func(cache *pkg.Cache) (err error) {
|
||||||
t.Load(rosa.Stage0),
|
pathname, _, err = cache.Cure(
|
||||||
); err != nil {
|
t.Load(rosa.Stage0),
|
||||||
return err
|
)
|
||||||
|
return
|
||||||
|
}); err != nil {
|
||||||
|
return
|
||||||
}
|
}
|
||||||
log.Println(pathname)
|
log.Println(pathname)
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
},
|
},
|
||||||
).
|
).Flag(
|
||||||
Flag(
|
&flagGentoo,
|
||||||
&flagGentoo,
|
"gentoo", command.StringFlag(""),
|
||||||
"gentoo", command.StringFlag(""),
|
"Bootstrap from a Gentoo stage3 tarball",
|
||||||
"Bootstrap from a Gentoo stage3 tarball",
|
).Flag(
|
||||||
).
|
&flagChecksum,
|
||||||
Flag(
|
"checksum", command.StringFlag(""),
|
||||||
&flagChecksum,
|
"Checksum of Gentoo stage3 tarball",
|
||||||
"checksum", command.StringFlag(""),
|
).Flag(
|
||||||
"Checksum of Gentoo stage3 tarball",
|
&flagStage0,
|
||||||
).
|
"stage0", command.BoolFlag(false),
|
||||||
Flag(
|
"Create bootstrap stage0 tarball",
|
||||||
&flagStage0,
|
)
|
||||||
"stage0", command.BoolFlag(false),
|
|
||||||
"Create bootstrap stage0 tarball",
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
{
|
||||||
var (
|
var (
|
||||||
flagDump string
|
flagDump string
|
||||||
flagEnter bool
|
flagEnter bool
|
||||||
flagExport string
|
flagExport string
|
||||||
|
flagRemote bool
|
||||||
|
flagNoReply bool
|
||||||
|
|
||||||
|
flagBoot bool
|
||||||
|
flagStd bool
|
||||||
)
|
)
|
||||||
c.NewCommand(
|
c.NewCommand(
|
||||||
"cure",
|
"cure",
|
||||||
@@ -463,9 +487,20 @@ func main() {
|
|||||||
return fmt.Errorf("unknown artifact %q", args[0])
|
return fmt.Errorf("unknown artifact %q", args[0])
|
||||||
}
|
}
|
||||||
|
|
||||||
|
t := rosa.Std
|
||||||
|
if flagBoot {
|
||||||
|
t -= 2
|
||||||
|
} else if flagStd {
|
||||||
|
t -= 1
|
||||||
|
}
|
||||||
|
|
||||||
switch {
|
switch {
|
||||||
default:
|
default:
|
||||||
pathname, _, err := cache.Cure(rosa.Std.Load(p))
|
var pathname *check.Absolute
|
||||||
|
err := cm.Do(func(cache *pkg.Cache) (err error) {
|
||||||
|
pathname, _, err = cache.Cure(t.Load(p))
|
||||||
|
return
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -505,7 +540,7 @@ func main() {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = cache.EncodeAll(f, rosa.Std.Load(p)); err != nil {
|
if err = pkg.NewIR().EncodeAll(f, rosa.Std.Load(p)); err != nil {
|
||||||
_ = f.Close()
|
_ = f.Close()
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -513,33 +548,76 @@ func main() {
|
|||||||
return f.Close()
|
return f.Close()
|
||||||
|
|
||||||
case flagEnter:
|
case flagEnter:
|
||||||
return cache.EnterExec(
|
return cm.Do(func(cache *pkg.Cache) error {
|
||||||
ctx,
|
return cache.EnterExec(
|
||||||
rosa.Std.Load(p),
|
ctx,
|
||||||
true, os.Stdin, os.Stdout, os.Stderr,
|
t.Load(p),
|
||||||
rosa.AbsSystem.Append("bin", "mksh"),
|
true, os.Stdin, os.Stdout, os.Stderr,
|
||||||
"sh",
|
rosa.AbsSystem.Append("bin", "mksh"),
|
||||||
)
|
"sh",
|
||||||
|
)
|
||||||
|
})
|
||||||
|
|
||||||
|
case flagRemote:
|
||||||
|
var flags uint64
|
||||||
|
if flagNoReply {
|
||||||
|
flags |= remoteNoReply
|
||||||
|
}
|
||||||
|
a := t.Load(p)
|
||||||
|
pathname, err := cureRemote(ctx, &addr, a, flags)
|
||||||
|
if !flagNoReply && err == nil {
|
||||||
|
log.Println(pathname)
|
||||||
|
}
|
||||||
|
|
||||||
|
if errors.Is(err, context.Canceled) {
|
||||||
|
cc, cancel := context.WithDeadline(context.Background(), daemonDeadline())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
if _err := cancelRemote(cc, &addr, a, false); _err != nil {
|
||||||
|
log.Println(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
).
|
).Flag(
|
||||||
Flag(
|
&flagDump,
|
||||||
&flagDump,
|
"dump", command.StringFlag(""),
|
||||||
"dump", command.StringFlag(""),
|
"Write IR to specified pathname and terminate",
|
||||||
"Write IR to specified pathname and terminate",
|
).Flag(
|
||||||
).
|
&flagExport,
|
||||||
Flag(
|
"export", command.StringFlag(""),
|
||||||
&flagExport,
|
"Export cured artifact to specified pathname",
|
||||||
"export", command.StringFlag(""),
|
).Flag(
|
||||||
"Export cured artifact to specified pathname",
|
&flagEnter,
|
||||||
).
|
"enter", command.BoolFlag(false),
|
||||||
Flag(
|
"Enter cure container with an interactive shell",
|
||||||
&flagEnter,
|
).Flag(
|
||||||
"enter", command.BoolFlag(false),
|
&flagRemote,
|
||||||
"Enter cure container with an interactive shell",
|
"daemon", command.BoolFlag(false),
|
||||||
)
|
"Cure artifact on the daemon",
|
||||||
|
).Flag(
|
||||||
|
&flagNoReply,
|
||||||
|
"no-reply", command.BoolFlag(false),
|
||||||
|
"Do not receive a reply from the daemon",
|
||||||
|
).Flag(
|
||||||
|
&flagBoot,
|
||||||
|
"boot", command.BoolFlag(false),
|
||||||
|
"Build on the stage0 toolchain",
|
||||||
|
).Flag(
|
||||||
|
&flagStd,
|
||||||
|
"std", command.BoolFlag(false),
|
||||||
|
"Build on the intermediate toolchain",
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
c.NewCommand(
|
||||||
|
"abort",
|
||||||
|
"Abort all pending cures on the daemon",
|
||||||
|
func([]string) error { return abortRemote(ctx, &addr, false) },
|
||||||
|
)
|
||||||
|
|
||||||
{
|
{
|
||||||
var (
|
var (
|
||||||
flagNet bool
|
flagNet bool
|
||||||
@@ -551,7 +629,7 @@ func main() {
|
|||||||
"shell",
|
"shell",
|
||||||
"Interactive shell in the specified Rosa OS environment",
|
"Interactive shell in the specified Rosa OS environment",
|
||||||
func(args []string) error {
|
func(args []string) error {
|
||||||
presets := make([]rosa.PArtifact, len(args))
|
presets := make([]rosa.PArtifact, len(args)+3)
|
||||||
for i, arg := range args {
|
for i, arg := range args {
|
||||||
p, ok := rosa.ResolveName(arg)
|
p, ok := rosa.ResolveName(arg)
|
||||||
if !ok {
|
if !ok {
|
||||||
@@ -559,21 +637,24 @@ func main() {
|
|||||||
}
|
}
|
||||||
presets[i] = p
|
presets[i] = p
|
||||||
}
|
}
|
||||||
|
|
||||||
|
base := rosa.LLVM
|
||||||
|
if !flagWithToolchain {
|
||||||
|
base = rosa.Musl
|
||||||
|
}
|
||||||
|
presets = append(presets,
|
||||||
|
base,
|
||||||
|
rosa.Mksh,
|
||||||
|
rosa.Toybox,
|
||||||
|
)
|
||||||
|
|
||||||
root := make(pkg.Collect, 0, 6+len(args))
|
root := make(pkg.Collect, 0, 6+len(args))
|
||||||
root = rosa.Std.AppendPresets(root, presets...)
|
root = rosa.Std.AppendPresets(root, presets...)
|
||||||
|
|
||||||
if flagWithToolchain {
|
if err := cm.Do(func(cache *pkg.Cache) error {
|
||||||
musl, compilerRT, runtimes, clang := (rosa.Std - 1).NewLLVM()
|
_, _, err := cache.Cure(&root)
|
||||||
root = append(root, musl, compilerRT, runtimes, clang)
|
return err
|
||||||
} else {
|
}); err == nil {
|
||||||
root = append(root, rosa.Std.Load(rosa.Musl))
|
|
||||||
}
|
|
||||||
root = append(root,
|
|
||||||
rosa.Std.Load(rosa.Mksh),
|
|
||||||
rosa.Std.Load(rosa.Toybox),
|
|
||||||
)
|
|
||||||
|
|
||||||
if _, _, err := cache.Cure(&root); err == nil {
|
|
||||||
return errors.New("unreachable")
|
return errors.New("unreachable")
|
||||||
} else if !pkg.IsCollected(err) {
|
} else if !pkg.IsCollected(err) {
|
||||||
return err
|
return err
|
||||||
@@ -585,11 +666,22 @@ func main() {
|
|||||||
}
|
}
|
||||||
cured := make(map[pkg.Artifact]cureRes)
|
cured := make(map[pkg.Artifact]cureRes)
|
||||||
for _, a := range root {
|
for _, a := range root {
|
||||||
pathname, checksum, err := cache.Cure(a)
|
if err := cm.Do(func(cache *pkg.Cache) error {
|
||||||
if err != nil {
|
pathname, checksum, err := cache.Cure(a)
|
||||||
|
if err == nil {
|
||||||
|
cured[a] = cureRes{pathname, checksum}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// explicitly open for direct error-free use from this point
|
||||||
|
if cm.c == nil {
|
||||||
|
if err := cm.open(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
cured[a] = cureRes{pathname, checksum}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
layers := pkg.PromoteLayers(root, func(a pkg.Artifact) (
|
layers := pkg.PromoteLayers(root, func(a pkg.Artifact) (
|
||||||
@@ -599,7 +691,7 @@ func main() {
|
|||||||
res := cured[a]
|
res := cured[a]
|
||||||
return res.pathname, res.checksum
|
return res.pathname, res.checksum
|
||||||
}, func(i int, d pkg.Artifact) {
|
}, func(i int, d pkg.Artifact) {
|
||||||
r := pkg.Encode(cache.Ident(d).Value())
|
r := pkg.Encode(cm.c.Ident(d).Value())
|
||||||
if s, ok := d.(fmt.Stringer); ok {
|
if s, ok := d.(fmt.Stringer); ok {
|
||||||
if name := s.String(); name != "" {
|
if name := s.String(); name != "" {
|
||||||
r += "-" + name
|
r += "-" + name
|
||||||
@@ -663,22 +755,19 @@ func main() {
|
|||||||
}
|
}
|
||||||
return z.Wait()
|
return z.Wait()
|
||||||
},
|
},
|
||||||
).
|
).Flag(
|
||||||
Flag(
|
&flagNet,
|
||||||
&flagNet,
|
"net", command.BoolFlag(false),
|
||||||
"net", command.BoolFlag(false),
|
"Share host net namespace",
|
||||||
"Share host net namespace",
|
).Flag(
|
||||||
).
|
&flagSession,
|
||||||
Flag(
|
"session", command.BoolFlag(true),
|
||||||
&flagSession,
|
"Retain session",
|
||||||
"session", command.BoolFlag(true),
|
).Flag(
|
||||||
"Retain session",
|
&flagWithToolchain,
|
||||||
).
|
"with-toolchain", command.BoolFlag(false),
|
||||||
Flag(
|
"Include the stage2 LLVM toolchain",
|
||||||
&flagWithToolchain,
|
)
|
||||||
"with-toolchain", command.BoolFlag(false),
|
|
||||||
"Include the stage2 LLVM toolchain",
|
|
||||||
)
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -689,9 +778,7 @@ func main() {
|
|||||||
)
|
)
|
||||||
|
|
||||||
c.MustParse(os.Args[1:], func(err error) {
|
c.MustParse(os.Args[1:], func(err error) {
|
||||||
if cache != nil {
|
cm.Close()
|
||||||
cache.Close()
|
|
||||||
}
|
|
||||||
if w, ok := err.(interface{ Unwrap() []error }); !ok {
|
if w, ok := err.(interface{ Unwrap() []error }); !ok {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
} else {
|
} else {
|
||||||
|
|||||||
47
cmd/mbf/main_test.go
Normal file
47
cmd/mbf/main_test.go
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"hakurei.app/internal/rosa"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestMain(m *testing.M) {
|
||||||
|
rosa.DropCaches(rosa.OptLLVMNoLTO)
|
||||||
|
os.Exit(m.Run())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCureAll(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
const env = "ROSA_TEST_DAEMON"
|
||||||
|
|
||||||
|
if !testing.Verbose() {
|
||||||
|
t.Skip("verbose flag not set")
|
||||||
|
}
|
||||||
|
|
||||||
|
pathname, ok := os.LookupEnv(env)
|
||||||
|
if !ok {
|
||||||
|
t.Skip(env + " not set")
|
||||||
|
}
|
||||||
|
|
||||||
|
addr := net.UnixAddr{Net: "unix", Name: pathname}
|
||||||
|
t.Cleanup(func() {
|
||||||
|
if t.Failed() {
|
||||||
|
if err := abortRemote(t.Context(), &addr, false); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
for i := range rosa.PresetEnd {
|
||||||
|
p := rosa.PArtifact(i)
|
||||||
|
t.Run(rosa.GetMetadata(p).Name, func(t *testing.T) {
|
||||||
|
_, err := cureRemote(t.Context(), &addr, rosa.Std.Load(p), 0)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -16,6 +16,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"syscall"
|
"syscall"
|
||||||
"testing"
|
"testing"
|
||||||
|
_ "unsafe" // for go:linkname
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/check"
|
||||||
"hakurei.app/command"
|
"hakurei.app/command"
|
||||||
@@ -233,6 +234,9 @@ func earlyMnt(mnt ...*vfs.MountInfoEntry) func(*testing.T, context.Context) []*v
|
|||||||
return func(*testing.T, context.Context) []*vfs.MountInfoEntry { return mnt }
|
return func(*testing.T, context.Context) []*vfs.MountInfoEntry { return mnt }
|
||||||
}
|
}
|
||||||
|
|
||||||
|
//go:linkname toHost hakurei.app/container.toHost
|
||||||
|
func toHost(name string) string
|
||||||
|
|
||||||
var containerTestCases = []struct {
|
var containerTestCases = []struct {
|
||||||
name string
|
name string
|
||||||
filter bool
|
filter bool
|
||||||
@@ -332,13 +336,15 @@ var containerTestCases = []struct {
|
|||||||
func(t *testing.T, ctx context.Context) []*vfs.MountInfoEntry {
|
func(t *testing.T, ctx context.Context) []*vfs.MountInfoEntry {
|
||||||
return []*vfs.MountInfoEntry{
|
return []*vfs.MountInfoEntry{
|
||||||
ent("/", hst.PrivateTmp, "rw", "overlay", "overlay",
|
ent("/", hst.PrivateTmp, "rw", "overlay", "overlay",
|
||||||
"rw,lowerdir="+
|
"rw"+
|
||||||
container.InternalToHostOvlEscape(ctx.Value(testVal("lower0")).(*check.Absolute).String())+":"+
|
",lowerdir+="+
|
||||||
container.InternalToHostOvlEscape(ctx.Value(testVal("lower1")).(*check.Absolute).String())+
|
toHost(ctx.Value(testVal("lower0")).(*check.Absolute).String())+
|
||||||
|
",lowerdir+="+
|
||||||
|
toHost(ctx.Value(testVal("lower1")).(*check.Absolute).String())+
|
||||||
",upperdir="+
|
",upperdir="+
|
||||||
container.InternalToHostOvlEscape(ctx.Value(testVal("upper")).(*check.Absolute).String())+
|
toHost(ctx.Value(testVal("upper")).(*check.Absolute).String())+
|
||||||
",workdir="+
|
",workdir="+
|
||||||
container.InternalToHostOvlEscape(ctx.Value(testVal("work")).(*check.Absolute).String())+
|
toHost(ctx.Value(testVal("work")).(*check.Absolute).String())+
|
||||||
",redirect_dir=nofollow,uuid=on,userxattr"),
|
",redirect_dir=nofollow,uuid=on,userxattr"),
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@@ -388,9 +394,11 @@ var containerTestCases = []struct {
|
|||||||
func(t *testing.T, ctx context.Context) []*vfs.MountInfoEntry {
|
func(t *testing.T, ctx context.Context) []*vfs.MountInfoEntry {
|
||||||
return []*vfs.MountInfoEntry{
|
return []*vfs.MountInfoEntry{
|
||||||
ent("/", hst.PrivateTmp, "rw", "overlay", "overlay",
|
ent("/", hst.PrivateTmp, "rw", "overlay", "overlay",
|
||||||
"ro,lowerdir="+
|
"ro"+
|
||||||
container.InternalToHostOvlEscape(ctx.Value(testVal("lower0")).(*check.Absolute).String())+":"+
|
",lowerdir+="+
|
||||||
container.InternalToHostOvlEscape(ctx.Value(testVal("lower1")).(*check.Absolute).String())+
|
toHost(ctx.Value(testVal("lower0")).(*check.Absolute).String())+
|
||||||
|
",lowerdir+="+
|
||||||
|
toHost(ctx.Value(testVal("lower1")).(*check.Absolute).String())+
|
||||||
",redirect_dir=nofollow,userxattr"),
|
",redirect_dir=nofollow,userxattr"),
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -65,6 +65,8 @@ type syscallDispatcher interface {
|
|||||||
remount(msg message.Msg, target string, flags uintptr) error
|
remount(msg message.Msg, target string, flags uintptr) error
|
||||||
// mountTmpfs provides mountTmpfs.
|
// mountTmpfs provides mountTmpfs.
|
||||||
mountTmpfs(fsname, target string, flags uintptr, size int, perm os.FileMode) error
|
mountTmpfs(fsname, target string, flags uintptr, size int, perm os.FileMode) error
|
||||||
|
// mountOverlay provides mountOverlay.
|
||||||
|
mountOverlay(target string, options [][2]string) error
|
||||||
// ensureFile provides ensureFile.
|
// ensureFile provides ensureFile.
|
||||||
ensureFile(name string, perm, pperm os.FileMode) error
|
ensureFile(name string, perm, pperm os.FileMode) error
|
||||||
// mustLoopback provides mustLoopback.
|
// mustLoopback provides mustLoopback.
|
||||||
@@ -169,6 +171,9 @@ func (direct) remount(msg message.Msg, target string, flags uintptr) error {
|
|||||||
func (k direct) mountTmpfs(fsname, target string, flags uintptr, size int, perm os.FileMode) error {
|
func (k direct) mountTmpfs(fsname, target string, flags uintptr, size int, perm os.FileMode) error {
|
||||||
return mountTmpfs(k, fsname, target, flags, size, perm)
|
return mountTmpfs(k, fsname, target, flags, size, perm)
|
||||||
}
|
}
|
||||||
|
func (k direct) mountOverlay(target string, options [][2]string) error {
|
||||||
|
return mountOverlay(target, options)
|
||||||
|
}
|
||||||
func (direct) ensureFile(name string, perm, pperm os.FileMode) error {
|
func (direct) ensureFile(name string, perm, pperm os.FileMode) error {
|
||||||
return ensureFile(name, perm, pperm)
|
return ensureFile(name, perm, pperm)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -468,6 +468,14 @@ func (k *kstub) mountTmpfs(fsname, target string, flags uintptr, size int, perm
|
|||||||
stub.CheckArg(k.Stub, "perm", perm, 4))
|
stub.CheckArg(k.Stub, "perm", perm, 4))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (k *kstub) mountOverlay(target string, options [][2]string) error {
|
||||||
|
k.Helper()
|
||||||
|
return k.Expects("mountOverlay").Error(
|
||||||
|
stub.CheckArg(k.Stub, "target", target, 0),
|
||||||
|
stub.CheckArgReflect(k.Stub, "options", options, 1),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
func (k *kstub) ensureFile(name string, perm, pperm os.FileMode) error {
|
func (k *kstub) ensureFile(name string, perm, pperm os.FileMode) error {
|
||||||
k.Helper()
|
k.Helper()
|
||||||
return k.Expects("ensureFile").Error(
|
return k.Expects("ensureFile").Error(
|
||||||
|
|||||||
@@ -118,6 +118,10 @@ func errnoFallback(op, path string, err error) (syscall.Errno, *os.PathError) {
|
|||||||
|
|
||||||
// mount wraps syscall.Mount for error handling.
|
// mount wraps syscall.Mount for error handling.
|
||||||
func mount(source, target, fstype string, flags uintptr, data string) error {
|
func mount(source, target, fstype string, flags uintptr, data string) error {
|
||||||
|
if max(len(source), len(target), len(data))+1 > os.Getpagesize() {
|
||||||
|
return &MountError{source, target, fstype, flags, data, syscall.ENOMEM}
|
||||||
|
}
|
||||||
|
|
||||||
err := syscall.Mount(source, target, fstype, flags, data)
|
err := syscall.Mount(source, target, fstype, flags, data)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return nil
|
return nil
|
||||||
|
|||||||
@@ -4,9 +4,9 @@ import (
|
|||||||
"encoding/gob"
|
"encoding/gob"
|
||||||
"fmt"
|
"fmt"
|
||||||
"slices"
|
"slices"
|
||||||
"strings"
|
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/check"
|
||||||
|
"hakurei.app/ext"
|
||||||
"hakurei.app/fhs"
|
"hakurei.app/fhs"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -150,7 +150,7 @@ func (o *MountOverlayOp) early(_ *setupState, k syscallDispatcher) error {
|
|||||||
if v, err := k.evalSymlinks(o.Upper.String()); err != nil {
|
if v, err := k.evalSymlinks(o.Upper.String()); err != nil {
|
||||||
return err
|
return err
|
||||||
} else {
|
} else {
|
||||||
o.upper = check.EscapeOverlayDataSegment(toHost(v))
|
o.upper = toHost(v)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -158,7 +158,7 @@ func (o *MountOverlayOp) early(_ *setupState, k syscallDispatcher) error {
|
|||||||
if v, err := k.evalSymlinks(o.Work.String()); err != nil {
|
if v, err := k.evalSymlinks(o.Work.String()); err != nil {
|
||||||
return err
|
return err
|
||||||
} else {
|
} else {
|
||||||
o.work = check.EscapeOverlayDataSegment(toHost(v))
|
o.work = toHost(v)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -168,12 +168,39 @@ func (o *MountOverlayOp) early(_ *setupState, k syscallDispatcher) error {
|
|||||||
if v, err := k.evalSymlinks(a.String()); err != nil {
|
if v, err := k.evalSymlinks(a.String()); err != nil {
|
||||||
return err
|
return err
|
||||||
} else {
|
} else {
|
||||||
o.lower[i] = check.EscapeOverlayDataSegment(toHost(v))
|
o.lower[i] = toHost(v)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// mountOverlay sets up an overlay mount via [ext.FS].
|
||||||
|
func mountOverlay(target string, options [][2]string) error {
|
||||||
|
fs, err := ext.OpenFS(SourceOverlay, 0)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err = fs.SetString("source", SourceOverlay); err != nil {
|
||||||
|
_ = fs.Close()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, option := range options {
|
||||||
|
if err = fs.SetString(option[0], option[1]); err != nil {
|
||||||
|
_ = fs.Close()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err = fs.SetFlag(OptionOverlayUserxattr); err != nil {
|
||||||
|
_ = fs.Close()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err = fs.Mount(target, 0); err != nil {
|
||||||
|
_ = fs.Close()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return fs.Close()
|
||||||
|
}
|
||||||
|
|
||||||
func (o *MountOverlayOp) apply(state *setupState, k syscallDispatcher) error {
|
func (o *MountOverlayOp) apply(state *setupState, k syscallDispatcher) error {
|
||||||
target := o.Target.String()
|
target := o.Target.String()
|
||||||
if !o.noPrefix {
|
if !o.noPrefix {
|
||||||
@@ -194,7 +221,7 @@ func (o *MountOverlayOp) apply(state *setupState, k syscallDispatcher) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
options := make([]string, 0, 4)
|
options := make([][2]string, 0, 2+len(o.lower))
|
||||||
|
|
||||||
if o.upper == zeroString && o.work == zeroString { // readonly
|
if o.upper == zeroString && o.work == zeroString { // readonly
|
||||||
if len(o.Lower) < 2 {
|
if len(o.Lower) < 2 {
|
||||||
@@ -205,15 +232,16 @@ func (o *MountOverlayOp) apply(state *setupState, k syscallDispatcher) error {
|
|||||||
if len(o.Lower) == 0 {
|
if len(o.Lower) == 0 {
|
||||||
return &OverlayArgumentError{OverlayEmptyLower, zeroString}
|
return &OverlayArgumentError{OverlayEmptyLower, zeroString}
|
||||||
}
|
}
|
||||||
options = append(options,
|
options = append(options, [][2]string{
|
||||||
OptionOverlayUpperdir+"="+o.upper,
|
{OptionOverlayUpperdir, o.upper},
|
||||||
OptionOverlayWorkdir+"="+o.work)
|
{OptionOverlayWorkdir, o.work},
|
||||||
|
}...)
|
||||||
|
}
|
||||||
|
for _, lower := range o.lower {
|
||||||
|
options = append(options, [2]string{OptionOverlayLowerdir + "+", lower})
|
||||||
}
|
}
|
||||||
options = append(options,
|
|
||||||
OptionOverlayLowerdir+"="+strings.Join(o.lower, check.SpecialOverlayPath),
|
|
||||||
OptionOverlayUserxattr)
|
|
||||||
|
|
||||||
return k.mount(SourceOverlay, target, FstypeOverlay, 0, strings.Join(options, check.SpecialOverlayOption))
|
return k.mountOverlay(target, options)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (o *MountOverlayOp) late(*setupState, syscallDispatcher) error { return nil }
|
func (o *MountOverlayOp) late(*setupState, syscallDispatcher) error { return nil }
|
||||||
|
|||||||
@@ -97,13 +97,12 @@ func TestMountOverlayOp(t *testing.T) {
|
|||||||
call("mkdirAll", stub.ExpectArgs{"/sysroot", os.FileMode(0705)}, nil, nil),
|
call("mkdirAll", stub.ExpectArgs{"/sysroot", os.FileMode(0705)}, nil, nil),
|
||||||
call("mkdirTemp", stub.ExpectArgs{"/", "overlay.upper.*"}, "overlay.upper.32768", nil),
|
call("mkdirTemp", stub.ExpectArgs{"/", "overlay.upper.*"}, "overlay.upper.32768", nil),
|
||||||
call("mkdirTemp", stub.ExpectArgs{"/", "overlay.work.*"}, "overlay.work.32768", nil),
|
call("mkdirTemp", stub.ExpectArgs{"/", "overlay.work.*"}, "overlay.work.32768", nil),
|
||||||
call("mount", stub.ExpectArgs{"overlay", "/sysroot", "overlay", uintptr(0), "" +
|
call("mountOverlay", stub.ExpectArgs{"/sysroot", [][2]string{
|
||||||
"upperdir=overlay.upper.32768," +
|
{"upperdir", "overlay.upper.32768"},
|
||||||
"workdir=overlay.work.32768," +
|
{"workdir", "overlay.work.32768"},
|
||||||
"lowerdir=" +
|
{"lowerdir+", `/host/var/lib/planterette/base/debian:f92c9052`},
|
||||||
`/host/var/lib/planterette/base/debian\:f92c9052:` +
|
{"lowerdir+", `/host/var/lib/planterette/app/org.chromium.Chromium@debian:f92c9052`},
|
||||||
`/host/var/lib/planterette/app/org.chromium.Chromium@debian\:f92c9052,` +
|
}}, nil, nil),
|
||||||
"userxattr"}, nil, nil),
|
|
||||||
}, nil},
|
}, nil},
|
||||||
|
|
||||||
{"short lower ro", &Params{ParentPerm: 0755}, &MountOverlayOp{
|
{"short lower ro", &Params{ParentPerm: 0755}, &MountOverlayOp{
|
||||||
@@ -129,11 +128,10 @@ func TestMountOverlayOp(t *testing.T) {
|
|||||||
call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store0"}, "/mnt-root/nix/.ro-store0", nil),
|
call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store0"}, "/mnt-root/nix/.ro-store0", nil),
|
||||||
}, nil, []stub.Call{
|
}, nil, []stub.Call{
|
||||||
call("mkdirAll", stub.ExpectArgs{"/nix/store", os.FileMode(0755)}, nil, nil),
|
call("mkdirAll", stub.ExpectArgs{"/nix/store", os.FileMode(0755)}, nil, nil),
|
||||||
call("mount", stub.ExpectArgs{"overlay", "/nix/store", "overlay", uintptr(0), "" +
|
call("mountOverlay", stub.ExpectArgs{"/nix/store", [][2]string{
|
||||||
"lowerdir=" +
|
{"lowerdir+", "/host/mnt-root/nix/.ro-store"},
|
||||||
"/host/mnt-root/nix/.ro-store:" +
|
{"lowerdir+", "/host/mnt-root/nix/.ro-store0"},
|
||||||
"/host/mnt-root/nix/.ro-store0," +
|
}}, nil, nil),
|
||||||
"userxattr"}, nil, nil),
|
|
||||||
}, nil},
|
}, nil},
|
||||||
|
|
||||||
{"success ro", &Params{ParentPerm: 0755}, &MountOverlayOp{
|
{"success ro", &Params{ParentPerm: 0755}, &MountOverlayOp{
|
||||||
@@ -147,11 +145,10 @@ func TestMountOverlayOp(t *testing.T) {
|
|||||||
call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store0"}, "/mnt-root/nix/.ro-store0", nil),
|
call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store0"}, "/mnt-root/nix/.ro-store0", nil),
|
||||||
}, nil, []stub.Call{
|
}, nil, []stub.Call{
|
||||||
call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0755)}, nil, nil),
|
call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0755)}, nil, nil),
|
||||||
call("mount", stub.ExpectArgs{"overlay", "/sysroot/nix/store", "overlay", uintptr(0), "" +
|
call("mountOverlay", stub.ExpectArgs{"/sysroot/nix/store", [][2]string{
|
||||||
"lowerdir=" +
|
{"lowerdir+", "/host/mnt-root/nix/.ro-store"},
|
||||||
"/host/mnt-root/nix/.ro-store:" +
|
{"lowerdir+", "/host/mnt-root/nix/.ro-store0"},
|
||||||
"/host/mnt-root/nix/.ro-store0," +
|
}}, nil, nil),
|
||||||
"userxattr"}, nil, nil),
|
|
||||||
}, nil},
|
}, nil},
|
||||||
|
|
||||||
{"nil lower", &Params{ParentPerm: 0700}, &MountOverlayOp{
|
{"nil lower", &Params{ParentPerm: 0700}, &MountOverlayOp{
|
||||||
@@ -219,7 +216,11 @@ func TestMountOverlayOp(t *testing.T) {
|
|||||||
call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store"}, "/mnt-root/nix/ro-store", nil),
|
call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store"}, "/mnt-root/nix/ro-store", nil),
|
||||||
}, nil, []stub.Call{
|
}, nil, []stub.Call{
|
||||||
call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0700)}, nil, nil),
|
call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0700)}, nil, nil),
|
||||||
call("mount", stub.ExpectArgs{"overlay", "/sysroot/nix/store", "overlay", uintptr(0), "upperdir=/host/mnt-root/nix/.rw-store/.upper,workdir=/host/mnt-root/nix/.rw-store/.work,lowerdir=/host/mnt-root/nix/ro-store,userxattr"}, nil, stub.UniqueError(0)),
|
call("mountOverlay", stub.ExpectArgs{"/sysroot/nix/store", [][2]string{
|
||||||
|
{"upperdir", "/host/mnt-root/nix/.rw-store/.upper"},
|
||||||
|
{"workdir", "/host/mnt-root/nix/.rw-store/.work"},
|
||||||
|
{"lowerdir+", "/host/mnt-root/nix/ro-store"},
|
||||||
|
}}, nil, stub.UniqueError(0)),
|
||||||
}, stub.UniqueError(0)},
|
}, stub.UniqueError(0)},
|
||||||
|
|
||||||
{"success single layer", &Params{ParentPerm: 0700}, &MountOverlayOp{
|
{"success single layer", &Params{ParentPerm: 0700}, &MountOverlayOp{
|
||||||
@@ -233,11 +234,11 @@ func TestMountOverlayOp(t *testing.T) {
|
|||||||
call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store"}, "/mnt-root/nix/ro-store", nil),
|
call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store"}, "/mnt-root/nix/ro-store", nil),
|
||||||
}, nil, []stub.Call{
|
}, nil, []stub.Call{
|
||||||
call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0700)}, nil, nil),
|
call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0700)}, nil, nil),
|
||||||
call("mount", stub.ExpectArgs{"overlay", "/sysroot/nix/store", "overlay", uintptr(0), "" +
|
call("mountOverlay", stub.ExpectArgs{"/sysroot/nix/store", [][2]string{
|
||||||
"upperdir=/host/mnt-root/nix/.rw-store/.upper," +
|
{"upperdir", "/host/mnt-root/nix/.rw-store/.upper"},
|
||||||
"workdir=/host/mnt-root/nix/.rw-store/.work," +
|
{"workdir", "/host/mnt-root/nix/.rw-store/.work"},
|
||||||
"lowerdir=/host/mnt-root/nix/ro-store," +
|
{"lowerdir+", "/host/mnt-root/nix/ro-store"},
|
||||||
"userxattr"}, nil, nil),
|
}}, nil, nil),
|
||||||
}, nil},
|
}, nil},
|
||||||
|
|
||||||
{"success", &Params{ParentPerm: 0700}, &MountOverlayOp{
|
{"success", &Params{ParentPerm: 0700}, &MountOverlayOp{
|
||||||
@@ -261,16 +262,15 @@ func TestMountOverlayOp(t *testing.T) {
|
|||||||
call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store3"}, "/mnt-root/nix/ro-store3", nil),
|
call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store3"}, "/mnt-root/nix/ro-store3", nil),
|
||||||
}, nil, []stub.Call{
|
}, nil, []stub.Call{
|
||||||
call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0700)}, nil, nil),
|
call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0700)}, nil, nil),
|
||||||
call("mount", stub.ExpectArgs{"overlay", "/sysroot/nix/store", "overlay", uintptr(0), "" +
|
call("mountOverlay", stub.ExpectArgs{"/sysroot/nix/store", [][2]string{
|
||||||
"upperdir=/host/mnt-root/nix/.rw-store/.upper," +
|
{"upperdir", "/host/mnt-root/nix/.rw-store/.upper"},
|
||||||
"workdir=/host/mnt-root/nix/.rw-store/.work," +
|
{"workdir", "/host/mnt-root/nix/.rw-store/.work"},
|
||||||
"lowerdir=" +
|
{"lowerdir+", "/host/mnt-root/nix/ro-store"},
|
||||||
"/host/mnt-root/nix/ro-store:" +
|
{"lowerdir+", "/host/mnt-root/nix/ro-store0"},
|
||||||
"/host/mnt-root/nix/ro-store0:" +
|
{"lowerdir+", "/host/mnt-root/nix/ro-store1"},
|
||||||
"/host/mnt-root/nix/ro-store1:" +
|
{"lowerdir+", "/host/mnt-root/nix/ro-store2"},
|
||||||
"/host/mnt-root/nix/ro-store2:" +
|
{"lowerdir+", "/host/mnt-root/nix/ro-store3"},
|
||||||
"/host/mnt-root/nix/ro-store3," +
|
}}, nil, nil),
|
||||||
"userxattr"}, nil, nil),
|
|
||||||
}, nil},
|
}, nil},
|
||||||
})
|
})
|
||||||
|
|
||||||
|
|||||||
@@ -10,7 +10,6 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
"hakurei.app/check"
|
|
||||||
"hakurei.app/vfs"
|
"hakurei.app/vfs"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -50,9 +49,6 @@ func TestToHost(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// InternalToHostOvlEscape exports toHost passed to [check.EscapeOverlayDataSegment].
|
|
||||||
func InternalToHostOvlEscape(s string) string { return check.EscapeOverlayDataSegment(toHost(s)) }
|
|
||||||
|
|
||||||
func TestCreateFile(t *testing.T) {
|
func TestCreateFile(t *testing.T) {
|
||||||
t.Run("nonexistent", func(t *testing.T) {
|
t.Run("nonexistent", func(t *testing.T) {
|
||||||
t.Run("mkdir", func(t *testing.T) {
|
t.Run("mkdir", func(t *testing.T) {
|
||||||
|
|||||||
267
ext/fs.go
Normal file
267
ext/fs.go
Normal file
@@ -0,0 +1,267 @@
|
|||||||
|
package ext
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// include/uapi/linux/mount.h
|
||||||
|
|
||||||
|
/*
|
||||||
|
* move_mount() flags.
|
||||||
|
*/
|
||||||
|
const (
|
||||||
|
MOVE_MOUNT_F_SYMLINKS = 1 << iota /* Follow symlinks on from path */
|
||||||
|
MOVE_MOUNT_F_AUTOMOUNTS /* Follow automounts on from path */
|
||||||
|
MOVE_MOUNT_F_EMPTY_PATH /* Empty from path permitted */
|
||||||
|
_
|
||||||
|
MOVE_MOUNT_T_SYMLINKS /* Follow symlinks on to path */
|
||||||
|
MOVE_MOUNT_T_AUTOMOUNTS /* Follow automounts on to path */
|
||||||
|
MOVE_MOUNT_T_EMPTY_PATH /* Empty to path permitted */
|
||||||
|
_
|
||||||
|
MOVE_MOUNT_SET_GROUP /* Set sharing group instead */
|
||||||
|
MOVE_MOUNT_BENEATH /* Mount beneath top mount */
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* fsopen() flags.
|
||||||
|
*/
|
||||||
|
const (
|
||||||
|
FSOPEN_CLOEXEC = 1 << iota
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* fspick() flags.
|
||||||
|
*/
|
||||||
|
const (
|
||||||
|
FSPICK_CLOEXEC = 1 << iota
|
||||||
|
FSPICK_SYMLINK_NOFOLLOW
|
||||||
|
FSPICK_NO_AUTOMOUNT
|
||||||
|
FSPICK_EMPTY_PATH
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The type of fsconfig() call made.
|
||||||
|
*/
|
||||||
|
const (
|
||||||
|
FSCONFIG_SET_FLAG = iota /* Set parameter, supplying no value */
|
||||||
|
FSCONFIG_SET_STRING /* Set parameter, supplying a string value */
|
||||||
|
FSCONFIG_SET_BINARY /* Set parameter, supplying a binary blob value */
|
||||||
|
FSCONFIG_SET_PATH /* Set parameter, supplying an object by path */
|
||||||
|
FSCONFIG_SET_PATH_EMPTY /* Set parameter, supplying an object by (empty) path */
|
||||||
|
FSCONFIG_SET_FD /* Set parameter, supplying an object by fd */
|
||||||
|
FSCONFIG_CMD_CREATE /* Create new or reuse existing superblock */
|
||||||
|
FSCONFIG_CMD_RECONFIGURE /* Invoke superblock reconfiguration */
|
||||||
|
FSCONFIG_CMD_CREATE_EXCL /* Create new superblock, fail if reusing existing superblock */
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* fsmount() flags.
|
||||||
|
*/
|
||||||
|
const (
|
||||||
|
FSMOUNT_CLOEXEC = 1 << iota
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Mount attributes.
|
||||||
|
*/
|
||||||
|
const (
|
||||||
|
MOUNT_ATTR_RDONLY = 0x00000001 /* Mount read-only */
|
||||||
|
MOUNT_ATTR_NOSUID = 0x00000002 /* Ignore suid and sgid bits */
|
||||||
|
MOUNT_ATTR_NODEV = 0x00000004 /* Disallow access to device special files */
|
||||||
|
MOUNT_ATTR_NOEXEC = 0x00000008 /* Disallow program execution */
|
||||||
|
MOUNT_ATTR__ATIME = 0x00000070 /* Setting on how atime should be updated */
|
||||||
|
MOUNT_ATTR_RELATIME = 0x00000000 /* - Update atime relative to mtime/ctime. */
|
||||||
|
MOUNT_ATTR_NOATIME = 0x00000010 /* - Do not update access times. */
|
||||||
|
MOUNT_ATTR_STRICTATIME = 0x00000020 /* - Always perform atime updates */
|
||||||
|
MOUNT_ATTR_NODIRATIME = 0x00000080 /* Do not update directory access times */
|
||||||
|
MOUNT_ATTR_IDMAP = 0x00100000 /* Idmap mount to @userns_fd in struct mount_attr. */
|
||||||
|
MOUNT_ATTR_NOSYMFOLLOW = 0x00200000 /* Do not follow symlinks */
|
||||||
|
)
|
||||||
|
|
||||||
|
// FS provides low-level wrappers around the suite of file-descriptor-based
|
||||||
|
// mount facilities in Linux.
|
||||||
|
type FS struct {
|
||||||
|
fd uintptr
|
||||||
|
c runtime.Cleanup
|
||||||
|
}
|
||||||
|
|
||||||
|
// newFS allocates a new [FS] for the specified fd.
|
||||||
|
func newFS(fd uintptr) *FS {
|
||||||
|
fs := FS{fd: fd}
|
||||||
|
fs.c = runtime.AddCleanup(&fs, func(fd uintptr) {
|
||||||
|
_ = syscall.Close(int(fd))
|
||||||
|
}, fd)
|
||||||
|
return &fs
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close closes the underlying filesystem context.
|
||||||
|
func (fs *FS) Close() error {
|
||||||
|
if fs == nil {
|
||||||
|
return syscall.EINVAL
|
||||||
|
}
|
||||||
|
err := syscall.Close(int(fs.fd))
|
||||||
|
fs.c.Stop()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenFS creates a new filesystem context.
|
||||||
|
func OpenFS(fsname string, flags int) (fs *FS, err error) {
|
||||||
|
var s *byte
|
||||||
|
s, err = syscall.BytePtrFromString(fsname)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
fd, _, errno := syscall.Syscall(
|
||||||
|
SYS_FSOPEN,
|
||||||
|
uintptr(unsafe.Pointer(s)),
|
||||||
|
uintptr(flags|FSOPEN_CLOEXEC),
|
||||||
|
0,
|
||||||
|
)
|
||||||
|
if errno != 0 {
|
||||||
|
err = os.NewSyscallError("fsopen", errno)
|
||||||
|
} else {
|
||||||
|
fs = newFS(fd)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// PickFS selects filesystem for reconfiguration.
|
||||||
|
func PickFS(dirfd int, pathname string, flags int) (fs *FS, err error) {
|
||||||
|
var s *byte
|
||||||
|
s, err = syscall.BytePtrFromString(pathname)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
fd, _, errno := syscall.Syscall(
|
||||||
|
SYS_FSPICK,
|
||||||
|
uintptr(dirfd),
|
||||||
|
uintptr(unsafe.Pointer(s)),
|
||||||
|
uintptr(flags|FSPICK_CLOEXEC),
|
||||||
|
)
|
||||||
|
if errno != 0 {
|
||||||
|
err = os.NewSyscallError("fspick", errno)
|
||||||
|
} else {
|
||||||
|
fs = newFS(fd)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// config configures new or existing filesystem context.
|
||||||
|
func (fs *FS) config(cmd uint, key *byte, value unsafe.Pointer, aux int) (err error) {
|
||||||
|
_, _, errno := syscall.Syscall6(
|
||||||
|
SYS_FSCONFIG,
|
||||||
|
fs.fd,
|
||||||
|
uintptr(cmd),
|
||||||
|
uintptr(unsafe.Pointer(key)),
|
||||||
|
uintptr(value),
|
||||||
|
uintptr(aux),
|
||||||
|
0,
|
||||||
|
)
|
||||||
|
if errno != 0 {
|
||||||
|
err = os.NewSyscallError("fsconfig", errno)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetFlag sets the flag parameter named by key. ([FSCONFIG_SET_FLAG])
|
||||||
|
func (fs *FS) SetFlag(key string) (err error) {
|
||||||
|
var s *byte
|
||||||
|
s, err = syscall.BytePtrFromString(key)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
return fs.config(FSCONFIG_SET_FLAG, s, nil, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetString sets the string parameter named by key to the value specified by
|
||||||
|
// value. ([FSCONFIG_SET_STRING])
|
||||||
|
func (fs *FS) SetString(key, value string) (err error) {
|
||||||
|
var s0 *byte
|
||||||
|
s0, err = syscall.BytePtrFromString(key)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var s1 *byte
|
||||||
|
s1, err = syscall.BytePtrFromString(value)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
return fs.config(FSCONFIG_SET_STRING, s0, unsafe.Pointer(s1), 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// mount instantiates mount object from filesystem context.
|
||||||
|
func (fs *FS) mount(flags, attrFlags int) (fsfd int, err error) {
|
||||||
|
r, _, errno := syscall.Syscall(
|
||||||
|
SYS_FSMOUNT,
|
||||||
|
fs.fd,
|
||||||
|
uintptr(flags|FSMOUNT_CLOEXEC),
|
||||||
|
uintptr(attrFlags),
|
||||||
|
)
|
||||||
|
fsfd = int(r)
|
||||||
|
if errno != 0 {
|
||||||
|
err = os.NewSyscallError("fsmount", errno)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveMount moves or attaches mount object to filesystem.
|
||||||
|
func MoveMount(
|
||||||
|
fromDirfd int,
|
||||||
|
fromPathname string,
|
||||||
|
toDirfd int,
|
||||||
|
toPathname string,
|
||||||
|
flags int,
|
||||||
|
) (err error) {
|
||||||
|
var s0 *byte
|
||||||
|
s0, err = syscall.BytePtrFromString(fromPathname)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var s1 *byte
|
||||||
|
s1, err = syscall.BytePtrFromString(toPathname)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
_, _, errno := syscall.Syscall6(
|
||||||
|
SYS_MOVE_MOUNT,
|
||||||
|
uintptr(fromDirfd),
|
||||||
|
uintptr(unsafe.Pointer(s0)),
|
||||||
|
uintptr(toDirfd),
|
||||||
|
uintptr(unsafe.Pointer(s1)),
|
||||||
|
uintptr(flags),
|
||||||
|
0,
|
||||||
|
)
|
||||||
|
if errno != 0 {
|
||||||
|
err = os.NewSyscallError("move_mount", errno)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mount attaches the underlying filesystem context to the specified pathname.
|
||||||
|
func (fs *FS) Mount(pathname string, attrFlags int) error {
|
||||||
|
if err := fs.config(FSCONFIG_CMD_CREATE_EXCL, nil, nil, 0); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
fd, err := fs.mount(0, attrFlags)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = MoveMount(
|
||||||
|
fd, "",
|
||||||
|
-1, pathname,
|
||||||
|
MOVE_MOUNT_F_EMPTY_PATH,
|
||||||
|
)
|
||||||
|
closeErr := syscall.Close(fd)
|
||||||
|
if err == nil {
|
||||||
|
err = closeErr
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
@@ -27,6 +27,11 @@ import (
|
|||||||
// AbsWork is the container pathname [TContext.GetWorkDir] is mounted on.
|
// AbsWork is the container pathname [TContext.GetWorkDir] is mounted on.
|
||||||
var AbsWork = fhs.AbsRoot.Append("work/")
|
var AbsWork = fhs.AbsRoot.Append("work/")
|
||||||
|
|
||||||
|
// EnvJobs is the name of the environment variable holding a decimal
|
||||||
|
// representation of the preferred job count. Its value must not affect cure
|
||||||
|
// outcome.
|
||||||
|
const EnvJobs = "CURE_JOBS"
|
||||||
|
|
||||||
// ExecPath is a slice of [Artifact] and the [check.Absolute] pathname to make
|
// ExecPath is a slice of [Artifact] and the [check.Absolute] pathname to make
|
||||||
// it available at under in the container.
|
// it available at under in the container.
|
||||||
type ExecPath struct {
|
type ExecPath struct {
|
||||||
@@ -397,7 +402,7 @@ const SeccompPresets = std.PresetStrict &
|
|||||||
func (a *execArtifact) makeContainer(
|
func (a *execArtifact) makeContainer(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
msg message.Msg,
|
msg message.Msg,
|
||||||
flags int,
|
flags, jobs int,
|
||||||
hostNet bool,
|
hostNet bool,
|
||||||
temp, work *check.Absolute,
|
temp, work *check.Absolute,
|
||||||
getArtifact GetArtifactFunc,
|
getArtifact GetArtifactFunc,
|
||||||
@@ -432,8 +437,8 @@ func (a *execArtifact) makeContainer(
|
|||||||
z.Hostname = "cure-net"
|
z.Hostname = "cure-net"
|
||||||
}
|
}
|
||||||
z.Uid, z.Gid = (1<<10)-1, (1<<10)-1
|
z.Uid, z.Gid = (1<<10)-1, (1<<10)-1
|
||||||
|
z.Dir, z.Path, z.Args = a.dir, a.path, a.args
|
||||||
z.Dir, z.Env, z.Path, z.Args = a.dir, a.env, a.path, a.args
|
z.Env = slices.Concat(a.env, []string{EnvJobs + "=" + strconv.Itoa(jobs)})
|
||||||
z.Grow(len(a.paths) + 4)
|
z.Grow(len(a.paths) + 4)
|
||||||
|
|
||||||
for i, b := range a.paths {
|
for i, b := range a.paths {
|
||||||
@@ -563,6 +568,7 @@ func (c *Cache) EnterExec(
|
|||||||
z, err = e.makeContainer(
|
z, err = e.makeContainer(
|
||||||
ctx, c.msg,
|
ctx, c.msg,
|
||||||
c.flags,
|
c.flags,
|
||||||
|
c.jobs,
|
||||||
hostNet,
|
hostNet,
|
||||||
temp, work,
|
temp, work,
|
||||||
func(a Artifact) (*check.Absolute, unique.Handle[Checksum]) {
|
func(a Artifact) (*check.Absolute, unique.Handle[Checksum]) {
|
||||||
@@ -602,7 +608,7 @@ func (a *execArtifact) cure(f *FContext, hostNet bool) (err error) {
|
|||||||
msg := f.GetMessage()
|
msg := f.GetMessage()
|
||||||
var z *container.Container
|
var z *container.Container
|
||||||
if z, err = a.makeContainer(
|
if z, err = a.makeContainer(
|
||||||
ctx, msg, f.cache.flags, hostNet,
|
ctx, msg, f.cache.flags, f.GetJobs(), hostNet,
|
||||||
f.GetTempDir(), f.GetWorkDir(),
|
f.GetTempDir(), f.GetWorkDir(),
|
||||||
f.GetArtifact,
|
f.GetArtifact,
|
||||||
f.cache.Ident,
|
f.cache.Ident,
|
||||||
|
|||||||
@@ -3,7 +3,6 @@ package pkg
|
|||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
|
||||||
"crypto/sha512"
|
"crypto/sha512"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"errors"
|
"errors"
|
||||||
@@ -11,6 +10,7 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"slices"
|
"slices"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"sync"
|
||||||
"syscall"
|
"syscall"
|
||||||
"unique"
|
"unique"
|
||||||
"unsafe"
|
"unsafe"
|
||||||
@@ -39,22 +39,45 @@ func panicToError(errP *error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// irCache implements [IRCache].
|
||||||
|
type irCache struct {
|
||||||
|
// Artifact to [unique.Handle] of identifier cache.
|
||||||
|
artifact sync.Map
|
||||||
|
// Identifier free list, must not be accessed directly.
|
||||||
|
identPool sync.Pool
|
||||||
|
}
|
||||||
|
|
||||||
|
// zeroIRCache returns the initialised value of irCache.
|
||||||
|
func zeroIRCache() irCache {
|
||||||
|
return irCache{
|
||||||
|
identPool: sync.Pool{New: func() any { return new(extIdent) }},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IRCache provides memory management and caching primitives for IR and
|
||||||
|
// identifier operations against [Artifact] implementations.
|
||||||
|
//
|
||||||
|
// The zero value is not safe for use.
|
||||||
|
type IRCache struct{ irCache }
|
||||||
|
|
||||||
|
// NewIR returns the address of a new [IRCache].
|
||||||
|
func NewIR() *IRCache {
|
||||||
|
return &IRCache{zeroIRCache()}
|
||||||
|
}
|
||||||
|
|
||||||
// IContext is passed to [Artifact.Params] and provides methods for writing
|
// IContext is passed to [Artifact.Params] and provides methods for writing
|
||||||
// values to the IR writer. It does not expose the underlying [io.Writer].
|
// values to the IR writer. It does not expose the underlying [io.Writer].
|
||||||
//
|
//
|
||||||
// IContext is valid until [Artifact.Params] returns.
|
// IContext is valid until [Artifact.Params] returns.
|
||||||
type IContext struct {
|
type IContext struct {
|
||||||
// Address of underlying [Cache], should be zeroed or made unusable after
|
// Address of underlying irCache, should be zeroed or made unusable after
|
||||||
// [Artifact.Params] returns and must not be exposed directly.
|
// [Artifact.Params] returns and must not be exposed directly.
|
||||||
cache *Cache
|
ic *irCache
|
||||||
// Written to by various methods, should be zeroed after [Artifact.Params]
|
// Written to by various methods, should be zeroed after [Artifact.Params]
|
||||||
// returns and must not be exposed directly.
|
// returns and must not be exposed directly.
|
||||||
w io.Writer
|
w io.Writer
|
||||||
}
|
}
|
||||||
|
|
||||||
// Unwrap returns the underlying [context.Context].
|
|
||||||
func (i *IContext) Unwrap() context.Context { return i.cache.ctx }
|
|
||||||
|
|
||||||
// irZero is a zero IR word.
|
// irZero is a zero IR word.
|
||||||
var irZero [wordSize]byte
|
var irZero [wordSize]byte
|
||||||
|
|
||||||
@@ -136,11 +159,11 @@ func (i *IContext) mustWrite(p []byte) {
|
|||||||
// WriteIdent is not defined for an [Artifact] not part of the slice returned by
|
// WriteIdent is not defined for an [Artifact] not part of the slice returned by
|
||||||
// [Artifact.Dependencies].
|
// [Artifact.Dependencies].
|
||||||
func (i *IContext) WriteIdent(a Artifact) {
|
func (i *IContext) WriteIdent(a Artifact) {
|
||||||
buf := i.cache.getIdentBuf()
|
buf := i.ic.getIdentBuf()
|
||||||
defer i.cache.putIdentBuf(buf)
|
defer i.ic.putIdentBuf(buf)
|
||||||
|
|
||||||
IRKindIdent.encodeHeader(0).put(buf[:])
|
IRKindIdent.encodeHeader(0).put(buf[:])
|
||||||
*(*ID)(buf[wordSize:]) = i.cache.Ident(a).Value()
|
*(*ID)(buf[wordSize:]) = i.ic.Ident(a).Value()
|
||||||
i.mustWrite(buf[:])
|
i.mustWrite(buf[:])
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -183,19 +206,19 @@ func (i *IContext) WriteString(s string) {
|
|||||||
|
|
||||||
// Encode writes a deterministic, efficient representation of a to w and returns
|
// Encode writes a deterministic, efficient representation of a to w and returns
|
||||||
// the first non-nil error encountered while writing to w.
|
// the first non-nil error encountered while writing to w.
|
||||||
func (c *Cache) Encode(w io.Writer, a Artifact) (err error) {
|
func (ic *irCache) Encode(w io.Writer, a Artifact) (err error) {
|
||||||
deps := a.Dependencies()
|
deps := a.Dependencies()
|
||||||
idents := make([]*extIdent, len(deps))
|
idents := make([]*extIdent, len(deps))
|
||||||
for i, d := range deps {
|
for i, d := range deps {
|
||||||
dbuf, did := c.unsafeIdent(d, true)
|
dbuf, did := ic.unsafeIdent(d, true)
|
||||||
if dbuf == nil {
|
if dbuf == nil {
|
||||||
dbuf = c.getIdentBuf()
|
dbuf = ic.getIdentBuf()
|
||||||
binary.LittleEndian.PutUint64(dbuf[:], uint64(d.Kind()))
|
binary.LittleEndian.PutUint64(dbuf[:], uint64(d.Kind()))
|
||||||
*(*ID)(dbuf[wordSize:]) = did.Value()
|
*(*ID)(dbuf[wordSize:]) = did.Value()
|
||||||
} else {
|
} else {
|
||||||
c.storeIdent(d, dbuf)
|
ic.storeIdent(d, dbuf)
|
||||||
}
|
}
|
||||||
defer c.putIdentBuf(dbuf)
|
defer ic.putIdentBuf(dbuf)
|
||||||
idents[i] = dbuf
|
idents[i] = dbuf
|
||||||
}
|
}
|
||||||
slices.SortFunc(idents, func(a, b *extIdent) int {
|
slices.SortFunc(idents, func(a, b *extIdent) int {
|
||||||
@@ -221,10 +244,10 @@ func (c *Cache) Encode(w io.Writer, a Artifact) (err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func() {
|
func() {
|
||||||
i := IContext{c, w}
|
i := IContext{ic, w}
|
||||||
|
|
||||||
defer panicToError(&err)
|
defer panicToError(&err)
|
||||||
defer func() { i.cache, i.w = nil, nil }()
|
defer func() { i.ic, i.w = nil, nil }()
|
||||||
|
|
||||||
a.Params(&i)
|
a.Params(&i)
|
||||||
}()
|
}()
|
||||||
@@ -233,7 +256,7 @@ func (c *Cache) Encode(w io.Writer, a Artifact) (err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var f IREndFlag
|
var f IREndFlag
|
||||||
kcBuf := c.getIdentBuf()
|
kcBuf := ic.getIdentBuf()
|
||||||
sz := wordSize
|
sz := wordSize
|
||||||
if kc, ok := a.(KnownChecksum); ok {
|
if kc, ok := a.(KnownChecksum); ok {
|
||||||
f |= IREndKnownChecksum
|
f |= IREndKnownChecksum
|
||||||
@@ -243,13 +266,13 @@ func (c *Cache) Encode(w io.Writer, a Artifact) (err error) {
|
|||||||
IRKindEnd.encodeHeader(uint32(f)).put(kcBuf[:])
|
IRKindEnd.encodeHeader(uint32(f)).put(kcBuf[:])
|
||||||
|
|
||||||
_, err = w.Write(kcBuf[:sz])
|
_, err = w.Write(kcBuf[:sz])
|
||||||
c.putIdentBuf(kcBuf)
|
ic.putIdentBuf(kcBuf)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// encodeAll implements EncodeAll by recursively encoding dependencies and
|
// encodeAll implements EncodeAll by recursively encoding dependencies and
|
||||||
// performs deduplication by value via the encoded map.
|
// performs deduplication by value via the encoded map.
|
||||||
func (c *Cache) encodeAll(
|
func (ic *irCache) encodeAll(
|
||||||
w io.Writer,
|
w io.Writer,
|
||||||
a Artifact,
|
a Artifact,
|
||||||
encoded map[Artifact]struct{},
|
encoded map[Artifact]struct{},
|
||||||
@@ -259,13 +282,13 @@ func (c *Cache) encodeAll(
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, d := range a.Dependencies() {
|
for _, d := range a.Dependencies() {
|
||||||
if err = c.encodeAll(w, d, encoded); err != nil {
|
if err = ic.encodeAll(w, d, encoded); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
encoded[a] = struct{}{}
|
encoded[a] = struct{}{}
|
||||||
return c.Encode(w, a)
|
return ic.Encode(w, a)
|
||||||
}
|
}
|
||||||
|
|
||||||
// EncodeAll writes a self-describing IR stream of a to w and returns the first
|
// EncodeAll writes a self-describing IR stream of a to w and returns the first
|
||||||
@@ -283,8 +306,8 @@ func (c *Cache) encodeAll(
|
|||||||
// the ident cache, nor does it contribute identifiers it computes back to the
|
// the ident cache, nor does it contribute identifiers it computes back to the
|
||||||
// ident cache. Because of this, multiple invocations of EncodeAll will have
|
// ident cache. Because of this, multiple invocations of EncodeAll will have
|
||||||
// similar cost and does not amortise when combined with a call to Cure.
|
// similar cost and does not amortise when combined with a call to Cure.
|
||||||
func (c *Cache) EncodeAll(w io.Writer, a Artifact) error {
|
func (ic *irCache) EncodeAll(w io.Writer, a Artifact) error {
|
||||||
return c.encodeAll(w, a, make(map[Artifact]struct{}))
|
return ic.encodeAll(w, a, make(map[Artifact]struct{}))
|
||||||
}
|
}
|
||||||
|
|
||||||
// ErrRemainingIR is returned for a [IRReadFunc] that failed to call
|
// ErrRemainingIR is returned for a [IRReadFunc] that failed to call
|
||||||
@@ -409,6 +432,12 @@ func (e InvalidKindError) Error() string {
|
|||||||
// register is not safe for concurrent use. register must not be called after
|
// register is not safe for concurrent use. register must not be called after
|
||||||
// the first instance of [Cache] has been opened.
|
// the first instance of [Cache] has been opened.
|
||||||
func register(k Kind, f IRReadFunc) {
|
func register(k Kind, f IRReadFunc) {
|
||||||
|
openMu.Lock()
|
||||||
|
defer openMu.Unlock()
|
||||||
|
|
||||||
|
if opened {
|
||||||
|
panic("attempting to register after open")
|
||||||
|
}
|
||||||
if _, ok := irArtifact[k]; ok {
|
if _, ok := irArtifact[k]; ok {
|
||||||
panic("attempting to register " + strconv.Itoa(int(k)) + " twice")
|
panic("attempting to register " + strconv.Itoa(int(k)) + " twice")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,7 +8,6 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"testing/fstest"
|
"testing/fstest"
|
||||||
"unique"
|
"unique"
|
||||||
"unsafe"
|
|
||||||
|
|
||||||
"hakurei.app/check"
|
"hakurei.app/check"
|
||||||
"hakurei.app/internal/pkg"
|
"hakurei.app/internal/pkg"
|
||||||
@@ -33,20 +32,14 @@ func TestHTTPGet(t *testing.T) {
|
|||||||
|
|
||||||
checkWithCache(t, []cacheTestCase{
|
checkWithCache(t, []cacheTestCase{
|
||||||
{"direct", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
{"direct", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
var r pkg.RContext
|
r := newRContext(t, c)
|
||||||
rCacheVal := reflect.ValueOf(&r).Elem().FieldByName("cache")
|
|
||||||
reflect.NewAt(
|
|
||||||
rCacheVal.Type(),
|
|
||||||
unsafe.Pointer(rCacheVal.UnsafeAddr()),
|
|
||||||
).Elem().Set(reflect.ValueOf(c))
|
|
||||||
|
|
||||||
f := pkg.NewHTTPGet(
|
f := pkg.NewHTTPGet(
|
||||||
&client,
|
&client,
|
||||||
"file:///testdata",
|
"file:///testdata",
|
||||||
testdataChecksum.Value(),
|
testdataChecksum.Value(),
|
||||||
)
|
)
|
||||||
var got []byte
|
var got []byte
|
||||||
if rc, err := f.Cure(&r); err != nil {
|
if rc, err := f.Cure(r); err != nil {
|
||||||
t.Fatalf("Cure: error = %v", err)
|
t.Fatalf("Cure: error = %v", err)
|
||||||
} else if got, err = io.ReadAll(rc); err != nil {
|
} else if got, err = io.ReadAll(rc); err != nil {
|
||||||
t.Fatalf("ReadAll: error = %v", err)
|
t.Fatalf("ReadAll: error = %v", err)
|
||||||
@@ -65,7 +58,7 @@ func TestHTTPGet(t *testing.T) {
|
|||||||
wantErrMismatch := &pkg.ChecksumMismatchError{
|
wantErrMismatch := &pkg.ChecksumMismatchError{
|
||||||
Got: testdataChecksum.Value(),
|
Got: testdataChecksum.Value(),
|
||||||
}
|
}
|
||||||
if rc, err := f.Cure(&r); err != nil {
|
if rc, err := f.Cure(r); err != nil {
|
||||||
t.Fatalf("Cure: error = %v", err)
|
t.Fatalf("Cure: error = %v", err)
|
||||||
} else if got, err = io.ReadAll(rc); err != nil {
|
} else if got, err = io.ReadAll(rc); err != nil {
|
||||||
t.Fatalf("ReadAll: error = %v", err)
|
t.Fatalf("ReadAll: error = %v", err)
|
||||||
@@ -76,7 +69,7 @@ func TestHTTPGet(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// check fallback validation
|
// check fallback validation
|
||||||
if rc, err := f.Cure(&r); err != nil {
|
if rc, err := f.Cure(r); err != nil {
|
||||||
t.Fatalf("Cure: error = %v", err)
|
t.Fatalf("Cure: error = %v", err)
|
||||||
} else if err = rc.Close(); !reflect.DeepEqual(err, wantErrMismatch) {
|
} else if err = rc.Close(); !reflect.DeepEqual(err, wantErrMismatch) {
|
||||||
t.Fatalf("Close: error = %#v, want %#v", err, wantErrMismatch)
|
t.Fatalf("Close: error = %#v, want %#v", err, wantErrMismatch)
|
||||||
@@ -89,18 +82,13 @@ func TestHTTPGet(t *testing.T) {
|
|||||||
pkg.Checksum{},
|
pkg.Checksum{},
|
||||||
)
|
)
|
||||||
wantErrNotFound := pkg.ResponseStatusError(http.StatusNotFound)
|
wantErrNotFound := pkg.ResponseStatusError(http.StatusNotFound)
|
||||||
if _, err := f.Cure(&r); !reflect.DeepEqual(err, wantErrNotFound) {
|
if _, err := f.Cure(r); !reflect.DeepEqual(err, wantErrNotFound) {
|
||||||
t.Fatalf("Cure: error = %#v, want %#v", err, wantErrNotFound)
|
t.Fatalf("Cure: error = %#v, want %#v", err, wantErrNotFound)
|
||||||
}
|
}
|
||||||
}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C")},
|
}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C")},
|
||||||
|
|
||||||
{"cure", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
{"cure", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
var r pkg.RContext
|
r := newRContext(t, c)
|
||||||
rCacheVal := reflect.ValueOf(&r).Elem().FieldByName("cache")
|
|
||||||
reflect.NewAt(
|
|
||||||
rCacheVal.Type(),
|
|
||||||
unsafe.Pointer(rCacheVal.UnsafeAddr()),
|
|
||||||
).Elem().Set(reflect.ValueOf(c))
|
|
||||||
|
|
||||||
f := pkg.NewHTTPGet(
|
f := pkg.NewHTTPGet(
|
||||||
&client,
|
&client,
|
||||||
@@ -120,7 +108,7 @@ func TestHTTPGet(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var got []byte
|
var got []byte
|
||||||
if rc, err := f.Cure(&r); err != nil {
|
if rc, err := f.Cure(r); err != nil {
|
||||||
t.Fatalf("Cure: error = %v", err)
|
t.Fatalf("Cure: error = %v", err)
|
||||||
} else if got, err = io.ReadAll(rc); err != nil {
|
} else if got, err = io.ReadAll(rc); err != nil {
|
||||||
t.Fatalf("ReadAll: error = %v", err)
|
t.Fatalf("ReadAll: error = %v", err)
|
||||||
@@ -136,7 +124,7 @@ func TestHTTPGet(t *testing.T) {
|
|||||||
"file:///testdata",
|
"file:///testdata",
|
||||||
testdataChecksum.Value(),
|
testdataChecksum.Value(),
|
||||||
)
|
)
|
||||||
if rc, err := f.Cure(&r); err != nil {
|
if rc, err := f.Cure(r); err != nil {
|
||||||
t.Fatalf("Cure: error = %v", err)
|
t.Fatalf("Cure: error = %v", err)
|
||||||
} else if got, err = io.ReadAll(rc); err != nil {
|
} else if got, err = io.ReadAll(rc); err != nil {
|
||||||
t.Fatalf("ReadAll: error = %v", err)
|
t.Fatalf("ReadAll: error = %v", err)
|
||||||
|
|||||||
@@ -18,6 +18,7 @@ import (
|
|||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
"slices"
|
"slices"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
@@ -70,8 +71,70 @@ func MustDecode(s string) (checksum Checksum) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// extension is a string uniquely identifying a set of custom [Artifact]
|
||||||
|
// implementations registered by calling [Register].
|
||||||
|
extension string
|
||||||
|
|
||||||
|
// openMu synchronises access to global state for initialisation.
|
||||||
|
openMu sync.Mutex
|
||||||
|
// opened is false if [Open] was never called.
|
||||||
|
opened bool
|
||||||
|
)
|
||||||
|
|
||||||
|
// Extension returns a string uniquely identifying the currently registered set
|
||||||
|
// of custom [Artifact], or the zero value if none was registered.
|
||||||
|
func Extension() string { return extension }
|
||||||
|
|
||||||
|
// ValidExtension returns whether s is valid for use in a call to SetExtension.
|
||||||
|
func ValidExtension(s string) bool {
|
||||||
|
if l := len(s); l == 0 || l > 128 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for _, v := range s {
|
||||||
|
if v < 'a' || v > 'z' {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrInvalidExtension is returned for a variant identification string for which
|
||||||
|
// [ValidExtension] returns false.
|
||||||
|
var ErrInvalidExtension = errors.New("invalid extension variant identification string")
|
||||||
|
|
||||||
|
// SetExtension sets the extension variant identification string. SetExtension
|
||||||
|
// must be called before [Open] if custom [Artifact] implementations had been
|
||||||
|
// recorded by calling [Register].
|
||||||
|
//
|
||||||
|
// The variant identification string must be between 1 and 128 bytes long and
|
||||||
|
// consists of only bytes between 'a' and 'z'.
|
||||||
|
//
|
||||||
|
// SetExtension is not safe for concurrent use. SetExtension is called at most
|
||||||
|
// once and must not be called after the first instance of Cache has been opened.
|
||||||
|
func SetExtension(s string) {
|
||||||
|
openMu.Lock()
|
||||||
|
defer openMu.Unlock()
|
||||||
|
|
||||||
|
if opened {
|
||||||
|
panic("attempting to set extension after open")
|
||||||
|
}
|
||||||
|
if extension != "" {
|
||||||
|
panic("attempting to set extension twice")
|
||||||
|
}
|
||||||
|
if !ValidExtension(s) {
|
||||||
|
panic(ErrInvalidExtension)
|
||||||
|
}
|
||||||
|
extension = s
|
||||||
|
statusHeader = makeStatusHeader(s)
|
||||||
|
}
|
||||||
|
|
||||||
// common holds elements and receives methods shared between different contexts.
|
// common holds elements and receives methods shared between different contexts.
|
||||||
type common struct {
|
type common struct {
|
||||||
|
// Context specific to this [Artifact]. The toplevel context in [Cache] must
|
||||||
|
// not be exposed directly.
|
||||||
|
ctx context.Context
|
||||||
|
|
||||||
// Address of underlying [Cache], should be zeroed or made unusable after
|
// Address of underlying [Cache], should be zeroed or made unusable after
|
||||||
// Cure returns and must not be exposed directly.
|
// Cure returns and must not be exposed directly.
|
||||||
cache *Cache
|
cache *Cache
|
||||||
@@ -98,19 +161,27 @@ type TContext struct {
|
|||||||
common
|
common
|
||||||
}
|
}
|
||||||
|
|
||||||
// statusHeader is the header written to all status files in dirStatus.
|
// makeStatusHeader creates the header written to every status file. This should
|
||||||
var statusHeader = func() string {
|
// not be called directly, its result is stored in statusHeader and will not
|
||||||
|
// change after the first [Cache] is opened.
|
||||||
|
func makeStatusHeader(extension string) string {
|
||||||
s := programName
|
s := programName
|
||||||
if v := info.Version(); v != info.FallbackVersion {
|
if v := info.Version(); v != info.FallbackVersion {
|
||||||
s += " " + v
|
s += " " + v
|
||||||
}
|
}
|
||||||
|
if extension != "" {
|
||||||
|
s += " with " + extension + " extensions"
|
||||||
|
}
|
||||||
s += " (" + runtime.GOARCH + ")"
|
s += " (" + runtime.GOARCH + ")"
|
||||||
if name, err := os.Hostname(); err == nil {
|
if name, err := os.Hostname(); err == nil {
|
||||||
s += " on " + name
|
s += " on " + name
|
||||||
}
|
}
|
||||||
s += "\n\n"
|
s += "\n\n"
|
||||||
return s
|
return s
|
||||||
}()
|
}
|
||||||
|
|
||||||
|
// statusHeader is the header written to all status files in dirStatus.
|
||||||
|
var statusHeader = makeStatusHeader("")
|
||||||
|
|
||||||
// prepareStatus initialises the status file once.
|
// prepareStatus initialises the status file once.
|
||||||
func (t *TContext) prepareStatus() error {
|
func (t *TContext) prepareStatus() error {
|
||||||
@@ -183,11 +254,15 @@ func (t *TContext) destroy(errP *error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Unwrap returns the underlying [context.Context].
|
// Unwrap returns the underlying [context.Context].
|
||||||
func (c *common) Unwrap() context.Context { return c.cache.ctx }
|
func (c *common) Unwrap() context.Context { return c.ctx }
|
||||||
|
|
||||||
// GetMessage returns [message.Msg] held by the underlying [Cache].
|
// GetMessage returns [message.Msg] held by the underlying [Cache].
|
||||||
func (c *common) GetMessage() message.Msg { return c.cache.msg }
|
func (c *common) GetMessage() message.Msg { return c.cache.msg }
|
||||||
|
|
||||||
|
// GetJobs returns the preferred number of jobs to run, when applicable. Its
|
||||||
|
// value must not affect cure outcome.
|
||||||
|
func (c *common) GetJobs() int { return c.cache.jobs }
|
||||||
|
|
||||||
// GetWorkDir returns a pathname to a directory which [Artifact] is expected to
|
// GetWorkDir returns a pathname to a directory which [Artifact] is expected to
|
||||||
// write its output to. This is not the final resting place of the [Artifact]
|
// write its output to. This is not the final resting place of the [Artifact]
|
||||||
// and this pathname should not be directly referred to in the final contents.
|
// and this pathname should not be directly referred to in the final contents.
|
||||||
@@ -207,11 +282,11 @@ func (t *TContext) GetTempDir() *check.Absolute { return t.temp }
|
|||||||
// [ChecksumMismatchError], or the underlying implementation may block on Close.
|
// [ChecksumMismatchError], or the underlying implementation may block on Close.
|
||||||
func (c *common) Open(a Artifact) (r io.ReadCloser, err error) {
|
func (c *common) Open(a Artifact) (r io.ReadCloser, err error) {
|
||||||
if f, ok := a.(FileArtifact); ok {
|
if f, ok := a.(FileArtifact); ok {
|
||||||
return c.cache.openFile(f)
|
return c.cache.openFile(c.ctx, f)
|
||||||
}
|
}
|
||||||
|
|
||||||
var pathname *check.Absolute
|
var pathname *check.Absolute
|
||||||
if pathname, _, err = c.cache.Cure(a); err != nil {
|
if pathname, _, err = c.cache.cure(a, true); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -372,6 +447,9 @@ type KnownChecksum interface {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// FileArtifact refers to an [Artifact] backed by a single file.
|
// FileArtifact refers to an [Artifact] backed by a single file.
|
||||||
|
//
|
||||||
|
// FileArtifact does not support fine-grained cancellation. Its context is
|
||||||
|
// inherited from the first [TrivialArtifact] or [FloodArtifact] that opens it.
|
||||||
type FileArtifact interface {
|
type FileArtifact interface {
|
||||||
// Cure returns [io.ReadCloser] of the full contents of [FileArtifact]. If
|
// Cure returns [io.ReadCloser] of the full contents of [FileArtifact]. If
|
||||||
// [FileArtifact] implements [KnownChecksum], Cure is responsible for
|
// [FileArtifact] implements [KnownChecksum], Cure is responsible for
|
||||||
@@ -416,6 +494,9 @@ const (
|
|||||||
// KindFile is the kind of [Artifact] returned by [NewFile].
|
// KindFile is the kind of [Artifact] returned by [NewFile].
|
||||||
KindFile
|
KindFile
|
||||||
|
|
||||||
|
// _kindEnd is the total number of kinds and does not denote a kind.
|
||||||
|
_kindEnd
|
||||||
|
|
||||||
// KindCustomOffset is the first [Kind] value reserved for implementations
|
// KindCustomOffset is the first [Kind] value reserved for implementations
|
||||||
// not from this package.
|
// not from this package.
|
||||||
KindCustomOffset = 1 << 31
|
KindCustomOffset = 1 << 31
|
||||||
@@ -430,6 +511,9 @@ const (
|
|||||||
// fileLock is the file name appended to Cache.base for guaranteeing
|
// fileLock is the file name appended to Cache.base for guaranteeing
|
||||||
// exclusive access to the cache directory.
|
// exclusive access to the cache directory.
|
||||||
fileLock = "lock"
|
fileLock = "lock"
|
||||||
|
// fileVariant is the file name appended to Cache.base holding the variant
|
||||||
|
// identification string set by a prior call to [SetExtension].
|
||||||
|
fileVariant = "variant"
|
||||||
|
|
||||||
// dirIdentifier is the directory name appended to Cache.base for storing
|
// dirIdentifier is the directory name appended to Cache.base for storing
|
||||||
// artifacts named after their [ID].
|
// artifacts named after their [ID].
|
||||||
@@ -529,8 +613,36 @@ const (
|
|||||||
// impurity due to [KindExecNet] being [KnownChecksum]. This flag exists
|
// impurity due to [KindExecNet] being [KnownChecksum]. This flag exists
|
||||||
// to support kernels without Landlock LSM enabled.
|
// to support kernels without Landlock LSM enabled.
|
||||||
CHostAbstract
|
CHostAbstract
|
||||||
|
|
||||||
|
// CPromoteVariant allows [pkg.Open] to promote an unextended on-disk cache
|
||||||
|
// to the current extension variant. This is a one-way operation.
|
||||||
|
CPromoteVariant
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// toplevel holds [context.WithCancel] over caller-supplied context, where all
|
||||||
|
// [Artifact] context are derived from.
|
||||||
|
type toplevel struct {
|
||||||
|
ctx context.Context
|
||||||
|
cancel context.CancelFunc
|
||||||
|
}
|
||||||
|
|
||||||
|
// newToplevel returns the address of a new toplevel via ctx.
|
||||||
|
func newToplevel(ctx context.Context) *toplevel {
|
||||||
|
var t toplevel
|
||||||
|
t.ctx, t.cancel = context.WithCancel(ctx)
|
||||||
|
return &t
|
||||||
|
}
|
||||||
|
|
||||||
|
// pendingCure provides synchronisation and cancellation for pending cures.
|
||||||
|
type pendingCure struct {
|
||||||
|
// Closed on cure completion.
|
||||||
|
done <-chan struct{}
|
||||||
|
// Error outcome, safe to access after done is closed.
|
||||||
|
err error
|
||||||
|
// Cancels the corresponding cure.
|
||||||
|
cancel context.CancelFunc
|
||||||
|
}
|
||||||
|
|
||||||
// Cache is a support layer that implementations of [Artifact] can use to store
|
// Cache is a support layer that implementations of [Artifact] can use to store
|
||||||
// cured [Artifact] data in a content addressed fashion.
|
// cured [Artifact] data in a content addressed fashion.
|
||||||
type Cache struct {
|
type Cache struct {
|
||||||
@@ -538,11 +650,10 @@ type Cache struct {
|
|||||||
// implementation and receives an equal amount of elements after.
|
// implementation and receives an equal amount of elements after.
|
||||||
cures chan struct{}
|
cures chan struct{}
|
||||||
|
|
||||||
// [context.WithCancel] over caller-supplied context, used by [Artifact] and
|
// Parent context which toplevel was derived from.
|
||||||
// all dependency curing goroutines.
|
parent context.Context
|
||||||
ctx context.Context
|
// For deriving curing context, must not be accessed directly.
|
||||||
// Cancels ctx.
|
toplevel atomic.Pointer[toplevel]
|
||||||
cancel context.CancelFunc
|
|
||||||
// For waiting on dependency curing goroutines.
|
// For waiting on dependency curing goroutines.
|
||||||
wg sync.WaitGroup
|
wg sync.WaitGroup
|
||||||
// Reports new cures and passed to [Artifact].
|
// Reports new cures and passed to [Artifact].
|
||||||
@@ -552,11 +663,11 @@ type Cache struct {
|
|||||||
base *check.Absolute
|
base *check.Absolute
|
||||||
// Immutable cure options set by [Open].
|
// Immutable cure options set by [Open].
|
||||||
flags int
|
flags int
|
||||||
|
// Immutable job count, when applicable.
|
||||||
|
jobs int
|
||||||
|
|
||||||
// Artifact to [unique.Handle] of identifier cache.
|
// Must not be exposed directly.
|
||||||
artifact sync.Map
|
irCache
|
||||||
// Identifier free list, must not be accessed directly.
|
|
||||||
identPool sync.Pool
|
|
||||||
|
|
||||||
// Synchronises access to dirChecksum.
|
// Synchronises access to dirChecksum.
|
||||||
checksumMu sync.RWMutex
|
checksumMu sync.RWMutex
|
||||||
@@ -566,9 +677,11 @@ type Cache struct {
|
|||||||
// Identifier to error pair for unrecoverably faulted [Artifact].
|
// Identifier to error pair for unrecoverably faulted [Artifact].
|
||||||
identErr map[unique.Handle[ID]]error
|
identErr map[unique.Handle[ID]]error
|
||||||
// Pending identifiers, accessed through Cure for entries not in ident.
|
// Pending identifiers, accessed through Cure for entries not in ident.
|
||||||
identPending map[unique.Handle[ID]]<-chan struct{}
|
identPending map[unique.Handle[ID]]*pendingCure
|
||||||
// Synchronises access to ident and corresponding filesystem entries.
|
// Synchronises access to ident and corresponding filesystem entries.
|
||||||
identMu sync.RWMutex
|
identMu sync.RWMutex
|
||||||
|
// Synchronises entry into Abort and Cure.
|
||||||
|
abortMu sync.RWMutex
|
||||||
|
|
||||||
// Synchronises entry into exclusive artifacts for the cure method.
|
// Synchronises entry into exclusive artifacts for the cure method.
|
||||||
exclMu sync.Mutex
|
exclMu sync.Mutex
|
||||||
@@ -577,8 +690,10 @@ type Cache struct {
|
|||||||
|
|
||||||
// Unlocks the on-filesystem cache. Must only be called from Close.
|
// Unlocks the on-filesystem cache. Must only be called from Close.
|
||||||
unlock func()
|
unlock func()
|
||||||
// Synchronises calls to Close.
|
// Whether [Cache] is considered closed.
|
||||||
closeOnce sync.Once
|
closed bool
|
||||||
|
// Synchronises calls to Abort and Close.
|
||||||
|
closeMu sync.Mutex
|
||||||
|
|
||||||
// Whether EnterExec has not yet returned.
|
// Whether EnterExec has not yet returned.
|
||||||
inExec atomic.Bool
|
inExec atomic.Bool
|
||||||
@@ -588,24 +703,24 @@ type Cache struct {
|
|||||||
type extIdent [wordSize + len(ID{})]byte
|
type extIdent [wordSize + len(ID{})]byte
|
||||||
|
|
||||||
// getIdentBuf returns the address of an extIdent for Ident.
|
// getIdentBuf returns the address of an extIdent for Ident.
|
||||||
func (c *Cache) getIdentBuf() *extIdent { return c.identPool.Get().(*extIdent) }
|
func (ic *irCache) getIdentBuf() *extIdent { return ic.identPool.Get().(*extIdent) }
|
||||||
|
|
||||||
// putIdentBuf adds buf to identPool.
|
// putIdentBuf adds buf to identPool.
|
||||||
func (c *Cache) putIdentBuf(buf *extIdent) { c.identPool.Put(buf) }
|
func (ic *irCache) putIdentBuf(buf *extIdent) { ic.identPool.Put(buf) }
|
||||||
|
|
||||||
// storeIdent adds an [Artifact] to the artifact cache.
|
// storeIdent adds an [Artifact] to the artifact cache.
|
||||||
func (c *Cache) storeIdent(a Artifact, buf *extIdent) unique.Handle[ID] {
|
func (ic *irCache) storeIdent(a Artifact, buf *extIdent) unique.Handle[ID] {
|
||||||
idu := unique.Make(ID(buf[wordSize:]))
|
idu := unique.Make(ID(buf[wordSize:]))
|
||||||
c.artifact.Store(a, idu)
|
ic.artifact.Store(a, idu)
|
||||||
return idu
|
return idu
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ident returns the identifier of an [Artifact].
|
// Ident returns the identifier of an [Artifact].
|
||||||
func (c *Cache) Ident(a Artifact) unique.Handle[ID] {
|
func (ic *irCache) Ident(a Artifact) unique.Handle[ID] {
|
||||||
buf, idu := c.unsafeIdent(a, false)
|
buf, idu := ic.unsafeIdent(a, false)
|
||||||
if buf != nil {
|
if buf != nil {
|
||||||
idu = c.storeIdent(a, buf)
|
idu = ic.storeIdent(a, buf)
|
||||||
c.putIdentBuf(buf)
|
ic.putIdentBuf(buf)
|
||||||
}
|
}
|
||||||
return idu
|
return idu
|
||||||
}
|
}
|
||||||
@@ -613,17 +728,17 @@ func (c *Cache) Ident(a Artifact) unique.Handle[ID] {
|
|||||||
// unsafeIdent implements Ident but returns the underlying buffer for a newly
|
// unsafeIdent implements Ident but returns the underlying buffer for a newly
|
||||||
// computed identifier. Callers must return this buffer to identPool. encodeKind
|
// computed identifier. Callers must return this buffer to identPool. encodeKind
|
||||||
// is only a hint, kind may still be encoded in the buffer.
|
// is only a hint, kind may still be encoded in the buffer.
|
||||||
func (c *Cache) unsafeIdent(a Artifact, encodeKind bool) (
|
func (ic *irCache) unsafeIdent(a Artifact, encodeKind bool) (
|
||||||
buf *extIdent,
|
buf *extIdent,
|
||||||
idu unique.Handle[ID],
|
idu unique.Handle[ID],
|
||||||
) {
|
) {
|
||||||
if id, ok := c.artifact.Load(a); ok {
|
if id, ok := ic.artifact.Load(a); ok {
|
||||||
idu = id.(unique.Handle[ID])
|
idu = id.(unique.Handle[ID])
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if ki, ok := a.(KnownIdent); ok {
|
if ki, ok := a.(KnownIdent); ok {
|
||||||
buf = c.getIdentBuf()
|
buf = ic.getIdentBuf()
|
||||||
if encodeKind {
|
if encodeKind {
|
||||||
binary.LittleEndian.PutUint64(buf[:], uint64(a.Kind()))
|
binary.LittleEndian.PutUint64(buf[:], uint64(a.Kind()))
|
||||||
}
|
}
|
||||||
@@ -631,9 +746,9 @@ func (c *Cache) unsafeIdent(a Artifact, encodeKind bool) (
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
buf = c.getIdentBuf()
|
buf = ic.getIdentBuf()
|
||||||
h := sha512.New384()
|
h := sha512.New384()
|
||||||
if err := c.Encode(h, a); err != nil {
|
if err := ic.Encode(h, a); err != nil {
|
||||||
// unreachable
|
// unreachable
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
@@ -1002,7 +1117,11 @@ func (c *Cache) Scrub(checks int) error {
|
|||||||
// loadOrStoreIdent attempts to load a cached [Artifact] by its identifier or
|
// loadOrStoreIdent attempts to load a cached [Artifact] by its identifier or
|
||||||
// wait for a pending [Artifact] to cure. If neither is possible, the current
|
// wait for a pending [Artifact] to cure. If neither is possible, the current
|
||||||
// identifier is stored in identPending and a non-nil channel is returned.
|
// identifier is stored in identPending and a non-nil channel is returned.
|
||||||
|
//
|
||||||
|
// Since identErr is treated as grow-only, loadOrStoreIdent must not be entered
|
||||||
|
// without holding a read lock on abortMu.
|
||||||
func (c *Cache) loadOrStoreIdent(id unique.Handle[ID]) (
|
func (c *Cache) loadOrStoreIdent(id unique.Handle[ID]) (
|
||||||
|
ctx context.Context,
|
||||||
done chan<- struct{},
|
done chan<- struct{},
|
||||||
checksum unique.Handle[Checksum],
|
checksum unique.Handle[Checksum],
|
||||||
err error,
|
err error,
|
||||||
@@ -1019,20 +1138,23 @@ func (c *Cache) loadOrStoreIdent(id unique.Handle[ID]) (
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
var notify <-chan struct{}
|
var pending *pendingCure
|
||||||
if notify, ok = c.identPending[id]; ok {
|
if pending, ok = c.identPending[id]; ok {
|
||||||
c.identMu.Unlock()
|
c.identMu.Unlock()
|
||||||
<-notify
|
<-pending.done
|
||||||
c.identMu.RLock()
|
c.identMu.RLock()
|
||||||
if checksum, ok = c.ident[id]; !ok {
|
if checksum, ok = c.ident[id]; !ok {
|
||||||
err = c.identErr[id]
|
err = pending.err
|
||||||
}
|
}
|
||||||
c.identMu.RUnlock()
|
c.identMu.RUnlock()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
d := make(chan struct{})
|
d := make(chan struct{})
|
||||||
c.identPending[id] = d
|
pending = &pendingCure{done: d}
|
||||||
|
ctx, pending.cancel = context.WithCancel(c.toplevel.Load().ctx)
|
||||||
|
c.wg.Add(1)
|
||||||
|
c.identPending[id] = pending
|
||||||
c.identMu.Unlock()
|
c.identMu.Unlock()
|
||||||
done = d
|
done = d
|
||||||
return
|
return
|
||||||
@@ -1048,21 +1170,62 @@ func (c *Cache) finaliseIdent(
|
|||||||
) {
|
) {
|
||||||
c.identMu.Lock()
|
c.identMu.Lock()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
c.identPending[id].err = err
|
||||||
c.identErr[id] = err
|
c.identErr[id] = err
|
||||||
} else {
|
} else {
|
||||||
c.ident[id] = checksum
|
c.ident[id] = checksum
|
||||||
}
|
}
|
||||||
delete(c.identPending, id)
|
delete(c.identPending, id)
|
||||||
c.identMu.Unlock()
|
c.identMu.Unlock()
|
||||||
|
c.wg.Done()
|
||||||
|
|
||||||
close(done)
|
close(done)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Done returns a channel that is closed when the ongoing cure of an [Artifact]
|
||||||
|
// referred to by the specified identifier completes. Done may return nil if
|
||||||
|
// no ongoing cure of the specified identifier exists.
|
||||||
|
func (c *Cache) Done(id unique.Handle[ID]) <-chan struct{} {
|
||||||
|
c.identMu.RLock()
|
||||||
|
pending, ok := c.identPending[id]
|
||||||
|
c.identMu.RUnlock()
|
||||||
|
if !ok || pending == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return pending.done
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cancel cancels the ongoing cure of an [Artifact] referred to by the specified
|
||||||
|
// identifier. Cancel returns whether the [context.CancelFunc] has been killed.
|
||||||
|
// Cancel returns after the cure is complete.
|
||||||
|
func (c *Cache) Cancel(id unique.Handle[ID]) bool {
|
||||||
|
c.identMu.RLock()
|
||||||
|
pending, ok := c.identPending[id]
|
||||||
|
c.identMu.RUnlock()
|
||||||
|
if !ok || pending == nil || pending.cancel == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
pending.cancel()
|
||||||
|
<-pending.done
|
||||||
|
|
||||||
|
c.abortMu.Lock()
|
||||||
|
c.identMu.Lock()
|
||||||
|
delete(c.identErr, id)
|
||||||
|
c.identMu.Unlock()
|
||||||
|
c.abortMu.Unlock()
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
// openFile tries to load [FileArtifact] from [Cache], and if that fails,
|
// openFile tries to load [FileArtifact] from [Cache], and if that fails,
|
||||||
// obtains it via [FileArtifact.Cure] instead. Notably, it does not cure
|
// obtains it via [FileArtifact.Cure] instead. Notably, it does not cure
|
||||||
// [FileArtifact] to the filesystem. If err is nil, the caller is responsible
|
// [FileArtifact] to the filesystem. If err is nil, the caller is responsible
|
||||||
// for closing the resulting [io.ReadCloser].
|
// for closing the resulting [io.ReadCloser].
|
||||||
func (c *Cache) openFile(f FileArtifact) (r io.ReadCloser, err error) {
|
//
|
||||||
|
// The context must originate from loadOrStoreIdent to enable cancellation.
|
||||||
|
func (c *Cache) openFile(
|
||||||
|
ctx context.Context,
|
||||||
|
f FileArtifact,
|
||||||
|
) (r io.ReadCloser, err error) {
|
||||||
if kc, ok := f.(KnownChecksum); c.flags&CAssumeChecksum != 0 && ok {
|
if kc, ok := f.(KnownChecksum); c.flags&CAssumeChecksum != 0 && ok {
|
||||||
c.checksumMu.RLock()
|
c.checksumMu.RLock()
|
||||||
r, err = os.Open(c.base.Append(
|
r, err = os.Open(c.base.Append(
|
||||||
@@ -1093,7 +1256,7 @@ func (c *Cache) openFile(f FileArtifact) (r io.ReadCloser, err error) {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
return f.Cure(&RContext{common{c}})
|
return f.Cure(&RContext{common{ctx, c}})
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -1241,12 +1404,11 @@ func (c *Cache) Cure(a Artifact) (
|
|||||||
checksum unique.Handle[Checksum],
|
checksum unique.Handle[Checksum],
|
||||||
err error,
|
err error,
|
||||||
) {
|
) {
|
||||||
select {
|
c.abortMu.RLock()
|
||||||
case <-c.ctx.Done():
|
defer c.abortMu.RUnlock()
|
||||||
err = c.ctx.Err()
|
|
||||||
return
|
|
||||||
|
|
||||||
default:
|
if err = c.toplevel.Load().ctx.Err(); err != nil {
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
return c.cure(a, true)
|
return c.cure(a, true)
|
||||||
@@ -1332,15 +1494,16 @@ func (c *Cache) enterCure(a Artifact, curesExempt bool) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ctx := c.toplevel.Load().ctx
|
||||||
select {
|
select {
|
||||||
case c.cures <- struct{}{}:
|
case c.cures <- struct{}{}:
|
||||||
return nil
|
return nil
|
||||||
|
|
||||||
case <-c.ctx.Done():
|
case <-ctx.Done():
|
||||||
if a.IsExclusive() {
|
if a.IsExclusive() {
|
||||||
c.exclMu.Unlock()
|
c.exclMu.Unlock()
|
||||||
}
|
}
|
||||||
return c.ctx.Err()
|
return ctx.Err()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1438,7 +1601,8 @@ func (r *RContext) NewMeasuredReader(
|
|||||||
return r.cache.newMeasuredReader(rc, checksum)
|
return r.cache.newMeasuredReader(rc, checksum)
|
||||||
}
|
}
|
||||||
|
|
||||||
// cure implements Cure without checking the full dependency graph.
|
// cure implements Cure without acquiring a read lock on abortMu. cure must not
|
||||||
|
// be entered during Abort.
|
||||||
func (c *Cache) cure(a Artifact, curesExempt bool) (
|
func (c *Cache) cure(a Artifact, curesExempt bool) (
|
||||||
pathname *check.Absolute,
|
pathname *check.Absolute,
|
||||||
checksum unique.Handle[Checksum],
|
checksum unique.Handle[Checksum],
|
||||||
@@ -1457,8 +1621,11 @@ func (c *Cache) cure(a Artifact, curesExempt bool) (
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
var done chan<- struct{}
|
var (
|
||||||
done, checksum, err = c.loadOrStoreIdent(id)
|
ctx context.Context
|
||||||
|
done chan<- struct{}
|
||||||
|
)
|
||||||
|
ctx, done, checksum, err = c.loadOrStoreIdent(id)
|
||||||
if done == nil {
|
if done == nil {
|
||||||
return
|
return
|
||||||
} else {
|
} else {
|
||||||
@@ -1571,7 +1738,7 @@ func (c *Cache) cure(a Artifact, curesExempt bool) (
|
|||||||
if err = c.enterCure(a, curesExempt); err != nil {
|
if err = c.enterCure(a, curesExempt); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
r, err = f.Cure(&RContext{common{c}})
|
r, err = f.Cure(&RContext{common{ctx, c}})
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if checksumPathname == nil || c.flags&CValidateKnown != 0 {
|
if checksumPathname == nil || c.flags&CValidateKnown != 0 {
|
||||||
h := sha512.New384()
|
h := sha512.New384()
|
||||||
@@ -1651,7 +1818,7 @@ func (c *Cache) cure(a Artifact, curesExempt bool) (
|
|||||||
c.base.Append(dirWork, ids),
|
c.base.Append(dirWork, ids),
|
||||||
c.base.Append(dirTemp, ids),
|
c.base.Append(dirTemp, ids),
|
||||||
ids, nil, nil, nil,
|
ids, nil, nil, nil,
|
||||||
common{c},
|
common{ctx, c},
|
||||||
}
|
}
|
||||||
switch ca := a.(type) {
|
switch ca := a.(type) {
|
||||||
case TrivialArtifact:
|
case TrivialArtifact:
|
||||||
@@ -1802,23 +1969,65 @@ func (c *Cache) OpenStatus(a Artifact) (r io.ReadSeekCloser, err error) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Abort cancels all pending cures and waits for them to clean up, but does not
|
||||||
|
// close the cache.
|
||||||
|
func (c *Cache) Abort() {
|
||||||
|
c.closeMu.Lock()
|
||||||
|
defer c.closeMu.Unlock()
|
||||||
|
|
||||||
|
if c.closed {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.toplevel.Load().cancel()
|
||||||
|
c.abortMu.Lock()
|
||||||
|
defer c.abortMu.Unlock()
|
||||||
|
|
||||||
|
// holding abortMu, identPending stays empty
|
||||||
|
c.wg.Wait()
|
||||||
|
c.identMu.Lock()
|
||||||
|
c.toplevel.Store(newToplevel(c.parent))
|
||||||
|
clear(c.identErr)
|
||||||
|
c.identMu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
// Close cancels all pending cures and waits for them to clean up.
|
// Close cancels all pending cures and waits for them to clean up.
|
||||||
func (c *Cache) Close() {
|
func (c *Cache) Close() {
|
||||||
c.closeOnce.Do(func() {
|
c.closeMu.Lock()
|
||||||
c.cancel()
|
defer c.closeMu.Unlock()
|
||||||
c.wg.Wait()
|
|
||||||
close(c.cures)
|
if c.closed {
|
||||||
c.unlock()
|
return
|
||||||
})
|
}
|
||||||
|
|
||||||
|
c.closed = true
|
||||||
|
c.toplevel.Load().cancel()
|
||||||
|
c.wg.Wait()
|
||||||
|
close(c.cures)
|
||||||
|
c.unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UnsupportedVariantError describes an on-disk cache with an extension variant
|
||||||
|
// identification string that differs from the value returned by [Extension].
|
||||||
|
type UnsupportedVariantError string
|
||||||
|
|
||||||
|
func (e UnsupportedVariantError) Error() string {
|
||||||
|
return "unsupported variant " + strconv.Quote(string(e))
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrWouldPromote is returned by [Open] if the [CPromoteVariant] bit is not
|
||||||
|
// set and the on-disk cache requires variant promotion.
|
||||||
|
ErrWouldPromote = errors.New("operation would promote unextended cache")
|
||||||
|
)
|
||||||
|
|
||||||
// Open returns the address of a newly opened instance of [Cache].
|
// Open returns the address of a newly opened instance of [Cache].
|
||||||
//
|
//
|
||||||
// Concurrent cures of a [FloodArtifact] dependency graph is limited to the
|
// Concurrent cures of a [FloodArtifact] dependency graph is limited to the
|
||||||
// caller-supplied value, however direct calls to [Cache.Cure] is not subject
|
// caller-supplied value, however direct calls to [Cache.Cure] is not subject
|
||||||
// to this limitation.
|
// to this limitation.
|
||||||
//
|
//
|
||||||
// A cures value of 0 or lower is equivalent to the value returned by
|
// A cures or jobs value of 0 or lower is equivalent to the value returned by
|
||||||
// [runtime.NumCPU].
|
// [runtime.NumCPU].
|
||||||
//
|
//
|
||||||
// A successful call to Open guarantees exclusive access to the on-filesystem
|
// A successful call to Open guarantees exclusive access to the on-filesystem
|
||||||
@@ -1828,10 +2037,10 @@ func (c *Cache) Close() {
|
|||||||
func Open(
|
func Open(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
msg message.Msg,
|
msg message.Msg,
|
||||||
flags, cures int,
|
flags, cures, jobs int,
|
||||||
base *check.Absolute,
|
base *check.Absolute,
|
||||||
) (*Cache, error) {
|
) (*Cache, error) {
|
||||||
return open(ctx, msg, flags, cures, base, true)
|
return open(ctx, msg, flags, cures, jobs, base, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
// open implements Open but allows omitting the [lockedfile] lock when called
|
// open implements Open but allows omitting the [lockedfile] lock when called
|
||||||
@@ -1839,13 +2048,24 @@ func Open(
|
|||||||
func open(
|
func open(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
msg message.Msg,
|
msg message.Msg,
|
||||||
flags, cures int,
|
flags, cures, jobs int,
|
||||||
base *check.Absolute,
|
base *check.Absolute,
|
||||||
lock bool,
|
lock bool,
|
||||||
) (*Cache, error) {
|
) (*Cache, error) {
|
||||||
|
openMu.Lock()
|
||||||
|
defer openMu.Unlock()
|
||||||
|
opened = true
|
||||||
|
|
||||||
|
if extension == "" && len(irArtifact) != int(_kindEnd) {
|
||||||
|
panic("attempting to open cache with incomplete variant setup")
|
||||||
|
}
|
||||||
|
|
||||||
if cures < 1 {
|
if cures < 1 {
|
||||||
cures = runtime.NumCPU()
|
cures = runtime.NumCPU()
|
||||||
}
|
}
|
||||||
|
if jobs < 1 {
|
||||||
|
jobs = runtime.NumCPU()
|
||||||
|
}
|
||||||
|
|
||||||
for _, name := range []string{
|
for _, name := range []string{
|
||||||
dirIdentifier,
|
dirIdentifier,
|
||||||
@@ -1853,29 +2073,34 @@ func open(
|
|||||||
dirStatus,
|
dirStatus,
|
||||||
dirWork,
|
dirWork,
|
||||||
} {
|
} {
|
||||||
if err := os.MkdirAll(base.Append(name).String(), 0700); err != nil &&
|
if err := os.MkdirAll(
|
||||||
!errors.Is(err, os.ErrExist) {
|
base.Append(name).String(),
|
||||||
|
0700,
|
||||||
|
); err != nil && !errors.Is(err, os.ErrExist) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
c := Cache{
|
c := Cache{
|
||||||
|
parent: ctx,
|
||||||
|
|
||||||
cures: make(chan struct{}, cures),
|
cures: make(chan struct{}, cures),
|
||||||
flags: flags,
|
flags: flags,
|
||||||
|
jobs: jobs,
|
||||||
|
|
||||||
msg: msg,
|
msg: msg,
|
||||||
base: base,
|
base: base,
|
||||||
|
|
||||||
identPool: sync.Pool{New: func() any { return new(extIdent) }},
|
irCache: zeroIRCache(),
|
||||||
|
|
||||||
ident: make(map[unique.Handle[ID]]unique.Handle[Checksum]),
|
ident: make(map[unique.Handle[ID]]unique.Handle[Checksum]),
|
||||||
identErr: make(map[unique.Handle[ID]]error),
|
identErr: make(map[unique.Handle[ID]]error),
|
||||||
identPending: make(map[unique.Handle[ID]]<-chan struct{}),
|
identPending: make(map[unique.Handle[ID]]*pendingCure),
|
||||||
|
|
||||||
brPool: sync.Pool{New: func() any { return new(bufio.Reader) }},
|
brPool: sync.Pool{New: func() any { return new(bufio.Reader) }},
|
||||||
bwPool: sync.Pool{New: func() any { return new(bufio.Writer) }},
|
bwPool: sync.Pool{New: func() any { return new(bufio.Writer) }},
|
||||||
}
|
}
|
||||||
c.ctx, c.cancel = context.WithCancel(ctx)
|
c.toplevel.Store(newToplevel(ctx))
|
||||||
|
|
||||||
if lock || !testing.Testing() {
|
if lock || !testing.Testing() {
|
||||||
if unlock, err := lockedfile.MutexAt(
|
if unlock, err := lockedfile.MutexAt(
|
||||||
@@ -1889,6 +2114,45 @@ func open(
|
|||||||
c.unlock = func() {}
|
c.unlock = func() {}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variantPath := base.Append(fileVariant).String()
|
||||||
|
if p, err := os.ReadFile(variantPath); err != nil {
|
||||||
|
if !errors.Is(err, os.ErrNotExist) {
|
||||||
|
c.unlock()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// nonexistence implies newly created cache, or a cache predating
|
||||||
|
// variant identification strings, in which case it is silently promoted
|
||||||
|
if err = os.WriteFile(
|
||||||
|
variantPath,
|
||||||
|
[]byte(extension),
|
||||||
|
0400,
|
||||||
|
); err != nil {
|
||||||
|
c.unlock()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else if s := string(p); s == "" {
|
||||||
|
if extension != "" {
|
||||||
|
if flags&CPromoteVariant == 0 {
|
||||||
|
c.unlock()
|
||||||
|
return nil, ErrWouldPromote
|
||||||
|
}
|
||||||
|
if err = os.WriteFile(
|
||||||
|
variantPath,
|
||||||
|
[]byte(extension),
|
||||||
|
0400,
|
||||||
|
); err != nil {
|
||||||
|
c.unlock()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if !ValidExtension(s) {
|
||||||
|
c.unlock()
|
||||||
|
return nil, ErrInvalidExtension
|
||||||
|
} else if s != extension {
|
||||||
|
c.unlock()
|
||||||
|
return nil, UnsupportedVariantError(s)
|
||||||
|
}
|
||||||
|
|
||||||
return &c, nil
|
return &c, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -16,6 +16,7 @@ import (
|
|||||||
"path/filepath"
|
"path/filepath"
|
||||||
"reflect"
|
"reflect"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"sync"
|
||||||
"syscall"
|
"syscall"
|
||||||
"testing"
|
"testing"
|
||||||
"unique"
|
"unique"
|
||||||
@@ -35,11 +36,47 @@ import (
|
|||||||
func unsafeOpen(
|
func unsafeOpen(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
msg message.Msg,
|
msg message.Msg,
|
||||||
flags, cures int,
|
flags, cures, jobs int,
|
||||||
base *check.Absolute,
|
base *check.Absolute,
|
||||||
lock bool,
|
lock bool,
|
||||||
) (*pkg.Cache, error)
|
) (*pkg.Cache, error)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// extension is a string uniquely identifying a set of custom [Artifact]
|
||||||
|
// implementations registered by calling [Register].
|
||||||
|
//
|
||||||
|
//go:linkname extension hakurei.app/internal/pkg.extension
|
||||||
|
extension string
|
||||||
|
|
||||||
|
// opened is false if [Open] was never called.
|
||||||
|
//
|
||||||
|
//go:linkname opened hakurei.app/internal/pkg.opened
|
||||||
|
opened bool
|
||||||
|
|
||||||
|
// irArtifact refers to artifact IR interpretation functions and must not be
|
||||||
|
// written to directly.
|
||||||
|
//
|
||||||
|
//go:linkname irArtifact hakurei.app/internal/pkg.irArtifact
|
||||||
|
irArtifact map[pkg.Kind]pkg.IRReadFunc
|
||||||
|
)
|
||||||
|
|
||||||
|
// newRContext returns the address of a new [pkg.RContext] unsafely created for
|
||||||
|
// the specified [testing.TB].
|
||||||
|
func newRContext(tb testing.TB, c *pkg.Cache) *pkg.RContext {
|
||||||
|
var r pkg.RContext
|
||||||
|
rContextVal := reflect.ValueOf(&r).Elem().FieldByName("ctx")
|
||||||
|
reflect.NewAt(
|
||||||
|
rContextVal.Type(),
|
||||||
|
unsafe.Pointer(rContextVal.UnsafeAddr()),
|
||||||
|
).Elem().Set(reflect.ValueOf(tb.Context()))
|
||||||
|
rCacheVal := reflect.ValueOf(&r).Elem().FieldByName("cache")
|
||||||
|
reflect.NewAt(
|
||||||
|
rCacheVal.Type(),
|
||||||
|
unsafe.Pointer(rCacheVal.UnsafeAddr()),
|
||||||
|
).Elem().Set(reflect.ValueOf(c))
|
||||||
|
return &r
|
||||||
|
}
|
||||||
|
|
||||||
func TestMain(m *testing.M) { container.TryArgv0(nil); os.Exit(m.Run()) }
|
func TestMain(m *testing.M) { container.TryArgv0(nil); os.Exit(m.Run()) }
|
||||||
|
|
||||||
// overrideIdent overrides the ID method of [Artifact].
|
// overrideIdent overrides the ID method of [Artifact].
|
||||||
@@ -230,7 +267,7 @@ func TestIdent(t *testing.T) {
|
|||||||
var cache *pkg.Cache
|
var cache *pkg.Cache
|
||||||
if a, err := check.NewAbs(t.TempDir()); err != nil {
|
if a, err := check.NewAbs(t.TempDir()); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
} else if cache, err = pkg.Open(t.Context(), msg, 0, 0, a); err != nil {
|
} else if cache, err = pkg.Open(t.Context(), msg, 0, 0, 0, a); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
t.Cleanup(cache.Close)
|
t.Cleanup(cache.Close)
|
||||||
@@ -304,7 +341,7 @@ func checkWithCache(t *testing.T, testCases []cacheTestCase) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var scrubFunc func() error // scrub after hashing
|
var scrubFunc func() error // scrub after hashing
|
||||||
if c, err := pkg.Open(t.Context(), msg, flags, 1<<4, base); err != nil {
|
if c, err := pkg.Open(t.Context(), msg, flags, 1<<4, 0, base); err != nil {
|
||||||
t.Fatalf("Open: error = %v", err)
|
t.Fatalf("Open: error = %v", err)
|
||||||
} else {
|
} else {
|
||||||
t.Cleanup(c.Close)
|
t.Cleanup(c.Close)
|
||||||
@@ -324,9 +361,20 @@ func checkWithCache(t *testing.T, testCases []cacheTestCase) {
|
|||||||
restoreTemp = true
|
restoreTemp = true
|
||||||
}
|
}
|
||||||
|
|
||||||
// destroy lock file to avoid changing cache checksums
|
// destroy lock and variant file to avoid changing cache checksums
|
||||||
if err := os.Remove(base.Append("lock").String()); err != nil {
|
for _, s := range []string{
|
||||||
t.Fatal(err)
|
"lock",
|
||||||
|
"variant",
|
||||||
|
} {
|
||||||
|
pathname := base.Append(s)
|
||||||
|
if p, err := os.ReadFile(pathname.String()); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if len(p) != 0 {
|
||||||
|
t.Fatalf("file %q: %q", s, string(p))
|
||||||
|
}
|
||||||
|
if err := os.Remove(pathname.String()); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// destroy non-deterministic status files
|
// destroy non-deterministic status files
|
||||||
@@ -606,7 +654,7 @@ func TestCache(t *testing.T) {
|
|||||||
if c0, err := unsafeOpen(
|
if c0, err := unsafeOpen(
|
||||||
t.Context(),
|
t.Context(),
|
||||||
message.New(nil),
|
message.New(nil),
|
||||||
0, 0, base, false,
|
0, 0, 0, base, false,
|
||||||
); err != nil {
|
); err != nil {
|
||||||
t.Fatalf("open: error = %v", err)
|
t.Fatalf("open: error = %v", err)
|
||||||
} else {
|
} else {
|
||||||
@@ -876,17 +924,69 @@ func TestCache(t *testing.T) {
|
|||||||
t.Fatalf("Scrub: error = %#v, want %#v", err, wantErrScrub)
|
t.Fatalf("Scrub: error = %#v, want %#v", err, wantErrScrub)
|
||||||
}
|
}
|
||||||
|
|
||||||
identPendingVal := reflect.ValueOf(c).Elem().FieldByName("identPending")
|
notify := c.Done(unique.Make(pkg.ID{0xff}))
|
||||||
identPending := reflect.NewAt(
|
|
||||||
identPendingVal.Type(),
|
|
||||||
unsafe.Pointer(identPendingVal.UnsafeAddr()),
|
|
||||||
).Elem().Interface().(map[unique.Handle[pkg.ID]]<-chan struct{})
|
|
||||||
notify := identPending[unique.Make(pkg.ID{0xff})]
|
|
||||||
go close(n)
|
go close(n)
|
||||||
<-notify
|
if notify != nil {
|
||||||
|
<-notify
|
||||||
|
}
|
||||||
|
for c.Done(unique.Make(pkg.ID{0xff})) != nil {
|
||||||
|
}
|
||||||
<-wCureDone
|
<-wCureDone
|
||||||
}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C")},
|
}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C")},
|
||||||
|
|
||||||
|
{"cancel abort block", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
defer wg.Wait()
|
||||||
|
|
||||||
|
var started sync.WaitGroup
|
||||||
|
defer started.Wait()
|
||||||
|
|
||||||
|
blockCures := func(d byte, e stub.UniqueError, n int) {
|
||||||
|
started.Add(n)
|
||||||
|
for i := range n {
|
||||||
|
wg.Go(func() {
|
||||||
|
if _, _, err := c.Cure(overrideIdent{pkg.ID{d, byte(i)}, &stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
started.Done()
|
||||||
|
<-t.Unwrap().Done()
|
||||||
|
return e + stub.UniqueError(i)
|
||||||
|
},
|
||||||
|
}}); !reflect.DeepEqual(err, e+stub.UniqueError(i)) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
started.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
blockCures(0xfd, 0xbad, 16)
|
||||||
|
c.Abort()
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
blockCures(0xfd, 0xcafe, 16)
|
||||||
|
c.Abort()
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
blockCures(0xff, 0xbad, 1)
|
||||||
|
if !c.Cancel(unique.Make(pkg.ID{0xff})) {
|
||||||
|
t.Fatal("missed cancellation")
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
blockCures(0xff, 0xcafe, 1)
|
||||||
|
if !c.Cancel(unique.Make(pkg.ID{0xff})) {
|
||||||
|
t.Fatal("missed cancellation")
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
for c.Cancel(unique.Make(pkg.ID{0xff})) {
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Close()
|
||||||
|
c.Abort()
|
||||||
|
}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C")},
|
||||||
|
|
||||||
{"no assume checksum", 0, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
{"no assume checksum", 0, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
makeGarbage := func(work *check.Absolute, wantErr error) error {
|
makeGarbage := func(work *check.Absolute, wantErr error) error {
|
||||||
if err := os.Mkdir(work.String(), 0700); err != nil {
|
if err := os.Mkdir(work.String(), 0700); err != nil {
|
||||||
@@ -1031,6 +1131,10 @@ func TestErrors(t *testing.T) {
|
|||||||
Want: pkg.IRKindIdent,
|
Want: pkg.IRKindIdent,
|
||||||
Ancillary: 0xcafe,
|
Ancillary: 0xcafe,
|
||||||
}, "got invalid kind 48879 IR value (0xcafe) instead of ident"},
|
}, "got invalid kind 48879 IR value (0xcafe) instead of ident"},
|
||||||
|
|
||||||
|
{"UnsupportedVariantError", pkg.UnsupportedVariantError(
|
||||||
|
"rosa",
|
||||||
|
), `unsupported variant "rosa"`},
|
||||||
}
|
}
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
@@ -1239,6 +1343,8 @@ func (a earlyFailureF) Cure(*pkg.FContext) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestDependencyCureErrorEarly(t *testing.T) {
|
func TestDependencyCureErrorEarly(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
checkWithCache(t, []cacheTestCase{
|
checkWithCache(t, []cacheTestCase{
|
||||||
{"early", 0, nil, func(t *testing.T, _ *check.Absolute, c *pkg.Cache) {
|
{"early", 0, nil, func(t *testing.T, _ *check.Absolute, c *pkg.Cache) {
|
||||||
_, _, err := c.Cure(earlyFailureF(8))
|
_, _, err := c.Cure(earlyFailureF(8))
|
||||||
@@ -1249,7 +1355,7 @@ func TestDependencyCureErrorEarly(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNew(t *testing.T) {
|
func TestOpen(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
t.Run("nonexistent", func(t *testing.T) {
|
t.Run("nonexistent", func(t *testing.T) {
|
||||||
@@ -1263,7 +1369,7 @@ func TestNew(t *testing.T) {
|
|||||||
if _, err := pkg.Open(
|
if _, err := pkg.Open(
|
||||||
t.Context(),
|
t.Context(),
|
||||||
message.New(nil),
|
message.New(nil),
|
||||||
0, 0, check.MustAbs(container.Nonexistent),
|
0, 0, 0, check.MustAbs(container.Nonexistent),
|
||||||
); !reflect.DeepEqual(err, wantErr) {
|
); !reflect.DeepEqual(err, wantErr) {
|
||||||
t.Errorf("Open: error = %#v, want %#v", err, wantErr)
|
t.Errorf("Open: error = %#v, want %#v", err, wantErr)
|
||||||
}
|
}
|
||||||
@@ -1291,9 +1397,225 @@ func TestNew(t *testing.T) {
|
|||||||
if _, err := pkg.Open(
|
if _, err := pkg.Open(
|
||||||
t.Context(),
|
t.Context(),
|
||||||
message.New(nil),
|
message.New(nil),
|
||||||
0, 0, tempDir.Append("cache"),
|
0, 0, 0, tempDir.Append("cache"),
|
||||||
); !reflect.DeepEqual(err, wantErr) {
|
); !reflect.DeepEqual(err, wantErr) {
|
||||||
t.Errorf("Open: error = %#v, want %#v", err, wantErr)
|
t.Errorf("Open: error = %#v, want %#v", err, wantErr)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestExtensionRegister(t *testing.T) {
|
||||||
|
extensionOld := extension
|
||||||
|
openedOld := opened
|
||||||
|
t.Cleanup(func() { extension = extensionOld; opened = openedOld })
|
||||||
|
extension = ""
|
||||||
|
opened = false
|
||||||
|
|
||||||
|
t.Run("set", func(t *testing.T) {
|
||||||
|
t.Cleanup(func() { extension = "" })
|
||||||
|
|
||||||
|
const want = "rosa"
|
||||||
|
pkg.SetExtension(want)
|
||||||
|
if got := pkg.Extension(); got != want {
|
||||||
|
t.Fatalf("Extension: %q, want %q", got, want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("twice", func(t *testing.T) {
|
||||||
|
t.Cleanup(func() { extension = "" })
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
const wantPanic = "attempting to set extension twice"
|
||||||
|
if r := recover(); r != wantPanic {
|
||||||
|
t.Errorf("panic: %#v, want %q", r, wantPanic)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
pkg.SetExtension("rosa")
|
||||||
|
pkg.SetExtension("rosa")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("invalid", func(t *testing.T) {
|
||||||
|
defer func() {
|
||||||
|
var wantPanic = pkg.ErrInvalidExtension
|
||||||
|
if r := recover(); r != wantPanic {
|
||||||
|
t.Errorf("panic: %#v, want %#v", r, wantPanic)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
pkg.SetExtension(" ")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("opened", func(t *testing.T) {
|
||||||
|
t.Cleanup(func() { opened = false })
|
||||||
|
|
||||||
|
if _, err := pkg.Open(
|
||||||
|
t.Context(),
|
||||||
|
message.New(log.Default()),
|
||||||
|
0, 0, 0,
|
||||||
|
check.MustAbs(container.Nonexistent),
|
||||||
|
); !errors.Is(err, os.ErrNotExist) {
|
||||||
|
t.Fatalf("Open: error = %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("variant", func(t *testing.T) {
|
||||||
|
defer func() {
|
||||||
|
const wantPanic = "attempting to set extension after open"
|
||||||
|
if r := recover(); r != wantPanic {
|
||||||
|
t.Errorf("panic: %#v, want %q", r, wantPanic)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
pkg.SetExtension("rosa")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("register", func(t *testing.T) {
|
||||||
|
defer func() {
|
||||||
|
const wantPanic = "attempting to register after open"
|
||||||
|
if r := recover(); r != wantPanic {
|
||||||
|
t.Errorf("panic: %#v, want %q", r, wantPanic)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
pkg.Register(pkg.KindCustomOffset, nil)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("incomplete", func(t *testing.T) {
|
||||||
|
t.Cleanup(func() { delete(irArtifact, pkg.KindCustomOffset) })
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
const wantPanic = "attempting to open cache with incomplete variant setup"
|
||||||
|
if r := recover(); r != wantPanic {
|
||||||
|
t.Errorf("panic: %#v, want %q", r, wantPanic)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
pkg.Register(pkg.KindCustomOffset, nil)
|
||||||
|
|
||||||
|
t.Cleanup(func() { opened = false })
|
||||||
|
_, _ = pkg.Open(nil, nil, 0, 0, 0, nil)
|
||||||
|
panic("unreachable")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("create", func(t *testing.T) {
|
||||||
|
t.Cleanup(func() { extension = "" })
|
||||||
|
const want = "rosa"
|
||||||
|
pkg.SetExtension(want)
|
||||||
|
|
||||||
|
base := check.MustAbs(t.TempDir())
|
||||||
|
t.Cleanup(func() { opened = false })
|
||||||
|
if c, err := pkg.Open(
|
||||||
|
t.Context(), nil,
|
||||||
|
0, 0, 0,
|
||||||
|
base,
|
||||||
|
); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else {
|
||||||
|
c.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
if got, err := os.ReadFile(base.Append("variant").String()); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if string(got) != want {
|
||||||
|
t.Fatalf("variant: %q", string(got))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("access", func(t *testing.T) {
|
||||||
|
base := check.MustAbs(t.TempDir())
|
||||||
|
t.Cleanup(func() { opened = false })
|
||||||
|
|
||||||
|
if err := os.WriteFile(base.Append("variant").String(), nil, 0); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
wantErr := &os.PathError{
|
||||||
|
Op: "open",
|
||||||
|
Path: base.Append("variant").String(),
|
||||||
|
Err: syscall.EACCES,
|
||||||
|
}
|
||||||
|
if _, err := pkg.Open(
|
||||||
|
t.Context(), nil,
|
||||||
|
0, 0, 0,
|
||||||
|
base,
|
||||||
|
); !reflect.DeepEqual(err, wantErr) {
|
||||||
|
t.Fatalf("Open: error = %v, want %v", err, wantErr)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("promote", func(t *testing.T) {
|
||||||
|
t.Cleanup(func() { extension = "" })
|
||||||
|
const want = "rosa"
|
||||||
|
pkg.SetExtension(want)
|
||||||
|
|
||||||
|
base := check.MustAbs(t.TempDir())
|
||||||
|
t.Cleanup(func() { opened = false })
|
||||||
|
|
||||||
|
variantPath := base.Append("variant")
|
||||||
|
if err := os.WriteFile(variantPath.String(), nil, 0600); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := pkg.Open(
|
||||||
|
t.Context(), nil,
|
||||||
|
0, 0, 0,
|
||||||
|
base,
|
||||||
|
); !reflect.DeepEqual(err, pkg.ErrWouldPromote) {
|
||||||
|
t.Fatalf("Open: error = %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if p, err := os.ReadFile(variantPath.String()); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if len(p) != 0 {
|
||||||
|
t.Fatalf("variant: %q", string(p))
|
||||||
|
}
|
||||||
|
|
||||||
|
if c, err := pkg.Open(
|
||||||
|
t.Context(), nil,
|
||||||
|
pkg.CPromoteVariant, 0, 0,
|
||||||
|
base,
|
||||||
|
); err != nil {
|
||||||
|
t.Fatalf("Open: error = %v", err)
|
||||||
|
} else {
|
||||||
|
c.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
if p, err := os.ReadFile(variantPath.String()); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if string(p) != want {
|
||||||
|
t.Fatalf("variant: %q, want %q", string(p), want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("open invalid", func(t *testing.T) {
|
||||||
|
base := check.MustAbs(t.TempDir())
|
||||||
|
t.Cleanup(func() { opened = false })
|
||||||
|
|
||||||
|
variantPath := base.Append("variant")
|
||||||
|
if err := os.WriteFile(variantPath.String(), make([]byte, 129), 0400); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := pkg.Open(
|
||||||
|
t.Context(), nil,
|
||||||
|
0, 0, 0,
|
||||||
|
base,
|
||||||
|
); !reflect.DeepEqual(err, pkg.ErrInvalidExtension) {
|
||||||
|
t.Fatalf("Open: error = %v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("unsupported", func(t *testing.T) {
|
||||||
|
base := check.MustAbs(t.TempDir())
|
||||||
|
t.Cleanup(func() { opened = false })
|
||||||
|
|
||||||
|
variantPath := base.Append("variant")
|
||||||
|
if err := os.WriteFile(variantPath.String(), []byte("rosa"), 0400); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := pkg.Open(
|
||||||
|
t.Context(), nil,
|
||||||
|
0, 0, 0,
|
||||||
|
base,
|
||||||
|
); !reflect.DeepEqual(err, pkg.UnsupportedVariantError("rosa")) {
|
||||||
|
t.Fatalf("Open: error = %v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|||||||
@@ -43,8 +43,7 @@ var _ fmt.Stringer = new(tarArtifactNamed)
|
|||||||
func (a *tarArtifactNamed) String() string { return a.name + "-unpack" }
|
func (a *tarArtifactNamed) String() string { return a.name + "-unpack" }
|
||||||
|
|
||||||
// NewTar returns a new [Artifact] backed by the supplied [Artifact] and
|
// NewTar returns a new [Artifact] backed by the supplied [Artifact] and
|
||||||
// compression method. The source [Artifact] must be compatible with
|
// compression method. The source [Artifact] must be a [FileArtifact].
|
||||||
// [TContext.Open].
|
|
||||||
func NewTar(a Artifact, compression uint32) Artifact {
|
func NewTar(a Artifact, compression uint32) Artifact {
|
||||||
ta := tarArtifact{a, compression}
|
ta := tarArtifact{a, compression}
|
||||||
if s, ok := a.(fmt.Stringer); ok {
|
if s, ok := a.(fmt.Stringer); ok {
|
||||||
|
|||||||
48
internal/pkg/testdata/main.go
vendored
48
internal/pkg/testdata/main.go
vendored
@@ -9,10 +9,11 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"reflect"
|
"reflect"
|
||||||
|
"runtime"
|
||||||
"slices"
|
"slices"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"hakurei.app/check"
|
|
||||||
"hakurei.app/fhs"
|
"hakurei.app/fhs"
|
||||||
"hakurei.app/vfs"
|
"hakurei.app/vfs"
|
||||||
)
|
)
|
||||||
@@ -21,6 +22,10 @@ func main() {
|
|||||||
log.SetFlags(0)
|
log.SetFlags(0)
|
||||||
log.SetPrefix("testtool: ")
|
log.SetPrefix("testtool: ")
|
||||||
|
|
||||||
|
environ := slices.DeleteFunc(slices.Clone(os.Environ()), func(s string) bool {
|
||||||
|
return s == "CURE_JOBS="+strconv.Itoa(runtime.NumCPU())
|
||||||
|
})
|
||||||
|
|
||||||
var hostNet, layers, promote bool
|
var hostNet, layers, promote bool
|
||||||
if len(os.Args) == 2 && os.Args[0] == "testtool" {
|
if len(os.Args) == 2 && os.Args[0] == "testtool" {
|
||||||
switch os.Args[1] {
|
switch os.Args[1] {
|
||||||
@@ -48,15 +53,15 @@ func main() {
|
|||||||
|
|
||||||
var overlayRoot bool
|
var overlayRoot bool
|
||||||
wantEnv := []string{"HAKUREI_TEST=1"}
|
wantEnv := []string{"HAKUREI_TEST=1"}
|
||||||
if len(os.Environ()) == 2 {
|
if len(environ) == 2 {
|
||||||
overlayRoot = true
|
overlayRoot = true
|
||||||
if !layers && !promote {
|
if !layers && !promote {
|
||||||
log.SetPrefix("testtool(overlay root): ")
|
log.SetPrefix("testtool(overlay root): ")
|
||||||
}
|
}
|
||||||
wantEnv = []string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"}
|
wantEnv = []string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"}
|
||||||
}
|
}
|
||||||
if !slices.Equal(wantEnv, os.Environ()) {
|
if !slices.Equal(wantEnv, environ) {
|
||||||
log.Fatalf("Environ: %q, want %q", os.Environ(), wantEnv)
|
log.Fatalf("Environ: %q, want %q", environ, wantEnv)
|
||||||
}
|
}
|
||||||
|
|
||||||
var overlayWork bool
|
var overlayWork bool
|
||||||
@@ -153,43 +158,24 @@ func main() {
|
|||||||
m.Source != "overlay" || m.FsType != "overlay" {
|
m.Source != "overlay" || m.FsType != "overlay" {
|
||||||
log.Fatal("unexpected root mount entry")
|
log.Fatal("unexpected root mount entry")
|
||||||
}
|
}
|
||||||
var lowerdir string
|
var lowerdir []string
|
||||||
for _, o := range strings.Split(m.FsOptstr, ",") {
|
for _, o := range strings.Split(m.FsOptstr, ",") {
|
||||||
const lowerdirKey = "lowerdir="
|
const lowerdirKey = "lowerdir+="
|
||||||
if strings.HasPrefix(o, lowerdirKey) {
|
if strings.HasPrefix(o, lowerdirKey) {
|
||||||
lowerdir = o[len(lowerdirKey):]
|
lowerdir = append(lowerdir, o[len(lowerdirKey):])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !layers {
|
if !layers {
|
||||||
if filepath.Base(lowerdir) != checksumEmptyDir {
|
if len(lowerdir) != 1 || filepath.Base(lowerdir[0]) != checksumEmptyDir {
|
||||||
log.Fatal("unexpected artifact checksum")
|
log.Fatal("unexpected artifact checksum")
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
ident = "p1t_drXr34i-jZNuxDMLaMOdL6tZvQqhavNafGynGqxOZoXAUTSn7kqNh3Ovv3DT"
|
ident = "p1t_drXr34i-jZNuxDMLaMOdL6tZvQqhavNafGynGqxOZoXAUTSn7kqNh3Ovv3DT"
|
||||||
|
|
||||||
lowerdirsEscaped := strings.Split(lowerdir, ":")
|
if len(lowerdir) != 2 ||
|
||||||
lowerdirs := lowerdirsEscaped[:0]
|
filepath.Base(lowerdir[0]) != "MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU" ||
|
||||||
// ignore the option separator since it does not appear in ident
|
filepath.Base(lowerdir[1]) != "nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK" {
|
||||||
for i, e := range lowerdirsEscaped {
|
log.Fatalf("unexpected lowerdirs %s", strings.Join(lowerdir, ", "))
|
||||||
if len(e) > 0 &&
|
|
||||||
e[len(e)-1] == check.SpecialOverlayEscape[0] &&
|
|
||||||
(len(e) == 1 || e[len(e)-2] != check.SpecialOverlayEscape[0]) {
|
|
||||||
// ignore escaped pathname separator since it does not
|
|
||||||
// appear in ident
|
|
||||||
|
|
||||||
e = e[:len(e)-1]
|
|
||||||
if len(lowerdirsEscaped) != i {
|
|
||||||
lowerdirsEscaped[i+1] = e + lowerdirsEscaped[i+1]
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
lowerdirs = append(lowerdirs, e)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(lowerdirs) != 2 ||
|
|
||||||
filepath.Base(lowerdirs[0]) != "MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU" ||
|
|
||||||
filepath.Base(lowerdirs[1]) != "nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK" {
|
|
||||||
log.Fatalf("unexpected lowerdirs %s", strings.Join(lowerdirs, ", "))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
|||||||
@@ -7,10 +7,10 @@ func (t Toolchain) newAttr() (pkg.Artifact, string) {
|
|||||||
version = "2.5.2"
|
version = "2.5.2"
|
||||||
checksum = "YWEphrz6vg1sUMmHHVr1CRo53pFXRhq_pjN-AlG8UgwZK1y6m7zuDhxqJhD0SV0l"
|
checksum = "YWEphrz6vg1sUMmHHVr1CRo53pFXRhq_pjN-AlG8UgwZK1y6m7zuDhxqJhD0SV0l"
|
||||||
)
|
)
|
||||||
return t.NewPackage("attr", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("attr", version, newTar(
|
||||||
nil, "https://download.savannah.nongnu.org/releases/attr/"+
|
"https://download.savannah.nongnu.org/releases/attr/"+
|
||||||
"attr-"+version+".tar.gz",
|
"attr-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Patches: []KV{
|
Patches: []KV{
|
||||||
@@ -81,10 +81,10 @@ func (t Toolchain) newACL() (pkg.Artifact, string) {
|
|||||||
version = "2.3.2"
|
version = "2.3.2"
|
||||||
checksum = "-fY5nwH4K8ZHBCRXrzLdguPkqjKI6WIiGu4dBtrZ1o0t6AIU73w8wwJz_UyjIS0P"
|
checksum = "-fY5nwH4K8ZHBCRXrzLdguPkqjKI6WIiGu4dBtrZ1o0t6AIU73w8wwJz_UyjIS0P"
|
||||||
)
|
)
|
||||||
return t.NewPackage("acl", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("acl", version, newTar(
|
||||||
nil, "https://download.savannah.nongnu.org/releases/acl/"+
|
"https://download.savannah.nongnu.org/releases/acl/"+
|
||||||
"acl-"+version+".tar.gz",
|
"acl-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil, &MakeHelper{
|
), nil, &MakeHelper{
|
||||||
// makes assumptions about uid_map/gid_map
|
// makes assumptions about uid_map/gid_map
|
||||||
|
|||||||
@@ -16,9 +16,7 @@ import (
|
|||||||
type PArtifact int
|
type PArtifact int
|
||||||
|
|
||||||
const (
|
const (
|
||||||
LLVMCompilerRT PArtifact = iota
|
LLVM PArtifact = iota
|
||||||
LLVMRuntimes
|
|
||||||
LLVMClang
|
|
||||||
|
|
||||||
// EarlyInit is the Rosa OS init program.
|
// EarlyInit is the Rosa OS init program.
|
||||||
EarlyInit
|
EarlyInit
|
||||||
@@ -64,6 +62,7 @@ const (
|
|||||||
GenInitCPIO
|
GenInitCPIO
|
||||||
Gettext
|
Gettext
|
||||||
Git
|
Git
|
||||||
|
Glslang
|
||||||
GnuTLS
|
GnuTLS
|
||||||
Go
|
Go
|
||||||
Gperf
|
Gperf
|
||||||
@@ -73,16 +72,22 @@ const (
|
|||||||
HakureiDist
|
HakureiDist
|
||||||
IPTables
|
IPTables
|
||||||
Kmod
|
Kmod
|
||||||
|
LIT
|
||||||
|
LibX11
|
||||||
LibXau
|
LibXau
|
||||||
|
LibXext
|
||||||
Libbsd
|
Libbsd
|
||||||
Libcap
|
Libcap
|
||||||
|
Libdrm
|
||||||
Libev
|
Libev
|
||||||
Libexpat
|
Libexpat
|
||||||
Libffi
|
Libffi
|
||||||
Libgd
|
Libgd
|
||||||
|
Libglvnd
|
||||||
Libiconv
|
Libiconv
|
||||||
Libmd
|
Libmd
|
||||||
Libmnl
|
Libmnl
|
||||||
|
Libpciaccess
|
||||||
Libnftnl
|
Libnftnl
|
||||||
Libpsl
|
Libpsl
|
||||||
Libseccomp
|
Libseccomp
|
||||||
@@ -90,8 +95,10 @@ const (
|
|||||||
Libtool
|
Libtool
|
||||||
Libucontext
|
Libucontext
|
||||||
Libunistring
|
Libunistring
|
||||||
|
Libxshmfence
|
||||||
Libxml2
|
Libxml2
|
||||||
Libxslt
|
Libxslt
|
||||||
|
Libxtrans
|
||||||
M4
|
M4
|
||||||
MPC
|
MPC
|
||||||
MPFR
|
MPFR
|
||||||
@@ -119,22 +126,35 @@ const (
|
|||||||
PerlTermReadKey
|
PerlTermReadKey
|
||||||
PerlTextCharWidth
|
PerlTextCharWidth
|
||||||
PerlTextWrapI18N
|
PerlTextWrapI18N
|
||||||
PerlUnicodeGCString
|
PerlUnicodeLineBreak
|
||||||
PerlYAMLTiny
|
PerlYAMLTiny
|
||||||
PkgConfig
|
PkgConfig
|
||||||
Procps
|
Procps
|
||||||
Python
|
Python
|
||||||
|
PythonFlitCore
|
||||||
|
PythonHatchling
|
||||||
PythonIniConfig
|
PythonIniConfig
|
||||||
|
PythonMako
|
||||||
|
PythonMarkupSafe
|
||||||
PythonPackaging
|
PythonPackaging
|
||||||
|
PythonPathspec
|
||||||
PythonPluggy
|
PythonPluggy
|
||||||
PythonPyTest
|
PythonPyTest
|
||||||
|
PythonPyYAML
|
||||||
PythonPygments
|
PythonPygments
|
||||||
|
PythonSetuptools
|
||||||
|
PythonSetuptoolsSCM
|
||||||
|
PythonTroveClassifiers
|
||||||
|
PythonVCSVersioning
|
||||||
|
PythonWheel
|
||||||
QEMU
|
QEMU
|
||||||
Rdfind
|
Rdfind
|
||||||
Readline
|
Readline
|
||||||
Rsync
|
Rsync
|
||||||
Sed
|
Sed
|
||||||
Setuptools
|
SPIRVHeaders
|
||||||
|
SPIRVLLVMTranslator
|
||||||
|
SPIRVTools
|
||||||
SquashfsTools
|
SquashfsTools
|
||||||
Strace
|
Strace
|
||||||
TamaGo
|
TamaGo
|
||||||
@@ -148,19 +168,35 @@ const (
|
|||||||
WaylandProtocols
|
WaylandProtocols
|
||||||
XCB
|
XCB
|
||||||
XCBProto
|
XCBProto
|
||||||
Xproto
|
XDGDBusProxy
|
||||||
XZ
|
XZ
|
||||||
|
XorgProto
|
||||||
Zlib
|
Zlib
|
||||||
Zstd
|
Zstd
|
||||||
|
|
||||||
// PresetUnexportedStart is the first unexported preset.
|
// PresetUnexportedStart is the first unexported preset.
|
||||||
PresetUnexportedStart
|
PresetUnexportedStart
|
||||||
|
|
||||||
buildcatrust = iota - 1
|
llvmSource = iota - 1
|
||||||
|
// earlyCompilerRT is an early, standalone compiler-rt installation for the
|
||||||
|
// standalone runtimes build.
|
||||||
|
//
|
||||||
|
// earlyCompilerRT must only be loaded by [LLVM].
|
||||||
|
earlyCompilerRT
|
||||||
|
// earlyRuntimes is an early, standalone installation of LLVM runtimes to
|
||||||
|
// work around the cmake build system leaking the system LLVM installation
|
||||||
|
// when invoking the newly built toolchain.
|
||||||
|
//
|
||||||
|
// earlyRuntimes must only be loaded by [LLVM].
|
||||||
|
earlyRuntimes
|
||||||
|
|
||||||
|
buildcatrust
|
||||||
utilMacros
|
utilMacros
|
||||||
|
|
||||||
// Musl is a standalone libc that does not depend on the toolchain.
|
// Musl is a standalone libc that does not depend on the toolchain.
|
||||||
Musl
|
Musl
|
||||||
|
// muslHeaders is a system installation of [Musl] headers.
|
||||||
|
muslHeaders
|
||||||
|
|
||||||
// gcc is a hacked-to-pieces GCC toolchain meant for use in intermediate
|
// gcc is a hacked-to-pieces GCC toolchain meant for use in intermediate
|
||||||
// stages only. This preset and its direct output must never be exposed.
|
// stages only. This preset and its direct output must never be exposed.
|
||||||
@@ -305,15 +341,29 @@ var (
|
|||||||
}
|
}
|
||||||
// artifactsOnce is for lazy initialisation of artifacts.
|
// artifactsOnce is for lazy initialisation of artifacts.
|
||||||
artifactsOnce [_toolchainEnd][len(artifactsM)]sync.Once
|
artifactsOnce [_toolchainEnd][len(artifactsM)]sync.Once
|
||||||
|
|
||||||
|
// presetOpts globally modifies behaviour of presets.
|
||||||
|
presetOpts int
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// OptSkipCheck skips running all test suites.
|
||||||
|
OptSkipCheck = 1 << iota
|
||||||
|
// OptLLVMNoLTO disables LTO in all [LLVM] stages.
|
||||||
|
OptLLVMNoLTO
|
||||||
|
)
|
||||||
|
|
||||||
|
// Flags returns the current preset flags
|
||||||
|
func Flags() int { return presetOpts }
|
||||||
|
|
||||||
// zero zeros the value pointed to by p.
|
// zero zeros the value pointed to by p.
|
||||||
func zero[T any](p *T) { var v T; *p = v }
|
func zero[T any](p *T) { var v T; *p = v }
|
||||||
|
|
||||||
// DropCaches arranges for all cached [pkg.Artifact] to be freed some time after
|
// DropCaches arranges for all cached [pkg.Artifact] to be freed some time after
|
||||||
// it returns. Must not be used concurrently with any other function from this
|
// it returns. Must not be used concurrently with any other function from this
|
||||||
// package.
|
// package.
|
||||||
func DropCaches() {
|
func DropCaches(flags int) {
|
||||||
|
presetOpts = flags
|
||||||
zero(&artifacts)
|
zero(&artifacts)
|
||||||
zero(&artifactsOnce)
|
zero(&artifactsOnce)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -20,13 +20,16 @@ func TestLoad(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkAll(b *testing.B) {
|
func BenchmarkAll(b *testing.B) {
|
||||||
|
flags := rosa.Flags()
|
||||||
|
b.Cleanup(func() { rosa.DropCaches(flags) })
|
||||||
|
|
||||||
for b.Loop() {
|
for b.Loop() {
|
||||||
for i := range rosa.PresetEnd {
|
for i := range rosa.PresetEnd {
|
||||||
rosa.Std.Load(rosa.PArtifact(i))
|
rosa.Std.Load(rosa.PArtifact(i))
|
||||||
}
|
}
|
||||||
|
|
||||||
b.StopTimer()
|
b.StopTimer()
|
||||||
rosa.DropCaches()
|
rosa.DropCaches(0)
|
||||||
b.StartTimer()
|
b.StartTimer()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,10 +7,10 @@ func (t Toolchain) newArgpStandalone() (pkg.Artifact, string) {
|
|||||||
version = "1.3"
|
version = "1.3"
|
||||||
checksum = "vtW0VyO2pJ-hPyYmDI2zwSLS8QL0sPAUKC1t3zNYbwN2TmsaE-fADhaVtNd3eNFl"
|
checksum = "vtW0VyO2pJ-hPyYmDI2zwSLS8QL0sPAUKC1t3zNYbwN2TmsaE-fADhaVtNd3eNFl"
|
||||||
)
|
)
|
||||||
return t.NewPackage("argp-standalone", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("argp-standalone", version, newTar(
|
||||||
nil, "http://www.lysator.liu.se/~nisse/misc/"+
|
"http://www.lysator.liu.se/~nisse/misc/"+
|
||||||
"argp-standalone-"+version+".tar.gz",
|
"argp-standalone-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Env: []string{
|
Env: []string{
|
||||||
|
|||||||
@@ -7,9 +7,9 @@ func (t Toolchain) newBzip2() (pkg.Artifact, string) {
|
|||||||
version = "1.0.8"
|
version = "1.0.8"
|
||||||
checksum = "cTLykcco7boom-s05H1JVsQi1AtChYL84nXkg_92Dm1Xt94Ob_qlMg_-NSguIK-c"
|
checksum = "cTLykcco7boom-s05H1JVsQi1AtChYL84nXkg_92Dm1Xt94Ob_qlMg_-NSguIK-c"
|
||||||
)
|
)
|
||||||
return t.NewPackage("bzip2", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("bzip2", version, newTar(
|
||||||
nil, "https://sourceware.org/pub/bzip2/bzip2-"+version+".tar.gz",
|
"https://sourceware.org/pub/bzip2/bzip2-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Writable: true,
|
Writable: true,
|
||||||
|
|||||||
@@ -10,13 +10,14 @@ import (
|
|||||||
|
|
||||||
func (t Toolchain) newCMake() (pkg.Artifact, string) {
|
func (t Toolchain) newCMake() (pkg.Artifact, string) {
|
||||||
const (
|
const (
|
||||||
version = "4.3.1"
|
version = "4.3.2"
|
||||||
checksum = "RHpzZiM1kJ5bwLjo9CpXSeHJJg3hTtV9QxBYpQoYwKFtRh5YhGWpShrqZCSOzQN6"
|
checksum = "6QylwRVKletndTSkZTV2YBRwgd_9rUVgav_QW23HpjUgV21AVYZOUOal8tdBDmO7"
|
||||||
)
|
)
|
||||||
return t.NewPackage("cmake", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("cmake", version, newFromGitHubRelease(
|
||||||
nil, "https://github.com/Kitware/CMake/releases/download/"+
|
"Kitware/CMake",
|
||||||
"v"+version+"/cmake-"+version+".tar.gz",
|
"v"+version,
|
||||||
mustDecode(checksum),
|
"cmake-"+version+".tar.gz",
|
||||||
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
// test suite expects writable source tree
|
// test suite expects writable source tree
|
||||||
@@ -90,7 +91,7 @@ index 2ead810437..f85cbb8b1c 100644
|
|||||||
ConfigureName: "/usr/src/cmake/bootstrap",
|
ConfigureName: "/usr/src/cmake/bootstrap",
|
||||||
Configure: []KV{
|
Configure: []KV{
|
||||||
{"prefix", "/system"},
|
{"prefix", "/system"},
|
||||||
{"parallel", `"$(nproc)"`},
|
{"parallel", jobsE},
|
||||||
{"--"},
|
{"--"},
|
||||||
{"-DCMAKE_USE_OPENSSL", "OFF"},
|
{"-DCMAKE_USE_OPENSSL", "OFF"},
|
||||||
{"-DCMake_TEST_NO_NETWORK", "ON"},
|
{"-DCMake_TEST_NO_NETWORK", "ON"},
|
||||||
@@ -118,31 +119,27 @@ func init() {
|
|||||||
|
|
||||||
// CMakeHelper is the [CMake] build system helper.
|
// CMakeHelper is the [CMake] build system helper.
|
||||||
type CMakeHelper struct {
|
type CMakeHelper struct {
|
||||||
// Joined with name with a dash if non-empty.
|
|
||||||
Variant string
|
|
||||||
|
|
||||||
// Path elements joined with source.
|
// Path elements joined with source.
|
||||||
Append []string
|
Append []string
|
||||||
|
|
||||||
|
// Value of CMAKE_BUILD_TYPE. The zero value is equivalent to "Release".
|
||||||
|
BuildType string
|
||||||
// CMake CACHE entries.
|
// CMake CACHE entries.
|
||||||
Cache []KV
|
Cache []KV
|
||||||
// Runs after install.
|
// Runs after install.
|
||||||
Script string
|
Script string
|
||||||
|
|
||||||
|
// Replaces the default test command.
|
||||||
|
Test string
|
||||||
|
// Whether to skip running tests.
|
||||||
|
SkipTest bool
|
||||||
|
|
||||||
// Whether to generate Makefile instead.
|
// Whether to generate Makefile instead.
|
||||||
Make bool
|
Make bool
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ Helper = new(CMakeHelper)
|
var _ Helper = new(CMakeHelper)
|
||||||
|
|
||||||
// name returns its arguments and an optional variant string joined with '-'.
|
|
||||||
func (attr *CMakeHelper) name(name, version string) string {
|
|
||||||
if attr != nil && attr.Variant != "" {
|
|
||||||
name += "-" + attr.Variant
|
|
||||||
}
|
|
||||||
return name + "-" + version
|
|
||||||
}
|
|
||||||
|
|
||||||
// extra returns a hardcoded slice of [CMake] and [Ninja].
|
// extra returns a hardcoded slice of [CMake] and [Ninja].
|
||||||
func (attr *CMakeHelper) extra(int) P {
|
func (attr *CMakeHelper) extra(int) P {
|
||||||
if attr != nil && attr.Make {
|
if attr != nil && attr.Make {
|
||||||
@@ -169,22 +166,30 @@ func (*CMakeHelper) wantsDir() string { return "/cure/" }
|
|||||||
// script generates the cure script.
|
// script generates the cure script.
|
||||||
func (attr *CMakeHelper) script(name string) string {
|
func (attr *CMakeHelper) script(name string) string {
|
||||||
if attr == nil {
|
if attr == nil {
|
||||||
attr = &CMakeHelper{
|
attr = new(CMakeHelper)
|
||||||
Cache: []KV{
|
|
||||||
{"CMAKE_BUILD_TYPE", "Release"},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(attr.Cache) == 0 {
|
|
||||||
panic("CACHE must be non-empty")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
generate := "Ninja"
|
generate := "Ninja"
|
||||||
jobs := ""
|
test := "ninja " + jobsFlagE + " test"
|
||||||
if attr.Make {
|
if attr.Make {
|
||||||
generate = "'Unix Makefiles'"
|
generate = "'Unix Makefiles'"
|
||||||
jobs += ` "--parallel=$(nproc)"`
|
test = "make " + jobsFlagE + " test"
|
||||||
}
|
}
|
||||||
|
if attr.Test != "" {
|
||||||
|
test = attr.Test
|
||||||
|
}
|
||||||
|
|
||||||
|
script := attr.Script
|
||||||
|
if !attr.SkipTest && presetOpts&OptSkipCheck == 0 {
|
||||||
|
script += "\n" + test
|
||||||
|
}
|
||||||
|
|
||||||
|
cache := make([]KV, 1, 1+len(attr.Cache))
|
||||||
|
cache[0] = KV{"CMAKE_BUILD_TYPE", "Release"}
|
||||||
|
if attr.BuildType != "" {
|
||||||
|
cache[0][1] = attr.BuildType
|
||||||
|
}
|
||||||
|
cache = append(cache, attr.Cache...)
|
||||||
|
|
||||||
return `
|
return `
|
||||||
cmake -G ` + generate + ` \
|
cmake -G ` + generate + ` \
|
||||||
@@ -193,7 +198,7 @@ cmake -G ` + generate + ` \
|
|||||||
-DCMAKE_ASM_COMPILER_TARGET="${ROSA_TRIPLE}" \
|
-DCMAKE_ASM_COMPILER_TARGET="${ROSA_TRIPLE}" \
|
||||||
-DCMAKE_INSTALL_LIBDIR=lib \
|
-DCMAKE_INSTALL_LIBDIR=lib \
|
||||||
` + strings.Join(slices.Collect(func(yield func(string) bool) {
|
` + strings.Join(slices.Collect(func(yield func(string) bool) {
|
||||||
for _, v := range attr.Cache {
|
for _, v := range cache {
|
||||||
if !yield("-D" + v[0] + "=" + v[1]) {
|
if !yield("-D" + v[0] + "=" + v[1]) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -201,7 +206,7 @@ cmake -G ` + generate + ` \
|
|||||||
}), " \\\n\t") + ` \
|
}), " \\\n\t") + ` \
|
||||||
-DCMAKE_INSTALL_PREFIX=/system \
|
-DCMAKE_INSTALL_PREFIX=/system \
|
||||||
'/usr/src/` + name + `/` + filepath.Join(attr.Append...) + `'
|
'/usr/src/` + name + `/` + filepath.Join(attr.Append...) + `'
|
||||||
cmake --build .` + jobs + `
|
cmake --build . --parallel=` + jobsE + `
|
||||||
cmake --install . --prefix=/work/system
|
cmake --install . --prefix=/work/system
|
||||||
` + attr.Script
|
` + script
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,10 +7,10 @@ func (t Toolchain) newConnman() (pkg.Artifact, string) {
|
|||||||
version = "2.0"
|
version = "2.0"
|
||||||
checksum = "MhVTdJOhndnZn2SWd8URKo_Pj7Zvc14tntEbrVOf9L3yVWJvpb3v3Q6104tWJgtW"
|
checksum = "MhVTdJOhndnZn2SWd8URKo_Pj7Zvc14tntEbrVOf9L3yVWJvpb3v3Q6104tWJgtW"
|
||||||
)
|
)
|
||||||
return t.NewPackage("connman", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("connman", version, newTar(
|
||||||
nil, "https://git.kernel.org/pub/scm/network/connman/connman.git/"+
|
"https://git.kernel.org/pub/scm/network/connman/connman.git/"+
|
||||||
"snapshot/connman-"+version+".tar.gz",
|
"snapshot/connman-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Patches: []KV{
|
Patches: []KV{
|
||||||
|
|||||||
@@ -7,15 +7,15 @@ func (t Toolchain) newCurl() (pkg.Artifact, string) {
|
|||||||
version = "8.19.0"
|
version = "8.19.0"
|
||||||
checksum = "YHuVLVVp8q_Y7-JWpID5ReNjq2Zk6t7ArHB6ngQXilp_R5l3cubdxu3UKo-xDByv"
|
checksum = "YHuVLVVp8q_Y7-JWpID5ReNjq2Zk6t7ArHB6ngQXilp_R5l3cubdxu3UKo-xDByv"
|
||||||
)
|
)
|
||||||
return t.NewPackage("curl", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("curl", version, newTar(
|
||||||
nil, "https://curl.se/download/curl-"+version+".tar.bz2",
|
"https://curl.se/download/curl-"+version+".tar.bz2",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarBzip2,
|
pkg.TarBzip2,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
// remove broken test
|
// remove broken test
|
||||||
Writable: true,
|
Writable: true,
|
||||||
ScriptEarly: `
|
ScriptEarly: `
|
||||||
chmod +w tests/data && rm tests/data/test459
|
chmod +w tests/data && rm -f tests/data/test459
|
||||||
`,
|
`,
|
||||||
}, &MakeHelper{
|
}, &MakeHelper{
|
||||||
Configure: []KV{
|
Configure: []KV{
|
||||||
@@ -25,7 +25,7 @@ chmod +w tests/data && rm tests/data/test459
|
|||||||
{"disable-smb"},
|
{"disable-smb"},
|
||||||
},
|
},
|
||||||
Check: []string{
|
Check: []string{
|
||||||
`TFLAGS="-j$(expr "$(nproc)" '*' 2)"`,
|
"TFLAGS=" + jobsLFlagE,
|
||||||
"test-nonflaky",
|
"test-nonflaky",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -7,11 +7,11 @@ func (t Toolchain) newDBus() (pkg.Artifact, string) {
|
|||||||
version = "1.16.2"
|
version = "1.16.2"
|
||||||
checksum = "INwOuNdrDG7XW5ilW_vn8JSxEa444rRNc5ho97i84I1CNF09OmcFcV-gzbF4uCyg"
|
checksum = "INwOuNdrDG7XW5ilW_vn8JSxEa444rRNc5ho97i84I1CNF09OmcFcV-gzbF4uCyg"
|
||||||
)
|
)
|
||||||
return t.NewPackage("dbus", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("dbus", version, newFromGitLab(
|
||||||
nil, "https://gitlab.freedesktop.org/dbus/dbus/-/archive/"+
|
"gitlab.freedesktop.org",
|
||||||
"dbus-"+version+"/dbus-dbus-"+version+".tar.bz2",
|
"dbus/dbus",
|
||||||
mustDecode(checksum),
|
"dbus-"+version,
|
||||||
pkg.TarBzip2,
|
checksum,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
// OSError: [Errno 30] Read-only file system: '/usr/src/dbus/subprojects/packagecache'
|
// OSError: [Errno 30] Read-only file system: '/usr/src/dbus/subprojects/packagecache'
|
||||||
Writable: true,
|
Writable: true,
|
||||||
@@ -44,3 +44,38 @@ func init() {
|
|||||||
ID: 5356,
|
ID: 5356,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (t Toolchain) newXDGDBusProxy() (pkg.Artifact, string) {
|
||||||
|
const (
|
||||||
|
version = "0.1.7"
|
||||||
|
checksum = "UW5Pe-TP-XAaN-kTbxrkOQ7eYdmlAQlr2pdreLtPT0uwdAz-7rzDP8V_8PWuZBup"
|
||||||
|
)
|
||||||
|
return t.NewPackage("xdg-dbus-proxy", version, newFromGitHub(
|
||||||
|
"flatpak/xdg-dbus-proxy",
|
||||||
|
version,
|
||||||
|
checksum,
|
||||||
|
), nil, &MesonHelper{
|
||||||
|
Setup: []KV{
|
||||||
|
{"Dman", "disabled"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
DBus,
|
||||||
|
|
||||||
|
GLib,
|
||||||
|
), version
|
||||||
|
}
|
||||||
|
func init() {
|
||||||
|
artifactsM[XDGDBusProxy] = Metadata{
|
||||||
|
f: Toolchain.newXDGDBusProxy,
|
||||||
|
|
||||||
|
Name: "xdg-dbus-proxy",
|
||||||
|
Description: "a filtering proxy for D-Bus connections",
|
||||||
|
Website: "https://github.com/flatpak/xdg-dbus-proxy",
|
||||||
|
|
||||||
|
Dependencies: P{
|
||||||
|
GLib,
|
||||||
|
},
|
||||||
|
|
||||||
|
ID: 58434,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -7,10 +7,10 @@ func (t Toolchain) newDTC() (pkg.Artifact, string) {
|
|||||||
version = "1.7.2"
|
version = "1.7.2"
|
||||||
checksum = "vUoiRynPyYRexTpS6USweT5p4SVHvvVJs8uqFkkVD-YnFjwf6v3elQ0-Etrh00Dt"
|
checksum = "vUoiRynPyYRexTpS6USweT5p4SVHvvVJs8uqFkkVD-YnFjwf6v3elQ0-Etrh00Dt"
|
||||||
)
|
)
|
||||||
return t.NewPackage("dtc", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("dtc", version, newTar(
|
||||||
nil, "https://git.kernel.org/pub/scm/utils/dtc/dtc.git/snapshot/"+
|
"https://git.kernel.org/pub/scm/utils/dtc/dtc.git/snapshot/"+
|
||||||
"dtc-v"+version+".tar.gz",
|
"dtc-v"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
// works around buggy test:
|
// works around buggy test:
|
||||||
|
|||||||
@@ -4,13 +4,13 @@ import "hakurei.app/internal/pkg"
|
|||||||
|
|
||||||
func (t Toolchain) newElfutils() (pkg.Artifact, string) {
|
func (t Toolchain) newElfutils() (pkg.Artifact, string) {
|
||||||
const (
|
const (
|
||||||
version = "0.194"
|
version = "0.195"
|
||||||
checksum = "Q3XUygUPv9vR1TkWucwUsQ8Kb1_F6gzk-KMPELr3cC_4AcTrprhVPMvN0CKkiYRa"
|
checksum = "JrGnBD38w8Mj0ZxDw3fKlRBFcLvRKu8rcYnX35R9yTlUSYnzTazyLboG-a2CsJlu"
|
||||||
)
|
)
|
||||||
return t.NewPackage("elfutils", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("elfutils", version, newTar(
|
||||||
nil, "https://sourceware.org/elfutils/ftp/"+
|
"https://sourceware.org/elfutils/ftp/"+
|
||||||
version+"/elfutils-"+version+".tar.bz2",
|
version+"/elfutils-"+version+".tar.bz2",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarBzip2,
|
pkg.TarBzip2,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Env: []string{
|
Env: []string{
|
||||||
|
|||||||
@@ -135,10 +135,11 @@ func newIANAEtc() pkg.Artifact {
|
|||||||
version = "20251215"
|
version = "20251215"
|
||||||
checksum = "kvKz0gW_rGG5QaNK9ZWmWu1IEgYAdmhj_wR7DYrh3axDfIql_clGRHmelP7525NJ"
|
checksum = "kvKz0gW_rGG5QaNK9ZWmWu1IEgYAdmhj_wR7DYrh3axDfIql_clGRHmelP7525NJ"
|
||||||
)
|
)
|
||||||
return pkg.NewHTTPGetTar(
|
return newFromGitHubRelease(
|
||||||
nil, "https://github.com/Mic92/iana-etc/releases/download/"+
|
"Mic92/iana-etc",
|
||||||
version+"/iana-etc-"+version+".tar.gz",
|
version,
|
||||||
mustDecode(checksum),
|
"iana-etc-"+version+".tar.gz",
|
||||||
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,11 +7,11 @@ func (t Toolchain) newFakeroot() (pkg.Artifact, string) {
|
|||||||
version = "1.37.2"
|
version = "1.37.2"
|
||||||
checksum = "4ve-eDqVspzQ6VWDhPS0NjW3aSenBJcPAJq_BFT7OOFgUdrQzoTBxZWipDAGWxF8"
|
checksum = "4ve-eDqVspzQ6VWDhPS0NjW3aSenBJcPAJq_BFT7OOFgUdrQzoTBxZWipDAGWxF8"
|
||||||
)
|
)
|
||||||
return t.NewPackage("fakeroot", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("fakeroot", version, newFromGitLab(
|
||||||
nil, "https://salsa.debian.org/clint/fakeroot/-/archive/upstream/"+
|
"salsa.debian.org",
|
||||||
version+"/fakeroot-upstream-"+version+".tar.bz2",
|
"clint/fakeroot",
|
||||||
mustDecode(checksum),
|
"upstream/"+version,
|
||||||
pkg.TarBzip2,
|
checksum,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Patches: []KV{
|
Patches: []KV{
|
||||||
{"remove-broken-docs", `diff --git a/doc/Makefile.am b/doc/Makefile.am
|
{"remove-broken-docs", `diff --git a/doc/Makefile.am b/doc/Makefile.am
|
||||||
|
|||||||
@@ -9,10 +9,11 @@ func (t Toolchain) newFlex() (pkg.Artifact, string) {
|
|||||||
version = "2.6.4"
|
version = "2.6.4"
|
||||||
checksum = "p9POjQU7VhgOf3x5iFro8fjhy0NOanvA7CTeuWS_veSNgCixIJshTrWVkc5XLZkB"
|
checksum = "p9POjQU7VhgOf3x5iFro8fjhy0NOanvA7CTeuWS_veSNgCixIJshTrWVkc5XLZkB"
|
||||||
)
|
)
|
||||||
return t.NewPackage("flex", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("flex", version, newFromGitHubRelease(
|
||||||
nil, "https://github.com/westes/flex/releases/download/"+
|
"westes/flex",
|
||||||
"v"+version+"/flex-"+version+".tar.gz",
|
"v"+version,
|
||||||
mustDecode(checksum),
|
"flex-"+version+".tar.gz",
|
||||||
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil, (*MakeHelper)(nil),
|
), nil, (*MakeHelper)(nil),
|
||||||
M4,
|
M4,
|
||||||
|
|||||||
@@ -7,10 +7,11 @@ func (t Toolchain) newFuse() (pkg.Artifact, string) {
|
|||||||
version = "3.18.2"
|
version = "3.18.2"
|
||||||
checksum = "iL-7b7eUtmlVSf5cSq0dzow3UiqSjBmzV3cI_ENPs1tXcHdktkG45j1V12h-4jZe"
|
checksum = "iL-7b7eUtmlVSf5cSq0dzow3UiqSjBmzV3cI_ENPs1tXcHdktkG45j1V12h-4jZe"
|
||||||
)
|
)
|
||||||
return t.NewPackage("fuse", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("fuse", version, newFromGitHubRelease(
|
||||||
nil, "https://github.com/libfuse/libfuse/releases/download/"+
|
"libfuse/libfuse",
|
||||||
"fuse-"+version+"/fuse-"+version+".tar.gz",
|
"fuse-"+version,
|
||||||
mustDecode(checksum),
|
"fuse-"+version+".tar.gz",
|
||||||
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil, &MesonHelper{
|
), nil, &MesonHelper{
|
||||||
Setup: []KV{
|
Setup: []KV{
|
||||||
|
|||||||
@@ -9,17 +9,20 @@ import (
|
|||||||
|
|
||||||
func (t Toolchain) newGit() (pkg.Artifact, string) {
|
func (t Toolchain) newGit() (pkg.Artifact, string) {
|
||||||
const (
|
const (
|
||||||
version = "2.53.0"
|
version = "2.54.0"
|
||||||
checksum = "rlqSTeNgSeVKJA7nvzGqddFH8q3eFEPB4qRZft-4zth8wTHnbTbm7J90kp_obHGm"
|
checksum = "7vGKtFOJGqY8DO4e8UMRax7dLgImXKQz5MMalec6MlgYrsarffSJjgOughwRFpSH"
|
||||||
)
|
)
|
||||||
return t.NewPackage("git", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("git", version, newTar(
|
||||||
nil, "https://www.kernel.org/pub/software/scm/git/"+
|
"https://www.kernel.org/pub/software/scm/git/"+
|
||||||
"git-"+version+".tar.gz",
|
"git-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
ScriptEarly: `
|
ScriptEarly: `
|
||||||
ln -s ../../system/bin/perl /usr/bin/ || true
|
ln -s ../../system/bin/perl /usr/bin/ || true
|
||||||
|
|
||||||
|
# test suite assumes apache
|
||||||
|
rm -f /system/bin/httpd
|
||||||
`,
|
`,
|
||||||
|
|
||||||
// uses source tree as scratch space
|
// uses source tree as scratch space
|
||||||
@@ -38,6 +41,7 @@ function disable_test {
|
|||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
|
disable_test t1800-hook
|
||||||
disable_test t5319-multi-pack-index
|
disable_test t5319-multi-pack-index
|
||||||
disable_test t1305-config-include
|
disable_test t1305-config-include
|
||||||
disable_test t3900-i18n-commit
|
disable_test t3900-i18n-commit
|
||||||
@@ -58,11 +62,14 @@ disable_test t2200-add-update
|
|||||||
"prove",
|
"prove",
|
||||||
},
|
},
|
||||||
Install: `make \
|
Install: `make \
|
||||||
"-j$(nproc)" \
|
` + jobsFlagE + ` \
|
||||||
DESTDIR=/work \
|
DESTDIR=/work \
|
||||||
NO_INSTALL_HARDLINKS=1 \
|
NO_INSTALL_HARDLINKS=1 \
|
||||||
install`,
|
install`,
|
||||||
},
|
},
|
||||||
|
// test suite hangs on mksh
|
||||||
|
Bash,
|
||||||
|
|
||||||
Diffutils,
|
Diffutils,
|
||||||
Autoconf,
|
Autoconf,
|
||||||
Gettext,
|
Gettext,
|
||||||
@@ -114,3 +121,8 @@ git \
|
|||||||
rm -rf /work/.git
|
rm -rf /work/.git
|
||||||
`, resolvconf())
|
`, resolvconf())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// newTagRemote is a helper around NewViaGit for a tag on a git remote.
|
||||||
|
func (t Toolchain) newTagRemote(url, tag, checksum string) pkg.Artifact {
|
||||||
|
return t.NewViaGit(url, "refs/tags/"+tag, mustDecode(checksum))
|
||||||
|
}
|
||||||
|
|||||||
190
internal/rosa/glslang.go
Normal file
190
internal/rosa/glslang.go
Normal file
@@ -0,0 +1,190 @@
|
|||||||
|
package rosa
|
||||||
|
|
||||||
|
import (
|
||||||
|
"slices"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (t Toolchain) newSPIRVHeaders() (pkg.Artifact, string) {
|
||||||
|
const (
|
||||||
|
version = "1.4.341.0"
|
||||||
|
checksum = "0PL43-19Iaw4k7_D8J8BvoJ-iLgCVSYZ2ThgDPGfAJwIJFtre7l0cnQtLjcY-JvD"
|
||||||
|
)
|
||||||
|
return t.NewPackage("spirv-headers", version, newFromGitHub(
|
||||||
|
"KhronosGroup/SPIRV-Headers",
|
||||||
|
"vulkan-sdk-"+version,
|
||||||
|
checksum,
|
||||||
|
), nil, &CMakeHelper{
|
||||||
|
// upstream has no tests
|
||||||
|
SkipTest: true,
|
||||||
|
}), version
|
||||||
|
}
|
||||||
|
func init() {
|
||||||
|
artifactsM[SPIRVHeaders] = Metadata{
|
||||||
|
f: Toolchain.newSPIRVHeaders,
|
||||||
|
|
||||||
|
Name: "spirv-headers",
|
||||||
|
Description: "machine-readable files for the SPIR-V Registry",
|
||||||
|
Website: "https://github.com/KhronosGroup/SPIRV-Headers",
|
||||||
|
|
||||||
|
ID: 230542,
|
||||||
|
|
||||||
|
// upstream changed version scheme, anitya incapable of filtering them
|
||||||
|
latest: func(v *Versions) string {
|
||||||
|
for _, s := range v.Stable {
|
||||||
|
fields := strings.SplitN(s, ".", 4)
|
||||||
|
if len(fields) != 4 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if slices.ContainsFunc(fields, func(f string) bool {
|
||||||
|
return slices.ContainsFunc([]byte(f), func(d byte) bool {
|
||||||
|
return d < '0' || d > '9'
|
||||||
|
})
|
||||||
|
}) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
return v.Latest
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t Toolchain) newSPIRVTools() (pkg.Artifact, string) {
|
||||||
|
const (
|
||||||
|
version = "2026.1"
|
||||||
|
checksum = "ZSQPQx8NltCDzQLk4qlaVxyWRWeI_JtsjEpeFt3kezTanl9DTHfLixSUCezMFBjv"
|
||||||
|
)
|
||||||
|
return t.NewPackage("spirv-tools", version, newFromGitHub(
|
||||||
|
"KhronosGroup/SPIRV-Tools",
|
||||||
|
"v"+version,
|
||||||
|
checksum,
|
||||||
|
), nil, &CMakeHelper{
|
||||||
|
Cache: []KV{
|
||||||
|
{"SPIRV-Headers_SOURCE_DIR", "/system"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Python,
|
||||||
|
|
||||||
|
SPIRVHeaders,
|
||||||
|
), version
|
||||||
|
}
|
||||||
|
func init() {
|
||||||
|
artifactsM[SPIRVTools] = Metadata{
|
||||||
|
f: Toolchain.newSPIRVTools,
|
||||||
|
|
||||||
|
Name: "spirv-tools",
|
||||||
|
Description: "an API and commands for processing SPIR-V modules",
|
||||||
|
Website: "https://github.com/KhronosGroup/SPIRV-Tools",
|
||||||
|
|
||||||
|
Dependencies: P{
|
||||||
|
SPIRVHeaders,
|
||||||
|
},
|
||||||
|
|
||||||
|
ID: 14894,
|
||||||
|
|
||||||
|
latest: (*Versions).getStable,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t Toolchain) newGlslang() (pkg.Artifact, string) {
|
||||||
|
const (
|
||||||
|
version = "16.2.0"
|
||||||
|
checksum = "6_UuF9reLRDaVkgO-9IfB3kMwme3lQZM8LL8YsJwPdUFkrjzxJtf2A9X3w9nFxj2"
|
||||||
|
)
|
||||||
|
return t.NewPackage("glslang", version, newFromGitHub(
|
||||||
|
"KhronosGroup/glslang",
|
||||||
|
version,
|
||||||
|
checksum,
|
||||||
|
), &PackageAttr{
|
||||||
|
// test suite writes to source
|
||||||
|
Writable: true,
|
||||||
|
Chmod: true,
|
||||||
|
}, &CMakeHelper{
|
||||||
|
Cache: []KV{
|
||||||
|
{"BUILD_SHARED_LIBS", "ON"},
|
||||||
|
{"ALLOW_EXTERNAL_SPIRV_TOOLS", "ON"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Python,
|
||||||
|
Bash,
|
||||||
|
Diffutils,
|
||||||
|
|
||||||
|
SPIRVTools,
|
||||||
|
), version
|
||||||
|
}
|
||||||
|
func init() {
|
||||||
|
artifactsM[Glslang] = Metadata{
|
||||||
|
f: Toolchain.newGlslang,
|
||||||
|
|
||||||
|
Name: "glslang",
|
||||||
|
Description: "reference front end for GLSL/ESSL",
|
||||||
|
Website: "https://github.com/KhronosGroup/glslang",
|
||||||
|
|
||||||
|
ID: 205796,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t Toolchain) newSPIRVLLVMTranslator() (pkg.Artifact, string) {
|
||||||
|
const (
|
||||||
|
version = "22.1.2"
|
||||||
|
checksum = "JZAaV5ewYcm-35YA_U2BM2IcsQouZtX1BLZR0zh2vSlfEXMsT5OCtY4Gh5RJkcGy"
|
||||||
|
)
|
||||||
|
return t.NewPackage("spirv-llvm-translator", version, newFromGitHub(
|
||||||
|
"KhronosGroup/SPIRV-LLVM-Translator",
|
||||||
|
"v"+version, checksum,
|
||||||
|
), &PackageAttr{
|
||||||
|
Patches: []KV{
|
||||||
|
{"remove-early-prefix", `diff --git a/CMakeLists.txt b/CMakeLists.txt
|
||||||
|
index c000a77e..86f79b03 100644
|
||||||
|
--- a/CMakeLists.txt
|
||||||
|
+++ b/CMakeLists.txt
|
||||||
|
@@ -172,5 +172,5 @@ install(
|
||||||
|
FILES
|
||||||
|
${CMAKE_BINARY_DIR}/LLVMSPIRVLib.pc
|
||||||
|
DESTINATION
|
||||||
|
- ${CMAKE_INSTALL_PREFIX}/lib${LLVM_LIBDIR_SUFFIX}/pkgconfig
|
||||||
|
+ lib${LLVM_LIBDIR_SUFFIX}/pkgconfig
|
||||||
|
)
|
||||||
|
`},
|
||||||
|
},
|
||||||
|
|
||||||
|
// litArgs emits shell syntax
|
||||||
|
ScriptEarly: `
|
||||||
|
export LIT_OPTS=` + litArgs(true,
|
||||||
|
// error: line 13: OpTypeCooperativeMatrixKHR Scope is limited to Workgroup and Subgroup
|
||||||
|
"cooperative_matrix_constant_null.spvasm") + `
|
||||||
|
`,
|
||||||
|
}, &CMakeHelper{
|
||||||
|
Cache: []KV{
|
||||||
|
{"CMAKE_SKIP_BUILD_RPATH", "ON"},
|
||||||
|
{"BUILD_SHARED_LIBS", "ON"},
|
||||||
|
{"LLVM_SPIRV_ENABLE_LIBSPIRV_DIS", "ON"},
|
||||||
|
{"LLVM_EXTERNAL_SPIRV_HEADERS_SOURCE_DIR", "/system"},
|
||||||
|
{"LLVM_EXTERNAL_LIT", "/system/bin/lit"},
|
||||||
|
{"LLVM_INCLUDE_TESTS", "ON"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Bash,
|
||||||
|
LIT,
|
||||||
|
|
||||||
|
SPIRVTools,
|
||||||
|
), version
|
||||||
|
}
|
||||||
|
func init() {
|
||||||
|
artifactsM[SPIRVLLVMTranslator] = Metadata{
|
||||||
|
f: Toolchain.newSPIRVLLVMTranslator,
|
||||||
|
|
||||||
|
Name: "spirv-llvm-translator",
|
||||||
|
Description: "bi-directional translation between SPIR-V and LLVM IR",
|
||||||
|
Website: "https://github.com/KhronosGroup/SPIRV-LLVM-Translator",
|
||||||
|
|
||||||
|
Dependencies: P{
|
||||||
|
SPIRVTools,
|
||||||
|
},
|
||||||
|
|
||||||
|
ID: 227273,
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -2,18 +2,55 @@ package rosa
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"runtime"
|
"runtime"
|
||||||
|
"slices"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"hakurei.app/internal/pkg"
|
"hakurei.app/internal/pkg"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// skipGNUTests generates a string for skipping specific tests by number in a
|
||||||
|
// GNU test suite. This is nontrivial because the test suite does not support
|
||||||
|
// excluding tests in any way, so ranges for all but the skipped tests have to
|
||||||
|
// be specified instead.
|
||||||
|
//
|
||||||
|
// For example, to skip test 764, ranges around the skipped test must be
|
||||||
|
// specified:
|
||||||
|
//
|
||||||
|
// 1-763 765-
|
||||||
|
//
|
||||||
|
// Tests are numbered starting from 1. The resulting string is unquoted.
|
||||||
|
func skipGNUTests(tests ...int) string {
|
||||||
|
tests = slices.Clone(tests)
|
||||||
|
slices.Sort(tests)
|
||||||
|
|
||||||
|
var buf strings.Builder
|
||||||
|
|
||||||
|
if tests[0] != 1 {
|
||||||
|
buf.WriteString("1-")
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, n := range tests {
|
||||||
|
if n != 1 && (i == 0 || tests[i-1] != n-1) {
|
||||||
|
buf.WriteString(strconv.Itoa(n - 1))
|
||||||
|
buf.WriteString(" ")
|
||||||
|
}
|
||||||
|
if i == len(tests)-1 || tests[i+1] != n+1 {
|
||||||
|
buf.WriteString(strconv.Itoa(n + 1))
|
||||||
|
buf.WriteString("-")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
func (t Toolchain) newM4() (pkg.Artifact, string) {
|
func (t Toolchain) newM4() (pkg.Artifact, string) {
|
||||||
const (
|
const (
|
||||||
version = "1.4.21"
|
version = "1.4.21"
|
||||||
checksum = "pPa6YOo722Jw80l1OsH1tnUaklnPFjFT-bxGw5iAVrZTm1P8FQaWao_NXop46-pm"
|
checksum = "pPa6YOo722Jw80l1OsH1tnUaklnPFjFT-bxGw5iAVrZTm1P8FQaWao_NXop46-pm"
|
||||||
)
|
)
|
||||||
return t.NewPackage("m4", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("m4", version, newTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/m4/m4-"+version+".tar.bz2",
|
"https://ftpmirror.gnu.org/gnu/m4/m4-"+version+".tar.bz2",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarBzip2,
|
pkg.TarBzip2,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Writable: true,
|
Writable: true,
|
||||||
@@ -43,11 +80,19 @@ func (t Toolchain) newBison() (pkg.Artifact, string) {
|
|||||||
version = "3.8.2"
|
version = "3.8.2"
|
||||||
checksum = "BhRM6K7URj1LNOkIDCFDctSErLS-Xo5d9ba9seg10o6ACrgC1uNhED7CQPgIY29Y"
|
checksum = "BhRM6K7URj1LNOkIDCFDctSErLS-Xo5d9ba9seg10o6ACrgC1uNhED7CQPgIY29Y"
|
||||||
)
|
)
|
||||||
return t.NewPackage("bison", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("bison", version, newTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/bison/bison-"+version+".tar.gz",
|
"https://ftpmirror.gnu.org/gnu/bison/bison-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil, (*MakeHelper)(nil),
|
), nil, &MakeHelper{
|
||||||
|
Check: []string{
|
||||||
|
"TESTSUITEFLAGS=" + jobsFlagE + "' " + skipGNUTests(
|
||||||
|
// clang miscompiles (SIGILL)
|
||||||
|
764,
|
||||||
|
) + "'",
|
||||||
|
"check",
|
||||||
|
},
|
||||||
|
},
|
||||||
M4,
|
M4,
|
||||||
Diffutils,
|
Diffutils,
|
||||||
Sed,
|
Sed,
|
||||||
@@ -67,15 +112,17 @@ func init() {
|
|||||||
|
|
||||||
func (t Toolchain) newSed() (pkg.Artifact, string) {
|
func (t Toolchain) newSed() (pkg.Artifact, string) {
|
||||||
const (
|
const (
|
||||||
version = "4.9"
|
version = "4.10"
|
||||||
checksum = "pe7HWH4PHNYrazOTlUoE1fXmhn2GOPFN_xE62i0llOr3kYGrH1g2_orDz0UtZ9Nt"
|
checksum = "TXTRFQJCyflb-bpBRI2S5Y1DpplwvT7-KfXtpqN4AdZgZ5OtI6yStn1-bkhDKx51"
|
||||||
)
|
)
|
||||||
return t.NewPackage("sed", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("sed", version, newTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/sed/sed-"+version+".tar.gz",
|
"https://ftpmirror.gnu.org/gnu/sed/sed-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil, (*MakeHelper)(nil),
|
), nil, (*MakeHelper)(nil),
|
||||||
Diffutils,
|
Diffutils,
|
||||||
|
|
||||||
|
KernelHeaders,
|
||||||
), version
|
), version
|
||||||
}
|
}
|
||||||
func init() {
|
func init() {
|
||||||
@@ -95,15 +142,15 @@ func (t Toolchain) newAutoconf() (pkg.Artifact, string) {
|
|||||||
version = "2.73"
|
version = "2.73"
|
||||||
checksum = "yGabDTeOfaCUB0JX-h3REYLYzMzvpDwFmFFzHNR7QilChCUNE4hR6q7nma4viDYg"
|
checksum = "yGabDTeOfaCUB0JX-h3REYLYzMzvpDwFmFFzHNR7QilChCUNE4hR6q7nma4viDYg"
|
||||||
)
|
)
|
||||||
return t.NewPackage("autoconf", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("autoconf", version, newTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/autoconf/autoconf-"+version+".tar.gz",
|
"https://ftpmirror.gnu.org/gnu/autoconf/autoconf-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Flag: TExclusive,
|
Flag: TExclusive,
|
||||||
}, &MakeHelper{
|
}, &MakeHelper{
|
||||||
Check: []string{
|
Check: []string{
|
||||||
`TESTSUITEFLAGS="-j$(nproc)"`,
|
"TESTSUITEFLAGS=" + jobsFlagE,
|
||||||
"check",
|
"check",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -135,9 +182,9 @@ func (t Toolchain) newAutomake() (pkg.Artifact, string) {
|
|||||||
version = "1.18.1"
|
version = "1.18.1"
|
||||||
checksum = "FjvLG_GdQP7cThTZJLDMxYpRcKdpAVG-YDs1Fj1yaHlSdh_Kx6nRGN14E0r_BjcG"
|
checksum = "FjvLG_GdQP7cThTZJLDMxYpRcKdpAVG-YDs1Fj1yaHlSdh_Kx6nRGN14E0r_BjcG"
|
||||||
)
|
)
|
||||||
return t.NewPackage("automake", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("automake", version, newTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/automake/automake-"+version+".tar.gz",
|
"https://ftpmirror.gnu.org/gnu/automake/automake-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Writable: true,
|
Writable: true,
|
||||||
@@ -179,13 +226,16 @@ func (t Toolchain) newLibtool() (pkg.Artifact, string) {
|
|||||||
version = "2.5.4"
|
version = "2.5.4"
|
||||||
checksum = "pa6LSrQggh8mSJHQfwGjysAApmZlGJt8wif2cCLzqAAa2jpsTY0jZ-6stS3BWZ2Q"
|
checksum = "pa6LSrQggh8mSJHQfwGjysAApmZlGJt8wif2cCLzqAAa2jpsTY0jZ-6stS3BWZ2Q"
|
||||||
)
|
)
|
||||||
return t.NewPackage("libtool", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("libtool", version, newTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/libtool/libtool-"+version+".tar.gz",
|
"https://ftpmirror.gnu.org/gnu/libtool/libtool-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil, &MakeHelper{
|
), nil, &MakeHelper{
|
||||||
|
// _Z2a2c: symbol not found
|
||||||
|
SkipCheck: t.isStage0(),
|
||||||
|
|
||||||
Check: []string{
|
Check: []string{
|
||||||
`TESTSUITEFLAGS="-j$(nproc)"`,
|
"TESTSUITEFLAGS=" + jobsFlagE,
|
||||||
"check",
|
"check",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -210,9 +260,9 @@ func (t Toolchain) newGzip() (pkg.Artifact, string) {
|
|||||||
version = "1.14"
|
version = "1.14"
|
||||||
checksum = "NWhjUavnNfTDFkZJyAUonL9aCOak8GVajWX2OMlzpFnuI0ErpBFyj88mz2xSjz0q"
|
checksum = "NWhjUavnNfTDFkZJyAUonL9aCOak8GVajWX2OMlzpFnuI0ErpBFyj88mz2xSjz0q"
|
||||||
)
|
)
|
||||||
return t.NewPackage("gzip", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("gzip", version, newTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/gzip/gzip-"+version+".tar.gz",
|
"https://ftpmirror.gnu.org/gnu/gzip/gzip-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil, &MakeHelper{
|
), nil, &MakeHelper{
|
||||||
// dependency loop
|
// dependency loop
|
||||||
@@ -236,9 +286,9 @@ func (t Toolchain) newGettext() (pkg.Artifact, string) {
|
|||||||
version = "1.0"
|
version = "1.0"
|
||||||
checksum = "3MasKeEdPeFEgWgzsBKk7JqWqql1wEMbgPmzAfs-mluyokoW0N8oQVxPQoOnSdgC"
|
checksum = "3MasKeEdPeFEgWgzsBKk7JqWqql1wEMbgPmzAfs-mluyokoW0N8oQVxPQoOnSdgC"
|
||||||
)
|
)
|
||||||
return t.NewPackage("gettext", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("gettext", version, newTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/gettext/gettext-"+version+".tar.gz",
|
"https://ftpmirror.gnu.org/gnu/gettext/gettext-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Writable: true,
|
Writable: true,
|
||||||
@@ -282,9 +332,9 @@ func (t Toolchain) newDiffutils() (pkg.Artifact, string) {
|
|||||||
version = "3.12"
|
version = "3.12"
|
||||||
checksum = "9J5VAq5oA7eqwzS1Yvw-l3G5o-TccUrNQR3PvyB_lgdryOFAfxtvQfKfhdpquE44"
|
checksum = "9J5VAq5oA7eqwzS1Yvw-l3G5o-TccUrNQR3PvyB_lgdryOFAfxtvQfKfhdpquE44"
|
||||||
)
|
)
|
||||||
return t.NewPackage("diffutils", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("diffutils", version, newTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/diffutils/diffutils-"+version+".tar.gz",
|
"https://ftpmirror.gnu.org/gnu/diffutils/diffutils-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Writable: true,
|
Writable: true,
|
||||||
@@ -315,9 +365,9 @@ func (t Toolchain) newPatch() (pkg.Artifact, string) {
|
|||||||
version = "2.8"
|
version = "2.8"
|
||||||
checksum = "MA0BQc662i8QYBD-DdGgyyfTwaeALZ1K0yusV9rAmNiIsQdX-69YC4t9JEGXZkeR"
|
checksum = "MA0BQc662i8QYBD-DdGgyyfTwaeALZ1K0yusV9rAmNiIsQdX-69YC4t9JEGXZkeR"
|
||||||
)
|
)
|
||||||
return t.NewPackage("patch", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("patch", version, newTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/patch/patch-"+version+".tar.gz",
|
"https://ftpmirror.gnu.org/gnu/patch/patch-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Writable: true,
|
Writable: true,
|
||||||
@@ -347,9 +397,9 @@ func (t Toolchain) newBash() (pkg.Artifact, string) {
|
|||||||
version = "5.3"
|
version = "5.3"
|
||||||
checksum = "4LQ_GRoB_ko-Ih8QPf_xRKA02xAm_TOxQgcJLmFDT6udUPxTAWrsj-ZNeuTusyDq"
|
checksum = "4LQ_GRoB_ko-Ih8QPf_xRKA02xAm_TOxQgcJLmFDT6udUPxTAWrsj-ZNeuTusyDq"
|
||||||
)
|
)
|
||||||
return t.NewPackage("bash", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("bash", version, newTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/bash/bash-"+version+".tar.gz",
|
"https://ftpmirror.gnu.org/gnu/bash/bash-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Flag: TEarly,
|
Flag: TEarly,
|
||||||
@@ -374,12 +424,12 @@ func init() {
|
|||||||
|
|
||||||
func (t Toolchain) newCoreutils() (pkg.Artifact, string) {
|
func (t Toolchain) newCoreutils() (pkg.Artifact, string) {
|
||||||
const (
|
const (
|
||||||
version = "9.10"
|
version = "9.11"
|
||||||
checksum = "o-B9wssRnZySzJUI1ZJAgw-bZtj1RC67R9po2AcM2OjjS8FQIl16IRHpC6IwO30i"
|
checksum = "t8UMed5wpFEoC56aa42_yidfOAaRGzOfj7MRtQkkqgGbpXiskNA8bd-EmVSQkZie"
|
||||||
)
|
)
|
||||||
return t.NewPackage("coreutils", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("coreutils", version, newTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/coreutils/coreutils-"+version+".tar.gz",
|
"https://ftpmirror.gnu.org/gnu/coreutils/coreutils-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Writable: true,
|
Writable: true,
|
||||||
@@ -387,106 +437,13 @@ func (t Toolchain) newCoreutils() (pkg.Artifact, string) {
|
|||||||
test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
|
test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
|
||||||
|
|
||||||
test_disable '#!/bin/sh' gnulib-tests/test-c32ispunct.sh
|
test_disable '#!/bin/sh' gnulib-tests/test-c32ispunct.sh
|
||||||
test_disable '#!/bin/sh' tests/split/line-bytes.sh
|
|
||||||
test_disable '#!/bin/sh' tests/ls/hyperlink.sh
|
test_disable '#!/bin/sh' tests/ls/hyperlink.sh
|
||||||
|
test_disable '#!/bin/sh' tests/misc/user.sh
|
||||||
test_disable 'int main(){return 0;}' gnulib-tests/test-chown.c
|
test_disable 'int main(){return 0;}' gnulib-tests/test-chown.c
|
||||||
test_disable 'int main(){return 0;}' gnulib-tests/test-fchownat.c
|
test_disable 'int main(){return 0;}' gnulib-tests/test-fchownat.c
|
||||||
test_disable 'int main(){return 0;}' gnulib-tests/test-lchown.c
|
test_disable 'int main(){return 0;}' gnulib-tests/test-lchown.c
|
||||||
`,
|
`,
|
||||||
|
|
||||||
Patches: []KV{
|
|
||||||
{"tests-fix-job-control", `From 21d287324aa43aa3a31f39619ade0deac7fd6013 Mon Sep 17 00:00:00 2001
|
|
||||||
From: =?UTF-8?q?P=C3=A1draig=20Brady?= <P@draigBrady.com>
|
|
||||||
Date: Tue, 24 Feb 2026 15:44:41 +0000
|
|
||||||
Subject: [PATCH] tests: fix job control triggering test termination
|
|
||||||
|
|
||||||
This avoids the test harness being terminated like:
|
|
||||||
make[1]: *** [Makefile:24419: check-recursive] Hangup
|
|
||||||
make[3]: *** [Makefile:24668: check-TESTS] Hangup
|
|
||||||
make: *** [Makefile:24922: check] Hangup
|
|
||||||
make[2]: *** [Makefile:24920: check-am] Hangup
|
|
||||||
make[4]: *** [Makefile:24685: tests/misc/usage_vs_refs.log] Error 129
|
|
||||||
...
|
|
||||||
|
|
||||||
This happened sometimes when the tests were being run non interactively.
|
|
||||||
For example when run like:
|
|
||||||
|
|
||||||
setsid make TESTS="tests/timeout/timeout.sh \
|
|
||||||
tests/tail/overlay-headers.sh" SUBDIRS=. -j2 check
|
|
||||||
|
|
||||||
Note the race window can be made bigger by adding a sleep
|
|
||||||
after tail is stopped in overlay-headers.sh
|
|
||||||
|
|
||||||
The race can trigger the kernel to induce its job control
|
|
||||||
mechanism to prevent stuck processes.
|
|
||||||
I.e. where it sends SIGHUP + SIGCONT to a process group
|
|
||||||
when it determines that group may become orphaned,
|
|
||||||
and there are stopped processes in that group.
|
|
||||||
|
|
||||||
* tests/tail/overlay-headers.sh: Use setsid(1) to keep the stopped
|
|
||||||
tail process in a separate process group, thus avoiding any kernel
|
|
||||||
job control protection mechanism.
|
|
||||||
* tests/timeout/timeout.sh: Use setsid(1) to avoid the kernel
|
|
||||||
checking the main process group when sleep(1) is reparented.
|
|
||||||
Fixes https://bugs.gnu.org/80477
|
|
||||||
---
|
|
||||||
tests/tail/overlay-headers.sh | 8 +++++++-
|
|
||||||
tests/timeout/timeout.sh | 11 ++++++++---
|
|
||||||
2 files changed, 15 insertions(+), 4 deletions(-)
|
|
||||||
|
|
||||||
diff --git a/tests/tail/overlay-headers.sh b/tests/tail/overlay-headers.sh
|
|
||||||
index be9b6a7df..1e6da0a3f 100755
|
|
||||||
--- a/tests/tail/overlay-headers.sh
|
|
||||||
+++ b/tests/tail/overlay-headers.sh
|
|
||||||
@@ -20,6 +20,8 @@
|
|
||||||
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
|
|
||||||
print_ver_ tail sleep
|
|
||||||
|
|
||||||
+setsid true || skip_ 'setsid required to control groups'
|
|
||||||
+
|
|
||||||
# Function to count number of lines from tail
|
|
||||||
# while ignoring transient errors due to resource limits
|
|
||||||
countlines_ ()
|
|
||||||
@@ -54,7 +56,11 @@ echo start > file2 || framework_failure_
|
|
||||||
env sleep 60 & sleep=$!
|
|
||||||
|
|
||||||
# Note don't use timeout(1) here as it currently
|
|
||||||
-# does not propagate SIGCONT
|
|
||||||
+# does not propagate SIGCONT.
|
|
||||||
+# Note use setsid here to ensure we're in a separate process group
|
|
||||||
+# as we're going to STOP this tail process, and this can trigger
|
|
||||||
+# the kernel to send SIGHUP to a group if other tests have
|
|
||||||
+# processes that are reparented. (See tests/timeout/timeout.sh).
|
|
||||||
tail $fastpoll --pid=$sleep -f file1 file2 > out & pid=$!
|
|
||||||
|
|
||||||
# Ensure tail is running
|
|
||||||
diff --git a/tests/timeout/timeout.sh b/tests/timeout/timeout.sh
|
|
||||||
index 9a395416b..fbb043312 100755
|
|
||||||
--- a/tests/timeout/timeout.sh
|
|
||||||
+++ b/tests/timeout/timeout.sh
|
|
||||||
@@ -56,9 +56,14 @@ returns_ 124 timeout --foreground -s0 -k1 .1 sleep 10 && fail=1
|
|
||||||
) || fail=1
|
|
||||||
|
|
||||||
# Don't be confused when starting off with a child (Bug#9098).
|
|
||||||
-out=$(sleep .1 & exec timeout .5 sh -c 'sleep 2; echo foo')
|
|
||||||
-status=$?
|
|
||||||
-test "$out" = "" && test $status = 124 || fail=1
|
|
||||||
+# Use setsid to avoid sleep being in the test's process group, as
|
|
||||||
+# upon reparenting it can trigger an orphaned process group SIGHUP
|
|
||||||
+# (if there were stopped processes in other tests).
|
|
||||||
+if setsid true; then
|
|
||||||
+ out=$(setsid sleep .1 & exec timeout .5 sh -c 'sleep 2; echo foo')
|
|
||||||
+ status=$?
|
|
||||||
+ test "$out" = "" && test $status = 124 || fail=1
|
|
||||||
+fi
|
|
||||||
|
|
||||||
# Verify --verbose output
|
|
||||||
cat > exp <<\EOF
|
|
||||||
--
|
|
||||||
2.53.0
|
|
||||||
`},
|
|
||||||
},
|
|
||||||
|
|
||||||
Flag: TEarly,
|
Flag: TEarly,
|
||||||
}, &MakeHelper{
|
}, &MakeHelper{
|
||||||
Configure: []KV{
|
Configure: []KV{
|
||||||
@@ -516,9 +473,9 @@ func (t Toolchain) newTexinfo() (pkg.Artifact, string) {
|
|||||||
version = "7.3"
|
version = "7.3"
|
||||||
checksum = "RRmC8Xwdof7JuZJeWGAQ_GeASIHAuJFQMbNONXBz5InooKIQGmqmWRjGNGEr5n4-"
|
checksum = "RRmC8Xwdof7JuZJeWGAQ_GeASIHAuJFQMbNONXBz5InooKIQGmqmWRjGNGEr5n4-"
|
||||||
)
|
)
|
||||||
return t.NewPackage("texinfo", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("texinfo", version, newTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/texinfo/texinfo-"+version+".tar.gz",
|
"https://ftpmirror.gnu.org/gnu/texinfo/texinfo-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil, &MakeHelper{
|
), nil, &MakeHelper{
|
||||||
// nonstandard glibc extension
|
// nonstandard glibc extension
|
||||||
@@ -549,9 +506,9 @@ func (t Toolchain) newGperf() (pkg.Artifact, string) {
|
|||||||
version = "3.3"
|
version = "3.3"
|
||||||
checksum = "RtIy9pPb_Bb8-31J2Nw-rRGso2JlS-lDlVhuNYhqR7Nt4xM_nObznxAlBMnarJv7"
|
checksum = "RtIy9pPb_Bb8-31J2Nw-rRGso2JlS-lDlVhuNYhqR7Nt4xM_nObznxAlBMnarJv7"
|
||||||
)
|
)
|
||||||
return t.NewPackage("gperf", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("gperf", version, newTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gperf/gperf-"+version+".tar.gz",
|
"https://ftpmirror.gnu.org/gperf/gperf-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil, (*MakeHelper)(nil),
|
), nil, (*MakeHelper)(nil),
|
||||||
Diffutils,
|
Diffutils,
|
||||||
@@ -574,9 +531,9 @@ func (t Toolchain) newGawk() (pkg.Artifact, string) {
|
|||||||
version = "5.4.0"
|
version = "5.4.0"
|
||||||
checksum = "m0RkIolC-PI7EY5q8pcx5Y-0twlIW0Yp3wXXmV-QaHorSdf8BhZ7kW9F8iWomz0C"
|
checksum = "m0RkIolC-PI7EY5q8pcx5Y-0twlIW0Yp3wXXmV-QaHorSdf8BhZ7kW9F8iWomz0C"
|
||||||
)
|
)
|
||||||
return t.NewPackage("gawk", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("gawk", version, newTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/gawk/gawk-"+version+".tar.gz",
|
"https://ftpmirror.gnu.org/gnu/gawk/gawk-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Flag: TEarly,
|
Flag: TEarly,
|
||||||
@@ -602,9 +559,9 @@ func (t Toolchain) newGrep() (pkg.Artifact, string) {
|
|||||||
version = "3.12"
|
version = "3.12"
|
||||||
checksum = "qMB4RjaPNRRYsxix6YOrjE8gyAT1zVSTy4nW4wKW9fqa0CHYAuWgPwDTirENzm_1"
|
checksum = "qMB4RjaPNRRYsxix6YOrjE8gyAT1zVSTy4nW4wKW9fqa0CHYAuWgPwDTirENzm_1"
|
||||||
)
|
)
|
||||||
return t.NewPackage("grep", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("grep", version, newTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/grep/grep-"+version+".tar.gz",
|
"https://ftpmirror.gnu.org/gnu/grep/grep-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Writable: true,
|
Writable: true,
|
||||||
@@ -639,7 +596,6 @@ func (t Toolchain) newFindutils() (pkg.Artifact, string) {
|
|||||||
nil, "https://ftpmirror.gnu.org/gnu/findutils/findutils-"+version+".tar.xz",
|
nil, "https://ftpmirror.gnu.org/gnu/findutils/findutils-"+version+".tar.xz",
|
||||||
mustDecode(checksum),
|
mustDecode(checksum),
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
SourceKind: SourceKindTarXZ,
|
|
||||||
ScriptEarly: `
|
ScriptEarly: `
|
||||||
echo '#!/bin/sh' > gnulib-tests/test-c32ispunct.sh
|
echo '#!/bin/sh' > gnulib-tests/test-c32ispunct.sh
|
||||||
echo 'int main(){return 0;}' > tests/xargs/test-sigusr.c
|
echo 'int main(){return 0;}' > tests/xargs/test-sigusr.c
|
||||||
@@ -667,9 +623,9 @@ func (t Toolchain) newBC() (pkg.Artifact, string) {
|
|||||||
version = "1.08.2"
|
version = "1.08.2"
|
||||||
checksum = "8h6f3hjV80XiFs6v9HOPF2KEyg1kuOgn5eeFdVspV05ODBVQss-ey5glc8AmneLy"
|
checksum = "8h6f3hjV80XiFs6v9HOPF2KEyg1kuOgn5eeFdVspV05ODBVQss-ey5glc8AmneLy"
|
||||||
)
|
)
|
||||||
return t.NewPackage("bc", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("bc", version, newTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/bc/bc-"+version+".tar.gz",
|
"https://ftpmirror.gnu.org/gnu/bc/bc-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
// source expected to be writable
|
// source expected to be writable
|
||||||
@@ -696,9 +652,9 @@ func (t Toolchain) newLibiconv() (pkg.Artifact, string) {
|
|||||||
version = "1.19"
|
version = "1.19"
|
||||||
checksum = "UibB6E23y4MksNqYmCCrA3zTFO6vJugD1DEDqqWYFZNuBsUWMVMcncb_5pPAr88x"
|
checksum = "UibB6E23y4MksNqYmCCrA3zTFO6vJugD1DEDqqWYFZNuBsUWMVMcncb_5pPAr88x"
|
||||||
)
|
)
|
||||||
return t.NewPackage("libiconv", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("libiconv", version, newTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/libiconv/libiconv-"+version+".tar.gz",
|
"https://ftpmirror.gnu.org/gnu/libiconv/libiconv-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil, (*MakeHelper)(nil)), version
|
), nil, (*MakeHelper)(nil)), version
|
||||||
}
|
}
|
||||||
@@ -719,9 +675,9 @@ func (t Toolchain) newTar() (pkg.Artifact, string) {
|
|||||||
version = "1.35"
|
version = "1.35"
|
||||||
checksum = "zSaoSlVUDW0dSfm4sbL4FrXLFR8U40Fh3zY5DWhR5NCIJ6GjU6Kc4VZo2-ZqpBRA"
|
checksum = "zSaoSlVUDW0dSfm4sbL4FrXLFR8U40Fh3zY5DWhR5NCIJ6GjU6Kc4VZo2-ZqpBRA"
|
||||||
)
|
)
|
||||||
return t.NewPackage("tar", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("tar", version, newTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/tar/tar-"+version+".tar.gz",
|
"https://ftpmirror.gnu.org/gnu/tar/tar-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil, &MakeHelper{
|
), nil, &MakeHelper{
|
||||||
Configure: []KV{
|
Configure: []KV{
|
||||||
@@ -733,7 +689,7 @@ func (t Toolchain) newTar() (pkg.Artifact, string) {
|
|||||||
// very expensive
|
// very expensive
|
||||||
"TARTEST_SKIP_LARGE_FILES=1",
|
"TARTEST_SKIP_LARGE_FILES=1",
|
||||||
|
|
||||||
`TESTSUITEFLAGS="-j$(nproc)"`,
|
"TESTSUITEFLAGS=" + jobsFlagE,
|
||||||
"check",
|
"check",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -758,15 +714,20 @@ func init() {
|
|||||||
|
|
||||||
func (t Toolchain) newParallel() (pkg.Artifact, string) {
|
func (t Toolchain) newParallel() (pkg.Artifact, string) {
|
||||||
const (
|
const (
|
||||||
version = "20260322"
|
version = "20260422"
|
||||||
checksum = "gHoPmFkOO62ev4xW59HqyMlodhjp8LvTsBOwsVKHUUdfrt7KwB8koXmSVqQ4VOrB"
|
checksum = "eTsepxgqhXpMEhPd55qh-W5y4vjKn0x9TD2mzbJCNZYtFf4lT4Wzoqr74HGJYBEH"
|
||||||
)
|
)
|
||||||
return t.NewPackage("parallel", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("parallel", version, newTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/parallel/parallel-"+version+".tar.bz2",
|
"https://ftpmirror.gnu.org/gnu/parallel/parallel-"+version+".tar.bz2",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarBzip2,
|
pkg.TarBzip2,
|
||||||
), nil, (*MakeHelper)(nil),
|
), &PackageAttr{
|
||||||
|
ScriptEarly: `
|
||||||
|
ln -s ../system/bin/bash /bin/
|
||||||
|
`,
|
||||||
|
}, (*MakeHelper)(nil),
|
||||||
Perl,
|
Perl,
|
||||||
|
Bash,
|
||||||
), version
|
), version
|
||||||
}
|
}
|
||||||
func init() {
|
func init() {
|
||||||
@@ -790,9 +751,9 @@ func (t Toolchain) newLibunistring() (pkg.Artifact, string) {
|
|||||||
version = "1.4.2"
|
version = "1.4.2"
|
||||||
checksum = "iW9BbfLoVlXjWoLTZ4AekQSu4cFBnLcZ4W8OHWbv0AhJNgD3j65_zqaLMzFKylg2"
|
checksum = "iW9BbfLoVlXjWoLTZ4AekQSu4cFBnLcZ4W8OHWbv0AhJNgD3j65_zqaLMzFKylg2"
|
||||||
)
|
)
|
||||||
return t.NewPackage("libunistring", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("libunistring", version, newTar(
|
||||||
nil, "https://ftp.gnu.org/gnu/libunistring/libunistring-"+version+".tar.gz",
|
"https://ftpmirror.gnu.org/gnu/libunistring/libunistring-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Writable: true,
|
Writable: true,
|
||||||
@@ -823,9 +784,9 @@ func (t Toolchain) newLibtasn1() (pkg.Artifact, string) {
|
|||||||
version = "4.21.0"
|
version = "4.21.0"
|
||||||
checksum = "9DYI3UYbfYLy8JsKUcY6f0irskbfL0fHZA91Q-JEOA3kiUwpodyjemRsYRjUpjuq"
|
checksum = "9DYI3UYbfYLy8JsKUcY6f0irskbfL0fHZA91Q-JEOA3kiUwpodyjemRsYRjUpjuq"
|
||||||
)
|
)
|
||||||
return t.NewPackage("libtasn1", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("libtasn1", version, newTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/libtasn1/libtasn1-"+version+".tar.gz",
|
"https://ftpmirror.gnu.org/gnu/libtasn1/libtasn1-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil, (*MakeHelper)(nil)), version
|
), nil, (*MakeHelper)(nil)), version
|
||||||
}
|
}
|
||||||
@@ -846,9 +807,9 @@ func (t Toolchain) newReadline() (pkg.Artifact, string) {
|
|||||||
version = "8.3"
|
version = "8.3"
|
||||||
checksum = "r-lcGRJq_MvvBpOq47Z2Y1OI2iqrmtcqhTLVXR0xWo37ZpC2uT_md7gKq5o_qTMV"
|
checksum = "r-lcGRJq_MvvBpOq47Z2Y1OI2iqrmtcqhTLVXR0xWo37ZpC2uT_md7gKq5o_qTMV"
|
||||||
)
|
)
|
||||||
return t.NewPackage("readline", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("readline", version, newTar(
|
||||||
nil, "https://ftp.gnu.org/gnu/readline/readline-"+version+".tar.gz",
|
"https://ftpmirror.gnu.org/gnu/readline/readline-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil, &MakeHelper{
|
), nil, &MakeHelper{
|
||||||
Configure: []KV{
|
Configure: []KV{
|
||||||
@@ -889,10 +850,9 @@ func (t Toolchain) newGnuTLS() (pkg.Artifact, string) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return t.NewPackage("gnutls", version, t.NewViaGit(
|
return t.NewPackage("gnutls", version, t.newTagRemote(
|
||||||
"https://gitlab.com/gnutls/gnutls.git",
|
"https://gitlab.com/gnutls/gnutls.git",
|
||||||
"refs/tags/"+version,
|
version, checksum,
|
||||||
mustDecode(checksum),
|
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Patches: []KV{
|
Patches: []KV{
|
||||||
{"bootstrap-remove-gtk-doc", `diff --git a/bootstrap.conf b/bootstrap.conf
|
{"bootstrap-remove-gtk-doc", `diff --git a/bootstrap.conf b/bootstrap.conf
|
||||||
@@ -1062,9 +1022,9 @@ func (t Toolchain) newBinutils() (pkg.Artifact, string) {
|
|||||||
version = "2.46.0"
|
version = "2.46.0"
|
||||||
checksum = "4kK1_EXQipxSqqyvwD4LbiMLFKCUApjq6PeG4XJP4dzxYGqDeqXfh8zLuTyOuOVR"
|
checksum = "4kK1_EXQipxSqqyvwD4LbiMLFKCUApjq6PeG4XJP4dzxYGqDeqXfh8zLuTyOuOVR"
|
||||||
)
|
)
|
||||||
return t.NewPackage("binutils", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("binutils", version, newTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/binutils/binutils-"+version+".tar.bz2",
|
"https://ftpmirror.gnu.org/gnu/binutils/binutils-"+version+".tar.bz2",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarBzip2,
|
pkg.TarBzip2,
|
||||||
), nil, (*MakeHelper)(nil),
|
), nil, (*MakeHelper)(nil),
|
||||||
Bash,
|
Bash,
|
||||||
@@ -1087,12 +1047,16 @@ func (t Toolchain) newGMP() (pkg.Artifact, string) {
|
|||||||
version = "6.3.0"
|
version = "6.3.0"
|
||||||
checksum = "yrgbgEDWKDdMWVHh7gPbVl56-sRtVVhfvv0M_LX7xMUUk_mvZ1QOJEAnt7g4i3k5"
|
checksum = "yrgbgEDWKDdMWVHh7gPbVl56-sRtVVhfvv0M_LX7xMUUk_mvZ1QOJEAnt7g4i3k5"
|
||||||
)
|
)
|
||||||
return t.NewPackage("gmp", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("gmp", version, newTar(
|
||||||
nil, "https://gcc.gnu.org/pub/gcc/infrastructure/"+
|
"https://gcc.gnu.org/pub/gcc/infrastructure/"+
|
||||||
"gmp-"+version+".tar.bz2",
|
"gmp-"+version+".tar.bz2",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarBzip2,
|
pkg.TarBzip2,
|
||||||
), nil, (*MakeHelper)(nil),
|
), &PackageAttr{
|
||||||
|
Env: []string{
|
||||||
|
"CC=cc",
|
||||||
|
},
|
||||||
|
}, (*MakeHelper)(nil),
|
||||||
M4,
|
M4,
|
||||||
), version
|
), version
|
||||||
}
|
}
|
||||||
@@ -1113,10 +1077,10 @@ func (t Toolchain) newMPFR() (pkg.Artifact, string) {
|
|||||||
version = "4.2.2"
|
version = "4.2.2"
|
||||||
checksum = "wN3gx0zfIuCn9r3VAn_9bmfvAYILwrRfgBjYSD1IjLqyLrLojNN5vKyQuTE9kA-B"
|
checksum = "wN3gx0zfIuCn9r3VAn_9bmfvAYILwrRfgBjYSD1IjLqyLrLojNN5vKyQuTE9kA-B"
|
||||||
)
|
)
|
||||||
return t.NewPackage("mpfr", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("mpfr", version, newTar(
|
||||||
nil, "https://gcc.gnu.org/pub/gcc/infrastructure/"+
|
"https://gcc.gnu.org/pub/gcc/infrastructure/"+
|
||||||
"mpfr-"+version+".tar.bz2",
|
"mpfr-"+version+".tar.bz2",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarBzip2,
|
pkg.TarBzip2,
|
||||||
), nil, (*MakeHelper)(nil),
|
), nil, (*MakeHelper)(nil),
|
||||||
GMP,
|
GMP,
|
||||||
@@ -1140,13 +1104,13 @@ func init() {
|
|||||||
|
|
||||||
func (t Toolchain) newMPC() (pkg.Artifact, string) {
|
func (t Toolchain) newMPC() (pkg.Artifact, string) {
|
||||||
const (
|
const (
|
||||||
version = "1.4.0"
|
version = "1.4.1"
|
||||||
checksum = "TbrxLiE3ipQrHz_F3Xzz4zqBAnkMWyjhNwIK6wh9360RZ39xMt8rxfW3LxA9SnvU"
|
checksum = "ZffaZyWkvIw0iPvRe5EJ7O-VvHtSkbbb3K_7SgPtK810NvGan7nbF0T5-6tozjQN"
|
||||||
)
|
)
|
||||||
return t.NewPackage("mpc", version, t.NewViaGit(
|
return t.NewPackage("mpc", version, newFromGitLab(
|
||||||
"https://gitlab.inria.fr/mpc/mpc.git",
|
"gitlab.inria.fr",
|
||||||
"refs/tags/"+version,
|
"mpc/mpc",
|
||||||
mustDecode(checksum),
|
version, checksum,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
// does not find mpc-impl.h otherwise
|
// does not find mpc-impl.h otherwise
|
||||||
EnterSource: true,
|
EnterSource: true,
|
||||||
@@ -1182,10 +1146,17 @@ func (t Toolchain) newGCC() (pkg.Artifact, string) {
|
|||||||
version = "15.2.0"
|
version = "15.2.0"
|
||||||
checksum = "TXJ5WrbXlGLzy1swghQTr4qxgDCyIZFgJry51XEPTBZ8QYbVmFeB4lZbSMtPJ-a1"
|
checksum = "TXJ5WrbXlGLzy1swghQTr4qxgDCyIZFgJry51XEPTBZ8QYbVmFeB4lZbSMtPJ-a1"
|
||||||
)
|
)
|
||||||
return t.NewPackage("gcc", version, pkg.NewHTTPGetTar(
|
|
||||||
nil, "https://ftp.tsukuba.wide.ad.jp/software/gcc/releases/"+
|
var configureExtra []KV
|
||||||
|
switch runtime.GOARCH {
|
||||||
|
case "amd64", "arm64":
|
||||||
|
configureExtra = append(configureExtra, KV{"with-multilib-list", "''"})
|
||||||
|
}
|
||||||
|
|
||||||
|
return t.NewPackage("gcc", version, newTar(
|
||||||
|
"https://ftp.tsukuba.wide.ad.jp/software/gcc/releases/"+
|
||||||
"gcc-"+version+"/gcc-"+version+".tar.gz",
|
"gcc-"+version+"/gcc-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Patches: []KV{
|
Patches: []KV{
|
||||||
@@ -1347,9 +1318,8 @@ ln -s system/lib /work/
|
|||||||
// it also saturates the CPU for a consequential amount of time.
|
// it also saturates the CPU for a consequential amount of time.
|
||||||
Flag: TExclusive,
|
Flag: TExclusive,
|
||||||
}, &MakeHelper{
|
}, &MakeHelper{
|
||||||
Configure: []KV{
|
Configure: append([]KV{
|
||||||
{"disable-multilib"},
|
{"disable-multilib"},
|
||||||
{"with-multilib-list", `""`},
|
|
||||||
{"enable-default-pie"},
|
{"enable-default-pie"},
|
||||||
{"disable-nls"},
|
{"disable-nls"},
|
||||||
{"with-gnu-as"},
|
{"with-gnu-as"},
|
||||||
@@ -1357,7 +1327,7 @@ ln -s system/lib /work/
|
|||||||
{"with-system-zlib"},
|
{"with-system-zlib"},
|
||||||
{"enable-languages", "c,c++,go"},
|
{"enable-languages", "c,c++,go"},
|
||||||
{"with-native-system-header-dir", "/system/include"},
|
{"with-native-system-header-dir", "/system/include"},
|
||||||
},
|
}, configureExtra...),
|
||||||
Make: []string{
|
Make: []string{
|
||||||
"BOOT_CFLAGS='-O2 -g'",
|
"BOOT_CFLAGS='-O2 -g'",
|
||||||
"bootstrap",
|
"bootstrap",
|
||||||
|
|||||||
35
internal/rosa/gnu_test.go
Normal file
35
internal/rosa/gnu_test.go
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
package rosa
|
||||||
|
|
||||||
|
import (
|
||||||
|
"slices"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestSkipGNUTests(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
testCases := []struct {
|
||||||
|
tests []int
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{[]int{764}, "1-763 765-"},
|
||||||
|
{[]int{764, 0xcafe, 37, 9}, "1-8 10-36 38-763 765-51965 51967-"},
|
||||||
|
{[]int{1, 2, 0xbed}, "3-3052 3054-"},
|
||||||
|
{[]int{3, 4}, "1-2 5-"},
|
||||||
|
}
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(strings.Join(slices.Collect(func(yield func(string) bool) {
|
||||||
|
for _, n := range tc.tests {
|
||||||
|
yield(strconv.Itoa(n))
|
||||||
|
}
|
||||||
|
}), ","), func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
if got := skipGNUTests(tc.tests...); got != tc.want {
|
||||||
|
t.Errorf("skipGNUTests: %q, want %q", got, tc.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -10,9 +10,9 @@ import (
|
|||||||
// newGoBootstrap returns the Go bootstrap toolchain.
|
// newGoBootstrap returns the Go bootstrap toolchain.
|
||||||
func (t Toolchain) newGoBootstrap() pkg.Artifact {
|
func (t Toolchain) newGoBootstrap() pkg.Artifact {
|
||||||
const checksum = "8o9JL_ToiQKadCTb04nvBDkp8O1xiWOolAxVEqaTGodieNe4lOFEjlOxN3bwwe23"
|
const checksum = "8o9JL_ToiQKadCTb04nvBDkp8O1xiWOolAxVEqaTGodieNe4lOFEjlOxN3bwwe23"
|
||||||
return t.New("go1.4-bootstrap", 0, []pkg.Artifact{
|
return t.New("go1.4-bootstrap", 0, t.AppendPresets(nil,
|
||||||
t.Load(Bash),
|
Bash,
|
||||||
}, nil, []string{
|
), nil, []string{
|
||||||
"CGO_ENABLED=0",
|
"CGO_ENABLED=0",
|
||||||
}, `
|
}, `
|
||||||
mkdir -p /var/tmp/ /work/system/
|
mkdir -p /var/tmp/ /work/system/
|
||||||
@@ -21,9 +21,9 @@ cd /work/system/go/src
|
|||||||
chmod -R +w ..
|
chmod -R +w ..
|
||||||
|
|
||||||
./make.bash
|
./make.bash
|
||||||
`, pkg.Path(AbsUsrSrc.Append("go"), false, pkg.NewHTTPGetTar(
|
`, pkg.Path(AbsUsrSrc.Append("go"), false, newTar(
|
||||||
nil, "https://dl.google.com/go/go1.4-bootstrap-20171003.tar.gz",
|
"https://dl.google.com/go/go1.4-bootstrap-20171003.tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
)))
|
)))
|
||||||
}
|
}
|
||||||
@@ -35,9 +35,9 @@ func (t Toolchain) newGo(
|
|||||||
script string,
|
script string,
|
||||||
extra ...pkg.Artifact,
|
extra ...pkg.Artifact,
|
||||||
) pkg.Artifact {
|
) pkg.Artifact {
|
||||||
return t.New("go"+version, 0, slices.Concat([]pkg.Artifact{
|
return t.New("go"+version, 0, t.AppendPresets(extra,
|
||||||
t.Load(Bash),
|
Bash,
|
||||||
}, extra), nil, slices.Concat([]string{
|
), nil, slices.Concat([]string{
|
||||||
"CC=cc",
|
"CC=cc",
|
||||||
"GOCACHE=/tmp/gocache",
|
"GOCACHE=/tmp/gocache",
|
||||||
"GOROOT_BOOTSTRAP=/system/go",
|
"GOROOT_BOOTSTRAP=/system/go",
|
||||||
@@ -55,9 +55,9 @@ ln -s \
|
|||||||
../go/bin/go \
|
../go/bin/go \
|
||||||
../go/bin/gofmt \
|
../go/bin/gofmt \
|
||||||
/work/system/bin
|
/work/system/bin
|
||||||
`, pkg.Path(AbsUsrSrc.Append("go"), false, pkg.NewHTTPGetTar(
|
`, pkg.Path(AbsUsrSrc.Append("go"), false, newTar(
|
||||||
nil, "https://go.dev/dl/go"+version+".src.tar.gz",
|
"https://go.dev/dl/go"+version+".src.tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
)))
|
)))
|
||||||
}
|
}
|
||||||
@@ -127,8 +127,8 @@ sed -i \
|
|||||||
)
|
)
|
||||||
|
|
||||||
go125 := t.newGo(
|
go125 := t.newGo(
|
||||||
"1.25.7",
|
"1.25.9",
|
||||||
"fyylHdBVRUobnBjYj3NKBaYPUw3kGmo2mEELiZonOYurPfbarNU1x77B99Fjut7Q",
|
"gShJb9uOMk5AxqPSwvn53ZO56S6PyP6nfojzrHUiJ3krAvrgjJpYa6-DPA-jxbpN",
|
||||||
[]string{"CGO_ENABLED=0"}, `
|
[]string{"CGO_ENABLED=0"}, `
|
||||||
sed -i \
|
sed -i \
|
||||||
's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \
|
's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \
|
||||||
@@ -151,9 +151,14 @@ rm \
|
|||||||
sed -i \
|
sed -i \
|
||||||
's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \
|
's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \
|
||||||
cmd/link/internal/`+runtime.GOARCH+`/obj.go
|
cmd/link/internal/`+runtime.GOARCH+`/obj.go
|
||||||
|
sed -i \
|
||||||
|
's/cpu.X86.HasAVX512VBMI/& \&\& cpu.X86.HasPOPCNT/' \
|
||||||
|
internal/runtime/gc/scan/scan_amd64.go
|
||||||
|
|
||||||
rm \
|
rm \
|
||||||
os/root_unix_test.go
|
os/root_unix_test.go \
|
||||||
|
cmd/cgo/internal/testsanitizers/tsan_test.go \
|
||||||
|
cmd/cgo/internal/testsanitizers/cshared_test.go
|
||||||
`, go125,
|
`, go125,
|
||||||
), version
|
), version
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -10,10 +10,9 @@ func (t Toolchain) newGLib() (pkg.Artifact, string) {
|
|||||||
version = "2.88.0"
|
version = "2.88.0"
|
||||||
checksum = "T79Cg4z6j-sDZ2yIwvbY4ccRv2-fbwbqgcw59F5NQ6qJT6z4v261vbYp3dHO6Ma3"
|
checksum = "T79Cg4z6j-sDZ2yIwvbY4ccRv2-fbwbqgcw59F5NQ6qJT6z4v261vbYp3dHO6Ma3"
|
||||||
)
|
)
|
||||||
return t.NewPackage("glib", version, t.NewViaGit(
|
return t.NewPackage("glib", version, t.newTagRemote(
|
||||||
"https://gitlab.gnome.org/GNOME/glib.git",
|
"https://gitlab.gnome.org/GNOME/glib.git",
|
||||||
"refs/tags/"+version,
|
version, checksum,
|
||||||
mustDecode(checksum),
|
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Paths: []pkg.ExecPath{
|
Paths: []pkg.ExecPath{
|
||||||
pkg.Path(fhs.AbsEtc.Append(
|
pkg.Path(fhs.AbsEtc.Append(
|
||||||
|
|||||||
@@ -7,9 +7,8 @@ func (t Toolchain) newHakurei(
|
|||||||
withHostname bool,
|
withHostname bool,
|
||||||
) pkg.Artifact {
|
) pkg.Artifact {
|
||||||
hostname := `
|
hostname := `
|
||||||
echo '# Building test helper (hostname).'
|
echo 'Building test helper (hostname).'
|
||||||
go build -v -o /bin/hostname /usr/src/hostname/main.go
|
go build -o /bin/hostname /usr/src/hostname/main.go
|
||||||
echo
|
|
||||||
`
|
`
|
||||||
if !withHostname {
|
if !withHostname {
|
||||||
hostname = ""
|
hostname = ""
|
||||||
@@ -99,7 +98,7 @@ mkdir -p /work/system/bin/
|
|||||||
f: func(t Toolchain) (pkg.Artifact, string) {
|
f: func(t Toolchain) (pkg.Artifact, string) {
|
||||||
return t.newHakurei("-dist", `
|
return t.newHakurei("-dist", `
|
||||||
export HAKUREI_VERSION
|
export HAKUREI_VERSION
|
||||||
DESTDIR=/work /usr/src/hakurei/dist/release.sh
|
DESTDIR=/work /usr/src/hakurei/all.sh
|
||||||
`, true), hakureiVersion
|
`, true), hakureiVersion
|
||||||
},
|
},
|
||||||
|
|
||||||
|
|||||||
@@ -4,13 +4,13 @@ package rosa
|
|||||||
|
|
||||||
import "hakurei.app/internal/pkg"
|
import "hakurei.app/internal/pkg"
|
||||||
|
|
||||||
const hakureiVersion = "0.3.7"
|
const hakureiVersion = "0.4.0"
|
||||||
|
|
||||||
// hakureiSource is the source code of a hakurei release.
|
// hakureiSource is the source code of a hakurei release.
|
||||||
var hakureiSource = pkg.NewHTTPGetTar(
|
var hakureiSource = newTar(
|
||||||
nil, "https://git.gensokyo.uk/rosa/hakurei/archive/"+
|
"https://git.gensokyo.uk/rosa/hakurei/archive/"+
|
||||||
"v"+hakureiVersion+".tar.gz",
|
"v"+hakureiVersion+".tar.gz",
|
||||||
mustDecode("Xh_sdITOATEAQN5_UuaOyrWsgboxorqRO9bml3dGm8GAxF8NFpB7MqhSZgjJxAl2"),
|
"wfQ9DqCW0Fw9o91wj-I55waoqzB-UqzzuC0_2h-P-1M78SgZ1WHSPCDJMth6EyC2",
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -2,12 +2,12 @@ package rosa
|
|||||||
|
|
||||||
import "hakurei.app/internal/pkg"
|
import "hakurei.app/internal/pkg"
|
||||||
|
|
||||||
const kernelVersion = "6.12.80"
|
const kernelVersion = "6.12.84"
|
||||||
|
|
||||||
var kernelSource = pkg.NewHTTPGetTar(
|
var kernelSource = newTar(
|
||||||
nil, "https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/"+
|
"https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/"+
|
||||||
"snapshot/linux-"+kernelVersion+".tar.gz",
|
"snapshot/linux-"+kernelVersion+".tar.gz",
|
||||||
mustDecode("_iJEAYoQISJxefuWZYfv0RPWUmHHIjHQw33Fapix-irXrEIREP5ruK37UJW4uMZO"),
|
"GJLUEu68r3DpLYoTcMl4wA_ThMBs_Zwc0gZsp82ii_3AOfcVxpI639IKfq2jAAY2",
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -1221,7 +1221,7 @@ install -Dm0500 \
|
|||||||
/sbin/depmod
|
/sbin/depmod
|
||||||
|
|
||||||
make \
|
make \
|
||||||
"-j$(nproc)" \
|
` + jobsFlagE + ` \
|
||||||
-f /usr/src/kernel/Makefile \
|
-f /usr/src/kernel/Makefile \
|
||||||
O=/tmp/kbuild \
|
O=/tmp/kbuild \
|
||||||
LLVM=1 \
|
LLVM=1 \
|
||||||
@@ -1282,14 +1282,14 @@ func init() {
|
|||||||
|
|
||||||
func (t Toolchain) newFirmware() (pkg.Artifact, string) {
|
func (t Toolchain) newFirmware() (pkg.Artifact, string) {
|
||||||
const (
|
const (
|
||||||
version = "20260309"
|
version = "20260410"
|
||||||
checksum = "M1az8BxSiOEH3LA11Trc5VAlakwAHhP7-_LKWg6k-SVIzU3xclMDO4Tiujw1gQrC"
|
checksum = "J8PdQlGqwrivpskPzbL6xacqR6mlKtXpe5RpzFfVzKPAgG81ZRXsc3qrxwdGJbil"
|
||||||
)
|
)
|
||||||
return t.NewPackage("firmware", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("firmware", version, newFromGitLab(
|
||||||
nil, "https://gitlab.com/kernel-firmware/linux-firmware/-/"+
|
"gitlab.com",
|
||||||
"archive/"+version+"/linux-firmware-"+version+".tar.bz2",
|
"kernel-firmware/linux-firmware",
|
||||||
mustDecode(checksum),
|
version,
|
||||||
pkg.TarBzip2,
|
checksum,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
// dedup creates temporary file
|
// dedup creates temporary file
|
||||||
Writable: true,
|
Writable: true,
|
||||||
@@ -1309,7 +1309,7 @@ func (t Toolchain) newFirmware() (pkg.Artifact, string) {
|
|||||||
"install-zst",
|
"install-zst",
|
||||||
},
|
},
|
||||||
SkipCheck: true, // requires pre-commit
|
SkipCheck: true, // requires pre-commit
|
||||||
Install: `make "-j$(nproc)" DESTDIR=/work/system dedup`,
|
Install: "make " + jobsFlagE + " DESTDIR=/work/system dedup",
|
||||||
},
|
},
|
||||||
Parallel,
|
Parallel,
|
||||||
Rdfind,
|
Rdfind,
|
||||||
|
|||||||
@@ -1,16 +1,16 @@
|
|||||||
#
|
#
|
||||||
# Automatically generated file; DO NOT EDIT.
|
# Automatically generated file; DO NOT EDIT.
|
||||||
# Linux/x86 6.12.80 Kernel Configuration
|
# Linux/x86 6.12.84 Kernel Configuration
|
||||||
#
|
#
|
||||||
CONFIG_CC_VERSION_TEXT="clang version 22.1.2"
|
CONFIG_CC_VERSION_TEXT="clang version 22.1.4"
|
||||||
CONFIG_GCC_VERSION=0
|
CONFIG_GCC_VERSION=0
|
||||||
CONFIG_CC_IS_CLANG=y
|
CONFIG_CC_IS_CLANG=y
|
||||||
CONFIG_CLANG_VERSION=220102
|
CONFIG_CLANG_VERSION=220104
|
||||||
CONFIG_AS_IS_LLVM=y
|
CONFIG_AS_IS_LLVM=y
|
||||||
CONFIG_AS_VERSION=220102
|
CONFIG_AS_VERSION=220104
|
||||||
CONFIG_LD_VERSION=0
|
CONFIG_LD_VERSION=0
|
||||||
CONFIG_LD_IS_LLD=y
|
CONFIG_LD_IS_LLD=y
|
||||||
CONFIG_LLD_VERSION=220102
|
CONFIG_LLD_VERSION=220104
|
||||||
CONFIG_RUSTC_VERSION=0
|
CONFIG_RUSTC_VERSION=0
|
||||||
CONFIG_RUSTC_LLVM_VERSION=0
|
CONFIG_RUSTC_LLVM_VERSION=0
|
||||||
CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
|
CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
|
||||||
@@ -3175,14 +3175,8 @@ CONFIG_PATA_ACPI=y
|
|||||||
CONFIG_ATA_GENERIC=y
|
CONFIG_ATA_GENERIC=y
|
||||||
CONFIG_PATA_LEGACY=m
|
CONFIG_PATA_LEGACY=m
|
||||||
CONFIG_MD=y
|
CONFIG_MD=y
|
||||||
CONFIG_BLK_DEV_MD=m
|
# CONFIG_BLK_DEV_MD is not set
|
||||||
CONFIG_MD_BITMAP_FILE=y
|
CONFIG_MD_BITMAP_FILE=y
|
||||||
CONFIG_MD_LINEAR=m
|
|
||||||
CONFIG_MD_RAID0=m
|
|
||||||
CONFIG_MD_RAID1=m
|
|
||||||
CONFIG_MD_RAID10=m
|
|
||||||
CONFIG_MD_RAID456=m
|
|
||||||
CONFIG_MD_CLUSTER=m
|
|
||||||
CONFIG_BCACHE=m
|
CONFIG_BCACHE=m
|
||||||
# CONFIG_BCACHE_DEBUG is not set
|
# CONFIG_BCACHE_DEBUG is not set
|
||||||
# CONFIG_BCACHE_ASYNC_REGISTRATION is not set
|
# CONFIG_BCACHE_ASYNC_REGISTRATION is not set
|
||||||
@@ -3205,7 +3199,7 @@ CONFIG_DM_ERA=m
|
|||||||
CONFIG_DM_CLONE=m
|
CONFIG_DM_CLONE=m
|
||||||
CONFIG_DM_MIRROR=m
|
CONFIG_DM_MIRROR=m
|
||||||
CONFIG_DM_LOG_USERSPACE=m
|
CONFIG_DM_LOG_USERSPACE=m
|
||||||
CONFIG_DM_RAID=m
|
# CONFIG_DM_RAID is not set
|
||||||
CONFIG_DM_ZERO=m
|
CONFIG_DM_ZERO=m
|
||||||
CONFIG_DM_MULTIPATH=m
|
CONFIG_DM_MULTIPATH=m
|
||||||
CONFIG_DM_MULTIPATH_QL=m
|
CONFIG_DM_MULTIPATH_QL=m
|
||||||
@@ -11636,10 +11630,7 @@ CONFIG_RANDSTRUCT_NONE=y
|
|||||||
|
|
||||||
CONFIG_XOR_BLOCKS=m
|
CONFIG_XOR_BLOCKS=m
|
||||||
CONFIG_ASYNC_CORE=m
|
CONFIG_ASYNC_CORE=m
|
||||||
CONFIG_ASYNC_MEMCPY=m
|
|
||||||
CONFIG_ASYNC_XOR=m
|
CONFIG_ASYNC_XOR=m
|
||||||
CONFIG_ASYNC_PQ=m
|
|
||||||
CONFIG_ASYNC_RAID6_RECOV=m
|
|
||||||
CONFIG_CRYPTO=y
|
CONFIG_CRYPTO=y
|
||||||
|
|
||||||
#
|
#
|
||||||
@@ -11925,8 +11916,6 @@ CONFIG_BINARY_PRINTF=y
|
|||||||
#
|
#
|
||||||
# Library routines
|
# Library routines
|
||||||
#
|
#
|
||||||
CONFIG_RAID6_PQ=m
|
|
||||||
CONFIG_RAID6_PQ_BENCHMARK=y
|
|
||||||
CONFIG_LINEAR_RANGES=y
|
CONFIG_LINEAR_RANGES=y
|
||||||
CONFIG_PACKING=y
|
CONFIG_PACKING=y
|
||||||
CONFIG_BITREVERSE=y
|
CONFIG_BITREVERSE=y
|
||||||
@@ -12471,7 +12460,6 @@ CONFIG_RUNTIME_TESTING_MENU=y
|
|||||||
# CONFIG_INTERVAL_TREE_TEST is not set
|
# CONFIG_INTERVAL_TREE_TEST is not set
|
||||||
# CONFIG_PERCPU_TEST is not set
|
# CONFIG_PERCPU_TEST is not set
|
||||||
# CONFIG_ATOMIC64_SELFTEST is not set
|
# CONFIG_ATOMIC64_SELFTEST is not set
|
||||||
# CONFIG_ASYNC_RAID6_TEST is not set
|
|
||||||
# CONFIG_TEST_HEXDUMP is not set
|
# CONFIG_TEST_HEXDUMP is not set
|
||||||
# CONFIG_TEST_KSTRTOX is not set
|
# CONFIG_TEST_KSTRTOX is not set
|
||||||
# CONFIG_TEST_PRINTF is not set
|
# CONFIG_TEST_PRINTF is not set
|
||||||
|
|||||||
@@ -1,16 +1,16 @@
|
|||||||
#
|
#
|
||||||
# Automatically generated file; DO NOT EDIT.
|
# Automatically generated file; DO NOT EDIT.
|
||||||
# Linux/arm64 6.12.80 Kernel Configuration
|
# Linux/arm64 6.12.83 Kernel Configuration
|
||||||
#
|
#
|
||||||
CONFIG_CC_VERSION_TEXT="clang version 21.1.8"
|
CONFIG_CC_VERSION_TEXT="clang version 22.1.4"
|
||||||
CONFIG_GCC_VERSION=0
|
CONFIG_GCC_VERSION=0
|
||||||
CONFIG_CC_IS_CLANG=y
|
CONFIG_CC_IS_CLANG=y
|
||||||
CONFIG_CLANG_VERSION=210108
|
CONFIG_CLANG_VERSION=220104
|
||||||
CONFIG_AS_IS_LLVM=y
|
CONFIG_AS_IS_LLVM=y
|
||||||
CONFIG_AS_VERSION=210108
|
CONFIG_AS_VERSION=220104
|
||||||
CONFIG_LD_VERSION=0
|
CONFIG_LD_VERSION=0
|
||||||
CONFIG_LD_IS_LLD=y
|
CONFIG_LD_IS_LLD=y
|
||||||
CONFIG_LLD_VERSION=210108
|
CONFIG_LLD_VERSION=220104
|
||||||
CONFIG_RUSTC_VERSION=0
|
CONFIG_RUSTC_VERSION=0
|
||||||
CONFIG_RUSTC_LLVM_VERSION=0
|
CONFIG_RUSTC_LLVM_VERSION=0
|
||||||
CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
|
CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
|
||||||
@@ -3253,14 +3253,8 @@ CONFIG_PATA_ACPI=y
|
|||||||
CONFIG_ATA_GENERIC=y
|
CONFIG_ATA_GENERIC=y
|
||||||
CONFIG_PATA_LEGACY=m
|
CONFIG_PATA_LEGACY=m
|
||||||
CONFIG_MD=y
|
CONFIG_MD=y
|
||||||
CONFIG_BLK_DEV_MD=m
|
# CONFIG_BLK_DEV_MD is not set
|
||||||
CONFIG_MD_BITMAP_FILE=y
|
CONFIG_MD_BITMAP_FILE=y
|
||||||
CONFIG_MD_LINEAR=m
|
|
||||||
CONFIG_MD_RAID0=m
|
|
||||||
CONFIG_MD_RAID1=m
|
|
||||||
CONFIG_MD_RAID10=m
|
|
||||||
CONFIG_MD_RAID456=m
|
|
||||||
CONFIG_MD_CLUSTER=m
|
|
||||||
CONFIG_BCACHE=m
|
CONFIG_BCACHE=m
|
||||||
# CONFIG_BCACHE_DEBUG is not set
|
# CONFIG_BCACHE_DEBUG is not set
|
||||||
# CONFIG_BCACHE_ASYNC_REGISTRATION is not set
|
# CONFIG_BCACHE_ASYNC_REGISTRATION is not set
|
||||||
@@ -3283,7 +3277,7 @@ CONFIG_DM_ERA=m
|
|||||||
CONFIG_DM_CLONE=m
|
CONFIG_DM_CLONE=m
|
||||||
CONFIG_DM_MIRROR=m
|
CONFIG_DM_MIRROR=m
|
||||||
CONFIG_DM_LOG_USERSPACE=m
|
CONFIG_DM_LOG_USERSPACE=m
|
||||||
CONFIG_DM_RAID=m
|
# CONFIG_DM_RAID is not set
|
||||||
CONFIG_DM_ZERO=m
|
CONFIG_DM_ZERO=m
|
||||||
CONFIG_DM_MULTIPATH=m
|
CONFIG_DM_MULTIPATH=m
|
||||||
CONFIG_DM_MULTIPATH_QL=m
|
CONFIG_DM_MULTIPATH_QL=m
|
||||||
@@ -10300,7 +10294,6 @@ CONFIG_ALTERA_MSGDMA=m
|
|||||||
# CONFIG_AMBA_PL08X is not set
|
# CONFIG_AMBA_PL08X is not set
|
||||||
CONFIG_APPLE_ADMAC=m
|
CONFIG_APPLE_ADMAC=m
|
||||||
CONFIG_AXI_DMAC=m
|
CONFIG_AXI_DMAC=m
|
||||||
CONFIG_BCM_SBA_RAID=m
|
|
||||||
CONFIG_DMA_BCM2835=m
|
CONFIG_DMA_BCM2835=m
|
||||||
CONFIG_DMA_SUN6I=m
|
CONFIG_DMA_SUN6I=m
|
||||||
CONFIG_DW_AXI_DMAC=m
|
CONFIG_DW_AXI_DMAC=m
|
||||||
@@ -13292,12 +13285,7 @@ CONFIG_RANDSTRUCT_NONE=y
|
|||||||
|
|
||||||
CONFIG_XOR_BLOCKS=m
|
CONFIG_XOR_BLOCKS=m
|
||||||
CONFIG_ASYNC_CORE=m
|
CONFIG_ASYNC_CORE=m
|
||||||
CONFIG_ASYNC_MEMCPY=m
|
|
||||||
CONFIG_ASYNC_XOR=m
|
CONFIG_ASYNC_XOR=m
|
||||||
CONFIG_ASYNC_PQ=m
|
|
||||||
CONFIG_ASYNC_RAID6_RECOV=m
|
|
||||||
CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA=y
|
|
||||||
CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA=y
|
|
||||||
CONFIG_CRYPTO=y
|
CONFIG_CRYPTO=y
|
||||||
|
|
||||||
#
|
#
|
||||||
@@ -13640,8 +13628,6 @@ CONFIG_BINARY_PRINTF=y
|
|||||||
#
|
#
|
||||||
# Library routines
|
# Library routines
|
||||||
#
|
#
|
||||||
CONFIG_RAID6_PQ=m
|
|
||||||
CONFIG_RAID6_PQ_BENCHMARK=y
|
|
||||||
CONFIG_LINEAR_RANGES=y
|
CONFIG_LINEAR_RANGES=y
|
||||||
CONFIG_PACKING=y
|
CONFIG_PACKING=y
|
||||||
CONFIG_BITREVERSE=y
|
CONFIG_BITREVERSE=y
|
||||||
@@ -14172,7 +14158,6 @@ CONFIG_RUNTIME_TESTING_MENU=y
|
|||||||
# CONFIG_INTERVAL_TREE_TEST is not set
|
# CONFIG_INTERVAL_TREE_TEST is not set
|
||||||
# CONFIG_PERCPU_TEST is not set
|
# CONFIG_PERCPU_TEST is not set
|
||||||
# CONFIG_ATOMIC64_SELFTEST is not set
|
# CONFIG_ATOMIC64_SELFTEST is not set
|
||||||
# CONFIG_ASYNC_RAID6_TEST is not set
|
|
||||||
# CONFIG_TEST_HEXDUMP is not set
|
# CONFIG_TEST_HEXDUMP is not set
|
||||||
# CONFIG_TEST_KSTRTOX is not set
|
# CONFIG_TEST_KSTRTOX is not set
|
||||||
# CONFIG_TEST_PRINTF is not set
|
# CONFIG_TEST_PRINTF is not set
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
#
|
#
|
||||||
# Automatically generated file; DO NOT EDIT.
|
# Automatically generated file; DO NOT EDIT.
|
||||||
# Linux/riscv 6.12.77 Kernel Configuration
|
# Linux/riscv 6.12.80 Kernel Configuration
|
||||||
#
|
#
|
||||||
CONFIG_CC_VERSION_TEXT="clang version 22.1.2"
|
CONFIG_CC_VERSION_TEXT="clang version 22.1.2"
|
||||||
CONFIG_GCC_VERSION=0
|
CONFIG_GCC_VERSION=0
|
||||||
@@ -37,11 +37,6 @@ CONFIG_BUILD_SALT=""
|
|||||||
CONFIG_HAVE_KERNEL_GZIP=y
|
CONFIG_HAVE_KERNEL_GZIP=y
|
||||||
CONFIG_HAVE_KERNEL_ZSTD=y
|
CONFIG_HAVE_KERNEL_ZSTD=y
|
||||||
# CONFIG_KERNEL_GZIP is not set
|
# CONFIG_KERNEL_GZIP is not set
|
||||||
# CONFIG_KERNEL_BZIP2 is not set
|
|
||||||
# CONFIG_KERNEL_LZMA is not set
|
|
||||||
# CONFIG_KERNEL_XZ is not set
|
|
||||||
# CONFIG_KERNEL_LZO is not set
|
|
||||||
# CONFIG_KERNEL_LZ4 is not set
|
|
||||||
CONFIG_KERNEL_ZSTD=y
|
CONFIG_KERNEL_ZSTD=y
|
||||||
CONFIG_DEFAULT_INIT=""
|
CONFIG_DEFAULT_INIT=""
|
||||||
CONFIG_DEFAULT_HOSTNAME="rosa-early"
|
CONFIG_DEFAULT_HOSTNAME="rosa-early"
|
||||||
@@ -2848,14 +2843,8 @@ CONFIG_PATA_ACPI=y
|
|||||||
CONFIG_ATA_GENERIC=y
|
CONFIG_ATA_GENERIC=y
|
||||||
CONFIG_PATA_LEGACY=m
|
CONFIG_PATA_LEGACY=m
|
||||||
CONFIG_MD=y
|
CONFIG_MD=y
|
||||||
CONFIG_BLK_DEV_MD=m
|
# CONFIG_BLK_DEV_MD is not set
|
||||||
CONFIG_MD_BITMAP_FILE=y
|
CONFIG_MD_BITMAP_FILE=y
|
||||||
CONFIG_MD_LINEAR=m
|
|
||||||
CONFIG_MD_RAID0=m
|
|
||||||
CONFIG_MD_RAID1=m
|
|
||||||
CONFIG_MD_RAID10=m
|
|
||||||
CONFIG_MD_RAID456=m
|
|
||||||
CONFIG_MD_CLUSTER=m
|
|
||||||
CONFIG_BCACHE=m
|
CONFIG_BCACHE=m
|
||||||
# CONFIG_BCACHE_DEBUG is not set
|
# CONFIG_BCACHE_DEBUG is not set
|
||||||
# CONFIG_BCACHE_ASYNC_REGISTRATION is not set
|
# CONFIG_BCACHE_ASYNC_REGISTRATION is not set
|
||||||
@@ -2878,7 +2867,7 @@ CONFIG_DM_ERA=m
|
|||||||
CONFIG_DM_CLONE=m
|
CONFIG_DM_CLONE=m
|
||||||
CONFIG_DM_MIRROR=m
|
CONFIG_DM_MIRROR=m
|
||||||
CONFIG_DM_LOG_USERSPACE=m
|
CONFIG_DM_LOG_USERSPACE=m
|
||||||
CONFIG_DM_RAID=m
|
# CONFIG_DM_RAID is not set
|
||||||
CONFIG_DM_ZERO=m
|
CONFIG_DM_ZERO=m
|
||||||
CONFIG_DM_MULTIPATH=m
|
CONFIG_DM_MULTIPATH=m
|
||||||
CONFIG_DM_MULTIPATH_QL=m
|
CONFIG_DM_MULTIPATH_QL=m
|
||||||
@@ -10655,10 +10644,7 @@ CONFIG_RANDSTRUCT_NONE=y
|
|||||||
|
|
||||||
CONFIG_XOR_BLOCKS=m
|
CONFIG_XOR_BLOCKS=m
|
||||||
CONFIG_ASYNC_CORE=m
|
CONFIG_ASYNC_CORE=m
|
||||||
CONFIG_ASYNC_MEMCPY=m
|
|
||||||
CONFIG_ASYNC_XOR=m
|
CONFIG_ASYNC_XOR=m
|
||||||
CONFIG_ASYNC_PQ=m
|
|
||||||
CONFIG_ASYNC_RAID6_RECOV=m
|
|
||||||
CONFIG_CRYPTO=y
|
CONFIG_CRYPTO=y
|
||||||
|
|
||||||
#
|
#
|
||||||
@@ -10918,8 +10904,6 @@ CONFIG_BINARY_PRINTF=y
|
|||||||
#
|
#
|
||||||
# Library routines
|
# Library routines
|
||||||
#
|
#
|
||||||
CONFIG_RAID6_PQ=m
|
|
||||||
CONFIG_RAID6_PQ_BENCHMARK=y
|
|
||||||
CONFIG_LINEAR_RANGES=y
|
CONFIG_LINEAR_RANGES=y
|
||||||
CONFIG_PACKING=y
|
CONFIG_PACKING=y
|
||||||
CONFIG_BITREVERSE=y
|
CONFIG_BITREVERSE=y
|
||||||
@@ -11408,7 +11392,6 @@ CONFIG_RUNTIME_TESTING_MENU=y
|
|||||||
# CONFIG_INTERVAL_TREE_TEST is not set
|
# CONFIG_INTERVAL_TREE_TEST is not set
|
||||||
# CONFIG_PERCPU_TEST is not set
|
# CONFIG_PERCPU_TEST is not set
|
||||||
# CONFIG_ATOMIC64_SELFTEST is not set
|
# CONFIG_ATOMIC64_SELFTEST is not set
|
||||||
# CONFIG_ASYNC_RAID6_TEST is not set
|
|
||||||
# CONFIG_TEST_HEXDUMP is not set
|
# CONFIG_TEST_HEXDUMP is not set
|
||||||
# CONFIG_TEST_KSTRTOX is not set
|
# CONFIG_TEST_KSTRTOX is not set
|
||||||
# CONFIG_TEST_PRINTF is not set
|
# CONFIG_TEST_PRINTF is not set
|
||||||
|
|||||||
@@ -7,10 +7,10 @@ func (t Toolchain) newKmod() (pkg.Artifact, string) {
|
|||||||
version = "34.2"
|
version = "34.2"
|
||||||
checksum = "0K7POeTKxMhExsaTsnKAC6LUNsRSfe6sSZxWONPbOu-GI_pXOw3toU_BIoqfBhJV"
|
checksum = "0K7POeTKxMhExsaTsnKAC6LUNsRSfe6sSZxWONPbOu-GI_pXOw3toU_BIoqfBhJV"
|
||||||
)
|
)
|
||||||
return t.NewPackage("kmod", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("kmod", version, newTar(
|
||||||
nil, "https://www.kernel.org/pub/linux/utils/kernel/"+
|
"https://www.kernel.org/pub/linux/utils/kernel/"+
|
||||||
"kmod/kmod-"+version+".tar.gz",
|
"kmod/kmod-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil, &MesonHelper{
|
), nil, &MesonHelper{
|
||||||
Setup: []KV{
|
Setup: []KV{
|
||||||
|
|||||||
@@ -7,10 +7,9 @@ func (t Toolchain) newLibmd() (pkg.Artifact, string) {
|
|||||||
version = "1.1.0"
|
version = "1.1.0"
|
||||||
checksum = "9apYqPPZm0j5HQT8sCsVIhnVIqRD7XgN7kPIaTwTqnTuUq5waUAMq4M7ev8CODJ1"
|
checksum = "9apYqPPZm0j5HQT8sCsVIhnVIqRD7XgN7kPIaTwTqnTuUq5waUAMq4M7ev8CODJ1"
|
||||||
)
|
)
|
||||||
return t.NewPackage("libmd", version, t.NewViaGit(
|
return t.NewPackage("libmd", version, t.newTagRemote(
|
||||||
"https://git.hadrons.org/git/libmd.git",
|
"https://git.hadrons.org/git/libmd.git",
|
||||||
"refs/tags/"+version,
|
version, checksum,
|
||||||
mustDecode(checksum),
|
|
||||||
), nil, &MakeHelper{
|
), nil, &MakeHelper{
|
||||||
Generate: "echo '" + version + "' > .dist-version && ./autogen",
|
Generate: "echo '" + version + "' > .dist-version && ./autogen",
|
||||||
ScriptMakeEarly: `
|
ScriptMakeEarly: `
|
||||||
@@ -38,10 +37,9 @@ func (t Toolchain) newLibbsd() (pkg.Artifact, string) {
|
|||||||
version = "0.12.2"
|
version = "0.12.2"
|
||||||
checksum = "NVS0xFLTwSP8JiElEftsZ-e1_C-IgJhHrHE77RwKt5178M7r087waO-zYx2_dfGX"
|
checksum = "NVS0xFLTwSP8JiElEftsZ-e1_C-IgJhHrHE77RwKt5178M7r087waO-zYx2_dfGX"
|
||||||
)
|
)
|
||||||
return t.NewPackage("libbsd", version, t.NewViaGit(
|
return t.NewPackage("libbsd", version, t.newTagRemote(
|
||||||
"https://gitlab.freedesktop.org/libbsd/libbsd.git",
|
"https://gitlab.freedesktop.org/libbsd/libbsd.git",
|
||||||
"refs/tags/"+version,
|
version, checksum,
|
||||||
mustDecode(checksum),
|
|
||||||
), nil, &MakeHelper{
|
), nil, &MakeHelper{
|
||||||
Generate: "echo '" + version + "' > .dist-version && ./autogen",
|
Generate: "echo '" + version + "' > .dist-version && ./autogen",
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -7,10 +7,10 @@ func (t Toolchain) newLibcap() (pkg.Artifact, string) {
|
|||||||
version = "2.78"
|
version = "2.78"
|
||||||
checksum = "wFdUkBhFMD9InPnrBZyegWrlPSAg_9JiTBC-eSFyWWlmbzL2qjh2mKxr9Kx2a8ut"
|
checksum = "wFdUkBhFMD9InPnrBZyegWrlPSAg_9JiTBC-eSFyWWlmbzL2qjh2mKxr9Kx2a8ut"
|
||||||
)
|
)
|
||||||
return t.NewPackage("libcap", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("libcap", version, newTar(
|
||||||
nil, "https://git.kernel.org/pub/scm/libs/libcap/libcap.git/"+
|
"https://git.kernel.org/pub/scm/libs/libcap/libcap.git/"+
|
||||||
"snapshot/libcap-"+version+".tar.gz",
|
"snapshot/libcap-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
// uses source tree as scratch space
|
// uses source tree as scratch space
|
||||||
|
|||||||
@@ -7,9 +7,9 @@ func (t Toolchain) newLibev() (pkg.Artifact, string) {
|
|||||||
version = "4.33"
|
version = "4.33"
|
||||||
checksum = "774eSXV_4k8PySRprUDChbEwsw-kzjIFnJ3MpNOl5zDpamBRvC3BqPyRxvkwcL6_"
|
checksum = "774eSXV_4k8PySRprUDChbEwsw-kzjIFnJ3MpNOl5zDpamBRvC3BqPyRxvkwcL6_"
|
||||||
)
|
)
|
||||||
return t.NewPackage("libev", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("libev", version, newTar(
|
||||||
nil, "https://dist.schmorp.de/libev/Attic/libev-"+version+".tar.gz",
|
"https://dist.schmorp.de/libev/Attic/libev-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil, (*MakeHelper)(nil)), version
|
), nil, (*MakeHelper)(nil)), version
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,14 +8,14 @@ import (
|
|||||||
|
|
||||||
func (t Toolchain) newLibexpat() (pkg.Artifact, string) {
|
func (t Toolchain) newLibexpat() (pkg.Artifact, string) {
|
||||||
const (
|
const (
|
||||||
version = "2.7.5"
|
version = "2.8.0"
|
||||||
checksum = "vTRUjjg-qbHSXUBYKXgzVHkUO7UNyuhrkSYrE7ikApQm0g-OvQ8tspw4w55M-1Tp"
|
checksum = "pnwZ_JSif-OfoWIwk2JYXWHagOWMA3Sh-Ea0p-4Rz9U9mDEeAebhyvnfD7OYOMCk"
|
||||||
)
|
)
|
||||||
return t.NewPackage("libexpat", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("libexpat", version, newFromGitHubRelease(
|
||||||
nil, "https://github.com/libexpat/libexpat/releases/download/"+
|
"libexpat/libexpat",
|
||||||
"R_"+strings.ReplaceAll(version, ".", "_")+"/"+
|
"R_"+strings.ReplaceAll(version, ".", "_"),
|
||||||
"expat-"+version+".tar.bz2",
|
"expat-"+version+".tar.bz2",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarBzip2,
|
pkg.TarBzip2,
|
||||||
), nil, (*MakeHelper)(nil),
|
), nil, (*MakeHelper)(nil),
|
||||||
Bash,
|
Bash,
|
||||||
|
|||||||
@@ -7,10 +7,11 @@ func (t Toolchain) newLibffi() (pkg.Artifact, string) {
|
|||||||
version = "3.5.2"
|
version = "3.5.2"
|
||||||
checksum = "2_Q-ZNBBbVhltfL5zEr0wljxPegUimTK4VeMSiwJEGksls3n4gj3lV0Ly3vviSFH"
|
checksum = "2_Q-ZNBBbVhltfL5zEr0wljxPegUimTK4VeMSiwJEGksls3n4gj3lV0Ly3vviSFH"
|
||||||
)
|
)
|
||||||
return t.NewPackage("libffi", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("libffi", version, newFromGitHubRelease(
|
||||||
nil, "https://github.com/libffi/libffi/releases/download/"+
|
"libffi/libffi",
|
||||||
"v"+version+"/libffi-"+version+".tar.gz",
|
"v"+version,
|
||||||
mustDecode(checksum),
|
"libffi-"+version+".tar.gz",
|
||||||
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil, (*MakeHelper)(nil),
|
), nil, (*MakeHelper)(nil),
|
||||||
KernelHeaders,
|
KernelHeaders,
|
||||||
|
|||||||
@@ -7,10 +7,10 @@ func (t Toolchain) newLibgd() (pkg.Artifact, string) {
|
|||||||
version = "2.3.3"
|
version = "2.3.3"
|
||||||
checksum = "8T-sh1_FJT9K9aajgxzh8ot6vWIF-xxjcKAHvTak9MgGUcsFfzP8cAvvv44u2r36"
|
checksum = "8T-sh1_FJT9K9aajgxzh8ot6vWIF-xxjcKAHvTak9MgGUcsFfzP8cAvvv44u2r36"
|
||||||
)
|
)
|
||||||
return t.NewPackage("libgd", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("libgd", version, newFromGitHubRelease(
|
||||||
nil, "https://github.com/libgd/libgd/releases/download/"+
|
"libgd/libgd",
|
||||||
"gd-"+version+"/libgd-"+version+".tar.gz",
|
"gd-"+version,
|
||||||
mustDecode(checksum),
|
"libgd-"+version+".tar.gz", checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Env: []string{
|
Env: []string{
|
||||||
|
|||||||
@@ -7,10 +7,11 @@ func (t Toolchain) newLibpsl() (pkg.Artifact, string) {
|
|||||||
version = "0.21.5"
|
version = "0.21.5"
|
||||||
checksum = "XjfxSzh7peG2Vg4vJlL8z4JZJLcXqbuP6pLWkrGCmRxlnYUFTKNBqWGHCxEOlCad"
|
checksum = "XjfxSzh7peG2Vg4vJlL8z4JZJLcXqbuP6pLWkrGCmRxlnYUFTKNBqWGHCxEOlCad"
|
||||||
)
|
)
|
||||||
return t.NewPackage("libpsl", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("libpsl", version, newFromGitHubRelease(
|
||||||
nil, "https://github.com/rockdaboot/libpsl/releases/download/"+
|
"rockdaboot/libpsl",
|
||||||
version+"/libpsl-"+version+".tar.gz",
|
version,
|
||||||
mustDecode(checksum),
|
"libpsl-"+version+".tar.gz",
|
||||||
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Writable: true,
|
Writable: true,
|
||||||
|
|||||||
@@ -7,10 +7,11 @@ func (t Toolchain) newLibseccomp() (pkg.Artifact, string) {
|
|||||||
version = "2.6.0"
|
version = "2.6.0"
|
||||||
checksum = "mMu-iR71guPjFbb31u-YexBaanKE_nYPjPux-vuBiPfS_0kbwJdfCGlkofaUm-EY"
|
checksum = "mMu-iR71guPjFbb31u-YexBaanKE_nYPjPux-vuBiPfS_0kbwJdfCGlkofaUm-EY"
|
||||||
)
|
)
|
||||||
return t.NewPackage("libseccomp", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("libseccomp", version, newFromGitHubRelease(
|
||||||
nil, "https://github.com/seccomp/libseccomp/releases/download/"+
|
"seccomp/libseccomp",
|
||||||
"v"+version+"/libseccomp-"+version+".tar.gz",
|
"v"+version,
|
||||||
mustDecode(checksum),
|
"libseccomp-"+version+".tar.gz",
|
||||||
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
ScriptEarly: `
|
ScriptEarly: `
|
||||||
|
|||||||
@@ -7,11 +7,10 @@ func (t Toolchain) newLibucontext() (pkg.Artifact, string) {
|
|||||||
version = "1.5"
|
version = "1.5"
|
||||||
checksum = "Ggk7FMmDNBdCx1Z9PcNWWW6LSpjGYssn2vU0GK5BLXJYw7ZxZbA2m_eSgT9TFnIG"
|
checksum = "Ggk7FMmDNBdCx1Z9PcNWWW6LSpjGYssn2vU0GK5BLXJYw7ZxZbA2m_eSgT9TFnIG"
|
||||||
)
|
)
|
||||||
return t.NewPackage("libucontext", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("libucontext", version, newFromGitHub(
|
||||||
nil, "https://github.com/kaniini/libucontext/archive/refs/tags/"+
|
"kaniini/libucontext",
|
||||||
"libucontext-"+version+".tar.gz",
|
"libucontext-"+version,
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
// uses source tree as scratch space
|
// uses source tree as scratch space
|
||||||
Writable: true,
|
Writable: true,
|
||||||
|
|||||||
@@ -4,13 +4,13 @@ import "hakurei.app/internal/pkg"
|
|||||||
|
|
||||||
func (t Toolchain) newLibxml2() (pkg.Artifact, string) {
|
func (t Toolchain) newLibxml2() (pkg.Artifact, string) {
|
||||||
const (
|
const (
|
||||||
version = "2.15.2"
|
version = "2.15.3"
|
||||||
checksum = "zwQvCIBnjzUFY-inX5ckfNT3mIezsCRV55C_Iztde5OnRTB3u33lfO5h03g7DK_8"
|
checksum = "oJy74htGlEpf70KPvpW18fYJo0RQQkCXZRwqUz6NoXborS3HCq3Nm4gsyaSeNmUH"
|
||||||
)
|
)
|
||||||
return t.NewPackage("libxml2", version, t.NewViaGit(
|
return t.NewPackage("libxml2", version, newFromGitLab(
|
||||||
"https://gitlab.gnome.org/GNOME/libxml2.git",
|
"gitlab.gnome.org",
|
||||||
"refs/tags/v"+version,
|
"GNOME/libxml2",
|
||||||
mustDecode(checksum),
|
"v"+version, checksum,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
// can't create shell.out: Read-only file system
|
// can't create shell.out: Read-only file system
|
||||||
Writable: true,
|
Writable: true,
|
||||||
|
|||||||
@@ -5,12 +5,12 @@ import "hakurei.app/internal/pkg"
|
|||||||
func (t Toolchain) newLibxslt() (pkg.Artifact, string) {
|
func (t Toolchain) newLibxslt() (pkg.Artifact, string) {
|
||||||
const (
|
const (
|
||||||
version = "1.1.45"
|
version = "1.1.45"
|
||||||
checksum = "MZc_dyUWpHChkWDKa5iycrECxBsRd4ZMbYfL4VojTbung593mlH2tHGmxYB6NFYT"
|
checksum = "67ks7v8od2oWaEGf23Sst_Xbn_8brQyolQjqxPoO-lK35k_WJhi2Px5JJgbk-nfn"
|
||||||
)
|
)
|
||||||
return t.NewPackage("libxslt", version, t.NewViaGit(
|
return t.NewPackage("libxslt", version, newFromGitLab(
|
||||||
"https://gitlab.gnome.org/GNOME/libxslt.git",
|
"gitlab.gnome.org",
|
||||||
"refs/tags/v"+version,
|
"GNOME/libxslt",
|
||||||
mustDecode(checksum),
|
"v"+version, checksum,
|
||||||
), nil, &MakeHelper{
|
), nil, &MakeHelper{
|
||||||
Generate: "NOCONFIGURE=1 ./autogen.sh",
|
Generate: "NOCONFIGURE=1 ./autogen.sh",
|
||||||
|
|
||||||
|
|||||||
@@ -1,202 +1,289 @@
|
|||||||
package rosa
|
package rosa
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"regexp"
|
||||||
"runtime"
|
"runtime"
|
||||||
"slices"
|
"slices"
|
||||||
"strconv"
|
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
|
||||||
|
|
||||||
"hakurei.app/internal/pkg"
|
"hakurei.app/internal/pkg"
|
||||||
)
|
)
|
||||||
|
|
||||||
// llvmAttr holds the attributes that will be applied to a new [pkg.Artifact]
|
// litArgs returns [LIT] arguments for optional verbosity and check skipping.
|
||||||
// containing a LLVM variant.
|
func litArgs(verbose bool, skipChecks ...string) string {
|
||||||
type llvmAttr struct {
|
args := []string{"-sv"}
|
||||||
// Enabled projects and runtimes.
|
if verbose {
|
||||||
pr int
|
args[0] = "--verbose"
|
||||||
|
}
|
||||||
|
|
||||||
// Concatenated with default environment for PackageAttr.Env.
|
if len(skipChecks) > 0 {
|
||||||
env []string
|
skipChecks = slices.Clone(skipChecks)
|
||||||
// Concatenated with generated entries for CMakeHelper.Cache.
|
for i, s := range skipChecks {
|
||||||
cmake []KV
|
s = regexp.QuoteMeta(s)
|
||||||
// Override CMakeHelper.Append.
|
s = strings.ReplaceAll(s, "/", "\\/")
|
||||||
append []string
|
skipChecks[i] = s
|
||||||
// Passed through to PackageAttr.NonStage0.
|
}
|
||||||
nonStage0 []pkg.Artifact
|
args = append(args,
|
||||||
// Concatenated with default fixup for CMakeHelper.Script.
|
"--filter-out='\\''"+strings.Join(skipChecks, "|")+"'\\''")
|
||||||
script string
|
}
|
||||||
|
|
||||||
// Patch name and body pairs.
|
return "'" + strings.Join(args, " ") + "'"
|
||||||
patches []KV
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
func (t Toolchain) newEarlyCompilerRT() (pkg.Artifact, string) {
|
||||||
llvmProjectClang = 1 << iota
|
version := t.Version(llvmSource)
|
||||||
llvmProjectLld
|
major, _, _ := strings.Cut(version, ".")
|
||||||
|
return t.NewPackage("early-compiler-rt", version, t.Load(llvmSource), &PackageAttr{
|
||||||
|
Flag: TExclusive,
|
||||||
|
}, &CMakeHelper{
|
||||||
|
Append: []string{"compiler-rt"},
|
||||||
|
|
||||||
llvmProjectAll = 1<<iota - 1
|
Cache: []KV{
|
||||||
|
// libc++ not yet available
|
||||||
|
{"CMAKE_CXX_COMPILER_TARGET", ""},
|
||||||
|
|
||||||
llvmRuntimeCompilerRT = 1 << iota
|
{"LLVM_HOST_TRIPLE", `"${ROSA_TRIPLE}"`},
|
||||||
llvmRuntimeLibunwind
|
{"LLVM_DEFAULT_TARGET_TRIPLE", `"${ROSA_TRIPLE}"`},
|
||||||
llvmRuntimeLibc
|
{"LLVM_ENABLE_PER_TARGET_RUNTIME_DIR", "ON"},
|
||||||
llvmRuntimeLibcxx
|
|
||||||
llvmRuntimeLibcxxABI
|
|
||||||
|
|
||||||
llvmAll = 1<<iota - 1
|
{"COMPILER_RT_BUILD_BUILTINS", "ON"},
|
||||||
llvmRuntimeAll = llvmAll - (2 * llvmProjectAll) - 1
|
{"COMPILER_RT_DEFAULT_TARGET_ONLY", "OFF"},
|
||||||
)
|
{"COMPILER_RT_SANITIZERS_TO_BUILD", "asan"},
|
||||||
|
|
||||||
// llvmFlagName resolves a llvmAttr.flags project or runtime flag to its name.
|
// does not work without libunwind
|
||||||
func llvmFlagName(flag int) string {
|
{"COMPILER_RT_BUILD_CTX_PROFILE", "OFF"},
|
||||||
switch flag {
|
{"COMPILER_RT_BUILD_LIBFUZZER", "OFF"},
|
||||||
case llvmProjectClang:
|
{"COMPILER_RT_BUILD_MEMPROF", "OFF"},
|
||||||
return "clang"
|
{"COMPILER_RT_BUILD_PROFILE", "OFF"},
|
||||||
case llvmProjectLld:
|
{"COMPILER_RT_BUILD_XRAY", "OFF"},
|
||||||
return "lld"
|
},
|
||||||
|
SkipTest: true,
|
||||||
|
Script: `
|
||||||
|
mkdir -p "/work/system/lib/clang/` + major + `/lib/"
|
||||||
|
ln -s \
|
||||||
|
"../../../${ROSA_TRIPLE}" \
|
||||||
|
"/work/system/lib/clang/` + major + `/lib/"
|
||||||
|
|
||||||
case llvmRuntimeCompilerRT:
|
ln -s \
|
||||||
return "compiler-rt"
|
"clang_rt.crtbegin-` + linuxArch() + `.o" \
|
||||||
case llvmRuntimeLibunwind:
|
"/work/system/lib/${ROSA_TRIPLE}/crtbeginS.o"
|
||||||
return "libunwind"
|
ln -s \
|
||||||
case llvmRuntimeLibc:
|
"clang_rt.crtend-` + linuxArch() + `.o" \
|
||||||
return "libc"
|
"/work/system/lib/${ROSA_TRIPLE}/crtendS.o"
|
||||||
case llvmRuntimeLibcxx:
|
`,
|
||||||
return "libcxx"
|
},
|
||||||
case llvmRuntimeLibcxxABI:
|
Python,
|
||||||
return "libcxxabi"
|
|
||||||
|
|
||||||
default:
|
muslHeaders,
|
||||||
panic("invalid flag " + strconv.Itoa(flag))
|
KernelHeaders,
|
||||||
|
), version
|
||||||
|
}
|
||||||
|
func init() {
|
||||||
|
artifactsM[earlyCompilerRT] = Metadata{
|
||||||
|
f: Toolchain.newEarlyCompilerRT,
|
||||||
|
|
||||||
|
Name: "early-compiler-rt",
|
||||||
|
Description: "early LLVM runtime: compiler-rt",
|
||||||
|
|
||||||
|
Dependencies: P{
|
||||||
|
Musl,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// newLLVMVariant returns a [pkg.Artifact] containing a LLVM variant.
|
func (t Toolchain) newEarlyRuntimes() (pkg.Artifact, string) {
|
||||||
func (t Toolchain) newLLVMVariant(variant string, attr *llvmAttr) pkg.Artifact {
|
version := t.Version(llvmSource)
|
||||||
if attr == nil {
|
return t.NewPackage("early-runtimes", version, t.Load(llvmSource), &PackageAttr{
|
||||||
panic("LLVM attr must be non-nil")
|
Flag: TExclusive,
|
||||||
}
|
}, &CMakeHelper{
|
||||||
|
Append: []string{"runtimes"},
|
||||||
|
|
||||||
var projects, runtimes []string
|
Cache: []KV{
|
||||||
for i := 1; i < llvmProjectAll; i <<= 1 {
|
// libc++ not yet available
|
||||||
if attr.pr&i != 0 {
|
{"CMAKE_CXX_COMPILER_WORKS", "ON"},
|
||||||
projects = append(projects, llvmFlagName(i))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for i := (llvmProjectAll + 1) << 1; i < llvmRuntimeAll; i <<= 1 {
|
|
||||||
if attr.pr&i != 0 {
|
|
||||||
runtimes = append(runtimes, llvmFlagName(i))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var script string
|
{"LLVM_HOST_TRIPLE", `"${ROSA_TRIPLE}"`},
|
||||||
|
{"LLVM_DEFAULT_TARGET_TRIPLE", `"${ROSA_TRIPLE}"`},
|
||||||
|
{"LLVM_ENABLE_RUNTIMES", "'libunwind;libcxx;libcxxabi'"},
|
||||||
|
|
||||||
|
{"LIBUNWIND_USE_COMPILER_RT", "ON"},
|
||||||
|
{"LIBCXX_HAS_MUSL_LIBC", "ON"},
|
||||||
|
{"LIBCXX_USE_COMPILER_RT", "ON"},
|
||||||
|
{"LIBCXX_HAS_ATOMIC_LIB", "OFF"},
|
||||||
|
{"LIBCXXABI_USE_COMPILER_RT", "ON"},
|
||||||
|
{"LIBCXXABI_USE_LLVM_UNWINDER", "ON"},
|
||||||
|
{"LIBCXXABI_HAS_CXA_THREAD_ATEXIT_IMPL", "OFF"},
|
||||||
|
|
||||||
|
{"LLVM_ENABLE_ZLIB", "FORCE_ON"},
|
||||||
|
{"LLVM_ENABLE_ZSTD", "FORCE_ON"},
|
||||||
|
{"LLVM_ENABLE_LIBXML2", "OFF"},
|
||||||
|
},
|
||||||
|
SkipTest: true,
|
||||||
|
},
|
||||||
|
Python,
|
||||||
|
|
||||||
|
Zlib,
|
||||||
|
Zstd,
|
||||||
|
earlyCompilerRT,
|
||||||
|
KernelHeaders,
|
||||||
|
), version
|
||||||
|
}
|
||||||
|
func init() {
|
||||||
|
artifactsM[earlyRuntimes] = Metadata{
|
||||||
|
f: Toolchain.newEarlyRuntimes,
|
||||||
|
|
||||||
|
Name: "early-runtimes",
|
||||||
|
Description: "early LLVM runtimes: libunwind, libcxx, libcxxabi",
|
||||||
|
|
||||||
|
Dependencies: P{
|
||||||
|
earlyCompilerRT,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t Toolchain) newLLVM() (pkg.Artifact, string) {
|
||||||
|
var early PArtifact = muslHeaders
|
||||||
|
if t.isStage0() {
|
||||||
|
// The LLVM build system is buggy around LLVM_LINK_LLVM_DYLIB and leaks
|
||||||
|
// the system installation when invoking the newly built toolchain. This
|
||||||
|
// is worked around in stage0 by providing standalone builds of
|
||||||
|
// runtimes. Later stages rely on 3-stage determinism and allows the
|
||||||
|
// system installation from its previous stage to leak through.
|
||||||
|
early = earlyRuntimes
|
||||||
|
}
|
||||||
|
|
||||||
cache := []KV{
|
cache := []KV{
|
||||||
{"CMAKE_BUILD_TYPE", "Release"},
|
{"ENABLE_LINKER_BUILD_ID", "ON"},
|
||||||
|
{"COMPILER_RT_USE_BUILTINS_LIBRARY", "ON"},
|
||||||
|
{"COMPILER_RT_DEFAULT_TARGET_ONLY", "ON"},
|
||||||
|
{"COMPILER_RT_BUILD_GWP_ASAN", "OFF"},
|
||||||
|
{"LIBCXX_CXX_ABI", "libcxxabi"},
|
||||||
|
{"LIBCXX_USE_COMPILER_RT", "ON"},
|
||||||
|
{"LIBCXX_ENABLE_STATIC_ABI_LIBRARY", "OFF"},
|
||||||
|
{"LIBCXX_HAS_MUSL_LIBC", "ON"},
|
||||||
|
{"LIBCXX_HARDENING_MODE", "fast"},
|
||||||
|
{"LIBCXXABI_USE_LLVM_UNWINDER", "ON"},
|
||||||
|
{"LIBCXXABI_ENABLE_STATIC_UNWINDER", "OFF"},
|
||||||
|
{"LIBCXXABI_USE_COMPILER_RT", "ON"},
|
||||||
|
{"LLVM_INSTALL_BINUTILS_SYMLINKS", "ON"},
|
||||||
|
{"LLVM_INSTALL_UTILS", "ON"},
|
||||||
|
{"LLVM_BUILD_LLVM_DYLIB", "ON"},
|
||||||
|
{"LLVM_LINK_LLVM_DYLIB", "ON"},
|
||||||
|
{"LLVM_APPEND_VC_REV", "OFF"},
|
||||||
|
{"LLVM_ENABLE_RTTI", "ON"},
|
||||||
|
{"LLVM_ENABLE_ZLIB", "FORCE_ON"},
|
||||||
|
{"LLVM_ENABLE_ZSTD", "FORCE_ON"},
|
||||||
|
{"LLVM_ENABLE_PER_TARGET_RUNTIME_DIR", "ON"},
|
||||||
|
{"CLANG_DEFAULT_RTLIB", "compiler-rt"},
|
||||||
|
{"CLANG_DEFAULT_UNWINDLIB", "libunwind"},
|
||||||
|
{"CLANG_DEFAULT_CXX_STDLIB", "libc++"},
|
||||||
|
{"CLANG_CONFIG_FILE_SYSTEM_DIR", "/system/etc/clang"},
|
||||||
|
{"LLVM_ENABLE_FFI", "OFF"},
|
||||||
|
{"LLVM_ENABLE_LIBXML2", "OFF"},
|
||||||
|
{"LLVM_ENABLE_LIBCXX", "ON"},
|
||||||
|
{"LLVM_ENABLE_LLD", "ON"},
|
||||||
|
{"LIBUNWIND_ENABLE_ASSERTIONS", "OFF"},
|
||||||
|
{"LIBUNWIND_USE_COMPILER_RT", "ON"},
|
||||||
|
|
||||||
{"LLVM_HOST_TRIPLE", `"${ROSA_TRIPLE}"`},
|
{"LLVM_HOST_TRIPLE", `"${ROSA_TRIPLE}"`},
|
||||||
{"LLVM_DEFAULT_TARGET_TRIPLE", `"${ROSA_TRIPLE}"`},
|
{"LLVM_DEFAULT_TARGET_TRIPLE", `"${ROSA_TRIPLE}"`},
|
||||||
}
|
{"LLVM_ENABLE_PROJECTS", "'" + strings.Join([]string{
|
||||||
if len(projects) > 0 {
|
"clang",
|
||||||
cache = append(cache, []KV{
|
"lld",
|
||||||
{"LLVM_ENABLE_PROJECTS", `"${ROSA_LLVM_PROJECTS}"`},
|
}, ";") + "'"},
|
||||||
}...)
|
{"LLVM_ENABLE_RUNTIMES", "'" + strings.Join([]string{
|
||||||
}
|
"compiler-rt",
|
||||||
if len(runtimes) > 0 {
|
"libcxx",
|
||||||
cache = append(cache, []KV{
|
"libcxxabi",
|
||||||
{"LLVM_ENABLE_RUNTIMES", `"${ROSA_LLVM_RUNTIMES}"`},
|
"libunwind",
|
||||||
}...)
|
"libclc",
|
||||||
|
}, ";") + "'"},
|
||||||
}
|
}
|
||||||
|
|
||||||
cmakeAppend := []string{"llvm"}
|
if !t.isStage0() {
|
||||||
if attr.append != nil {
|
skipChecks := []string{
|
||||||
cmakeAppend = attr.append
|
// expensive, pointless to run here
|
||||||
} else {
|
"benchmarks",
|
||||||
cache = append(cache, []KV{
|
// LLVM ERROR: Tried to execute an unknown external function: roundevenf
|
||||||
{"LLVM_ENABLE_LIBCXX", "ON"},
|
"ExecutionEngine/Interpreter/intrinsics.ll",
|
||||||
{"LLVM_USE_LINKER", "lld"},
|
// clang: deadlocks with LLVM_BUILD_LLVM_DYLIB
|
||||||
|
"crash-recovery-modules",
|
||||||
|
// clang: fatal error: '__config_site' file not found
|
||||||
|
"CodeGen/PowerPC/ppc-xmmintrin.c",
|
||||||
|
"CodeGen/PowerPC/ppc-mmintrin.c",
|
||||||
|
"CodeGen/PowerPC/ppc-emmintrin.c",
|
||||||
|
"CodeGen/PowerPC/ppc-pmmintrin.c",
|
||||||
|
"CodeGen/PowerPC/ppc-tmmintrin.c",
|
||||||
|
"CodeGen/PowerPC/ppc-smmintrin.c",
|
||||||
|
"CodeGenCUDA/amdgpu-alias-undef-symbols.cu",
|
||||||
|
// cxx: fails on musl
|
||||||
|
"close.dont-get-rid-of-buffer",
|
||||||
|
"re/re.traits",
|
||||||
|
"std/time",
|
||||||
|
"localization/locales",
|
||||||
|
"localization/locale.categories",
|
||||||
|
"selftest/dsl/dsl.sh.py",
|
||||||
|
"input.output/iostream.format",
|
||||||
|
"locale-specific_form",
|
||||||
|
// cxx: deadlocks
|
||||||
|
"std/thread/thread.jthread",
|
||||||
|
// unwind: fails on musl
|
||||||
|
"eh_frame_fde_pc_range",
|
||||||
|
}
|
||||||
|
switch runtime.GOARCH {
|
||||||
|
case "arm64":
|
||||||
|
skipChecks = append(skipChecks,
|
||||||
|
// LLVM: intermittently crashes
|
||||||
|
"ExecutionEngine/OrcLazy/multiple-compile-threads-basic.ll",
|
||||||
|
// unwind: unexpectedly passes
|
||||||
|
"unwind_leaffunction",
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
{"LLVM_INSTALL_BINUTILS_SYMLINKS", "ON"},
|
if presetOpts&OptLLVMNoLTO == 0 {
|
||||||
{"LLVM_INSTALL_CCTOOLS_SYMLINKS", "ON"},
|
|
||||||
|
|
||||||
{"LLVM_LIT_ARGS", "'--verbose'"},
|
|
||||||
}...)
|
|
||||||
}
|
|
||||||
|
|
||||||
if attr.pr&llvmProjectClang != 0 {
|
|
||||||
cache = append(cache, []KV{
|
|
||||||
{"CLANG_DEFAULT_LINKER", "lld"},
|
|
||||||
{"CLANG_DEFAULT_CXX_STDLIB", "libc++"},
|
|
||||||
{"CLANG_DEFAULT_RTLIB", "compiler-rt"},
|
|
||||||
{"CLANG_DEFAULT_UNWINDLIB", "libunwind"},
|
|
||||||
}...)
|
|
||||||
}
|
|
||||||
if attr.pr&llvmProjectLld != 0 {
|
|
||||||
script += `
|
|
||||||
ln -s ld.lld /work/system/bin/ld
|
|
||||||
`
|
|
||||||
}
|
|
||||||
if attr.pr&llvmRuntimeCompilerRT != 0 {
|
|
||||||
if attr.append == nil {
|
|
||||||
cache = append(cache, []KV{
|
cache = append(cache, []KV{
|
||||||
{"COMPILER_RT_USE_LLVM_UNWINDER", "ON"},
|
// very expensive
|
||||||
|
{"LLVM_ENABLE_LTO", "Thin"},
|
||||||
}...)
|
}...)
|
||||||
}
|
}
|
||||||
}
|
|
||||||
if attr.pr&llvmRuntimeLibunwind != 0 {
|
|
||||||
cache = append(cache, []KV{
|
cache = append(cache, []KV{
|
||||||
{"LIBUNWIND_USE_COMPILER_RT", "ON"},
|
// symbols: clock_gettime, mallopt
|
||||||
}...)
|
{"COMPILER_RT_INCLUDE_TESTS", "OFF"},
|
||||||
}
|
|
||||||
if attr.pr&llvmRuntimeLibcxx != 0 {
|
{"LLVM_BUILD_TESTS", "ON"},
|
||||||
cache = append(cache, []KV{
|
{"LLVM_LIT_ARGS", litArgs(true, skipChecks...)},
|
||||||
{"LIBCXX_HAS_MUSL_LIBC", "ON"},
|
|
||||||
{"LIBCXX_USE_COMPILER_RT", "ON"},
|
|
||||||
}...)
|
|
||||||
}
|
|
||||||
if attr.pr&llvmRuntimeLibcxxABI != 0 {
|
|
||||||
cache = append(cache, []KV{
|
|
||||||
{"LIBCXXABI_USE_COMPILER_RT", "ON"},
|
|
||||||
{"LIBCXXABI_USE_LLVM_UNWINDER", "ON"},
|
|
||||||
}...)
|
}...)
|
||||||
}
|
}
|
||||||
|
|
||||||
return t.NewPackage("llvm", llvmVersion, pkg.NewHTTPGetTar(
|
version := t.Version(llvmSource)
|
||||||
nil, "https://github.com/llvm/llvm-project/archive/refs/tags/"+
|
return t.NewPackage("llvm", version, t.Load(llvmSource), nil, &CMakeHelper{
|
||||||
"llvmorg-"+llvmVersion+".tar.gz",
|
Append: []string{"llvm"},
|
||||||
mustDecode(llvmChecksum),
|
|
||||||
pkg.TarGzip,
|
|
||||||
), &PackageAttr{
|
|
||||||
Patches: slices.Concat(attr.patches, []KV{
|
|
||||||
{"increase-stack-size-unconditional", `diff --git a/llvm/lib/Support/Threading.cpp b/llvm/lib/Support/Threading.cpp
|
|
||||||
index 9da357a7ebb9..b2931510c1ae 100644
|
|
||||||
--- a/llvm/lib/Support/Threading.cpp
|
|
||||||
+++ b/llvm/lib/Support/Threading.cpp
|
|
||||||
@@ -80,7 +80,7 @@ unsigned llvm::ThreadPoolStrategy::compute_thread_count() const {
|
|
||||||
// keyword.
|
|
||||||
#include "llvm/Support/thread.h"
|
|
||||||
|
|
||||||
-#if defined(__APPLE__)
|
|
||||||
+#if defined(__APPLE__) || 1
|
|
||||||
// Darwin's default stack size for threads except the main one is only 512KB,
|
|
||||||
// which is not enough for some/many normal LLVM compilations. This implements
|
|
||||||
// the same interface as std::thread but requests the same stack size as the
|
|
||||||
`},
|
|
||||||
}),
|
|
||||||
NonStage0: attr.nonStage0,
|
|
||||||
|
|
||||||
Env: slices.Concat([]string{
|
Cache: cache,
|
||||||
"ROSA_LLVM_PROJECTS=" + strings.Join(projects, ";"),
|
Script: `
|
||||||
"ROSA_LLVM_RUNTIMES=" + strings.Join(runtimes, ";"),
|
ln -s ld.lld /work/system/bin/ld
|
||||||
}, attr.env),
|
ln -s clang /work/system/bin/cc
|
||||||
|
ln -s clang /work/system/bin/cpp
|
||||||
|
ln -s clang++ /work/system/bin/c++
|
||||||
|
`,
|
||||||
|
|
||||||
Flag: TExclusive,
|
// LLVM_LINK_LLVM_DYLIB causes llvm test suite to leak system
|
||||||
}, &CMakeHelper{
|
// installation into test environment, and the tests end up testing the
|
||||||
Variant: variant,
|
// system installation instead. Tests are disabled on stage0 and relies
|
||||||
|
// on 3-stage determinism to test later stages.
|
||||||
|
SkipTest: t.isStage0(),
|
||||||
|
|
||||||
Cache: slices.Concat(cache, attr.cmake),
|
Test: `
|
||||||
Append: cmakeAppend,
|
chmod +w /bin && ln -s \
|
||||||
Script: script + attr.script,
|
../system/bin/chmod \
|
||||||
|
../system/bin/mkdir \
|
||||||
|
../system/bin/rm \
|
||||||
|
../system/bin/tr \
|
||||||
|
../system/bin/awk \
|
||||||
|
/bin
|
||||||
|
ninja ` + jobsFlagE + ` check-all
|
||||||
|
`,
|
||||||
},
|
},
|
||||||
Python,
|
Python,
|
||||||
Perl,
|
Perl,
|
||||||
@@ -206,354 +293,44 @@ index 9da357a7ebb9..b2931510c1ae 100644
|
|||||||
Coreutils,
|
Coreutils,
|
||||||
Findutils,
|
Findutils,
|
||||||
|
|
||||||
|
Zlib,
|
||||||
|
Zstd,
|
||||||
|
early,
|
||||||
KernelHeaders,
|
KernelHeaders,
|
||||||
)
|
), version
|
||||||
}
|
|
||||||
|
|
||||||
// newLLVM returns LLVM toolchain across multiple [pkg.Artifact].
|
|
||||||
func (t Toolchain) newLLVM() (musl, compilerRT, runtimes, clang pkg.Artifact) {
|
|
||||||
target := "'AArch64;RISCV;X86'"
|
|
||||||
if t.isStage0() {
|
|
||||||
switch runtime.GOARCH {
|
|
||||||
case "386", "amd64":
|
|
||||||
target = "X86"
|
|
||||||
case "arm64":
|
|
||||||
target = "AArch64"
|
|
||||||
case "riscv64":
|
|
||||||
target = "RISCV"
|
|
||||||
|
|
||||||
default:
|
|
||||||
panic("unsupported target " + runtime.GOARCH)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
minimalDeps := []KV{
|
|
||||||
{"LLVM_ENABLE_ZLIB", "OFF"},
|
|
||||||
{"LLVM_ENABLE_ZSTD", "OFF"},
|
|
||||||
{"LLVM_ENABLE_LIBXML2", "OFF"},
|
|
||||||
}
|
|
||||||
|
|
||||||
muslHeaders, _ := t.newMusl(true, []string{
|
|
||||||
"CC=clang",
|
|
||||||
})
|
|
||||||
|
|
||||||
compilerRT = t.newLLVMVariant("compiler-rt", &llvmAttr{
|
|
||||||
env: stage0ExclConcat(t, []string{},
|
|
||||||
"LDFLAGS="+earlyLDFLAGS(false),
|
|
||||||
),
|
|
||||||
cmake: []KV{
|
|
||||||
// libc++ not yet available
|
|
||||||
{"CMAKE_CXX_COMPILER_TARGET", ""},
|
|
||||||
|
|
||||||
{"COMPILER_RT_BUILD_BUILTINS", "ON"},
|
|
||||||
{"COMPILER_RT_DEFAULT_TARGET_ONLY", "OFF"},
|
|
||||||
{"COMPILER_RT_SANITIZERS_TO_BUILD", "asan"},
|
|
||||||
{"LLVM_ENABLE_PER_TARGET_RUNTIME_DIR", "ON"},
|
|
||||||
|
|
||||||
// does not work without libunwind
|
|
||||||
{"COMPILER_RT_BUILD_CTX_PROFILE", "OFF"},
|
|
||||||
{"COMPILER_RT_BUILD_LIBFUZZER", "OFF"},
|
|
||||||
{"COMPILER_RT_BUILD_MEMPROF", "OFF"},
|
|
||||||
{"COMPILER_RT_BUILD_PROFILE", "OFF"},
|
|
||||||
{"COMPILER_RT_BUILD_XRAY", "OFF"},
|
|
||||||
},
|
|
||||||
append: []string{"compiler-rt"},
|
|
||||||
nonStage0: []pkg.Artifact{
|
|
||||||
muslHeaders,
|
|
||||||
},
|
|
||||||
script: `
|
|
||||||
mkdir -p "/work/system/lib/clang/` + llvmVersionMajor + `/lib/"
|
|
||||||
ln -s \
|
|
||||||
"../../../${ROSA_TRIPLE}" \
|
|
||||||
"/work/system/lib/clang/` + llvmVersionMajor + `/lib/"
|
|
||||||
|
|
||||||
ln -s \
|
|
||||||
"clang_rt.crtbegin-` + linuxArch() + `.o" \
|
|
||||||
"/work/system/lib/${ROSA_TRIPLE}/crtbeginS.o"
|
|
||||||
ln -s \
|
|
||||||
"clang_rt.crtend-` + linuxArch() + `.o" \
|
|
||||||
"/work/system/lib/${ROSA_TRIPLE}/crtendS.o"
|
|
||||||
`,
|
|
||||||
})
|
|
||||||
|
|
||||||
musl, _ = t.newMusl(false, stage0ExclConcat(t, []string{
|
|
||||||
"CC=clang",
|
|
||||||
"LIBCC=/system/lib/clang/" + llvmVersionMajor + "/lib/" +
|
|
||||||
triplet() + "/libclang_rt.builtins.a",
|
|
||||||
"AR=ar",
|
|
||||||
"RANLIB=ranlib",
|
|
||||||
},
|
|
||||||
"LDFLAGS="+earlyLDFLAGS(false),
|
|
||||||
), compilerRT)
|
|
||||||
|
|
||||||
runtimes = t.newLLVMVariant("runtimes", &llvmAttr{
|
|
||||||
env: stage0ExclConcat(t, []string{},
|
|
||||||
"LDFLAGS="+earlyLDFLAGS(false),
|
|
||||||
),
|
|
||||||
pr: llvmRuntimeLibunwind | llvmRuntimeLibcxx | llvmRuntimeLibcxxABI,
|
|
||||||
cmake: slices.Concat([]KV{
|
|
||||||
// libc++ not yet available
|
|
||||||
{"CMAKE_CXX_COMPILER_WORKS", "ON"},
|
|
||||||
|
|
||||||
{"LIBCXX_HAS_ATOMIC_LIB", "OFF"},
|
|
||||||
{"LIBCXXABI_HAS_CXA_THREAD_ATEXIT_IMPL", "OFF"},
|
|
||||||
}, minimalDeps),
|
|
||||||
append: []string{"runtimes"},
|
|
||||||
nonStage0: []pkg.Artifact{
|
|
||||||
compilerRT,
|
|
||||||
musl,
|
|
||||||
},
|
|
||||||
})
|
|
||||||
|
|
||||||
clang = t.newLLVMVariant("clang", &llvmAttr{
|
|
||||||
pr: llvmProjectClang | llvmProjectLld,
|
|
||||||
env: stage0ExclConcat(t, []string{},
|
|
||||||
"CFLAGS="+earlyCFLAGS,
|
|
||||||
"CXXFLAGS="+earlyCXXFLAGS(),
|
|
||||||
"LDFLAGS="+earlyLDFLAGS(false),
|
|
||||||
),
|
|
||||||
cmake: slices.Concat([]KV{
|
|
||||||
{"LLVM_TARGETS_TO_BUILD", target},
|
|
||||||
{"CMAKE_CROSSCOMPILING", "OFF"},
|
|
||||||
{"CXX_SUPPORTS_CUSTOM_LINKER", "ON"},
|
|
||||||
}, minimalDeps),
|
|
||||||
nonStage0: []pkg.Artifact{
|
|
||||||
musl,
|
|
||||||
compilerRT,
|
|
||||||
runtimes,
|
|
||||||
},
|
|
||||||
script: `
|
|
||||||
ln -s clang /work/system/bin/cc
|
|
||||||
ln -s clang++ /work/system/bin/c++
|
|
||||||
|
|
||||||
ninja check-all
|
|
||||||
`,
|
|
||||||
|
|
||||||
patches: []KV{
|
|
||||||
{"add-rosa-vendor", `diff --git a/llvm/include/llvm/TargetParser/Triple.h b/llvm/include/llvm/TargetParser/Triple.h
|
|
||||||
index 9c83abeeb3b1..5acfe5836a23 100644
|
|
||||||
--- a/llvm/include/llvm/TargetParser/Triple.h
|
|
||||||
+++ b/llvm/include/llvm/TargetParser/Triple.h
|
|
||||||
@@ -190,6 +190,7 @@ public:
|
|
||||||
|
|
||||||
Apple,
|
|
||||||
PC,
|
|
||||||
+ Rosa,
|
|
||||||
SCEI,
|
|
||||||
Freescale,
|
|
||||||
IBM,
|
|
||||||
diff --git a/llvm/lib/TargetParser/Triple.cpp b/llvm/lib/TargetParser/Triple.cpp
|
|
||||||
index a4f9dd42c0fe..cb5a12387034 100644
|
|
||||||
--- a/llvm/lib/TargetParser/Triple.cpp
|
|
||||||
+++ b/llvm/lib/TargetParser/Triple.cpp
|
|
||||||
@@ -279,6 +279,7 @@ StringRef Triple::getVendorTypeName(VendorType Kind) {
|
|
||||||
case NVIDIA: return "nvidia";
|
|
||||||
case OpenEmbedded: return "oe";
|
|
||||||
case PC: return "pc";
|
|
||||||
+ case Rosa: return "rosa";
|
|
||||||
case SCEI: return "scei";
|
|
||||||
case SUSE: return "suse";
|
|
||||||
case Meta:
|
|
||||||
@@ -689,6 +690,7 @@ static Triple::VendorType parseVendor(StringRef VendorName) {
|
|
||||||
return StringSwitch<Triple::VendorType>(VendorName)
|
|
||||||
.Case("apple", Triple::Apple)
|
|
||||||
.Case("pc", Triple::PC)
|
|
||||||
+ .Case("rosa", Triple::Rosa)
|
|
||||||
.Case("scei", Triple::SCEI)
|
|
||||||
.Case("sie", Triple::SCEI)
|
|
||||||
.Case("fsl", Triple::Freescale)
|
|
||||||
`},
|
|
||||||
|
|
||||||
{"xfail-broken-tests", `diff --git a/clang/test/Modules/timestamps.c b/clang/test/Modules/timestamps.c
|
|
||||||
index 50fdce630255..4b4465a75617 100644
|
|
||||||
--- a/clang/test/Modules/timestamps.c
|
|
||||||
+++ b/clang/test/Modules/timestamps.c
|
|
||||||
@@ -1,3 +1,5 @@
|
|
||||||
+// XFAIL: target={{.*-rosa-linux-musl}}
|
|
||||||
+
|
|
||||||
/// Verify timestamps that gets embedded in the module
|
|
||||||
#include <c-header.h>
|
|
||||||
|
|
||||||
`},
|
|
||||||
|
|
||||||
{"path-system-include", `diff --git a/clang/lib/Driver/ToolChains/Linux.cpp b/clang/lib/Driver/ToolChains/Linux.cpp
|
|
||||||
index 8ac8d4eb9181..e46b04a898ca 100644
|
|
||||||
--- a/clang/lib/Driver/ToolChains/Linux.cpp
|
|
||||||
+++ b/clang/lib/Driver/ToolChains/Linux.cpp
|
|
||||||
@@ -671,6 +671,12 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
|
|
||||||
addExternCSystemInclude(
|
|
||||||
DriverArgs, CC1Args,
|
|
||||||
concat(SysRoot, "/usr/include", MultiarchIncludeDir));
|
|
||||||
+ if (!MultiarchIncludeDir.empty() &&
|
|
||||||
+ D.getVFS().exists(concat(SysRoot, "/system/include", MultiarchIncludeDir)))
|
|
||||||
+ addExternCSystemInclude(
|
|
||||||
+ DriverArgs, CC1Args,
|
|
||||||
+ concat(SysRoot, "/system/include", MultiarchIncludeDir));
|
|
||||||
+
|
|
||||||
|
|
||||||
if (getTriple().getOS() == llvm::Triple::RTEMS)
|
|
||||||
return;
|
|
||||||
@@ -681,6 +687,7 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
|
|
||||||
addExternCSystemInclude(DriverArgs, CC1Args, concat(SysRoot, "/include"));
|
|
||||||
|
|
||||||
addExternCSystemInclude(DriverArgs, CC1Args, concat(SysRoot, "/usr/include"));
|
|
||||||
+ addExternCSystemInclude(DriverArgs, CC1Args, concat(SysRoot, "/system/include"));
|
|
||||||
|
|
||||||
if (!DriverArgs.hasArg(options::OPT_nobuiltininc) && getTriple().isMusl())
|
|
||||||
addSystemInclude(DriverArgs, CC1Args, ResourceDirInclude);
|
|
||||||
`},
|
|
||||||
|
|
||||||
{"path-system-libraries", `diff --git a/clang/lib/Driver/ToolChains/Linux.cpp b/clang/lib/Driver/ToolChains/Linux.cpp
|
|
||||||
index 8ac8d4eb9181..f4d1347ab64d 100644
|
|
||||||
--- a/clang/lib/Driver/ToolChains/Linux.cpp
|
|
||||||
+++ b/clang/lib/Driver/ToolChains/Linux.cpp
|
|
||||||
@@ -282,6 +282,7 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
|
|
||||||
const bool IsHexagon = Arch == llvm::Triple::hexagon;
|
|
||||||
const bool IsRISCV = Triple.isRISCV();
|
|
||||||
const bool IsCSKY = Triple.isCSKY();
|
|
||||||
+ const bool IsRosa = Triple.getVendor() == llvm::Triple::Rosa;
|
|
||||||
|
|
||||||
if (IsCSKY && !SelectedMultilibs.empty())
|
|
||||||
SysRoot = SysRoot + SelectedMultilibs.back().osSuffix();
|
|
||||||
@@ -318,12 +319,23 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
|
|
||||||
const std::string OSLibDir = std::string(getOSLibDir(Triple, Args));
|
|
||||||
const std::string MultiarchTriple = getMultiarchTriple(D, Triple, SysRoot);
|
|
||||||
|
|
||||||
+ if (IsRosa) {
|
|
||||||
+ ExtraOpts.push_back("-rpath");
|
|
||||||
+ ExtraOpts.push_back("/system/lib");
|
|
||||||
+ ExtraOpts.push_back("-rpath");
|
|
||||||
+ ExtraOpts.push_back(concat("/system/lib", MultiarchTriple));
|
|
||||||
+ }
|
|
||||||
+
|
|
||||||
// mips32: Debian multilib, we use /libo32, while in other case, /lib is
|
|
||||||
// used. We need add both libo32 and /lib.
|
|
||||||
if (Arch == llvm::Triple::mips || Arch == llvm::Triple::mipsel) {
|
|
||||||
Generic_GCC::AddMultilibPaths(D, SysRoot, "libo32", MultiarchTriple, Paths);
|
|
||||||
- addPathIfExists(D, concat(SysRoot, "/libo32"), Paths);
|
|
||||||
- addPathIfExists(D, concat(SysRoot, "/usr/libo32"), Paths);
|
|
||||||
+ if (!IsRosa) {
|
|
||||||
+ addPathIfExists(D, concat(SysRoot, "/libo32"), Paths);
|
|
||||||
+ addPathIfExists(D, concat(SysRoot, "/usr/libo32"), Paths);
|
|
||||||
+ } else {
|
|
||||||
+ addPathIfExists(D, concat(SysRoot, "/system/libo32"), Paths);
|
|
||||||
+ }
|
|
||||||
}
|
|
||||||
Generic_GCC::AddMultilibPaths(D, SysRoot, OSLibDir, MultiarchTriple, Paths);
|
|
||||||
|
|
||||||
@@ -341,18 +353,30 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
|
|
||||||
Paths);
|
|
||||||
}
|
|
||||||
|
|
||||||
- addPathIfExists(D, concat(SysRoot, "/usr/lib", MultiarchTriple), Paths);
|
|
||||||
- addPathIfExists(D, concat(SysRoot, "/usr", OSLibDir), Paths);
|
|
||||||
+ if (!IsRosa) {
|
|
||||||
+ addPathIfExists(D, concat(SysRoot, "/usr/lib", MultiarchTriple), Paths);
|
|
||||||
+ addPathIfExists(D, concat(SysRoot, "/usr", OSLibDir), Paths);
|
|
||||||
+ } else {
|
|
||||||
+ addPathIfExists(D, concat(SysRoot, "/system/lib", MultiarchTriple), Paths);
|
|
||||||
+ addPathIfExists(D, concat(SysRoot, "/system", OSLibDir), Paths);
|
|
||||||
+ }
|
|
||||||
if (IsRISCV) {
|
|
||||||
StringRef ABIName = tools::riscv::getRISCVABI(Args, Triple);
|
|
||||||
addPathIfExists(D, concat(SysRoot, "/", OSLibDir, ABIName), Paths);
|
|
||||||
- addPathIfExists(D, concat(SysRoot, "/usr", OSLibDir, ABIName), Paths);
|
|
||||||
+ if (!IsRosa)
|
|
||||||
+ addPathIfExists(D, concat(SysRoot, "/usr", OSLibDir, ABIName), Paths);
|
|
||||||
+ else
|
|
||||||
+ addPathIfExists(D, concat(SysRoot, "/system", OSLibDir, ABIName), Paths);
|
|
||||||
}
|
|
||||||
|
|
||||||
Generic_GCC::AddMultiarchPaths(D, SysRoot, OSLibDir, Paths);
|
|
||||||
|
|
||||||
- addPathIfExists(D, concat(SysRoot, "/lib"), Paths);
|
|
||||||
- addPathIfExists(D, concat(SysRoot, "/usr/lib"), Paths);
|
|
||||||
+ if (!IsRosa) {
|
|
||||||
+ addPathIfExists(D, concat(SysRoot, "/lib"), Paths);
|
|
||||||
+ addPathIfExists(D, concat(SysRoot, "/usr/lib"), Paths);
|
|
||||||
+ } else {
|
|
||||||
+ addPathIfExists(D, concat(SysRoot, "/system/lib"), Paths);
|
|
||||||
+ }
|
|
||||||
}
|
|
||||||
|
|
||||||
ToolChain::RuntimeLibType Linux::GetDefaultRuntimeLibType() const {
|
|
||||||
@@ -457,6 +481,9 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
|
|
||||||
return Triple.isArch64Bit() ? "/system/bin/linker64" : "/system/bin/linker";
|
|
||||||
}
|
|
||||||
if (Triple.isMusl()) {
|
|
||||||
+ if (Triple.getVendor() == llvm::Triple::Rosa)
|
|
||||||
+ return "/system/bin/linker";
|
|
||||||
+
|
|
||||||
std::string ArchName;
|
|
||||||
bool IsArm = false;
|
|
||||||
|
|
||||||
diff --git a/clang/tools/clang-installapi/Options.cpp b/clang/tools/clang-installapi/Options.cpp
|
|
||||||
index 64324a3f8b01..15ce70b68217 100644
|
|
||||||
--- a/clang/tools/clang-installapi/Options.cpp
|
|
||||||
+++ b/clang/tools/clang-installapi/Options.cpp
|
|
||||||
@@ -515,7 +515,7 @@ bool Options::processFrontendOptions(InputArgList &Args) {
|
|
||||||
FEOpts.FwkPaths = std::move(FrameworkPaths);
|
|
||||||
|
|
||||||
// Add default framework/library paths.
|
|
||||||
- PathSeq DefaultLibraryPaths = {"/usr/lib", "/usr/local/lib"};
|
|
||||||
+ PathSeq DefaultLibraryPaths = {"/usr/lib", "/system/lib", "/usr/local/lib"};
|
|
||||||
PathSeq DefaultFrameworkPaths = {"/Library/Frameworks",
|
|
||||||
"/System/Library/Frameworks"};
|
|
||||||
|
|
||||||
`},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
func init() {
|
func init() {
|
||||||
artifactsM[LLVMCompilerRT] = Metadata{
|
const (
|
||||||
|
version = "22.1.4"
|
||||||
|
checksum = "Bk3t-tV5sD5T0bqefFMcLeFuAwXnhFipywZmqst5hAZs97QQWGKB_5XyAFjj5tDB"
|
||||||
|
)
|
||||||
|
|
||||||
|
artifactsM[llvmSource] = Metadata{
|
||||||
f: func(t Toolchain) (pkg.Artifact, string) {
|
f: func(t Toolchain) (pkg.Artifact, string) {
|
||||||
_, compilerRT, _, _ := t.newLLVM()
|
return t.NewPatchedSource("llvm", version, newFromGitHub(
|
||||||
return compilerRT, llvmVersion
|
"llvm/llvm-project",
|
||||||
|
"llvmorg-"+version,
|
||||||
|
checksum,
|
||||||
|
), true, llvmPatches...), version
|
||||||
},
|
},
|
||||||
|
|
||||||
Name: "llvm-compiler-rt",
|
Name: "llvm-project",
|
||||||
Description: "LLVM runtime: compiler-rt",
|
Description: "LLVM monorepo with Rosa OS patches",
|
||||||
Website: "https://llvm.org/",
|
|
||||||
}
|
|
||||||
|
|
||||||
artifactsM[LLVMRuntimes] = Metadata{
|
|
||||||
f: func(t Toolchain) (pkg.Artifact, string) {
|
|
||||||
_, _, runtimes, _ := t.newLLVM()
|
|
||||||
return runtimes, llvmVersion
|
|
||||||
},
|
|
||||||
|
|
||||||
Name: "llvm-runtimes",
|
|
||||||
Description: "LLVM runtimes: libunwind, libcxx, libcxxabi",
|
|
||||||
Website: "https://llvm.org/",
|
|
||||||
}
|
|
||||||
|
|
||||||
artifactsM[LLVMClang] = Metadata{
|
|
||||||
f: func(t Toolchain) (pkg.Artifact, string) {
|
|
||||||
_, _, _, clang := t.newLLVM()
|
|
||||||
return clang, llvmVersion
|
|
||||||
},
|
|
||||||
|
|
||||||
Name: "clang",
|
|
||||||
Description: `an "LLVM native" C/C++/Objective-C compiler`,
|
|
||||||
Website: "https://llvm.org/",
|
|
||||||
|
|
||||||
ID: 1830,
|
ID: 1830,
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
artifactsM[LLVM] = Metadata{
|
||||||
// llvm stores the result of Toolchain.newLLVM.
|
f: Toolchain.newLLVM,
|
||||||
llvm [_toolchainEnd][4]pkg.Artifact
|
|
||||||
// llvmOnce is for lazy initialisation of llvm.
|
|
||||||
llvmOnce [_toolchainEnd]sync.Once
|
|
||||||
)
|
|
||||||
|
|
||||||
// NewLLVM returns LLVM toolchain across multiple [pkg.Artifact].
|
Name: "llvm",
|
||||||
func (t Toolchain) NewLLVM() (musl, compilerRT, runtimes, clang pkg.Artifact) {
|
Description: "a collection of modular and reusable compiler and toolchain technologies",
|
||||||
llvmOnce[t].Do(func() {
|
Website: "https://llvm.org",
|
||||||
llvm[t][0], llvm[t][1], llvm[t][2], llvm[t][3] = t.newLLVM()
|
|
||||||
})
|
Dependencies: P{
|
||||||
return llvm[t][0], llvm[t][1], llvm[t][2], llvm[t][3]
|
Zlib,
|
||||||
|
Zstd,
|
||||||
|
Musl,
|
||||||
|
},
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,9 +0,0 @@
|
|||||||
package rosa
|
|
||||||
|
|
||||||
// latest version of LLVM, conditional to temporarily avoid broken new releases
|
|
||||||
const (
|
|
||||||
llvmVersionMajor = "22"
|
|
||||||
llvmVersion = llvmVersionMajor + ".1.3"
|
|
||||||
|
|
||||||
llvmChecksum = "CUwnpzua_y28HZ9oI0NmcKL2wClsSjFpgY9do5-7cCZJHI5KNF64vfwGvY0TYyR3"
|
|
||||||
)
|
|
||||||
191
internal/rosa/llvm_patches.go
Normal file
191
internal/rosa/llvm_patches.go
Normal file
@@ -0,0 +1,191 @@
|
|||||||
|
package rosa
|
||||||
|
|
||||||
|
// llvmPatches are centralised patches against latest LLVM monorepo.
|
||||||
|
var llvmPatches = []KV{
|
||||||
|
{"increase-stack-size-unconditional", `diff --git a/llvm/lib/Support/Threading.cpp b/llvm/lib/Support/Threading.cpp
|
||||||
|
index 9da357a7ebb9..b2931510c1ae 100644
|
||||||
|
--- a/llvm/lib/Support/Threading.cpp
|
||||||
|
+++ b/llvm/lib/Support/Threading.cpp
|
||||||
|
@@ -80,7 +80,7 @@ unsigned llvm::ThreadPoolStrategy::compute_thread_count() const {
|
||||||
|
// keyword.
|
||||||
|
#include "llvm/Support/thread.h"
|
||||||
|
|
||||||
|
-#if defined(__APPLE__)
|
||||||
|
+#if defined(__APPLE__) || 1
|
||||||
|
// Darwin's default stack size for threads except the main one is only 512KB,
|
||||||
|
// which is not enough for some/many normal LLVM compilations. This implements
|
||||||
|
// the same interface as std::thread but requests the same stack size as the
|
||||||
|
`},
|
||||||
|
|
||||||
|
{"add-rosa-vendor", `diff --git a/llvm/include/llvm/TargetParser/Triple.h b/llvm/include/llvm/TargetParser/Triple.h
|
||||||
|
index 9c83abeeb3b1..5acfe5836a23 100644
|
||||||
|
--- a/llvm/include/llvm/TargetParser/Triple.h
|
||||||
|
+++ b/llvm/include/llvm/TargetParser/Triple.h
|
||||||
|
@@ -190,6 +190,7 @@ public:
|
||||||
|
|
||||||
|
Apple,
|
||||||
|
PC,
|
||||||
|
+ Rosa,
|
||||||
|
SCEI,
|
||||||
|
Freescale,
|
||||||
|
IBM,
|
||||||
|
diff --git a/llvm/lib/TargetParser/Triple.cpp b/llvm/lib/TargetParser/Triple.cpp
|
||||||
|
index a4f9dd42c0fe..cb5a12387034 100644
|
||||||
|
--- a/llvm/lib/TargetParser/Triple.cpp
|
||||||
|
+++ b/llvm/lib/TargetParser/Triple.cpp
|
||||||
|
@@ -279,6 +279,7 @@ StringRef Triple::getVendorTypeName(VendorType Kind) {
|
||||||
|
case NVIDIA: return "nvidia";
|
||||||
|
case OpenEmbedded: return "oe";
|
||||||
|
case PC: return "pc";
|
||||||
|
+ case Rosa: return "rosa";
|
||||||
|
case SCEI: return "scei";
|
||||||
|
case SUSE: return "suse";
|
||||||
|
case Meta:
|
||||||
|
@@ -689,6 +690,7 @@ static Triple::VendorType parseVendor(StringRef VendorName) {
|
||||||
|
return StringSwitch<Triple::VendorType>(VendorName)
|
||||||
|
.Case("apple", Triple::Apple)
|
||||||
|
.Case("pc", Triple::PC)
|
||||||
|
+ .Case("rosa", Triple::Rosa)
|
||||||
|
.Case("scei", Triple::SCEI)
|
||||||
|
.Case("sie", Triple::SCEI)
|
||||||
|
.Case("fsl", Triple::Freescale)
|
||||||
|
`},
|
||||||
|
|
||||||
|
{"xfail-broken-tests", `diff --git a/clang/test/Modules/timestamps.c b/clang/test/Modules/timestamps.c
|
||||||
|
index 50fdce630255..4b4465a75617 100644
|
||||||
|
--- a/clang/test/Modules/timestamps.c
|
||||||
|
+++ b/clang/test/Modules/timestamps.c
|
||||||
|
@@ -1,3 +1,5 @@
|
||||||
|
+// XFAIL: target={{.*-rosa-linux-musl}}
|
||||||
|
+
|
||||||
|
/// Verify timestamps that gets embedded in the module
|
||||||
|
#include <c-header.h>
|
||||||
|
|
||||||
|
`},
|
||||||
|
|
||||||
|
{"path-system-include", `diff --git a/clang/lib/Driver/ToolChains/Linux.cpp b/clang/lib/Driver/ToolChains/Linux.cpp
|
||||||
|
index 8ac8d4eb9181..e46b04a898ca 100644
|
||||||
|
--- a/clang/lib/Driver/ToolChains/Linux.cpp
|
||||||
|
+++ b/clang/lib/Driver/ToolChains/Linux.cpp
|
||||||
|
@@ -671,6 +671,12 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
|
||||||
|
addExternCSystemInclude(
|
||||||
|
DriverArgs, CC1Args,
|
||||||
|
concat(SysRoot, "/usr/include", MultiarchIncludeDir));
|
||||||
|
+ if (!MultiarchIncludeDir.empty() &&
|
||||||
|
+ D.getVFS().exists(concat(SysRoot, "/system/include", MultiarchIncludeDir)))
|
||||||
|
+ addExternCSystemInclude(
|
||||||
|
+ DriverArgs, CC1Args,
|
||||||
|
+ concat(SysRoot, "/system/include", MultiarchIncludeDir));
|
||||||
|
+
|
||||||
|
|
||||||
|
if (getTriple().getOS() == llvm::Triple::RTEMS)
|
||||||
|
return;
|
||||||
|
@@ -681,6 +687,7 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
|
||||||
|
addExternCSystemInclude(DriverArgs, CC1Args, concat(SysRoot, "/include"));
|
||||||
|
|
||||||
|
addExternCSystemInclude(DriverArgs, CC1Args, concat(SysRoot, "/usr/include"));
|
||||||
|
+ addExternCSystemInclude(DriverArgs, CC1Args, concat(SysRoot, "/system/include"));
|
||||||
|
|
||||||
|
if (!DriverArgs.hasArg(options::OPT_nobuiltininc) && getTriple().isMusl())
|
||||||
|
addSystemInclude(DriverArgs, CC1Args, ResourceDirInclude);
|
||||||
|
`},
|
||||||
|
|
||||||
|
{"path-system-libraries", `diff --git a/clang/lib/Driver/ToolChains/Linux.cpp b/clang/lib/Driver/ToolChains/Linux.cpp
|
||||||
|
index d525b417b4ea..fdc411f2239c 100644
|
||||||
|
--- a/clang/lib/Driver/ToolChains/Linux.cpp
|
||||||
|
+++ b/clang/lib/Driver/ToolChains/Linux.cpp
|
||||||
|
@@ -302,6 +302,7 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
|
||||||
|
const bool IsHexagon = Arch == llvm::Triple::hexagon;
|
||||||
|
const bool IsRISCV = Triple.isRISCV();
|
||||||
|
const bool IsCSKY = Triple.isCSKY();
|
||||||
|
+ const bool IsRosa = Triple.getVendor() == llvm::Triple::Rosa;
|
||||||
|
|
||||||
|
if (IsCSKY && !SelectedMultilibs.empty())
|
||||||
|
SysRoot = SysRoot + SelectedMultilibs.back().osSuffix();
|
||||||
|
@@ -337,12 +338,23 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
|
||||||
|
const std::string OSLibDir = std::string(getOSLibDir(Triple, Args));
|
||||||
|
const std::string MultiarchTriple = getMultiarchTriple(D, Triple, SysRoot);
|
||||||
|
|
||||||
|
+ if (IsRosa) {
|
||||||
|
+ ExtraOpts.push_back("-rpath");
|
||||||
|
+ ExtraOpts.push_back("/system/lib");
|
||||||
|
+ ExtraOpts.push_back("-rpath");
|
||||||
|
+ ExtraOpts.push_back(concat("/system/lib", Triple.str()));
|
||||||
|
+ }
|
||||||
|
+
|
||||||
|
// mips32: Debian multilib, we use /libo32, while in other case, /lib is
|
||||||
|
// used. We need add both libo32 and /lib.
|
||||||
|
if (Arch == llvm::Triple::mips || Arch == llvm::Triple::mipsel) {
|
||||||
|
Generic_GCC::AddMultilibPaths(D, SysRoot, "libo32", MultiarchTriple, Paths);
|
||||||
|
- addPathIfExists(D, concat(SysRoot, "/libo32"), Paths);
|
||||||
|
- addPathIfExists(D, concat(SysRoot, "/usr/libo32"), Paths);
|
||||||
|
+ if (!IsRosa) {
|
||||||
|
+ addPathIfExists(D, concat(SysRoot, "/libo32"), Paths);
|
||||||
|
+ addPathIfExists(D, concat(SysRoot, "/usr/libo32"), Paths);
|
||||||
|
+ } else {
|
||||||
|
+ addPathIfExists(D, concat(SysRoot, "/system/libo32"), Paths);
|
||||||
|
+ }
|
||||||
|
}
|
||||||
|
Generic_GCC::AddMultilibPaths(D, SysRoot, OSLibDir, MultiarchTriple, Paths);
|
||||||
|
|
||||||
|
@@ -360,18 +372,30 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
|
||||||
|
Paths);
|
||||||
|
}
|
||||||
|
|
||||||
|
- addPathIfExists(D, concat(SysRoot, "/usr/lib", MultiarchTriple), Paths);
|
||||||
|
- addPathIfExists(D, concat(SysRoot, "/usr", OSLibDir), Paths);
|
||||||
|
+ if (!IsRosa) {
|
||||||
|
+ addPathIfExists(D, concat(SysRoot, "/usr/lib", MultiarchTriple), Paths);
|
||||||
|
+ addPathIfExists(D, concat(SysRoot, "/usr", OSLibDir), Paths);
|
||||||
|
+ } else {
|
||||||
|
+ addPathIfExists(D, concat(SysRoot, "/system/lib", MultiarchTriple), Paths);
|
||||||
|
+ addPathIfExists(D, concat(SysRoot, "/system", OSLibDir), Paths);
|
||||||
|
+ }
|
||||||
|
if (IsRISCV) {
|
||||||
|
StringRef ABIName = tools::riscv::getRISCVABI(Args, Triple);
|
||||||
|
addPathIfExists(D, concat(SysRoot, "/", OSLibDir, ABIName), Paths);
|
||||||
|
- addPathIfExists(D, concat(SysRoot, "/usr", OSLibDir, ABIName), Paths);
|
||||||
|
+ if (!IsRosa)
|
||||||
|
+ addPathIfExists(D, concat(SysRoot, "/usr", OSLibDir, ABIName), Paths);
|
||||||
|
+ else
|
||||||
|
+ addPathIfExists(D, concat(SysRoot, "/system", OSLibDir, ABIName), Paths);
|
||||||
|
}
|
||||||
|
|
||||||
|
Generic_GCC::AddMultiarchPaths(D, SysRoot, OSLibDir, Paths);
|
||||||
|
|
||||||
|
- addPathIfExists(D, concat(SysRoot, "/lib"), Paths);
|
||||||
|
- addPathIfExists(D, concat(SysRoot, "/usr/lib"), Paths);
|
||||||
|
+ if (!IsRosa) {
|
||||||
|
+ addPathIfExists(D, concat(SysRoot, "/lib"), Paths);
|
||||||
|
+ addPathIfExists(D, concat(SysRoot, "/usr/lib"), Paths);
|
||||||
|
+ } else {
|
||||||
|
+ addPathIfExists(D, concat(SysRoot, "/system/lib"), Paths);
|
||||||
|
+ }
|
||||||
|
}
|
||||||
|
|
||||||
|
ToolChain::RuntimeLibType Linux::GetDefaultRuntimeLibType() const {
|
||||||
|
@@ -572,6 +596,9 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
|
||||||
|
return Triple.isArch64Bit() ? "/system/bin/linker64" : "/system/bin/linker";
|
||||||
|
}
|
||||||
|
if (Triple.isMusl()) {
|
||||||
|
+ if (Triple.getVendor() == llvm::Triple::Rosa)
|
||||||
|
+ return "/system/bin/linker";
|
||||||
|
+
|
||||||
|
std::string ArchName;
|
||||||
|
bool IsArm = false;
|
||||||
|
|
||||||
|
diff --git a/clang/tools/clang-installapi/Options.cpp b/clang/tools/clang-installapi/Options.cpp
|
||||||
|
index f484d6f33ad8..dca55e72d67e 100644
|
||||||
|
--- a/clang/tools/clang-installapi/Options.cpp
|
||||||
|
+++ b/clang/tools/clang-installapi/Options.cpp
|
||||||
|
@@ -514,7 +514,7 @@ bool Options::processFrontendOptions(InputArgList &Args) {
|
||||||
|
FEOpts.FwkPaths = std::move(FrameworkPaths);
|
||||||
|
|
||||||
|
// Add default framework/library paths.
|
||||||
|
- PathSeq DefaultLibraryPaths = {"/usr/lib", "/usr/local/lib"};
|
||||||
|
+ PathSeq DefaultLibraryPaths = {"/usr/lib", "/system/lib", "/usr/local/lib"};
|
||||||
|
PathSeq DefaultFrameworkPaths = {"/Library/Frameworks",
|
||||||
|
"/System/Library/Frameworks"};
|
||||||
|
|
||||||
|
`},
|
||||||
|
}
|
||||||
@@ -19,10 +19,10 @@ cd "$(mktemp -d)"
|
|||||||
--build="${ROSA_TRIPLE}" \
|
--build="${ROSA_TRIPLE}" \
|
||||||
--disable-dependency-tracking
|
--disable-dependency-tracking
|
||||||
./build.sh
|
./build.sh
|
||||||
./make DESTDIR=/work install check
|
./make DESTDIR=/work install
|
||||||
`, pkg.Path(AbsUsrSrc.Append("make"), false, pkg.NewHTTPGetTar(
|
`, pkg.Path(AbsUsrSrc.Append("make"), false, newTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/make/make-"+version+".tar.gz",
|
"https://ftpmirror.gnu.org/gnu/make/make-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
))), version
|
))), version
|
||||||
}
|
}
|
||||||
@@ -78,11 +78,6 @@ type MakeHelper struct {
|
|||||||
|
|
||||||
var _ Helper = new(MakeHelper)
|
var _ Helper = new(MakeHelper)
|
||||||
|
|
||||||
// name returns its arguments joined with '-'.
|
|
||||||
func (*MakeHelper) name(name, version string) string {
|
|
||||||
return name + "-" + version
|
|
||||||
}
|
|
||||||
|
|
||||||
// extra returns make and other optional dependencies.
|
// extra returns make and other optional dependencies.
|
||||||
func (attr *MakeHelper) extra(flag int) P {
|
func (attr *MakeHelper) extra(flag int) P {
|
||||||
extra := P{Make}
|
extra := P{Make}
|
||||||
@@ -176,7 +171,10 @@ func (attr *MakeHelper) script(name string) string {
|
|||||||
s = "-" + s
|
s = "-" + s
|
||||||
}
|
}
|
||||||
if v[1] != "" {
|
if v[1] != "" {
|
||||||
s += "=" + v[1]
|
if v[0] != "" {
|
||||||
|
s += "="
|
||||||
|
}
|
||||||
|
s += v[1]
|
||||||
}
|
}
|
||||||
if !yield(s) {
|
if !yield(s) {
|
||||||
return
|
return
|
||||||
@@ -190,15 +188,15 @@ func (attr *MakeHelper) script(name string) string {
|
|||||||
|
|
||||||
scriptMake := `
|
scriptMake := `
|
||||||
make \
|
make \
|
||||||
"-j$(nproc)"`
|
` + jobsFlagE
|
||||||
if len(attr.Make) > 0 {
|
if len(attr.Make) > 0 {
|
||||||
scriptMake += " \\\n\t" + strings.Join(attr.Make, " \\\n\t")
|
scriptMake += " \\\n\t" + strings.Join(attr.Make, " \\\n\t")
|
||||||
}
|
}
|
||||||
scriptMake += "\n"
|
scriptMake += "\n"
|
||||||
|
|
||||||
if !attr.SkipCheck {
|
if !attr.SkipCheck && presetOpts&OptSkipCheck == 0 {
|
||||||
scriptMake += attr.ScriptCheckEarly + `make \
|
scriptMake += attr.ScriptCheckEarly + `make \
|
||||||
"-j$(nproc)" \
|
` + jobsFlagE + ` \
|
||||||
`
|
`
|
||||||
if len(attr.Check) > 0 {
|
if len(attr.Check) > 0 {
|
||||||
scriptMake += strings.Join(attr.Check, " \\\n\t")
|
scriptMake += strings.Join(attr.Check, " \\\n\t")
|
||||||
|
|||||||
66
internal/rosa/mesa.go
Normal file
66
internal/rosa/mesa.go
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
package rosa
|
||||||
|
|
||||||
|
import "hakurei.app/internal/pkg"
|
||||||
|
|
||||||
|
func (t Toolchain) newLibglvnd() (pkg.Artifact, string) {
|
||||||
|
const (
|
||||||
|
version = "1.7.0"
|
||||||
|
checksum = "eIQJK2sgFQDHdeFkQO87TrSUaZRFG4y2DrwA8Ut-sGboI59uw1OOiIVqq2AIwnGY"
|
||||||
|
)
|
||||||
|
return t.NewPackage("libglvnd", version, newFromGitLab(
|
||||||
|
"gitlab.freedesktop.org",
|
||||||
|
"glvnd/libglvnd",
|
||||||
|
"v"+version,
|
||||||
|
checksum,
|
||||||
|
), nil, (*MesonHelper)(nil),
|
||||||
|
Binutils, // symbols check fail with llvm nm
|
||||||
|
), version
|
||||||
|
}
|
||||||
|
func init() {
|
||||||
|
artifactsM[Libglvnd] = Metadata{
|
||||||
|
f: Toolchain.newLibglvnd,
|
||||||
|
|
||||||
|
Name: "libglvnd",
|
||||||
|
Description: "The GL Vendor-Neutral Dispatch library",
|
||||||
|
Website: "https://gitlab.freedesktop.org/glvnd/libglvnd",
|
||||||
|
|
||||||
|
ID: 12098,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t Toolchain) newLibdrm() (pkg.Artifact, string) {
|
||||||
|
const (
|
||||||
|
version = "2.4.133"
|
||||||
|
checksum = "bfj296NcR9DndO11hqDbSRFPqaweSLMqRk3dlCPZpM6FONX1WZ9J4JdbTDMUd1rU"
|
||||||
|
)
|
||||||
|
return t.NewPackage("libdrm", version, newFromGitLab(
|
||||||
|
"gitlab.freedesktop.org",
|
||||||
|
"mesa/libdrm",
|
||||||
|
"libdrm-"+version,
|
||||||
|
checksum,
|
||||||
|
), nil, &MesonHelper{
|
||||||
|
Setup: []KV{
|
||||||
|
{"Dintel", "enabled"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Binutils, // symbols check fail with llvm nm
|
||||||
|
|
||||||
|
Libpciaccess,
|
||||||
|
KernelHeaders,
|
||||||
|
), version
|
||||||
|
}
|
||||||
|
func init() {
|
||||||
|
artifactsM[Libdrm] = Metadata{
|
||||||
|
f: Toolchain.newLibdrm,
|
||||||
|
|
||||||
|
Name: "libdrm",
|
||||||
|
Description: "a userspace library for accessing the DRM",
|
||||||
|
Website: "https://dri.freedesktop.org/",
|
||||||
|
|
||||||
|
Dependencies: P{
|
||||||
|
Libpciaccess,
|
||||||
|
},
|
||||||
|
|
||||||
|
ID: 1596,
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -9,26 +9,47 @@ import (
|
|||||||
|
|
||||||
func (t Toolchain) newMeson() (pkg.Artifact, string) {
|
func (t Toolchain) newMeson() (pkg.Artifact, string) {
|
||||||
const (
|
const (
|
||||||
version = "1.10.2"
|
version = "1.11.1"
|
||||||
checksum = "18VmKUVKuXCwtawkYCeYHseC3cKpi86OhnIPaV878wjY0rkXH8XnQwUyymnxFgcl"
|
checksum = "uvILRxdopwc6Dy17UbIeClcQr0qHqyTaqyk1M9OqWKN9PwB9N6UVAiyN8kSSz3r2"
|
||||||
)
|
)
|
||||||
return t.New("meson-"+version, 0, []pkg.Artifact{
|
return t.NewPackage("meson", version, newFromGitHub(
|
||||||
t.Load(Zlib),
|
"mesonbuild/meson",
|
||||||
t.Load(Python),
|
version,
|
||||||
t.Load(Setuptools),
|
checksum,
|
||||||
}, nil, nil, `
|
), &PackageAttr{
|
||||||
cd /usr/src/meson
|
Env: []string{
|
||||||
chmod -R +w meson.egg-info
|
"CMAKE_MAKE_PROGRAM=ninja",
|
||||||
python3 setup.py \
|
},
|
||||||
install \
|
}, &PipHelper{
|
||||||
--prefix=/system \
|
EnterSource: true,
|
||||||
--root=/work
|
Check: `
|
||||||
`, pkg.Path(AbsUsrSrc.Append("meson"), true, pkg.NewHTTPGetTar(
|
cd 'test cases'
|
||||||
nil, "https://github.com/mesonbuild/meson/releases/download/"+
|
rm -rf \
|
||||||
version+"/meson-"+version+".tar.gz",
|
'common/32 has header' \
|
||||||
mustDecode(checksum),
|
'common/66 vcstag' \
|
||||||
pkg.TarGzip,
|
'common/153 wrap file should not failed' \
|
||||||
))), version
|
'common/184 openmp' \
|
||||||
|
'common/189 check header' \
|
||||||
|
'linuxlike/6 subdir include order' \
|
||||||
|
'linuxlike/9 compiler checks with dependencies' \
|
||||||
|
'linuxlike/13 cmake dependency' \
|
||||||
|
'frameworks/15 llvm' \
|
||||||
|
'frameworks/29 blocks'
|
||||||
|
cd ..
|
||||||
|
|
||||||
|
python3 ./run_project_tests.py \
|
||||||
|
-v \
|
||||||
|
` + jobsFlagE + ` \
|
||||||
|
--failfast \
|
||||||
|
--backend=ninja
|
||||||
|
`,
|
||||||
|
},
|
||||||
|
PythonSetuptools,
|
||||||
|
PkgConfig,
|
||||||
|
CMake,
|
||||||
|
Ninja,
|
||||||
|
PythonPyTest,
|
||||||
|
), version
|
||||||
}
|
}
|
||||||
func init() {
|
func init() {
|
||||||
artifactsM[Meson] = Metadata{
|
artifactsM[Meson] = Metadata{
|
||||||
@@ -66,11 +87,6 @@ type MesonHelper struct {
|
|||||||
|
|
||||||
var _ Helper = new(MesonHelper)
|
var _ Helper = new(MesonHelper)
|
||||||
|
|
||||||
// name returns its arguments joined with '-'.
|
|
||||||
func (*MesonHelper) name(name, version string) string {
|
|
||||||
return name + "-" + version
|
|
||||||
}
|
|
||||||
|
|
||||||
// extra returns hardcoded meson runtime dependencies.
|
// extra returns hardcoded meson runtime dependencies.
|
||||||
func (*MesonHelper) extra(int) P { return P{Meson} }
|
func (*MesonHelper) extra(int) P { return P{Meson} }
|
||||||
|
|
||||||
@@ -101,7 +117,7 @@ func (attr *MesonHelper) script(name string) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var scriptTest string
|
var scriptTest string
|
||||||
if !attr.SkipTest {
|
if !attr.SkipTest && presetOpts&OptSkipCheck == 0 {
|
||||||
scriptTest = `
|
scriptTest = `
|
||||||
meson test \
|
meson test \
|
||||||
--print-errorlogs`
|
--print-errorlogs`
|
||||||
|
|||||||
@@ -7,9 +7,9 @@ func (t Toolchain) newMksh() (pkg.Artifact, string) {
|
|||||||
version = "59c"
|
version = "59c"
|
||||||
checksum = "0Zj-k4nXEu3IuJY4lvwD2OrC2t27GdZj8SPy4DoaeuBRH1padWb7oREpYgwY8JNq"
|
checksum = "0Zj-k4nXEu3IuJY4lvwD2OrC2t27GdZj8SPy4DoaeuBRH1padWb7oREpYgwY8JNq"
|
||||||
)
|
)
|
||||||
return t.New("mksh-"+version, 0, stage0Concat(t, []pkg.Artifact{},
|
return t.New("mksh-"+version, 0, t.AppendPresets(nil,
|
||||||
t.Load(Perl),
|
Perl,
|
||||||
t.Load(Coreutils),
|
Coreutils,
|
||||||
), nil, []string{
|
), nil, []string{
|
||||||
"LDSTATIC=-static",
|
"LDSTATIC=-static",
|
||||||
"CPPFLAGS=-DMKSH_DEFAULT_PROFILEDIR=\\\"/system/etc\\\"",
|
"CPPFLAGS=-DMKSH_DEFAULT_PROFILEDIR=\\\"/system/etc\\\"",
|
||||||
@@ -26,10 +26,9 @@ cp -v lksh /work/system/bin/sh
|
|||||||
|
|
||||||
mkdir -p /work/bin/
|
mkdir -p /work/bin/
|
||||||
ln -vs ../system/bin/sh /work/bin/
|
ln -vs ../system/bin/sh /work/bin/
|
||||||
`, pkg.Path(AbsUsrSrc.Append("mksh"), false, pkg.NewHTTPGetTar(
|
`, pkg.Path(AbsUsrSrc.Append("mksh"), false, newTar(
|
||||||
nil,
|
|
||||||
"https://mbsd.evolvis.org/MirOS/dist/mir/mksh/mksh-R"+version+".tgz",
|
"https://mbsd.evolvis.org/MirOS/dist/mir/mksh/mksh-R"+version+".tgz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
))), version
|
))), version
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,11 +7,10 @@ func (t Toolchain) newMuslFts() (pkg.Artifact, string) {
|
|||||||
version = "1.2.7"
|
version = "1.2.7"
|
||||||
checksum = "N_p_ZApX3eHt7xoDCw1hLf6XdJOw7ZSx7xPvpvAP0knG2zgU0zeN5w8tt5Pg60XJ"
|
checksum = "N_p_ZApX3eHt7xoDCw1hLf6XdJOw7ZSx7xPvpvAP0knG2zgU0zeN5w8tt5Pg60XJ"
|
||||||
)
|
)
|
||||||
return t.NewPackage("musl-fts", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("musl-fts", version, newFromGitHub(
|
||||||
nil, "https://github.com/void-linux/musl-fts/archive/refs/tags/"+
|
"void-linux/musl-fts",
|
||||||
"v"+version+".tar.gz",
|
"v"+version,
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Env: []string{
|
Env: []string{
|
||||||
"CC=cc -fPIC",
|
"CC=cc -fPIC",
|
||||||
|
|||||||
@@ -7,11 +7,10 @@ func (t Toolchain) newMuslObstack() (pkg.Artifact, string) {
|
|||||||
version = "1.2.3"
|
version = "1.2.3"
|
||||||
checksum = "tVRY_KjIlkkMszcaRlkKdBVQHIXTT_T_TiMxbwErlILXrOBosocg8KklppZhNdCG"
|
checksum = "tVRY_KjIlkkMszcaRlkKdBVQHIXTT_T_TiMxbwErlILXrOBosocg8KklppZhNdCG"
|
||||||
)
|
)
|
||||||
return t.NewPackage("musl-obstack", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("musl-obstack", version, newFromGitHub(
|
||||||
nil, "https://github.com/void-linux/musl-obstack/archive/refs/tags/"+
|
"void-linux/musl-obstack",
|
||||||
"v"+version+".tar.gz",
|
"v"+version,
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Env: []string{
|
Env: []string{
|
||||||
"CC=cc -fPIC",
|
"CC=cc -fPIC",
|
||||||
|
|||||||
@@ -2,11 +2,7 @@ package rosa
|
|||||||
|
|
||||||
import "hakurei.app/internal/pkg"
|
import "hakurei.app/internal/pkg"
|
||||||
|
|
||||||
func (t Toolchain) newMusl(
|
func (t Toolchain) newMusl(headers bool) (pkg.Artifact, string) {
|
||||||
headers bool,
|
|
||||||
env []string,
|
|
||||||
extra ...pkg.Artifact,
|
|
||||||
) (pkg.Artifact, string) {
|
|
||||||
const (
|
const (
|
||||||
version = "1.2.6"
|
version = "1.2.6"
|
||||||
checksum = "WtWb_OV_XxLDAB5NerOL9loLlHVadV00MmGk65PPBU1evaolagoMHfvpZp_vxEzS"
|
checksum = "WtWb_OV_XxLDAB5NerOL9loLlHVadV00MmGk65PPBU1evaolagoMHfvpZp_vxEzS"
|
||||||
@@ -37,13 +33,22 @@ rmdir -v /work/lib
|
|||||||
helper.Script = ""
|
helper.Script = ""
|
||||||
}
|
}
|
||||||
|
|
||||||
return t.NewPackage(name, version, pkg.NewHTTPGetTar(
|
env := []string{
|
||||||
nil, "https://musl.libc.org/releases/musl-"+version+".tar.gz",
|
"LDFLAGS=" + earlyLDFLAGS(false),
|
||||||
mustDecode(checksum),
|
}
|
||||||
|
if t.isStage0() {
|
||||||
|
env = append(env,
|
||||||
|
"CC=clang",
|
||||||
|
"AR=ar",
|
||||||
|
"RANLIB=ranlib",
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
return t.NewPackage(name, version, newTar(
|
||||||
|
"https://musl.libc.org/releases/musl-"+version+".tar.gz",
|
||||||
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
NonStage0: extra,
|
|
||||||
|
|
||||||
// expected to be writable in copies
|
// expected to be writable in copies
|
||||||
Chmod: true,
|
Chmod: true,
|
||||||
|
|
||||||
@@ -55,7 +60,7 @@ rmdir -v /work/lib
|
|||||||
func init() {
|
func init() {
|
||||||
artifactsM[Musl] = Metadata{
|
artifactsM[Musl] = Metadata{
|
||||||
f: func(t Toolchain) (pkg.Artifact, string) {
|
f: func(t Toolchain) (pkg.Artifact, string) {
|
||||||
return t.newMusl(false, nil)
|
return t.newMusl(false)
|
||||||
},
|
},
|
||||||
|
|
||||||
Name: "musl",
|
Name: "musl",
|
||||||
@@ -64,4 +69,13 @@ func init() {
|
|||||||
|
|
||||||
ID: 11688,
|
ID: 11688,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
artifactsM[muslHeaders] = Metadata{
|
||||||
|
f: func(t Toolchain) (pkg.Artifact, string) {
|
||||||
|
return t.newMusl(true)
|
||||||
|
},
|
||||||
|
|
||||||
|
Name: "musl-headers",
|
||||||
|
Description: "system installation of musl headers",
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,9 +7,9 @@ func (t Toolchain) newNcurses() (pkg.Artifact, string) {
|
|||||||
version = "6.6"
|
version = "6.6"
|
||||||
checksum = "XvWp4xi6hR_hH8XUoGY26L_pqBSDapJYulhzZqPuR0KNklqypqNc1yNXU-nOjf5w"
|
checksum = "XvWp4xi6hR_hH8XUoGY26L_pqBSDapJYulhzZqPuR0KNklqypqNc1yNXU-nOjf5w"
|
||||||
)
|
)
|
||||||
return t.NewPackage("ncurses", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("ncurses", version, newTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/ncurses/ncurses-"+version+".tar.gz",
|
"https://ftpmirror.gnu.org/gnu/ncurses/ncurses-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil, &MakeHelper{
|
), nil, &MakeHelper{
|
||||||
// "tests" are actual demo programs, not a test suite.
|
// "tests" are actual demo programs, not a test suite.
|
||||||
|
|||||||
@@ -7,10 +7,10 @@ func (t Toolchain) newLibmnl() (pkg.Artifact, string) {
|
|||||||
version = "1.0.5"
|
version = "1.0.5"
|
||||||
checksum = "DN-vbbvQDpxXJm0TJ6xlluILvfrB86avrCTX50XyE9SEFSAZ_o8nuKc5Gu0Am7-u"
|
checksum = "DN-vbbvQDpxXJm0TJ6xlluILvfrB86avrCTX50XyE9SEFSAZ_o8nuKc5Gu0Am7-u"
|
||||||
)
|
)
|
||||||
return t.NewPackage("libmnl", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("libmnl", version, newTar(
|
||||||
nil, "https://www.netfilter.org/projects/libmnl/files/"+
|
"https://www.netfilter.org/projects/libmnl/files/"+
|
||||||
"libmnl-"+version+".tar.bz2",
|
"libmnl-"+version+".tar.bz2",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarBzip2,
|
pkg.TarBzip2,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Patches: []KV{
|
Patches: []KV{
|
||||||
@@ -55,10 +55,9 @@ func (t Toolchain) newLibnftnl() (pkg.Artifact, string) {
|
|||||||
version = "1.3.1"
|
version = "1.3.1"
|
||||||
checksum = "91ou66K-I17iX6DB6hiQkhhC_v4DFW5iDGzwjVRNbJNEmKqowLZBlh3FY-ZDO0r9"
|
checksum = "91ou66K-I17iX6DB6hiQkhhC_v4DFW5iDGzwjVRNbJNEmKqowLZBlh3FY-ZDO0r9"
|
||||||
)
|
)
|
||||||
return t.NewPackage("libnftnl", version, t.NewViaGit(
|
return t.NewPackage("libnftnl", version, t.newTagRemote(
|
||||||
"https://git.netfilter.org/libnftnl",
|
"https://git.netfilter.org/libnftnl",
|
||||||
"refs/tags/libnftnl-"+version,
|
"libnftnl-"+version, checksum,
|
||||||
mustDecode(checksum),
|
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Env: []string{
|
Env: []string{
|
||||||
"CFLAGS=-D_GNU_SOURCE",
|
"CFLAGS=-D_GNU_SOURCE",
|
||||||
@@ -98,10 +97,9 @@ func (t Toolchain) newIPTables() (pkg.Artifact, string) {
|
|||||||
version = "1.8.13"
|
version = "1.8.13"
|
||||||
checksum = "TUA-cFIAsiMvtRR-XzQvXzoIhJUOc9J2gQDJCbBRjmgmVfGfPTCf58wL7e-cUKVQ"
|
checksum = "TUA-cFIAsiMvtRR-XzQvXzoIhJUOc9J2gQDJCbBRjmgmVfGfPTCf58wL7e-cUKVQ"
|
||||||
)
|
)
|
||||||
return t.NewPackage("iptables", version, t.NewViaGit(
|
return t.NewPackage("iptables", version, t.newTagRemote(
|
||||||
"https://git.netfilter.org/iptables",
|
"https://git.netfilter.org/iptables",
|
||||||
"refs/tags/v"+version,
|
"v"+version, checksum,
|
||||||
mustDecode(checksum),
|
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
ScriptEarly: `
|
ScriptEarly: `
|
||||||
rm \
|
rm \
|
||||||
|
|||||||
@@ -7,9 +7,9 @@ func (t Toolchain) newNettle() (pkg.Artifact, string) {
|
|||||||
version = "4.0"
|
version = "4.0"
|
||||||
checksum = "6agC-vHzzoqAlaX3K9tX8yHgrm03HLqPZzVzq8jh_ePbuPMIvpxereu_uRJFmQK7"
|
checksum = "6agC-vHzzoqAlaX3K9tX8yHgrm03HLqPZzVzq8jh_ePbuPMIvpxereu_uRJFmQK7"
|
||||||
)
|
)
|
||||||
return t.NewPackage("nettle", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("nettle", version, newTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/nettle/nettle-"+version+".tar.gz",
|
"https://ftpmirror.gnu.org/gnu/nettle/nettle-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil, (*MakeHelper)(nil),
|
), nil, (*MakeHelper)(nil),
|
||||||
M4,
|
M4,
|
||||||
|
|||||||
@@ -7,9 +7,9 @@ func (t Toolchain) newNettle3() (pkg.Artifact, string) {
|
|||||||
version = "3.10.2"
|
version = "3.10.2"
|
||||||
checksum = "07aXlj10X5llf67jIqRQAA1pgLSgb0w_JYggZVPuKNoc-B-_usb5Kr8FrfBe7g1S"
|
checksum = "07aXlj10X5llf67jIqRQAA1pgLSgb0w_JYggZVPuKNoc-B-_usb5Kr8FrfBe7g1S"
|
||||||
)
|
)
|
||||||
return t.NewPackage("nettle", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("nettle", version, newTar(
|
||||||
nil, "https://ftpmirror.gnu.org/gnu/nettle/nettle-"+version+".tar.gz",
|
"https://ftpmirror.gnu.org/gnu/nettle/nettle-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil, (*MakeHelper)(nil),
|
), nil, (*MakeHelper)(nil),
|
||||||
M4,
|
M4,
|
||||||
|
|||||||
@@ -13,25 +13,26 @@ func (t Toolchain) newNinja() (pkg.Artifact, string) {
|
|||||||
}, nil, nil, `
|
}, nil, nil, `
|
||||||
cd "$(mktemp -d)"
|
cd "$(mktemp -d)"
|
||||||
python3 /usr/src/ninja/configure.py \
|
python3 /usr/src/ninja/configure.py \
|
||||||
|
--verbose \
|
||||||
--bootstrap \
|
--bootstrap \
|
||||||
--gtest-source-dir=/usr/src/googletest
|
--gtest-source-dir=/usr/src/googletest
|
||||||
./ninja all
|
./ninja `+jobsFlagE+` all
|
||||||
./ninja_test
|
./ninja_test
|
||||||
|
|
||||||
mkdir -p /work/system/bin/
|
mkdir -p /work/system/bin/
|
||||||
cp ninja /work/system/bin/
|
cp ninja /work/system/bin/
|
||||||
`, pkg.Path(AbsUsrSrc.Append("googletest"), false,
|
`, pkg.Path(AbsUsrSrc.Append("googletest"), false,
|
||||||
pkg.NewHTTPGetTar(
|
newFromGitHubRelease(
|
||||||
nil, "https://github.com/google/googletest/releases/download/"+
|
"google/googletest",
|
||||||
"v1.16.0/googletest-1.16.0.tar.gz",
|
"v1.16.0",
|
||||||
mustDecode("NjLGvSbgPy_B-y-o1hdanlzEzaYeStFcvFGxpYV3KYlhrWWFRcugYhM3ZMzOA9B_"),
|
"googletest-1.16.0.tar.gz",
|
||||||
|
"NjLGvSbgPy_B-y-o1hdanlzEzaYeStFcvFGxpYV3KYlhrWWFRcugYhM3ZMzOA9B_",
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
)), pkg.Path(AbsUsrSrc.Append("ninja"), true, t.NewPatchedSource(
|
)), pkg.Path(AbsUsrSrc.Append("ninja"), true, t.NewPatchedSource(
|
||||||
"ninja", version, pkg.NewHTTPGetTar(
|
"ninja", version, newFromGitHub(
|
||||||
nil, "https://github.com/ninja-build/ninja/archive/refs/tags/"+
|
"ninja-build/ninja",
|
||||||
"v"+version+".tar.gz",
|
"v"+version,
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
|
||||||
), false,
|
), false,
|
||||||
))), version
|
))), version
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,17 +8,16 @@ import (
|
|||||||
|
|
||||||
func (t Toolchain) newNSS() (pkg.Artifact, string) {
|
func (t Toolchain) newNSS() (pkg.Artifact, string) {
|
||||||
const (
|
const (
|
||||||
version = "3.122"
|
version = "3.123.1"
|
||||||
checksum = "QvC6TBO4BAUEh6wmgUrb1hwH5podQAN-QdcAaWL32cWEppmZs6oKkZpD9GvZf59S"
|
checksum = "g811Z_fc74ssg-s6BeXRG-ipSfJggD6hrxjVJxrOBIz98CE7piv0OLwzIRLMQpwR"
|
||||||
|
|
||||||
version0 = "4_38_2"
|
version0 = "4_38_2"
|
||||||
checksum0 = "25x2uJeQnOHIiq_zj17b4sYqKgeoU8-IsySUptoPcdHZ52PohFZfGuIisBreWzx0"
|
checksum0 = "25x2uJeQnOHIiq_zj17b4sYqKgeoU8-IsySUptoPcdHZ52PohFZfGuIisBreWzx0"
|
||||||
)
|
)
|
||||||
return t.NewPackage("nss", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("nss", version, newFromGitHub(
|
||||||
nil, "https://github.com/nss-dev/nss/archive/refs/tags/"+
|
"nss-dev/nss",
|
||||||
"NSS_"+strings.Join(strings.SplitN(version, ".", 2), "_")+"_RTM.tar.gz",
|
"NSS_"+strings.Join(strings.SplitN(version, ".", 3), "_")+"_RTM",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Paths: []pkg.ExecPath{
|
Paths: []pkg.ExecPath{
|
||||||
pkg.Path(AbsUsrSrc.Append("nspr.zip"), false, pkg.NewHTTPGet(
|
pkg.Path(AbsUsrSrc.Append("nspr.zip"), false, pkg.NewHTTPGet(
|
||||||
@@ -84,14 +83,22 @@ func init() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
const version = "0.4.0"
|
const (
|
||||||
artifactsM[buildcatrust] = newViaPip(
|
version = "0.5.1"
|
||||||
"buildcatrust",
|
checksum = "oxjnuIrPVMPvD6x8VFLqB7EdbfuhouGQdtPuHDpEHGzoyH5nkxqtYN9UthMY9noA"
|
||||||
|
)
|
||||||
|
artifactsM[buildcatrust] = newPythonPackage(
|
||||||
|
"buildcatrust", 233988,
|
||||||
"transform certificate stores between formats",
|
"transform certificate stores between formats",
|
||||||
version, "py3", "none", "any",
|
"https://github.com/nix-community/buildcatrust",
|
||||||
"k_FGzkRCLjbTWBkuBLzQJ1S8FPAz19neJZlMHm0t10F2Y0hElmvVwdSBRc03Rjo1",
|
version, newFromGitHub(
|
||||||
"https://github.com/nix-community/buildcatrust/"+
|
"nix-community/buildcatrust",
|
||||||
"releases/download/v"+version+"/",
|
"v"+version, checksum,
|
||||||
|
), &PackageAttr{
|
||||||
|
ScriptEarly: `
|
||||||
|
rm buildcatrust/tests/test_nonhermetic.py
|
||||||
|
`,
|
||||||
|
}, nil, P{PythonFlitCore},
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -7,10 +7,11 @@ func (t Toolchain) newOpenSSL() (pkg.Artifact, string) {
|
|||||||
version = "3.6.2"
|
version = "3.6.2"
|
||||||
checksum = "jH004dXTiE01Hp0kyShkWXwrSHEksZi4i_3v47D9H9Uz9LQ1aMwF7mrl2Tb4t_XA"
|
checksum = "jH004dXTiE01Hp0kyShkWXwrSHEksZi4i_3v47D9H9Uz9LQ1aMwF7mrl2Tb4t_XA"
|
||||||
)
|
)
|
||||||
return t.NewPackage("openssl", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("openssl", version, newFromGitHubRelease(
|
||||||
nil, "https://github.com/openssl/openssl/releases/download/"+
|
"openssl/openssl",
|
||||||
"openssl-"+version+"/openssl-"+version+".tar.gz",
|
"openssl-"+version,
|
||||||
mustDecode(checksum),
|
"openssl-"+version+".tar.gz",
|
||||||
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
Env: []string{
|
Env: []string{
|
||||||
@@ -24,9 +25,10 @@ func (t Toolchain) newOpenSSL() (pkg.Artifact, string) {
|
|||||||
{"prefix", "/system"},
|
{"prefix", "/system"},
|
||||||
{"libdir", "lib"},
|
{"libdir", "lib"},
|
||||||
{"openssldir", "etc/ssl"},
|
{"openssldir", "etc/ssl"},
|
||||||
|
{"", "no-docs"},
|
||||||
},
|
},
|
||||||
Check: []string{
|
Check: []string{
|
||||||
`HARNESS_JOBS="$(expr "$(nproc)" '*' 2)"`,
|
"HARNESS_JOBS=" + jobsE,
|
||||||
"test",
|
"test",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -7,9 +7,9 @@ func (t Toolchain) newP11Kit() (pkg.Artifact, string) {
|
|||||||
version = "0.26.2"
|
version = "0.26.2"
|
||||||
checksum = "3ei-6DUVtYzrRVe-SubtNgRlweXd6H2qHmUu-_5qVyIn6gSTvZbGS2u79Y8IFb2N"
|
checksum = "3ei-6DUVtYzrRVe-SubtNgRlweXd6H2qHmUu-_5qVyIn6gSTvZbGS2u79Y8IFb2N"
|
||||||
)
|
)
|
||||||
return t.NewPackage("p11-kit", version, t.NewViaGit(
|
return t.NewPackage("p11-kit", version, t.newTagRemote(
|
||||||
"https://github.com/p11-glue/p11-kit.git",
|
"https://github.com/p11-glue/p11-kit.git",
|
||||||
"refs/tags/"+version, mustDecode(checksum),
|
version, checksum,
|
||||||
), nil, &MesonHelper{
|
), nil, &MesonHelper{
|
||||||
Setup: []KV{
|
Setup: []KV{
|
||||||
{"Dsystemd", "disabled"},
|
{"Dsystemd", "disabled"},
|
||||||
|
|||||||
@@ -9,10 +9,11 @@ func (t Toolchain) newPCRE2() (pkg.Artifact, string) {
|
|||||||
version = "10.47"
|
version = "10.47"
|
||||||
checksum = "IbC24vVayju6nB9EhrBPSDexk22wDecdpyrjgC3nCZXkwTnUjq4CD2q5sopqu6CW"
|
checksum = "IbC24vVayju6nB9EhrBPSDexk22wDecdpyrjgC3nCZXkwTnUjq4CD2q5sopqu6CW"
|
||||||
)
|
)
|
||||||
return t.NewPackage("pcre2", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("pcre2", version, newFromGitHubRelease(
|
||||||
nil, "https://github.com/PCRE2Project/pcre2/releases/download/"+
|
"PCRE2Project/pcre2",
|
||||||
"pcre2-"+version+"/pcre2-"+version+".tar.bz2",
|
"pcre2-"+version,
|
||||||
mustDecode(checksum),
|
"pcre2-"+version+".tar.bz2",
|
||||||
|
checksum,
|
||||||
pkg.TarBzip2,
|
pkg.TarBzip2,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
ScriptEarly: `
|
ScriptEarly: `
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ package rosa
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"slices"
|
"slices"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"hakurei.app/internal/pkg"
|
"hakurei.app/internal/pkg"
|
||||||
)
|
)
|
||||||
@@ -11,9 +12,9 @@ func (t Toolchain) newPerl() (pkg.Artifact, string) {
|
|||||||
version = "5.42.2"
|
version = "5.42.2"
|
||||||
checksum = "Me_xFfgkRnVyG0sE6a74TktK2OUq9Z1LVJNEu_9RdZG3S2fbjfzNiuk2SJqHAgbm"
|
checksum = "Me_xFfgkRnVyG0sE6a74TktK2OUq9Z1LVJNEu_9RdZG3S2fbjfzNiuk2SJqHAgbm"
|
||||||
)
|
)
|
||||||
return t.NewPackage("perl", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("perl", version, newTar(
|
||||||
nil, "https://www.cpan.org/src/5.0/perl-"+version+".tar.gz",
|
"https://www.cpan.org/src/5.0/perl-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), &PackageAttr{
|
), &PackageAttr{
|
||||||
// uses source tree as scratch space
|
// uses source tree as scratch space
|
||||||
@@ -22,7 +23,7 @@ func (t Toolchain) newPerl() (pkg.Artifact, string) {
|
|||||||
|
|
||||||
ScriptEarly: `
|
ScriptEarly: `
|
||||||
echo 'print STDOUT "1..0 # Skip broken test\n";' > ext/Pod-Html/t/htmldir3.t
|
echo 'print STDOUT "1..0 # Skip broken test\n";' > ext/Pod-Html/t/htmldir3.t
|
||||||
rm -f /system/bin/ps # perl does not like toybox ps
|
chmod +w /system/bin && rm -f /system/bin/ps # perl does not like toybox ps
|
||||||
`,
|
`,
|
||||||
|
|
||||||
Flag: TEarly,
|
Flag: TEarly,
|
||||||
@@ -42,7 +43,7 @@ rm -f /system/bin/ps # perl does not like toybox ps
|
|||||||
{"Duseshrplib"},
|
{"Duseshrplib"},
|
||||||
},
|
},
|
||||||
Check: []string{
|
Check: []string{
|
||||||
"TEST_JOBS=256",
|
"TEST_JOBS=" + jobsLE,
|
||||||
"test_harness",
|
"test_harness",
|
||||||
},
|
},
|
||||||
Install: `LD_LIBRARY_PATH="$PWD" ./perl -Ilib -I. installperl --destdir=/work`,
|
Install: `LD_LIBRARY_PATH="$PWD" ./perl -Ilib -I. installperl --destdir=/work`,
|
||||||
@@ -91,10 +92,10 @@ func (t Toolchain) newPerlModuleBuild() (pkg.Artifact, string) {
|
|||||||
version = "0.4234"
|
version = "0.4234"
|
||||||
checksum = "ZKxEFG4hE1rqZt52zBL2LRZBMkYzhjb5-cTBXcsyA52EbPeeYyVxU176yAea8-Di"
|
checksum = "ZKxEFG4hE1rqZt52zBL2LRZBMkYzhjb5-cTBXcsyA52EbPeeYyVxU176yAea8-Di"
|
||||||
)
|
)
|
||||||
return t.newViaPerlModuleBuild("Module-Build", version, pkg.NewHTTPGetTar(
|
return t.newViaPerlModuleBuild("Module-Build", version, newTar(
|
||||||
nil, "https://cpan.metacpan.org/authors/id/L/LE/LEONT/"+
|
"https://cpan.metacpan.org/authors/id/L/LE/LEONT/"+
|
||||||
"Module-Build-"+version+".tar.gz",
|
"Module-Build-"+version+".tar.gz",
|
||||||
mustDecode(checksum),
|
checksum,
|
||||||
pkg.TarGzip,
|
pkg.TarGzip,
|
||||||
), nil), version
|
), nil), version
|
||||||
}
|
}
|
||||||
@@ -145,11 +146,11 @@ func (t Toolchain) newPerlLocaleGettext() (pkg.Artifact, string) {
|
|||||||
version = "1.07"
|
version = "1.07"
|
||||||
checksum = "cFq4BKFD1MWSoa7lsrPjpdo9kzPqd0jlRcBFUyL1L1isw8m3D_Sge_ff0MAu_9J3"
|
checksum = "cFq4BKFD1MWSoa7lsrPjpdo9kzPqd0jlRcBFUyL1L1isw8m3D_Sge_ff0MAu_9J3"
|
||||||
)
|
)
|
||||||
return t.newViaPerlMakeMaker("Locale::gettext", version, pkg.NewHTTPGetTar(
|
return t.newViaPerlMakeMaker("Locale::gettext", version, newFromCPAN(
|
||||||
nil, "https://cpan.metacpan.org/authors/id/P/PV/PVANDRY/"+
|
"PVANDRY",
|
||||||
"Locale-gettext-"+version+".tar.gz",
|
"Locale-gettext",
|
||||||
mustDecode(checksum),
|
version,
|
||||||
pkg.TarGzip,
|
checksum,
|
||||||
), nil), version
|
), nil), version
|
||||||
}
|
}
|
||||||
func init() {
|
func init() {
|
||||||
@@ -159,6 +160,8 @@ func init() {
|
|||||||
Name: "perl-Locale::gettext",
|
Name: "perl-Locale::gettext",
|
||||||
Description: "message handling functions",
|
Description: "message handling functions",
|
||||||
Website: "https://metacpan.org/release/Locale-gettext",
|
Website: "https://metacpan.org/release/Locale-gettext",
|
||||||
|
|
||||||
|
ID: 7523,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -167,11 +170,11 @@ func (t Toolchain) newPerlPodParser() (pkg.Artifact, string) {
|
|||||||
version = "1.67"
|
version = "1.67"
|
||||||
checksum = "RdURu9mOfExk_loCp6abxlcQV3FycSNbTqhRS9i6JUqnYfGGEgercK30g0gjYyqe"
|
checksum = "RdURu9mOfExk_loCp6abxlcQV3FycSNbTqhRS9i6JUqnYfGGEgercK30g0gjYyqe"
|
||||||
)
|
)
|
||||||
return t.newViaPerlMakeMaker("Pod::Parser", version, pkg.NewHTTPGetTar(
|
return t.newViaPerlMakeMaker("Pod::Parser", version, newFromCPAN(
|
||||||
nil, "https://cpan.metacpan.org/authors/id/M/MA/MAREKR/"+
|
"MAREKR",
|
||||||
"Pod-Parser-"+version+".tar.gz",
|
"Pod-Parser",
|
||||||
mustDecode(checksum),
|
version,
|
||||||
pkg.TarGzip,
|
checksum,
|
||||||
), nil), version
|
), nil), version
|
||||||
}
|
}
|
||||||
func init() {
|
func init() {
|
||||||
@@ -181,6 +184,8 @@ func init() {
|
|||||||
Name: "perl-Pod::Parser",
|
Name: "perl-Pod::Parser",
|
||||||
Description: "base class for creating POD filters and translators",
|
Description: "base class for creating POD filters and translators",
|
||||||
Website: "https://metacpan.org/release/Pod-Parser",
|
Website: "https://metacpan.org/release/Pod-Parser",
|
||||||
|
|
||||||
|
ID: 3244,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -189,11 +194,11 @@ func (t Toolchain) newPerlSGMLS() (pkg.Artifact, string) {
|
|||||||
version = "1.1"
|
version = "1.1"
|
||||||
checksum = "aZijn4MUqD-wfyZgdcCruCwl4SgDdu25cNmJ4_UvdAk9a7uz4gzMQdoeB6DQ6QOy"
|
checksum = "aZijn4MUqD-wfyZgdcCruCwl4SgDdu25cNmJ4_UvdAk9a7uz4gzMQdoeB6DQ6QOy"
|
||||||
)
|
)
|
||||||
return t.newViaPerlMakeMaker("SGMLS", version, pkg.NewHTTPGetTar(
|
return t.newViaPerlMakeMaker("SGMLS", version, newFromCPAN(
|
||||||
nil, "https://cpan.metacpan.org/authors/id/R/RA/RAAB/"+
|
"RAAB",
|
||||||
"SGMLSpm-"+version+".tar.gz",
|
"SGMLSpm",
|
||||||
mustDecode(checksum),
|
version,
|
||||||
pkg.TarGzip,
|
checksum,
|
||||||
), nil), version
|
), nil), version
|
||||||
}
|
}
|
||||||
func init() {
|
func init() {
|
||||||
@@ -203,6 +208,22 @@ func init() {
|
|||||||
Name: "perl-SGMLS",
|
Name: "perl-SGMLS",
|
||||||
Description: "class for postprocessing the output from the sgmls and nsgmls parsers",
|
Description: "class for postprocessing the output from the sgmls and nsgmls parsers",
|
||||||
Website: "https://metacpan.org/release/RAAB/SGMLSpm-1.1",
|
Website: "https://metacpan.org/release/RAAB/SGMLSpm-1.1",
|
||||||
|
|
||||||
|
ID: 389576,
|
||||||
|
|
||||||
|
latest: func(v *Versions) string {
|
||||||
|
for _, s := range v.Stable {
|
||||||
|
_, m, ok := strings.Cut(s, ".")
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if len(m) > 1 && m[0] == '0' {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
return v.Latest
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -211,11 +232,11 @@ func (t Toolchain) newPerlTermReadKey() (pkg.Artifact, string) {
|
|||||||
version = "2.38"
|
version = "2.38"
|
||||||
checksum = "qerL8Xo7kD0f42PZoiEbmE8Roc_S9pOa27LXelY4DN_0UNy_u5wLrGHI8utNlaiI"
|
checksum = "qerL8Xo7kD0f42PZoiEbmE8Roc_S9pOa27LXelY4DN_0UNy_u5wLrGHI8utNlaiI"
|
||||||
)
|
)
|
||||||
return t.newViaPerlMakeMaker("Term::ReadKey", version, pkg.NewHTTPGetTar(
|
return t.newViaPerlMakeMaker("Term::ReadKey", version, newFromCPAN(
|
||||||
nil, "https://cpan.metacpan.org/authors/id/J/JS/JSTOWE/"+
|
"JSTOWE",
|
||||||
"TermReadKey-"+version+".tar.gz",
|
"TermReadKey",
|
||||||
mustDecode(checksum),
|
version,
|
||||||
pkg.TarGzip,
|
checksum,
|
||||||
), nil), version
|
), nil), version
|
||||||
}
|
}
|
||||||
func init() {
|
func init() {
|
||||||
@@ -225,6 +246,8 @@ func init() {
|
|||||||
Name: "perl-Term::ReadKey",
|
Name: "perl-Term::ReadKey",
|
||||||
Description: "a perl module for simple terminal control",
|
Description: "a perl module for simple terminal control",
|
||||||
Website: "https://metacpan.org/release/TermReadKey",
|
Website: "https://metacpan.org/release/TermReadKey",
|
||||||
|
|
||||||
|
ID: 3372,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -233,11 +256,11 @@ func (t Toolchain) newPerlTextCharWidth() (pkg.Artifact, string) {
|
|||||||
version = "0.04"
|
version = "0.04"
|
||||||
checksum = "G2p5RHU4_HiZ23ZusBA_enTlVMxz0J4esUx4CGcOPhY6xYTbp-aXWRN6lYZpzBw2"
|
checksum = "G2p5RHU4_HiZ23ZusBA_enTlVMxz0J4esUx4CGcOPhY6xYTbp-aXWRN6lYZpzBw2"
|
||||||
)
|
)
|
||||||
return t.newViaPerlMakeMaker("Text::CharWidth", version, pkg.NewHTTPGetTar(
|
return t.newViaPerlMakeMaker("Text::CharWidth", version, newFromCPAN(
|
||||||
nil, "https://cpan.metacpan.org/authors/id/K/KU/KUBOTA/"+
|
"KUBOTA",
|
||||||
"Text-CharWidth-"+version+".tar.gz",
|
"Text-CharWidth",
|
||||||
mustDecode(checksum),
|
version,
|
||||||
pkg.TarGzip,
|
checksum,
|
||||||
), nil), version
|
), nil), version
|
||||||
}
|
}
|
||||||
func init() {
|
func init() {
|
||||||
@@ -247,6 +270,8 @@ func init() {
|
|||||||
Name: "perl-Text::CharWidth",
|
Name: "perl-Text::CharWidth",
|
||||||
Description: "get number of occupied columns of a string on terminal",
|
Description: "get number of occupied columns of a string on terminal",
|
||||||
Website: "https://metacpan.org/release/Text-CharWidth",
|
Website: "https://metacpan.org/release/Text-CharWidth",
|
||||||
|
|
||||||
|
ID: 14380,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -255,11 +280,11 @@ func (t Toolchain) newPerlTextWrapI18N() (pkg.Artifact, string) {
|
|||||||
version = "0.06"
|
version = "0.06"
|
||||||
checksum = "Vmo89qLgxUqyQ6QmWJVqu60aQAUjrNKRjFQSXGnvClxofzRjiCa6idzPgJ4VkixM"
|
checksum = "Vmo89qLgxUqyQ6QmWJVqu60aQAUjrNKRjFQSXGnvClxofzRjiCa6idzPgJ4VkixM"
|
||||||
)
|
)
|
||||||
return t.newViaPerlMakeMaker("Text::WrapI18N", version, pkg.NewHTTPGetTar(
|
return t.newViaPerlMakeMaker("Text::WrapI18N", version, newFromCPAN(
|
||||||
nil, "https://cpan.metacpan.org/authors/id/K/KU/KUBOTA/"+
|
"KUBOTA",
|
||||||
"Text-WrapI18N-"+version+".tar.gz",
|
"Text-WrapI18N",
|
||||||
mustDecode(checksum),
|
version,
|
||||||
pkg.TarGzip,
|
checksum,
|
||||||
), nil,
|
), nil,
|
||||||
PerlTextCharWidth,
|
PerlTextCharWidth,
|
||||||
), version
|
), version
|
||||||
@@ -275,6 +300,8 @@ func init() {
|
|||||||
Dependencies: P{
|
Dependencies: P{
|
||||||
PerlTextCharWidth,
|
PerlTextCharWidth,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
ID: 14385,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -283,11 +310,11 @@ func (t Toolchain) newPerlMIMECharset() (pkg.Artifact, string) {
|
|||||||
version = "1.013.1"
|
version = "1.013.1"
|
||||||
checksum = "Ou_ukcrOa1cgtE3mptinb-os3bdL1SXzbRDFZQF3prrJj-drc3rp_huay7iDLJol"
|
checksum = "Ou_ukcrOa1cgtE3mptinb-os3bdL1SXzbRDFZQF3prrJj-drc3rp_huay7iDLJol"
|
||||||
)
|
)
|
||||||
return t.newViaPerlMakeMaker("MIME::Charset", version, pkg.NewHTTPGetTar(
|
return t.newViaPerlMakeMaker("MIME::Charset", version, newFromCPAN(
|
||||||
nil, "https://cpan.metacpan.org/authors/id/N/NE/NEZUMI/"+
|
"NEZUMI",
|
||||||
"MIME-Charset-"+version+".tar.gz",
|
"MIME-Charset",
|
||||||
mustDecode(checksum),
|
version,
|
||||||
pkg.TarGzip,
|
checksum,
|
||||||
), nil), version
|
), nil), version
|
||||||
}
|
}
|
||||||
func init() {
|
func init() {
|
||||||
@@ -297,34 +324,38 @@ func init() {
|
|||||||
Name: "perl-MIME::Charset",
|
Name: "perl-MIME::Charset",
|
||||||
Description: "Charset Information for MIME",
|
Description: "Charset Information for MIME",
|
||||||
Website: "https://metacpan.org/release/MIME-Charset",
|
Website: "https://metacpan.org/release/MIME-Charset",
|
||||||
|
|
||||||
|
ID: 3070,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t Toolchain) newPerlUnicodeGCString() (pkg.Artifact, string) {
|
func (t Toolchain) newPerlUnicodeLineBreak() (pkg.Artifact, string) {
|
||||||
const (
|
const (
|
||||||
version = "2019.001"
|
version = "2019.001"
|
||||||
checksum = "ZHVkh7EDgAUHnTpvXsnPAuWpgNoBImtY_9_8TIbo2co_WgUwEb0MtXPhI8pAZ5OH"
|
checksum = "ZHVkh7EDgAUHnTpvXsnPAuWpgNoBImtY_9_8TIbo2co_WgUwEb0MtXPhI8pAZ5OH"
|
||||||
)
|
)
|
||||||
return t.newViaPerlMakeMaker("Unicode::GCString", version, pkg.NewHTTPGetTar(
|
return t.newViaPerlMakeMaker("Unicode::LineBreak", version, newFromCPAN(
|
||||||
nil, "https://cpan.metacpan.org/authors/id/N/NE/NEZUMI/"+
|
"NEZUMI",
|
||||||
"Unicode-LineBreak-"+version+".tar.gz",
|
"Unicode-LineBreak",
|
||||||
mustDecode(checksum),
|
version,
|
||||||
pkg.TarGzip,
|
checksum,
|
||||||
), nil,
|
), nil,
|
||||||
PerlMIMECharset,
|
PerlMIMECharset,
|
||||||
), version
|
), version
|
||||||
}
|
}
|
||||||
func init() {
|
func init() {
|
||||||
artifactsM[PerlUnicodeGCString] = Metadata{
|
artifactsM[PerlUnicodeLineBreak] = Metadata{
|
||||||
f: Toolchain.newPerlUnicodeGCString,
|
f: Toolchain.newPerlUnicodeLineBreak,
|
||||||
|
|
||||||
Name: "perl-Unicode::GCString",
|
Name: "perl-Unicode::LineBreak",
|
||||||
Description: "String as Sequence of UAX #29 Grapheme Clusters",
|
Description: "String as Sequence of UAX #29 Grapheme Clusters",
|
||||||
Website: "https://metacpan.org/release/Unicode-LineBreak",
|
Website: "https://metacpan.org/release/Unicode-LineBreak",
|
||||||
|
|
||||||
Dependencies: P{
|
Dependencies: P{
|
||||||
PerlMIMECharset,
|
PerlMIMECharset,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
ID: 6033,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -333,11 +364,11 @@ func (t Toolchain) newPerlYAMLTiny() (pkg.Artifact, string) {
|
|||||||
version = "1.76"
|
version = "1.76"
|
||||||
checksum = "V1MV4KPym1LxSw8CRXqPR3K-l1hGHbT5Ob4t-9xju6R9X_CWyw6hI8wsMaNdHdBY"
|
checksum = "V1MV4KPym1LxSw8CRXqPR3K-l1hGHbT5Ob4t-9xju6R9X_CWyw6hI8wsMaNdHdBY"
|
||||||
)
|
)
|
||||||
return t.newViaPerlMakeMaker("YAML::Tiny", version, pkg.NewHTTPGetTar(
|
return t.newViaPerlMakeMaker("YAML::Tiny", version, newFromCPAN(
|
||||||
nil, "https://cpan.metacpan.org/authors/id/E/ET/ETHER/"+
|
"ETHER",
|
||||||
"YAML-Tiny-"+version+".tar.gz",
|
"YAML-Tiny",
|
||||||
mustDecode(checksum),
|
version,
|
||||||
pkg.TarGzip,
|
checksum,
|
||||||
), nil), version
|
), nil), version
|
||||||
}
|
}
|
||||||
func init() {
|
func init() {
|
||||||
@@ -347,5 +378,7 @@ func init() {
|
|||||||
Name: "perl-YAML::Tiny",
|
Name: "perl-YAML::Tiny",
|
||||||
Description: "read/write YAML files with as little code as possible",
|
Description: "read/write YAML files with as little code as possible",
|
||||||
Website: "https://metacpan.org/release/YAML-Tiny",
|
Website: "https://metacpan.org/release/YAML-Tiny",
|
||||||
|
|
||||||
|
ID: 3549,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,11 +7,11 @@ func (t Toolchain) newPkgConfig() (pkg.Artifact, string) {
|
|||||||
version = "0.29.2"
|
version = "0.29.2"
|
||||||
checksum = "6UsGqEMA8EER_5b9N0b32UCqiRy39B6_RnPfvuslWhtFV1qYD4DfS10crGZN_TP2"
|
checksum = "6UsGqEMA8EER_5b9N0b32UCqiRy39B6_RnPfvuslWhtFV1qYD4DfS10crGZN_TP2"
|
||||||
)
|
)
|
||||||
return t.NewPackage("pkg-config", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("pkg-config", version, newFromGitLab(
|
||||||
nil, "https://gitlab.freedesktop.org/pkg-config/pkg-config/-/archive"+
|
"gitlab.freedesktop.org",
|
||||||
"/pkg-config-"+version+"/pkg-config-pkg-config-"+version+".tar.bz2",
|
"pkg-config/pkg-config",
|
||||||
mustDecode(checksum),
|
"pkg-config-"+version,
|
||||||
pkg.TarBzip2,
|
checksum,
|
||||||
), nil, &MakeHelper{
|
), nil, &MakeHelper{
|
||||||
Generate: "./autogen.sh --no-configure",
|
Generate: "./autogen.sh --no-configure",
|
||||||
Configure: []KV{
|
Configure: []KV{
|
||||||
|
|||||||
@@ -7,11 +7,11 @@ func (t Toolchain) newProcps() (pkg.Artifact, string) {
|
|||||||
version = "4.0.6"
|
version = "4.0.6"
|
||||||
checksum = "pl_fZLvDlv6iZTkm8l_tHFpzTDVFGCiSJEs3eu0zAX6u36AV36P_En8K7JPScRWM"
|
checksum = "pl_fZLvDlv6iZTkm8l_tHFpzTDVFGCiSJEs3eu0zAX6u36AV36P_En8K7JPScRWM"
|
||||||
)
|
)
|
||||||
return t.NewPackage("procps", version, pkg.NewHTTPGetTar(
|
return t.NewPackage("procps", version, newFromGitLab(
|
||||||
nil, "https://gitlab.com/procps-ng/procps/-/archive/"+
|
"gitlab.com",
|
||||||
"v"+version+"/procps-v"+version+".tar.bz2",
|
"procps-ng/procps",
|
||||||
mustDecode(checksum),
|
"v"+version,
|
||||||
pkg.TarBzip2,
|
checksum,
|
||||||
), nil, &MakeHelper{
|
), nil, &MakeHelper{
|
||||||
Generate: "./autogen.sh",
|
Generate: "./autogen.sh",
|
||||||
Configure: []KV{
|
Configure: []KV{
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user