internal/pkg: parallelise scrub

This significantly improves scrubbing performance. Because the cache directory structure is friendly to simultaneous access, entries can be scrubbed concurrently without any synchronisation, as in the sketch below.
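A minimal sketch of that approach, assuming the scrub walks independent top-level cache entries. The names scrub and scrubEntry are hypothetical stand-ins; the actual scrub implementation is not part of the hunk shown below.

package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"sync"
)

// scrubEntry is a hypothetical stand-in for per-entry verification;
// here it only checks that the entry still exists on disk.
func scrubEntry(root, name string) error {
	if _, err := os.Stat(filepath.Join(root, name)); err != nil {
		return fmt.Errorf("scrub %s: %w", name, err)
	}
	return nil
}

// scrub verifies every top-level cache entry in its own goroutine.
// Entries occupy disjoint subtrees, so the workers never touch the
// same files; the only shared state is the errs slice, and each
// worker writes exclusively to its own index.
func scrub(root string) error {
	entries, err := os.ReadDir(root)
	if err != nil {
		return err
	}
	errs := make([]error, len(entries))
	var wg sync.WaitGroup
	for i, entry := range entries {
		wg.Add(1)
		go func(i int, name string) {
			defer wg.Done()
			errs[i] = scrubEntry(root, name)
		}(i, entry.Name())
	}
	wg.Wait()
	return errors.Join(errs...)
}

func main() {
	if err := scrub(os.TempDir()); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}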

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-01-16 02:09:12 +09:00
parent 3499a82785
commit 5936e6a4aa
4 changed files with 296 additions and 156 deletions


@@ -196,15 +196,16 @@ func Flatten(fsys fs.FS, root string, w io.Writer) (n int, err error) {
 }
 
 // HashFS returns a checksum produced by hashing the result of [Flatten].
-func HashFS(fsys fs.FS, root string) (Checksum, error) {
+func HashFS(buf *Checksum, fsys fs.FS, root string) error {
 	h := sha512.New384()
 	if _, err := Flatten(fsys, root, h); err != nil {
-		return Checksum{}, err
+		return err
 	}
-	return (Checksum)(h.Sum(nil)), nil
+	h.Sum(buf[:0])
+	return nil
 }
 
 // HashDir returns a checksum produced by hashing the result of [Flatten].
-func HashDir(pathname *check.Absolute) (Checksum, error) {
-	return HashFS(os.DirFS(pathname.String()), ".")
+func HashDir(buf *Checksum, pathname *check.Absolute) error {
+	return HashFS(buf, os.DirFS(pathname.String()), ".")
 }
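
The new signatures pass the output buffer in rather than returning a Checksum by value, which lets parallel callers write digests straight into preallocated storage. A minimal sketch of that call pattern, assuming Checksum is a 48-byte array (implied by sha512.New384 and h.Sum(buf[:0])); the HashDir body here is a placeholder, not the real Flatten-based implementation.

package main

import (
	"crypto/sha512"
	"fmt"
	"os"
	"sync"
)

// Checksum mirrors the type implied by the diff: a SHA-384 digest.
type Checksum [sha512.Size384]byte

// HashDir is a stand-in matching the new signature; the real
// implementation hashes the output of Flatten. Hashing the path
// here only keeps the sketch self-contained.
func HashDir(buf *Checksum, pathname string) error {
	h := sha512.New384()
	h.Write([]byte(pathname))
	h.Sum(buf[:0])
	return nil
}

func main() {
	dirs := os.Args[1:]
	sums := make([]Checksum, len(dirs))
	errs := make([]error, len(dirs))

	var wg sync.WaitGroup
	for i, dir := range dirs {
		wg.Add(1)
		go func(i int, dir string) {
			defer wg.Done()
			// &sums[i] points into the preallocated slice, so each
			// worker fills its own non-overlapping slot in place.
			errs[i] = HashDir(&sums[i], dir)
		}(i, dir)
	}
	wg.Wait()

	for i, dir := range dirs {
		if errs[i] != nil {
			fmt.Fprintln(os.Stderr, dir, errs[i])
			continue
		}
		fmt.Printf("%x  %s\n", sums[i], dir)
	}
}

Because h.Sum(buf[:0]) appends into the caller's array, each call completes without allocating a result, which is what makes the signature change useful for the parallel scrub.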