internal/pkg: consistency check for on-disk cache

This change adds a method to check on-disk cache consistency and destroy inconsistent entries as they are encountered. This primarily helps verify artifact implementation correctness, but can also repair a cache that got into an inconsistent state from curing a misbehaving artifact, without having to destroy the entire cache.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-01-05 04:52:59 +09:00
parent 834cb0d40b
commit 0ab6c13c77
2 changed files with 355 additions and 0 deletions
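As a quick orientation before the diff: a minimal sketch of how a caller might drive the new consistency check. Only Cache, Scrub, and ScrubError below come from this commit; the import path and the scrubAndReport helper are assumptions for illustration.

package main

import (
	"errors"
	"log"

	pkg "hakurei.app/internal/pkg" // import path assumed from the commit title
)

// scrubAndReport runs Cache.Scrub and treats a *pkg.ScrubError as a
// successful repair rather than a hard failure: by the time Scrub returns,
// the inconsistent entries it describes have already been destroyed.
func scrubAndReport(c *pkg.Cache) error {
	err := c.Scrub()
	if err == nil {
		return nil // cache is fully consistent
	}
	var se *pkg.ScrubError
	if errors.As(err, &se) {
		log.Printf("cache repaired:\n%v", se)
		return nil
	}
	return err // unexpected error kind; propagate
}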


@@ -13,6 +13,7 @@ import (
"path"
"path/filepath"
"slices"
"strings"
"sync"
"hakurei.app/container/check"
@@ -240,6 +241,221 @@ func (e *ChecksumMismatchError) Error() string {
" instead of " + Encode(e.Want)
}

// ScrubError describes the outcome of a [Cache.Scrub] call in which
// inconsistent entries were found and removed from the underlying storage
// of [Cache].
type ScrubError struct {
// Content-addressed entries not matching their checksum. This can happen
// if an incorrect [File] implementation was cured against a non-strict
// [Cache].
ChecksumMismatches []ChecksumMismatchError
// Dangling identifier symlinks. This can happen if the content-addressed
// entry was removed while scrubbing due to a checksum mismatch.
DanglingIdentifiers []ID
// Miscellaneous errors, including [os.ReadDir] on checksum and identifier
// directories, [Decode] on entry names and [os.RemoveAll] on inconsistent
// entries.
Errs []error
}

// Unwrap returns a concatenation of ChecksumMismatches and Errs.
func (e *ScrubError) Unwrap() []error {
s := make([]error, 0, len(e.ChecksumMismatches)+len(e.Errs))
for _, err := range e.ChecksumMismatches {
s = append(s, &err)
}
for _, err := range e.Errs {
s = append(s, err)
}
return s
}

// Error returns a multi-line representation of [ScrubError].
func (e *ScrubError) Error() string {
var segments []string
if len(e.ChecksumMismatches) > 0 {
s := "checksum mismatches:\n"
for _, m := range e.ChecksumMismatches {
s += m.Error() + "\n"
}
segments = append(segments, s)
}
if len(e.DanglingIdentifiers) > 0 {
s := "dangling identifiers:\n"
for _, id := range e.DanglingIdentifiers {
s += Encode(id) + "\n"
}
segments = append(segments, s)
}
if len(e.Errs) > 0 {
s := "errors during scrub:\n"
for _, err := range e.Errs {
s += err.Error() + "\n"
}
segments = append(segments, s)
}
return strings.Join(segments, "\n")
}

// Scrub discards the in-memory identifier lookup caches, verifies all cached
// artifacts against their checksums, and checks for and removes dangling
// identifier symlinks.
//
// This method is not safe for concurrent use with any other method.
func (c *Cache) Scrub() error {
c.identMu.Lock()
defer c.identMu.Unlock()
c.checksumMu.Lock()
defer c.checksumMu.Unlock()
c.ident = make(map[ID]Checksum)
c.identErr = make(map[ID]error)
var se ScrubError
var (
ent os.DirEntry
dir *check.Absolute
)
condemnEntry := func() {
chmodErr, removeErr := removeAll(dir.Append(ent.Name()))
if chmodErr != nil {
se.Errs = append(se.Errs, chmodErr)
}
if removeErr != nil {
se.Errs = append(se.Errs, removeErr)
}
}
dir = c.base.Append(dirChecksum)
if entries, err := os.ReadDir(dir.String()); err != nil {
se.Errs = append(se.Errs, err)
} else {
var got, want Checksum
for _, ent = range entries {
if want, err = Decode(ent.Name()); err != nil {
se.Errs = append(se.Errs, err)
condemnEntry()
continue
}
if ent.IsDir() {
if got, err = HashDir(dir.Append(ent.Name())); err != nil {
se.Errs = append(se.Errs, err)
continue
}
} else if ent.Type().IsRegular() {
h := sha512.New384()
var r *os.File
r, err = os.Open(dir.Append(ent.Name()).String())
if err != nil {
se.Errs = append(se.Errs, err)
continue
}
_, err = io.Copy(h, r)
closeErr := r.Close()
if closeErr != nil {
se.Errs = append(se.Errs, closeErr)
}
if err != nil {
se.Errs = append(se.Errs, err)
continue
}
h.Sum(got[:0])
} else {
se.Errs = append(se.Errs, InvalidFileModeError(ent.Type()))
condemnEntry()
continue
}
if got != want {
se.ChecksumMismatches = append(se.ChecksumMismatches, ChecksumMismatchError{
Got: got,
Want: want,
})
condemnEntry()
}
}
}
dir = c.base.Append(dirIdentifier)
if entries, err := os.ReadDir(dir.String()); err != nil {
se.Errs = append(se.Errs, err)
} else {
var (
id ID
linkname string
)
for _, ent = range entries {
if id, err = Decode(ent.Name()); err != nil {
se.Errs = append(se.Errs, err)
condemnEntry()
continue
}
if linkname, err = os.Readlink(
dir.Append(ent.Name()).String(),
); err != nil {
se.Errs = append(se.Errs, err)
se.DanglingIdentifiers = append(se.DanglingIdentifiers, id)
condemnEntry()
continue
}
if _, err = Decode(path.Base(linkname)); err != nil {
se.Errs = append(se.Errs, err)
se.DanglingIdentifiers = append(se.DanglingIdentifiers, id)
condemnEntry()
continue
}
if _, err = os.Stat(dir.Append(ent.Name()).String()); err != nil {
if !errors.Is(err, os.ErrNotExist) {
se.Errs = append(se.Errs, err)
}
se.DanglingIdentifiers = append(se.DanglingIdentifiers, id)
condemnEntry()
continue
}
}
}
if len(c.identPending) > 0 {
se.Errs = append(se.Errs, errors.New(
"scrub began with pending artifacts",
))
} else {
chmodErr, removeErr := removeAll(c.base.Append(dirWork))
if chmodErr != nil {
se.Errs = append(se.Errs, chmodErr)
}
if removeErr != nil {
se.Errs = append(se.Errs, removeErr)
}
if err := os.Mkdir(
c.base.Append(dirWork).String(),
0700,
); err != nil {
se.Errs = append(se.Errs, err)
}
chmodErr, removeErr = removeAll(c.base.Append(dirTemp))
if chmodErr != nil {
se.Errs = append(se.Errs, chmodErr)
}
if removeErr != nil {
se.Errs = append(se.Errs, removeErr)
}
}
if len(se.ChecksumMismatches) > 0 ||
len(se.DanglingIdentifiers) > 0 ||
len(se.Errs) > 0 {
return &se
} else {
return nil
}
}

// loadOrStoreIdent attempts to load a cached [Artifact] by its identifier or
// wait for a pending [Artifact] to cure. If neither is possible, the current
// identifier is stored in identPending and a non-nil channel is returned.
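One property worth noting in the ScrubError definition above: because Unwrap returns a slice, the standard library's multi-error traversal (Go 1.20+) can reach each wrapped value, so callers can test for a specific class of inconsistency. A small sketch, with hadChecksumMismatch being a hypothetical helper; note that taking &err inside the Unwrap loop is only correct under the per-iteration loop-variable semantics of Go 1.22 and later.

package main

import (
	"errors"

	pkg "hakurei.app/internal/pkg" // import path assumed
)

// hadChecksumMismatch reports whether a scrub found at least one checksum
// mismatch. errors.As walks the slice returned by (*ScrubError).Unwrap, where
// mismatches are stored as *pkg.ChecksumMismatchError.
func hadChecksumMismatch(err error) bool {
	var cm *pkg.ChecksumMismatchError
	return errors.As(err, &cm)
}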


@@ -4,6 +4,7 @@ import (
"archive/tar"
"bytes"
"crypto/sha512"
"encoding/base64"
"encoding/binary"
"errors"
"fmt"
@@ -190,6 +191,7 @@ func checkWithCache(t *testing.T, testCases []cacheTestCase) {
}
})
var scrubFunc func() error // scrub after hashing
if c, err := pkg.New(base); err != nil {
t.Fatalf("New: error = %v", err)
} else {
@@ -197,6 +199,7 @@ func checkWithCache(t *testing.T, testCases []cacheTestCase) {
tc.early(t, base)
}
tc.f(t, base, c)
scrubFunc = c.Scrub
}
if checksum, err := pkg.HashDir(base); err != nil {
@@ -207,6 +210,10 @@ func checkWithCache(t *testing.T, testCases []cacheTestCase) {
Want: tc.want,
})
}
if err := scrubFunc(); err != nil {
t.Fatal("cache contains inconsistencies\n\n" + err.Error())
}
})
}
}
@@ -560,12 +567,14 @@ func TestCache(t *testing.T) {
}()
<-ready
wCureDone := make(chan struct{})
go func() {
if _, _, err := c.Cure(overrideIdent{pkg.ID{0xff}, stubArtifact{
kind: pkg.KindTar,
}}); !reflect.DeepEqual(err, wantErr) {
panic(fmt.Sprintf("Cure: error = %v, want %v", err, wantErr))
}
close(wCureDone)
}()
// check cache activity while a cure is blocking
@@ -585,6 +594,13 @@ func TestCache(t *testing.T) {
}}, nil, pkg.Checksum{}, errors.New("non-file artifact produced regular file")},
})
wantErrScrub := &pkg.ScrubError{
Errs: []error{errors.New("scrub began with pending artifacts")},
}
if err := c.Scrub(); !reflect.DeepEqual(err, wantErrScrub) {
t.Fatalf("Scrub: error = %#v, want %#v", err, wantErrScrub)
}
identPendingVal := reflect.ValueOf(c).Elem().FieldByName("identPending")
identPending := reflect.NewAt(
identPendingVal.Type(),
@@ -593,6 +609,74 @@ func TestCache(t *testing.T) {
notify := identPending[pkg.ID{0xff}]
go close(n)
<-notify
<-wCureDone
}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C")},
{"scrub", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
cureMany(t, c, []cureStep{
{"bad measured file", newStubFile(
pkg.KindHTTPGet,
pkg.Checksum{0xfe, 0},
&pkg.Checksum{0xff, 0},
[]byte{0}, nil,
), base.Append(
"identifier",
pkg.Encode(pkg.Checksum{0xfe, 0}),
), pkg.Checksum{0xff, 0}, nil},
})
for _, p := range [][]string{
{"identifier", "invalid"},
{"identifier", pkg.Encode(pkg.ID{0xfe, 0xff})},
{"checksum", "invalid"},
} {
if err := os.WriteFile(
base.Append(p...).String(),
nil,
0400,
); err != nil {
t.Fatal(err)
}
}
for _, p := range [][]string{
{"../nonexistent", "checksum", pkg.Encode(pkg.Checksum{0xff, 0xff})},
{"../nonexistent", "identifier", pkg.Encode(pkg.Checksum{0xfe, 0xfe})},
} {
if err := os.Symlink(
p[0],
base.Append(p[1:]...).String(),
); err != nil {
t.Fatal(err)
}
}
wantErr := &pkg.ScrubError{
ChecksumMismatches: []pkg.ChecksumMismatchError{
{Got: pkg.MustDecode(
"vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX",
), Want: pkg.Checksum{0xff, 0}},
},
DanglingIdentifiers: []pkg.ID{
{0xfe, 0},
{0xfe, 0xfe},
{0xfe, 0xff},
},
Errs: []error{
pkg.InvalidFileModeError(fs.ModeSymlink),
base64.CorruptInputError(4),
base64.CorruptInputError(8),
&os.PathError{
Op: "readlink",
Path: base.Append("identifier", pkg.Encode(pkg.ID{0xfe, 0xff})).String(),
Err: syscall.EINVAL,
},
base64.CorruptInputError(4),
},
}
if err := c.Scrub(); !reflect.DeepEqual(err, wantErr) {
t.Fatalf("Scrub: error =\n%s\nwant\n%s", err, wantErr)
}
}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C")},
}
checkWithCache(t, testCases)
@@ -640,6 +724,61 @@ func TestErrors(t *testing.T) {
}
}

func TestScrubError(t *testing.T) {
t.Parallel()
testCases := []struct {
name string
err pkg.ScrubError
want string
unwrap []error
}{
{"full", pkg.ScrubError{
ChecksumMismatches: []pkg.ChecksumMismatchError{
{Want: pkg.MustDecode("CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
},
DanglingIdentifiers: []pkg.ID{
(pkg.ID)(bytes.Repeat([]byte{0x75, 0xe6, 0x9d, 0x6d, 0xe7, 0x9f}, 8)),
(pkg.ID)(bytes.Repeat([]byte{0x71, 0xa7, 0xde, 0x6d, 0xa6, 0xde}, 8)),
},
Errs: []error{
stub.UniqueError(0xcafe),
stub.UniqueError(0xbad),
stub.UniqueError(0xff),
},
}, `checksum mismatches:
got AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA instead of CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN
dangling identifiers:
deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef
cafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe
errors during scrub:
unique error 51966 injected by the test suite
unique error 2989 injected by the test suite
unique error 255 injected by the test suite
`, []error{
&pkg.ChecksumMismatchError{Want: pkg.MustDecode("CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
stub.UniqueError(0xcafe),
stub.UniqueError(0xbad),
stub.UniqueError(0xff),
}},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
if got := tc.err.Error(); got != tc.want {
t.Errorf("Error:\n\n%s\n\nwant\n\n%s", got, tc.want)
}
if unwrap := tc.err.Unwrap(); !reflect.DeepEqual(unwrap, tc.unwrap) {
t.Errorf("Unwrap: %#v, want %#v", unwrap, tc.unwrap)
}
})
}
}

func TestNew(t *testing.T) {
t.Parallel()