internal/pkg: consistency check for on-disk cache
All checks were successful
Test / Create distribution (push) Successful in 43s
Test / Sandbox (push) Successful in 2m32s
Test / ShareFS (push) Successful in 3m43s
Test / Hpkg (push) Successful in 4m29s
Test / Sandbox (race detector) (push) Successful in 4m57s
Test / Hakurei (race detector) (push) Successful in 5m45s
Test / Hakurei (push) Successful in 2m31s
Test / Flake checks (push) Successful in 1m44s
This change adds a method to check on-disk cache consistency and destroy inconsistent entries as they are encountered. This primarily helps verify artifact implementation correctness, but can also repair a cache that got into an inconsistent state from curing a misbehaving artifact, without having to destroy the entire cache.

Signed-off-by: Ophestra <cat@gensokyo.uk>
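For illustration, a minimal caller-side sketch of the new method, written as if it lived in the same package as Cache. The helper name scrubAndReport and the use of the standard errors and log packages are assumptions for this sketch; only Scrub, ScrubError and its fields come from this change.

// scrubAndReport is a hypothetical helper showing how a caller might invoke
// Scrub and inspect the result; it is not part of this commit.
func scrubAndReport(c *Cache) {
	if err := c.Scrub(); err != nil {
		var se *ScrubError
		if errors.As(err, &se) {
			// Inconsistent entries have already been removed from storage;
			// the error only describes what was found.
			log.Printf("scrub: %d checksum mismatches, %d dangling identifiers, %d other errors",
				len(se.ChecksumMismatches), len(se.DanglingIdentifiers), len(se.Errs))
			return
		}
		log.Printf("scrub: %v", err)
	}
}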
@@ -13,6 +13,7 @@ import (
	"path"
	"path/filepath"
	"slices"
	"strings"
	"sync"

	"hakurei.app/container/check"
@@ -240,6 +241,221 @@ func (e *ChecksumMismatchError) Error() string {
		" instead of " + Encode(e.Want)
}

// ScrubError describes the outcome of a [Cache.Scrub] call where errors were
// found and removed from the underlying storage of [Cache].
type ScrubError struct {
	// Content-addressed entries not matching their checksum. This can happen
	// if an incorrect [File] implementation was cured against a non-strict
	// [Cache].
	ChecksumMismatches []ChecksumMismatchError
	// Dangling identifier symlinks. This can happen if the content-addressed
	// entry was removed while scrubbing due to a checksum mismatch.
	DanglingIdentifiers []ID
	// Miscellaneous errors, including [os.ReadDir] on checksum and identifier
	// directories, [Decode] on entry names and [os.RemoveAll] on inconsistent
	// entries.
	Errs []error
}

// Unwrap returns a concatenation of ChecksumMismatches and Errs.
func (e *ScrubError) Unwrap() []error {
	s := make([]error, 0, len(e.ChecksumMismatches)+len(e.Errs))
	for _, err := range e.ChecksumMismatches {
		s = append(s, &err)
	}
	for _, err := range e.Errs {
		s = append(s, err)
	}
	return s
}

// Error returns a multi-line representation of [ScrubError].
func (e *ScrubError) Error() string {
	var segments []string
	if len(e.ChecksumMismatches) > 0 {
		s := "checksum mismatches:\n"
		for _, m := range e.ChecksumMismatches {
			s += m.Error() + "\n"
		}
		segments = append(segments, s)
	}
	if len(e.DanglingIdentifiers) > 0 {
		s := "dangling identifiers:\n"
		for _, id := range e.DanglingIdentifiers {
			s += Encode(id) + "\n"
		}
		segments = append(segments, s)
	}
	if len(e.Errs) > 0 {
		s := "errors during scrub:\n"
		for _, err := range e.Errs {
			s += err.Error() + "\n"
		}
		segments = append(segments, s)
	}
	return strings.Join(segments, "\n")
}

// Scrub frees internal in-memory identifier to content pair cache, verifies all
// cached artifacts against their checksums, checks for dangling identifier
// symlinks and removes them if found.
//
// This method is not safe for concurrent use with any other method.
func (c *Cache) Scrub() error {
	c.identMu.Lock()
	defer c.identMu.Unlock()
	c.checksumMu.Lock()
	defer c.checksumMu.Unlock()

	c.ident = make(map[ID]Checksum)
	c.identErr = make(map[ID]error)

	var se ScrubError

	var (
		ent os.DirEntry
		dir *check.Absolute
	)
	condemnEntry := func() {
		chmodErr, removeErr := removeAll(dir.Append(ent.Name()))
		if chmodErr != nil {
			se.Errs = append(se.Errs, chmodErr)
		}
		if removeErr != nil {
			se.Errs = append(se.Errs, removeErr)
		}
	}

	dir = c.base.Append(dirChecksum)
	if entries, err := os.ReadDir(dir.String()); err != nil {
		se.Errs = append(se.Errs, err)
	} else {
		var got, want Checksum
		for _, ent = range entries {
			if want, err = Decode(ent.Name()); err != nil {
				se.Errs = append(se.Errs, err)
				condemnEntry()
				continue
			}
			if ent.IsDir() {
				if got, err = HashDir(dir.Append(ent.Name())); err != nil {
					se.Errs = append(se.Errs, err)
					continue
				}
			} else if ent.Type().IsRegular() {
				h := sha512.New384()
				var r *os.File
				r, err = os.Open(dir.Append(ent.Name()).String())
				if err != nil {
					se.Errs = append(se.Errs, err)
					continue
				}
				_, err = io.Copy(h, r)
				closeErr := r.Close()
				if closeErr != nil {
					se.Errs = append(se.Errs, closeErr)
				}
				if err != nil {
					se.Errs = append(se.Errs, err)
					continue
				}
				h.Sum(got[:0])
			} else {
				se.Errs = append(se.Errs, InvalidFileModeError(ent.Type()))
				condemnEntry()
				continue
			}

			if got != want {
				se.ChecksumMismatches = append(se.ChecksumMismatches, ChecksumMismatchError{
					Got:  got,
					Want: want,
				})
				condemnEntry()
			}
		}
	}

	dir = c.base.Append(dirIdentifier)
	if entries, err := os.ReadDir(dir.String()); err != nil {
		se.Errs = append(se.Errs, err)
	} else {
		var (
			id       ID
			linkname string
		)
		for _, ent = range entries {
			if id, err = Decode(ent.Name()); err != nil {
				se.Errs = append(se.Errs, err)
				condemnEntry()
				continue
			}

			if linkname, err = os.Readlink(
				dir.Append(ent.Name()).String(),
			); err != nil {
				se.Errs = append(se.Errs, err)
				se.DanglingIdentifiers = append(se.DanglingIdentifiers, id)
				condemnEntry()
				continue
			}

			if _, err = Decode(path.Base(linkname)); err != nil {
				se.Errs = append(se.Errs, err)
				se.DanglingIdentifiers = append(se.DanglingIdentifiers, id)
				condemnEntry()
				continue
			}

			if _, err = os.Stat(dir.Append(ent.Name()).String()); err != nil {
				if !errors.Is(err, os.ErrNotExist) {
					se.Errs = append(se.Errs, err)
				}
				se.DanglingIdentifiers = append(se.DanglingIdentifiers, id)
				condemnEntry()
				continue
			}
		}
	}

	if len(c.identPending) > 0 {
		se.Errs = append(se.Errs, errors.New(
			"scrub began with pending artifacts",
		))
	} else {
		chmodErr, removeErr := removeAll(c.base.Append(dirWork))
		if chmodErr != nil {
			se.Errs = append(se.Errs, chmodErr)
		}
		if removeErr != nil {
			se.Errs = append(se.Errs, removeErr)
		}

		if err := os.Mkdir(
			c.base.Append(dirWork).String(),
			0700,
		); err != nil {
			se.Errs = append(se.Errs, err)
		}

		chmodErr, removeErr = removeAll(c.base.Append(dirTemp))
		if chmodErr != nil {
			se.Errs = append(se.Errs, chmodErr)
		}
		if removeErr != nil {
			se.Errs = append(se.Errs, removeErr)
		}
	}

	if len(se.ChecksumMismatches) > 0 ||
		len(se.DanglingIdentifiers) > 0 ||
		len(se.Errs) > 0 {
		return &se
	} else {
		return nil
	}
}

// loadOrStoreIdent attempts to load a cached [Artifact] by its identifier or
// wait for a pending [Artifact] to cure. If neither is possible, the current
// identifier is stored in identPending and a non-nil channel is returned.