internal/pkg: improve artifact interface
All checks were successful
Test / Hakurei (race detector) (push) Successful in 5m52s
Test / Create distribution (push) Successful in 43s
Test / Sandbox (push) Successful in 2m35s
Test / Hakurei (push) Successful in 3m36s
Test / ShareFS (push) Successful in 3m41s
Test / Hpkg (push) Successful in 4m19s
Test / Sandbox (race detector) (push) Successful in 4m52s
Test / Flake checks (push) Successful in 1m53s
This moves all cache I/O code to Cache. Artifact now only contains methods for constructing its actual contents.

Signed-off-by: Ophestra <cat@gensokyo.uk>
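For orientation, below is a minimal caller-side sketch of the reworked API as it appears in this diff (check.NewAbs, pkg.New, pkg.NewHTTPGet, Cache.Cure, pkg.Ident, pkg.Encode, pkg.MustDecode). It is an assumption-laden illustration, not code from the commit: the cache directory, URL and checksum value are placeholders, and hakurei.app/internal/pkg is only importable from code inside the module.

package main

import (
	"context"
	"log"
	"net/http"

	"hakurei.app/container/check"
	"hakurei.app/internal/pkg"
)

func main() {
	// Placeholder cache directory; any absolute path works here.
	base, err := check.NewAbs("/var/cache/hakurei/pkg")
	if err != nil {
		log.Fatal(err)
	}
	cache, err := pkg.New(base)
	if err != nil {
		log.Fatal(err)
	}

	// Artifacts are now constructed without a *Cache: NewHTTPGet is a free
	// function rather than a method on Cache.
	f, err := pkg.NewHTTPGet(
		context.Background(),
		http.DefaultClient,
		"https://example.org/artifact", // placeholder URL
		// Placeholder checksum (a value borrowed from the test data in this diff).
		pkg.MustDecode("4WHaMvRRcCac1uAyXnEklEd2YaNQBj6rXlfMntX9GgYLij3By1znv5QYPGJHYQIH"),
	)
	if err != nil {
		log.Fatal(err)
	}

	// All cache I/O now goes through Cache: Cure stores the cured artifact
	// and returns its pathname and checksum, replacing the per-artifact
	// Hash and Pathname methods removed by this commit.
	pathname, checksum, err := cache.Cure(f)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(pathname, pkg.Encode(checksum), pkg.Encode(pkg.Ident(f)))
}

With this split, an Artifact only describes how to construct its contents (Kind, Params, Dependencies, Cure, and Data for File), while identifier derivation, checksum bookkeeping and filesystem access live entirely in Cache.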
@@ -3,11 +3,10 @@ package pkg
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"crypto/sha512"
|
"crypto/sha512"
|
||||||
"errors"
|
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
|
||||||
"sync"
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
"hakurei.app/container/check"
|
"hakurei.app/container/check"
|
||||||
)
|
)
|
||||||
@@ -17,38 +16,34 @@ type httpArtifact struct {
|
|||||||
// Caller-supplied request.
|
// Caller-supplied request.
|
||||||
req *http.Request
|
req *http.Request
|
||||||
|
|
||||||
// Caller-supplied checksum of the response body, also used as the
|
// Caller-supplied checksum of the response body. This is validated during
|
||||||
// identifier. This is validated during curing.
|
// curing and the first call to Data.
|
||||||
id ID
|
checksum Checksum
|
||||||
|
|
||||||
// doFunc is the Do method of [http.Client] supplied by the caller.
|
// doFunc is the Do method of [http.Client] supplied by the caller.
|
||||||
doFunc func(req *http.Request) (*http.Response, error)
|
doFunc func(req *http.Request) (*http.Response, error)
|
||||||
|
|
||||||
// Instance of [Cache] to submit the cured artifact to.
|
|
||||||
c *Cache
|
|
||||||
// Response body read to EOF.
|
// Response body read to EOF.
|
||||||
data []byte
|
data []byte
|
||||||
// Populated when submitting to or loading from [Cache].
|
|
||||||
pathname *check.Absolute
|
|
||||||
|
|
||||||
// Synchronises access to pathname and data.
|
// Synchronises access to data.
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewHTTP returns a new [File] backed by the supplied client and request. If
|
// NewHTTP returns a new [File] backed by the supplied client and request. If
|
||||||
// c is nil, [http.DefaultClient] is used instead.
|
// c is nil, [http.DefaultClient] is used instead.
|
||||||
func (c *Cache) NewHTTP(hc *http.Client, req *http.Request, checksum Checksum) File {
|
func NewHTTP(c *http.Client, req *http.Request, checksum Checksum) File {
|
||||||
if hc == nil {
|
if c == nil {
|
||||||
hc = http.DefaultClient
|
c = http.DefaultClient
|
||||||
}
|
}
|
||||||
return &httpArtifact{req: req, id: checksum, doFunc: hc.Do, c: c}
|
return &httpArtifact{req: req, checksum: checksum, doFunc: c.Do}
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewHTTPGet returns a new [File] backed by the supplied client. A GET request
|
// NewHTTPGet returns a new [File] backed by the supplied client. A GET request
|
||||||
// is set up for url. If c is nil, [http.DefaultClient] is used instead.
|
// is set up for url. If c is nil, [http.DefaultClient] is used instead.
|
||||||
func (c *Cache) NewHTTPGet(
|
func NewHTTPGet(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
hc *http.Client,
|
c *http.Client,
|
||||||
url string,
|
url string,
|
||||||
checksum Checksum,
|
checksum Checksum,
|
||||||
) (File, error) {
|
) (File, error) {
|
||||||
@@ -56,14 +51,25 @@ func (c *Cache) NewHTTPGet(
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return c.NewHTTP(hc, req, checksum), nil
|
return NewHTTP(c, req, checksum), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Kind returns the hardcoded [Kind] constant.
|
// Kind returns the hardcoded [Kind] constant.
|
||||||
func (a *httpArtifact) Kind() Kind { return KindHTTP }
|
func (a *httpArtifact) Kind() Kind { return KindHTTP }
|
||||||
|
|
||||||
// ID returns the caller-supplied hash of the response body.
|
// ID returns the caller-supplied hash of the response body.
|
||||||
func (a *httpArtifact) ID() ID { return a.id }
|
func (a *httpArtifact) ID() ID { return a.checksum }
|
||||||
|
|
||||||
|
// Params is unreachable.
|
||||||
|
func (a *httpArtifact) Params() []byte {
|
||||||
|
panic("not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dependencies returns a nil slice.
|
||||||
|
func (a *httpArtifact) Dependencies() []Artifact { return nil }
|
||||||
|
|
||||||
|
// Checksum returns the address to the caller-supplied checksum.
|
||||||
|
func (a *httpArtifact) Checksum() *Checksum { return &a.checksum }
|
||||||
|
|
||||||
// ResponseStatusError is returned for a response returned by an [http.Client]
|
// ResponseStatusError is returned for a response returned by an [http.Client]
|
||||||
// with a status code other than [http.StatusOK].
|
// with a status code other than [http.StatusOK].
|
||||||
@@ -95,44 +101,13 @@ func (a *httpArtifact) do() (data []byte, err error) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Hash cures the [Artifact] and returns its hash. The return value is always
|
// Cure returns syscall.ENOTSUP. Callers should use Data instead.
|
||||||
// identical to that of the ID method.
|
func (a *httpArtifact) Cure(*check.Absolute, CacheDataFunc) error {
|
||||||
func (a *httpArtifact) Hash() (Checksum, error) { _, err := a.Pathname(); return a.id, err }
|
return syscall.ENOTSUP
|
||||||
|
|
||||||
// Pathname cures the [Artifact] and returns its pathname in the [Cache].
|
|
||||||
func (a *httpArtifact) Pathname() (pathname *check.Absolute, err error) {
|
|
||||||
a.mu.Lock()
|
|
||||||
defer a.mu.Unlock()
|
|
||||||
|
|
||||||
if a.pathname != nil {
|
|
||||||
return a.pathname, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if a.data != nil {
|
|
||||||
pathname, err = a.c.StoreFile(
|
|
||||||
a.id, a.data,
|
|
||||||
(*Checksum)(&a.id),
|
|
||||||
true,
|
|
||||||
)
|
|
||||||
if err == nil {
|
|
||||||
a.pathname = pathname
|
|
||||||
}
|
|
||||||
return
|
|
||||||
} else {
|
|
||||||
a.pathname, a.data, _, err = a.c.LoadOrStoreFile(
|
|
||||||
a.id, a.do,
|
|
||||||
(*Checksum)(&a.id),
|
|
||||||
true,
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
a.pathname, a.data = nil, nil
|
|
||||||
}
|
|
||||||
return a.pathname, err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Data completes the http request and returns the resulting response body read
|
// Data completes the http request and returns the resulting response body read
|
||||||
// to EOF. Data does not write to the underlying [Cache].
|
// to EOF. Data does not interact with the filesystem.
|
||||||
func (a *httpArtifact) Data() (data []byte, err error) {
|
func (a *httpArtifact) Data() (data []byte, err error) {
|
||||||
a.mu.Lock()
|
a.mu.Lock()
|
||||||
defer a.mu.Unlock()
|
defer a.mu.Unlock()
|
||||||
@@ -142,23 +117,14 @@ func (a *httpArtifact) Data() (data []byte, err error) {
|
|||||||
return a.data, nil
|
return a.data, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if a.pathname, a.data, err = a.c.LoadFile(a.id); err == nil {
|
|
||||||
return a.data, nil
|
|
||||||
} else {
|
|
||||||
a.pathname, a.data = nil, nil
|
|
||||||
if !errors.Is(err, os.ErrNotExist) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if data, err = a.do(); err != nil {
|
if data, err = a.do(); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
h := sha512.New384()
|
h := sha512.New384()
|
||||||
h.Write(data)
|
h.Write(data)
|
||||||
if got := (Checksum)(h.Sum(nil)); got != a.id {
|
if got := (Checksum)(h.Sum(nil)); got != a.checksum {
|
||||||
return nil, &ChecksumMismatchError{got, a.id}
|
return nil, &ChecksumMismatchError{got, a.checksum}
|
||||||
}
|
}
|
||||||
a.data = data
|
a.data = data
|
||||||
return
|
return
|
||||||
|
|||||||
@@ -34,7 +34,7 @@ func TestHTTP(t *testing.T) {
|
|||||||
checkWithCache(t, []cacheTestCase{
|
checkWithCache(t, []cacheTestCase{
|
||||||
{"direct", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
{"direct", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
var got []byte
|
var got []byte
|
||||||
if f, err := c.NewHTTPGet(
|
if f, err := pkg.NewHTTPGet(
|
||||||
t.Context(),
|
t.Context(),
|
||||||
&client,
|
&client,
|
||||||
"file:///testdata",
|
"file:///testdata",
|
||||||
@@ -45,15 +45,15 @@ func TestHTTP(t *testing.T) {
|
|||||||
t.Fatalf("Data: error = %v", err)
|
t.Fatalf("Data: error = %v", err)
|
||||||
} else if string(got) != testdata {
|
} else if string(got) != testdata {
|
||||||
t.Fatalf("Data: %x, want %x", got, testdata)
|
t.Fatalf("Data: %x, want %x", got, testdata)
|
||||||
} else if gotIdent := f.ID(); gotIdent != testdataChecksum {
|
} else if gotIdent := pkg.Ident(f); gotIdent != testdataChecksum {
|
||||||
t.Fatalf("ID: %x, want %x", gotIdent, testdataChecksum)
|
t.Fatalf("Ident: %x, want %x", gotIdent, testdataChecksum)
|
||||||
}
|
}
|
||||||
|
|
||||||
// check direct validation
|
// check direct validation
|
||||||
wantErrMismatch := &pkg.ChecksumMismatchError{
|
wantErrMismatch := &pkg.ChecksumMismatchError{
|
||||||
Got: testdataChecksum,
|
Got: testdataChecksum,
|
||||||
}
|
}
|
||||||
if f, err := c.NewHTTPGet(
|
if f, err := pkg.NewHTTPGet(
|
||||||
t.Context(),
|
t.Context(),
|
||||||
&client,
|
&client,
|
||||||
"file:///testdata",
|
"file:///testdata",
|
||||||
@@ -62,13 +62,13 @@ func TestHTTP(t *testing.T) {
|
|||||||
t.Fatalf("NewHTTPGet: error = %v", err)
|
t.Fatalf("NewHTTPGet: error = %v", err)
|
||||||
} else if _, err = f.Data(); !reflect.DeepEqual(err, wantErrMismatch) {
|
} else if _, err = f.Data(); !reflect.DeepEqual(err, wantErrMismatch) {
|
||||||
t.Fatalf("Data: error = %#v, want %#v", err, wantErrMismatch)
|
t.Fatalf("Data: error = %#v, want %#v", err, wantErrMismatch)
|
||||||
} else if gotIdent := f.ID(); gotIdent != (pkg.Checksum{}) {
|
} else if gotIdent := pkg.Ident(f); gotIdent != (pkg.Checksum{}) {
|
||||||
t.Fatalf("ID: %x, want %x", gotIdent, pkg.Checksum{})
|
t.Fatalf("Ident: %x, want %x", gotIdent, pkg.Checksum{})
|
||||||
}
|
}
|
||||||
|
|
||||||
// check direct response error
|
// check direct response error
|
||||||
wantErrNotFound := pkg.ResponseStatusError(http.StatusNotFound)
|
wantErrNotFound := pkg.ResponseStatusError(http.StatusNotFound)
|
||||||
if f, err := c.NewHTTPGet(
|
if f, err := pkg.NewHTTPGet(
|
||||||
t.Context(),
|
t.Context(),
|
||||||
&client,
|
&client,
|
||||||
"file:///nonexistent",
|
"file:///nonexistent",
|
||||||
@@ -77,13 +77,13 @@ func TestHTTP(t *testing.T) {
|
|||||||
t.Fatalf("NewHTTPGet: error = %v", err)
|
t.Fatalf("NewHTTPGet: error = %v", err)
|
||||||
} else if _, err = f.Data(); !reflect.DeepEqual(err, wantErrNotFound) {
|
} else if _, err = f.Data(); !reflect.DeepEqual(err, wantErrNotFound) {
|
||||||
t.Fatalf("Data: error = %#v, want %#v", err, wantErrNotFound)
|
t.Fatalf("Data: error = %#v, want %#v", err, wantErrNotFound)
|
||||||
} else if gotIdent := f.ID(); gotIdent != (pkg.Checksum{}) {
|
} else if gotIdent := pkg.Ident(f); gotIdent != (pkg.Checksum{}) {
|
||||||
t.Fatalf("ID: %x, want %x", gotIdent, pkg.Checksum{})
|
t.Fatalf("Ident: %x, want %x", gotIdent, pkg.Checksum{})
|
||||||
}
|
}
|
||||||
}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C")},
|
}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C")},
|
||||||
|
|
||||||
{"load or store", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
{"cure", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
f, err := c.NewHTTPGet(
|
f, err := pkg.NewHTTPGet(
|
||||||
t.Context(),
|
t.Context(),
|
||||||
&client,
|
&client,
|
||||||
"file:///testdata",
|
"file:///testdata",
|
||||||
@@ -97,18 +97,16 @@ func TestHTTP(t *testing.T) {
|
|||||||
"identifier",
|
"identifier",
|
||||||
testdataChecksumString,
|
testdataChecksumString,
|
||||||
)
|
)
|
||||||
var pathname *check.Absolute
|
var (
|
||||||
if pathname, err = f.Pathname(); err != nil {
|
pathname *check.Absolute
|
||||||
t.Fatalf("Pathname: error = %v", err)
|
checksum pkg.Checksum
|
||||||
|
)
|
||||||
|
if pathname, checksum, err = c.Cure(f); err != nil {
|
||||||
|
t.Fatalf("Cure: error = %v", err)
|
||||||
} else if !pathname.Is(wantPathname) {
|
} else if !pathname.Is(wantPathname) {
|
||||||
t.Fatalf("Pathname: %q, want %q", pathname, wantPathname)
|
t.Fatalf("Cure: %q, want %q", pathname, wantPathname)
|
||||||
}
|
|
||||||
|
|
||||||
var checksum pkg.Checksum
|
|
||||||
if checksum, err = f.Hash(); err != nil {
|
|
||||||
t.Fatalf("Hash: error = %v", err)
|
|
||||||
} else if checksum != testdataChecksum {
|
} else if checksum != testdataChecksum {
|
||||||
t.Fatalf("Hash: %x, want %x", checksum, testdataChecksum)
|
t.Fatalf("Cure: %x, want %x", checksum, testdataChecksum)
|
||||||
}
|
}
|
||||||
|
|
||||||
var got []byte
|
var got []byte
|
||||||
@@ -116,12 +114,12 @@ func TestHTTP(t *testing.T) {
|
|||||||
t.Fatalf("Data: error = %v", err)
|
t.Fatalf("Data: error = %v", err)
|
||||||
} else if string(got) != testdata {
|
} else if string(got) != testdata {
|
||||||
t.Fatalf("Data: %x, want %x", got, testdata)
|
t.Fatalf("Data: %x, want %x", got, testdata)
|
||||||
} else if gotIdent := f.ID(); gotIdent != testdataChecksum {
|
} else if gotIdent := pkg.Ident(f); gotIdent != testdataChecksum {
|
||||||
t.Fatalf("ID: %x, want %x", gotIdent, testdataChecksum)
|
t.Fatalf("Ident: %x, want %x", gotIdent, testdataChecksum)
|
||||||
}
|
}
|
||||||
|
|
||||||
// check load from cache
|
// check load from cache
|
||||||
if f, err = c.NewHTTPGet(
|
if f, err = pkg.NewHTTPGet(
|
||||||
t.Context(),
|
t.Context(),
|
||||||
&client,
|
&client,
|
||||||
"file:///testdata",
|
"file:///testdata",
|
||||||
@@ -132,57 +130,23 @@ func TestHTTP(t *testing.T) {
|
|||||||
t.Fatalf("Data: error = %v", err)
|
t.Fatalf("Data: error = %v", err)
|
||||||
} else if string(got) != testdata {
|
} else if string(got) != testdata {
|
||||||
t.Fatalf("Data: %x, want %x", got, testdata)
|
t.Fatalf("Data: %x, want %x", got, testdata)
|
||||||
} else if gotIdent := f.ID(); gotIdent != testdataChecksum {
|
} else if gotIdent := pkg.Ident(f); gotIdent != testdataChecksum {
|
||||||
t.Fatalf("ID: %x, want %x", gotIdent, testdataChecksum)
|
t.Fatalf("Ident: %x, want %x", gotIdent, testdataChecksum)
|
||||||
}
|
}
|
||||||
|
|
||||||
// check error passthrough
|
// check error passthrough
|
||||||
wantErrNotFound := pkg.ResponseStatusError(http.StatusNotFound)
|
wantErrNotFound := pkg.ResponseStatusError(http.StatusNotFound)
|
||||||
if f, err = c.NewHTTPGet(
|
if f, err = pkg.NewHTTPGet(
|
||||||
t.Context(),
|
t.Context(),
|
||||||
&client,
|
&client,
|
||||||
"file:///nonexistent",
|
"file:///nonexistent",
|
||||||
pkg.Checksum{},
|
pkg.Checksum{},
|
||||||
); err != nil {
|
); err != nil {
|
||||||
t.Fatalf("NewHTTPGet: error = %v", err)
|
t.Fatalf("NewHTTPGet: error = %v", err)
|
||||||
} else if _, err = f.Pathname(); !reflect.DeepEqual(err, wantErrNotFound) {
|
} else if _, _, err = c.Cure(f); !reflect.DeepEqual(err, wantErrNotFound) {
|
||||||
t.Fatalf("Pathname: error = %#v, want %#v", err, wantErrNotFound)
|
t.Fatalf("Pathname: error = %#v, want %#v", err, wantErrNotFound)
|
||||||
} else if gotIdent := f.ID(); gotIdent != (pkg.Checksum{}) {
|
} else if gotIdent := pkg.Ident(f); gotIdent != (pkg.Checksum{}) {
|
||||||
t.Fatalf("ID: %x, want %x", gotIdent, pkg.Checksum{})
|
t.Fatalf("Ident: %x, want %x", gotIdent, pkg.Checksum{})
|
||||||
}
|
|
||||||
}, pkg.MustDecode("4WHaMvRRcCac1uAyXnEklEd2YaNQBj6rXlfMntX9GgYLij3By1znv5QYPGJHYQIH")},
|
|
||||||
|
|
||||||
{"store", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
|
||||||
var (
|
|
||||||
got []byte
|
|
||||||
pathname *check.Absolute
|
|
||||||
checksum pkg.Checksum
|
|
||||||
)
|
|
||||||
wantPathname := base.Append(
|
|
||||||
"identifier",
|
|
||||||
testdataChecksumString,
|
|
||||||
)
|
|
||||||
if f, err := c.NewHTTPGet(
|
|
||||||
t.Context(),
|
|
||||||
&client,
|
|
||||||
"file:///testdata",
|
|
||||||
testdataChecksum,
|
|
||||||
); err != nil {
|
|
||||||
t.Fatalf("NewHTTPGet: error = %v", err)
|
|
||||||
} else if got, err = f.Data(); err != nil {
|
|
||||||
t.Fatalf("Data: error = %v", err)
|
|
||||||
} else if string(got) != testdata {
|
|
||||||
t.Fatalf("Data: %x, want %x", got, testdata)
|
|
||||||
} else if gotIdent := f.ID(); gotIdent != testdataChecksum {
|
|
||||||
t.Fatalf("ID: %x, want %x", gotIdent, testdataChecksum)
|
|
||||||
} else if pathname, err = f.Pathname(); err != nil {
|
|
||||||
t.Fatalf("Pathname: error = %v", err)
|
|
||||||
} else if !pathname.Is(wantPathname) {
|
|
||||||
t.Fatalf("Pathname: %q, want %q", pathname, wantPathname)
|
|
||||||
} else if checksum, err = f.Hash(); err != nil {
|
|
||||||
t.Fatalf("Hash: error = %v", err)
|
|
||||||
} else if checksum != testdataChecksum {
|
|
||||||
t.Fatalf("Hash: %x, want %x", checksum, testdataChecksum)
|
|
||||||
}
|
}
|
||||||
}, pkg.MustDecode("4WHaMvRRcCac1uAyXnEklEd2YaNQBj6rXlfMntX9GgYLij3By1znv5QYPGJHYQIH")},
|
}, pkg.MustDecode("4WHaMvRRcCac1uAyXnEklEd2YaNQBj6rXlfMntX9GgYLij3By1znv5QYPGJHYQIH")},
|
||||||
})
|
})
|
||||||
|
|||||||
@@ -32,25 +32,30 @@ func Encode(checksum Checksum) string {
|
|||||||
return base64.URLEncoding.EncodeToString(checksum[:])
|
return base64.URLEncoding.EncodeToString(checksum[:])
|
||||||
}
|
}
|
||||||
|
|
||||||
// encode is abbreviation for base64.URLEncoding.EncodeToString(checksum[:]).
|
// Decode is abbreviation for base64.URLEncoding.Decode(checksum[:], []byte(s)).
|
||||||
func encode(checksum *Checksum) string {
|
func Decode(s string) (checksum Checksum, err error) {
|
||||||
return base64.URLEncoding.EncodeToString(checksum[:])
|
var n int
|
||||||
|
n, err = base64.URLEncoding.Decode(checksum[:], []byte(s))
|
||||||
|
if err == nil && n != len(Checksum{}) {
|
||||||
|
err = io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// MustDecode decodes a string representation of [Checksum] and panics if there
|
// MustDecode decodes a string representation of [Checksum] and panics if there
|
||||||
// is a decoding error or the resulting data is too short.
|
// is a decoding error or the resulting data is too short.
|
||||||
func MustDecode(s string) (checksum Checksum) {
|
func MustDecode(s string) Checksum {
|
||||||
if n, err := base64.URLEncoding.Decode(
|
if checksum, err := Decode(s); err != nil {
|
||||||
checksum[:],
|
|
||||||
[]byte(s),
|
|
||||||
); err != nil {
|
|
||||||
panic(err)
|
panic(err)
|
||||||
} else if n != len(Checksum{}) {
|
} else {
|
||||||
panic(io.ErrUnexpectedEOF)
|
return checksum
|
||||||
}
|
}
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CacheDataFunc tries to load [File] from [Cache], and if that fails, obtains
|
||||||
|
// it via [File.Data] instead.
|
||||||
|
type CacheDataFunc func(f File) (data []byte, err error)
|
||||||
|
|
||||||
// An Artifact is a read-only reference to a piece of data that may be created
|
// An Artifact is a read-only reference to a piece of data that may be created
|
||||||
// deterministically but might not currently be available in memory or on the
|
// deterministically but might not currently be available in memory or on the
|
||||||
// filesystem.
|
// filesystem.
|
||||||
@@ -60,31 +65,62 @@ type Artifact interface {
|
|||||||
// [Artifact] is allowed to return the same [Kind] value.
|
// [Artifact] is allowed to return the same [Kind] value.
|
||||||
Kind() Kind
|
Kind() Kind
|
||||||
|
|
||||||
|
// Params returns opaque bytes that describe [Artifact]. Implementations
|
||||||
|
// must guarantee that these values are unique among differing instances
|
||||||
|
// of the same implementation with the same dependencies.
|
||||||
|
//
|
||||||
|
// Callers must not modify the returned byte slice.
|
||||||
|
//
|
||||||
|
// Result must remain identical across multiple invocations.
|
||||||
|
Params() []byte
|
||||||
|
|
||||||
|
// Dependencies returns a slice of [Artifact] that the current instance
|
||||||
|
// depends on to produce its contents.
|
||||||
|
//
|
||||||
|
// Callers must not modify the returned slice.
|
||||||
|
//
|
||||||
|
// Result must remain identical across multiple invocations.
|
||||||
|
Dependencies() []Artifact
|
||||||
|
|
||||||
|
// Cure cures the current [Artifact] to the caller-specified temporary
|
||||||
|
// pathname. This is not the final resting place of the [Artifact] and this
|
||||||
|
// pathname should not be directly referred to in the final contents.
|
||||||
|
//
|
||||||
|
// If the implementation produces a single file, it must implement [File]
|
||||||
|
// as well. In that case, Cure must produce a single regular file with
|
||||||
|
// contents identical to that returned by [File.Data].
|
||||||
|
Cure(work *check.Absolute, loadData CacheDataFunc) (err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KnownIdent is optionally implemented by [Artifact] and is used instead of
|
||||||
|
// [Kind.Ident] when it is available.
|
||||||
|
//
|
||||||
|
// This is very subtle to use correctly. The implementation must ensure that
|
||||||
|
// this value is globally unique, otherwise [Cache] can enter an inconsistent
|
||||||
|
// state. This should not be implemented outside of testing.
|
||||||
|
type KnownIdent interface {
|
||||||
// ID returns a globally unique identifier referring to the current
|
// ID returns a globally unique identifier referring to the current
|
||||||
// [Artifact]. This value must be known ahead of time and guaranteed to be
|
// [Artifact]. This value must be known ahead of time and guaranteed to be
|
||||||
// unique without having obtained the full contents of the [Artifact].
|
// unique without having obtained the full contents of the [Artifact].
|
||||||
ID() ID
|
ID() ID
|
||||||
|
}
|
||||||
|
|
||||||
// Hash returns the [Checksum] created from the full contents of a cured
|
// KnownChecksum is optionally implemented by [Artifact] for an artifact with
|
||||||
// [Artifact]. This can be stored for future lookup in a [Cache].
|
// output known ahead of time.
|
||||||
|
type KnownChecksum interface {
|
||||||
|
// Checksum returns the address of a known checksum.
|
||||||
//
|
//
|
||||||
// A call to Hash implicitly cures [Artifact].
|
// Callers must not modify the [Checksum].
|
||||||
Hash() (Checksum, error)
|
|
||||||
|
|
||||||
// Pathname returns an absolute pathname to a file or directory populated
|
|
||||||
// with the full contents of [Artifact]. This is the most expensive
|
|
||||||
// operation possible on any [Artifact] and should be avoided if possible.
|
|
||||||
//
|
//
|
||||||
// A call to Pathname implicitly cures [Artifact].
|
// Result must remain identical across multiple invocations.
|
||||||
//
|
Checksum() Checksum
|
||||||
// Callers must only open files read-only. If [Artifact] is a directory,
|
|
||||||
// files must not be created or removed under this directory.
|
|
||||||
Pathname() (*check.Absolute, error)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// A File refers to an [Artifact] backed by a single file.
|
// A File refers to an [Artifact] backed by a single file.
|
||||||
type File interface {
|
type File interface {
|
||||||
// Data returns the full contents of [Artifact].
|
// Data returns the full contents of [Artifact]. If [Artifact.Checksum]
|
||||||
|
// returns a non-nil address, Data is responsible for validating any data
|
||||||
|
// it produces and must return [ChecksumMismatchError] if validation fails.
|
||||||
//
|
//
|
||||||
// Callers must not modify the returned byte slice.
|
// Callers must not modify the returned byte slice.
|
||||||
Data() ([]byte, error)
|
Data() ([]byte, error)
|
||||||
@@ -92,13 +128,22 @@ type File interface {
|
|||||||
Artifact
|
Artifact
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Ident returns the identifier of an [Artifact].
|
||||||
|
func Ident(a Artifact) ID {
|
||||||
|
if ki, ok := a.(KnownIdent); ok {
|
||||||
|
return ki.ID()
|
||||||
|
}
|
||||||
|
return a.Kind().Ident(a.Params(), a.Dependencies()...)
|
||||||
|
}
|
||||||
|
|
||||||
// Kind corresponds to the concrete type of [Artifact] and is used to create
|
// Kind corresponds to the concrete type of [Artifact] and is used to create
|
||||||
// identifier for an [Artifact] with dependencies.
|
// identifier for an [Artifact] with dependencies.
|
||||||
type Kind uint64
|
type Kind uint64
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// KindHTTP is the kind of [Artifact] returned by [Cache.NewHTTP].
|
// KindHTTP is the kind of [Artifact] returned by [NewHTTP].
|
||||||
KindHTTP Kind = iota
|
KindHTTP Kind = iota
|
||||||
|
// KindTar is the kind of artifact returned by [NewTar].
|
||||||
KindTar
|
KindTar
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -109,11 +154,13 @@ func (k Kind) Ident(params []byte, deps ...Artifact) ID {
|
|||||||
type extIdent [len(ID{}) + wordSize]byte
|
type extIdent [len(ID{}) + wordSize]byte
|
||||||
identifiers := make([]extIdent, len(deps))
|
identifiers := make([]extIdent, len(deps))
|
||||||
for i, a := range deps {
|
for i, a := range deps {
|
||||||
id := a.ID()
|
id := Ident(a)
|
||||||
copy(identifiers[i][wordSize:], id[:])
|
copy(identifiers[i][wordSize:], id[:])
|
||||||
binary.LittleEndian.PutUint64(identifiers[i][:], uint64(a.Kind()))
|
binary.LittleEndian.PutUint64(identifiers[i][:], uint64(a.Kind()))
|
||||||
}
|
}
|
||||||
slices.SortFunc(identifiers, func(a, b extIdent) int { return bytes.Compare(a[:], b[:]) })
|
slices.SortFunc(identifiers, func(a, b extIdent) int {
|
||||||
|
return bytes.Compare(a[:], b[:])
|
||||||
|
})
|
||||||
slices.Compact(identifiers)
|
slices.Compact(identifiers)
|
||||||
|
|
||||||
h := sha512.New384()
|
h := sha512.New384()
|
||||||
@@ -134,8 +181,12 @@ const (
|
|||||||
dirChecksum = "checksum"
|
dirChecksum = "checksum"
|
||||||
|
|
||||||
// dirWork is the directory name appended to Cache.base for working
|
// dirWork is the directory name appended to Cache.base for working
|
||||||
// directories created for [Cache.Store].
|
// pathnames set up during [Cache.Cure].
|
||||||
dirWork = "work"
|
dirWork = "work"
|
||||||
|
|
||||||
|
// checksumLinknamePrefix is prepended to the encoded [Checksum] value
|
||||||
|
// of an [Artifact] when creating a symbolic link to dirChecksum.
|
||||||
|
checksumLinknamePrefix = "../" + dirChecksum + "/"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Cache is a support layer that implementations of [Artifact] can use to store
|
// Cache is a support layer that implementations of [Artifact] can use to store
|
||||||
@@ -144,30 +195,32 @@ type Cache struct {
|
|||||||
// Directory where all [Cache] related files are placed.
|
// Directory where all [Cache] related files are placed.
|
||||||
base *check.Absolute
|
base *check.Absolute
|
||||||
|
|
||||||
// Protects the Store critical section.
|
// Whether to validate [File.Data] for a [KnownChecksum] file. This
|
||||||
storeMu sync.Mutex
|
// significantly reduces performance.
|
||||||
|
strict bool
|
||||||
|
|
||||||
// Synchronises access to most public methods.
|
// Synchronises access to dirChecksum.
|
||||||
mu sync.RWMutex
|
checksumMu sync.RWMutex
|
||||||
|
|
||||||
|
// Identifier to content pair cache.
|
||||||
|
ident map[ID]Checksum
|
||||||
|
// Identifier to error pair for unrecoverably faulted [Artifact].
|
||||||
|
identErr map[ID]error
|
||||||
|
// Pending identifiers, accessed through Cure for entries not in ident.
|
||||||
|
identPending map[ID]<-chan struct{}
|
||||||
|
// Synchronises access to ident and corresponding filesystem entries.
|
||||||
|
identMu sync.RWMutex
|
||||||
}
|
}
|
||||||
|
|
||||||
// LoadFile loads the contents of a [File] by its identifier.
|
// IsStrict returns whether the [Cache] strictly verifies checksums.
|
||||||
func (c *Cache) LoadFile(id ID) (
|
func (c *Cache) IsStrict() bool { return c.strict }
|
||||||
pathname *check.Absolute,
|
|
||||||
data []byte,
|
|
||||||
err error,
|
|
||||||
) {
|
|
||||||
pathname = c.base.Append(
|
|
||||||
dirIdentifier,
|
|
||||||
Encode(id),
|
|
||||||
)
|
|
||||||
|
|
||||||
c.mu.RLock()
|
// SetStrict sets whether the [Cache] strictly verifies checksums, even when
|
||||||
data, err = os.ReadFile(pathname.String())
|
// the implementation promises to validate them internally. This significantly
|
||||||
c.mu.RUnlock()
|
// reduces performance and is not recommended outside of testing.
|
||||||
|
//
|
||||||
return
|
// This method is not safe for concurrent use with any other method.
|
||||||
}
|
func (c *Cache) SetStrict(strict bool) { c.strict = strict }
|
||||||
|
|
||||||
// A ChecksumMismatchError describes an [Artifact] with unexpected content.
|
// A ChecksumMismatchError describes an [Artifact] with unexpected content.
|
||||||
type ChecksumMismatchError struct {
|
type ChecksumMismatchError struct {
|
||||||
@@ -180,217 +233,357 @@ func (e *ChecksumMismatchError) Error() string {
|
|||||||
" instead of " + Encode(e.Want)
|
" instead of " + Encode(e.Want)
|
||||||
}
|
}
|
||||||
|
|
||||||
// pathname returns the content-addressed pathname for a [Checksum].
|
// loadOrStoreIdent attempts to load a cached [Artifact] by its identifier or
|
||||||
func (c *Cache) pathname(checksum *Checksum) *check.Absolute {
|
// wait for a pending [Artifact] to cure. If neither is possible, the current
|
||||||
return c.base.Append(
|
// identifier is stored in identPending and a non-nil channel is returned.
|
||||||
dirChecksum,
|
func (c *Cache) loadOrStoreIdent(id *ID) (
|
||||||
encode(checksum),
|
done chan<- struct{},
|
||||||
)
|
checksum Checksum,
|
||||||
}
|
|
||||||
|
|
||||||
// pathnameIdent returns the identifier-based pathname for an [ID].
|
|
||||||
func (c *Cache) pathnameIdent(id *ID) *check.Absolute {
|
|
||||||
return c.base.Append(
|
|
||||||
dirIdentifier,
|
|
||||||
encode((*Checksum)(id)),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store looks up an identifier, and if it is not present, calls makeArtifact
|
|
||||||
// with a private working directory and stores its result instead. An optional
|
|
||||||
// checksum can be passed via the result buffer which is used to validate the
|
|
||||||
// produced directory.
|
|
||||||
func (c *Cache) Store(
|
|
||||||
id ID,
|
|
||||||
makeArtifact func(work *check.Absolute) error,
|
|
||||||
buf *Checksum,
|
|
||||||
validate bool,
|
|
||||||
) (
|
|
||||||
pathname *check.Absolute,
|
|
||||||
store bool,
|
|
||||||
err error,
|
err error,
|
||||||
) {
|
) {
|
||||||
pathname = c.pathnameIdent(&id)
|
var ok bool
|
||||||
c.storeMu.Lock()
|
|
||||||
defer c.storeMu.Unlock()
|
|
||||||
|
|
||||||
_, err = os.Lstat(pathname.String())
|
c.identMu.Lock()
|
||||||
if err == nil || !errors.Is(err, os.ErrNotExist) {
|
if checksum, ok = c.ident[*id]; ok {
|
||||||
|
c.identMu.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err, ok = c.identErr[*id]; ok {
|
||||||
|
c.identMu.Unlock()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
store = true
|
|
||||||
|
|
||||||
var (
|
var notify <-chan struct{}
|
||||||
workPathname *check.Absolute
|
if notify, ok = c.identPending[*id]; ok {
|
||||||
workPathnameRaw string
|
c.identMu.Unlock()
|
||||||
|
<-notify
|
||||||
|
c.identMu.RLock()
|
||||||
|
if checksum, ok = c.ident[*id]; !ok {
|
||||||
|
err = c.identErr[*id]
|
||||||
|
}
|
||||||
|
c.identMu.RUnlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
d := make(chan struct{})
|
||||||
|
c.identPending[*id] = d
|
||||||
|
c.identMu.Unlock()
|
||||||
|
done = d
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// finaliseIdent commits a checksum or error to ident for an identifier
|
||||||
|
// previously submitted to identPending.
|
||||||
|
func (c *Cache) finaliseIdent(
|
||||||
|
done chan<- struct{},
|
||||||
|
id *ID,
|
||||||
|
checksum *Checksum,
|
||||||
|
err error,
|
||||||
|
) {
|
||||||
|
c.identMu.Lock()
|
||||||
|
if err != nil {
|
||||||
|
c.identErr[*id] = err
|
||||||
|
} else {
|
||||||
|
c.ident[*id] = *checksum
|
||||||
|
}
|
||||||
|
c.identMu.Unlock()
|
||||||
|
|
||||||
|
close(done)
|
||||||
|
}
|
||||||
|
|
||||||
|
// loadData provides [CacheDataFunc] for [Artifact.Cure].
|
||||||
|
func (c *Cache) loadData(f File) (data []byte, err error) {
|
||||||
|
var r *os.File
|
||||||
|
if kc, ok := f.(KnownChecksum); ok {
|
||||||
|
c.checksumMu.RLock()
|
||||||
|
r, err = os.Open(c.base.Append(
|
||||||
|
dirChecksum,
|
||||||
|
Encode(kc.Checksum()),
|
||||||
|
).String())
|
||||||
|
c.checksumMu.RUnlock()
|
||||||
|
} else {
|
||||||
|
c.identMu.RLock()
|
||||||
|
r, err = os.Open(c.base.Append(
|
||||||
|
dirIdentifier,
|
||||||
|
Encode(Ident(f)),
|
||||||
|
).String())
|
||||||
|
c.identMu.RUnlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
if !errors.Is(err, os.ErrNotExist) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return f.Data()
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err = io.ReadAll(r)
|
||||||
|
closeErr := r.Close()
|
||||||
|
if err == nil {
|
||||||
|
err = closeErr
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// InvalidFileModeError describes an [Artifact.Cure] that did not result in
|
||||||
|
// a regular file or directory located at the work pathname.
|
||||||
|
type InvalidFileModeError fs.FileMode
|
||||||
|
|
||||||
|
// Error returns a constant string.
|
||||||
|
func (e InvalidFileModeError) Error() string {
|
||||||
|
return "artifact did not produce a regular file or directory"
|
||||||
|
}
|
||||||
|
|
||||||
|
// NoOutputError describes an [Artifact.Cure] that did not populate its
|
||||||
|
// work pathname despite completing successfully.
|
||||||
|
type NoOutputError struct{}
|
||||||
|
|
||||||
|
// Unwrap returns [os.ErrNotExist].
|
||||||
|
func (NoOutputError) Unwrap() error { return os.ErrNotExist }
|
||||||
|
|
||||||
|
// Error returns a constant string.
|
||||||
|
func (NoOutputError) Error() string {
|
||||||
|
return "artifact cured successfully but did not produce any output"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cure cures the [Artifact] and returns its pathname and [Checksum].
|
||||||
|
func (c *Cache) Cure(a Artifact) (
|
||||||
|
pathname *check.Absolute,
|
||||||
|
checksum Checksum,
|
||||||
|
err error,
|
||||||
|
) {
|
||||||
|
id := Ident(a)
|
||||||
|
ids := Encode(id)
|
||||||
|
pathname = c.base.Append(
|
||||||
|
dirIdentifier,
|
||||||
|
ids,
|
||||||
)
|
)
|
||||||
if workPathnameRaw, err = os.MkdirTemp(
|
|
||||||
c.base.Append(dirWork).String(),
|
|
||||||
path.Base(pathname.String()+".*"),
|
|
||||||
); err != nil {
|
|
||||||
return
|
|
||||||
} else if workPathname, err = check.NewAbs(workPathnameRaw); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer func() {
|
defer func() {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
chmodErr := filepath.WalkDir(workPathname.String(), func(path string, d fs.DirEntry, err error) error {
|
pathname = nil
|
||||||
if err != nil {
|
checksum = Checksum{}
|
||||||
return err
|
|
||||||
}
|
|
||||||
if d.IsDir() {
|
|
||||||
return os.Chmod(path, 0700)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
removeErr := os.RemoveAll(workPathname.String())
|
|
||||||
if chmodErr != nil || removeErr != nil {
|
|
||||||
err = errors.Join(err, chmodErr, removeErr)
|
|
||||||
} else if errors.Is(err, os.ErrExist) {
|
|
||||||
// two artifacts may be backed by the same file
|
|
||||||
err = nil
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
if err = os.Chmod(workPathname.String(), 0700); err != nil {
|
|
||||||
|
var done chan<- struct{}
|
||||||
|
done, checksum, err = c.loadOrStoreIdent(&id)
|
||||||
|
if done == nil {
|
||||||
return
|
return
|
||||||
|
} else {
|
||||||
|
defer func() { c.finaliseIdent(done, &id, &checksum, err) }()
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = makeArtifact(workPathname); err != nil {
|
_, err = os.Lstat(pathname.String())
|
||||||
return
|
if err == nil {
|
||||||
}
|
var name string
|
||||||
// override this before hashing since it will be made read-only after the
|
if name, err = os.Readlink(pathname.String()); err != nil {
|
||||||
// rename anyway so do not let perm bits affect the checksum
|
|
||||||
if err = os.Chmod(workPathname.String(), 0700); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
var checksum Checksum
|
|
||||||
if checksum, err = HashDir(workPathname); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if validate {
|
|
||||||
if checksum != *buf {
|
|
||||||
err = &ChecksumMismatchError{checksum, *buf}
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
} else {
|
checksum, err = Decode(path.Base(name))
|
||||||
*buf = checksum
|
return
|
||||||
|
}
|
||||||
|
if !errors.Is(err, os.ErrNotExist) {
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
checksumPathname := c.pathname(&checksum)
|
var checksums string
|
||||||
if err = os.Rename(
|
defer func() {
|
||||||
workPathname.String(),
|
if err == nil && checksums != "" {
|
||||||
checksumPathname.String(),
|
err = os.Symlink(
|
||||||
); err != nil {
|
checksumLinknamePrefix+checksums,
|
||||||
if !errors.Is(err, os.ErrExist) {
|
pathname.String(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
var checksumPathname *check.Absolute
|
||||||
|
var checksumFi os.FileInfo
|
||||||
|
if kc, ok := a.(KnownChecksum); ok {
|
||||||
|
checksum = kc.Checksum()
|
||||||
|
checksums = Encode(checksum)
|
||||||
|
checksumPathname = c.base.Append(
|
||||||
|
dirChecksum,
|
||||||
|
checksums,
|
||||||
|
)
|
||||||
|
|
||||||
|
c.checksumMu.RLock()
|
||||||
|
checksumFi, err = os.Stat(checksumPathname.String())
|
||||||
|
c.checksumMu.RUnlock()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
if !errors.Is(err, os.ErrNotExist) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
checksumFi, err = nil, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if f, ok := a.(File); ok {
|
||||||
|
if checksumFi != nil {
|
||||||
|
if !checksumFi.Mode().IsRegular() {
|
||||||
|
// unreachable
|
||||||
|
err = InvalidFileModeError(checksumFi.Mode())
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
} else if err = os.Chmod(checksumPathname.String(), 0500); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if linkErr := os.Symlink(
|
var data []byte
|
||||||
"../"+dirChecksum+"/"+path.Base(checksumPathname.String()),
|
data, err = f.Data()
|
||||||
pathname.String(),
|
if err != nil {
|
||||||
); linkErr != nil {
|
return
|
||||||
err = linkErr
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// storeFile stores the contents of a [File]. An optional checksum can be
|
|
||||||
// passed via the result buffer which is used to validate the submitted data.
|
|
||||||
//
|
|
||||||
// If locking is disabled, the caller is responsible for acquiring a write lock
|
|
||||||
// and releasing it after this method returns. This makes LoadOrStoreFile
|
|
||||||
// possible without holding the lock while computing hash for store only.
|
|
||||||
func (c *Cache) storeFile(
|
|
||||||
identifierPathname *check.Absolute,
|
|
||||||
data []byte,
|
|
||||||
buf *Checksum,
|
|
||||||
validate, lock bool,
|
|
||||||
) error {
|
|
||||||
h := sha512.New384()
|
|
||||||
h.Write(data)
|
|
||||||
if validate {
|
|
||||||
if got := (Checksum)(h.Sum(nil)); got != *buf {
|
|
||||||
return &ChecksumMismatchError{got, *buf}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if checksumPathname == nil {
|
||||||
|
h := sha512.New384()
|
||||||
|
h.Write(data)
|
||||||
|
h.Sum(checksum[:0])
|
||||||
|
checksums = Encode(checksum)
|
||||||
|
checksumPathname = c.base.Append(
|
||||||
|
dirChecksum,
|
||||||
|
checksums,
|
||||||
|
)
|
||||||
|
} else if c.IsStrict() {
|
||||||
|
h := sha512.New384()
|
||||||
|
h.Write(data)
|
||||||
|
if got := Checksum(h.Sum(nil)); got != checksum {
|
||||||
|
err = &ChecksumMismatchError{
|
||||||
|
Got: got,
|
||||||
|
Want: checksum,
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
c.checksumMu.Lock()
|
||||||
|
var w *os.File
|
||||||
|
w, err = os.OpenFile(
|
||||||
|
checksumPathname.String(),
|
||||||
|
os.O_CREATE|os.O_EXCL|os.O_WRONLY,
|
||||||
|
0400,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
c.checksumMu.Unlock()
|
||||||
|
|
||||||
|
if errors.Is(err, os.ErrExist) {
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
_, err = w.Write(data)
|
||||||
|
closeErr := w.Close()
|
||||||
|
if err == nil {
|
||||||
|
err = closeErr
|
||||||
|
}
|
||||||
|
c.checksumMu.Unlock()
|
||||||
|
|
||||||
|
return
|
||||||
} else {
|
} else {
|
||||||
h.Sum(buf[:0])
|
if checksumFi != nil {
|
||||||
}
|
if !checksumFi.Mode().IsDir() {
|
||||||
|
// unreachable
|
||||||
checksumPathname := c.pathname(buf)
|
err = InvalidFileModeError(checksumFi.Mode())
|
||||||
|
}
|
||||||
if lock {
|
return
|
||||||
c.mu.Lock()
|
|
||||||
defer c.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
if f, err := os.OpenFile(
|
|
||||||
checksumPathname.String(),
|
|
||||||
os.O_WRONLY|os.O_CREATE|os.O_EXCL,
|
|
||||||
0400,
|
|
||||||
); err != nil {
|
|
||||||
// two artifacts may be backed by the same file
|
|
||||||
if !errors.Is(err, os.ErrExist) {
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
} else if _, err = f.Write(data); err != nil {
|
|
||||||
// do not attempt cleanup: this is content-addressed and a partial
|
|
||||||
// write is caught during integrity check
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return os.Symlink(
|
workPathname := c.base.Append(dirWork, ids)
|
||||||
"../"+dirChecksum+"/"+path.Base(checksumPathname.String()),
|
defer func() {
|
||||||
identifierPathname.String(),
|
// must not use the value of checksum string as it might be zeroed
|
||||||
)
|
// to cancel the deferred symlink operation
|
||||||
}
|
|
||||||
|
|
||||||
// StoreFile stores the contents of a [File]. An optional checksum can be
|
if err != nil {
|
||||||
// passed via the result buffer which is used to validate the submitted data.
|
chmodErr := filepath.WalkDir(workPathname.String(), func(
|
||||||
func (c *Cache) StoreFile(
|
path string,
|
||||||
id ID,
|
d fs.DirEntry,
|
||||||
data []byte,
|
err error,
|
||||||
buf *Checksum,
|
) error {
|
||||||
validate bool,
|
if err != nil {
|
||||||
) (pathname *check.Absolute, err error) {
|
return err
|
||||||
pathname = c.pathnameIdent(&id)
|
}
|
||||||
err = c.storeFile(pathname, data, buf, validate, true)
|
if d.IsDir() {
|
||||||
return
|
return os.Chmod(path, 0700)
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if errors.Is(chmodErr, os.ErrNotExist) {
|
||||||
|
chmodErr = nil
|
||||||
|
}
|
||||||
|
removeErr := os.RemoveAll(workPathname.String())
|
||||||
|
if chmodErr != nil || removeErr != nil {
|
||||||
|
err = errors.Join(err, chmodErr, removeErr)
|
||||||
|
} else if errors.Is(err, os.ErrExist) {
|
||||||
|
// two artifacts may be backed by the same file
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
// LoadOrStoreFile attempts to load the contents of a [File] by its identifier,
|
if err = a.Cure(workPathname, c.loadData); err != nil {
|
||||||
// and if that file is not present, calls makeData and stores its result
|
return
|
||||||
// instead. Hash validation behaviour is identical to StoreFile.
|
}
|
||||||
func (c *Cache) LoadOrStoreFile(
|
|
||||||
id ID,
|
var fi os.FileInfo
|
||||||
makeData func() ([]byte, error),
|
if fi, err = os.Lstat(workPathname.String()); err != nil {
|
||||||
buf *Checksum,
|
if errors.Is(err, os.ErrNotExist) {
|
||||||
validate bool,
|
err = NoOutputError{}
|
||||||
) (
|
}
|
||||||
pathname *check.Absolute,
|
return
|
||||||
data []byte,
|
}
|
||||||
store bool,
|
|
||||||
err error,
|
if !fi.IsDir() {
|
||||||
) {
|
if !fi.Mode().IsRegular() {
|
||||||
pathname = c.pathnameIdent(&id)
|
err = InvalidFileModeError(fi.Mode())
|
||||||
c.mu.Lock()
|
} else {
|
||||||
defer c.mu.Unlock()
|
err = errors.New("non-file artifact produced regular file")
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// override this before hashing since it will be made read-only after
|
||||||
|
// the rename anyway so do not let perm bits affect the checksum
|
||||||
|
if err = os.Chmod(workPathname.String(), 0700); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
var gotChecksum Checksum
|
||||||
|
if gotChecksum, err = HashDir(workPathname); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if checksumPathname == nil {
|
||||||
|
checksum = gotChecksum
|
||||||
|
checksums = Encode(checksum)
|
||||||
|
checksumPathname = c.base.Append(
|
||||||
|
dirChecksum,
|
||||||
|
checksums,
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
if gotChecksum != checksum {
|
||||||
|
err = &ChecksumMismatchError{
|
||||||
|
Got: gotChecksum,
|
||||||
|
Want: checksum,
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
c.checksumMu.Lock()
|
||||||
|
if err = os.Rename(
|
||||||
|
workPathname.String(),
|
||||||
|
checksumPathname.String(),
|
||||||
|
); err != nil {
|
||||||
|
if !errors.Is(err, os.ErrExist) {
|
||||||
|
c.checksumMu.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// err is zeroed during deferred cleanup
|
||||||
|
} else {
|
||||||
|
err = os.Chmod(checksumPathname.String(), 0500)
|
||||||
|
}
|
||||||
|
c.checksumMu.Unlock()
|
||||||
|
|
||||||
data, err = os.ReadFile(pathname.String())
|
|
||||||
if err == nil || !errors.Is(err, os.ErrNotExist) {
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
store = true
|
|
||||||
|
|
||||||
data, err = makeData()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
err = c.storeFile(pathname, data, buf, validate, false)
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// New returns the address to a new instance of [Cache].
|
// New returns the address to a new instance of [Cache].
|
||||||
@@ -408,5 +601,9 @@ func New(base *check.Absolute) (*Cache, error) {
|
|||||||
|
|
||||||
return &Cache{
|
return &Cache{
|
||||||
base: base,
|
base: base,
|
||||||
|
|
||||||
|
ident: make(map[ID]Checksum),
|
||||||
|
identErr: make(map[ID]error),
|
||||||
|
identPending: make(map[ID]<-chan struct{}),
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,6 +5,8 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"crypto/sha512"
|
"crypto/sha512"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
"io/fs"
|
"io/fs"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
@@ -12,6 +14,7 @@ import (
|
|||||||
"reflect"
|
"reflect"
|
||||||
"syscall"
|
"syscall"
|
||||||
"testing"
|
"testing"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
"hakurei.app/container"
|
"hakurei.app/container"
|
||||||
"hakurei.app/container/check"
|
"hakurei.app/container/check"
|
||||||
@@ -19,41 +22,131 @@ import (
|
|||||||
"hakurei.app/internal/pkg"
|
"hakurei.app/internal/pkg"
|
||||||
)
|
)
|
||||||
|
|
||||||
// A stubArtifact implements [Artifact] with hardcoded kind and identifier.
|
// overrideIdent overrides the ID method of [Artifact].
|
||||||
type stubArtifact struct {
|
type overrideIdent struct {
|
||||||
kind pkg.Kind
|
id pkg.ID
|
||||||
id pkg.ID
|
pkg.Artifact
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a stubArtifact) Kind() pkg.Kind { return a.kind }
|
func (a overrideIdent) ID() pkg.ID { return a.id }
|
||||||
func (a stubArtifact) ID() pkg.ID { return a.id }
|
|
||||||
func (a stubArtifact) Hash() (pkg.Checksum, error) { panic("unreachable") }
|
// overrideIdentFile overrides the ID method of [File].
|
||||||
func (a stubArtifact) Pathname() (*check.Absolute, error) { panic("unreachable") }
|
type overrideIdentFile struct {
|
||||||
|
id pkg.ID
|
||||||
|
pkg.File
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a overrideIdentFile) ID() pkg.ID { return a.id }
|
||||||
|
|
||||||
|
// A knownIdentArtifact implements [pkg.KnownIdent] and [Artifact]
|
||||||
|
type knownIdentArtifact interface {
|
||||||
|
pkg.KnownIdent
|
||||||
|
pkg.Artifact
|
||||||
|
}
|
||||||
|
|
||||||
|
// A knownIdentFile implements [pkg.KnownIdent] and [File]
|
||||||
|
type knownIdentFile interface {
|
||||||
|
pkg.KnownIdent
|
||||||
|
pkg.File
|
||||||
|
}
|
||||||
|
|
||||||
|
// overrideChecksum overrides the Checksum method of [Artifact].
|
||||||
|
type overrideChecksum struct {
|
||||||
|
checksum pkg.Checksum
|
||||||
|
knownIdentArtifact
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a overrideChecksum) Checksum() pkg.Checksum { return a.checksum }
|
||||||
|
|
||||||
|
// overrideChecksumFile overrides the Checksum method of [File].
|
||||||
|
type overrideChecksumFile struct {
|
||||||
|
checksum pkg.Checksum
|
||||||
|
knownIdentFile
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a overrideChecksumFile) Checksum() pkg.Checksum { return a.checksum }
|
||||||
|
|
||||||
|
// A stubArtifact implements [Artifact] with hardcoded behaviour.
|
||||||
|
type stubArtifact struct {
|
||||||
|
kind pkg.Kind
|
||||||
|
params []byte
|
||||||
|
deps []pkg.Artifact
|
||||||
|
|
||||||
|
cure func(work *check.Absolute, loadData pkg.CacheDataFunc) error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a stubArtifact) Kind() pkg.Kind { return a.kind }
|
||||||
|
func (a stubArtifact) Params() []byte { return a.params }
|
||||||
|
func (a stubArtifact) Dependencies() []pkg.Artifact { return a.deps }
|
||||||
|
|
||||||
|
func (a stubArtifact) Cure(
|
||||||
|
work *check.Absolute,
|
||||||
|
loadData pkg.CacheDataFunc,
|
||||||
|
) error {
|
||||||
|
return a.cure(work, loadData)
|
||||||
|
}
|
||||||
|
|
||||||
|
// A stubFile implements [File] with hardcoded behaviour.
|
||||||
|
type stubFile struct {
|
||||||
|
data []byte
|
||||||
|
err error
|
||||||
|
|
||||||
|
stubArtifact
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a stubFile) Data() ([]byte, error) { return a.data, a.err }
|
||||||
|
|
||||||
|
// newStubFile returns an implementation of [pkg.File] with hardcoded behaviour.
|
||||||
|
func newStubFile(
|
||||||
|
kind pkg.Kind,
|
||||||
|
id pkg.ID,
|
||||||
|
sum *pkg.Checksum,
|
||||||
|
data []byte,
|
||||||
|
err error,
|
||||||
|
) pkg.File {
|
||||||
|
f := overrideIdentFile{id, stubFile{data, err, stubArtifact{
|
||||||
|
kind,
|
||||||
|
nil,
|
||||||
|
nil,
|
||||||
|
func(work *check.Absolute, loadData pkg.CacheDataFunc) error {
|
||||||
|
panic("unreachable")
|
||||||
|
},
|
||||||
|
}}}
|
||||||
|
if sum == nil {
|
||||||
|
return f
|
||||||
|
} else {
|
||||||
|
return overrideChecksumFile{*sum, f}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestIdent(t *testing.T) {
|
func TestIdent(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
testCases := []struct {
|
testCases := []struct {
|
||||||
name string
|
name string
|
||||||
kind pkg.Kind
|
a pkg.Artifact
|
||||||
params []byte
|
want pkg.ID
|
||||||
deps []pkg.Artifact
|
|
||||||
want pkg.ID
|
|
||||||
}{
|
}{
|
||||||
{"tar", pkg.KindTar, []byte{
|
{"tar", stubArtifact{
|
||||||
pkg.TarGzip, 0, 0, 0, 0, 0, 0, 0,
|
pkg.KindTar,
|
||||||
}, []pkg.Artifact{
|
[]byte{pkg.TarGzip, 0, 0, 0, 0, 0, 0, 0},
|
||||||
stubArtifact{pkg.KindHTTP, pkg.ID{}},
|
[]pkg.Artifact{
|
||||||
}, pkg.MustDecode("HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY")},
|
overrideIdent{pkg.ID{}, stubArtifact{}},
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
}, pkg.MustDecode(
|
||||||
|
"HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY",
|
||||||
|
)},
|
||||||
}
|
}
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
if got := tc.kind.Ident(tc.params, tc.deps...); got != tc.want {
|
if got := pkg.Ident(tc.a); got != tc.want {
|
||||||
t.Errorf("Ident: %s, want %s",
|
t.Errorf("Ident: %s, want %s",
|
||||||
pkg.Encode(got),
|
pkg.Encode(got),
|
||||||
pkg.Encode(tc.want))
|
pkg.Encode(tc.want),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -118,6 +211,37 @@ func checkWithCache(t *testing.T, testCases []cacheTestCase) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// A cureStep contains an [Artifact] to be cured, and the expected outcome.
|
||||||
|
type cureStep struct {
|
||||||
|
name string
|
||||||
|
|
||||||
|
a pkg.Artifact
|
||||||
|
|
||||||
|
pathname *check.Absolute
|
||||||
|
checksum pkg.Checksum
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
// cureMany cures many artifacts against a [Cache] and checks their outcomes.
|
||||||
|
func cureMany(t *testing.T, c *pkg.Cache, steps []cureStep) {
|
||||||
|
for _, step := range steps {
|
||||||
|
t.Log("cure step:", step.name)
|
||||||
|
if pathname, checksum, err := c.Cure(step.a); !reflect.DeepEqual(err, step.err) {
|
||||||
|
t.Fatalf("Cure: error = %v, want %v", err, step.err)
|
||||||
|
} else if !pathname.Is(step.pathname) {
|
||||||
|
t.Fatalf("Cure: pathname = %q, want %q", pathname, step.pathname)
|
||||||
|
} else if checksum != step.checksum {
|
||||||
|
t.Fatalf("Cure: checksum = %s, want %s", pkg.Encode(checksum), pkg.Encode(step.checksum))
|
||||||
|
} else {
|
||||||
|
v := any(err)
|
||||||
|
if err == nil {
|
||||||
|
v = pathname
|
||||||
|
}
|
||||||
|
t.Log(pkg.Encode(checksum)+":", v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestCache(t *testing.T) {
|
func TestCache(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
@@ -134,21 +258,9 @@ func TestCache(t *testing.T) {
|
|||||||
return (pkg.Checksum)(h.Sum(nil))
|
return (pkg.Checksum)(h.Sum(nil))
|
||||||
}()
|
}()
|
||||||
|
|
||||||
testdataChecksumString := pkg.Encode(testdataChecksum)
|
|
||||||
|
|
||||||
testCases := []cacheTestCase{
|
testCases := []cacheTestCase{
|
||||||
{"file", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
{"file", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
wantErrNonexistent := &os.PathError{
|
c.SetStrict(true)
|
||||||
Op: "open",
|
|
||||||
Path: base.Append(
|
|
||||||
"identifier",
|
|
||||||
testdataChecksumString,
|
|
||||||
).String(),
|
|
||||||
Err: syscall.ENOENT,
|
|
||||||
}
|
|
||||||
if _, _, err := c.LoadFile(testdataChecksum); !reflect.DeepEqual(err, wantErrNonexistent) {
|
|
||||||
t.Fatalf("LoadFile: error = %#v, want %#v", err, wantErrNonexistent)
|
|
||||||
}
|
|
||||||
|
|
||||||
identifier := (pkg.ID)(bytes.Repeat([]byte{
|
identifier := (pkg.ID)(bytes.Repeat([]byte{
|
||||||
0x75, 0xe6, 0x9d, 0x6d, 0xe7, 0x9f,
|
0x75, 0xe6, 0x9d, 0x6d, 0xe7, 0x9f,
|
||||||
@@ -165,154 +277,109 @@ func TestCache(t *testing.T) {
 "cafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe",
 )
 
-// initial store
-if pathname, err := c.StoreFile(
-identifier,
-[]byte(testdata),
+cureMany(t, c, []cureStep{
+{"initial file", newStubFile(
+pkg.KindHTTP,
+identifier,
 &testdataChecksum,
-true,
-); err != nil {
-t.Fatalf("StoreFile: error = %v", err)
-} else if !pathname.Is(wantPathname) {
-t.Fatalf("StoreFile: pathname = %q, want %q", pathname, wantPathname)
-}
+[]byte(testdata), nil,
+), wantPathname, testdataChecksum, nil},
 
-// load or store, identical content
-if pathname, data, store, err := c.LoadOrStoreFile(identifier0, func() ([]byte, error) {
-return []byte(testdata), nil
-}, &testdataChecksum, true); err != nil {
-t.Fatalf("LoadOrStoreFile: error = %v", err)
-} else if !pathname.Is(wantPathname0) {
-t.Fatalf("LoadOrStoreFile: pathname = %q, want %q", pathname, wantPathname0)
-} else if string(data) != testdata {
-t.Fatalf("LoadOrStoreFile: data = %x, want %x", data, testdata)
-} else if !store {
-t.Fatal("LoadOrStoreFile did not store nonpresent entry")
-}
+{"identical content", newStubFile(
+pkg.KindHTTP,
+identifier0,
+&testdataChecksum,
+[]byte(testdata), nil,
+), wantPathname0, testdataChecksum, nil},
 
-// load or store, existing entry
-if pathname, data, store, err := c.LoadOrStoreFile(identifier, func() ([]byte, error) {
-return []byte(testdata), nil
-}, &testdataChecksum, true); err != nil {
-t.Fatalf("LoadOrStoreFile: error = %v", err)
-} else if !pathname.Is(wantPathname) {
-t.Fatalf("LoadOrStoreFile: pathname = %q, want %q", pathname, wantPathname)
-} else if string(data) != testdata {
-t.Fatalf("LoadOrStoreFile: data = %x, want %x", data, testdata)
-} else if store {
-t.Fatal("LoadOrStoreFile stored over present entry")
-}
+{"existing entry", newStubFile(
+pkg.KindHTTP,
+identifier,
+&testdataChecksum,
+[]byte(testdata), nil,
+), wantPathname, testdataChecksum, nil},
 
-// load, existing entry
-if pathname, data, err := c.LoadFile(identifier0); err != nil {
-t.Fatalf("LoadFile: error = %v", err)
-} else if !pathname.Is(wantPathname0) {
-t.Fatalf("LoadFile: pathname = %q, want %q", pathname, wantPathname0)
-} else if string(data) != testdata {
-t.Fatalf("LoadFile: data = %x, want %x", data, testdata)
-}
+{"checksum mismatch", newStubFile(
+pkg.KindHTTP,
+pkg.ID{0xff, 0},
+new(pkg.Checksum),
+[]byte(testdata), nil,
+), nil, pkg.Checksum{}, &pkg.ChecksumMismatchError{
+Got: testdataChecksum,
+}},
 
-// checksum mismatch
-wantErrChecksum := &pkg.ChecksumMismatchError{
-Got: testdataChecksum,
-}
-if _, err := c.StoreFile(
-testdataChecksum,
-[]byte(testdata),
-new(pkg.Checksum),
-true,
-); !reflect.DeepEqual(err, wantErrChecksum) {
-t.Fatalf("StoreFile: error = %#v, want %#v", err, wantErrChecksum)
-}
-
-// verify failed store
-if _, _, err := c.LoadFile(testdataChecksum); !reflect.DeepEqual(err, wantErrNonexistent) {
-t.Fatalf("LoadFile: error = %#v, want %#v", err, wantErrNonexistent)
-}
-
-// store, same identifier
-wantPathnameF := base.Append(
-"identifier",
-testdataChecksumString,
-)
-if pathname, err := c.StoreFile(
-testdataChecksum,
-[]byte(testdata),
-&testdataChecksum,
-true,
-); err != nil {
-t.Fatalf("StoreFile: error = %v", err)
-} else if !pathname.Is(wantPathnameF) {
-t.Fatalf("StoreFile: pathname = %q, want %q", pathname, wantPathnameF)
-}
-
-// load, same identifier
-if pathname, data, err := c.LoadFile(testdataChecksum); err != nil {
-t.Fatalf("LoadFile: error = %v", err)
-} else if !pathname.Is(wantPathnameF) {
-t.Fatalf("LoadFile: pathname = %q, want %q", pathname, wantPathnameF)
-} else if string(data) != testdata {
-t.Fatalf("LoadFile: data = %x, want %x", data, testdata)
-}
-
-// store without validation
-wantChecksum := pkg.Checksum{
-0xbe, 0xc0, 0x21, 0xb4, 0xf3, 0x68,
-0xe3, 0x06, 0x91, 0x34, 0xe0, 0x12,
-0xc2, 0xb4, 0x30, 0x70, 0x83, 0xd3,
-0xa9, 0xbd, 0xd2, 0x06, 0xe2, 0x4e,
-0x5f, 0x0d, 0x86, 0xe1, 0x3d, 0x66,
-0x36, 0x65, 0x59, 0x33, 0xec, 0x2b,
-0x41, 0x34, 0x65, 0x96, 0x68, 0x17,
-0xa9, 0xc2, 0x08, 0xa1, 0x17, 0x17,
-}
-var gotChecksum pkg.Checksum
-wantPathnameG := base.Append(
-"identifier",
-pkg.Encode(wantChecksum),
-)
-if pathname, err := c.StoreFile(
-wantChecksum,
-[]byte{0},
-&gotChecksum,
-false,
-); err != nil {
-t.Fatalf("StoreFile: error = %#v", err)
-} else if !pathname.Is(wantPathnameG) {
-t.Fatalf("StoreFile: pathname = %q, want %q", pathname, wantPathnameG)
-} else if gotChecksum != wantChecksum {
-t.Fatalf("StoreFile: buf = %x, want %x", gotChecksum, wantChecksum)
-}
-
-// makeData passthrough
-var zeroIdent pkg.ID
-wantErrPassthrough := stub.UniqueError(0xcafe)
-if _, _, _, err := c.LoadOrStoreFile(zeroIdent, func() ([]byte, error) {
-return nil, wantErrPassthrough
-}, new(pkg.Checksum), true); !reflect.DeepEqual(err, wantErrPassthrough) {
-t.Fatalf("LoadOrStoreFile: error = %#v, want %#v", err, wantErrPassthrough)
-}
-
-// verify failed store
-wantErrNonexistentZero := &os.PathError{
-Op: "open",
-Path: base.Append(
+{"store without validation", newStubFile(
+pkg.KindHTTP,
+pkg.MustDecode("vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX"),
+nil,
+[]byte{0}, nil,
+), base.Append(
 "identifier",
-"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
-).String(),
-Err: syscall.ENOENT,
-}
-if _, _, err := c.LoadFile(zeroIdent); !reflect.DeepEqual(err, wantErrNonexistentZero) {
-t.Fatalf("LoadFile: error = %#v, want %#v", err, wantErrNonexistentZero)
+"vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX",
+), pkg.Checksum{
+0xbe, 0xc0, 0x21, 0xb4, 0xf3, 0x68,
+0xe3, 0x06, 0x91, 0x34, 0xe0, 0x12,
+0xc2, 0xb4, 0x30, 0x70, 0x83, 0xd3,
+0xa9, 0xbd, 0xd2, 0x06, 0xe2, 0x4e,
+0x5f, 0x0d, 0x86, 0xe1, 0x3d, 0x66,
+0x36, 0x65, 0x59, 0x33, 0xec, 0x2b,
+0x41, 0x34, 0x65, 0x96, 0x68, 0x17,
+0xa9, 0xc2, 0x08, 0xa1, 0x17, 0x17,
+}, nil},
+
+{"error passthrough", newStubFile(
+pkg.KindHTTP,
+pkg.ID{0xff, 1},
+nil,
+nil, stub.UniqueError(0xcafe),
+), nil, pkg.Checksum{}, stub.UniqueError(0xcafe)},
+
+{"error caching", newStubFile(
+pkg.KindHTTP,
+pkg.ID{0xff, 1},
+nil,
+nil, nil,
+), nil, pkg.Checksum{}, stub.UniqueError(0xcafe)},
+
+{"cache hit bad type", overrideChecksum{testdataChecksum, overrideIdent{pkg.ID{0xff, 2}, stubArtifact{
+kind: pkg.KindTar,
+}}}, nil, pkg.Checksum{}, pkg.InvalidFileModeError(
+0400,
+)},
+})
+
+if c0, err := pkg.New(base); err != nil {
+t.Fatalf("New: error = %v", err)
+} else {
+cureMany(t, c0, []cureStep{
+{"cache hit ident", overrideIdent{
+id: identifier,
+}, wantPathname, testdataChecksum, nil},
+
+{"cache miss checksum match", newStubFile(
+pkg.KindHTTP,
+testdataChecksum,
+nil,
+[]byte(testdata),
+nil,
+), base.Append(
+"identifier",
+pkg.Encode(testdataChecksum),
+), testdataChecksum, nil},
+})
 }
 }, pkg.MustDecode("St9rlE-mGZ5gXwiv_hzQ_B8bZP-UUvSNmf4nHUZzCMOumb6hKnheZSe0dmnuc4Q2")},
 
 {"directory", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
 id := pkg.KindTar.Ident(
 binary.LittleEndian.AppendUint64(nil, pkg.TarGzip),
-stubArtifact{pkg.KindHTTP, testdataChecksum},
+overrideIdent{testdataChecksum, stubArtifact{}},
 )
-makeSample := func(work *check.Absolute) error {
+makeSample := func(work *check.Absolute, _ pkg.CacheDataFunc) error {
+if err := os.Mkdir(work.String(), 0700); err != nil {
+return err
+}
+
 if err := os.WriteFile(
 work.Append("check").String(),
 []byte{0, 0},
@@ -344,59 +411,22 @@ func TestCache(t *testing.T) {
 pkg.Encode(id),
 )
 
-if pathname, store, err := c.Store(
-id,
-makeSample,
-&wantChecksum,
-true,
-); err != nil {
-t.Fatalf("Store: error = %v", err)
-} else if !store {
-t.Fatal("Store did not store nonpresent entry")
-} else if !pathname.Is(wantPathname) {
-t.Fatalf("Store: pathname = %q, want %q", pathname, wantPathname)
-}
-
-// check lookup
-if pathname, store, err := c.Store(
-id,
-nil,
-&wantChecksum,
-true,
-); err != nil {
-t.Fatalf("Store: error = %v", err)
-} else if store {
-t.Fatal("Store stored over present entry")
-} else if !pathname.Is(wantPathname) {
-t.Fatalf("Store: pathname = %q, want %q", pathname, wantPathname)
-}
-
-// check exist
 id0 := pkg.KindTar.Ident(
 binary.LittleEndian.AppendUint64(nil, pkg.TarGzip),
-stubArtifact{pkg.KindHTTP, pkg.ID{}},
+overrideIdent{pkg.ID{}, stubArtifact{}},
 )
 wantPathname0 := base.Append(
 "identifier",
 pkg.Encode(id0),
 )
-if pathname, store, err := c.Store(
-id0,
-makeSample,
-&wantChecksum,
-true,
-); err != nil {
-t.Fatalf("Store: error = %v", err)
-} else if !store {
-t.Fatal("Store did not store nonpresent entry")
-} else if !pathname.Is(wantPathname0) {
-t.Fatalf("Store: pathname = %q, want %q", pathname, wantPathname0)
-}
 
-var wantErrMakeGarbage error
-makeGarbage := func(work *check.Absolute) error {
+makeGarbage := func(work *check.Absolute, wantErr error) error {
+if err := os.Mkdir(work.String(), 0700); err != nil {
+return err
+}
+
 mode := fs.FileMode(0)
-if wantErrMakeGarbage == nil {
+if wantErr == nil {
 mode = 0500
 }
 
@@ -426,38 +456,144 @@ func TestCache(t *testing.T) {
 return err
 }
 
-return wantErrMakeGarbage
+return wantErr
 }
 
-// check makeArtifact fault
-wantErrMakeGarbage = stub.UniqueError(0xcafe)
-if _, store, err := c.Store(
-pkg.ID{},
-makeGarbage,
-nil,
-false,
-); !reflect.DeepEqual(err, wantErrMakeGarbage) {
-t.Fatalf("Store: error = %#v, want %#v", err, wantErrMakeGarbage)
-} else if !store {
-t.Fatal("Store did not store nonpresent entry")
-}
+cureMany(t, c, []cureStep{
+{"initial directory", overrideChecksum{wantChecksum, overrideIdent{id, stubArtifact{
+kind: pkg.KindTar,
+cure: makeSample,
+}}}, wantPathname, wantChecksum, nil},
 
-// checksum mismatch
-wantErrMakeGarbage = nil
-wantErrMismatch := &pkg.ChecksumMismatchError{
-Got: pkg.MustDecode("GbjlYMcHQANdfwL6qNGopBF99IscPTvCy95HSH1_kIF3eKjFDSLP0_iUUT0z8hiw"),
-}
-if _, store, err := c.Store(
-pkg.ID{},
-makeGarbage,
-new(pkg.Checksum),
-true,
-); !reflect.DeepEqual(err, wantErrMismatch) {
-t.Fatalf("Store: error = %v, want %v", err, wantErrMismatch)
-} else if !store {
-t.Fatal("Store did not store nonpresent entry")
-}
+{"identical identifier", overrideChecksum{wantChecksum, overrideIdent{id, stubArtifact{
+kind: pkg.KindTar,
+}}}, wantPathname, wantChecksum, nil},
+{"identical checksum", overrideIdent{id0, stubArtifact{
+kind: pkg.KindTar,
+cure: makeSample,
+}}, wantPathname0, wantChecksum, nil},
+{"cure fault", overrideIdent{pkg.ID{0xff, 0}, stubArtifact{
+kind: pkg.KindTar,
+cure: func(work *check.Absolute, _ pkg.CacheDataFunc) error {
+return makeGarbage(work, stub.UniqueError(0xcafe))
+},
+}}, nil, pkg.Checksum{}, stub.UniqueError(0xcafe)},
+
+{"checksum mismatch", overrideChecksum{pkg.Checksum{}, overrideIdent{pkg.ID{0xff, 1}, stubArtifact{
+kind: pkg.KindTar,
+cure: func(work *check.Absolute, _ pkg.CacheDataFunc) error {
+return makeGarbage(work, nil)
+},
+}}}, nil, pkg.Checksum{}, &pkg.ChecksumMismatchError{
+Got: pkg.MustDecode(
+"GbjlYMcHQANdfwL6qNGopBF99IscPTvCy95HSH1_kIF3eKjFDSLP0_iUUT0z8hiw",
+),
+}},
+
+{"cache hit bad type", newStubFile(
+pkg.KindHTTP,
+pkg.ID{0xff, 2},
+&wantChecksum,
+[]byte(testdata), nil,
+), nil, pkg.Checksum{}, pkg.InvalidFileModeError(
+fs.ModeDir | 0500,
+)},
+
+{"loadData directory", overrideIdent{pkg.ID{0xff, 3}, stubArtifact{
+kind: pkg.KindTar,
+cure: func(work *check.Absolute, loadData pkg.CacheDataFunc) error {
+_, err := loadData(overrideChecksumFile{checksum: wantChecksum})
+return err
+},
+}}, nil, pkg.Checksum{}, &os.PathError{
+Op: "read",
+Path: base.Append(
+"checksum",
+pkg.Encode(wantChecksum),
+).String(),
+Err: syscall.EISDIR,
+}},
+
+{"no output", overrideIdent{pkg.ID{0xff, 4}, stubArtifact{
+kind: pkg.KindTar,
+cure: func(work *check.Absolute, loadData pkg.CacheDataFunc) error {
+return nil
+},
+}}, nil, pkg.Checksum{}, pkg.NoOutputError{}},
+
+{"file output", overrideIdent{pkg.ID{0xff, 5}, stubArtifact{
+kind: pkg.KindTar,
+cure: func(work *check.Absolute, loadData pkg.CacheDataFunc) error {
+return os.WriteFile(work.String(), []byte{0}, 0400)
+},
+}}, nil, pkg.Checksum{}, errors.New("non-file artifact produced regular file")},
+
+{"symlink output", overrideIdent{pkg.ID{0xff, 6}, stubArtifact{
+kind: pkg.KindTar,
+cure: func(work *check.Absolute, loadData pkg.CacheDataFunc) error {
+return os.Symlink(work.String(), work.String())
+},
+}}, nil, pkg.Checksum{}, pkg.InvalidFileModeError(
+fs.ModeSymlink | 0777,
+)},
+})
 }, pkg.MustDecode("8OP6YxJAdRrhV2WSBt1BPD7oC_n2Qh7JqUMyVMoGvjDX83bDqq2hgVMNcdiBH_64")},
 
+{"pending", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
+c.SetStrict(true)
+
+wantErr := stub.UniqueError(0xcafe)
+n, ready := make(chan struct{}), make(chan struct{})
+go func() {
+if _, _, err := c.Cure(overrideIdent{pkg.ID{0xff}, stubArtifact{
+kind: pkg.KindTar,
+cure: func(work *check.Absolute, loadData pkg.CacheDataFunc) error {
+close(ready)
+<-n
+return wantErr
+},
+}}); !reflect.DeepEqual(err, wantErr) {
+panic(fmt.Sprintf("Cure: error = %v, want %v", err, wantErr))
+}
+}()
+
+<-ready
+go func() {
+if _, _, err := c.Cure(overrideIdent{pkg.ID{0xff}, stubArtifact{
+kind: pkg.KindTar,
+}}); !reflect.DeepEqual(err, wantErr) {
+panic(fmt.Sprintf("Cure: error = %v, want %v", err, wantErr))
+}
+}()
+
+// check cache activity while a cure is blocking
+cureMany(t, c, []cureStep{
+{"error passthrough", newStubFile(
+pkg.KindHTTP,
+pkg.ID{0xff, 1},
+nil,
+nil, stub.UniqueError(0xbad),
+), nil, pkg.Checksum{}, stub.UniqueError(0xbad)},
+
+{"file output", overrideIdent{pkg.ID{0xff, 2}, stubArtifact{
+kind: pkg.KindTar,
+cure: func(work *check.Absolute, loadData pkg.CacheDataFunc) error {
+return os.WriteFile(work.String(), []byte{0}, 0400)
+},
+}}, nil, pkg.Checksum{}, errors.New("non-file artifact produced regular file")},
+})
+
+identPendingVal := reflect.ValueOf(c).Elem().FieldByName("identPending")
+identPending := reflect.NewAt(
+identPendingVal.Type(),
+unsafe.Pointer(identPendingVal.UnsafeAddr()),
+).Elem().Interface().(map[pkg.ID]<-chan struct{})
+notify := identPending[pkg.ID{0xff}]
+go close(n)
+<-notify
+}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C")},
 }
 checkWithCache(t, testCases)
 }
@@ -484,6 +620,14 @@ func TestErrors(t *testing.T) {
 {"DisallowedTypeflagError", pkg.DisallowedTypeflagError(
 tar.TypeChar,
 ), "disallowed typeflag '3'"},
+
+{"InvalidFileModeError", pkg.InvalidFileModeError(
+fs.ModeSymlink | 0777,
+), "artifact did not produce a regular file or directory"},
+
+{"NoOutputError", pkg.NoOutputError{
+// empty struct
+}, "artifact cured successfully but did not produce any output"},
 }
 for _, tc := range testCases {
 t.Run(tc.name, func(t *testing.T) {
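Read together, the test changes above reflect the reworked split of responsibilities: constructors such as NewHTTP, NewTar and NewHTTPGetTar are now package-level functions that only describe an artifact's contents, while all cache I/O sits behind Cache, whose Cure method returns the cached pathname together with the checksum of the cured output. The following is a minimal caller sketch, assuming only the signatures exercised in these tests (pkg.New, (*Cache).SetStrict, (*Cache).Cure and NewHTTPGetTar); the wrapper function, its name and the example import path are illustrative and not part of the commit.

package pkgexample // hypothetical example package, not part of the repository

import (
	"context"
	"net/http"

	"hakurei.app/container/check"
	"hakurei.app/internal/pkg" // assumed import path; the package is internal to hakurei
)

// fetchTree describes a gzip-compressed tarball fetched over HTTP and asks the
// cache to cure it.
func fetchTree(ctx context.Context, c *pkg.Cache, url string, sum pkg.Checksum) (*check.Absolute, pkg.Checksum, error) {
	// The constructor only records what the artifact is; it performs no I/O
	// and no longer holds a reference to a *Cache.
	a, err := pkg.NewHTTPGetTar(ctx, http.DefaultClient, url, sum, pkg.TarGzip)
	if err != nil {
		return nil, pkg.Checksum{}, err
	}
	// Cure performs all cache I/O: it loads or fetches the backing tarball,
	// unpacks it into the cache, and returns the cached pathname together
	// with the checksum of the cured directory.
	return c.Cure(a)
}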
@@ -11,7 +11,6 @@ import (
 "io/fs"
 "net/http"
 "os"
-"sync"
 
 "hakurei.app/container/check"
 )
@@ -25,59 +24,44 @@ const (
 
 // A tarArtifact is an [Artifact] unpacking a tarball backed by a [File].
 type tarArtifact struct {
-// Computed ahead of time from the checksum of the identifier of f appended
-// with parameters of tarArtifact.
-id ID
-
 // Caller-supplied backing tarball.
 f File
 // Compression on top of the tarball.
 compression uint64
-
-// Populated when submitting to or loading from [Cache].
-pathname *check.Absolute
-// Checksum of cured directory. Valid if pathname is not nil.
-checksum Checksum
-
-// Instance of [Cache] to submit the cured artifact to.
-c *Cache
-// Protects the Pathname critical section.
-mu sync.Mutex
 }
 
 // NewTar returns a new [Artifact] backed by the supplied [File] and
 // compression method.
-func (c *Cache) NewTar(f File, compression uint64) Artifact {
-return &tarArtifact{id: KindTar.Ident(
-binary.LittleEndian.AppendUint64(nil, compression), f,
-), f: f, compression: compression, c: c}
+func NewTar(f File, compression uint64) Artifact {
+return &tarArtifact{f: f, compression: compression}
 }
 
 // NewHTTPGetTar is abbreviation for NewHTTPGet passed to NewTar.
-func (c *Cache) NewHTTPGetTar(
+func NewHTTPGetTar(
 ctx context.Context,
 hc *http.Client,
 url string,
 checksum Checksum,
 compression uint64,
 ) (Artifact, error) {
-f, err := c.NewHTTPGet(ctx, hc, url, checksum)
+f, err := NewHTTPGet(ctx, hc, url, checksum)
 if err != nil {
 return nil, err
 }
-return c.NewTar(f, compression), nil
+return NewTar(f, compression), nil
 }
 
 // Kind returns the hardcoded [Kind] constant.
 func (a *tarArtifact) Kind() Kind { return KindTar }
 
-// ID returns the identifier prepared ahead of time.
-func (a *tarArtifact) ID() ID { return a.id }
+// Params returns compression encoded in little endian.
+func (a *tarArtifact) Params() []byte {
+return binary.LittleEndian.AppendUint64(nil, a.compression)
+}
 
-// Hash cures the [Artifact] and returns its hash.
-func (a *tarArtifact) Hash() (Checksum, error) {
-_, err := a.Pathname()
-return a.checksum, err
+// Dependencies returns a slice containing the backing file.
+func (a *tarArtifact) Dependencies() []Artifact {
+return []Artifact{a.f}
 }
 
 // A DisallowedTypeflagError describes a disallowed typeflag encountered while
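The Artifact and File definitions themselves are not part of this excerpt. As a point of reference only, the shape implied by the methods above (Kind, Params, Dependencies, Cure) and by how loadData is used below and in the tests would look roughly like the following sketch; the authoritative definitions in the package may differ.

// Inferred sketch, not copied from the package sources.
type Artifact interface {
	Kind() Kind
	Params() []byte           // kind-specific parameters, folded into the identifier
	Dependencies() []Artifact // artifacts whose data must be available first
	Cure(work *check.Absolute, loadData CacheDataFunc) error
}

// File appears to be an Artifact whose cured output is a single regular file.
type File interface {
	Artifact
	Data() ([]byte, error)
}

// CacheDataFunc lets Cure read the contents of a dependency File through the
// Cache, as in the loadData(a.f) call below.
type CacheDataFunc func(f File) ([]byte, error)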
@@ -88,146 +72,131 @@ func (e DisallowedTypeflagError) Error() string {
 return "disallowed typeflag '" + string(e) + "'"
 }
 
-// Pathname cures the [Artifact] and returns its pathname in the [Cache].
-func (a *tarArtifact) Pathname() (*check.Absolute, error) {
-a.mu.Lock()
-defer a.mu.Unlock()
+// Cure cures the [Artifact], producing a directory located at work.
+func (a *tarArtifact) Cure(work *check.Absolute, loadData CacheDataFunc) (err error) {
+var tr io.ReadCloser
 
-if a.pathname != nil {
-return a.pathname, nil
+{
+var data []byte
+data, err = loadData(a.f)
+if err != nil {
+return
+}
+tr = io.NopCloser(bytes.NewReader(data))
 }
 
-pathname, _, err := a.c.Store(a.id, func(work *check.Absolute) (err error) {
-var tr io.ReadCloser
-
-{
-var data []byte
-data, err = a.f.Data()
-if err != nil {
-return
-}
-tr = io.NopCloser(bytes.NewReader(data))
-}
-
-defer func() {
-closeErr := tr.Close()
-if err == nil {
-err = closeErr
-}
-}()
-
-switch a.compression {
-case TarUncompressed:
-break
-
-case TarGzip:
-if tr, err = gzip.NewReader(tr); err != nil {
-return
-}
-break
-
-default:
-return os.ErrInvalid
-}
-
-type dirTargetPerm struct {
-path *check.Absolute
-mode fs.FileMode
-}
-var madeDirectories []dirTargetPerm
-
-var header *tar.Header
-r := tar.NewReader(tr)
-for header, err = r.Next(); err == nil; header, err = r.Next() {
-typeflag := header.Typeflag
-for {
-switch typeflag {
-case 0:
-if len(header.Name) > 0 && header.Name[len(header.Name)-1] == '/' {
-typeflag = tar.TypeDir
-} else {
-typeflag = tar.TypeReg
-}
-continue
-
-case tar.TypeReg:
-var f *os.File
-if f, err = os.OpenFile(
-work.Append(header.Name).String(),
-os.O_CREATE|os.O_EXCL|os.O_WRONLY,
-header.FileInfo().Mode()&0400,
-); err != nil {
-return
-}
-if _, err = io.Copy(f, r); err != nil {
-_ = f.Close()
-return
-} else if err = f.Close(); err != nil {
-return
-}
-break
-
-case tar.TypeLink:
-if err = os.Link(
-header.Linkname,
-work.Append(header.Name).String(),
-); err != nil {
-return
-}
-break
-
-case tar.TypeSymlink:
-if err = os.Symlink(
-header.Linkname,
-work.Append(header.Name).String(),
-); err != nil {
-return
-}
-break
-
-case tar.TypeDir:
-pathname := work.Append(header.Name)
-madeDirectories = append(madeDirectories, dirTargetPerm{
-path: pathname,
-mode: header.FileInfo().Mode(),
-})
-if err = os.MkdirAll(
-pathname.String(),
-0700,
-); err != nil {
-return
-}
-break
-
-case tar.TypeXGlobalHeader:
-// ignore
-break
-
-default:
-return DisallowedTypeflagError(typeflag)
-}
-
-break
-}
-}
-if errors.Is(err, io.EOF) {
-err = nil
-}
-
+defer func() {
+closeErr := tr.Close()
 if err == nil {
-for _, e := range madeDirectories {
-if err = os.Chmod(e.path.String(), e.mode&0500); err != nil {
+err = closeErr
+}
+}()
+
+switch a.compression {
+case TarUncompressed:
+break
+
+case TarGzip:
+if tr, err = gzip.NewReader(tr); err != nil {
+return
+}
+break
+
+default:
+return os.ErrInvalid
+}
+
+type dirTargetPerm struct {
+path *check.Absolute
+mode fs.FileMode
+}
+var madeDirectories []dirTargetPerm
+
+var header *tar.Header
+r := tar.NewReader(tr)
+for header, err = r.Next(); err == nil; header, err = r.Next() {
+typeflag := header.Typeflag
+for {
+switch typeflag {
+case 0:
+if len(header.Name) > 0 && header.Name[len(header.Name)-1] == '/' {
+typeflag = tar.TypeDir
+} else {
+typeflag = tar.TypeReg
+}
+continue
+
+case tar.TypeReg:
+var f *os.File
+if f, err = os.OpenFile(
+work.Append(header.Name).String(),
+os.O_CREATE|os.O_EXCL|os.O_WRONLY,
+header.FileInfo().Mode()&0400,
+); err != nil {
 return
 }
+if _, err = io.Copy(f, r); err != nil {
+_ = f.Close()
+return
+} else if err = f.Close(); err != nil {
+return
+}
+break
+
+case tar.TypeLink:
+if err = os.Link(
+header.Linkname,
+work.Append(header.Name).String(),
+); err != nil {
+return
+}
+break
+
+case tar.TypeSymlink:
+if err = os.Symlink(
+header.Linkname,
+work.Append(header.Name).String(),
+); err != nil {
+return
+}
+break
+
+case tar.TypeDir:
+pathname := work.Append(header.Name)
+madeDirectories = append(madeDirectories, dirTargetPerm{
+path: pathname,
+mode: header.FileInfo().Mode(),
+})
+if err = os.MkdirAll(
+pathname.String(),
+0700,
+); err != nil {
+return
+}
+break
+
+case tar.TypeXGlobalHeader:
+// ignore
+break
+
+default:
+return DisallowedTypeflagError(typeflag)
 }
-err = os.Chmod(work.String(), 0500)
+break
 }
-return
-}, &a.checksum, false)
-if err != nil {
-return nil, err
+}
+if errors.Is(err, io.EOF) {
+err = nil
 }
 
-a.pathname = pathname
-return pathname, nil
+if err == nil {
+for _, e := range madeDirectories {
+if err = os.Chmod(e.path.String(), e.mode&0500); err != nil {
+return
+}
+}
+err = os.Chmod(work.String(), 0500)
+}
+return
 }
@@ -79,7 +79,7 @@ func TestTar(t *testing.T) {
 return pkg.ID(h.Sum(nil))
 }()
 
-a, err := c.NewHTTPGetTar(
+a, err := pkg.NewHTTPGetTar(
 t.Context(),
 &client,
 "file:///testdata",
@@ -89,27 +89,27 @@ func TestTar(t *testing.T) {
 
 if err != nil {
 t.Fatalf("NewHTTPGetTar: error = %v", err)
-} else if id := a.ID(); id != wantIdent {
-t.Fatalf("ID: %s, want %s", pkg.Encode(id), pkg.Encode(wantIdent))
+} else if id := pkg.Ident(a); id != wantIdent {
+t.Fatalf("Ident: %s, want %s", pkg.Encode(id), pkg.Encode(wantIdent))
 }
 
-var pathname *check.Absolute
+var (
+pathname *check.Absolute
+checksum pkg.Checksum
+)
 wantPathname := base.Append(
 "identifier",
 pkg.Encode(wantIdent),
 )
-if pathname, err = a.Pathname(); err != nil {
-t.Fatalf("Pathname: error = %v", err)
+wantChecksum := pkg.MustDecode(
+"yJlSb2A3jxaMLuKqwp1GwHOguAHddS9MjygF9ICEeegKfRvgLPdPmNh8mva47f8o",
+)
+if pathname, checksum, err = c.Cure(a); err != nil {
+t.Fatalf("Cure: error = %v", err)
 } else if !pathname.Is(wantPathname) {
-t.Fatalf("Pathname: %q, want %q", pathname, wantPathname)
-}
-
-var checksum pkg.Checksum
-wantChecksum := pkg.MustDecode("yJlSb2A3jxaMLuKqwp1GwHOguAHddS9MjygF9ICEeegKfRvgLPdPmNh8mva47f8o")
-if checksum, err = a.Hash(); err != nil {
-t.Fatalf("Hash: error = %v", err)
+t.Fatalf("Cure: %q, want %q", pathname, wantPathname)
 } else if checksum != wantChecksum {
-t.Fatalf("Hash: %v", &pkg.ChecksumMismatchError{
+t.Fatalf("Cure: %v", &pkg.ChecksumMismatchError{
 Got: checksum,
 Want: wantChecksum,
 })