internal/pkg: cache computed identifiers
All checks were successful
Test / Create distribution (push) Successful in 49s
Test / Sandbox (push) Successful in 3m1s
Test / ShareFS (push) Successful in 4m56s
Test / Sandbox (race detector) (push) Successful in 5m21s
Test / Hpkg (push) Successful in 5m30s
Test / Hakurei (push) Successful in 5m53s
Test / Hakurei (race detector) (push) Successful in 7m56s
Test / Flake checks (push) Successful in 1m57s
This eliminates duplicate identifier computations. The new implementation also significantly reduces allocations when computing the identifier of a large dependency tree.

Signed-off-by: Ophestra <cat@gensokyo.uk>
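The caching change itself lives in the package code; the diff below only updates its test. As a rough, minimal sketch of the idea, assuming hypothetical names (artifact, ID, Ident, computeIdent) that are not taken from this repository: each artifact can memoise its identifier the first time it is computed, so a dependency shared by many parents in a large tree is hashed once rather than once per parent. A cache stored on the value would also be consistent with the test now passing pointers such as &tarDir.

package main

import (
	"crypto/sha512"
	"fmt"
	"sync"
)

// ID stands in for the package's identifier type; it is assumed here to be
// a SHA-384 digest, matching the sha512.New384 use in the test below.
type ID [sha512.Size384]byte

// artifact is a hypothetical node in a dependency tree.
type artifact struct {
	kind   byte
	params []byte
	deps   []*artifact

	identOnce sync.Once // guards the cached identifier
	ident     ID        // cached result of computeIdent
}

// computeIdent hashes the kind tag, the parameters and the identifiers of
// all dependencies into a single SHA-384 digest.
func (a *artifact) computeIdent() ID {
	h := sha512.New384()
	h.Write([]byte{a.kind, 0, 0, 0, 0, 0, 0, 0}) // 8-byte kind tag, as written in the test
	h.Write(a.params)
	for _, dep := range a.deps {
		id := dep.Ident() // reuses the dependency's cached identifier
		h.Write(id[:])
	}
	var id ID
	h.Sum(id[:0]) // append into the fixed-size array so no result slice is allocated
	return id
}

// Ident returns the artifact identifier, computing it at most once.
// Callers must share a pointer to the same artifact value for the
// memoisation to take effect.
func (a *artifact) Ident() ID {
	a.identOnce.Do(func() { a.ident = a.computeIdent() })
	return a.ident
}

func main() {
	leaf := &artifact{kind: 1, params: []byte("file:///testdata")}
	root := &artifact{kind: 2, params: []byte("tar"), deps: []*artifact{leaf, leaf}}
	// The shared leaf is hashed once and its cached identifier reused.
	fmt.Printf("%x\n", root.Ident())
}

Keeping the digest on the artifact rather than in a global map is one way to avoid both the repeated hashing and the map allocations; whether the actual implementation does it this way is not shown in this diff.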
@@ -101,8 +101,11 @@ func checkTarHTTP(
 		h.Write([]byte{byte(pkg.KindTar), 0, 0, 0, 0, 0, 0, 0})
 		h.Write([]byte{pkg.TarGzip, 0, 0, 0, 0, 0, 0, 0})
 		h.Write([]byte{byte(pkg.KindHTTPGet), 0, 0, 0, 0, 0, 0, 0})
-		httpIdent := pkg.KindHTTPGet.Ident([]byte("file:///testdata"))
-		h.Write(httpIdent[:])
+
+		h0 := sha512.New384()
+		h0.Write([]byte{byte(pkg.KindHTTPGet), 0, 0, 0, 0, 0, 0, 0})
+		h0.Write([]byte("file:///testdata"))
+		h.Write(h0.Sum(nil))
 		return pkg.ID(h.Sum(nil))
 	}()

@@ -113,10 +116,6 @@ func checkTarHTTP(
 		pkg.TarGzip,
 	)

-	if id := pkg.Ident(a); id != wantIdent {
-		t.Fatalf("Ident: %s, want %s", pkg.Encode(id), pkg.Encode(wantIdent))
-	}
-
 	tarDir := stubArtifact{
 		kind:   pkg.KindExec,
 		params: []byte("directory containing a single regular file"),
@@ -164,9 +163,9 @@ func checkTarHTTP(
 		},
 	}
 	// destroy these to avoid including it in flatten test case
-	defer newDestroyArtifactFunc(tarDir)(t, base, c)
-	defer newDestroyArtifactFunc(tarDirMulti)(t, base, c)
-	defer newDestroyArtifactFunc(tarDirType)(t, base, c)
+	defer newDestroyArtifactFunc(&tarDir)(t, base, c)
+	defer newDestroyArtifactFunc(&tarDirMulti)(t, base, c)
+	defer newDestroyArtifactFunc(&tarDirType)(t, base, c)

 	cureMany(t, c, []cureStep{
 		{"file", a, base.Append(
@@ -175,25 +174,25 @@ func checkTarHTTP(
 		), wantChecksum, nil},

 		{"directory", pkg.NewTar(
-			tarDir,
+			&tarDir,
 			pkg.TarGzip,
 		), ignorePathname, wantChecksum, nil},

 		{"multiple entries", pkg.NewTar(
-			tarDirMulti,
+			&tarDirMulti,
 			pkg.TarGzip,
 		), nil, pkg.Checksum{}, errors.New(
 			"input directory does not contain a single regular file",
 		)},

 		{"bad type", pkg.NewTar(
-			tarDirType,
+			&tarDirType,
 			pkg.TarGzip,
 		), nil, pkg.Checksum{}, errors.New(
 			"input directory does not contain a single regular file",
 		)},

-		{"error passthrough", pkg.NewTar(stubArtifact{
+		{"error passthrough", pkg.NewTar(&stubArtifact{
 			kind:   pkg.KindExec,
 			params: []byte("doomed artifact"),
 			cure: func(t *pkg.TContext) error {