copy: generalise store

This commit is contained in:
Ophestra 2025-07-23 11:43:14 +09:00
parent e448541464
commit e0278a6d7d
Signed by: cat
SSH Key Fingerprint: SHA256:wr6yH7sDDbUFi81k/GsIGwpM3O2QrwqYlLF26CcJa4w
4 changed files with 102 additions and 72 deletions

39
copy.go
View File

@ -2,43 +2,12 @@ package nix
import ( import (
"context" "context"
"fmt"
"iter" "iter"
"os" "os"
) )
const ( // Copy copies installables to the binary cache store.
EnvAwsSharedCredentialsFile = "AWS_SHARED_CREDENTIALS_FILE" func Copy(ctx Context, store Store, installables iter.Seq[string]) error {
)
// A BinaryCache holds credentials and parameters to a s3 binary cache.
type BinaryCache struct {
// Compression is the name of the compression algorithm to use. Example: "zstd".
Compression string `json:"compression"`
// ParallelCompression determines whether parallel compression is enabled.
ParallelCompression bool `json:"parallel_compression,omitempty"`
// Bucket is the s3 bucket name.
Bucket string `json:"bucket"`
// Endpoint is the s3 endpoint. Example: "s3.example.org".
Endpoint string `json:"endpoint,omitempty"`
// Region is the s3 region. Example: "ap-northeast-1".
Region string `json:"region"`
// Scheme is the s3 protocol. Example: "https".
Scheme string `json:"scheme"`
// CredentialsPath is the path to the s3 shared credentials file.
CredentialsPath string `json:"credentials_path"`
}
func (store *BinaryCache) String() string {
return fmt.Sprintf(
"s3://%s?compression=%s&parallel-compression=%t&region=%s&scheme=%s&endpoint=%s",
store.Bucket, store.Compression, store.ParallelCompression, store.Region, store.Scheme, store.Endpoint,
)
}
// Copy copies installables to the binary cache store, signing all paths using the key at keyPath.
func Copy(ctx Context, keyPath string, store *BinaryCache, installables iter.Seq[string]) error {
if store == nil { if store == nil {
return os.ErrInvalid return os.ErrInvalid
} }
@ -47,9 +16,9 @@ func Copy(ctx Context, keyPath string, store *BinaryCache, installables iter.Seq
defer cancel() defer cancel()
cmd := ctx.Nix(c, CommandCopy, cmd := ctx.Nix(c, CommandCopy,
FlagTo, store.String()+"&secret-key="+keyPath, FlagTo, store.String(),
FlagStdin) FlagStdin)
cmd.Env = append(os.Environ(), EnvAwsSharedCredentialsFile+"="+store.CredentialsPath) cmd.Env = append(os.Environ(), store.Environ()...)
cmd.Stdout, cmd.Stderr = ctx.Streams() cmd.Stdout, cmd.Stderr = ctx.Streams()
_, err := ctx.WriteStdin(cmd, installables, nil) _, err := ctx.WriteStdin(cmd, installables, nil)

View File

@ -36,46 +36,10 @@ func init() {
}) })
} }
func TestBinaryCache(t *testing.T) {
testCases := []struct {
name string
store *nix.BinaryCache
want string
}{
{"example", &nix.BinaryCache{
Compression: "none",
ParallelCompression: false,
Bucket: "example",
Endpoint: "s3.example.org",
Region: "us-east-1",
Scheme: "http",
CredentialsPath: "/dev/null",
}, "s3://example?compression=none&parallel-compression=false&region=us-east-1&scheme=http&endpoint=s3.example.org"},
{"gensokyo", &nix.BinaryCache{
Compression: "zstd",
ParallelCompression: true,
Bucket: "nix-cache",
Endpoint: "s3.gensokyo.uk",
Region: "ap-northeast-1",
Scheme: "https",
CredentialsPath: "/var/lib/persist/cache/s3",
}, "s3://nix-cache?compression=zstd&parallel-compression=true&region=ap-northeast-1&scheme=https&endpoint=s3.gensokyo.uk"},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
if got := tc.store.String(); got != tc.want {
t.Errorf("String: %q, want %q", got, tc.want)
}
})
}
}
func TestCopy(t *testing.T) { func TestCopy(t *testing.T) {
stubNixCommand(t) stubNixCommand(t)
if err := nix.Copy( if err := nix.Copy(
newStubContext(t.Context(), nil, os.Stdout, os.Stderr), newStubContext(t.Context(), nil, os.Stdout, os.Stderr),
nonexistent,
&nix.BinaryCache{ &nix.BinaryCache{
Compression: "none", Compression: "none",
ParallelCompression: false, ParallelCompression: false,
@ -84,6 +48,7 @@ func TestCopy(t *testing.T) {
Region: "us-east-1", Region: "us-east-1",
Scheme: "http", Scheme: "http",
CredentialsPath: "/dev/null", CredentialsPath: "/dev/null",
KeyPath: nonexistent,
}, },
slices.Values(instWant["pluiedev pappardelle"]), slices.Values(instWant["pluiedev pappardelle"]),
); err != nil { ); err != nil {
@ -93,7 +58,6 @@ func TestCopy(t *testing.T) {
t.Run("nil store", func(t *testing.T) { t.Run("nil store", func(t *testing.T) {
if err := nix.Copy( if err := nix.Copy(
newStubContext(t.Context(), nil, os.Stdout, os.Stderr), newStubContext(t.Context(), nil, os.Stdout, os.Stderr),
nonexistent,
nil, nil,
nil, nil,
); !errors.Is(err, os.ErrInvalid) { ); !errors.Is(err, os.ErrInvalid) {

50
store.go Normal file
View File

@ -0,0 +1,50 @@
package nix
import (
"fmt"
"strings"
)
// A Store describes a destination nix store: its String method yields the
// store URI handed to `nix copy --to`, and Environ yields any extra process
// environment the nix invocation needs to reach it.
type Store interface {
	// Environ returns extra environment variables specified by Store.
	Environ() []string

	fmt.Stringer
}
const (
	// EnvAwsSharedCredentialsFile is the name of the environment variable
	// pointing at the s3 shared credentials file; BinaryCache.Environ sets
	// it to the configured CredentialsPath.
	EnvAwsSharedCredentialsFile = "AWS_SHARED_CREDENTIALS_FILE"
)
// A BinaryCache holds credentials and parameters to a s3 binary cache.
// It implements Store: String renders the "s3://…" store URI (including the
// signing key as the secret-key query parameter) and Environ points the AWS
// SDK at the shared credentials file.
type BinaryCache struct {
	// Compression is the name of the compression algorithm to use. Example: "zstd".
	Compression string `json:"compression"`
	// ParallelCompression determines whether parallel compression is enabled.
	ParallelCompression bool `json:"parallel_compression,omitempty"`

	// Bucket is the s3 bucket name.
	Bucket string `json:"bucket"`
	// Endpoint is the s3 endpoint. Example: "s3.example.org".
	Endpoint string `json:"endpoint,omitempty"`
	// Region is the s3 region. Example: "ap-northeast-1".
	Region string `json:"region"`
	// Scheme is the s3 protocol. Example: "https".
	Scheme string `json:"scheme"`

	// CredentialsPath is the path to the s3 shared credentials file.
	CredentialsPath string `json:"credentials_path"`
	// KeyPath is the path to the nix secret key for signing all newly copied paths.
	KeyPath string `json:"key_path"`
}
// Environ returns the extra environment required to reach the s3 bucket:
// a single entry pointing AWS_SHARED_CREDENTIALS_FILE at CredentialsPath.
// The path is whitespace-trimmed before use (presumably to tolerate
// trailing newlines in values read from files).
func (store *BinaryCache) Environ() []string {
	credentialsPath := strings.TrimSpace(store.CredentialsPath)
	env := make([]string, 0, 1)
	env = append(env, EnvAwsSharedCredentialsFile+"="+credentialsPath)
	return env
}
// String returns the s3 store URI for this binary cache, carrying the
// compression, region, scheme, endpoint and signing-key settings as query
// parameters. Values are interpolated verbatim, not URL-escaped, so paths
// such as KeyPath appear literally in the URI.
func (store *BinaryCache) String() string {
	var uri strings.Builder
	uri.WriteString("s3://")
	uri.WriteString(store.Bucket)
	fmt.Fprintf(&uri,
		"?compression=%s&parallel-compression=%t&region=%s&scheme=%s&endpoint=%s&secret-key=%s",
		store.Compression, store.ParallelCompression,
		store.Region, store.Scheme, store.Endpoint, store.KeyPath,
	)
	return uri.String()
}

47
store_test.go Normal file
View File

@ -0,0 +1,47 @@
package nix_test
import (
	"slices"
	"testing"

	"gensokyo.uk/nix"
)
// TestBinaryCache checks both the store URI rendered by BinaryCache.String
// and the process environment produced by BinaryCache.Environ.
func TestBinaryCache(t *testing.T) {
	testCases := []struct {
		name    string
		store   *nix.BinaryCache
		want    string
		wantEnv []string
	}{
		{"example", &nix.BinaryCache{
			Compression:         "none",
			ParallelCompression: false,
			Bucket:              "example",
			Endpoint:            "s3.example.org",
			Region:              "us-east-1",
			Scheme:              "http",
			CredentialsPath:     "/dev/null",
			KeyPath:             nonexistent,
		}, "s3://example?compression=none&parallel-compression=false&region=us-east-1&scheme=http&endpoint=s3.example.org&secret-key=/proc/nonexistent",
			[]string{nix.EnvAwsSharedCredentialsFile + "=/dev/null"}},

		{"gensokyo", &nix.BinaryCache{
			Compression:         "zstd",
			ParallelCompression: true,
			Bucket:              "nix-cache",
			Endpoint:            "s3.gensokyo.uk",
			Region:              "ap-northeast-1",
			Scheme:              "https",
			CredentialsPath:     "/var/lib/persist/cache/s3",
			KeyPath:             "/var/lib/persist/cache/key",
		}, "s3://nix-cache?compression=zstd&parallel-compression=true&region=ap-northeast-1&scheme=https&endpoint=s3.gensokyo.uk&secret-key=/var/lib/persist/cache/key",
			[]string{nix.EnvAwsSharedCredentialsFile + "=/var/lib/persist/cache/s3"}},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			if got := tc.store.String(); got != tc.want {
				t.Errorf("String: %q, want %q", got, tc.want)
			}
			// wantEnv was previously populated but never asserted; verify
			// Environ so CredentialsPath handling is actually covered.
			if gotEnv := tc.store.Environ(); !slices.Equal(gotEnv, tc.wantEnv) {
				t.Errorf("Environ: %q, want %q", gotEnv, tc.wantEnv)
			}
		})
	}
}