internal/pkg: implement tar artifact
All checks were successful
Test / Create distribution (push) Successful in 43s
Test / Sandbox (push) Successful in 2m34s
Test / Hakurei (push) Successful in 3m36s
Test / ShareFS (push) Successful in 3m40s
Test / Hpkg (push) Successful in 4m13s
Test / Sandbox (race detector) (push) Successful in 4m57s
Test / Hakurei (race detector) (push) Successful in 5m53s
Test / Flake checks (push) Successful in 1m44s
This is useful for unpacking tarballs downloaded from the internet.

Signed-off-by: Ophestra <cat@gensokyo.uk>
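As a rough sketch of the intended call path (not part of this commit): a caller holding a *pkg.Cache and the expected Checksum of the download could fetch and cure a gzip-compressed tarball as below. The URL, the cache and checksum values, and the import path are placeholders; since internal/pkg is an internal package, such a snippet would have to live inside the hakurei.app module.

package example

import (
	"context"
	"fmt"
	"net/http"

	"hakurei.app/internal/pkg" // assumed import path for internal/pkg
)

// fetchAndUnpack downloads a gzip-compressed tarball with a known checksum
// and cures it into the cache.
func fetchAndUnpack(ctx context.Context, c *pkg.Cache, checksum pkg.Checksum) error {
	// Wrap the HTTP download in a tar artifact with gzip decompression on top.
	a, err := c.NewHTTPGetTar(ctx, http.DefaultClient,
		"https://example.org/src.tar.gz", // placeholder URL
		checksum, pkg.TarGzip)
	if err != nil {
		return err
	}

	// Pathname cures the artifact: the tarball is unpacked into the cache
	// and the pathname of the resulting read-only directory is returned.
	pathname, err := a.Pathname()
	if err != nil {
		return err
	}
	fmt.Println("unpacked to", pathname)
	return nil
}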
229 internal/pkg/tar.go Normal file
@@ -0,0 +1,229 @@
package pkg

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"context"
	"encoding/binary"
	"errors"
	"io"
	"io/fs"
	"net/http"
	"os"
	"sync"

	"hakurei.app/container/check"
)

const (
	// TarUncompressed denotes an uncompressed tarball.
	TarUncompressed = iota
	// TarGzip denotes a tarball compressed via [gzip].
	TarGzip
)

// A tarArtifact is an [Artifact] unpacking a tarball backed by a [File].
type tarArtifact struct {
	// Computed ahead of time from the checksum of the identifier of f appended
	// with parameters of tarArtifact.
	id ID

	// Caller-supplied backing tarball.
	f File
	// Compression on top of the tarball.
	compression uint64

	// Populated when submitting to or loading from [Cache].
	pathname *check.Absolute
	// Checksum of cured directory. Valid if pathname is not nil.
	checksum Checksum

	// Instance of [Cache] to submit the cured artifact to.
	c *Cache
	// Protects the Pathname critical section.
	mu sync.Mutex
}

// NewTar returns a new [Artifact] backed by the supplied [File] and
// compression method.
func (c *Cache) NewTar(f File, compression uint64) Artifact {
	return &tarArtifact{id: KindTar.Ident(
		binary.LittleEndian.AppendUint64(nil, compression), f,
	), f: f, compression: compression, c: c}
}

// NewHTTPGetTar is an abbreviation for NewHTTPGet passed to NewTar.
func (c *Cache) NewHTTPGetTar(
	ctx context.Context,
	hc *http.Client,
	url string,
	checksum Checksum,
	compression uint64,
) (Artifact, error) {
	f, err := c.NewHTTPGet(ctx, hc, url, checksum)
	if err != nil {
		return nil, err
	}
	return c.NewTar(f, compression), nil
}

// Kind returns the hardcoded [Kind] constant.
func (a *tarArtifact) Kind() Kind { return KindTar }

// ID returns the identifier prepared ahead of time.
func (a *tarArtifact) ID() ID { return a.id }

// Hash cures the [Artifact] and returns its hash.
func (a *tarArtifact) Hash() (Checksum, error) {
	_, err := a.Pathname()
	return a.checksum, err
}

// A DisallowedTypeflagError describes a disallowed typeflag encountered while
// unpacking a tarball.
type DisallowedTypeflagError byte

func (e DisallowedTypeflagError) Error() string {
	return "disallowed typeflag '" + string(e) + "'"
}

// Pathname cures the [Artifact] and returns its pathname in the [Cache].
func (a *tarArtifact) Pathname() (*check.Absolute, error) {
	a.mu.Lock()
	defer a.mu.Unlock()

	if a.pathname != nil {
		return a.pathname, nil
	}

	pathname, _, err := a.c.Store(a.id, func(work *check.Absolute) (err error) {
		var tr io.ReadCloser

		{
			var data []byte
			data, err = a.f.Data()
			if err != nil {
				return
			}
			tr = io.NopCloser(bytes.NewReader(data))
		}

		defer func() {
			closeErr := tr.Close()
			if err == nil {
				err = closeErr
			}
		}()

		switch a.compression {
		case TarUncompressed:
			break

		case TarGzip:
			if tr, err = gzip.NewReader(tr); err != nil {
				return
			}
			break

		default:
			return os.ErrInvalid
		}

		type dirTargetPerm struct {
			path *check.Absolute
			mode fs.FileMode
		}
		var madeDirectories []dirTargetPerm

		var header *tar.Header
		r := tar.NewReader(tr)
		for header, err = r.Next(); err == nil; header, err = r.Next() {
			typeflag := header.Typeflag
			for {
				switch typeflag {
				case 0:
					if len(header.Name) > 0 && header.Name[len(header.Name)-1] == '/' {
						typeflag = tar.TypeDir
					} else {
						typeflag = tar.TypeReg
					}
					continue

				case tar.TypeReg:
					var f *os.File
					if f, err = os.OpenFile(
						work.Append(header.Name).String(),
						os.O_CREATE|os.O_EXCL|os.O_WRONLY,
						header.FileInfo().Mode()&0400,
					); err != nil {
						return
					}
					if _, err = io.Copy(f, r); err != nil {
						_ = f.Close()
						return
					} else if err = f.Close(); err != nil {
						return
					}
					break

				case tar.TypeLink:
					if err = os.Link(
						header.Linkname,
						work.Append(header.Name).String(),
					); err != nil {
						return
					}
					break

				case tar.TypeSymlink:
					if err = os.Symlink(
						header.Linkname,
						work.Append(header.Name).String(),
					); err != nil {
						return
					}
					break

				case tar.TypeDir:
					pathname := work.Append(header.Name)
					madeDirectories = append(madeDirectories, dirTargetPerm{
						path: pathname,
						mode: header.FileInfo().Mode(),
					})
					if err = os.MkdirAll(
						pathname.String(),
						0700,
					); err != nil {
						return
					}
					break

				default:
					return DisallowedTypeflagError(typeflag)
				}

				break
			}
		}
		if errors.Is(err, io.EOF) {
			err = nil
		}

		if err == nil {
			for _, e := range madeDirectories {
				if err = os.Chmod(e.path.String(), e.mode&0500); err != nil {
					return
				}
			}
			err = os.Chmod(work.String(), 0500)
		}
		return
	}, &a.checksum, false)
	if err != nil {
		return nil, err
	}

	a.pathname = pathname
	return pathname, nil
}
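Unsupported entry types (FIFOs, device nodes and other typeflags not handled above) abort the unpack with a DisallowedTypeflagError. A caller that wants to report these specifically could match on the error type; a small sketch extending the example above (it additionally imports "errors", and assumes the callback error reaches the caller unwrapped or wrapped with %w):

// reportDisallowed cures a and surfaces tarball entries the unpacker refuses.
func reportDisallowed(a pkg.Artifact) error {
	_, err := a.Pathname()
	var typeflagErr pkg.DisallowedTypeflagError
	if errors.As(err, &typeflagErr) {
		return fmt.Errorf("tarball contains a disallowed entry: %w", typeflagErr)
	}
	return err
}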