common

package
v0.6.8
Published: Apr 20, 2024 License: Apache-2.0 Imports: 28 Imported by: 0

Documentation

Index

Constants

const (

	// TOCTarName is the name of the JSON file in the tar archive in the
	// table of contents gzip stream.
	TOCTarName = "stargz.index.json"

	// FooterSize is the number of bytes in the footer
	//
	// The footer is an empty gzip stream with no compression and an Extra
	// header of the form "%016xSTARGZ", where the 64 bit hex-encoded
	// number is the offset to the gzip stream of JSON TOC.
	//
	// 51 comes from:
	//
	// 10 bytes  gzip header
	// 2  bytes  XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ"))
	// 2  bytes  Extra: SI1 = 'S', SI2 = 'G'
	// 2  bytes  Extra: LEN = 22 (16 hex digits + len("STARGZ"))
	// 22 bytes  Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC)
	// 5  bytes  flate header
	// 8  bytes  gzip footer
	// (End of the eStargz blob)
	//
	// NOTE: For Extra fields, subfield IDs SI1='S' SI2='G' are used for eStargz.
	FooterSize = 51

	// TOCJSONDigestAnnotation is an annotation for an image layer. This stores the
	// digest of the TOC JSON.
	// This annotation is valid only when it is specified in `.[]layers.annotations`
	// of an image manifest.
	//
	// This is not needed in Starlight.
	//
	TOCJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"

	// PrefetchLandmark is a file entry which indicates the end position of
	// prefetch in the stargz file.
	PrefetchLandmark = ".prefetch.landmark"

	// NoPrefetchLandmark is a file entry which indicates that no prefetch should
	// occur in the stargz file.
	NoPrefetchLandmark = ".no.prefetch.landmark"

	// EmptyFileHash is the SHA-256 digest of empty (zero-byte) content.
	EmptyFileHash = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
)
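The 51-byte footer described above can be produced with the standard library alone. A minimal sketch, assuming only the layout documented in the comment (footerBytes is an illustrative helper, not an API of this package):

package main

import (
	"bytes"
	"compress/gzip"
	"encoding/binary"
	"fmt"
)

// footerBytes builds the 51-byte eStargz footer: an empty, uncompressed
// gzip stream whose Extra field carries SI1='S', SI2='G' and the
// subfield fmt.Sprintf("%016xSTARGZ", tocOffset).
func footerBytes(tocOffset int64) []byte {
	buf := bytes.NewBuffer(make([]byte, 0, 51))
	gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) // level is valid, error is always nil

	subfield := fmt.Sprintf("%016xSTARGZ", tocOffset) // 22 bytes
	extra := make([]byte, 4+len(subfield))
	extra[0], extra[1] = 'S', 'G' // subfield IDs SI1, SI2
	binary.LittleEndian.PutUint16(extra[2:4], uint16(len(subfield))) // LEN = 22, little-endian per RFC 1952
	copy(extra[4:], subfield)
	gz.Header.Extra = extra

	gz.Close() // flushes the 5-byte flate header and 8-byte gzip footer
	return buf.Bytes()
}

func main() {
	fmt.Println(len(footerBytes(0x1234))) // 51, i.e. FooterSize
}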

Variables

var (
	ErrNotImplemented           = errors.New("this feature has not yet been implemented")
	ErrLayerNotFound            = errors.New("cannot find layer")
	ErrMountingPointNotFound    = errors.New("cannot find mounting point")
	ErrNotConsolidated          = errors.New("delta image has not yet been consolidated")
	ErrAlreadyConsolidated      = errors.New("delta image has been consolidated already")
	ErrHashCollision            = errors.New("found two files with the same hash but different sizes")
	ErrMergedImageNotFound      = errors.New("the requested image has not been merged")
	ErrWrongImageFormat         = errors.New("please use this format <image>:<tag>")
	ErrOrphanNode               = errors.New("an entry node has no parent")
	ErrNoRoPath                 = errors.New("entry does not have path to RO layer")
	ErrImageNotFound            = errors.New("cannot find image")
	ErrNoManager                = errors.New("no manager found")
	ErrUnknownSnapshotParameter = errors.New("snapshots should follow a standard format")
	ErrTocUnknown               = errors.New("please prefetch the delta image")
)

Functions

func CompareByFilename

func CompareByFilename(a, b *OptimizedTraceableEntry) int

func ErrorAggregate

func ErrorAggregate(errs []error) error

ErrorAggregate combines a list of errors into a single new error.
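A brief usage sketch (the import path is an assumption; the sentinel errors are the package's own):

package main

import (
	"fmt"

	"github.com/mc256/starlight/util/common" // assumed import path
)

func main() {
	errs := []error{common.ErrLayerNotFound, common.ErrImageNotFound}
	if err := common.ErrorAggregate(errs); err != nil {
		fmt.Println(err) // a single error combining both messages
	}
}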

func OpenFooter

func OpenFooter(sr *io.SectionReader) (tocOffset int64, footerSize int64, rErr error)

OpenFooter extracts and parses the footer from the given blob.
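For example, the footer of an on-disk blob can be located like this (the file name and import path are assumptions):

package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"github.com/mc256/starlight/util/common" // assumed import path
)

func main() {
	f, err := os.Open("layer.stargz") // hypothetical eStargz blob
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	fi, err := f.Stat()
	if err != nil {
		log.Fatal(err)
	}

	sr := io.NewSectionReader(f, 0, fi.Size())
	tocOffset, footerSize, err := common.OpenFooter(sr)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("TOC gzip stream at offset %d; footer is %d bytes\n", tocOffset, footerSize)
}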

Types

type ByFilename

type ByFilename []*OptimizedTraceableEntry

func (ByFilename) Len

func (bfn ByFilename) Len() int

func (ByFilename) Less

func (bfn ByFilename) Less(i, j int) bool

func (ByFilename) Swap

func (bfn ByFilename) Swap(i, j int)

type ByHashSize

type ByHashSize []*OptimizedTraceableEntry

func (ByHashSize) Len

func (bhs ByHashSize) Len() int

func (ByHashSize) Less

func (bhs ByHashSize) Less(i, j int) bool

func (ByHashSize) Swap

func (bhs ByHashSize) Swap(i, j int)

type ByRanking

type ByRanking []*OptimizedTraceableEntries

func (ByRanking) Len

func (br ByRanking) Len() int

func (ByRanking) Less

func (br ByRanking) Less(i, j int) bool

func (ByRanking) Swap

func (br ByRanking) Swap(i, j int)
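The By* types plug into the standard sort package. A small sketch, with entries synthesized via MakeEmptyFile and ExtendEntry (the import path is an assumption); CompareByFilename has the signature expected by slices.SortFunc and presumably provides the same ordering:

package main

import (
	"fmt"
	"sort"

	"github.com/mc256/starlight/util/common" // assumed import path
)

func main() {
	entries := []*common.OptimizedTraceableEntry{
		{TraceableEntry: common.ExtendEntry(common.MakeEmptyFile("etc/passwd"))},
		{TraceableEntry: common.ExtendEntry(common.MakeEmptyFile("etc/hosts"))},
	}

	sort.Sort(common.ByFilename(entries)) // order entries by file name
	for _, e := range entries {
		fmt.Println(e.Name)
	}
}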

type CacheInterface

type CacheInterface interface {
	Digest() name.Digest
	Size() int64
}

type DeltaImageMetadata added in v0.5.0

type DeltaImageMetadata struct {
	ManifestSize        int64
	ConfigSize          int64
	StarlightHeaderSize int64
	ContentLength       int64
	OriginalLength      int64

	Digest          string
	StarlightDigest string
}

type LayerCache

type LayerCache struct {
	Buffer *io.SectionReader

	Mutex      sync.Mutex
	Ready      bool
	UseCounter int
	LastUsed   time.Time
	// contains filtered or unexported fields
}

func NewLayerCache

func NewLayerCache(layer CacheInterface) *LayerCache

func (*LayerCache) Load

func (lc *LayerCache) Load(ctx context.Context) (err error)

func (*LayerCache) SetReady

func (lc *LayerCache) SetReady(err error)

func (*LayerCache) String

func (lc *LayerCache) String() string

func (*LayerCache) Subscribe

func (lc *LayerCache) Subscribe(errChan *chan error)
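A hedged sketch of the assumed usage pattern: one goroutine loads the layer while other readers subscribe and block until the cache is ready. The exact Subscribe/SetReady semantics are not documented here, so treat this as an assumption:

package layers

import (
	"context"
	"log"

	"github.com/mc256/starlight/util/common" // assumed import path
)

// waitForLayer loads a layer once and blocks until it is ready.
func waitForLayer(ctx context.Context, layer common.CacheInterface) error {
	lc := common.NewLayerCache(layer)

	errChan := make(chan error)
	lc.Subscribe(&errChan) // assumed: the channel receives the load result

	go func() {
		if err := lc.Load(ctx); err != nil {
			log.Printf("loading %s: %v", lc, err)
		}
	}()

	return <-errChan // assumed: nil once lc.Buffer can serve reads
}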

type OptimizedTraceableEntries

type OptimizedTraceableEntries struct {
	List    []*OptimizedTraceableEntry
	Ranking float64
}

type OptimizedTraceableEntry

type OptimizedTraceableEntry struct {
	*TraceableEntry

	// ------------------------------------
	// SourceImage starts from 1, not 0.
	// Indexes 0 and -1 are reserved for special purposes.
	SourceImage int `json:"si,omitempty"`

	// AccessCount records the number of accesses during start-up.
	AccessCount int `json:"ac,omitempty"`
	// SumRank accumulates the entry's rank across traces.
	SumRank int `json:"sr,omitempty"`
	// SumSquaredRank accumulates the squared rank across traces.
	SumSquaredRank float64 `json:"sr2,omitempty"`
}


func (*OptimizedTraceableEntry) AddRanking

func (ote *OptimizedTraceableEntry) AddRanking(ranking int)

func (*OptimizedTraceableEntry) ComputeRank

func (ote *OptimizedTraceableEntry) ComputeRank() float64

func (*OptimizedTraceableEntry) Key

func (ote *OptimizedTraceableEntry) Key() string
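A sketch of the assumed accumulation flow: AddRanking is fed the entry's position in each collected startup trace, and ComputeRank condenses the accumulators into a score (the exact formula is internal; import path and paths are assumptions):

package main

import (
	"fmt"

	"github.com/mc256/starlight/util/common" // assumed import path
)

func main() {
	ote := &common.OptimizedTraceableEntry{
		TraceableEntry: common.ExtendEntry(common.MakeEmptyFile("bin/sh")),
	}

	// This file appeared 3rd, 5th, and 4th in three startup traces.
	for _, rank := range []int{3, 5, 4} {
		ote.AddRanking(rank)
	}

	fmt.Println(ote.Key(), ote.ComputeRank())
}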

type Reader

type Reader struct {
	// contains filtered or unexported fields
}

A Reader permits random access reads from a stargz file.

func OpenStargz

func OpenStargz(sr *io.SectionReader) (*Reader, error)

OpenStargz opens a stargz file for reading.

Note that each entry name is normalized as the path that is relative to root.

func (*Reader) ChunkEntryForOffset

func (r *Reader) ChunkEntryForOffset(name string, offset int64) (e *TOCEntry, ok bool)

ChunkEntryForOffset returns the TOCEntry containing the byte of the named file at the given offset within the file. Name must be an absolute path or one relative to the root.

func (*Reader) GetTOC

func (r *Reader) GetTOC() (m map[string]*TOCEntry, chunks map[string][]*TOCEntry, json *jtoc)

func (*Reader) Lookup

func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool)

Lookup returns the Table of Contents entry for the given path.

To get the root directory, use the empty string. Path must be an absolute path or one relative to the root.

func (*Reader) OpenFile

func (r *Reader) OpenFile(name string) (*io.SectionReader, error)

OpenFile returns the reader of the specified file payload.

Name must be an absolute path or one relative to the root.

func (*Reader) VerifyTOC

func (r *Reader) VerifyTOC(tocDigest digest.Digest) (TOCEntryVerifier, error)

VerifyTOC checks that the TOC JSON in the passed blob matches the passed digest and that the TOC JSON contains digests for all chunks contained in the blob. If the verification succeeds, this function returns a TOCEntryVerifier which holds all chunk digests in the stargz blob.
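Putting the Reader together, a minimal read path might look like this (blob name, entry path, and import path are assumptions):

package main

import (
	"io"
	"log"
	"os"

	"github.com/mc256/starlight/util/common" // assumed import path
)

func main() {
	f, err := os.Open("layer.stargz") // hypothetical eStargz blob
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	fi, err := f.Stat()
	if err != nil {
		log.Fatal(err)
	}

	r, err := common.OpenStargz(io.NewSectionReader(f, 0, fi.Size()))
	if err != nil {
		log.Fatal(err)
	}

	// Look up an entry by its root-relative path and stream its payload.
	if e, ok := r.Lookup("etc/os-release"); ok && e.IsDataType() {
		fr, err := r.OpenFile(e.Name)
		if err != nil {
			log.Fatal(err)
		}
		if _, err := io.Copy(os.Stdout, fr); err != nil {
			log.Fatal(err)
		}
	}
}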

type TOCEntry

type TOCEntry struct {
	// Name is the tar entry's name. It is the complete path
	// stored in the tar file, not just the base name.
	Name string `json:"name"`

	// Type is one of "dir", "reg", "symlink", "hardlink", "char",
	// "block", "fifo", or "chunk".
	// The "chunk" type is used for regular file data chunks past the first
	// TOCEntry; the 2nd chunk and on have only Type ("chunk"), Offset,
	// ChunkOffset, and ChunkSize populated.
	Type string `json:"type"`

	// Size, for regular files, is the logical size of the file.
	Size int64 `json:"size,omitempty"`

	// ModTime3339 is the modification time of the tar entry. Empty
	// means zero or unknown. Otherwise it's in UTC RFC3339
	// format. Use the ModTime method to access the time.Time value.
	ModTime3339 string `json:"modtime,omitempty"`

	// LinkName, for symlinks and hardlinks, is the link target.
	LinkName string `json:"linkName,omitempty"`

	// Mode is the permission and mode bits.
	Mode int64 `json:"mode,omitempty"`

	// UID is the user ID of the owner.
	UID int `json:"uid,omitempty"`

	// GID is the group ID of the owner.
	GID int `json:"gid,omitempty"`

	// Uname is the username of the owner.
	//
	// In the serialized JSON, this field may only be present for
	// the first entry with the same UID.
	Uname string `json:"userName,omitempty"`

	// Gname is the group name of the owner.
	//
	// In the serialized JSON, this field may only be present for
	// the first entry with the same GID.
	Gname string `json:"groupName,omitempty"`

	// Offset, for regular files, provides the offset in the
	// stargz file to the file's data bytes. See ChunkOffset and
	// ChunkSize.
	Offset int64 `json:"offset,omitempty"`

	// DevMajor is the major device number for "char" and "block" types.
	DevMajor int `json:"devMajor,omitempty"`

	// DevMinor is the minor device number for "char" and "block" types.
	DevMinor int `json:"devMinor,omitempty"`

	// NumLink is the number of entry names pointing to this entry.
	// Zero means one name references this entry.
	NumLink int

	// Xattrs are the extended attribute for the entry.
	Xattrs map[string][]byte `json:"xattrs,omitempty"`

	// Digest stores the OCI checksum for regular files payload.
	// It has the form "sha256:abcdef01234....".
	Digest string `json:"digest,omitempty"`

	// ChunkOffset is non-zero if this is a chunk of a large,
	// regular file. If so, the Offset is where the gzip header of
	// ChunkSize bytes at ChunkOffset in Name begins.
	//
	// In serialized form, a "chunkSize" JSON field of zero means
	// that the chunk goes to the end of the file. After reading
	// from the stargz TOC, though, the ChunkSize is initialized
	// to a non-zero value when Type is either "reg" or
	// "chunk".
	ChunkOffset int64 `json:"chunkOffset,omitempty"`
	ChunkSize   int64 `json:"chunkSize,omitempty"`

	// ChunkDigest stores an OCI digest of the chunk. This must be formed
	// as "sha256:0123abcd...".
	ChunkDigest string `json:"chunkDigest,omitempty"`

	CompressedSize int64 `json:"compressedSize,omitempty"`
	// contains filtered or unexported fields
}

TOCEntry is an entry in the stargz file's TOC (Table of Contents).

func MakeDir

func MakeDir(dirName string) (e *TOCEntry)

func MakeEmptyFile

func MakeEmptyFile(fileName string) (e *TOCEntry)

func MakeOpaqueWhiteoutFile

func MakeOpaqueWhiteoutFile(parentDir string) (e *TOCEntry)

func MakeWhiteoutFile

func MakeWhiteoutFile(baseName, parentDir string) (e *TOCEntry)

MakeWhiteoutFile creates a whiteout file entry. parentDir should include the trailing slash.

func (*TOCEntry) AddChild

func (e *TOCEntry) AddChild(baseName string, child *TOCEntry)

func (*TOCEntry) Children

func (e *TOCEntry) Children() map[string]*TOCEntry

func (*TOCEntry) CopyEntry

func (e *TOCEntry) CopyEntry() (c *TOCEntry)

func (*TOCEntry) ForeachChild

func (e *TOCEntry) ForeachChild(f func(baseName string, ent *TOCEntry) bool)

ForeachChild calls f for each child item. If f returns false, iteration ends. If e is not a directory, f is not called.
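For instance, a small tree can be assembled from the Make* constructors and walked with ForeachChild (paths and import path are assumptions):

package main

import (
	"fmt"

	"github.com/mc256/starlight/util/common" // assumed import path
)

func main() {
	dir := common.MakeDir("etc")
	dir.AddChild("hosts", common.MakeEmptyFile("etc/hosts"))
	dir.AddChild("motd", common.MakeEmptyFile("etc/motd"))

	// Visit every child; returning false would stop the iteration early.
	dir.ForeachChild(func(baseName string, ent *common.TOCEntry) bool {
		fmt.Println(baseName, ent.Type)
		return true
	})
}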

func (*TOCEntry) GetChild

func (e *TOCEntry) GetChild(baseName string) (*TOCEntry, bool)

func (*TOCEntry) GetSourceLayer

func (e *TOCEntry) GetSourceLayer() int

func (*TOCEntry) HasChild

func (e *TOCEntry) HasChild(baseName string) (r bool)

func (*TOCEntry) HasChunk

func (e *TOCEntry) HasChunk() bool

func (*TOCEntry) InitModTime

func (e *TOCEntry) InitModTime()

func (*TOCEntry) IsDataType

func (e *TOCEntry) IsDataType() bool

IsDataType reports whether TOCEntry is a regular file or chunk (something that contains regular file data).

func (*TOCEntry) IsDir

func (e *TOCEntry) IsDir() bool

func (*TOCEntry) IsLandmark

func (e *TOCEntry) IsLandmark() bool

func (*TOCEntry) IsMeta

func (e *TOCEntry) IsMeta() bool

func (*TOCEntry) IsRoot

func (e *TOCEntry) IsRoot() bool

func (*TOCEntry) IsWhiteoutFile

func (e *TOCEntry) IsWhiteoutFile() bool

func (*TOCEntry) Landmark

func (e *TOCEntry) Landmark() int

func (*TOCEntry) LookupChild

func (e *TOCEntry) LookupChild(baseName string) (child *TOCEntry, ok bool)

LookupChild returns the directory e's child by its base name.

func (*TOCEntry) ModTime

func (e *TOCEntry) ModTime() time.Time

ModTime returns the entry's modification time.

func (*TOCEntry) NextOffset

func (e *TOCEntry) NextOffset() int64

NextOffset returns the position (relative to the start of the stargz file) of the next gzip boundary after e.Offset.

func (*TOCEntry) RemoveAllChildren

func (e *TOCEntry) RemoveAllChildren()

func (*TOCEntry) RemoveChild

func (e *TOCEntry) RemoveChild(baseName string)

func (*TOCEntry) SetSourceLayer

func (e *TOCEntry) SetSourceLayer(d int)

func (*TOCEntry) Stat

func (e *TOCEntry) Stat() os.FileInfo

Stat returns a FileInfo value representing e.

func (*TOCEntry) ToTarHeader

func (e *TOCEntry) ToTarHeader() (h *tar.Header)

func (*TOCEntry) UpdateMetadataFrom

func (e *TOCEntry) UpdateMetadataFrom(s *TOCEntry)

type TOCEntryVerifier

type TOCEntryVerifier interface {

	// Verifier provides a content verifier that can be used for verifying the
	// contents of the specified TOCEntry.
	Verifier(ce *TOCEntry) (digest.Verifier, error)
}

TOCEntryVerifier holds verifiers that are usable for verifying chunks contained in an eStargz blob.
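A sketch of the verification flow for a small, single-chunk file: VerifyTOC authenticates the TOC, and the payload is then streamed through its digest.Verifier (verifyFile is an illustrative helper; the import path is an assumption):

package verify

import (
	"fmt"
	"io"

	"github.com/mc256/starlight/util/common" // assumed import path
	"github.com/opencontainers/go-digest"
)

// verifyFile checks the TOC against tocDigest, then verifies the payload
// of a single-chunk file against its recorded chunk digest.
func verifyFile(r *common.Reader, tocDigest digest.Digest, name string) error {
	v, err := r.VerifyTOC(tocDigest)
	if err != nil {
		return err // the TOC itself does not match
	}

	e, ok := r.Lookup(name)
	if !ok {
		return fmt.Errorf("%s: not found", name)
	}

	verifier, err := v.Verifier(e)
	if err != nil {
		return err
	}

	fr, err := r.OpenFile(name)
	if err != nil {
		return err
	}
	if _, err := io.Copy(verifier, fr); err != nil {
		return err
	}
	if !verifier.Verified() {
		return fmt.Errorf("%s: chunk digest mismatch", name)
	}
	return nil
}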

type TraceableBlobDigest

type TraceableBlobDigest struct {
	digest.Digest `json:"hash"`
	ImageName     string `json:"img"`
}

func (TraceableBlobDigest) String

func (d TraceableBlobDigest) String() string

type TraceableEntry

type TraceableEntry struct {
	*TOCEntry

	Landmark int `json:"lm,omitempty"`

	// Source starts from 1, not 0.
	// Indexes 0 and -1 are reserved for special purposes.
	Source int `json:"s,omitempty"`

	// ConsolidatedSource starts from 1, not 0.
	// Indexes 0 and -1 are reserved for special purposes.
	ConsolidatedSource int `json:"cs,omitempty"`

	Chunks      []*TOCEntry `json:"chunks,omitempty"`
	DeltaOffset *[]int64    `json:"df,omitempty"`

	// UpdateMeta indicates whether this entry is just a metadata update.
	// The content of the file is the same as the old one in the same layer
	// (referring to the same image).
	// If zero, the content of the file has changed.
	UpdateMeta int `json:"md,omitempty"`
}


func ExtendEntry

func ExtendEntry(t *TOCEntry) (d *TraceableEntry)

ExtendEntry creates a deep copy of t and clears the source layer identifier. You must assign a new source layer.
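A minimal sketch of that flow (the stand-in entry and import path are assumptions):

package main

import (
	"fmt"

	"github.com/mc256/starlight/util/common" // assumed import path
)

func main() {
	src := common.MakeEmptyFile("etc/motd") // stand-in for a real TOC entry
	te := common.ExtendEntry(src)           // deep copy; source layer cleared
	te.SetSourceLayer(1)                    // required: assign a 1-based layer
	fmt.Println(te.Source)
}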

func GetRootNode

func GetRootNode() *TraceableEntry

func (*TraceableEntry) DeepCopy

func (t *TraceableEntry) DeepCopy() (d *TraceableEntry)

DeepCopy creates a deep copy of the object and clears the source layer identifier. You must assign a new source layer.

func (*TraceableEntry) GetSourceLayer

func (t *TraceableEntry) GetSourceLayer() int

GetSourceLayer gets the source layer from the entry. WARNING: if you load this object from a JSON-serialized source (e.g. a database), it might not give you the correct information, because TOCEntry.sourceLayer is not exported. Use TraceableEntry.Source instead.

func (*TraceableEntry) SetDeltaOffset

func (t *TraceableEntry) SetDeltaOffset(offsets *[]int64)

SetDeltaOffset sets the offset in the image body. If the offset is zero, no changes were made to the file and the client will do nothing.

func (*TraceableEntry) SetSourceLayer

func (t *TraceableEntry) SetSourceLayer(d int)

SetSourceLayer sets the index of the source layer. The index should always start from 1 if the entry comes from an actual layer.

func (*TraceableEntry) ShiftSource

func (t *TraceableEntry) ShiftSource(offset int)

type Writer

type Writer struct {

	// ChunkSize optionally controls the maximum number of bytes
	// of data of a regular file that can be written in one gzip
	// stream before a new gzip stream is started.
	// Zero means to use a default, currently 4 MiB.
	ChunkSize int
	// contains filtered or unexported fields
}

A Writer writes stargz files.

Use NewWriter to create a new Writer.

func NewWriter

func NewWriter(w io.Writer) *Writer

NewWriter returns a new stargz writer writing to w.

The writer must be closed to write its trailing table of contents.

func NewWriterLevel

func NewWriterLevel(w io.Writer, compressionLevel int) *Writer

NewWriterLevel returns a new stargz writer writing to w. The compression level is configurable.

The writer must be closed to write its trailing table of contents.

func (*Writer) AppendTar

func (w *Writer) AppendTar(r io.Reader) error

AppendTar reads the tar or tar.gz file from r and appends each of its contents to w.

The input r can optionally be gzip compressed but the output will always be gzip compressed.

func (*Writer) Close

func (w *Writer) Close() (digest.Digest, error)

Close writes the stargz's table of contents and flushes all the buffers, returning any error.

func (*Writer) DiffID

func (w *Writer) DiffID() string

DiffID returns the SHA-256 of the uncompressed tar bytes. It is only valid to call DiffID after Close.

func (*Writer) Digest

func (w *Writer) Digest() string

Digest returns the SHA-256 of the compressed tar bytes. It is only valid to call Digest after Close.
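An end-to-end sketch of the Writer (file names and import path are assumptions):

package main

import (
	"bytes"
	"fmt"
	"log"
	"os"

	"github.com/mc256/starlight/util/common" // assumed import path
)

func main() {
	tarFile, err := os.Open("layer.tar") // plain or gzipped tar both work
	if err != nil {
		log.Fatal(err)
	}
	defer tarFile.Close()

	var out bytes.Buffer
	w := common.NewWriter(&out)
	if err := w.AppendTar(tarFile); err != nil {
		log.Fatal(err)
	}

	d, err := w.Close() // writes the TOC and footer
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("digest:", d, "diffID:", w.DiffID())
}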
