README

Timeless Stack APIs for Go(lang)

Go(lang) APIs for the Timeless Stack (📖 docs here).

These APIs include all message type definitions, message and error enums, and serialization glue for the APIs for projects like rio, repeatr, and hitch.

Function definitions for some RPC APIs are also exported. Generally you will be able to find implementations of these func APIs in each project, and also a package which exports the same funcs again, but works via an exec layer and RPC protocol.

Documentation

Index

Constants

This section is empty.

Variables

View Source
var (
	FilesetPackFilter_Lossless     = FilesetPackFilter{true, ff_keep, ff_keep, ff_keep, ff_keep, ff_keep, ff_keep}   // The default filters on... nothing, really.
	FilesetPackFilter_Flatten      = FilesetPackFilter{true, 1000, 1000, DefaultTime, ff_keep, ff_keep, ff_keep}     // The default filters on repeatr outputs.
	FilesetPackFilter_Conservative = FilesetPackFilter{true, 1000, 1000, DefaultTime, ff_keep, ff_reject, ff_reject} // The default filters on rio pack.  Guides you away from anything that would require privs to unpack again.

	FilesetUnpackFilter_Lossless     = FilesetUnpackFilter{true, ff_follow, ff_follow, ff_follow, ff_follow, ff_follow, ff_follow}   // The default filters on repeatr inputs.  Follow all instructions, even dev and setid.
	FilesetUnpackFilter_Conservative = FilesetUnpackFilter{true, ff_follow, ff_follow, ff_follow, ff_follow, ff_reject, ff_reject}   // The default filters on rio scan.  Follow all instructions, but halt on dev and setid (make the user aware if they're ingesting those).
	FilesetUnpackFilter_LowPriv      = FilesetUnpackFilter{true, ff_context, ff_context, ff_follow, ff_follow, ff_reject, ff_reject} // The default filters on rio unpack.  Operate lossily (replace uid and gid with the current user's) so that we can run with low privileges.

)
View Source
var (
	Formula_AtlasEntry           = atlas.BuildEntry(Formula{}).StructMap().Autogenerate().Complete()
	FormulaAction_AtlasEntry     = atlas.BuildEntry(FormulaAction{}).StructMap().Autogenerate().Complete()
	FormulaUserinfo_AtlasEntry   = atlas.BuildEntry(FormulaUserinfo{}).StructMap().Autogenerate().Complete()
	FormulaOutputSpec_AtlasEntry = atlas.BuildEntry(FormulaOutputSpec{}).StructMap().Autogenerate().Complete()
	FormulaRunRecord_AtlasEntry  = atlas.BuildEntry(FormulaRunRecord{}).StructMap().Autogenerate().Complete()
)
View Source
var (
	Operation_AtlasEntry       = atlas.BuildEntry(Operation{}).StructMap().Autogenerate().Complete()
	OperationRecord_AtlasEntry = atlas.BuildEntry(OperationRecord{}).StructMap().Autogenerate().Complete()
)
View Source
var DefaultTime int64 = time.Date(2010, 1, 1, 0, 0, 0, 0, time.UTC).Unix()
View Source
var FilesetPackFilter_AsString_AtlasEntry = atlas.BuildEntry(FilesetPackFilter{}).Transform().
	TransformMarshal(atlas.MakeMarshalTransformFunc(
		func(x FilesetPackFilter) (string, error) {
			return x.String(), nil
		})).
	TransformUnmarshal(atlas.MakeUnmarshalTransformFunc(ParseFilesetPackFilter)).
	Complete()
View Source
var FilesetPackFilter_AtlasEntry = atlas.BuildEntry(FilesetPackFilter{}).Transform().
	TransformMarshal(atlas.MakeMarshalTransformFunc(
		func(x FilesetPackFilter) (map[string]string, error) {
			if x.initialized == false {
				return nil, nil
			}
			ffs := make(map[string]string, 6)
			if s := x.string_uid(); s != "" {
				ffs["uid"] = s
			}
			if s := x.string_gid(); s != "" {
				ffs["gid"] = s
			}
			if s := x.string_mtime(); s != "" {
				ffs["mtime"] = s
			}
			if s := x.string_sticky(); s != "" {
				ffs["sticky"] = s
			}
			if s := x.string_setid(); s != "" {
				ffs["setid"] = s
			}
			if s := x.string_dev(); s != "" {
				ffs["dev"] = s
			}
			return ffs, nil
		})).
	TransformUnmarshal(atlas.MakeUnmarshalTransformFunc(
		func(x map[string]string) (FilesetPackFilter, error) {
			ff := FilesetPackFilter{true,
				ff_unspecified, ff_unspecified, ff_unspecified,
				ff_unspecified, ff_unspecified, ff_unspecified,
			}
			if x == nil {
				return ff, nil
			}
			if s, exists := x["uid"]; exists {
				if err := ff.parse_uid(s); err != nil {
					return ff, err
				}
			}
			if s, exists := x["gid"]; exists {
				if err := ff.parse_gid(s); err != nil {
					return ff, err
				}
			}
			if s, exists := x["mtime"]; exists {
				if err := ff.parse_mtime(s); err != nil {
					return ff, err
				}
			}
			if s, exists := x["sticky"]; exists {
				if err := ff.parse_sticky(s); err != nil {
					return ff, err
				}
			}
			if s, exists := x["setid"]; exists {
				if err := ff.parse_setid(s); err != nil {
					return ff, err
				}
			}
			if s, exists := x["dev"]; exists {
				if err := ff.parse_dev(s); err != nil {
					return ff, err
				}
			}
			return ff, nil
		})).
	Complete()
View Source
var FilesetUnpackFilter_AsString_AtlasEntry = atlas.BuildEntry(FilesetUnpackFilter{}).Transform().
	TransformMarshal(atlas.MakeMarshalTransformFunc(
		func(x FilesetUnpackFilter) (string, error) {
			return x.String(), nil
		})).
	TransformUnmarshal(atlas.MakeUnmarshalTransformFunc(ParseFilesetUnpackFilter)).
	Complete()
View Source
var FilesetUnpackFilter_AtlasEntry = atlas.BuildEntry(FilesetUnpackFilter{}).Transform().
	TransformMarshal(atlas.MakeMarshalTransformFunc(
		func(x FilesetUnpackFilter) (map[string]string, error) {
			if x.initialized == false {
				return nil, nil
			}
			ffs := make(map[string]string, 6)
			if s := x.string_uid(); s != "" {
				ffs["uid"] = s
			}
			if s := x.string_gid(); s != "" {
				ffs["gid"] = s
			}
			if s := x.string_mtime(); s != "" {
				ffs["mtime"] = s
			}
			if s := x.string_sticky(); s != "" {
				ffs["sticky"] = s
			}
			if s := x.string_setid(); s != "" {
				ffs["setid"] = s
			}
			if s := x.string_dev(); s != "" {
				ffs["dev"] = s
			}
			return ffs, nil
		})).
	TransformUnmarshal(atlas.MakeUnmarshalTransformFunc(
		func(x map[string]string) (FilesetUnpackFilter, error) {
			ff := FilesetUnpackFilter{true,
				ff_unspecified, ff_unspecified, ff_unspecified,
				ff_unspecified, ff_unspecified, ff_unspecified,
			}
			if x == nil {
				return ff, nil
			}
			if s, exists := x["uid"]; exists {
				if err := ff.parse_uid(s); err != nil {
					return ff, err
				}
			}
			if s, exists := x["gid"]; exists {
				if err := ff.parse_gid(s); err != nil {
					return ff, err
				}
			}
			if s, exists := x["mtime"]; exists {
				if err := ff.parse_mtime(s); err != nil {
					return ff, err
				}
			}
			if s, exists := x["sticky"]; exists {
				if err := ff.parse_sticky(s); err != nil {
					return ff, err
				}
			}
			if s, exists := x["setid"]; exists {
				if err := ff.parse_setid(s); err != nil {
					return ff, err
				}
			}
			if s, exists := x["dev"]; exists {
				if err := ff.parse_dev(s); err != nil {
					return ff, err
				}
			}
			return ff, nil
		})).
	Complete()
View Source
var ImportRef_AtlasEntry = atlas.BuildEntry((*ImportRef)(nil)).Transform().
	TransformMarshal(atlas.MakeMarshalTransformFunc(func(x ImportRef) (string, error) { return x.String(), nil })).
	TransformUnmarshal(atlas.MakeUnmarshalTransformFunc(ParseImportRef)).
	Complete()

Despite the fact this is documented as a union (and it is), we're using a style of serialization here that Refmt has no explicit support for (we're screwing with strings in an alarmingly intricate way), so, we're doing it on our own in some transform funcs.

View Source
var ItemRef_AtlasEntry = atlas.BuildEntry(ItemRef{}).StructMap().Autogenerate().Complete()
View Source
var Lineage_AtlasEntry = atlas.BuildEntry(Lineage{}).StructMap().Autogenerate().Complete()
View Source
var Module_AtlasEntry = atlas.BuildEntry(Module{}).StructMap().Autogenerate().Complete()
View Source
var Release_AtlasEntry = atlas.BuildEntry(Release{}).StructMap().Autogenerate().Complete()
View Source
var SlotRef_AtlasEntry = atlas.BuildEntry(SlotRef{}).Transform().
	TransformMarshal(atlas.MakeMarshalTransformFunc(func(x SlotRef) (string, error) { return x.String(), nil })).
	TransformUnmarshal(atlas.MakeUnmarshalTransformFunc(ParseSlotRef)).
	Complete()
View Source
var StepUnion_AtlasEntry = atlas.BuildEntry((*StepUnion)(nil)).KeyedUnion().
	Of(map[string]*atlas.AtlasEntry{
		"module":    Module_AtlasEntry,
		"operation": Operation_AtlasEntry,
	})
View Source
var WareID_AtlasEntry = atlas.BuildEntry(WareID{}).Transform().
	TransformMarshal(atlas.MakeMarshalTransformFunc(
		func(x WareID) (string, error) {
			return x.String(), nil
		})).
	TransformUnmarshal(atlas.MakeUnmarshalTransformFunc(
		func(x string) (WareID, error) {
			return ParseWareID(x)
		})).
	Complete()
View Source
var (
	WareSourcing_AtlasEntry = atlas.BuildEntry(WareSourcing{}).StructMap().Autogenerate().Complete()
)

Functions

This section is empty.

Types

type AbsPath

type AbsPath string

type FilesetPackFilter

type FilesetPackFilter struct {
	// contains filtered or unexported fields
}

func MustParseFilesetPackFilter

func MustParseFilesetPackFilter(s string) FilesetPackFilter

func ParseFilesetPackFilter

func ParseFilesetPackFilter(s string) (_ FilesetPackFilter, err error)

func (FilesetPackFilter) Apply

func (FilesetPackFilter) Dev

func (ff FilesetPackFilter) Dev() (keep bool, reject bool)

func (FilesetPackFilter) Gid

func (ff FilesetPackFilter) Gid() (keep bool, setTo int)

func (FilesetPackFilter) IsComplete

func (ff FilesetPackFilter) IsComplete() bool

func (FilesetPackFilter) Mtime

func (ff FilesetPackFilter) Mtime() (keep bool, setTo time.Time)

func (FilesetPackFilter) MtimeUnix

func (ff FilesetPackFilter) MtimeUnix() (keep bool, setTo int64)

func (FilesetPackFilter) Setid

func (ff FilesetPackFilter) Setid() (keep bool, reject bool)

func (FilesetPackFilter) Sticky

func (ff FilesetPackFilter) Sticky() (keep bool)

func (FilesetPackFilter) String

func (x FilesetPackFilter) String() (v string)

func (FilesetPackFilter) Uid

func (ff FilesetPackFilter) Uid() (keep bool, setTo int)

type FilesetUnpackFilter

type FilesetUnpackFilter struct {
	// contains filtered or unexported fields
}

func MustParseFilesetUnpackFilter

func MustParseFilesetUnpackFilter(s string) FilesetUnpackFilter

func ParseFilesetUnpackFilter

func ParseFilesetUnpackFilter(s string) (_ FilesetUnpackFilter, err error)

func (FilesetUnpackFilter) Altering

func (ff FilesetUnpackFilter) Altering() bool

func (FilesetUnpackFilter) Apply

func (FilesetUnpackFilter) Dev

func (ff FilesetUnpackFilter) Dev() (follow bool, reject bool)

func (FilesetUnpackFilter) Gid

func (ff FilesetUnpackFilter) Gid() (follow, setMine bool, setTo int)

func (FilesetUnpackFilter) IsComplete

func (ff FilesetUnpackFilter) IsComplete() bool

func (FilesetUnpackFilter) Mtime

func (ff FilesetUnpackFilter) Mtime() (follow, setNow bool, setTo time.Time)

func (FilesetUnpackFilter) MtimeUnix

func (ff FilesetUnpackFilter) MtimeUnix() (follow, now bool, setTo int64)

func (FilesetUnpackFilter) Setid

func (ff FilesetUnpackFilter) Setid() (follow bool, reject bool)

func (FilesetUnpackFilter) Sticky

func (ff FilesetUnpackFilter) Sticky() (follow bool)

func (FilesetUnpackFilter) String

func (x FilesetUnpackFilter) String() (v string)

func (FilesetUnpackFilter) Uid

func (ff FilesetUnpackFilter) Uid() (follow, setMine bool, setTo int)

type Formula

type Formula struct {
	Inputs  map[AbsPath]WareID
	Action  FormulaAction
	Outputs map[AbsPath]FormulaOutputSpec
}

func (Formula) Clone

func (f Formula) Clone() (f2 Formula)

func (Formula) SetupHash

func (frm Formula) SetupHash() FormulaSetupHash

Returns a hash covering parts of the formula such that the hash may be expected to converge for formulae that describe identical setups.

Specifically, this hash includes the inputs, actions, and output slot specs; it excludes any actual output ware hashes, and excludes any fields which are incidental to correctly reproducing the task, such as warehouse URLs.

The returned string is the base58 encoding of a SHA-384 hash, though there is no reason you should treat it as anything but opaque. The returned string may be relied upon to be all alphanumeric characters.

type FormulaAction

type FormulaAction struct {
	// An array of strings to hand as args to exec -- creates a single process.
	Exec []string `refmt:",omitempty"`

	// Noop may be set as an alternative to Exec; this allows manipulations of
	// files that can be done from pure path of inputs and outputs alone.
	Noop bool `refmt:",omitempty"`

	// How much power to give the process.  Default is quite low.
	Policy FormulaPolicy `refmt:",omitempty"`

	// The working directory to set when invoking the executable.
	// If not set, will be defaulted to "/task".
	Cwd AbsPath `refmt:",omitempty"`

	// Environment variables.
	Env map[string]string `refmt:",omitempty"`

	// User info -- uid, gid, etc.
	Userinfo *FormulaUserinfo `refmt:",omitempty"`

	// Cradle -- enabled by default, enum value for disable.
	Cradle string `refmt:",omitempty"`

	// Hostname to set inside the container (if the executor supports this -- not all do).
	Hostname string `refmt:",omitempty"`
}

FormulaAction defines the action to perform to "evaluate" the formula -- after the input filesets have been assembled, these commands will be run in a contained sandbox with those filesets, and when the commands terminate, the output filesets will be saved.

The definition of the Action includes at minimum what commands to run, but also includes the option of specifying other execution parameters: things like environment variables, working directory, hostname... and (though hopefully you rarely get hung up and need to change these) also things like UID, GID, username, homedir, and soforth. All of these additional parameters have "sensible defaults" if unset.

The Action also includes the ability to set "Policy" level -- these define simple privilege levels. (The default policy is of extremely low privileges.)

type FormulaOutputSpec

type FormulaOutputSpec struct {
	PackType PackType          `refmt:"packtype"`
	Filter   FilesetPackFilter `refmt:"filters",omitempty`
}

type FormulaPolicy

type FormulaPolicy string

FormulaPolicy constants enumerate the privilege levels a contained process can be started with. (They're a shorthand for linux 'capabilities', with some sensible safe sets pre-selected.)

Policies are meant as a rough, relatively approachable, user-facing shorthand for privilege levels. In practice they typically map onto linux 'capabilities', but this is considered an implementation detail, not guaranteed, and may be executor engine specific (for example, the 'chroot' executor cannot provide fine-grained capabilities at all).

const (
	/*
		Operate with a low privilege, as if you were a regular user on a
		regular system.  No special permissions will be granted
		(and in systems with capabilities support, special permissions
		will not be available even if processes do manage to
		change uid, e.g. through suid binaries; most capabilities
		are dropped).

		This is the safest mode to run as.  And, naturally, the default.

		Note that you may still (separately) set the Userinfo to values like
		uid=0 and gid=0, even while at 'routine' policy privileges.
		This is fine; with an executor engine that supports capabilities
		dropping, operations that the "root" user would normally be able
		to perform (like chown any file) will still result in
		permission denied.
	*/
	FormulaPolicy_Routine FormulaPolicy = "routine"

	/*
		Operate with escalated but still relatively safe privilege.
		Dangerous capabilities (e.g. "muck with devices") are dropped,
		but the most commonly used of root's powers (like chown any file)
		are available.

		This may be slightly safer than enabling full 'sysad' mode,
		but you should still prefer to use any of the lower power levels
		if possible.

		This mode is the most similar to what you would experience with
		docker defaults.

		This mode should not be assumed secure when combined with host mounts.
		(For example, one can trivially make an executable file in the
		host mount, set it to owner=0, set it setuid, and thus have a
		beachhead ready for a later phase in an attack.)
	*/
	FormulaPolicy_Governor FormulaPolicy = "governor"

	/*
		Operate with *ALL CAPABILITIES*.

		This is absolutely not secure against untrusted code -- it is
		completely equivalent in power to root on your host.  Please
		try to use any of the lower power levels first.

		Among the things a system administrator may do is rebooting
		the machine and updating the kernel.  Seriously, *only* use
		with trusted code.
	*/
	FormulaPolicy_Sysad FormulaPolicy = "sysad"
)

type FormulaRunRecord

type FormulaRunRecord struct {
	Guid      string             `refmt:"guid"`      // random number, presumed globally unique.
	Time      int64              `refmt:"time"`      // time at start of build.
	FormulaID FormulaSetupHash   `refmt:"formulaID"` // HID of formula ran.
	ExitCode  int                `refmt:"exitCode"`  // exit code of the contained process.
	Results   map[AbsPath]WareID `refmt:"results"`   // wares produced by the run!

	Hostname string            `refmt:",omitempty"` // Optional: hostname.  not a trusted field, but useful for debugging.
	Metadata map[string]string `refmt:",omitempty"` // Optional: escape valve.  you can attach freetext here.
}

type FormulaSetupHash

type FormulaSetupHash string

FormulaSetupHash is an opaque string derived from a cryptographic hash of the deterministic serialization of a Formula. Which is a fancy way of saying it's a fantastic primary key for memoizing computations.

type FormulaUserinfo

type FormulaUserinfo struct {
	Uid      *int    `refmt:",omitempty"`
	Gid      *int    `refmt:",omitempty"`
	Username string  `refmt:",omitempty"`
	Homedir  AbsPath `refmt:",omitempty"`
}

type ImportRef

type ImportRef interface {
	String() string
	// contains filtered or unexported methods
}

ImportRef is a sum type, containing either a catalog reference ("catalog:{moduleName}:{releaseName}:{itemName}") or parent reference ("parent:{slotRef}"; only valid in submodules) or an ingest reference ("ingest:{ingestKind}[:{addntl}]"; only valid on main module).

Ingest references are interesting and should be used sparingly; they're for where new data comes into the Timeless ecosystem -- and that also means ingest references are where the Timeless Stack's ability to automatically and recursively audit where that data came from reaches its end.

Ingest references may explicitly reference wares (ex. "ingest:literal:tar:f00bAr"), or lean on other extensions to bring data into the system (ex. "ingest:git:.:HEAD"). Again, use sparingly: anything beyond "ingest:literal" and your module pipeline has become virtually impossible for anyone to evaluate without whatever additional un-contained un-tracked context your ingest refers to.

Ingest references should be passed on directly as an export of a module. Failure to do so is not *exactly* illegal, but it would make any replay of this module impossible without un-tracked context, and as such most of the tools in the Timeless Stack will issue either warnings or outright errors if the ingested data isn't also in the module exports.

func ParseImportRef

func ParseImportRef(x string) (ImportRef, error)

type ImportRef_Catalog

type ImportRef_Catalog ItemRef

func (ImportRef_Catalog) String

func (x ImportRef_Catalog) String() string

type ImportRef_Ingest

type ImportRef_Ingest struct {
	IngestKind string
	Args       string
}

func (ImportRef_Ingest) String

func (x ImportRef_Ingest) String() string

type ImportRef_Parent

type ImportRef_Parent SlotRef

func (ImportRef_Parent) String

func (x ImportRef_Parent) String() string

type ItemName

type ItemName string

type ItemRef

type ItemRef struct {
	ModuleName
	ReleaseName
	ItemName
}

func ParseItemRef

func ParseItemRef(x string) (v ItemRef, err error)

func (ItemRef) String

func (x ItemRef) String() string

type Lineage

type Lineage struct {
	// Name of self.
	Name ModuleName

	// Ordered list of release entries.
	// Order not particularly important, though UIs generally display in this order.
	// Most recent entries should be placed at the top (e.g. index zero).
	//
	// Each entry must have a unique ReleaseName in the scope of this Lineage.
	Releases []Release
}

Lineage contains the metadata for all releases for a particular module. Treat it as an append-only record: new releases append to the module's lineage.

type Module

type Module struct {
	Imports map[SlotName]ImportRef
	Steps   map[StepName]StepUnion
	Exports map[ItemName]SlotRef `refmt:",omitempty"`
}

type ModuleName

type ModuleName string

func (ModuleName) Validate

func (x ModuleName) Validate() error

Validate returns errors if the string is not a valid ModuleName. A ModuleName must resemble a domain name (per DNS-1123) with optional subsequent '/'-separated path segments, roughly like:

[[[...]subsubdomain.]subdomain.]domain[/path[/morepath[...]]]

The domain segments are restricted to DNS-1123 characters, and path segments restricted to [TODO:regexp]. These requirements ensure that mapping module names onto a filesystem path is always a reasonable operation.

type Operation

type Operation struct {
	Inputs  map[AbsPath]SlotRef
	Action  FormulaAction
	Outputs map[SlotName]AbsPath `refmt:",omitempty"`
}

Operation is one of the concrete types of StepUnion which composes a Module; it describes a containerizable computation, all of its input filesystem paths bound to slot references, and all of the paths that should be collected as outputs and assigned to another slot for further use.

When all of the input slot references in an Operation are known, it can be bound, becoming a Formula -- which is structurally similar, but now with all specific, concrete WareID hashes instead of SlotRef.

type OperationRecord

type OperationRecord struct {
	FormulaRunRecord
	Results map[SlotName]WareID
}

OperationRecord is mostly an alias of FormulaRunRecord, but with Results indexed by SlotName from the Operation rather than path in the Formula.

We usually serialize FormulaRunRecord, because it's more convergent when content-addressed; OperationRecord contains immaterial details (e.g. the SlotName). OperationRecord is sometimes more convenient to use internally.

type PackType

type PackType string

A PackType string identifies what kind of packing format is used when packing a ware. It's the first part of a WareID tuple.

Typically, the desired PackType is an argument when using packing tools; whereas the PackType is communicated by the WareID when using unpack tools.

PackTypes are a simple [a-zA-Z0-9] string. Colons in particular are not allowable (since a PackType string is the first part of a WareID).

type Release

type Release struct {
	Name     ReleaseName
	Items    map[ItemName]WareID
	Metadata map[string]string
	Hazards  map[string]string
}

Release describes a single atomic release of wares. Each release must have a name, and contains a set of items, where each item refers to a WareID.

Releases are used to group something chronologically; items in a release are used to distinguish between multiple artifacts in a release.

In the context of building software, a Release usually has semantics lining up with "a bunch of software built from a particular source checkout". And thus, typically, there is also an Item in the release called "src"; and often enough, this will be a "git" wareID. Other Item names likely to appear might be "linux-amd64", for example. All of this is convention, however; releases could just as well be used to track various versions of a photo album.

It is recommended that a series of Release entries in a Lineage should stick to the same set of ItemName over time, because consumers of catalog information generally expect this, and changing Item names may produce work for other people.

type ReleaseName

type ReleaseName string

type SlotName

type SlotName string

type SlotRef

type SlotRef struct {
	StepName // zero for module import reference
	SlotName
}

func ParseSlotRef

func ParseSlotRef(x string) (SlotRef, error)

func (SlotRef) String

func (x SlotRef) String() string

type StepName

type StepName string

type StepUnion

type StepUnion interface {
	// contains filtered or unexported methods
}

type SubmoduleRef

type SubmoduleRef string // .-sep.  really is a []StepName, but we wanted something easily used as a map key.

func (SubmoduleRef) Child

func (ref SubmoduleRef) Child(child StepName) SubmoduleRef

Child appends the stepname to this ref. Think of it as leaving breadcrumbs behind as you zoom in ('Child' and 'Decontextualize' often come in pairs.)

func (SubmoduleRef) Contextualize

func (ref SubmoduleRef) Contextualize(parent SubmoduleRef) SubmoduleRef

Contextualize prepends a set of step references to this ref. Think of it as zooming out.

func (SubmoduleRef) Decontextualize

func (ref SubmoduleRef) Decontextualize() SubmoduleRef

Decontextualize strips the first stepName from the front of the ref. Think of it as zooming in.

func (SubmoduleRef) First

func (ref SubmoduleRef) First() StepName

First returns the first StepName component of the SubmoduleRef. The empty string is returned if this SubmoduleRef is itself zero.

type SubmoduleSlotRef

type SubmoduleSlotRef struct {
	SubmoduleRef
	SlotRef
}

func (SubmoduleSlotRef) Contextualize

func (ref SubmoduleSlotRef) Contextualize(parent SubmoduleRef) SubmoduleSlotRef

Contextualize prepends a set of step references to this ref.

func (SubmoduleSlotRef) Decontextualize

func (ref SubmoduleSlotRef) Decontextualize() SubmoduleSlotRef

Decontextualize strips the first stepName from the front of the ref. Think of it as zooming in.

func (SubmoduleSlotRef) String

func (x SubmoduleSlotRef) String() string

type SubmoduleSlotRefList

type SubmoduleSlotRefList []SubmoduleSlotRef

func (SubmoduleSlotRefList) Len

func (s SubmoduleSlotRefList) Len() int

func (SubmoduleSlotRefList) Less

func (s SubmoduleSlotRefList) Less(i, j int) bool

func (SubmoduleSlotRefList) Swap

func (s SubmoduleSlotRefList) Swap(i, j int)

type SubmoduleStepRef

type SubmoduleStepRef struct {
	SubmoduleRef
	StepName
}

func (SubmoduleStepRef) Contextualize

func (ref SubmoduleStepRef) Contextualize(parent SubmoduleRef) SubmoduleStepRef

Contextualize prepends a set of step references to this ref.

func (SubmoduleStepRef) Decontextualize

func (ref SubmoduleStepRef) Decontextualize() SubmoduleStepRef

Decontextualize strips the first stepName from the front of the ref. Think of it as zooming in.

func (SubmoduleStepRef) String

func (x SubmoduleStepRef) String() string

type WareID

type WareID struct {
	Type PackType
	Hash string
}

WareID is a content-addressable, cryptographic hash that uniquely identifies a "ware" -- a packed Fileset. (Fileset and Ware are distinct concepts because a fileset is not packed in any particular way and thus has no innate hash; a Ware is packed and hashed.)

Ware IDs are serialized as a string in two parts, separated by a colon -- for example like "git:f23ae1829" or "tar:WJL8or32vD". The first part communicates which kind of packing system computed the hash, and the second part is the hash itself.

func ParseWareID

func ParseWareID(x string) (WareID, error)

func (WareID) String

func (x WareID) String() string

type WareSourcing

type WareSourcing struct {
	ByPackType map[PackType][]WarehouseLocation                `refmt:",omitempty"`
	ByModule   map[ModuleName]map[PackType][]WarehouseLocation `refmt:",omitempty"`
	ByWare     map[WareID][]WarehouseLocation                  `refmt:",omitempty"`
}

WareSourcing contains suggestions on WarehouseLocations which may be able to provide Wares.

This information may be indexed in several different ways: most specifically (and inflexibly, and verbosely) by specific WareID; or by module name; or by pack type in general. (Non-content-addressable WarehouseLocations only semantically make sense when indexed by specific WareID; since the other forms of indexing will recommend the WarehouseLocation for more than one specific WareID, it stands to reason that the WarehouseLocation ought to specify a system which can store more than one Ware!)

WareSourcing is meant to be reasonable to provide to *more than one* Operation (each of which may also have more than one input, of course) -- the various mechanisms of indexing allow such generalized suggestions.

func (*WareSourcing) Append

func (ws *WareSourcing) Append(ws2 WareSourcing)

func (*WareSourcing) AppendByModule

func (ws *WareSourcing) AppendByModule(modName ModuleName, packtype PackType, locations ...WarehouseLocation)

func (*WareSourcing) AppendByPackType

func (ws *WareSourcing) AppendByPackType(packtype PackType, locations ...WarehouseLocation)

func (*WareSourcing) AppendByWare

func (ws *WareSourcing) AppendByWare(wareID WareID, locations ...WarehouseLocation)

func (WareSourcing) PivotToInputs

func (ws WareSourcing) PivotToInputs(frm Formula) WareSourcing

PivotToInputs is a shortcut for calling PivotToWareIDs with the set of inputs to a bound Op.

func (WareSourcing) PivotToModuleWare

func (ws WareSourcing) PivotToModuleWare(wareID WareID, assumingModName ModuleName) WareSourcing

PivotToModuleWare returns WareSourcing where all data is indexed ByWareID (like PivotToInputs and PivotToWareIDs), also applying any ByModule-index info for the named module. (This is typically used immediately after loading the mirrors info in a module's release catalog, in order to avoid needed to carry around the module-oriented info any longer.)

func (WareSourcing) PivotToWareID

func (ws WareSourcing) PivotToWareID(wareID WareID) (v []WarehouseLocation)

PivotToWareID is like PivotToWareIDs but for a single WareID; and shortcuts immediately to returning a flat list of WarehouseLocation.

func (WareSourcing) PivotToWareIDs

func (ws WareSourcing) PivotToWareIDs(wareIDs map[WareID]struct{}) WareSourcing

PivotToWareIDs returns a new and reduced WareSourcing where all data is indexed ByWareID for each wareID in the argument set. All the ByPackType for a type "tar" will be appended to the ByWareID index for all wareIDs of type "tar", and so forth. ByModule data is ignored (you should flip that to ByWareID-indexed immediately when you load it).

type WareStaging

type WareStaging struct {
	ByPackType map[PackType]WarehouseLocation
}

WareStaging contains instructions on where to store wares that are output from an Operation.

WareStaging only takes a single warehouse location per packtype. It is intended that if you want to replicate the ware storage to multiple locations, you should do this later, *not* while saving the output from the Operation. An Operation may fail if the WarehouseLocation provided by the WareStaging info is not writable.

It is semantically unreasonable to provide a non-content-addressable WarehouseLocation in WareStaging info: WareStaging info is meant to be reasonable to provide to *more than one* Operation (each of which may also have more than one output, of course) -- therefore it is only sensible to provide a WarehouseLocation which is capable of storing more than one Ware! (You may still run Repeatr with non-CA WarehouseLocation configurations for specific outputs; it's only the higher level pipelining tools which become opinionated about this.)

type WarehouseLocation

type WarehouseLocation string

Directories

Path Synopsis
Interfaces of hitch commands.
Interfaces of hitch commands.
fmt
repeatrfmt contains translators for writing repeatr.Event to an io.Writer, in both human-readable and API-friendly variants.
repeatrfmt contains translators for writing repeatr.Event to an io.Writer, in both human-readable and API-friendly variants.
rio