Documentation
¶
Index ¶
- Variables
- type AbsPath
- type FilesetPackFilter
- func (ff FilesetPackFilter) Apply(ff2 FilesetPackFilter) FilesetPackFilter
- func (ff FilesetPackFilter) Dev() (keep bool, reject bool)
- func (ff FilesetPackFilter) Gid() (keep bool, setTo int)
- func (ff FilesetPackFilter) IsComplete() bool
- func (ff FilesetPackFilter) Mtime() (keep bool, setTo time.Time)
- func (ff FilesetPackFilter) MtimeUnix() (keep bool, setTo int64)
- func (ff FilesetPackFilter) Setid() (keep bool, reject bool)
- func (ff FilesetPackFilter) Sticky() (keep bool)
- func (x FilesetPackFilter) String() (v string)
- func (ff FilesetPackFilter) Uid() (keep bool, setTo int)
- type FilesetUnpackFilter
- func (ff FilesetUnpackFilter) Altering() bool
- func (ff FilesetUnpackFilter) Apply(ff2 FilesetUnpackFilter) FilesetUnpackFilter
- func (ff FilesetUnpackFilter) Dev() (follow bool, reject bool)
- func (ff FilesetUnpackFilter) Gid() (follow, setMine bool, setTo int)
- func (ff FilesetUnpackFilter) IsComplete() bool
- func (ff FilesetUnpackFilter) Mtime() (follow, setNow bool, setTo time.Time)
- func (ff FilesetUnpackFilter) MtimeUnix() (follow, now bool, setTo int64)
- func (ff FilesetUnpackFilter) Setid() (follow bool, reject bool)
- func (ff FilesetUnpackFilter) Sticky() (follow bool)
- func (x FilesetUnpackFilter) String() (v string)
- func (ff FilesetUnpackFilter) Uid() (follow, setMine bool, setTo int)
- type Formula
- type FormulaAction
- type FormulaOutputSpec
- type FormulaPolicy
- type FormulaRunRecord
- type FormulaSetupHash
- type FormulaUserinfo
- type ImportRef
- type ImportRef_Catalog
- type ImportRef_Ingest
- type ImportRef_Parent
- type ItemName
- type ItemRef
- type Lineage
- type Module
- type ModuleName
- type Operation
- type OperationRecord
- type PackType
- type Release
- type ReleaseName
- type SlotName
- type SlotRef
- type StepName
- type StepUnion
- type SubmoduleRef
- type SubmoduleSlotRef
- type SubmoduleSlotRefList
- type SubmoduleStepRef
- type WareID
- type WareSourcing
- func (ws *WareSourcing) Append(ws2 WareSourcing)
- func (ws *WareSourcing) AppendByModule(modName ModuleName, packtype PackType, locations ...WarehouseLocation)
- func (ws *WareSourcing) AppendByPackType(packtype PackType, locations ...WarehouseLocation)
- func (ws *WareSourcing) AppendByWare(wareID WareID, locations ...WarehouseLocation)
- func (ws WareSourcing) PivotToInputs(frm Formula) WareSourcing
- func (ws WareSourcing) PivotToModuleWare(wareID WareID, assumingModName ModuleName) WareSourcing
- func (ws WareSourcing) PivotToWareID(wareID WareID) (v []WarehouseLocation)
- func (ws WareSourcing) PivotToWareIDs(wareIDs map[WareID]struct{}) WareSourcing
- type WareStaging
- type WarehouseLocation
Constants ¶
This section is empty.
Variables ¶
var ( FilesetPackFilter_Lossless = FilesetPackFilter{true, ff_keep, ff_keep, ff_keep, ff_keep, ff_keep, ff_keep} // The default filters on... nothing, really. FilesetPackFilter_Flatten = FilesetPackFilter{true, 1000, 1000, DefaultTime, ff_keep, ff_keep, ff_keep} // The default filters on repeatr outputs. FilesetPackFilter_Conservative = FilesetPackFilter{true, 1000, 1000, DefaultTime, ff_keep, ff_reject, ff_reject} // The default filters on rio pack. Guides you away from anything that would require privs to unpack again. FilesetUnpackFilter_Lossless = FilesetUnpackFilter{true, ff_follow, ff_follow, ff_follow, ff_follow, ff_follow, ff_follow} // The default filters on repeatr inputs. Follow all instructions, even dev and setid. FilesetUnpackFilter_Conservative = FilesetUnpackFilter{true, ff_follow, ff_follow, ff_follow, ff_follow, ff_reject, ff_reject} // The default filters on rio scan. Follow all instructions, but halt on dev and setid (make the user aware if they're ingesting those). FilesetUnpackFilter_LowPriv = FilesetUnpackFilter{true, ff_context, ff_context, ff_follow, ff_follow, ff_reject, ff_reject} // The default filters on rio unpack. Operate lossily (replace uid and gid with the current user's) so that we can run with low privileges. )
var ( Atlas_Formula = atlas.MustBuild( Formula_AtlasEntry, FormulaAction_AtlasEntry, FormulaUserinfo_AtlasEntry, FormulaOutputSpec_AtlasEntry, FilesetPackFilter_AtlasEntry, WareID_AtlasEntry, ) Atlas_FormulaRunRecord = atlas.MustBuild( FormulaRunRecord_AtlasEntry, WareID_AtlasEntry, ) )
var ( Formula_AtlasEntry = atlas.BuildEntry(Formula{}).StructMap().Autogenerate().Complete() FormulaAction_AtlasEntry = atlas.BuildEntry(FormulaAction{}).StructMap().Autogenerate().Complete() FormulaUserinfo_AtlasEntry = atlas.BuildEntry(FormulaUserinfo{}).StructMap().Autogenerate().Complete() FormulaOutputSpec_AtlasEntry = atlas.BuildEntry(FormulaOutputSpec{}).StructMap().Autogenerate().Complete() FormulaRunRecord_AtlasEntry = atlas.BuildEntry(FormulaRunRecord{}).StructMap().Autogenerate().Complete() )
var ( Operation_AtlasEntry = atlas.BuildEntry(Operation{}).StructMap().Autogenerate().Complete() OperationRecord_AtlasEntry = atlas.BuildEntry(OperationRecord{}).StructMap().Autogenerate().Complete() )
var Atlas_Catalog = atlas.MustBuild( Lineage_AtlasEntry, Release_AtlasEntry, WareID_AtlasEntry, )
var Atlas_Module = atlas.MustBuild( Module_AtlasEntry, StepUnion_AtlasEntry, Operation_AtlasEntry, SlotRef_AtlasEntry, ImportRef_AtlasEntry, FormulaAction_AtlasEntry, FormulaUserinfo_AtlasEntry, )
var Atlas_WareSourcing = atlas.MustBuild( WareSourcing_AtlasEntry, WareID_AtlasEntry, )
var FilesetPackFilter_AsString_AtlasEntry = atlas.BuildEntry(FilesetPackFilter{}).Transform(). TransformMarshal(atlas.MakeMarshalTransformFunc( func(x FilesetPackFilter) (string, error) { return x.String(), nil })). TransformUnmarshal(atlas.MakeUnmarshalTransformFunc(ParseFilesetPackFilter)). Complete()
var FilesetPackFilter_AtlasEntry = atlas.BuildEntry(FilesetPackFilter{}).Transform(). TransformMarshal(atlas.MakeMarshalTransformFunc( func(x FilesetPackFilter) (map[string]string, error) { if x.initialized == false { return nil, nil } ffs := make(map[string]string, 6) if s := x.string_uid(); s != "" { ffs["uid"] = s } if s := x.string_gid(); s != "" { ffs["gid"] = s } if s := x.string_mtime(); s != "" { ffs["mtime"] = s } if s := x.string_sticky(); s != "" { ffs["sticky"] = s } if s := x.string_setid(); s != "" { ffs["setid"] = s } if s := x.string_dev(); s != "" { ffs["dev"] = s } return ffs, nil })). TransformUnmarshal(atlas.MakeUnmarshalTransformFunc( func(x map[string]string) (FilesetPackFilter, error) { ff := FilesetPackFilter{true, ff_unspecified, ff_unspecified, ff_unspecified, ff_unspecified, ff_unspecified, ff_unspecified, } if x == nil { return ff, nil } if s, exists := x["uid"]; exists { if err := ff.parse_uid(s); err != nil { return ff, err } } if s, exists := x["gid"]; exists { if err := ff.parse_gid(s); err != nil { return ff, err } } if s, exists := x["mtime"]; exists { if err := ff.parse_mtime(s); err != nil { return ff, err } } if s, exists := x["sticky"]; exists { if err := ff.parse_sticky(s); err != nil { return ff, err } } if s, exists := x["setid"]; exists { if err := ff.parse_setid(s); err != nil { return ff, err } } if s, exists := x["dev"]; exists { if err := ff.parse_dev(s); err != nil { return ff, err } } return ff, nil })). Complete()
var FilesetUnpackFilter_AsString_AtlasEntry = atlas.BuildEntry(FilesetUnpackFilter{}).Transform(). TransformMarshal(atlas.MakeMarshalTransformFunc( func(x FilesetUnpackFilter) (string, error) { return x.String(), nil })). TransformUnmarshal(atlas.MakeUnmarshalTransformFunc(ParseFilesetUnpackFilter)). Complete()
var FilesetUnpackFilter_AtlasEntry = atlas.BuildEntry(FilesetUnpackFilter{}).Transform(). TransformMarshal(atlas.MakeMarshalTransformFunc( func(x FilesetUnpackFilter) (map[string]string, error) { if x.initialized == false { return nil, nil } ffs := make(map[string]string, 6) if s := x.string_uid(); s != "" { ffs["uid"] = s } if s := x.string_gid(); s != "" { ffs["gid"] = s } if s := x.string_mtime(); s != "" { ffs["mtime"] = s } if s := x.string_sticky(); s != "" { ffs["sticky"] = s } if s := x.string_setid(); s != "" { ffs["setid"] = s } if s := x.string_dev(); s != "" { ffs["dev"] = s } return ffs, nil })). TransformUnmarshal(atlas.MakeUnmarshalTransformFunc( func(x map[string]string) (FilesetUnpackFilter, error) { ff := FilesetUnpackFilter{true, ff_unspecified, ff_unspecified, ff_unspecified, ff_unspecified, ff_unspecified, ff_unspecified, } if x == nil { return ff, nil } if s, exists := x["uid"]; exists { if err := ff.parse_uid(s); err != nil { return ff, err } } if s, exists := x["gid"]; exists { if err := ff.parse_gid(s); err != nil { return ff, err } } if s, exists := x["mtime"]; exists { if err := ff.parse_mtime(s); err != nil { return ff, err } } if s, exists := x["sticky"]; exists { if err := ff.parse_sticky(s); err != nil { return ff, err } } if s, exists := x["setid"]; exists { if err := ff.parse_setid(s); err != nil { return ff, err } } if s, exists := x["dev"]; exists { if err := ff.parse_dev(s); err != nil { return ff, err } } return ff, nil })). Complete()
var ImportRef_AtlasEntry = atlas.BuildEntry((*ImportRef)(nil)).Transform(). TransformMarshal(atlas.MakeMarshalTransformFunc(func(x ImportRef) (string, error) { return x.String(), nil })). TransformUnmarshal(atlas.MakeUnmarshalTransformFunc(ParseImportRef)). Complete()
Despite the fact this is documented as a union (and it is), we're using a style of serialization here that Refmt has no explicit support for (we're screwing with strings in an alarmingly intricate way), so, we're doing it on our own in some transform funcs.
var ItemRef_AtlasEntry = atlas.BuildEntry(ItemRef{}).StructMap().Autogenerate().Complete()
var Lineage_AtlasEntry = atlas.BuildEntry(Lineage{}).StructMap().Autogenerate().Complete()
var Module_AtlasEntry = atlas.BuildEntry(Module{}).StructMap().Autogenerate().Complete()
var Release_AtlasEntry = atlas.BuildEntry(Release{}).StructMap().Autogenerate().Complete()
var SlotRef_AtlasEntry = atlas.BuildEntry(SlotRef{}).Transform(). TransformMarshal(atlas.MakeMarshalTransformFunc(func(x SlotRef) (string, error) { return x.String(), nil })). TransformUnmarshal(atlas.MakeUnmarshalTransformFunc(ParseSlotRef)). Complete()
var StepUnion_AtlasEntry = atlas.BuildEntry((*StepUnion)(nil)).KeyedUnion(). Of(map[string]*atlas.AtlasEntry{ "module": Module_AtlasEntry, "operation": Operation_AtlasEntry, })
var WareID_AtlasEntry = atlas.BuildEntry(WareID{}).Transform(). TransformMarshal(atlas.MakeMarshalTransformFunc( func(x WareID) (string, error) { return x.String(), nil })). TransformUnmarshal(atlas.MakeUnmarshalTransformFunc( func(x string) (WareID, error) { return ParseWareID(x) })). Complete()
var (
WareSourcing_AtlasEntry = atlas.BuildEntry(WareSourcing{}).StructMap().Autogenerate().Complete()
)
Functions ¶
This section is empty.
Types ¶
type FilesetPackFilter ¶
type FilesetPackFilter struct {
// contains filtered or unexported fields
}
func MustParseFilesetPackFilter ¶
func MustParseFilesetPackFilter(s string) FilesetPackFilter
func ParseFilesetPackFilter ¶
func ParseFilesetPackFilter(s string) (_ FilesetPackFilter, err error)
func (FilesetPackFilter) Apply ¶
func (ff FilesetPackFilter) Apply(ff2 FilesetPackFilter) FilesetPackFilter
func (FilesetPackFilter) Dev ¶
func (ff FilesetPackFilter) Dev() (keep bool, reject bool)
func (FilesetPackFilter) Gid ¶
func (ff FilesetPackFilter) Gid() (keep bool, setTo int)
func (FilesetPackFilter) IsComplete ¶
func (ff FilesetPackFilter) IsComplete() bool
func (FilesetPackFilter) MtimeUnix ¶
func (ff FilesetPackFilter) MtimeUnix() (keep bool, setTo int64)
func (FilesetPackFilter) Setid ¶
func (ff FilesetPackFilter) Setid() (keep bool, reject bool)
func (FilesetPackFilter) Sticky ¶
func (ff FilesetPackFilter) Sticky() (keep bool)
func (FilesetPackFilter) String ¶
func (x FilesetPackFilter) String() (v string)
func (FilesetPackFilter) Uid ¶
func (ff FilesetPackFilter) Uid() (keep bool, setTo int)
type FilesetUnpackFilter ¶
type FilesetUnpackFilter struct {
// contains filtered or unexported fields
}
func MustParseFilesetUnpackFilter ¶
func MustParseFilesetUnpackFilter(s string) FilesetUnpackFilter
func ParseFilesetUnpackFilter ¶
func ParseFilesetUnpackFilter(s string) (_ FilesetUnpackFilter, err error)
func (FilesetUnpackFilter) Altering ¶
func (ff FilesetUnpackFilter) Altering() bool
func (FilesetUnpackFilter) Apply ¶
func (ff FilesetUnpackFilter) Apply(ff2 FilesetUnpackFilter) FilesetUnpackFilter
func (FilesetUnpackFilter) Dev ¶
func (ff FilesetUnpackFilter) Dev() (follow bool, reject bool)
func (FilesetUnpackFilter) Gid ¶
func (ff FilesetUnpackFilter) Gid() (follow, setMine bool, setTo int)
func (FilesetUnpackFilter) IsComplete ¶
func (ff FilesetUnpackFilter) IsComplete() bool
func (FilesetUnpackFilter) Mtime ¶
func (ff FilesetUnpackFilter) Mtime() (follow, setNow bool, setTo time.Time)
func (FilesetUnpackFilter) MtimeUnix ¶
func (ff FilesetUnpackFilter) MtimeUnix() (follow, now bool, setTo int64)
func (FilesetUnpackFilter) Setid ¶
func (ff FilesetUnpackFilter) Setid() (follow bool, reject bool)
func (FilesetUnpackFilter) Sticky ¶
func (ff FilesetUnpackFilter) Sticky() (follow bool)
func (FilesetUnpackFilter) String ¶
func (x FilesetUnpackFilter) String() (v string)
func (FilesetUnpackFilter) Uid ¶
func (ff FilesetUnpackFilter) Uid() (follow, setMine bool, setTo int)
type Formula ¶
type Formula struct { Inputs map[AbsPath]WareID Action FormulaAction Outputs map[AbsPath]FormulaOutputSpec }
func (Formula) SetupHash ¶
func (frm Formula) SetupHash() FormulaSetupHash
Returns a hash covering parts of the formula such that the hash may be expected to converge for formulae that describe identical setups.
Specifically, this hash includes the inputs, actions, and output slot specs; it excludes any actual output ware hashes, and excludes any fields which are incidental to correctly reproducing the task, such as warehouse URLs.
The returned string is the base58 encoding of a SHA-384 hash, though there is no reason you should treat it as anything but opaque. The returned string may be relied upon to be all alphanumeric characters.
type FormulaAction ¶
type FormulaAction struct { // An array of strings to hand as args to exec -- creates a single process. Exec []string `refmt:",omitempty"` // Noop may be set as an alternative to Exec; this allows manipulations of // files that can be done from pure path of inputs and outputs alone. Noop bool `refmt:",omitempty"` // How much power to give the process. Default is quite low. Policy FormulaPolicy `refmt:",omitempty"` // The working directory to set when invoking the executable. // If not set, will be defaulted to "/task". Cwd AbsPath `refmt:",omitempty"` // Environment variables. Env map[string]string `refmt:",omitempty"` // User info -- uid, gid, etc. Userinfo *FormulaUserinfo `refmt:",omitempty"` // Cradle -- enabled by default, enum value for disable. Cradle string `refmt:",omitempty"` // Hostname to set inside the container (if the executor supports this -- not all do). Hostname string `refmt:",omitempty"` }
FormulaAction defines the action to perform to "evaluate" the formula -- after the input filesets have been assembled, these commands will be run in a contained sandbox with those filesets, and when the commands terminate, the output filesets will be saved.
The definition of the Action includes at minimum what commands to run, but also includes the option of specifying other execution parameters: things like environment variables, working directory, hostname... and (though hopefully you rarely get hung up and need to change these) also things like UID, GID, username, homedir, and soforth. All of these additional parameters have "sensible defaults" if unset.
The Action also includes the ability to set "Policy" level -- these define simple privilege levels. (The default policy is of extremely low privileges.)
type FormulaOutputSpec ¶
type FormulaOutputSpec struct { PackType PackType `refmt:"packtype"` Filter FilesetPackFilter `refmt:"filters",omitempty` }
type FormulaPolicy ¶
type FormulaPolicy string
FormulaPolicy constants enumerate the privilege levels a contained process can be started with. (They're a shorthand for linux 'capabilities', with some sensible safe sets pre-selected.)
Policies are meant as a rough, relatively approachable, user-facing shorthand for privilege levels. In practice they typically map onto linux 'capabilities', but this is considered an implementation detail, not guaranteed, and may be executor engine specific (for example, the 'chroot' executor cannot provide fine-grained capabilities at all).
const ( /* Operate with a low privilege, as if you were a regular user on a regular system. No special permissions will be granted (and in systems with capabilities support, special permissions will not be available even if processes do manage to change uid, e.g. through suid binaries; most capabilities are dropped). This is the safest mode to run as. And, naturally, the default. Note that you may still (separately) set the Userinfo to values like uid=0 and gid=0, even while at 'routine' policy privileges. This is fine; under an executor engine that supports capabilities dropping, operations that the "root" user would normally be able to perform (like chown any file) will still result in permission denied. */ FormulaPolicy_Routine FormulaPolicy = "routine" /* Operate with escalated but still relatively safe privilege. Dangerous capabilities (e.g. "muck with devices") are dropped, but the most commonly used of root's powers (like chown any file) are available. This may be slightly safer than enabling full 'sysad' mode, but you should still prefer to use any of the lower power levels if possible. This mode is the most similar to what you would experience with docker defaults. This mode should not be assumed secure when combined with host mounts. (For example, one can trivially make an executable file in the host mount, set it to owner=0, set it setuid, and thus have a beachhead ready for a later phase in an attack.) */ FormulaPolicy_Governor FormulaPolicy = "governor" /* Operate with *ALL CAPABILITIES*. This is absolutely not secure against untrusted code -- it is completely equivalent in power to root on your host. Please try to use any of the lower power levels first. Among the things a system administrator may do is rebooting the machine and updating the kernel. Seriously, *only* use with trusted code. */ FormulaPolicy_Sysad FormulaPolicy = "sysad" )
type FormulaRunRecord ¶
type FormulaRunRecord struct { Guid string `refmt:"guid"` // random number, presumed globally unique. Time int64 `refmt:"time"` // time at start of build. FormulaID FormulaSetupHash `refmt:"formulaID"` // HID of formula ran. ExitCode int `refmt:"exitCode"` // exit code of the contained process. Results map[AbsPath]WareID `refmt:"results"` // wares produced by the run! Hostname string `refmt:",omitempty"` // Optional: hostname. not a trusted field, but useful for debugging. Metadata map[string]string `refmt:",omitempty"` // Optional: escape valve. you can attach freetext here. }
type FormulaSetupHash ¶
type FormulaSetupHash string
FormulaSetupHash is an opaque string derived from a cryptographic hash of the deterministic serialization of a Formula. Which is a fancy way of saying it's a fantastic primary key for memoizing computations.
type FormulaUserinfo ¶
type ImportRef ¶
type ImportRef interface { String() string // contains filtered or unexported methods }
ImportRef is a sum type, containing either a catalog reference ("catalog:{moduleName}:{releaseName}:{itemName}") or parent reference ("parent:{slotRef}"; only valid in submodules) or an ingest reference ("ingest:{ingestKind}[:{addntl}]"; only valid on main module).
Ingest references are interesting and should be used sparingly; they're for where new data comes into the Timeless ecosystem -- and that also means ingest references are also where the Timeless Stack abilities to automatically recursively audit where that data came from has reached its end.
Ingest references may explicitly reference wares (ex. "ingest:literal:tar:f00bAr"), or lean on other extensions to bring data into the system (ex. "ingest:git:.:HEAD"). Again, use sparingly: anything beyond "ingest:literal" and your module pipeline has become virtually impossible for anyone to evaluate without whatever additional un-contained un-tracked context your ingest refers to.
Ingest references should be passed on directly as an export of a module. Failure to do so is not *exactly* illegal, but it would make any replay of this module impossible without un-tracked context, and as such most of the tools in the Timeless Stack will issue either warnings or outright errors if the ingested data isn't also in the module exports.
func ParseImportRef ¶
type ImportRef_Catalog ¶
type ImportRef_Catalog ItemRef
func (ImportRef_Catalog) String ¶
func (x ImportRef_Catalog) String() string
type ImportRef_Ingest ¶
func (ImportRef_Ingest) String ¶
func (x ImportRef_Ingest) String() string
type ImportRef_Parent ¶
type ImportRef_Parent SlotRef
func (ImportRef_Parent) String ¶
func (x ImportRef_Parent) String() string
type Lineage ¶
type Lineage struct { // Name of self. Name ModuleName // Ordered list of release entries. // Order not particularly important, though UIs generally display in this order. // Most recent entries are should be placed at the top (e.g. index zero). // // Each entry must have a unique ReleaseName in the scope of this Lineage. Releases []Release }
Lineage contains the metadata for all releases for a particular module. Treat it as an append-only record: new releases append to the module's lineage.
type ModuleName ¶
type ModuleName string
func (ModuleName) Validate ¶
func (x ModuleName) Validate() error
Validate returns errors if the string is not a valid ModuleName. A ModuleName must resemble a domain name (per DNS-1123) with optional subsequent '/'-separated path segments, roughly like:
[[[...]subsubdomain.]subdomain.]domain[/path[/morepath[...]]]
The domain segments are restricted to DNS-1123 characters, and path segments are restricted to [TODO:regexp]. These requirements ensure that mapping module names onto a filesystem path is always a reasonable operation.
type Operation ¶
type Operation struct { Inputs map[AbsPath]SlotRef Action FormulaAction Outputs map[SlotName]AbsPath `refmt:",omitempty"` }
Operation is one of the concrete types of StepUnion which composes a Module; it describes a containerizable computation, all of its input filesystem paths bound to slot references, and all of the paths that should be collected as outputs and assigned to another slot for further use.
When all of the input slot references in an Operation are known, it can be bound, becoming a Formula -- which is structurally similar, but now with all specific, concrete WareID hashes instead of SlotRef.
type OperationRecord ¶
type OperationRecord struct { FormulaRunRecord Results map[SlotName]WareID }
OperationRecord is mostly an alias of FormulaRunRecord, but with Results indexed by SlotName from the Operation rather than path in the Formula.
We usually serialize FormulaRunRecord, because it's more convergent when content-addressed; OperationRecord contains immaterial details (e.g. the SlotName). OperationRecord is sometimes more convenient to use internally.
type PackType ¶
type PackType string
A PackType string identifies what kind of packing format is used when packing a ware. It's the first part of a WareID tuple.
Typically, the desired PackType is an argument when using packing tools; whereas the PackType is communicated by the WareID when using unpack tools.
PackTypes are a simple [a-zA-Z0-9] string. Colons in particular are not allowable (since a PackType string is the first part of a WareID).
type Release ¶
type Release struct { Name ReleaseName Items map[ItemName]WareID Metadata map[string]string Hazards map[string]string }
Release describes a single atomic release of wares. Each release must have a name, and contains a set of items, where each item refers to a WareID.
Releases are used to group something chronologically; items in a release are used to distinguish between multiple artifacts in a release.
In the context of building software, a Release usually has semantics lining up with "a bunch of software built from a particular source checkout". And thus, typically, there is also an Item in the release called "src"; and often enough, this will be a "git" wareID. Other Item names likely to appear might be "linux-amd64", for example. All of this is convention, however; releases could just as well be used to track various versions of a photo album.
It is recommended that a series of Release entries in a Lineage should stick to the same set of ItemName over time, because consumers of catalog information generally expect this, and changing Item names may produce work for other people.
type ReleaseName ¶
type ReleaseName string
type SlotRef ¶
func ParseSlotRef ¶
type SubmoduleRef ¶
type SubmoduleRef string // .-sep. really is a []StepName, but we wanted something easily used as a map key.
func (SubmoduleRef) Child ¶
func (ref SubmoduleRef) Child(child StepName) SubmoduleRef
Child appends the stepname to this ref. Think of it as leaving breadcrumbs behind as you zoom in ('Child' and 'Decontextualize' often come in pairs.)
func (SubmoduleRef) Contextualize ¶
func (ref SubmoduleRef) Contextualize(parent SubmoduleRef) SubmoduleRef
Contextualize prepends a set of step references to this ref. Think of it as zooming out.
func (SubmoduleRef) Decontextualize ¶
func (ref SubmoduleRef) Decontextualize() SubmoduleRef
Decontextualize strips the first stepName from the front of the ref. Think of it as zooming in.
func (SubmoduleRef) First ¶
func (ref SubmoduleRef) First() StepName
First returns the first StepName component of the SubmoduleRef. The empty string is returned if this SubmoduleRef is itself zero.
type SubmoduleSlotRef ¶
type SubmoduleSlotRef struct { SubmoduleRef SlotRef }
func (SubmoduleSlotRef) Contextualize ¶
func (ref SubmoduleSlotRef) Contextualize(parent SubmoduleRef) SubmoduleSlotRef
Contextualize prepends a set of step references to this ref.
func (SubmoduleSlotRef) Decontextualize ¶
func (ref SubmoduleSlotRef) Decontextualize() SubmoduleSlotRef
Decontextualize strips the first stepName from the front of the ref. Think of it as zooming in.
func (SubmoduleSlotRef) String ¶
func (x SubmoduleSlotRef) String() string
type SubmoduleSlotRefList ¶
type SubmoduleSlotRefList []SubmoduleSlotRef
func (SubmoduleSlotRefList) Len ¶
func (s SubmoduleSlotRefList) Len() int
func (SubmoduleSlotRefList) Less ¶
func (s SubmoduleSlotRefList) Less(i, j int) bool
func (SubmoduleSlotRefList) Swap ¶
func (s SubmoduleSlotRefList) Swap(i, j int)
type SubmoduleStepRef ¶
type SubmoduleStepRef struct { SubmoduleRef StepName }
func (SubmoduleStepRef) Contextualize ¶
func (ref SubmoduleStepRef) Contextualize(parent SubmoduleRef) SubmoduleStepRef
Contextualize prepends a set of step references to this ref.
func (SubmoduleStepRef) Decontextualize ¶
func (ref SubmoduleStepRef) Decontextualize() SubmoduleStepRef
Decontextualize strips the first stepName from the front of the ref. Think of it as zooming in.
func (SubmoduleStepRef) String ¶
func (x SubmoduleStepRef) String() string
type WareID ¶
WareID is a content-addressable, cryptographic hash that uniquely identifies a "ware" -- a packed Fileset. (Fileset and Ware are distinct concepts because a fileset is not packed in any particular way and thus has no innate hash; a Ware is packed and hashed.)
Ware IDs are serialized as a string in two parts, separated by a colon -- for example like "git:f23ae1829" or "tar:WJL8or32vD". The first part communicates which kind of packing system computed the hash, and the second part is the hash itself.
func ParseWareID ¶
type WareSourcing ¶
type WareSourcing struct { ByPackType map[PackType][]WarehouseLocation `refmt:",omitempty"` ByModule map[ModuleName]map[PackType][]WarehouseLocation `refmt:",omitempty"` ByWare map[WareID][]WarehouseLocation `refmt:",omitempty"` }
WareSourcing contains suggestions on WarehouseLocations which may be able to provide Wares.
This information may be indexed in several different ways: most specifically (and inflexibly, and verbosely) by specific WareID; or by module name; or by pack type in general. (Non-content-addressable WarehouseLocations only semantically make sense when indexed by specific WareID; since the other forms of indexing will recommend the WarehouseLocation for more than one specific WareID, it stands to reason that the WarehouseLocation ought to specify a system which can store more than one Ware!)
WareSourcing is meant to be reasonable to provide to *more than one* Operation (each of which may also have more than one input, of course) -- the various mechanisms of indexing allow such generalized suggestions.
func (*WareSourcing) Append ¶
func (ws *WareSourcing) Append(ws2 WareSourcing)
func (*WareSourcing) AppendByModule ¶
func (ws *WareSourcing) AppendByModule(modName ModuleName, packtype PackType, locations ...WarehouseLocation)
func (*WareSourcing) AppendByPackType ¶
func (ws *WareSourcing) AppendByPackType(packtype PackType, locations ...WarehouseLocation)
func (*WareSourcing) AppendByWare ¶
func (ws *WareSourcing) AppendByWare(wareID WareID, locations ...WarehouseLocation)
func (WareSourcing) PivotToInputs ¶
func (ws WareSourcing) PivotToInputs(frm Formula) WareSourcing
PivotToInputs is a shortcut for calling PivotToWareIDs with the set of inputs to a bound Op.
func (WareSourcing) PivotToModuleWare ¶
func (ws WareSourcing) PivotToModuleWare(wareID WareID, assumingModName ModuleName) WareSourcing
PivotToModuleWare returns WareSourcing where all data is indexed ByWareID (like PivotToInputs and PivotToWareIDs), also applying any ByModule-index info for the named module. (This is typically used immediately after loading the mirrors info in a module's release catalog, in order to avoid needing to carry around the module-oriented info any longer.)
func (WareSourcing) PivotToWareID ¶
func (ws WareSourcing) PivotToWareID(wareID WareID) (v []WarehouseLocation)
PivotToWareID is like PivotToWareIDs but for a single WareID; and shortcuts immediately to returning a flat list of WarehouseLocation.
func (WareSourcing) PivotToWareIDs ¶
func (ws WareSourcing) PivotToWareIDs(wareIDs map[WareID]struct{}) WareSourcing
PivotToWareIDs returns a new and reduced WareSourcing where all data is indexed ByWareID for each wareID in the argument set. All the ByPackType for a type "tar" will be appended to the ByWareID index for all wareIDs of type "tar", and so forth. ByModule data is ignored (you should flip that to ByWareID-indexed immediately when you load it).
type WareStaging ¶
type WareStaging struct {
ByPackType map[PackType]WarehouseLocation
}
WareStaging contains instructions on where to store wares that are output from an Operation.
WareStaging only takes a single warehouse location per packtype. It is intended that if you want to replicate the ware storage to multiple locations, you should do this later, *not* while saving the output from the Operation. An Operation may fail if the WarehouseLocation provided by the WareStaging info is not writable.
It is semantically unreasonable to provide a non-content-addressable WarehouseLocation in WareStaging info: WareStaging info is meant to be reasonable to provide to *more than one* Operation (each of which may also have more than one output, of course) -- therefore it is only sensible to provide a WarehouseLocation which is capable of storing more than one Ware! (You may still run Repeatr with non-CA WarehouseLocation configurations for specific outputs; it's only the higher level pipelining tools which become opinionated about this.)
type WarehouseLocation ¶
type WarehouseLocation string
Source Files
¶
- catalog.go
- catalogSerial.go
- catalogValidation.go
- filesetFilters.go
- filesetFiltersSerial.go
- filesetFiltersSerialComp.go
- formula.go
- formulaHashing.go
- formulaSerial.go
- formulaUtil.go
- module.go
- moduleSerial.go
- operation.go
- operationSerial.go
- references.go
- referencesSerial.go
- referencesSort.go
- referencesUtil.go
- referencesValidation.go
- warehousing.go
- warehousingSerial.go
- warehousingUtil.go
- wares.go
- waresSerial.go
Directories
¶
Path | Synopsis |
---|---|
Interfaces of hitch commands.
|
Interfaces of hitch commands. |
fmt
repeatrfmt contains translators for writing repeatr.Event to an io.Writer, in both human-readable and API-friendly variants.
|
repeatrfmt contains translators for writing repeatr.Event to an io.Writer, in both human-readable and API-friendly variants. |