Documentation
¶
Overview ¶
Package desync implements data structures, protocols and features of https://github.com/systemd/casync in order to allow support for additional platforms and improve performance by way of concurrency and caching.
Supports the following casync data structures: catar archives, caibx/caidx index files, castr stores (local or remote).
See desync/cmd for reference implementations of the available features.
Index ¶
- Constants
- Variables
- func CanClone(dstFile, srcFile string) bool
- func ChopFile(ctx context.Context, name string, chunks []IndexChunk, ws WriteStore, n int, ...) error
- func CloneRange(dst, src *os.File, srcOffset, srcLength, dstOffset uint64) error
- func Compress(src []byte) ([]byte, error)
- func Copy(ctx context.Context, ids []ChunkID, src Store, dst WriteStore, n int, ...) error
- func Decompress(dst, src []byte) ([]byte, error)
- func FilemodeToStatMode(mode os.FileMode) uint32
- func GetFileSize(fileName string) (size uint64, err error)
- func IndexFromFile(ctx context.Context, name string, n int, min, avg, max uint64, pb ProgressBar) (Index, ChunkingStats, error)
- func MountIndex(ctx context.Context, idx Index, ifs MountFS, path string, s Store, n int) error
- func NewHTTPHandler(s Store, writable, skipVerifyWrite bool, converters Converters, auth string) http.Handler
- func NewHTTPIndexHandler(s IndexStore, writable bool, auth string) http.Handler
- func SipHash(b []byte) uint64
- func StatModeToFilemode(mode uint32) os.FileMode
- func Tar(ctx context.Context, w io.Writer, fs FilesystemReader) error
- func UnTar(ctx context.Context, r io.Reader, fs FilesystemWriter) error
- func UnTarIndex(ctx context.Context, fs FilesystemWriter, index Index, s Store, n int, ...) error
- func VerifyIndex(ctx context.Context, name string, idx Index, n int, pb ProgressBar) error
- type ArchiveDecoder
- type AssembleOptions
- type Cache
- type Chunk
- type ChunkID
- type ChunkInvalid
- type ChunkMissing
- type ChunkStorage
- type Chunker
- type ChunkingStats
- type Compressor
- type ConsoleIndexStore
- type Converters
- type DedupQueue
- type DefaultProgressBar
- type ExtractStats
- type FailoverGroup
- type File
- type FileSeed
- type FilesystemReader
- type FilesystemWriter
- type FormatACLDefault
- type FormatACLGroup
- type FormatACLGroupObj
- type FormatACLUser
- type FormatDecoder
- type FormatDevice
- type FormatEncoder
- type FormatEntry
- type FormatFCaps
- type FormatFilename
- type FormatGoodbye
- type FormatGoodbyeItem
- type FormatGroup
- type FormatHeader
- type FormatIndex
- type FormatPayload
- type FormatSELinux
- type FormatSymlink
- type FormatTable
- type FormatTableItem
- type FormatUser
- type FormatXAttr
- type GCIndexStore
- type GCStore
- type GCStoreBase
- type GetReaderForRequestBody
- type HTTPHandler
- type HTTPHandlerBase
- type HTTPIndexHandler
- type Hash
- type HashAlgorithm
- type Index
- type IndexChunk
- type IndexMountFS
- type IndexPos
- type IndexSegment
- type IndexStore
- type IndexWriteStore
- type Interrupted
- type InvalidFormat
- type InvalidSeedAction
- type LocalFS
- func (fs *LocalFS) CreateDevice(n NodeDevice) error
- func (fs *LocalFS) CreateDir(n NodeDirectory) error
- func (fs *LocalFS) CreateFile(n NodeFile) error
- func (fs *LocalFS) CreateSymlink(n NodeSymlink) error
- func (fs *LocalFS) Next() (*File, error)
- func (fs *LocalFS) SetDirPermissions(n NodeDirectory) error
- func (fs *LocalFS) SetFilePermissions(n NodeFile) error
- func (fs *LocalFS) SetSymlinkPermissions(n NodeSymlink) error
- type LocalFSOptions
- type LocalIndexStore
- type LocalStore
- func (s LocalStore) Close() error
- func (s LocalStore) GetChunk(id ChunkID) (*Chunk, error)
- func (s LocalStore) HasChunk(id ChunkID) (bool, error)
- func (s LocalStore) Prune(ctx context.Context, ids map[ChunkID]struct{}) error
- func (s LocalStore) RemoveChunk(id ChunkID) error
- func (s LocalStore) StoreChunk(chunk *Chunk) error
- func (s LocalStore) String() string
- func (s LocalStore) Verify(ctx context.Context, n int, repair bool, w io.Writer) error
- type Message
- type MountFS
- type MtreeFS
- type NoSuchObject
- type NodeDevice
- type NodeDirectory
- type NodeFile
- type NodeSymlink
- type NullChunk
- type NullProgressBar
- type Plan
- type ProgressBar
- type Protocol
- func (p *Protocol) Initialize(flags uint64) (uint64, error)
- func (p *Protocol) ReadMessage() (Message, error)
- func (p *Protocol) RecvHello() (uint64, error)
- func (p *Protocol) RequestChunk(id ChunkID) (*Chunk, error)
- func (p *Protocol) SendGoodbye() error
- func (p *Protocol) SendHello(flags uint64) error
- func (p *Protocol) SendMissing(id ChunkID) error
- func (p *Protocol) SendProtocolChunk(id ChunkID, flags uint64, chunk []byte) error
- func (p *Protocol) SendProtocolRequest(id ChunkID, flags uint64) error
- func (p *Protocol) WriteMessage(m Message) error
- type ProtocolServer
- type PruneStore
- type RemoteHTTP
- type RemoteHTTPBase
- func (r *RemoteHTTPBase) Close() error
- func (r *RemoteHTTPBase) GetObject(name string) ([]byte, error)
- func (r *RemoteHTTPBase) IssueHttpRequest(method string, u *url.URL, getReader GetReaderForRequestBody, attempt int) (int, []byte, error)
- func (r *RemoteHTTPBase) IssueRetryableHttpRequest(method string, u *url.URL, getReader GetReaderForRequestBody) (int, []byte, error)
- func (r *RemoteHTTPBase) StoreObject(name string, getReader GetReaderForRequestBody) error
- func (r *RemoteHTTPBase) String() string
- type RemoteHTTPIndex
- type RemoteSSH
- type RepairableCache
- type S3IndexStore
- type S3Store
- type S3StoreBase
- type SFTPIndexStore
- type SFTPStore
- func (s *SFTPStore) Close() error
- func (s *SFTPStore) GetChunk(id ChunkID) (*Chunk, error)
- func (s *SFTPStore) HasChunk(id ChunkID) (bool, error)
- func (s *SFTPStore) Prune(ctx context.Context, ids map[ChunkID]struct{}) error
- func (s *SFTPStore) RemoveChunk(id ChunkID) error
- func (s *SFTPStore) StoreChunk(chunk *Chunk) error
- func (s *SFTPStore) String() string
- type SFTPStoreBase
- type SHA256
- type SHA512256
- type Seed
- type SeedSegment
- type SeedSegmentCandidate
- type SeedSequencer
- type SparseFile
- type SparseFileHandle
- type SparseFileOptions
- type SparseMountFS
- type Store
- type StoreOptions
- type StoreRouter
- type SwapStore
- type SwapWriteStore
- type TarReader
- type TarReaderOptions
- type TarWriter
- type WriteDedupQueue
- type WriteStore
- type Xattrs
Constants ¶
const ( // Format identifiers used in archive files CaFormatEntry = 0x1396fabcea5bbb51 CaFormatUser = 0xf453131aaeeaccb3 CaFormatGroup = 0x25eb6ac969396a52 CaFormatXAttr = 0xb8157091f80bc486 CaFormatACLUser = 0x297dc88b2ef12faf CaFormatACLGroup = 0x36f2acb56cb3dd0b CaFormatACLGroupObj = 0x23047110441f38f3 CaFormatACLDefault = 0xfe3eeda6823c8cd0 CaFormatACLDefaultUser = 0xbdf03df9bd010a91 CaFormatACLDefaultGroup = 0xa0cb1168782d1f51 CaFormatFCaps = 0xf7267db0afed0629 CaFormatSELinux = 0x46faf0602fd26c59 CaFormatSymlink = 0x664a6fb6830e0d6c CaFormatDevice = 0xac3dace369dfe643 CaFormatPayload = 0x8b9e1d93d6dcffc9 CaFormatFilename = 0x6dbb6ebcb3161f0b CaFormatGoodbye = 0xdfd35c5e8327c403 CaFormatGoodbyeTailMarker = 0x57446fa533702943 CaFormatIndex = 0x96824d9c7b129ff9 CaFormatTable = 0xe75b9e112f17417d CaFormatTableTailMarker = 0x4b4f050e5549ecd1 // SipHash key used in Goodbye elements to hash the filename. It's 16 bytes, // split into 2x64bit values, upper and lower part of the key CaFormatGoodbyeHashKey0 = 0x8574442b0f1d84b3 CaFormatGoodbyeHashKey1 = 0x2736ed30d1c22ec1 // Format feature flags CaFormatWith16BitUIDs = 0x1 CaFormatWith32BitUIDs = 0x2 CaFormatWithUserNames = 0x4 CaFormatWithSecTime = 0x8 CaFormatWithUSecTime = 0x10 CaFormatWithNSecTime = 0x20 CaFormatWith2SecTime = 0x40 CaFormatWithReadOnly = 0x80 CaFormatWithPermissions = 0x100 CaFormatWithSymlinks = 0x200 CaFormatWithDeviceNodes = 0x400 CaFormatWithFIFOs = 0x800 CaFormatWithSockets = 0x1000 /* DOS file flags */ CaFormatWithFlagHidden = 0x2000 CaFormatWithFlagSystem = 0x4000 CaFormatWithFlagArchive = 0x8000 /* chattr() flags */ CaFormatWithFlagAppend = 0x10000 CaFormatWithFlagNoAtime = 0x20000 CaFormatWithFlagCompr = 0x40000 CaFormatWithFlagNoCow = 0x80000 CaFormatWithFlagNoDump = 0x100000 CaFormatWithFlagDirSync = 0x200000 CaFormatWithFlagImmutable = 0x400000 CaFormatWithFlagSync = 0x800000 CaFormatWithFlagNoComp = 0x1000000 CaFormatWithFlagProjectInherit = 0x2000000 /* btrfs magic */ 
CaFormatWithSubvolume = 0x4000000 CaFormatWithSubvolumeRO = 0x8000000 /* Extended Attribute metadata */ CaFormatWithXattrs = 0x10000000 CaFormatWithACL = 0x20000000 CaFormatWithSELinux = 0x40000000 CaFormatWithFcaps = 0x80000000 CaFormatExcludeFile = 0x1000000000000000 CaFormatSHA512256 = 0x2000000000000000 CaFormatExcludeSubmounts = 0x4000000000000000 CaFormatExcludeNoDump = 0x8000000000000000 // Protocol message types CaProtocolHello = 0x3c71d0948ca5fbee CaProtocolIndex = 0xb32a91dd2b3e27f8 CaProtocolIndexEOF = 0x4f0932f1043718f5 CaProtocolArchive = 0x95d6428a69eddcc5 CaProtocolArchiveEOF = 0x450bef663f24cbad CaProtocolRequest = 0x8ab427e0f89d9210 CaProtocolChunk = 0x5213dd180a84bc8c CaProtocolMissing = 0xd010f9fac82b7b6c CaProtocolGoodbye = 0xad205dbf1a3686c3 CaProtocolAbort = 0xe7d9136b7efea352 // Provided services CaProtocolReadableStore = 0x1 CaProtocolWritableStore = 0x2 CaProtocolReadableIndex = 0x4 CaProtocolWritableIndex = 0x8 CaProtocolReadableArchive = 0x10 CaProtocolWritableArchive = 0x20 // Wanted services CaProtocolPullChunks = 0x40 CaProtocolPullIndex = 0x80 CaProtocolPullArchive = 0x100 CaProtocolPushChunks = 0x200 CaProtocolPushIndex = 0x400 CaProtocolPushIndexChunks = 0x800 CaProtocolPushArchive = 0x1000 // Protocol request flags CaProtocolRequestHighPriority = 1 // Chunk properties CaProtocolChunkCompressed = 1 )
const ChunkerWindowSize = 48
ChunkerWindowSize is the number of bytes in the rolling hash window
const CompressedChunkExt = ".cacnk"
CompressedChunkExt is the file extension used for compressed chunks
const DefaultBlockSize = 4096
DefaultBlockSize is used when the actual filesystem block size cannot be determined automatically
const DefaultErrorRetry = 3
const DefaultErrorRetryBaseInterval = 500 * time.Millisecond
const TarFeatureFlags uint64 = CaFormatWith32BitUIDs | CaFormatWithNSecTime | CaFormatWithPermissions | CaFormatWithSymlinks | CaFormatWithDeviceNodes | CaFormatWithFIFOs | CaFormatWithSockets | CaFormatWithXattrs | CaFormatSHA512256 | CaFormatExcludeNoDump | CaFormatExcludeFile
TarFeatureFlags are used as feature flags in the header of catar archives. These should be used in index files when chunking a catar as well. TODO: Find out what CaFormatWithPermissions is as that's not set in casync-produced catar archives.
const UncompressedChunkExt = ""
UncompressedChunkExt is the file extension of uncompressed chunks
Variables ¶
var ( FormatString = map[uint64]string{ CaFormatEntry: "CaFormatEntry", CaFormatUser: "CaFormatUser", CaFormatGroup: "CaFormatGroup", CaFormatXAttr: "CaFormatXAttr", CaFormatACLUser: "CaFormatACLUser", CaFormatACLGroup: "CaFormatACLGroup", CaFormatACLGroupObj: "CaFormatACLGroupObj", CaFormatACLDefault: "CaFormatACLDefault", CaFormatACLDefaultUser: "CaFormatACLDefaultUser", CaFormatACLDefaultGroup: "CaFormatACLDefaultGroup", CaFormatFCaps: "CaFormatFCaps", CaFormatSELinux: "CaFormatSELinux", CaFormatSymlink: "CaFormatSymlink", CaFormatDevice: "CaFormatDevice", CaFormatPayload: "CaFormatPayload", CaFormatFilename: "CaFormatFilename", CaFormatGoodbye: "CaFormatGoodbye", CaFormatGoodbyeTailMarker: "CaFormatGoodbyeTailMarker", CaFormatIndex: "CaFormatIndex", CaFormatTable: "CaFormatTable", CaFormatTableTailMarker: "CaFormatTableTailMarker", } )
var Log = logrus.New()
var MockValidate = false
Functions ¶
func CanClone ¶ added in v0.4.0
CanClone tries to determine if the filesystem allows cloning of blocks between two files. It'll create two tempfiles in the same dirs and attempt to perform a 0-byte long block clone. If that's successful it'll return true.
func ChopFile ¶ added in v0.2.0
func ChopFile(ctx context.Context, name string, chunks []IndexChunk, ws WriteStore, n int, pb ProgressBar) error
ChopFile splits a file according to a list of chunks obtained from an Index and stores them in the provided store
func CloneRange ¶ added in v0.4.0
CloneRange uses the FICLONERANGE ioctl to de-dupe blocks between two files when using XFS or btrfs. Only works at block-boundaries.
func Copy ¶ added in v0.2.0
func Copy(ctx context.Context, ids []ChunkID, src Store, dst WriteStore, n int, pb ProgressBar) error
Copy reads a list of chunks from the provided src store, and copies the ones not already present in the dst store. The goal is to load chunks from remote store to populate a cache. If progress is provided, it'll be called when a chunk has been processed. Used to draw a progress bar, can be nil.
func Decompress ¶ added in v0.2.0
Decompress a block using the only supported algorithm. If you already have a buffer it can be passed into out and will be used. If out=nil, a buffer will be allocated.
func FilemodeToStatMode ¶ added in v0.8.0
FilemodeToStatMode converts Go's os.Filemode value into the syscall equivalent.
func GetFileSize ¶ added in v0.9.3
GetFileSize determines the size, in Bytes, of the file located at the given fileName.
func IndexFromFile ¶ added in v0.2.0
func IndexFromFile(ctx context.Context, name string, n int, min, avg, max uint64, pb ProgressBar, ) (Index, ChunkingStats, error)
IndexFromFile chunks a file in parallel and returns an index. It does not store chunks! Each concurrent chunker starts filesize/n bytes apart and splits independently. Each chunk worker tries to sync with its next neighbor and if successful stops processing letting the next one continue. The main routine reads and assembles a list of (confirmed) chunks from the workers, starting with the first worker. This algorithm wastes some CPU and I/O if the data doesn't contain chunk boundaries, for example if the whole file contains nil bytes. If progress is not nil, it'll be updated with the confirmed chunk position in the file.
func MountIndex ¶ added in v0.2.0
MountIndex mounts an index file under a FUSE mount point. The mount will only expose a single blob file as represented by the index.
func NewHTTPHandler ¶ added in v0.2.0
func NewHTTPHandler(s Store, writable, skipVerifyWrite bool, converters Converters, auth string) http.Handler
NewHTTPHandler initializes and returns a new HTTP handler for a chunks server.
func NewHTTPIndexHandler ¶ added in v0.3.0
func NewHTTPIndexHandler(s IndexStore, writable bool, auth string) http.Handler
NewHTTPIndexHandler initializes an HTTP index store handler
func SipHash ¶ added in v0.2.0
SipHash is used to calculate the hash in Goodbye element items, hashing the filename.
func StatModeToFilemode ¶ added in v0.8.0
StatModeToFilemode converts syscall mode to Go's os.Filemode value.
func Tar ¶ added in v0.2.0
Tar implements the tar command which recursively parses a directory tree, and produces a stream of encoded casync format elements (catar file).
func UnTar ¶ added in v0.2.0
UnTar implements the untar command, decoding a catar file and writing the contained tree to a target directory.
func UnTarIndex ¶ added in v0.2.0
func UnTarIndex(ctx context.Context, fs FilesystemWriter, index Index, s Store, n int, pb ProgressBar) error
UnTarIndex takes an index file (of a chunked catar), re-assembles the catar and decodes it on-the-fly into the target directory 'dst'. Uses n goroutines to retrieve and decompress the chunks.
func VerifyIndex ¶ added in v0.2.0
VerifyIndex re-calculates the checksums of a blob comparing it to a given index. Fails if the index does not match the blob.
Types ¶
type ArchiveDecoder ¶ added in v0.2.0
type ArchiveDecoder struct {
// contains filtered or unexported fields
}
ArchiveDecoder is used to decode a catar archive.
func NewArchiveDecoder ¶ added in v0.2.0
func NewArchiveDecoder(r io.Reader) ArchiveDecoder
NewArchiveDecoder initializes a decoder for a catar archive.
func (*ArchiveDecoder) Next ¶ added in v0.2.0
func (a *ArchiveDecoder) Next() (interface{}, error)
Next returns a node from an archive, or nil if the end is reached. If NodeFile is returned, the caller should read the file body before calling Next() again as that invalidates the reader.
type AssembleOptions ¶ added in v0.9.3
type AssembleOptions struct { N int InvalidSeedAction InvalidSeedAction }
type Cache ¶
type Cache struct {
// contains filtered or unexported fields
}
Cache is used to connect a (typically remote) store with a local store which functions as disk cache. Any request to the cache for a chunk will first be routed to the local store, and if that fails to the slower remote store. Any chunks retrieved from the remote store will be stored in the local one.
func NewCache ¶
func NewCache(s Store, l WriteStore) Cache
NewCache returns a cache router that uses a local store as cache before accessing a (supposedly slower) remote one.
func (Cache) GetChunk ¶
GetChunk first asks the local store for the chunk and then the remote one. If we get a chunk from the remote, it's stored locally too.
type Chunk ¶ added in v0.4.0
type Chunk struct {
// contains filtered or unexported fields
}
Chunk holds chunk data plain, storage format, or both. If a chunk is created from storage data, such as read from a compressed chunk store, and later the application requires the plain data, it'll be converted on demand by applying the given storage converters in reverse order. The converters can only be used to read the plain data, not to convert back to storage format.
func NewChunk ¶ added in v0.9.1
NewChunk creates a new chunk from plain data. The data is trusted and the ID is calculated on demand.
func NewChunkFromStorage ¶ added in v0.9.1
func NewChunkFromStorage(id ChunkID, b []byte, modifiers Converters, skipVerify bool) (*Chunk, error)
NewChunkFromStorage builds a new chunk from data that is not in plain format. It uses raw storage format from its source and the modifiers are used to convert into plain data as needed.
func NewChunkWithID ¶ added in v0.4.0
NewChunkWithID creates a new chunk from either compressed or uncompressed data (or both if available). It also expects an ID and validates that it matches the uncompressed data unless skipVerify is true. If called with just compressed data, it'll decompress it for the ID validation.
type ChunkID ¶
type ChunkID [32]byte
ChunkID is the SHA512/256 in binary encoding
func ChunkIDFromSlice ¶
ChunkIDFromSlice converts a SHA512/256 encoded as byte slice into a ChunkID. It's expected the slice is of the correct length
func ChunkIDFromString ¶
ChunkIDFromString converts a SHA512/256 encoded as string into a ChunkID
type ChunkInvalid ¶ added in v0.2.0
ChunkInvalid means the hash of the chunk content doesn't match its ID
func (ChunkInvalid) Error ¶ added in v0.2.0
func (e ChunkInvalid) Error() string
type ChunkMissing ¶
type ChunkMissing struct {
ID ChunkID
}
ChunkMissing is returned by a store that can't find a requested chunk
func (ChunkMissing) Error ¶
func (e ChunkMissing) Error() string
type ChunkStorage ¶ added in v0.2.0
ChunkStorage stores chunks in a writable store. It can be safely used by multiple goroutines and contains an internal cache of what chunks have been stored previously.
func NewChunkStorage ¶ added in v0.2.0
func NewChunkStorage(ws WriteStore) *ChunkStorage
NewChunkStorage initializes a ChunkStorage object.
func (*ChunkStorage) StoreChunk ¶ added in v0.2.0
func (s *ChunkStorage) StoreChunk(chunk *Chunk) (err error)
StoreChunk stores a single chunk in a synchronous manner.
type Chunker ¶ added in v0.2.0
type Chunker struct {
// contains filtered or unexported fields
}
Chunker is used to break up a data stream into chunks of data.
func NewChunker ¶ added in v0.2.0
NewChunker initializes a chunker for a data stream according to min/avg/max chunk size.
func (*Chunker) Advance ¶ added in v0.8.0
Advance n bytes without producing chunks. This can be used if the content of the next section in the file is known (i.e. it is known that there are a number of null chunks coming). This resets everything in the chunker and behaves as if the streams starts at (current position+n).
type ChunkingStats ¶ added in v0.2.0
ChunkingStats is used to report statistics of a parallel chunking operation.
type ConsoleIndexStore ¶ added in v0.3.0
type ConsoleIndexStore struct{}
ConsoleIndexStore is used for writing/reading indexes from STDOUT/STDIN
func NewConsoleIndexStore ¶ added in v0.3.0
func NewConsoleIndexStore() (ConsoleIndexStore, error)
NewConsoleIndexStore creates an instance of an indexStore that reads/writes to and from console
func (ConsoleIndexStore) Close ¶ added in v0.3.0
func (s ConsoleIndexStore) Close() error
Close the index store.
func (ConsoleIndexStore) GetIndex ¶ added in v0.3.0
func (s ConsoleIndexStore) GetIndex(string) (i Index, e error)
GetIndex reads an index from STDIN and returns it.
func (ConsoleIndexStore) GetIndexReader ¶ added in v0.3.0
func (s ConsoleIndexStore) GetIndexReader(string) (io.ReadCloser, error)
GetIndexReader returns a reader from STDIN
func (ConsoleIndexStore) StoreIndex ¶ added in v0.3.0
func (s ConsoleIndexStore) StoreIndex(name string, idx Index) error
StoreIndex writes the provided index to STDOUT. The name is ignored.
func (ConsoleIndexStore) String ¶ added in v0.3.0
func (s ConsoleIndexStore) String() string
type Converters ¶ added in v0.9.1
type Converters []converter
Converters are modifiers for chunk data, such as compression or encryption. They are used to prepare chunk data for storage, or to read it from storage. The order of the conversion layers matters. When plain data is prepared for storage, the toStorage method is used in the order the layers are defined. To read from storage, the fromStorage method is called for each layer in reverse order.
type DedupQueue ¶ added in v0.7.0
type DedupQueue struct {
// contains filtered or unexported fields
}
DedupQueue wraps a store and provides deduplication of incoming chunk requests. This is useful when a burst of requests for the same chunk is received and the chunk store serving those is slow. With the DedupQueue wrapper, concurrent requests for the same chunk will result in just one request to the upstream store. Implements the Store interface.
func NewDedupQueue ¶ added in v0.7.0
func NewDedupQueue(store Store) *DedupQueue
NewDedupQueue initializes a new instance of the wrapper.
func (*DedupQueue) Close ¶ added in v0.7.0
func (q *DedupQueue) Close() error
func (*DedupQueue) GetChunk ¶ added in v0.7.0
func (q *DedupQueue) GetChunk(id ChunkID) (*Chunk, error)
func (*DedupQueue) HasChunk ¶ added in v0.7.0
func (q *DedupQueue) HasChunk(id ChunkID) (bool, error)
func (*DedupQueue) String ¶ added in v0.7.0
func (q *DedupQueue) String() string
type DefaultProgressBar ¶ added in v0.9.3
type DefaultProgressBar struct {
*pb.ProgressBar
}
DefaultProgressBar wraps https://github.com/cheggaaa/pb and implements desync.ProgressBar
func (DefaultProgressBar) Set ¶ added in v0.9.3
func (p DefaultProgressBar) Set(current int)
Set the current value
func (DefaultProgressBar) SetTotal ¶ added in v0.9.3
func (p DefaultProgressBar) SetTotal(total int)
SetTotal sets the upper bounds for the progress bar
func (DefaultProgressBar) Start ¶ added in v0.9.3
func (p DefaultProgressBar) Start()
Start displaying the progress bar
type ExtractStats ¶ added in v0.4.0
type ExtractStats struct { ChunksFromSeeds uint64 `json:"chunks-from-seeds"` ChunksFromStore uint64 `json:"chunks-from-store"` ChunksInPlace uint64 `json:"chunks-in-place"` BytesCopied uint64 `json:"bytes-copied-from-seeds"` BytesCloned uint64 `json:"bytes-cloned-from-seeds"` Blocksize uint64 `json:"blocksize"` BytesTotal int64 `json:"bytes-total"` ChunksTotal int `json:"chunks-total"` Seeds int `json:"seeds"` }
ExtractStats contains detailed statistics about a file extract operation, such as if data chunks were copied from seeds or cloned.
func AssembleFile ¶ added in v0.2.0
func AssembleFile(ctx context.Context, name string, idx Index, s Store, seeds []Seed, options AssembleOptions) (*ExtractStats, error)
AssembleFile re-assembles a file based on a list of index chunks. It runs n goroutines, creating one filehandle for the file "name" per goroutine and writes to the file simultaneously. If progress is provided, it'll be called when a chunk has been processed. If the input file exists and is not empty, the algorithm will first confirm if the data matches what is expected and only populate areas that differ from the expected content. This can be used to complete partly written files.
type FailoverGroup ¶ added in v0.7.0
type FailoverGroup struct {
// contains filtered or unexported fields
}
FailoverGroup wraps multiple stores to provide failover when one or more stores in the group fail. Only one of the stores in the group is considered "active" at a time. If an unexpected error is returned from the active store, the next store in the group becomes the active one and the request retried. When all stores returned a failure, the group will pass up the failure to the caller. The active store rotates through all available stores. All stores in the group are expected to contain the same chunks, there is no failover for missing chunks. Implements the Store interface.
func NewFailoverGroup ¶ added in v0.7.0
func NewFailoverGroup(stores ...Store) *FailoverGroup
NewFailoverGroup initializes and returns a store that wraps multiple stores to form a group that can fail over between them on failure from one.
func (*FailoverGroup) Close ¶ added in v0.7.0
func (g *FailoverGroup) Close() error
func (*FailoverGroup) GetChunk ¶ added in v0.7.0
func (g *FailoverGroup) GetChunk(id ChunkID) (*Chunk, error)
func (*FailoverGroup) HasChunk ¶ added in v0.7.0
func (g *FailoverGroup) HasChunk(id ChunkID) (bool, error)
func (*FailoverGroup) String ¶ added in v0.7.0
func (g *FailoverGroup) String() string
type File ¶ added in v0.8.0
type File struct { Name string Path string Mode os.FileMode Size uint64 // Link target for symlinks LinkTarget string // Modification time ModTime time.Time // User/group IDs Uid int Gid int // Major/Minor for character or block devices DevMajor uint64 DevMinor uint64 // Extended attributes Xattrs map[string]string // File content. Nil for non-regular files. Data io.ReadCloser }
File represents a filesystem object such as directory, file, symlink or device. It's used when creating archives from a source filesystem which can be a real OS filesystem, or another archive stream such as tar.
type FileSeed ¶ added in v0.4.0
type FileSeed struct {
// contains filtered or unexported fields
}
FileSeed is used to copy or clone blocks from an existing index+blob during file extraction.
func NewIndexSeed ¶ added in v0.4.0
NewIndexSeed initializes a new seed that uses an existing index and its blob
func (*FileSeed) LongestMatchWith ¶ added in v0.4.0
func (s *FileSeed) LongestMatchWith(chunks []IndexChunk) (int, SeedSegment)
LongestMatchWith returns the longest sequence of chunks anywhere in Source that match `chunks` starting at chunks[0], limiting the maximum number of chunks if reflinks are not supported. If there is no match, it returns a length of zero and a nil SeedSegment.
func (*FileSeed) RegenerateIndex ¶ added in v0.9.3
func (*FileSeed) SetInvalid ¶ added in v0.9.3
type FilesystemReader ¶ added in v0.8.0
FilesystemReader is an interface for source filesystem to be used during tar operations. Next() is expected to return files and directories in a consistent and stable order and return io.EOF when no further files are available.
type FilesystemWriter ¶ added in v0.8.0
type FilesystemWriter interface { CreateDir(n NodeDirectory) error CreateFile(n NodeFile) error CreateSymlink(n NodeSymlink) error CreateDevice(n NodeDevice) error }
FilesystemWriter is a filesystem implementation that supports untar'ing a catar archive to.
type FormatACLDefault ¶ added in v0.2.0
type FormatACLDefault struct { FormatHeader UserObjPermissions uint64 GroupObjPermissions uint64 OtherPermissions uint64 MaskPermissions uint64 }
type FormatACLGroup ¶ added in v0.2.0
type FormatACLGroup struct { FormatHeader GID uint64 Permissions uint64 Name string }
type FormatACLGroupObj ¶ added in v0.2.0
type FormatACLGroupObj struct { FormatHeader Permissions uint64 }
type FormatACLUser ¶ added in v0.2.0
type FormatACLUser struct { FormatHeader UID uint64 Permissions uint64 Name string }
type FormatDecoder ¶ added in v0.2.0
type FormatDecoder struct {
// contains filtered or unexported fields
}
FormatDecoder is used to parse and break up a stream of casync format elements found in archives or index files.
func NewFormatDecoder ¶ added in v0.2.0
func NewFormatDecoder(r io.Reader) FormatDecoder
func (*FormatDecoder) Next ¶ added in v0.2.0
func (d *FormatDecoder) Next() (interface{}, error)
Next returns the next format element from the stream. If an element contains a reader, that reader should be used before any subsequent calls as it'll be invalidated then. Returns nil when the end is reached.
type FormatDevice ¶ added in v0.2.0
type FormatDevice struct { FormatHeader Major uint64 Minor uint64 }
type FormatEncoder ¶ added in v0.2.0
type FormatEncoder struct {
// contains filtered or unexported fields
}
FormatEncoder takes casync format elements and encodes them into a stream.
func NewFormatEncoder ¶ added in v0.2.0
func NewFormatEncoder(w io.Writer) FormatEncoder
func (*FormatEncoder) Encode ¶ added in v0.2.0
func (e *FormatEncoder) Encode(v interface{}) (int64, error)
type FormatEntry ¶ added in v0.2.0
type FormatFCaps ¶ added in v0.2.0
type FormatFCaps struct { FormatHeader Data []byte }
type FormatFilename ¶ added in v0.2.0
type FormatFilename struct { FormatHeader Name string }
type FormatGoodbye ¶ added in v0.2.0
type FormatGoodbye struct { FormatHeader Items []FormatGoodbyeItem }
type FormatGoodbyeItem ¶ added in v0.2.0
type FormatGroup ¶ added in v0.2.0
type FormatGroup struct { FormatHeader Name string }
type FormatHeader ¶ added in v0.2.0
type FormatIndex ¶ added in v0.2.0
type FormatIndex struct { FormatHeader FeatureFlags uint64 ChunkSizeMin uint64 ChunkSizeAvg uint64 ChunkSizeMax uint64 }
type FormatPayload ¶ added in v0.2.0
type FormatPayload struct { FormatHeader Data io.Reader }
type FormatSELinux ¶ added in v0.2.0
type FormatSELinux struct { FormatHeader Label string }
type FormatSymlink ¶ added in v0.2.0
type FormatSymlink struct { FormatHeader Target string }
type FormatTable ¶ added in v0.2.0
type FormatTable struct { FormatHeader Items []FormatTableItem }
type FormatTableItem ¶ added in v0.2.0
type FormatUser ¶ added in v0.2.0
type FormatUser struct { FormatHeader Name string }
type FormatXAttr ¶ added in v0.2.0
type FormatXAttr struct { FormatHeader NameAndValue string }
type GCIndexStore ¶ added in v0.9.0
type GCIndexStore struct {
GCStoreBase
}
GCIndexStore is a read-write index store with Google Storage backing
func NewGCIndexStore ¶ added in v0.9.0
func NewGCIndexStore(location *url.URL, opt StoreOptions) (s GCIndexStore, e error)
NewGCIndexStore creates an index store with Google Storage backing. The URL should be provided like this: gc://bucket/prefix
func (GCIndexStore) GetIndex ¶ added in v0.9.0
func (s GCIndexStore) GetIndex(name string) (i Index, e error)
GetIndex returns an Index structure from the store
func (GCIndexStore) GetIndexReader ¶ added in v0.9.0
func (s GCIndexStore) GetIndexReader(name string) (r io.ReadCloser, err error)
GetIndexReader returns a reader for an index from an Google Storage store. Fails if the specified index file does not exist.
func (GCIndexStore) StoreIndex ¶ added in v0.9.0
func (s GCIndexStore) StoreIndex(name string, idx Index) error
StoreIndex writes the index file to the Google Storage store
type GCStore ¶ added in v0.9.0
type GCStore struct {
GCStoreBase
}
GCStore is a read-write store with Google Storage backing
func NewGCStore ¶ added in v0.9.0
func NewGCStore(location *url.URL, opt StoreOptions) (s GCStore, e error)
NewGCStore creates a chunk store with Google Storage backing. The URL should be provided like this: gs://bucketname/prefix Credentials are passed in via the environment variables. TODO
func (GCStore) Prune ¶ added in v0.9.0
Prune removes any chunks from the store that are not contained in a list (map)
func (GCStore) RemoveChunk ¶ added in v0.9.0
RemoveChunk deletes a chunk, typically an invalid one, from the filesystem. Used when verifying and repairing caches.
func (GCStore) StoreChunk ¶ added in v0.9.0
StoreChunk adds a new chunk to the store
type GCStoreBase ¶ added in v0.9.0
type GCStoreBase struct { Location string // contains filtered or unexported fields }
GCStoreBase is the base object for all chunk and index stores with Google Storage backing
func NewGCStoreBase ¶ added in v0.9.0
func NewGCStoreBase(u *url.URL, opt StoreOptions) (GCStoreBase, error)
NewGCStoreBase initializes a base object used for chunk or index stores backed by Google Storage.
func (GCStoreBase) Close ¶ added in v0.9.0
func (s GCStoreBase) Close() error
Close the GCS base store. NOP operation but needed to implement the store interface.
func (GCStoreBase) String ¶ added in v0.9.0
func (s GCStoreBase) String() string
type GetReaderForRequestBody ¶ added in v0.9.0
type HTTPHandler ¶ added in v0.2.0
type HTTPHandler struct { HTTPHandlerBase SkipVerifyWrite bool // contains filtered or unexported fields }
HTTPHandler is the server-side handler for a HTTP chunk store.
func (HTTPHandler) ServeHTTP ¶ added in v0.2.0
func (h HTTPHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
type HTTPHandlerBase ¶ added in v0.3.0
type HTTPHandlerBase struct {
// contains filtered or unexported fields
}
HTTPHandlerBase is the base object for a HTTP chunk or index store.
type HTTPIndexHandler ¶ added in v0.3.0
type HTTPIndexHandler struct { HTTPHandlerBase // contains filtered or unexported fields }
HTTPIndexHandler is the HTTP handler for index stores.
func (HTTPIndexHandler) ServeHTTP ¶ added in v0.3.0
func (h HTTPIndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
type Hash ¶ added in v0.2.0
type Hash struct {
// contains filtered or unexported fields
}
Hash implements the rolling hash algorithm used to find chunk boundaries in a stream of bytes.
func NewHash ¶ added in v0.2.0
NewHash returns a new instance of a hash. size determines the length of the hash window used and the discriminator is used to find the boundary.
func (*Hash) Initialize ¶ added in v0.2.0
Initialize the window used for the rolling hash calculation. The size of the slice must match the window size
func (*Hash) IsBoundary ¶ added in v0.2.0
IsBoundary returns true if the discriminator and hash match to signal a chunk boundary has been reached
type HashAlgorithm ¶ added in v0.8.0
HashAlgorithm is a digest algorithm used to hash chunks.
var Digest HashAlgorithm = SHA512256{}
Digest algorithm used globally for all chunk hashing. Can be set to SHA512256 (default) or to SHA256.
type Index ¶
type Index struct { Index FormatIndex Chunks []IndexChunk }
Index represents the content of an index file
func ChunkStream ¶ added in v0.2.0
ChunkStream splits up a blob into chunks using the provided chunker (single stream), populates a store with the chunks and returns an index. Hashing and compression are performed in n goroutines while the chunking itself is performed serially.
func IndexFromReader ¶ added in v0.2.0
IndexFromReader parses a caibx structure (from a reader) and returns a populated Caibx object
type IndexChunk ¶ added in v0.2.0
IndexChunk is a table entry in an index file containing the chunk ID (SHA256). Similar to a FormatTableItem but with Start and Size instead of just offset to make it easier to use throughout the application.
type IndexMountFS ¶ added in v0.2.0
type IndexMountFS struct { fs.Inode FName string // File name in the mountpoint Idx Index // Index of the blob Store Store }
IndexMountFS is used to FUSE mount an index file (as a blob, not an archive). It presents a single file underneath the mountpoint.
func NewIndexMountFS ¶ added in v0.2.0
func NewIndexMountFS(idx Index, name string, s Store) *IndexMountFS
NewIndexMountFS initializes a FUSE filesystem mount based on an index and a chunk store.
func (*IndexMountFS) Close ¶ added in v0.9.1
func (r *IndexMountFS) Close() error
func (*IndexMountFS) OnAdd ¶ added in v0.9.0
func (r *IndexMountFS) OnAdd(ctx context.Context)
OnAdd is used to build the static filesystem structure at the start of the mount.
type IndexPos ¶ added in v0.2.0
type IndexPos struct { Store Store Index Index Length int64 // total length of file // contains filtered or unexported fields }
IndexPos represents a position inside an index file, to permit a seeking reader
func NewIndexReadSeeker ¶ added in v0.2.0
NewIndexReadSeeker initializes a ReadSeeker for indexes.
type IndexSegment ¶ added in v0.4.0
type IndexSegment struct {
// contains filtered or unexported fields
}
IndexSegment represents a contiguous section of an index which is used when assembling a file from seeds. first/last are positions in the index.
type IndexStore ¶ added in v0.3.0
type IndexStore interface { GetIndexReader(name string) (io.ReadCloser, error) GetIndex(name string) (Index, error) io.Closer fmt.Stringer }
IndexStore is implemented by stores that hold indexes.
type IndexWriteStore ¶ added in v0.3.0
type IndexWriteStore interface { IndexStore StoreIndex(name string, idx Index) error }
IndexWriteStore is used by stores that support reading and writing of indexes.
type Interrupted ¶ added in v0.2.0
type Interrupted struct{}
Interrupted is returned when a user interrupted a long-running operation, for example by pressing Ctrl+C
func (Interrupted) Error ¶ added in v0.2.0
func (e Interrupted) Error() string
type InvalidFormat ¶ added in v0.2.0
type InvalidFormat struct {
Msg string
}
InvalidFormat is returned when an error occurred when parsing an archive file
func (InvalidFormat) Error ¶ added in v0.2.0
func (e InvalidFormat) Error() string
type InvalidSeedAction ¶ added in v0.9.3
type InvalidSeedAction int
InvalidSeedAction represents the action that we will take if a seed happens to be invalid. There are currently three options: - fail with an error - skip the invalid seed and try to continue - regenerate the invalid seed index
const ( InvalidSeedActionBailOut InvalidSeedAction = iota InvalidSeedActionSkip InvalidSeedActionRegenerate )
type LocalFS ¶ added in v0.8.0
type LocalFS struct { // Base directory Root string // contains filtered or unexported fields }
LocalFS uses the local filesystem for tar/untar operations.
func NewLocalFS ¶ added in v0.8.0
func NewLocalFS(root string, opts LocalFSOptions) *LocalFS
NewLocalFS initializes a new instance of a local filesystem that can be used for tar/untar operations.
func (*LocalFS) CreateDevice ¶ added in v0.8.0
func (fs *LocalFS) CreateDevice(n NodeDevice) error
func (*LocalFS) CreateDir ¶ added in v0.8.0
func (fs *LocalFS) CreateDir(n NodeDirectory) error
func (*LocalFS) CreateFile ¶ added in v0.8.0
func (*LocalFS) CreateSymlink ¶ added in v0.8.0
func (fs *LocalFS) CreateSymlink(n NodeSymlink) error
func (*LocalFS) Next ¶ added in v0.8.0
Next returns the next filesystem entry or io.EOF when done. The caller is responsible for closing the returned File object.
func (*LocalFS) SetDirPermissions ¶ added in v0.9.0
func (fs *LocalFS) SetDirPermissions(n NodeDirectory) error
func (*LocalFS) SetFilePermissions ¶ added in v0.9.0
func (*LocalFS) SetSymlinkPermissions ¶ added in v0.9.0
func (fs *LocalFS) SetSymlinkPermissions(n NodeSymlink) error
type LocalFSOptions ¶ added in v0.8.0
type LocalFSOptions struct { // Only used when reading from the filesystem. Will only return // files from the same device as the first read operation. OneFileSystem bool // When writing files, use the current owner and don't try to apply the original owner. NoSameOwner bool // Ignore the incoming permissions when writing files. Use the current default instead. NoSamePermissions bool // Reads all timestamps as zero. Used in tar operations to avoid unnecessary changes. NoTime bool }
LocalFSOptions influence the behavior of the filesystem when reading from or writing to it.
type LocalIndexStore ¶ added in v0.3.0
type LocalIndexStore struct {
Path string
}
LocalIndexStore is used to read/write index files on local disk
func NewLocalIndexStore ¶ added in v0.4.0
func NewLocalIndexStore(path string) (LocalIndexStore, error)
NewLocalIndexStore creates an instance of a local index store, it only checks presence of the store
func (LocalIndexStore) Close ¶ added in v0.3.0
func (s LocalIndexStore) Close() error
Close the index store. NOP operation, needed to implement IndexStore interface
func (LocalIndexStore) GetIndex ¶ added in v0.3.0
func (s LocalIndexStore) GetIndex(name string) (i Index, e error)
GetIndex returns an Index structure from the store
func (LocalIndexStore) GetIndexReader ¶ added in v0.3.0
func (s LocalIndexStore) GetIndexReader(name string) (rdr io.ReadCloser, e error)
GetIndexReader returns a reader of an index file in the store or an error if the specified index file does not exist.
func (LocalIndexStore) StoreIndex ¶ added in v0.3.0
func (s LocalIndexStore) StoreIndex(name string, idx Index) error
StoreIndex stores an index in the index store with the given name.
func (LocalIndexStore) String ¶ added in v0.3.0
func (s LocalIndexStore) String() string
type LocalStore ¶
type LocalStore struct { Base string // When accessing chunks, should mtime be updated? Useful when this is // a cache. Old chunks can be identified and removed from the store that way UpdateTimes bool // contains filtered or unexported fields }
LocalStore casync store
func NewLocalStore ¶
func NewLocalStore(dir string, opt StoreOptions) (LocalStore, error)
NewLocalStore creates an instance of a local castore, it only checks presence of the store
func (LocalStore) Close ¶ added in v0.2.0
func (s LocalStore) Close() error
Close the store. NOP operation, needed to implement Store interface.
func (LocalStore) GetChunk ¶
func (s LocalStore) GetChunk(id ChunkID) (*Chunk, error)
GetChunk reads and returns one (compressed!) chunk from the store
func (LocalStore) HasChunk ¶ added in v0.2.0
func (s LocalStore) HasChunk(id ChunkID) (bool, error)
HasChunk returns true if the chunk is in the store
func (LocalStore) Prune ¶ added in v0.2.0
func (s LocalStore) Prune(ctx context.Context, ids map[ChunkID]struct{}) error
Prune removes any chunks from the store that are not contained in a list of chunks
func (LocalStore) RemoveChunk ¶ added in v0.2.0
func (s LocalStore) RemoveChunk(id ChunkID) error
RemoveChunk deletes a chunk, typically an invalid one, from the filesystem. Used when verifying and repairing caches.
func (LocalStore) StoreChunk ¶
func (s LocalStore) StoreChunk(chunk *Chunk) error
StoreChunk adds a new chunk to the store
func (LocalStore) String ¶ added in v0.2.0
func (s LocalStore) String() string
type MountFS ¶ added in v0.9.1
type MountFS interface { fs.InodeEmbedder Close() error }
type MtreeFS ¶ added in v0.8.0
type MtreeFS struct {
// contains filtered or unexported fields
}
MtreeFS prints the filesystem operations to a writer (which can be os.Stdout) in mtree format.
func NewMtreeFS ¶ added in v0.8.0
NewMtreeFS initializes a new instance of an mtree decoder that writes its output into the provided stream.
func (MtreeFS) CreateDevice ¶ added in v0.8.0
func (fs MtreeFS) CreateDevice(n NodeDevice) error
func (MtreeFS) CreateDir ¶ added in v0.8.0
func (fs MtreeFS) CreateDir(n NodeDirectory) error
func (MtreeFS) CreateFile ¶ added in v0.8.0
func (MtreeFS) CreateSymlink ¶ added in v0.8.0
func (fs MtreeFS) CreateSymlink(n NodeSymlink) error
type NoSuchObject ¶ added in v0.3.0
type NoSuchObject struct {
// contains filtered or unexported fields
}
NoSuchObject is returned by a store that can't find a requested object
func (NoSuchObject) Error ¶ added in v0.3.0
func (e NoSuchObject) Error() string
type NodeDevice ¶ added in v0.2.0
type NodeDevice struct { Name string UID int GID int Mode os.FileMode Major uint64 Minor uint64 Xattrs Xattrs MTime time.Time }
NodeDevice holds device information in a catar archive
type NodeDirectory ¶ added in v0.2.0
type NodeDirectory struct { Name string UID int GID int Mode os.FileMode MTime time.Time Xattrs Xattrs }
NodeDirectory represents a directory in a catar archive
type NodeFile ¶ added in v0.2.0
type NodeFile struct { UID int GID int Mode os.FileMode Name string MTime time.Time Xattrs Xattrs Size uint64 Data io.Reader }
NodeFile holds file permissions and data in a catar archive
type NodeSymlink ¶ added in v0.2.0
type NodeSymlink struct { Name string UID int GID int Mode os.FileMode MTime time.Time Xattrs Xattrs Target string }
NodeSymlink holds symlink information in a catar archive
type NullChunk ¶ added in v0.2.0
NullChunk is used in places where it's common to see requests for chunks containing only 0-bytes. When a chunked file has large areas of 0-bytes, the chunking algorithm does not produce split boundaries, which results in many chunks of 0-bytes of size MAX (max chunk size). The NullChunk can be used to make requesting this kind of chunk more efficient by serving it from memory, rather than requesting it from disk or network and decompressing it repeatedly.
func NewNullChunk ¶ added in v0.2.0
NewNullChunk returns an initialized chunk consisting of 0-bytes of 'size' which must match the max size used in the index to be effective
type NullProgressBar ¶ added in v0.9.3
type NullProgressBar struct { }
NullProgressBar wraps https://github.com/cheggaaa/pb and is used when we don't want to show a progressbar.
func (NullProgressBar) Add ¶ added in v0.9.3
func (p NullProgressBar) Add(add int) int
func (NullProgressBar) Finish ¶ added in v0.9.3
func (p NullProgressBar) Finish()
func (NullProgressBar) Increment ¶ added in v0.9.3
func (p NullProgressBar) Increment() int
func (NullProgressBar) Set ¶ added in v0.9.3
func (p NullProgressBar) Set(current int)
func (NullProgressBar) SetTotal ¶ added in v0.9.3
func (p NullProgressBar) SetTotal(total int)
func (NullProgressBar) Start ¶ added in v0.9.3
func (p NullProgressBar) Start()
type Plan ¶ added in v0.9.3
type Plan []SeedSegmentCandidate
type ProgressBar ¶ added in v0.2.0
type ProgressBar interface { SetTotal(total int) Start() Finish() Increment() int Add(add int) int Set(current int) io.Writer }
ProgressBar allows clients to provide their own implementations of graphical progress visualizations. Optional, can be nil to disable this feature.
func NewProgressBar ¶ added in v0.9.3
func NewProgressBar(prefix string) ProgressBar
NewProgressBar initializes a wrapper for a https://github.com/cheggaaa/pb progressbar that implements desync.ProgressBar
type Protocol ¶ added in v0.2.0
type Protocol struct {
// contains filtered or unexported fields
}
Protocol handles the casync protocol when using remote stores via SSH
func NewProtocol ¶ added in v0.2.0
NewProtocol creates a new casync protocol handler
func StartProtocol ¶ added in v0.2.0
StartProtocol initiates a connection to the remote store server using the value in CASYNC_SSH_PATH (default "ssh"), and executes the command in CASYNC_REMOTE_PATH (default "casync"). It then performs the HELLO handshake to initialize the connection
func (*Protocol) Initialize ¶ added in v0.2.0
Initialize exchanges HELLOs with the other side to start a protocol session. Returns the (capability) flags provided by the other party.
func (*Protocol) ReadMessage ¶ added in v0.2.0
ReadMessage reads a generic message from the other end, verifies the length, extracts the type and returns the message body as byte slice
func (*Protocol) RecvHello ¶ added in v0.2.0
RecvHello waits for the server to send a HELLO, fails if anything else is received. Returns the flags provided by the server.
func (*Protocol) RequestChunk ¶ added in v0.2.0
RequestChunk sends a request for a specific chunk to the server, waits for the response and returns the bytes in the chunk. Returns an error if the server reports the chunk as missing
func (*Protocol) SendGoodbye ¶ added in v0.2.0
SendGoodbye tells the other side to terminate gracefully
func (*Protocol) SendHello ¶ added in v0.2.0
SendHello sends a HELLO message to the server, with the flags signaling which service is being requested from it.
func (*Protocol) SendMissing ¶ added in v0.2.0
SendMissing tells the client that the requested chunk is not available
func (*Protocol) SendProtocolChunk ¶ added in v0.2.0
SendProtocolChunk responds to a request with the content of a chunk
func (*Protocol) SendProtocolRequest ¶ added in v0.2.0
SendProtocolRequest requests a chunk from a server
func (*Protocol) WriteMessage ¶ added in v0.2.0
WriteMessage sends a generic message to the server
type ProtocolServer ¶ added in v0.2.0
type ProtocolServer struct {
// contains filtered or unexported fields
}
ProtocolServer serves up chunks from a local store using the casync protocol
func NewProtocolServer ¶ added in v0.2.0
NewProtocolServer returns an initialized server that can serve chunks from a chunk store via the casync protocol
type PruneStore ¶ added in v0.2.0
type PruneStore interface { WriteStore Prune(ctx context.Context, ids map[ChunkID]struct{}) error }
PruneStore is a store that supports read, write and pruning of chunks
type RemoteHTTP ¶ added in v0.2.0
type RemoteHTTP struct {
*RemoteHTTPBase
}
RemoteHTTP is a remote casync store accessed via HTTP.
func NewRemoteHTTPStore ¶ added in v0.2.0
func NewRemoteHTTPStore(location *url.URL, opt StoreOptions) (*RemoteHTTP, error)
NewRemoteHTTPStore initializes a new store that pulls chunks via HTTP(S) from a remote web server. n defines the number of idle connections allowed.
func (*RemoteHTTP) GetChunk ¶ added in v0.2.0
func (r *RemoteHTTP) GetChunk(id ChunkID) (*Chunk, error)
GetChunk reads and returns one chunk from the store
func (*RemoteHTTP) HasChunk ¶ added in v0.2.0
func (r *RemoteHTTP) HasChunk(id ChunkID) (bool, error)
HasChunk returns true if the chunk is in the store
func (*RemoteHTTP) StoreChunk ¶ added in v0.2.0
func (r *RemoteHTTP) StoreChunk(chunk *Chunk) error
StoreChunk adds a new chunk to the store
type RemoteHTTPBase ¶ added in v0.3.0
type RemoteHTTPBase struct {
// contains filtered or unexported fields
}
RemoteHTTPBase is the base object for remote, HTTP-based chunk and index stores.
func NewRemoteHTTPStoreBase ¶ added in v0.3.0
func NewRemoteHTTPStoreBase(location *url.URL, opt StoreOptions) (*RemoteHTTPBase, error)
NewRemoteHTTPStoreBase initializes a base object for HTTP index or chunk stores.
func (*RemoteHTTPBase) Close ¶ added in v0.3.0
func (r *RemoteHTTPBase) Close() error
Close the HTTP store. NOP operation but needed to implement the interface.
func (*RemoteHTTPBase) GetObject ¶ added in v0.3.0
func (r *RemoteHTTPBase) GetObject(name string) ([]byte, error)
GetObject reads and returns an object in the form of []byte from the store
func (*RemoteHTTPBase) IssueHttpRequest ¶ added in v0.9.0
func (r *RemoteHTTPBase) IssueHttpRequest(method string, u *url.URL, getReader GetReaderForRequestBody, attempt int) (int, []byte, error)
Send a single HTTP request.
func (*RemoteHTTPBase) IssueRetryableHttpRequest ¶ added in v0.9.0
func (r *RemoteHTTPBase) IssueRetryableHttpRequest(method string, u *url.URL, getReader GetReaderForRequestBody) (int, []byte, error)
Send a single HTTP request, retrying if a retryable error has occurred.
func (*RemoteHTTPBase) StoreObject ¶ added in v0.3.0
func (r *RemoteHTTPBase) StoreObject(name string, getReader GetReaderForRequestBody) error
StoreObject stores an object to the store.
func (*RemoteHTTPBase) String ¶ added in v0.3.0
func (r *RemoteHTTPBase) String() string
type RemoteHTTPIndex ¶ added in v0.3.0
type RemoteHTTPIndex struct {
*RemoteHTTPBase
}
RemoteHTTPIndex is a remote index store accessed via HTTP.
func NewRemoteHTTPIndexStore ¶ added in v0.3.0
func NewRemoteHTTPIndexStore(location *url.URL, opt StoreOptions) (*RemoteHTTPIndex, error)
NewRemoteHTTPIndexStore initializes a new store that pulls the specified index file via HTTP(S) from a remote web server.
func (*RemoteHTTPIndex) GetIndex ¶ added in v0.3.0
func (r *RemoteHTTPIndex) GetIndex(name string) (i Index, e error)
GetIndex returns an Index structure from the store
func (RemoteHTTPIndex) GetIndexReader ¶ added in v0.3.0
func (r RemoteHTTPIndex) GetIndexReader(name string) (rdr io.ReadCloser, e error)
GetIndexReader returns an index reader from an HTTP store. Fails if the specified index file does not exist.
func (*RemoteHTTPIndex) StoreIndex ¶ added in v0.3.0
func (r *RemoteHTTPIndex) StoreIndex(name string, idx Index) error
StoreIndex adds a new index to the store
type RemoteSSH ¶
type RemoteSSH struct {
// contains filtered or unexported fields
}
RemoteSSH is a remote casync store accessed via SSH. Supports running multiple sessions to improve throughput.
func NewRemoteSSHStore ¶
func NewRemoteSSHStore(location *url.URL, opt StoreOptions) (*RemoteSSH, error)
NewRemoteSSHStore establishes up to n connections with a casync chunk server
func (*RemoteSSH) GetChunk ¶
GetChunk requests a chunk from the server and returns a (compressed) one. It uses any of the n sessions this store maintains in its pool. Blocks until one session becomes available
type RepairableCache ¶ added in v0.9.3
type RepairableCache struct {
// contains filtered or unexported fields
}
RepairableCache is a cache whose GetChunk() returns a ChunkMissing error instead of ChunkInvalid so the caller can redownload the invalid chunk from the store
func NewRepairableCache ¶ added in v0.9.3
func NewRepairableCache(l WriteStore) RepairableCache
NewRepairableCache creates a new RepairableCache that wraps a WriteStore and modifies its GetChunk() so that ChunkInvalid errors are replaced by ChunkMissing errors
func (RepairableCache) Close ¶ added in v0.9.3
func (r RepairableCache) Close() error
func (RepairableCache) GetChunk ¶ added in v0.9.3
func (r RepairableCache) GetChunk(id ChunkID) (*Chunk, error)
func (RepairableCache) HasChunk ¶ added in v0.9.3
func (r RepairableCache) HasChunk(id ChunkID) (bool, error)
func (RepairableCache) StoreChunk ¶ added in v0.9.3
func (r RepairableCache) StoreChunk(c *Chunk) error
func (RepairableCache) String ¶ added in v0.9.3
func (r RepairableCache) String() string
type S3IndexStore ¶ added in v0.3.0
type S3IndexStore struct {
S3StoreBase
}
S3IndexStore is a read-write index store with S3 backing
func NewS3IndexStore ¶ added in v0.3.0
func NewS3IndexStore(location *url.URL, s3Creds *credentials.Credentials, region string, opt StoreOptions, lookupType minio.BucketLookupType) (s S3IndexStore, e error)
NewS3IndexStore creates an index store with S3 backing. The URL should be provided like this: s3+http://host:port/bucket Credentials are passed in via the environment variables S3_ACCESS_KEY and S3_SECRET_KEY, or via the desync config file.
func (S3IndexStore) GetIndex ¶ added in v0.3.0
func (s S3IndexStore) GetIndex(name string) (i Index, e error)
GetIndex returns an Index structure from the store
func (S3IndexStore) GetIndexReader ¶ added in v0.3.0
func (s S3IndexStore) GetIndexReader(name string) (r io.ReadCloser, e error)
GetIndexReader returns a reader for an index from an S3 store. Fails if the specified index file does not exist.
func (S3IndexStore) StoreIndex ¶ added in v0.3.0
func (s S3IndexStore) StoreIndex(name string, idx Index) error
StoreIndex writes the index file to the S3 store
type S3Store ¶ added in v0.2.0
type S3Store struct {
S3StoreBase
}
S3Store is a read-write store with S3 backing
func NewS3Store ¶ added in v0.2.0
func NewS3Store(location *url.URL, s3Creds *credentials.Credentials, region string, opt StoreOptions, lookupType minio.BucketLookupType) (s S3Store, e error)
NewS3Store creates a chunk store with S3 backing. The URL should be provided like this: s3+http://host:port/bucket Credentials are passed in via the environment variables S3_ACCESS_KEY and S3_SECRET_KEY, or via the desync config file.
func (S3Store) Prune ¶ added in v0.2.0
Prune removes any chunks from the store that are not contained in a list (map)
func (S3Store) RemoveChunk ¶ added in v0.2.0
RemoveChunk deletes a chunk, typically an invalid one, from the filesystem. Used when verifying and repairing caches.
func (S3Store) StoreChunk ¶ added in v0.2.0
StoreChunk adds a new chunk to the store
type S3StoreBase ¶ added in v0.3.0
type S3StoreBase struct { Location string // contains filtered or unexported fields }
S3StoreBase is the base object for all chunk and index stores with S3 backing
func NewS3StoreBase ¶ added in v0.3.0
func NewS3StoreBase(u *url.URL, s3Creds *credentials.Credentials, region string, opt StoreOptions, lookupType minio.BucketLookupType) (S3StoreBase, error)
NewS3StoreBase initializes a base object used for chunk or index stores backed by S3.
func (S3StoreBase) Close ¶ added in v0.3.0
func (s S3StoreBase) Close() error
Close the S3 base store. NOP operation but needed to implement the store interface.
func (S3StoreBase) String ¶ added in v0.3.0
func (s S3StoreBase) String() string
type SFTPIndexStore ¶ added in v0.3.0
type SFTPIndexStore struct {
*SFTPStoreBase
}
SFTPIndexStore is an index store backed by SFTP over SSH
func NewSFTPIndexStore ¶ added in v0.3.0
func NewSFTPIndexStore(location *url.URL, opt StoreOptions) (*SFTPIndexStore, error)
NewSFTPIndexStore initializes an index store backed by SFTP over SSH.
func (*SFTPIndexStore) GetIndex ¶ added in v0.3.0
func (s *SFTPIndexStore) GetIndex(name string) (i Index, e error)
GetIndex reads an index from an SFTP store, returns an error if the specified index file does not exist.
func (*SFTPIndexStore) GetIndexReader ¶ added in v0.3.0
func (s *SFTPIndexStore) GetIndexReader(name string) (r io.ReadCloser, e error)
GetIndexReader returns a reader of an index from an SFTP store. Fails if the specified index file does not exist.
func (*SFTPIndexStore) StoreIndex ¶ added in v0.3.0
func (s *SFTPIndexStore) StoreIndex(name string, idx Index) error
StoreIndex adds a new index to the store
type SFTPStore ¶ added in v0.2.0
type SFTPStore struct {
// contains filtered or unexported fields
}
SFTPStore is a chunk store that uses SFTP over SSH.
func NewSFTPStore ¶ added in v0.2.0
func NewSFTPStore(location *url.URL, opt StoreOptions) (*SFTPStore, error)
NewSFTPStore initializes a chunk store using SFTP over SSH.
func (*SFTPStore) GetChunk ¶ added in v0.2.0
GetChunk returns a chunk from an SFTP store, returns ChunkMissing if the file does not exist
func (*SFTPStore) Prune ¶ added in v0.2.0
Prune removes any chunks from the store that are not contained in a list of chunks
func (*SFTPStore) RemoveChunk ¶ added in v0.2.0
RemoveChunk deletes a chunk, typically an invalid one, from the filesystem. Used when verifying and repairing caches.
func (*SFTPStore) StoreChunk ¶ added in v0.2.0
StoreChunk adds a new chunk to the store
type SFTPStoreBase ¶ added in v0.3.0
type SFTPStoreBase struct {
// contains filtered or unexported fields
}
SFTPStoreBase is the base object for SFTP chunk and index stores.
func (*SFTPStoreBase) Close ¶ added in v0.3.0
func (s *SFTPStoreBase) Close() error
Close terminates all client connections
func (*SFTPStoreBase) StoreObject ¶ added in v0.3.0
func (s *SFTPStoreBase) StoreObject(name string, r io.Reader) error
StoreObject adds a new object to a writable index or chunk store.
func (*SFTPStoreBase) String ¶ added in v0.3.0
func (s *SFTPStoreBase) String() string
type Seed ¶ added in v0.4.0
type Seed interface { LongestMatchWith(chunks []IndexChunk) (int, SeedSegment) RegenerateIndex(ctx context.Context, n int, attempt int, seedNumber int) error SetInvalid(value bool) IsInvalid() bool }
Seed represents a source of chunks other than the store. Typically a seed is another index+blob that is already present on disk and is used to copy or clone existing chunks or blocks into the target.
type SeedSegment ¶ added in v0.4.0
type SeedSegment interface { FileName() string Size() uint64 Validate(file *os.File) error WriteInto(dst *os.File, offset, end, blocksize uint64, isBlank bool) (copied uint64, cloned uint64, err error) }
SeedSegment represents a matching range between a Seed and a file being assembled from an Index. It's used to copy or reflink data from seeds into a target file during an extract operation.
type SeedSegmentCandidate ¶ added in v0.9.3
type SeedSegmentCandidate struct {
// contains filtered or unexported fields
}
SeedSegmentCandidate represents a single segment that we expect to use in a Plan
type SeedSequencer ¶ added in v0.4.0
type SeedSequencer struct {
// contains filtered or unexported fields
}
SeedSequencer is used to find sequences of chunks from seed files when assembling a file from an index. Using seeds reduces the need to download and decompress chunks from chunk stores. It also enables the use of reflinking/cloning of sections of files from a seed file where supported to reduce disk usage.
func NewSeedSequencer ¶ added in v0.4.0
func NewSeedSequencer(idx Index, src ...Seed) *SeedSequencer
NewSeedSequencer initializes a new sequencer from a number of seeds.
func (*SeedSequencer) Next ¶ added in v0.4.0
func (r *SeedSequencer) Next() (seed Seed, segment IndexSegment, source SeedSegment, done bool)
Next returns a sequence of index chunks (from the target index) and the longest matching segment from one of the seeds. If source is nil, no match was found in the seeds and the chunk needs to be retrieved from a store. If done is true, the sequencer is complete.
func (*SeedSequencer) Plan ¶ added in v0.9.3
func (r *SeedSequencer) Plan() (plan Plan)
Plan returns a new possible plan, representing an ordered list of segments that can be used to re-assemble the requested file
func (*SeedSequencer) RegenerateInvalidSeeds ¶ added in v0.9.3
RegenerateInvalidSeeds regenerates the index to match the unexpected seed content
func (*SeedSequencer) Rewind ¶ added in v0.9.3
func (r *SeedSequencer) Rewind()
Rewind resets the current target index to the beginning.
type SparseFile ¶ added in v0.9.1
type SparseFile struct {
// contains filtered or unexported fields
}
SparseFile represents a file that is written as it is read (Copy-on-read). It is used as a fast cache. Any chunk read from the store to satisfy a read operation is written to the file.
func NewSparseFile ¶ added in v0.9.1
func NewSparseFile(name string, idx Index, s Store, opt SparseFileOptions) (*SparseFile, error)
func (*SparseFile) Length ¶ added in v0.9.1
func (sf *SparseFile) Length() int64
Length returns the size of the index used for the sparse file.
func (*SparseFile) Open ¶ added in v0.9.1
func (sf *SparseFile) Open() (*SparseFileHandle, error)
Open returns a handle for a sparse file.
func (*SparseFile) WriteState ¶ added in v0.9.1
func (sf *SparseFile) WriteState() error
WriteState saves the state of file, basically which chunks were loaded and which ones weren't.
type SparseFileHandle ¶ added in v0.9.1
type SparseFileHandle struct {
// contains filtered or unexported fields
}
SparseFileHandle is used to access a sparse file. All read operations performed on the handle are either done on the file if the required ranges are available or loaded from the store and written to the file.
func (*SparseFileHandle) Close ¶ added in v0.9.1
func (h *SparseFileHandle) Close() error
type SparseFileOptions ¶ added in v0.9.1
type SparseFileOptions struct { // Optional, save the state of the sparse file on exit or SIGHUP. The state file // contains information which chunks from the index have been read and are // populated in the sparse file. If the state and sparse file exist and match, // the sparse file is used as is (not re-populated). StateSaveFile string // Optional, load all chunks that are marked as read in this state file. It is used // to pre-populate a new sparse file if the sparse file or the save state file aren't // present or don't match the index. SaveStateFile and StateInitFile can be the same. StateInitFile string // Optional, number of goroutines to preload chunks from StateInitFile. StateInitConcurrency int }
type SparseMountFS ¶ added in v0.9.1
type SparseMountFS struct { fs.Inode FName string // File name in the mountpoint // contains filtered or unexported fields }
SparseMountFS is used to FUSE mount an index file (as a blob, not an archive). It uses a (local) sparse file as cache to improve performance. Every chunk that is being read is written into the sparse file.
func NewSparseMountFS ¶ added in v0.9.1
func NewSparseMountFS(idx Index, name string, s Store, sparseFile string, opt SparseFileOptions) (*SparseMountFS, error)
NewSparseMountFS initializes a FUSE filesystem mount based on an index, a sparse file and a chunk store.
func (*SparseMountFS) Close ¶ added in v0.9.1
func (r *SparseMountFS) Close() error
Close the sparse file and save its state.
func (*SparseMountFS) OnAdd ¶ added in v0.9.1
func (r *SparseMountFS) OnAdd(ctx context.Context)
OnAdd is used to build the static filesystem structure at the start of the mount.
func (*SparseMountFS) WriteState ¶ added in v0.9.1
func (r *SparseMountFS) WriteState() error
Save the state of the sparse file.
type Store ¶
type Store interface { GetChunk(id ChunkID) (*Chunk, error) HasChunk(id ChunkID) (bool, error) io.Closer fmt.Stringer }
Store is a generic interface implemented by read-only stores, like SSH or HTTP remote stores currently.
type StoreOptions ¶ added in v0.4.0
type StoreOptions struct { // Concurrency used in the store. Depending on store type, it's used for // the number of goroutines, processes, or connection pool size. N int `json:"n,omitempty"` // Cert file name for HTTP SSL connections that require mutual SSL. ClientCert string `json:"client-cert,omitempty"` // Key file name for HTTP SSL connections that require mutual SSL. ClientKey string `json:"client-key,omitempty"` // CA certificates to trust in TLS connections. If not set, the systems CA store is used. CACert string `json:"ca-cert,omitempty"` // Trust any certificate presented by the remote chunk store. TrustInsecure bool `json:"trust-insecure,omitempty"` // Authorization header value for HTTP stores HTTPAuth string `json:"http-auth,omitempty"` // Cookie header value for HTTP stores HTTPCookie string `json:"http-cookie,omitempty"` // Timeout for waiting for objects to be retrieved. Infinite if negative. Default: 1 minute Timeout time.Duration `json:"timeout,omitempty"` // Number of times object retrieval should be attempted on error. Useful when dealing // with unreliable connections. ErrorRetry int `json:"error-retry,omitempty"` // Number of nanoseconds to wait before first retry attempt. // Retry attempt number N for the same request will wait N times this interval. ErrorRetryBaseInterval time.Duration `json:"error-retry-base-interval,omitempty"` // If SkipVerify is true, this store will not verify the data it reads and serves up. This is // helpful when a store is merely a proxy and the data will pass through additional stores // before being used. Verifying the checksum of a chunk requires it be uncompressed, so if // a compressed chunkstore is being proxied, all chunks would have to be decompressed first. // This setting avoids the extra overhead. While this could be used in other cases, it's not // recommended as a damaged chunk might be processed further leading to unpredictable results. 
SkipVerify bool `json:"skip-verify,omitempty"` // Store and read chunks uncompressed, without chunk file extension Uncompressed bool `json:"uncompressed"` }
StoreOptions provide additional common settings used in chunk stores, such as compression, error retry, or timeouts. Not all options available are applicable to all types of stores.
func NewStoreOptionsWithDefaults ¶ added in v0.9.6
func NewStoreOptionsWithDefaults() (o StoreOptions)
NewStoreOptionsWithDefaults creates a new StoreOptions struct with the default values set
func (*StoreOptions) UnmarshalJSON ¶ added in v0.9.6
func (o *StoreOptions) UnmarshalJSON(data []byte) error
type StoreRouter ¶ added in v0.2.0
type StoreRouter struct {
Stores []Store
}
StoreRouter is used to route requests to multiple stores. When a chunk is requested from the router, it'll query the first store and if that returns ChunkMissing, it'll move on to the next.
func NewStoreRouter ¶ added in v0.2.0
func NewStoreRouter(stores ...Store) StoreRouter
NewStoreRouter returns an initialized router
func (StoreRouter) Close ¶ added in v0.2.0
func (r StoreRouter) Close() error
Close calls the Close() method on every store in the router. Returns only the first error encountered.
func (StoreRouter) GetChunk ¶ added in v0.2.0
func (r StoreRouter) GetChunk(id ChunkID) (*Chunk, error)
GetChunk queries the available stores in order and moves to the next if it gets a ChunkMissing. Fails if any store returns a different error.
func (StoreRouter) HasChunk ¶ added in v0.2.0
func (r StoreRouter) HasChunk(id ChunkID) (bool, error)
HasChunk returns true if one of the containing stores has the chunk. It goes through the stores in order and returns as soon as the chunk is found.
func (StoreRouter) String ¶ added in v0.2.0
func (r StoreRouter) String() string
type SwapStore ¶ added in v0.9.0
type SwapStore struct {
// contains filtered or unexported fields
}
SwapStore wraps another store and provides the ability to swap out the underlying store with another one while under load. Typically used to reload config for long-running processes, perhaps reloading a store config file on SIGHUP and updating the store on-the-fly without restart.
func NewSwapStore ¶ added in v0.9.0
NewSwapStore creates an instance of a swap store wrapper that allows replacing the wrapped store at runtime.
func (*SwapStore) Close ¶ added in v0.9.0
Close the store. NOP operation, needed to implement Store interface.
func (*SwapStore) GetChunk ¶ added in v0.9.0
GetChunk reads and returns one (compressed!) chunk from the store
type SwapWriteStore ¶ added in v0.9.0
type SwapWriteStore struct {
SwapStore
}
SwapWriteStore does the same as SwapStore but implements WriteStore as well.
func NewSwapWriteStore ¶ added in v0.9.0
func NewSwapWriteStore(s Store) *SwapWriteStore
NewSwapWriteStore initializes as new instance of a swap store that supports writing and swapping at runtime.
func (*SwapWriteStore) StoreChunk ¶ added in v0.9.0
func (s *SwapWriteStore) StoreChunk(chunk *Chunk) error
StoreChunk adds a new chunk to the store
type TarReader ¶ added in v0.8.0
type TarReader struct {
// contains filtered or unexported fields
}
TarReader uses a GNU tar archive as source for a tar operation (to produce a catar).
func NewTarReader ¶ added in v0.8.0
func NewTarReader(r io.Reader, opts TarReaderOptions) *TarReader
NewTarReader initializes a new instance of a GNU tar archive reader that can be used as the source for catar archive tar operations.
type TarReaderOptions ¶ added in v0.9.0
type TarReaderOptions struct {
AddRoot bool
}
type TarWriter ¶ added in v0.8.0
type TarWriter struct {
// contains filtered or unexported fields
}
TarWriter uses a GNU tar archive for tar/untar operations of a catar archive.
func NewTarWriter ¶ added in v0.8.0
NewTarWriter initializes a new instance of a GNU tar archive writer that can be used for catar archive tar/untar operations.
func (TarWriter) CreateDevice ¶ added in v0.8.0
func (fs TarWriter) CreateDevice(n NodeDevice) error
func (TarWriter) CreateDir ¶ added in v0.8.0
func (fs TarWriter) CreateDir(n NodeDirectory) error
func (TarWriter) CreateFile ¶ added in v0.8.0
func (TarWriter) CreateSymlink ¶ added in v0.8.0
func (fs TarWriter) CreateSymlink(n NodeSymlink) error
type WriteDedupQueue ¶ added in v0.9.0
type WriteDedupQueue struct { S WriteStore *DedupQueue // contains filtered or unexported fields }
WriteDedupQueue wraps a writable store and provides deduplication of incoming chunk requests and store operation. This is useful when a burst of requests for the same chunk is received and the chunk store serving those is slow or when the underlying filesystem does not support atomic rename operations (Windows). With the DedupQueue wrapper, concurrent requests for the same chunk will result in just one request to the upstream store. Implements the WriteStore interface.
func NewWriteDedupQueue ¶ added in v0.9.0
func NewWriteDedupQueue(store WriteStore) *WriteDedupQueue
NewWriteDedupQueue initializes a new instance of the wrapper.
func (*WriteDedupQueue) GetChunk ¶ added in v0.9.0
func (q *WriteDedupQueue) GetChunk(id ChunkID) (*Chunk, error)
func (*WriteDedupQueue) HasChunk ¶ added in v0.9.0
func (q *WriteDedupQueue) HasChunk(id ChunkID) (bool, error)
func (*WriteDedupQueue) StoreChunk ¶ added in v0.9.0
func (q *WriteDedupQueue) StoreChunk(chunk *Chunk) error
type WriteStore ¶ added in v0.2.0
WriteStore is implemented by stores supporting both read and write operations such as a local store or an S3 store.
Source Files
¶
- archive.go
- assemble.go
- blocksize.go
- cache.go
- chop.go
- chunk.go
- chunker.go
- chunkstorage.go
- compress.go
- consoleindex.go
- const.go
- copy.go
- coverter.go
- dedupqueue.go
- digest.go
- doc.go
- errors.go
- extractstats.go
- failover.go
- fileseed.go
- filesystem.go
- format.go
- gcs.go
- gcsindex.go
- httphandler.go
- httphandlerbase.go
- httpindexhandler.go
- index.go
- ioctl_linux.go
- local.go
- localfs.go
- localfs_other.go
- localindex.go
- log.go
- make.go
- mount-index.go
- mount-sparse.go
- mtreefs.go
- nullchunk.go
- nullprogressbar.go
- nullseed.go
- progress.go
- progressbar.go
- protocol.go
- protocolserver.go
- reader.go
- readseeker.go
- remotehttp.go
- remotehttpindex.go
- remotessh.go
- s3.go
- s3index.go
- seed.go
- selfseed.go
- sequencer.go
- sftp.go
- sftpindex.go
- sip.go
- sparse-file.go
- store.go
- storerouter.go
- swapstore.go
- tar.go
- tarfs.go
- types.go
- untar.go
- verifyindex.go
- writededupqueue.go
- writer.go