Documentation
¶
Index ¶
- Constants
- Variables
- func AddRecoverySlot(s ObjectStore, masterKey []byte) (mnemonic string, err error)
- func ExtractMasterKey(slots []KeySlot, platformKey []byte, password string) ([]byte, error)
- func ExtractMasterKeyWithKMS(ctx context.Context, slots []KeySlot, decrypter crypto.KMSDecrypter, ...) ([]byte, error)
- func GetConcurrencyHint(s ObjectStore, defaultConcurrency int) int
- func HasKeySlots(s ObjectStore) bool
- func InitEncryptionKey(s ObjectStore, platformKey []byte, password string) ([]byte, error)
- func OpenWithKMS(ctx context.Context, slots []KeySlot, decrypter crypto.KMSDecrypter) ([]byte, error)
- func OpenWithPassword(slots []KeySlot, password string) ([]byte, error)
- func OpenWithPlatformKey(slots []KeySlot, platformKey []byte) ([]byte, error)
- func OpenWithRecoveryKey(slots []KeySlot, recoveryKey []byte) ([]byte, error)
- func SlotTypes(slots []KeySlot) string
- type B2Store
- func (s *B2Store) Delete(ctx context.Context, key string) error
- func (s *B2Store) DeletePrefix(ctx context.Context, prefix string) error
- func (s *B2Store) Exists(ctx context.Context, key string) (bool, error)
- func (s *B2Store) Flush(ctx context.Context) error
- func (s *B2Store) Get(ctx context.Context, key string) ([]byte, error)
- func (s *B2Store) List(ctx context.Context, prefix string) ([]string, error)
- func (s *B2Store) NewWriter(ctx context.Context, key string) io.WriteCloser
- func (s *B2Store) Put(ctx context.Context, key string, data []byte) error
- func (s *B2Store) SignedURL(ctx context.Context, key string, validFor time.Duration) (string, error)
- func (s *B2Store) Size(ctx context.Context, key string) (int64, error)
- func (s *B2Store) TotalSize(ctx context.Context) (int64, error)
- type ChangeType
- type CompressedStore
- func (s *CompressedStore) Delete(ctx context.Context, key string) error
- func (s *CompressedStore) Exists(ctx context.Context, key string) (bool, error)
- func (s *CompressedStore) Flush(ctx context.Context) error
- func (s *CompressedStore) Get(ctx context.Context, key string) ([]byte, error)
- func (s *CompressedStore) List(ctx context.Context, prefix string) ([]string, error)
- func (s *CompressedStore) Put(ctx context.Context, key string, data []byte) error
- func (s *CompressedStore) Size(ctx context.Context, key string) (int64, error)
- func (s *CompressedStore) TotalSize(ctx context.Context) (int64, error)
- func (s *CompressedStore) Unwrap() ObjectStore
- type ConcurrencyHinter
- type DebugStore
- func (s *DebugStore) Delete(ctx context.Context, key string) error
- func (s *DebugStore) Exists(ctx context.Context, key string) (bool, error)
- func (s *DebugStore) Flush(ctx context.Context) error
- func (s *DebugStore) Get(ctx context.Context, key string) ([]byte, error)
- func (s *DebugStore) List(ctx context.Context, prefix string) ([]string, error)
- func (s *DebugStore) Put(ctx context.Context, key string, data []byte) error
- func (s *DebugStore) Size(ctx context.Context, key string) (int64, error)
- func (s *DebugStore) TotalSize(ctx context.Context) (int64, error)
- func (s *DebugStore) Unwrap() ObjectStore
- type EncryptedStore
- type FileChange
- type GDriveChangeSource
- type GDriveSource
- type IncrementalSource
- type KDFParams
- type KeyCacheStore
- func (s *KeyCacheStore) Delete(ctx context.Context, key string) error
- func (s *KeyCacheStore) Exists(ctx context.Context, key string) (bool, error)
- func (s *KeyCacheStore) Flush(ctx context.Context) error
- func (s *KeyCacheStore) Get(ctx context.Context, key string) ([]byte, error)
- func (s *KeyCacheStore) List(ctx context.Context, prefix string) ([]string, error)
- func (s *KeyCacheStore) PreloadKeys(ctx context.Context, prefixes ...string) error
- func (s *KeyCacheStore) Put(ctx context.Context, key string, data []byte) error
- func (s *KeyCacheStore) Size(ctx context.Context, key string) (int64, error)
- func (s *KeyCacheStore) TotalSize(ctx context.Context) (int64, error)
- func (s *KeyCacheStore) Unwrap() ObjectStore
- type KeySlot
- type LocalSource
- type LocalStore
- func (s *LocalStore) Delete(_ context.Context, key string) error
- func (s *LocalStore) Exists(_ context.Context, key string) (bool, error)
- func (s *LocalStore) Flush(ctx context.Context) error
- func (s *LocalStore) Get(_ context.Context, key string) ([]byte, error)
- func (s *LocalStore) List(_ context.Context, prefix string) ([]string, error)
- func (s *LocalStore) Put(_ context.Context, key string, data []byte) error
- func (s *LocalStore) Size(_ context.Context, key string) (int64, error)
- func (s *LocalStore) TotalSize(_ context.Context) (int64, error)
- type MeteredStore
- func (m *MeteredStore) BytesWritten() int64
- func (m *MeteredStore) Delete(ctx context.Context, key string) error
- func (m *MeteredStore) DeleteReturnSize(ctx context.Context, key string) (int64, error)
- func (m *MeteredStore) Put(ctx context.Context, key string, data []byte) error
- func (m *MeteredStore) Reset()
- func (m *MeteredStore) Unwrap() ObjectStore
- type ObjectStore
- type OneDriveChangeSource
- type OneDriveSource
- type PackEntry
- type PackStore
- func (s *PackStore) Delete(ctx context.Context, key string) error
- func (s *PackStore) Exists(ctx context.Context, key string) (bool, error)
- func (s *PackStore) Flush(ctx context.Context) error
- func (s *PackStore) Get(ctx context.Context, key string) ([]byte, error)
- func (s *PackStore) List(ctx context.Context, prefix string) ([]string, error)
- func (s *PackStore) Put(ctx context.Context, key string, data []byte) error
- func (s *PackStore) Repack(ctx context.Context, maxWastedRatio float64) (int64, int, error)
- func (s *PackStore) Size(ctx context.Context, key string) (int64, error)
- func (s *PackStore) TotalSize(ctx context.Context) (int64, error)
- type QuotaStore
- type S3Store
- func (s *S3Store) ConcurrencyHint() int
- func (s *S3Store) Delete(ctx context.Context, key string) error
- func (s *S3Store) Exists(ctx context.Context, key string) (bool, error)
- func (s *S3Store) Flush(ctx context.Context) error
- func (s *S3Store) Get(ctx context.Context, key string) ([]byte, error)
- func (s *S3Store) List(ctx context.Context, prefix string) ([]string, error)
- func (s *S3Store) Put(ctx context.Context, key string, data []byte) error
- func (s *S3Store) Size(ctx context.Context, key string) (int64, error)
- func (s *S3Store) TotalSize(ctx context.Context) (int64, error)
- type SFTPConfig
- type SFTPSource
- func (s *SFTPSource) Close() error
- func (s *SFTPSource) GetFileStream(fileID string) (io.ReadCloser, error)
- func (s *SFTPSource) Info() core.SourceInfo
- func (s *SFTPSource) Size(ctx context.Context) (*SourceSize, error)
- func (s *SFTPSource) Walk(ctx context.Context, callback func(core.FileMeta) error) error
- type SFTPStore
- func (s *SFTPStore) Close() error
- func (s *SFTPStore) Delete(_ context.Context, key string) error
- func (s *SFTPStore) Exists(_ context.Context, key string) (bool, error)
- func (s *SFTPStore) Flush(ctx context.Context) error
- func (s *SFTPStore) Get(_ context.Context, key string) ([]byte, error)
- func (s *SFTPStore) List(_ context.Context, prefix string) ([]string, error)
- func (s *SFTPStore) Put(_ context.Context, key string, data []byte) error
- func (s *SFTPStore) Size(_ context.Context, key string) (int64, error)
- func (s *SFTPStore) TotalSize(_ context.Context) (int64, error)
- type Source
- type SourceSize
- type Unwrapper
Constants ¶
const KeySlotPrefix = "keys/"
KeySlotPrefix is the object key prefix for encryption key slot objects. These objects are stored unencrypted (they contain already-wrapped keys) so they can be read without the encryption key — avoiding a chicken-and-egg problem during key loading.
Variables ¶
var ErrQuotaExceeded = errors.New("storage quota exceeded during backup")
Functions ¶
func AddRecoverySlot ¶
func AddRecoverySlot(s ObjectStore, masterKey []byte) (mnemonic string, err error)
AddRecoverySlot generates a recovery key, wraps the given master key with it, stores the recovery slot, and returns the BIP39 24-word mnemonic.
func ExtractMasterKey ¶
ExtractMasterKey unwraps and returns the raw master key from whichever credential matches. Unlike the OpenWith* functions that return a derived encryption key, this returns the master key itself — needed when adding new key slots to an existing repo.
func ExtractMasterKeyWithKMS ¶ added in v1.4.6
func ExtractMasterKeyWithKMS(ctx context.Context, slots []KeySlot, decrypter crypto.KMSDecrypter, platformKey []byte, password string) ([]byte, error)
ExtractMasterKeyWithKMS is like ExtractMasterKey but also supports kms-platform slots via a crypto.KMSDecrypter.
func GetConcurrencyHint ¶ added in v1.4.3
func GetConcurrencyHint(s ObjectStore, defaultConcurrency int) int
GetConcurrencyHint walks the store wrapper chain and returns the first ConcurrencyHint it finds, defaulting to defaultConcurrency if none exists.
func HasKeySlots ¶
func HasKeySlots(s ObjectStore) bool
HasKeySlots reports whether the store contains any encryption key slots.
func InitEncryptionKey ¶
func InitEncryptionKey(s ObjectStore, platformKey []byte, password string) ([]byte, error)
InitEncryptionKey initializes encryption for a new repository. It generates a master key and creates key slots for whatever credentials are provided. At least one of platformKey or password must be non-empty. Returns the derived encryption key.
func OpenWithKMS ¶ added in v1.4.6
func OpenWithKMS(ctx context.Context, slots []KeySlot, decrypter crypto.KMSDecrypter) ([]byte, error)
OpenWithKMS finds a kms-platform slot, unwraps the master key using the given KMS decrypter, and returns the derived encryption key.
func OpenWithPassword ¶
OpenWithPassword finds a password slot, derives the wrapping key using Argon2id, unwraps the master key, and returns the derived encryption key.
func OpenWithPlatformKey ¶
OpenWithPlatformKey finds a platform slot, unwraps the master key using the given platform key, and returns the derived encryption key.
func OpenWithRecoveryKey ¶
OpenWithRecoveryKey finds a recovery slot, unwraps the master key using the given raw recovery key, and returns the derived encryption key.
Types ¶
type B2Store ¶
func NewB2Store ¶
func NewB2StoreWithPrefix ¶
func (*B2Store) DeletePrefix ¶
DeletePrefix deletes all objects under the given prefix.
func (*B2Store) NewWriter ¶
NewWriter returns a streaming writer to the given key in B2. The caller must Close the writer to finalize the upload.
type ChangeType ¶
type ChangeType string
ChangeType describes the kind of change reported by an IncrementalSource.
const (
	ChangeUpsert ChangeType = "upsert"
	ChangeDelete ChangeType = "delete"
)
type CompressedStore ¶ added in v1.1.0
type CompressedStore struct {
// contains filtered or unexported fields
}
CompressedStore wraps an ObjectStore and transparently zstd-compresses on write and decompresses (zstd or gzip) on read. Uncompressed data is returned as-is.
func NewCompressedStore ¶ added in v1.1.0
func NewCompressedStore(inner ObjectStore) *CompressedStore
func (*CompressedStore) Delete ¶ added in v1.1.0
func (s *CompressedStore) Delete(ctx context.Context, key string) error
func (*CompressedStore) Flush ¶ added in v1.4.3
func (s *CompressedStore) Flush(ctx context.Context) error
func (*CompressedStore) TotalSize ¶ added in v1.1.0
func (s *CompressedStore) TotalSize(ctx context.Context) (int64, error)
func (*CompressedStore) Unwrap ¶ added in v1.4.3
func (s *CompressedStore) Unwrap() ObjectStore
type ConcurrencyHinter ¶ added in v1.4.3
type ConcurrencyHinter interface {
ConcurrencyHint() int
}
ConcurrencyHinter is an optional interface that ObjectStore implementations can implement to indicate the optimal number of concurrent operations. Remote stores (S3) benefit from high concurrency; local stores do not.
type DebugStore ¶ added in v1.2.0
type DebugStore struct {
// contains filtered or unexported fields
}
DebugStore wraps an ObjectStore and logs every operation with timing information. Output goes to the provided writer, which should be a ui.SafeLogWriter so lines coexist with progress bars.
func NewDebugStore ¶ added in v1.2.0
func NewDebugStore(inner ObjectStore, w io.Writer) *DebugStore
func (*DebugStore) Delete ¶ added in v1.2.0
func (s *DebugStore) Delete(ctx context.Context, key string) error
func (*DebugStore) TotalSize ¶ added in v1.2.0
func (s *DebugStore) TotalSize(ctx context.Context) (int64, error)
func (*DebugStore) Unwrap ¶ added in v1.4.3
func (s *DebugStore) Unwrap() ObjectStore
type EncryptedStore ¶
type EncryptedStore struct {
ObjectStore
// contains filtered or unexported fields
}
EncryptedStore wraps an ObjectStore and transparently encrypts data on Put and decrypts on Get using AES-256-GCM. Unencrypted (legacy) data is returned as-is on Get, enabling gradual migration.
Objects under the "keys/" prefix are passed through unencrypted because they hold the wrapped master key needed to derive the encryption key.
func NewEncryptedStore ¶
func NewEncryptedStore(inner ObjectStore, key []byte) *EncryptedStore
NewEncryptedStore creates an EncryptedStore that encrypts all Put operations and decrypts Get operations. The key must be 32 bytes (AES-256).
func (*EncryptedStore) Unwrap ¶ added in v1.4.3
func (s *EncryptedStore) Unwrap() ObjectStore
type FileChange ¶
type FileChange struct {
Type ChangeType
Meta core.FileMeta
}
FileChange pairs a change type with file metadata. For deletions only Meta.FileID is required.
type GDriveChangeSource ¶
type GDriveChangeSource struct {
GDriveSource
}
GDriveChangeSource is an IncrementalSource backed by the Google Drive Changes API. It embeds GDriveSource to reuse authentication, full Walk, GetFileStream, and metadata conversion.
func NewGDriveChangeSource ¶
func NewGDriveChangeSource(credsPath, tokenPath string) (*GDriveChangeSource, error)
func (*GDriveChangeSource) GetStartPageToken ¶
func (s *GDriveChangeSource) GetStartPageToken() (string, error)
GetStartPageToken returns the token representing the current head of the Google Drive change stream.
func (*GDriveChangeSource) Info ¶
func (s *GDriveChangeSource) Info() core.SourceInfo
func (*GDriveChangeSource) WalkChanges ¶
func (s *GDriveChangeSource) WalkChanges(ctx context.Context, token string, callback func(FileChange) error) (string, error)
WalkChanges iterates over all changes since the given page token. Folder changes are emitted before file changes so that the engine can resolve parent references incrementally.
type GDriveSource ¶
type GDriveSource struct {
Service *drive.Service
DriveID string // shared drive ID; empty means "My Drive"
RootFolderID string // if empty, defaults to "root" (entire drive)
Account string // Google account email; populated automatically if empty
}
GDriveSource implements Source for Google Drive. By default it backs up the entire "My Drive" root. Set DriveID to back up a shared drive instead, and/or set RootFolderID to restrict to a specific folder within the selected drive.
func NewGDriveSource ¶
func NewGDriveSource(credsPath, tokenPath string) (*GDriveSource, error)
NewGDriveSource creates a new GDriveSource. If credsPath is non-empty it is used as a Google credentials JSON file (user OAuth or service-account). When credsPath is empty the built-in OAuth client credentials are used instead. tokenPath is where the OAuth token will be cached.
func (*GDriveSource) GetFileStream ¶
func (s *GDriveSource) GetFileStream(fileID string) (io.ReadCloser, error)
func (*GDriveSource) Info ¶
func (s *GDriveSource) Info() core.SourceInfo
func (*GDriveSource) Size ¶
func (s *GDriveSource) Size(ctx context.Context) (*SourceSize, error)
Size returns the total size of the drive. For My Drive it uses the fast about.get endpoint. For shared drives it lists all files and sums sizes.
type IncrementalSource ¶
type IncrementalSource interface {
Source
// GetStartPageToken returns the token representing the current head of
// the change stream. Call this before a full Walk to capture the baseline.
GetStartPageToken() (string, error)
// WalkChanges emits only the entries that changed since token.
// It returns the new token to persist for the next run.
WalkChanges(ctx context.Context, token string, callback func(FileChange) error) (newToken string, err error)
}
IncrementalSource extends Source with delta-based walking using a change token stored in the snapshot. On the first run (empty token) the engine falls back to the full Walk; on subsequent runs only changed entries are emitted.
type KDFParams ¶
type KDFParams struct {
Algorithm string `json:"algorithm"`
Salt string `json:"salt"` // base64-encoded
Time uint32 `json:"time"`
Memory uint32 `json:"memory"`
Threads uint8 `json:"threads"`
}
KDFParams holds the parameters for password-based key derivation.
type KeyCacheStore ¶ added in v1.1.0
type KeyCacheStore struct {
// contains filtered or unexported fields
}
KeyCacheStore wraps an ObjectStore and caches key existence from List calls, so that Exists returns immediately for known keys. Thread-safe.
func NewKeyCacheStore ¶ added in v1.1.0
func NewKeyCacheStore(inner ObjectStore) *KeyCacheStore
func (*KeyCacheStore) Delete ¶ added in v1.1.0
func (s *KeyCacheStore) Delete(ctx context.Context, key string) error
func (*KeyCacheStore) Flush ¶ added in v1.4.3
func (s *KeyCacheStore) Flush(ctx context.Context) error
func (*KeyCacheStore) PreloadKeys ¶ added in v1.1.0
func (s *KeyCacheStore) PreloadKeys(ctx context.Context, prefixes ...string) error
func (*KeyCacheStore) TotalSize ¶ added in v1.1.0
func (s *KeyCacheStore) TotalSize(ctx context.Context) (int64, error)
func (*KeyCacheStore) Unwrap ¶ added in v1.4.3
func (s *KeyCacheStore) Unwrap() ObjectStore
type KeySlot ¶
type KeySlot struct {
SlotType string `json:"slot_type"`
WrappedKey string `json:"wrapped_key"`
Label string `json:"label"`
KDFParams *KDFParams `json:"kdf_params,omitempty"`
}
KeySlot is the JSON representation of an encryption key slot stored in the object store.
func LoadKeySlots ¶
func LoadKeySlots(s ObjectStore) ([]KeySlot, error)
LoadKeySlots reads all key slot objects from the store.
type LocalSource ¶
type LocalSource struct {
RootPath string
}
LocalSource implements Source for the local filesystem.
func NewLocalSource ¶
func NewLocalSource(rootPath string) *LocalSource
func (*LocalSource) GetFileStream ¶
func (s *LocalSource) GetFileStream(fileID string) (io.ReadCloser, error)
func (*LocalSource) Info ¶
func (s *LocalSource) Info() core.SourceInfo
func (*LocalSource) Size ¶
func (s *LocalSource) Size(ctx context.Context) (*SourceSize, error)
type LocalStore ¶
type LocalStore struct {
BasePath string
// contains filtered or unexported fields
}
LocalStore implements ObjectStore for the local filesystem.
func NewLocalStore ¶
func NewLocalStore(basePath string) (*LocalStore, error)
type MeteredStore ¶
type MeteredStore struct {
ObjectStore
// contains filtered or unexported fields
}
MeteredStore wraps an ObjectStore and tracks bytes written/deleted.
func NewMeteredStore ¶
func NewMeteredStore(s ObjectStore) *MeteredStore
func (*MeteredStore) BytesWritten ¶
func (m *MeteredStore) BytesWritten() int64
func (*MeteredStore) DeleteReturnSize ¶
func (*MeteredStore) Reset ¶
func (m *MeteredStore) Reset()
func (*MeteredStore) Unwrap ¶ added in v1.4.3
func (m *MeteredStore) Unwrap() ObjectStore
type ObjectStore ¶
type ObjectStore interface {
Put(ctx context.Context, key string, data []byte) error
Get(ctx context.Context, key string) ([]byte, error)
Exists(ctx context.Context, key string) (bool, error)
Delete(ctx context.Context, key string) error
List(ctx context.Context, prefix string) ([]string, error)
Size(ctx context.Context, key string) (int64, error)
TotalSize(ctx context.Context) (int64, error)
Flush(ctx context.Context) error
}
ObjectStore is the interface for content-addressable object storage. Keys are slash-separated paths like "chunk/<hash>" or "snapshot/<hash>".
type OneDriveChangeSource ¶
type OneDriveChangeSource struct {
OneDriveSource
}
OneDriveChangeSource is an IncrementalSource backed by the Microsoft Graph delta API. It embeds OneDriveSource to reuse authentication, full Walk, GetFileStream, and metadata conversion.
func NewOneDriveChangeSource ¶
func NewOneDriveChangeSource(clientID, tokenPath string) (*OneDriveChangeSource, error)
func (*OneDriveChangeSource) GetStartPageToken ¶
func (s *OneDriveChangeSource) GetStartPageToken() (string, error)
GetStartPageToken returns the current head of the OneDrive delta stream by requesting a "latest" delta token. The returned string is a full deltaLink URL.
func (*OneDriveChangeSource) Info ¶
func (s *OneDriveChangeSource) Info() core.SourceInfo
func (*OneDriveChangeSource) WalkChanges ¶
func (s *OneDriveChangeSource) WalkChanges(ctx context.Context, token string, callback func(FileChange) error) (string, error)
WalkChanges iterates over all changes since the given delta token. Folder changes are emitted before file changes so that the engine can resolve parent references incrementally. Returns the new delta token for the next run.
type OneDriveSource ¶
func NewOneDriveSource ¶
func NewOneDriveSource(clientID, tokenPath string) (*OneDriveSource, error)
func (*OneDriveSource) GetFileStream ¶
func (s *OneDriveSource) GetFileStream(fileID string) (io.ReadCloser, error)
func (*OneDriveSource) Info ¶
func (s *OneDriveSource) Info() core.SourceInfo
func (*OneDriveSource) Size ¶
func (s *OneDriveSource) Size(ctx context.Context) (*SourceSize, error)
Size returns the total storage usage for the OneDrive account by calling the /me/drive endpoint which includes quota information.
type PackEntry ¶ added in v1.4.3
PackEntry represents the location of a small object within a packfile.
type PackStore ¶ added in v1.4.3
type PackStore struct {
ObjectStore
// contains filtered or unexported fields
}
PackStore wraps an ObjectStore to aggregate small objects into larger "packfiles". It uses a stateless JSON catalog ("index/packs") to keep track of which pack contains which object.
func NewPackStore ¶ added in v1.4.3
func NewPackStore(inner ObjectStore) (*PackStore, error)
NewPackStore initializes a new PackStore over an existing ObjectStore.
func (*PackStore) Delete ¶ added in v1.4.3
Delete removes an object. For packed objects, it just removes it from the catalog. The actual packfile is not currently garbage collected.
func (*PackStore) Exists ¶ added in v1.4.3
Exists checks the un-flushed buffer, the catalog, or falls back to inner.
func (*PackStore) Flush ¶ added in v1.4.3
Flush ensures any pending small objects are written to a packfile, and uploads the latest JSON catalog.
func (*PackStore) Get ¶ added in v1.4.3
Get retrieves an object from the active buffer, a cached pack, or downloads the pack.
func (*PackStore) List ¶ added in v1.4.3
List returns all keys matching the prefix, merging results from the inner store with the keys currently buffered or indexed in packfiles.
func (*PackStore) Put ¶ added in v1.4.3
Put stores data either in the active packbuffer or directly to the inner store.
func (*PackStore) Repack ¶ added in v1.4.4
Repack analyzes the packfiles and repacks those that have too much wasted space. Wasted space occurs when objects within a packfile are logically deleted (removed from catalog). maxWastedRatio is the threshold (0.0 to 1.0) above which a pack is repacked. For example, 0.3 means a pack is repacked if it is more than 30% empty. Returns the number of bytes reclaimed, number of packs deleted, and error.
type QuotaStore ¶
type QuotaStore struct {
ObjectStore
// contains filtered or unexported fields
}
QuotaStore wraps an ObjectStore and cancels the backup context when cumulative bytes written exceed the remaining budget.
func NewQuotaStore ¶
func NewQuotaStore(inner ObjectStore, budget int64, cancel context.CancelCauseFunc) *QuotaStore
func (*QuotaStore) Unwrap ¶ added in v1.4.3
func (q *QuotaStore) Unwrap() ObjectStore
func (*QuotaStore) Written ¶
func (q *QuotaStore) Written() int64
Written returns the total bytes successfully written through this store.
type S3Store ¶ added in v1.3.0
S3Store implements ObjectStore for Amazon S3 and compatible services.
func NewS3Store ¶ added in v1.3.0
func (*S3Store) ConcurrencyHint ¶ added in v1.4.3
ConcurrencyHint implements ConcurrencyHinter. S3 benefits from highly parallel uploads since each PUT is a separate HTTP round-trip.
type SFTPConfig ¶ added in v1.4.0
type SFTPConfig struct {
Host string
Port string // default "22"
User string
Password string // password auth (optional if key is set)
PrivateKeyPath string // path to PEM-encoded private key (optional if password is set)
}
SFTPConfig holds the parameters needed to connect to an SFTP server.
type SFTPSource ¶ added in v1.4.0
type SFTPSource struct {
// contains filtered or unexported fields
}
SFTPSource implements Source for a remote SFTP filesystem.
func NewSFTPSource ¶ added in v1.4.0
func NewSFTPSource(cfg SFTPConfig, rootPath string) (*SFTPSource, error)
NewSFTPSource connects to the SFTP server described by cfg and returns a source rooted at rootPath.
func (*SFTPSource) Close ¶ added in v1.4.0
func (s *SFTPSource) Close() error
Close releases the underlying SFTP and SSH connections.
func (*SFTPSource) GetFileStream ¶ added in v1.4.0
func (s *SFTPSource) GetFileStream(fileID string) (io.ReadCloser, error)
func (*SFTPSource) Info ¶ added in v1.4.0
func (s *SFTPSource) Info() core.SourceInfo
func (*SFTPSource) Size ¶ added in v1.4.0
func (s *SFTPSource) Size(ctx context.Context) (*SourceSize, error)
type SFTPStore ¶ added in v1.4.0
type SFTPStore struct {
// contains filtered or unexported fields
}
SFTPStore implements ObjectStore backed by an SFTP server.
func NewSFTPStore ¶ added in v1.4.0
func NewSFTPStore(cfg SFTPConfig, basePath string) (*SFTPStore, error)
NewSFTPStore connects to the SFTP server described by cfg and returns a store rooted at basePath. The directory is created if it does not exist.
type Source ¶
type Source interface {
Walk(ctx context.Context, callback func(core.FileMeta) error) error
GetFileStream(fileID string) (io.ReadCloser, error)
Info() core.SourceInfo
Size(ctx context.Context) (*SourceSize, error)
}
Source is the interface for a backup data source (local filesystem, Google Drive, OneDrive, etc.). Implementations MUST ensure that parent folders are visited before their children during Walk.
type SourceSize ¶
SourceSize holds the total size of a source.
type Unwrapper ¶ added in v1.4.3
type Unwrapper interface {
Unwrap() ObjectStore
}
Unwrapper is an optional interface for wrapper stores (CompressedStore, EncryptedStore, etc.) to expose their inner store for introspection.