Documentation
¶
Index ¶
- type BackupManager
- type BackupVerifyResult
- type GC
- type InspectionResult
- type Metrics
- type MetricsSnapshot
- type QuarantineManager
- func (qm *QuarantineManager) Inspect(bucketID, key string) (*InspectionResult, error)
- func (qm *QuarantineManager) ListQuarantined() ([]QuarantinedObject, error)
- func (qm *QuarantineManager) Purge(bucketID, key string) error
- func (qm *QuarantineManager) PurgeAll() (int, error)
- func (qm *QuarantineManager) Revalidate(bucketID, key string) (bool, error)
- type QuarantinedObject
- type ReadChecker
- type ReadVerifier
- type ScrubCoverage
- type ScrubResult
- type Scrubber
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type BackupManager ¶
type BackupManager struct {
// contains filtered or unexported fields
}
BackupManager handles consistent backups of the bbolt database.
func NewBackupManager ¶
NewBackupManager creates a backup manager.
func (*BackupManager) BackupToWriter ¶
func (bm *BackupManager) BackupToWriter(w io.Writer) error
BackupToWriter writes a consistent bbolt snapshot to the given writer. Useful for streaming backups over HTTP.
func (*BackupManager) Prune ¶
Prune removes backups older than the given retention period, keeping at least minKeep.
func (*BackupManager) Run ¶
func (bm *BackupManager) Run() (string, error)
Run creates a consistent snapshot of the bbolt database. It uses the shared meta.DB handle's Backup method, which runs inside a read transaction — no second bolt handle is opened.
func (*BackupManager) Verify ¶
func (bm *BackupManager) Verify(backupPath string) (*BackupVerifyResult, error)
Verify opens a backup file, checks that the required bbolt buckets exist, and returns basic counts for validation.
type BackupVerifyResult ¶
BackupVerifyResult contains the results of verifying a backup.
type GC ¶
GC performs garbage collection of orphaned files.
func (*GC) NotifyDeletion ¶
func (gc *GC) NotifyDeletion()
NotifyDeletion signals the GC that an object has been deleted. The GC loop is woken and performs an immediate pass. Non-blocking: if a prior signal is still pending, the new signal is coalesced into it.
type InspectionResult ¶
type InspectionResult struct {
Object QuarantinedObject `json:"object"`
FileExists bool `json:"file_exists"`
CurrentChecksum string `json:"current_checksum"`
ChecksumMatch bool `json:"checksum_match"`
FileSize int64 `json:"file_size"`
}
InspectionResult contains details from inspecting a quarantined object.
type Metrics ¶
type Metrics struct {
PutObjectTotal atomic.Int64
GetObjectTotal atomic.Int64
DeleteObjectTotal atomic.Int64
HeadObjectTotal atomic.Int64
ListObjectsTotal atomic.Int64
CreateBucketTotal atomic.Int64
DeleteBucketTotal atomic.Int64
AuthFailures atomic.Int64
ChecksumFailures atomic.Int64
FsyncFailures atomic.Int64
ObjectsQuarantined atomic.Int64
BytesUploaded atomic.Int64
BytesDownloaded atomic.Int64
// contains filtered or unexported fields
}
Metrics tracks operational counters for observability.
func (*Metrics) RecordFsyncFailure ¶
func (m *Metrics) RecordFsyncFailure()
RecordFsyncFailure increments the fsync failure counter. Nil-safe so store hooks can call it unconditionally.
func (*Metrics) Snapshot ¶
func (m *Metrics) Snapshot() MetricsSnapshot
Snapshot returns a JSON-serializable snapshot of all metrics.
type MetricsSnapshot ¶
type MetricsSnapshot struct {
UptimeSeconds int64 `json:"uptime_seconds"`
PutObjectTotal int64 `json:"put_object_total"`
GetObjectTotal int64 `json:"get_object_total"`
DeleteObjectTotal int64 `json:"delete_object_total"`
HeadObjectTotal int64 `json:"head_object_total"`
ListObjectsTotal int64 `json:"list_objects_total"`
CreateBucketTotal int64 `json:"create_bucket_total"`
DeleteBucketTotal int64 `json:"delete_bucket_total"`
AuthFailures int64 `json:"auth_failures"`
ChecksumFailures int64 `json:"checksum_failures"`
FsyncFailures int64 `json:"fsync_failures"`
ObjectsQuarantined int64 `json:"objects_quarantined"`
BytesUploaded int64 `json:"bytes_uploaded"`
BytesDownloaded int64 `json:"bytes_downloaded"`
}
MetricsSnapshot is a JSON-serializable point-in-time copy of metrics.
func (MetricsSnapshot) MarshalJSON ¶
func (s MetricsSnapshot) MarshalJSON() ([]byte, error)
MarshalJSON implements json.Marshaler.
type QuarantineManager ¶
type QuarantineManager struct {
// contains filtered or unexported fields
}
QuarantineManager provides operations on quarantined objects.
func NewQuarantineManager ¶
NewQuarantineManager creates a new QuarantineManager.
func (*QuarantineManager) Inspect ¶
func (qm *QuarantineManager) Inspect(bucketID, key string) (*InspectionResult, error)
Inspect checks file existence and computes the current checksum of a quarantined object.
func (*QuarantineManager) ListQuarantined ¶
func (qm *QuarantineManager) ListQuarantined() ([]QuarantinedObject, error)
ListQuarantined iterates all buckets and returns objects with state="quarantined".
func (*QuarantineManager) Purge ¶
func (qm *QuarantineManager) Purge(bucketID, key string) error
Purge deletes both the metadata and physical file of a quarantined object.
func (*QuarantineManager) PurgeAll ¶
func (qm *QuarantineManager) PurgeAll() (int, error)
PurgeAll purges all quarantined objects and returns the count purged.
func (*QuarantineManager) Revalidate ¶
func (qm *QuarantineManager) Revalidate(bucketID, key string) (bool, error)
Revalidate checks if a quarantined object's checksum now matches and restores it if so.
type QuarantinedObject ¶
type QuarantinedObject struct {
BucketID string `json:"bucket_id"`
BucketName string `json:"bucket_name"`
Key string `json:"key"`
ObjectID string `json:"object_id"`
SizeBytes int64 `json:"size_bytes"`
ExpectedChecksum string `json:"expected_checksum"`
LocationRef string `json:"location_ref"`
}
QuarantinedObject describes an object that has been quarantined.
type ReadChecker ¶
type ReadChecker struct {
// contains filtered or unexported fields
}
ReadChecker decides whether to verify reads probabilistically.
func NewReadChecker ¶
func NewReadChecker(rate float64) *ReadChecker
NewReadChecker creates a checker with the given verification rate. rate=0.05 means ~5% of reads are verified.
func (*ReadChecker) RecordCheck ¶
func (rc *ReadChecker) RecordCheck(valid bool)
RecordCheck records a verification result.
func (*ReadChecker) ShouldVerify ¶
func (rc *ReadChecker) ShouldVerify() bool
ShouldVerify returns true if this read should be verified.
func (*ReadChecker) Stats ¶
func (rc *ReadChecker) Stats() (checked, failed int64)
Stats returns total checks and failures.
type ReadVerifier ¶
type ReadVerifier struct {
// contains filtered or unexported fields
}
ReadVerifier wraps an io.Reader and computes SHA-256 on the fly. After the read completes (EOF), call Valid() to check the computed checksum against the expected checksum, or ActualChecksum() to retrieve it.
func NewReadVerifier ¶
func NewReadVerifier(r io.Reader, expectedSHA256 string) *ReadVerifier
NewReadVerifier wraps r and computes checksum during reading.
func (*ReadVerifier) ActualChecksum ¶
func (rv *ReadVerifier) ActualChecksum() string
ActualChecksum returns the computed checksum. Only valid after EOF.
func (*ReadVerifier) Valid ¶
func (rv *ReadVerifier) Valid() bool
Valid returns true if the checksum matches. Only valid after EOF.
type ScrubCoverage ¶
type ScrubCoverage struct {
TotalChecked int64 `json:"total_checked"`
LastFullScan string `json:"last_full_scan,omitempty"`
InProgress bool `json:"in_progress"`
}
ScrubCoverage reports incremental scrub progress.
type ScrubResult ¶
type ScrubResult struct {
Checked int
Healthy int
Quarantined int
Missing int
Errors int
// Migrated counts healthy records whose on-disk envelope was rewritten
// from the legacy JSON format to the current binary codec during this
// pass. Non-fatal: migration errors are logged and counted as Errors
// rather than failing the scrub.
Migrated int
}
ScrubResult contains the results of a scrub run.
type Scrubber ¶
type Scrubber struct {
// contains filtered or unexported fields
}
Scrubber performs periodic integrity checks on stored objects.
func NewScrubber ¶
func NewScrubber(db *meta.DB, st *store.Store, log *slog.Logger, interval time.Duration, sampleRate float64, scrubBytesPerSec int64, maxPerRun int) *Scrubber
NewScrubber creates a new scrubber. sampleRate controls what fraction of objects are checked per run (1.0 = full scan). scrubBytesPerSec bounds checksum-read bandwidth; <=0 disables the limiter. maxPerRun bounds how many objects per bucket are inspected on each tick; <=0 falls back to 100.
func (*Scrubber) Coverage ¶
func (s *Scrubber) Coverage() ScrubCoverage
Coverage returns incremental scrub progress information.
func (*Scrubber) RunIncremental ¶
func (s *Scrubber) RunIncremental(maxPerRun int) ScrubResult
RunIncremental checks up to maxPerRun objects per bucket, starting from where the last run left off. When a bucket is fully scanned the cursor wraps around. Once all buckets wrap, lastFullScan is updated. Buckets are processed in parallel using a bounded worker pool.
func (*Scrubber) RunOnce ¶
func (s *Scrubber) RunOnce() ScrubResult
RunOnce performs a single scrub pass.