Documentation ¶
Index ¶
- Constants
- Variables
- func Execute() error
- func GetPIDFilePath() string
- func GetTaskFilePath() string
- func IsProcessRunning(pid int) bool
- func ReadPIDFile() (int, error)
- func RemovePIDFile() error
- func RemoveTaskFile() error
- func WritePIDFile() error
- func WriteTaskInfo(info *TaskInfo) error
- type Archiver
- type CacheEntry
- type CacheResponse
- type Config
- type DatabaseConfig
- type PartitionCache
- type PartitionCacheEntry
- type PartitionInfo
- type Phase
- type ProcessResult
- type RowCountCache
- type RowCountEntry
- type S3Config
- type StatusResponse
- type TableCache
- type TaskInfo
- type WSMessage
Constants ¶
const (
StageSkipped = "Skipped"
)
Stage constants
Variables ¶
var (
	ErrInsufficientPermissions  = errors.New("insufficient permissions to read table")
	ErrPartitionNoPermissions   = errors.New("partition tables exist but you don't have SELECT permissions")
	ErrS3ClientNotInitialized   = errors.New("S3 client not initialized")
	ErrS3UploaderNotInitialized = errors.New("S3 uploader not initialized")
)
Error definitions
var (
	ErrDatabaseUserRequired   = errors.New("database user is required")
	ErrDatabaseNameRequired   = errors.New("database name is required")
	ErrDatabasePortInvalid    = errors.New("database port must be between 1 and 65535")
	ErrS3EndpointRequired     = errors.New("S3 endpoint is required")
	ErrS3BucketRequired       = errors.New("S3 bucket is required")
	ErrS3AccessKeyRequired    = errors.New("S3 access key is required")
	ErrS3SecretKeyRequired    = errors.New("S3 secret key is required")
	ErrS3RegionInvalid        = errors.New("S3 region contains invalid characters or is too long")
	ErrTableNameRequired      = errors.New("table name is required")
	ErrTableNameInvalid       = errors.New("table name is invalid: must be 1-63 characters, start with a letter or underscore, and contain only letters, numbers, and underscores")
	ErrStartDateFormatInvalid = errors.New("invalid start date format")
	ErrEndDateFormatInvalid   = errors.New("invalid end date format")
	ErrWorkersMinimum         = errors.New("workers must be at least 1")
	ErrWorkersMaximum         = errors.New("workers must not exceed 1000")
)
Static errors for configuration validation
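These are sentinel errors, so callers can match them with errors.Is even when they come back wrapped. A minimal package-internal sketch (describeConfigError is a hypothetical helper, not part of this API, and the package clause uses a placeholder name since the import path is not shown on this page):

package archive // placeholder; substitute the real package name

import "errors"

// describeConfigError is a hypothetical helper that maps a
// configuration-validation failure (possibly wrapped) to a short hint.
func describeConfigError(err error) string {
	switch {
	case errors.Is(err, ErrTableNameRequired), errors.Is(err, ErrTableNameInvalid):
		return "check the table name"
	case errors.Is(err, ErrWorkersMinimum), errors.Is(err, ErrWorkersMaximum):
		return "workers must be between 1 and 1000"
	case errors.Is(err, ErrS3EndpointRequired), errors.Is(err, ErrS3BucketRequired),
		errors.Is(err, ErrS3AccessKeyRequired), errors.Is(err, ErrS3SecretKeyRequired):
		return "S3 settings are incomplete"
	default:
		return err.Error()
	}
}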
Functions ¶
func GetTaskFilePath ¶
func GetTaskFilePath() string
GetTaskFilePath returns the path to the task info file
func IsProcessRunning ¶
func IsProcessRunning(pid int) bool
IsProcessRunning checks whether a process with the given PID is running. Works on both Unix and Windows systems.
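A minimal sketch of combining the PID helpers to detect and clear a stale PID file before starting a new run (cleanStalePID is illustrative, not part of the package; the package clause is a placeholder):

package archive // placeholder; substitute the real package name

import "fmt"

// cleanStalePID is an illustrative helper: it reads the recorded PID,
// checks whether that process is still alive, and removes the file if
// the process is gone.
func cleanStalePID() error {
	pid, err := ReadPIDFile()
	if err != nil {
		return err // no PID file, or it could not be read
	}
	if IsProcessRunning(pid) {
		return fmt.Errorf("another instance appears to be running (pid %d, file %s)", pid, GetPIDFilePath())
	}
	return RemovePIDFile()
}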
func WriteTaskInfo ¶
func WriteTaskInfo(info *TaskInfo) error
WriteTaskInfo writes current task information to file
Types ¶
type Archiver ¶
type Archiver struct {
// contains filtered or unexported fields
}
func (*Archiver) ProcessPartitionWithProgress ¶
func (a *Archiver) ProcessPartitionWithProgress(partition PartitionInfo, program *tea.Program) ProcessResult
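The method reports progress through a Bubble Tea program (github.com/charmbracelet/bubbletea). A rough sketch of driving it from a worker goroutine, assuming the *Archiver and the PartitionInfo values are obtained elsewhere (their construction is not shown on this page); forwarding the ProcessResult with program.Send is this sketch's choice, not a documented requirement:

package archive // placeholder; substitute the real package name

import tea "github.com/charmbracelet/bubbletea"

// runPartitions is an illustrative helper: it processes partitions
// sequentially in a goroutine and hands each ProcessResult to the
// Bubble Tea program (any value satisfies tea.Msg).
func runPartitions(a *Archiver, partitions []PartitionInfo, program *tea.Program) {
	go func() {
		for _, p := range partitions {
			result := a.ProcessPartitionWithProgress(p, program)
			program.Send(result)
		}
	}()
}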
type CacheEntry ¶
type CacheEntry struct {
Table string `json:"table"`
Partition string `json:"partition"`
RowCount int64 `json:"rowCount"`
CountTime time.Time `json:"countTime"`
FileSize int64 `json:"fileSize"`
UncompressedSize int64 `json:"uncompressedSize"`
FileMD5 string `json:"fileMD5"`
FileTime time.Time `json:"fileTime"`
S3Key string `json:"s3Key"`
S3Uploaded bool `json:"s3Uploaded"`
S3UploadTime time.Time `json:"s3UploadTime"`
LastError string `json:"lastError"`
ErrorTime time.Time `json:"errorTime"`
}
type CacheResponse ¶
type CacheResponse struct {
Tables []TableCache `json:"tables"`
Timestamp time.Time `json:"timestamp"`
}
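CacheResponse looks like a JSON payload that aggregates per-table cache entries with a timestamp. A hypothetical HTTP handler (not part of the package) showing how the nesting serializes:

package archive // placeholder; substitute the real package name

import (
	"encoding/json"
	"net/http"
	"time"
)

// serveCache is a hypothetical handler illustrating the JSON shape of
// CacheResponse; where the []TableCache comes from is up to the caller.
func serveCache(w http.ResponseWriter, _ *http.Request, tables []TableCache) {
	resp := CacheResponse{
		Tables:    tables,
		Timestamp: time.Now(),
	}
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(resp)
}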
type Config ¶
type DatabaseConfig ¶
type PartitionCache ¶
type PartitionCache struct {
Entries map[string]PartitionCacheEntry `json:"entries"`
}
PartitionCache stores both row counts and file metadata
type PartitionCacheEntry ¶
type PartitionCacheEntry struct {
// Row count information
RowCount int64 `json:"row_count"`
CountTime time.Time `json:"count_time"`
// File metadata (stored after processing)
FileSize int64 `json:"file_size,omitempty"` // Compressed size
UncompressedSize int64 `json:"uncompressed_size,omitempty"` // Original size
FileMD5 string `json:"file_md5,omitempty"`
FileTime time.Time `json:"file_time,omitempty"`
// S3 information
S3Key string `json:"s3_key,omitempty"`
S3Uploaded bool `json:"s3_uploaded,omitempty"`
S3UploadTime time.Time `json:"s3_upload_time,omitempty"`
// Error tracking
LastError string `json:"last_error,omitempty"`
ErrorTime time.Time `json:"error_time,omitempty"`
}
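A sketch of consulting the cache before recounting a partition. It assumes Entries is keyed by the partition name (the exact key format is not documented here) and uses an arbitrary freshness window:

package archive // placeholder; substitute the real package name

import "time"

// cachedRowCount is an illustrative helper: it returns a cached row
// count only if an entry exists and its CountTime is recent enough.
func cachedRowCount(cache *PartitionCache, partition string, maxAge time.Duration) (int64, bool) {
	entry, ok := cache.Entries[partition]
	if !ok || time.Since(entry.CountTime) > maxAge {
		return 0, false // missing or stale; caller should recount
	}
	return entry.RowCount, true
}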
type ProcessResult ¶
type RowCountCache ¶
type RowCountCache struct {
Counts map[string]RowCountEntry `json:"counts"`
}
Legacy support: the old structure is kept for backward compatibility
type RowCountEntry ¶
type StatusResponse ¶
type TableCache ¶
type TableCache struct {
TableName string `json:"tableName"`
Entries []CacheEntry `json:"entries"`
}
type TaskInfo ¶
type TaskInfo struct {
PID int `json:"pid"`
StartTime time.Time `json:"start_time"`
Table string `json:"table"`
StartDate string `json:"start_date"`
EndDate string `json:"end_date"`
CurrentTask string `json:"current_task"`
CurrentPartition string `json:"current_partition,omitempty"`
CurrentStep string `json:"current_step,omitempty"`
Progress float64 `json:"progress"`
TotalItems int `json:"total_items"`
CompletedItems int `json:"completed_items"`
LastUpdate time.Time `json:"last_update"`
}
TaskInfo represents the current archiving task status
func ReadTaskInfo ¶
ReadTaskInfo reads current task information from file
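A sketch of the writer side, pairing TaskInfo with WriteTaskInfo so another process can poll progress via ReadTaskInfo (reportProgress is illustrative, not part of the package; only a subset of fields is filled in):

package archive // placeholder; substitute the real package name

import (
	"os"
	"time"
)

// reportProgress is an illustrative helper: it snapshots the current
// state into the task info file for external observers.
func reportProgress(table, partition string, done, total int) error {
	progress := 0.0
	if total > 0 {
		progress = float64(done) / float64(total)
	}
	return WriteTaskInfo(&TaskInfo{
		PID:              os.Getpid(),
		Table:            table,
		CurrentTask:      "archiving",
		CurrentPartition: partition,
		Progress:         progress,
		TotalItems:       total,
		CompletedItems:   done,
		LastUpdate:       time.Now(),
	})
}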