Documentation ¶
Index ¶
- Constants
- func Pack(entries []PackedEntry) ([]byte, error)
- type APIOptimizer
- type APIStats
- type AdaptiveCompressor
- type Backend
- type BatchExecutor
- type BatchProcessor
- type BatchQuery
- type BatchWriter
- type BufferPool
- type BufferPoolStats
- type BulkOptimizer
- type CachedResponse
- type CachedResult
- type ChunkedReader
- type ChunkedWriter
- type CircuitBreaker
- type ConcurrencyStats
- type ConcurrentLimiter
- func (cl *ConcurrentLimiter) Do(ctx context.Context, fn func() error) error
- func (cl *ConcurrentLimiter) DoAsync(ctx context.Context, fn func() error) <-chan error
- func (cl *ConcurrentLimiter) Stats() ConcurrencyStats
- func (cl *ConcurrentLimiter) TryDo(fn func() error) (bool, error)
- func (cl *ConcurrentLimiter) Wait()
- func (cl *ConcurrentLimiter) WaitWithTimeout(timeout time.Duration) bool
- type ConnectionFactory
- type ConnectionPool
- type DBOptimizer
- type DBStats
- type Deduplicator
- type HTTPClient
- type HashIndexer
- type HealthChecker
- type IndexEntry
- type IndexOptimizer
- type IndexStats
- type LRUCache
- type LoadBalanceAlgorithm
- type LoadBalancer
- type MemoryPool
- type OptimizedStore
- type PackedEntry
- type PackedFormat
- type Pipeline
- type PoolStats
- type PoolStatsSnapshot
- type PooledConnection
- type PreparedStatements
- type ProcessFunc
- type QueryAnalyzer
- type QueryCache
- type QueryStats
- type RateLimiter
- type RequestCoalescer
- type ResponseCache
- type ResponseCompressor
- type StorageBackend
- type StorageOptimizer
- type StorageStats
- type StreamProcessor
- func (sp *StreamProcessor) CompressStream(reader io.Reader, writer io.Writer) error
- func (sp *StreamProcessor) DecompressStream(reader io.Reader, writer io.Writer) error
- func (sp *StreamProcessor) NewChunkedReader(reader io.Reader) *ChunkedReader
- func (sp *StreamProcessor) NewChunkedWriter(writer io.Writer) *ChunkedWriter
- func (sp *StreamProcessor) NewPipeline(stages ...ProcessFunc) *Pipeline
- func (sp *StreamProcessor) Process(ctx context.Context, reader io.Reader, writer io.Writer, fn ProcessFunc) error
- func (sp *StreamProcessor) Stats() StreamStats
- type StreamStats
- type StringBuilder
- type TokenBucket
- type Transaction
- type TransactionManager
- type WorkerPool
- type WriteBatcher
- type WriteRequest
Constants ¶
const (
    CircuitClosed = iota
    CircuitOpen
    CircuitHalfOpen
)
const (
    SmallBufferSize  = 4 * 1024         // 4KB
    MediumBufferSize = 64 * 1024        // 64KB
    LargeBufferSize  = 1024 * 1024      // 1MB
    HugeBufferSize   = 16 * 1024 * 1024 // 16MB
)
Variables ¶
This section is empty.
Functions ¶
func Pack ¶
func Pack(entries []PackedEntry) ([]byte, error)
Types ¶
type APIOptimizer ¶
type APIOptimizer struct {
// contains filtered or unexported fields
}
APIOptimizer optimizes API performance
func NewAPIOptimizer ¶
func NewAPIOptimizer() *APIOptimizer
NewAPIOptimizer creates an API optimizer
func (*APIOptimizer) Stats ¶
func (ao *APIOptimizer) Stats() map[string]interface{}
Stats returns API optimizer statistics
type APIStats ¶
type APIStats struct {
    RequestsTotal   atomic.Uint64
    RequestsSuccess atomic.Uint64
    RequestsFailed  atomic.Uint64
    CacheHits       atomic.Uint64
    CacheMisses     atomic.Uint64
    AvgResponseTime atomic.Int64 // microseconds
    BytesCompressed atomic.Uint64
    RateLimited     atomic.Uint64
    CircuitBroken   atomic.Uint64
}
APIStats tracks API performance metrics
type AdaptiveCompressor ¶
type AdaptiveCompressor struct {
// contains filtered or unexported fields
}
AdaptiveCompressor chooses optimal compression based on data
func NewAdaptiveCompressor ¶
func NewAdaptiveCompressor() *AdaptiveCompressor
NewAdaptiveCompressor creates an adaptive compressor
func (*AdaptiveCompressor) Compress ¶
func (ac *AdaptiveCompressor) Compress(data []byte) ([]byte, bool)
Compress compresses data if beneficial
func (*AdaptiveCompressor) Decompress ¶
func (ac *AdaptiveCompressor) Decompress(data []byte) ([]byte, error)
Decompress decompresses data
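A minimal round-trip sketch. It assumes the boolean returned by Compress reports whether compression was actually applied, which the signature suggests but this page does not spell out:

    ac := NewAdaptiveCompressor()
    data := bytes.Repeat([]byte("abc"), 1024) // highly compressible input

    compressed, ok := ac.Compress(data) // ok is assumed to mean "compression was beneficial"
    if ok {
        restored, err := ac.Decompress(compressed)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(bytes.Equal(data, restored)) // true
    }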
type Backend ¶
type Backend struct {
    URL          string
    Weight       int
    Active       atomic.Bool
    Connections  atomic.Int64
    LastCheck    time.Time
    ResponseTime atomic.Int64 // microseconds
}
Backend represents a backend server
type BatchExecutor ¶
type BatchExecutor struct {
// contains filtered or unexported fields
}
BatchExecutor executes queries in batches
func NewBatchExecutor ¶
func NewBatchExecutor() *BatchExecutor
NewBatchExecutor creates a batch executor
func (*BatchExecutor) Execute ¶
func (be *BatchExecutor) Execute(query string, args ...interface{}) error
Execute adds a query to the batch
type BatchProcessor ¶
type BatchProcessor struct {
// contains filtered or unexported fields
}
BatchProcessor processes items in batches with concurrency control
func NewBatchProcessor ¶
func NewBatchProcessor(batchSize, maxConcurrent int) *BatchProcessor
NewBatchProcessor creates a new batch processor
type BatchQuery ¶
BatchQuery represents a batched query
type BatchWriter ¶
type BatchWriter interface {
WriteBatch(requests []WriteRequest) error
}
BatchWriter processes batched writes
type BufferPool ¶
type BufferPool struct {
// contains filtered or unexported fields
}
BufferPool manages a pool of byte slices
type BufferPoolStats ¶
BufferPoolStats contains statistics for a single buffer pool
type BulkOptimizer ¶
type BulkOptimizer struct {
// contains filtered or unexported fields
}
BulkOptimizer optimizes bulk operations
func NewBulkOptimizer ¶
func NewBulkOptimizer(parallel, chunkSize int) *BulkOptimizer
NewBulkOptimizer creates a bulk optimizer
func (*BulkOptimizer) ProcessBulk ¶
func (bo *BulkOptimizer) ProcessBulk(ctx context.Context, items []interface{}, processor func([]interface{}) error) error
ProcessBulk processes items in optimized bulk operations
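A sketch of driving a bulk job. The constructor parameters are read as "run up to parallel chunks of chunkSize items concurrently"; that interpretation is inferred from the names, not stated on this page:

    bo := NewBulkOptimizer(4, 256) // assumed: 4 workers, 256 items per chunk

    items := make([]interface{}, 10000)
    for i := range items {
        items[i] = i
    }

    err := bo.ProcessBulk(context.Background(), items, func(chunk []interface{}) error {
        // Called once per chunk, likely concurrently: avoid shared mutable state.
        return nil
    })
    if err != nil {
        log.Fatal(err)
    }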
type CachedResponse ¶
type CachedResponse struct {
    Body       []byte
    Headers    http.Header
    StatusCode int
    Timestamp  time.Time
    TTL        time.Duration
    ETag       string
}
CachedResponse stores cached response data
type CachedResult ¶
CachedResult stores cached query result
type ChunkedReader ¶
type ChunkedReader struct {
// contains filtered or unexported fields
}
ChunkedReader provides a chunked reading interface
func (*ChunkedReader) ReadChunk ¶
func (cr *ChunkedReader) ReadChunk() ([]byte, error)
ReadChunk reads the next chunk
type ChunkedWriter ¶
type ChunkedWriter struct {
// contains filtered or unexported fields
}
ChunkedWriter provides a chunked writing interface
func (*ChunkedWriter) Close ¶
func (cw *ChunkedWriter) Close() error
Close flushes and returns the buffer to the pool
func (*ChunkedWriter) Flush ¶
func (cw *ChunkedWriter) Flush() error
Flush writes buffered data to the underlying writer
type CircuitBreaker ¶
type CircuitBreaker struct {
// contains filtered or unexported fields
}
CircuitBreaker prevents cascading failures
func NewCircuitBreaker ¶
func NewCircuitBreaker(threshold, successThreshold int, resetTimeout time.Duration) *CircuitBreaker
NewCircuitBreaker creates a circuit breaker
type ConcurrencyStats ¶
type ConcurrencyStats struct {
    ActiveGoroutines int64
    TotalStarted     uint64
    TotalCompleted   uint64
    TotalBlocked     uint64
    TotalErrors      uint64
    MaxGoroutines    int
}
ConcurrencyStats contains concurrency statistics
type ConcurrentLimiter ¶
type ConcurrentLimiter struct {
// contains filtered or unexported fields
}
ConcurrentLimiter manages goroutine concurrency and resource usage
func NewConcurrentLimiter ¶
func NewConcurrentLimiter(maxGoroutines int) *ConcurrentLimiter
NewConcurrentLimiter creates a new concurrency limiter
func (*ConcurrentLimiter) Do ¶
func (cl *ConcurrentLimiter) Do(ctx context.Context, fn func() error) error
Do executes a function with concurrency limiting
func (*ConcurrentLimiter) DoAsync ¶
func (cl *ConcurrentLimiter) DoAsync(ctx context.Context, fn func() error) <-chan error
DoAsync executes a function asynchronously with limiting
func (*ConcurrentLimiter) Stats ¶
func (cl *ConcurrentLimiter) Stats() ConcurrencyStats
Stats returns limiter statistics
func (*ConcurrentLimiter) TryDo ¶
func (cl *ConcurrentLimiter) TryDo(fn func() error) (bool, error)
TryDo attempts to execute a function without blocking
func (*ConcurrentLimiter) Wait ¶
func (cl *ConcurrentLimiter) Wait()
Wait waits for all active goroutines to complete
func (*ConcurrentLimiter) WaitWithTimeout ¶
func (cl *ConcurrentLimiter) WaitWithTimeout(timeout time.Duration) bool
WaitWithTimeout waits with a timeout
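A sketch that exercises the blocking, non-blocking, and asynchronous entry points together:

    cl := NewConcurrentLimiter(8) // at most 8 functions run at once

    // Blocking: Do waits for a slot or for ctx cancellation.
    if err := cl.Do(context.Background(), func() error { return nil }); err != nil {
        log.Println(err)
    }

    // Non-blocking: ok is false when no slot is free.
    if ok, err := cl.TryDo(func() error { return nil }); ok && err != nil {
        log.Println(err)
    }

    // Asynchronous: the function's error arrives on a channel.
    errc := cl.DoAsync(context.Background(), func() error { return nil })
    if err := <-errc; err != nil {
        log.Println(err)
    }

    if !cl.WaitWithTimeout(5 * time.Second) {
        log.Println("timed out waiting for in-flight work")
    }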
type ConnectionFactory ¶
ConnectionFactory creates new connections
type ConnectionPool ¶
type ConnectionPool struct {
// contains filtered or unexported fields
}
ConnectionPool manages database connections
func NewConnectionPool ¶
func NewConnectionPool(maxConns int) *ConnectionPool
NewConnectionPool creates a connection pool
func (*ConnectionPool) Close ¶
func (cp *ConnectionPool) Close() error
Close closes the connection pool
func (*ConnectionPool) Get ¶
func (cp *ConnectionPool) Get(ctx context.Context) (*PooledConnection, error)
Get acquires a connection from the pool
func (*ConnectionPool) Put ¶
func (cp *ConnectionPool) Put(conn *PooledConnection)
Put returns a connection to the pool
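The usual acquire/release pairing, sketched as a helper. PooledConnection's fields are unexported here, so how the connection is actually used is left open:

    func withConn(ctx context.Context, cp *ConnectionPool, fn func(*PooledConnection) error) error {
        conn, err := cp.Get(ctx)
        if err != nil {
            return err
        }
        defer cp.Put(conn) // return the connection even when fn fails
        return fn(conn)
    }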
type DBOptimizer ¶
type DBOptimizer struct {
// contains filtered or unexported fields
}
DBOptimizer optimizes database operations
func NewDBOptimizer ¶
func NewDBOptimizer(maxConns int) *DBOptimizer
NewDBOptimizer creates a database optimizer
func (*DBOptimizer) Stats ¶
func (dbo *DBOptimizer) Stats() map[string]interface{}
Stats returns database optimizer statistics
type DBStats ¶
type DBStats struct {
    QueriesExecuted   atomic.Uint64
    QueriesCached     atomic.Uint64
    ConnectionsActive atomic.Int64
    ConnectionsIdle   atomic.Int64
    BatchesExecuted   atomic.Uint64
    AvgQueryTime      atomic.Int64 // microseconds
}
DBStats tracks database performance
type Deduplicator ¶
type Deduplicator struct {
// contains filtered or unexported fields
}
Deduplicator eliminates duplicate data storage
func (*Deduplicator) CheckDuplicate ¶
func (d *Deduplicator) CheckDuplicate(data []byte) (string, bool)
CheckDuplicate reports whether the data is a duplicate
func (*Deduplicator) Record ¶
func (d *Deduplicator) Record(hash, key string)
Record records a new unique entry
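No constructor for Deduplicator appears in this index, so this sketch takes one as a parameter. It also assumes the string returned by CheckDuplicate is the content hash expected by Record; the two signatures line up that way, but the page does not confirm it:

    func storeUnique(d *Deduplicator, key string, data []byte) bool {
        hash, dup := d.CheckDuplicate(data)
        if dup {
            return false // content already stored; point key at the existing entry instead
        }
        // ... persist data under key ...
        d.Record(hash, key)
        return true
    }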
type HTTPClient ¶
type HTTPClient struct {
// contains filtered or unexported fields
}
HTTPClient is an optimized HTTP client
type HashIndexer ¶
type HashIndexer struct {
// contains filtered or unexported fields
}
HashIndexer provides fast hash-based lookups
func (*HashIndexer) Add ¶
func (hi *HashIndexer) Add(key string, offset, size int64)
Add adds an entry to the index
func (*HashIndexer) Get ¶
func (hi *HashIndexer) Get(key string) (IndexEntry, bool)
Get retrieves an entry from the index
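A sketch of indexing and looking up a location. IndexEntry's fields are not shown on this page, so the entry is used opaquely:

    func indexAndLookup(hi *HashIndexer) {
        hi.Add("objects/abc123", 0, 4096) // key stored at offset 0, 4096 bytes

        entry, ok := hi.Get("objects/abc123")
        if !ok {
            log.Fatal("missing index entry")
        }
        _ = entry // carries the offset/size recorded above
    }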
type HealthChecker ¶
type HealthChecker struct {
// contains filtered or unexported fields
}
HealthChecker checks backend health
func NewHealthChecker ¶
func NewHealthChecker() *HealthChecker
NewHealthChecker creates a health checker
func (*HealthChecker) Check ¶
func (hc *HealthChecker) Check(backend *Backend)
Check checks backend health
type IndexEntry ¶
IndexEntry stores indexed data location
type IndexOptimizer ¶
type IndexOptimizer struct {
// contains filtered or unexported fields
}
IndexOptimizer suggests and creates optimal indexes
type IndexStats ¶
type IndexStats struct {
    Uses      uint64
    LastUsed  time.Time
    AvgTime   time.Duration
    TableName string
    Columns   []string
}
IndexStats tracks index usage
type LRUCache ¶
type LRUCache struct {
// contains filtered or unexported fields
}
LRUCache implements a thread-safe LRU cache
type LoadBalanceAlgorithm ¶
type LoadBalanceAlgorithm int
LoadBalanceAlgorithm defines load balancing strategy
const (
    RoundRobin LoadBalanceAlgorithm = iota
    LeastConnections
    WeightedRoundRobin
    ResponseTime
)
type LoadBalancer ¶
type LoadBalancer struct {
// contains filtered or unexported fields
}
LoadBalancer distributes load across backends
func (*LoadBalancer) AddBackend ¶
func (lb *LoadBalancer) AddBackend(url string, weight int)
AddBackend adds a backend server
func (*LoadBalancer) GetBackend ¶
func (lb *LoadBalancer) GetBackend() (*Backend, error)
GetBackend selects a backend based on algorithm
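No LoadBalancer constructor is listed here, so this sketch takes one as a parameter; selection follows whichever LoadBalanceAlgorithm the balancer was configured with:

    func pickBackend(lb *LoadBalancer) (*Backend, error) {
        lb.AddBackend("http://10.0.0.1:8080", 3) // weight 3
        lb.AddBackend("http://10.0.0.2:8080", 1) // weight 1
        return lb.GetBackend()
    }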
type MemoryPool ¶
type MemoryPool struct {
// contains filtered or unexported fields
}
MemoryPool manages reusable memory buffers to reduce allocations
func (*MemoryPool) Get ¶
func (mp *MemoryPool) Get(size int) []byte
Get retrieves a buffer from the appropriate pool
func (*MemoryPool) NewStringBuilder ¶
func (mp *MemoryPool) NewStringBuilder(estimatedSize int) *StringBuilder
NewStringBuilder creates a string builder backed by pooled memory
func (*MemoryPool) Stats ¶
func (mp *MemoryPool) Stats() PoolStatsSnapshot
Stats returns current pool statistics
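A sketch of borrowing a buffer and reading the pool's efficiency. No Put method for MemoryPool is listed on this page, so how buffers are returned to the pool is not shown here:

    func withPooledBuffer(mp *MemoryPool, n int) {
        buf := mp.Get(n) // served from the small/medium/large/huge pool by requested size
        _ = buf          // ... fill and use the buffer ...

        stats := mp.Stats()
        fmt.Printf("efficiency: %.2f\n", stats.EfficiencyRatio()) // gets per fresh allocation
    }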
type OptimizedStore ¶
type OptimizedStore struct {
// contains filtered or unexported fields
}
OptimizedStore wraps a store with optimizations
func NewOptimizedStore ¶
func NewOptimizedStore(backend StorageBackend, cacheSize int) *OptimizedStore
NewOptimizedStore creates an optimized store
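StorageBackend is fully specified below, so a toy in-memory backend is enough to construct the store; whether cacheSize counts entries or bytes is an assumption:

    type mapBackend struct {
        mu sync.Mutex
        m  map[string][]byte
    }

    func (b *mapBackend) Read(key string) ([]byte, error) {
        b.mu.Lock()
        defer b.mu.Unlock()
        data, ok := b.m[key]
        if !ok {
            return nil, fmt.Errorf("not found: %s", key)
        }
        return data, nil
    }

    func (b *mapBackend) Write(key string, data []byte) error {
        b.mu.Lock()
        defer b.mu.Unlock()
        b.m[key] = data
        return nil
    }

    func (b *mapBackend) Delete(key string) error {
        b.mu.Lock()
        defer b.mu.Unlock()
        delete(b.m, key)
        return nil
    }

    func newStore() *OptimizedStore {
        backend := &mapBackend{m: make(map[string][]byte)}
        return NewOptimizedStore(backend, 1024) // cache size: assumed to be an entry count
    }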
type PackedEntry ¶
PackedEntry is a packed storage entry
type PackedFormat ¶
type PackedFormat struct {
    Version uint32
    Entries []PackedEntry
}
PackedFormat provides efficient binary packing
type Pipeline ¶
type Pipeline struct {
// contains filtered or unexported fields
}
Pipeline creates a processing pipeline
type PoolStats ¶
type PoolStats struct {
    TotalGets    atomic.Uint64
    TotalPuts    atomic.Uint64
    TotalNews    atomic.Uint64
    CurrentInUse atomic.Int64
    LastGC       atomic.Int64
    GCCount      atomic.Uint64
}
PoolStats tracks pool performance metrics
type PoolStatsSnapshot ¶
type PoolStatsSnapshot struct {
    TotalGets       uint64
    TotalPuts       uint64
    TotalNews       uint64
    CurrentInUse    int64
    LastGC          time.Time
    GCCount         uint64
    SmallPoolStats  BufferPoolStats
    MediumPoolStats BufferPoolStats
    LargePoolStats  BufferPoolStats
    HugePoolStats   BufferPoolStats
}
PoolStatsSnapshot is a point-in-time snapshot of pool statistics
func (PoolStatsSnapshot) EfficiencyRatio ¶
func (s PoolStatsSnapshot) EfficiencyRatio() float64
EfficiencyRatio returns the ratio of gets to news (higher is better)
type PooledConnection ¶
type PooledConnection struct {
// contains filtered or unexported fields
}
PooledConnection wraps a database connection
type PreparedStatements ¶
type PreparedStatements struct {
// contains filtered or unexported fields
}
PreparedStatements manages prepared statements
func NewPreparedStatements ¶
func NewPreparedStatements() *PreparedStatements
NewPreparedStatements creates a prepared statement manager
func (*PreparedStatements) Close ¶
func (ps *PreparedStatements) Close() error
Close closes all prepared statements
type ProcessFunc ¶
ProcessFunc is a function that processes a chunk of data
type QueryAnalyzer ¶
type QueryAnalyzer struct {
// contains filtered or unexported fields
}
QueryAnalyzer analyzes queries for optimization
func NewQueryAnalyzer ¶
func NewQueryAnalyzer() *QueryAnalyzer
NewQueryAnalyzer creates a query analyzer
func (*QueryAnalyzer) GetSlowQueries ¶
func (qa *QueryAnalyzer) GetSlowQueries(threshold time.Duration) []string
GetSlowQueries returns queries slower than threshold
type QueryCache ¶
type QueryCache struct {
// contains filtered or unexported fields
}
QueryCache caches query results
func (*QueryCache) Get ¶
func (qc *QueryCache) Get(key string) (interface{}, bool)
Get retrieves cached query result
type QueryStats ¶
type QueryStats struct {
    Count        uint64
    TotalTime    time.Duration
    AvgTime      time.Duration
    MinTime      time.Duration
    MaxTime      time.Duration
    LastExecuted time.Time
}
QueryStats tracks query performance
type RateLimiter ¶
type RateLimiter struct {
// contains filtered or unexported fields
}
RateLimiter implements token bucket rate limiting
func NewRateLimiter ¶
func NewRateLimiter(rate int, interval time.Duration) *RateLimiter
NewRateLimiter creates a rate limiter
func (*RateLimiter) Allow ¶
func (rl *RateLimiter) Allow(key string) bool
Allow reports whether a request for the given key is allowed
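A sketch of per-key limiting as HTTP middleware. Keying on the client address is one choice; any stable request identifier works:

    func limit(rl *RateLimiter, next http.HandlerFunc) http.HandlerFunc {
        // rl would come from NewRateLimiter(100, time.Second),
        // assumed to mean 100 tokens per key, refilled each second.
        return func(w http.ResponseWriter, r *http.Request) {
            if !rl.Allow(r.RemoteAddr) {
                http.Error(w, "rate limit exceeded", http.StatusTooManyRequests)
                return
            }
            next(w, r)
        }
    }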
type RequestCoalescer ¶
type RequestCoalescer struct {
// contains filtered or unexported fields
}
RequestCoalescer coalesces duplicate requests
func NewRequestCoalescer ¶
func NewRequestCoalescer() *RequestCoalescer
NewRequestCoalescer creates a request coalescer
type ResponseCache ¶
type ResponseCache struct {
// contains filtered or unexported fields
}
ResponseCache caches API responses
func NewResponseCache ¶
func NewResponseCache(size int, ttl time.Duration) *ResponseCache
NewResponseCache creates a response cache
func (*ResponseCache) Get ¶
func (rc *ResponseCache) Get(key string) (*CachedResponse, bool)
Get retrieves cached response
func (*ResponseCache) Put ¶
func (rc *ResponseCache) Put(key string, resp *CachedResponse) error
Put caches a response
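A get-or-fill sketch. Whether Put enforces the cache-wide TTL from NewResponseCache or the per-entry TTL field is not stated here, so the sketch sets the field explicitly:

    func fetch(rc *ResponseCache, key string) (*CachedResponse, error) {
        if resp, ok := rc.Get(key); ok {
            return resp, nil // cache hit
        }
        resp := &CachedResponse{
            Body:       []byte(`{"ok":true}`), // stand-in for a real upstream response
            StatusCode: http.StatusOK,
            Timestamp:  time.Now(),
            TTL:        time.Minute,
        }
        if err := rc.Put(key, resp); err != nil {
            return nil, err
        }
        return resp, nil
    }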
type ResponseCompressor ¶
type ResponseCompressor struct {
// contains filtered or unexported fields
}
ResponseCompressor compresses responses
func NewResponseCompressor ¶
func NewResponseCompressor() *ResponseCompressor
NewResponseCompressor creates a response compressor
func (*ResponseCompressor) CompressHandler ¶
func (rc *ResponseCompressor) CompressHandler(next http.HandlerFunc) http.HandlerFunc
CompressHandler wraps handler with compression
func (*ResponseCompressor) ShouldCompress ¶
func (rc *ResponseCompressor) ShouldCompress(contentType string, size int) bool
ShouldCompress checks if response should be compressed
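Wiring the compressor in as middleware; the wrapped handler presumably compresses only when ShouldCompress agrees, though that link is inferred:

    rc := NewResponseCompressor()

    hello := func(w http.ResponseWriter, r *http.Request) {
        w.Header().Set("Content-Type", "application/json")
        fmt.Fprint(w, `{"message":"hello"}`)
    }

    http.HandleFunc("/hello", rc.CompressHandler(hello))
    log.Fatal(http.ListenAndServe(":8080", nil))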
type StorageBackend ¶
type StorageBackend interface {
    Read(key string) ([]byte, error)
    Write(key string, data []byte) error
    Delete(key string) error
}
StorageBackend interface for underlying storage
type StorageOptimizer ¶
type StorageOptimizer struct {
// contains filtered or unexported fields
}
StorageOptimizer optimizes storage operations for govc
func NewStorageOptimizer ¶
func NewStorageOptimizer(cacheSize int) *StorageOptimizer
NewStorageOptimizer creates a new storage optimizer
func (*StorageOptimizer) Stats ¶
func (so *StorageOptimizer) Stats() map[string]interface{}
Stats returns optimizer statistics
type StorageStats ¶
type StorageStats struct {
    CacheHits       atomic.Uint64
    CacheMisses     atomic.Uint64
    BytesCompressed atomic.Uint64
    BytesSaved      atomic.Uint64
    DedupeHits      atomic.Uint64
    BatchedWrites   atomic.Uint64
    TotalWrites     atomic.Uint64
}
StorageStats tracks storage performance metrics
type StreamProcessor ¶
type StreamProcessor struct {
// contains filtered or unexported fields
}
StreamProcessor handles streaming data processing with minimal memory usage
func NewStreamProcessor ¶
func NewStreamProcessor(pool *MemoryPool, chunkSize int, workers int) *StreamProcessor
NewStreamProcessor creates a new stream processor
func (*StreamProcessor) CompressStream ¶
func (sp *StreamProcessor) CompressStream(reader io.Reader, writer io.Writer) error
CompressStream compresses data with streaming and minimal memory
func (*StreamProcessor) DecompressStream ¶
func (sp *StreamProcessor) DecompressStream(reader io.Reader, writer io.Writer) error
DecompressStream decompresses data with streaming and minimal memory
func (*StreamProcessor) NewChunkedReader ¶
func (sp *StreamProcessor) NewChunkedReader(reader io.Reader) *ChunkedReader
NewChunkedReader creates a new chunked reader
func (*StreamProcessor) NewChunkedWriter ¶
func (sp *StreamProcessor) NewChunkedWriter(writer io.Writer) *ChunkedWriter
NewChunkedWriter creates a new chunked writer
func (*StreamProcessor) NewPipeline ¶
func (sp *StreamProcessor) NewPipeline(stages ...ProcessFunc) *Pipeline
NewPipeline creates a new processing pipeline
func (*StreamProcessor) Process ¶
func (sp *StreamProcessor) Process(ctx context.Context, reader io.Reader, writer io.Writer, fn ProcessFunc) error
Process streams data through a processing function
func (*StreamProcessor) Stats ¶
func (sp *StreamProcessor) Stats() StreamStats
Stats returns current statistics
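Since NewStreamProcessor needs a *MemoryPool and this page lists no MemoryPool constructor, the sketch takes the processor as a parameter and streams a file through the compressor:

    func compressFile(sp *StreamProcessor, src, dst string) error {
        in, err := os.Open(src)
        if err != nil {
            return err
        }
        defer in.Close()

        out, err := os.Create(dst)
        if err != nil {
            return err
        }
        defer out.Close()

        return sp.CompressStream(in, out) // processed in chunks; the whole file is never in memory
    }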
type StreamStats ¶
StreamStats tracks streaming performance
type StringBuilder ¶
type StringBuilder struct {
// contains filtered or unexported fields
}
StringBuilder is a string builder backed by pooled memory
func (*StringBuilder) Release ¶
func (sb *StringBuilder) Release()
Release returns the buffer to the pool
func (*StringBuilder) String ¶
func (sb *StringBuilder) String() string
String returns the built string
type TokenBucket ¶
type TokenBucket struct {
// contains filtered or unexported fields
}
TokenBucket implements token bucket algorithm
type Transaction ¶
type Transaction struct {
// contains filtered or unexported fields
}
Transaction represents a managed transaction
type TransactionManager ¶
type TransactionManager struct {
// contains filtered or unexported fields
}
TransactionManager manages database transactions
func NewTransactionManager ¶
func NewTransactionManager() *TransactionManager
NewTransactionManager creates a transaction manager
func (*TransactionManager) Begin ¶
func (tm *TransactionManager) Begin(db *sql.DB) (*Transaction, error)
Begin starts a new transaction
func (*TransactionManager) Commit ¶
func (tm *TransactionManager) Commit(trans *Transaction) error
Commit commits a transaction
func (*TransactionManager) Rollback ¶
func (tm *TransactionManager) Rollback(trans *Transaction) error
Rollback rolls back a transaction
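The Begin/Commit/Rollback triple composes naturally into a run-in-transaction helper. Transaction's methods are not shown on this page, so the callback receives the handle opaquely:

    func inTx(tm *TransactionManager, db *sql.DB, work func(*Transaction) error) error {
        trans, err := tm.Begin(db)
        if err != nil {
            return err
        }
        if err := work(trans); err != nil {
            _ = tm.Rollback(trans) // best effort; the work error takes precedence
            return err
        }
        return tm.Commit(trans)
    }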
type WorkerPool ¶
type WorkerPool struct {
// contains filtered or unexported fields
}
WorkerPool manages a pool of worker goroutines
func NewWorkerPool ¶
func NewWorkerPool(minWorkers, maxWorkers int) *WorkerPool
NewWorkerPool creates a new worker pool
func (*WorkerPool) Shutdown ¶
func (wp *WorkerPool) Shutdown(ctx context.Context) error
Shutdown gracefully shuts down the pool
func (*WorkerPool) Submit ¶
func (wp *WorkerPool) Submit(fn func()) bool
Submit submits work to the pool
func (*WorkerPool) SubmitWait ¶
func (wp *WorkerPool) SubmitWait(fn func()) bool
SubmitWait submits work and waits for completion
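A sketch of submitting work and shutting down. Submit's boolean is taken to signal whether the pool accepted the work; that reading is inferred from the signature:

    wp := NewWorkerPool(2, 16) // assumed: scales between 2 and 16 workers

    for i := 0; i < 100; i++ {
        if !wp.Submit(func() { /* do work */ }) {
            log.Println("pool rejected work")
        }
    }

    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()
    if err := wp.Shutdown(ctx); err != nil {
        log.Println("shutdown:", err)
    }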
type WriteBatcher ¶
type WriteBatcher struct {
// contains filtered or unexported fields
}
WriteBatcher batches multiple writes for efficiency
func NewWriteBatcher ¶
func NewWriteBatcher(batchSize int, flushInterval time.Duration) *WriteBatcher
NewWriteBatcher creates a write batcher
func (*WriteBatcher) Close ¶
func (wb *WriteBatcher) Close()
Close flushes pending writes and stops the batcher
type WriteRequest ¶
WriteRequest represents a batched write