pruner

package
v0.41.1-data-migration...
Published: May 20, 2025 License: AGPL-3.0 Imports: 18 Imported by: 0

Documentation

Index

Constants

const NextHeightForUnprunedExecutionDataPackKey = "NextHeightForUnprunedExecutionDataPackKey"

Variables

var DefaultConfig = PruningConfig{
	Threshold: 30 * 60 * 60 * 24 * 1.2,
	BatchSize: 1200,

	SleepAfterEachBatchCommit: 12 * time.Second,
	SleepAfterEachIteration:   math.MaxInt64,
}
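As an illustration (not part of the package docs): the default Threshold expression evaluates to 3,110,400 blocks, which, assuming the ~1.2 blocks-per-second rate the expression implies, corresponds to roughly 30 days of blocks. A minimal sketch, with an assumed import path:

package main

import (
	"fmt"

	"github.com/onflow/flow-go/engine/execution/pruner" // assumed import path
)

func main() {
	// Assumption read off the default expression above: ~1.2 blocks per second.
	const blocksPerSecond = 1.2

	days := float64(pruner.DefaultConfig.Threshold) / blocksPerSecond / (60 * 60 * 24)
	fmt.Printf("default Threshold keeps %d blocks (~%.0f days at %.1f blocks/s)\n",
		pruner.DefaultConfig.Threshold, days, blocksPerSecond)
}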

Functions

func EstimateBatchProcessing

func EstimateBatchProcessing(
	start, end uint64,
	batchSize uint,
	sleepAfterEachBatchCommit time.Duration,
	commitDuration time.Duration,
) (
	batchCount uint64, totalDuration time.Duration)

EstimateBatchProcessing estimates the number of batches and the total duration. start and end are both inclusive.
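For illustration only, a minimal sketch of calling it, assuming the import path below and an average commit duration of 50 ms:

package main

import (
	"fmt"
	"time"

	"github.com/onflow/flow-go/engine/execution/pruner" // assumed import path
)

func main() {
	// Estimate pruning heights 1,000 through 2,000,000 (both inclusive)
	// in batches of 1200 blocks, sleeping 12s after each batch commit,
	// assuming each commit takes ~50ms.
	batchCount, totalDuration := pruner.EstimateBatchProcessing(
		1_000, 2_000_000,
		1200,
		12*time.Second,
		50*time.Millisecond,
	)
	fmt.Printf("batches=%d estimated=%s\n", batchCount, totalDuration)
}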

func LoopPruneExecutionDataFromRootToLatestSealed

func LoopPruneExecutionDataFromRootToLatestSealed(
	ctx context.Context,
	log zerolog.Logger,
	metrics module.ExecutionMetrics,
	state protocol.State,
	protocolDB storage.DB,
	headers storage.Headers,
	chunkDataPacks storage.ChunkDataPacks,
	results storage.ExecutionResults,
	chunkDataPacksDB *pebble.DB,
	config PruningConfig,
) error

func NewChunkDataPackPruningEngine

func NewChunkDataPackPruningEngine(
	log zerolog.Logger,
	metrics module.ExecutionMetrics,
	state protocol.State,
	protocolDB storage.DB,
	headers storage.Headers,
	chunkDataPacks storage.ChunkDataPacks,
	results storage.ExecutionResults,
	chunkDataPacksDB *pebble.DB,
	config PruningConfig,
) *component.ComponentManager

NewChunkDataPackPruningEngine creates a component that prunes chunk data packs from root to the latest sealed block.
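A hedged sketch of wiring the engine from dependencies the caller already holds; the flow-go and pebble import paths are assumed (the pebble major version in particular may differ), and the BatchSize override is purely illustrative:

package example

import (
	"github.com/cockroachdb/pebble"
	"github.com/rs/zerolog"

	"github.com/onflow/flow-go/engine/execution/pruner" // assumed import path
	"github.com/onflow/flow-go/module"
	"github.com/onflow/flow-go/module/component"
	"github.com/onflow/flow-go/state/protocol"
	"github.com/onflow/flow-go/storage"
)

// buildChunkDataPackPruner is a hypothetical helper: it constructs the pruning
// engine from dependencies the node already owns and returns the component
// manager, which the node's component lifecycle is expected to start.
func buildChunkDataPackPruner(
	log zerolog.Logger,
	metrics module.ExecutionMetrics,
	state protocol.State,
	protocolDB storage.DB,
	headers storage.Headers,
	chunkDataPacks storage.ChunkDataPacks,
	results storage.ExecutionResults,
	chunkDataPacksDB *pebble.DB,
) *component.ComponentManager {
	cfg := pruner.DefaultConfig
	cfg.BatchSize = 500 // illustrative override, not a recommendation

	return pruner.NewChunkDataPackPruningEngine(
		log, metrics, state, protocolDB,
		headers, chunkDataPacks, results, chunkDataPacksDB,
		cfg,
	)
}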

Types

type ChunkDataPackPruner

type ChunkDataPackPruner struct {
	*pruners.ChunkDataPackPruner
}

func NewChunkDataPackPruner

func NewChunkDataPackPruner(chunkDataPacks storage.ChunkDataPacks, results storage.ExecutionResults) *ChunkDataPackPruner

func (*ChunkDataPackPruner) ExecuteByBlockID

func (c *ChunkDataPackPruner) ExecuteByBlockID(blockID flow.Identifier, batch storage.ReaderBatchWriter) (exception error)
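A small sketch of pruning one block's chunk data packs inside a caller-provided write batch; only the signatures documented above are used, and the import paths are assumed:

package example

import (
	"github.com/onflow/flow-go/engine/execution/pruner" // assumed import path
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// pruneChunkDataPacksForBlock is a hypothetical helper: it asks the pruner to
// delete the chunk data packs for the given block, recording the deletions in
// the supplied batch.
func pruneChunkDataPacksForBlock(
	chunkDataPacks storage.ChunkDataPacks,
	results storage.ExecutionResults,
	blockID flow.Identifier,
	batch storage.ReaderBatchWriter,
) error {
	p := pruner.NewChunkDataPackPruner(chunkDataPacks, results)
	return p.ExecuteByBlockID(blockID, batch)
}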

type LatestPrunable

type LatestPrunable struct {
	*latest.LatestSealedAndExecuted
	// contains filtered or unexported fields
}

LatestPrunable decides which blocks are prunable. We don't want to prune all of the sealed blocks, but keep a certain number of them so that the data is still available for querying.

func (*LatestPrunable) Latest

func (l *LatestPrunable) Latest() (*flow.Header, error)

type PruningConfig

type PruningConfig struct {
	Threshold                 uint64        // The threshold is the number of blocks that we want to keep in the database.
	BatchSize                 uint          // The batch size is the number of blocks that we want to delete in one batch.
	SleepAfterEachBatchCommit time.Duration // The sleep time after each batch commit.
	SleepAfterEachIteration   time.Duration // The sleep time after each iteration.
}
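A minimal sketch of deriving a custom configuration from DefaultConfig; the overridden values are illustrative only, and the import path is assumed:

package main

import (
	"fmt"
	"time"

	"github.com/onflow/flow-go/engine/execution/pruner" // assumed import path
)

func main() {
	cfg := pruner.DefaultConfig
	cfg.Threshold = 7 * 60 * 60 * 24 * 1.2  // keep ~7 days of blocks, assuming ~1.2 blocks/s
	cfg.BatchSize = 500                     // delete fewer blocks per batch
	cfg.SleepAfterEachIteration = time.Hour // look for newly sealed blocks again after an hour
	fmt.Printf("%+v\n", cfg)
}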
