Documentation ¶
Overview ¶
Package core defines the structures that hold data and the related functions that operate on them.
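A minimal usage sketch. The import path below is an assumption (it is not stated on this page); the rest uses only functions and methods documented here.

package main

import (
	"fmt"

	"github.com/0glabs/0g-storage-client/core" // assumed import path, not stated on this page
)

func main() {
	// Wrap raw bytes in a DataInMemory, which implements IterableData.
	data, err := core.NewDataInMemory([]byte("hello 0g storage"))
	if err != nil {
		panic(err)
	}

	// Inspect the chunk/segment layout via the documented accessors.
	fmt.Println("chunks:", data.NumChunks())
	fmt.Println("segments:", data.NumSegments())
	fmt.Println("padded size:", data.PaddedSize())
}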
Index ¶
- Constants
- Variables
- func ComputePaddedSize(chunks uint64) (uint64, uint64)
- func Exists(name string) (bool, error)
- func IteratorPaddedSize(dataSize int64, flowPadding bool) uint64
- func MerkleRoot(filename string) (common.Hash, error)
- func MerkleTree(data IterableData) (*merkle.Tree, error)
- func NextPow2(input uint64) uint64
- func NumSegmentsPadded(data IterableData) int
- func NumSplits(total int64, unit int) uint64
- func PaddedSegmentRoot(segmentIndex uint64, chunks []byte, fileSize int64) (common.Hash, uint64)
- func ReadAt(data IterableData, readSize int, offset int64, paddedSize uint64) ([]byte, error)
- func SegmentRange(startChunkIndex, fileSize uint64) (startSegmentIndex, endSegmentIndex uint64)
- func SegmentRoot(chunks []byte, emptyChunksPadded ...uint64) common.Hash
- type DataInMemory
- func (data *DataInMemory) NumChunks() uint64
- func (data *DataInMemory) NumSegments() uint64
- func (data *DataInMemory) Offset() int64
- func (data *DataInMemory) PaddedSize() uint64
- func (data *DataInMemory) Read(buf []byte, offset int64) (int, error)
- func (data *DataInMemory) Size() int64
- func (data *DataInMemory) Split(fragmentSize int64) []IterableData
- type File
- func (file *File) Close() error
- func (file *File) NumChunks() uint64
- func (file *File) NumSegments() uint64
- func (file *File) Offset() int64
- func (file *File) PaddedSize() uint64
- func (file *File) Read(buf []byte, offset int64) (int, error)
- func (file *File) Size() int64
- func (file *File) Split(fragmentSize int64) []IterableData
- type Flow
- type IterableData
- type Iterator
- type TreeBuilderInitializer
Constants ¶
const (
	// DefaultChunkSize represents the default chunk size in bytes.
	DefaultChunkSize = 256

	// DefaultSegmentMaxChunks represents the default maximum number of chunks within a segment.
	DefaultSegmentMaxChunks = 1024

	// DefaultSegmentSize represents the default segment size in bytes.
	DefaultSegmentSize = DefaultChunkSize * DefaultSegmentMaxChunks
)
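As a quick sanity check of how these constants relate (plain arithmetic only; padding is handled separately by ComputePaddedSize and the NumSegments accessors): DefaultSegmentSize = 256 * 1024 = 262,144 bytes (256 KiB), so a 1 MiB payload is 4,096 chunks, i.e. 4 full segments of 1,024 chunks each before padding.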
Variables ¶
var (
	EmptyChunk     = make([]byte, DefaultChunkSize)
	EmptyChunkHash = crypto.Keccak256Hash(EmptyChunk)
)
var (
	// ErrFileRequired is returned when a folder is given where a file is required.
	ErrFileRequired = errors.New("file required")

	// ErrFileEmpty is returned when an empty file is opened.
	ErrFileEmpty = errors.New("file is empty")
)
Functions ¶
func ComputePaddedSize ¶
func ComputePaddedSize(chunks uint64) (uint64, uint64)
func IteratorPaddedSize ¶
func IteratorPaddedSize(dataSize int64, flowPadding bool) uint64
func MerkleRoot ¶ added in v0.6.0
func MerkleRoot(filename string) (common.Hash, error)
MerkleRoot returns the merkle root hash of a file on disk.
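A hedged sketch, reusing the assumed import from the overview example (fmt and log imports elided); the file name is a placeholder.

root, err := core.MerkleRoot("data.bin") // "data.bin" is a placeholder path
if err != nil {
	log.Fatal(err) // e.g. the path may point to a folder or to an empty file
}
fmt.Println("merkle root:", root.Hex())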
func MerkleTree ¶
func MerkleTree(data IterableData) (*merkle.Tree, error)
MerkleTree creates the merkle tree of the data.
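A hedged sketch, reusing the assumed import from the overview example. Accessors on *merkle.Tree are not documented on this page, so the Root() call below is an assumption.

data, err := core.NewDataInMemory([]byte("hello"))
if err != nil {
	log.Fatal(err)
}
tree, err := core.MerkleTree(data)
if err != nil {
	log.Fatal(err)
}
fmt.Println("root:", tree.Root()) // Root() accessor is assumed, not shown on this page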
func NumSegmentsPadded ¶
func NumSegmentsPadded(data IterableData) int
NumSegmentsPadded returns the number of segments of the padded data.
func PaddedSegmentRoot ¶ added in v0.6.2
func PaddedSegmentRoot(segmentIndex uint64, chunks []byte, fileSize int64) (common.Hash, uint64)
PaddedSegmentRoot calculates the Merkle root for a given segment based on its index, the chunk data, and the file size. It handles the logic of padding empty chunks if the segment is the last one and doesn't have enough data to form a complete segment.
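A hedged sketch of computing the root of a final, partial segment. The sizes are illustrative, and the meaning of the second return value is not documented on this page, so it is discarded here.

fileSize := int64(300 * 1024)       // a 300 KiB file: one full segment plus a partial one
lastChunks := make([]byte, 44*1024) // raw bytes of the trailing partial segment (176 chunks)
root, _ := core.PaddedSegmentRoot(1, lastChunks, fileSize) // segment index 1 is the last segment
fmt.Println("padded segment root:", root.Hex())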
func SegmentRange ¶ added in v0.6.2
func SegmentRange(startChunkIndex, fileSize uint64) (startSegmentIndex, endSegmentIndex uint64)
SegmentRange calculates the start and end flow segment index for a file based on the file's start chunk index and file size.
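A hedged sketch with illustrative numbers, reusing the assumed import from the overview example.

// For a file placed at flow chunk index 2048 with a size of 300 KiB,
// compute the first and last flow segment it occupies.
start, end := core.SegmentRange(2048, 300*1024)
fmt.Printf("flow segments %d..%d\n", start, end)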
Types ¶
type DataInMemory ¶
type DataInMemory struct {
// contains filtered or unexported fields
}
DataInMemory is an implementation of IterableData backed by in-memory data.
func NewDataInMemory ¶
func NewDataInMemory(data []byte) (*DataInMemory, error)
NewDataInMemory creates a DataInMemory from the given data.
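A hedged sketch (bytes, fmt and log imports elided); Read is presumed here to behave like io.ReaderAt (fill buf starting at offset), which this page does not spell out.

data, err := core.NewDataInMemory(bytes.Repeat([]byte{0xab}, 1024))
if err != nil {
	log.Fatal(err)
}
buf := make([]byte, 256)
n, err := data.Read(buf, 512) // read 256 bytes starting at offset 512 (io.ReaderAt-style, assumed)
if err != nil {
	log.Fatal(err)
}
fmt.Println("read", n, "bytes")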
func (*DataInMemory) NumChunks ¶
func (data *DataInMemory) NumChunks() uint64
func (*DataInMemory) NumSegments ¶
func (data *DataInMemory) NumSegments() uint64
func (*DataInMemory) Offset ¶ added in v0.6.2
func (data *DataInMemory) Offset() int64
func (*DataInMemory) PaddedSize ¶
func (data *DataInMemory) PaddedSize() uint64
func (*DataInMemory) Size ¶
func (data *DataInMemory) Size() int64
func (*DataInMemory) Split ¶ added in v0.6.2
func (data *DataInMemory) Split(fragmentSize int64) []IterableData
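A hedged sketch of Split; the fragment size is arbitrary, and the exact splitting semantics (e.g. how the last fragment is sized) are not documented on this page.

fragments := data.Split(4 * 1024 * 1024) // split into fragments of at most 4 MiB each (assumed semantics)
for i, frag := range fragments {
	fmt.Printf("fragment %d: %d bytes (offset %d)\n", i, frag.Size(), frag.Offset())
}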
type File ¶
File is an implementation of IterableData backed by a file on disk.
func (*File) NumSegments ¶
func (file *File) NumSegments() uint64
func (*File) PaddedSize ¶
func (file *File) PaddedSize() uint64
func (*File) Split ¶ added in v0.6.2
func (file *File) Split(fragmentSize int64) []IterableData
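A hedged sketch that exercises the documented File methods. How a *core.File is obtained is not shown on this page, so the value is taken as a parameter; the helper name is illustrative, not part of the package.

func printFileStats(file *core.File) {
	defer file.Close() // release the underlying file handle
	fmt.Println("size:", file.Size())
	fmt.Println("segments:", file.NumSegments())
	fmt.Println("padded size:", file.PaddedSize())
}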
type Flow ¶
type Flow struct {
// contains filtered or unexported fields
}
func (*Flow) CreateSubmission ¶
func (flow *Flow) CreateSubmission() (*contract.Submission, error)
type IterableData ¶
type IterableData interface {
	NumChunks() uint64
	NumSegments() uint64
	Offset() int64
	Size() int64
	PaddedSize() uint64
	Read(buf []byte, offset int64) (int, error)
	Split(fragmentSize int64) []IterableData
}
IterableData defines the interface for data to be uploaded to the 0g storage network.
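Since both *File and *DataInMemory satisfy this interface, upload-side helpers can be written generically. A small illustrative helper (not part of the package):

func describe(data core.IterableData) {
	fmt.Printf("offset=%d size=%d paddedSize=%d chunks=%d segments=%d\n",
		data.Offset(), data.Size(), data.PaddedSize(), data.NumChunks(), data.NumSegments())
}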
type TreeBuilderInitializer ¶
type TreeBuilderInitializer struct {
// contains filtered or unexported fields
}
func (*TreeBuilderInitializer) ParallelCollect ¶
func (t *TreeBuilderInitializer) ParallelCollect(result *parallel.Result) error
ParallelCollect implements parallel.Interface.
func (*TreeBuilderInitializer) ParallelDo ¶
func (t *TreeBuilderInitializer) ParallelDo(ctx context.Context, routine int, task int) (interface{}, error)
ParallelDo implements parallel.Interface.