Documentation ¶
Index ¶
- Constants
- Variables
- func AggregatedBloomFilterKey(fromBlock, toBlock uint64) []byte
- func BlockCommitmentsKey(blockNum uint64) []byte
- func BlockHeaderByNumberKey(blockNum uint64) []byte
- func BlockHeaderNumbersByHashKey(hash *felt.Felt) []byte
- func BucketStrings() []string
- func ClassKey(classHash *felt.Felt) []byte
- func ContractClassHashHistoryKey(addr *felt.Felt) []byte
- func ContractClassHashKey(addr *felt.Felt) []byte
- func ContractDeploymentHeightKey(addr *felt.Felt) []byte
- func ContractHistoryClassHashKey(addr *felt.Felt, blockNum uint64) []byte
- func ContractHistoryNonceKey(addr *felt.Felt, blockNum uint64) []byte
- func ContractHistoryStorageKey(addr, key *felt.Felt, blockNum uint64) []byte
- func ContractKey(addr *felt.Felt) []byte
- func ContractNonceHistoryKey(addr *felt.Felt) []byte
- func ContractNonceKey(addr *felt.Felt) []byte
- func ContractStorageHistoryKey(addr, loc *felt.Felt) []byte
- func ContractStorageKey(addr *felt.Felt, key []byte) []byte
- func L1HandlerTxnHashByMsgHashKey(msgHash []byte) []byte
- func MempoolNodeKey(txnHash *felt.Felt) []byte
- func PeerKey(peerID []byte) []byte
- func ReceiptByBlockNumIndexKey(num, index uint64) []byte
- func ReceiptByBlockNumIndexKeyBytes(key []byte) []byte
- func StateHashToTrieRootsKey(stateCommitment *felt.Felt) []byte
- func StateIDKey(root *felt.Felt) []byte
- func StateUpdateByBlockNumKey(num uint64) []byte
- func TestKeyValueStoreSuite(t *testing.T, newDB func() KeyValueStore)
- func TxBlockNumIndexByHashKey(hash *felt.Felt) []byte
- func TxByBlockNumIndexKey(num, index uint64) []byte
- func TxByBlockNumIndexKeyBytes(key []byte) []byte
- type AggregatedBloomFilterRangeKey
- type Batch
- type Batcher
- type BlockNumIndexKey
- type Bucket
- type BufferBatch
- func (b *BufferBatch) Delete(key []byte) error
- func (b *BufferBatch) DeleteRange(start, end []byte) error
- func (b *BufferBatch) Flush() error
- func (b *BufferBatch) Get(key []byte, cb func(value []byte) error) error
- func (b *BufferBatch) Has(key []byte) (bool, error)
- func (b *BufferBatch) NewIterator(prefix []byte, withUpperBound bool) (Iterator, error)
- func (b *BufferBatch) Put(key, val []byte) error
- func (b *BufferBatch) Reset()
- func (b *BufferBatch) Size() int
- func (b *BufferBatch) Write() error
- type EventListener
- type Helper
- type IndexedBatch
- type IndexedBatcher
- type Iterable
- type Iterator
- type KeyValueRangeDeleter
- type KeyValueReader
- type KeyValueStore
- type KeyValueWriter
- type Listener
- type SelectiveListener
- type Snapshot
- type Snapshotter
- type SyncBatch
- func (s *SyncBatch) Delete(key []byte) error
- func (s *SyncBatch) DeleteRange(start, end []byte) error
- func (s *SyncBatch) Get(key []byte, cb func(value []byte) error) error
- func (s *SyncBatch) Has(key []byte) (bool, error)
- func (s *SyncBatch) NewIterator(lowerBound []byte, withUpperBound bool) (Iterator, error)
- func (s *SyncBatch) Put(key, val []byte) error
- func (s *SyncBatch) Reset()
- func (s *SyncBatch) Size() int
- func (s *SyncBatch) Write() error
Constants ¶
const AggregatedBloomFilterRangeKeySize = 16
const BlockNumIndexKeySize = 16
const DefaultBatchSize = 10 * utils.Megabyte
Variables ¶
var ErrKeyNotFound = errors.New("key not found")
Functions ¶
func AggregatedBloomFilterKey ¶ added in v0.14.6
func BlockCommitmentsKey ¶ added in v0.14.4
func BlockHeaderByNumberKey ¶ added in v0.14.4
func BlockHeaderNumbersByHashKey ¶ added in v0.14.4
func BucketStrings ¶ added in v0.12.0
func BucketStrings() []string
BucketStrings returns a slice of all String values of the enum
func ContractClassHashHistoryKey ¶ added in v0.14.4
func ContractClassHashKey ¶ added in v0.14.4
func ContractDeploymentHeightKey ¶ added in v0.14.4
func ContractHistoryClassHashKey ¶ added in v0.14.4
func ContractHistoryNonceKey ¶ added in v0.14.4
func ContractHistoryStorageKey ¶ added in v0.14.4
func ContractKey ¶ added in v0.14.7
func ContractNonceHistoryKey ¶ added in v0.14.4
func ContractNonceKey ¶ added in v0.14.4
func ContractStorageHistoryKey ¶ added in v0.14.4
func ContractStorageKey ¶ added in v0.14.4
func L1HandlerTxnHashByMsgHashKey ¶ added in v0.14.4
func MempoolNodeKey ¶ added in v0.14.4
func ReceiptByBlockNumIndexKey ¶ added in v0.14.4
func ReceiptByBlockNumIndexKeyBytes ¶ added in v0.14.4
func StateHashToTrieRootsKey ¶ added in v0.14.6
func StateIDKey ¶ added in v0.14.5
func StateUpdateByBlockNumKey ¶ added in v0.14.4
func TestKeyValueStoreSuite ¶ added in v0.14.4
func TestKeyValueStoreSuite(t *testing.T, newDB func() KeyValueStore)
TestKeyValueStoreSuite runs a suite of tests against a KeyValueStore database implementation.
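For illustration, a backend's own tests might wire into the suite roughly as follows. This is only a sketch: the import path for this package and the newTestStore constructor are assumptions, not part of the documented API.

// Assumed imports: "testing" and this package imported as "db"
// (path assumed to be github.com/NethermindEth/juno/db).

// newTestStore is a hypothetical helper that returns a fresh, empty
// KeyValueStore implementation under test (for example an in-memory backend).
func newTestStore() db.KeyValueStore {
	panic("replace with a concrete KeyValueStore constructor")
}

func TestMyBackend(t *testing.T) {
	// Runs the shared conformance tests against the implementation.
	db.TestKeyValueStoreSuite(t, newTestStore)
}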
func TxBlockNumIndexByHashKey ¶ added in v0.14.4
func TxByBlockNumIndexKey ¶ added in v0.14.4
func TxByBlockNumIndexKeyBytes ¶ added in v0.14.4
Types ¶
type AggregatedBloomFilterRangeKey ¶ added in v0.14.6
func (*AggregatedBloomFilterRangeKey) MarshalBinary ¶ added in v0.14.6
func (b *AggregatedBloomFilterRangeKey) MarshalBinary() []byte
func (*AggregatedBloomFilterRangeKey) UnmarshalBinary ¶ added in v0.14.6
func (b *AggregatedBloomFilterRangeKey) UnmarshalBinary(data []byte) error
type Batch ¶ added in v0.14.4
type Batch interface {
	KeyValueWriter
	KeyValueRangeDeleter

	// Retrieves the value size of the data stored in the batch for writing
	Size() int

	// Flushes the data stored to disk
	Write() error

	// Resets the batch
	Reset()
}
A write-only store that gathers changes in memory and writes them to disk in a single atomic operation. A single Batch is not safe for concurrent use, but separate batches can be used from different goroutines.
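A minimal usage sketch, assuming store is any KeyValueStore implementation (which embeds Batcher) and the keys and values are purely illustrative:

// Assumed import: this package as "db".

// writeAtomically stages a few changes in memory and commits them in one go.
func writeAtomically(store db.KeyValueStore) error {
	batch := store.NewBatch() // write-only; nothing touches disk yet

	if err := batch.Put([]byte("key-1"), []byte("value-1")); err != nil {
		return err
	}
	if err := batch.Delete([]byte("key-2")); err != nil {
		return err
	}

	// Write flushes every staged change to disk as a single atomic operation.
	return batch.Write()
}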
type Batcher ¶ added in v0.14.4
type Batcher interface {
	// Creates a write-only batch
	NewBatch() Batch

	// Creates a write-only batch with a pre-allocated size
	NewBatchWithSize(size int) Batch
}
Produces a batch to write to the database.
type BlockNumIndexKey ¶ added in v0.14.4
func (*BlockNumIndexKey) MarshalBinary ¶ added in v0.14.4
func (b *BlockNumIndexKey) MarshalBinary() []byte
func (*BlockNumIndexKey) UnmarshalBinary ¶ added in v0.14.4
func (b *BlockNumIndexKey) UnmarshalBinary(data []byte) error
type Bucket ¶
type Bucket byte
const (
	StateTrie         Bucket = iota // state metadata (e.g., the state root)
	Peer                            // maps peer ID to peer multiaddresses
	ContractClassHash               // maps contract addresses and class hashes
	ContractStorage                 // contract storages
	Class                           // maps class hashes to classes
	ContractNonce                   // contract nonce
	ChainHeight                     // latest height of the blockchain
	BlockHeaderNumbersByHash
	BlockHeadersByNumber
	TransactionBlockNumbersAndIndicesByHash // maps transaction hashes to block number and index
	TransactionsByBlockNumberAndIndex       // maps block number and index to transaction
	ReceiptsByBlockNumberAndIndex           // maps block number and index to transaction receipt
	StateUpdatesByBlockNumber
	ClassesTrie
	ContractStorageHistory
	ContractNonceHistory
	ContractClassHashHistory
	ContractDeploymentHeight
	L1Height
	SchemaVersion
	Unused // previously used for storing the pending block
	BlockCommitments
	Temporary // used temporarily for migrations
	SchemaIntermediateState
	L1HandlerTxnHashByMsgHash // maps L1 handler msg hash to L1 handler txn hash
	MempoolHead               // key of the head node
	MempoolTail               // key of the tail node
	MempoolLength             // number of transactions
	MempoolNode
	ClassTrie              // ClassTrie + nodetype + path + pathlength -> trie node
	ContractTrieContract   // ContractTrieContract + nodetype + path + pathlength -> trie node
	ContractTrieStorage    // ContractTrieStorage + nodetype + path + pathlength -> trie node
	Contract               // Contract + ContractAddr -> Contract
	StateHashToTrieRoots   // StateHash -> ClassRootHash + ContractRootHash
	StateID                // StateID + root hash -> state id
	PersistedStateID       // PersistedStateID -> state id
	TrieJournal            // TrieJournal -> journal
	AggregatedBloomFilters // maps block range to AggregatedBloomFilter
	RunningEventFilter     // aggregated filter not full yet
)
Pebble does not support buckets to differentiate between groups of keys like Bolt or MDBX do. We use a global prefix list as a poor man's bucket alternative.
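In practice the exported *Key helper functions above build these prefixed keys, so callers rarely concatenate a bucket byte by hand. A hedged sketch of a lookup through one such helper; the felt import path is assumed, and the defensive copy reflects the usual callback contract rather than anything stated above:

// Assumed imports: this package as "db" and
// "github.com/NethermindEth/juno/core/felt" (path assumed).

// readContractNonce looks up a contract's nonce via the bucket-prefixed key
// built by ContractNonceKey.
func readContractNonce(r db.KeyValueReader, addr *felt.Felt) ([]byte, error) {
	var nonce []byte
	err := r.Get(db.ContractNonceKey(addr), func(v []byte) error {
		nonce = append([]byte(nil), v...) // copy defensively; v may not be valid after the callback
		return nil
	})
	return nonce, err
}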
func BucketString ¶ added in v0.12.0
BucketString retrieves an enum value from the enum constants string name. It returns an error if the param is not part of the enum.
func BucketValues ¶ added in v0.12.0
func BucketValues() []Bucket
BucketValues returns all values of the enum
func (Bucket) IsABucket ¶ added in v0.12.0
IsABucket returns "true" if the value is listed in the enum definition. "false" otherwise
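A small sketch of the generated enum helpers. It assumes BucketStrings and BucketValues return their elements in the same order, and that IsABucket returns a bool as its doc comment implies; only the documented signatures are used.

// Assumed imports: "fmt" and this package as "db".

func inspectBuckets() {
	names := db.BucketStrings()  // string name of every bucket
	values := db.BucketValues()  // every Bucket value (assumed to be in the same order)

	for i, b := range values {
		fmt.Printf("%d: %s (valid: %v)\n", b, names[i], b.IsABucket())
	}
}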
type BufferBatch ¶ added in v0.14.4
type BufferBatch struct {
// contains filtered or unexported fields
}
TODO: DO NOT USE THIS! This is meant to be a temporary replacement for the buffered transaction. After the state refactor, we can remove it.
func NewBufferBatch ¶ added in v0.14.4
func NewBufferBatch(txn IndexedBatch) *BufferBatch
func (*BufferBatch) Delete ¶ added in v0.14.4
func (b *BufferBatch) Delete(key []byte) error
func (*BufferBatch) DeleteRange ¶ added in v0.14.4
func (b *BufferBatch) DeleteRange(start, end []byte) error
func (*BufferBatch) Flush ¶ added in v0.14.4
func (b *BufferBatch) Flush() error
func (*BufferBatch) Get ¶ added in v0.14.4
func (b *BufferBatch) Get(key []byte, cb func(value []byte) error) error
func (*BufferBatch) NewIterator ¶ added in v0.14.4
func (b *BufferBatch) NewIterator(prefix []byte, withUpperBound bool) (Iterator, error)
func (*BufferBatch) Put ¶ added in v0.14.4
func (b *BufferBatch) Put(key, val []byte) error
func (*BufferBatch) Reset ¶ added in v0.14.4
func (b *BufferBatch) Reset()
func (*BufferBatch) Size ¶ added in v0.14.4
func (b *BufferBatch) Size() int
func (*BufferBatch) Write ¶ added in v0.14.4
func (b *BufferBatch) Write() error
type EventListener ¶ added in v0.7.0
type Helper ¶ added in v0.14.4
type Helper interface {
	// This will create a read-write transaction, apply the callback to it, and flush the changes
	Update(func(IndexedBatch) error) error

	// This will create a read-only snapshot and apply the callback to it
	View(func(Snapshot) error) error

	// TODO(weiihann): honestly this doesn't make sense, but it's currently needed for the metrics
	// remove this once the metrics are refactored
	// Returns the underlying database
	Impl() any
}
Helper interface
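A hedged sketch of Update and View, assuming store is any KeyValueStore implementation (which embeds Helper); the key and value are illustrative:

// Assumed import: this package as "db".

// bumpValue rewrites a single key inside one read-write transaction, then
// reads it back through a read-only snapshot.
func bumpValue(store db.KeyValueStore, key, val []byte) error {
	// Update creates a read-write IndexedBatch, runs the callback, and flushes the changes.
	if err := store.Update(func(txn db.IndexedBatch) error {
		return txn.Put(key, val)
	}); err != nil {
		return err
	}

	// View runs the callback against a read-only snapshot.
	return store.View(func(snap db.Snapshot) error {
		return snap.Get(key, func(v []byte) error {
			// v holds the freshly written value here.
			return nil
		})
	})
}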
type IndexedBatch ¶ added in v0.14.4
type IndexedBatch interface {
	Batch
	KeyValueReader
	Iterable
}
Same as Batch, but allows for reads from the batch and the disk. Use this only if you need to read from both the in-memory and on-disk data. Write operations will be slower compared to a regular Batch. Ideally, IndexedBatch should not be used at all. Write operations should be done using a regular Batch, and read operations should be done by accessing the database directly.
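A sketch of the read-your-writes pattern that IndexedBatch enables; per the note above, prefer a plain Batch plus direct database reads where possible. store is assumed to be any KeyValueStore implementation:

// Assumed import: this package as "db".

// putIfAbsent writes a key only when neither the batch nor the database
// already contains it, then commits the batch.
func putIfAbsent(store db.KeyValueStore, key, val []byte) error {
	batch := store.NewIndexedBatch() // readable: sees its own writes plus the disk

	exists, err := batch.Has(key)
	if err != nil {
		return err
	}
	if !exists {
		if err := batch.Put(key, val); err != nil {
			return err
		}
	}
	return batch.Write()
}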
type IndexedBatcher ¶ added in v0.14.4
type IndexedBatcher interface {
	NewIndexedBatch() IndexedBatch
	NewIndexedBatchWithSize(size int) IndexedBatch
}
Produces an IndexedBatch to write to the database and read from it.
type Iterable ¶ added in v0.14.4
type Iterable interface {
	// Returns an iterator over the database's key/value pairs
	NewIterator(prefix []byte, withUpperBound bool) (Iterator, error)
}
Creates iterators over a database's key/value pairs
type Iterator ¶
type Iterator interface {
	io.Closer

	// Valid returns true if the iterator is positioned at a valid key/value pair.
	Valid() bool

	// First moves the iterator to the first key/value pair.
	First() bool

	// Prev moves the iterator to the previous key/value pair.
	Prev() bool

	// Next moves the iterator to the next key/value pair. It returns whether the
	// iterator is valid after the call. Once invalid, the iterator remains
	// invalid.
	Next() bool

	// Key returns the key at the current position.
	Key() []byte

	// Value returns the value at the current position.
	Value() ([]byte, error)

	// Seek would seek to the provided key if present. If absent, it would seek to the next
	// key in lexicographical order.
	Seek(key []byte) bool
}
Provides functionality to iterate over a database's key/value pairs in ascending order. It must be closed after use. A single iterator cannot be used concurrently. Multiple iterators can be used concurrently.
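A sketch of a prefix scan. It assumes the usual for-Next loop style and that withUpperBound=true limits the scan to keys sharing the prefix; both are assumptions about behaviour not spelled out above.

// Assumed imports: "fmt" and this package as "db".

// dumpPrefix walks every key/value pair under the given prefix in ascending order.
func dumpPrefix(src db.Iterable, prefix []byte) error {
	it, err := src.NewIterator(prefix, true)
	if err != nil {
		return err
	}
	defer it.Close() // iterators must always be closed after use

	for it.Next() {
		val, err := it.Value()
		if err != nil {
			return err
		}
		fmt.Printf("%x => %x\n", it.Key(), val)
	}
	return nil
}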
type KeyValueRangeDeleter ¶ added in v0.14.4
type KeyValueRangeDeleter interface {
	// Deletes a range of keys from start (inclusive) to end (exclusive)
	DeleteRange(start, end []byte) error
}
Exposes a range-deletion interface to the database
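A small sketch of the inclusive/exclusive range semantics, using the single-byte bucket prefixing described above; clearing a whole bucket this way is an illustrative assumption, not a documented operation.

// Assumed import: this package as "db".

// clearBucket deletes every key that begins with the one-byte bucket prefix.
// start is inclusive and end is exclusive, so [prefix, prefix+1) covers exactly
// the keys under prefix. (Assumes b is not 0xff, so the end byte does not wrap.)
func clearBucket(d db.KeyValueRangeDeleter, b db.Bucket) error {
	return d.DeleteRange([]byte{byte(b)}, []byte{byte(b) + 1})
}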
type KeyValueReader ¶ added in v0.14.4
type KeyValueReader interface {
	// Checks if a key exists in the data store
	Has(key []byte) (bool, error)

	// If a given key exists, the callback will be called with the value
	// Example:
	//
	//	var value []byte
	//	db.Get([]byte("key"), func(v []byte) error {
	//		value = v
	//		return nil
	//	})
	Get(key []byte, cb func(value []byte) error) error
}
Exposes a read-only interface to the database
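A hedged usage sketch that copies the value out of the callback. It assumes Get reports a missing key with ErrKeyNotFound (the package's only exported error) and that the callback value should not be retained without copying.

// Assumed imports: "errors" and this package as "db".

// tryGet returns a copy of the value for key, or (nil, false, nil) when the
// key does not exist.
func tryGet(r db.KeyValueReader, key []byte) ([]byte, bool, error) {
	var value []byte
	err := r.Get(key, func(v []byte) error {
		value = append([]byte(nil), v...) // defensive copy
		return nil
	})
	if errors.Is(err, db.ErrKeyNotFound) {
		return nil, false, nil
	}
	if err != nil {
		return nil, false, err
	}
	return value, true, nil
}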
type KeyValueStore ¶ added in v0.14.4
type KeyValueStore interface {
	KeyValueReader
	KeyValueWriter
	KeyValueRangeDeleter
	Batcher
	IndexedBatcher
	Snapshotter
	Iterable
	Helper
	Listener
	io.Closer
}
Represents a key-value data store that can handle different operations
type KeyValueWriter ¶ added in v0.14.4
type KeyValueWriter interface {
	// Inserts a given value into the data store
	Put(key []byte, value []byte) error

	// Deletes a given key from the data store
	Delete(key []byte) error
}
Exposes a write-only interface to the database
type Listener ¶ added in v0.14.4
type Listener interface {
WithListener(listener EventListener) KeyValueStore
}
type SelectiveListener ¶ added in v0.7.0
type SelectiveListener struct {
	OnIOCb     func(write bool, duration time.Duration)
	OnCommitCb func(duration time.Duration)
}
func (*SelectiveListener) OnCommit ¶ added in v0.7.4
func (l *SelectiveListener) OnCommit(duration time.Duration)
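A sketch of wiring a listener into a store. It assumes *SelectiveListener satisfies EventListener and that callbacks left nil are simply skipped; neither is stated explicitly above.

// Assumed imports: "log", "time", and this package as "db".

// withCommitLogging returns a store that reports how long each commit takes.
func withCommitLogging(store db.KeyValueStore) db.KeyValueStore {
	listener := &db.SelectiveListener{
		OnCommitCb: func(d time.Duration) {
			log.Printf("db commit took %s", d)
		},
	}
	return store.WithListener(listener)
}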
type Snapshot ¶ added in v0.14.4
type Snapshot interface {
	KeyValueReader
	Iterable
	Close() error
}
Represents a read-only view of the database at a specific point in time. If you don't need to read at a specific time, use the database directly.
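A minimal sketch of point-in-time reads, assuming store is any KeyValueStore implementation (which embeds Snapshotter); the keys are illustrative:

// Assumed import: this package as "db".

// readConsistently performs two reads against the same point-in-time view,
// so writes that land in between cannot be observed.
func readConsistently(store db.KeyValueStore, keyA, keyB []byte) error {
	snap := store.NewSnapshot()
	defer snap.Close() // release the snapshot after use

	if err := snap.Get(keyA, func(v []byte) error { return nil }); err != nil {
		return err
	}
	return snap.Get(keyB, func(v []byte) error { return nil })
}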
type Snapshotter ¶ added in v0.14.4
type Snapshotter interface {
NewSnapshot() Snapshot
}
Produces a read-only snapshot of the database
type SyncBatch ¶ added in v0.14.4
type SyncBatch struct {
// contains filtered or unexported fields
}
A wrapper around IndexedBatch that allows for thread-safe operations. Ideally, you shouldn't have to use this at all. If you need to write to batches concurrently, it's better to create a single batch for each goroutine and then merge them afterwards.
func NewSyncBatch ¶ added in v0.14.4
func NewSyncBatch(batch IndexedBatch) *SyncBatch
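A sketch of sharing one SyncBatch across goroutines; per the note above, separate per-goroutine batches merged afterwards are usually the better choice, so this only illustrates SyncBatch itself. store and the values are assumptions for the example.

// Assumed imports: "sync" and this package as "db".

// concurrentPuts writes all keys through a single thread-safe batch, then
// commits the result once every goroutine has finished.
func concurrentPuts(store db.KeyValueStore, keys [][]byte) error {
	sb := db.NewSyncBatch(store.NewIndexedBatch())

	var wg sync.WaitGroup
	for _, k := range keys {
		wg.Add(1)
		go func(k []byte) {
			defer wg.Done()
			_ = sb.Put(k, []byte("value")) // error handling omitted for brevity
		}(k)
	}
	wg.Wait()

	return sb.Write()
}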