Documentation ¶
Index ¶
- Variables
- func AssertTrue(b bool)
- func AssertTruef(b bool, format string, args ...interface{})
- func BloomBitsPerKey(numEntries int, fp float64) int
- func BytesToU16(b []byte) uint16
- func BytesToU32(b []byte) uint32
- func BytesToU32Slice(b []byte) []uint32
- func BytesToU64(b []byte) uint64
- func BytesToU64Slice(b []byte) []uint64
- func CalculateChecksum(data []byte, ct pb.Checksum_Algorithm) uint64
- func Check(err error)
- func Check2(_ interface{}, err error)
- func CompareKeys(key1, key2 []byte) int
- func Copy(a []byte) []byte
- func CreateSyncedFile(filename string, sync bool) (*os.File, error)
- func FixedDuration(d time.Duration) string
- func GenerateIV() ([]byte, error)
- func Hash(b []byte) uint32
- func IBytesToString(size uint64, precision int) string
- func KeyWithTs(key []byte, ts uint64) []byte
- func LSMSizeGet(enabled bool, key string) expvar.Var
- func LSMSizeSet(enabled bool, key string, val expvar.Var)
- func NewKV(alloc *z.Allocator) *pb.KV
- func NumBlockedPutsAdd(enabled bool, val int64)
- func NumBytesReadAdd(enabled bool, val int64)
- func NumBytesWrittenAdd(enabled bool, val int64)
- func NumCompactionTablesAdd(enabled bool, val int64)
- func NumGetsAdd(enabled bool, val int64)
- func NumLSMBloomHitsAdd(enabled bool, key string, val int64)
- func NumLSMGetsAdd(enabled bool, key string, val int64)
- func NumMemtableGetsAdd(enabled bool, val int64)
- func NumPutsAdd(enabled bool, val int64)
- func NumReadsAdd(enabled bool, val int64)
- func NumWritesAdd(enabled bool, val int64)
- func OpenExistingFile(filename string, flags Flags) (*os.File, error)
- func OpenSyncedFile(filename string, sync bool) (*os.File, error)
- func OpenTruncFile(filename string, sync bool) (*os.File, error)
- func ParseKey(key []byte) []byte
- func ParseTs(key []byte) uint64
- func PendingWritesSet(enabled bool, key string, val expvar.Var)
- func SafeCopy(a, src []byte) []byte
- func SameKey(src, dst []byte) bool
- func SetKeyTs(key []byte, ts uint64)
- func U16ToBytes(v uint16) []byte
- func U32SliceToBytes(u32s []uint32) []byte
- func U32ToBytes(v uint32) []byte
- func U64SliceToBytes(u64s []uint64) []byte
- func U64ToBytes(v uint64) []byte
- func VerifyChecksum(data []byte, expected *pb.Checksum) error
- func VlogSizeGet(enabled bool, key string) expvar.Var
- func VlogSizeSet(enabled bool, key string, val expvar.Var)
- func Wrap(err error, msg string) error
- func Wrapf(err error, format string, args ...interface{}) error
- func XORBlock(dst, src, key, iv []byte) error
- func XORBlockAllocate(src, key, iv []byte) ([]byte, error)
- func XORBlockStream(w io.Writer, src, key, iv []byte) error
- func ZSTDCompress(dst, src []byte, compressionLevel int) ([]byte, error)
- func ZSTDCompressBound(srcSize int) int
- func ZSTDDecompress(dst, src []byte) ([]byte, error)
- type Filter
- type Flags
- type Iterator
- type PageBuffer
- func (b *PageBuffer) Bytes() []byte
- func (b *PageBuffer) Len() int
- func (b *PageBuffer) NewReaderAt(offset int) *PageBufferReader
- func (b *PageBuffer) Truncate(n int)
- func (b *PageBuffer) Write(data []byte) (int, error)
- func (b *PageBuffer) WriteByte(data byte) error
- func (b *PageBuffer) WriteTo(w io.Writer) (int64, error)
- type PageBufferReader
- type RateMonitor
- type Slice
- type Throttle
- type ValueStruct
- type WaterMark
- func (w *WaterMark) Begin(index uint64)
- func (w *WaterMark) BeginMany(indices []uint64)
- func (w *WaterMark) Done(index uint64)
- func (w *WaterMark) DoneMany(indices []uint64)
- func (w *WaterMark) DoneUntil() uint64
- func (w *WaterMark) Init(closer *z.Closer)
- func (w *WaterMark) LastIndex() uint64
- func (w *WaterMark) SetDoneUntil(val uint64)
- func (w *WaterMark) WaitForMark(ctx context.Context, index uint64) error
Constants ¶
This section is empty.
Variables ¶
var (
	// ErrEOF indicates an end of file when trying to read from a memory mapped file
	// and encountering the end of slice.
	ErrEOF = errors.New("ErrEOF: End of file")

	// ErrCommitAfterFinish indicates that write batch commit was called after
	// finish.
	ErrCommitAfterFinish = errors.New("Batch commit not permitted after finish")
)
var (
	// CastagnoliCrcTable is a CRC32 polynomial table.
	CastagnoliCrcTable = crc32.MakeTable(crc32.Castagnoli)
)
var ErrChecksumMismatch = errors.New("checksum mismatch")
ErrChecksumMismatch is returned on a checksum mismatch.
var (
NoEventLog trace.EventLog = nilEventLog{}
)
Functions ¶
func AssertTrue ¶
func AssertTrue(b bool)
AssertTrue asserts that b is true. Otherwise, it logs a fatal error.
func AssertTruef ¶
AssertTruef is AssertTrue with extra info.
func BloomBitsPerKey ¶
BloomBitsPerKey returns the bits per key required by the Bloom filter, based on the false positive rate.
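A minimal sketch of how this might be called. The import path below is an assumption (this page does not show the module path), so adjust it to the badger version you use:

package main

import (
	"fmt"

	"github.com/dgraph-io/badger/v4/y" // assumed import path
)

func main() {
	// For 1000 entries and a ~1% false positive target, the Bloom filter
	// needs roughly 10 bits per key.
	fmt.Println(y.BloomBitsPerKey(1000, 0.01))
}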
func BytesToU16 ¶
BytesToU16 converts the given byte slice to uint16
func BytesToU32 ¶
BytesToU32 converts the given byte slice to uint32
func BytesToU32Slice ¶
BytesToU32Slice converts the given byte slice to uint32 slice
func BytesToU64 ¶
BytesToU64 converts the given byte slice to uint64
func BytesToU64Slice ¶
BytesToU64Slice converts the given byte slice to uint64 slice
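A round-trip sketch for the integer conversion helpers (import path assumed as in the sketch above):

package main

import (
	"fmt"

	"github.com/dgraph-io/badger/v4/y" // assumed import path
)

func main() {
	// Round-trip a uint64 through its byte representation.
	b := y.U64ToBytes(42)
	fmt.Println(y.BytesToU64(b)) // 42

	// Slices work the same way: encode a []uint32 and decode it back.
	raw := y.U32SliceToBytes([]uint32{1, 2, 3})
	fmt.Println(y.BytesToU32Slice(raw)) // [1 2 3]
}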
func CalculateChecksum ¶
func CalculateChecksum(data []byte, ct pb.Checksum_Algorithm) uint64
CalculateChecksum calculates checksum for data using ct checksum type.
func Check2 ¶
func Check2(_ interface{}, err error)
Check2 acts as a convenience wrapper around Check, using the 2nd argument as the error.
func CompareKeys ¶
CompareKeys compares the key parts without their timestamps and compares the timestamps only if the key parts are equal. This is needed because, with plain bytes.Compare, a<timestamp> would be sorted higher than aa<timestamp>. All keys passed in should include a timestamp.
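The sketch below illustrates the intended ordering, using KeyWithTs, ParseKey, and ParseTs from this package (import path assumed, as above):

package main

import (
	"fmt"

	"github.com/dgraph-io/badger/v4/y" // assumed import path
)

func main() {
	// Attach timestamps to two keys and compare them.
	a := y.KeyWithTs([]byte("a"), 10)
	aa := y.KeyWithTs([]byte("aa"), 10)

	// The key parts are compared first, so "a" sorts before "aa".
	fmt.Println(y.CompareKeys(a, aa) < 0) // true

	// The key part and timestamp can be recovered from the combined key.
	fmt.Println(string(y.ParseKey(a)), y.ParseTs(a)) // a 10
}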
func CreateSyncedFile ¶
CreateSyncedFile creates a new file (using O_EXCL) and returns an error if it already exists.
func FixedDuration ¶
FixedDuration returns a string representation of the given duration in hours, minutes, and seconds.
func IBytesToString ¶
IBytesToString converts a size in bytes to a human-readable format. The code is taken from the humanize library and changed to provide the value up to a custom decimal precision. For example, IBytesToString(12312412, 1) -> 11.7 MiB.
func NumBlockedPutsAdd ¶
func NumBytesReadAdd ¶
func NumBytesWrittenAdd ¶
func NumCompactionTablesAdd ¶
func NumGetsAdd ¶
func NumLSMBloomHitsAdd ¶
func NumLSMGetsAdd ¶
func NumMemtableGetsAdd ¶
func NumPutsAdd ¶
func NumReadsAdd ¶
func NumWritesAdd ¶
func OpenExistingFile ¶
OpenExistingFile opens an existing file and returns an error if it doesn't exist.
func OpenSyncedFile ¶
OpenSyncedFile creates the file if one doesn't exist.
func OpenTruncFile ¶
OpenTruncFile opens the file with O_RDWR | O_CREATE | O_TRUNC
func U32SliceToBytes ¶
U32SliceToBytes converts the given uint32 slice to a byte slice
func U64SliceToBytes ¶
U64SliceToBytes converts the given uint64 slice to a byte slice
func VerifyChecksum ¶
VerifyChecksum validates the checksum for the data against the given expected checksum.
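A sketch of computing and verifying a checksum. The pb.Checksum field names (Algo, Sum) and the CRC32C enum value are assumptions based on badger's protobuf definitions; they are not shown on this page:

package main

import (
	"fmt"

	"github.com/dgraph-io/badger/v4/pb" // assumed import path
	"github.com/dgraph-io/badger/v4/y"  // assumed import path
)

func main() {
	data := []byte("hello")

	// Compute a CRC32C checksum (enum value assumed from badger's pb package).
	sum := y.CalculateChecksum(data, pb.Checksum_CRC32C)

	// Verify it; a mismatch would surface ErrChecksumMismatch.
	err := y.VerifyChecksum(data, &pb.Checksum{Algo: pb.Checksum_CRC32C, Sum: sum})
	fmt.Println(err) // <nil>
}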
func XORBlock ¶
XORBlock encrypts the given data with AES, XORing it against a keystream derived from the key and IV. The same operation can be used for both encryption and decryption. The IV must be of AES block size.
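Because the operation is a plain XOR, applying it twice with the same key and IV restores the original data. A hedged sketch (import path assumed as above; use cryptographically random key material in practice):

package main

import (
	"fmt"

	"github.com/dgraph-io/badger/v4/y" // assumed import path
)

func main() {
	key := make([]byte, 32) // AES-256 key; use random bytes in practice
	iv, err := y.GenerateIV()
	if err != nil {
		panic(err)
	}

	// Encrypt: XORBlockAllocate returns a freshly allocated ciphertext.
	ct, err := y.XORBlockAllocate([]byte("secret"), key, iv)
	if err != nil {
		panic(err)
	}

	// Decrypt: the same call with the same key and IV restores the plaintext.
	pt, err := y.XORBlockAllocate(ct, key, iv)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(pt)) // secret
}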
func XORBlockAllocate ¶
func ZSTDCompress ¶
ZSTDCompress compresses a block using ZSTD algorithm.
func ZSTDCompressBound ¶
ZSTDCompressBound returns the worst-case size needed for a destination buffer. The klauspost ZSTD library does not provide an API for the compression bound, so this calculation is based on the DataDog ZSTD library. See https://pkg.go.dev/github.com/DataDog/zstd#CompressBound
func ZSTDDecompress ¶
ZSTDDecompress decompresses a block using ZSTD algorithm.
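A compression round trip, sized with ZSTDCompressBound (import path assumed; passing nil to ZSTDDecompress is assumed to let it allocate the output):

package main

import (
	"bytes"
	"fmt"

	"github.com/dgraph-io/badger/v4/y" // assumed import path
)

func main() {
	src := bytes.Repeat([]byte("badger "), 100)

	// Size the destination buffer with the worst-case bound, then compress.
	dst := make([]byte, y.ZSTDCompressBound(len(src)))
	compressed, err := y.ZSTDCompress(dst, src, 1)
	if err != nil {
		panic(err)
	}

	// Decompress; nil dst is assumed to make the function allocate the output.
	out, err := y.ZSTDDecompress(nil, compressed)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(out, src)) // true
}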
Types ¶
type Filter ¶
type Filter []byte
Filter is an encoded set of []byte keys.
func NewFilter ¶
NewFilter returns a new Bloom filter that encodes a set of []byte keys with the given number of bits per key, approximately.
A good bitsPerKey value is 10, which yields a filter with ~ 1% false positive rate.
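A sketch of building and querying a filter. The NewFilter parameters used here (a slice of y.Hash values plus bits per key) are an assumption, since this page does not show its signature:

package main

import (
	"fmt"

	"github.com/dgraph-io/badger/v4/y" // assumed import path
)

func main() {
	// Hash the keys first; NewFilter taking pre-hashed keys is assumed.
	keys := []uint32{y.Hash([]byte("foo")), y.Hash([]byte("bar"))}

	bitsPerKey := y.BloomBitsPerKey(len(keys), 0.01)
	filter := y.NewFilter(keys, bitsPerKey)

	fmt.Println(filter.MayContainKey([]byte("foo"))) // true
	fmt.Println(filter.MayContainKey([]byte("baz"))) // false, barring a false positive
}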
func (Filter) MayContain ¶
MayContain returns whether the filter may contain given key. False positives are possible, where it returns true for keys not in the original set.
func (Filter) MayContainKey ¶
type Iterator ¶
type Iterator interface {
	Next()
	Rewind()
	Seek(key []byte)
	Key() []byte
	Value() ValueStruct
	Valid() bool

	// All iterators should be closed so that file garbage collection works.
	Close() error
}
Iterator is an interface for a basic iterator.
type PageBuffer ¶
type PageBuffer struct {
// contains filtered or unexported fields
}
PageBuffer consists of many pages. A page is a wrapper over []byte. PageBuffer can act as a replacement for bytes.Buffer. Instead of having a single underlying buffer, it has multiple underlying buffers, and hence avoids the copying during reallocation that happens in bytes.Buffer. PageBuffer allocates memory in pages; once a page is full, it allocates a new page with double the size of the previous page. Its functions are not thread safe.
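A small usage sketch (import path assumed as in the earlier examples):

package main

import (
	"fmt"

	"github.com/dgraph-io/badger/v4/y" // assumed import path
)

func main() {
	// Start with a 64-byte first page; subsequent pages double in size.
	buf := y.NewPageBuffer(64)

	buf.Write([]byte("hello "))
	buf.Write([]byte("world"))

	fmt.Println(buf.Len())           // 11
	fmt.Println(string(buf.Bytes())) // hello world
}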
func NewPageBuffer ¶
func NewPageBuffer(pageSize int) *PageBuffer
NewPageBuffer returns a new PageBuffer with first page having size pageSize.
func (*PageBuffer) Bytes ¶
func (b *PageBuffer) Bytes() []byte
Bytes returns the whole buffer data as a single []byte.
func (*PageBuffer) NewReaderAt ¶
func (b *PageBuffer) NewReaderAt(offset int) *PageBufferReader
NewReaderAt returns a reader that starts reading from the given offset in the page buffer.
func (*PageBuffer) Truncate ¶
func (b *PageBuffer) Truncate(n int)
Truncate truncates PageBuffer to length n.
func (*PageBuffer) Write ¶
func (b *PageBuffer) Write(data []byte) (int, error)
Write writes data to the PageBuffer b. It returns the number of bytes written and any error encountered.
func (*PageBuffer) WriteByte ¶
func (b *PageBuffer) WriteByte(data byte) error
WriteByte writes the byte data to the PageBuffer and returns any error encountered.
type PageBufferReader ¶
type PageBufferReader struct {
// contains filtered or unexported fields
}
PageBufferReader is a reader for PageBuffer.
type RateMonitor ¶
type RateMonitor struct {
// contains filtered or unexported fields
}
func NewRateMonitor ¶
func NewRateMonitor(numSamples int) *RateMonitor
func (*RateMonitor) Capture ¶
func (rm *RateMonitor) Capture(sent uint64)
Capture captures the current number of sent bytes. This number should be monotonically increasing.
func (*RateMonitor) Rate ¶
func (rm *RateMonitor) Rate() uint64
Rate returns the average rate of transmission smoothed out by the number of samples.
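A sketch of feeding the monitor; the page does not specify the units of Rate, so treat the printed value as a smoothed average rather than a particular unit:

package main

import (
	"fmt"
	"time"

	"github.com/dgraph-io/badger/v4/y" // assumed import path
)

func main() {
	// Smooth the rate over the last 10 samples.
	rm := y.NewRateMonitor(10)

	var sent uint64
	for i := 0; i < 5; i++ {
		sent += 1 << 20 // pretend another MiB was sent
		rm.Capture(sent)
		time.Sleep(100 * time.Millisecond)
	}
	fmt.Println(rm.Rate()) // smoothed average rate
}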
type Slice ¶
type Slice struct {
// contains filtered or unexported fields
}
Slice holds a reusable buffer and reallocates only when a larger size than ever before is requested. One caveat: with n distinct sizes requested in random order, it will reallocate about log(n) times.
type Throttle ¶
type Throttle struct {
// contains filtered or unexported fields
}
Throttle allows a limited number of workers to run at a time. It also provides a mechanism to check for errors encountered by workers and wait for them to finish.
func NewThrottle ¶
NewThrottle creates a new throttle with a max number of workers.
func (*Throttle) Do ¶
Do should be called by workers before they start working. It blocks if the maximum number of workers are already working. If it detects an error from previously Done workers, it returns that error.
func (*Throttle) Done ¶
Done should be called by workers when they finish working. They can also pass the error status of work done.
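The sketch below shows the intended workflow. The Do/Done signatures and the Finish call used to wait for completion are assumptions; the page describes the behaviour but not the exact method signatures:

package main

import (
	"fmt"

	"github.com/dgraph-io/badger/v4/y" // assumed import path
)

func main() {
	// Allow at most 4 workers at a time (NewThrottle parameter assumed to be that maximum).
	t := y.NewThrottle(4)

	for i := 0; i < 16; i++ {
		if err := t.Do(); err != nil {
			panic(err) // an earlier worker reported a failure
		}
		go func() {
			// ... do the actual work here ...
			t.Done(nil) // report success, or pass the worker's error
		}()
	}

	// Finish (assumed API) waits for in-flight workers and returns any error.
	if err := t.Finish(); err != nil {
		panic(err)
	}
	fmt.Println("all workers finished")
}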
type ValueStruct ¶
type ValueStruct struct {
	Meta      byte
	UserMeta  byte
	ExpiresAt uint64
	Value     []byte

	Version uint64 // This field is not serialized. Only for internal usage.
}
ValueStruct represents the value info that can be associated with a key, but also the internal Meta field.
func (*ValueStruct) Decode ¶
func (v *ValueStruct) Decode(b []byte)
Decode uses the length of the slice to infer the length of the Value field.
func (*ValueStruct) Encode ¶
func (v *ValueStruct) Encode(b []byte) uint32
Encode expects a slice of length at least v.EncodedSize().
func (*ValueStruct) EncodeTo ¶
func (v *ValueStruct) EncodeTo(buf *bytes.Buffer)
EncodeTo should be kept in sync with the Encode function above. The reason this function exists is to avoid creating byte arrays per key-value pair in table/builder.go.
func (*ValueStruct) EncodedSize ¶
func (v *ValueStruct) EncodedSize() uint32
EncodedSize is the size of the ValueStruct when encoded
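An encode/decode round trip using EncodedSize to size the buffer (import path assumed as above):

package main

import (
	"fmt"

	"github.com/dgraph-io/badger/v4/y" // assumed import path
)

func main() {
	v := y.ValueStruct{
		Meta:  1,
		Value: []byte("payload"),
	}

	// Encode into a buffer of exactly EncodedSize bytes, then decode it back.
	buf := make([]byte, v.EncodedSize())
	v.Encode(buf)

	var out y.ValueStruct
	out.Decode(buf)
	fmt.Println(string(out.Value)) // payload
}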
type WaterMark ¶
type WaterMark struct {
	Name string
	// contains filtered or unexported fields
}
WaterMark is used to keep track of the minimum un-finished index. Typically, an index k becomes finished or "done" according to a WaterMark once Done(k) has been called
- as many times as Begin(k) has, AND
- a positive number of times.
An index may also become "done" by calling SetDoneUntil, provided the call is not intermingled with Begin/Done calls.
Since doneUntil and lastIndex addresses are passed to sync/atomic packages, we ensure that they are 64-bit aligned by putting them at the beginning of the structure.
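A sketch of the typical Begin/Done/WaitForMark flow. The z.Closer construction comes from the ristretto z package, whose import path and NewCloser signature are assumptions here:

package main

import (
	"context"
	"fmt"

	"github.com/dgraph-io/badger/v4/y" // assumed import path
	"github.com/dgraph-io/ristretto/z" // assumed import path for z.Closer
)

func main() {
	w := &y.WaterMark{Name: "example"}

	// Init starts the goroutine that processes Begin/Done marks; the closer
	// (capacity 1, assumed to match that goroutine) lets us shut it down.
	closer := z.NewCloser(1)
	defer closer.SignalAndWait()
	w.Init(closer)

	w.Begin(1)
	w.Begin(2)
	w.Done(1)
	w.Done(2)

	// Block until everything up to index 2 is done.
	if err := w.WaitForMark(context.Background(), 2); err != nil {
		panic(err)
	}
	fmt.Println(w.DoneUntil()) // 2
}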
func (*WaterMark) DoneUntil ¶
DoneUntil returns the maximum index that has the property that all indices less than or equal to it are done.
func (*WaterMark) SetDoneUntil ¶
SetDoneUntil sets the maximum index that has the property that all indices less than or equal to it are done.