Documentation ¶
Index ¶
- Variables
- func BitLockUint32(addr *uint32, mask uint32)
- func BitLockUint64(addr *uint64, mask uint64)
- func BitLockUintptr(addr *uintptr, mask uintptr)
- func BitTryLockUint32(addr *uint32, mask uint32) bool
- func BitTryLockUint64(addr *uint64, mask uint64) bool
- func BitTryLockUintptr(addr *uintptr, mask uintptr) bool
- func BitUnlockUint32(addr *uint32, mask uint32)
- func BitUnlockUint64(addr *uint64, mask uint64)
- func BitUnlockUintptr(addr *uintptr, mask uintptr)
- func BitUnlockWithStoreUint32(addr *uint32, mask uint32, value uint32)
- func BitUnlockWithStoreUint64(addr *uint64, mask uint64, value uint64)
- func BitUnlockWithStoreUintptr(addr *uintptr, mask uintptr, value uintptr)
- func Do(ctx context.Context, fn func() error) error
- func DoTimeout(timeout time.Duration, fn func() error) error
- func Parallel(ctx context.Context, n int, action func(context.Context, int) error) error
- func Repeat(ctx context.Context, interval time.Duration, ...) error
- func SeqLockRead[T any](sl *SeqLock, slot *SeqLockSlot[T]) (v T)
- func SeqLockRead32[T any](sl *SeqLock32, slot *SeqLockSlot[T]) (v T)
- func SeqLockWrite[T any](sl *SeqLock, slot *SeqLockSlot[T], v T)
- func SeqLockWrite32[T any](sl *SeqLock32, slot *SeqLockSlot[T], v T)
- func SeqLockWriteLocked[T any](sl *SeqLock, slot *SeqLockSlot[T], v T)
- func SeqLockWriteLocked32[T any](sl *SeqLock32, slot *SeqLockSlot[T], v T)
- func Wait(ctx context.Context, fn func()) error
- func WaitTimeout(timeout time.Duration, fn func()) error
- func WithAutoShrink() func(*MapConfig)
- func WithBuiltInHasher[T comparable]() func(*MapConfig)
- func WithCapacity(cap int) func(*MapConfig)
- func WithKeyHasher[K comparable](keyHash func(key K, seed uintptr) uintptr, intKey ...bool) func(*MapConfig)
- func WithKeyHasherUnsafe(hs HashFunc, intKey ...bool) func(*MapConfig)
- func WithValueEqual[V any](valEqual func(val, val2 V) bool) func(*MapConfig)
- func WithValueEqualUnsafe(eq EqualFunc) func(*MapConfig)
- type Barter
- type Epoch
- type EqualFunc
- type FairSemaphore
- type FlatMap
- func (m *FlatMap[K, V]) All() func(yield func(K, V) bool)
- func (m *FlatMap[K, V]) Clear()
- func (m *FlatMap[K, V]) Compute(key K, fn func(e *MapEntry[K, V])) (actual V, loaded bool)
- func (m *FlatMap[K, V]) ComputeRange(fn func(e *MapEntry[K, V]) bool, blockWriters ...bool)
- func (m *FlatMap[K, V]) Delete(key K)
- func (m *FlatMap[K, V]) Entries(blockWriters ...bool) func(yield func(e *MapEntry[K, V]) bool)
- func (m *FlatMap[K, V]) Load(key K) (value V, ok bool)
- func (m *FlatMap[K, V]) LoadAndDelete(key K) (previous V, loaded bool)
- func (m *FlatMap[K, V]) LoadAndUpdate(key K, value V) (previous V, loaded bool)
- func (m *FlatMap[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool)
- func (m *FlatMap[K, V]) LoadOrStoreFn(key K, valueFn func() V) (actual V, loaded bool)
- func (m *FlatMap[K, V]) Range(yield func(K, V) bool)
- func (m *FlatMap[K, V]) Rebuild(fn func(m *MapRebuild[K, V]), blockWriters ...bool)
- func (m *FlatMap[K, V]) Size() int
- func (m *FlatMap[K, V]) Store(key K, value V)
- func (m *FlatMap[K, V]) Swap(key K, value V) (previous V, loaded bool)
- func (m *FlatMap[K, V]) ToMap(limit ...int) map[K]V
- type Gate
- type HashFunc
- type IEqualFunc
- type IHashFunc
- type IIntKey
- type Latch
- type Map
- func (m *Map[K, V]) All() func(yield func(K, V) bool)
- func (m *Map[K, V]) Clear()
- func (m *Map[K, V]) CloneTo(clone *Map[K, V])
- func (m *Map[K, V]) CompareAndDelete(key K, old V) (deleted bool)
- func (m *Map[K, V]) CompareAndSwap(key K, old V, new V) (swapped bool)
- func (m *Map[K, V]) Compute(key K, fn func(e *MapEntry[K, V])) (actual V, loaded bool)
- func (m *Map[K, V]) ComputeRange(fn func(e *MapEntry[K, V]) bool, blockWriters ...bool)
- func (m *Map[K, V]) Delete(key K)
- func (m *Map[K, V]) Entries(blockWriters ...bool) func(yield func(e *MapEntry[K, V]) bool)
- func (m *Map[K, V]) Grow(sizeAdd int)
- func (m *Map[K, V]) Load(key K) (value V, ok bool)
- func (m *Map[K, V]) LoadAndDelete(key K) (value V, loaded bool)
- func (m *Map[K, V]) LoadAndUpdate(key K, value V) (previous V, loaded bool)
- func (m *Map[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool)
- func (m *Map[K, V]) LoadOrStoreFn(key K, newValueFn func() V) (actual V, loaded bool)
- func (m *Map[K, V]) Range(yield func(key K, value V) bool)
- func (m *Map[K, V]) Rebuild(fn func(m *MapRebuild[K, V]), blockWriters ...bool)
- func (m *Map[K, V]) Shrink()
- func (m *Map[K, V]) Size() int
- func (m *Map[K, V]) Store(key K, value V)
- func (m *Map[K, V]) Swap(key K, value V) (previous V, loaded bool)
- func (m *Map[K, V]) ToMap(limit ...int) map[K]V
- type MapConfig
- type MapEntry
- type MapRebuild
- func (m *MapRebuild[K, V]) All() func(yield func(K, V) bool)
- func (m *MapRebuild[K, V]) Compute(key K, fn func(e *MapEntry[K, V])) (actual V, loaded bool)
- func (m *MapRebuild[K, V]) Delete(key K)
- func (m *MapRebuild[K, V]) Load(key K) (value V, ok bool)
- func (m *MapRebuild[K, V]) LoadAndDelete(key K) (previous V, loaded bool)
- func (m *MapRebuild[K, V]) LoadAndUpdate(key K, value V) (previous V, loaded bool)
- func (m *MapRebuild[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool)
- func (m *MapRebuild[K, V]) LoadOrStoreFn(key K, valueFn func() V) (actual V, loaded bool)
- func (m *MapRebuild[K, V]) Range(yield func(key K, value V) bool)
- func (m *MapRebuild[K, V]) Size() int
- func (m *MapRebuild[K, V]) Store(key K, value V)
- func (m *MapRebuild[K, V]) Swap(key K, value V) (previous V, loaded bool)
- func (m *MapRebuild[K, V]) ToMap(limit ...int) map[K]V
- type OnceGroup
- type OnceGroupResult
- type PLocal
- type PLocalCounter
- type PLocalCounter64
- type Phaser
- type RWLock
- type RWLock32
- type RWLockGroup
- type Rally
- type SeqLock
- func (l *SeqLock) BeginRead() (s1 uintptr, ok bool)
- func (l *SeqLock) BeginWrite() (s1 uintptr, ok bool)
- func (l *SeqLock) BeginWriteLocked()
- func (l *SeqLock) ClearLocked()
- func (l *SeqLock) EndRead(s1 uintptr) (ok bool)
- func (l *SeqLock) EndWrite(s1 uintptr)
- func (l *SeqLock) EndWriteLocked()
- func (l *SeqLock) Ready() bool
- type SeqLock32
- func (l *SeqLock32) BeginRead() (s1 uint32, ok bool)
- func (l *SeqLock32) BeginWrite() (s1 uint32, ok bool)
- func (l *SeqLock32) BeginWriteLocked()
- func (l *SeqLock32) ClearLocked()
- func (l *SeqLock32) EndRead(s1 uint32) (ok bool)
- func (l *SeqLock32) EndWrite(s1 uint32)
- func (l *SeqLock32) EndWriteLocked()
- func (l *SeqLock32) Ready() bool
- type SeqLockSlot
- type TicketLock
- type TicketLockGroup
- type WaitGroup
- type WorkerPool
Constants ¶
This section is empty.
Variables ¶
var ErrPoolClosed = errors.New("cc: pool closed")
Functions ¶
func BitLockUint32 ¶
BitLockUint32 acquires a bit-lock on the given address using the specified bit mask.
func BitLockUint64 ¶
BitLockUint64 acquires a bit-lock on the given address using the specified bit mask. It assumes the lock is held if (value & mask) != 0. It spins until the lock can be acquired.
This allows embedding a lock bit into an existing uint64 field (e.g., metadata) to save memory and avoid false sharing.
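For illustration, a minimal sketch of that pattern, assuming the top bit of a metadata word is reserved as the lock bit (the field layout and mask here are illustrative, not part of the API):

const metaLockBit = uint64(1) << 63 // assumed: top bit reserved as the lock bit

type node struct {
    meta uint64 // lock bit packed together with other metadata
}

func (n *node) mutate() {
    BitLockUint64(&n.meta, metaLockBit)   // spins until the bit is acquired
    // ... mutate state guarded by the bit-lock ...
    BitUnlockUint64(&n.meta, metaLockBit) // clears the bit, preserving the other bits
}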
func BitLockUintptr ¶
BitLockUintptr acquires a bit-lock on the given address using the specified bit mask.
func BitTryLockUint32 ¶ added in v1.0.8
BitTryLockUint32 attempts to acquire a bit-lock without blocking. Returns true if the lock was acquired, false otherwise.
func BitTryLockUint64 ¶ added in v1.0.8
BitTryLockUint64 attempts to acquire a bit-lock without blocking. Returns true if the lock was acquired, false otherwise.
func BitTryLockUintptr ¶ added in v1.0.8
BitTryLockUintptr attempts to acquire a bit-lock without blocking. Returns true if the lock was acquired, false otherwise.
func BitUnlockUint32 ¶
BitUnlockUint32 releases the bit-lock by clearing the specified bit mask.
func BitUnlockUint64 ¶
BitUnlockUint64 releases the bit-lock by clearing the specified bit mask. It preserves other bits in the value.
func BitUnlockUintptr ¶
BitUnlockUintptr releases the bit-lock by clearing the specified bit mask.
func BitUnlockWithStoreUint32 ¶
BitUnlockWithStoreUint32 releases the bit-lock and updates the value.
func BitUnlockWithStoreUint64 ¶
BitUnlockWithStoreUint64 releases the bit-lock and simultaneously updates the value. It sets the value to (value &^ mask), effectively clearing the lock bit while storing new data in the other bits.
func BitUnlockWithStoreUintptr ¶
BitUnlockWithStoreUintptr releases the bit-lock and updates the value.
func Do ¶ added in v1.0.2
Do executes fn and waits for it to return or for the context to be canceled. It returns fn's error if it completes, or ctx.Err() if canceled.
Usage:
err := cc.Do(ctx, func() error {
return expensiveOp()
})
func DoTimeout ¶ added in v1.0.2
DoTimeout executes fn and waits for it to return or for the timeout to elapse. It returns fn's error if it completes, or context.DeadlineExceeded if timed out.
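Usage (a sketch mirroring the Do example above; expensiveOp is a placeholder):

err := cc.DoTimeout(2*time.Second, func() error {
    return expensiveOp() // placeholder for a long-running call
})
if errors.Is(err, context.DeadlineExceeded) {
    // timed out
}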
func Parallel ¶ added in v1.0.2
Parallel executes n copies of the action concurrently. It blocks until all actions complete or the context is canceled. Returns the first error encountered, if any.
If n <= 0, it defaults to GOMAXPROCS(0).
Usage:
cc.Parallel(ctx, 10, func(ctx context.Context, i int) error {
// process item i
return nil
})
func Repeat ¶ added in v1.0.2
Repeat executes the action periodically with the given interval until the context is canceled. It stops immediately if the action returns an error. The first execution happens immediately.
Usage:
cc.Repeat(ctx, time.Second, func(ctx context.Context) error {
// do something periodically
return nil
})
func SeqLockRead ¶
func SeqLockRead[T any](sl *SeqLock, slot *SeqLockSlot[T]) (v T)
SeqLockRead atomically loads a tear-free snapshot using the external SeqLock. Spins until seq is even and unchanged across two reads; copies the value within the stable window.
func SeqLockRead32 ¶
func SeqLockRead32[T any](sl *SeqLock32, slot *SeqLockSlot[T]) (v T)
SeqLockRead32 atomically loads a tear-free snapshot using the external SeqLock32. Spins until seq is even and unchanged across two reads; copies the value within the stable window.
func SeqLockWrite ¶
func SeqLockWrite[T any](sl *SeqLock, slot *SeqLockSlot[T], v T)
SeqLockWrite publishes v guarded by the external SeqLock. Enters odd, copies v, then exits to even to publish a stable snapshot.
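A sketch of pairing the helpers with a SeqLock and a SeqLockSlot; the payload type is illustrative and the zero values are assumed to be ready to use:

type config struct{ a, b int } // illustrative payload

var (
    sl   SeqLock
    slot SeqLockSlot[config]
)

// Writer: enters odd, copies the value, exits to even.
SeqLockWrite(&sl, &slot, config{a: 1, b: 2})

// Reader: retries until it observes a stable, tear-free snapshot.
c := SeqLockRead(&sl, &slot)
_ = c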
func SeqLockWrite32 ¶
func SeqLockWrite32[T any](sl *SeqLock32, slot *SeqLockSlot[T], v T)
SeqLockWrite32 publishes v guarded by the external SeqLock32. Enters odd, copies v, then exits to even to publish a stable snapshot.
func SeqLockWriteLocked ¶
func SeqLockWriteLocked[T any](sl *SeqLock, slot *SeqLockSlot[T], v T)
SeqLockWriteLocked publishes v assuming an external lock is held.
func SeqLockWriteLocked32 ¶
func SeqLockWriteLocked32[T any](sl *SeqLock32, slot *SeqLockSlot[T], v T)
SeqLockWriteLocked32 publishes v assuming an external lock is held.
func Wait ¶ added in v1.0.2
Wait executes fn and waits for it to return or for the context to be canceled. It is useful for making blocking calls (that don't support Context) cancellable.
Usage:
cc.Wait(ctx, wg.Wait)
func WaitTimeout ¶ added in v1.0.2
WaitTimeout executes fn and waits for it to return or for the timeout to elapse. It returns context.DeadlineExceeded if the timeout occurs.
Usage:
if err := cc.WaitTimeout(time.Second, wg.Wait); err != nil {
// timed out
}
func WithAutoShrink ¶
func WithAutoShrink() func(*MapConfig)
WithAutoShrink configures automatic map shrinking when the load factor falls below the threshold (default: 1/shrinkFraction). Disabled by default to prioritize performance.
func WithBuiltInHasher ¶
func WithBuiltInHasher[T comparable]() func(*MapConfig)
WithBuiltInHasher returns a MapConfig option that explicitly sets the built-in hash function for the specified type.
This option is useful when you want to explicitly use Go's built-in hasher instead of any optimized variants. It ensures that the map uses the same hashing strategy as Go's native map implementation.
Performance characteristics:
- Provides consistent performance across all key sizes
- Uses Go's optimized internal hash functions
- Guaranteed compatibility with future Go versions
- May be slower than specialized hashers for specific use cases
Usage:
m := NewMap[string, int](WithBuiltInHasher[string]())
func WithCapacity ¶
WithCapacity configures a new Map instance with enough capacity to hold cap entries. The capacity is treated as the minimal capacity, meaning that the underlying hash table will never shrink to a smaller capacity. If cap is zero or negative, the value is ignored.
func WithKeyHasher ¶
func WithKeyHasher[K comparable](
    keyHash func(key K, seed uintptr) uintptr,
    intKey ...bool,
) func(*MapConfig)
WithKeyHasher sets a custom key hashing function for the map. This allows you to optimize hash distribution for specific key types or implement custom hashing strategies.
Parameters:
- keyHash: custom hash function that takes a key and seed, returns hash value. Pass nil to use the default built-in hasher
- intKey: optional hash distribution strategy. Pass true to use linear distribution (optimal for sequential keys); if false or omitted, the distribution is auto-detected.
Usage:
// Basic custom hasher
m := NewMap[string, int](WithKeyHasher(myCustomHashFunc))

// Custom hasher with linear distribution for sequential keys
m := NewMap[int, string](WithKeyHasher(myIntHasher, true))

// Custom hasher with shift distribution for random keys
m := NewMap[string, int](WithKeyHasher(myStringHasher, false))
Use cases:
- Optimize hash distribution for specific data patterns
- Implement case-insensitive string hashing
- Custom hashing for complex key types
- Performance tuning for known key distributions
- Combine with distribution strategies for optimal performance
func WithKeyHasherUnsafe ¶
WithKeyHasherUnsafe sets a low-level unsafe key hashing function. This is the high-performance version that operates directly on memory pointers. Use this when you need maximum performance and are comfortable with unsafe operations.
Parameters:
- hs: unsafe hash function that operates on raw unsafe.Pointer. The pointer points to the key data in memory. Pass nil to use the default built-in hasher
- intKey: optional hash distribution strategy. Pass true to use linear distribution (optimal for sequential keys); if false or omitted, the distribution is auto-detected.
Usage:
// Basic unsafe hasher
unsafeHasher := func(ptr unsafe.Pointer, seed uintptr) uintptr {
// Cast ptr to your key type and implement hashing
key := *(*string)(ptr)
return uintptr(len(key)) // example hash
}
m := NewMap[string, int](WithKeyHasherUnsafe(unsafeHasher))
// Unsafe hasher with specific distribution strategy
m := NewMap[int, string](WithKeyHasherUnsafe(fastIntHasher, true))
Notes:
- You must correctly cast unsafe.Pointer to the actual key type
- Incorrect pointer operations will cause crashes or memory corruption
- Only use if you understand Go's unsafe package
- Distribution strategies still apply to the hash output
func WithValueEqual ¶
WithValueEqual sets a custom value equality function for the map. This is essential for CompareAndSwap and CompareAndDelete operations when working with non-comparable value types or custom equality logic.
Parameters:
- valEqual: custom equality function that compares two values. Pass nil to use the default built-in comparison (for comparable types).
Usage:
// For custom structs with specific equality logic
equalFn := func(a, b MyStruct) bool {
    return a.ID == b.ID && a.Name == b.Name
}
m := NewMap[string, MyStruct](WithValueEqual(equalFn))
Use cases:
- Custom equality for structs (compare specific fields)
- Case-insensitive string comparison
- Floating-point comparison with tolerance
- Deep equality for slices/maps
- Required for non-comparable types (slices, maps, functions)
func WithValueEqualUnsafe ¶
WithValueEqualUnsafe sets a low-level unsafe value equality function. This is the high-performance version that operates directly on memory pointers. Use this when you need maximum performance and are comfortable with unsafe operations.
Parameters:
- eq: unsafe equality function that operates on raw unsafe.Pointer. Both pointers point to value data in memory. Pass nil to use the default built-in comparison.
Usage:
unsafeEqual := func(ptr, other unsafe.Pointer) bool {
// Cast pointers to your value type and implement comparison
val1 := *(*MyStruct)(ptr)
val2 := *(*MyStruct)(other)
return val1.ID == val2.ID // example comparison
}
m := NewMap[string, MyStruct](WithValueEqualUnsafe(unsafeEqual))
Notes:
- You must correctly cast unsafe.Pointer to the actual value type
- Both pointers must point to valid memory of the same type
- Incorrect pointer operations will cause crashes or memory corruption
- Only use if you understand Go's unsafe package
Types ¶
type Barter ¶
type Barter[T any] struct {
    // contains filtered or unexported fields
}
Barter (Exchanger) is a synchronization point where two goroutines swap values.
The first goroutine arriving at the exchange point waits for the second. When the second arrives, they exchange values and continue.
Types:
- T: The type of value being exchanged.
Usage:
b := NewBarter[string]()
// G1
v := b.Exchange("from G1")
// G2
v := b.Exchange("from G2")
Implementation: Uses a "Slot" pointer.
- nil: Empty.
- Non-nil: A waiter is present with their value.
type Epoch ¶
type Epoch struct {
// contains filtered or unexported fields
}
Epoch represents a monotonically increasing counter that supports "wait for target" semantics. It is effective for coordinating phases, versions, or milestones.
Features:
- Add(n): Advances the epoch by n.
- WaitAtLeast(n): Blocks until the epoch reaches at least n.
This primitive avoids the "Thundering Herd" problem common in condition variables by managing an ordered list of waiters and only waking those whose requirements are met.
Example:
e := Epoch{}
go func() { e.WaitAtLeast(5); print("Reached 5!") }()
e.Add(5) // Wakes the waiter
Limitations: - Max value: 2^64 - 1. Panics on overflow.
func (*Epoch) WaitAtLeast ¶
WaitAtLeast blocks until the epoch reaches at least the target value.
type FairSemaphore ¶
type FairSemaphore struct {
// contains filtered or unexported fields
}
FairSemaphore is a counting semaphore that guarantees FIFO (First-In-First-Out) order.
Standard Semaphores (like golang.org/x/sync/semaphore) generally optimize for throughput and may allow barging (new waiters stealing permits), which can lead to starvation or unfairness in specific workloads.
FairSemaphore ensures that permits are strictly assigned to waiters in the order of arrival.
Implementation: It uses a mutex-protected linked list of waiters and a TicketLock for the mutex itself to ensure even the internal lock acquisition is fair.
Limitations: - Max permits: 2^63 - 1. Panics on overflow.
func NewFairSemaphore ¶
func NewFairSemaphore(permits int64) *FairSemaphore
NewFairSemaphore creates a FairSemaphore with the given number of permits.
func (*FairSemaphore) Acquire ¶
func (s *FairSemaphore) Acquire(n int64)
Acquire blocks until n permits are available, then takes them.
func (*FairSemaphore) Release ¶
func (s *FairSemaphore) Release(n int64)
Release returns n permits and wakes waiting goroutines.
func (*FairSemaphore) TryAcquire ¶
func (s *FairSemaphore) TryAcquire(n int64) bool
TryAcquire attempts to acquire n permits without blocking.
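A short usage sketch limiting concurrent work to a fixed number of permits, served in arrival order:

sem := NewFairSemaphore(3) // at most 3 permits outstanding, granted FIFO

for i := 0; i < 10; i++ {
    go func(i int) {
        sem.Acquire(1)
        defer sem.Release(1)
        // ... do work for item i ...
    }(i)
}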
type FlatMap ¶
type FlatMap[K comparable, V any] struct {
    // contains filtered or unexported fields
}
FlatMap implements a flat hash map using seqlock. Table and key/value pairs are stored inline (flat). Value size is not limited by the CPU word size. Readers use per-bucket seqlock: even sequence means stable; writers flip the bucket sequence to odd during mutation, then even again.
Concurrency model:
- Readers: read s1=seq (must be even), then meta/entries, then s2=seq; if s1!=s2 or s1 is odd, retry the bucket.
- Writers: take the root-bucket lock (opLock in meta), then on the target bucket: seq++, apply changes, seq++, finally release the root lock.
- Resize: copy under the root-bucket lock using the same discipline.
Notes:
- Reuses Map constants and compile-time bucket sizing (entriesPerBucket).
- Buckets are packed without padding for cache-friendly layout.
func NewFlatMap ¶
func NewFlatMap[K comparable, V any](
    options ...func(*MapConfig),
) *FlatMap[K, V]
NewFlatMap creates a new seqlock-based flat hash map.
Highlights:
- Optimistic reads via per-bucket seqlock; brief spinning under contention.
- Writes coordinate via a lightweight root-bucket lock and per-bucket seqlock fencing.
- Parallel resize (grow/shrink) with cooperative copying by readers and writers.
Configuration options (aligned with Map):
- WithCapacity(cap): pre-allocate capacity to reduce early resizes.
- WithAutoShrink(): enable automatic shrinking when load drops.
- WithKeyHasher / WithKeyHasherUnsafe / WithBuiltInHasher: custom or built-in hashing.
Example:
m := NewFlatMap[string,int](WithCapacity(1024), WithAutoShrink())
m.Store("a", 1)
v, ok := m.Load("a")
func (*FlatMap[K, V]) All ¶
All returns an iterator function for use with range-over-func. It provides the same functionality as Range but in iterator form.
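For example, with range-over-func (Go 1.23+):

for k, v := range m.All() {
    fmt.Println(k, v)
}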
func (*FlatMap[K, V]) Clear ¶
func (m *FlatMap[K, V]) Clear()
Clear clears all key-value pairs from the map.
func (*FlatMap[K, V]) Compute ¶
Compute performs a compute-style, atomic update for the given key.
Concurrency model:
- Acquires the root-bucket lock to serialize write/resize cooperation.
- If a resize is observed, cooperates to finish copying and restarts on the latest table.
Callback signature:
fn(e *MapEntry[K, V])
- Use e.Loaded() and e.Value() to inspect the current state
- Use e.Update(newV) to upsert; use e.Delete() to remove
Parameters:
- key: The key to process
- fn: Callback function (called regardless of value existence)
Returns:
- actual: the current value in the map after the operation
- loaded: true if the key existed before the operation
func (*FlatMap[K, V]) ComputeRange ¶
ComputeRange iterates all entries and applies a user callback.
Callback signature:
fn(e *MapEntry[K, V]) bool
- e.Update(newV): update the entry to newV
- e.Delete(): delete the entry
- default (no op): keep the entry unchanged
- return true to continue; return false to stop iteration
Concurrency & consistency:
- Cooperates with concurrent grow/shrink; if a resize is detected, it helps complete copying, then continues on the latest table.
- Holds the root-bucket lock while processing its bucket chain to coordinate with writers/resize operations.
Parameters:
- fn: user function applied to each key-value pair.
- blockWriters: optional flag (default false). If true, concurrent writers are blocked during iteration; resize operations are always exclusive.
Recommendation: keep fn lightweight to reduce lock hold time.
func (*FlatMap[K, V]) Delete ¶
func (m *FlatMap[K, V]) Delete(key K)
Delete deletes the value for a key.
func (*FlatMap[K, V]) Entries ¶
Entries returns an iterator function for use with range-over-func. It provides the same functionality as ComputeRange but in iterator form.
func (*FlatMap[K, V]) Load ¶
Load retrieves the value for a key.
- Per-bucket seqlock read; an even and stable sequence yields a consistent snapshot.
- Short spinning on observed writes (odd seq) or instability.
- Provides stable latency under high concurrency.
func (*FlatMap[K, V]) LoadAndDelete ¶
LoadAndDelete deletes the value for a key, returning the previous value. The loaded result reports whether the key was present.
func (*FlatMap[K, V]) LoadAndUpdate ¶
LoadAndUpdate updates the value for key if it exists, returning the previous value. The loaded result reports whether the key was present.
func (*FlatMap[K, V]) LoadOrStore ¶
LoadOrStore returns the existing value for the key if present. Otherwise, it stores and returns the given value. The loaded result is true if the value was loaded, false if stored.
func (*FlatMap[K, V]) LoadOrStoreFn ¶
LoadOrStoreFn loads the value for a key if present. Otherwise, it stores and returns the value returned by valueFn. The loaded result is true if the value was loaded, false if stored.
func (*FlatMap[K, V]) Range ¶
Range iterates all entries using per-bucket seqlock reads.
- Copies a consistent snapshot from each bucket when the sequence is stable; otherwise briefly spins and retries.
- Yields outside of locks to minimize contention.
- Returning false from the callback stops iteration early.
func (*FlatMap[K, V]) Rebuild ¶ added in v1.0.3
func (m *FlatMap[K, V]) Rebuild(
    fn func(m *MapRebuild[K, V]),
    blockWriters ...bool,
)
Rebuild performs a map rebuild operation with the given function. The function is executed with exclusive access (or shared based on blockWriters) to the map.
Parameters:
- fn: The function to execute during rebuild. It receives a MapRebuild instance.
- blockWriters: Optional. If true, concurrent writers are blocked. Default is false (allow writers).
Notes:
- You must use the `m *MapRebuild[K, V]` parameter passed to `fn` for processing. Do not call methods on the Map instance directly, as this may cause deadlocks.
func (*FlatMap[K, V]) Size ¶
Size returns the number of key-value pairs in the map. This operation sums counters across all size stripes for an approximate count.
func (*FlatMap[K, V]) Store ¶
func (m *FlatMap[K, V]) Store(key K, value V)
Store sets the value for a key.
type Gate ¶
type Gate struct {
// contains filtered or unexported fields
}
Gate is a synchronization primitive that can be manually opened and closed, and supports a "pulse" operation to wake up current waiters without keeping the gate open.
It conceptually models a physical gate:
- Open: The gate is open. Waiters pass immediately.
- Close: The gate is closed. Waiters block until Open or Pulse.
- Pulse: The gate remains closed, but all currently blocked waiters are allowed to pass. Future waiters will block.
It is zero-value usable (starts Closed).
Limitations: - Max waiters: 2^32 - 1. Panics on overflow.
func (*Gate) Close ¶
func (e *Gate) Close()
Close signals the gate (sets the state to Closed). Future calls to Wait() will block.
func (*Gate) Open ¶
func (e *Gate) Open()
Open signals the gate (sets state to Open). All current waiters are woken up. Future calls to Wait() return immediately until Close() is called.
func (*Gate) Pulse ¶
func (e *Gate) Pulse()
Pulse performs a "single cycle" broadcast. It wakes up all currently waiting goroutines, but ensures the gate remains (or becomes) Closed for any subsequent callers.
This is useful for broadcasting a signal (like a condition variable Broadcast) without permanently changing the state to Open.
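A sketch of the three operations together; Wait is the blocking call referenced above:

var g Gate // zero value starts Closed

go func() {
    g.Wait() // blocks until Open or Pulse
    // ... proceed ...
}()

g.Pulse() // wakes the current waiters; the gate stays Closed
g.Open()  // from now on, Wait returns immediately
g.Close() // subsequent Wait calls block again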
type HashFunc ¶
HashFunc is the function to hash a value of type K.
func GetBuiltInHasher ¶
func GetBuiltInHasher[T comparable]() HashFunc
GetBuiltInHasher returns Go's built-in hash function for the specified type. This function provides direct access to the same hash function that Go's built-in map uses internally, ensuring optimal performance and compatibility.
The returned hash function is type-specific and optimized for the given comparable type T. It uses Go's internal type representation to access the most efficient hashing implementation available.
Usage:
hashFunc := GetBuiltInHasher[string]()
m := NewMap[string, int](WithKeyHasherUnsafe(GetBuiltInHasher[string]()))
type IEqualFunc ¶
IEqualFunc defines a custom equality comparison interface for value types. Value types implementing this interface can provide their own equality logic, serving as an alternative to WithValueEqual for type-specific comparison.
This interface is automatically detected during Map initialization and is essential for non-comparable value types or custom equality semantics. It takes precedence over the default built-in comparison but is overridden by explicit WithValueEqual configuration.
Usage:
type UserProfile struct {
    Name string
    Tags []string // slice makes this non-comparable
}

func (u *UserProfile) EqualFunc(other UserProfile) bool {
    return u.Name == other.Name && slices.Equal(u.Tags, other.Tags)
}
type IHashFunc ¶
IHashFunc defines a custom hash function interface for key types. Key types implementing this interface can provide their own hash computation, serving as an alternative to WithKeyHasher for type-specific optimization.
This interface is automatically detected during Map initialization and takes precedence over the default built-in hasher but is overridden by explicit WithKeyHasher configuration.
Usage:
type UserID struct {
ID int64
Tenant string
}
func (u *UserID) HashFunc(seed uintptr) uintptr {
return uintptr(u.ID) ^ seed
}
type IIntKey ¶
type IIntKey interface {
IntKey() bool
}
IIntKey is an optional interface for key types to signal a hash distribution optimization strategy.
- true: use linear distribution (optimal for sequential keys)
- false: auto-detection is used (also the default when the interface is not implemented)
type Latch ¶
type Latch struct {
// contains filtered or unexported fields
}
Latch is a synchronization primitive for "wait for completion" (One-Way Door). It supports multiple waiters. Once Open() is called, all current and future Wait() calls return immediately.
Limitations: - Max waiters: 2^31 - 1. Panics on overflow.
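A minimal sketch using Open and Wait as described above:

var l Latch

go func() {
    l.Wait() // blocks until Open is called
    // ... the one-time setup is now complete ...
}()

// ... perform one-time setup ...
l.Open() // releases all current and future waiters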
type Map ¶
type Map[K comparable, V any] struct {
    // contains filtered or unexported fields
}
Map is a high-performance concurrent map implementation that is fully compatible with sync.Map API and significantly outperforms sync.Map in most scenarios.
Core advantages:
- Lock-free reads, fine-grained locking for writes
- Zero-value ready with lazy initialization
- Custom hash and value comparison function support
- Rich batch operations and functional extensions
Usage recommendations:
- Direct declaration: var m Map[string, int]
- Pre-allocate capacity: NewMap(WithCapacity(1000))
Notes:
- Map must not be copied after first use.
func NewMap ¶
func NewMap[K comparable, V any](
    options ...func(*MapConfig),
) *Map[K, V]
NewMap creates a new Map instance. Direct initialization is also supported.
Parameters:
- options: configuration options (WithCapacity, WithKeyHasher, etc.)
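Basic usage with the sync.Map-compatible API:

m := NewMap[string, int](WithCapacity(1024))
m.Store("a", 1)
if v, ok := m.Load("a"); ok {
    fmt.Println(v)
}
m.Delete("a")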
func (*Map[K, V]) CloneTo ¶
CloneTo copies all key-value pairs from this map to the destination map. The destination map is cleared before copying.
Parameters:
- clone: The destination map to copy into. Must not be nil.
Notes:
This operation is not atomic with respect to concurrent modifications.
The destination map will have the same configuration as the source.
The destination map is cleared before copying to ensure a clean state.
func (*Map[K, V]) CompareAndDelete ¶
CompareAndDelete atomically deletes an existing entry if its value matches the expected value, compatible with `sync.Map`.
func (*Map[K, V]) CompareAndSwap ¶
CompareAndSwap atomically replaces an existing value with a new value if the existing value matches the expected value, compatible with `sync.Map`.
func (*Map[K, V]) Compute ¶
Compute performs a compute-style, atomic update for the given key.
Concurrency model:
- Acquires the root-bucket lock to serialize write/resize cooperation.
- If a resize is observed, cooperates to finish copying and restarts on the latest table.
Callback signature:
fn(e *MapEntry[K, V])
- Use e.Loaded() and e.Value() to inspect the current state
- Use e.Update(newV) to upsert; use e.Delete() to remove
Parameters:
- key: The key to process
- fn: Callback function (called regardless of value existence)
Returns:
- actual: The value as returned by the callback.
- loaded: True if the key existed before the callback, false otherwise.
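An illustrative counter upsert via the callback (the counter semantics here are the example's, not prescribed by the API):

actual, loaded := m.Compute("visits", func(e *MapEntry[string, int]) {
    if e.Loaded() {
        e.Update(e.Value() + 1) // increment the existing value
    } else {
        e.Update(1) // first store for this key
    }
})
_, _ = actual, loaded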
func (*Map[K, V]) ComputeRange ¶
ComputeRange iterates all entries and applies a user callback.
Callback signature:
fn(e *MapEntry[K, V]) bool
- e.Update(newV): update the entry to newV
- e.Delete(): delete the entry
- default (no op): keep the entry unchanged
- return true to continue; return false to stop iteration
Concurrency & consistency:
- Cooperates with concurrent grow/shrink; if a resize is detected, it helps complete copying, then continues on the latest table.
- Holds the root-bucket lock while processing its bucket chain to coordinate with writers/resize operations.
Parameters:
- fn: user function applied to each key-value pair.
- blockWriters: optional flag (default false). If true, concurrent writers are blocked during iteration; resize operations are always exclusive.
Recommendation: keep fn lightweight to reduce lock hold time.
func (*Map[K, V]) Delete ¶
func (m *Map[K, V]) Delete(key K)
Delete removes a key-value pair, compatible with `sync.Map`.
func (*Map[K, V]) Entries ¶
Entries returns an iterator function for use with range-over-func. It provides the same functionality as ComputeRange but in iterator form.
func (*Map[K, V]) Grow ¶
Grow increases the map's capacity by sizeAdd entries to accommodate future growth. This pre-allocation avoids rehashing when adding new entries up to the new capacity.
Parameters:
- sizeAdd specifies the number of additional entries the map should be able to hold.
Notes:
- If the current remaining capacity already exceeds sizeAdd, no growth will be triggered.
func (*Map[K, V]) LoadAndDelete ¶
LoadAndDelete retrieves the value for a key and deletes it from the map, compatible with `sync.Map`.
func (*Map[K, V]) LoadAndUpdate ¶
LoadAndUpdate retrieves the value associated with the given key and updates it if the key exists.
Parameters:
- key: The key to look up in the map.
- value: The new value to set if the key exists.
Returns:
- previous: The loaded value associated with the key (if it existed), otherwise a zero-value of V.
- loaded: True if the key existed and the value was updated, false otherwise.
func (*Map[K, V]) LoadOrStore ¶
LoadOrStore retrieves an existing value or stores a new one if the key doesn't exist, compatible with `sync.Map`.
func (*Map[K, V]) LoadOrStoreFn ¶
LoadOrStoreFn returns the existing value for the key if present. Otherwise, it tries to compute the value using the provided function and, if successful, stores and returns the computed value. The loaded result is true if the value was loaded, or false if computed.
This call locks a hash table bucket while the compute function is executed. It means that modifications on other entries in the bucket will be blocked until the newValueFn executes. Consider this when the function includes long-running operations.
func (*Map[K, V]) Rebuild ¶ added in v1.0.3
func (m *Map[K, V]) Rebuild(
    fn func(m *MapRebuild[K, V]),
    blockWriters ...bool,
)
Rebuild performs a map rebuild operation with the given function. The function is executed with exclusive access (or shared based on blockWriters) to the map.
Parameters:
- fn: The function to execute during rebuild. It receives a MapRebuild instance.
- blockWriters: Optional. If true, concurrent writers are blocked. Default is false (allow writers).
Notes:
- You must use the `m *MapRebuild[K, V]` parameter passed to `fn` for processing. Do not call methods on the Map instance directly, as this may cause deadlocks.
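A sketch that prunes entries during a rebuild, using only the provided MapRebuild handle as required above:

m.Rebuild(func(r *MapRebuild[string, int]) {
    var stale []string
    r.Range(func(k string, v int) bool {
        if v == 0 {
            stale = append(stale, k)
        }
        return true
    })
    for _, k := range stale {
        r.Delete(k) // drop zero-valued entries
    }
})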
func (*Map[K, V]) Shrink ¶
func (m *Map[K, V]) Shrink()
Shrink reduces the capacity to fit the current size. It always executes regardless of WithAutoShrink.
func (*Map[K, V]) Size ¶
Size returns the number of key-value pairs in the map. This is an O(1) operation.
func (*Map[K, V]) Store ¶
func (m *Map[K, V]) Store(key K, value V)
Store inserts or updates a key-value pair, compatible with `sync.Map`.
type MapConfig ¶
type MapConfig struct {
// contains filtered or unexported fields
}
MapConfig defines configurable options for Map initialization. This structure contains all the configuration parameters that can be used to customize the behavior and performance characteristics of a Map instance.
type MapEntry ¶ added in v1.0.3
type MapEntry[K comparable, V any] struct {
    // contains filtered or unexported fields
}
MapEntry is a temporary view of a map entry. It can be updated or deleted during the callback.
WARNING:
- Only valid inside the callback; do NOT keep, return, or let it escape outside.
- Not safe across goroutines.
func (*MapEntry[K, V]) Delete ¶ added in v1.0.3
func (e *MapEntry[K, V]) Delete()
Delete marks the entry for removal and clears its value.
func (*MapEntry[K, V]) Key ¶ added in v1.0.3
func (e *MapEntry[K, V]) Key() K
Key returns the entry's key.
type MapRebuild ¶ added in v1.0.3
type MapRebuild[K comparable, V any] struct {
    // contains filtered or unexported fields
}
MapRebuild provides access to map operations during a rebuild. It wraps either a Map or a FlatMap, delegating operations to the underlying map. All operations on this struct ignore the rebuild hint (assuming the caller holds the rebuild lock).
WARNING:
- Only valid inside the callback; do NOT keep, return, or let it escape outside.
- Not safe across goroutines.
func (*MapRebuild[K, V]) All ¶ added in v1.0.3
func (m *MapRebuild[K, V]) All() func(yield func(K, V) bool)
All returns an iterator function for use with range-over-func. It provides the same functionality as Range but in iterator form.
func (*MapRebuild[K, V]) Compute ¶ added in v1.0.3
func (m *MapRebuild[K, V]) Compute(
    key K,
    fn func(e *MapEntry[K, V]),
) (actual V, loaded bool)
Compute executes a function for a specific key. It is safe to call inside a rebuild callback.
func (*MapRebuild[K, V]) Delete ¶ added in v1.0.3
func (m *MapRebuild[K, V]) Delete(key K)
Delete deletes the value for a key.
func (*MapRebuild[K, V]) Load ¶ added in v1.0.3
func (m *MapRebuild[K, V]) Load(key K) (value V, ok bool)
Load returns the value stored in the map for a key, or the zero value if no value is present. The ok result indicates whether value was found in the map.
func (*MapRebuild[K, V]) LoadAndDelete ¶ added in v1.0.3
func (m *MapRebuild[K, V]) LoadAndDelete(key K) (previous V, loaded bool)
LoadAndDelete deletes the value for a key, returning the previous value if any. The loaded result reports whether the key was present.
func (*MapRebuild[K, V]) LoadAndUpdate ¶ added in v1.0.3
func (m *MapRebuild[K, V]) LoadAndUpdate(key K, value V) (previous V, loaded bool)
LoadAndUpdate updates the value for key if it exists, returning the previous value. The loaded result reports whether the key was present.
func (*MapRebuild[K, V]) LoadOrStore ¶ added in v1.0.3
func (m *MapRebuild[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool)
LoadOrStore returns the existing value for the key if present. Otherwise, it stores and returns the given value. The loaded result is true if the value was loaded, false if stored.
func (*MapRebuild[K, V]) LoadOrStoreFn ¶ added in v1.0.3
func (m *MapRebuild[K, V]) LoadOrStoreFn(key K, valueFn func() V) (actual V, loaded bool)
LoadOrStoreFn loads the value for a key if present. Otherwise, it stores and returns the value returned by valueFn. The loaded result is true if the value was loaded, false if stored.
func (*MapRebuild[K, V]) Range ¶ added in v1.0.3
func (m *MapRebuild[K, V]) Range(yield func(key K, value V) bool)
Range calls f sequentially for each key and value present in the map. If f returns false, range stops the iteration.
func (*MapRebuild[K, V]) Size ¶ added in v1.0.3
func (m *MapRebuild[K, V]) Size() int
Size returns the number of key-value pairs in the map. This operation sums counters across all size stripes for an approximate count.
func (*MapRebuild[K, V]) Store ¶ added in v1.0.3
func (m *MapRebuild[K, V]) Store(key K, value V)
Store sets the value for a key.
func (*MapRebuild[K, V]) Swap ¶ added in v1.0.3
func (m *MapRebuild[K, V]) Swap(key K, value V) (previous V, loaded bool)
Swap swaps the value for a key and returns the previous value if any. The loaded result reports whether the key was present.
func (*MapRebuild[K, V]) ToMap ¶ added in v1.0.3
func (m *MapRebuild[K, V]) ToMap(limit ...int) map[K]V
ToMap collects up to limit entries into a map[K]V; limit < 0 means no limit.
type OnceGroup ¶
type OnceGroup[K comparable, V any] struct {
    // contains filtered or unexported fields
}
OnceGroup represents a class of work and forms a namespace in which units of work can be executed with duplicate suppression.
func (*OnceGroup[K, V]) Do ¶
Do executes and returns the results of the given function, making sure that only one execution is in-flight for a given key at a time. If a duplicate comes in, the duplicate caller waits for the original to complete and receives the same results. The return value shared indicates whether v was given to multiple callers.
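A sketch of duplicate suppression for a keyed fetch; fetchUser is a placeholder, and the (v, err, shared) return order follows the singleflight convention this description implies:

var g OnceGroup[string, string]

v, err, shared := g.Do("user:42", func() (string, error) {
    return fetchUser("42") // placeholder; runs once per in-flight key
})
_, _, _ = v, err, shared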
func (*OnceGroup[K, V]) DoChan ¶
func (g *OnceGroup[K, V]) DoChan(
    key K,
    fn func() (V, error),
) <-chan OnceGroupResult[V]
DoChan is like Do but returns a channel that will receive the results when they are ready.
The returned channel will not be closed.
func (*OnceGroup[K, V]) Forget ¶
func (g *OnceGroup[K, V]) Forget(key K)
Forget tells the group to stop tracking a key. Future calls to Do for this key will invoke the function rather than waiting for an existing call to complete.
func (*OnceGroup[K, V]) ForgetUnshared ¶
ForgetUnshared deletes the key only if no duplicates joined.
type OnceGroupResult ¶
OnceGroupResult holds the results of Do, so they can be passed on a channel.
type PLocal ¶ added in v1.0.5
type PLocal[T any] struct {
    // contains filtered or unexported fields
}
PLocal implements a P-local (Processor-local) storage mechanism. It provides a mechanism to access P-specific data shards to minimize lock contention in high-concurrency scenarios. T is the type of the stored data. The zero value of PLocal is ready to use.
func NewPLocal ¶ added in v1.0.5
NewPLocal creates a new PLocal instance with the given provider function. provider is called ONLY ONCE per P to create the initial value when a new slot is allocated (lazy initialization).
DESIGN RATIONALE: We use `func() T` (factory) instead of `func(*T)` (initializer) for ergonomics. We do NOT call this function on every Get() for two reasons:
- **Performance**: PLocal is designed for ~1ns latency. An extra function call, even if inlined, adds the overhead of checking for nil or executing logic.
- **Statefulness**: Many use cases (counters, RNGs, pre-allocated buffers) rely on state persisting across calls on the same P. Forcing a reset would break these patterns.
If you need a "clean" value every time, you should reset it explicitly:
val := p.Get()
val.Reset() // User-defined reset logic, strictly explicit and inlinable.
func (*PLocal[T]) Clear ¶ added in v1.0.5
func (p *PLocal[T]) Clear()
Clear discards all P-local data. Subsequent accesses via With will re-initialize the data using the init function. This is useful for resetting the state or when the data is no longer needed.
func (*PLocal[T]) ForEach ¶ added in v1.0.5
func (p *PLocal[T]) ForEach(fn func(*T))
ForEach iterates over all P-local slots and calls fn on each of them. The iteration is performed on the shards that existed when the call started. Note: If T is not thread-safe, accessing the value in fn while it is being modified in With (on another P) constitutes a data race. For counters, consider using atomic types (e.g. atomic.Int64) as T.
func (*PLocal[T]) Get ¶ added in v1.0.6
func (p *PLocal[T]) Get() *T
Get returns the pointer to the P-local value.
CAUTION: The returned pointer is strictly P-local. The caller MUST ensure that they do not use this pointer after yielding the processor (e.g., blocking, syscall, channel send/recv, runtime.Gosched, etc.).
Performance Note: This method returns the existing slot for the current P. It does NOT assert ownership or reset the value. This enables using PLocal for:
- Accumulators (e.g., metrics counters)
- Persistent buffers (reusing capacity)
- Thread-local RNG states
Usage pattern:
val := p.Get()
*val++ // Use immediately
If you need the value to persist across yields, use With or copy the value. This method is designed for extreme low-latency scenarios where the function call overhead of With is significant.
func (*PLocal[T]) With ¶ added in v1.0.5
func (p *PLocal[T]) With(fn func(*T))
With executes fn with the P-local value for the current P. The goroutine is pinned to the P during the execution of fn to ensure the value remains local to that P.
Notes: fn must not block or call functions that might yield the processor (e.g., I/O, channel operations, select, runtime.Gosched). Doing so while pinned can delay garbage collection and cause other system-wide pauses.
type PLocalCounter ¶ added in v1.0.5
PLocalCounter is a specialized version of PLocal for uintptr counters. The zero value of PLocalCounter is ready to use.
func NewPLocalCounter ¶ added in v1.0.5
func NewPLocalCounter() *PLocalCounter
NewPLocalCounter creates a new PLocalCounter instance. It pre-allocates shards to avoid allocation on the first access. The zero value of PLocalCounter is also usable.
func (*PLocalCounter) Add ¶ added in v1.0.5
func (p *PLocalCounter) Add(delta uintptr)
Add adds delta to the P-local counter. This is faster than With() because it avoids the callback overhead.
func (*PLocalCounter) Reset ¶ added in v1.0.7
func (p *PLocalCounter) Reset() uintptr
Reset atomically reads the current value and resets all shards to zero. This is useful for periodic metric collection cycles.
func (*PLocalCounter) Value ¶ added in v1.0.5
func (p *PLocalCounter) Value() uintptr
Value returns the aggregated value of the P-local counter across all shards. Note: The result is an approximation if concurrent Adds are happening.
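A usage sketch for a periodically flushed metric:

var hits PLocalCounter // zero value is ready to use

// Hot path: per-P increment with minimal contention.
hits.Add(1)

// Collection cycle: read the aggregate and reset all shards.
total := hits.Reset()
_ = total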
type PLocalCounter64 ¶ added in v1.0.5
PLocalCounter64 is a specialized version of PLocal for uint64 counters. The zero value of PLocalCounter64 is ready to use.
func NewPLocalCounter64 ¶ added in v1.0.5
func NewPLocalCounter64() *PLocalCounter64
NewPLocalCounter64 creates a new PLocalCounter64 instance. It pre-allocates shards to avoid allocation on the first access. The zero value of PLocalCounter64 is also usable.
func (*PLocalCounter64) Add ¶ added in v1.0.5
func (p *PLocalCounter64) Add(delta uint64)
Add adds delta to the P-local counter. This is faster than With() because it avoids the callback overhead.
func (*PLocalCounter64) Reset ¶ added in v1.0.7
func (p *PLocalCounter64) Reset() uint64
Reset atomically reads the current value and resets all shards to zero. This is useful for periodic metric collection cycles.
func (*PLocalCounter64) Value ¶ added in v1.0.5
func (p *PLocalCounter64) Value() uint64
Value returns the aggregated value of the P-local counter across all shards. Note: The result is an approximation if concurrent Adds are happening.
type Phaser ¶
type Phaser struct {
// contains filtered or unexported fields
}
Phaser is a reusable synchronization barrier, similar to [java.util.concurrent.Phaser]. It supports dynamic registration of parties and synchronization in phases.
Concepts:
- Phase: An integer generation number.
- Parties: Number of registered participants.
- Arrive: A party signals it reached the barrier.
- Await: A party waits for others to arrive.
Key Differences from Rally:
- Dynamic: Parties can be added/removed (Register/Deregister) at any time.
- Split-Phase: Arrive() and AwaitAdvance() are separate, allowing "Arrive and Continue" patterns.
Size: 24 bytes (state + TicketLock + Epoch).
Limitations: - Max parties: 65535. Panics on overflow.
func (*Phaser) Arrive ¶
Arrive signals that the current party has reached the barrier. It returns the current phase number. It does NOT wait for others.
func (*Phaser) ArriveAndAwaitAdvance ¶
ArriveAndAwaitAdvance is equivalent to Arrive() followed by AwaitAdvance().
func (*Phaser) ArriveAndDeregister ¶
ArriveAndDeregister signals arrival and removes the party.
func (*Phaser) AwaitAdvance ¶
AwaitAdvance waits for the phase to advance from the given 'phase'. If the current phase is already greater than 'phase', it returns immediately. Returns the new phase number.
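A sketch of the phased-worker pattern using the methods above. Register is mentioned in the type description but its signature is not shown here, so the call below is an assumption; zero-value readiness is likewise assumed:

var ph Phaser
for i := 0; i < 3; i++ {
    ph.Register() // assumed: adds one party (see Key Differences above)
    go func() {
        for step := 0; step < 5; step++ {
            // ... work for this phase ...
            ph.ArriveAndAwaitAdvance() // block until all registered parties arrive
        }
        ph.ArriveAndDeregister() // leave the phaser when finished
    }()
}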
type RWLock ¶
type RWLock uintptr
RWLock is a spin-based Reader-Writer lock backed by a uintptr. It is writer-preferred to prevent reader starvation.
func (*RWLock) Lock ¶
func (l *RWLock) Lock()
Lock acquires the write lock. It spins until the lock is free.
func (*RWLock) Ready ¶
Ready reports whether the lock has been unlocked at least once and is currently free. It is useful for checking if the protected data has been initialized.
func (*RWLock) TryLock ¶ added in v1.0.7
TryLock attempts to acquire the write lock without blocking. Returns true if the lock was acquired, false otherwise.
type RWLock32 ¶
type RWLock32 uint32
RWLock32 is a spin-based Reader-Writer lock backed by a uint32. It is writer-preferred to prevent reader starvation.
func (*RWLock32) Ready ¶
Ready reports whether the lock has been unlocked at least once and is currently free. It is useful for checking if the protected data has been initialized.
func (*RWLock32) TryLock ¶ added in v1.0.7
TryLock attempts to acquire the write lock without blocking. Returns true if the lock was acquired, false otherwise.
type RWLockGroup ¶
type RWLockGroup[K comparable] struct {
    // contains filtered or unexported fields
}
RWLockGroup allows shared Reader-Writer locking on arbitrary keys. It matches the interface of LockGroup but supports RLock/RUnlock.
Features:
- RLock/RUnlock for shared read access.
- Lock/Unlock for exclusive write access.
- Infinite Keys & Auto-Cleanup.
Usage:
var group RWLockGroup[string]
// Readers
group.RLock("config")
read(config)
group.RUnlock("config")
// Writer
group.Lock("config")
write(config)
group.Unlock("config")
func (*RWLockGroup[K]) Lock ¶
func (g *RWLockGroup[K]) Lock(k K)
Lock acquires an exclusive write lock for the given key.
func (*RWLockGroup[K]) RLock ¶
func (g *RWLockGroup[K]) RLock(k K)
RLock acquires a shared read lock for the given key.
func (*RWLockGroup[K]) RUnlock ¶
func (g *RWLockGroup[K]) RUnlock(k K)
RUnlock releases the read lock for the given key.
func (*RWLockGroup[K]) Unlock ¶
func (g *RWLockGroup[K]) Unlock(k K)
Unlock releases the write lock for the given key.
type Rally ¶
type Rally struct {
// contains filtered or unexported fields
}
Rally is a reusable synchronization primitive that allows a set of goroutines to wait for each other to reach a common barrier point.
It is useful in programs involving a fixed-size party of goroutines that must occasionally wait for each other. The barrier is called "cyclic" because it can be reused after the waiting goroutines are released.
It is zero-value usable.
Size: 16 bytes (8 byte state + 2*4 byte sema).
Limitations: - Max parties: 2^32 - 1. Panics on overflow.
func (*Rally) Meet ¶
Meet waits until 'parties' number of callers have called Meet on this barrier.
It panics if parties <= 0.
If the current goroutine is the last to arrive, it wakes up all other waiting goroutines and resets the barrier for the next generation.
Returns the arrival index (0 to parties-1), where parties-1 indicates the caller was the last to arrive (the one who tripped the barrier).
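A sketch with a fixed party of three goroutines meeting at each round; the signature Meet(parties int) int is inferred from the description above:

var r Rally // zero value usable

for i := 0; i < 3; i++ {
    go func() {
        for round := 0; round < 10; round++ {
            // ... per-round work ...
            idx := r.Meet(3) // blocks until all 3 parties have arrived
            if idx == 2 {
                // this caller was the last to arrive and tripped the barrier
            }
        }
    }()
}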
type SeqLock ¶
type SeqLock RWLock
SeqLock implements a sequence lock (seqlock) for tear-free reads of a single slot.
It is optimized for read-heavy scenarios where readers should not block writers, and consistent snapshots are required.
Usage:
Pair with SeqLockSlot[T]. Use Read/Write helpers or the Locked variants when an external lock is held.
func (*SeqLock) BeginRead ¶
BeginRead starts a read transaction. It returns the current sequence number and true if the lock is free (sequence is even).
func (*SeqLock) BeginWrite ¶
BeginWrite starts a write transaction by incrementing the sequence to odd. It returns the previous sequence number and true if successful.
func (*SeqLock) BeginWriteLocked ¶
func (l *SeqLock) BeginWriteLocked()
BeginWriteLocked starts a write transaction assuming an external lock is held.
func (*SeqLock) ClearLocked ¶
func (l *SeqLock) ClearLocked()
ClearLocked resets the lock state. Only safe when an external lock is held.
func (*SeqLock) EndRead ¶
EndRead finishes a read transaction. It returns true if the sequence number matches s1, indicating a consistent snapshot.
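The manual read protocol these methods enable looks roughly like this; the SeqLockRead/SeqLockWrite helpers wrap the same loop, and the payload here is illustrative:

var sl SeqLock
var data [2]uint64 // protected payload (illustrative)

readSnapshot := func() [2]uint64 {
    for {
        s1, ok := sl.BeginRead()
        if !ok {
            continue // writer in progress (odd sequence); retry
        }
        snapshot := data // copy within the stable window
        if sl.EndRead(s1) {
            return snapshot // sequence unchanged: snapshot is consistent
        }
        // sequence changed during the copy; retry
    }
}
_ = readSnapshot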
func (*SeqLock) EndWrite ¶
EndWrite finishes a write transaction by incrementing the sequence to even.
func (*SeqLock) EndWriteLocked ¶
func (l *SeqLock) EndWriteLocked()
EndWriteLocked finishes a write transaction assuming an external lock is held.
type SeqLock32 ¶
type SeqLock32 RWLock32
SeqLock32 is a 32-bit sequence lock.
func (*SeqLock32) BeginRead ¶
BeginRead starts a read transaction. It returns the current sequence number and true if the lock is free (sequence is even).
func (*SeqLock32) BeginWrite ¶
BeginWrite starts a write transaction by incrementing the sequence to odd. It returns the previous sequence number and true if successful.
func (*SeqLock32) BeginWriteLocked ¶
func (l *SeqLock32) BeginWriteLocked()
BeginWriteLocked starts a write transaction assuming an external lock is held.
func (*SeqLock32) ClearLocked ¶
func (l *SeqLock32) ClearLocked()
ClearLocked resets the lock state. Only safe when an external lock is held.
func (*SeqLock32) EndRead ¶
EndRead finishes a read transaction. It returns true if the sequence number matches s1, indicating a consistent snapshot.
func (*SeqLock32) EndWrite ¶
EndWrite finishes a write transaction by incrementing the sequence to even.
func (*SeqLock32) EndWriteLocked ¶
func (l *SeqLock32) EndWriteLocked()
EndWriteLocked finishes a write transaction assuming an external lock is held.
type SeqLockSlot ¶
type SeqLockSlot[T any] struct {
    // contains filtered or unexported fields
}
SeqLockSlot holds a value protected by a SeqLock.
Copy semantics:
- Reads: On TSO (amd64/386/s390x), plain typed copies are sufficient. On weak models, uintptr-sized atomic loads are used inside the stable window when alignment/size permit; otherwise a typed copy is used.
- Writes: Always plain typed assignment to preserve GC write barriers and avoid publishing pointers via uintptr/unsafe stores.
Safety:
- ReadUnfenced/WriteUnfenced must be called within a SeqLock transaction or under an external lock.
func (*SeqLockSlot[T]) Ptr ¶
func (slot *SeqLockSlot[T]) Ptr() *T
Ptr returns the address of the inline buffer. Mutations through this pointer must be guarded by an external lock or odd/even sequence; otherwise readers may observe torn data.
func (*SeqLockSlot[T]) ReadUnfenced ¶
func (slot *SeqLockSlot[T]) ReadUnfenced() (v T)
ReadUnfenced copies the value from the slot. It uses atomic loads on weak memory models if possible to avoid reordering. Must be called within a SeqLock read transaction or under a lock.
func (*SeqLockSlot[T]) WriteUnfenced ¶
func (slot *SeqLockSlot[T]) WriteUnfenced(v T)
WriteUnfenced writes v into buf via a plain typed assignment. This preserves Go's GC write barriers and avoids publishing pointers through uintptr/unsafe atomic stores. Must be called under a lock or within a seqlock-stable window.
type TicketLock ¶
type TicketLock struct {
// contains filtered or unexported fields
}
TicketLock is a fair, FIFO (First-In-First-Out) spin-lock.
Unlike sync.Mutex, which allows "barging" (newcomers can steal the lock), TicketLock guarantees that goroutines acquire the lock in the exact order they called Lock().
Implementation: It uses the classic "ticket" algorithm.
- Lock(): Takes a ticket number. Spins/Sleeps until `serving` == `my_ticket`.
- Unlock(): Increments `serving`, allowing the next ticket holder to proceed.
Trade-offs:
- Pros: Strict fairness, preventing starvation; ideal for latency-sensitive, high-contention scenarios where tail latency matters.
- Cons: Under high contention with long critical sections, it can suffer from "lock convoy" if a thread goes to sleep while holding the ticket. However, this implementation uses a hybrid strategy (spin + adaptive delay) to mitigate pure busy-wait issues.
It is recommended for protecting very small critical sections (referencing a few fields) where fairness is strictly required.
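A minimal sketch protecting a small critical section; Unlock is the counterpart described in the algorithm above:

type counter struct {
    mu TicketLock
    n  int
}

func (c *counter) inc() {
    c.mu.Lock()   // acquired strictly in arrival order
    c.n++
    c.mu.Unlock() // lets the next ticket holder proceed
}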
func (*TicketLock) Lock ¶
func (m *TicketLock) Lock()
Lock acquires the lock. Blocks until the lock is available.
func (*TicketLock) TryLock ¶ added in v1.0.7
func (m *TicketLock) TryLock() bool
TryLock attempts to acquire the lock without blocking. Returns true if the lock was acquired, false otherwise. Note: Unlike Lock(), TryLock() does not take a ticket, so it may break FIFO ordering if used alongside Lock().
type TicketLockGroup ¶
type TicketLockGroup[K comparable] struct {
    // contains filtered or unexported fields
}
TicketLockGroup allows locking on arbitrary keys (string, int, struct, etc.). It dynamically manages a set of locks associated with values.
Features:
- Infinite Keys: No need to pre-allocate locks.
- Auto-Cleanup: Locks are automatically removed from memory when unlocked and no one else is waiting.
- Low Overhead: Uses a sharded map structure internally for concurrent access.
Usage:
var group TicketLockGroup[string]
group.Lock("user-123")
// Critical section for user-123
group.Unlock("user-123")
Implementation Note: It uses reference counting to safely delete entries.
func (*TicketLockGroup[K]) Lock ¶
func (g *TicketLockGroup[K]) Lock(k K)
Lock acquires the lock for the given key.
func (*TicketLockGroup[K]) Unlock ¶
func (g *TicketLockGroup[K]) Unlock(k K)
Unlock releases the lock for the given key.
type WaitGroup ¶ added in v1.0.1
type WaitGroup struct {
// contains filtered or unexported fields
}
WaitGroup is a reusable WaitGroup. Unlike sync.WaitGroup, it can be reused immediately after the previous batch of tasks is done, without waiting for all Wait() calls to return.
Limitations:
- Max tasks: 2^32 - 1 (approx 4.29 billion)
- Max waiters: 2^16 - 1 (65535). For higher waiter counts (broadcast), use Gate.
- Generations: 2^16 (65536, then wraps)
func (*WaitGroup) Add ¶ added in v1.0.1
Add adds delta, which may be negative, to the WaitGroup counter. If the counter becomes zero, all goroutines blocked on Wait are released. If the counter goes negative, Add panics.
func (*WaitGroup) Count ¶ added in v1.0.1
Count returns the current number of active tasks. Note: This is an approximate value as it can change concurrently.
func (*WaitGroup) Done ¶ added in v1.0.1
func (wg *WaitGroup) Done()
Done decrements the WaitGroup counter by one.
func (*WaitGroup) Go ¶ added in v1.0.1
func (wg *WaitGroup) Go(f func())
Go calls f in a new goroutine and adds that task to the WaitGroup. When f returns, the task is removed from the WaitGroup.
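Typical usage, analogous to sync.WaitGroup but reusable across batches; Wait is the blocking call referenced in the type description:

var wg WaitGroup

for i := 0; i < 8; i++ {
    wg.Go(func() {
        // ... task body ...
    })
}
wg.Wait() // returns once the batch is done; the WaitGroup can be reused immediately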
func (*WaitGroup) TryWait ¶ added in v1.0.1
TryWait returns true if the WaitGroup counter is zero. It does not block.
type WorkerPool ¶ added in v1.0.7
type WorkerPool struct {
    // OnPanic is called when a worker panics.
    // If nil, the panic is ignored.
    OnPanic func(r any)
    // contains filtered or unexported fields
}
WorkerPool is a fixed-size worker pool built on the reusable WaitGroup, with robust worker logic (worker panics are recovered and reported via OnPanic).
func NewWorkerPool ¶ added in v1.0.7
func NewWorkerPool(workers int, queueSize int) *WorkerPool
func (*WorkerPool) Close ¶ added in v1.0.7
func (p *WorkerPool) Close()
Close gracefully shuts down the pool. It waits for all submitted tasks to complete.
func (*WorkerPool) Pending ¶ added in v1.0.7
func (p *WorkerPool) Pending() int
Pending returns the number of tasks waiting in the queue.
func (*WorkerPool) Running ¶ added in v1.0.7
func (p *WorkerPool) Running() int
Running returns the number of currently active workers.
func (*WorkerPool) Submit ¶ added in v1.0.7
func (p *WorkerPool) Submit(task func()) error
func (*WorkerPool) Wait ¶ added in v1.0.7
func (p *WorkerPool) Wait()
Wait blocks until all submitted tasks complete, without closing the pool. This is useful for batch synchronization while keeping the pool alive.
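A usage sketch tying the pool lifecycle together; once the pool is closed, Submit is expected to return an error (presumably ErrPoolClosed):

p := NewWorkerPool(4, 128) // 4 workers, queue capacity 128
p.OnPanic = func(r any) {
    log.Println("worker panic:", r)
}

for i := 0; i < 100; i++ {
    if err := p.Submit(func() {
        // ... task body ...
    }); err != nil {
        // pool already closed
    }
}

p.Wait()  // wait for the submitted batch without closing the pool
p.Close() // graceful shutdown; waits for remaining tasks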