Use presized Map/MapOf argument as the minimal map capacity
puzpuzpuz committed Feb 25, 2024
1 parent 1386eb4 commit f57c45d
Showing 6 changed files with 131 additions and 51 deletions.
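In short, the size hint passed to NewMapPresized / NewMapOfPresized is now remembered as the map's minimum table length, so shrinking and Clear never take the table below the presized capacity. Below is a minimal sketch of the intended effect, not part of the commit; it relies only on public v3 calls that appear in the diffs (NewMapPresized, Store, Delete) plus Clear, which the mapClearHint branch services.

package main

import (
	"strconv"

	"github.com/puzpuzpuz/xsync/v3"
)

func main() {
	// Presize for roughly 100k entries; after this commit the hint also
	// acts as a floor for the table length.
	m := xsync.NewMapPresized(100_000)

	// Grow well past the hint, then delete everything again.
	for i := 0; i < 200_000; i++ {
		m.Store(strconv.Itoa(i), i)
	}
	for i := 0; i < 200_000; i++ {
		m.Delete(strconv.Itoa(i))
	}

	// Before this change the table could shrink all the way to the global
	// minimum of 32 buckets; now it stops at the length implied by the
	// 100_000 hint. Clear likewise resets to the presized length rather
	// than the default minimum.
	m.Clear()
}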
14 changes: 14 additions & 0 deletions example_test.go
@@ -1,6 +1,7 @@
package xsync_test

import (
"errors"
"fmt"

"github.com/puzpuzpuz/xsync/v3"
@@ -54,4 +55,17 @@ func ExampleMapOf_Compute() {
})
// v: 84, ok: false
fmt.Printf("v: %v, ok: %v\n", v, ok)

// Propagate an error from the compute function to the outer scope.
var err error
v, ok = counts.Compute(42, func(oldValue int, loaded bool) (newValue int, delete bool) {
if oldValue == 42 {
err = errors.New("something went wrong")
return 0, true // no need to create a key/value pair
}
newValue = 0
delete = false
return
})
fmt.Printf("err: %v\n", err)
}
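The error-propagation idiom above can be folded into a reusable helper. This is a hedged sketch, not part of the commit: computeWithErr is a hypothetical name, and the only xsync call it relies on is the MapOf.Compute signature shown in this diff.

package xsync_test

import (
	"github.com/puzpuzpuz/xsync/v3"
)

// computeWithErr wraps MapOf.Compute so the callback can report an error,
// which is returned alongside the usual (value, ok) results.
func computeWithErr[K comparable, V any](
	m *xsync.MapOf[K, V],
	key K,
	fn func(old V, loaded bool) (newValue V, del bool, err error),
) (V, bool, error) {
	var err error
	v, ok := m.Compute(key, func(old V, loaded bool) (V, bool) {
		newValue, del, e := fn(old, loaded)
		if e != nil {
			err = e
			return old, true // on error, drop the entry, mirroring the example above
		}
		return newValue, del
	})
	return v, ok, err
}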
10 changes: 5 additions & 5 deletions export_test.go
@@ -1,11 +1,11 @@
package xsync

const (
-EntriesPerMapBucket = entriesPerMapBucket
-MapLoadFactor = mapLoadFactor
-MinMapTableLen = minMapTableLen
-MinMapTableCap = minMapTableCap
-MaxMapCounterLen = maxMapCounterLen
+EntriesPerMapBucket = entriesPerMapBucket
+MapLoadFactor = mapLoadFactor
+DefaultMinMapTableLen = defaultMinMapTableLen
+DefaultMinMapTableCap = defaultMinMapTableLen * entriesPerMapBucket
+MaxMapCounterLen = maxMapCounterLen
)

type (
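For reference, here is the arithmetic behind the renamed capacity constant, assuming entriesPerMapBucket is 3, which is consistent with the 1536 expectation asserted in map_test.go further down; treat the numbers as an illustration, not as part of the commit.

// DefaultMinMapTableCap = defaultMinMapTableLen * entriesPerMapBucket
//                       = 32 * 3
//                       = 96
//
// For NewMapPresized(1000): 1000 / 3 = 333 buckets requested, rounded up to
// the next power of two, 512, for a capacity of 512 * 3 = 1536, matching
// TestNewMapPresized below.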
41 changes: 25 additions & 16 deletions map.go
@@ -30,10 +30,8 @@ const (
// key-value pairs (this is a soft limit)
mapLoadFactor = 0.75
// minimal table size, i.e. number of buckets; thus, minimal map
-// capacity can be calculated as entriesPerMapBucket*minMapTableLen
-minMapTableLen = 32
-// minimal table capacity
-minMapTableCap = minMapTableLen * entriesPerMapBucket
+// capacity can be calculated as entriesPerMapBucket*defaultMinMapTableLen
+defaultMinMapTableLen = 32
// minimum counter stripes to use
minMapCounterLen = 8
// maximum counter stripes to use; stands for around 4KB of memory
@@ -76,6 +74,7 @@ type Map struct {
resizeMu sync.Mutex // only used along with resizeCond
resizeCond sync.Cond // used to wake up resize waiters (concurrent modifications)
table unsafe.Pointer // *mapTable
minTableLen int
}

type mapTable struct {
@@ -121,7 +120,7 @@ type rangeEntry struct {

// NewMap creates a new Map instance.
func NewMap() *Map {
-return NewMapPresized(minMapTableCap)
+return NewMapPresized(defaultMinMapTableLen * entriesPerMapBucket)
}

// NewMapPresized creates a new Map instance with capacity enough to hold
@@ -130,19 +129,20 @@ func NewMapPresized(sizeHint int) *Map {
m := &Map{}
m.resizeCond = *sync.NewCond(&m.resizeMu)
var table *mapTable
-if sizeHint <= minMapTableCap {
-table = newMapTable(minMapTableLen)
+if sizeHint <= defaultMinMapTableLen*entriesPerMapBucket {
+table = newMapTable(defaultMinMapTableLen)
} else {
tableLen := nextPowOf2(uint32(sizeHint / entriesPerMapBucket))
table = newMapTable(int(tableLen))
}
m.minTableLen = len(table.buckets)
atomic.StorePointer(&m.table, unsafe.Pointer(table))
return m
}

-func newMapTable(tableLen int) *mapTable {
-buckets := make([]bucketPadded, tableLen)
-counterLen := tableLen >> 10
+func newMapTable(minTableLen int) *mapTable {
+buckets := make([]bucketPadded, minTableLen)
+counterLen := minTableLen >> 10
if counterLen < minMapCounterLen {
counterLen = minMapCounterLen
} else if counterLen > maxMapCounterLen {
@@ -240,6 +240,10 @@ func (m *Map) LoadAndStore(key string, value interface{}) (actual interface{}, l
// Otherwise, it computes the value using the provided function and
// returns the computed value. The loaded result is true if the value
// was loaded, false if stored.
//
// This call locks a hash table bucket, i.e. a few map entries, while
// the compute function is executed. Consider this when valueFn
// includes long-running operations.
func (m *Map) LoadOrCompute(key string, valueFn func() interface{}) (actual interface{}, loaded bool) {
return m.doCompute(
key,
@@ -258,6 +262,10 @@ func (m *Map) LoadOrCompute(key string, valueFn func() interface{}) (actual inte
// The ok result indicates whether value was computed and stored, thus, is
// present in the map. The actual result contains the new value in cases where
// the value was computed and stored. See the example for a few use cases.
//
// This call locks a hash table bucket, i.e. a few map entries, while
// the compute function is executed. Consider this when valueFn
// includes long-running operations.
func (m *Map) Compute(
key string,
valueFn func(oldValue interface{}, loaded bool) (newValue interface{}, delete bool),
@@ -461,7 +469,7 @@ func (m *Map) resize(knownTable *mapTable, hint mapResizeHint) {
// Fast path for shrink attempts.
if hint == mapShrinkHint {
shrinkThreshold := int64((knownTableLen * entriesPerMapBucket) / mapShrinkFraction)
-if knownTableLen == minMapTableLen || knownTable.sumSize() > shrinkThreshold {
+if knownTableLen == m.minTableLen || knownTable.sumSize() > shrinkThreshold {
return
}
}
@@ -481,7 +489,7 @@ func (m *Map) resize(knownTable *mapTable, hint mapResizeHint) {
newTable = newMapTable(tableLen << 1)
case mapShrinkHint:
shrinkThreshold := int64((tableLen * entriesPerMapBucket) / mapShrinkFraction)
-if tableLen > minMapTableLen && table.sumSize() <= shrinkThreshold {
+if tableLen > m.minTableLen && table.sumSize() <= shrinkThreshold {
// Shrink the table with factor of 2.
atomic.AddInt64(&m.totalShrinks, 1)
newTable = newMapTable(tableLen >> 1)
@@ -494,7 +502,7 @@ func (m *Map) resize(knownTable *mapTable, hint mapResizeHint) {
return
}
case mapClearHint:
-newTable = newMapTable(minMapTableLen)
+newTable = newMapTable(m.minTableLen)
default:
panic(fmt.Sprintf("unexpected resize hint: %d", hint))
}
@@ -581,9 +589,10 @@ func isEmptyBucket(rootb *bucketPadded) bool {
// may reflect any mapping for that key from any point during the
// Range call.
//
-// It is safe to modify the map while iterating it. However, the
-// concurrent modification rule apply, i.e. the changes may be not
-// reflected in the subsequently iterated entries.
+// It is safe to modify the map while iterating it, including entry
+// creation, modification and deletion. However, the concurrent
+// modification rules apply, i.e. the changes may not be reflected
+// in the subsequently iterated entries.
func (m *Map) Range(f func(key string, value interface{}) bool) {
var zeroEntry rangeEntry
// Pre-allocate array big enough to fit entries for most hash tables.
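Since the new doc comments warn that LoadOrCompute and Compute hold a hash table bucket lock while the callback runs, here is a minimal usage sketch, not part of the commit: keep valueFn cheap, and build expensive values outside the call. It assumes LoadOrCompute as documented above plus LoadOrStore from the existing v3 Map API.

package xsync_test

import (
	"fmt"

	"github.com/puzpuzpuz/xsync/v3"
)

func ExampleMap_LoadOrCompute_cheapCallback() {
	m := xsync.NewMap()

	// Cheap callback: fine to run while the bucket is locked.
	v, loaded := m.LoadOrCompute("answer", func() interface{} {
		return 42
	})
	fmt.Println(v, loaded)

	// Expensive value: build it outside the lock and hand it to LoadOrStore,
	// accepting that the work is wasted if another goroutine stores the key first.
	expensive := fmt.Sprintf("%0128d", 7) // stand-in for costly work
	_, loaded = m.LoadOrStore("report", expensive)
	fmt.Println(loaded)

	// Output:
	// 42 false
	// false
}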
39 changes: 31 additions & 8 deletions map_test.go
@@ -496,8 +496,8 @@ func TestMapStoreThenParallelDelete_DoesNotShrinkBelowMinTableLen(t *testing.T)
<-cdone

stats := CollectMapStats(m)
-if stats.RootBuckets < MinMapTableLen {
-t.Fatalf("table was too small: %d", stats.RootBuckets)
+if stats.RootBuckets != DefaultMinMapTableLen {
+t.Fatalf("table length was different from the minimum: %d", stats.RootBuckets)
}
}

@@ -573,10 +573,33 @@ func assertMapCapacity(t *testing.T, m *Map, expectedCap int) {
}

func TestNewMapPresized(t *testing.T) {
-assertMapCapacity(t, NewMap(), MinMapTableCap)
+assertMapCapacity(t, NewMap(), DefaultMinMapTableCap)
assertMapCapacity(t, NewMapPresized(1000), 1536)
-assertMapCapacity(t, NewMapPresized(0), MinMapTableCap)
-assertMapCapacity(t, NewMapPresized(-1), MinMapTableCap)
+assertMapCapacity(t, NewMapPresized(0), DefaultMinMapTableCap)
+assertMapCapacity(t, NewMapPresized(-1), DefaultMinMapTableCap)
}

func TestNewMapPresized_DoesNotShrinkBelowMinTableLen(t *testing.T) {
const minTableLen = 1024
const numEntries = minTableLen * EntriesPerMapBucket
m := NewMapPresized(numEntries)
for i := 0; i < numEntries; i++ {
m.Store(strconv.Itoa(i), i)
}

stats := CollectMapStats(m)
if stats.RootBuckets <= minTableLen {
t.Fatalf("table did not grow: %d", stats.RootBuckets)
}

for i := 0; i < numEntries; i++ {
m.Delete(strconv.Itoa(i))
}

stats = CollectMapStats(m)
if stats.RootBuckets != minTableLen {
t.Fatalf("table length was different from the minimum: %d", stats.RootBuckets)
}
}

func TestMapResize(t *testing.T) {
@@ -594,7 +617,7 @@ func TestMapResize(t *testing.T) {
if stats.Capacity > expectedCapacity {
t.Fatalf("capacity was too large: %d, expected: %d", stats.Capacity, expectedCapacity)
}
-if stats.RootBuckets <= MinMapTableLen {
+if stats.RootBuckets <= DefaultMinMapTableLen {
t.Fatalf("table was too small: %d", stats.RootBuckets)
}
if stats.TotalGrowths == 0 {
@@ -618,7 +641,7 @@ func TestMapResize(t *testing.T) {
if stats.Capacity != expectedCapacity {
t.Fatalf("capacity was too large: %d, expected: %d", stats.Capacity, expectedCapacity)
}
-if stats.RootBuckets != MinMapTableLen {
+if stats.RootBuckets != DefaultMinMapTableLen {
t.Fatalf("table was too large: %d", stats.RootBuckets)
}
if stats.TotalShrinks == 0 {
@@ -696,7 +719,7 @@ func parallelRandResizer(t *testing.T, m *Map, numIters, numEntries int, cdone c

func TestMapParallelResize(t *testing.T) {
const numIters = 1_000
-const numEntries = 2 * EntriesPerMapBucket * MinMapTableLen
+const numEntries = 2 * EntriesPerMapBucket * DefaultMinMapTableLen
m := NewMap()
cdone := make(chan bool)
go parallelRandResizer(t, m, numIters, numEntries, cdone)
39 changes: 25 additions & 14 deletions mapof.go
@@ -32,6 +32,7 @@ type MapOf[K comparable, V any] struct {
resizeCond sync.Cond // used to wake up resize waiters (concurrent modifications)
table unsafe.Pointer // *mapOfTable
hasher func(K, uint64) uint64
minTableLen int
}

type mapOfTable[K comparable, V any] struct {
@@ -66,7 +67,7 @@ type entryOf[K comparable, V any] struct {

// NewMapOf creates a new MapOf instance.
func NewMapOf[K comparable, V any]() *MapOf[K, V] {
-return NewMapOfPresized[K, V](minMapTableCap)
+return NewMapOfPresized[K, V](defaultMinMapTableLen * entriesPerMapBucket)
}

// NewMapOfPresized creates a new MapOf instance with capacity enough
@@ -84,19 +85,20 @@ func newMapOfPresized[K comparable, V any](
m.resizeCond = *sync.NewCond(&m.resizeMu)
m.hasher = hasher
var table *mapOfTable[K, V]
-if sizeHint <= minMapTableCap {
-table = newMapOfTable[K, V](minMapTableLen)
+if sizeHint <= defaultMinMapTableLen*entriesPerMapBucket {
+table = newMapOfTable[K, V](defaultMinMapTableLen)
} else {
tableLen := nextPowOf2(uint32(sizeHint / entriesPerMapBucket))
table = newMapOfTable[K, V](int(tableLen))
}
m.minTableLen = len(table.buckets)
atomic.StorePointer(&m.table, unsafe.Pointer(table))
return m
}

-func newMapOfTable[K comparable, V any](tableLen int) *mapOfTable[K, V] {
-buckets := make([]bucketOfPadded, tableLen)
-counterLen := tableLen >> 10
+func newMapOfTable[K comparable, V any](minTableLen int) *mapOfTable[K, V] {
+buckets := make([]bucketOfPadded, minTableLen)
+counterLen := minTableLen >> 10
if counterLen < minMapCounterLen {
counterLen = minMapCounterLen
} else if counterLen > maxMapCounterLen {
@@ -111,8 +113,8 @@ func newMapOfTable[K comparable, V any](tableLen int) *mapOfTable[K, V] {
return t
}

-// Load returns the value stored in the map for a key, or nil if no
-// value is present.
+// Load returns the value stored in the map for a key, or the zero
+// value of type V if no value is present.
// The ok result indicates whether value was found in the map.
func (m *MapOf[K, V]) Load(key K) (value V, ok bool) {
table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
@@ -190,6 +192,10 @@ func (m *MapOf[K, V]) LoadAndStore(key K, value V) (actual V, loaded bool) {
// Otherwise, it computes the value using the provided function and
// returns the computed value. The loaded result is true if the value
// was loaded, false if stored.
//
// This call locks a hash table bucket, i.e. a few map entries, while
// the compute function is executed. Consider this when valueFn
// includes long-running operations.
func (m *MapOf[K, V]) LoadOrCompute(key K, valueFn func() V) (actual V, loaded bool) {
return m.doCompute(
key,
@@ -208,6 +214,10 @@ func (m *MapOf[K, V]) LoadOrCompute(key K, valueFn func() V) (actual V, loaded b
// The ok result indicates whether value was computed and stored, thus, is
// present in the map. The actual result contains the new value in cases where
// the value was computed and stored. See the example for a few use cases.
//
// This call locks a hash table bucket, i.e. a few map entries, while
// the compute function is executed. Consider this when valueFn
// includes long-running operations.
func (m *MapOf[K, V]) Compute(
key K,
valueFn func(oldValue V, loaded bool) (newValue V, delete bool),
@@ -410,7 +420,7 @@ func (m *MapOf[K, V]) resize(knownTable *mapOfTable[K, V], hint mapResizeHint) {
// Fast path for shrink attempts.
if hint == mapShrinkHint {
shrinkThreshold := int64((knownTableLen * entriesPerMapBucket) / mapShrinkFraction)
-if knownTableLen == minMapTableLen || knownTable.sumSize() > shrinkThreshold {
+if knownTableLen == m.minTableLen || knownTable.sumSize() > shrinkThreshold {
return
}
}
@@ -430,7 +440,7 @@ func (m *MapOf[K, V]) resize(knownTable *mapOfTable[K, V], hint mapResizeHint) {
newTable = newMapOfTable[K, V](tableLen << 1)
case mapShrinkHint:
shrinkThreshold := int64((tableLen * entriesPerMapBucket) / mapShrinkFraction)
-if tableLen > minMapTableLen && table.sumSize() <= shrinkThreshold {
+if tableLen > m.minTableLen && table.sumSize() <= shrinkThreshold {
// Shrink the table with factor of 2.
atomic.AddInt64(&m.totalShrinks, 1)
newTable = newMapOfTable[K, V](tableLen >> 1)
@@ -443,7 +453,7 @@ func (m *MapOf[K, V]) resize(knownTable *mapOfTable[K, V], hint mapResizeHint) {
return
}
case mapClearHint:
-newTable = newMapOfTable[K, V](minMapTableLen)
+newTable = newMapOfTable[K, V](m.minTableLen)
default:
panic(fmt.Sprintf("unexpected resize hint: %d", hint))
}
@@ -497,9 +507,10 @@ func copyBucketOf[K comparable, V any](
// may reflect any mapping for that key from any point during the
// Range call.
//
-// It is safe to modify the map while iterating it. However, the
-// concurrent modification rule apply, i.e. the changes may be not
-// reflected in the subsequently iterated entries.
+// It is safe to modify the map while iterating it, including entry
+// creation, modification and deletion. However, the concurrent
+// modification rules apply, i.e. the changes may not be reflected
+// in the subsequently iterated entries.
func (m *MapOf[K, V]) Range(f func(key K, value V) bool) {
var zeroPtr unsafe.Pointer
// Pre-allocate array big enough to fit entries for most hash tables.
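Rounding off the MapOf side, a minimal sketch, not part of the commit, that combines the presized constructor with the Compute semantics documented above (a new value plus a delete flag); it assumes only NewMapOfPresized, Compute, and Load as they appear in this diff.

package xsync_test

import (
	"fmt"

	"github.com/puzpuzpuz/xsync/v3"
)

func ExampleMapOf_presizedCounters() {
	// The 10_000 hint now also sets the table length the map never shrinks below.
	counters := xsync.NewMapOfPresized[string, int](10_000)

	// Increment: the bucket stays locked only while the callback runs.
	counters.Compute("requests", func(old int, loaded bool) (int, bool) {
		return old + 1, false // false: keep the entry
	})

	// Decrement, deleting the entry once it reaches zero.
	counters.Compute("requests", func(old int, loaded bool) (int, bool) {
		if old <= 1 {
			return 0, true // true: remove the key/value pair
		}
		return old - 1, false
	})

	_, ok := counters.Load("requests")
	fmt.Println(ok)
	// Output: false
}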