diff --git a/.github/workflows/build-32-bit.yml b/.github/workflows/build-32-bit.yml
index bc24367..cda90a1 100644
--- a/.github/workflows/build-32-bit.yml
+++ b/.github/workflows/build-32-bit.yml
@@ -6,7 +6,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.19.x, 1.20.x, 1.21.x]
     name: Build with Go ${{ matrix.go-version }} 32-bit
     steps:
       - uses: actions/checkout@v3
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index c5ad8bb..3438645 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -6,7 +6,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.19.x, 1.20.x, 1.21.x]
     name: Build with Go ${{ matrix.go-version }}
     steps:
       - uses: actions/checkout@v3
diff --git a/README.md b/README.md
index 94dca04..12a71cb 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
-[![GoDoc reference](https://img.shields.io/badge/godoc-reference-blue.svg)](https://pkg.go.dev/github.com/puzpuzpuz/xsync/v2)
-[![GoReport](https://goreportcard.com/badge/github.com/puzpuzpuz/xsync/v2)](https://goreportcard.com/report/github.com/puzpuzpuz/xsync/v2)
+[![GoDoc reference](https://img.shields.io/badge/godoc-reference-blue.svg)](https://pkg.go.dev/github.com/puzpuzpuz/xsync/v3)
+[![GoReport](https://goreportcard.com/badge/github.com/puzpuzpuz/xsync/v3)](https://goreportcard.com/report/github.com/puzpuzpuz/xsync/v3)
 [![codecov](https://codecov.io/gh/puzpuzpuz/xsync/branch/main/graph/badge.svg)](https://codecov.io/gh/puzpuzpuz/xsync)
 
 # xsync
@@ -16,15 +16,15 @@ Also, a non-scientific, unfair benchmark comparing Java's [j.u.c.ConcurrentHashM
 
 ## Usage
 
-The latest xsync major version is v2, so `/v2` suffix should be used when importing the library:
+The latest xsync major version is v3, so the `/v3` suffix should be used when importing the library:
 
 ```go
 import (
-	"github.com/puzpuzpuz/xsync/v2"
+	"github.com/puzpuzpuz/xsync/v3"
 )
 ```
 
-*Note for v1 users*: v1 support is discontinued, so please upgrade to v2. While the API has some breaking changes, the migration should be trivial.
+*Note for v1 users*: v1 support is discontinued, so please upgrade to v3. While the API has some breaking changes, the migration should be trivial.
 
 ### Counter
 
@@ -35,7 +35,7 @@ c := xsync.NewCounter()
 // increment and decrement the counter
 c.Inc()
 c.Dec()
-// read the current value
+// read the current value
 v := c.Value()
 ```
 
@@ -58,10 +58,10 @@ CLHT is built around the idea of organizing the hash table in cache-line-sized buckets
 
 One important difference with `sync.Map` is that only string keys are supported. That's because the Go standard library does not expose the built-in hash functions for `interface{}` values.
 
-`MapOf[K, V]` is an implementation with parametrized value type. It is available for Go 1.18 or later. While it's still a CLHT-inspired hash map, `MapOf`'s design is quite different from `Map`. As a result, less GC pressure and less atomic operations on reads.
+`MapOf[K, V]` is an implementation with parametrized key and value types. While it's still a CLHT-inspired hash map, `MapOf`'s design is quite different from `Map`. As a result, it puts less pressure on the GC and needs fewer atomic operations on reads.
 
 ```go
-m := xsync.NewMapOf[string]()
+m := xsync.NewMapOf[string, string]()
 m.Store("foo", "bar")
 v, ok := m.Load("foo")
 ```
@@ -73,17 +73,7 @@ type Point struct {
 	x int32
 	y int32
 }
-m := NewTypedMapOf[Point, int](func(seed maphash.Seed, p Point) uint64 {
-	// provide a hash function when creating the MapOf;
-	// we recommend using the hash/maphash package for the function
-	var h maphash.Hash
-	h.SetSeed(seed)
-	binary.Write(&h, binary.LittleEndian, p.x)
-	hash := h.Sum64()
-	h.Reset()
-	binary.Write(&h, binary.LittleEndian, p.y)
-	return 31*hash + h.Sum64()
-})
+m := xsync.NewMapOf[Point, int]()
 m.Store(Point{42, 42}, 42)
 v, ok := m.Load(Point{42, 42})
 ```
diff --git a/counter.go b/counter.go
index 4bf2c91..4d4dc87 100644
--- a/counter.go
+++ b/counter.go
@@ -62,7 +62,7 @@ func (c *Counter) Add(delta int64) {
 		t, ok := ptokenPool.Get().(*ptoken)
 		if !ok {
 			t = new(ptoken)
-			t.idx = fastrand()
+			t.idx = runtime_fastrand()
 		}
 		for {
 			stripe := &c.stripes[t.idx&c.mask]
@@ -71,7 +71,7 @@
 			break
 		}
 		// Give a try with another randomly selected stripe.
-		t.idx = fastrand()
+		t.idx = runtime_fastrand()
 	}
 	ptokenPool.Put(t)
 }
diff --git a/counter_test.go b/counter_test.go
index d715ca6..368bbb5 100644
--- a/counter_test.go
+++ b/counter_test.go
@@ -5,7 +5,7 @@ import (
 	"sync/atomic"
 	"testing"
 
-	. "github.com/puzpuzpuz/xsync/v2"
+	. "github.com/puzpuzpuz/xsync/v3"
 )
 
 func TestCounterInc(t *testing.T) {
diff --git a/example_test.go b/example_test.go
index b68d4de..1c3115d 100644
--- a/example_test.go
+++ b/example_test.go
@@ -1,42 +1,13 @@
-//go:build go1.18
-// +build go1.18
-
 package xsync_test
 
 import (
-	"encoding/binary"
 	"fmt"
-	"hash/maphash"
-	"time"
 
-	"github.com/puzpuzpuz/xsync/v2"
+	"github.com/puzpuzpuz/xsync/v3"
 )
 
-func ExampleNewTypedMapOf() {
-	type Person struct {
-		GivenName   string
-		FamilyName  string
-		YearOfBirth int16
-	}
-	age := xsync.NewTypedMapOf[Person, int](func(seed maphash.Seed, p Person) uint64 {
-		var h maphash.Hash
-		h.SetSeed(seed)
-		h.WriteString(p.GivenName)
-		hash := h.Sum64()
-		h.Reset()
-		h.WriteString(p.FamilyName)
-		hash = 31*hash + h.Sum64()
-		h.Reset()
-		binary.Write(&h, binary.LittleEndian, p.YearOfBirth)
-		return 31*hash + h.Sum64()
-	})
-	Y := time.Now().Year()
-	age.Store(Person{"Ada", "Lovelace", 1815}, Y-1815)
-	age.Store(Person{"Charles", "Babbage", 1791}, Y-1791)
-}
-
 func ExampleMapOf_Compute() {
-	counts := xsync.NewIntegerMapOf[int, int]()
+	counts := xsync.NewMapOf[int, int]()
 	// Store a new value.
v, ok := counts.Compute(42, func(oldValue int, loaded bool) (newValue int, delete bool) { diff --git a/export_mapof_test.go b/export_mapof_test.go deleted file mode 100644 index a91627a..0000000 --- a/export_mapof_test.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -package xsync - -type ( - BucketOfPadded = bucketOfPadded -) - -func CollectMapOfStats[K comparable, V any](m *MapOf[K, V]) MapStats { - return MapStats{m.stats()} -} diff --git a/export_test.go b/export_test.go index 16d373c..936f351 100644 --- a/export_test.go +++ b/export_test.go @@ -1,7 +1,5 @@ package xsync -import "hash/maphash" - const ( EntriesPerMapBucket = entriesPerMapBucket MapLoadFactor = mapLoadFactor @@ -11,7 +9,8 @@ const ( ) type ( - BucketPadded = bucketPadded + BucketPadded = bucketPadded + BucketOfPadded = bucketOfPadded ) type MapStats struct { @@ -50,14 +49,33 @@ func DisableAssertions() { assertionsEnabled = false } -func HashString(seed maphash.Seed, s string) uint64 { - return hashString(seed, s) -} - func Fastrand() uint32 { - return fastrand() + return runtime_fastrand() } func NextPowOf2(v uint32) uint32 { return nextPowOf2(v) } + +func MakeSeed() uint64 { + return makeSeed() +} + +func HashString(s string, seed uint64) uint64 { + return hashString(s, seed) +} + +func MakeHasher[T comparable]() func(T, uint64) uint64 { + return makeHasher[T]() +} + +func CollectMapOfStats[K comparable, V any](m *MapOf[K, V]) MapStats { + return MapStats{m.stats()} +} + +func NewMapOfPresizedWithHasher[K comparable, V any]( + hasher func(K, uint64) uint64, + sizeHint int, +) *MapOf[K, V] { + return newMapOfPresized[K, V](hasher, sizeHint) +} diff --git a/go.mod b/go.mod index 5406868..a98d069 100644 --- a/go.mod +++ b/go.mod @@ -1,3 +1,3 @@ -module github.com/puzpuzpuz/xsync/v2 +module github.com/puzpuzpuz/xsync/v3 go 1.20 diff --git a/hashing.go b/hashing.go deleted file mode 100644 index a3dff1a..0000000 --- a/hashing.go +++ /dev/null @@ -1,174 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -package xsync - -import ( - "hash/maphash" - "reflect" - "unsafe" -) - -// makeHashFunc creates a fast hash function for the given comparable type. -// The only limitation is that the type should not contain interfaces inside -// based on runtime.typehash. 
-func makeHashFunc[T comparable]() func(maphash.Seed, T) uint64 { - var zero T - - isInterface := reflect.TypeOf(&zero).Elem().Kind() == reflect.Interface - is64Bit := unsafe.Sizeof(uintptr(0)) == 8 - - if isInterface { - if is64Bit { - return func(seed maphash.Seed, value T) uint64 { - seed64 := *(*uint64)(unsafe.Pointer(&seed)) - iValue := any(value) - i := (*iface)(unsafe.Pointer(&iValue)) - return uint64(runtime_typehash(i.typ, noescape(i.word), uintptr(seed64))) - } - } else { - return func(seed maphash.Seed, value T) uint64 { - seed64 := *(*uint64)(unsafe.Pointer(&seed)) - iValue := any(value) - i := (*iface)(unsafe.Pointer(&iValue)) - - lo := runtime_typehash(i.typ, noescape(i.word), uintptr(seed64)) - hi := runtime_typehash(i.typ, noescape(i.word), uintptr(seed64>>32)) - return uint64(hi)<<32 | uint64(lo) - } - } - } else { - var iZero any = zero - i := (*iface)(unsafe.Pointer(&iZero)) - - if is64Bit { - return func(seed maphash.Seed, value T) uint64 { - seed64 := *(*uint64)(unsafe.Pointer(&seed)) - return uint64(runtime_typehash(i.typ, noescape(unsafe.Pointer(&value)), uintptr(seed64))) - } - } else { - return func(seed maphash.Seed, value T) uint64 { - seed64 := *(*uint64)(unsafe.Pointer(&seed)) - - lo := runtime_typehash(i.typ, noescape(unsafe.Pointer(&value)), uintptr(seed64)) - hi := runtime_typehash(i.typ, noescape(unsafe.Pointer(&value)), uintptr(seed64>>32)) - return uint64(hi)<<32 | uint64(lo) - } - } - } -} - -// DRY version of makeHashFunc -func makeHashFuncDRY[T comparable]() func(maphash.Seed, T) uint64 { - var zero T - - if reflect.TypeOf(&zero).Elem().Kind() == reflect.Interface { - return func(seed maphash.Seed, value T) uint64 { - iValue := any(value) - i := (*iface)(unsafe.Pointer(&iValue)) - return runtime_typehash64(i.typ, noescape(i.word), seed) - } - } else { - var iZero any = zero - i := (*iface)(unsafe.Pointer(&iZero)) - return func(seed maphash.Seed, value T) uint64 { - return runtime_typehash64(i.typ, noescape(unsafe.Pointer(&value)), seed) - } - } -} - -func makeHashFuncNative[T comparable]() func(maphash.Seed, T) uint64 { - hasher := makeHashFuncNativeInternal(make(map[T]struct{})) - - is64Bit := unsafe.Sizeof(uintptr(0)) == 8 - - if is64Bit { - return func(seed maphash.Seed, value T) uint64 { - seed64 := *(*uint64)(unsafe.Pointer(&seed)) - return uint64(hasher(noescape(unsafe.Pointer(&value)), uintptr(seed64))) - } - } else { - return func(seed maphash.Seed, value T) uint64 { - seed64 := *(*uint64)(unsafe.Pointer(&seed)) - lo := hasher(noescape(unsafe.Pointer(&value)), uintptr(seed64)) - hi := hasher(noescape(unsafe.Pointer(&value)), uintptr(seed64>>32)) - return uint64(hi)<<32 | uint64(lo) - } - } - -} - -type nativeHasher func(unsafe.Pointer, uintptr) uintptr - -func makeHashFuncNativeInternal(mapValue any) nativeHasher { - // go/src/runtime/type.go - type tflag uint8 - type nameOff int32 - type typeOff int32 - - // go/src/runtime/type.go - type _type struct { - size uintptr - ptrdata uintptr - hash uint32 - tflag tflag - align uint8 - fieldAlign uint8 - kind uint8 - equal func(unsafe.Pointer, unsafe.Pointer) bool - gcdata *byte - str nameOff - ptrToThis typeOff - } - - // go/src/runtime/type.go - type maptype struct { - typ _type - key *_type - elem *_type - bucket *_type - // function for hashing keys (ptr to key, seed) -> hash - hasher nativeHasher - keysize uint8 - elemsize uint8 - bucketsize uint16 - flags uint32 - } - - type mapiface struct { - typ *maptype - val uintptr - } - - i := (*mapiface)(unsafe.Pointer(&mapValue)) - return i.typ.hasher 
-} - -// how interface is represented in memory -type iface struct { - typ uintptr - word unsafe.Pointer -} - -// same as runtime_typehash, but always returns a uint64 -// see: maphash.rthash function for details -func runtime_typehash64(t uintptr, p unsafe.Pointer, seed maphash.Seed) uint64 { - seed64 := *(*uint64)(unsafe.Pointer(&seed)) - if unsafe.Sizeof(uintptr(0)) == 8 { - return uint64(runtime_typehash(t, noescape(p), uintptr(seed64))) - } - - lo := runtime_typehash(t, noescape(p), uintptr(seed64)) - hi := runtime_typehash(t, noescape(p), uintptr(seed64>>32)) - return uint64(hi)<<32 | uint64(lo) -} - -//go:nosplit -//go:nocheckptr -func noescape(p unsafe.Pointer) unsafe.Pointer { - x := uintptr(p) - return unsafe.Pointer(x ^ 0) -} - -//go:linkname runtime_typehash runtime.typehash -func runtime_typehash(t uintptr, p unsafe.Pointer, h uintptr) uintptr diff --git a/hashing_test.go b/hashing_test.go deleted file mode 100644 index 81e48d2..0000000 --- a/hashing_test.go +++ /dev/null @@ -1,154 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -package xsync - -import ( - "fmt" - "hash/maphash" - "testing" -) - -func TestMakeHashFunc(t *testing.T) { - type User struct { - Name string - City string - } - - seed := maphash.MakeSeed() - - hashString := makeHashFunc[string]() - hashUser := makeHashFunc[User]() - hashAny := makeHashFunc[any]() // this declaration requires go 1.20+, though makeHashFunc itself can work with go.18+ - - hashUserNative := makeHashFuncNative[User]() - - // Not that much to test TBH. - - // check that hash is not always the same - for i := 0; ; i++ { - if hashString(seed, "foo") != hashString(seed, "bar") { - break - } - if i >= 100 { - t.Error("hashString is always the same") - break - } - - seed = maphash.MakeSeed() // try with a new seed - } - - // do the same for hash any - for i := 0; ; i++ { - if hashAny(seed, "foo") != hashAny(seed, "bar") { - break - } - if i >= 100 { - t.Error("hashAny is always the same") - break - } - - seed = maphash.MakeSeed() // try with a new seed - } - - if hashString(seed, "foo") != hashString(seed, "foo") { - t.Error("hashString is not deterministic") - } - - if hashUser(seed, User{Name: "John", City: "New York"}) != hashUser(seed, User{Name: "John", City: "New York"}) { - t.Error("hashUser is not deterministic") - } - - if hashAny(seed, User{Name: "John", City: "New York"}) != hashAny(seed, User{Name: "John", City: "New York"}) { - t.Error("hashAny is not deterministic") - } - - // just for fun, compare with native hash function - if hashUser(seed, User{Name: "John", City: "New York"}) != hashUserNative(seed, User{Name: "John", City: "New York"}) { - t.Error("hashUser and hashUserNative return different values") - } - -} - -func expectEqualHashes[T comparable](t *testing.T, val1, val2 T) { - t.Helper() - - if val1 != val2 { - t.Error("use expectDifferentHashes for non-equal values") - return - } - - hash := makeHashFunc[T]() - seed := maphash.MakeSeed() - - if hash(seed, val1) != hash(seed, val2) { - t.Error("two invocations of hash for the same value return different results") - } -} - -func BenchmarkMakeHashFunc(b *testing.B) { - type Point struct { - X, Y, Z int - } - - type User struct { - ID int - FirstName string - LastName string - IsActive bool - City string - } - - type PadInside struct { - A int - B byte - C int - } - - type PadTrailing struct { - A int - B byte - } - - doBenchmarkMakeHashFunc(b, int64(116)) - doBenchmarkMakeHashFunc(b, int32(116)) - doBenchmarkMakeHashFunc(b, 3.14) - doBenchmarkMakeHashFunc(b, "test key test 
key test key test key test key test key test key test key test key ") - doBenchmarkMakeHashFunc(b, Point{1, 2, 3}) - doBenchmarkMakeHashFunc(b, User{ID: 1, FirstName: "John", LastName: "Smith", IsActive: true, City: "New York"}) - doBenchmarkMakeHashFunc(b, PadInside{}) - doBenchmarkMakeHashFunc(b, PadTrailing{}) - doBenchmarkMakeHashFunc(b, [1024]byte{}) - doBenchmarkMakeHashFunc(b, [128]Point{}) - doBenchmarkMakeHashFunc(b, [128]User{}) - doBenchmarkMakeHashFunc(b, [128]PadInside{}) - doBenchmarkMakeHashFunc(b, [128]PadTrailing{}) -} - -func doBenchmarkMakeHashFunc[T comparable](b *testing.B, val T) { - hash := makeHashFunc[T]() - hashDry := makeHashFuncDRY[T]() - hashNative := makeHashFuncNative[T]() - seed := maphash.MakeSeed() - - b.Run(fmt.Sprintf("%T normal", val), func(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - _ = hash(seed, val) - } - }) - - b.Run(fmt.Sprintf("%T dry", val), func(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - _ = hashDry(seed, val) - } - }) - - b.Run(fmt.Sprintf("%T native", val), func(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - _ = hashNative(seed, val) - } - }) -} diff --git a/map.go b/map.go index 749293c..05e0231 100644 --- a/map.go +++ b/map.go @@ -2,7 +2,6 @@ package xsync import ( "fmt" - "hash/maphash" "math" "runtime" "strings" @@ -85,7 +84,7 @@ type mapTable struct { // used to determine if a table shrinking is needed // occupies min(buckets_memory/1024, 64KB) of memory size []counterStripe - seed maphash.Seed + seed uint64 } type counterStripe struct { @@ -153,7 +152,7 @@ func newMapTable(tableLen int) *mapTable { t := &mapTable{ buckets: buckets, size: counter, - seed: maphash.MakeSeed(), + seed: makeSeed(), } return t } @@ -163,7 +162,7 @@ func newMapTable(tableLen int) *mapTable { // The ok result indicates whether value was found in the map. func (m *Map) Load(key string) (value interface{}, ok bool) { table := (*mapTable)(atomic.LoadPointer(&m.table)) - hash := hashString(table.seed, key) + hash := hashString(key, table.seed) bidx := uint64(len(table.buckets)-1) & hash b := &table.buckets[bidx] for { @@ -313,7 +312,7 @@ func (m *Map) doCompute( ) table := (*mapTable)(atomic.LoadPointer(&m.table)) tableLen := len(table.buckets) - hash := hashString(table.seed, key) + hash := hashString(key, table.seed) bidx := uint64(len(table.buckets)-1) & hash rootb := &table.buckets[bidx] lockBucket(&rootb.topHashMutex) @@ -517,7 +516,7 @@ func copyBucket(b *bucketPadded, destTable *mapTable) (copied int) { for i := 0; i < entriesPerMapBucket; i++ { if b.keys[i] != nil { k := derefKey(b.keys[i]) - hash := hashString(destTable.seed, k) + hash := hashString(k, destTable.seed) bidx := uint64(len(destTable.buckets)-1) & hash destb := &destTable.buckets[bidx] appendToBucket(hash, b.keys[i], b.values[i], destb) diff --git a/map_test.go b/map_test.go index 51f0fff..a16b39a 100644 --- a/map_test.go +++ b/map_test.go @@ -12,7 +12,7 @@ import ( "time" "unsafe" - . "github.com/puzpuzpuz/xsync/v2" + . "github.com/puzpuzpuz/xsync/v3" ) const ( diff --git a/mapof.go b/mapof.go index b791617..2d40f09 100644 --- a/mapof.go +++ b/mapof.go @@ -1,18 +1,14 @@ -//go:build go1.18 -// +build go1.18 - package xsync import ( "fmt" - "hash/maphash" "math" "sync" "sync/atomic" "unsafe" ) -// MapOf is like a Go map[string]V but is safe for concurrent +// MapOf is like a Go map[K]V but is safe for concurrent // use by multiple goroutines without additional locking or // coordination. 
It follows the interface of sync.Map with // a number of valuable extensions like Compute or Size. @@ -35,7 +31,7 @@ type MapOf[K comparable, V any] struct { resizeMu sync.Mutex // only used along with resizeCond resizeCond sync.Cond // used to wake up resize waiters (concurrent modifications) table unsafe.Pointer // *mapOfTable - hasher func(maphash.Seed, K) uint64 + hasher func(K, uint64) uint64 } type mapOfTable[K comparable, V any] struct { @@ -44,7 +40,7 @@ type mapOfTable[K comparable, V any] struct { // used to determine if a table shrinking is needed // occupies min(buckets_memory/1024, 64KB) of memory size []counterStripe - seed maphash.Seed + seed uint64 } // bucketOfPadded is a CL-sized map bucket holding up to @@ -68,54 +64,22 @@ type entryOf[K comparable, V any] struct { value V } -// NewMapOf creates a new MapOf instance with string keys. -func NewMapOf[V any]() *MapOf[string, V] { - return NewTypedMapOfPresized[string, V](hashString, minMapTableCap) +// NewMapOf creates a new MapOf instance. +func NewMapOf[K comparable, V any]() *MapOf[K, V] { + return NewMapOfPresized[K, V](minMapTableCap) } -// NewMapOfPresized creates a new MapOf instance with string keys and capacity -// enough to hold sizeHint entries. If sizeHint is zero or negative, the value +// NewMapOfPresized creates a new MapOf instance with capacity enough +// to hold sizeHint entries. If sizeHint is zero or negative, the value // is ignored. -func NewMapOfPresized[V any](sizeHint int) *MapOf[string, V] { - return NewTypedMapOfPresized[string, V](hashString, sizeHint) -} - -// IntegerConstraint represents any integer type. -type IntegerConstraint interface { - // Recreation of golang.org/x/exp/constraints.Integer to avoid taking a dependency on an - // experimental package. - ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr -} - -// NewIntegerMapOf creates a new MapOf instance with integer typed keys. -func NewIntegerMapOf[K IntegerConstraint, V any]() *MapOf[K, V] { - return NewTypedMapOfPresized[K, V](hashUint64[K], minMapTableCap) -} - -// NewIntegerMapOfPresized creates a new MapOf instance with integer typed keys -// and capacity enough to hold sizeHint entries. If sizeHint is zero or -// negative, the value is ignored. -func NewIntegerMapOfPresized[K IntegerConstraint, V any](sizeHint int) *MapOf[K, V] { - return NewTypedMapOfPresized[K, V](hashUint64[K], sizeHint) +func NewMapOfPresized[K comparable, V any](sizeHint int) *MapOf[K, V] { + return newMapOfPresized[K, V](makeHasher[K](), sizeHint) } -// NewTypedMapOf creates a new MapOf instance with arbitrarily typed keys. -// -// Keys are hashed to uint64 using the hasher function. It is strongly -// recommended to use the hash/maphash package to implement hasher. See the -// example for how to do that. -func NewTypedMapOf[K comparable, V any](hasher func(maphash.Seed, K) uint64) *MapOf[K, V] { - return NewTypedMapOfPresized[K, V](hasher, minMapTableCap) -} - -// NewTypedMapOfPresized creates a new MapOf instance with arbitrarily typed -// keys and capacity enough to hold sizeHint entries. If sizeHint is zero or -// negative, the value is ignored. -// -// Keys are hashed to uint64 using the hasher function. It is strongly -// recommended to use the hash/maphash package to implement hasher. See the -// example for how to do that. 
-func NewTypedMapOfPresized[K comparable, V any](hasher func(maphash.Seed, K) uint64, sizeHint int) *MapOf[K, V] { +func newMapOfPresized[K comparable, V any]( + hasher func(K, uint64) uint64, + sizeHint int, +) *MapOf[K, V] { m := &MapOf[K, V]{} m.resizeCond = *sync.NewCond(&m.resizeMu) m.hasher = hasher @@ -130,20 +94,6 @@ func NewTypedMapOfPresized[K comparable, V any](hasher func(maphash.Seed, K) uin return m } -// NewUniversalMapOf creates a new MapOf instance with arbitrarily typed comparable keys. -// The only limitation is that key type should not contain interfaces inside. -func NewUniversalMapOf[K comparable, V any]() *MapOf[K, V] { - return NewTypedMapOfPresized[K, V](makeHashFunc[K](), minMapTableCap) -} - -// NewUniversalMapOfPresized creates a new MapOf instance with arbitrarily typed -// comparable keys and capacity enough to hold sizeHint entries. If sizeHint is zero or -// negative, the value is ignored. -// The only limitation is that key type should not contain interfaces inside. -func NewUniversalMapOfPresized[K comparable, V any](sizeHint int) *MapOf[K, V] { - return NewTypedMapOfPresized[K, V](makeHashFunc[K](), sizeHint) -} - func newMapOfTable[K comparable, V any](tableLen int) *mapOfTable[K, V] { buckets := make([]bucketOfPadded, tableLen) counterLen := tableLen >> 10 @@ -156,7 +106,7 @@ func newMapOfTable[K comparable, V any](tableLen int) *mapOfTable[K, V] { t := &mapOfTable[K, V]{ buckets: buckets, size: counter, - seed: maphash.MakeSeed(), + seed: makeSeed(), } return t } @@ -166,7 +116,7 @@ func newMapOfTable[K comparable, V any](tableLen int) *mapOfTable[K, V] { // The ok result indicates whether value was found in the map. func (m *MapOf[K, V]) Load(key K) (value V, ok bool) { table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table)) - hash := shiftHash(m.hasher(table.seed, key)) + hash := shiftHash(m.hasher(key, table.seed)) bidx := uint64(len(table.buckets)-1) & hash b := &table.buckets[bidx] for { @@ -312,7 +262,7 @@ func (m *MapOf[K, V]) doCompute( ) table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table)) tableLen := len(table.buckets) - hash := shiftHash(m.hasher(table.seed, key)) + hash := shiftHash(m.hasher(key, table.seed)) bidx := uint64(len(table.buckets)-1) & hash rootb := &table.buckets[bidx] rootb.mu.Lock() @@ -511,7 +461,7 @@ func (m *MapOf[K, V]) resize(table *mapOfTable[K, V], hint mapResizeHint) { func copyBucketOf[K comparable, V any]( b *bucketOfPadded, destTable *mapOfTable[K, V], - hasher func(maphash.Seed, K) uint64, + hasher func(K, uint64) uint64, ) (copied int) { rootb := b rootb.mu.Lock() @@ -519,7 +469,7 @@ func copyBucketOf[K comparable, V any]( for i := 0; i < entriesPerMapBucket; i++ { if b.entries[i] != nil { e := (*entryOf[K, V])(b.entries[i]) - hash := shiftHash(hasher(destTable.seed, e.key)) + hash := shiftHash(hasher(e.key, destTable.seed)) bidx := uint64(len(destTable.buckets)-1) & hash destb := &destTable.buckets[bidx] appendToBucketOf(hash, b.entries[i], destb) diff --git a/mapof_test.go b/mapof_test.go index ed14a9b..b27816e 100644 --- a/mapof_test.go +++ b/mapof_test.go @@ -1,11 +1,6 @@ -//go:build go1.18 -// +build go1.18 - package xsync_test import ( - "encoding/binary" - "hash/maphash" "math" "math/rand" "strconv" @@ -15,7 +10,7 @@ import ( "time" "unsafe" - . "github.com/puzpuzpuz/xsync/v2" + . 
"github.com/puzpuzpuz/xsync/v3" ) type point struct { @@ -23,16 +18,6 @@ type point struct { y int32 } -func pointHash(seed maphash.Seed, p point) uint64 { - var h maphash.Hash - h.SetSeed(seed) - binary.Write(&h, binary.LittleEndian, p.x) - hash := h.Sum64() - h.Reset() - binary.Write(&h, binary.LittleEndian, p.y) - return 31*hash + h.Sum64() -} - func TestMap_BucketOfStructSize(t *testing.T) { size := unsafe.Sizeof(BucketOfPadded{}) if size != 64 { @@ -41,7 +26,7 @@ func TestMap_BucketOfStructSize(t *testing.T) { } func TestMapOf_MissingEntry(t *testing.T) { - m := NewMapOf[string]() + m := NewMapOf[string, string]() v, ok := m.Load("foo") if ok { t.Fatalf("value was not expected: %v", v) @@ -55,7 +40,7 @@ func TestMapOf_MissingEntry(t *testing.T) { } func TestMapOf_EmptyStringKey(t *testing.T) { - m := NewMapOf[string]() + m := NewMapOf[string, string]() m.Store("", "foobar") v, ok := m.Load("") if !ok { @@ -67,7 +52,7 @@ func TestMapOf_EmptyStringKey(t *testing.T) { } func TestMapOfStore_NilValue(t *testing.T) { - m := NewMapOf[*struct{}]() + m := NewMapOf[string, *struct{}]() m.Store("foo", nil) v, ok := m.Load("foo") if !ok { @@ -79,7 +64,7 @@ func TestMapOfStore_NilValue(t *testing.T) { } func TestMapOfLoadOrStore_NilValue(t *testing.T) { - m := NewMapOf[*struct{}]() + m := NewMapOf[string, *struct{}]() m.LoadOrStore("foo", nil) v, loaded := m.LoadOrStore("foo", nil) if !loaded { @@ -92,7 +77,7 @@ func TestMapOfLoadOrStore_NilValue(t *testing.T) { func TestMapOfLoadOrStore_NonNilValue(t *testing.T) { type foo struct{} - m := NewMapOf[*foo]() + m := NewMapOf[string, *foo]() newv := &foo{} v, loaded := m.LoadOrStore("foo", newv) if loaded { @@ -112,7 +97,7 @@ func TestMapOfLoadOrStore_NonNilValue(t *testing.T) { } func TestMapOfLoadAndStore_NilValue(t *testing.T) { - m := NewMapOf[*struct{}]() + m := NewMapOf[string, *struct{}]() m.LoadAndStore("foo", nil) v, loaded := m.LoadAndStore("foo", nil) if !loaded { @@ -131,7 +116,7 @@ func TestMapOfLoadAndStore_NilValue(t *testing.T) { } func TestMapOfLoadAndStore_NonNilValue(t *testing.T) { - m := NewMapOf[int]() + m := NewMapOf[string, int]() v1 := 1 v, loaded := m.LoadAndStore("foo", v1) if loaded { @@ -159,7 +144,7 @@ func TestMapOfLoadAndStore_NonNilValue(t *testing.T) { func TestMapOfRange(t *testing.T) { const numEntries = 1000 - m := NewMapOf[int]() + m := NewMapOf[string, int]() for i := 0; i < numEntries; i++ { m.Store(strconv.Itoa(i), i) } @@ -185,7 +170,7 @@ func TestMapOfRange(t *testing.T) { } func TestMapOfRange_FalseReturned(t *testing.T) { - m := NewMapOf[int]() + m := NewMapOf[string, int]() for i := 0; i < 100; i++ { m.Store(strconv.Itoa(i), i) } @@ -201,7 +186,7 @@ func TestMapOfRange_FalseReturned(t *testing.T) { func TestMapOfRange_NestedDelete(t *testing.T) { const numEntries = 256 - m := NewMapOf[int]() + m := NewMapOf[string, int]() for i := 0; i < numEntries; i++ { m.Store(strconv.Itoa(i), i) } @@ -216,9 +201,9 @@ func TestMapOfRange_NestedDelete(t *testing.T) { } } -func TestMapOfStore(t *testing.T) { +func TestMapOfStringStore(t *testing.T) { const numEntries = 128 - m := NewMapOf[int]() + m := NewMapOf[string, int]() for i := 0; i < numEntries; i++ { m.Store(strconv.Itoa(i), i) } @@ -233,9 +218,9 @@ func TestMapOfStore(t *testing.T) { } } -func TestIntegerMapOfStore(t *testing.T) { +func TestMapOfIntStore(t *testing.T) { const numEntries = 128 - m := NewIntegerMapOf[int, int]() + m := NewMapOf[int, int]() for i := 0; i < numEntries; i++ { m.Store(i, i) } @@ -250,9 +235,9 @@ func TestIntegerMapOfStore(t 
*testing.T) { } } -func TestTypedMapOfStore_StructKeys_IntValues(t *testing.T) { +func TestMapOfStore_StructKeys_IntValues(t *testing.T) { const numEntries = 128 - m := NewTypedMapOf[point, int](pointHash) + m := NewMapOf[point, int]() for i := 0; i < numEntries; i++ { m.Store(point{int32(i), -int32(i)}, i) } @@ -267,9 +252,9 @@ func TestTypedMapOfStore_StructKeys_IntValues(t *testing.T) { } } -func TestTypedMapOfStore_StructKeys_StructValues(t *testing.T) { +func TestMapOfStore_StructKeys_StructValues(t *testing.T) { const numEntries = 128 - m := NewTypedMapOf[point, point](pointHash) + m := NewMapOf[point, point]() for i := 0; i < numEntries; i++ { m.Store(point{int32(i), -int32(i)}, point{-int32(i), int32(i)}) } @@ -287,13 +272,13 @@ func TestTypedMapOfStore_StructKeys_StructValues(t *testing.T) { } } -func TestTypedMapOfStore_HashCodeCollisions(t *testing.T) { +func TestMapOfStore_HashCodeCollisions(t *testing.T) { const numEntries = 1000 - m := NewTypedMapOf[int, int](func(_ maphash.Seed, i int) uint64 { + m := NewMapOfPresizedWithHasher[int, int](func(i int, _ uint64) uint64 { // We intentionally use an awful hash function here to make sure // that the map copes with key collisions. return 42 - }) + }, numEntries) for i := 0; i < numEntries; i++ { m.Store(i, i) } @@ -310,7 +295,7 @@ func TestTypedMapOfStore_HashCodeCollisions(t *testing.T) { func TestMapOfLoadOrStore(t *testing.T) { const numEntries = 1000 - m := NewMapOf[int]() + m := NewMapOf[string, int]() for i := 0; i < numEntries; i++ { m.Store(strconv.Itoa(i), i) } @@ -323,7 +308,7 @@ func TestMapOfLoadOrStore(t *testing.T) { func TestMapOfLoadOrCompute(t *testing.T) { const numEntries = 1000 - m := NewMapOf[int]() + m := NewMapOf[string, int]() for i := 0; i < numEntries; i++ { v, loaded := m.LoadOrCompute(strconv.Itoa(i), func() int { return i @@ -349,7 +334,7 @@ func TestMapOfLoadOrCompute(t *testing.T) { } func TestMapOfLoadOrCompute_FunctionCalledOnce(t *testing.T) { - m := NewIntegerMapOf[int, int]() + m := NewMapOf[int, int]() for i := 0; i < 100; { m.LoadOrCompute(i, func() (v int) { v, i = i, i+1 @@ -365,7 +350,7 @@ func TestMapOfLoadOrCompute_FunctionCalledOnce(t *testing.T) { } func TestMapOfCompute(t *testing.T) { - m := NewMapOf[int]() + m := NewMapOf[string, int]() // Store a new value. 
v, ok := m.Compute("foobar", func(oldValue int, loaded bool) (newValue int, delete bool) { if oldValue != 0 { @@ -440,9 +425,9 @@ func TestMapOfCompute(t *testing.T) { } } -func TestMapOfStoreThenDelete(t *testing.T) { +func TestMapOfStringStoreThenDelete(t *testing.T) { const numEntries = 1000 - m := NewMapOf[int]() + m := NewMapOf[string, int]() for i := 0; i < numEntries; i++ { m.Store(strconv.Itoa(i), i) } @@ -454,9 +439,9 @@ func TestMapOfStoreThenDelete(t *testing.T) { } } -func TestIntegerMapOfStoreThenDelete(t *testing.T) { +func TestMapOfIntStoreThenDelete(t *testing.T) { const numEntries = 1000 - m := NewIntegerMapOf[int32, int32]() + m := NewMapOf[int32, int32]() for i := 0; i < numEntries; i++ { m.Store(int32(i), int32(i)) } @@ -468,9 +453,9 @@ func TestIntegerMapOfStoreThenDelete(t *testing.T) { } } -func TestTypedMapOfStoreThenDelete(t *testing.T) { +func TestMapOfStructStoreThenDelete(t *testing.T) { const numEntries = 1000 - m := NewTypedMapOf[point, string](pointHash) + m := NewMapOf[point, string]() for i := 0; i < numEntries; i++ { m.Store(point{int32(i), 42}, strconv.Itoa(i)) } @@ -482,9 +467,9 @@ func TestTypedMapOfStoreThenDelete(t *testing.T) { } } -func TestMapOfStoreThenLoadAndDelete(t *testing.T) { +func TestMapOfStringStoreThenLoadAndDelete(t *testing.T) { const numEntries = 1000 - m := NewMapOf[int]() + m := NewMapOf[string, int]() for i := 0; i < numEntries; i++ { m.Store(strconv.Itoa(i), i) } @@ -498,9 +483,9 @@ func TestMapOfStoreThenLoadAndDelete(t *testing.T) { } } -func TestIntegerMapOfStoreThenLoadAndDelete(t *testing.T) { +func TestMapOfIntStoreThenLoadAndDelete(t *testing.T) { const numEntries = 1000 - m := NewIntegerMapOf[int, int]() + m := NewMapOf[int, int]() for i := 0; i < numEntries; i++ { m.Store(i, i) } @@ -514,9 +499,9 @@ func TestIntegerMapOfStoreThenLoadAndDelete(t *testing.T) { } } -func TestTypedMapOfStoreThenLoadAndDelete(t *testing.T) { +func TestMapOfStructStoreThenLoadAndDelete(t *testing.T) { const numEntries = 1000 - m := NewTypedMapOf[point, int](pointHash) + m := NewMapOf[point, int]() for i := 0; i < numEntries; i++ { m.Store(point{42, int32(i)}, i) } @@ -541,7 +526,7 @@ func sizeBasedOnTypedRange(m *MapOf[string, int]) int { func TestMapOfSize(t *testing.T) { const numEntries = 1000 - m := NewMapOf[int]() + m := NewMapOf[string, int]() size := m.Size() if size != 0 { t.Fatalf("zero size expected: %d", size) @@ -575,7 +560,7 @@ func TestMapOfSize(t *testing.T) { func TestMapOfClear(t *testing.T) { const numEntries = 1000 - m := NewMapOf[int]() + m := NewMapOf[string, int]() for i := 0; i < numEntries; i++ { m.Store(strconv.Itoa(i), i) } @@ -602,25 +587,17 @@ func assertMapOfCapacity[K comparable, V any](t *testing.T, m *MapOf[K, V], expe } func TestNewMapOfPresized(t *testing.T) { - assertMapOfCapacity(t, NewMapOf[string](), MinMapTableCap) - assertMapOfCapacity(t, NewMapOfPresized[string](0), MinMapTableCap) - assertMapOfCapacity(t, NewMapOfPresized[string](-100), MinMapTableCap) - assertMapOfCapacity(t, NewMapOfPresized[string](500), 768) - - assertMapOfCapacity(t, NewIntegerMapOf[int, int](), MinMapTableCap) - assertMapOfCapacity(t, NewIntegerMapOfPresized[int, int](0), MinMapTableCap) - assertMapOfCapacity(t, NewIntegerMapOfPresized[int, int](-1), MinMapTableCap) - assertMapOfCapacity(t, NewIntegerMapOfPresized[int, int](1_000_000), 1_572_864) - - assertMapOfCapacity(t, NewTypedMapOf[point, point](pointHash), MinMapTableCap) - assertMapOfCapacity(t, NewTypedMapOfPresized[point, point](pointHash, 0), MinMapTableCap) - 
assertMapOfCapacity(t, NewTypedMapOfPresized[point, point](pointHash, -42), MinMapTableCap) - assertMapOfCapacity(t, NewTypedMapOfPresized[point, point](pointHash, 100), 192) + assertMapOfCapacity(t, NewMapOf[string, string](), MinMapTableCap) + assertMapOfCapacity(t, NewMapOfPresized[string, string](0), MinMapTableCap) + assertMapOfCapacity(t, NewMapOfPresized[string, string](-100), MinMapTableCap) + assertMapOfCapacity(t, NewMapOfPresized[string, string](500), 768) + assertMapOfCapacity(t, NewMapOfPresized[int, int](1_000_000), 1_572_864) + assertMapOfCapacity(t, NewMapOfPresized[point, point](100), 192) } func TestMapOfResize(t *testing.T) { const numEntries = 100_000 - m := NewMapOf[int]() + m := NewMapOf[string, int]() for i := 0; i < numEntries; i++ { m.Store(strconv.Itoa(i), i) @@ -668,7 +645,7 @@ func TestMapOfResize(t *testing.T) { func TestMapOfResize_CounterLenLimit(t *testing.T) { const numEntries = 1_000_000 - m := NewMapOf[string]() + m := NewMapOf[string, string]() for i := 0; i < numEntries; i++ { m.Store("foo"+strconv.Itoa(i), "bar"+strconv.Itoa(i)) @@ -696,7 +673,7 @@ func parallelSeqTypedResizer(t *testing.T, m *MapOf[int, int], numEntries int, p func TestMapOfParallelResize_GrowOnly(t *testing.T) { const numEntries = 100_000 - m := NewIntegerMapOf[int, int]() + m := NewMapOf[int, int]() cdone := make(chan bool) go parallelSeqTypedResizer(t, m, numEntries, true, cdone) go parallelSeqTypedResizer(t, m, numEntries, false, cdone) @@ -736,7 +713,7 @@ func parallelRandTypedResizer(t *testing.T, m *MapOf[string, int], numIters, num func TestMapOfParallelResize(t *testing.T) { const numIters = 1_000 const numEntries = 2 * EntriesPerMapBucket * MinMapTableLen - m := NewMapOf[int]() + m := NewMapOf[string, int]() cdone := make(chan bool) go parallelRandTypedResizer(t, m, numIters, numEntries, cdone) go parallelRandTypedResizer(t, m, numIters, numEntries, cdone) @@ -782,7 +759,7 @@ func parallelRandTypedClearer(t *testing.T, m *MapOf[string, int], numIters, num func TestMapOfParallelClear(t *testing.T) { const numIters = 100 const numEntries = 1_000 - m := NewMapOf[int]() + m := NewMapOf[string, int]() cdone := make(chan bool) go parallelRandTypedClearer(t, m, numIters, numEntries, cdone) go parallelRandTypedClearer(t, m, numIters, numEntries, cdone) @@ -825,7 +802,7 @@ func TestMapOfParallelStores(t *testing.T) { const numStorers = 4 const numIters = 10_000 const numEntries = 100 - m := NewMapOf[int]() + m := NewMapOf[string, int]() cdone := make(chan bool) for i := 0; i < numStorers; i++ { go parallelSeqTypedStorer(t, m, i, numIters, numEntries, cdone) @@ -889,7 +866,7 @@ func parallelTypedLoader(t *testing.T, m *MapOf[string, int], numIters, numEntri func TestMapOfAtomicSnapshot(t *testing.T) { const numIters = 100_000 const numEntries = 100 - m := NewMapOf[int]() + m := NewMapOf[string, int]() cdone := make(chan bool) // Update or delete random entry in parallel with loads. go parallelRandTypedStorer(t, m, numIters, numEntries, cdone) @@ -905,7 +882,7 @@ func TestMapOfParallelStoresAndDeletes(t *testing.T) { const numWorkers = 2 const numIters = 100_000 const numEntries = 1000 - m := NewMapOf[int]() + m := NewMapOf[string, int]() cdone := make(chan bool) // Update random entry in parallel with deletes. for i := 0; i < numWorkers; i++ { @@ -932,7 +909,7 @@ func parallelTypedComputer(t *testing.T, m *MapOf[uint64, uint64], numIters, num func TestMapOfParallelComputes(t *testing.T) { const numWorkers = 4 // Also stands for numEntries. 
const numIters = 10_000 - m := NewIntegerMapOf[uint64, uint64]() + m := NewMapOf[uint64, uint64]() cdone := make(chan bool) for i := 0; i < numWorkers; i++ { go parallelTypedComputer(t, m, numIters, numWorkers, cdone) @@ -979,7 +956,7 @@ func parallelTypedRangeDeleter(t *testing.T, m *MapOf[int, int], numEntries int, func TestMapOfParallelRange(t *testing.T) { const numEntries = 10_000 - m := NewIntegerMapOfPresized[int, int](numEntries) + m := NewMapOfPresized[int, int](numEntries) for i := 0; i < numEntries; i++ { m.Store(i, i) } @@ -1019,7 +996,7 @@ func BenchmarkMapOf_NoWarmUp(b *testing.B) { continue } b.Run(bc.name, func(b *testing.B) { - m := NewMapOf[int]() + m := NewMapOf[string, int]() benchmarkMapOfStringKeys(b, func(k string) (int, bool) { return m.Load(k) }, func(k string, v int) { @@ -1034,7 +1011,7 @@ func BenchmarkMapOf_NoWarmUp(b *testing.B) { func BenchmarkMapOf_WarmUp(b *testing.B) { for _, bc := range benchmarkCases { b.Run(bc.name, func(b *testing.B) { - m := NewMapOfPresized[int](benchmarkNumEntries) + m := NewMapOfPresized[string, int](benchmarkNumEntries) for i := 0; i < benchmarkNumEntries; i++ { m.Store(benchmarkKeyPrefix+strconv.Itoa(i), i) } @@ -1075,15 +1052,15 @@ func benchmarkMapOfStringKeys( }) } -func BenchmarkIntegerMapOf_NoWarmUp(b *testing.B) { +func BenchmarkMapOfInt_NoWarmUp(b *testing.B) { for _, bc := range benchmarkCases { if bc.readPercentage == 100 { // This benchmark doesn't make sense without a warm-up. continue } b.Run(bc.name, func(b *testing.B) { - m := NewIntegerMapOf[int, int]() - benchmarkMapOfIntegerKeys(b, func(k int) (int, bool) { + m := NewMapOf[int, int]() + benchmarkMapOfIntKeys(b, func(k int) (int, bool) { return m.Load(k) }, func(k int, v int) { m.Store(k, v) @@ -1094,15 +1071,15 @@ func BenchmarkIntegerMapOf_NoWarmUp(b *testing.B) { } } -func BenchmarkIntegerMapOf_WarmUp(b *testing.B) { +func BenchmarkMapOfInt_WarmUp(b *testing.B) { for _, bc := range benchmarkCases { b.Run(bc.name, func(b *testing.B) { - m := NewIntegerMapOfPresized[int, int](benchmarkNumEntries) + m := NewMapOfPresized[int, int](benchmarkNumEntries) for i := 0; i < benchmarkNumEntries; i++ { m.Store(i, i) } b.ResetTimer() - benchmarkMapOfIntegerKeys(b, func(k int) (int, bool) { + benchmarkMapOfIntKeys(b, func(k int) (int, bool) { return m.Load(k) }, func(k int, v int) { m.Store(k, v) @@ -1113,7 +1090,7 @@ func BenchmarkIntegerMapOf_WarmUp(b *testing.B) { } } -func BenchmarkIntegerMapStandard_NoWarmUp(b *testing.B) { +func BenchmarkIntMapStandard_NoWarmUp(b *testing.B) { for _, bc := range benchmarkCases { if bc.readPercentage == 100 { // This benchmark doesn't make sense without a warm-up. @@ -1121,7 +1098,7 @@ func BenchmarkIntegerMapStandard_NoWarmUp(b *testing.B) { } b.Run(bc.name, func(b *testing.B) { var m sync.Map - benchmarkMapOfIntegerKeys(b, func(k int) (value int, ok bool) { + benchmarkMapOfIntKeys(b, func(k int) (value int, ok bool) { v, ok := m.Load(k) if ok { return v.(int), ok @@ -1139,7 +1116,7 @@ func BenchmarkIntegerMapStandard_NoWarmUp(b *testing.B) { // This is a nice scenario for sync.Map since a lot of updates // will hit the readOnly part of the map. 
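+// (sync.Map serves loads of frequently accessed keys from an atomically
+// accessed read-only map, so a read-mostly workload rarely takes its mutex.)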
-func BenchmarkIntegerMapStandard_WarmUp(b *testing.B) { +func BenchmarkIntMapStandard_WarmUp(b *testing.B) { for _, bc := range benchmarkCases { b.Run(bc.name, func(b *testing.B) { var m sync.Map @@ -1147,7 +1124,7 @@ func BenchmarkIntegerMapStandard_WarmUp(b *testing.B) { m.Store(i, i) } b.ResetTimer() - benchmarkMapOfIntegerKeys(b, func(k int) (value int, ok bool) { + benchmarkMapOfIntKeys(b, func(k int) (value int, ok bool) { v, ok := m.Load(k) if ok { return v.(int), ok @@ -1163,7 +1140,7 @@ func BenchmarkIntegerMapStandard_WarmUp(b *testing.B) { } } -func benchmarkMapOfIntegerKeys( +func benchmarkMapOfIntKeys( b *testing.B, loadFn func(k int) (int, bool), storeFn func(k int, v int), @@ -1189,7 +1166,7 @@ func benchmarkMapOfIntegerKeys( } func BenchmarkMapOfRange(b *testing.B) { - m := NewMapOfPresized[int](benchmarkNumEntries) + m := NewMapOfPresized[string, int](benchmarkNumEntries) for i := 0; i < benchmarkNumEntries; i++ { m.Store(benchmarkKeys[i], i) } diff --git a/mpmcqueue_test.go b/mpmcqueue_test.go index 10c416f..2d3cb09 100644 --- a/mpmcqueue_test.go +++ b/mpmcqueue_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - . "github.com/puzpuzpuz/xsync/v2" + . "github.com/puzpuzpuz/xsync/v3" ) func TestQueue_InvalidSize(t *testing.T) { diff --git a/mpmcqueueof_test.go b/mpmcqueueof_test.go index 7f4e161..aed7480 100644 --- a/mpmcqueueof_test.go +++ b/mpmcqueueof_test.go @@ -15,7 +15,7 @@ import ( "testing" "time" - . "github.com/puzpuzpuz/xsync/v2" + . "github.com/puzpuzpuz/xsync/v3" ) func TestQueueOf_InvalidSize(t *testing.T) { diff --git a/rbmutex.go b/rbmutex.go index c4a503f..a20a141 100644 --- a/rbmutex.go +++ b/rbmutex.go @@ -74,7 +74,7 @@ func (mu *RBMutex) RLock() *RToken { t, ok := rtokenPool.Get().(*RToken) if !ok { t = new(RToken) - t.slot = fastrand() + t.slot = runtime_fastrand() } // Try all available slots to distribute reader threads to slots. for i := 0; i < len(mu.rslots); i++ { diff --git a/rbmutex_test.go b/rbmutex_test.go index 9ccfe53..4808578 100644 --- a/rbmutex_test.go +++ b/rbmutex_test.go @@ -11,7 +11,7 @@ import ( "sync/atomic" "testing" - . "github.com/puzpuzpuz/xsync/v2" + . "github.com/puzpuzpuz/xsync/v3" ) func TestRBMutexSerialReader(t *testing.T) { diff --git a/util.go b/util.go index d8a64f8..7368912 100644 --- a/util.go +++ b/util.go @@ -1,10 +1,7 @@ package xsync import ( - "hash/maphash" - "reflect" "runtime" - "unsafe" _ "unsafe" ) @@ -44,20 +41,6 @@ func parallelism() uint32 { return numCores } -// hashString calculates a hash of s with the given seed. -func hashString(seed maphash.Seed, s string) uint64 { - seed64 := *(*uint64)(unsafe.Pointer(&seed)) - if s == "" { - return seed64 - } - strh := (*reflect.StringHeader)(unsafe.Pointer(&s)) - return uint64(memhash(unsafe.Pointer(strh.Data), uintptr(seed64), uintptr(strh.Len))) -} - -//go:noescape -//go:linkname memhash runtime.memhash -func memhash(p unsafe.Pointer, h, s uintptr) uintptr - //go:noescape -//go:linkname fastrand runtime.fastrand -func fastrand() uint32 +//go:linkname runtime_fastrand runtime.fastrand +func runtime_fastrand() uint32 diff --git a/util_hash.go b/util_hash.go new file mode 100644 index 0000000..9588dcd --- /dev/null +++ b/util_hash.go @@ -0,0 +1,77 @@ +package xsync + +import ( + "reflect" + "unsafe" +) + +// makeSeed creates a random seed. +func makeSeed() uint64 { + var s1 uint32 + for { + s1 = runtime_fastrand() + // We use seed 0 to indicate an uninitialized seed/hash, + // so keep trying until we get a non-zero seed. 
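+		// runtime_fastrand returns a uniformly distributed uint32, so the
+		// chance of having to retry (i.e. drawing a zero) is about 2^-32.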
+		if s1 != 0 {
+			break
+		}
+	}
+	s2 := runtime_fastrand()
+	return uint64(s1)<<32 | uint64(s2)
+}
+
+// hashString calculates a hash of s with the given seed.
+func hashString(s string, seed uint64) uint64 {
+	if s == "" {
+		return seed
+	}
+	strh := (*reflect.StringHeader)(unsafe.Pointer(&s))
+	return uint64(runtime_memhash(unsafe.Pointer(strh.Data), uintptr(seed), uintptr(strh.Len)))
+}
+
+//go:noescape
+//go:linkname runtime_memhash runtime.memhash
+func runtime_memhash(p unsafe.Pointer, h, s uintptr) uintptr
+
+// makeHasher creates a fast hash function for the given comparable type.
+// It is based on runtime.typehash; the only limitation is that the type
+// must not contain interfaces nested inside.
+func makeHasher[T comparable]() func(T, uint64) uint64 {
+	var zero T
+
+	if reflect.TypeOf(&zero).Elem().Kind() == reflect.Interface {
+		return func(value T, seed uint64) uint64 {
+			iValue := any(value)
+			i := (*iface)(unsafe.Pointer(&iValue))
+			return runtime_typehash64(i.typ, i.word, seed)
+		}
+	} else {
+		var iZero any = zero
+		i := (*iface)(unsafe.Pointer(&iZero))
+		return func(value T, seed uint64) uint64 {
+			return runtime_typehash64(i.typ, unsafe.Pointer(&value), seed)
+		}
+	}
+}
+
+// iface mirrors how an interface value is represented in memory.
+type iface struct {
+	typ  uintptr
+	word unsafe.Pointer
+}
+
+// runtime_typehash64 works like runtime_typehash, but always returns a uint64.
+// See the maphash.rthash function for details.
+func runtime_typehash64(t uintptr, p unsafe.Pointer, seed uint64) uint64 {
+	if unsafe.Sizeof(uintptr(0)) == 8 {
+		return uint64(runtime_typehash(t, p, uintptr(seed)))
+	}
+
+	lo := runtime_typehash(t, p, uintptr(seed))
+	hi := runtime_typehash(t, p, uintptr(seed>>32))
+	return uint64(hi)<<32 | uint64(lo)
+}
+
+//go:noescape
+//go:linkname runtime_typehash runtime.typehash
+func runtime_typehash(t uintptr, p unsafe.Pointer, h uintptr) uintptr
diff --git a/util_hash_test.go b/util_hash_test.go
new file mode 100644
index 0000000..8b41e60
--- /dev/null
+++ b/util_hash_test.go
@@ -0,0 +1,225 @@
+//go:build go1.20
+// +build go1.20
+
+package xsync_test
+
+import (
+	"fmt"
+	"hash/maphash"
+	"testing"
+	"unsafe"
+
+	. "github.com/puzpuzpuz/xsync/v3"
+)
+
+func TestMakeHashFunc(t *testing.T) {
+	type User struct {
+		Name string
+		City string
+	}
+
+	seed := MakeSeed()
+
+	hashString := MakeHasher[string]()
+	hashUser := MakeHasher[User]()
+	hashAny := MakeHasher[any]() // this declaration requires go 1.20+
+
+	hashUserMap := makeMapHasher[User]()
+
+	// Not that much to test TBH.
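+	// Still, the checks below cover the key invariants: distinct values should
+	// (almost always) hash differently, hashing must be deterministic for a
+	// fixed seed, and the result should agree with the built-in map's hasher.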
+ + // check that hash is not always the same + for i := 0; ; i++ { + if hashString("foo", seed) != hashString("bar", seed) { + break + } + if i >= 100 { + t.Error("hashString is always the same") + break + } + + seed = MakeSeed() // try with a new seed + } + + // do the same for hash any + for i := 0; ; i++ { + if hashAny("foo", seed) != hashAny("bar", seed) { + break + } + if i >= 100 { + t.Error("hashAny is always the same") + break + } + + seed = MakeSeed() // try with a new seed + } + + if hashString("foo", seed) != hashString("foo", seed) { + t.Error("hashString is not deterministic") + } + + if hashUser(User{Name: "John", City: "New York"}, seed) != hashUser(User{Name: "John", City: "New York"}, seed) { + t.Error("hashUser is not deterministic") + } + + if hashAny(User{Name: "John", City: "New York"}, seed) != hashAny(User{Name: "John", City: "New York"}, seed) { + t.Error("hashAny is not deterministic") + } + + // just for fun, compare with native hash function + if hashUser(User{Name: "John", City: "New York"}, seed) != hashUserMap(User{Name: "John", City: "New York"}, seed) { + t.Error("hashUser and hashUserNative return different values") + } +} + +func BenchmarkMapHashString(b *testing.B) { + fn := func(seed maphash.Seed, s string) uint64 { + var h maphash.Hash + h.SetSeed(seed) + h.WriteString(s) + return h.Sum64() + } + seed := maphash.MakeSeed() + for i := 0; i < b.N; i++ { + _ = fn(seed, benchmarkKeyPrefix) + } + // about 13ns/op on x86-64 +} + +func BenchmarkHashString(b *testing.B) { + seed := MakeSeed() + for i := 0; i < b.N; i++ { + _ = HashString(benchmarkKeyPrefix, seed) + } + // about 4ns/op on x86-64 +} + +func makeMapHasher[T comparable]() func(T, uint64) uint64 { + hasher := makeMapHasherInternal(make(map[T]struct{})) + + is64Bit := unsafe.Sizeof(uintptr(0)) == 8 + + if is64Bit { + return func(value T, seed uint64) uint64 { + seed64 := *(*uint64)(unsafe.Pointer(&seed)) + return uint64(hasher(runtime_noescape(unsafe.Pointer(&value)), uintptr(seed64))) + } + } else { + return func(value T, seed uint64) uint64 { + seed64 := *(*uint64)(unsafe.Pointer(&seed)) + lo := hasher(runtime_noescape(unsafe.Pointer(&value)), uintptr(seed64)) + hi := hasher(runtime_noescape(unsafe.Pointer(&value)), uintptr(seed64>>32)) + return uint64(hi)<<32 | uint64(lo) + } + } +} + +//go:noescape +//go:linkname runtime_noescape runtime.noescape +func runtime_noescape(p unsafe.Pointer) unsafe.Pointer + +type nativeHasher func(unsafe.Pointer, uintptr) uintptr + +//lint:ignore U1000 unused fields are necessary to access the hasher +func makeMapHasherInternal(mapValue any) nativeHasher { + // go/src/runtime/type.go + type tflag uint8 + type nameOff int32 + type typeOff int32 + + // go/src/runtime/type.go + type _type struct { + size uintptr + ptrdata uintptr + hash uint32 + tflag tflag + align uint8 + fieldAlign uint8 + kind uint8 + equal func(unsafe.Pointer, unsafe.Pointer) bool + gcdata *byte + str nameOff + ptrToThis typeOff + } + + // go/src/runtime/type.go + type maptype struct { + typ _type + key *_type + elem *_type + bucket *_type + // function for hashing keys (ptr to key, seed) -> hash + hasher nativeHasher + keysize uint8 + elemsize uint8 + bucketsize uint16 + flags uint32 + } + + type mapiface struct { + typ *maptype + val uintptr + } + + i := (*mapiface)(unsafe.Pointer(&mapValue)) + return i.typ.hasher +} + +func BenchmarkMakeHashFunc(b *testing.B) { + type Point struct { + X, Y, Z int + } + + type User struct { + ID int + FirstName string + LastName string + IsActive bool + City 
string + } + + type PadInside struct { + A int + B byte + C int + } + + type PadTrailing struct { + A int + B byte + } + + doBenchmarkMakeHashFunc(b, int64(116)) + doBenchmarkMakeHashFunc(b, int32(116)) + doBenchmarkMakeHashFunc(b, 3.14) + doBenchmarkMakeHashFunc(b, "test key test key test key test key test key test key test key test key test key ") + doBenchmarkMakeHashFunc(b, Point{1, 2, 3}) + doBenchmarkMakeHashFunc(b, User{ID: 1, FirstName: "Ivan", LastName: "Ivanov", IsActive: true, City: "Sofia"}) + doBenchmarkMakeHashFunc(b, PadInside{}) + doBenchmarkMakeHashFunc(b, PadTrailing{}) + doBenchmarkMakeHashFunc(b, [1024]byte{}) + doBenchmarkMakeHashFunc(b, [128]Point{}) + doBenchmarkMakeHashFunc(b, [128]User{}) + doBenchmarkMakeHashFunc(b, [128]PadInside{}) + doBenchmarkMakeHashFunc(b, [128]PadTrailing{}) +} + +func doBenchmarkMakeHashFunc[T comparable](b *testing.B, val T) { + hash := MakeHasher[T]() + hashNativeMap := makeMapHasher[T]() + seed := MakeSeed() + + b.Run(fmt.Sprintf("%T normal", val), func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _ = hash(val, seed) + } + }) + + b.Run(fmt.Sprintf("%T map native", val), func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _ = hashNativeMap(val, seed) + } + }) +} diff --git a/util_mapof.go b/util_mapof.go deleted file mode 100644 index fbb00c4..0000000 --- a/util_mapof.go +++ /dev/null @@ -1,22 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -package xsync - -import ( - "hash/maphash" - "unsafe" -) - -// hashUint64 calculates a hash of v with the given seed. -// -//lint:ignore U1000 used in MapOf -func hashUint64[K IntegerConstraint](seed maphash.Seed, k K) uint64 { - n := uint64(k) - // Java's Long standard hash function. - n = n ^ (n >> 32) - nseed := *(*uint64)(unsafe.Pointer(&seed)) - // 64-bit variation of boost's hash_combine. - nseed ^= n + 0x9e3779b97f4a7c15 + (nseed << 12) + (nseed >> 4) - return nseed -} diff --git a/util_test.go b/util_test.go index 32b3a3d..8a07f00 100644 --- a/util_test.go +++ b/util_test.go @@ -1,11 +1,10 @@ package xsync_test import ( - "hash/maphash" "math/rand" "testing" - . "github.com/puzpuzpuz/xsync/v2" + . "github.com/puzpuzpuz/xsync/v3" ) func TestNextPowOf2(t *testing.T) { @@ -52,25 +51,3 @@ func BenchmarkRand(b *testing.B) { } // about 12 ns/op on x86-64 } - -func BenchmarkMapHashString(b *testing.B) { - fn := func(seed maphash.Seed, s string) uint64 { - var h maphash.Hash - h.SetSeed(seed) - h.WriteString(s) - return h.Sum64() - } - seed := maphash.MakeSeed() - for i := 0; i < b.N; i++ { - _ = fn(seed, benchmarkKeyPrefix) - } - // about 13ns/op on x86-64 -} - -func BenchmarkHashString(b *testing.B) { - seed := maphash.MakeSeed() - for i := 0; i < b.N; i++ { - _ = HashString(seed, benchmarkKeyPrefix) - } - // about 4ns/op on x86-64 -}