diff --git a/.github/workflows/go.yaml b/.github/workflows/go.yaml
index eb6d708..5a8c7a3 100644
--- a/.github/workflows/go.yaml
+++ b/.github/workflows/go.yaml
@@ -18,7 +18,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
- go-version: "1.20"
+ go-version: "1.21"
- name: Test
run: |
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..d48c759
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+.idea
+.vscode
\ No newline at end of file
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..f6c405d
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,10 @@
+test:
+ go test -race ./...
+bench:
+	go test -run=^$$ -bench=. ./...
+build:
+ protoc --go_out=. --go_opt=paths=source_relative \
+ --go-grpc_out=. --go-grpc_opt=paths=source_relative \
+ ./servicepb/service.proto
+doc:
+ pkgsite -gorepo ./...
\ No newline at end of file
diff --git a/cache.go b/cache.go
index 39e5ca9..735a3c0 100644
--- a/cache.go
+++ b/cache.go
@@ -1,98 +1,111 @@
package nitecache
import (
+ "context"
"errors"
"fmt"
- "regexp"
+ "github.com/MysteriousPotato/nitecache/servicepb"
+ "google.golang.org/grpc/credentials/insecure"
"sync"
"time"
+
+ "github.com/MysteriousPotato/nitecache/hashring"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
)
var (
- addrReg = regexp.MustCompile("^[^:]+:[0-9]{2,5}$")
- ErrDuplicatePeer = errors.New("duplicate peer detected")
- ErrInvalidPeerAddr = errors.New("invalid peer address")
- ErrTableNotFound = errors.New("table not found")
- ErrMissingSelfInPeers = errors.New("peers must contain the current node")
- ErrMissingMembers = errors.New("peers must contain at least one member")
+ ErrDuplicatePeer = errors.New("duplicate peer detected")
+ ErrTableNotFound = errors.New("table not found")
+ ErrMissingMembers = errors.New("peers must contain at least one member")
+ ErrCacheDestroyed = errors.New("can't use cache after tear down")
)
-type CacheOpt func(c *Cache)
-
-type Cache struct {
- ring *hashring
- selfID string
- clients clients
- mu *sync.RWMutex
- tables map[string]itable
- metrics *Metrics
- closeCh chan bool
- virtualNodes int
- //Defaults to FNV-1
- hashFunc HashFunc
- //Defaults to 2 seconds
- timeout time.Duration
- //opt to skip server start
- testMode bool
-}
-
-type itable interface {
+type (
+ CacheOpt func(c *Cache)
+	// Cache holds the nitecache instance. The zero value is not ready for use.
+ //
+ // Refer to [NewCache] for creating an instance.
+ Cache struct {
+ ring *hashring.Ring
+ self Member
+ clients clients
+ clientMu *sync.Mutex
+ tables map[string]table
+ tablesMu *sync.Mutex
+ metrics *metrics
+ virtualNodes int
+ hashFunc hashring.HashFunc
+ timeout time.Duration
+ members []Member
+ grpcOpts []grpc.ServerOption
+ service server
+ transportCredentials credentials.TransportCredentials
+ }
+)
+
+type Member struct {
+ ID string
+ Addr string
+}
+
+type table interface {
getLocally(key string) (item, error)
- putLocally(itm item)
- evictLocally(key string)
- executeLocally(key, function string, args []byte) (item, error)
- TearDown()
+ putLocally(itm item) error
+ evictLocally(key string) error
+ callLocally(ctx context.Context, key, function string, args []byte) (item, error)
+ tearDown()
}
// NewCache Creates a new [Cache] instance
-// This should only be called once for a same set of peers, so that gRPC connections can be reused
-// Create a new [Table] instead if you need to store different values
-func NewCache(selfID string, peers []Member, opts ...CacheOpt) (*Cache, error) {
+//
+// This should only be called once for a same set of peers, so that connections can be reused.
+//
+// Create a new [Table] using [NewTable] if you need to store different values.
+//
+// Members must have a unique ID and Addr.
+//
+// Ex.:
+//
+// func() {
+// self := nitecache.Member{ID: "1", Addr: "localhost:8000"}
+//		c, err := nitecache.NewCache(self, []nitecache.Member{self})
+// ...
+// }
+func NewCache(self Member, peers []Member, opts ...CacheOpt) (*Cache, error) {
c := &Cache{
- selfID: selfID,
- tables: make(map[string]itable),
- mu: &sync.RWMutex{},
- clients: clients{},
- metrics: newMetrics(),
- closeCh: make(chan bool),
- virtualNodes: 32,
- hashFunc: defaultHashFunc,
+ self: self,
+ clients: clients{},
+ clientMu: &sync.Mutex{},
+ tables: make(map[string]table),
+ tablesMu: &sync.Mutex{},
+ metrics: newMetrics(),
+ virtualNodes: 32,
+ hashFunc: hashring.DefaultHashFunc,
+ timeout: time.Second * 3,
+ members: []Member{},
+ transportCredentials: insecure.NewCredentials(),
}
for _, opt := range opts {
opt(c)
}
- var self Member
- for _, p := range peers {
- if p.ID == selfID {
- self = p
+ var peersIncludeSelf bool
+ for _, peer := range peers {
+ if peer.ID == self.ID {
+ peersIncludeSelf = true
break
}
}
- if self == (Member{}) {
- return nil, ErrMissingSelfInPeers
+ if !peersIncludeSelf {
+ peers = append(peers, self)
}
- if !c.testMode {
- server, start, err := newServer(self.Addr, c)
- if err != nil {
- return nil, fmt.Errorf("unable to create cache server: %w", err)
- }
- go func() {
- if err := start(); err != nil {
- panic(fmt.Errorf("unable to create cache server: %w", err))
- }
- }()
- go func() {
- ticker := time.NewTicker(time.Second)
- for range ticker.C {
- select {
- case <-c.closeCh:
- server.Stop()
- }
- }
- }()
+ var err error
+ c.service, err = newService(self.Addr, c)
+ if err != nil {
+ return nil, fmt.Errorf("unable to create cache service: %w", err)
}
if err := c.SetPeers(peers); err != nil {
@@ -102,14 +115,16 @@ func NewCache(selfID string, peers []Member, opts ...CacheOpt) (*Cache, error) {
return c, nil
}
-// VirtualNodeOpt sets the number of points on the hashring per node
+// VirtualNodeOpt sets the number of points/node on the hashring
+// Defaults to 32
func VirtualNodeOpt(nodes int) func(c *Cache) {
return func(c *Cache) {
c.virtualNodes = nodes
}
}
-// TimeoutOpt sets the timeout for grpc client timeout
+// TimeoutOpt sets the timeout for grpc clients
+// Defaults to 3 seconds
func TimeoutOpt(timeout time.Duration) func(c *Cache) {
return func(c *Cache) {
c.timeout = timeout
@@ -117,105 +132,143 @@ func TimeoutOpt(timeout time.Duration) func(c *Cache) {
}
// HashFuncOpt sets the hash function used to determine hashring keys
-func HashFuncOpt(hashFunc HashFunc) func(c *Cache) {
+// Defaults to FNV-1 algorithm
+func HashFuncOpt(hashFunc hashring.HashFunc) func(c *Cache) {
return func(c *Cache) {
c.hashFunc = hashFunc
}
}
-func testModeOpt(c *Cache) {
- c.testMode = true
+// GRPCTransportCredentials sets the credentials for the gRPC server
+func GRPCTransportCredentials(opts credentials.TransportCredentials) func(c *Cache) {
+ return func(c *Cache) {
+ c.transportCredentials = opts
+ }
+}
+
+// GRPCServerOpts sets the options when creating the gRPC service.
+func GRPCServerOpts(opts ...grpc.ServerOption) func(c *Cache) {
+ return func(c *Cache) {
+ c.grpcOpts = opts
+ }
}
-// GetMetrics Can safely be called from a goroutine, returns a copy of the current cache Metrics.
-// For Metrics specific to a [Table], refer to [Table.GetMetrics]
-func (c *Cache) GetMetrics() Metrics {
- return c.metrics.getCopy()
+// GetMetrics Returns a copy of the current cache Metrics.
+// For Metrics specific to a [Table], refer to [Table.GetMetrics].
+func (c *Cache) GetMetrics() (Metrics, error) {
+ if c.isZero() {
+ return Metrics{}, ErrCacheDestroyed
+ }
+ return c.metrics.getCopy(), nil
}
+// SetPeers will update the cache members to the new value.
func (c *Cache) SetPeers(peers []Member) error {
+ if c.isZero() {
+ return ErrCacheDestroyed
+ }
+
if len(peers) == 0 {
return ErrMissingMembers
}
- membersAddrMap := map[string]any{}
- membersIDMap := map[string]any{}
+ membersAddrMap := map[string]struct{}{}
+ membersIDMap := map[string]struct{}{}
var containsSelf bool
for _, p := range peers {
- if ok := addrReg.MatchString(p.Addr); !ok {
- return fmt.Errorf("%w: %v", ErrInvalidPeerAddr, p.Addr)
- }
if _, ok := membersAddrMap[p.Addr]; ok {
return fmt.Errorf("%w for Address %v", ErrDuplicatePeer, p.Addr)
}
if _, ok := membersIDMap[p.ID]; ok {
return fmt.Errorf("%w for ID %v", ErrDuplicatePeer, p.ID)
}
- if c.selfID == p.ID {
+ if c.self.ID == p.ID {
containsSelf = true
}
- membersAddrMap[p.Addr] = nil
- membersIDMap[p.ID] = nil
+ membersAddrMap[p.Addr] = struct{}{}
+ membersIDMap[p.ID] = struct{}{}
}
if !containsSelf {
- return ErrMissingSelfInPeers
+ peers = append(peers, c.self)
}
- members := make(Members, len(peers))
+ members := make([]string, len(peers))
for i, p := range peers {
- members[i] = p
+ members[i] = p.ID
}
var err error
if c.ring == nil {
- c.ring, err = newRing(
- ringCfg{
- Members: members,
- VirtualNodes: c.virtualNodes,
- HashFunc: c.hashFunc,
- },
- )
+ c.ring, err = hashring.New(hashring.Opt{
+ Members: members,
+ VirtualNodes: c.virtualNodes,
+ HashFunc: c.hashFunc,
+ })
if err != nil {
return fmt.Errorf("unable to create hashring: %w", err)
}
} else {
- if err := c.ring.setMembers(members); err != nil {
+ if err := c.ring.SetMembers(members); err != nil {
return fmt.Errorf("unable to update hashring: %w", err)
}
}
- if err := c.clients.set(peers, c.timeout); err != nil {
+ if err := c.setClients(peers); err != nil {
return err
}
return nil
}
-// TearDown Call this whenever a cache is not needed anymore.
+// TearDown properly tears down all [Table] from [Cache], closes all client connections and stops the grpc server.
//
-// It will properly teardown all [Table]s from [Cache], close all client connections and stop the gRPC server
+// Once called, using it or any of its table references causes [ErrCacheDestroyed] to be returned.
func (c *Cache) TearDown() error {
+ if c.isZero() {
+ return ErrCacheDestroyed
+ }
+
var errs []error
- // Stop server on next tick
- c.closeCh <- true
- // Close all client connections
- for _, c := range c.clients {
- if err := c.conn.Close(); err != nil {
+ for _, client := range c.clients {
+ if err := client.conn.Close(); err != nil {
errs = append(errs, err)
}
}
- // Teardown tables
- for _, t := range c.tables {
- t.TearDown()
+
+ c.service.server.GracefulStop()
+
+ for i := range c.tables {
+ c.tables[i].tearDown()
}
+ *c = Cache{}
- if errs != nil {
- return errors.Join(errs...)
+ return errors.Join(errs...)
+}
+
+// ListenAndServe starts the cache grpc server
+func (c *Cache) ListenAndServe() error {
+ if c.isZero() {
+ return ErrCacheDestroyed
}
- return nil
+ return c.service.server.Serve(c.service.listener)
}
-func (c *Cache) getTable(name string) (itable, error) {
+// HealthCheckPeers checks the status of the cache's grpc clients
+func (c *Cache) HealthCheckPeers(ctx context.Context) error {
+ if c.isZero() {
+ return ErrCacheDestroyed
+ }
+
+ var errs []error
+ for _, client := range c.clients {
+ if _, err := client.HealthCheck(ctx, &servicepb.Empty{}); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ return errors.Join(errs...)
+}
+
+func (c *Cache) getTable(name string) (table, error) {
t, ok := c.tables[name]
if !ok {
return nil, ErrTableNotFound
@@ -224,10 +277,51 @@ func (c *Cache) getTable(name string) (itable, error) {
return t, nil
}
-func (c *Cache) getClient(p Member) (client, error) {
- cl, ok := c.clients[p.ID]
+func (c *Cache) getClient(p string) (*client, error) {
+ cl, ok := c.clients[p]
if !ok {
- return client{}, fmt.Errorf("unable to find peer client with ID %v", p.ID)
+ return nil, fmt.Errorf("unable to find peer client with ID %v", p)
}
return cl, nil
}
+
+// Cleanup clients that are not present in peers and create new clients for new peers
+func (c *Cache) setClients(peers []Member) error {
+ c.clientMu.Lock()
+ defer c.clientMu.Unlock()
+
+ peersMap := map[string]Member{}
+ for _, p := range peers {
+ peersMap[p.ID] = p
+ }
+
+ var errs []error
+ for id := range c.clients {
+ if _, ok := peersMap[id]; !ok {
+ if err := c.clients[id].conn.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ delete(c.clients, id)
+ }
+ }
+
+ for id, p := range peersMap {
+ if _, ok := c.clients[id]; ok {
+ continue
+ }
+ client, err := newClient(p.Addr, c)
+ if err != nil {
+ return err
+ }
+ c.clients[id] = client
+ }
+
+ if errs != nil {
+ return errors.Join(errs...)
+ }
+ return nil
+}
+
+func (c *Cache) isZero() bool {
+ return c == nil || c.tables == nil
+}
diff --git a/cache_test.go b/cache_test.go
index 3bf7f64..859b729 100644
--- a/cache_test.go
+++ b/cache_test.go
@@ -1,102 +1,84 @@
-package nitecache
+package nitecache_test
import (
"context"
"errors"
- "github.com/MysteriousPotato/nitecache/test"
"reflect"
"testing"
"time"
+
+ "github.com/MysteriousPotato/nitecache"
+ test "github.com/MysteriousPotato/nitecache/test_utils"
)
func TestCache_SetPeers(t *testing.T) {
- c, err := NewCache(
- "potato",
- []Member{{ID: "potato", Addr: test.GetUniqueAddr()}},
- testModeOpt,
- )
+ self := nitecache.Member{ID: "potato", Addr: test.GetUniqueAddr()}
+ c, err := nitecache.NewCache(self, []nitecache.Member{self})
if err != nil {
- t.Error(err)
+ t.Fatal(err)
}
tests := []struct {
name string
- members []Member
+ members []nitecache.Member
expected error
- }{
- {
- name: "invalid address",
- expected: ErrInvalidPeerAddr,
- members: Members{
- {
- ID: "potato",
- Addr: "potato",
- },
- },
- }, {
- name: "members not including current node",
- expected: ErrMissingSelfInPeers,
- members: Members{
- {
- ID: "zucchini",
- Addr: test.GetUniqueAddr(),
- },
+ }{{
+ name: "duplicate member",
+ expected: nitecache.ErrDuplicatePeer,
+ members: []nitecache.Member{
+ {
+ ID: "potato",
+ Addr: test.GetUniqueAddr(),
+ }, {
+ ID: "potato",
+ Addr: test.GetUniqueAddr(),
},
- }, {
- name: "duplicate member",
- expected: ErrDuplicatePeer,
- members: Members{
- {
- ID: "potato",
- Addr: test.GetUniqueAddr(),
- }, {
- ID: "potato",
- Addr: test.GetUniqueAddr(),
- },
- },
- }, {
- name: "no member",
- expected: ErrMissingMembers,
},
- }
+ }, {
+ name: "no member",
+ expected: nitecache.ErrMissingMembers,
+ }}
for _, tt := range tests {
- t.Run(
- tt.name, func(t *testing.T) {
- if err := c.SetPeers(tt.members); !errors.Is(err, tt.expected) {
- t.Errorf("expected err: %v, got: %v", tt.expected, err)
- }
- },
- )
+ t.Run(tt.name, func(t *testing.T) {
+ if err := c.SetPeers(tt.members); !errors.Is(err, tt.expected) {
+ t.Errorf("expected err: %v, got: %v", tt.expected, err)
+ }
+ })
+ }
+
+ if err := c.TearDown(); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := c.SetPeers(nil); !errors.Is(err, nitecache.ErrCacheDestroyed) {
+ t.Fatalf("expected err: %v\ngot:%v", nitecache.ErrCacheDestroyed, err)
}
}
func TestSingleNodeCacheTable(t *testing.T) {
- self := Member{
+ ctx := context.Background()
+ self := nitecache.Member{
ID: "1",
Addr: test.GetUniqueAddr(),
}
- c, err := NewCache(self.ID, []Member{self})
+ c, err := nitecache.NewCache(self, []nitecache.Member{self})
if err != nil {
- t.Error(err)
+ t.Fatal(err)
}
- defer test.TearDown(c)
- table := NewTable[string]("tt").
+ table := nitecache.NewTable[string]("tt").
WithGetter(
func(key string) (string, time.Duration, error) {
return "empty", time.Hour, nil
},
).
- WithFunction(
- "execute", func(s string, args []byte) (string, time.Duration, error) {
- return "execute", 0, nil
- },
- ).
+ WithProcedure("procedure", func(_ context.Context, _ string, _ []byte) (string, time.Duration, error) {
+ return "procedure", 0, nil
+ }).
Build(c)
- ctx := context.Background()
tests := []struct {
op string
key string
@@ -105,12 +87,12 @@ func TestSingleNodeCacheTable(t *testing.T) {
{op: "get", key: "1"},
{op: "put", key: "1", value: "1"},
{op: "get", key: "1"},
- {op: "execute", key: "1"},
+ {op: "call", key: "1"},
{op: "evict", key: "1"},
{op: "get", key: "1"},
{op: "put", key: "1", value: "2"},
}
- expected := []string{"empty", "1", "execute", "empty"}
+ expected := []string{"empty", "1", "procedure", "empty"}
var got []string
for _, tt := range tests {
@@ -132,8 +114,8 @@ func TestSingleNodeCacheTable(t *testing.T) {
t.Fatal(err)
}
break
- case "execute":
- v, err := table.Execute(ctx, "key", "execute", []byte{})
+ case "call":
+ v, err := table.Call(ctx, "key", "procedure", nil)
if err != nil {
t.Fatal(err)
}
@@ -145,45 +127,84 @@ func TestSingleNodeCacheTable(t *testing.T) {
if !reflect.DeepEqual(got, expected) {
t.Fatalf("\nexpect: %v\ngot: %v", expected, got)
}
+
+ if err := c.TearDown(); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := table.Get(ctx, ""); !errors.Is(err, nitecache.ErrCacheDestroyed) {
+ t.Fatalf("expected err: %v\ngot:%v", nitecache.ErrCacheDestroyed, err)
+ }
+
+ if err := table.Put(ctx, "", "", 0); !errors.Is(err, nitecache.ErrCacheDestroyed) {
+ t.Fatalf("expected err: %v\ngot:%v", nitecache.ErrCacheDestroyed, err)
+ }
+
+ if err := table.Evict(ctx, ""); !errors.Is(err, nitecache.ErrCacheDestroyed) {
+ t.Fatalf("expected err: %v\ngot:%v", nitecache.ErrCacheDestroyed, err)
+ }
+
+ if _, err := table.Call(ctx, "", "", nil); !errors.Is(err, nitecache.ErrCacheDestroyed) {
+ t.Fatalf("expected err: %v\ngot:%v", nitecache.ErrCacheDestroyed, err)
+ }
}
func TestMultiNodeCacheTable(t *testing.T) {
- members := []Member{
+ members := []nitecache.Member{
{
ID: "1",
Addr: test.GetUniqueAddr(),
}, {
ID: "2",
Addr: test.GetUniqueAddr(),
+ }, {
+ ID: "3",
+ Addr: test.GetUniqueAddr(),
},
}
- caches := make([]*Cache, len(members))
- tables := make([]*Table[string], len(members))
+ caches := make([]*nitecache.Cache, len(members))
+ tables := make([]*nitecache.Table[string], len(members))
for i, m := range members {
func() {
- c, err := NewCache(m.ID, members)
+ c, err := nitecache.NewCache(
+ m,
+ members,
+ nitecache.VirtualNodeOpt(1),
+ nitecache.HashFuncOpt(test.SimpleHashFunc),
+ nitecache.TimeoutOpt(time.Second*5),
+ )
if err != nil {
- t.Error(err)
+ t.Fatal(err)
}
- defer test.TearDown(c)
+
+ go func() {
+ if err := c.ListenAndServe(); err != nil {
+ t.Error(err)
+ return
+ }
+ }()
caches[i] = c
- tables[i] = NewTable[string]("test").
+ tables[i] = nitecache.NewTable[string]("test").
WithGetter(
func(key string) (string, time.Duration, error) {
return "empty", time.Hour, nil
},
).
- WithFunction(
- "execute", func(s string, args []byte) (string, time.Duration, error) {
- return "execute", 0, nil
+ WithProcedure(
+ "procedure", func(_ context.Context, _ string, _ []byte) (string, time.Duration, error) {
+ return "procedure", 0, nil
},
).
Build(c)
}()
}
+ for _, c := range caches {
+ test.WaitForServer(t, c)
+ }
+
ctx := context.Background()
tests := []struct {
op string
@@ -193,23 +214,23 @@ func TestMultiNodeCacheTable(t *testing.T) {
{op: "get", key: "1"},
{op: "put", key: "1", value: "1"},
{op: "get", key: "1"},
- {op: "execute", key: "1"},
+ {op: "call", key: "1"},
{op: "evict", key: "1"},
{op: "get", key: "1"},
{op: "put", key: "1", value: "2"},
- {op: "get", key: "3swerwedf"},
- {op: "put", key: "3swerwedf", value: "1"},
- {op: "get", key: "3swerwedf"},
- {op: "execute", key: "3swerwedf"},
- {op: "evict", key: "3swerwedf"},
- {op: "get", key: "3swerwedf"},
- {op: "put", key: "3swerwedf", value: "2"},
+ {op: "get", key: "2"},
+ {op: "put", key: "2", value: "1"},
+ {op: "get", key: "2"},
+ {op: "call", key: "2"},
+ {op: "evict", key: "2"},
+ {op: "get", key: "2"},
+ {op: "put", key: "2", value: "2"},
{op: "evict", key: "1"},
- {op: "evict", key: "3swerwedf"},
+ {op: "evict", key: "2"},
}
for _, table := range tables {
- expected := []string{"empty", "1", "execute", "empty", "empty", "1", "execute", "empty"}
+ expected := []string{"empty", "1", "procedure", "empty", "empty", "1", "procedure", "empty"}
var got []string
for _, tt := range tests {
@@ -231,8 +252,8 @@ func TestMultiNodeCacheTable(t *testing.T) {
t.Fatal(err)
}
break
- case "execute":
- v, err := table.Execute(ctx, tt.key, "execute", []byte{})
+ case "call":
+ v, err := table.Call(ctx, tt.key, "procedure", []byte{})
if err != nil {
t.Fatal(err)
}
diff --git a/codec.go b/codec.go
index ffc7878..4495555 100644
--- a/codec.go
+++ b/codec.go
@@ -1,18 +1,80 @@
package nitecache
-import "encoding/json"
+import (
+ "bytes"
+ "encoding/gob"
+ "encoding/json"
+)
-type Codec[T any] interface {
- Encode(value T) ([]byte, error)
- Decode(bytes []byte, value *T) error
+type (
+ // Codec defines the interface to implement custom marshalling/unmarshalling
+ //
+ // If a generic type of string or []byte is supplied to the table, nitecache will automatically use [StringCodec]
+ // Otherwise nitecache will default to [JsonCodec].
+ //
+ // Example:
+ //
+ // type (
+ // Coord struct {
+ // x, y float64
+ // }
+ // CoordCodec struct{}
+ // )
+ //
+ // func (c CoordCodec) Decode(b []byte, v *Coord) error {
+ // buf := bytes.NewBuffer(b)
+ // _, err := fmt.Fscanln(buf, &v.x, &v.y)
+ // return err
+ // }
+ //
+ // func (c CoordCodec) Encode(v Coord) ([]byte, error) {
+ // var buf bytes.Buffer
+ // if _, err := fmt.Fprintln(&buf, v.x, v.y); err != nil {
+ // return nil, err
+ // }
+ // return buf.Bytes(), nil
+ // }
+ Codec[T any] interface {
+ Encode(value T) ([]byte, error)
+ Decode(bytes []byte, value *T) error
+ }
+ // StringCodec implements [Codec] by preventing unnecessary marshalling/unmarshalling for string-like tables.
+ //
+ // This codec will automatically be used for string/[]byte tables.
+	// However, nitecache won't use it for string/[]byte aliases, so you'll have to supply the codec yourself.
+ StringCodec[T ~string | ~[]byte] struct{}
+ // JsonCodec implements [Codec] using [encoding/json]
+ //
+	// This is the default codec for all types except []byte and string.
+ JsonCodec[T any] struct{}
+ // GobCodec implements [Codec] using [encoding/gob]
+ GobCodec[T any] struct{}
+)
+
+func (c StringCodec[T]) Decode(b []byte, v *T) error {
+ *v = T(b)
+ return nil
}
-type jsonCodec[T any] struct{}
+func (c StringCodec[T]) Encode(v T) ([]byte, error) {
+ return []byte(v), nil
+}
-func (c jsonCodec[T]) Decode(b []byte, v *T) error {
+func (c JsonCodec[T]) Decode(b []byte, v *T) error {
return json.Unmarshal(b, v)
}
-func (c jsonCodec[T]) Encode(v T) ([]byte, error) {
+func (c JsonCodec[T]) Encode(v T) ([]byte, error) {
return json.Marshal(v)
}
+
+func (c GobCodec[T]) Decode(b []byte, v *T) error {
+ buf := bytes.NewBuffer(b)
+ return gob.NewDecoder(buf).Decode(v)
+}
+
+func (c GobCodec[T]) Encode(v T) ([]byte, error) {
+ var buf bytes.Buffer
+ err := gob.NewEncoder(&buf).Encode(v)
+ return buf.Bytes(), err
+}
diff --git a/codec_test.go b/codec_test.go
new file mode 100644
index 0000000..72f1c9a
--- /dev/null
+++ b/codec_test.go
@@ -0,0 +1,97 @@
+package nitecache_test
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "github.com/MysteriousPotato/nitecache"
+ "github.com/MysteriousPotato/nitecache/test_utils"
+ "reflect"
+ "testing"
+)
+
+type (
+ codecTest[T any] struct {
+ decoded T
+ encoded []byte
+ }
+ Coord struct {
+ x, y float64
+ }
+ CoordCodec struct{}
+)
+
+func (c CoordCodec) Decode(b []byte, v *Coord) error {
+ buf := bytes.NewBuffer(b)
+ _, err := fmt.Fscanln(buf, &v.x, &v.y)
+ return err
+}
+
+func (c CoordCodec) Encode(v Coord) ([]byte, error) {
+ var buf bytes.Buffer
+ if _, err := fmt.Fprintln(&buf, v.x, v.y); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+func TestStringCodec(t *testing.T) {
+ type strLike []byte
+ strLikeTests := []codecTest[strLike]{
+ {
+ decoded: strLike("potato"),
+ encoded: []byte("potato"),
+ }, {
+ decoded: strLike("'potato'"),
+ encoded: []byte("'potato'"),
+ }, {
+ decoded: strLike("123"),
+ encoded: []byte("123"),
+ },
+ }
+ strLikeCodec := nitecache.StringCodec[strLike]{}
+
+ for _, strTest := range strLikeTests {
+ encoded, err := strLikeCodec.Encode(strTest.decoded)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(encoded, strTest.encoded) {
+ t.Errorf("expected str encoded: %v, got: %v", strTest.encoded, encoded)
+ }
+
+ var decoded strLike
+ if err = strLikeCodec.Decode(strTest.encoded, &decoded); err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(decoded, strTest.decoded) {
+ t.Errorf("expected str decoded: %v, got: %v", strTest.decoded, decoded)
+ }
+ }
+}
+
+func TestCustomCodec(t *testing.T) {
+ ctx := context.Background()
+ self := nitecache.Member{ID: "1", Addr: test.GetUniqueAddr()}
+ c, err := nitecache.NewCache(self, []nitecache.Member{self})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ table := nitecache.NewTable[Coord]("coord").WithCodec(CoordCodec{}).Build(c)
+ expected := Coord{
+ x: 101.143,
+ y: 32.766,
+ }
+ if err = table.Put(ctx, "test", expected, 0); err != nil {
+ t.Fatal(err)
+ }
+
+ v, err := table.Get(ctx, "test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(v, expected) {
+ t.Errorf("expected: %+v, got: %+v", expected, v)
+ }
+}
diff --git a/eviction_policy.go b/eviction_policy.go
deleted file mode 100644
index b8f31c5..0000000
--- a/eviction_policy.go
+++ /dev/null
@@ -1,214 +0,0 @@
-package nitecache
-
-import (
- "container/list"
- "sync"
-)
-
-// EvictionPolicy Currently supports lru, lfu and no eviction policy
-//
-// For lru and lfu implementations, threshold represents the number of items at which the policy will start eviction.
-type EvictionPolicy interface {
- push(key string)
- evict(key string)
- apply()
- setEvictFn(onEvict func(key string))
-}
-
-type NoEvictionPolicy struct{}
-
-func (n NoEvictionPolicy) push(_ string) {}
-func (n NoEvictionPolicy) evict(_ string) {}
-func (n NoEvictionPolicy) setEvictFn(_ func(key string)) {}
-func (n NoEvictionPolicy) apply() {}
-
-type lru struct {
- threshold int64
- evictionQueue *list.List
- hashMap map[string]*list.Element
- size int64
- mu *sync.Mutex
- onEvict func(key string)
-}
-
-// NewLruPolicy see [EvictionPolicy]
-func NewLruPolicy(threshold int64) EvictionPolicy {
- return &lru{
- threshold: threshold,
- evictionQueue: list.New(),
- hashMap: make(map[string]*list.Element),
- mu: &sync.Mutex{},
- }
-}
-
-func (l *lru) setEvictFn(onEvict func(key string)) {
- l.onEvict = onEvict
-}
-
-func (l *lru) push(key string) {
- l.mu.Lock()
- defer l.mu.Unlock()
-
- ele, ok := l.hashMap[key]
- if ok {
- ele.Value = &key
- l.evictionQueue.MoveToBack(ele)
- } else {
- v := &key
- l.hashMap[key] = l.evictionQueue.PushBack(v)
- l.size += 1
- }
-}
-
-func (l *lru) evict(key string) {
- l.mu.Lock()
- defer l.mu.Unlock()
-
- ele, ok := l.hashMap[key]
- if ok {
- l.size -= 1
- l.evictionQueue.Remove(ele)
- }
-}
-
-func (l *lru) apply() {
- l.mu.Lock()
- defer l.mu.Unlock()
-
- for l.size > l.threshold {
- ele := l.evictionQueue.Front()
- key := *ele.Value.(*string)
-
- l.size -= 1
- l.onEvict(key)
- l.evictionQueue.Remove(ele)
- delete(l.hashMap, key)
- }
-}
-
-// see [EvictionPolicy]
-type lfu struct {
- threshold int
- size int
- freqList *list.List
- hashMap map[string]*lfuEntry
- mu *sync.Mutex
- onEvict func(key string)
-}
-
-type lfuEntry struct {
- key string
- parent *list.Element
-}
-
-type lfuNode struct {
- count int
- entries map[string]*lfuEntry
-}
-
-// NewLfuPolicy see [EvictionPolicy]
-func NewLfuPolicy(threshold int) EvictionPolicy {
- return &lfu{
- threshold: threshold,
- freqList: list.New(),
- hashMap: make(map[string]*lfuEntry),
- mu: &sync.Mutex{},
- }
-}
-
-func (l *lfu) setEvictFn(onEvict func(key string)) {
- l.onEvict = onEvict
-}
-
-func (l *lfu) push(key string) {
- l.mu.Lock()
- defer l.mu.Unlock()
-
- //Upsert the entry and update cache size
- entry, ok := l.hashMap[key]
- if !ok {
- entry = &lfuEntry{key: key}
- l.hashMap[key] = entry
- l.size += 1
- }
-
- if entry.parent == nil {
- //create a new freqList node if necessary && Add the new entry to the freqList node
- first := l.freqList.Front()
- if first == nil || first.Value.(*lfuNode).count != 0 {
- entry.parent = l.freqList.PushFront(
- &lfuNode{
- count: 0,
- entries: map[string]*lfuEntry{
- key: entry,
- },
- },
- )
- } else {
- first.Value.(*lfuNode).entries[key] = l.hashMap[key]
- entry.parent = first
- }
- } else {
- //Create a new freqList node if necessary && move the entry to the next freqList node
- prevNode := entry.parent
- nextCount := prevNode.Value.(*lfuNode).count + 1
-
- if next := entry.parent.Next(); next != nil && next.Value.(*lfuNode).count == nextCount {
- next.Value.(*lfuNode).entries[key] = entry
- entry.parent = next
- } else {
- entry.parent = l.freqList.InsertAfter(
- &lfuNode{
- count: nextCount,
- entries: map[string]*lfuEntry{
- key: entry,
- },
- }, entry.parent,
- )
- }
- l.unsafeRemoveFreqEntry(prevNode, entry)
- }
-}
-
-func (l *lfu) evict(key string) {
- l.mu.Lock()
- defer l.mu.Unlock()
-
- value, ok := l.hashMap[key]
- if ok {
- delete(l.hashMap, value.key)
- l.unsafeRemoveFreqEntry(value.parent, value)
- }
-}
-
-func (l *lfu) apply() {
- l.mu.Lock()
- defer l.mu.Unlock()
-
- for l.size > l.threshold {
- node := l.freqList.Front()
- nodeValue := node.Value.(*lfuNode)
-
- var entry *lfuEntry
- for _, e := range nodeValue.entries {
- entry = e
- break
- }
-
- l.size -= 1
- l.onEvict(entry.key)
- delete(l.hashMap, entry.key)
- l.unsafeRemoveFreqEntry(node, entry)
- }
-}
-
-// Not concurrently safe!
-// Removes a specific entry from a given freqList node
-func (l *lfu) unsafeRemoveFreqEntry(node *list.Element, entry *lfuEntry) {
- nodeValue := node.Value.(*lfuNode)
-
- delete(nodeValue.entries, entry.key)
- if len(nodeValue.entries) == 0 {
- l.freqList.Remove(node)
- }
-}
diff --git a/eviction_policy_test.go b/eviction_policy_test.go
deleted file mode 100644
index d9e4046..0000000
--- a/eviction_policy_test.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package nitecache
-
-import (
- "reflect"
- "testing"
-)
-
-func TestLru(t *testing.T) {
- ops := []struct {
- key string
- }{
- {key: "1"},
- {key: "2"},
- {key: "3"},
- {key: "2"},
- {key: "1"},
- {key: "1"},
- {key: "2"},
- {key: "3"},
- }
-
- expected := []string{"1", "3", "1"}
- var got []string
-
- lru := NewLruPolicy(2) // 2 items
- lru.setEvictFn(
- func(key string) {
- got = append(got, key)
- },
- )
-
- for _, op := range ops {
- lru.push(op.key)
- lru.apply()
- }
-
- if !reflect.DeepEqual(got, expected) {
- t.Fatalf("Expected %v\ngot %v", expected, got)
- }
-}
-
-func TestLfu(t *testing.T) {
- ops := []struct {
- key string
- }{
- {key: "1"},
- {key: "1"},
- {key: "1"},
- {key: "2"},
- {key: "2"},
- {key: "3"},
- {key: "2"},
- {key: "3"},
- }
-
- expected := []string{"3", "3"}
- var got []string
-
- lfu := NewLfuPolicy(2) // 2 items
- lfu.setEvictFn(
- func(key string) {
- got = append(got, key)
- },
- )
-
- for _, op := range ops {
- lfu.push(op.key)
- lfu.apply()
- }
-
- if !reflect.DeepEqual(got, expected) {
- t.Fatalf("Expected %v\ngot %v", expected, got)
- }
-}
diff --git a/go.mod b/go.mod
index 5b2a034..4456bd4 100644
--- a/go.mod
+++ b/go.mod
@@ -1,20 +1,18 @@
module github.com/MysteriousPotato/nitecache
-go 1.20
-
-replace google.golang.org/grpc => github.com/grpc/grpc-go v1.53.0
+go 1.21
require (
- google.golang.org/grpc v1.53.0
- google.golang.org/protobuf v1.28.1
+ github.com/MysteriousPotato/go-lockable v1.0.0
+ golang.org/x/sync v0.4.0
+ google.golang.org/grpc v1.59.0
+ google.golang.org/protobuf v1.31.0
)
require (
- github.com/MysteriousPotato/go-lockable v0.1.0
- github.com/golang/protobuf v1.5.2 // indirect
- golang.org/x/net v0.7.0 // indirect
- golang.org/x/sync v0.1.0
- golang.org/x/sys v0.5.0 // indirect
- golang.org/x/text v0.7.0 // indirect
- google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect
+ github.com/golang/protobuf v1.5.3 // indirect
+ golang.org/x/net v0.17.0 // indirect
+ golang.org/x/sys v0.13.0 // indirect
+ golang.org/x/text v0.13.0 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 // indirect
)
diff --git a/go.sum b/go.sum
index 3fe4c01..3ca994e 100644
--- a/go.sum
+++ b/go.sum
@@ -1,24 +1,25 @@
-github.com/MysteriousPotato/go-lockable v0.1.0 h1:sg6kpGxH/6iCA+UsiKd56sJJYlECQeC6C3NSGZveTxw=
-github.com/MysteriousPotato/go-lockable v0.1.0/go.mod h1:ocAbkS7kPVpK71d7X6c5U1R+j2Dj7oUsOXPYalzdnas=
+github.com/MysteriousPotato/go-lockable v1.0.0 h1:bnEeLQEkDS97musqsKbBtWaRoyaoeb8vFmIn+XKZe40=
+github.com/MysteriousPotato/go-lockable v1.0.0/go.mod h1:ocAbkS7kPVpK71d7X6c5U1R+j2Dj7oUsOXPYalzdnas=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/grpc/grpc-go v1.53.0 h1:E07rCOq2xmraY7/tgrqRfvFS04d+23N1t2OxJZ/yjnk=
-github.com/grpc/grpc-go v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
-golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
-golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
-golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
-golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
+golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
+golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w=
-google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 h1:AB/lmRny7e2pLhFEYIbl5qkDAUt2h0ZRO4wGPhZf+ik=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE=
+google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
+google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
-google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
+google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
diff --git a/hashring.go b/hashring.go
deleted file mode 100644
index ce2b81e..0000000
--- a/hashring.go
+++ /dev/null
@@ -1,152 +0,0 @@
-package nitecache
-
-import (
- "crypto/md5"
- "fmt"
- "hash/fnv"
- "sort"
- "strconv"
- "sync"
-)
-
-type HashFunc func(key string) (int, error)
-
-type ringCfg struct {
- Members []Member
- VirtualNodes int
- HashFunc func(key string) (int, error)
-}
-
-type hashring struct {
- hashFunc func(key string) (int, error)
- hashMap map[int]Member
- points []int
- mu *sync.RWMutex
- members Members
- virtualNodes int
-}
-
-func newRing(cfg ringCfg) (*hashring, error) {
- r := &hashring{
- hashFunc: cfg.HashFunc,
- hashMap: map[int]Member{},
- points: []int{},
- mu: &sync.RWMutex{},
- members: cfg.Members,
- virtualNodes: cfg.VirtualNodes,
- }
-
- if err := r.populate(); err != nil {
- return nil, fmt.Errorf("unable to populate hasring: %w", err)
- }
-
- return r, nil
-}
-
-func defaultHashFunc(key string) (int, error) {
- hash := fnv.New64()
- if _, err := hash.Write([]byte(key)); err != nil {
- return 0, fmt.Errorf("unable to populate hashring: %w", err)
- }
- return int(hash.Sum64()), nil
-}
-
-func (r *hashring) getOwner(key string) (Member, error) {
- r.mu.RLock()
- defer r.mu.RUnlock()
-
- if len(r.members) == 1 {
- return r.members[0], nil
- }
-
- hash := fnv.New64()
- if _, err := hash.Write([]byte(key)); err != nil {
- return Member{}, fmt.Errorf("unable to get write hash: %w", err)
- }
- sum := int(hash.Sum64())
-
- i := sort.Search(
- len(r.points), func(i int) bool {
- return r.points[i] >= sum
- },
- )
- if i == len(r.points) {
- i = 0
- }
-
- return r.hashMap[r.points[i]], nil
-}
-
-func (r *hashring) setMembers(newMembers Members) error {
- //We don't need points for a single node
- if len(newMembers) == 1 {
- r.members = newMembers
- r.clearPoints()
- return nil
- }
-
- //In case no changes happened
- if r.members.equals(newMembers) {
- return nil
- }
-
- //Create a new hashring, in order to minimize downtime
- newRing := hashring{
- mu: &sync.RWMutex{},
- points: []int{},
- hashMap: map[int]Member{},
- members: newMembers,
- hashFunc: r.hashFunc,
- virtualNodes: r.virtualNodes,
- }
-
- if err := newRing.populate(); err != nil {
- return fmt.Errorf("unable to populate hasring: %w", err)
- }
-
- r.mu.Lock()
- defer r.mu.Unlock()
- *r = newRing
-
- return nil
-}
-
-func (r *hashring) populate() error {
- r.clearPoints()
-
- //To make sure the collision prevention mechanism produces consistent results for identical Members, we need to sort Members
- sort.Slice(
- r.members, func(i, j int) bool {
- return r.members[i].ID < r.members[j].ID
- },
- )
-
- for i, m := range r.members {
- for n := 0; n < r.virtualNodes; n++ {
- keyHash := md5.Sum([]byte(strconv.Itoa(n) + m.ID))
-
- //Avoid collisions by prefixing the hash until a unique point is created
- for prefix := ""; ; prefix += "-" {
- hash, err := r.hashFunc(prefix + string(keyHash[:]))
- if err != nil {
- return fmt.Errorf("unable to populate hashring: %w", err)
- }
-
- if _, ok := r.hashMap[hash]; ok {
- continue
- }
-
- r.hashMap[hash] = r.members[i]
- r.points = append(r.points, hash)
- break
- }
- }
- }
- sort.Ints(r.points)
- return nil
-}
-
-func (r *hashring) clearPoints() {
- r.points = []int{}
- r.hashMap = map[int]Member{}
-}
diff --git a/hashring/hashring.go b/hashring/hashring.go
new file mode 100644
index 0000000..293cf27
--- /dev/null
+++ b/hashring/hashring.go
@@ -0,0 +1,199 @@
+package hashring
+
+import (
+ "fmt"
+ "hash/fnv"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+type (
+ HashFunc func(key string) (int, error)
+ Opt struct {
+ Members []string
+ VirtualNodes int
+ HashFunc func(key string) (int, error)
+ }
+ Ring struct {
+ hashFunc func(key string) (int, error)
+ hashMap map[int]string
+ points []int
+ mu *sync.RWMutex
+ members []string
+ virtualNodes int
+ }
+)
+
+func New(opt Opt) (*Ring, error) {
+ r := &Ring{
+ hashFunc: opt.HashFunc,
+ hashMap: map[int]string{},
+ points: []int{},
+ mu: &sync.RWMutex{},
+ members: opt.Members,
+ virtualNodes: opt.VirtualNodes,
+ }
+
+ if err := r.populate(); err != nil {
+		return nil, fmt.Errorf("unable to populate hashring: %w", err)
+ }
+
+ return r, nil
+}
+
+func DefaultHashFunc(key string) (int, error) {
+ hash := fnv.New64()
+ if _, err := hash.Write([]byte(key)); err != nil {
+		return 0, fmt.Errorf("unable to hash key: %w", err)
+ }
+ return int(hash.Sum64()), nil
+}
+
+func (r *Ring) GetOwner(key string) (string, error) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if len(r.members) == 1 {
+ return r.members[0], nil
+ }
+
+ sum, err := r.hashFunc(key)
+ if err != nil {
+ return "", err
+ }
+
+ i := sort.Search(
+ len(r.points), func(i int) bool {
+ return r.points[i] >= sum
+ },
+ )
+ if i == len(r.points) {
+ i = 0
+ }
+
+ return r.hashMap[r.points[i]], nil
+}
+
+func (r *Ring) SetMembers(newMembers []string) error {
+ //We don't need points for a single node
+ if len(newMembers) == 1 {
+ r.members = newMembers
+ r.clearPoints()
+ return nil
+ }
+
+ if SliceEquals(newMembers, r.members) {
+ return nil
+ }
+
+ //Create a new Ring, in order to minimize downtime
+ ring := Ring{
+ mu: &sync.RWMutex{},
+ points: []int{},
+ hashMap: map[int]string{},
+ members: newMembers,
+ hashFunc: r.hashFunc,
+ virtualNodes: r.virtualNodes,
+ }
+
+ if err := ring.populate(); err != nil {
+		return fmt.Errorf("unable to populate hashring: %w", err)
+ }
+
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ *r = ring
+
+ return nil
+}
+
+func (r *Ring) Members() []string {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ members := make([]string, len(r.members))
+ copy(members, r.members)
+
+ return members
+}
+
+func (r *Ring) Points() []int {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ points := make([]int, len(r.points))
+ copy(points, r.points)
+
+ return points
+}
+
+func (r *Ring) VirtualNodes() int {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ return r.virtualNodes
+}
+
+func (r *Ring) populate() error {
+ r.clearPoints()
+
+ //To make sure the collision prevention mechanism produces consistent results across all members, we need to sort members
+ sort.Slice(
+ r.members, func(i, j int) bool {
+ return r.members[i] < r.members[j]
+ },
+ )
+
+ for i, m := range r.members {
+ for n := 0; n < r.virtualNodes; n++ {
+ key := strconv.Itoa(n) + m
+
+			//Avoid collisions by prefixing the key until a unique point is created
+ for prefix := ""; ; prefix += "-" {
+ hash, err := r.hashFunc(prefix + key)
+ if err != nil {
+				return fmt.Errorf("unable to populate hashring: %w", err)
+ }
+
+ if _, ok := r.hashMap[hash]; ok {
+ continue
+ }
+
+ r.hashMap[hash] = r.members[i]
+ r.points = append(r.points, hash)
+ break
+ }
+ }
+ }
+ sort.Ints(r.points)
+ return nil
+}
+
+func (r *Ring) clearPoints() {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ r.points = []int{}
+ r.hashMap = map[int]string{}
+}
+
+// SliceEquals checks for order-independent equality
+func SliceEquals(slice1 []string, slice2 []string) bool {
+ if len(slice1) != len(slice2) {
+ return false
+ }
+
+ newMembersMap := make(map[string]struct{}, len(slice1))
+ for _, newMember := range slice1 {
+ newMembersMap[newMember] = struct{}{}
+ }
+ for _, member := range slice2 {
+ if _, ok := newMembersMap[member]; !ok {
+ return false
+ }
+ delete(newMembersMap, member)
+ }
+
+ return len(newMembersMap) == 0
+}
diff --git a/hashring/hashring_test.go b/hashring/hashring_test.go
new file mode 100644
index 0000000..b2476b0
--- /dev/null
+++ b/hashring/hashring_test.go
@@ -0,0 +1,104 @@
+package hashring_test
+
+import (
+ "github.com/MysteriousPotato/nitecache/hashring"
+ "github.com/MysteriousPotato/nitecache/test_utils"
+ "testing"
+)
+
+func assertRing(t *testing.T, r *hashring.Ring, members []string) {
+ ringMembers := r.Members()
+ if !hashring.SliceEquals(members, ringMembers) {
+ t.Errorf("expected: %v Members, got: %v Members", members, ringMembers)
+ }
+
+ gotPointsLen := len(r.Points())
+ expectedPointsLen := len(ringMembers) * r.VirtualNodes()
+ if gotPointsLen != expectedPointsLen {
+ t.Errorf("expected: %v points, got: %v points", expectedPointsLen, gotPointsLen)
+ }
+}
+
+func TestRing_New(t *testing.T) {
+ mTest := []string{"node-1", "node-2", "node-3"}
+ cfg := hashring.Opt{
+ Members: mTest,
+ VirtualNodes: 10,
+ HashFunc: hashring.DefaultHashFunc,
+ }
+
+ ring, err := hashring.New(cfg)
+ if err != nil {
+ t.Fatal(err)
+ }
+ assertRing(t, ring, mTest)
+}
+
+func TestRing_SetMembers(t *testing.T) {
+ mTest := []string{"node-1", "node-2", "node-3"}
+ cfg := hashring.Opt{
+ Members: mTest,
+ VirtualNodes: 10,
+ HashFunc: hashring.DefaultHashFunc,
+ }
+
+ ring, err := hashring.New(cfg)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ mTest[0] = "node-4"
+ mTest = append(mTest, "node-5")
+ if err = ring.SetMembers(mTest); err != nil {
+ t.Fatal(err)
+ }
+ assertRing(t, ring, mTest)
+
+ mTest = []string{"node-1", "node-2", "node-3"}
+
+ if err = ring.SetMembers(mTest); err != nil {
+ t.Fatal(err)
+ }
+ assertRing(t, ring, mTest)
+}
+
+func TestRing_GetOwner(t *testing.T) {
+ mTest := []string{"10", "20", "30"}
+ cfg := hashring.Opt{
+ Members: mTest,
+ VirtualNodes: 10,
+ HashFunc: test.SimpleHashFunc,
+ }
+
+ ring, err := hashring.New(cfg)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tests := []struct {
+ expected string
+ key string
+ }{{
+ expected: "20",
+ key: "12",
+ }, {
+ expected: "10",
+ key: "0",
+ }, {
+ expected: "10",
+ key: "100",
+ }, {
+ expected: "30",
+ key: "30",
+ }}
+
+ for _, tt := range tests {
+ owner, err := ring.GetOwner(tt.key)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if owner != tt.expected {
+ t.Fatalf("expected owner %s for key %s, got: %s", tt.expected, tt.key, owner)
+ }
+ }
+}
diff --git a/hashring_test.go b/hashring_test.go
deleted file mode 100644
index 5a0960b..0000000
--- a/hashring_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package nitecache
-
-import (
- "testing"
-)
-
-func assertRing(t *testing.T, r *hashring, members []Member) {
- if !r.members.equals(members) {
- t.Errorf("expected: %v Members, got: %v Members", members, r.members)
- }
-
- gotPointsLen := len(r.points)
- expectedPointsLen := len(r.members) * r.virtualNodes
- if gotPointsLen != expectedPointsLen {
- t.Errorf("expected: %v points, got: %v points", expectedPointsLen, gotPointsLen)
- }
-}
-
-func TestRing_NewOK(t *testing.T) {
- mTest := []Member{
- {ID: "node-1"},
- {ID: "node-2"},
- {ID: "node-3"},
- }
- cfg := ringCfg{
- Members: mTest,
- VirtualNodes: 10,
- HashFunc: defaultHashFunc,
- }
-
- ring, err := newRing(cfg)
- if err != nil {
- t.Error(err)
- }
- assertRing(t, ring, mTest)
-}
-
-func TestRing_setMembers(t *testing.T) {
- mTest := []Member{
- {ID: "node-1"},
- {ID: "node-2"},
- {ID: "node-3"},
- }
- cfg := ringCfg{
- Members: mTest,
- VirtualNodes: 10,
- HashFunc: defaultHashFunc,
- }
-
- ring, err := newRing(cfg)
- if err != nil {
- t.Error(err)
- }
-
- mTest[0].ID = "node-4"
- mTest = append(mTest, Member{ID: "node-5"})
- if err := ring.setMembers(mTest); err != nil {
- t.Error(err)
- }
- assertRing(t, ring, mTest)
-
- mTest = []Member{
- {ID: "node-1"},
- {ID: "node-2"},
- {ID: "node-3"},
- }
- if err := ring.setMembers(mTest); err != nil {
- t.Error(err)
- }
- assertRing(t, ring, mTest)
-}
diff --git a/images/logo.png b/images/logo.png
deleted file mode 100644
index 8479363..0000000
Binary files a/images/logo.png and /dev/null differ
diff --git a/inmem/cache.go b/inmem/cache.go
new file mode 100644
index 0000000..11e7605
--- /dev/null
+++ b/inmem/cache.go
@@ -0,0 +1,47 @@
+package inmem
+
+import "sync"
+
+// Cache is essentially a wrapper around map[T]K that supports concurrent access
+type Cache[T comparable, K any] struct {
+ internal map[T]K
+ mu *sync.RWMutex
+}
+
+func NewCache[T comparable, K any]() *Cache[T, K] {
+ return &Cache[T, K]{
+ internal: map[T]K{},
+ mu: &sync.RWMutex{},
+ }
+}
+
+func (c *Cache[T, K]) Get(key T, _ ...Opt) (K, bool) {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ v, ok := c.internal[key]
+ return v, ok
+}
+
+func (c *Cache[T, K]) Put(key T, value K, _ ...Opt) bool {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ _, ok := c.internal[key]
+ c.internal[key] = value
+
+ return ok
+}
+
+func (c *Cache[T, K]) Evict(key T) bool {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if _, ok := c.internal[key]; ok {
+ delete(c.internal, key)
+ return true
+ }
+ return false
+}
+
+func (c *Cache[T, K]) Inc(_ T) bool { return false } // no-op: plain Cache has no usage policy; param is T to match LFU/LRU Inc(key T)
diff --git a/inmem/lfu.go b/inmem/lfu.go
new file mode 100644
index 0000000..d5f599f
--- /dev/null
+++ b/inmem/lfu.go
@@ -0,0 +1,184 @@
+package inmem
+
+import (
+ "container/list"
+ "sync"
+)
+
+type (
+ // LFU cache (least frequently used)
+ //
+ // The zero value is not ready for use. Refer to [NewLFU] for the factory method.
+ LFU[T comparable, K any] struct {
+ threshold int
+ size int
+ freqList *list.List
+ hashMap map[T]*lfuEntry[T, K]
+ mu *sync.RWMutex
+ }
+ lfuEntry[T comparable, K any] struct {
+ key T
+ value K
+ nodeKey *list.Element
+ parent *list.Element
+ }
+ lfuNode[T any] struct {
+ count int
+ keys *list.List
+ }
+)
+
+// NewLFU creates an in memory cache that applies an LFU policy.
+//
+// When the cache must evict keys and multiple keys have the same usage count, [LFU] falls back to an LRU policy to determine which key to evict.
+func NewLFU[T comparable, K any](threshold int) *LFU[T, K] {
+ return &LFU[T, K]{
+ threshold: threshold,
+ freqList: list.New(),
+ hashMap: make(map[T]*lfuEntry[T, K]),
+ mu: &sync.RWMutex{},
+ }
+}
+
+func (l *LFU[T, K]) Get(key T, opts ...Opt) (K, bool) {
+ o := getOpts(opts...)
+
+ var unlocked bool
+ l.mu.RLock()
+ defer func() {
+ if !unlocked {
+ l.mu.RUnlock()
+ }
+ }()
+
+ //Upsert the entry and update cache size
+ entry, ok := l.hashMap[key]
+ if !ok {
+ var empty K
+ return empty, false
+ }
+
+ value := entry.value
+ if !o.skipInc {
+ l.mu.RUnlock()
+ unlocked = true
+
+ l.mu.Lock()
+ defer l.mu.Unlock()
+
+ // Check if key is still present between "lock promotion"
+ if _, ok := l.hashMap[key]; ok {
+ l.unsafeUpdateCount(entry, false)
+ }
+ }
+ return value, ok
+}
+
+func (l *LFU[T, K]) Put(key T, value K, opts ...Opt) bool {
+ o := getOpts(opts...)
+
+ l.mu.Lock()
+ defer l.mu.Unlock()
+
+ // Upsert the entry and update cache size
+ entry, ok := l.hashMap[key]
+ if ok {
+ entry.value = value
+ } else {
+ entry = &lfuEntry[T, K]{key: key, value: value}
+ l.hashMap[key] = entry
+ l.size += 1
+ }
+ l.unsafeApplyPolicy()
+
+ if !ok || !o.skipInc {
+ l.unsafeUpdateCount(entry, !ok)
+ }
+ return ok
+}
+
+func (l *LFU[T, K]) Evict(key T) bool {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+
+ if value, ok := l.hashMap[key]; ok {
+ delete(l.hashMap, value.key)
+ l.unsafeRemoveFreqEntry(value.parent, value.nodeKey)
+ return true
+ }
+ return false
+}
+
+func (l *LFU[T, K]) Inc(key T) bool {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+
+ if entry, ok := l.hashMap[key]; ok {
+ l.unsafeUpdateCount(entry, false)
+ return true
+ }
+ return false
+}
+
+func (l *LFU[T, K]) Values() map[T]K {
+ l.mu.RLock()
+ defer l.mu.RUnlock()
+
+ values := make(map[T]K, l.size)
+ for k, v := range l.hashMap {
+ values[k] = v.value
+ }
+ return values
+}
+
+func (l *LFU[T, K]) unsafeUpdateCount(entry *lfuEntry[T, K], isNewEntry bool) {
+ var currentNode, prevNode *list.Element
+ var nextCount int
+ if isNewEntry {
+ currentNode = l.freqList.Front()
+ } else {
+ currentNode = entry.parent.Next()
+ prevNode = entry.parent
+ nextCount = prevNode.Value.(*lfuNode[T]).count + 1
+ }
+
+ if currentNode == nil || currentNode.Value.(*lfuNode[T]).count != nextCount {
+ parentNodeEntries := list.New()
+ entry.nodeKey = parentNodeEntries.PushFront(entry.key)
+ entry.parent = l.freqList.PushFront(&lfuNode[T]{
+ keys: parentNodeEntries,
+ count: nextCount,
+ })
+ } else {
+ entry.nodeKey = currentNode.Value.(*lfuNode[T]).keys.PushFront(entry.key)
+ entry.parent = currentNode
+ }
+
+ if prevNode != nil {
+ l.unsafeRemoveFreqEntry(prevNode, entry.nodeKey)
+ }
+}
+
+// Not concurrently safe!
+func (l *LFU[T, K]) unsafeApplyPolicy() {
+ for l.size > l.threshold {
+ node := l.freqList.Front()
+ nodeValue := node.Value.(*lfuNode[T])
+ entry := nodeValue.keys.Back()
+
+ l.size -= 1
+ delete(l.hashMap, entry.Value.(T))
+
+ l.unsafeRemoveFreqEntry(node, entry)
+ }
+}
+
+// Not concurrently safe!
+// Removes a specific entry from a given freqList node
+func (l *LFU[T, K]) unsafeRemoveFreqEntry(node *list.Element, entry *list.Element) {
+ nodeValue := node.Value.(*lfuNode[T])
+ nodeValue.keys.Remove(entry)
+ if nodeValue.keys.Front() == nil {
+ l.freqList.Remove(node)
+ }
+}
diff --git a/inmem/lfu_test.go b/inmem/lfu_test.go
new file mode 100644
index 0000000..dcf7a44
--- /dev/null
+++ b/inmem/lfu_test.go
@@ -0,0 +1,134 @@
+package inmem_test
+
+import (
+ "github.com/MysteriousPotato/nitecache/inmem"
+ "reflect"
+ "strconv"
+ "sync"
+ "testing"
+)
+
+func TestLfu(t *testing.T) {
+ ops := []struct {
+ key int
+ value int
+ }{
+ {key: 1, value: 0},
+ {key: 1, value: 1},
+ {key: 1, value: 2},
+ {key: 2, value: 3},
+ {key: 2, value: 4},
+ {key: 4, value: 5},
+ {key: 3, value: 6},
+ {key: 2, value: 7},
+ {key: 3, value: 8},
+ }
+
+ expected := map[int]int{1: 2, 2: 7, 3: 8}
+
+ lfu := inmem.NewLFU[int, int](3)
+ for _, op := range ops {
+ lfu.Put(op.key, op.value)
+ }
+
+ for k, expectedV := range expected {
+ v, ok := lfu.Get(k)
+ if !ok {
+ t.Fatalf("Value not found for key: %q", k)
+ }
+ if v != expectedV {
+ t.Fatalf("Expected %v for key %q\ngot %v", expected, k, v)
+ }
+ }
+
+ got := lfu.Values()
+ if !reflect.DeepEqual(got, expected) {
+ t.Fatalf("Expected %v\ngot %v", expected, got)
+ }
+}
+
+func TestLFUConcurrentAccess(t *testing.T) {
+ goroutinesCount := 100
+ iterations := 1000
+
+ lfu := inmem.NewLFU[int, int](128)
+ wg := sync.WaitGroup{}
+
+ wg.Add(goroutinesCount)
+ for i := 0; i < goroutinesCount; i++ {
+ go func() {
+ defer wg.Done()
+
+ for j := 0; j < iterations; j++ {
+ lfu.Put(j, j)
+ lfu.Get(j)
+ lfu.Inc(j)
+ lfu.Evict(j)
+ }
+ }()
+ }
+
+ wg.Wait()
+}
+
+func BenchmarkLFUPut(b *testing.B) {
+ for _, threshold := range []int{10, 100, 1000, 10000, 100000} {
+ b.Run("threshold="+strconv.Itoa(threshold), func(b *testing.B) {
+ lfu := inmem.NewLFU[int, int](threshold)
+ for i := 0; i < b.N; i++ {
+ lfu.Put(i, i)
+ }
+ })
+ }
+}
+
+func BenchmarkLFUGet(b *testing.B) {
+ for _, threshold := range []int{10, 100, 1000, 10000, 100000} {
+ b.Run("threshold="+strconv.Itoa(threshold), func(b *testing.B) {
+ lfu := inmem.NewLFU[int, int](threshold)
+ for i := 0; i < b.N; i++ {
+ lfu.Put(i, i)
+ }
+
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ lfu.Get(i)
+ }
+ })
+ }
+}
+
+func BenchmarkLFUEvict(b *testing.B) {
+ for _, threshold := range []int{10, 100, 1000, 10000, 100000} {
+ b.Run("threshold="+strconv.Itoa(threshold), func(b *testing.B) {
+ lfu := inmem.NewLFU[int, int](threshold)
+ for i := 0; i < b.N; i++ {
+ lfu.Put(i, i)
+ }
+
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ lfu.Evict(i)
+ }
+ })
+ }
+}
+
+func BenchmarkLFUInc(b *testing.B) {
+ for _, threshold := range []int{10, 100, 1000, 10000, 100000} {
+ b.Run("threshold="+strconv.Itoa(threshold), func(b *testing.B) {
+ lfu := inmem.NewLFU[int, int](threshold)
+ for i := 0; i < b.N; i++ {
+ lfu.Put(i, i)
+ }
+
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ lfu.Inc(i)
+ }
+ })
+ }
+}
diff --git a/inmem/lru.go b/inmem/lru.go
new file mode 100644
index 0000000..cb9fed0
--- /dev/null
+++ b/inmem/lru.go
@@ -0,0 +1,137 @@
+package inmem
+
+import (
+ "container/list"
+ "sync"
+)
+
+type (
+ // LRU cache (least recently used)
+ //
+ // The zero value is not ready for use. Refer to [NewLRU] for the factory method.
+ LRU[T comparable, K any] struct {
+ threshold int
+ evictionQueue *list.List
+ hashMap map[T]*list.Element
+ size int
+ mu *sync.RWMutex
+ }
+ node[T comparable, K any] struct {
+ key T
+ value K
+ }
+)
+
+// NewLRU creates an in memory cache that applies an LRU policy.
+func NewLRU[T comparable, K any](threshold int) *LRU[T, K] {
+ return &LRU[T, K]{
+ threshold: threshold,
+ evictionQueue: list.New(),
+ hashMap: make(map[T]*list.Element),
+ mu: &sync.RWMutex{},
+ }
+}
+
+func (l *LRU[T, K]) Get(key T, opts ...Opt) (K, bool) {
+ o := getOpts(opts...)
+
+ var unlocked bool
+ l.mu.RLock()
+ defer func() {
+ if !unlocked {
+ l.mu.RUnlock()
+ }
+ }()
+
+ ele, ok := l.hashMap[key]
+ if !ok {
+ var empty K
+ return empty, false
+ }
+
+ value := ele.Value.(*node[T, K]).value
+ if !o.skipInc {
+ l.mu.RUnlock()
+ unlocked = true
+
+ l.mu.Lock()
+ defer l.mu.Unlock()
+
+ // Check if key is still present between "lock promotion"
+ if _, ok := l.hashMap[key]; ok {
+ l.evictionQueue.MoveToBack(ele)
+ }
+ }
+
+ return value, ok
+}
+
+func (l *LRU[T, K]) Put(key T, value K, opts ...Opt) bool {
+ o := getOpts(opts...)
+
+ l.mu.Lock()
+ defer l.mu.Unlock()
+
+ ele, ok := l.hashMap[key]
+ if ok {
+ ele.Value.(*node[T, K]).value = value
+ if !o.skipInc {
+ l.evictionQueue.MoveToBack(ele)
+ }
+ } else {
+ l.size += 1
+ l.hashMap[key] = l.evictionQueue.PushBack(&node[T, K]{
+ key: key,
+ value: value,
+ })
+ }
+ l.unsafeApplyPolicy()
+
+ return ok
+}
+
+func (l *LRU[T, K]) Evict(key T) bool {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+
+ if ele, ok := l.hashMap[key]; ok {
+ l.size -= 1
+ l.evictionQueue.Remove(ele)
+ return true
+ }
+ return false
+}
+
+func (l *LRU[T, K]) Inc(key T) bool {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+
+ if ele, ok := l.hashMap[key]; ok {
+ l.evictionQueue.MoveToBack(ele)
+ return true
+ }
+ return false
+}
+
+func (l *LRU[T, K]) Values() map[T]K {
+ l.mu.RLock()
+ defer l.mu.RUnlock()
+
+ values := make(map[T]K, l.size)
+ for k, element := range l.hashMap {
+ values[k] = element.Value.(*node[T, K]).value
+ }
+ return values
+}
+
+// Not concurrently safe!
+func (l *LRU[T, K]) unsafeApplyPolicy() {
+ for l.size > l.threshold {
+ ele := l.evictionQueue.Front()
+ n := ele.Value.(*node[T, K])
+
+ l.size -= 1
+ l.evictionQueue.Remove(ele)
+ delete(l.hashMap, n.key)
+ }
+}
diff --git a/inmem/lru_test.go b/inmem/lru_test.go
new file mode 100644
index 0000000..3b5b48c
--- /dev/null
+++ b/inmem/lru_test.go
@@ -0,0 +1,133 @@
+package inmem_test
+
+import (
+ "github.com/MysteriousPotato/nitecache/inmem"
+ "reflect"
+ "strconv"
+ "sync"
+ "testing"
+)
+
+func TestLru(t *testing.T) {
+ ops := []struct {
+ key int
+ value int
+ }{
+ {key: 1, value: 0},
+ {key: 2, value: 1},
+ {key: 3, value: 2},
+ {key: 2, value: 3},
+ {key: 1, value: 4},
+ {key: 1, value: 5},
+ {key: 4, value: 6},
+ {key: 2, value: 7},
+ {key: 3, value: 8},
+ }
+ expected := map[int]int{2: 7, 3: 8, 4: 6}
+
+ lru := inmem.NewLRU[int, int](3)
+ for _, op := range ops {
+ lru.Put(op.key, op.value)
+ }
+
+ for k, expectedV := range expected {
+ v, ok := lru.Get(k)
+ if !ok {
+ t.Fatalf("Value not found for key: %q", k)
+ }
+ if v != expectedV {
+ t.Fatalf("Expected %v for key %q\ngot %v", expected, k, v)
+ }
+ }
+
+ got := lru.Values()
+ if !reflect.DeepEqual(got, expected) {
+ t.Fatalf("Expected %v\ngot %v", expected, got)
+ }
+}
+
+func TestLRUConcurrentAccess(t *testing.T) {
+ goroutinesCount := 100
+ iterations := 1000
+
+ lru := inmem.NewLRU[int, int](128)
+ wg := sync.WaitGroup{}
+
+ wg.Add(goroutinesCount)
+ for i := 0; i < goroutinesCount; i++ {
+ go func() {
+ defer wg.Done()
+
+ for j := 0; j < iterations; j++ {
+ lru.Put(j, j)
+ lru.Get(j)
+ lru.Inc(j)
+ lru.Evict(j)
+ }
+ }()
+ }
+
+ wg.Wait()
+}
+
+func BenchmarkLRUPut(b *testing.B) {
+ for _, threshold := range []int{10, 100, 1000, 10000, 100000} {
+ b.Run("threshold="+strconv.Itoa(threshold), func(b *testing.B) {
+ lru := inmem.NewLRU[int, int](threshold)
+ for i := 0; i < b.N; i++ {
+ lru.Put(i, i)
+ }
+ })
+ }
+}
+
+func BenchmarkLRUGet(b *testing.B) {
+ for _, threshold := range []int{10, 100, 1000, 10000, 100000} {
+ b.Run("threshold="+strconv.Itoa(threshold), func(b *testing.B) {
+ lru := inmem.NewLRU[int, int](threshold)
+ for i := 0; i < b.N; i++ {
+ lru.Put(i, i)
+ }
+
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ lru.Get(i)
+ }
+ })
+ }
+}
+
+func BenchmarkLRUEvict(b *testing.B) {
+ for _, threshold := range []int{10, 100, 1000, 10000, 100000} {
+ b.Run("threshold="+strconv.Itoa(threshold), func(b *testing.B) {
+ lru := inmem.NewLRU[int, int](threshold)
+ for i := 0; i < b.N; i++ {
+ lru.Put(i, i)
+ }
+
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ lru.Evict(i)
+ }
+ })
+ }
+}
+
+func BenchmarkLRUInc(b *testing.B) {
+ for _, threshold := range []int{10, 100, 1000, 10000, 100000} {
+ b.Run("threshold="+strconv.Itoa(threshold), func(b *testing.B) {
+ lru := inmem.NewLRU[int, int](threshold)
+ for i := 0; i < b.N; i++ {
+ lru.Put(i, i)
+ }
+
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ lru.Inc(i)
+ }
+ })
+ }
+}
diff --git a/inmem/options.go b/inmem/options.go
new file mode 100644
index 0000000..15831e4
--- /dev/null
+++ b/inmem/options.go
@@ -0,0 +1,25 @@
+package inmem
+
+type (
+ Opt func(*opts)
+ opts struct {
+ skipInc bool
+ }
+)
+
+// SkipInc specifies whether the "count" used to apply the LFU/LRU policies must be incremented.
+//
+// # Defaults to false
+//
+// This is useful when multiple get/put operations must count as a single access.
+func SkipInc(skip bool) Opt {
+ return func(o *opts) { o.skipInc = skip }
+}
+
+func getOpts(options ...Opt) *opts {
+ defaultOpts := &opts{}
+ for _, o := range options {
+ o(defaultOpts)
+ }
+ return defaultOpts
+}
diff --git a/member.go b/member.go
deleted file mode 100644
index 66bfe22..0000000
--- a/member.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package nitecache
-
-type Members []Member
-
-type Member struct {
- ID string
- Addr string
-}
-
-func (m Member) isZero() bool {
- return m == (Member{})
-}
-
-func (ms Members) equals(others Members) bool {
- if len(ms) != len(others) {
- return false
- }
-
- hashMap := make(map[string]any, len(ms))
- for _, m := range ms {
- hashMap[m.ID] = nil
- }
-
- for _, other := range others {
- if _, ok := hashMap[other.ID]; !ok {
- return false
- }
- }
- return true
-}
diff --git a/metrics.go b/metrics.go
index 48e9b4b..6724fdc 100644
--- a/metrics.go
+++ b/metrics.go
@@ -1,73 +1,85 @@
package nitecache
import (
+ "maps"
"sync"
"sync/atomic"
)
-// Metrics are limited to the scope of the owner nodes.
-//
-// For example, if node-1 queries node-2, metrics will be registered on node-2 only.
-type Metrics struct {
- Miss int64
- Get int64
- Put int64
- Evict int64
- Execute map[string]int64
- mu *sync.Mutex
-}
+type (
+ // Metrics are limited to the scope of the owner nodes.
+ //
+ // For example, if node-1 queries node-2, metrics will be registered on node-2 only.
+ Metrics struct {
+ Miss int64
+ Get int64
+ Put int64
+ Evict int64
+ Call map[string]int64
+ }
+ metrics struct {
+ Miss atomic.Int64
+ Get atomic.Int64
+ Put atomic.Int64
+ Evict atomic.Int64
+ Call map[string]int64
+ mu *sync.RWMutex
+ }
+)
-func (m Metrics) getCopy() Metrics {
- m.mu.Lock()
- defer m.mu.Unlock()
+func (m *metrics) getCopy() Metrics {
+ m.mu.RLock()
+ defer m.mu.RUnlock()
return Metrics{
- Miss: m.Miss,
- Get: m.Get,
- Put: m.Put,
- Evict: m.Evict,
- Execute: m.Execute,
+ Miss: m.Miss.Load(),
+ Get: m.Get.Load(),
+ Put: m.Put.Load(),
+ Evict: m.Evict.Load(),
+ Call: maps.Clone(m.Call),
}
}
-func newMetrics() *Metrics {
- return &Metrics{
- Execute: make(map[string]int64),
- mu: &sync.Mutex{},
+func newMetrics() *metrics {
+ return &metrics{
+ Call: make(map[string]int64),
+ mu: &sync.RWMutex{},
}
}
-func incMiss(metrics ...*Metrics) {
- for _, m := range metrics {
- atomic.AddInt64(&m.Miss, 1)
+func incMiss(ms ...*metrics) {
+ for _, m := range ms {
+ m.Miss.Add(1)
}
}
-func incGet(metrics ...*Metrics) {
- for _, m := range metrics {
- atomic.AddInt64(&m.Get, 1)
+func incGet(ms ...*metrics) {
+ for _, m := range ms {
+ m.Get.Add(1)
}
}
-func incPut(metrics ...*Metrics) {
- for _, m := range metrics {
- atomic.AddInt64(&m.Put, 1)
+func incPut(ms ...*metrics) {
+ for _, m := range ms {
+ m.Put.Add(1)
}
}
-func incEvict(metrics ...*Metrics) {
- for _, m := range metrics {
- atomic.AddInt64(&m.Evict, 1)
+func incEvict(ms ...*metrics) {
+ for _, m := range ms {
+ m.Evict.Add(1)
}
}
-func incExecute(function string, metrics ...*Metrics) {
- inc := func(m *Metrics) {
- m.mu.Lock()
- defer m.mu.Unlock()
- m.Execute[function]++
- }
- for _, m := range metrics {
- inc(m)
+func incCalls(procedure string, ms ...*metrics) {
+ for _, m := range ms {
+ incCall(procedure, m)
}
}
+
+func incCall(procedure string, m *metrics) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ m.Call[procedure]++
+}
diff --git a/metrics_test.go b/metrics_test.go
index 985cdb5..ca574df 100644
--- a/metrics_test.go
+++ b/metrics_test.go
@@ -1,32 +1,39 @@
-package nitecache
+package nitecache_test
import (
"context"
"errors"
- "github.com/MysteriousPotato/nitecache/test"
+ "github.com/MysteriousPotato/nitecache"
"reflect"
"testing"
"time"
+
+ test "github.com/MysteriousPotato/nitecache/test_utils"
)
func TestMetrics(t *testing.T) {
- c, err := NewCache("1", []Member{{ID: "1", Addr: test.GetUniqueAddr()}})
+ ctx := context.Background()
+ c, err := nitecache.NewCache(
+ nitecache.Member{ID: "1", Addr: test.GetUniqueAddr()},
+ []nitecache.Member{
+ {ID: "1", Addr: test.GetUniqueAddr()},
+ },
+ )
if err != nil {
t.Fatal(err)
}
- defer test.TearDown(c)
- tables := []*Table[int]{
- NewTable[int]("table-1").
- WithFunction(
- "function", func(v int, args []byte) (int, time.Duration, error) {
+ tables := []*nitecache.Table[int]{
+ nitecache.NewTable[int]("table-1").
+ WithProcedure(
+ "function", func(_ context.Context, v int, args []byte) (int, time.Duration, error) {
return v, 0, nil
},
).
Build(c),
- NewTable[int]("table-2").
- WithFunction(
- "function", func(v int, args []byte) (int, time.Duration, error) {
+ nitecache.NewTable[int]("table-2").
+ WithProcedure(
+ "function", func(_ context.Context, v int, args []byte) (int, time.Duration, error) {
return v, 0, nil
},
).
@@ -45,12 +52,11 @@ func TestMetrics(t *testing.T) {
{op: "evict", key: "1"},
{op: "update", key: "2"},
}
- ctx := context.TODO()
for _, table := range tables {
for _, op := range ops {
if op.op == "get" {
- if _, err := table.Get(ctx, op.key); err != nil && !errors.Is(err, ErrKeyNotFound) {
+ if _, err := table.Get(ctx, op.key); err != nil && !errors.Is(err, nitecache.ErrKeyNotFound) {
t.Fatal(err)
}
}
@@ -65,45 +71,65 @@ func TestMetrics(t *testing.T) {
}
}
if op.op == "update" {
- if _, err := table.Execute(ctx, op.key, "function", []byte{}); err != nil {
+ if _, err := table.Call(ctx, op.key, "function", []byte{}); err != nil {
t.Fatal(err)
}
}
}
}
- expectedGlobal := Metrics{
+ expectedGlobal := nitecache.Metrics{
Miss: 2,
Get: 4,
Put: 2,
Evict: 2,
- Execute: map[string]int64{
+ Call: map[string]int64{
"function": 4,
},
}
- expectedTable := Metrics{
+ expectedTable := nitecache.Metrics{
Miss: 1,
Get: 2,
Put: 1,
Evict: 1,
- Execute: map[string]int64{
+ Call: map[string]int64{
"function": 2,
},
}
- gotTable1 := tables[0].GetMetrics()
- gotTable2 := tables[1].GetMetrics()
- gotGlobal := c.GetMetrics()
+ gotTable1, err := tables[0].GetMetrics()
+ if err != nil {
+ t.Fatal(err)
+ }
+ gotTable2, err := tables[1].GetMetrics()
+ if err != nil {
+ t.Fatal(err)
+ }
+ gotGlobal, err := c.GetMetrics()
+ if err != nil {
+ t.Fatal(err)
+ }
if !reflect.DeepEqual(expectedGlobal, gotGlobal) {
- t.Fatalf("expected global: %+v\ngot:%+v", expectedGlobal, gotGlobal)
+ t.Fatalf("expected global metrics: %+v\ngot:%+v", expectedGlobal, gotGlobal)
}
if !reflect.DeepEqual(expectedTable, gotTable1) {
- t.Fatalf("expected table: %+v\ngot:%+v", expectedTable, gotTable1)
+ t.Fatalf("expected table metrics: %+v\ngot:%+v", expectedTable, gotTable1)
}
if !reflect.DeepEqual(expectedTable, gotTable2) {
- t.Fatalf("expected table: %+v\ngot:%+v", expectedTable, gotTable2)
+ t.Fatalf("expected table metrics: %+v\ngot:%+v", expectedTable, gotTable2)
+ }
+
+ if err = c.TearDown(); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := tables[0].GetMetrics(); !errors.Is(err, nitecache.ErrCacheDestroyed) {
+ t.Fatalf("expected err: %v\ngot:%v", nitecache.ErrCacheDestroyed, err)
+ }
+ if _, err := c.GetMetrics(); !errors.Is(err, nitecache.ErrCacheDestroyed) {
+ t.Fatalf("expected err: %v\ngot:%v", nitecache.ErrCacheDestroyed, err)
}
}
diff --git a/readme.md b/readme.md
index 9074ce8..d586e72 100644
--- a/readme.md
+++ b/readme.md
@@ -2,9 +2,9 @@
@@ -23,70 +23,58 @@
## About The Project
-***disclaimer*** : This project is still in its experimental phase.
+***disclaimer*** : This project is still experimental. Expect breaking changes.
nitecache is an embedded and distributed cache library for golang that supports:
- sharding
- explicit cache eviction
- ttl
- lfu/lru eviction policies
-- updating keys using golang functions
-- grpc
+- rpc for value updates
- type safety using generics
-(back to top)
-
-
## Getting started
### Installation
-- requires go version >= 1.20
+- requires go version >= 1.21
```sh
-go get github.com/MysteriousPotato/nitecache
+go get github.com/MysteriousPotato/nitecache@v0.4.0
```
### Usage
##### Creating a cache instance:
+
``` go
// Both ID and Addr must be unique across peers
-selfID := "1"
+self := nitecache.Member{ID: "1", Addr: "node1:8100"}
members := []nitecache.Member{
- {ID: "1", Addr: "node1:8100"},
+ self, // You can omit self from the members list
{ID: "2", Addr: "node2:8100"},
}
-c, err := nitecache.NewCache(
- selfID,
- members,
- nitecache.VirtualNodeOpt(64),
- nitecache.TimeoutOpt(time.Second*5),
- nitecache.HashFuncOpt(
- func(key string) (int, error) {
- return int(crc32.ChecksumIEEE([]byte(key))), nil
- },
- ),
-)
-if err != nil {
- panic(err)
+cache, err := nitecache.NewCache(self, members)
+...
+go func() {
+ err := cache.ListenAndServe()
+ ...
}
-defer func() {
- if err := c.TearDown(); err != nil {
- panic(err)
- }
-}()
+
+
+
```
##### Creating a table:
+
``` go
// Specify the name of the table
-table := nitecache.NewTable[string]("key-value-store").
+table := nitecache.NewTable[string]("sessions").
// If WithEvictionPolicy is omitted, nitecache won't apply any eviction policy
- WithEvictionPolicy(nitecache.NewLruPolicy(256<<20)).
+ WithStorage(nitecache.LRU(1024)).
// Option to specify the cache-aside getter
// If WithGetter is omitted, nitecache will return an error on cache miss.
WithGetter(
@@ -103,64 +91,51 @@ table := nitecache.NewTable[string]("key-value-store").
```
##### Retrieving a value by key:
-``` go
-// If no corresponding value is found and no cache-aside getter was provided, returns ErrKeyNotFound.
-value, err := table.Get(ctx, "key")
-if err != nil {
-}
-```
-##### Retrieving a value by key:
``` go
// If no corresponding value is found and no cache-aside getter was provided, returns ErrKeyNotFound.
value, err := table.Get(ctx, "key")
if err != nil {
}
```
+
##### Create a new entry:
+
``` go
-if err := table.Put(ctx, "key", sess, time.Hour); err != nil {
+if err := table.Put(ctx, "key", session, time.Hour); err != nil {
}
```
##### Evicting a value by key:
+
``` go
if err := table.Evict(ctx, "key"); err != nil {
}
```
-##### Registering and using a function to update a value:
+##### Registering an RPC for value updates:
+
``` go
-// WithFunction is used to register the function.
+// WithProcedure is used to register the RPC.
table := nitecache.NewTable[RateLimitEntry]("rate-limiter").
- WithFunction(
- "updateUsername", func(r RateLimitEntry, args []byte) (Session, time.Duration, error) {
- r.Count++
- return r, 0, nil
- },
- ).
+ WithProcedure("incrementRL", func(_ context.Context, r RateLimitEntry, params []byte) (RateLimitEntry, time.Duration, error) {
+ r.Count++
+ return r, 0, nil
+ }).
Build(c)
-// Executes previously registered function "updateUsername".
-// You can pass arguments as bytes to the function call for more flexibility.
-// This can be useful if you need previous state to properly update a value.
-sess, err = table.Execute(ctx, "key", "updateUsername", []byte("new username"))
+// You can pass parameters as bytes to the function call for more flexibility.
+rlEntry, err = table.Call(ctx, "key", "incrementRL", []byte("Hello there!"))
if err != nil {
}
```
-(back to top)
-
-
## Roadmap
See the [open issues](https://github.com/MysteriousPotato/nitecache/issues) for a full list of proposed features (and known issues).
-(back to top)
-
-
## Contributing
@@ -169,13 +144,9 @@ Contributions are what make the open source community such an amazing place to l
If you have a suggestion that would make this better, please fork the repo and create a pull request. You can also simply open an issue with the tag "enhancement".
-(back to top)
-
## License
-Distributed under the MIT License. See [LICENSE](https://github.com/MysteriousPotato/nitecache/blob/master/LICENSE) for more information.
-
-(back to top)
\ No newline at end of file
+Distributed under the MIT License. See [LICENSE](https://github.com/MysteriousPotato/nitecache/blob/main/LICENSE) for more information.
\ No newline at end of file
diff --git a/service.go b/service.go
index 1cb241e..be9d24b 100644
--- a/service.go
+++ b/service.go
@@ -2,13 +2,11 @@ package nitecache
import (
"context"
- "errors"
"net"
"time"
"github.com/MysteriousPotato/nitecache/servicepb"
"google.golang.org/grpc"
- "google.golang.org/grpc/credentials/insecure"
)
type service struct {
@@ -16,51 +14,47 @@ type service struct {
cache *Cache
}
-type client struct {
- conn *grpc.ClientConn
- servicepb.ServiceClient
- timeout time.Duration
+type server struct {
+ listener net.Listener
+ server *grpc.Server
}
-type clients map[string]client
+type (
+ client struct {
+ conn *grpc.ClientConn
+ servicepb.ServiceClient
+ }
+ clients map[string]*client
+)
-// We use a service mesh for automatic mTLS, but it would probably be better to support configuration for others who might not...
-func newClient(addr string, timeout time.Duration) (client, error) {
+func newClient(addr string, c *Cache) (*client, error) {
conn, err := grpc.Dial(
addr,
- grpc.WithTransportCredentials(insecure.NewCredentials()),
- grpc.WithUnaryInterceptor(timeoutInterceptor(timeout)),
+ grpc.WithTransportCredentials(c.transportCredentials),
+ grpc.WithUnaryInterceptor(timeoutInterceptor(c.timeout)),
)
if err != nil {
- return client{}, err
+ return nil, err
}
+
grpcClient := servicepb.NewServiceClient(conn)
- return client{
+ return &client{
ServiceClient: grpcClient,
conn: conn,
}, nil
}
-func newServer(addr string, cache *Cache) (*grpc.Server, func() error, error) {
- lis, err := net.Listen("tcp", addr)
+func newService(addr string, cache *Cache) (server, error) {
+ grpcServer := grpc.NewServer(cache.grpcOpts...)
+ servicepb.RegisterServiceServer(grpcServer, &service{cache: cache})
+
+ listener, err := net.Listen("tcp", addr)
if err != nil {
- return nil, nil, err
+ return server{}, err
}
- var opts []grpc.ServerOption
- grpcServer := grpc.NewServer(opts...)
-
- servicepb.RegisterServiceServer(
- grpcServer, &service{
- cache: cache,
- },
- )
-
- start := func() error {
- return grpcServer.Serve(lis)
- }
- return grpcServer, start, nil
+ return server{server: grpcServer, listener: listener}, nil
}
func timeoutInterceptor(timeout time.Duration) func(
@@ -116,44 +110,44 @@ func (s service) Get(_ context.Context, r *servicepb.GetRequest) (*servicepb.Get
}, nil
}
-func (s service) Put(_ context.Context, r *servicepb.PutRequest) (*servicepb.EmptyResponse, error) {
+func (s service) Put(_ context.Context, r *servicepb.PutRequest) (*servicepb.Empty, error) {
t, err := s.cache.getTable(r.Table)
if err != nil {
return nil, err
}
- t.putLocally(
- item{
- Expire: time.UnixMicro(r.Item.Expire),
- Value: r.Item.Value,
- Key: r.Item.Key,
- },
- )
- return &servicepb.EmptyResponse{}, nil
+ return &servicepb.Empty{}, t.putLocally(item{
+ Expire: time.UnixMicro(r.Item.Expire),
+ Value: r.Item.Value,
+ Key: r.Item.Key,
+ })
}
-func (s service) Evict(_ context.Context, r *servicepb.EvictRequest) (*servicepb.EmptyResponse, error) {
+func (s service) Evict(_ context.Context, r *servicepb.EvictRequest) (*servicepb.Empty, error) {
t, err := s.cache.getTable(r.Table)
if err != nil {
return nil, err
}
- t.evictLocally(r.Key)
- return &servicepb.EmptyResponse{}, nil
+ return &servicepb.Empty{}, t.evictLocally(r.Key)
}
-func (s service) Execute(_ context.Context, r *servicepb.ExecuteRequest) (*servicepb.ExecuteResponse, error) {
+func (s service) HealthCheck(_ context.Context, _ *servicepb.Empty) (*servicepb.Empty, error) {
+ return &servicepb.Empty{}, nil
+}
+
+func (s service) Call(ctx context.Context, r *servicepb.CallRequest) (*servicepb.CallResponse, error) {
t, err := s.cache.getTable(r.Table)
if err != nil {
return nil, err
}
- item, err := t.executeLocally(r.Key, r.Function, r.Args)
+ item, err := t.callLocally(ctx, r.Key, r.Procedure, r.Args)
if err != nil {
return nil, err
}
- return &servicepb.ExecuteResponse{
+ return &servicepb.CallResponse{
Item: &servicepb.Item{
Expire: item.Expire.UnixMicro(),
Value: item.Value,
@@ -161,41 +155,3 @@ func (s service) Execute(_ context.Context, r *servicepb.ExecuteRequest) (*servi
},
}, nil
}
-
-// Cleanup clients that are not present in peers and create new clients for new peers
-func (cs clients) set(peers []Member, timeout time.Duration) error {
- if timeout == 0 {
- timeout = time.Second * 2
- }
-
- peersMap := map[string]Member{}
- for _, p := range peers {
- peersMap[p.ID] = p
- }
-
- var errs []error
- for id := range cs {
- if _, ok := peersMap[id]; !ok {
- if err := cs[id].conn.Close(); err != nil {
- errs = append(errs, err)
- }
- delete(cs, id)
- }
- }
-
- for id, p := range peersMap {
- if _, ok := cs[id]; ok {
- continue
- }
- client, err := newClient(p.Addr, timeout)
- if err != nil {
- return err
- }
- cs[id] = client
- }
-
- if errs != nil {
- return errors.Join(errs...)
- }
- return nil
-}
diff --git a/servicepb/proto.sh b/servicepb/proto.sh
deleted file mode 100644
index a3cd99b..0000000
--- a/servicepb/proto.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-protoc --go_out=. --go_opt=paths=source_relative \
- --go-grpc_out=. --go-grpc_opt=paths=source_relative \
- ./service.proto
\ No newline at end of file
diff --git a/servicepb/service.pb.go b/servicepb/service.pb.go
index 55b3c1b..27ba9c2 100644
--- a/servicepb/service.pb.go
+++ b/servicepb/service.pb.go
@@ -1,8 +1,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.1
-// protoc v3.14.0
-// source: service.proto
+// protoc-gen-go v1.31.0
+// protoc v4.24.4
+// source: servicepb/service.proto
package servicepb
@@ -33,7 +33,7 @@ type Item struct {
func (x *Item) Reset() {
*x = Item{}
if protoimpl.UnsafeEnabled {
- mi := &file_service_proto_msgTypes[0]
+ mi := &file_servicepb_service_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -46,7 +46,7 @@ func (x *Item) String() string {
func (*Item) ProtoMessage() {}
func (x *Item) ProtoReflect() protoreflect.Message {
- mi := &file_service_proto_msgTypes[0]
+ mi := &file_servicepb_service_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -59,7 +59,7 @@ func (x *Item) ProtoReflect() protoreflect.Message {
// Deprecated: Use Item.ProtoReflect.Descriptor instead.
func (*Item) Descriptor() ([]byte, []int) {
- return file_service_proto_rawDescGZIP(), []int{0}
+ return file_servicepb_service_proto_rawDescGZIP(), []int{0}
}
func (x *Item) GetKey() string {
@@ -95,7 +95,7 @@ type GetRequest struct {
func (x *GetRequest) Reset() {
*x = GetRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_service_proto_msgTypes[1]
+ mi := &file_servicepb_service_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -108,7 +108,7 @@ func (x *GetRequest) String() string {
func (*GetRequest) ProtoMessage() {}
func (x *GetRequest) ProtoReflect() protoreflect.Message {
- mi := &file_service_proto_msgTypes[1]
+ mi := &file_servicepb_service_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -121,7 +121,7 @@ func (x *GetRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetRequest.ProtoReflect.Descriptor instead.
func (*GetRequest) Descriptor() ([]byte, []int) {
- return file_service_proto_rawDescGZIP(), []int{1}
+ return file_servicepb_service_proto_rawDescGZIP(), []int{1}
}
func (x *GetRequest) GetTable() string {
@@ -149,7 +149,7 @@ type GetResponse struct {
func (x *GetResponse) Reset() {
*x = GetResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_service_proto_msgTypes[2]
+ mi := &file_servicepb_service_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -162,7 +162,7 @@ func (x *GetResponse) String() string {
func (*GetResponse) ProtoMessage() {}
func (x *GetResponse) ProtoReflect() protoreflect.Message {
- mi := &file_service_proto_msgTypes[2]
+ mi := &file_servicepb_service_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -175,7 +175,7 @@ func (x *GetResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetResponse.ProtoReflect.Descriptor instead.
func (*GetResponse) Descriptor() ([]byte, []int) {
- return file_service_proto_rawDescGZIP(), []int{2}
+ return file_servicepb_service_proto_rawDescGZIP(), []int{2}
}
func (x *GetResponse) GetItem() *Item {
@@ -197,7 +197,7 @@ type PutRequest struct {
func (x *PutRequest) Reset() {
*x = PutRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_service_proto_msgTypes[3]
+ mi := &file_servicepb_service_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -210,7 +210,7 @@ func (x *PutRequest) String() string {
func (*PutRequest) ProtoMessage() {}
func (x *PutRequest) ProtoReflect() protoreflect.Message {
- mi := &file_service_proto_msgTypes[3]
+ mi := &file_servicepb_service_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -223,7 +223,7 @@ func (x *PutRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use PutRequest.ProtoReflect.Descriptor instead.
func (*PutRequest) Descriptor() ([]byte, []int) {
- return file_service_proto_rawDescGZIP(), []int{3}
+ return file_servicepb_service_proto_rawDescGZIP(), []int{3}
}
func (x *PutRequest) GetTable() string {
@@ -252,7 +252,7 @@ type EvictRequest struct {
func (x *EvictRequest) Reset() {
*x = EvictRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_service_proto_msgTypes[4]
+ mi := &file_servicepb_service_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -265,7 +265,7 @@ func (x *EvictRequest) String() string {
func (*EvictRequest) ProtoMessage() {}
func (x *EvictRequest) ProtoReflect() protoreflect.Message {
- mi := &file_service_proto_msgTypes[4]
+ mi := &file_servicepb_service_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -278,7 +278,7 @@ func (x *EvictRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use EvictRequest.ProtoReflect.Descriptor instead.
func (*EvictRequest) Descriptor() ([]byte, []int) {
- return file_service_proto_rawDescGZIP(), []int{4}
+ return file_servicepb_service_proto_rawDescGZIP(), []int{4}
}
func (x *EvictRequest) GetTable() string {
@@ -295,34 +295,34 @@ func (x *EvictRequest) GetKey() string {
return ""
}
-type ExecuteRequest struct {
+type CallRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- Table string `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"`
- Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
- Function string `protobuf:"bytes,3,opt,name=function,proto3" json:"function,omitempty"`
- Args []byte `protobuf:"bytes,4,opt,name=args,proto3" json:"args,omitempty"`
+ Table string `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"`
+ Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+ Procedure string `protobuf:"bytes,3,opt,name=procedure,proto3" json:"procedure,omitempty"`
+ Args []byte `protobuf:"bytes,4,opt,name=args,proto3" json:"args,omitempty"`
}
-func (x *ExecuteRequest) Reset() {
- *x = ExecuteRequest{}
+func (x *CallRequest) Reset() {
+ *x = CallRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_service_proto_msgTypes[5]
+ mi := &file_servicepb_service_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *ExecuteRequest) String() string {
+func (x *CallRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ExecuteRequest) ProtoMessage() {}
+func (*CallRequest) ProtoMessage() {}
-func (x *ExecuteRequest) ProtoReflect() protoreflect.Message {
- mi := &file_service_proto_msgTypes[5]
+func (x *CallRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_servicepb_service_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -333,40 +333,40 @@ func (x *ExecuteRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use ExecuteRequest.ProtoReflect.Descriptor instead.
-func (*ExecuteRequest) Descriptor() ([]byte, []int) {
- return file_service_proto_rawDescGZIP(), []int{5}
+// Deprecated: Use CallRequest.ProtoReflect.Descriptor instead.
+func (*CallRequest) Descriptor() ([]byte, []int) {
+ return file_servicepb_service_proto_rawDescGZIP(), []int{5}
}
-func (x *ExecuteRequest) GetTable() string {
+func (x *CallRequest) GetTable() string {
if x != nil {
return x.Table
}
return ""
}
-func (x *ExecuteRequest) GetKey() string {
+func (x *CallRequest) GetKey() string {
if x != nil {
return x.Key
}
return ""
}
-func (x *ExecuteRequest) GetFunction() string {
+func (x *CallRequest) GetProcedure() string {
if x != nil {
- return x.Function
+ return x.Procedure
}
return ""
}
-func (x *ExecuteRequest) GetArgs() []byte {
+func (x *CallRequest) GetArgs() []byte {
if x != nil {
return x.Args
}
return nil
}
-type ExecuteResponse struct {
+type CallResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
@@ -374,23 +374,23 @@ type ExecuteResponse struct {
Item *Item `protobuf:"bytes,1,opt,name=item,proto3" json:"item,omitempty"`
}
-func (x *ExecuteResponse) Reset() {
- *x = ExecuteResponse{}
+func (x *CallResponse) Reset() {
+ *x = CallResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_service_proto_msgTypes[6]
+ mi := &file_servicepb_service_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *ExecuteResponse) String() string {
+func (x *CallResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ExecuteResponse) ProtoMessage() {}
+func (*CallResponse) ProtoMessage() {}
-func (x *ExecuteResponse) ProtoReflect() protoreflect.Message {
- mi := &file_service_proto_msgTypes[6]
+func (x *CallResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_servicepb_service_proto_msgTypes[6]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -401,41 +401,41 @@ func (x *ExecuteResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use ExecuteResponse.ProtoReflect.Descriptor instead.
-func (*ExecuteResponse) Descriptor() ([]byte, []int) {
- return file_service_proto_rawDescGZIP(), []int{6}
+// Deprecated: Use CallResponse.ProtoReflect.Descriptor instead.
+func (*CallResponse) Descriptor() ([]byte, []int) {
+ return file_servicepb_service_proto_rawDescGZIP(), []int{6}
}
-func (x *ExecuteResponse) GetItem() *Item {
+func (x *CallResponse) GetItem() *Item {
if x != nil {
return x.Item
}
return nil
}
-type EmptyResponse struct {
+type Empty struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
-func (x *EmptyResponse) Reset() {
- *x = EmptyResponse{}
+func (x *Empty) Reset() {
+ *x = Empty{}
if protoimpl.UnsafeEnabled {
- mi := &file_service_proto_msgTypes[7]
+ mi := &file_servicepb_service_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *EmptyResponse) String() string {
+func (x *Empty) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*EmptyResponse) ProtoMessage() {}
+func (*Empty) ProtoMessage() {}
-func (x *EmptyResponse) ProtoReflect() protoreflect.Message {
- mi := &file_service_proto_msgTypes[7]
+func (x *Empty) ProtoReflect() protoreflect.Message {
+ mi := &file_servicepb_service_proto_msgTypes[7]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -446,116 +446,119 @@ func (x *EmptyResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use EmptyResponse.ProtoReflect.Descriptor instead.
-func (*EmptyResponse) Descriptor() ([]byte, []int) {
- return file_service_proto_rawDescGZIP(), []int{7}
-}
-
-var File_service_proto protoreflect.FileDescriptor
-
-var file_service_proto_rawDesc = []byte{
- 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
- 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x70, 0x62, 0x22, 0x46, 0x0a, 0x04, 0x49, 0x74,
- 0x65, 0x6d, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x78,
- 0x70, 0x69, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x65, 0x78, 0x70, 0x69,
- 0x72, 0x65, 0x22, 0x34, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x32, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x04, 0x69, 0x74, 0x65, 0x6d, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x70,
- 0x62, 0x2e, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x04, 0x69, 0x74, 0x65, 0x6d, 0x22, 0x47, 0x0a, 0x0a,
- 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61,
+// Deprecated: Use Empty.ProtoReflect.Descriptor instead.
+func (*Empty) Descriptor() ([]byte, []int) {
+ return file_servicepb_service_proto_rawDescGZIP(), []int{7}
+}
+
+var File_servicepb_service_proto protoreflect.FileDescriptor
+
+var file_servicepb_service_proto_rawDesc = []byte{
+ 0x0a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x70, 0x62, 0x2f, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x70, 0x62, 0x22, 0x46, 0x0a, 0x04, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x10, 0x0a, 0x03,
+ 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x22, 0x34, 0x0a, 0x0a,
+ 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61,
0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x12, 0x23, 0x0a, 0x04, 0x69, 0x74, 0x65, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f,
- 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x70, 0x62, 0x2e, 0x49, 0x74, 0x65, 0x6d, 0x52,
- 0x04, 0x69, 0x74, 0x65, 0x6d, 0x22, 0x36, 0x0a, 0x0c, 0x45, 0x76, 0x69, 0x63, 0x74, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b,
- 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x68, 0x0a,
- 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+ 0x65, 0x79, 0x22, 0x32, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x23, 0x0a, 0x04, 0x69, 0x74, 0x65, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x0f, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x70, 0x62, 0x2e, 0x49, 0x74, 0x65, 0x6d,
+ 0x52, 0x04, 0x69, 0x74, 0x65, 0x6d, 0x22, 0x47, 0x0a, 0x0a, 0x50, 0x75, 0x74, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x23, 0x0a, 0x04, 0x69, 0x74,
+ 0x65, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x70, 0x62, 0x2e, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x04, 0x69, 0x74, 0x65, 0x6d, 0x22,
+ 0x36, 0x0a, 0x0c, 0x45, 0x76, 0x69, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x75, 0x6e, 0x63, 0x74,
- 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6e, 0x63, 0x74,
- 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x0c, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x22, 0x36, 0x0a, 0x0f, 0x45, 0x78, 0x65, 0x63, 0x75,
- 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x04, 0x69, 0x74,
- 0x65, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
- 0x63, 0x65, 0x70, 0x62, 0x2e, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x04, 0x69, 0x74, 0x65, 0x6d, 0x22,
- 0x0f, 0x0a, 0x0d, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x32, 0xfd, 0x01, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x36, 0x0a, 0x03,
- 0x47, 0x65, 0x74, 0x12, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x70, 0x62, 0x2e,
- 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x00, 0x12, 0x38, 0x0a, 0x03, 0x50, 0x75, 0x74, 0x12, 0x15, 0x2e, 0x73, 0x65,
- 0x72, 0x76, 0x69, 0x63, 0x65, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x70, 0x62, 0x2e, 0x45,
- 0x6d, 0x70, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3c,
- 0x0a, 0x05, 0x45, 0x76, 0x69, 0x63, 0x74, 0x12, 0x17, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x70, 0x62, 0x2e, 0x45, 0x76, 0x69, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x18, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70,
- 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x42, 0x0a, 0x07,
- 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x12, 0x19, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x70, 0x62, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x70, 0x62, 0x2e, 0x45,
- 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
- 0x42, 0x17, 0x5a, 0x15, 0x2e, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x70, 0x62, 0x2f,
- 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x33,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x67, 0x0a, 0x0b, 0x43, 0x61, 0x6c, 0x6c, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x10, 0x0a, 0x03,
+ 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x1c,
+ 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x64, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x64, 0x75, 0x72, 0x65, 0x12, 0x12, 0x0a, 0x04,
+ 0x61, 0x72, 0x67, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73,
+ 0x22, 0x33, 0x0a, 0x0c, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x23, 0x0a, 0x04, 0x69, 0x74, 0x65, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f,
+ 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x70, 0x62, 0x2e, 0x49, 0x74, 0x65, 0x6d, 0x52,
+ 0x04, 0x69, 0x74, 0x65, 0x6d, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x32, 0x99,
+ 0x02, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x36, 0x0a, 0x03, 0x47, 0x65,
+ 0x74, 0x12, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x70, 0x62, 0x2e, 0x47, 0x65,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x12, 0x30, 0x0a, 0x03, 0x50, 0x75, 0x74, 0x12, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x10, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70,
+ 0x74, 0x79, 0x22, 0x00, 0x12, 0x34, 0x0a, 0x05, 0x45, 0x76, 0x69, 0x63, 0x74, 0x12, 0x17, 0x2e,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x70, 0x62, 0x2e, 0x45, 0x76, 0x69, 0x63, 0x74, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x04, 0x43, 0x61,
+ 0x6c, 0x6c, 0x12, 0x16, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x70, 0x62, 0x2e, 0x43,
+ 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x70, 0x62, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x33, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43,
+ 0x68, 0x65, 0x63, 0x6b, 0x12, 0x10, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x70, 0x62,
+ 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x10, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x42, 0x17, 0x5a, 0x15, 0x2e, 0x2f,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x70, 0x62, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
- file_service_proto_rawDescOnce sync.Once
- file_service_proto_rawDescData = file_service_proto_rawDesc
+ file_servicepb_service_proto_rawDescOnce sync.Once
+ file_servicepb_service_proto_rawDescData = file_servicepb_service_proto_rawDesc
)
-func file_service_proto_rawDescGZIP() []byte {
- file_service_proto_rawDescOnce.Do(func() {
- file_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_service_proto_rawDescData)
+func file_servicepb_service_proto_rawDescGZIP() []byte {
+ file_servicepb_service_proto_rawDescOnce.Do(func() {
+ file_servicepb_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_servicepb_service_proto_rawDescData)
})
- return file_service_proto_rawDescData
+ return file_servicepb_service_proto_rawDescData
}
-var file_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
-var file_service_proto_goTypes = []interface{}{
- (*Item)(nil), // 0: servicepb.Item
- (*GetRequest)(nil), // 1: servicepb.GetRequest
- (*GetResponse)(nil), // 2: servicepb.GetResponse
- (*PutRequest)(nil), // 3: servicepb.PutRequest
- (*EvictRequest)(nil), // 4: servicepb.EvictRequest
- (*ExecuteRequest)(nil), // 5: servicepb.ExecuteRequest
- (*ExecuteResponse)(nil), // 6: servicepb.ExecuteResponse
- (*EmptyResponse)(nil), // 7: servicepb.EmptyResponse
+var file_servicepb_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
+var file_servicepb_service_proto_goTypes = []interface{}{
+ (*Item)(nil), // 0: servicepb.Item
+ (*GetRequest)(nil), // 1: servicepb.GetRequest
+ (*GetResponse)(nil), // 2: servicepb.GetResponse
+ (*PutRequest)(nil), // 3: servicepb.PutRequest
+ (*EvictRequest)(nil), // 4: servicepb.EvictRequest
+ (*CallRequest)(nil), // 5: servicepb.CallRequest
+ (*CallResponse)(nil), // 6: servicepb.CallResponse
+ (*Empty)(nil), // 7: servicepb.Empty
}
-var file_service_proto_depIdxs = []int32{
+var file_servicepb_service_proto_depIdxs = []int32{
0, // 0: servicepb.GetResponse.item:type_name -> servicepb.Item
0, // 1: servicepb.PutRequest.item:type_name -> servicepb.Item
- 0, // 2: servicepb.ExecuteResponse.item:type_name -> servicepb.Item
+ 0, // 2: servicepb.CallResponse.item:type_name -> servicepb.Item
1, // 3: servicepb.Service.Get:input_type -> servicepb.GetRequest
3, // 4: servicepb.Service.Put:input_type -> servicepb.PutRequest
4, // 5: servicepb.Service.Evict:input_type -> servicepb.EvictRequest
- 5, // 6: servicepb.Service.Execute:input_type -> servicepb.ExecuteRequest
- 2, // 7: servicepb.Service.Get:output_type -> servicepb.GetResponse
- 7, // 8: servicepb.Service.Put:output_type -> servicepb.EmptyResponse
- 7, // 9: servicepb.Service.Evict:output_type -> servicepb.EmptyResponse
- 6, // 10: servicepb.Service.Execute:output_type -> servicepb.ExecuteResponse
- 7, // [7:11] is the sub-list for method output_type
- 3, // [3:7] is the sub-list for method input_type
+ 5, // 6: servicepb.Service.Call:input_type -> servicepb.CallRequest
+ 7, // 7: servicepb.Service.HealthCheck:input_type -> servicepb.Empty
+ 2, // 8: servicepb.Service.Get:output_type -> servicepb.GetResponse
+ 7, // 9: servicepb.Service.Put:output_type -> servicepb.Empty
+ 7, // 10: servicepb.Service.Evict:output_type -> servicepb.Empty
+ 6, // 11: servicepb.Service.Call:output_type -> servicepb.CallResponse
+ 7, // 12: servicepb.Service.HealthCheck:output_type -> servicepb.Empty
+ 8, // [8:13] is the sub-list for method output_type
+ 3, // [3:8] is the sub-list for method input_type
3, // [3:3] is the sub-list for extension type_name
3, // [3:3] is the sub-list for extension extendee
0, // [0:3] is the sub-list for field type_name
}
-func init() { file_service_proto_init() }
-func file_service_proto_init() {
- if File_service_proto != nil {
+func init() { file_servicepb_service_proto_init() }
+func file_servicepb_service_proto_init() {
+ if File_servicepb_service_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
- file_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_servicepb_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Item); i {
case 0:
return &v.state
@@ -567,7 +570,7 @@ func file_service_proto_init() {
return nil
}
}
- file_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_servicepb_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetRequest); i {
case 0:
return &v.state
@@ -579,7 +582,7 @@ func file_service_proto_init() {
return nil
}
}
- file_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_servicepb_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetResponse); i {
case 0:
return &v.state
@@ -591,7 +594,7 @@ func file_service_proto_init() {
return nil
}
}
- file_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_servicepb_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PutRequest); i {
case 0:
return &v.state
@@ -603,7 +606,7 @@ func file_service_proto_init() {
return nil
}
}
- file_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_servicepb_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*EvictRequest); i {
case 0:
return &v.state
@@ -615,8 +618,8 @@ func file_service_proto_init() {
return nil
}
}
- file_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ExecuteRequest); i {
+ file_servicepb_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CallRequest); i {
case 0:
return &v.state
case 1:
@@ -627,8 +630,8 @@ func file_service_proto_init() {
return nil
}
}
- file_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ExecuteResponse); i {
+ file_servicepb_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CallResponse); i {
case 0:
return &v.state
case 1:
@@ -639,8 +642,8 @@ func file_service_proto_init() {
return nil
}
}
- file_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EmptyResponse); i {
+ file_servicepb_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Empty); i {
case 0:
return &v.state
case 1:
@@ -656,18 +659,18 @@ func file_service_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_service_proto_rawDesc,
+ RawDescriptor: file_servicepb_service_proto_rawDesc,
NumEnums: 0,
NumMessages: 8,
NumExtensions: 0,
NumServices: 1,
},
- GoTypes: file_service_proto_goTypes,
- DependencyIndexes: file_service_proto_depIdxs,
- MessageInfos: file_service_proto_msgTypes,
+ GoTypes: file_servicepb_service_proto_goTypes,
+ DependencyIndexes: file_servicepb_service_proto_depIdxs,
+ MessageInfos: file_servicepb_service_proto_msgTypes,
}.Build()
- File_service_proto = out.File
- file_service_proto_rawDesc = nil
- file_service_proto_goTypes = nil
- file_service_proto_depIdxs = nil
+ File_servicepb_service_proto = out.File
+ file_servicepb_service_proto_rawDesc = nil
+ file_servicepb_service_proto_goTypes = nil
+ file_servicepb_service_proto_depIdxs = nil
}
diff --git a/servicepb/service.proto b/servicepb/service.proto
index a71c917..ce58b92 100644
--- a/servicepb/service.proto
+++ b/servicepb/service.proto
@@ -6,9 +6,10 @@ package servicepb;
service Service{
rpc Get(GetRequest) returns (GetResponse) {}
- rpc Put(PutRequest) returns (EmptyResponse) {}
- rpc Evict(EvictRequest) returns (EmptyResponse) {}
- rpc Execute(ExecuteRequest) returns (ExecuteResponse) {}
+ rpc Put(PutRequest) returns (Empty) {}
+ rpc Evict(EvictRequest) returns (Empty) {}
+ rpc Call(CallRequest) returns (CallResponse) {}
+ rpc HealthCheck(Empty) returns (Empty) {}
}
message Item{
@@ -36,16 +37,16 @@ message EvictRequest{
string key = 2;
}
-message ExecuteRequest{
+message CallRequest{
string table = 1;
string key = 2;
- string function = 3;
+ string procedure = 3;
bytes args = 4;
}
-message ExecuteResponse{
+message CallResponse{
Item item = 1;
}
-message EmptyResponse{
+message Empty{
}
diff --git a/servicepb/service_grpc.pb.go b/servicepb/service_grpc.pb.go
index faf9d8a..70af797 100644
--- a/servicepb/service_grpc.pb.go
+++ b/servicepb/service_grpc.pb.go
@@ -1,8 +1,8 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
-// - protoc-gen-go-grpc v1.2.0
-// - protoc v3.14.0
-// source: service.proto
+// - protoc-gen-go-grpc v1.3.0
+// - protoc v4.24.4
+// source: servicepb/service.proto
package servicepb
@@ -18,14 +18,23 @@ import (
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
+const (
+ Service_Get_FullMethodName = "/servicepb.Service/Get"
+ Service_Put_FullMethodName = "/servicepb.Service/Put"
+ Service_Evict_FullMethodName = "/servicepb.Service/Evict"
+ Service_Call_FullMethodName = "/servicepb.Service/Call"
+ Service_HealthCheck_FullMethodName = "/servicepb.Service/HealthCheck"
+)
+
// ServiceClient is the client API for Service service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type ServiceClient interface {
Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error)
- Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*EmptyResponse, error)
- Evict(ctx context.Context, in *EvictRequest, opts ...grpc.CallOption) (*EmptyResponse, error)
- Execute(ctx context.Context, in *ExecuteRequest, opts ...grpc.CallOption) (*ExecuteResponse, error)
+ Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*Empty, error)
+ Evict(ctx context.Context, in *EvictRequest, opts ...grpc.CallOption) (*Empty, error)
+ Call(ctx context.Context, in *CallRequest, opts ...grpc.CallOption) (*CallResponse, error)
+ HealthCheck(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error)
}
type serviceClient struct {
@@ -38,34 +47,43 @@ func NewServiceClient(cc grpc.ClientConnInterface) ServiceClient {
func (c *serviceClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) {
out := new(GetResponse)
- err := c.cc.Invoke(ctx, "/servicepb.Service/Get", in, out, opts...)
+ err := c.cc.Invoke(ctx, Service_Get_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *serviceClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*Empty, error) {
+ out := new(Empty)
+ err := c.cc.Invoke(ctx, Service_Put_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *serviceClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*EmptyResponse, error) {
- out := new(EmptyResponse)
- err := c.cc.Invoke(ctx, "/servicepb.Service/Put", in, out, opts...)
+func (c *serviceClient) Evict(ctx context.Context, in *EvictRequest, opts ...grpc.CallOption) (*Empty, error) {
+ out := new(Empty)
+ err := c.cc.Invoke(ctx, Service_Evict_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *serviceClient) Evict(ctx context.Context, in *EvictRequest, opts ...grpc.CallOption) (*EmptyResponse, error) {
- out := new(EmptyResponse)
- err := c.cc.Invoke(ctx, "/servicepb.Service/Evict", in, out, opts...)
+func (c *serviceClient) Call(ctx context.Context, in *CallRequest, opts ...grpc.CallOption) (*CallResponse, error) {
+ out := new(CallResponse)
+ err := c.cc.Invoke(ctx, Service_Call_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *serviceClient) Execute(ctx context.Context, in *ExecuteRequest, opts ...grpc.CallOption) (*ExecuteResponse, error) {
- out := new(ExecuteResponse)
- err := c.cc.Invoke(ctx, "/servicepb.Service/Execute", in, out, opts...)
+func (c *serviceClient) HealthCheck(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) {
+ out := new(Empty)
+ err := c.cc.Invoke(ctx, Service_HealthCheck_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -77,9 +95,10 @@ func (c *serviceClient) Execute(ctx context.Context, in *ExecuteRequest, opts ..
// for forward compatibility
type ServiceServer interface {
Get(context.Context, *GetRequest) (*GetResponse, error)
- Put(context.Context, *PutRequest) (*EmptyResponse, error)
- Evict(context.Context, *EvictRequest) (*EmptyResponse, error)
- Execute(context.Context, *ExecuteRequest) (*ExecuteResponse, error)
+ Put(context.Context, *PutRequest) (*Empty, error)
+ Evict(context.Context, *EvictRequest) (*Empty, error)
+ Call(context.Context, *CallRequest) (*CallResponse, error)
+ HealthCheck(context.Context, *Empty) (*Empty, error)
mustEmbedUnimplementedServiceServer()
}
@@ -90,14 +109,17 @@ type UnimplementedServiceServer struct {
func (UnimplementedServiceServer) Get(context.Context, *GetRequest) (*GetResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Get not implemented")
}
-func (UnimplementedServiceServer) Put(context.Context, *PutRequest) (*EmptyResponse, error) {
+func (UnimplementedServiceServer) Put(context.Context, *PutRequest) (*Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method Put not implemented")
}
-func (UnimplementedServiceServer) Evict(context.Context, *EvictRequest) (*EmptyResponse, error) {
+func (UnimplementedServiceServer) Evict(context.Context, *EvictRequest) (*Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method Evict not implemented")
}
-func (UnimplementedServiceServer) Execute(context.Context, *ExecuteRequest) (*ExecuteResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Execute not implemented")
+func (UnimplementedServiceServer) Call(context.Context, *CallRequest) (*CallResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Call not implemented")
+}
+func (UnimplementedServiceServer) HealthCheck(context.Context, *Empty) (*Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method HealthCheck not implemented")
}
func (UnimplementedServiceServer) mustEmbedUnimplementedServiceServer() {}
@@ -122,7 +144,7 @@ func _Service_Get_Handler(srv interface{}, ctx context.Context, dec func(interfa
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/servicepb.Service/Get",
+ FullMethod: Service_Get_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ServiceServer).Get(ctx, req.(*GetRequest))
@@ -140,7 +162,7 @@ func _Service_Put_Handler(srv interface{}, ctx context.Context, dec func(interfa
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/servicepb.Service/Put",
+ FullMethod: Service_Put_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ServiceServer).Put(ctx, req.(*PutRequest))
@@ -158,7 +180,7 @@ func _Service_Evict_Handler(srv interface{}, ctx context.Context, dec func(inter
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/servicepb.Service/Evict",
+ FullMethod: Service_Evict_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ServiceServer).Evict(ctx, req.(*EvictRequest))
@@ -166,20 +188,38 @@ func _Service_Evict_Handler(srv interface{}, ctx context.Context, dec func(inter
return interceptor(ctx, in, info, handler)
}
-func _Service_Execute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ExecuteRequest)
+func _Service_Call_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CallRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ServiceServer).Call(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Service_Call_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ServiceServer).Call(ctx, req.(*CallRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Service_HealthCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(Empty)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
- return srv.(ServiceServer).Execute(ctx, in)
+ return srv.(ServiceServer).HealthCheck(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/servicepb.Service/Execute",
+ FullMethod: Service_HealthCheck_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ServiceServer).Execute(ctx, req.(*ExecuteRequest))
+ return srv.(ServiceServer).HealthCheck(ctx, req.(*Empty))
}
return interceptor(ctx, in, info, handler)
}
@@ -204,10 +244,14 @@ var Service_ServiceDesc = grpc.ServiceDesc{
Handler: _Service_Evict_Handler,
},
{
- MethodName: "Execute",
- Handler: _Service_Execute_Handler,
+ MethodName: "Call",
+ Handler: _Service_Call_Handler,
+ },
+ {
+ MethodName: "HealthCheck",
+ Handler: _Service_HealthCheck_Handler,
},
},
Streams: []grpc.StreamDesc{},
- Metadata: "service.proto",
+ Metadata: "servicepb/service.proto",
}
diff --git a/store.go b/store.go
index 297e788..7460c8a 100644
--- a/store.go
+++ b/store.go
@@ -1,6 +1,8 @@
package nitecache
import (
+ "context"
+ "github.com/MysteriousPotato/nitecache/inmem"
"time"
"github.com/MysteriousPotato/go-lockable"
@@ -9,159 +11,147 @@ import (
// Getter Type used for auto cache filling
type Getter[T any] func(key string) (T, time.Duration, error)
-type storeOpts[T any] struct {
- getter Getter[T]
- evictionPolicy EvictionPolicy
- codec Codec[T]
+type (
+ storeOpts[T any] struct {
+ getter Getter[T]
+ storage Storage
+ codec Codec[T]
+ }
+ store[T any] struct {
+ lock lockable.Lockable[string]
+ getter Getter[T]
+ internal Storage
+ codec Codec[T]
+ }
+ item struct {
+ Expire time.Time
+ Value []byte
+ Key string
+ }
+)
+
+type Storage interface {
+ Put(key string, value item, opt ...inmem.Opt) bool
+ Evict(key string) bool
+ Get(key string, opt ...inmem.Opt) (item, bool)
}
-type store[T any] struct {
- items lockable.Map[string, item]
- getter Getter[T]
- evictionPolicy EvictionPolicy
- closeCh chan bool
- codec Codec[T]
+func LFU(threshold int) Storage {
+ return inmem.NewLFU[string, item](threshold)
}
-type item struct {
- Expire time.Time
- Value []byte
- Key string
+func LRU(threshold int) Storage {
+ return inmem.NewLRU[string, item](threshold)
}
func newStore[T any](opts storeOpts[T]) *store[T] {
- if opts.evictionPolicy == nil {
- opts.evictionPolicy = NoEvictionPolicy{}
- }
if opts.codec == nil {
- opts.codec = &jsonCodec[T]{}
- }
- s := store[T]{
- items: lockable.NewMap[string, item](),
- evictionPolicy: opts.evictionPolicy,
- getter: opts.getter,
- codec: opts.codec,
- closeCh: make(chan bool),
- }
- s.evictionPolicy.setEvictFn(s.items.Delete)
-
- go func() {
- ticker := time.NewTicker(time.Second)
- for range ticker.C {
- select {
- case <-s.closeCh:
- return
- default:
- s.evictionPolicy.apply()
- }
+ var v T
+ anyV := any(v)
+ if _, isByteSlice := anyV.([]byte); isByteSlice {
+ opts.codec = any(StringCodec[[]byte]{}).(Codec[T])
+ } else if _, isString := anyV.(string); isString {
+ opts.codec = any(StringCodec[string]{}).(Codec[T])
+ } else {
+ opts.codec = &JsonCodec[T]{}
}
- }()
-
- return &s
-}
+ }
-func (s store[T]) newItem(key string, value T, ttl time.Duration) (item, error) {
- var expire time.Time
- if ttl != 0 {
- expire = time.Now().Add(ttl)
+ var storage Storage
+ if opts.storage == nil {
+ storage = inmem.NewCache[string, item]()
+ } else {
+ storage = opts.storage
}
- b, err := s.codec.Encode(value)
- if err != nil {
- return item{}, err
+ s := store[T]{
+ lock: lockable.New[string](),
+ getter: opts.getter,
+ codec: opts.codec,
+ internal: storage,
}
- return item{
- Expire: expire,
- Value: b,
- Key: key,
- }, nil
+ return &s
}
func (s store[T]) get(key string) (item, bool, error) {
var unlocked bool
- s.items.RLockKey(key)
+ s.lock.RLockKey(key)
defer func() {
if !unlocked {
- s.items.RUnlockKey(key)
+ s.lock.RUnlockKey(key)
}
}()
- itm, hit := s.items.Load(key)
+ itm, hit := s.internal.Get(key)
if s.getter != nil && (!hit || itm.isExpired()) {
- s.items.RUnlockKey(key)
+ s.lock.RUnlockKey(key)
unlocked = true
- s.items.LockKey(key)
- defer s.items.UnlockKey(key)
+ s.lock.LockKey(key)
+ defer s.lock.UnlockKey(key)
- item, err := s.unsafeCacheAside(key)
+ itm, err := s.unsafeCacheAside(key)
if err != nil {
- return item, false, err
+ return itm, false, err
}
- return item, false, nil
+ return itm, false, nil
}
-
- s.evictionPolicy.push(key)
-
return itm, hit, nil
}
func (s store[T]) put(itm item) {
- s.items.LockKey(itm.Key)
- defer s.items.UnlockKey(itm.Key)
+ s.lock.LockKey(itm.Key)
+ defer s.lock.UnlockKey(itm.Key)
- s.items.Store(itm.Key, itm)
- s.evictionPolicy.push(itm.Key)
+ s.internal.Put(itm.Key, itm)
}
func (s store[T]) evict(key string) {
- s.items.LockKey(key)
- defer s.items.UnlockKey(key)
+ s.lock.LockKey(key)
+ defer s.lock.UnlockKey(key)
- s.items.Delete(key)
- s.evictionPolicy.evict(key)
+ s.internal.Evict(key)
}
-func (s store[T]) update(key string, fn func(value T) (T, time.Duration, error)) (item, bool, error) {
- s.items.LockKey(key)
- defer s.items.UnlockKey(key)
-
- itm, hit := s.items.Load(key)
- if s.getter != nil && (!hit || itm.isExpired()) {
+func (s store[T]) update(
+ ctx context.Context,
+ key string,
+ args []byte,
+ fn func(context.Context, T, []byte) (T, time.Duration, error),
+) (item, error) {
+ s.lock.LockKey(key)
+ defer s.lock.UnlockKey(key)
+
+ oldItem, ok := s.internal.Get(key, inmem.SkipInc(true))
+ var skipInc bool
+ if !ok && s.getter != nil {
+ skipInc = true
var err error
- itm, err = s.unsafeCacheAside(key)
- if err != nil {
- return item{}, false, err
+ if oldItem, err = s.unsafeCacheAside(key); err != nil {
+ return item{}, err
}
}
- v, err := s.decode(itm)
+ v, err := s.decode(oldItem)
if err != nil {
- return item{}, hit, err
+ return item{}, err
}
- newVal, ttl, err := fn(v)
+ newValue, ttl, err := fn(ctx, v, args)
if err != nil {
- return item{}, hit, err
+ return item{}, err
}
- b, err := s.codec.Encode(newVal)
+ newItem, err := s.newItem(key, newValue, ttl)
if err != nil {
- return item{}, hit, err
- }
-
- newItem := item{
- Value: b,
- Expire: time.Now().Add(ttl),
- Key: key,
+ return item{}, err
}
- s.items.Store(key, newItem)
- s.evictionPolicy.push(key)
+ s.internal.Put(newItem.Key, newItem, inmem.SkipInc(skipInc))
- return newItem, hit, nil
+ return newItem, nil
}
// Make sure to lock the key before using this
@@ -176,13 +166,29 @@ func (s store[T]) unsafeCacheAside(key string) (item, error) {
return item{}, err
}
- s.items.Store(key, newItem)
-
- s.evictionPolicy.push(key)
+ s.internal.Put(key, newItem, inmem.SkipInc(true))
return newItem, nil
}
+func (s store[T]) newItem(key string, value T, ttl time.Duration) (item, error) {
+ var expire time.Time
+ if ttl != 0 {
+ expire = time.Now().Add(ttl)
+ }
+
+ b, err := s.codec.Encode(value)
+ if err != nil {
+ return item{}, err
+ }
+
+ return item{
+ Expire: expire,
+ Value: b,
+ Key: key,
+ }, nil
+}
+
func (s store[T]) decode(itm item) (T, error) {
var v T
if len(itm.Value) == 0 {
@@ -200,10 +206,6 @@ func (s store[T]) getEmptyValue() T {
return v
}
-func (s store[T]) close() {
- s.closeCh <- true
-}
-
func (i item) isExpired() bool {
return !i.Expire.IsZero() && i.Expire.Before(time.Now())
}
diff --git a/store_test.go b/store_test.go
index 3a4530a..1b45dd0 100644
--- a/store_test.go
+++ b/store_test.go
@@ -1,19 +1,19 @@
package nitecache
import (
+ "context"
"reflect"
+ "strings"
"testing"
"time"
)
func TestStore(t *testing.T) {
- s := newStore(
- storeOpts[string]{
- getter: func(key string) (string, time.Duration, error) {
- return "empty", 0, nil
- },
+ s := newStore(storeOpts[string]{
+ getter: func(_ string) (string, time.Duration, error) {
+ return "empty", 0, nil
},
- )
+ })
ops := []struct {
op string
@@ -30,7 +30,7 @@ func TestStore(t *testing.T) {
{op: "update", key: "1"},
{op: "update", key: "2"},
}
- expected := []string{"empty", "1", "empty", "1", "11", "emptyempty"}
+ expected := []string{"empty", "1", "empty", "1", "1 1", "empty empty"}
var got []string
for _, op := range ops {
@@ -58,9 +58,12 @@ func TestStore(t *testing.T) {
s.evict(op.key)
}
if op.op == "update" {
- item, _, err := s.update(
- op.key, func(value string) (string, time.Duration, error) {
- return value + value, 0, nil
+ item, err := s.update(
+ context.Background(),
+ op.key,
+ nil,
+ func(ctx context.Context, value string, args []byte) (string, time.Duration, error) {
+ return strings.Join([]string{value, value}, " "), 0, nil
},
)
if err != nil {
@@ -80,3 +83,39 @@ func TestStore(t *testing.T) {
t.Fatalf("expected: %v\ngot:%v", expected, got)
}
}
+
+func TestAutoCodecDetection(t *testing.T) {
+ stringStore := newStore(storeOpts[string]{})
+
+ str := "test"
+ encodedStr, err := stringStore.codec.Encode(str)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(string(encodedStr), str) {
+ t.Fatalf("expected %s, got %s", str, string(encodedStr))
+ }
+
+ bytesStore := newStore(storeOpts[[]byte]{})
+ bytes := []byte("test")
+ encodedBytes, err := bytesStore.codec.Encode(bytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(encodedBytes, bytes) {
+ t.Fatalf("expected %s, got %s", string(bytes), string(encodedBytes))
+ }
+
+ mapsStore := newStore(storeOpts[map[string]string]{})
+ expectedMap := []byte(`{"key":"value"}`)
+ encodedMap, err := mapsStore.codec.Encode(map[string]string{"key": "value"})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(encodedMap, expectedMap) {
+ t.Fatalf("expected %s, got %s", string(expectedMap), string(encodedMap))
+ }
+}
diff --git a/table.go b/table.go
index 9471c46..80b81d1 100644
--- a/table.go
+++ b/table.go
@@ -4,7 +4,6 @@ import (
"context"
"errors"
"fmt"
- "sync"
"time"
"github.com/MysteriousPotato/nitecache/servicepb"
@@ -12,37 +11,40 @@ import (
)
var (
- ErrFunctionNotFound = errors.New("table function not found")
- ErrKeyNotFound = errors.New("key not found")
+ ErrRPCNotFound = errors.New("RPC not found")
+ ErrKeyNotFound = errors.New("key not found")
)
-// Function type used for registering functions through [TableBuilder.WithFunction]
-type Function[T any] func(v T, args []byte) (T, time.Duration, error)
+// Procedure defines the type used for registering RPCs through [TableBuilder.WithProcedure].
+type Procedure[T any] func(ctx context.Context, v T, args []byte) (T, time.Duration, error)
type Table[T any] struct {
- name string
- store *store[T]
- hotStore *store[T]
- hotCacheEnabled bool
- getSF *singleflight.Group
- evictSF *singleflight.Group
- functions map[string]Function[T]
- functionsMu *sync.RWMutex
- metrics *Metrics
- cache *Cache
+ name string
+ store *store[T]
+ hotStore *store[T]
+ getSF *singleflight.Group
+ evictSF *singleflight.Group
+ procedures map[string]Procedure[T]
+ metrics *metrics
+ cache *Cache
}
-func (t Table[T]) Get(ctx context.Context, key string) (T, error) {
- owner, err := t.cache.ring.getOwner(key)
+func (t *Table[T]) Get(ctx context.Context, key string) (T, error) {
+ if t.isZero() {
+ var empty T
+ return empty, ErrCacheDestroyed
+ }
+
+ ownerID, err := t.cache.ring.GetOwner(key)
if err != nil {
return t.store.getEmptyValue(), err
}
var itm item
- if owner.ID == t.cache.selfID {
+ if ownerID == t.cache.self.ID {
itm, err = t.getLocally(key)
} else {
- client, err := t.cache.getClient(owner)
+ client, err := t.cache.getClient(ownerID)
if err != nil {
return t.store.getEmptyValue(), err
}
@@ -68,8 +70,12 @@ func (t Table[T]) Get(ctx context.Context, key string) (T, error) {
return v, nil
}
-func (t Table[T]) Put(ctx context.Context, key string, value T, ttl time.Duration) error {
- m, err := t.cache.ring.getOwner(key)
+func (t *Table[T]) Put(ctx context.Context, key string, value T, ttl time.Duration) error {
+ if t.isZero() {
+ return ErrCacheDestroyed
+ }
+
+ ownerID, err := t.cache.ring.GetOwner(key)
if err != nil {
return err
}
@@ -79,10 +85,12 @@ func (t Table[T]) Put(ctx context.Context, key string, value T, ttl time.Duratio
return err
}
- if m.ID == t.cache.selfID {
- t.putLocally(itm)
+ if ownerID == t.cache.self.ID {
+ if err := t.putLocally(itm); err != nil {
+ return err
+ }
} else {
- client, err := t.cache.getClient(m)
+ client, err := t.cache.getClient(ownerID)
if err != nil {
return err
}
@@ -95,16 +103,22 @@ func (t Table[T]) Put(ctx context.Context, key string, value T, ttl time.Duratio
return nil
}
-func (t Table[T]) Evict(ctx context.Context, key string) error {
- m, err := t.cache.ring.getOwner(key)
+func (t *Table[T]) Evict(ctx context.Context, key string) error {
+ if t.isZero() {
+ return ErrCacheDestroyed
+ }
+
+ ownerID, err := t.cache.ring.GetOwner(key)
if err != nil {
return err
}
- if m.ID == t.cache.selfID {
- t.evictLocally(key)
+ if ownerID == t.cache.self.ID {
+ if err := t.evictLocally(key); err != nil {
+ return err
+ }
} else {
- client, err := t.cache.getClient(m)
+ client, err := t.cache.getClient(ownerID)
if err != nil {
return err
}
@@ -116,23 +130,30 @@ func (t Table[T]) Evict(ctx context.Context, key string) error {
return nil
}
-// Execute Executes a function previously registered through [TableBuilder.WithFunction] to atomically update the value for a given key
-func (t Table[T]) Execute(ctx context.Context, key, function string, args []byte) (T, error) {
- owner, err := t.cache.ring.getOwner(key)
+// Call calls an RPC previously registered through [TableBuilder.WithProcedure] on the owner node to update the value for the given key.
+//
+// Call acquires a lock exclusive to the given key until the RPC has finished executing.
+func (t *Table[T]) Call(ctx context.Context, key, function string, args []byte) (T, error) {
+ if t.isZero() {
+ var empty T
+ return empty, ErrCacheDestroyed
+ }
+
+ ownerID, err := t.cache.ring.GetOwner(key)
if err != nil {
return t.store.getEmptyValue(), err
}
var itm item
- if owner.ID == t.cache.selfID {
- itm, err = t.executeLocally(key, function, args)
+ if ownerID == t.cache.self.ID {
+ itm, err = t.callLocally(ctx, key, function, args)
} else {
- client, err := t.cache.getClient(owner)
+ client, err := t.cache.getClient(ownerID)
if err != nil {
return t.store.getEmptyValue(), err
}
- itm, err = t.executeFromPeer(ctx, key, function, args, client)
+ itm, err = t.callFromPeer(ctx, key, function, args, client)
if err != nil {
return t.store.getEmptyValue(), err
}
@@ -150,15 +171,21 @@ func (t Table[T]) Execute(ctx context.Context, key, function string, args []byte
}
// GetHot looks up local cache if the current node is the owner, otherwise looks up hot cache.
-// GetHot does not call the getter to autofill cache, it is not token into account into metrics.
-func (t Table[T]) GetHot(key string) (T, error) {
- owner, err := t.cache.ring.getOwner(key)
+//
+// GetHot does not call the getter to autofill cache, does not increment metrics and does not affect the main cache's LFU/LRU (if used).
+func (t *Table[T]) GetHot(key string) (T, error) {
+ if t.isZero() {
+ var empty T
+ return empty, ErrCacheDestroyed
+ }
+
+ ownerID, err := t.cache.ring.GetOwner(key)
if err != nil {
return t.store.getEmptyValue(), err
}
var itm item
- if owner.ID == t.cache.selfID {
+ if ownerID == t.cache.self.ID {
itm, _, err = t.store.get(key)
if err != nil {
return t.store.getEmptyValue(), err
@@ -182,190 +209,160 @@ func (t Table[T]) GetHot(key string) (T, error) {
return v, nil
}
-// GetMetrics Can safely be called from a goroutine, returns a copy of the current table Metrics.
-// For global cache Metrics, refer to [GetMetrics]
-func (t Table[T]) GetMetrics() Metrics {
- return t.metrics.getCopy()
+// GetMetrics returns a copy of the current table Metrics. For global cache Metrics, refer to [Cache.GetMetrics]
+func (t *Table[T]) GetMetrics() (Metrics, error) {
+ if t.isZero() {
+ return Metrics{}, ErrCacheDestroyed
+ }
+ return t.metrics.getCopy(), nil
}
-func (t Table[T]) getLocally(key string) (item, error) {
+func (t *Table[T]) getLocally(key string) (item, error) {
incGet(t.metrics, t.cache.metrics)
- sfRes, err, _ := t.getSF.Do(
- key, func() (any, error) {
- i, hit, err := t.store.get(key)
- if !hit {
- incMiss(t.metrics, t.cache.metrics)
- }
- return i, err
- },
- )
+ sfRes, err, _ := t.getSF.Do(key, func() (any, error) {
+ i, hit, err := t.store.get(key)
+ if !hit {
+ incMiss(t.metrics, t.cache.metrics)
+ }
+ return i, err
+ })
return sfRes.(item), err
}
-func (t Table[T]) putLocally(itm item) {
+func (t *Table[T]) putLocally(itm item) error {
incPut(t.metrics, t.cache.metrics)
t.store.put(itm)
+ return nil
}
-func (t Table[T]) evictLocally(key string) {
+func (t *Table[T]) evictLocally(key string) error {
incEvict(t.metrics, t.cache.metrics)
- _, _, _ = t.evictSF.Do(
- key, func() (any, error) {
- t.store.evict(key)
- return nil, nil
- },
- )
+ _, _, _ = t.evictSF.Do(key, func() (any, error) {
+ t.store.evict(key)
+ return nil, nil
+ })
+ return nil
}
-func (t Table[T]) executeLocally(key, function string, args []byte) (item, error) {
- incExecute(function, t.metrics, t.cache.metrics)
- fn, ok := t.functions[function]
- if !ok {
- return item{}, ErrFunctionNotFound
- }
-
- t.store.items.LockKey(key)
- defer t.store.items.UnlockKey(key)
+func (t *Table[T]) callLocally(ctx context.Context, key, procedure string, args []byte) (item, error) {
+ incCalls(procedure, t.metrics, t.cache.metrics)
- oldItem, _ := t.store.items.Load(key)
- v, err := t.store.decode(oldItem)
- if err != nil {
- return item{}, err
+	// Can be accessed concurrently since no write is possible at this point
+ fn, ok := t.procedures[procedure]
+ if !ok {
+ return item{}, ErrRPCNotFound
}
- newValue, ttl, err := fn(v, args)
- if err != nil {
- return item{}, err
- }
+ return t.store.update(ctx, key, args, fn)
+}
- newItem, err := t.store.newItem(key, newValue, ttl)
- if err != nil {
- return item{}, err
- }
+func (t *Table[T]) getFromPeer(ctx context.Context, key string, owner *client) (item, error) {
+ sfRes, err, _ := t.getSF.Do(key, func() (any, error) {
+ res, err := owner.Get(ctx, &servicepb.GetRequest{
+ Table: t.name,
+ Key: key,
+ })
+ if err != nil {
+ return item{}, err
+ }
- t.store.items.Store(newItem.Key, newItem)
+ itm := item{
+ Expire: time.UnixMicro(res.Item.Expire),
+ Value: res.Item.Value,
+ Key: key,
+ }
- return newItem, err
-}
+ if t.hotStore != nil {
+ t.hotStore.put(itm)
+ }
-func (t Table[T]) getFromPeer(ctx context.Context, key string, owner client) (item, error) {
- sfRes, err, _ := t.getSF.Do(
- key, func() (any, error) {
- out, err := owner.Get(
- ctx, &servicepb.GetRequest{
- Table: t.name,
- Key: key,
- },
- )
- if err != nil {
- return item{}, err
- }
-
- itm := item{
- Expire: time.UnixMicro(out.Item.Expire),
- Value: out.Item.Value,
- Key: out.Item.Key,
- }
-
- if t.hotCacheEnabled {
- t.hotStore.put(itm)
- }
-
- return itm, nil
- },
- )
+ return itm, nil
+ })
return sfRes.(item), err
}
-func (t Table[T]) putFromPeer(ctx context.Context, itm item, owner client) error {
- if _, err := owner.Put(
- ctx, &servicepb.PutRequest{
- Table: t.name,
- Item: &servicepb.Item{
- Expire: itm.Expire.UnixMicro(),
- Value: itm.Value,
- Key: itm.Key,
- },
+func (t *Table[T]) putFromPeer(ctx context.Context, itm item, owner *client) error {
+ if _, err := owner.Put(ctx, &servicepb.PutRequest{
+ Table: t.name,
+ Item: &servicepb.Item{
+ Expire: itm.Expire.UnixMicro(),
+ Value: itm.Value,
+ Key: itm.Key,
},
- ); err != nil {
+ }); err != nil {
return err
}
- if t.hotCacheEnabled {
+ if t.hotStore != nil {
t.hotStore.put(itm)
}
return nil
}
-func (t Table[T]) evictFromPeer(ctx context.Context, key string, owner client) error {
- _, err, _ := t.evictSF.Do(
- key, func() (any, error) {
- if _, err := owner.Evict(
- ctx, &servicepb.EvictRequest{
- Table: t.name,
- Key: key,
- },
- ); err != nil {
- return nil, err
- }
-
- if t.hotCacheEnabled {
- t.hotStore.evict(key)
- }
-
- return nil, nil
- },
- )
+func (t *Table[T]) evictFromPeer(ctx context.Context, key string, owner *client) error {
+ _, err, _ := t.evictSF.Do(key, func() (any, error) {
+ if _, err := owner.Evict(ctx, &servicepb.EvictRequest{
+ Table: t.name,
+ Key: key,
+ }); err != nil {
+ return nil, err
+ }
+
+ if t.hotStore != nil {
+ t.hotStore.evict(key)
+ }
+
+ return nil, nil
+ })
return err
}
-func (t Table[T]) executeFromPeer(
+func (t *Table[T]) callFromPeer(
ctx context.Context,
- key, function string,
+ key, procedure string,
args []byte,
- owner client,
+ owner *client,
) (item, error) {
- out, err := owner.Execute(
- ctx, &servicepb.ExecuteRequest{
- Table: t.name,
- Key: key,
- Function: function,
- Args: args,
- },
- )
+ res, err := owner.Call(ctx, &servicepb.CallRequest{
+ Table: t.name,
+ Key: key,
+ Procedure: procedure,
+ Args: args,
+ })
if err != nil {
return item{}, err
}
itm := item{
- Expire: time.UnixMicro(out.Item.Expire),
- Value: out.Item.Value,
+ Expire: time.UnixMicro(res.Item.Expire),
+ Value: res.Item.Value,
Key: key,
}
- if t.hotCacheEnabled {
+ if t.hotStore != nil {
t.hotStore.put(itm)
}
return itm, nil
}
-func (t Table[T]) getFromHotCache(key string) (item, error) {
- if !t.hotCacheEnabled {
+func (t *Table[T]) getFromHotCache(key string) (item, error) {
+ if t.hotStore == nil {
return item{}, fmt.Errorf("hot cache not enabled")
}
itm, _, _ := t.hotStore.get(key)
return itm, nil
}
-// TearDown Call this whenever a table is not needed anymore
-//
-// It will properly free the [Table] from [Cache] and close all goroutines
-func (t Table[T]) TearDown() {
- t.store.closeCh <- true
- t.hotStore.closeCh <- true
- delete(t.cache.tables, t.name)
+func (t *Table[T]) tearDown() {
+ if t != nil {
+ *t = Table[T]{}
+ }
+}
+
+func (t *Table[T]) isZero() bool {
+ return t == nil || t.cache == nil
}
diff --git a/table_builder.go b/table_builder.go
index f2fd96a..1eff379 100644
--- a/table_builder.go
+++ b/table_builder.go
@@ -2,85 +2,90 @@ package nitecache
import (
"golang.org/x/sync/singleflight"
- "sync"
)
type TableBuilder[T any] struct {
- name string
- evictionPolicy EvictionPolicy
- hotCacheEnabled bool
- functions map[string]Function[T]
- getter Getter[T]
- codec Codec[T]
+ name string
+ storage Storage
+ hotStorage Storage
+ procedures map[string]Procedure[T]
+ getter Getter[T]
+ codec Codec[T]
}
func NewTable[T any](name string) *TableBuilder[T] {
return &TableBuilder[T]{
- name: name,
- functions: map[string]Function[T]{},
+ name: name,
+ procedures: map[string]Procedure[T]{},
}
}
-// WithGetter Adds a callback function used for auto cache filling
+// WithGetter sets the auto cache filling function.
func (tb *TableBuilder[T]) WithGetter(fn Getter[T]) *TableBuilder[T] {
tb.getter = fn
return tb
}
-// WithEvictionPolicy see [EvictionPolicy]
-func (tb *TableBuilder[T]) WithEvictionPolicy(policy EvictionPolicy) *TableBuilder[T] {
- tb.evictionPolicy = policy
+// WithStorage specifies how to store values.
+//
+// Must be one of [LFU], [LRU] or nil.
+//
+// If nil, the table will always grow unless keys are explicitly evicted.
+func (tb *TableBuilder[T]) WithStorage(storage Storage) *TableBuilder[T] {
+ tb.storage = storage
return tb
}
-// WithFunction Registers a function that can be called using [Table.Execute]
-func (tb *TableBuilder[T]) WithFunction(name string, function Function[T]) *TableBuilder[T] {
- tb.functions[name] = function
+// WithProcedure registers an RPC that can be called using [Table.Call].
+func (tb *TableBuilder[T]) WithProcedure(name string, function Procedure[T]) *TableBuilder[T] {
+ tb.procedures[name] = function
return tb
}
-// WithHotCache If hot cache is enable, a new cache will be populated with values gotten from other peers that can be accessed only through [Table.GetHot].
-// The owner of the hot cache is responsible for keeping it up to date.
-func (tb *TableBuilder[T]) WithHotCache() *TableBuilder[T] {
- tb.hotCacheEnabled = true
+// WithHotCache enables hot cache.
+//
+// If hot cache is enabled, a new cache will be populated with values gotten from other peers that can be accessed only through [Table.GetHot].
+//
+// The owner of the hot cache is responsible for keeping it up to date (i.e. calls to [Table.Put] and [Table.Evict] won't update hot cache of other peers).
+func (tb *TableBuilder[T]) WithHotCache(storage Storage) *TableBuilder[T] {
+ tb.hotStorage = storage
return tb
}
-// WithCodec overrides the default encoding/decoding behavior. Defaults to stdlib [json.Marshal]/[json.Unmarshal]
+// WithCodec overrides the default encoding/decoding behavior.
+//
+// Defaults to [BytesCodec] for []byte tables and [JsonCodec] for any other types.
+// See [Codec] for custom implementation.
func (tb *TableBuilder[T]) WithCodec(codec Codec[T]) *TableBuilder[T] {
tb.codec = codec
return tb
}
func (tb *TableBuilder[T]) Build(c *Cache) *Table[T] {
- if tb.evictionPolicy == nil {
- tb.evictionPolicy = NoEvictionPolicy{}
- }
-
t := &Table[T]{
- name: tb.name,
- getSF: &singleflight.Group{},
- evictSF: &singleflight.Group{},
- functions: tb.functions,
- functionsMu: &sync.RWMutex{},
- metrics: newMetrics(),
- hotCacheEnabled: tb.hotCacheEnabled,
- hotStore: newStore(
- storeOpts[T]{
- evictionPolicy: tb.evictionPolicy,
- codec: tb.codec,
- },
- ),
- store: newStore(
- storeOpts[T]{
- evictionPolicy: tb.evictionPolicy,
- getter: tb.getter,
- codec: tb.codec,
- },
- ),
+ name: tb.name,
+ getSF: &singleflight.Group{},
+ evictSF: &singleflight.Group{},
+ procedures: tb.procedures,
+ metrics: newMetrics(),
+ store: newStore(storeOpts[T]{
+ storage: tb.storage,
+ getter: tb.getter,
+ codec: tb.codec,
+ }),
cache: c,
}
+ if tb.hotStorage != nil {
+ t.hotStore = newStore(storeOpts[T]{
+ storage: tb.hotStorage,
+ codec: tb.codec,
+ })
+ }
+
+ c.tablesMu.Lock()
+ defer c.tablesMu.Unlock()
+
c.tables[tb.name] = t
return t
}
diff --git a/test/test_suite.go b/test/test_suite.go
deleted file mode 100644
index 8a8c8c1..0000000
--- a/test/test_suite.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package test
-
-import (
- "strconv"
- "sync"
-)
-
-var mu = sync.Mutex{}
-var i = 0
-var start = 8000
-
-type cache interface {
- TearDown() error
-}
-
-func GetUniqueAddr() string {
- mu.Lock()
- defer mu.Unlock()
- i++
- return "localhost:" + strconv.Itoa(start+i)
-}
-
-func TearDown(c cache) {
- go func() { _ = c.TearDown() }()
-}
diff --git a/test_utils/test_utils.go b/test_utils/test_utils.go
new file mode 100644
index 0000000..39313e5
--- /dev/null
+++ b/test_utils/test_utils.go
@@ -0,0 +1,50 @@
+package test
+
+import (
+ "context"
+ "github.com/MysteriousPotato/nitecache"
+ "strconv"
+ "sync"
+ "testing"
+ "time"
+)
+
+var (
+ mu = sync.Mutex{}
+ port = 50000
+)
+
+func GetUniqueAddr() string {
+ mu.Lock()
+ defer mu.Unlock()
+
+ port++
+ return "127.0.0.1:" + strconv.Itoa(port)
+}
+
+func SimpleHashFunc(key string) (int, error) {
+ return strconv.Atoi(key)
+}
+
+func WaitForServer(t *testing.T, c *nitecache.Cache) {
+ timeout := time.Second * 5
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ defer cancel()
+
+ healthCheckDone := make(chan struct{})
+ go func() {
+ for {
+ if err := c.HealthCheckPeers(ctx); err == nil {
+ healthCheckDone <- struct{}{}
+ }
+ time.Sleep(time.Millisecond * 100)
+ }
+ }()
+
+ select {
+ case <-healthCheckDone:
+ return
+ case <-ctx.Done():
+ t.Fatalf("clients health check failed after %s: %v", timeout.String(), ctx.Err())
+ }
+}