diff --git a/cmd/secretd/root.go b/cmd/secretd/root.go
index 0f0cfd9da..a9eb1f28d 100644
--- a/cmd/secretd/root.go
+++ b/cmd/secretd/root.go
@@ -109,7 +109,9 @@ func NewRootCmd() (*cobra.Command, app.EncodingConfig) {
 		return dir
 	}
 
-	tempApp := app.NewSecretNetworkApp(log.NewNopLogger(), dbm.NewMemDB(), nil, true, true, simtestutil.NewAppOptionsWithFlagHome(tempDir()), compute.DefaultWasmConfig())
+	wasmConfig := compute.DefaultWasmConfig()
+	wasmConfig.InitEnclave = false
+	tempApp := app.NewSecretNetworkApp(log.NewNopLogger(), dbm.NewMemDB(), nil, true, true, simtestutil.NewAppOptionsWithFlagHome(tempDir()), wasmConfig)
 
 	encodingConfig := app.EncodingConfig{
 		InterfaceRegistry: tempApp.GetInterfaceRegistry(),
diff --git a/cosmos-sdk-store/CHANGELOG.md b/cosmos-sdk-store/CHANGELOG.md
new file mode 100755
index 000000000..cdd7bb1c5
--- /dev/null
+++ b/cosmos-sdk-store/CHANGELOG.md
@@ -0,0 +1,66 @@
+
+
+# Changelog
+
+## v1.1.0 (March 20, 2024)
+
+### Improvements
+
+* [#19770](https://github.com/cosmos/cosmos-sdk/pull/19770) Upgrade IAVL to v1.1.1.
+
+## v1.0.2 (January 10, 2024)
+
+### Bug Fixes
+
+* [#18897](https://github.com/cosmos/cosmos-sdk/pull/18897) Replace a panic in pruning to avoid halting consensus.
+
+## v1.0.1 (November 28, 2023)
+
+### Bug Fixes
+
+* [#18563](https://github.com/cosmos/cosmos-sdk/pull/18563) `LastCommitID().Hash` will always return `sha256([]byte{})` if the store is empty.
+
+## v1.0.0 (October 31, 2023)
+
+### Features
+
+* [#17294](https://github.com/cosmos/cosmos-sdk/pull/17294) Add snapshot manager Close method.
+* [#15568](https://github.com/cosmos/cosmos-sdk/pull/15568) Migrate `iavl` to the new key format.
+    * Remove `DeleteVersion`, `DeleteVersions`, `LazyLoadVersionForOverwriting` from the `iavl` tree API.
+    * Add `DeleteVersionsTo` and `SaveChangeSet`, which keep versions sequential, from `fromVersion` to `toVersion`.
+    * Refactor the pruning manager to use `DeleteVersionsTo`.
+* [#15712](https://github.com/cosmos/cosmos-sdk/pull/15712) Add `WorkingHash` function to the store interface to get the current app hash before commit.
+* [#14645](https://github.com/cosmos/cosmos-sdk/pull/14645) Add limits to the length of keys and values.
+* [#15683](https://github.com/cosmos/cosmos-sdk/pull/15683) `rootmulti.Store.CacheMultiStoreWithVersion` can now handle loading archival states that don't persist any of the module stores the current state has.
+* [#16060](https://github.com/cosmos/cosmos-sdk/pull/16060) Support saving and restoring snapshots locally.
+* [#14746](https://github.com/cosmos/cosmos-sdk/pull/14746) The `store` module is extracted to have a separate go.mod file, which allows it to be a standalone module.
+* [#14410](https://github.com/cosmos/cosmos-sdk/pull/14410) `rootmulti.Store.loadVersion` now validates that every module store is at the correct height, and errors if any module store has an incorrect height.
+
+### Improvements
+
+* [#17158](https://github.com/cosmos/cosmos-sdk/pull/17158) Start the snapshot goroutine only once the need to create a snapshot arises.
+
+### API Breaking Changes
+
+* [#16321](https://github.com/cosmos/cosmos-sdk/pull/16321) QueryInterface defines its own request and response types instead of relying on comet/abci & returns an error
diff --git a/cosmos-sdk-store/README.md b/cosmos-sdk-store/README.md
new file mode 100755
index 000000000..45b29a0b5
--- /dev/null
+++ b/cosmos-sdk-store/README.md
@@ -0,0 +1,24 @@
+# Cosmos-sdk -> cosmossdk.io/store@v1.0.2
+## cosmossdk.io/store@v1.0.2
+cosmossdk.io/store@v1.0.2 has an issue that we came across during the upgrade to Cosmos SDK 0.50.x.
+The issue stems from the fact that during the store __Write__ operation, not every module store gets updated.
+As a result, when we call
+```
+func (rs *Store) CacheMultiStoreWithVersion(version int64) (types.CacheMultiStore, error)
+```
+the call to retrieve the __cacheStore__ here:
+```
+cacheStore, err = store.(*iavl.Store).GetImmutable(version)
+```
+fails, and we cannot process **multistore** queries because they fail with an error:
+```
+ERR [*] Cache for error="version does not exist" module=server module store=evidence
+ERR [*] Cache for error="version does not exist" module=server module store=feegrant
+ERR [*] Cache for error="version does not exist" module=server module store=hooks-for-ibc
+ERR [*] Cache for error="version does not exist" module=server module store=feeibc
+ERR [*] Cache for error="version does not exist" module=server module store=authz
+ERR [*] Cache for error="version does not exist" module=server module store=emergencybutton
+
+```
+We introduced a fix for __CacheMultiStoreWithVersion__ that bypasses these errors for the affected module stores.
+
diff --git a/cosmos-sdk-store/cache/benchmark_test.go b/cosmos-sdk-store/cache/benchmark_test.go
new file mode 100755
index 000000000..76f875a0d
--- /dev/null
+++ b/cosmos-sdk-store/cache/benchmark_test.go
@@ -0,0 +1,49 @@
+package cache
+
+import (
+	"testing"
+
+	"cosmossdk.io/store/types"
+)
+
+func freshMgr() *CommitKVStoreCacheManager {
+	return &CommitKVStoreCacheManager{
+		caches: map[string]types.CommitKVStore{
+			"a1":           nil,
+			"alalalalalal": nil,
+		},
+	}
+}
+
+func populate(mgr *CommitKVStoreCacheManager) {
+	mgr.caches["this one"] = (types.CommitKVStore)(nil)
+	mgr.caches["those ones are the ones"] = (types.CommitKVStore)(nil)
+	mgr.caches["very huge key right here and there are we going to ones are the ones"] = (types.CommitKVStore)(nil)
+}
+
+func BenchmarkReset(b *testing.B) {
+	b.ReportAllocs()
+	mgr := freshMgr()
+
+	b.ResetTimer()
+	b.ReportAllocs()
+
+	for i := 0; i < b.N; i++ {
+		mgr.Reset()
+		if len(mgr.caches) != 0 {
+			b.Fatal("Reset failed")
+		}
+		populate(mgr)
+		if len(mgr.caches) == 0 {
+			b.Fatal("populate failed")
+		}
+		mgr.Reset()
+		if len(mgr.caches) != 0 {
+			b.Fatal("Reset failed")
+		}
+	}
+
+	if mgr == nil {
+		b.Fatal("Impossible condition")
+	}
+}
diff --git a/cosmos-sdk-store/cache/cache.go b/cosmos-sdk-store/cache/cache.go
new file mode 100755
index 000000000..98d17d034
--- /dev/null
+++ b/cosmos-sdk-store/cache/cache.go
@@ -0,0 +1,132 @@
+package cache
+
+import (
+	"fmt"
+
+	lru "github.com/hashicorp/golang-lru"
+
+	"cosmossdk.io/store/cachekv"
+	"cosmossdk.io/store/types"
+)
+
+var (
+	_ types.CommitKVStore             = (*CommitKVStoreCache)(nil)
+	_ types.MultiStorePersistentCache = (*CommitKVStoreCacheManager)(nil)
+
+	// DefaultCommitKVStoreCacheSize defines the persistent ARC cache size for a
+	// CommitKVStoreCache.
+	DefaultCommitKVStoreCacheSize uint = 1000
+)
+
+type (
+	// CommitKVStoreCache implements an inter-block (persistent) cache that wraps a
+	// CommitKVStore. Reads first hit the internal ARC (Adaptive Replacement Cache).
+	// During a cache miss, the read is delegated to the underlying CommitKVStore
+	// and cached. Deletes and writes always happen to both the cache and the
+	// CommitKVStore in a write-through manner. Caching performed in the
+	// CommitKVStore and below is completely irrelevant to this layer.
+	CommitKVStoreCache struct {
+		types.CommitKVStore
+		cache *lru.ARCCache
+	}
+
+	// CommitKVStoreCacheManager maintains a mapping from a StoreKey to a
+	// CommitKVStoreCache. Each CommitKVStore, per StoreKey, is meant to be used
+	// in an inter-block (persistent) manner and typically provided by a
+	// CommitMultiStore.
+	CommitKVStoreCacheManager struct {
+		cacheSize uint
+		caches    map[string]types.CommitKVStore
+	}
+)
+
+// NewCommitKVStoreCache wraps store with an ARC cache of the given size.
+func NewCommitKVStoreCache(store types.CommitKVStore, size uint) *CommitKVStoreCache {
+	cache, err := lru.NewARC(int(size))
+	if err != nil {
+		panic(fmt.Errorf("failed to create KVStore cache: %s", err))
+	}
+
+	return &CommitKVStoreCache{
+		CommitKVStore: store,
+		cache:         cache,
+	}
+}
+
+// NewCommitKVStoreCacheManager creates a manager whose per-StoreKey caches each
+// hold up to size entries.
+func NewCommitKVStoreCacheManager(size uint) *CommitKVStoreCacheManager {
+	return &CommitKVStoreCacheManager{
+		cacheSize: size,
+		caches:    make(map[string]types.CommitKVStore),
+	}
+}
+
+// GetStoreCache returns a Cache from the CommitStoreCacheManager for a given
+// StoreKey. If no Cache exists for the StoreKey, then one is created and set.
+// The returned Cache is meant to be used in a persistent manner.
+func (cmgr *CommitKVStoreCacheManager) GetStoreCache(key types.StoreKey, store types.CommitKVStore) types.CommitKVStore {
+	if cmgr.caches[key.Name()] == nil {
+		cmgr.caches[key.Name()] = NewCommitKVStoreCache(store, cmgr.cacheSize)
+	}
+
+	return cmgr.caches[key.Name()]
+}
+
+// Unwrap returns the underlying CommitKVStore for a given StoreKey.
+func (cmgr *CommitKVStoreCacheManager) Unwrap(key types.StoreKey) types.CommitKVStore {
+	if ckv, ok := cmgr.caches[key.Name()]; ok {
+		return ckv.(*CommitKVStoreCache).CommitKVStore
+	}
+
+	return nil
+}
+
+// Reset resets the internal caches.
+func (cmgr *CommitKVStoreCacheManager) Reset() {
+	// Clear the map.
+	// Please note that we are purposefully using the map clearing idiom.
+	// See https://github.com/cosmos/cosmos-sdk/issues/6681.
+	for key := range cmgr.caches {
+		delete(cmgr.caches, key)
+	}
+}
+
+// CacheWrap implements the CacheWrapper interface.
+func (ckv *CommitKVStoreCache) CacheWrap() types.CacheWrap {
+	return cachekv.NewStore(ckv)
+}
+
+// Get retrieves a value by key. It will first look in the write-through cache.
+// If the value doesn't exist in the write-through cache, the query is delegated
+// to the underlying CommitKVStore.
+func (ckv *CommitKVStoreCache) Get(key []byte) []byte {
+	types.AssertValidKey(key)
+
+	keyStr := string(key)
+	valueI, ok := ckv.cache.Get(keyStr)
+	if ok {
+		// cache hit
+		return valueI.([]byte)
+	}
+
+	// cache miss; write to cache
+	value := ckv.CommitKVStore.Get(key)
+	ckv.cache.Add(keyStr, value)
+
+	return value
+}
+
+// Set inserts a key/value pair into both the write-through cache and the
+// underlying CommitKVStore.
+func (ckv *CommitKVStoreCache) Set(key, value []byte) { + types.AssertValidKey(key) + types.AssertValidValue(value) + + ckv.cache.Add(string(key), value) + ckv.CommitKVStore.Set(key, value) +} + +// Delete removes a key/value pair from both the write-through cache and the +// underlying CommitKVStore. +func (ckv *CommitKVStoreCache) Delete(key []byte) { + ckv.cache.Remove(string(key)) + ckv.CommitKVStore.Delete(key) +} diff --git a/cosmos-sdk-store/cache/cache_test.go b/cosmos-sdk-store/cache/cache_test.go new file mode 100755 index 000000000..efbf22c8e --- /dev/null +++ b/cosmos-sdk-store/cache/cache_test.go @@ -0,0 +1,101 @@ +package cache_test + +import ( + "fmt" + "testing" + + dbm "github.com/cosmos/cosmos-db" + "github.com/cosmos/iavl" + "github.com/stretchr/testify/require" + + "cosmossdk.io/log" + "cosmossdk.io/store/cache" + "cosmossdk.io/store/cachekv" + iavlstore "cosmossdk.io/store/iavl" + "cosmossdk.io/store/types" + "cosmossdk.io/store/wrapper" +) + +func TestGetOrSetStoreCache(t *testing.T) { + db := wrapper.NewDBWrapper(dbm.NewMemDB()) + mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize) + + sKey := types.NewKVStoreKey("test") + tree := iavl.NewMutableTree(db, 100, false, log.NewNopLogger()) + store := iavlstore.UnsafeNewStore(tree) + store2 := mngr.GetStoreCache(sKey, store) + + require.NotNil(t, store2) + require.Equal(t, store2, mngr.GetStoreCache(sKey, store)) +} + +func TestUnwrap(t *testing.T) { + db := wrapper.NewDBWrapper(dbm.NewMemDB()) + mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize) + + sKey := types.NewKVStoreKey("test") + tree := iavl.NewMutableTree(db, 100, false, log.NewNopLogger()) + store := iavlstore.UnsafeNewStore(tree) + _ = mngr.GetStoreCache(sKey, store) + + require.Equal(t, store, mngr.Unwrap(sKey)) + require.Nil(t, mngr.Unwrap(types.NewKVStoreKey("test2"))) +} + +func TestStoreCache(t *testing.T) { + db := wrapper.NewDBWrapper(dbm.NewMemDB()) + mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize) + + sKey := types.NewKVStoreKey("test") + tree := iavl.NewMutableTree(db, 100, false, log.NewNopLogger()) + store := iavlstore.UnsafeNewStore(tree) + kvStore := mngr.GetStoreCache(sKey, store) + + for i := uint(0); i < cache.DefaultCommitKVStoreCacheSize*2; i++ { + key := []byte(fmt.Sprintf("key_%d", i)) + value := []byte(fmt.Sprintf("value_%d", i)) + + kvStore.Set(key, value) + + res := kvStore.Get(key) + require.Equal(t, res, value) + require.Equal(t, res, store.Get(key)) + + kvStore.Delete(key) + + require.Nil(t, kvStore.Get(key)) + require.Nil(t, store.Get(key)) + } +} + +func TestReset(t *testing.T) { + db := wrapper.NewDBWrapper(dbm.NewMemDB()) + mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize) + + sKey := types.NewKVStoreKey("test") + tree := iavl.NewMutableTree(db, 100, false, log.NewNopLogger()) + store := iavlstore.UnsafeNewStore(tree) + store2 := mngr.GetStoreCache(sKey, store) + + require.NotNil(t, store2) + require.Equal(t, store2, mngr.GetStoreCache(sKey, store)) + + // reset and check if the cache is gone + mngr.Reset() + require.Nil(t, mngr.Unwrap(sKey)) + + // check if the cache is recreated + require.Equal(t, store2, mngr.GetStoreCache(sKey, store)) +} + +func TestCacheWrap(t *testing.T) { + db := wrapper.NewDBWrapper(dbm.NewMemDB()) + mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize) + + sKey := types.NewKVStoreKey("test") + tree := iavl.NewMutableTree(db, 100, false, log.NewNopLogger()) + 
store := iavlstore.UnsafeNewStore(tree)
+
+	cacheWrapper := mngr.GetStoreCache(sKey, store).CacheWrap()
+	require.IsType(t, &cachekv.Store{}, cacheWrapper)
+}
diff --git a/cosmos-sdk-store/cachekv/README.md b/cosmos-sdk-store/cachekv/README.md
new file mode 100755
index 000000000..66f0916de
--- /dev/null
+++ b/cosmos-sdk-store/cachekv/README.md
@@ -0,0 +1,140 @@
+# CacheKVStore specification
+
+A `CacheKVStore` is a cache wrapper for a `KVStore`. It extends the operations of the `KVStore` to work with a write-back cache, allowing for reduced I/O operations and more efficient disposal of changes (e.g. after processing a failed transaction).
+
+The core goals the CacheKVStore seeks to achieve are:
+
+* Buffer all writes to the parent store, so they can be dropped if they need to be reverted
+* Allow iteration over contiguous spans of keys
+* Act as a cache, improving access time for reads that have already been done (by replacing tree access with hashtable access, avoiding disk I/O)
+  * Note: We actually fail to achieve this for iteration right now
+  * Note: Need to consider this getting too large and dropping some cached reads
+* Make subsequent reads account for prior buffered writes
+* Write all buffered changes to the parent store
+
+We should revisit these goals with time (for instance, it's unclear that all disk writes need to be buffered to the end of the block), but this is the current status.
+
+## Types and Structs
+
+```go
+type Store struct {
+	mtx           sync.Mutex
+	cache         map[string]*cValue
+	deleted       map[string]struct{}
+	unsortedCache map[string]struct{}
+	sortedCache   *dbm.MemDB // always ascending sorted
+	parent        types.KVStore
+}
+```
+
+The Store struct wraps the underlying `KVStore` (`parent`) with additional data structures for implementing the cache. A mutex is used because IAVL trees (the `KVStore` used in the application) are not safe for concurrent use.
+
+### `cache`
+
+The main mapping of key-value pairs stored in the cache. This map contains both keys that are cached from read operations as well as ‘dirty’ keys, which map to a value that is potentially different from what is in the underlying `KVStore`.
+
+Values that are mapped to in `cache` are wrapped in a `cValue` struct, which contains the value and a boolean flag (`dirty`) representing whether the value has been written since the last write-back to `parent`.
+
+```go
+type cValue struct {
+	value []byte
+	dirty bool
+}
+```
+
+### `deleted`
+
+Key-value pairs that are to be deleted from `parent` are stored in the `deleted` map. Keys are mapped to an empty struct to implement a set.
+
+### `unsortedCache`
+
+Similar to `deleted`, this is a set of keys that are dirty and will need to be updated in the parent `KVStore` upon a write. Keys are mapped to an empty struct to implement a set.
+
+### `sortedCache`
+
+A database that will be populated by the keys in `unsortedCache` during iteration over the cache. The keys are always held in sorted order.
+
+## CRUD Operations and Writing
+
+The `Set`, `Get`, and `Delete` functions all call `setCacheValue()`, which is the only entry point to mutating `cache` (besides `Write()`, which clears it).
+
+`setCacheValue()` inserts a key-value pair into `cache`. Two boolean parameters, `deleted` and `dirty`, are passed in to flag whether the inserted key should also be inserted into the `deleted` and `dirty` sets. Keys will be removed from the `deleted` set if they are written to after being deleted.
+
+### `Get`
+
+`Get` first attempts to return the value from `cache`.
If the key does not exist in `cache`, `parent.Get()` is called instead. This value from the parent is passed into `setCacheValue()` with `deleted=false` and `dirty=false`. + +### `Has` + +`Has` returns true if `Get` returns a non-nil value. As a result of calling `Get`, it may mutate the cache by caching the read. + +### `Set` + +New values are written by setting or updating the value of a key in `cache`. `Set` does not write to `parent`. + +Calls `setCacheValue()` with `deleted=false` and `dirty=true`. + +### `Delete` + +A value being deleted from the `KVStore` is represented with a `nil` value in `cache`, and an insertion of the key into the `deleted` set. `Delete` does not write to `parent`. + +Calls `setCacheValue()` with `deleted=true` and `dirty=true`. + +### `Write` + +Key-value pairs in the cache are written to `parent` in ascending order of their keys. + +A slice of all dirty keys in `cache` is made, then sorted in increasing order. These keys are iterated over to update `parent`. + +If a key is marked for deletion (checked with `isDeleted()`), then `parent.Delete()` is called. Otherwise, `parent.Set()` is called to update the underlying `KVStore` with the value in cache. + +## Iteration + +Efficient iteration over keys in `KVStore` is important for generating Merkle range proofs. Iteration over `CacheKVStore` requires producing all key-value pairs from the underlying `KVStore` while taking into account updated values from the cache. + +In the current implementation, there is no guarantee that all values in `parent` have been cached. As a result, iteration is achieved by interleaved iteration through both `parent` and the cache (failing to actually benefit from caching). + +[cacheMergeIterator](https://github.com/cosmos/cosmos-sdk/blob/d8391cb6796d770b02448bee70b865d824e43449/store/cachekv/mergeiterator.go) implements functions to provide a single iterator with an input of iterators over `parent` and the cache. This iterator iterates over keys from both iterators in a shared lexicographic order, and overrides the value provided by the parent iterator if the same key is dirty or deleted in the cache. + +### Implementation Overview + +Iterators over `parent` and the cache are generated and passed into `cacheMergeIterator`, which returns a single, interleaved iterator. Implementation of the `parent` iterator is up to the underlying `KVStore`. The remainder of this section covers the generation of the cache iterator. + +Recall that `unsortedCache` is an unordered set of dirty cache keys. Our goal is to construct an ordered iterator over cache keys that fall within the `start` and `end` bounds requested. + +Generating the cache iterator can be decomposed into four parts: + +1. Finding all keys that exist in the range we are iterating over +2. Sorting this list of keys +3. Inserting these keys into `sortedCache` and removing them from `unsortedCache` +4. Returning an iterator over `sortedCache` with the desired range + +Currently, the implementation for the first two parts is split into two cases, depending on the size of the unsorted cache. The two cases are as follows. + +If the size of `unsortedCache` is less than `minSortSize` (currently 1024), a linear time approach is taken to search over keys. 
+```go
+n := len(store.unsortedCache)
+unsorted := make([]*kv.Pair, 0)
+
+if n < minSortSize {
+	for key := range store.unsortedCache {
+		if dbm.IsKeyInDomain(conv.UnsafeStrToBytes(key), start, end) {
+			cacheValue := store.cache[key]
+			unsorted = append(unsorted, &kv.Pair{Key: []byte(key), Value: cacheValue.value})
+		}
+	}
+	store.clearUnsortedCacheSubset(unsorted, stateUnsorted)
+	return
+}
+```
+
+Here, we iterate through all the keys in `unsortedCache` (i.e., the dirty cache keys), collecting those within the requested range in an unsorted slice called `unsorted`.
+
+At this point, part 3 is achieved in `clearUnsortedCacheSubset()`. This function iterates through `unsorted`, removing each key from `unsortedCache`. Afterwards, `unsorted` is sorted. Lastly, it iterates through the now-sorted slice, inserting key-value pairs into `sortedCache`. Any key marked for deletion is mapped to an arbitrary value (`[]byte{}`).
+
+In the case that the size of `unsortedCache` is larger than `minSortSize`, a linear-time approach to finding keys within the desired range is too slow. Instead, a slice of all keys in `unsortedCache` is sorted, and binary search is used to find the beginning and ending indices of the desired range. This produces an already-sorted slice that is passed into the same `clearUnsortedCacheSubset()` function. An iota identifier (`stateAlreadySorted`) is used to skip the sorting step in the function.
+
+Finally, part 4 is achieved with `memIterator`, which implements an iterator over the items in `sortedCache`.
+
+As of [PR #12885](https://github.com/cosmos/cosmos-sdk/pull/12885), an optimization to the binary search case mitigates the overhead of sorting the entirety of the key set in `unsortedCache`. To avoid wasting the compute spent sorting, we should ensure that a reasonable amount of values are removed from `unsortedCache`. If the length of the range for iteration is less than `minSortSize`, we widen the range of values for removal from `unsortedCache` to be up to `minSortSize` in length. This amortizes the cost of processing elements across multiple calls.
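+
+As a quick illustration of the overall write-back contract described above, here is a minimal sketch. It assumes the in-memory `dbadapter` parent used by this repository's benchmarks; the wiring is illustrative, not part of the specification:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	dbm "github.com/cosmos/cosmos-db"
+
+	"cosmossdk.io/store/cachekv"
+	"cosmossdk.io/store/dbadapter"
+)
+
+func main() {
+	parent := dbadapter.Store{DB: dbm.NewMemDB()}
+	cache := cachekv.NewStore(parent)
+
+	// The write is buffered in the cache; the parent is untouched.
+	cache.Set([]byte("k"), []byte("v"))
+	fmt.Println(parent.Get([]byte("k"))) // [] (nil; not yet written back)
+
+	// Write flushes the dirty entries to the parent in ascending key order.
+	cache.Write()
+	fmt.Println(string(parent.Get([]byte("k")))) // "v"
+}
+```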
\ No newline at end of file
diff --git a/cosmos-sdk-store/cachekv/bench_helper_test.go b/cosmos-sdk-store/cachekv/bench_helper_test.go
new file mode 100755
index 000000000..be7fec4b3
--- /dev/null
+++ b/cosmos-sdk-store/cachekv/bench_helper_test.go
@@ -0,0 +1,44 @@
+package cachekv_test
+
+import "crypto/rand"
+
+func randSlice(sliceSize int) []byte {
+	bz := make([]byte, sliceSize)
+	_, _ = rand.Read(bz)
+	return bz
+}
+
+func incrementByteSlice(bz []byte) {
+	for index := len(bz) - 1; index >= 0; index-- {
+		if bz[index] < 255 {
+			bz[index]++
+			break
+		} else {
+			bz[index] = 0
+		}
+	}
+}
+
+// generateSequentialKeys generates numKeys keys in sequential order, starting at startKey.
+func generateSequentialKeys(startKey []byte, numKeys int) [][]byte {
+	toReturn := make([][]byte, 0, numKeys)
+	cur := make([]byte, len(startKey))
+	copy(cur, startKey)
+	for i := 0; i < numKeys; i++ {
+		newKey := make([]byte, len(startKey))
+		copy(newKey, cur)
+		toReturn = append(toReturn, newKey)
+		incrementByteSlice(cur)
+	}
+	return toReturn
+}
+
+// generateRandomKeys generates numKeys random, unsorted keys of keySize bytes.
+func generateRandomKeys(keySize, numKeys int) [][]byte {
+	toReturn := make([][]byte, 0, numKeys)
+	for i := 0; i < numKeys; i++ {
+		newKey := randSlice(keySize)
+		toReturn = append(toReturn, newKey)
+	}
+	return toReturn
+}
diff --git a/cosmos-sdk-store/cachekv/benchmark_test.go b/cosmos-sdk-store/cachekv/benchmark_test.go
new file mode 100755
index 000000000..158549b4b
--- /dev/null
+++ b/cosmos-sdk-store/cachekv/benchmark_test.go
@@ -0,0 +1,136 @@
+package cachekv_test
+
+import (
+	fmt "fmt"
+	"testing"
+
+	dbm "github.com/cosmos/cosmos-db"
+	"github.com/stretchr/testify/require"
+
+	"cosmossdk.io/store/cachekv"
+	"cosmossdk.io/store/dbadapter"
+	"cosmossdk.io/store/types"
+)
+
+func DoBenchmarkDeepCacheStack(b *testing.B, depth int) {
+	b.Helper()
+	db := dbm.NewMemDB()
+	initialStore := cachekv.NewStore(dbadapter.Store{DB: db})
+
+	nItems := 20
+	for i := 0; i < nItems; i++ {
+		initialStore.Set([]byte(fmt.Sprintf("hello%03d", i)), []byte{0})
+	}
+
+	var stack CacheStack
+	stack.Reset(initialStore)
+
+	for i := 0; i < depth; i++ {
+		stack.Snapshot()
+
+		store := stack.CurrentStore()
+		store.Set([]byte(fmt.Sprintf("hello%03d", i)), []byte{byte(i)})
+	}
+
+	store := stack.CurrentStore()
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		it := store.Iterator(nil, nil)
+		items := make([][]byte, 0, nItems)
+		for ; it.Valid(); it.Next() {
+			items = append(items, it.Key())
+			it.Value()
+		}
+		it.Close()
+		require.Equal(b, nItems, len(items))
+	}
+}
+
+func BenchmarkDeepCacheStack1(b *testing.B) {
+	DoBenchmarkDeepCacheStack(b, 1)
+}
+
+func BenchmarkDeepCacheStack3(b *testing.B) {
+	DoBenchmarkDeepCacheStack(b, 3)
+}
+
+func BenchmarkDeepCacheStack10(b *testing.B) {
+	DoBenchmarkDeepCacheStack(b, 10)
+}
+
+func BenchmarkDeepCacheStack13(b *testing.B) {
+	DoBenchmarkDeepCacheStack(b, 13)
+}
+
+// CacheStack manages a stack of nested cache stores to
+// support the EVM `StateDB`'s `Snapshot` and `RevertToSnapshot` methods.
+type CacheStack struct {
+	// initialStore holds the initial state before transaction execution.
+	// It is the store consulted by `StateDB.CommittedState`.
+	initialStore types.CacheKVStore
+	cacheStores  []types.CacheKVStore
+}
+
+// CurrentStore returns the top store of the cache stack;
+// if the stack is empty, it returns the initial store.
+func (cs *CacheStack) CurrentStore() types.CacheKVStore {
+	l := len(cs.cacheStores)
+	if l == 0 {
+		return cs.initialStore
+	}
+	return cs.cacheStores[l-1]
+}
+
+// Reset sets the initial store and clears the cache store stack.
+func (cs *CacheStack) Reset(initialStore types.CacheKVStore) {
+	cs.initialStore = initialStore
+	cs.cacheStores = nil
+}
+
+// IsEmpty returns true if the cache store stack is empty.
+func (cs *CacheStack) IsEmpty() bool {
+	return len(cs.cacheStores) == 0
+}
+
+// Commit commits all the cached stores from top to bottom in order and clears the stack.
+func (cs *CacheStack) Commit() {
+	// commit in order from top to bottom
+	for i := len(cs.cacheStores) - 1; i >= 0; i-- {
+		cs.cacheStores[i].Write()
+	}
+	cs.cacheStores = nil
+}
+
+// CommitToRevision commits the caches above the target revision into it,
+// to improve the efficiency of DB operations.
+func (cs *CacheStack) CommitToRevision(target int) error {
+	if target < 0 || target >= len(cs.cacheStores) {
+		return fmt.Errorf("snapshot index %d out of bound [%d..%d)", target, 0, len(cs.cacheStores))
+	}
+
+	// commit in order from top to bottom
+	for i := len(cs.cacheStores) - 1; i > target; i-- {
+		cs.cacheStores[i].Write()
+	}
+	cs.cacheStores = cs.cacheStores[0 : target+1]
+
+	return nil
+}
+
+// Snapshot pushes a new cache store onto the stack,
+// and returns its index.
+func (cs *CacheStack) Snapshot() int {
+	cs.cacheStores = append(cs.cacheStores, cachekv.NewStore(cs.CurrentStore()))
+	return len(cs.cacheStores) - 1
+}
+
+// RevertToSnapshot discards all the cache stores from the target index upward (inclusive).
+// The target should be a snapshot index returned by `Snapshot`.
+// This function panics if the index is out of bounds.
+func (cs *CacheStack) RevertToSnapshot(target int) {
+	if target < 0 || target >= len(cs.cacheStores) {
+		panic(fmt.Errorf("snapshot index %d out of bound [%d..%d)", target, 0, len(cs.cacheStores)))
+	}
+	cs.cacheStores = cs.cacheStores[:target]
+}
diff --git a/cosmos-sdk-store/cachekv/internal/btree.go b/cosmos-sdk-store/cachekv/internal/btree.go
new file mode 100755
index 000000000..209f7e58c
--- /dev/null
+++ b/cosmos-sdk-store/cachekv/internal/btree.go
@@ -0,0 +1,91 @@
+package internal
+
+import (
+	"bytes"
+	"errors"
+
+	"github.com/tidwall/btree"
+
+	"cosmossdk.io/store/types"
+)
+
+const (
+	// The approximate number of items and children per B-tree node. Tuned with benchmarks.
+	// Copied from memdb.
+	bTreeDegree = 32
+)
+
+var errKeyEmpty = errors.New("key cannot be empty")
+
+// BTree implements the sorted cache for the cachekv store.
+// We don't use MemDB here because cachekv is used extensively in the SDK's core path,
+// so it needs to be as fast as possible; `MemDB` is mainly used as a mock DB in unit tests.
+//
+// We choose tidwall/btree over google/btree here because it provides an API to implement a step iterator directly.
+type BTree struct {
+	tree *btree.BTreeG[item]
+}
+
+// NewBTree creates a wrapper around `btree.BTreeG`.
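+// Note that the zero value of BTree is not usable; always construct instances with NewBTree.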
+func NewBTree() BTree { + return BTree{ + tree: btree.NewBTreeGOptions(byKeys, btree.Options{ + Degree: bTreeDegree, + NoLocks: false, + }), + } +} + +func (bt BTree) Set(key, value []byte) { + bt.tree.Set(newItem(key, value)) +} + +func (bt BTree) Get(key []byte) []byte { + i, found := bt.tree.Get(newItem(key, nil)) + if !found { + return nil + } + return i.value +} + +func (bt BTree) Delete(key []byte) { + bt.tree.Delete(newItem(key, nil)) +} + +func (bt BTree) Iterator(start, end []byte) (types.Iterator, error) { + if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { + return nil, errKeyEmpty + } + return newMemIterator(start, end, bt, true), nil +} + +func (bt BTree) ReverseIterator(start, end []byte) (types.Iterator, error) { + if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { + return nil, errKeyEmpty + } + return newMemIterator(start, end, bt, false), nil +} + +// Copy the tree. This is a copy-on-write operation and is very fast because +// it only performs a shadowed copy. +func (bt BTree) Copy() BTree { + return BTree{ + tree: bt.tree.Copy(), + } +} + +// item is a btree item with byte slices as keys and values +type item struct { + key []byte + value []byte +} + +// byKeys compares the items by key +func byKeys(a, b item) bool { + return bytes.Compare(a.key, b.key) == -1 +} + +// newItem creates a new pair item. +func newItem(key, value []byte) item { + return item{key: key, value: value} +} diff --git a/cosmos-sdk-store/cachekv/internal/btree_test.go b/cosmos-sdk-store/cachekv/internal/btree_test.go new file mode 100755 index 000000000..06437997f --- /dev/null +++ b/cosmos-sdk-store/cachekv/internal/btree_test.go @@ -0,0 +1,204 @@ +package internal + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "cosmossdk.io/store/types" +) + +func TestGetSetDelete(t *testing.T) { + db := NewBTree() + + // A nonexistent key should return nil. + value := db.Get([]byte("a")) + require.Nil(t, value) + + // Set and get a value. + db.Set([]byte("a"), []byte{0x01}) + db.Set([]byte("b"), []byte{0x02}) + value = db.Get([]byte("a")) + require.Equal(t, []byte{0x01}, value) + + value = db.Get([]byte("b")) + require.Equal(t, []byte{0x02}, value) + + // Deleting a non-existent value is fine. + db.Delete([]byte("x")) + + // Delete a value. + db.Delete([]byte("a")) + + value = db.Get([]byte("a")) + require.Nil(t, value) + + db.Delete([]byte("b")) + + value = db.Get([]byte("b")) + require.Nil(t, value) +} + +func TestDBIterator(t *testing.T) { + db := NewBTree() + + for i := 0; i < 10; i++ { + if i != 6 { // but skip 6. 
+ db.Set(int642Bytes(int64(i)), []byte{}) + } + } + + // Blank iterator keys should error + _, err := db.ReverseIterator([]byte{}, nil) + require.Equal(t, errKeyEmpty, err) + _, err = db.ReverseIterator(nil, []byte{}) + require.Equal(t, errKeyEmpty, err) + + itr, err := db.Iterator(nil, nil) + require.NoError(t, err) + verifyIterator(t, itr, []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator") + + ritr, err := db.ReverseIterator(nil, nil) + require.NoError(t, err) + verifyIterator(t, ritr, []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator") + + itr, err = db.Iterator(nil, int642Bytes(0)) + require.NoError(t, err) + verifyIterator(t, itr, []int64(nil), "forward iterator to 0") + + ritr, err = db.ReverseIterator(int642Bytes(10), nil) + require.NoError(t, err) + verifyIterator(t, ritr, []int64(nil), "reverse iterator from 10 (ex)") + + itr, err = db.Iterator(int642Bytes(0), nil) + require.NoError(t, err) + verifyIterator(t, itr, []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 0") + + itr, err = db.Iterator(int642Bytes(1), nil) + require.NoError(t, err) + verifyIterator(t, itr, []int64{1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 1") + + ritr, err = db.ReverseIterator(nil, int642Bytes(10)) + require.NoError(t, err) + verifyIterator(t, ritr, + []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 10 (ex)") + + ritr, err = db.ReverseIterator(nil, int642Bytes(9)) + require.NoError(t, err) + verifyIterator(t, ritr, + []int64{8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 9 (ex)") + + ritr, err = db.ReverseIterator(nil, int642Bytes(8)) + require.NoError(t, err) + verifyIterator(t, ritr, + []int64{7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 8 (ex)") + + itr, err = db.Iterator(int642Bytes(5), int642Bytes(6)) + require.NoError(t, err) + verifyIterator(t, itr, []int64{5}, "forward iterator from 5 to 6") + + itr, err = db.Iterator(int642Bytes(5), int642Bytes(7)) + require.NoError(t, err) + verifyIterator(t, itr, []int64{5}, "forward iterator from 5 to 7") + + itr, err = db.Iterator(int642Bytes(5), int642Bytes(8)) + require.NoError(t, err) + verifyIterator(t, itr, []int64{5, 7}, "forward iterator from 5 to 8") + + itr, err = db.Iterator(int642Bytes(6), int642Bytes(7)) + require.NoError(t, err) + verifyIterator(t, itr, []int64(nil), "forward iterator from 6 to 7") + + itr, err = db.Iterator(int642Bytes(6), int642Bytes(8)) + require.NoError(t, err) + verifyIterator(t, itr, []int64{7}, "forward iterator from 6 to 8") + + itr, err = db.Iterator(int642Bytes(7), int642Bytes(8)) + require.NoError(t, err) + verifyIterator(t, itr, []int64{7}, "forward iterator from 7 to 8") + + ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(5)) + require.NoError(t, err) + verifyIterator(t, ritr, []int64{4}, "reverse iterator from 5 (ex) to 4") + + ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(6)) + require.NoError(t, err) + verifyIterator(t, ritr, + []int64{5, 4}, "reverse iterator from 6 (ex) to 4") + + ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(7)) + require.NoError(t, err) + verifyIterator(t, ritr, + []int64{5, 4}, "reverse iterator from 7 (ex) to 4") + + ritr, err = db.ReverseIterator(int642Bytes(5), int642Bytes(6)) + require.NoError(t, err) + verifyIterator(t, ritr, []int64{5}, "reverse iterator from 6 (ex) to 5") + + ritr, err = db.ReverseIterator(int642Bytes(5), int642Bytes(7)) + require.NoError(t, err) + verifyIterator(t, ritr, []int64{5}, "reverse iterator from 7 (ex) to 5") + + ritr, err = db.ReverseIterator(int642Bytes(6), int642Bytes(7)) + 
require.NoError(t, err)
+	verifyIterator(t, ritr,
+		[]int64(nil), "reverse iterator from 7 (ex) to 6")
+
+	ritr, err = db.ReverseIterator(int642Bytes(10), nil)
+	require.NoError(t, err)
+	verifyIterator(t, ritr, []int64(nil), "reverse iterator to 10")
+
+	ritr, err = db.ReverseIterator(int642Bytes(6), nil)
+	require.NoError(t, err)
+	verifyIterator(t, ritr, []int64{9, 8, 7}, "reverse iterator to 6")
+
+	ritr, err = db.ReverseIterator(int642Bytes(5), nil)
+	require.NoError(t, err)
+	verifyIterator(t, ritr, []int64{9, 8, 7, 5}, "reverse iterator to 5")
+
+	ritr, err = db.ReverseIterator(int642Bytes(8), int642Bytes(9))
+	require.NoError(t, err)
+	verifyIterator(t, ritr, []int64{8}, "reverse iterator from 9 (ex) to 8")
+
+	ritr, err = db.ReverseIterator(int642Bytes(2), int642Bytes(4))
+	require.NoError(t, err)
+	verifyIterator(t, ritr,
+		[]int64{3, 2}, "reverse iterator from 4 (ex) to 2")
+
+	ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(2))
+	require.NoError(t, err)
+	verifyIterator(t, ritr,
+		[]int64(nil), "reverse iterator from 2 (ex) to 4")
+
+	// Ensure that the iterators don't panic with an empty database.
+	db2 := NewBTree()
+
+	itr, err = db2.Iterator(nil, nil)
+	require.NoError(t, err)
+	verifyIterator(t, itr, nil, "forward iterator with empty db")
+
+	ritr, err = db2.ReverseIterator(nil, nil)
+	require.NoError(t, err)
+	verifyIterator(t, ritr, nil, "reverse iterator with empty db")
+}
+
+func verifyIterator(t *testing.T, itr types.Iterator, expected []int64, msg string) {
+	t.Helper()
+	i := 0
+	for itr.Valid() {
+		key := itr.Key()
+		require.Equal(t, expected[i], bytes2Int64(key), "iterator: %d mismatches", i)
+		itr.Next()
+		i++
+	}
+	require.Equal(t, i, len(expected), "expected to have fully iterated over all the elements in iter")
+	require.NoError(t, itr.Close())
+}
+
+func int642Bytes(i int64) []byte {
+	return types.Uint64ToBigEndian(uint64(i))
+}
+
+func bytes2Int64(buf []byte) int64 {
+	return int64(types.BigEndianToUint64(buf))
+}
diff --git a/cosmos-sdk-store/cachekv/internal/memiterator.go b/cosmos-sdk-store/cachekv/internal/memiterator.go
new file mode 100755
index 000000000..9dbba7587
--- /dev/null
+++ b/cosmos-sdk-store/cachekv/internal/memiterator.go
@@ -0,0 +1,120 @@
+package internal
+
+import (
+	"bytes"
+	"errors"
+
+	"github.com/tidwall/btree"
+
+	"cosmossdk.io/store/types"
+)
+
+var _ types.Iterator = (*memIterator)(nil)
+
+// memIterator iterates over iterKVCache items.
+// If an item's value is nil, it means the item was deleted.
+// Implements Iterator.
+type memIterator struct { + iter btree.IterG[item] + + start []byte + end []byte + ascending bool + valid bool +} + +func newMemIterator(start, end []byte, items BTree, ascending bool) *memIterator { + iter := items.tree.Iter() + var valid bool + if ascending { + if start != nil { + valid = iter.Seek(newItem(start, nil)) + } else { + valid = iter.First() + } + } else { + if end != nil { + valid = iter.Seek(newItem(end, nil)) + if !valid { + valid = iter.Last() + } else { + // end is exclusive + valid = iter.Prev() + } + } else { + valid = iter.Last() + } + } + + mi := &memIterator{ + iter: iter, + start: start, + end: end, + ascending: ascending, + valid: valid, + } + + if mi.valid { + mi.valid = mi.keyInRange(mi.Key()) + } + + return mi +} + +func (mi *memIterator) Domain() (start, end []byte) { + return mi.start, mi.end +} + +func (mi *memIterator) Close() error { + mi.iter.Release() + return nil +} + +func (mi *memIterator) Error() error { + if !mi.Valid() { + return errors.New("invalid memIterator") + } + return nil +} + +func (mi *memIterator) Valid() bool { + return mi.valid +} + +func (mi *memIterator) Next() { + mi.assertValid() + + if mi.ascending { + mi.valid = mi.iter.Next() + } else { + mi.valid = mi.iter.Prev() + } + + if mi.valid { + mi.valid = mi.keyInRange(mi.Key()) + } +} + +func (mi *memIterator) keyInRange(key []byte) bool { + if mi.ascending && mi.end != nil && bytes.Compare(key, mi.end) >= 0 { + return false + } + if !mi.ascending && mi.start != nil && bytes.Compare(key, mi.start) < 0 { + return false + } + return true +} + +func (mi *memIterator) Key() []byte { + return mi.iter.Item().key +} + +func (mi *memIterator) Value() []byte { + return mi.iter.Item().value +} + +func (mi *memIterator) assertValid() { + if err := mi.Error(); err != nil { + panic(err) + } +} diff --git a/cosmos-sdk-store/cachekv/internal/mergeiterator.go b/cosmos-sdk-store/cachekv/internal/mergeiterator.go new file mode 100755 index 000000000..58e9497b3 --- /dev/null +++ b/cosmos-sdk-store/cachekv/internal/mergeiterator.go @@ -0,0 +1,235 @@ +package internal + +import ( + "bytes" + "errors" + + "cosmossdk.io/store/types" +) + +// cacheMergeIterator merges a parent Iterator and a cache Iterator. +// The cache iterator may return nil keys to signal that an item +// had been deleted (but not deleted in the parent). +// If the cache iterator has the same key as the parent, the +// cache shadows (overrides) the parent. +// +// TODO: Optimize by memoizing. +type cacheMergeIterator struct { + parent types.Iterator + cache types.Iterator + ascending bool + + valid bool +} + +var _ types.Iterator = (*cacheMergeIterator)(nil) + +func NewCacheMergeIterator(parent, cache types.Iterator, ascending bool) types.Iterator { + iter := &cacheMergeIterator{ + parent: parent, + cache: cache, + ascending: ascending, + } + + iter.valid = iter.skipUntilExistsOrInvalid() + return iter +} + +// Domain implements Iterator. +// Returns parent domain because cache and parent domains are the same. +func (iter *cacheMergeIterator) Domain() (start, end []byte) { + return iter.parent.Domain() +} + +// Valid implements Iterator. +func (iter *cacheMergeIterator) Valid() bool { + return iter.valid +} + +// Next implements Iterator +func (iter *cacheMergeIterator) Next() { + iter.assertValid() + + switch { + case !iter.parent.Valid(): + // If parent is invalid, get the next cache item. + iter.cache.Next() + case !iter.cache.Valid(): + // If cache is invalid, get the next parent item. 
+ iter.parent.Next() + default: + // Both are valid. Compare keys. + keyP, keyC := iter.parent.Key(), iter.cache.Key() + switch iter.compare(keyP, keyC) { + case -1: // parent < cache + iter.parent.Next() + case 0: // parent == cache + iter.parent.Next() + iter.cache.Next() + case 1: // parent > cache + iter.cache.Next() + } + } + iter.valid = iter.skipUntilExistsOrInvalid() +} + +// Key implements Iterator +func (iter *cacheMergeIterator) Key() []byte { + iter.assertValid() + + // If parent is invalid, get the cache key. + if !iter.parent.Valid() { + return iter.cache.Key() + } + + // If cache is invalid, get the parent key. + if !iter.cache.Valid() { + return iter.parent.Key() + } + + // Both are valid. Compare keys. + keyP, keyC := iter.parent.Key(), iter.cache.Key() + + cmp := iter.compare(keyP, keyC) + switch cmp { + case -1: // parent < cache + return keyP + case 0: // parent == cache + return keyP + case 1: // parent > cache + return keyC + default: + panic("invalid compare result") + } +} + +// Value implements Iterator +func (iter *cacheMergeIterator) Value() []byte { + iter.assertValid() + + // If parent is invalid, get the cache value. + if !iter.parent.Valid() { + return iter.cache.Value() + } + + // If cache is invalid, get the parent value. + if !iter.cache.Valid() { + return iter.parent.Value() + } + + // Both are valid. Compare keys. + keyP, keyC := iter.parent.Key(), iter.cache.Key() + + cmp := iter.compare(keyP, keyC) + switch cmp { + case -1: // parent < cache + return iter.parent.Value() + case 0: // parent == cache + return iter.cache.Value() + case 1: // parent > cache + return iter.cache.Value() + default: + panic("invalid comparison result") + } +} + +// Close implements Iterator +func (iter *cacheMergeIterator) Close() error { + err1 := iter.cache.Close() + if err := iter.parent.Close(); err != nil { + return err + } + + return err1 +} + +// Error returns an error if the cacheMergeIterator is invalid defined by the +// Valid method. +func (iter *cacheMergeIterator) Error() error { + if !iter.Valid() { + return errors.New("invalid cacheMergeIterator") + } + + return nil +} + +// If not valid, panics. +// NOTE: May have side-effect of iterating over cache. +func (iter *cacheMergeIterator) assertValid() { + if err := iter.Error(); err != nil { + panic(err) + } +} + +// Like bytes.Compare but opposite if not ascending. +func (iter *cacheMergeIterator) compare(a, b []byte) int { + if iter.ascending { + return bytes.Compare(a, b) + } + + return bytes.Compare(a, b) * -1 +} + +// Skip all delete-items from the cache w/ `key < until`. After this function, +// current cache item is a non-delete-item, or `until <= key`. +// If the current cache item is not a delete item, does nothing. +// If `until` is nil, there is no limit, and cache may end up invalid. +// CONTRACT: cache is valid. +func (iter *cacheMergeIterator) skipCacheDeletes(until []byte) { + for iter.cache.Valid() && + iter.cache.Value() == nil && + (until == nil || iter.compare(iter.cache.Key(), until) < 0) { + iter.cache.Next() + } +} + +// Fast forwards cache (or parent+cache in case of deleted items) until current +// item exists, or until iterator becomes invalid. +// Returns whether the iterator is valid. +func (iter *cacheMergeIterator) skipUntilExistsOrInvalid() bool { + for { + // If parent is invalid, fast-forward cache. + if !iter.parent.Valid() { + iter.skipCacheDeletes(nil) + return iter.cache.Valid() + } + // Parent is valid. 
+ + if !iter.cache.Valid() { + return true + } + // Parent is valid, cache is valid. + + // Compare parent and cache. + keyP := iter.parent.Key() + keyC := iter.cache.Key() + + switch iter.compare(keyP, keyC) { + case -1: // parent < cache. + return true + + case 0: // parent == cache. + // Skip over if cache item is a delete. + valueC := iter.cache.Value() + if valueC == nil { + iter.parent.Next() + iter.cache.Next() + + continue + } + // Cache is not a delete. + + return true // cache exists. + case 1: // cache < parent + // Skip over if cache item is a delete. + valueC := iter.cache.Value() + if valueC == nil { + iter.skipCacheDeletes(keyP) + continue + } + // Cache is not a delete. + + return true // cache exists. + } + } +} diff --git a/cosmos-sdk-store/cachekv/search_benchmark_test.go b/cosmos-sdk-store/cachekv/search_benchmark_test.go new file mode 100755 index 000000000..ecdc86a8e --- /dev/null +++ b/cosmos-sdk-store/cachekv/search_benchmark_test.go @@ -0,0 +1,44 @@ +package cachekv + +import ( + "strconv" + "testing" + + "cosmossdk.io/store/cachekv/internal" +) + +func BenchmarkLargeUnsortedMisses(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + store := generateStore() + b.StartTimer() + + for k := 0; k < 10000; k++ { + // cache has A + Z values + // these are within range, but match nothing + store.dirtyItems([]byte("B1"), []byte("B2")) + } + } +} + +func generateStore() *Store { + cache := map[string]*cValue{} + unsorted := map[string]struct{}{} + for i := 0; i < 5000; i++ { + key := "A" + strconv.Itoa(i) + unsorted[key] = struct{}{} + cache[key] = &cValue{} + } + + for i := 0; i < 5000; i++ { + key := "Z" + strconv.Itoa(i) + unsorted[key] = struct{}{} + cache[key] = &cValue{} + } + + return &Store{ + cache: cache, + unsortedCache: unsorted, + sortedCache: internal.NewBTree(), + } +} diff --git a/cosmos-sdk-store/cachekv/search_test.go b/cosmos-sdk-store/cachekv/search_test.go new file mode 100755 index 000000000..41321c076 --- /dev/null +++ b/cosmos-sdk-store/cachekv/search_test.go @@ -0,0 +1,141 @@ +package cachekv + +import "testing" + +func TestFindStartIndex(t *testing.T) { + tests := []struct { + name string + sortedL []string + query string + want int + }{ + { + name: "non-existent value", + sortedL: []string{"a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, + query: "o", + want: 8, + }, + { + name: "dupes start at index 0", + sortedL: []string{"a", "a", "a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, + query: "a", + want: 0, + }, + { + name: "dupes start at non-index 0", + sortedL: []string{"a", "c", "c", "c", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, + query: "c", + want: 1, + }, + { + name: "at end", + sortedL: []string{"a", "e", "u", "v", "w", "x", "y", "z"}, + query: "z", + want: 7, + }, + { + name: "dupes at end", + sortedL: []string{"a", "e", "u", "v", "w", "x", "y", "z", "z", "z", "z"}, + query: "z", + want: 7, + }, + { + name: "entirely dupes", + sortedL: []string{"z", "z", "z", "z", "z"}, + query: "z", + want: 0, + }, + { + name: "non-existent but within >=start", + sortedL: []string{"z", "z", "z", "z", "z"}, + query: "p", + want: 0, + }, + { + name: "non-existent and out of range", + sortedL: []string{"d", "e", "f", "g", "h"}, + query: "z", + want: -1, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + body := tt.sortedL + got := findStartIndex(body, tt.query) + if got != tt.want { + t.Fatalf("Got: %d, want: %d", got, tt.want) + } + }) + } +} + 
+func TestFindEndIndex(t *testing.T) { + tests := []struct { + name string + sortedL []string + query string + want int + }{ + { + name: "non-existent value", + sortedL: []string{"a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, + query: "o", + want: 7, + }, + { + name: "dupes start at index 0", + sortedL: []string{"a", "a", "a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, + query: "a", + want: 0, + }, + { + name: "dupes start at non-index 0", + sortedL: []string{"a", "c", "c", "c", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, + query: "c", + want: 1, + }, + { + name: "at end", + sortedL: []string{"a", "e", "u", "v", "w", "x", "y", "z"}, + query: "z", + want: 7, + }, + { + name: "dupes at end", + sortedL: []string{"a", "e", "u", "v", "w", "x", "y", "z", "z", "z", "z"}, + query: "z", + want: 7, + }, + { + name: "entirely dupes", + sortedL: []string{"z", "z", "z", "z", "z"}, + query: "z", + want: 0, + }, + { + name: "non-existent and out of range", + sortedL: []string{"z", "z", "z", "z", "z"}, + query: "p", + want: -1, + }, + { + name: "non-existent and out of range", + sortedL: []string{"d", "e", "f", "g", "h"}, + query: "z", + want: 4, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + body := tt.sortedL + got := findEndIndex(body, tt.query) + if got != tt.want { + t.Fatalf("Got: %d, want: %d", got, tt.want) + } + }) + } +} diff --git a/cosmos-sdk-store/cachekv/store.go b/cosmos-sdk-store/cachekv/store.go new file mode 100755 index 000000000..08cfc2b32 --- /dev/null +++ b/cosmos-sdk-store/cachekv/store.go @@ -0,0 +1,408 @@ +package cachekv + +import ( + "bytes" + "io" + "sort" + "sync" + + dbm "github.com/cosmos/cosmos-db" + + "cosmossdk.io/math" + "cosmossdk.io/store/cachekv/internal" + "cosmossdk.io/store/internal/conv" + "cosmossdk.io/store/internal/kv" + "cosmossdk.io/store/tracekv" + "cosmossdk.io/store/types" +) + +// cValue represents a cached value. +// If dirty is true, it indicates the cached value is different from the underlying value. +type cValue struct { + value []byte + dirty bool +} + +// Store wraps an in-memory cache around an underlying types.KVStore. +type Store struct { + mtx sync.Mutex + cache map[string]*cValue + unsortedCache map[string]struct{} + sortedCache internal.BTree // always ascending sorted + parent types.KVStore +} + +var _ types.CacheKVStore = (*Store)(nil) + +// NewStore creates a new Store object +func NewStore(parent types.KVStore) *Store { + return &Store{ + cache: make(map[string]*cValue), + unsortedCache: make(map[string]struct{}), + sortedCache: internal.NewBTree(), + parent: parent, + } +} + +// GetStoreType implements Store. +func (store *Store) GetStoreType() types.StoreType { + return store.parent.GetStoreType() +} + +// Get implements types.KVStore. +func (store *Store) Get(key []byte) (value []byte) { + store.mtx.Lock() + defer store.mtx.Unlock() + + types.AssertValidKey(key) + + cacheValue, ok := store.cache[conv.UnsafeBytesToStr(key)] + if !ok { + value = store.parent.Get(key) + store.setCacheValue(key, value, false) + } else { + value = cacheValue.value + } + + return value +} + +// Set implements types.KVStore. +func (store *Store) Set(key, value []byte) { + types.AssertValidKey(key) + types.AssertValidValue(value) + + store.mtx.Lock() + defer store.mtx.Unlock() + store.setCacheValue(key, value, true) +} + +// Has implements types.KVStore. 
+func (store *Store) Has(key []byte) bool { + value := store.Get(key) + return value != nil +} + +// Delete implements types.KVStore. +func (store *Store) Delete(key []byte) { + types.AssertValidKey(key) + + store.mtx.Lock() + defer store.mtx.Unlock() + + store.setCacheValue(key, nil, true) +} + +func (store *Store) resetCaches() { + if len(store.cache) > 100_000 { + // Cache is too large. We likely did something linear time + // (e.g. Epoch block, Genesis block, etc). Free the old caches from memory, and let them get re-allocated. + // TODO: In a future CacheKV redesign, such linear workloads should get into a different cache instantiation. + // 100_000 is arbitrarily chosen as it solved Osmosis' InitGenesis RAM problem. + store.cache = make(map[string]*cValue) + store.unsortedCache = make(map[string]struct{}) + } else { + // Clear the cache using the map clearing idiom + // and not allocating fresh objects. + // Please see https://bencher.orijtech.com/perfclinic/mapclearing/ + for key := range store.cache { + delete(store.cache, key) + } + for key := range store.unsortedCache { + delete(store.unsortedCache, key) + } + } + store.sortedCache = internal.NewBTree() +} + +// Implements Cachetypes.KVStore. +func (store *Store) Write() { + store.mtx.Lock() + defer store.mtx.Unlock() + + if len(store.cache) == 0 && len(store.unsortedCache) == 0 { + store.sortedCache = internal.NewBTree() + return + } + + type cEntry struct { + key string + val *cValue + } + + // We need a copy of all of the keys. + // Not the best. To reduce RAM pressure, we copy the values as well + // and clear out the old caches right after the copy. + sortedCache := make([]cEntry, 0, len(store.cache)) + + for key, dbValue := range store.cache { + if dbValue.dirty { + sortedCache = append(sortedCache, cEntry{key, dbValue}) + } + } + store.resetCaches() + sort.Slice(sortedCache, func(i, j int) bool { + return sortedCache[i].key < sortedCache[j].key + }) + + // TODO: Consider allowing usage of Batch, which would allow the write to + // at least happen atomically. + for _, obj := range sortedCache { + // We use []byte(key) instead of conv.UnsafeStrToBytes because we cannot + // be sure if the underlying store might do a save with the byteslice or + // not. Once we get confirmation that .Delete is guaranteed not to + // save the byteslice, then we can assume only a read-only copy is sufficient. + if obj.val.value != nil { + // It already exists in the parent, hence update it. + store.parent.Set([]byte(obj.key), obj.val.value) + } else { + store.parent.Delete([]byte(obj.key)) + } + } +} + +// CacheWrap implements CacheWrapper. +func (store *Store) CacheWrap() types.CacheWrap { + return NewStore(store) +} + +// CacheWrapWithTrace implements the CacheWrapper interface. +func (store *Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap { + return NewStore(tracekv.NewStore(store, w, tc)) +} + +//---------------------------------------- +// Iteration + +// Iterator implements types.KVStore. +func (store *Store) Iterator(start, end []byte) types.Iterator { + return store.iterator(start, end, true) +} + +// ReverseIterator implements types.KVStore. 
+func (store *Store) ReverseIterator(start, end []byte) types.Iterator {
+	return store.iterator(start, end, false)
+}
+
+func (store *Store) iterator(start, end []byte, ascending bool) types.Iterator {
+	store.mtx.Lock()
+	defer store.mtx.Unlock()
+
+	store.dirtyItems(start, end)
+	isoSortedCache := store.sortedCache.Copy()
+
+	var (
+		err           error
+		parent, cache types.Iterator
+	)
+
+	if ascending {
+		parent = store.parent.Iterator(start, end)
+		cache, err = isoSortedCache.Iterator(start, end)
+	} else {
+		parent = store.parent.ReverseIterator(start, end)
+		cache, err = isoSortedCache.ReverseIterator(start, end)
+	}
+	if err != nil {
+		panic(err)
+	}
+
+	return internal.NewCacheMergeIterator(parent, cache, ascending)
+}
+
+// findStartIndex performs a modified binary search to find the very first
+// element >= startQ, returning -1 if no such element exists.
+func findStartIndex(strL []string, startQ string) int {
+	if len(strL) == 0 {
+		return -1
+	}
+
+	var left, right, mid int
+	right = len(strL) - 1
+	for left <= right {
+		mid = (left + right) >> 1
+		midStr := strL[mid]
+		if midStr == startQ {
+			// Handle the condition where there might be multiple values equal to startQ.
+			// We are looking for the very last value < midStr, so that i+1 will be the
+			// first element >= midStr.
+			for i := mid - 1; i >= 0; i-- {
+				if strL[i] != midStr {
+					return i + 1
+				}
+			}
+			return 0
+		}
+		if midStr < startQ {
+			left = mid + 1
+		} else { // midStr > startQ
+			right = mid - 1
+		}
+	}
+	if left >= 0 && left < len(strL) && strL[left] >= startQ {
+		return left
+	}
+	return -1
+}
+
+// findEndIndex performs a modified binary search to find the first occurrence
+// of endQ; if endQ is absent, it falls back to the largest element strictly
+// less than endQ, returning -1 if no such element exists.
+func findEndIndex(strL []string, endQ string) int {
+	if len(strL) == 0 {
+		return -1
+	}
+
+	var left, right, mid int
+	right = len(strL) - 1
+	for left <= right {
+		mid = (left + right) >> 1
+		midStr := strL[mid]
+		if midStr == endQ {
+			// Handle the condition where there might be multiple values equal to endQ.
+			// We are looking for the very last value < midStr, so that i+1 will be the
+			// first element equal to endQ.
+			for i := mid - 1; i >= 0; i-- {
+				if strL[i] < midStr {
+					return i + 1
+				}
+			}
+			return 0
+		}
+		if midStr < endQ {
+			left = mid + 1
+		} else { // midStr > endQ
+			right = mid - 1
+		}
+	}
+
+	// The binary search failed; find the largest value less than endQ.
+	for i := right; i >= 0; i-- {
+		if strL[i] < endQ {
+			return i
+		}
+	}
+
+	return -1
+}
+
+// sortState denotes whether the keys passed to clearUnsortedCacheSubset are
+// already sorted.
+type sortState int
+
+const (
+	stateUnsorted sortState = iota
+	stateAlreadySorted
+)
+
+const minSortSize = 1024
+
+// dirtyItems constructs a slice of dirty items to use with memIterator.
+func (store *Store) dirtyItems(start, end []byte) {
+	startStr, endStr := conv.UnsafeBytesToStr(start), conv.UnsafeBytesToStr(end)
+	if end != nil && startStr > endStr {
+		// Nothing to do here.
+		return
+	}
+
+	n := len(store.unsortedCache)
+	unsorted := make([]*kv.Pair, 0)
+	// If the unsortedCache is too big, it costs too much to determine
+	// what's in the subset we are concerned about.
+	// If you are interleaving iterator calls with writes, this can easily become an
+	// O(N^2) overhead.
+	// Even without that, too many range checks eventually become more expensive
+	// than just not having the cache.
+	if n < minSortSize {
+		for key := range store.unsortedCache {
+			// dbm.IsKeyInDomain is nil-safe and returns true iff key falls within
+			// the domain [start, end).
+			if dbm.IsKeyInDomain(conv.UnsafeStrToBytes(key), start, end) {
+				cacheValue := store.cache[key]
+				unsorted = append(unsorted, &kv.Pair{Key: []byte(key), Value: cacheValue.value})
+			}
+		}
+		store.clearUnsortedCacheSubset(unsorted, stateUnsorted)
+		return
+	}
+
+	// Otherwise it is large, so perform a modified binary search to find
+	// the target ranges for the keys that we should be looking for.
+	strL := make([]string, 0, n)
+	for key := range store.unsortedCache {
+		strL = append(strL, key)
+	}
+	sort.Strings(strL)
+
+	// Now find the values within the domain [start, end).
+	startIndex := findStartIndex(strL, startStr)
+	if startIndex < 0 {
+		startIndex = 0
+	}
+
+	var endIndex int
+	if end == nil {
+		endIndex = len(strL) - 1
+	} else {
+		endIndex = findEndIndex(strL, endStr)
+	}
+	if endIndex < 0 {
+		endIndex = len(strL) - 1
+	}
+
+	// Since we spent cycles sorting the values, we should process and remove a
+	// reasonable amount: ensure [startIndex, endIndex] covers at least minSortSize
+	// entries, expanding the range if it falls short. This amortizes the cost of
+	// processing elements across multiple calls.
+	if endIndex-startIndex < minSortSize {
+		endIndex = math.Min(startIndex+minSortSize, len(strL)-1)
+		if endIndex-startIndex < minSortSize {
+			startIndex = math.Max(endIndex-minSortSize, 0)
+		}
+	}
+
+	kvL := make([]*kv.Pair, 0, 1+endIndex-startIndex)
+	for i := startIndex; i <= endIndex; i++ {
+		key := strL[i]
+		cacheValue := store.cache[key]
+		kvL = append(kvL, &kv.Pair{Key: []byte(key), Value: cacheValue.value})
+	}
+
+	// kvL was already sorted, so pass it in as-is.
+	store.clearUnsortedCacheSubset(kvL, stateAlreadySorted)
+}
+
+func (store *Store) clearUnsortedCacheSubset(unsorted []*kv.Pair, sortState sortState) {
+	n := len(store.unsortedCache)
+	if len(unsorted) == n { // This pattern allows the Go compiler to emit the map clearing idiom for the entire map.
+		for key := range store.unsortedCache {
+			delete(store.unsortedCache, key)
+		}
+	} else { // Otherwise, normally delete the unsorted keys from the map.
+		for _, kv := range unsorted {
+			delete(store.unsortedCache, conv.UnsafeBytesToStr(kv.Key))
+		}
+	}
+
+	if sortState == stateUnsorted {
+		sort.Slice(unsorted, func(i, j int) bool {
+			return bytes.Compare(unsorted[i].Key, unsorted[j].Key) < 0
+		})
+	}
+
+	for _, item := range unsorted {
+		// sortedCache is able to store a `nil` value to represent deleted items.
+		store.sortedCache.Set(item.Key, item.Value)
+	}
+}
+
+//----------------------------------------
+// etc
+
+// setCacheValue is the only entrypoint to mutate store.cache.
+// A `nil` value means a deletion.
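+// Callers must hold store.mtx.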
+func (store *Store) setCacheValue(key, value []byte, dirty bool) { + keyStr := conv.UnsafeBytesToStr(key) + store.cache[keyStr] = &cValue{ + value: value, + dirty: dirty, + } + if dirty { + store.unsortedCache[keyStr] = struct{}{} + } +} diff --git a/cosmos-sdk-store/cachekv/store_bench_test.go b/cosmos-sdk-store/cachekv/store_bench_test.go new file mode 100755 index 000000000..8f15855e0 --- /dev/null +++ b/cosmos-sdk-store/cachekv/store_bench_test.go @@ -0,0 +1,153 @@ +package cachekv_test + +import ( + "testing" + + dbm "github.com/cosmos/cosmos-db" + + "cosmossdk.io/store/cachekv" + "cosmossdk.io/store/dbadapter" +) + +var sink interface{} + +const defaultValueSizeBz = 1 << 12 + +// This benchmark measures the time of iterator.Next() when the parent store is blank +func benchmarkBlankParentIteratorNext(b *testing.B, keysize int) { + b.Helper() + mem := dbadapter.Store{DB: dbm.NewMemDB()} + kvstore := cachekv.NewStore(mem) + // Use a singleton for value, to not waste time computing it + value := randSlice(defaultValueSizeBz) + // Use simple values for keys, pick a random start, + // and take next b.N keys sequentially after.] + startKey := randSlice(32) + + // Add 1 to avoid issues when b.N = 1 + keys := generateSequentialKeys(startKey, b.N+1) + for _, k := range keys { + kvstore.Set(k, value) + } + + b.ReportAllocs() + b.ResetTimer() + + iter := kvstore.Iterator(keys[0], keys[b.N]) + defer iter.Close() + + for ; iter.Valid(); iter.Next() { + _ = iter.Key() + // deadcode elimination stub + sink = iter + } +} + +// Benchmark setting New keys to a store, where the new keys are in sequence. +func benchmarkBlankParentAppend(b *testing.B, keysize int) { + b.Helper() + mem := dbadapter.Store{DB: dbm.NewMemDB()} + kvstore := cachekv.NewStore(mem) + + // Use a singleton for value, to not waste time computing it + value := randSlice(32) + // Use simple values for keys, pick a random start, + // and take next b.N keys sequentially after. + startKey := randSlice(32) + + keys := generateSequentialKeys(startKey, b.N) + + b.ReportAllocs() + b.ResetTimer() + + for _, k := range keys { + kvstore.Set(k, value) + } +} + +// Benchmark setting New keys to a store, where the new keys are random. +// the speed of this function does not depend on the values in the parent store +func benchmarkRandomSet(b *testing.B, keysize int) { + b.Helper() + mem := dbadapter.Store{DB: dbm.NewMemDB()} + kvstore := cachekv.NewStore(mem) + + // Use a singleton for value, to not waste time computing it + value := randSlice(defaultValueSizeBz) + // Add 1 to avoid issues when b.N = 1 + keys := generateRandomKeys(keysize, b.N+1) + + b.ReportAllocs() + b.ResetTimer() + + for _, k := range keys { + kvstore.Set(k, value) + } + + iter := kvstore.Iterator(keys[0], keys[b.N]) + defer iter.Close() + + for ; iter.Valid(); iter.Next() { + _ = iter.Key() + // deadcode elimination stub + sink = iter + } +} + +// Benchmark creating an iterator on a parent with D entries, +// that are all deleted in the cacheKV store. +// We essentially are benchmarking the cacheKV iterator creation & iteration times +// with the number of entries deleted in the parent. +func benchmarkIteratorOnParentWithManyDeletes(b *testing.B, numDeletes int) { + b.Helper() + mem := dbadapter.Store{DB: dbm.NewMemDB()} + + // Use a singleton for value, to not waste time computing it + value := randSlice(32) + // Use simple values for keys, pick a random start, + // and take next D keys sequentially after. 
+ startKey := randSlice(32) + // Add 1 to avoid issues when numDeletes = 1 + keys := generateSequentialKeys(startKey, numDeletes+1) + // setup parent db with D keys. + for _, k := range keys { + mem.Set(k, value) + } + kvstore := cachekv.NewStore(mem) + // Delete all keys from the cache KV store. + // The keys[1:] is to keep at least one entry in parent, due to a bug in the SDK iterator design. + // Essentially the iterator will never be valid, in that it should never run. + // However, this is incompatible with the for loop structure the SDK uses, hence + // causes a panic. Thus we do keys[1:]. + for _, k := range keys[1:] { + kvstore.Delete(k) + } + + b.ReportAllocs() + b.ResetTimer() + + iter := kvstore.Iterator(keys[0], keys[numDeletes]) + defer iter.Close() + + for ; iter.Valid(); iter.Next() { + _ = iter.Key() + // deadcode elimination stub + sink = iter + } +} + +func BenchmarkBlankParentIteratorNextKeySize32(b *testing.B) { + benchmarkBlankParentIteratorNext(b, 32) +} + +func BenchmarkBlankParentAppendKeySize32(b *testing.B) { + benchmarkBlankParentAppend(b, 32) +} + +func BenchmarkSetKeySize32(b *testing.B) { + benchmarkRandomSet(b, 32) +} + +func BenchmarkIteratorOnParentWith1MDeletes(b *testing.B) { + benchmarkIteratorOnParentWithManyDeletes(b, 1_000_000) +} diff --git a/cosmos-sdk-store/cachekv/store_test.go b/cosmos-sdk-store/cachekv/store_test.go new file mode 100755 index 000000000..3c5622355 --- /dev/null +++ b/cosmos-sdk-store/cachekv/store_test.go @@ -0,0 +1,694 @@ +package cachekv_test + +import ( + "fmt" + "testing" + + dbm "github.com/cosmos/cosmos-db" + "github.com/stretchr/testify/require" + + "cosmossdk.io/math/unsafe" + "cosmossdk.io/store/cachekv" + "cosmossdk.io/store/dbadapter" + "cosmossdk.io/store/types" +) + +func newCacheKVStore() types.CacheKVStore { + mem := dbadapter.Store{DB: dbm.NewMemDB()} + return cachekv.NewStore(mem) +} + +func keyFmt(i int) []byte { return bz(fmt.Sprintf("key%0.8d", i)) } +func valFmt(i int) []byte { return bz(fmt.Sprintf("value%0.8d", i)) } + +func TestCacheKVStore(t *testing.T) { + mem := dbadapter.Store{DB: dbm.NewMemDB()} + st := cachekv.NewStore(mem) + + require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty") + + // put something in mem and in cache + mem.Set(keyFmt(1), valFmt(1)) + st.Set(keyFmt(1), valFmt(1)) + require.Equal(t, valFmt(1), st.Get(keyFmt(1))) + + // update it in cache, shoudn't change mem + st.Set(keyFmt(1), valFmt(2)) + require.Equal(t, valFmt(2), st.Get(keyFmt(1))) + require.Equal(t, valFmt(1), mem.Get(keyFmt(1))) + + // write it. should change mem + st.Write() + require.Equal(t, valFmt(2), mem.Get(keyFmt(1))) + require.Equal(t, valFmt(2), st.Get(keyFmt(1))) + + // more writes and checks + st.Write() + st.Write() + require.Equal(t, valFmt(2), mem.Get(keyFmt(1))) + require.Equal(t, valFmt(2), st.Get(keyFmt(1))) + + // make a new one, check it + st = cachekv.NewStore(mem) + require.Equal(t, valFmt(2), st.Get(keyFmt(1))) + + // make a new one and delete - should not be removed from mem + st = cachekv.NewStore(mem) + st.Delete(keyFmt(1)) + require.Empty(t, st.Get(keyFmt(1))) + require.Equal(t, mem.Get(keyFmt(1)), valFmt(2)) + + // Write. 
should now be removed from both + st.Write() + require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty") + require.Empty(t, mem.Get(keyFmt(1)), "Expected `key1` to be empty") +} + +func TestCacheKVStoreNoNilSet(t *testing.T) { + mem := dbadapter.Store{DB: dbm.NewMemDB()} + st := cachekv.NewStore(mem) + require.Panics(t, func() { st.Set([]byte("key"), nil) }, "setting a nil value should panic") + require.Panics(t, func() { st.Set(nil, []byte("value")) }, "setting a nil key should panic") + require.Panics(t, func() { st.Set([]byte(""), []byte("value")) }, "setting an empty key should panic") +} + +func TestCacheKVStoreNested(t *testing.T) { + mem := dbadapter.Store{DB: dbm.NewMemDB()} + st := cachekv.NewStore(mem) + + // set. check its there on st and not on mem. + st.Set(keyFmt(1), valFmt(1)) + require.Empty(t, mem.Get(keyFmt(1))) + require.Equal(t, valFmt(1), st.Get(keyFmt(1))) + + // make a new from st and check + st2 := cachekv.NewStore(st) + require.Equal(t, valFmt(1), st2.Get(keyFmt(1))) + + // update the value on st2, check it only effects st2 + st2.Set(keyFmt(1), valFmt(3)) + require.Equal(t, []byte(nil), mem.Get(keyFmt(1))) + require.Equal(t, valFmt(1), st.Get(keyFmt(1))) + require.Equal(t, valFmt(3), st2.Get(keyFmt(1))) + + // st2 writes to its parent, st. doesnt effect mem + st2.Write() + require.Equal(t, []byte(nil), mem.Get(keyFmt(1))) + require.Equal(t, valFmt(3), st.Get(keyFmt(1))) + + // updates mem + st.Write() + require.Equal(t, valFmt(3), mem.Get(keyFmt(1))) +} + +func TestCacheKVIteratorBounds(t *testing.T) { + st := newCacheKVStore() + + // set some items + nItems := 5 + for i := 0; i < nItems; i++ { + st.Set(keyFmt(i), valFmt(i)) + } + + // iterate over all of them + itr := st.Iterator(nil, nil) + i := 0 + for ; itr.Valid(); itr.Next() { + k, v := itr.Key(), itr.Value() + require.Equal(t, keyFmt(i), k) + require.Equal(t, valFmt(i), v) + i++ + } + require.Equal(t, nItems, i) + require.NoError(t, itr.Close()) + + // iterate over none + itr = st.Iterator(bz("money"), nil) + i = 0 + for ; itr.Valid(); itr.Next() { + i++ + } + require.Equal(t, 0, i) + require.NoError(t, itr.Close()) + + // iterate over lower + itr = st.Iterator(keyFmt(0), keyFmt(3)) + i = 0 + for ; itr.Valid(); itr.Next() { + k, v := itr.Key(), itr.Value() + require.Equal(t, keyFmt(i), k) + require.Equal(t, valFmt(i), v) + i++ + } + require.Equal(t, 3, i) + require.NoError(t, itr.Close()) + + // iterate over upper + itr = st.Iterator(keyFmt(2), keyFmt(4)) + i = 2 + for ; itr.Valid(); itr.Next() { + k, v := itr.Key(), itr.Value() + require.Equal(t, keyFmt(i), k) + require.Equal(t, valFmt(i), v) + i++ + } + require.Equal(t, 4, i) + require.NoError(t, itr.Close()) +} + +func TestCacheKVReverseIteratorBounds(t *testing.T) { + st := newCacheKVStore() + + // set some items + nItems := 5 + for i := 0; i < nItems; i++ { + st.Set(keyFmt(i), valFmt(i)) + } + + // iterate over all of them + itr := st.ReverseIterator(nil, nil) + i := 0 + for ; itr.Valid(); itr.Next() { + k, v := itr.Key(), itr.Value() + require.Equal(t, keyFmt(nItems-1-i), k) + require.Equal(t, valFmt(nItems-1-i), v) + i++ + } + require.Equal(t, nItems, i) + require.NoError(t, itr.Close()) + + // iterate over none + itr = st.ReverseIterator(bz("money"), nil) + i = 0 + for ; itr.Valid(); itr.Next() { + i++ + } + require.Equal(t, 0, i) + require.NoError(t, itr.Close()) + + // iterate over lower + end := 3 + itr = st.ReverseIterator(keyFmt(0), keyFmt(end)) + i = 0 + for ; itr.Valid(); itr.Next() { + i++ + k, v := itr.Key(), itr.Value() + 
require.Equal(t, keyFmt(end-i), k) + require.Equal(t, valFmt(end-i), v) + } + require.Equal(t, 3, i) + require.NoError(t, itr.Close()) + + // iterate over upper + end = 4 + itr = st.ReverseIterator(keyFmt(2), keyFmt(end)) + i = 0 + for ; itr.Valid(); itr.Next() { + i++ + k, v := itr.Key(), itr.Value() + require.Equal(t, keyFmt(end-i), k) + require.Equal(t, valFmt(end-i), v) + } + require.Equal(t, 2, i) + require.NoError(t, itr.Close()) +} + +func TestCacheKVMergeIteratorBasics(t *testing.T) { + st := newCacheKVStore() + + // set and delete an item in the cache, iterator should be empty + k, v := keyFmt(0), valFmt(0) + st.Set(k, v) + st.Delete(k) + assertIterateDomain(t, st, 0) + + // now set it and assert its there + st.Set(k, v) + assertIterateDomain(t, st, 1) + + // write it and assert its there + st.Write() + assertIterateDomain(t, st, 1) + + // remove it in cache and assert its not + st.Delete(k) + assertIterateDomain(t, st, 0) + + // write the delete and assert its not there + st.Write() + assertIterateDomain(t, st, 0) + + // add two keys and assert theyre there + k1, v1 := keyFmt(1), valFmt(1) + st.Set(k, v) + st.Set(k1, v1) + assertIterateDomain(t, st, 2) + + // write it and assert theyre there + st.Write() + assertIterateDomain(t, st, 2) + + // remove one in cache and assert its not + st.Delete(k1) + assertIterateDomain(t, st, 1) + + // write the delete and assert its not there + st.Write() + assertIterateDomain(t, st, 1) + + // delete the other key in cache and asserts its empty + st.Delete(k) + assertIterateDomain(t, st, 0) +} + +func TestCacheKVMergeIteratorDeleteLast(t *testing.T) { + st := newCacheKVStore() + + // set some items and write them + nItems := 5 + for i := 0; i < nItems; i++ { + st.Set(keyFmt(i), valFmt(i)) + } + st.Write() + + // set some more items and leave dirty + for i := nItems; i < nItems*2; i++ { + st.Set(keyFmt(i), valFmt(i)) + } + + // iterate over all of them + assertIterateDomain(t, st, nItems*2) + + // delete them all + for i := 0; i < nItems*2; i++ { + last := nItems*2 - 1 - i + st.Delete(keyFmt(last)) + assertIterateDomain(t, st, last) + } +} + +func TestCacheKVMergeIteratorDeletes(t *testing.T) { + st := newCacheKVStore() + truth := dbm.NewMemDB() + + // set some items and write them + nItems := 10 + for i := 0; i < nItems; i++ { + doOp(t, st, truth, opSet, i) + } + st.Write() + + // delete every other item, starting from 0 + for i := 0; i < nItems; i += 2 { + doOp(t, st, truth, opDel, i) + assertIterateDomainCompare(t, st, truth) + } + + // reset + st = newCacheKVStore() + truth = dbm.NewMemDB() + + // set some items and write them + for i := 0; i < nItems; i++ { + doOp(t, st, truth, opSet, i) + } + st.Write() + + // delete every other item, starting from 1 + for i := 1; i < nItems; i += 2 { + doOp(t, st, truth, opDel, i) + assertIterateDomainCompare(t, st, truth) + } +} + +func TestCacheKVMergeIteratorChunks(t *testing.T) { + st := newCacheKVStore() + + // Use the truth to check values on the merge iterator + truth := dbm.NewMemDB() + + // sets to the parent + setRange(t, st, truth, 0, 20) + setRange(t, st, truth, 40, 60) + st.Write() + + // sets to the cache + setRange(t, st, truth, 20, 40) + setRange(t, st, truth, 60, 80) + assertIterateDomainCheck(t, st, truth, []keyRange{{0, 80}}) + + // remove some parents and some cache + deleteRange(t, st, truth, 15, 25) + assertIterateDomainCheck(t, st, truth, []keyRange{{0, 15}, {25, 80}}) + + // remove some parents and some cache + deleteRange(t, st, truth, 35, 45) + assertIterateDomainCheck(t, st, 
truth, []keyRange{{0, 15}, {25, 35}, {45, 80}}) + + // write, add more to the cache, and delete some cache + st.Write() + setRange(t, st, truth, 38, 42) + deleteRange(t, st, truth, 40, 43) + assertIterateDomainCheck(t, st, truth, []keyRange{{0, 15}, {25, 35}, {38, 40}, {45, 80}}) +} + +func TestCacheKVMergeIteratorDomain(t *testing.T) { + st := newCacheKVStore() + + itr := st.Iterator(nil, nil) + start, end := itr.Domain() + require.Equal(t, start, end) + require.NoError(t, itr.Close()) + + itr = st.Iterator(keyFmt(40), keyFmt(60)) + start, end = itr.Domain() + require.Equal(t, keyFmt(40), start) + require.Equal(t, keyFmt(60), end) + require.NoError(t, itr.Close()) + + start, end = st.ReverseIterator(keyFmt(0), keyFmt(80)).Domain() + require.Equal(t, keyFmt(0), start) + require.Equal(t, keyFmt(80), end) +} + +func TestCacheKVMergeIteratorRandom(t *testing.T) { + st := newCacheKVStore() + truth := dbm.NewMemDB() + + start, end := 25, 975 + max := 1000 + setRange(t, st, truth, start, end) + + // do an op, test the iterator + for i := 0; i < 2000; i++ { + doRandomOp(t, st, truth, max) + assertIterateDomainCompare(t, st, truth) + } +} + +func TestNilEndIterator(t *testing.T) { + const SIZE = 3000 + + tests := []struct { + name string + write bool + startIndex int + end []byte + }{ + {name: "write=false, end=nil", write: false, end: nil, startIndex: 1000}, + {name: "write=false, end=nil; full key scan", write: false, end: nil, startIndex: 2000}, + {name: "write=true, end=nil", write: true, end: nil, startIndex: 1000}, + {name: "write=false, end=non-nil", write: false, end: keyFmt(3000), startIndex: 1000}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + st := newCacheKVStore() + + for i := 0; i < SIZE; i++ { + kstr := keyFmt(i) + st.Set(kstr, valFmt(i)) + } + + if tt.write { + st.Write() + } + + itr := st.Iterator(keyFmt(tt.startIndex), tt.end) + i := tt.startIndex + j := 0 + for itr.Valid() { + require.Equal(t, keyFmt(i), itr.Key()) + require.Equal(t, valFmt(i), itr.Value()) + itr.Next() + i++ + j++ + } + + require.Equal(t, SIZE-tt.startIndex, j) + require.NoError(t, itr.Close()) + }) + } +} + +// TestIteratorDeadlock demonstrate the deadlock issue in cache store. +func TestIteratorDeadlock(t *testing.T) { + mem := dbadapter.Store{DB: dbm.NewMemDB()} + store := cachekv.NewStore(mem) + // the channel buffer is 64 and received once, so put at least 66 elements. + for i := 0; i < 66; i++ { + store.Set([]byte(fmt.Sprintf("key%d", i)), []byte{1}) + } + it := store.Iterator(nil, nil) + defer it.Close() + store.Set([]byte("key20"), []byte{1}) + // it'll be blocked here with previous version, or enable lock on btree. 
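+ // With a copy-on-write btree backing sortedCache, creating this second
+ // iterator must succeed without blocking, even while the first is still open.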
+ it2 := store.Iterator(nil, nil) + defer it2.Close() +} + +//------------------------------------------------------------------------------------------- +// do some random ops + +const ( + opSet = 0 + opSetRange = 1 + opDel = 2 + opDelRange = 3 + opWrite = 4 + + totalOps = 5 // number of possible operations +) + +func randInt(n int) int { + return unsafe.NewRand().Int() % n +} + +// useful for replaying a error case if we find one +func doOp(t *testing.T, st types.CacheKVStore, truth dbm.DB, op int, args ...int) { + t.Helper() + switch op { + case opSet: + k := args[0] + st.Set(keyFmt(k), valFmt(k)) + err := truth.Set(keyFmt(k), valFmt(k)) + require.NoError(t, err) + case opSetRange: + start := args[0] + end := args[1] + setRange(t, st, truth, start, end) + case opDel: + k := args[0] + st.Delete(keyFmt(k)) + err := truth.Delete(keyFmt(k)) + require.NoError(t, err) + case opDelRange: + start := args[0] + end := args[1] + deleteRange(t, st, truth, start, end) + case opWrite: + st.Write() + } +} + +func doRandomOp(t *testing.T, st types.CacheKVStore, truth dbm.DB, maxKey int) { + t.Helper() + r := randInt(totalOps) + switch r { + case opSet: + k := randInt(maxKey) + st.Set(keyFmt(k), valFmt(k)) + err := truth.Set(keyFmt(k), valFmt(k)) + require.NoError(t, err) + case opSetRange: + start := randInt(maxKey - 2) + end := randInt(maxKey-start) + start + setRange(t, st, truth, start, end) + case opDel: + k := randInt(maxKey) + st.Delete(keyFmt(k)) + err := truth.Delete(keyFmt(k)) + require.NoError(t, err) + case opDelRange: + start := randInt(maxKey - 2) + end := randInt(maxKey-start) + start + deleteRange(t, st, truth, start, end) + case opWrite: + st.Write() + } +} + +//------------------------------------------------------------------------------------------- + +// iterate over whole domain +func assertIterateDomain(t *testing.T, st types.KVStore, expectedN int) { + t.Helper() + itr := st.Iterator(nil, nil) + i := 0 + for ; itr.Valid(); itr.Next() { + k, v := itr.Key(), itr.Value() + require.Equal(t, keyFmt(i), k) + require.Equal(t, valFmt(i), v) + i++ + } + require.Equal(t, expectedN, i) + require.NoError(t, itr.Close()) +} + +func assertIterateDomainCheck(t *testing.T, st types.KVStore, mem dbm.DB, r []keyRange) { + t.Helper() + // iterate over each and check they match the other + itr := st.Iterator(nil, nil) + itr2, err := mem.Iterator(nil, nil) // ground truth + require.NoError(t, err) + + krc := newKeyRangeCounter(r) + i := 0 + + for ; krc.valid(); krc.next() { + require.True(t, itr.Valid()) + require.True(t, itr2.Valid()) + + // check the key/val matches the ground truth + k, v := itr.Key(), itr.Value() + k2, v2 := itr2.Key(), itr2.Value() + require.Equal(t, k, k2) + require.Equal(t, v, v2) + + // check they match the counter + require.Equal(t, k, keyFmt(krc.key())) + + itr.Next() + itr2.Next() + i++ + } + + require.False(t, itr.Valid()) + require.False(t, itr2.Valid()) + require.NoError(t, itr.Close()) + require.NoError(t, itr2.Close()) +} + +func assertIterateDomainCompare(t *testing.T, st types.KVStore, mem dbm.DB) { + t.Helper() + // iterate over each and check they match the other + itr := st.Iterator(nil, nil) + itr2, err := mem.Iterator(nil, nil) // ground truth + require.NoError(t, err) + checkIterators(t, itr, itr2) + checkIterators(t, itr2, itr) + require.NoError(t, itr.Close()) + require.NoError(t, itr2.Close()) +} + +func checkIterators(t *testing.T, itr, itr2 types.Iterator) { + t.Helper() + for ; itr.Valid(); itr.Next() { + require.True(t, itr2.Valid()) + k, v := 
itr.Key(), itr.Value() + k2, v2 := itr2.Key(), itr2.Value() + require.Equal(t, k, k2) + require.Equal(t, v, v2) + itr2.Next() + } + require.False(t, itr.Valid()) + require.False(t, itr2.Valid()) +} + +//-------------------------------------------------------- + +func setRange(t *testing.T, st types.KVStore, mem dbm.DB, start, end int) { + t.Helper() + for i := start; i < end; i++ { + st.Set(keyFmt(i), valFmt(i)) + err := mem.Set(keyFmt(i), valFmt(i)) + require.NoError(t, err) + } +} + +func deleteRange(t *testing.T, st types.KVStore, mem dbm.DB, start, end int) { + t.Helper() + for i := start; i < end; i++ { + st.Delete(keyFmt(i)) + err := mem.Delete(keyFmt(i)) + require.NoError(t, err) + } +} + +//-------------------------------------------------------- + +type keyRange struct { + start int + end int +} + +func (kr keyRange) len() int { + return kr.end - kr.start +} + +func newKeyRangeCounter(kr []keyRange) *keyRangeCounter { + return &keyRangeCounter{keyRanges: kr} +} + +// we can iterate over this and make sure our real iterators have all the right keys +type keyRangeCounter struct { + rangeIdx int + idx int + keyRanges []keyRange +} + +func (krc *keyRangeCounter) valid() bool { + maxRangeIdx := len(krc.keyRanges) - 1 + maxRange := krc.keyRanges[maxRangeIdx] + + // if we're not in the max range, we're valid + if krc.rangeIdx <= maxRangeIdx && + krc.idx < maxRange.len() { + return true + } + + return false +} + +func (krc *keyRangeCounter) next() { + thisKeyRange := krc.keyRanges[krc.rangeIdx] + if krc.idx == thisKeyRange.len()-1 { + krc.rangeIdx++ + krc.idx = 0 + } else { + krc.idx++ + } +} + +func (krc *keyRangeCounter) key() int { + thisKeyRange := krc.keyRanges[krc.rangeIdx] + return thisKeyRange.start + krc.idx +} + +//-------------------------------------------------------- + +func bz(s string) []byte { return []byte(s) } + +func BenchmarkCacheKVStoreGetNoKeyFound(b *testing.B) { + b.ReportAllocs() + st := newCacheKVStore() + b.ResetTimer() + // assumes b.N < 2**24 + for i := 0; i < b.N; i++ { + st.Get([]byte{byte((i & 0xFF0000) >> 16), byte((i & 0xFF00) >> 8), byte(i & 0xFF)}) + } +} + +func BenchmarkCacheKVStoreGetKeyFound(b *testing.B) { + b.ReportAllocs() + st := newCacheKVStore() + for i := 0; i < b.N; i++ { + arr := []byte{byte((i & 0xFF0000) >> 16), byte((i & 0xFF00) >> 8), byte(i & 0xFF)} + st.Set(arr, arr) + } + b.ResetTimer() + // assumes b.N < 2**24 + for i := 0; i < b.N; i++ { + st.Get([]byte{byte((i & 0xFF0000) >> 16), byte((i & 0xFF00) >> 8), byte(i & 0xFF)}) + } +} diff --git a/cosmos-sdk-store/cachemulti/store.go b/cosmos-sdk-store/cachemulti/store.go new file mode 100755 index 000000000..696911370 --- /dev/null +++ b/cosmos-sdk-store/cachemulti/store.go @@ -0,0 +1,170 @@ +package cachemulti + +import ( + "fmt" + "io" + + dbm "github.com/cosmos/cosmos-db" + + "cosmossdk.io/store/cachekv" + "cosmossdk.io/store/dbadapter" + "cosmossdk.io/store/tracekv" + "cosmossdk.io/store/types" +) + +// storeNameCtxKey is the TraceContext metadata key that identifies +// the store which emitted a given trace. +const storeNameCtxKey = "store_name" + +//---------------------------------------- +// Store + +// Store holds many branched stores. +// Implements MultiStore. +// NOTE: a Store (and MultiStores in general) should never expose the +// keys for the substores. 
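+//
+// A minimal branch-and-flush sketch (illustrative only; `parent`, `key`,
+// `k`, and `v` are assumed names, not taken from this diff):
+//
+//	branch := parent.CacheMultiStore() // buffer writes in a branch
+//	branch.GetKVStore(key).Set(k, v)   // mutations hit only the branch
+//	branch.Write()                     // flush everything to the parent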
+type Store struct { + db types.CacheKVStore + stores map[types.StoreKey]types.CacheWrap + keys map[string]types.StoreKey + + traceWriter io.Writer + traceContext types.TraceContext +} + +var _ types.CacheMultiStore = Store{} + +// NewFromKVStore creates a new Store object from a mapping of store keys to +// CacheWrapper objects and a KVStore as the database. Each CacheWrapper store +// is a branched store. +func NewFromKVStore( + store types.KVStore, stores map[types.StoreKey]types.CacheWrapper, + keys map[string]types.StoreKey, traceWriter io.Writer, traceContext types.TraceContext, +) Store { + cms := Store{ + db: cachekv.NewStore(store), + stores: make(map[types.StoreKey]types.CacheWrap, len(stores)), + keys: keys, + traceWriter: traceWriter, + traceContext: traceContext, + } + + for key, store := range stores { + if cms.TracingEnabled() { + tctx := cms.traceContext.Clone().Merge(types.TraceContext{ + storeNameCtxKey: key.Name(), + }) + + store = tracekv.NewStore(store.(types.KVStore), cms.traceWriter, tctx) + } + cms.stores[key] = cachekv.NewStore(store.(types.KVStore)) + } + + return cms +} + +// NewStore creates a new Store object from a mapping of store keys to +// CacheWrapper objects. Each CacheWrapper store is a branched store. +func NewStore( + db dbm.DB, stores map[types.StoreKey]types.CacheWrapper, keys map[string]types.StoreKey, + traceWriter io.Writer, traceContext types.TraceContext, +) Store { + return NewFromKVStore(dbadapter.Store{DB: db}, stores, keys, traceWriter, traceContext) +} + +func newCacheMultiStoreFromCMS(cms Store) Store { + stores := make(map[types.StoreKey]types.CacheWrapper) + for k, v := range cms.stores { + stores[k] = v + } + + return NewFromKVStore(cms.db, stores, nil, cms.traceWriter, cms.traceContext) +} + +// SetTracer sets the tracer for the MultiStore that the underlying +// stores will utilize to trace operations. A MultiStore is returned. +func (cms Store) SetTracer(w io.Writer) types.MultiStore { + cms.traceWriter = w + return cms +} + +// SetTracingContext updates the tracing context for the MultiStore by merging +// the given context with the existing context by key. Any existing keys will +// be overwritten. It is implied that the caller should update the context when +// necessary between tracing operations. It returns a modified MultiStore. +func (cms Store) SetTracingContext(tc types.TraceContext) types.MultiStore { + if cms.traceContext != nil { + for k, v := range tc { + cms.traceContext[k] = v + } + } else { + cms.traceContext = tc + } + + return cms +} + +// TracingEnabled returns if tracing is enabled for the MultiStore. +func (cms Store) TracingEnabled() bool { + return cms.traceWriter != nil +} + +// LatestVersion returns the branch version of the store +func (cms Store) LatestVersion() int64 { + panic("cannot get latest version from branch cached multi-store") +} + +// GetStoreType returns the type of the store. +func (cms Store) GetStoreType() types.StoreType { + return types.StoreTypeMulti +} + +// Write calls Write on each underlying store. +func (cms Store) Write() { + cms.db.Write() + for _, store := range cms.stores { + store.Write() + } +} + +// Implements CacheWrapper. +func (cms Store) CacheWrap() types.CacheWrap { + return cms.CacheMultiStore().(types.CacheWrap) +} + +// CacheWrapWithTrace implements the CacheWrapper interface. +func (cms Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap { + return cms.CacheWrap() +} + +// Implements MultiStore. 
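+// CacheMultiStore branches the already-branched store again; writes to the
+// returned store are buffered until its own Write is called.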
+func (cms Store) CacheMultiStore() types.CacheMultiStore { + return newCacheMultiStoreFromCMS(cms) +} + +// CacheMultiStoreWithVersion implements the MultiStore interface. It will panic +// as an already cached multi-store cannot load previous versions. +// +// TODO: The store implementation can possibly be modified to support this as it +// seems safe to load previous versions (heights). +func (cms Store) CacheMultiStoreWithVersion(_ int64) (types.CacheMultiStore, error) { + panic("cannot branch cached multi-store with a version") +} + +// GetStore returns an underlying Store by key. +func (cms Store) GetStore(key types.StoreKey) types.Store { + s := cms.stores[key] + if key == nil || s == nil { + panic(fmt.Sprintf("kv store with key %v has not been registered in stores", key)) + } + return s.(types.Store) +} + +// GetKVStore returns an underlying KVStore by key. +func (cms Store) GetKVStore(key types.StoreKey) types.KVStore { + store := cms.stores[key] + if key == nil || store == nil { + panic(fmt.Sprintf("kv store with key %v has not been registered in stores", key)) + } + return store.(types.KVStore) +} diff --git a/cosmos-sdk-store/cachemulti/store_test.go b/cosmos-sdk-store/cachemulti/store_test.go new file mode 100755 index 000000000..0ea7785bf --- /dev/null +++ b/cosmos-sdk-store/cachemulti/store_test.go @@ -0,0 +1,24 @@ +package cachemulti + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "cosmossdk.io/store/types" +) + +func TestStoreGetKVStore(t *testing.T) { + require := require.New(t) + + s := Store{stores: map[types.StoreKey]types.CacheWrap{}} + key := types.NewKVStoreKey("abc") + errMsg := fmt.Sprintf("kv store with key %v has not been registered in stores", key) + + require.PanicsWithValue(errMsg, + func() { s.GetStore(key) }) + + require.PanicsWithValue(errMsg, + func() { s.GetKVStore(key) }) +} diff --git a/cosmos-sdk-store/dbadapter/store.go b/cosmos-sdk-store/dbadapter/store.go new file mode 100755 index 000000000..013e26df2 --- /dev/null +++ b/cosmos-sdk-store/dbadapter/store.go @@ -0,0 +1,90 @@ +package dbadapter + +import ( + "io" + + dbm "github.com/cosmos/cosmos-db" + + "cosmossdk.io/store/cachekv" + "cosmossdk.io/store/tracekv" + "cosmossdk.io/store/types" +) + +// Wrapper type for dbm.Db with implementation of KVStore +type Store struct { + dbm.DB +} + +// Get wraps the underlying DB's Get method panicing on error. +func (dsa Store) Get(key []byte) []byte { + v, err := dsa.DB.Get(key) + if err != nil { + panic(err) + } + + return v +} + +// Has wraps the underlying DB's Has method panicing on error. +func (dsa Store) Has(key []byte) bool { + ok, err := dsa.DB.Has(key) + if err != nil { + panic(err) + } + + return ok +} + +// Set wraps the underlying DB's Set method panicing on error. +func (dsa Store) Set(key, value []byte) { + types.AssertValidKey(key) + types.AssertValidValue(value) + if err := dsa.DB.Set(key, value); err != nil { + panic(err) + } +} + +// Delete wraps the underlying DB's Delete method panicing on error. +func (dsa Store) Delete(key []byte) { + if err := dsa.DB.Delete(key); err != nil { + panic(err) + } +} + +// Iterator wraps the underlying DB's Iterator method panicing on error. +func (dsa Store) Iterator(start, end []byte) types.Iterator { + iter, err := dsa.DB.Iterator(start, end) + if err != nil { + panic(err) + } + + return iter +} + +// ReverseIterator wraps the underlying DB's ReverseIterator method panicing on error. 
+func (dsa Store) ReverseIterator(start, end []byte) types.Iterator { + iter, err := dsa.DB.ReverseIterator(start, end) + if err != nil { + panic(err) + } + + return iter +} + +// GetStoreType returns the type of the store. +func (Store) GetStoreType() types.StoreType { + return types.StoreTypeDB +} + +// CacheWrap branches the underlying store. +func (dsa Store) CacheWrap() types.CacheWrap { + return cachekv.NewStore(dsa) +} + +// CacheWrapWithTrace implements KVStore. +func (dsa Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap { + return cachekv.NewStore(tracekv.NewStore(dsa, w, tc)) +} + +// dbm.DB implements KVStore so we can CacheKVStore it. +var _ types.KVStore = Store{} diff --git a/cosmos-sdk-store/dbadapter/store_test.go b/cosmos-sdk-store/dbadapter/store_test.go new file mode 100755 index 000000000..9685887f9 --- /dev/null +++ b/cosmos-sdk-store/dbadapter/store_test.go @@ -0,0 +1,86 @@ +package dbadapter_test + +import ( + "bytes" + "errors" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "cosmossdk.io/store/cachekv" + "cosmossdk.io/store/dbadapter" + "cosmossdk.io/store/mock" + "cosmossdk.io/store/types" +) + +var errFoo = errors.New("dummy") + +func TestAccessors(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + mockDB := mock.NewMockDB(mockCtrl) + store := dbadapter.Store{mockDB} + key := []byte("test") + value := []byte("testvalue") + + require.Panics(t, func() { store.Set(nil, []byte("value")) }, "setting a nil key should panic") + require.Panics(t, func() { store.Set([]byte(""), []byte("value")) }, "setting an empty key should panic") + + require.Equal(t, types.StoreTypeDB, store.GetStoreType()) + store.GetStoreType() + + retFoo := []byte("xxx") + mockDB.EXPECT().Get(gomock.Eq(key)).Times(1).Return(retFoo, nil) + require.True(t, bytes.Equal(retFoo, store.Get(key))) + + mockDB.EXPECT().Get(gomock.Eq(key)).Times(1).Return(nil, errFoo) + require.Panics(t, func() { store.Get(key) }) + + mockDB.EXPECT().Has(gomock.Eq(key)).Times(1).Return(true, nil) + require.True(t, store.Has(key)) + + mockDB.EXPECT().Has(gomock.Eq(key)).Times(1).Return(false, nil) + require.False(t, store.Has(key)) + + mockDB.EXPECT().Has(gomock.Eq(key)).Times(1).Return(false, errFoo) + require.Panics(t, func() { store.Has(key) }) + + mockDB.EXPECT().Set(gomock.Eq(key), gomock.Eq(value)).Times(1).Return(nil) + require.NotPanics(t, func() { store.Set(key, value) }) + + mockDB.EXPECT().Set(gomock.Eq(key), gomock.Eq(value)).Times(1).Return(errFoo) + require.Panics(t, func() { store.Set(key, value) }) + + mockDB.EXPECT().Delete(gomock.Eq(key)).Times(1).Return(nil) + require.NotPanics(t, func() { store.Delete(key) }) + + mockDB.EXPECT().Delete(gomock.Eq(key)).Times(1).Return(errFoo) + require.Panics(t, func() { store.Delete(key) }) + + start, end := []byte("start"), []byte("end") + mockDB.EXPECT().Iterator(gomock.Eq(start), gomock.Eq(end)).Times(1).Return(nil, nil) + require.NotPanics(t, func() { store.Iterator(start, end) }) + + mockDB.EXPECT().Iterator(gomock.Eq(start), gomock.Eq(end)).Times(1).Return(nil, errFoo) + require.Panics(t, func() { store.Iterator(start, end) }) + + mockDB.EXPECT().ReverseIterator(gomock.Eq(start), gomock.Eq(end)).Times(1).Return(nil, nil) + require.NotPanics(t, func() { store.ReverseIterator(start, end) }) + + mockDB.EXPECT().ReverseIterator(gomock.Eq(start), gomock.Eq(end)).Times(1).Return(nil, errFoo) + require.Panics(t, func() { store.ReverseIterator(start, end) }) +} + +func 
TestCacheWraps(t *testing.T) { + mockCtrl := gomock.NewController(t) + mockDB := mock.NewMockDB(mockCtrl) + store := dbadapter.Store{mockDB} + + cacheWrapper := store.CacheWrap() + require.IsType(t, &cachekv.Store{}, cacheWrapper) + + cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil) + require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace) +} diff --git a/cosmos-sdk-store/gaskv/store.go b/cosmos-sdk-store/gaskv/store.go new file mode 100755 index 000000000..e0f96af71 --- /dev/null +++ b/cosmos-sdk-store/gaskv/store.go @@ -0,0 +1,176 @@ +package gaskv + +import ( + "io" + + "cosmossdk.io/store/types" +) + +var _ types.KVStore = &Store{} + +// Store applies gas tracking to an underlying KVStore. It implements the +// KVStore interface. +type Store struct { + gasMeter types.GasMeter + gasConfig types.GasConfig + parent types.KVStore +} + +// NewStore returns a reference to a new GasKVStore. +func NewStore(parent types.KVStore, gasMeter types.GasMeter, gasConfig types.GasConfig) *Store { + kvs := &Store{ + gasMeter: gasMeter, + gasConfig: gasConfig, + parent: parent, + } + return kvs +} + +// Implements Store. +func (gs *Store) GetStoreType() types.StoreType { + return gs.parent.GetStoreType() +} + +// Implements KVStore. +func (gs *Store) Get(key []byte) (value []byte) { + gs.gasMeter.ConsumeGas(gs.gasConfig.ReadCostFlat, types.GasReadCostFlatDesc) + value = gs.parent.Get(key) + + // TODO overflow-safe math? + gs.gasMeter.ConsumeGas(gs.gasConfig.ReadCostPerByte*types.Gas(len(key)), types.GasReadPerByteDesc) + gs.gasMeter.ConsumeGas(gs.gasConfig.ReadCostPerByte*types.Gas(len(value)), types.GasReadPerByteDesc) + + return value +} + +// Implements KVStore. +func (gs *Store) Set(key, value []byte) { + types.AssertValidKey(key) + types.AssertValidValue(value) + gs.gasMeter.ConsumeGas(gs.gasConfig.WriteCostFlat, types.GasWriteCostFlatDesc) + // TODO overflow-safe math? + gs.gasMeter.ConsumeGas(gs.gasConfig.WriteCostPerByte*types.Gas(len(key)), types.GasWritePerByteDesc) + gs.gasMeter.ConsumeGas(gs.gasConfig.WriteCostPerByte*types.Gas(len(value)), types.GasWritePerByteDesc) + gs.parent.Set(key, value) +} + +// Implements KVStore. +func (gs *Store) Has(key []byte) bool { + gs.gasMeter.ConsumeGas(gs.gasConfig.HasCost, types.GasHasDesc) + return gs.parent.Has(key) +} + +// Implements KVStore. +func (gs *Store) Delete(key []byte) { + // charge gas to prevent certain attack vectors even though space is being freed + gs.gasMeter.ConsumeGas(gs.gasConfig.DeleteCost, types.GasDeleteDesc) + gs.parent.Delete(key) +} + +// Iterator implements the KVStore interface. It returns an iterator which +// incurs a flat gas cost for seeking to the first key/value pair and a variable +// gas cost based on the current value's length if the iterator is valid. +func (gs *Store) Iterator(start, end []byte) types.Iterator { + return gs.iterator(start, end, true) +} + +// ReverseIterator implements the KVStore interface. It returns a reverse +// iterator which incurs a flat gas cost for seeking to the first key/value pair +// and a variable gas cost based on the current value's length if the iterator +// is valid. +func (gs *Store) ReverseIterator(start, end []byte) types.Iterator { + return gs.iterator(start, end, false) +} + +// Implements KVStore. +func (gs *Store) CacheWrap() types.CacheWrap { + panic("cannot CacheWrap a GasKVStore") +} + +// CacheWrapWithTrace implements the KVStore interface. 
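+// Like CacheWrap, it panics, since a GasKVStore is not meant to be branched
+// directly.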
+func (gs *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap { + panic("cannot CacheWrapWithTrace a GasKVStore") +} + +func (gs *Store) iterator(start, end []byte, ascending bool) types.Iterator { + var parent types.Iterator + if ascending { + parent = gs.parent.Iterator(start, end) + } else { + parent = gs.parent.ReverseIterator(start, end) + } + + gi := newGasIterator(gs.gasMeter, gs.gasConfig, parent) + gi.(*gasIterator).consumeSeekGas() + + return gi +} + +type gasIterator struct { + gasMeter types.GasMeter + gasConfig types.GasConfig + parent types.Iterator +} + +func newGasIterator(gasMeter types.GasMeter, gasConfig types.GasConfig, parent types.Iterator) types.Iterator { + return &gasIterator{ + gasMeter: gasMeter, + gasConfig: gasConfig, + parent: parent, + } +} + +// Implements Iterator. +func (gi *gasIterator) Domain() (start, end []byte) { + return gi.parent.Domain() +} + +// Implements Iterator. +func (gi *gasIterator) Valid() bool { + return gi.parent.Valid() +} + +// Next implements the Iterator interface. It seeks to the next key/value pair +// in the iterator. It incurs a flat gas cost for seeking and a variable gas +// cost based on the current value's length if the iterator is valid. +func (gi *gasIterator) Next() { + gi.consumeSeekGas() + gi.parent.Next() +} + +// Key implements the Iterator interface. It returns the current key and it does +// not incur any gas cost. +func (gi *gasIterator) Key() (key []byte) { + key = gi.parent.Key() + return key +} + +// Value implements the Iterator interface. It returns the current value and it +// does not incur any gas cost. +func (gi *gasIterator) Value() (value []byte) { + value = gi.parent.Value() + return value +} + +// Implements Iterator. +func (gi *gasIterator) Close() error { + return gi.parent.Close() +} + +// Error delegates the Error call to the parent iterator. +func (gi *gasIterator) Error() error { + return gi.parent.Error() +} + +// consumeSeekGas consumes on each iteration step a flat gas cost and a variable gas cost +// based on the current value's length. 
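+// Note that the flat IterNextCostFlat gas is charged even when the iterator
+// is exhausted, so every seek has a lower-bound cost.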
+func (gi *gasIterator) consumeSeekGas() { + if gi.Valid() { + key := gi.Key() + value := gi.Value() + + gi.gasMeter.ConsumeGas(gi.gasConfig.ReadCostPerByte*types.Gas(len(key)), types.GasValuePerByteDesc) + gi.gasMeter.ConsumeGas(gi.gasConfig.ReadCostPerByte*types.Gas(len(value)), types.GasValuePerByteDesc) + } + gi.gasMeter.ConsumeGas(gi.gasConfig.IterNextCostFlat, types.GasIterNextCostFlatDesc) +} diff --git a/cosmos-sdk-store/gaskv/store_test.go b/cosmos-sdk-store/gaskv/store_test.go new file mode 100755 index 000000000..354832d17 --- /dev/null +++ b/cosmos-sdk-store/gaskv/store_test.go @@ -0,0 +1,120 @@ +package gaskv_test + +import ( + "fmt" + "testing" + + dbm "github.com/cosmos/cosmos-db" + "github.com/stretchr/testify/require" + + "cosmossdk.io/store/dbadapter" + "cosmossdk.io/store/gaskv" + "cosmossdk.io/store/types" +) + +func bz(s string) []byte { return []byte(s) } + +func keyFmt(i int) []byte { return bz(fmt.Sprintf("key%0.8d", i)) } +func valFmt(i int) []byte { return bz(fmt.Sprintf("value%0.8d", i)) } + +func TestGasKVStoreBasic(t *testing.T) { + mem := dbadapter.Store{DB: dbm.NewMemDB()} + meter := types.NewGasMeter(10000) + st := gaskv.NewStore(mem, meter, types.KVGasConfig()) + + require.Equal(t, types.StoreTypeDB, st.GetStoreType()) + require.Panics(t, func() { st.CacheWrap() }) + require.Panics(t, func() { st.CacheWrapWithTrace(nil, nil) }) + + require.Panics(t, func() { st.Set(nil, []byte("value")) }, "setting a nil key should panic") + require.Panics(t, func() { st.Set([]byte(""), []byte("value")) }, "setting an empty key should panic") + + require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty") + st.Set(keyFmt(1), valFmt(1)) + require.Equal(t, valFmt(1), st.Get(keyFmt(1))) + st.Delete(keyFmt(1)) + require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty") + require.Equal(t, meter.GasConsumed(), types.Gas(6858)) +} + +func TestGasKVStoreIterator(t *testing.T) { + mem := dbadapter.Store{DB: dbm.NewMemDB()} + meter := types.NewGasMeter(100000) + st := gaskv.NewStore(mem, meter, types.KVGasConfig()) + require.False(t, st.Has(keyFmt(1))) + require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty") + require.Empty(t, st.Get(keyFmt(2)), "Expected `key2` to be empty") + require.Empty(t, st.Get(keyFmt(3)), "Expected `key3` to be empty") + + st.Set(keyFmt(1), valFmt(1)) + require.True(t, st.Has(keyFmt(1))) + st.Set(keyFmt(2), valFmt(2)) + require.True(t, st.Has(keyFmt(2))) + st.Set(keyFmt(3), valFmt(0)) + + iterator := st.Iterator(nil, nil) + start, end := iterator.Domain() + require.Nil(t, start) + require.Nil(t, end) + require.NoError(t, iterator.Error()) + + t.Cleanup(func() { + if err := iterator.Close(); err != nil { + t.Fatal(err) + } + }) + ka := iterator.Key() + require.Equal(t, ka, keyFmt(1)) + va := iterator.Value() + require.Equal(t, va, valFmt(1)) + iterator.Next() + kb := iterator.Key() + require.Equal(t, kb, keyFmt(2)) + vb := iterator.Value() + require.Equal(t, vb, valFmt(2)) + iterator.Next() + require.Equal(t, types.Gas(14565), meter.GasConsumed()) + kc := iterator.Key() + require.Equal(t, kc, keyFmt(3)) + vc := iterator.Value() + require.Equal(t, vc, valFmt(0)) + iterator.Next() + require.Equal(t, types.Gas(14667), meter.GasConsumed()) + require.False(t, iterator.Valid()) + require.Panics(t, iterator.Next) + require.Equal(t, types.Gas(14697), meter.GasConsumed()) + require.NoError(t, iterator.Error()) + + reverseIterator := st.ReverseIterator(nil, nil) + t.Cleanup(func() { + if err := reverseIterator.Close(); err != nil { + 
t.Fatal(err) + } + }) + require.Equal(t, reverseIterator.Key(), keyFmt(3)) + reverseIterator.Next() + require.Equal(t, reverseIterator.Key(), keyFmt(2)) + reverseIterator.Next() + require.Equal(t, reverseIterator.Key(), keyFmt(1)) + reverseIterator.Next() + require.False(t, reverseIterator.Valid()) + require.Panics(t, reverseIterator.Next) + require.Equal(t, types.Gas(15135), meter.GasConsumed()) +} + +func TestGasKVStoreOutOfGasSet(t *testing.T) { + mem := dbadapter.Store{DB: dbm.NewMemDB()} + meter := types.NewGasMeter(0) + st := gaskv.NewStore(mem, meter, types.KVGasConfig()) + require.Panics(t, func() { st.Set(keyFmt(1), valFmt(1)) }, "Expected out-of-gas") +} + +func TestGasKVStoreOutOfGasIterator(t *testing.T) { + mem := dbadapter.Store{DB: dbm.NewMemDB()} + meter := types.NewGasMeter(20000) + st := gaskv.NewStore(mem, meter, types.KVGasConfig()) + st.Set(keyFmt(1), valFmt(1)) + iterator := st.Iterator(nil, nil) + iterator.Next() + require.Panics(t, func() { iterator.Value() }, "Expected out-of-gas") +} diff --git a/cosmos-sdk-store/go.mod b/cosmos-sdk-store/go.mod new file mode 100755 index 000000000..f587701c2 --- /dev/null +++ b/cosmos-sdk-store/go.mod @@ -0,0 +1,78 @@ +module cosmossdk.io/store + +go 1.21 + +require ( + cosmossdk.io/errors v1.0.0 + cosmossdk.io/log v1.3.1 + cosmossdk.io/math v1.3.0 + github.com/cometbft/cometbft v0.38.6 + github.com/cosmos/cosmos-db v1.0.2 + github.com/cosmos/gogoproto v1.4.11 + github.com/cosmos/iavl v1.1.1 + github.com/cosmos/ics23/go v0.10.0 + github.com/golang/mock v1.6.0 + github.com/golang/protobuf v1.5.4 // indirect + github.com/hashicorp/go-hclog v1.5.0 + github.com/hashicorp/go-metrics v0.5.1 + github.com/hashicorp/go-plugin v1.5.2 + github.com/hashicorp/golang-lru v1.0.2 + github.com/spf13/cast v1.6.0 // indirect + github.com/stretchr/testify v1.8.4 + github.com/tidwall/btree v1.7.0 + golang.org/x/exp v0.0.0-20240314144324-c7f7c6466f7f + google.golang.org/grpc v1.60.0 + google.golang.org/protobuf v1.33.0 + gotest.tools/v3 v3.5.1 +) + +require ( + github.com/DataDog/zstd v1.5.5 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cockroachdb/errors v1.11.1 // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/pebble v1.1.0 // indirect + github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect + github.com/emicklei/dot v1.6.1 // indirect + github.com/fatih/color v1.15.0 // indirect + github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/btree v1.1.2 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/hashicorp/go-immutable-radix v1.0.0 // indirect + github.com/hashicorp/go-uuid v1.0.1 // indirect + github.com/hashicorp/yamux v0.1.1 // indirect + github.com/jhump/protoreflect v1.15.3 // indirect + github.com/klauspost/compress v1.17.7 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/linxGnu/grocksdb v1.8.12 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mitchellh/go-testing-interface 
v1.14.1 // indirect + github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae // indirect + github.com/oklog/run v1.1.0 // indirect + github.com/petermattis/goid v0.0.0-20221215004737-a150e88a970d // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.19.0 // indirect + github.com/prometheus/client_model v0.6.0 // indirect + github.com/prometheus/common v0.50.0 // indirect + github.com/prometheus/procfs v0.13.0 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/rs/zerolog v1.32.0 // indirect + github.com/sasha-s/go-deadlock v0.3.1 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect + golang.org/x/crypto v0.21.0 // indirect + golang.org/x/net v0.22.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/cosmos-sdk-store/go.sum b/cosmos-sdk-store/go.sum new file mode 100755 index 000000000..a2692063e --- /dev/null +++ b/cosmos-sdk-store/go.sum @@ -0,0 +1,377 @@ +cosmossdk.io/errors v1.0.0 h1:nxF07lmlBbB8NKQhtJ+sJm6ef5uV1XkvPXG2bUntb04= +cosmossdk.io/errors v1.0.0/go.mod h1:+hJZLuhdDE0pYN8HkOrVNwrIOYvUGnn6+4fjnJs/oV0= +cosmossdk.io/log v1.3.1 h1:UZx8nWIkfbbNEWusZqzAx3ZGvu54TZacWib3EzUYmGI= +cosmossdk.io/log v1.3.1/go.mod h1:2/dIomt8mKdk6vl3OWJcPk2be3pGOS8OQaLUM/3/tCM= +cosmossdk.io/math v1.3.0 h1:RC+jryuKeytIiictDslBP9i1fhkVm6ZDmZEoNP316zE= +cosmossdk.io/math v1.3.0/go.mod h1:vnRTxewy+M7BtXBNFybkuhSH4WfedVAAnERHgVFhp3k= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= +github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= +github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= +github.com/btcsuite/btcd/btcutil v1.1.3 h1:xfbtw8lwpp0G6NwSHb+UE67ryTFHJAiNuipusjXSohQ= +github.com/btcsuite/btcd/btcutil v1.1.3/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/bufbuild/protocompile v0.6.0 h1:Uu7WiSQ6Yj9DbkdnOe7U4mNKp58y9WDMKDn28/ZlunY= +github.com/bufbuild/protocompile 
v0.6.0/go.mod h1:YNP35qEYoYGme7QMtz5SBCoN4kL4g12jTtjuzRNdjpE= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8= +github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/pebble v1.1.0 h1:pcFh8CdCIt2kmEpK0OIatq67Ln9uGDYY3d5XnE0LJG4= +github.com/cockroachdb/pebble v1.1.0/go.mod h1:sEHm5NOXxyiAoKWhoFxT8xMgd/f3RA6qUqQ1BXKrh2E= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= +github.com/cometbft/cometbft v0.38.6 h1:QSgpCzrGWJ2KUq1qpw+FCfASRpE27T6LQbfEHscdyOk= +github.com/cometbft/cometbft v0.38.6/go.mod h1:8rSPxzUJYquCN8uuBgbUHOMg2KAwvr7CyUw+6ukO4nw= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cosmos/cosmos-db v1.0.2 h1:hwMjozuY1OlJs/uh6vddqnk9j7VamLv+0DBlbEXbAKs= +github.com/cosmos/cosmos-db v1.0.2/go.mod h1:Z8IXcFJ9PqKK6BIsVOB3QXtkKoqUOp1vRvPT39kOXEA= +github.com/cosmos/gogoproto v1.4.11 h1:LZcMHrx4FjUgrqQSWeaGC1v/TeuVFqSLa43CC6aWR2g= +github.com/cosmos/gogoproto v1.4.11/go.mod h1:/g39Mh8m17X8Q/GDEs5zYTSNaNnInBSohtaxzQnYq1Y= +github.com/cosmos/iavl v1.1.1 h1:64nTi8s3gEoGqhA8TyAWFWfz7/pg0anKzHNSc1ETc7Q= +github.com/cosmos/iavl v1.1.1/go.mod h1:jLeUvm6bGT1YutCaL2fIar/8vGUE8cPZvh/gXEWDaDM= +github.com/cosmos/ics23/go v0.10.0 h1:iXqLLgp2Lp+EdpIuwXTYIQU+AiHj9mOC2X9ab++bZDM= +github.com/cosmos/ics23/go v0.10.0/go.mod h1:ZfJSmng/TBNTBkFemHHHj5YY7VAU/MBU980F4VU1NG0= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew 
v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= +github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/emicklei/dot v1.6.1 h1:ujpDlBkkwgWUY+qPId5IwapRW/xEoligRSYjioR6DFI= +github.com/emicklei/dot v1.6.1/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= +github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod 
h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-metrics v0.5.1 h1:rfPwUqFU6uZXNvGl4hzjY8LEBsqFVU4si1H9/Hqck/U= +github.com/hashicorp/go-metrics v0.5.1/go.mod h1:KEjodfebIOuBYSAe/bHTm+HChmKSxAOXPBieMLYozDE= +github.com/hashicorp/go-plugin v1.5.2 h1:aWv8eimFqWlsEiMrYZdPYl+FdHaBJSN4AWwGWfT1G2Y= +github.com/hashicorp/go-plugin v1.5.2/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= +github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle 
v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jhump/protoreflect v1.15.3 h1:6SFRuqU45u9hIZPJAoZ8c28T3nK64BNdp9w6jFonzls= +github.com/jhump/protoreflect v1.15.3/go.mod h1:4ORHmSBmlCW8fh3xHmJMGyul1zNqZK4Elxc8qKP+p1k= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= +github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/linxGnu/grocksdb v1.8.12 h1:1/pCztQUOa3BX/1gR3jSZDoaKFpeHFvQ1XrqZpSvZVo= +github.com/linxGnu/grocksdb v1.8.12/go.mod h1:xZCIb5Muw+nhbDK4Y5UJuOrin5MceOuiXkVUR7vp4WY= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod 
h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae h1:FatpGJD2jmJfhZiFDElaC0QhZUDQnxUeAwTGkfAHN3I= +github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae/go.mod h1:hVoHR2EVESiICEMbg137etN/Lx+lSrHPTD39Z/uE+2s= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.26.0 h1:03cDLK28U6hWvCAns6NeydX3zIm4SF3ci69ulidS32Q= +github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= +github.com/petermattis/goid v0.0.0-20221215004737-a150e88a970d h1:htwtWgtQo8YS6JFWWi2DNgY0RwSGJ1ruMoxY6CUUclk= +github.com/petermattis/goid v0.0.0-20221215004737-a150e88a970d/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= 
+github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= +github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= +github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.50.0 h1:YSZE6aa9+luNa2da6/Tik0q0A5AbR+U003TItK57CPQ= +github.com/prometheus/common v0.50.0/go.mod h1:wHFBCEVWVmHMUpg7pYcOm2QUR/ocQdYSJVQJKnHc3xQ= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o= +github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.32.0 h1:keLypqrlIjaFsbmJOBdB/qvyF8KEtCWHwobLp5l/mQ0= +github.com/rs/zerolog v1.32.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= +github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 
+github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= +github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI= +github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/exp v0.0.0-20240314144324-c7f7c6466f7f h1:3CW0unweImhOzd5FmYuRsD4Y4oQFKZIjAnKbjV4WIrw= +golang.org/x/exp v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= +google.golang.org/grpc v1.60.0 h1:6FQAR0kM31P6MRdeluor2w2gPaS4SVNrD/DNTxrQ15k= +google.golang.org/grpc v1.60.0/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/cosmos-sdk-store/iavl/store.go b/cosmos-sdk-store/iavl/store.go new file mode 100755 index 000000000..7bdcb1df9 --- /dev/null +++ b/cosmos-sdk-store/iavl/store.go @@ -0,0 +1,417 @@ +package iavl + +import ( + "errors" + "fmt" + "io" + + cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto" + dbm "github.com/cosmos/cosmos-db" + "github.com/cosmos/iavl" + ics23 "github.com/cosmos/ics23/go" + + errorsmod "cosmossdk.io/errors" + "cosmossdk.io/log" + "cosmossdk.io/store/cachekv" + "cosmossdk.io/store/internal/kv" + "cosmossdk.io/store/metrics" + pruningtypes "cosmossdk.io/store/pruning/types" + "cosmossdk.io/store/tracekv" + "cosmossdk.io/store/types" + "cosmossdk.io/store/wrapper" +) + +const ( + DefaultIAVLCacheSize = 500000 +) + +var ( + _ types.KVStore = (*Store)(nil) + _ types.CommitStore = (*Store)(nil) + _ types.CommitKVStore = (*Store)(nil) + _ types.Queryable = (*Store)(nil) + _ types.StoreWithInitialVersion = (*Store)(nil) +) + +// Store Implements types.KVStore and CommitKVStore. +type Store struct { + tree Tree + logger log.Logger + metrics metrics.StoreMetrics +} + +// LoadStore returns an IAVL Store as a CommitKVStore. Internally, it will load the +// store's version (id) from the provided DB. An error is returned if the version +// fails to load, or if called with a positive version on an empty tree. 
+func LoadStore(db dbm.DB, logger log.Logger, key types.StoreKey, id types.CommitID, cacheSize int, disableFastNode bool, metrics metrics.StoreMetrics) (types.CommitKVStore, error) { + return LoadStoreWithInitialVersion(db, logger, key, id, 0, cacheSize, disableFastNode, metrics) +} + +// LoadStoreWithInitialVersion returns an IAVL Store as a CommitKVStore setting its initialVersion +// to the one given. Internally, it will load the store's version (id) from the +// provided DB. An error is returned if the version fails to load, or if called with a positive +// version on an empty tree. +func LoadStoreWithInitialVersion(db dbm.DB, logger log.Logger, key types.StoreKey, id types.CommitID, initialVersion uint64, cacheSize int, disableFastNode bool, metrics metrics.StoreMetrics) (types.CommitKVStore, error) { + tree := iavl.NewMutableTree(wrapper.NewDBWrapper(db), cacheSize, disableFastNode, logger, iavl.InitialVersionOption(initialVersion)) + + isUpgradeable, err := tree.IsUpgradeable() + if err != nil { + return nil, err + } + + if isUpgradeable && logger != nil { + logger.Info( + "Upgrading IAVL storage for faster queries + execution on live state. This may take a while", + "store_key", key.String(), + "version", initialVersion, + "commit", fmt.Sprintf("%X", id), + ) + } + + _, err = tree.LoadVersion(id.Version) + if err != nil { + return nil, err + } + + if logger != nil { + logger.Debug("Finished loading IAVL tree") + } + + return &Store{ + tree: tree, + logger: logger, + metrics: metrics, + }, nil +} + +// UnsafeNewStore returns a reference to a new IAVL Store with a given mutable +// IAVL tree reference. It should only be used for testing purposes. +// +// CONTRACT: The IAVL tree should be fully loaded. +// CONTRACT: PruningOptions passed in as argument must be the same as pruning options +// passed into iavl.MutableTree +func UnsafeNewStore(tree *iavl.MutableTree) *Store { + return &Store{ + tree: tree, + metrics: metrics.NewNoOpMetrics(), + } +} + +// GetImmutable returns a reference to a new store backed by an immutable IAVL +// tree at a specific version (height) without any pruning options. This should +// be used for querying and iteration only. If the version does not exist or has +// been pruned, an empty immutable IAVL tree will be used. +// Any mutable operations executed will result in a panic. +func (st *Store) GetImmutable(version int64) (*Store, error) { + if !st.VersionExists(version) { + return nil, errors.New("version mismatch on immutable IAVL tree; version does not exist. Version has either been pruned, or is for a future block height") + } + + iTree, err := st.tree.GetImmutable(version) + if err != nil { + return nil, err + } + + return &Store{ + tree: &immutableTree{iTree}, + metrics: st.metrics, + }, nil +} + +// Commit commits the current store state and returns a CommitID with the new +// version and hash. +func (st *Store) Commit() types.CommitID { + defer st.metrics.MeasureSince("store", "iavl", "commit") + + hash, version, err := st.tree.SaveVersion() + if err != nil { + panic(err) + } + + return types.CommitID{ + Version: version, + Hash: hash, + } +} + +// WorkingHash returns the hash of the current working tree. +func (st *Store) WorkingHash() []byte { + return st.tree.WorkingHash() +} + +// LastCommitID implements Committer. 
+func (st *Store) LastCommitID() types.CommitID {
+	return types.CommitID{
+		Version: st.tree.Version(),
+		Hash:    st.tree.Hash(),
+	}
+}
+
+// SetPruning panics as pruning options should be provided at initialization
+// since IAVL accepts pruning options directly.
+func (st *Store) SetPruning(_ pruningtypes.PruningOptions) {
+	panic("cannot set pruning options on an initialized IAVL store")
+}
+
+// GetPruning panics as pruning options should be provided at initialization
+// since IAVL accepts pruning options directly.
+func (st *Store) GetPruning() pruningtypes.PruningOptions {
+	panic("cannot get pruning options on an initialized IAVL store")
+}
+
+// VersionExists returns whether a given version is stored.
+func (st *Store) VersionExists(version int64) bool {
+	return st.tree.VersionExists(version)
+}
+
+// GetAllVersions returns all versions in the IAVL tree.
+func (st *Store) GetAllVersions() []int {
+	return st.tree.AvailableVersions()
+}
+
+// GetStoreType implements Store.
+func (st *Store) GetStoreType() types.StoreType {
+	return types.StoreTypeIAVL
+}
+
+// CacheWrap implements Store.
+func (st *Store) CacheWrap() types.CacheWrap {
+	return cachekv.NewStore(st)
+}
+
+// CacheWrapWithTrace implements the Store interface.
+func (st *Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap {
+	return cachekv.NewStore(tracekv.NewStore(st, w, tc))
+}
+
+// Set implements types.KVStore.
+func (st *Store) Set(key, value []byte) {
+	types.AssertValidKey(key)
+	types.AssertValidValue(value)
+	_, err := st.tree.Set(key, value)
+	if err != nil && st.logger != nil {
+		st.logger.Error("iavl set error", "error", err.Error())
+	}
+}
+
+// Get implements types.KVStore.
+func (st *Store) Get(key []byte) []byte {
+	defer st.metrics.MeasureSince("store", "iavl", "get")
+	value, err := st.tree.Get(key)
+	if err != nil {
+		panic(err)
+	}
+	return value
+}
+
+// Has implements types.KVStore.
+func (st *Store) Has(key []byte) (exists bool) {
+	defer st.metrics.MeasureSince("store", "iavl", "has")
+	has, err := st.tree.Has(key)
+	if err != nil {
+		panic(err)
+	}
+	return has
+}
+
+// Delete implements types.KVStore.
+func (st *Store) Delete(key []byte) {
+	defer st.metrics.MeasureSince("store", "iavl", "delete")
+	_, _, err := st.tree.Remove(key)
+	if err != nil {
+		panic(err)
+	}
+}
+
+// DeleteVersionsTo deletes versions up to the given version from the MutableTree. An error
+// is returned if any single version is invalid or the delete fails. All writes
+// happen in a single batch with a single commit.
+func (st *Store) DeleteVersionsTo(version int64) error {
+	return st.tree.DeleteVersionsTo(version)
+}
+
+// LoadVersionForOverwriting attempts to load a tree at a previously committed
+// version. Any versions greater than targetVersion will be deleted.
+func (st *Store) LoadVersionForOverwriting(targetVersion int64) error {
+	return st.tree.LoadVersionForOverwriting(targetVersion)
+}
+
+// Iterator implements types.KVStore.
+func (st *Store) Iterator(start, end []byte) types.Iterator {
+	iterator, err := st.tree.Iterator(start, end, true)
+	if err != nil {
+		panic(err)
+	}
+	return iterator
+}
+
+// ReverseIterator implements types.KVStore.
+func (st *Store) ReverseIterator(start, end []byte) types.Iterator {
+	iterator, err := st.tree.Iterator(start, end, false)
+	if err != nil {
+		panic(err)
+	}
+	return iterator
+}
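+
+// Usage sketch (illustrative only; "st" is assumed to be a *Store obtained
+// from LoadStore or UnsafeNewStore): writes go to the working tree and become
+// a new persisted version on Commit, after which they are visible to readers
+// and iterators.
+//
+//	st.Set([]byte("key"), []byte("value")) // stage a write in the working tree
+//	_ = st.Has([]byte("key"))              // true
+//	id := st.Commit()                      // persist as version id.Version
+//	it := st.Iterator(nil, nil)            // iterate the full key range
+//	for ; it.Valid(); it.Next() {
+//		_, _ = it.Key(), it.Value()
+//	}
+//	_ = it.Close()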
+
+// SetInitialVersion sets the initial version of the IAVL tree. It is used when
+// starting a new chain at an arbitrary height.
+func (st *Store) SetInitialVersion(version int64) {
+	st.tree.SetInitialVersion(uint64(version))
+}
+
+// Export exports the IAVL store at the given version, returning an iavl.Exporter for the tree.
+func (st *Store) Export(version int64) (*iavl.Exporter, error) {
+	istore, err := st.GetImmutable(version)
+	if err != nil {
+		return nil, errorsmod.Wrapf(err, "iavl export failed for version %v", version)
+	}
+	tree, ok := istore.tree.(*immutableTree)
+	if !ok || tree == nil {
+		return nil, fmt.Errorf("iavl export failed: unable to fetch tree for version %v", version)
+	}
+	return tree.Export()
+}
+
+// Import imports an IAVL tree at the given version, returning an iavl.Importer for importing.
+func (st *Store) Import(version int64) (*iavl.Importer, error) {
+	tree, ok := st.tree.(*iavl.MutableTree)
+	if !ok {
+		return nil, errors.New("iavl import failed: unable to find mutable tree")
+	}
+	return tree.Import(version)
+}
+
+// getHeight returns the height to query at. If req.Height is 0, it falls back
+// to the latest version, preferring latest-1 when it exists so that Merkle
+// proofs are immediately available.
+func getHeight(tree Tree, req *types.RequestQuery) int64 {
+	height := req.Height
+	if height == 0 {
+		latest := tree.Version()
+		if tree.VersionExists(latest - 1) {
+			height = latest - 1
+		} else {
+			height = latest
+		}
+	}
+	return height
+}
+
+// Query implements the Queryable interface; it handles queries by key
+// ("/key") and by subspace ("/subspace").
+//
+// By default we return data from (latest height - 1), since Merkle proofs for
+// it are available immediately (header height = data height + 1). If latest-1
+// is not present, the latest height (which must be present) is used. To see
+// the latest data, e.g. the results of a tx you just sent, you must
+// explicitly set the height you want to query.
+func (st *Store) Query(req *types.RequestQuery) (res *types.ResponseQuery, err error) {
+	defer st.metrics.MeasureSince("store", "iavl", "query")
+
+	if len(req.Data) == 0 {
+		return &types.ResponseQuery{}, errorsmod.Wrap(types.ErrTxDecode, "query cannot be zero length")
+	}
+
+	tree := st.tree
+
+	// store the height we chose in the response, with 0 being changed to the
+	// latest height
+	res = &types.ResponseQuery{
+		Height: getHeight(tree, req),
+	}
+
+	switch req.Path {
+	case "/key": // get by key
+		key := req.Data // data holds the key bytes
+
+		res.Key = key
+		if !st.VersionExists(res.Height) {
+			res.Log = iavl.ErrVersionDoesNotExist.Error()
+			break
+		}
+
+		value, err := tree.GetVersioned(key, res.Height)
+		if err != nil {
+			panic(err)
+		}
+		res.Value = value
+
+		if !req.Prove {
+			break
+		}
+
+		// Continue to prove existence/absence of value
+		// Must convert store.Tree to iavl.MutableTree with given version to use in CreateProof
+		iTree, err := tree.GetImmutable(res.Height)
+		if err != nil {
+			// sanity check: If value for given version was retrieved, immutable tree must also be retrievable
+			panic(fmt.Sprintf("version exists in store but could not retrieve corresponding versioned tree in store, %s", err.Error()))
+		}
+		mtree := &iavl.MutableTree{
+			ImmutableTree: iTree,
+		}
+
+		// get proof from tree and convert to merkle.Proof before adding to result
+		res.ProofOps = getProofFromTree(mtree, req.Data, res.Value != nil)
+
+	case "/subspace":
+		pairs := kv.Pairs{
+			Pairs: make([]kv.Pair, 0),
+		}
+
+		subspace := req.Data
+		res.Key = subspace
+
+		iterator := types.KVStorePrefixIterator(st, subspace)
+		for ; iterator.Valid(); iterator.Next() {
+			pairs.Pairs = append(pairs.Pairs, kv.Pair{Key: iterator.Key(), Value: iterator.Value()})
+		}
+		if err := iterator.Close(); err != nil {
+			panic(fmt.Errorf("failed to close iterator: %w", err))
+		}
+
+		bz, err := pairs.Marshal()
+		if err != nil {
+			panic(fmt.Errorf("failed to marshal KV pairs: %w", err))
+		}
+
+		res.Value = bz
+
+	default:
+		return &types.ResponseQuery{}, errorsmod.Wrapf(types.ErrUnknownRequest, "unexpected query path: %v", req.Path)
+	}
+
+	return res, err
+}
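+
+// Usage sketch (illustrative only): fetching a committed key together with a
+// Merkle proof. Height 0 defaults to latest-1 as described above; set Height
+// explicitly to read the most recent commit.
+//
+//	res, err := st.Query(&types.RequestQuery{
+//		Path:   "/key",
+//		Data:   []byte("key"),
+//		Height: st.LastCommitID().Version,
+//		Prove:  true,
+//	})
+//	// on success, res.Value holds the value and res.ProofOps the proof ops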
+
+// TraverseStateChanges traverses the state changes between two versions and calls the given function.
+func (st *Store) TraverseStateChanges(startVersion, endVersion int64, fn func(version int64, changeSet *iavl.ChangeSet) error) error {
+	return st.tree.TraverseStateChanges(startVersion, endVersion, fn)
+}
+
+// getProofFromTree takes a MutableTree, a key, and a flag for creating an existence or absence
+// proof, and returns the appropriate merkle.Proof. Since this must be called after querying for
+// the value, it should never error; thus, it panics on error rather than returning it.
+func getProofFromTree(tree *iavl.MutableTree, key []byte, exists bool) *cmtprotocrypto.ProofOps {
+	var (
+		commitmentProof *ics23.CommitmentProof
+		err             error
+	)
+
+	if exists {
+		// value was found
+		commitmentProof, err = tree.GetMembershipProof(key)
+		if err != nil {
+			// sanity check: If value was found, membership proof must be creatable
+			panic(fmt.Sprintf("unexpected value for empty proof: %s", err.Error()))
+		}
+	} else {
+		// value wasn't found
+		commitmentProof, err = tree.GetNonMembershipProof(key)
+		if err != nil {
+			// sanity check: If value wasn't found, nonmembership proof must be creatable
+			panic(fmt.Sprintf("unexpected error for nonexistence proof: %s", err.Error()))
+		}
+	}
+
+	op := types.NewIavlCommitmentOp(key, commitmentProof)
+	return &cmtprotocrypto.ProofOps{Ops: []cmtprotocrypto.ProofOp{op.ProofOp()}}
+}
diff --git a/cosmos-sdk-store/iavl/store_test.go b/cosmos-sdk-store/iavl/store_test.go
new file mode 100755
index 000000000..7ad24d7fe
--- /dev/null
+++ b/cosmos-sdk-store/iavl/store_test.go
@@ -0,0 +1,714 @@
+package iavl
+
+import (
+	"bytes"
+	crand "crypto/rand"
+	"fmt"
+	"math"
+	"sort"
+	"testing"
+
+	dbm "github.com/cosmos/cosmos-db"
+	"github.com/cosmos/iavl"
+	"github.com/stretchr/testify/require"
+
+	"cosmossdk.io/log"
+	"cosmossdk.io/store/cachekv"
+	"cosmossdk.io/store/internal/kv"
+	"cosmossdk.io/store/metrics"
+	"cosmossdk.io/store/types"
+	"cosmossdk.io/store/wrapper"
+)
+
+var (
+	cacheSize = 100
+	treeData  = map[string]string{
+		"hello": "goodbye",
+		"aloha": "shalom",
+	}
+	nMoreData = 0
+)
+
+func randBytes(numBytes int) []byte {
+	b := make([]byte, numBytes)
+	_, _ = crand.Read(b)
+	return b
+}
+
+// make a tree with data from above and save it
+func newAlohaTree(t *testing.T, db dbm.DB) (*iavl.MutableTree, types.CommitID) {
+	t.Helper()
+	tree := iavl.NewMutableTree(wrapper.NewDBWrapper(db), cacheSize, false, log.NewNopLogger())
+
+	for k, v := range treeData {
+		_, err := tree.Set([]byte(k), []byte(v))
+		require.NoError(t, err)
+	}
+
+	for i := 0; i < nMoreData; i++ {
+		key := randBytes(12)
+		value := randBytes(50)
+		_, err := tree.Set(key, value)
+		require.NoError(t, err)
+	}
+
+	hash, ver, err := tree.SaveVersion()
+	require.Nil(t, err)
+
+	return tree, types.CommitID{Version: ver, Hash: hash}
+}
+
+func TestLoadStore(t *testing.T) {
+	db := dbm.NewMemDB()
+	tree, _ := newAlohaTree(t, db)
+	store := UnsafeNewStore(tree)
+
+	// Create non-pruned height H
+	updated, err := tree.Set([]byte("hello"), []byte("hallo"))
+	require.NoError(t, err)
+	require.True(t, updated)
+	hash, verH, err := tree.SaveVersion()
+	cIDH := types.CommitID{Version: verH, Hash: hash}
+	require.Nil(t, err)
+
+	// Create pruned height Hp
+	updated, err = tree.Set([]byte("hello"), []byte("hola"))
+	require.NoError(t, err)
+	require.True(t, updated)
+	hash, verHp, err := tree.SaveVersion()
+	cIDHp := types.CommitID{Version: verHp, Hash: hash}
+	require.Nil(t, err)
+
+	// TODO: Prune this height
+
+	// Create current height Hc
+	updated, err = tree.Set([]byte("hello"), []byte("ciao"))
+	require.NoError(t, err)
+	require.True(t, updated)
+	hash, verHc, err := tree.SaveVersion()
+	cIDHc := types.CommitID{Version: verHc, Hash: hash}
+	require.Nil(t, err)
+
+	// Querying an existing store at some previous non-pruned height H
+	hStore, err := store.GetImmutable(verH)
+	require.NoError(t, err)
+	require.Equal(t, string(hStore.Get([]byte("hello"))), "hallo")
+
+	// Querying an existing store at some previous pruned height Hp
+	hpStore, err := store.GetImmutable(verHp)
+	require.NoError(t, err)
+	require.Equal(t, string(hpStore.Get([]byte("hello"))), "hola")
+
+	// Querying an existing store at current height Hc
+	hcStore, err := store.GetImmutable(verHc)
+	require.NoError(t, err)
+	require.Equal(t, string(hcStore.Get([]byte("hello"))), "ciao")
+
+	// Querying a new store at some previous non-pruned height H
+	newHStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDH, DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics())
+	require.NoError(t, err)
+	require.Equal(t, string(newHStore.Get([]byte("hello"))), "hallo")
+
+	// Querying a new store at some previous pruned height Hp
+	newHpStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDHp, DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics())
+	require.NoError(t, err)
+	require.Equal(t, string(newHpStore.Get([]byte("hello"))), "hola")
+
+	// Querying a new store at current height Hc
+	newHcStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDHc, DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics())
+	require.NoError(t, err)
+	require.Equal(t, string(newHcStore.Get([]byte("hello"))), "ciao")
+}
+
+func TestGetImmutable(t *testing.T) {
+	db := dbm.NewMemDB()
+	tree, _ := newAlohaTree(t, db)
+	store := UnsafeNewStore(tree)
+
+	updated, err := tree.Set([]byte("hello"), []byte("adios"))
+	require.NoError(t, err)
+	require.True(t, updated)
+	hash, ver, err := tree.SaveVersion()
+	cID := types.CommitID{Version: ver, Hash: hash}
+	require.Nil(t, err)
+
+	_, err = store.GetImmutable(cID.Version + 1)
+	require.Error(t, err)
+
+	newStore, err := store.GetImmutable(cID.Version - 1)
+	require.NoError(t, err)
+	require.Equal(t, newStore.Get([]byte("hello")), []byte("goodbye"))
+
+	newStore, err = store.GetImmutable(cID.Version)
+	require.NoError(t, err)
+	require.Equal(t, newStore.Get([]byte("hello")), []byte("adios"))
+
+	res, err := newStore.Query(&types.RequestQuery{Data: []byte("hello"), Height: cID.Version, Path: "/key", Prove: true})
+	require.NoError(t, err)
+	require.Equal(t, res.Value, []byte("adios"))
+	require.NotNil(t, res.ProofOps)
+
+	require.Panics(t, func() { newStore.Set(nil, nil) })
+	require.Panics(t, func() { newStore.Delete(nil) })
+	require.Panics(t, func() { newStore.Commit() })
+}
+
+func TestGetImmutableIterator(t *testing.T) {
+	db := dbm.NewMemDB()
+	tree, cID := newAlohaTree(t, db)
+	store := UnsafeNewStore(tree)
+
+	newStore, err := store.GetImmutable(cID.Version)
+	require.NoError(t, err)
+
+	iter := newStore.Iterator([]byte("aloha"), []byte("hellz"))
+	expected := []string{"aloha", "hello"}
+	var i int
+
+	for i = 0; iter.Valid(); iter.Next() {
+		expectedKey := expected[i]
+		key, value := iter.Key(), iter.Value()
+
require.EqualValues(t, key, expectedKey) + require.EqualValues(t, value, treeData[expectedKey]) + i++ + } + + require.Equal(t, len(expected), i) +} + +func TestIAVLStoreGetSetHasDelete(t *testing.T) { + db := dbm.NewMemDB() + tree, _ := newAlohaTree(t, db) + iavlStore := UnsafeNewStore(tree) + + key := "hello" + + exists := iavlStore.Has([]byte(key)) + require.True(t, exists) + + value := iavlStore.Get([]byte(key)) + require.EqualValues(t, value, treeData[key]) + + value2 := "notgoodbye" + iavlStore.Set([]byte(key), []byte(value2)) + + value = iavlStore.Get([]byte(key)) + require.EqualValues(t, value, value2) + + iavlStore.Delete([]byte(key)) + + exists = iavlStore.Has([]byte(key)) + require.False(t, exists) +} + +func TestIAVLStoreNoNilSet(t *testing.T) { + db := dbm.NewMemDB() + tree, _ := newAlohaTree(t, db) + iavlStore := UnsafeNewStore(tree) + + require.Panics(t, func() { iavlStore.Set(nil, []byte("value")) }, "setting a nil key should panic") + require.Panics(t, func() { iavlStore.Set([]byte(""), []byte("value")) }, "setting an empty key should panic") + + require.Panics(t, func() { iavlStore.Set([]byte("key"), nil) }, "setting a nil value should panic") +} + +func TestIAVLIterator(t *testing.T) { + db := dbm.NewMemDB() + tree, _ := newAlohaTree(t, db) + iavlStore := UnsafeNewStore(tree) + iter := iavlStore.Iterator([]byte("aloha"), []byte("hellz")) + expected := []string{"aloha", "hello"} + var i int + + for i = 0; iter.Valid(); iter.Next() { + expectedKey := expected[i] + key, value := iter.Key(), iter.Value() + require.EqualValues(t, key, expectedKey) + require.EqualValues(t, value, treeData[expectedKey]) + i++ + } + require.Equal(t, len(expected), i) + + iter = iavlStore.Iterator([]byte("golang"), []byte("rocks")) + expected = []string{"hello"} + for i = 0; iter.Valid(); iter.Next() { + expectedKey := expected[i] + key, value := iter.Key(), iter.Value() + require.EqualValues(t, key, expectedKey) + require.EqualValues(t, value, treeData[expectedKey]) + i++ + } + require.Equal(t, len(expected), i) + + iter = iavlStore.Iterator(nil, []byte("golang")) + expected = []string{"aloha"} + for i = 0; iter.Valid(); iter.Next() { + expectedKey := expected[i] + key, value := iter.Key(), iter.Value() + require.EqualValues(t, key, expectedKey) + require.EqualValues(t, value, treeData[expectedKey]) + i++ + } + require.Equal(t, len(expected), i) + + iter = iavlStore.Iterator(nil, []byte("shalom")) + expected = []string{"aloha", "hello"} + for i = 0; iter.Valid(); iter.Next() { + expectedKey := expected[i] + key, value := iter.Key(), iter.Value() + require.EqualValues(t, key, expectedKey) + require.EqualValues(t, value, treeData[expectedKey]) + i++ + } + require.Equal(t, len(expected), i) + + iter = iavlStore.Iterator(nil, nil) + expected = []string{"aloha", "hello"} + for i = 0; iter.Valid(); iter.Next() { + expectedKey := expected[i] + key, value := iter.Key(), iter.Value() + require.EqualValues(t, key, expectedKey) + require.EqualValues(t, value, treeData[expectedKey]) + i++ + } + require.Equal(t, len(expected), i) + + iter = iavlStore.Iterator([]byte("golang"), nil) + expected = []string{"hello"} + for i = 0; iter.Valid(); iter.Next() { + expectedKey := expected[i] + key, value := iter.Key(), iter.Value() + require.EqualValues(t, key, expectedKey) + require.EqualValues(t, value, treeData[expectedKey]) + i++ + } + require.Equal(t, len(expected), i) +} + +func TestIAVLReverseIterator(t *testing.T) { + db := wrapper.NewDBWrapper(dbm.NewMemDB()) + + tree := iavl.NewMutableTree(db, cacheSize, 
false, log.NewNopLogger()) + + iavlStore := UnsafeNewStore(tree) + + iavlStore.Set([]byte{0x00}, []byte("0")) + iavlStore.Set([]byte{0x00, 0x00}, []byte("0 0")) + iavlStore.Set([]byte{0x00, 0x01}, []byte("0 1")) + iavlStore.Set([]byte{0x00, 0x02}, []byte("0 2")) + iavlStore.Set([]byte{0x01}, []byte("1")) + + testReverseIterator := func(t *testing.T, start, end []byte, expected []string) { + t.Helper() + iter := iavlStore.ReverseIterator(start, end) + var i int + for i = 0; iter.Valid(); iter.Next() { + expectedValue := expected[i] + value := iter.Value() + require.EqualValues(t, string(value), expectedValue) + i++ + } + require.Equal(t, len(expected), i) + } + + testReverseIterator(t, nil, nil, []string{"1", "0 2", "0 1", "0 0", "0"}) + testReverseIterator(t, []byte{0x00}, nil, []string{"1", "0 2", "0 1", "0 0", "0"}) + testReverseIterator(t, []byte{0x00}, []byte{0x00, 0x01}, []string{"0 0", "0"}) + testReverseIterator(t, []byte{0x00}, []byte{0x01}, []string{"0 2", "0 1", "0 0", "0"}) + testReverseIterator(t, []byte{0x00, 0x01}, []byte{0x01}, []string{"0 2", "0 1"}) + testReverseIterator(t, nil, []byte{0x01}, []string{"0 2", "0 1", "0 0", "0"}) +} + +func TestIAVLPrefixIterator(t *testing.T) { + db := wrapper.NewDBWrapper(dbm.NewMemDB()) + tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) + + iavlStore := UnsafeNewStore(tree) + + iavlStore.Set([]byte("test1"), []byte("test1")) + iavlStore.Set([]byte("test2"), []byte("test2")) + iavlStore.Set([]byte("test3"), []byte("test3")) + iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(0)}, []byte("test4")) + iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(1)}, []byte("test4")) + iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(255)}, []byte("test4")) + iavlStore.Set([]byte{byte(255), byte(255), byte(0)}, []byte("test4")) + iavlStore.Set([]byte{byte(255), byte(255), byte(1)}, []byte("test4")) + iavlStore.Set([]byte{byte(255), byte(255), byte(255)}, []byte("test4")) + + var i int + + iter := types.KVStorePrefixIterator(iavlStore, []byte("test")) + expected := []string{"test1", "test2", "test3"} + for i = 0; iter.Valid(); iter.Next() { + expectedKey := expected[i] + key, value := iter.Key(), iter.Value() + require.EqualValues(t, key, expectedKey) + require.EqualValues(t, value, expectedKey) + i++ + } + iter.Close() + require.Equal(t, len(expected), i) + + iter = types.KVStorePrefixIterator(iavlStore, []byte{byte(55), byte(255), byte(255)}) + expected2 := [][]byte{ + {byte(55), byte(255), byte(255), byte(0)}, + {byte(55), byte(255), byte(255), byte(1)}, + {byte(55), byte(255), byte(255), byte(255)}, + } + for i = 0; iter.Valid(); iter.Next() { + expectedKey := expected2[i] + key, value := iter.Key(), iter.Value() + require.EqualValues(t, key, expectedKey) + require.EqualValues(t, value, []byte("test4")) + i++ + } + iter.Close() + require.Equal(t, len(expected), i) + + iter = types.KVStorePrefixIterator(iavlStore, []byte{byte(255), byte(255)}) + expected2 = [][]byte{ + {byte(255), byte(255), byte(0)}, + {byte(255), byte(255), byte(1)}, + {byte(255), byte(255), byte(255)}, + } + for i = 0; iter.Valid(); iter.Next() { + expectedKey := expected2[i] + key, value := iter.Key(), iter.Value() + require.EqualValues(t, key, expectedKey) + require.EqualValues(t, value, []byte("test4")) + i++ + } + iter.Close() + require.Equal(t, len(expected), i) +} + +func TestIAVLReversePrefixIterator(t *testing.T) { + db := wrapper.NewDBWrapper(dbm.NewMemDB()) + tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) 
+ + iavlStore := UnsafeNewStore(tree) + + iavlStore.Set([]byte("test1"), []byte("test1")) + iavlStore.Set([]byte("test2"), []byte("test2")) + iavlStore.Set([]byte("test3"), []byte("test3")) + iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(0)}, []byte("test4")) + iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(1)}, []byte("test4")) + iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(255)}, []byte("test4")) + iavlStore.Set([]byte{byte(255), byte(255), byte(0)}, []byte("test4")) + iavlStore.Set([]byte{byte(255), byte(255), byte(1)}, []byte("test4")) + iavlStore.Set([]byte{byte(255), byte(255), byte(255)}, []byte("test4")) + + var i int + + iter := types.KVStoreReversePrefixIterator(iavlStore, []byte("test")) + expected := []string{"test3", "test2", "test1"} + for i = 0; iter.Valid(); iter.Next() { + expectedKey := expected[i] + key, value := iter.Key(), iter.Value() + require.EqualValues(t, key, expectedKey) + require.EqualValues(t, value, expectedKey) + i++ + } + require.Equal(t, len(expected), i) + + iter = types.KVStoreReversePrefixIterator(iavlStore, []byte{byte(55), byte(255), byte(255)}) + expected2 := [][]byte{ + {byte(55), byte(255), byte(255), byte(255)}, + {byte(55), byte(255), byte(255), byte(1)}, + {byte(55), byte(255), byte(255), byte(0)}, + } + for i = 0; iter.Valid(); iter.Next() { + expectedKey := expected2[i] + key, value := iter.Key(), iter.Value() + require.EqualValues(t, key, expectedKey) + require.EqualValues(t, value, []byte("test4")) + i++ + } + require.Equal(t, len(expected), i) + + iter = types.KVStoreReversePrefixIterator(iavlStore, []byte{byte(255), byte(255)}) + expected2 = [][]byte{ + {byte(255), byte(255), byte(255)}, + {byte(255), byte(255), byte(1)}, + {byte(255), byte(255), byte(0)}, + } + for i = 0; iter.Valid(); iter.Next() { + expectedKey := expected2[i] + key, value := iter.Key(), iter.Value() + require.EqualValues(t, key, expectedKey) + require.EqualValues(t, value, []byte("test4")) + i++ + } + require.Equal(t, len(expected), i) +} + +func nextVersion(iavl *Store) { + key := []byte(fmt.Sprintf("Key for tree: %d", iavl.LastCommitID().Version)) + value := []byte(fmt.Sprintf("Value for tree: %d", iavl.LastCommitID().Version)) + iavl.Set(key, value) + iavl.Commit() +} + +func TestIAVLNoPrune(t *testing.T) { + db := wrapper.NewDBWrapper(dbm.NewMemDB()) + tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) + + iavlStore := UnsafeNewStore(tree) + nextVersion(iavlStore) + + for i := 1; i < 100; i++ { + for j := 1; j <= i; j++ { + require.True(t, iavlStore.VersionExists(int64(j)), + "Missing version %d with latest version %d. 
Should be storing all versions", + j, i) + } + + nextVersion(iavlStore) + } +} + +func TestIAVLStoreQuery(t *testing.T) { + db := wrapper.NewDBWrapper(dbm.NewMemDB()) + tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) + + iavlStore := UnsafeNewStore(tree) + + k1, v1 := []byte("key1"), []byte("val1") + k2, v2 := []byte("key2"), []byte("val2") + v3 := []byte("val3") + + ksub := []byte("key") + KVs0 := kv.Pairs{} + KVs1 := kv.Pairs{ + Pairs: []kv.Pair{ + {Key: k1, Value: v1}, + {Key: k2, Value: v2}, + }, + } + KVs2 := kv.Pairs{ + Pairs: []kv.Pair{ + {Key: k1, Value: v3}, + {Key: k2, Value: v2}, + }, + } + + valExpSubEmpty, err := KVs0.Marshal() + require.NoError(t, err) + + valExpSub1, err := KVs1.Marshal() + require.NoError(t, err) + + valExpSub2, err := KVs2.Marshal() + require.NoError(t, err) + + cid := iavlStore.Commit() + ver := cid.Version + query := types.RequestQuery{Path: "/key", Data: k1, Height: ver} + querySub := types.RequestQuery{Path: "/subspace", Data: ksub, Height: ver} + + // query subspace before anything set + qres, err := iavlStore.Query(&querySub) + require.NoError(t, err) + require.Equal(t, uint32(0), qres.Code) + require.Equal(t, valExpSubEmpty, qres.Value) + + // set data + iavlStore.Set(k1, v1) + iavlStore.Set(k2, v2) + + // set data without commit, doesn't show up + qres, err = iavlStore.Query(&query) + require.NoError(t, err) + require.Equal(t, uint32(0), qres.Code) + require.Nil(t, qres.Value) + + // commit it, but still don't see on old version + cid = iavlStore.Commit() + qres, err = iavlStore.Query(&query) + require.NoError(t, err) + require.Equal(t, uint32(0), qres.Code) + require.Nil(t, qres.Value) + + // but yes on the new version + query.Height = cid.Version + qres, err = iavlStore.Query(&query) + require.NoError(t, err) + require.Equal(t, uint32(0), qres.Code) + require.Equal(t, v1, qres.Value) + + // and for the subspace + qres, err = iavlStore.Query(&querySub) + require.NoError(t, err) + require.Equal(t, uint32(0), qres.Code) + require.Equal(t, valExpSub1, qres.Value) + + // modify + iavlStore.Set(k1, v3) + cid = iavlStore.Commit() + + // query will return old values, as height is fixed + qres, err = iavlStore.Query(&query) + require.NoError(t, err) + require.Equal(t, uint32(0), qres.Code) + require.Equal(t, v1, qres.Value) + + // update to latest in the query and we are happy + query.Height = cid.Version + qres, err = iavlStore.Query(&query) + require.NoError(t, err) + require.Equal(t, uint32(0), qres.Code) + require.Equal(t, v3, qres.Value) + query2 := types.RequestQuery{Path: "/key", Data: k2, Height: cid.Version} + + qres, err = iavlStore.Query(&query2) + require.NoError(t, err) + require.Equal(t, uint32(0), qres.Code) + require.Equal(t, v2, qres.Value) + // and for the subspace + qres, err = iavlStore.Query(&querySub) + require.NoError(t, err) + require.Equal(t, uint32(0), qres.Code) + require.Equal(t, valExpSub2, qres.Value) + + // default (height 0) will show latest -1 + query0 := types.RequestQuery{Path: "/key", Data: k1} + qres, err = iavlStore.Query(&query0) + require.NoError(t, err) + require.Equal(t, uint32(0), qres.Code) + require.Equal(t, v1, qres.Value) +} + +func BenchmarkIAVLIteratorNext(b *testing.B) { + b.ReportAllocs() + db := wrapper.NewDBWrapper(dbm.NewMemDB()) + treeSize := 1000 + tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) + + for i := 0; i < treeSize; i++ { + key := randBytes(4) + value := randBytes(50) + _, err := tree.Set(key, value) + require.NoError(b, err) + } + + iavlStore := 
UnsafeNewStore(tree) + iterators := make([]types.Iterator, b.N/treeSize) + + for i := 0; i < len(iterators); i++ { + iterators[i] = iavlStore.Iterator([]byte{0}, []byte{255, 255, 255, 255, 255}) + } + + b.ResetTimer() + for i := 0; i < len(iterators); i++ { + iter := iterators[i] + for j := 0; j < treeSize; j++ { + iter.Next() + } + } +} + +func TestSetInitialVersion(t *testing.T) { + testCases := []struct { + name string + storeFn func(db *dbm.MemDB) *Store + expPanic bool + }{ + { + "works with a mutable tree", + func(db *dbm.MemDB) *Store { + tree := iavl.NewMutableTree(wrapper.NewDBWrapper(db), cacheSize, false, log.NewNopLogger()) + store := UnsafeNewStore(tree) + + return store + }, false, + }, + { + "throws error on immutable tree", + func(db *dbm.MemDB) *Store { + tree := iavl.NewMutableTree(wrapper.NewDBWrapper(db), cacheSize, false, log.NewNopLogger()) + store := UnsafeNewStore(tree) + _, version, err := store.tree.SaveVersion() + require.NoError(t, err) + require.Equal(t, int64(1), version) + store, err = store.GetImmutable(1) + require.NoError(t, err) + + return store + }, true, + }, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + db := dbm.NewMemDB() + store := tc.storeFn(db) + + if tc.expPanic { + require.Panics(t, func() { store.SetInitialVersion(5) }) + } else { + store.SetInitialVersion(5) + cid := store.Commit() + require.Equal(t, int64(5), cid.GetVersion()) + } + }) + } +} + +func TestCacheWraps(t *testing.T) { + db := dbm.NewMemDB() + tree, _ := newAlohaTree(t, db) + store := UnsafeNewStore(tree) + + cacheWrapper := store.CacheWrap() + require.IsType(t, &cachekv.Store{}, cacheWrapper) + + cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil) + require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace) +} + +func TestChangeSets(t *testing.T) { + db := dbm.NewMemDB() + treeSize := 1000 + treeVersion := int64(10) + targetVersion := int64(6) + tree := iavl.NewMutableTree(wrapper.NewDBWrapper(db), cacheSize, false, log.NewNopLogger(), iavl.FlushThresholdOption(math.MaxInt)) + + for j := int64(0); j < treeVersion; j++ { + keys := [][]byte{} + for i := 0; i < treeSize; i++ { + keys = append(keys, randBytes(4)) + } + sort.Slice(keys, func(p, q int) bool { + return bytes.Compare(keys[p], keys[q]) < 0 + }) + for i := 0; i < treeSize; i++ { + key := keys[i] + value := randBytes(50) + _, err := tree.Set(key, value) + require.NoError(t, err) + } + _, _, err := tree.SaveVersion() + require.NoError(t, err) + } + + changeSets := []*iavl.ChangeSet{} + iavlStore := UnsafeNewStore(tree) + commitID := iavlStore.LastCommitID() + + require.NoError(t, iavlStore.TraverseStateChanges(targetVersion+1, treeVersion, func(v int64, cs *iavl.ChangeSet) error { + changeSets = append(changeSets, cs) + return nil + })) + require.NoError(t, iavlStore.LoadVersionForOverwriting(targetVersion)) + + for i, cs := range changeSets { + v, err := tree.SaveChangeSet(cs) + require.NoError(t, err) + require.Equal(t, v, targetVersion+int64(i+1)) + } + + restoreCommitID := iavlStore.LastCommitID() + require.Equal(t, commitID, restoreCommitID) +} diff --git a/cosmos-sdk-store/iavl/tree.go b/cosmos-sdk-store/iavl/tree.go new file mode 100755 index 000000000..889fc1d5a --- /dev/null +++ b/cosmos-sdk-store/iavl/tree.go @@ -0,0 +1,98 @@ +package iavl + +import ( + "fmt" + + "github.com/cosmos/iavl" + idb "github.com/cosmos/iavl/db" +) + +var ( + _ Tree = (*immutableTree)(nil) + _ Tree = (*iavl.MutableTree)(nil) +) + +type ( + // Tree defines an interface that both mutable 
and immutable IAVL trees + // must implement. For mutable IAVL trees, the interface is directly + // implemented by an iavl.MutableTree. For an immutable IAVL tree, a wrapper + // must be made. + Tree interface { + Has(key []byte) (bool, error) + Get(key []byte) ([]byte, error) + Set(key, value []byte) (bool, error) + Remove(key []byte) ([]byte, bool, error) + SaveVersion() ([]byte, int64, error) + Version() int64 + Hash() []byte + WorkingHash() []byte + VersionExists(version int64) bool + DeleteVersionsTo(version int64) error + GetVersioned(key []byte, version int64) ([]byte, error) + GetImmutable(version int64) (*iavl.ImmutableTree, error) + SetInitialVersion(version uint64) + Iterator(start, end []byte, ascending bool) (idb.Iterator, error) + AvailableVersions() []int + LoadVersionForOverwriting(targetVersion int64) error + TraverseStateChanges(startVersion, endVersion int64, fn func(version int64, changeSet *iavl.ChangeSet) error) error + } + + // immutableTree is a simple wrapper around a reference to an iavl.ImmutableTree + // that implements the Tree interface. It should only be used for querying + // and iteration, specifically at previous heights. + immutableTree struct { + *iavl.ImmutableTree + } +) + +func (it *immutableTree) Set(_, _ []byte) (bool, error) { + panic("cannot call 'Set' on an immutable IAVL tree") +} + +func (it *immutableTree) Remove(_ []byte) ([]byte, bool, error) { + panic("cannot call 'Remove' on an immutable IAVL tree") +} + +func (it *immutableTree) SaveVersion() ([]byte, int64, error) { + panic("cannot call 'SaveVersion' on an immutable IAVL tree") +} + +func (it *immutableTree) DeleteVersionsTo(_ int64) error { + panic("cannot call 'DeleteVersionsTo' on an immutable IAVL tree") +} + +func (it *immutableTree) SetInitialVersion(_ uint64) { + panic("cannot call 'SetInitialVersion' on an immutable IAVL tree") +} + +func (it *immutableTree) VersionExists(version int64) bool { + return it.Version() == version +} + +func (it *immutableTree) GetVersioned(key []byte, version int64) ([]byte, error) { + if it.Version() != version { + return nil, fmt.Errorf("version mismatch on immutable IAVL tree; got: %d, expected: %d", version, it.Version()) + } + + return it.Get(key) +} + +func (it *immutableTree) GetImmutable(version int64) (*iavl.ImmutableTree, error) { + if it.Version() != version { + return nil, fmt.Errorf("version mismatch on immutable IAVL tree; got: %d, expected: %d", version, it.Version()) + } + + return it.ImmutableTree, nil +} + +func (it *immutableTree) AvailableVersions() []int { + return []int{} +} + +func (it *immutableTree) LoadVersionForOverwriting(targetVersion int64) error { + panic("cannot call 'LoadVersionForOverwriting' on an immutable IAVL tree") +} + +func (it *immutableTree) WorkingHash() []byte { + panic("cannot call 'WorkingHash' on an immutable IAVL tree") +} diff --git a/cosmos-sdk-store/iavl/tree_test.go b/cosmos-sdk-store/iavl/tree_test.go new file mode 100755 index 000000000..243355e42 --- /dev/null +++ b/cosmos-sdk-store/iavl/tree_test.go @@ -0,0 +1,41 @@ +package iavl + +import ( + "testing" + + dbm "github.com/cosmos/cosmos-db" + "github.com/cosmos/iavl" + "github.com/stretchr/testify/require" + + "cosmossdk.io/log" + "cosmossdk.io/store/wrapper" +) + +func TestImmutableTreePanics(t *testing.T) { + t.Parallel() + immTree := iavl.NewImmutableTree(wrapper.NewDBWrapper(dbm.NewMemDB()), 100, false, log.NewNopLogger()) + it := &immutableTree{immTree} + require.Panics(t, func() { + _, err := it.Set([]byte{}, []byte{}) + 
require.NoError(t, err) + }) + require.Panics(t, func() { + _, _, err := it.Remove([]byte{}) + require.NoError(t, err) + }) + require.Panics(t, func() { _, _, _ = it.SaveVersion() }) + require.Panics(t, func() { _ = it.DeleteVersionsTo(int64(1)) }) + + val, err := it.GetVersioned(nil, 1) + require.Error(t, err) + require.Nil(t, val) + + imm, err := it.GetImmutable(1) + require.Error(t, err) + require.Nil(t, imm) + + imm, err = it.GetImmutable(0) + require.NoError(t, err) + require.NotNil(t, imm) + require.Equal(t, immTree, imm) +} diff --git a/cosmos-sdk-store/internal/conv/doc.go b/cosmos-sdk-store/internal/conv/doc.go new file mode 100755 index 000000000..1c86f5c14 --- /dev/null +++ b/cosmos-sdk-store/internal/conv/doc.go @@ -0,0 +1,2 @@ +// Package conv provides internal functions for convertions and data manipulation +package conv diff --git a/cosmos-sdk-store/internal/conv/string.go b/cosmos-sdk-store/internal/conv/string.go new file mode 100755 index 000000000..96d89c3a5 --- /dev/null +++ b/cosmos-sdk-store/internal/conv/string.go @@ -0,0 +1,19 @@ +package conv + +import ( + "unsafe" +) + +// UnsafeStrToBytes uses unsafe to convert string into byte array. Returned bytes +// must not be altered after this function is called as it will cause a segmentation fault. +func UnsafeStrToBytes(s string) []byte { + return unsafe.Slice(unsafe.StringData(s), len(s)) // ref https://github.com/golang/go/issues/53003#issuecomment-1140276077 +} + +// UnsafeBytesToStr is meant to make a zero allocation conversion +// from []byte -> string to speed up operations, it is not meant +// to be used generally, but for a specific pattern to delete keys +// from a map. +func UnsafeBytesToStr(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} diff --git a/cosmos-sdk-store/internal/conv/string_test.go b/cosmos-sdk-store/internal/conv/string_test.go new file mode 100755 index 000000000..3a1451753 --- /dev/null +++ b/cosmos-sdk-store/internal/conv/string_test.go @@ -0,0 +1,54 @@ +package conv + +import ( + "runtime" + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/suite" +) + +func TestStringSuite(t *testing.T) { + suite.Run(t, new(StringSuite)) +} + +type StringSuite struct{ suite.Suite } + +func unsafeConvertStr() []byte { + return UnsafeStrToBytes("abc") +} + +func (s *StringSuite) TestUnsafeStrToBytes() { + // we convert in other function to trigger GC. We want to check that + // the underlying array in []bytes is accessible after GC will finish swapping. + for i := 0; i < 5; i++ { + b := unsafeConvertStr() + runtime.GC() + <-time.NewTimer(2 * time.Millisecond).C + b2 := append(b, 'd') + s.Equal("abc", string(b)) + s.Equal("abcd", string(b2)) + } +} + +func unsafeConvertBytes() string { + return UnsafeBytesToStr([]byte("abc")) +} + +func (s *StringSuite) TestUnsafeBytesToStr() { + // we convert in other function to trigger GC. We want to check that + // the underlying array in []bytes is accessible after GC will finish swapping. 
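+	// Illustrative caveat (a sketch, not exercised by this test): the returned
+	// string aliases the slice's backing array, so mutating the slice afterwards
+	// would also change the supposedly immutable string:
+	//
+	//	b := []byte("abc")
+	//	s := UnsafeBytesToStr(b)
+	//	b[0] = 'x' // s now reads "xbc"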
+ for i := 0; i < 5; i++ { + str := unsafeConvertBytes() + runtime.GC() + <-time.NewTimer(2 * time.Millisecond).C + s.Equal("abc", str) + } +} + +func BenchmarkUnsafeStrToBytes(b *testing.B) { + for i := 0; i < b.N; i++ { + UnsafeStrToBytes(strconv.Itoa(i)) + } +} diff --git a/cosmos-sdk-store/internal/kv/helpers.go b/cosmos-sdk-store/internal/kv/helpers.go new file mode 100755 index 000000000..5bccea122 --- /dev/null +++ b/cosmos-sdk-store/internal/kv/helpers.go @@ -0,0 +1,17 @@ +package kv + +import "fmt" + +// AssertKeyAtLeastLength panics when store key length is less than the given length. +func AssertKeyAtLeastLength(bz []byte, length int) { + if len(bz) < length { + panic(fmt.Sprintf("expected key of length at least %d, got %d", length, len(bz))) + } +} + +// AssertKeyLength panics when store key length is not equal to the given length. +func AssertKeyLength(bz []byte, length int) { + if len(bz) != length { + panic(fmt.Sprintf("unexpected key length; got: %d, expected: %d", len(bz), length)) + } +} diff --git a/cosmos-sdk-store/internal/kv/kv.go b/cosmos-sdk-store/internal/kv/kv.go new file mode 100755 index 000000000..1f3da91cc --- /dev/null +++ b/cosmos-sdk-store/internal/kv/kv.go @@ -0,0 +1,28 @@ +package kv + +import ( + "bytes" + "sort" +) + +func (kvs Pairs) Len() int { return len(kvs.Pairs) } +func (kvs Pairs) Less(i, j int) bool { + switch bytes.Compare(kvs.Pairs[i].Key, kvs.Pairs[j].Key) { + case -1: + return true + + case 0: + return bytes.Compare(kvs.Pairs[i].Value, kvs.Pairs[j].Value) < 0 + + case 1: + return false + + default: + panic("invalid comparison result") + } +} + +func (kvs Pairs) Swap(i, j int) { kvs.Pairs[i], kvs.Pairs[j] = kvs.Pairs[j], kvs.Pairs[i] } + +// Sort invokes sort.Sort on kvs. +func (kvs Pairs) Sort() { sort.Sort(kvs) } diff --git a/cosmos-sdk-store/internal/kv/kv.pb.go b/cosmos-sdk-store/internal/kv/kv.pb.go new file mode 100755 index 000000000..847bd11d4 --- /dev/null +++ b/cosmos-sdk-store/internal/kv/kv.pb.go @@ -0,0 +1,559 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cosmos/store/internal/kv/v1beta1/kv.proto + +package kv + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Pairs defines a repeated slice of Pair objects. 
+type Pairs struct { + Pairs []Pair `protobuf:"bytes,1,rep,name=pairs,proto3" json:"pairs"` +} + +func (m *Pairs) Reset() { *m = Pairs{} } +func (m *Pairs) String() string { return proto.CompactTextString(m) } +func (*Pairs) ProtoMessage() {} +func (*Pairs) Descriptor() ([]byte, []int) { + return fileDescriptor_534782c4083e056d, []int{0} +} +func (m *Pairs) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Pairs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Pairs.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Pairs) XXX_Merge(src proto.Message) { + xxx_messageInfo_Pairs.Merge(m, src) +} +func (m *Pairs) XXX_Size() int { + return m.Size() +} +func (m *Pairs) XXX_DiscardUnknown() { + xxx_messageInfo_Pairs.DiscardUnknown(m) +} + +var xxx_messageInfo_Pairs proto.InternalMessageInfo + +func (m *Pairs) GetPairs() []Pair { + if m != nil { + return m.Pairs + } + return nil +} + +// Pair defines a key/value bytes tuple. +type Pair struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Pair) Reset() { *m = Pair{} } +func (m *Pair) String() string { return proto.CompactTextString(m) } +func (*Pair) ProtoMessage() {} +func (*Pair) Descriptor() ([]byte, []int) { + return fileDescriptor_534782c4083e056d, []int{1} +} +func (m *Pair) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Pair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Pair.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Pair) XXX_Merge(src proto.Message) { + xxx_messageInfo_Pair.Merge(m, src) +} +func (m *Pair) XXX_Size() int { + return m.Size() +} +func (m *Pair) XXX_DiscardUnknown() { + xxx_messageInfo_Pair.DiscardUnknown(m) +} + +var xxx_messageInfo_Pair proto.InternalMessageInfo + +func (m *Pair) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *Pair) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*Pairs)(nil), "cosmos.store.internal.kv.v1beta1.Pairs") + proto.RegisterType((*Pair)(nil), "cosmos.store.internal.kv.v1beta1.Pair") +} + +func init() { + proto.RegisterFile("cosmos/store/internal/kv/v1beta1/kv.proto", fileDescriptor_534782c4083e056d) +} + +var fileDescriptor_534782c4083e056d = []byte{ + // 217 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4c, 0xce, 0x2f, 0xce, + 0xcd, 0x2f, 0xd6, 0x2f, 0x2e, 0xc9, 0x2f, 0x4a, 0xd5, 0xcf, 0xcc, 0x2b, 0x49, 0x2d, 0xca, 0x4b, + 0xcc, 0xd1, 0xcf, 0x2e, 0xd3, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd4, 0xcf, 0x2e, 0xd3, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x52, 0x80, 0x28, 0xd5, 0x03, 0x2b, 0xd5, 0x83, 0x29, 0xd5, + 0xcb, 0x2e, 0xd3, 0x83, 0x2a, 0x95, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x2b, 0xd6, 0x07, 0xb1, + 0x20, 0xfa, 0x94, 0xbc, 0xb9, 0x58, 0x03, 0x12, 0x33, 0x8b, 0x8a, 0x85, 0x9c, 0xb8, 0x58, 0x0b, + 0x40, 0x0c, 0x09, 0x46, 0x05, 0x66, 0x0d, 0x6e, 0x23, 0x35, 0x3d, 0x42, 0x06, 0xea, 0x81, 0xf4, + 0x39, 0xb1, 0x9c, 0xb8, 0x27, 0xcf, 0x10, 0x04, 0xd1, 0xaa, 0xa4, 0xc7, 0xc5, 0x02, 0x12, 0x14, + 0x12, 
0xe0, 0x62, 0xce, 0x4e, 0xad, 0x94, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0x31, 0x85, + 0x44, 0xb8, 0x58, 0xcb, 0x12, 0x73, 0x4a, 0x53, 0x25, 0x98, 0xc0, 0x62, 0x10, 0x8e, 0x93, 0xc5, + 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, + 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0xc9, 0x41, 0x6c, 0x2f, 0x4e, 0xc9, + 0xd6, 0xcb, 0xcc, 0xc7, 0xf4, 0x7f, 0x12, 0x1b, 0xd8, 0xf5, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x5d, 0xad, 0x97, 0xdd, 0x22, 0x01, 0x00, 0x00, +} + +func (m *Pairs) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Pairs) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Pairs) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Pairs) > 0 { + for iNdEx := len(m.Pairs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Pairs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintKv(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Pair) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Pair) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Pair) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintKv(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintKv(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintKv(dAtA []byte, offset int, v uint64) int { + offset -= sovKv(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Pairs) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Pairs) > 0 { + for _, e := range m.Pairs { + l = e.Size() + n += 1 + l + sovKv(uint64(l)) + } + } + return n +} + +func (m *Pair) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovKv(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovKv(uint64(l)) + } + return n +} + +func sovKv(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozKv(x uint64) (n int) { + return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Pairs) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Pairs: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Pairs: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pairs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthKv + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pairs = append(m.Pairs, Pair{}) + if err := m.Pairs[len(m.Pairs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKv(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthKv + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Pair) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Pair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Pair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthKv + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthKv + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKv(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthKv + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipKv(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthKv + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupKv + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthKv + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowKv = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupKv = fmt.Errorf("proto: unexpected end of group") +) diff --git a/cosmos-sdk-store/internal/maps/bench_test.go b/cosmos-sdk-store/internal/maps/bench_test.go new file mode 100755 index 000000000..4d7f680c7 --- /dev/null +++ b/cosmos-sdk-store/internal/maps/bench_test.go @@ -0,0 +1,13 @@ +package maps + +import "testing" + +func BenchmarkKVPairBytes(b *testing.B) { + kvp := NewKVPair(make([]byte, 128), make([]byte, 1e6)) + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + b.SetBytes(int64(len(kvp.Bytes()))) + } +} diff --git a/cosmos-sdk-store/internal/maps/maps.go b/cosmos-sdk-store/internal/maps/maps.go new file mode 100755 index 000000000..2ee7d09b5 --- /dev/null +++ b/cosmos-sdk-store/internal/maps/maps.go @@ -0,0 +1,216 @@ +package maps + +import ( + "encoding/binary" + + "github.com/cometbft/cometbft/crypto/merkle" + "github.com/cometbft/cometbft/crypto/tmhash" + cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto" + + "cosmossdk.io/store/internal/kv" + "cosmossdk.io/store/internal/tree" +) + +// merkleMap defines a merkle-ized tree from a map. Leave values are treated as +// hash(key) | hash(value). Leaves are sorted before Merkle hashing. +type merkleMap struct { + kvs kv.Pairs + sorted bool +} + +func newMerkleMap() *merkleMap { + return &merkleMap{ + kvs: kv.Pairs{}, + sorted: false, + } +} + +// Set creates a kv.Pair from the provided key and value. The value is hashed prior +// to creating a kv.Pair. The created kv.Pair is appended to the MerkleMap's slice +// of kv.Pairs. Whenever called, the MerkleMap must be resorted. 
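+// A minimal usage sketch (hypothetical keys/values):
+//
+//	mm := newMerkleMap()
+//	mm.set("key1", []byte("value1")) // stores ("key1", tmhash.Sum([]byte("value1")))
+//	root := mm.hash()                // sorts the pairs, then Merkle-hashes them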
+func (sm *merkleMap) set(key string, value []byte) { + byteKey := []byte(key) + assertValidKey(byteKey) + + sm.sorted = false + + // The value is hashed, so you can check for equality with a cached value (say) + // and make a determination to fetch or not. + vhash := tmhash.Sum(value) + + sm.kvs.Pairs = append(sm.kvs.Pairs, kv.Pair{ + Key: byteKey, + Value: vhash, + }) +} + +// Hash returns the merkle root of items sorted by key. Note, it is unstable. +func (sm *merkleMap) hash() []byte { + sm.sort() + return hashKVPairs(sm.kvs) +} + +func (sm *merkleMap) sort() { + if sm.sorted { + return + } + + sm.kvs.Sort() + sm.sorted = true +} + +// hashKVPairs hashes a kvPair and creates a merkle tree where the leaves are +// byte slices. +func hashKVPairs(kvs kv.Pairs) []byte { + kvsH := make([][]byte, len(kvs.Pairs)) + for i, kvp := range kvs.Pairs { + kvsH[i] = KVPair(kvp).Bytes() + } + + return tree.HashFromByteSlices(kvsH) +} + +// --------------------------------------------- + +// Merkle tree from a map. +// Leaves are `hash(key) | hash(value)`. +// Leaves are sorted before Merkle hashing. +type simpleMap struct { + Kvs kv.Pairs + sorted bool +} + +func newSimpleMap() *simpleMap { + return &simpleMap{ + Kvs: kv.Pairs{}, + sorted: false, + } +} + +// Set creates a kv pair of the key and the hash of the value, +// and then appends it to SimpleMap's kv pairs. +func (sm *simpleMap) Set(key string, value []byte) { + byteKey := []byte(key) + assertValidKey(byteKey) + sm.sorted = false + + // The value is hashed, so you can + // check for equality with a cached value (say) + // and make a determination to fetch or not. + vhash := tmhash.Sum(value) + + sm.Kvs.Pairs = append(sm.Kvs.Pairs, kv.Pair{ + Key: byteKey, + Value: vhash, + }) +} + +// Hash Merkle root hash of items sorted by key +// (UNSTABLE: and by value too if duplicate key). +func (sm *simpleMap) Hash() []byte { + sm.Sort() + return hashKVPairs(sm.Kvs) +} + +func (sm *simpleMap) Sort() { + if sm.sorted { + return + } + sm.Kvs.Sort() + sm.sorted = true +} + +// Returns a copy of sorted KVPairs. +// NOTE these contain the hashed key and value. +func (sm *simpleMap) KVPairs() kv.Pairs { + sm.Sort() + kvs := kv.Pairs{ + Pairs: make([]kv.Pair, len(sm.Kvs.Pairs)), + } + + copy(kvs.Pairs, sm.Kvs.Pairs) + return kvs +} + +//---------------------------------------- + +// A local extension to KVPair that can be hashed. +// Key and value are length prefixed and concatenated, +// then hashed. +type KVPair kv.Pair + +// NewKVPair takes in a key and value and creates a kv.Pair +// wrapped in the local extension KVPair +func NewKVPair(key, value []byte) KVPair { + return KVPair(kv.Pair{ + Key: key, + Value: value, + }) +} + +// Bytes returns key || value, with both the +// key and value length prefixed. +func (kv KVPair) Bytes() []byte { + // In the worst case: + // * 8 bytes to Uvarint encode the length of the key + // * 8 bytes to Uvarint encode the length of the value + // So preallocate for the worst case, which will in total + // be a maximum of 14 bytes wasted, if len(key)=1, len(value)=1, + // but that's going to rare. + buf := make([]byte, 8+len(kv.Key)+8+len(kv.Value)) + + // Encode the key, prefixed with its length. + nlk := binary.PutUvarint(buf, uint64(len(kv.Key))) + nk := copy(buf[nlk:], kv.Key) + + // Encode the value, prefixing with its length. 
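+	// Illustrative layout: key="k1", value="v1" encodes as
+	//   0x02 'k' '1' 0x02 'v' '1'
+	// i.e. each segment is a Uvarint length followed by the raw bytes.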
+ nlv := binary.PutUvarint(buf[nlk+nk:], uint64(len(kv.Value))) + nv := copy(buf[nlk+nk+nlv:], kv.Value) + + return buf[:nlk+nk+nlv+nv] +} + +// HashFromMap computes a merkle tree from sorted map and returns the merkle +// root. +func HashFromMap(m map[string][]byte) []byte { + mm := newMerkleMap() + for k, v := range m { + mm.set(k, v) + } + + return mm.hash() +} + +// ProofsFromMap generates proofs from a map. The keys/values of the map will be used as the keys/values +// in the underlying key-value pairs. +// The keys are sorted before the proofs are computed. +func ProofsFromMap(m map[string][]byte) ([]byte, map[string]*cmtprotocrypto.Proof, []string) { + sm := newSimpleMap() + for k, v := range m { + sm.Set(k, v) + } + + sm.Sort() + kvs := sm.Kvs + kvsBytes := make([][]byte, len(kvs.Pairs)) + for i, kvp := range kvs.Pairs { + kvsBytes[i] = KVPair(kvp).Bytes() + } + + rootHash, proofList := merkle.ProofsFromByteSlices(kvsBytes) + proofs := make(map[string]*cmtprotocrypto.Proof) + keys := make([]string, len(proofList)) + + for i, kvp := range kvs.Pairs { + proofs[string(kvp.Key)] = proofList[i].ToProto() + keys[i] = string(kvp.Key) + } + + return rootHash, proofs, keys +} + +func assertValidKey(key []byte) { + if len(key) == 0 { + panic("key is nil") + } +} diff --git a/cosmos-sdk-store/internal/maps/maps_test.go b/cosmos-sdk-store/internal/maps/maps_test.go new file mode 100755 index 000000000..ce7ad72e6 --- /dev/null +++ b/cosmos-sdk-store/internal/maps/maps_test.go @@ -0,0 +1,104 @@ +package maps + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestEmptyKeyMerkleMap(t *testing.T) { + db := newMerkleMap() + require.Panics(t, func() { db.set("", []byte("value")) }, "setting an empty key should panic") +} + +func TestMerkleMap(t *testing.T) { + tests := []struct { + keys []string + values []string // each string gets converted to []byte in test + want string + }{ + {[]string{}, []string{}, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}, + {[]string{"key1"}, []string{"value1"}, "a44d3cc7daba1a4600b00a2434b30f8b970652169810d6dfa9fb1793a2189324"}, + {[]string{"key1"}, []string{"value2"}, "0638e99b3445caec9d95c05e1a3fc1487b4ddec6a952ff337080360b0dcc078c"}, + // swap order with 2 keys + { + []string{"key1", "key2"}, + []string{"value1", "value2"}, + "8fd19b19e7bb3f2b3ee0574027d8a5a4cec370464ea2db2fbfa5c7d35bb0cff3", + }, + { + []string{"key2", "key1"}, + []string{"value2", "value1"}, + "8fd19b19e7bb3f2b3ee0574027d8a5a4cec370464ea2db2fbfa5c7d35bb0cff3", + }, + // swap order with 3 keys + { + []string{"key1", "key2", "key3"}, + []string{"value1", "value2", "value3"}, + "1dd674ec6782a0d586a903c9c63326a41cbe56b3bba33ed6ff5b527af6efb3dc", + }, + { + []string{"key1", "key3", "key2"}, + []string{"value1", "value3", "value2"}, + "1dd674ec6782a0d586a903c9c63326a41cbe56b3bba33ed6ff5b527af6efb3dc", + }, + } + for i, tc := range tests { + db := newMerkleMap() + for i := 0; i < len(tc.keys); i++ { + db.set(tc.keys[i], []byte(tc.values[i])) + } + + got := db.hash() + assert.Equal(t, tc.want, fmt.Sprintf("%x", got), "Hash didn't match on tc %d", i) + } +} + +func TestEmptyKeySimpleMap(t *testing.T) { + db := newSimpleMap() + require.Panics(t, func() { db.Set("", []byte("value")) }, "setting an empty key should panic") +} + +func TestSimpleMap(t *testing.T) { + tests := []struct { + keys []string + values []string // each string gets converted to []byte in test + want string + }{ + {[]string{}, []string{}, 
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}, + {[]string{"key1"}, []string{"value1"}, "a44d3cc7daba1a4600b00a2434b30f8b970652169810d6dfa9fb1793a2189324"}, + {[]string{"key1"}, []string{"value2"}, "0638e99b3445caec9d95c05e1a3fc1487b4ddec6a952ff337080360b0dcc078c"}, + // swap order with 2 keys + { + []string{"key1", "key2"}, + []string{"value1", "value2"}, + "8fd19b19e7bb3f2b3ee0574027d8a5a4cec370464ea2db2fbfa5c7d35bb0cff3", + }, + { + []string{"key2", "key1"}, + []string{"value2", "value1"}, + "8fd19b19e7bb3f2b3ee0574027d8a5a4cec370464ea2db2fbfa5c7d35bb0cff3", + }, + // swap order with 3 keys + { + []string{"key1", "key2", "key3"}, + []string{"value1", "value2", "value3"}, + "1dd674ec6782a0d586a903c9c63326a41cbe56b3bba33ed6ff5b527af6efb3dc", + }, + { + []string{"key1", "key3", "key2"}, + []string{"value1", "value3", "value2"}, + "1dd674ec6782a0d586a903c9c63326a41cbe56b3bba33ed6ff5b527af6efb3dc", + }, + } + for i, tc := range tests { + db := newSimpleMap() + for i := 0; i < len(tc.keys); i++ { + db.Set(tc.keys[i], []byte(tc.values[i])) + } + got := db.Hash() + assert.Equal(t, tc.want, fmt.Sprintf("%x", got), "Hash didn't match on tc %d", i) + } +} diff --git a/cosmos-sdk-store/internal/proofs/convert.go b/cosmos-sdk-store/internal/proofs/convert.go new file mode 100755 index 000000000..05cd60434 --- /dev/null +++ b/cosmos-sdk-store/internal/proofs/convert.go @@ -0,0 +1,98 @@ +package proofs + +import ( + "fmt" + "math/bits" + + cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto" + ics23 "github.com/cosmos/ics23/go" +) + +// ConvertExistenceProof will convert the given proof into a valid +// existence proof, if that's what it is. +// +// This is the simplest case of the range proof and we will focus on +// demoing compatibility here +func ConvertExistenceProof(p *cmtprotocrypto.Proof, key, value []byte) (*ics23.ExistenceProof, error) { + path, err := convertInnerOps(p) + if err != nil { + return nil, err + } + + proof := &ics23.ExistenceProof{ + Key: key, + Value: value, + Leaf: convertLeafOp(), + Path: path, + } + return proof, nil +} + +// this is adapted from merkle/hash.go:leafHash() +// and merkle/simple_map.go:KVPair.Bytes() +func convertLeafOp() *ics23.LeafOp { + prefix := []byte{0} + + return &ics23.LeafOp{ + Hash: ics23.HashOp_SHA256, + PrehashKey: ics23.HashOp_NO_HASH, + PrehashValue: ics23.HashOp_SHA256, + Length: ics23.LengthOp_VAR_PROTO, + Prefix: prefix, + } +} + +func convertInnerOps(p *cmtprotocrypto.Proof) ([]*ics23.InnerOp, error) { + inners := make([]*ics23.InnerOp, 0, len(p.Aunts)) + path := buildPath(p.Index, p.Total) + + if len(p.Aunts) != len(path) { + return nil, fmt.Errorf("calculated a path different length (%d) than provided by SimpleProof (%d)", len(path), len(p.Aunts)) + } + + for i, aunt := range p.Aunts { + auntRight := path[i] + + // combine with: 0x01 || lefthash || righthash + inner := &ics23.InnerOp{Hash: ics23.HashOp_SHA256} + if auntRight { + inner.Prefix = []byte{1} + inner.Suffix = aunt + } else { + inner.Prefix = append([]byte{1}, aunt...) 
+ } + inners = append(inners, inner) + } + return inners, nil +} + +// buildPath returns a list of steps from leaf to root +// in each step, true means index is left side, false index is right side +// code adapted from merkle/simple_proof.go:computeHashFromAunts +func buildPath(idx, total int64) []bool { + if total < 2 { + return nil + } + numLeft := getSplitPoint(total) + goLeft := idx < numLeft + + // we put goLeft at the end of the array, as we recurse from top to bottom, + // and want the leaf to be first in array, root last + if goLeft { + return append(buildPath(idx, numLeft), goLeft) + } + return append(buildPath(idx-numLeft, total-numLeft), goLeft) +} + +func getSplitPoint(length int64) int64 { + if length < 1 { + panic("Trying to split a tree with size < 1") + } + uLength := uint(length) + bitlen := bits.Len(uLength) + k := int64(1 << uint(bitlen-1)) + if k == length { + k >>= 1 + } + return k +} diff --git a/cosmos-sdk-store/internal/proofs/convert_test.go b/cosmos-sdk-store/internal/proofs/convert_test.go new file mode 100755 index 000000000..19c5a6761 --- /dev/null +++ b/cosmos-sdk-store/internal/proofs/convert_test.go @@ -0,0 +1,105 @@ +package proofs + +import ( + "bytes" + "fmt" + "testing" +) + +func TestLeafOp(t *testing.T) { + proof := GenerateRangeProof(20, Middle) + + converted, err := ConvertExistenceProof(proof.Proof, proof.Key, proof.Value) + if err != nil { + t.Fatal(err) + } + + leaf := converted.GetLeaf() + if leaf == nil { + t.Fatalf("Missing leaf node") + } + + hash, err := leaf.Apply(converted.Key, converted.Value) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(hash, proof.Proof.LeafHash) { + t.Errorf("Calculated: %X\nExpected: %X", hash, proof.Proof.LeafHash) + } +} + +func TestBuildPath(t *testing.T) { + cases := map[string]struct { + idx int64 + total int64 + expected []bool + }{ + "pair left": { + idx: 0, + total: 2, + expected: []bool{true}, + }, + "pair right": { + idx: 1, + total: 2, + expected: []bool{false}, + }, + "power of 2": { + idx: 3, + total: 8, + expected: []bool{false, false, true}, + }, + "size of 7 right most": { + idx: 6, + total: 7, + expected: []bool{false, false}, + }, + "size of 6 right-left (from top)": { + idx: 4, + total: 6, + expected: []bool{true, false}, + }, + "size of 6 left-right-left (from top)": { + idx: 2, + total: 7, + expected: []bool{true, false, true}, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + path := buildPath(tc.idx, tc.total) + if len(path) != len(tc.expected) { + t.Fatalf("Got %v\nExpected %v", path, tc.expected) + } + for i := range path { + if path[i] != tc.expected[i] { + t.Fatalf("Differ at %d\nGot %v\nExpected %v", i, path, tc.expected) + } + } + }) + } +} + +func TestConvertProof(t *testing.T) { + for i := 0; i < 100; i++ { + t.Run(fmt.Sprintf("Run %d", i), func(t *testing.T) { + proof := GenerateRangeProof(57, Left) + + converted, err := ConvertExistenceProof(proof.Proof, proof.Key, proof.Value) + if err != nil { + t.Fatal(err) + } + + calc, err := converted.Calculate() + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(calc, proof.RootHash) { + t.Errorf("Calculated: %X\nExpected: %X", calc, proof.RootHash) + } + }) + } +} diff --git a/cosmos-sdk-store/internal/proofs/create.go b/cosmos-sdk-store/internal/proofs/create.go new file mode 100755 index 000000000..55874d99c --- /dev/null +++ b/cosmos-sdk-store/internal/proofs/create.go @@ -0,0 +1,103 @@ +package proofs + +import ( + "errors" + "sort" + + ics23 "github.com/cosmos/ics23/go" + + sdkmaps 
"cosmossdk.io/store/internal/maps" +) + +var ( + ErrEmptyKey = errors.New("key is empty") + ErrEmptyKeyInData = errors.New("data contains empty key") +) + +/* +CreateMembershipProof will produce a CommitmentProof that the given key (and queries value) exists in the map. +If the key doesn't exist in the tree, this will return an error. +*/ +func CreateMembershipProof(data map[string][]byte, key []byte) (*ics23.CommitmentProof, error) { + if len(key) == 0 { + return nil, ErrEmptyKey + } + exist, err := createExistenceProof(data, key) + if err != nil { + return nil, err + } + proof := &ics23.CommitmentProof{ + Proof: &ics23.CommitmentProof_Exist{ + Exist: exist, + }, + } + return proof, nil +} + +/* +CreateNonMembershipProof will produce a CommitmentProof that the given key doesn't exist in the map. +If the key exists in the tree, this will return an error. +*/ +func CreateNonMembershipProof(data map[string][]byte, key []byte) (*ics23.CommitmentProof, error) { + if len(key) == 0 { + return nil, ErrEmptyKey + } + // ensure this key is not in the store + if _, ok := data[string(key)]; ok { + return nil, errors.New("cannot create non-membership proof if key is in map") + } + + keys := SortedKeys(data) + rightidx := sort.SearchStrings(keys, string(key)) + + var err error + nonexist := &ics23.NonExistenceProof{ + Key: key, + } + + // include left proof unless key is left of entire map + if rightidx >= 1 { + leftkey := keys[rightidx-1] + nonexist.Left, err = createExistenceProof(data, []byte(leftkey)) + if err != nil { + return nil, err + } + } + + // include right proof unless key is right of entire map + if rightidx < len(keys) { + rightkey := keys[rightidx] + nonexist.Right, err = createExistenceProof(data, []byte(rightkey)) + if err != nil { + return nil, err + } + + } + + proof := &ics23.CommitmentProof{ + Proof: &ics23.CommitmentProof_Nonexist{ + Nonexist: nonexist, + }, + } + return proof, nil +} + +func createExistenceProof(data map[string][]byte, key []byte) (*ics23.ExistenceProof, error) { + for k := range data { + if k == "" { + return nil, ErrEmptyKeyInData + } + } + value, ok := data[string(key)] + if !ok { + return nil, errors.New("cannot make existence proof if key is not in map") + } + + _, proofs, _ := sdkmaps.ProofsFromMap(data) + proof := proofs[string(key)] + if proof == nil { + return nil, errors.New("returned no proof for key") + } + + return ConvertExistenceProof(proof, key, value) +} diff --git a/cosmos-sdk-store/internal/proofs/create_test.go b/cosmos-sdk-store/internal/proofs/create_test.go new file mode 100755 index 000000000..16818e657 --- /dev/null +++ b/cosmos-sdk-store/internal/proofs/create_test.go @@ -0,0 +1,125 @@ +package proofs + +import ( + "errors" + "testing" + + ics23 "github.com/cosmos/ics23/go" + "github.com/stretchr/testify/assert" +) + +func TestCreateMembership(t *testing.T) { + cases := map[string]struct { + size int + loc Where + }{ + "small left": {size: 100, loc: Left}, + "small middle": {size: 100, loc: Middle}, + "small right": {size: 100, loc: Right}, + "big left": {size: 5431, loc: Left}, + "big middle": {size: 5431, loc: Middle}, + "big right": {size: 5431, loc: Right}, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + data := BuildMap(tc.size) + allkeys := SortedKeys(data) + key := GetKey(allkeys, tc.loc) + nonKey := GetNonKey(allkeys, tc.loc) + + // error if the key does not exist + proof, err := CreateMembershipProof(data, []byte(nonKey)) + assert.EqualError(t, err, "cannot make existence proof if key is not in map") 
+ assert.Nil(t, proof) + + val := data[key] + proof, err = CreateMembershipProof(data, []byte(key)) + if err != nil { + t.Fatalf("Creating Proof: %+v", err) + } + if proof.GetExist() == nil { + t.Fatal("Unexpected proof format") + } + + root := CalcRoot(data) + err = proof.GetExist().Verify(ics23.TendermintSpec, root, []byte(key), val) + if err != nil { + t.Fatalf("Verifying Proof: %+v", err) + } + + valid := ics23.VerifyMembership(ics23.TendermintSpec, root, proof, []byte(key), val) + if !valid { + t.Fatalf("Membership Proof Invalid") + } + }) + } +} + +func TestCreateNonMembership(t *testing.T) { + cases := map[string]struct { + size int + loc Where + }{ + "small left": {size: 100, loc: Left}, + "small middle": {size: 100, loc: Middle}, + "small right": {size: 100, loc: Right}, + "big left": {size: 5431, loc: Left}, + "big middle": {size: 5431, loc: Middle}, + "big right": {size: 5431, loc: Right}, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + data := BuildMap(tc.size) + allkeys := SortedKeys(data) + nonKey := GetNonKey(allkeys, tc.loc) + key := GetKey(allkeys, tc.loc) + + // error if the key exists + proof, err := CreateNonMembershipProof(data, []byte(key)) + assert.EqualError(t, err, "cannot create non-membership proof if key is in map") + assert.Nil(t, proof) + + proof, err = CreateNonMembershipProof(data, []byte(nonKey)) + if err != nil { + t.Fatalf("Creating Proof: %+v", err) + } + if proof.GetNonexist() == nil { + t.Fatal("Unexpected proof format") + } + + root := CalcRoot(data) + err = proof.GetNonexist().Verify(ics23.TendermintSpec, root, []byte(nonKey)) + if err != nil { + t.Fatalf("Verifying Proof: %+v", err) + } + + valid := ics23.VerifyNonMembership(ics23.TendermintSpec, root, proof, []byte(nonKey)) + if !valid { + t.Fatalf("Non Membership Proof Invalid") + } + }) + } +} + +func TestInvalidKey(t *testing.T) { + tests := []struct { + name string + f func(data map[string][]byte, key []byte) (*ics23.CommitmentProof, error) + data map[string][]byte + key []byte + err error + }{ + {"CreateMembershipProof empty key", CreateMembershipProof, map[string][]byte{"": nil}, []byte(""), ErrEmptyKey}, + {"CreateMembershipProof empty key in data", CreateMembershipProof, map[string][]byte{"": nil, " ": nil}, []byte(" "), ErrEmptyKeyInData}, + {"CreateNonMembershipProof empty key", CreateNonMembershipProof, map[string][]byte{" ": nil}, []byte(""), ErrEmptyKey}, + {"CreateNonMembershipProof empty key in data", CreateNonMembershipProof, map[string][]byte{"": nil}, []byte(" "), ErrEmptyKeyInData}, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + _, err := tc.f(tc.data, tc.key) + assert.True(t, errors.Is(err, tc.err)) + }) + } +} diff --git a/cosmos-sdk-store/internal/proofs/helpers.go b/cosmos-sdk-store/internal/proofs/helpers.go new file mode 100755 index 000000000..59c3bf0a9 --- /dev/null +++ b/cosmos-sdk-store/internal/proofs/helpers.go @@ -0,0 +1,101 @@ +package proofs + +import ( + "sort" + + cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto" + "golang.org/x/exp/maps" + + "cosmossdk.io/math/unsafe" + sdkmaps "cosmossdk.io/store/internal/maps" +) + +// SimpleResult contains a merkle.SimpleProof along with all data needed to build the confio/proof +type SimpleResult struct { + Key []byte + Value []byte + Proof *cmtprotocrypto.Proof + RootHash []byte +} + +// GenerateRangeProof makes a tree of size and returns a range proof for one random element +// +// returns a range proof and the root hash of the tree +func 
GenerateRangeProof(size int, loc Where) *SimpleResult { + data := BuildMap(size) + root, proofs, allkeys := sdkmaps.ProofsFromMap(data) + + key := GetKey(allkeys, loc) + proof := proofs[key] + + res := &SimpleResult{ + Key: []byte(key), + Value: toValue(key), + Proof: proof, + RootHash: root, + } + return res +} + +// Where selects a location for a key - Left, Right, or Middle +type Where int + +const ( + Left Where = iota + Right + Middle +) + +func SortedKeys(data map[string][]byte) []string { + keys := maps.Keys(data) + sort.Strings(keys) + return keys +} + +func CalcRoot(data map[string][]byte) []byte { + root, _, _ := sdkmaps.ProofsFromMap(data) + return root +} + +// GetKey this returns a key, on Left/Right/Middle +func GetKey(allkeys []string, loc Where) string { + if loc == Left { + return allkeys[0] + } + if loc == Right { + return allkeys[len(allkeys)-1] + } + // select a random index between 1 and allkeys-2 + idx := unsafe.NewRand().Int()%(len(allkeys)-2) + 1 + return allkeys[idx] +} + +// GetNonKey returns a missing key - Left of all, Right of all, or in the Middle +func GetNonKey(allkeys []string, loc Where) string { + if loc == Left { + return string([]byte{1, 1, 1, 1}) + } + if loc == Right { + return string([]byte{0xff, 0xff, 0xff, 0xff}) + } + // otherwise, next to an existing key (copy before mod) + key := GetKey(allkeys, loc) + key = key[:len(key)-2] + string([]byte{255, 255}) + return key +} + +func toValue(key string) []byte { + return []byte("value_for_" + key) +} + +// BuildMap creates random key/values and stores in a map, +// returns a list of all keys in sorted order +func BuildMap(size int) map[string][]byte { + data := make(map[string][]byte) + // insert lots of info and store the bytes + for i := 0; i < size; i++ { + key := unsafe.Str(20) + data[key] = toValue(key) + } + return data +} diff --git a/cosmos-sdk-store/internal/tree/hash.go b/cosmos-sdk-store/internal/tree/hash.go new file mode 100755 index 000000000..a4facd93e --- /dev/null +++ b/cosmos-sdk-store/internal/tree/hash.go @@ -0,0 +1,68 @@ +package tree + +import ( + "crypto/sha256" + "hash" + "math/bits" +) + +var ( + leafPrefix = []byte{0} + innerPrefix = []byte{1} +) + +// HashFromByteSlices computes a Merkle tree where the leaves are the byte slice, +// in the provided order. It follows RFC-6962. 
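+// For example (illustrative): with three items a, b, c the root is
+//
+//	inner(inner(leaf(a), leaf(b)), leaf(c))
+//
+// where leaf(x) = sha256(0x00 || x) and inner(l, r) = sha256(0x01 || l || r).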
+func HashFromByteSlices(items [][]byte) []byte { + return hashFromByteSlices(sha256.New(), items) +} + +func hashFromByteSlices(sha hash.Hash, items [][]byte) []byte { + switch len(items) { + case 0: + return emptyHash() + case 1: + return leafHashOpt(sha, items[0]) + default: + k := getSplitPoint(int64(len(items))) + left := hashFromByteSlices(sha, items[:k]) + right := hashFromByteSlices(sha, items[k:]) + return innerHashOpt(sha, left, right) + } +} + +// returns tmhash(0x00 || leaf) +func leafHashOpt(s hash.Hash, leaf []byte) []byte { + s.Reset() + s.Write(leafPrefix) + s.Write(leaf) + return s.Sum(nil) +} + +func innerHashOpt(s hash.Hash, left, right []byte) []byte { + s.Reset() + s.Write(innerPrefix) + s.Write(left) + s.Write(right) + return s.Sum(nil) +} + +// returns tmhash() +func emptyHash() []byte { + h := sha256.Sum256([]byte{}) + return h[:] +} + +// getSplitPoint returns the largest power of 2 less than length +func getSplitPoint(length int64) int64 { + if length < 1 { + panic("Trying to split a tree with size < 1") + } + uLength := uint(length) + bitlen := bits.Len(uLength) + k := int64(1 << uint(bitlen-1)) + if k == length { + k >>= 1 + } + return k +} diff --git a/cosmos-sdk-store/listenkv/store.go b/cosmos-sdk-store/listenkv/store.go new file mode 100755 index 000000000..b08a6e395 --- /dev/null +++ b/cosmos-sdk-store/listenkv/store.go @@ -0,0 +1,142 @@ +package listenkv + +import ( + "io" + + "cosmossdk.io/store/types" +) + +var _ types.KVStore = &Store{} + +// Store implements the KVStore interface with listening enabled. +// Operations are traced on each core KVStore call and written to any of the +// underlying listeners with the proper key and operation permissions +type Store struct { + parent types.KVStore + listener *types.MemoryListener + parentStoreKey types.StoreKey +} + +// NewStore returns a reference to a new traceKVStore given a parent +// KVStore implementation and a buffered writer. +func NewStore(parent types.KVStore, parentStoreKey types.StoreKey, listener *types.MemoryListener) *Store { + return &Store{parent: parent, listener: listener, parentStoreKey: parentStoreKey} +} + +// Get implements the KVStore interface. It traces a read operation and +// delegates a Get call to the parent KVStore. +func (s *Store) Get(key []byte) []byte { + value := s.parent.Get(key) + return value +} + +// Set implements the KVStore interface. It traces a write operation and +// delegates the Set call to the parent KVStore. +func (s *Store) Set(key, value []byte) { + types.AssertValidKey(key) + s.parent.Set(key, value) + s.listener.OnWrite(s.parentStoreKey, key, value, false) +} + +// Delete implements the KVStore interface. It traces a write operation and +// delegates the Delete call to the parent KVStore. +func (s *Store) Delete(key []byte) { + s.parent.Delete(key) + s.listener.OnWrite(s.parentStoreKey, key, nil, true) +} + +// Has implements the KVStore interface. It delegates the Has call to the +// parent KVStore. +func (s *Store) Has(key []byte) bool { + return s.parent.Has(key) +} + +// Iterator implements the KVStore interface. It delegates the Iterator call +// the to the parent KVStore. +func (s *Store) Iterator(start, end []byte) types.Iterator { + return s.iterator(start, end, true) +} + +// ReverseIterator implements the KVStore interface. It delegates the +// ReverseIterator call the to the parent KVStore. 
+func (s *Store) ReverseIterator(start, end []byte) types.Iterator { + return s.iterator(start, end, false) +} + +// iterator facilitates iteration over a KVStore. It delegates the necessary +// calls to it's parent KVStore. +func (s *Store) iterator(start, end []byte, ascending bool) types.Iterator { + var parent types.Iterator + + if ascending { + parent = s.parent.Iterator(start, end) + } else { + parent = s.parent.ReverseIterator(start, end) + } + + return newTraceIterator(parent, s.listener) +} + +type listenIterator struct { + parent types.Iterator + listener *types.MemoryListener +} + +func newTraceIterator(parent types.Iterator, listener *types.MemoryListener) types.Iterator { + return &listenIterator{parent: parent, listener: listener} +} + +// Domain implements the Iterator interface. +func (li *listenIterator) Domain() (start, end []byte) { + return li.parent.Domain() +} + +// Valid implements the Iterator interface. +func (li *listenIterator) Valid() bool { + return li.parent.Valid() +} + +// Next implements the Iterator interface. +func (li *listenIterator) Next() { + li.parent.Next() +} + +// Key implements the Iterator interface. +func (li *listenIterator) Key() []byte { + key := li.parent.Key() + return key +} + +// Value implements the Iterator interface. +func (li *listenIterator) Value() []byte { + value := li.parent.Value() + return value +} + +// Close implements the Iterator interface. +func (li *listenIterator) Close() error { + return li.parent.Close() +} + +// Error delegates the Error call to the parent iterator. +func (li *listenIterator) Error() error { + return li.parent.Error() +} + +// GetStoreType implements the KVStore interface. It returns the underlying +// KVStore type. +func (s *Store) GetStoreType() types.StoreType { + return s.parent.GetStoreType() +} + +// CacheWrap implements the KVStore interface. It panics as a Store +// cannot be cache wrapped. +func (s *Store) CacheWrap() types.CacheWrap { + panic("cannot CacheWrap a ListenKVStore") +} + +// CacheWrapWithTrace implements the KVStore interface. It panics as a +// Store cannot be cache wrapped. 
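+// (Presumably a cache-wrapped copy could buffer writes and flush them past the
+// listener, which is why wrapping is rejected outright.)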
+func (s *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap { + panic("cannot CacheWrapWithTrace a ListenKVStore") +} diff --git a/cosmos-sdk-store/listenkv/store_test.go b/cosmos-sdk-store/listenkv/store_test.go new file mode 100755 index 000000000..51b88912c --- /dev/null +++ b/cosmos-sdk-store/listenkv/store_test.go @@ -0,0 +1,281 @@ +package listenkv_test + +import ( + "fmt" + "testing" + + dbm "github.com/cosmos/cosmos-db" + "github.com/stretchr/testify/require" + + "cosmossdk.io/store/dbadapter" + "cosmossdk.io/store/internal/kv" + "cosmossdk.io/store/listenkv" + "cosmossdk.io/store/prefix" + "cosmossdk.io/store/types" +) + +func bz(s string) []byte { return []byte(s) } + +func keyFmt(i int) []byte { return bz(fmt.Sprintf("key%0.8d", i)) } +func valFmt(i int) []byte { return bz(fmt.Sprintf("value%0.8d", i)) } + +var kvPairs = []kv.Pair{ + {Key: keyFmt(1), Value: valFmt(1)}, + {Key: keyFmt(2), Value: valFmt(2)}, + {Key: keyFmt(3), Value: valFmt(3)}, +} + +var testStoreKey = types.NewKVStoreKey("listen_test") + +func newListenKVStore(listener *types.MemoryListener) *listenkv.Store { + store := newEmptyListenKVStore(listener) + + for _, kvPair := range kvPairs { + store.Set(kvPair.Key, kvPair.Value) + } + + return store +} + +func newEmptyListenKVStore(listener *types.MemoryListener) *listenkv.Store { + memDB := dbadapter.Store{DB: dbm.NewMemDB()} + + return listenkv.NewStore(memDB, testStoreKey, listener) +} + +func TestListenKVStoreGet(t *testing.T) { + testCases := []struct { + key []byte + expectedValue []byte + }{ + { + key: kvPairs[0].Key, + expectedValue: kvPairs[0].Value, + }, + { + key: []byte("does-not-exist"), + expectedValue: nil, + }, + } + + for _, tc := range testCases { + listener := types.NewMemoryListener() + + store := newListenKVStore(listener) + value := store.Get(tc.key) + + require.Equal(t, tc.expectedValue, value) + } +} + +func TestListenKVStoreSet(t *testing.T) { + testCases := []struct { + key []byte + value []byte + expectedOut *types.StoreKVPair + }{ + { + key: kvPairs[0].Key, + value: kvPairs[0].Value, + expectedOut: &types.StoreKVPair{ + Key: kvPairs[0].Key, + Value: kvPairs[0].Value, + StoreKey: testStoreKey.Name(), + Delete: false, + }, + }, + { + key: kvPairs[1].Key, + value: kvPairs[1].Value, + expectedOut: &types.StoreKVPair{ + Key: kvPairs[1].Key, + Value: kvPairs[1].Value, + StoreKey: testStoreKey.Name(), + Delete: false, + }, + }, + { + key: kvPairs[2].Key, + value: kvPairs[2].Value, + expectedOut: &types.StoreKVPair{ + Key: kvPairs[2].Key, + Value: kvPairs[2].Value, + StoreKey: testStoreKey.Name(), + Delete: false, + }, + }, + } + + for _, tc := range testCases { + listener := types.NewMemoryListener() + + store := newEmptyListenKVStore(listener) + store.Set(tc.key, tc.value) + storeKVPair := listener.PopStateCache()[0] + + require.Equal(t, tc.expectedOut, storeKVPair) + } + + listener := types.NewMemoryListener() + store := newEmptyListenKVStore(listener) + require.Panics(t, func() { store.Set([]byte(""), []byte("value")) }, "setting an empty key should panic") + require.Panics(t, func() { store.Set(nil, []byte("value")) }, "setting a nil key should panic") +} + +func TestListenKVStoreDelete(t *testing.T) { + testCases := []struct { + key []byte + expectedOut *types.StoreKVPair + }{ + { + key: kvPairs[0].Key, + expectedOut: &types.StoreKVPair{ + Key: kvPairs[0].Key, + Value: nil, + StoreKey: testStoreKey.Name(), + Delete: true, + }, + }, + } + + for _, tc := range testCases { + listener := types.NewMemoryListener() + + 
store := newListenKVStore(listener) + store.Delete(tc.key) + cache := listener.PopStateCache() + require.NotEmpty(t, cache) + storeKVPair := cache[len(cache)-1] + + require.Equal(t, tc.expectedOut, storeKVPair) + } +} + +func TestListenKVStoreHas(t *testing.T) { + testCases := []struct { + key []byte + expected bool + }{ + { + key: kvPairs[0].Key, + expected: true, + }, + } + + for _, tc := range testCases { + listener := types.NewMemoryListener() + + store := newListenKVStore(listener) + ok := store.Has(tc.key) + + require.Equal(t, tc.expected, ok) + } +} + +func TestTestListenKVStoreIterator(t *testing.T) { + listener := types.NewMemoryListener() + + store := newListenKVStore(listener) + iterator := store.Iterator(nil, nil) + + s, e := iterator.Domain() + require.Equal(t, []byte(nil), s) + require.Equal(t, []byte(nil), e) + + testCases := []struct { + expectedKey []byte + expectedValue []byte + }{ + { + expectedKey: kvPairs[0].Key, + expectedValue: kvPairs[0].Value, + }, + { + expectedKey: kvPairs[1].Key, + expectedValue: kvPairs[1].Value, + }, + { + expectedKey: kvPairs[2].Key, + expectedValue: kvPairs[2].Value, + }, + } + + for _, tc := range testCases { + ka := iterator.Key() + require.Equal(t, tc.expectedKey, ka) + + va := iterator.Value() + require.Equal(t, tc.expectedValue, va) + + iterator.Next() + } + + require.False(t, iterator.Valid()) + require.Panics(t, iterator.Next) + require.NoError(t, iterator.Close()) +} + +func TestTestListenKVStoreReverseIterator(t *testing.T) { + listener := types.NewMemoryListener() + + store := newListenKVStore(listener) + iterator := store.ReverseIterator(nil, nil) + + s, e := iterator.Domain() + require.Equal(t, []byte(nil), s) + require.Equal(t, []byte(nil), e) + + testCases := []struct { + expectedKey []byte + expectedValue []byte + }{ + { + expectedKey: kvPairs[2].Key, + expectedValue: kvPairs[2].Value, + }, + { + expectedKey: kvPairs[1].Key, + expectedValue: kvPairs[1].Value, + }, + { + expectedKey: kvPairs[0].Key, + expectedValue: kvPairs[0].Value, + }, + } + + for _, tc := range testCases { + ka := iterator.Key() + require.Equal(t, tc.expectedKey, ka) + + va := iterator.Value() + require.Equal(t, tc.expectedValue, va) + + iterator.Next() + } + + require.False(t, iterator.Valid()) + require.Panics(t, iterator.Next) + require.NoError(t, iterator.Close()) +} + +func TestListenKVStorePrefix(t *testing.T) { + store := newEmptyListenKVStore(nil) + pStore := prefix.NewStore(store, []byte("listen_prefix")) + require.IsType(t, prefix.Store{}, pStore) +} + +func TestListenKVStoreGetStoreType(t *testing.T) { + memDB := dbadapter.Store{DB: dbm.NewMemDB()} + store := newEmptyListenKVStore(nil) + require.Equal(t, memDB.GetStoreType(), store.GetStoreType()) +} + +func TestListenKVStoreCacheWrap(t *testing.T) { + store := newEmptyListenKVStore(nil) + require.Panics(t, func() { store.CacheWrap() }) +} + +func TestListenKVStoreCacheWrapWithTrace(t *testing.T) { + store := newEmptyListenKVStore(nil) + require.Panics(t, func() { store.CacheWrapWithTrace(nil, nil) }) +} diff --git a/cosmos-sdk-store/mem/mem_test.go b/cosmos-sdk-store/mem/mem_test.go new file mode 100755 index 000000000..6595b45dc --- /dev/null +++ b/cosmos-sdk-store/mem/mem_test.go @@ -0,0 +1,53 @@ +package mem_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "cosmossdk.io/store/cachekv" + "cosmossdk.io/store/mem" + pruningtypes "cosmossdk.io/store/pruning/types" + "cosmossdk.io/store/types" +) + +func TestStore(t *testing.T) { + db := mem.NewStore() + require.Equal(t, 
types.StoreTypeMemory, db.GetStoreType())
+
+	key, value := []byte("key"), []byte("value")
+
+	require.Nil(t, db.Get(key))
+	db.Set(key, value)
+	require.Equal(t, value, db.Get(key))
+
+	newValue := []byte("newValue")
+	db.Set(key, newValue)
+	require.Equal(t, newValue, db.Get(key))
+
+	db.Delete(key)
+	require.Nil(t, db.Get(key))
+
+	cacheWrapper := db.CacheWrap()
+	require.IsType(t, &cachekv.Store{}, cacheWrapper)
+
+	cacheWrappedWithTrace := db.CacheWrapWithTrace(nil, nil)
+	require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace)
+}
+
+func TestCommit(t *testing.T) {
+	db := mem.NewStore()
+	key, value := []byte("key"), []byte("value")
+
+	db.Set(key, value)
+	id := db.Commit()
+	require.True(t, id.IsZero())
+	require.True(t, db.LastCommitID().IsZero())
+	require.Equal(t, value, db.Get(key))
+}
+
+func TestStorePruningOptions(t *testing.T) {
+	// this is a no-op
+	db := mem.NewStore()
+	require.Equal(t, pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined), db.GetPruning())
+}
diff --git a/cosmos-sdk-store/mem/store.go b/cosmos-sdk-store/mem/store.go
new file mode 100755
index 000000000..b819d7536
--- /dev/null
+++ b/cosmos-sdk-store/mem/store.go
@@ -0,0 +1,62 @@
+package mem
+
+import (
+	"io"
+
+	dbm "github.com/cosmos/cosmos-db"
+
+	"cosmossdk.io/store/cachekv"
+	"cosmossdk.io/store/dbadapter"
+	pruningtypes "cosmossdk.io/store/pruning/types"
+	"cosmossdk.io/store/tracekv"
+	"cosmossdk.io/store/types"
+)
+
+var (
+	_ types.KVStore   = (*Store)(nil)
+	_ types.Committer = (*Store)(nil)
+)
+
+// Store implements an in-memory-only KVStore. Entries are persisted between
+// commits and thus between blocks. State in a memory store is not committed as
+// part of the app state; each node maintains it privately.
+type Store struct {
+	dbadapter.Store
+}
+
+func NewStore() *Store {
+	return NewStoreWithDB(dbm.NewMemDB())
+}
+
+func NewStoreWithDB(db *dbm.MemDB) *Store { //nolint: interfacer // Concrete return type is fine here.
+	return &Store{Store: dbadapter.Store{DB: db}}
+}
+
+// GetStoreType returns the Store's type.
+func (s Store) GetStoreType() types.StoreType {
+	return types.StoreTypeMemory
+}
+
+// CacheWrap branches the underlying store.
+func (s Store) CacheWrap() types.CacheWrap {
+	return cachekv.NewStore(s)
+}
+
+// CacheWrapWithTrace implements KVStore.
+func (s Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap {
+	return cachekv.NewStore(tracekv.NewStore(s, w, tc))
+}
+
+// Commit performs a no-op, as entries are persistent between commits.
+func (s *Store) Commit() (id types.CommitID) { return }
+
+func (s *Store) SetPruning(pruning pruningtypes.PruningOptions) {}
+
+// GetPruning is a no-op, as pruning options cannot be directly set on this store.
+// They must be set on the root commit multi-store.
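To make the memory store's contract concrete (state survives `Commit`, but `Commit` never produces a real commit ID), here is a minimal, self-contained sketch using only the API shown above; it mirrors what `TestCommit` asserts:

```go
package main

import (
	"fmt"

	"cosmossdk.io/store/mem"
)

func main() {
	db := mem.NewStore()
	db.Set([]byte("key"), []byte("value"))

	// Commit is a no-op for this store type: the returned CommitID is zero.
	id := db.Commit()
	fmt.Println(id.IsZero()) // true

	// The entry itself survives, because persistence here is purely
	// in-process memory, independent of the commit flow.
	fmt.Println(string(db.Get([]byte("key")))) // value
}
```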
+func (s *Store) GetPruning() pruningtypes.PruningOptions { + return pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined) +} + +func (s Store) LastCommitID() (id types.CommitID) { return } + +func (s Store) WorkingHash() (hash []byte) { return } diff --git a/cosmos-sdk-store/metrics/telemetry.go b/cosmos-sdk-store/metrics/telemetry.go new file mode 100755 index 000000000..d5bc55c45 --- /dev/null +++ b/cosmos-sdk-store/metrics/telemetry.go @@ -0,0 +1,56 @@ +package metrics + +import ( + "time" + + "github.com/hashicorp/go-metrics" +) + +// StoreMetrics defines the set of metrics for the store package +type StoreMetrics interface { + MeasureSince(keys ...string) +} + +var ( + _ StoreMetrics = Metrics{} + _ StoreMetrics = NoOpMetrics{} +) + +// Metrics defines the metrics wrapper for the store package +type Metrics struct { + Labels []metrics.Label +} + +// NewMetrics returns a new instance of the Metrics with labels set by the node operator +func NewMetrics(labels [][]string) Metrics { + gatherer := Metrics{} + + if numGlobalLables := len(labels); numGlobalLables > 0 { + parsedGlobalLabels := make([]metrics.Label, numGlobalLables) + for i, gl := range labels { + parsedGlobalLabels[i] = metrics.Label{Name: gl[0], Value: gl[1]} + } + + gatherer.Labels = parsedGlobalLabels + } + + return gatherer +} + +// MeasureSince provides a wrapper functionality for emitting a time measure +// metric with global labels (if any). +func (m Metrics) MeasureSince(keys ...string) { + start := time.Now() + metrics.MeasureSinceWithLabels(keys, start.UTC(), m.Labels) +} + +// NoOpMetrics is a no-op implementation of the StoreMetrics interface +type NoOpMetrics struct{} + +// NewNoOpMetrics returns a new instance of the NoOpMetrics +func NewNoOpMetrics() NoOpMetrics { + return NoOpMetrics{} +} + +// MeasureSince is a no-op implementation of the StoreMetrics interface to avoid time.Now() calls +func (m NoOpMetrics) MeasureSince(keys ...string) {} diff --git a/cosmos-sdk-store/mock/cosmos_cosmos_db_DB.go b/cosmos-sdk-store/mock/cosmos_cosmos_db_DB.go new file mode 100755 index 000000000..4a79ee795 --- /dev/null +++ b/cosmos-sdk-store/mock/cosmos_cosmos_db_DB.go @@ -0,0 +1,221 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/cosmos/cosmos-db (interfaces: DB) + +// Package mock is a generated GoMock package. +package mock + +import ( + reflect "reflect" + + db "github.com/cosmos/cosmos-db" + gomock "github.com/golang/mock/gomock" +) + +// MockDB is a mock of DB interface. +type MockDB struct { + ctrl *gomock.Controller + recorder *MockDBMockRecorder +} + +// MockDBMockRecorder is the mock recorder for MockDB. +type MockDBMockRecorder struct { + mock *MockDB +} + +// NewMockDB creates a new mock instance. +func NewMockDB(ctrl *gomock.Controller) *MockDB { + mock := &MockDB{ctrl: ctrl} + mock.recorder = &MockDBMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDB) EXPECT() *MockDBMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockDB) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockDBMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockDB)(nil).Close)) +} + +// Delete mocks base method. 
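Before continuing with the generated mock, a minimal sketch of how the metrics gatherer from `metrics/telemetry.go` above is meant to be wired (assumed usage; the label values are made up for illustration):

```go
package main

import "cosmossdk.io/store/metrics"

func main() {
	// Build a gatherer with one global label; each element of the outer
	// slice is a {name, value} pair.
	m := metrics.NewMetrics([][]string{{"chain_id", "test-chain"}})

	// Emit a measure metric keyed by the joined parts ("store", "commit"),
	// tagged with the global labels above.
	m.MeasureSince("store", "commit")

	// Callers that don't want telemetry can use the no-op implementation,
	// which satisfies the same StoreMetrics interface.
	var gatherer metrics.StoreMetrics = metrics.NewNoOpMetrics()
	gatherer.MeasureSince("store", "query")
}
```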
+func (m *MockDB) Delete(arg0 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockDBMockRecorder) Delete(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockDB)(nil).Delete), arg0) +} + +// DeleteSync mocks base method. +func (m *MockDB) DeleteSync(arg0 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteSync", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteSync indicates an expected call of DeleteSync. +func (mr *MockDBMockRecorder) DeleteSync(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSync", reflect.TypeOf((*MockDB)(nil).DeleteSync), arg0) +} + +// Get mocks base method. +func (m *MockDB) Get(arg0 []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", arg0) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockDBMockRecorder) Get(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockDB)(nil).Get), arg0) +} + +// Has mocks base method. +func (m *MockDB) Has(arg0 []byte) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Has", arg0) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Has indicates an expected call of Has. +func (mr *MockDBMockRecorder) Has(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockDB)(nil).Has), arg0) +} + +// Iterator mocks base method. +func (m *MockDB) Iterator(arg0, arg1 []byte) (db.Iterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Iterator", arg0, arg1) + ret0, _ := ret[0].(db.Iterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Iterator indicates an expected call of Iterator. +func (mr *MockDBMockRecorder) Iterator(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iterator", reflect.TypeOf((*MockDB)(nil).Iterator), arg0, arg1) +} + +// NewBatch mocks base method. +func (m *MockDB) NewBatch() db.Batch { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewBatch") + ret0, _ := ret[0].(db.Batch) + return ret0 +} + +// NewBatch indicates an expected call of NewBatch. +func (mr *MockDBMockRecorder) NewBatch() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBatch", reflect.TypeOf((*MockDB)(nil).NewBatch)) +} + +// NewBatchWithSize mocks base method. +func (m *MockDB) NewBatchWithSize(arg0 int) db.Batch { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewBatchWithSize", arg0) + ret0, _ := ret[0].(db.Batch) + return ret0 +} + +// NewBatchWithSize indicates an expected call of NewBatchWithSize. +func (mr *MockDBMockRecorder) NewBatchWithSize(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBatchWithSize", reflect.TypeOf((*MockDB)(nil).NewBatchWithSize), arg0) +} + +// Print mocks base method. +func (m *MockDB) Print() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Print") + ret0, _ := ret[0].(error) + return ret0 +} + +// Print indicates an expected call of Print. 
+func (mr *MockDBMockRecorder) Print() *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Print", reflect.TypeOf((*MockDB)(nil).Print))
+}
+
+// ReverseIterator mocks base method.
+func (m *MockDB) ReverseIterator(arg0, arg1 []byte) (db.Iterator, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ReverseIterator", arg0, arg1)
+	ret0, _ := ret[0].(db.Iterator)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ReverseIterator indicates an expected call of ReverseIterator.
+func (mr *MockDBMockRecorder) ReverseIterator(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReverseIterator", reflect.TypeOf((*MockDB)(nil).ReverseIterator), arg0, arg1)
+}
+
+// Set mocks base method.
+func (m *MockDB) Set(arg0, arg1 []byte) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Set", arg0, arg1)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// Set indicates an expected call of Set.
+func (mr *MockDBMockRecorder) Set(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockDB)(nil).Set), arg0, arg1)
+}
+
+// SetSync mocks base method.
+func (m *MockDB) SetSync(arg0, arg1 []byte) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "SetSync", arg0, arg1)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// SetSync indicates an expected call of SetSync.
+func (mr *MockDBMockRecorder) SetSync(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSync", reflect.TypeOf((*MockDB)(nil).SetSync), arg0, arg1)
+}
+
+// Stats mocks base method.
+func (m *MockDB) Stats() map[string]string {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Stats")
+	ret0, _ := ret[0].(map[string]string)
+	return ret0
+}
+
+// Stats indicates an expected call of Stats.
+func (mr *MockDBMockRecorder) Stats() *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stats", reflect.TypeOf((*MockDB)(nil).Stats))
+}
diff --git a/cosmos-sdk-store/prefix/store.go b/cosmos-sdk-store/prefix/store.go
new file mode 100755
index 000000000..32b9e8247
--- /dev/null
+++ b/cosmos-sdk-store/prefix/store.go
@@ -0,0 +1,207 @@
+package prefix
+
+import (
+	"bytes"
+	"errors"
+	"io"
+
+	"cosmossdk.io/store/cachekv"
+	"cosmossdk.io/store/tracekv"
+	"cosmossdk.io/store/types"
+)
+
+var _ types.KVStore = Store{}
+
+// Store is similar to cometbft/cometbft/libs/db/prefix_db;
+// both give access only to a limited subset of the store,
+// for convenience or safety.
+type Store struct {
+	parent types.KVStore
+	prefix []byte
+}
+
+func NewStore(parent types.KVStore, prefix []byte) Store {
+	return Store{
+		parent: parent,
+		prefix: prefix,
+	}
+}
+
+func cloneAppend(bz, tail []byte) (res []byte) {
+	res = make([]byte, len(bz)+len(tail))
+	copy(res, bz)
+	copy(res[len(bz):], tail)
+	return
+}
+
+func (s Store) key(key []byte) (res []byte) {
+	if key == nil {
+		panic("nil key on Store")
+	}
+	res = cloneAppend(s.prefix, key)
+	return
+}
+
+// Implements Store
+func (s Store) GetStoreType() types.StoreType {
+	return s.parent.GetStoreType()
+}
+
+// Implements CacheWrap
+func (s Store) CacheWrap() types.CacheWrap {
+	return cachekv.NewStore(s)
+}
+
+// CacheWrapWithTrace implements the KVStore interface.
+func (s Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap {
+	return cachekv.NewStore(tracekv.NewStore(s, w, tc))
+}
+
+// Implements KVStore
+func (s Store) Get(key []byte) []byte {
+	res := s.parent.Get(s.key(key))
+	return res
+}
+
+// Implements KVStore
+func (s Store) Has(key []byte) bool {
+	return s.parent.Has(s.key(key))
+}
+
+// Implements KVStore
+func (s Store) Set(key, value []byte) {
+	types.AssertValidKey(key)
+	types.AssertValidValue(value)
+	s.parent.Set(s.key(key), value)
+}
+
+// Implements KVStore
+func (s Store) Delete(key []byte) {
+	s.parent.Delete(s.key(key))
+}
+
+// Implements KVStore
+// Check https://github.com/cometbft/cometbft/blob/master/libs/db/prefix_db.go#L106
+func (s Store) Iterator(start, end []byte) types.Iterator {
+	newstart := cloneAppend(s.prefix, start)
+
+	var newend []byte
+	if end == nil {
+		newend = cpIncr(s.prefix)
+	} else {
+		newend = cloneAppend(s.prefix, end)
+	}
+
+	iter := s.parent.Iterator(newstart, newend)
+
+	return newPrefixIterator(s.prefix, start, end, iter)
+}
+
+// ReverseIterator implements KVStore
+// Check https://github.com/cometbft/cometbft/blob/master/libs/db/prefix_db.go#L129
+func (s Store) ReverseIterator(start, end []byte) types.Iterator {
+	newstart := cloneAppend(s.prefix, start)
+
+	var newend []byte
+	if end == nil {
+		newend = cpIncr(s.prefix)
+	} else {
+		newend = cloneAppend(s.prefix, end)
+	}
+
+	iter := s.parent.ReverseIterator(newstart, newend)
+
+	return newPrefixIterator(s.prefix, start, end, iter)
+}
+
+var _ types.Iterator = (*prefixIterator)(nil)
+
+type prefixIterator struct {
+	prefix []byte
+	start  []byte
+	end    []byte
+	iter   types.Iterator
+	valid  bool
+}
+
+func newPrefixIterator(prefix, start, end []byte, parent types.Iterator) *prefixIterator {
+	return &prefixIterator{
+		prefix: prefix,
+		start:  start,
+		end:    end,
+		iter:   parent,
+		valid:  parent.Valid() && bytes.HasPrefix(parent.Key(), prefix),
+	}
+}
+
+// Implements Iterator
+func (pi *prefixIterator) Domain() ([]byte, []byte) {
+	return pi.start, pi.end
+}
+
+// Implements Iterator
+func (pi *prefixIterator) Valid() bool {
+	return pi.valid && pi.iter.Valid()
+}
+
+// Implements Iterator
+func (pi *prefixIterator) Next() {
+	if !pi.valid {
+		panic("prefixIterator invalid, cannot call Next()")
+	}
+
+	if pi.iter.Next(); !pi.iter.Valid() || !bytes.HasPrefix(pi.iter.Key(), pi.prefix) {
+		// TODO: shouldn't pi be set to nil instead?
+		pi.valid = false
+	}
+}
+
+// Implements Iterator
+func (pi *prefixIterator) Key() (key []byte) {
+	if !pi.valid {
+		panic("prefixIterator invalid, cannot call Key()")
+	}
+
+	key = pi.iter.Key()
+	key = stripPrefix(key, pi.prefix)
+
+	return
+}
+
+// Implements Iterator
+func (pi *prefixIterator) Value() []byte {
+	if !pi.valid {
+		panic("prefixIterator invalid, cannot call Value()")
+	}
+
+	return pi.iter.Value()
+}
+
+// Implements Iterator
+func (pi *prefixIterator) Close() error {
+	return pi.iter.Close()
+}
+
+// Error returns an error if the prefixIterator is invalid, as defined by the
+// Valid method.
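To make the key translation concrete, a small self-contained sketch (using only types from this patch; the keys are illustrative) showing both the namespacing and the nil-end iterator domain:

```go
package main

import (
	"fmt"

	dbm "github.com/cosmos/cosmos-db"

	"cosmossdk.io/store/dbadapter"
	"cosmossdk.io/store/prefix"
)

func main() {
	parent := dbadapter.Store{DB: dbm.NewMemDB()}
	pstore := prefix.NewStore(parent, []byte("key"))

	// Writes through the prefix store land in the parent under prefix+key...
	pstore.Set([]byte("1"), []byte("value1"))
	fmt.Println(string(parent.Get([]byte("key1")))) // "value1"

	// ...and iteration translates the domain: a nil end becomes
	// PrefixEndBytes(prefix) on the parent, while yielded keys come back
	// with the prefix stripped.
	it := pstore.Iterator(nil, nil)
	defer it.Close()
	for ; it.Valid(); it.Next() {
		fmt.Printf("%q -> %q\n", it.Key(), it.Value()) // "1" -> "value1"
	}
}
```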
+func (pi *prefixIterator) Error() error { + if !pi.Valid() { + return errors.New("invalid prefixIterator") + } + + return nil +} + +// copied from github.com/cometbft/cometbft/libs/db/prefix_db.go +func stripPrefix(key, prefix []byte) []byte { + if len(key) < len(prefix) || !bytes.Equal(key[:len(prefix)], prefix) { + panic("should not happen") + } + + return key[len(prefix):] +} + +// wrapping types.PrefixEndBytes +func cpIncr(bz []byte) []byte { + return types.PrefixEndBytes(bz) +} diff --git a/cosmos-sdk-store/prefix/store_test.go b/cosmos-sdk-store/prefix/store_test.go new file mode 100755 index 000000000..738835770 --- /dev/null +++ b/cosmos-sdk-store/prefix/store_test.go @@ -0,0 +1,451 @@ +package prefix + +import ( + "crypto/rand" + "testing" + + dbm "github.com/cosmos/cosmos-db" + tiavl "github.com/cosmos/iavl" + "github.com/stretchr/testify/require" + + "cosmossdk.io/log" + "cosmossdk.io/store/cachekv" + "cosmossdk.io/store/dbadapter" + "cosmossdk.io/store/gaskv" + "cosmossdk.io/store/iavl" + "cosmossdk.io/store/types" + "cosmossdk.io/store/wrapper" +) + +// copied from iavl/store_test.go +var ( + cacheSize = 100 +) + +func bz(s string) []byte { return []byte(s) } + +type kvpair struct { + key []byte + value []byte +} + +func genRandomKVPairs(t *testing.T) []kvpair { + t.Helper() + kvps := make([]kvpair, 20) + + for i := 0; i < 20; i++ { + kvps[i].key = make([]byte, 32) + _, err := rand.Read(kvps[i].key) + require.NoError(t, err) + kvps[i].value = make([]byte, 32) + _, err = rand.Read(kvps[i].value) + require.NoError(t, err) + } + + return kvps +} + +func setRandomKVPairs(t *testing.T, store types.KVStore) []kvpair { + t.Helper() + kvps := genRandomKVPairs(t) + for _, kvp := range kvps { + store.Set(kvp.key, kvp.value) + } + return kvps +} + +func testPrefixStore(t *testing.T, baseStore types.KVStore, prefix []byte) { + t.Helper() + prefixStore := NewStore(baseStore, prefix) + prefixPrefixStore := NewStore(prefixStore, []byte("prefix")) + + require.Panics(t, func() { prefixStore.Get(nil) }) + require.Panics(t, func() { prefixStore.Set(nil, []byte{}) }) + + kvps := setRandomKVPairs(t, prefixPrefixStore) + + for i := 0; i < 20; i++ { + key := kvps[i].key + value := kvps[i].value + require.True(t, prefixPrefixStore.Has(key)) + require.Equal(t, value, prefixPrefixStore.Get(key)) + + key = append([]byte("prefix"), key...) + require.True(t, prefixStore.Has(key)) + require.Equal(t, value, prefixStore.Get(key)) + key = append(prefix, key...) + require.True(t, baseStore.Has(key)) + require.Equal(t, value, baseStore.Get(key)) + + key = kvps[i].key + prefixPrefixStore.Delete(key) + require.False(t, prefixPrefixStore.Has(key)) + require.Nil(t, prefixPrefixStore.Get(key)) + key = append([]byte("prefix"), key...) + require.False(t, prefixStore.Has(key)) + require.Nil(t, prefixStore.Get(key)) + key = append(prefix, key...) 
+ require.False(t, baseStore.Has(key)) + require.Nil(t, baseStore.Get(key)) + } +} + +func TestIAVLStorePrefix(t *testing.T) { + db := wrapper.NewDBWrapper(dbm.NewMemDB()) + tree := tiavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) + iavlStore := iavl.UnsafeNewStore(tree) + + testPrefixStore(t, iavlStore, []byte("test")) +} + +func TestPrefixKVStoreNoNilSet(t *testing.T) { + meter := types.NewGasMeter(100000000) + mem := dbadapter.Store{DB: dbm.NewMemDB()} + gasStore := gaskv.NewStore(mem, meter, types.KVGasConfig()) + require.Panics(t, func() { gasStore.Set([]byte("key"), nil) }, "setting a nil value should panic") +} + +func TestPrefixStoreIterate(t *testing.T) { + db := dbm.NewMemDB() + baseStore := dbadapter.Store{DB: db} + prefix := []byte("test") + prefixStore := NewStore(baseStore, prefix) + + setRandomKVPairs(t, prefixStore) + + bIter := types.KVStorePrefixIterator(baseStore, prefix) + pIter := types.KVStorePrefixIterator(prefixStore, nil) + + for bIter.Valid() && pIter.Valid() { + require.Equal(t, bIter.Key(), append(prefix, pIter.Key()...)) + require.Equal(t, bIter.Value(), pIter.Value()) + + bIter.Next() + pIter.Next() + } + + bIter.Close() + pIter.Close() +} + +func incFirstByte(bz []byte) { + bz[0]++ +} + +func TestCloneAppend(t *testing.T) { + kvps := genRandomKVPairs(t) + for _, kvp := range kvps { + bz := cloneAppend(kvp.key, kvp.value) + require.Equal(t, bz, append(kvp.key, kvp.value...)) + + incFirstByte(bz) + require.NotEqual(t, bz, append(kvp.key, kvp.value...)) + + bz = cloneAppend(kvp.key, kvp.value) + incFirstByte(kvp.key) + require.NotEqual(t, bz, append(kvp.key, kvp.value...)) + + bz = cloneAppend(kvp.key, kvp.value) + incFirstByte(kvp.value) + require.NotEqual(t, bz, append(kvp.key, kvp.value...)) + } +} + +func TestPrefixStoreIteratorEdgeCase(t *testing.T) { + db := dbm.NewMemDB() + baseStore := dbadapter.Store{DB: db} + + // overflow in cpIncr + prefix := []byte{0xAA, 0xFF, 0xFF} + prefixStore := NewStore(baseStore, prefix) + + // ascending order + baseStore.Set([]byte{0xAA, 0xFF, 0xFE}, []byte{}) + baseStore.Set([]byte{0xAA, 0xFF, 0xFE, 0x00}, []byte{}) + baseStore.Set([]byte{0xAA, 0xFF, 0xFF}, []byte{}) + baseStore.Set([]byte{0xAA, 0xFF, 0xFF, 0x00}, []byte{}) + baseStore.Set([]byte{0xAB}, []byte{}) + baseStore.Set([]byte{0xAB, 0x00}, []byte{}) + baseStore.Set([]byte{0xAB, 0x00, 0x00}, []byte{}) + + iter := prefixStore.Iterator(nil, nil) + + checkDomain(t, iter, nil, nil) + checkItem(t, iter, []byte{}, bz("")) + checkNext(t, iter, true) + checkItem(t, iter, []byte{0x00}, bz("")) + checkNext(t, iter, false) + + checkInvalid(t, iter) + + iter.Close() +} + +func TestPrefixStoreReverseIteratorEdgeCase(t *testing.T) { + db := dbm.NewMemDB() + baseStore := dbadapter.Store{DB: db} + + // overflow in cpIncr + prefix := []byte{0xAA, 0xFF, 0xFF} + prefixStore := NewStore(baseStore, prefix) + + // descending order + baseStore.Set([]byte{0xAB, 0x00, 0x00}, []byte{}) + baseStore.Set([]byte{0xAB, 0x00}, []byte{}) + baseStore.Set([]byte{0xAB}, []byte{}) + baseStore.Set([]byte{0xAA, 0xFF, 0xFF, 0x00}, []byte{}) + baseStore.Set([]byte{0xAA, 0xFF, 0xFF}, []byte{}) + baseStore.Set([]byte{0xAA, 0xFF, 0xFE, 0x00}, []byte{}) + baseStore.Set([]byte{0xAA, 0xFF, 0xFE}, []byte{}) + + iter := prefixStore.ReverseIterator(nil, nil) + + checkDomain(t, iter, nil, nil) + checkItem(t, iter, []byte{0x00}, bz("")) + checkNext(t, iter, true) + checkItem(t, iter, []byte{}, bz("")) + checkNext(t, iter, false) + + checkInvalid(t, iter) + + iter.Close() + + db = dbm.NewMemDB() + 
baseStore = dbadapter.Store{DB: db} + + // underflow in cpDecr + prefix = []byte{0xAA, 0x00, 0x00} + prefixStore = NewStore(baseStore, prefix) + + baseStore.Set([]byte{0xAB, 0x00, 0x01, 0x00, 0x00}, []byte{}) + baseStore.Set([]byte{0xAB, 0x00, 0x01, 0x00}, []byte{}) + baseStore.Set([]byte{0xAB, 0x00, 0x01}, []byte{}) + baseStore.Set([]byte{0xAA, 0x00, 0x00, 0x00}, []byte{}) + baseStore.Set([]byte{0xAA, 0x00, 0x00}, []byte{}) + baseStore.Set([]byte{0xA9, 0xFF, 0xFF, 0x00}, []byte{}) + baseStore.Set([]byte{0xA9, 0xFF, 0xFF}, []byte{}) + + iter = prefixStore.ReverseIterator(nil, nil) + + checkDomain(t, iter, nil, nil) + checkItem(t, iter, []byte{0x00}, bz("")) + checkNext(t, iter, true) + checkItem(t, iter, []byte{}, bz("")) + checkNext(t, iter, false) + + checkInvalid(t, iter) + + iter.Close() +} + +// Tests below are ported from https://github.com/cometbft/cometbft/blob/master/libs/db/prefix_db_test.go + +func mockStoreWithStuff() types.KVStore { + db := dbm.NewMemDB() + store := dbadapter.Store{DB: db} + // Under "key" prefix + store.Set(bz("key"), bz("value")) + store.Set(bz("key1"), bz("value1")) + store.Set(bz("key2"), bz("value2")) + store.Set(bz("key3"), bz("value3")) + store.Set(bz("something"), bz("else")) + store.Set(bz("k"), bz("val")) + store.Set(bz("ke"), bz("valu")) + store.Set(bz("kee"), bz("valuu")) + return store +} + +func checkValue(t *testing.T, store types.KVStore, key, expected []byte) { + t.Helper() + bz := store.Get(key) + require.Equal(t, expected, bz) +} + +func checkValid(t *testing.T, itr types.Iterator, expected bool) { + t.Helper() + valid := itr.Valid() + require.Equal(t, expected, valid) +} + +func checkNext(t *testing.T, itr types.Iterator, expected bool) { + t.Helper() + itr.Next() + valid := itr.Valid() + require.Equal(t, expected, valid) +} + +func checkDomain(t *testing.T, itr types.Iterator, start, end []byte) { + t.Helper() + ds, de := itr.Domain() + require.Equal(t, start, ds) + require.Equal(t, end, de) +} + +func checkItem(t *testing.T, itr types.Iterator, key, value []byte) { + t.Helper() + require.Exactly(t, key, itr.Key()) + require.Exactly(t, value, itr.Value()) +} + +func checkInvalid(t *testing.T, itr types.Iterator) { + t.Helper() + checkValid(t, itr, false) + checkKeyPanics(t, itr) + checkValuePanics(t, itr) + checkNextPanics(t, itr) +} + +func checkKeyPanics(t *testing.T, itr types.Iterator) { + t.Helper() + require.Panics(t, func() { itr.Key() }) +} + +func checkValuePanics(t *testing.T, itr types.Iterator) { + t.Helper() + require.Panics(t, func() { itr.Value() }) +} + +func checkNextPanics(t *testing.T, itr types.Iterator) { + t.Helper() + require.Panics(t, func() { itr.Next() }) +} + +func TestPrefixDBSimple(t *testing.T) { + store := mockStoreWithStuff() + pstore := NewStore(store, bz("key")) + + checkValue(t, pstore, bz("key"), nil) + checkValue(t, pstore, bz(""), bz("value")) + checkValue(t, pstore, bz("key1"), nil) + checkValue(t, pstore, bz("1"), bz("value1")) + checkValue(t, pstore, bz("key2"), nil) + checkValue(t, pstore, bz("2"), bz("value2")) + checkValue(t, pstore, bz("key3"), nil) + checkValue(t, pstore, bz("3"), bz("value3")) + checkValue(t, pstore, bz("something"), nil) + checkValue(t, pstore, bz("k"), nil) + checkValue(t, pstore, bz("ke"), nil) + checkValue(t, pstore, bz("kee"), nil) +} + +func TestPrefixDBIterator1(t *testing.T) { + store := mockStoreWithStuff() + pstore := NewStore(store, bz("key")) + + itr := pstore.Iterator(nil, nil) + checkDomain(t, itr, nil, nil) + checkItem(t, itr, bz(""), bz("value")) + checkNext(t, 
itr, true) + checkItem(t, itr, bz("1"), bz("value1")) + checkNext(t, itr, true) + checkItem(t, itr, bz("2"), bz("value2")) + checkNext(t, itr, true) + checkItem(t, itr, bz("3"), bz("value3")) + checkNext(t, itr, false) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBIterator2(t *testing.T) { + store := mockStoreWithStuff() + pstore := NewStore(store, bz("key")) + + itr := pstore.Iterator(nil, bz("")) + checkDomain(t, itr, nil, bz("")) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBIterator3(t *testing.T) { + store := mockStoreWithStuff() + pstore := NewStore(store, bz("key")) + + itr := pstore.Iterator(bz(""), nil) + checkDomain(t, itr, bz(""), nil) + checkItem(t, itr, bz(""), bz("value")) + checkNext(t, itr, true) + checkItem(t, itr, bz("1"), bz("value1")) + checkNext(t, itr, true) + checkItem(t, itr, bz("2"), bz("value2")) + checkNext(t, itr, true) + checkItem(t, itr, bz("3"), bz("value3")) + checkNext(t, itr, false) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBIterator4(t *testing.T) { + store := mockStoreWithStuff() + pstore := NewStore(store, bz("key")) + + itr := pstore.Iterator(bz(""), bz("")) + checkDomain(t, itr, bz(""), bz("")) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBReverseIterator1(t *testing.T) { + store := mockStoreWithStuff() + pstore := NewStore(store, bz("key")) + + itr := pstore.ReverseIterator(nil, nil) + checkDomain(t, itr, nil, nil) + checkItem(t, itr, bz("3"), bz("value3")) + checkNext(t, itr, true) + checkItem(t, itr, bz("2"), bz("value2")) + checkNext(t, itr, true) + checkItem(t, itr, bz("1"), bz("value1")) + checkNext(t, itr, true) + checkItem(t, itr, bz(""), bz("value")) + checkNext(t, itr, false) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBReverseIterator2(t *testing.T) { + store := mockStoreWithStuff() + pstore := NewStore(store, bz("key")) + + itr := pstore.ReverseIterator(bz(""), nil) + checkDomain(t, itr, bz(""), nil) + checkItem(t, itr, bz("3"), bz("value3")) + checkNext(t, itr, true) + checkItem(t, itr, bz("2"), bz("value2")) + checkNext(t, itr, true) + checkItem(t, itr, bz("1"), bz("value1")) + checkNext(t, itr, true) + checkItem(t, itr, bz(""), bz("value")) + checkNext(t, itr, false) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBReverseIterator3(t *testing.T) { + store := mockStoreWithStuff() + pstore := NewStore(store, bz("key")) + + itr := pstore.ReverseIterator(nil, bz("")) + checkDomain(t, itr, nil, bz("")) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBReverseIterator4(t *testing.T) { + store := mockStoreWithStuff() + pstore := NewStore(store, bz("key")) + + itr := pstore.ReverseIterator(bz(""), bz("")) + checkInvalid(t, itr) + itr.Close() +} + +func TestCacheWraps(t *testing.T) { + db := dbm.NewMemDB() + store := dbadapter.Store{DB: db} + + cacheWrapper := store.CacheWrap() + require.IsType(t, &cachekv.Store{}, cacheWrapper) + + cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil) + require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace) +} diff --git a/cosmos-sdk-store/pruning/README.md b/cosmos-sdk-store/pruning/README.md new file mode 100755 index 000000000..2548807e2 --- /dev/null +++ b/cosmos-sdk-store/pruning/README.md @@ -0,0 +1,30 @@ +# Pruning + +## Overview + +Pruning is the mechanism for deleting old application heights from the disk. Depending on the use case, +nodes may require different pruning strategies. For example, archive nodes must keep all +the states and prune nothing. 
On the other hand, a regular validator node may want to keep only the latest 100 heights for performance reasons.
+
+## Strategies
+
+The strategies are configured in `app.toml`, with the format `pruning = "<strategy>"` where the options are:
+
+* `default`: only the last 362,880 states (approximately 3.5 weeks' worth of state) are kept; pruning occurs at 10-block intervals
+* `nothing`: all historic states are saved and nothing is deleted (i.e. an archiving node)
+* `everything`: only the 2 latest states are kept; pruning occurs at 10-block intervals
+* `custom`: pruning options are manually specified through `pruning-keep-recent` and `pruning-interval`
+
+(A short sketch further below in this patch shows how these strings map onto the concrete `PruningOptions`.)
+
+If no strategy is given to the BaseApp, `nothing` is selected. However, we perform validation on the CLI layer to require these to always be set in the config file.
+
+## Custom Pruning
+
+These options are applied if and only if the pruning strategy is `custom`:
+
+* `pruning-keep-recent`: N means to keep all of the last N states
+* `pruning-interval`: N means to delete old states from disk every Nth block
+
+## Relationship to State Sync Snapshots
+
+Snapshot settings are optional. However, if set, they affect how pruning is done by
+persisting the heights that are multiples of `state-sync.snapshot-interval` until after the snapshot is complete. See the "Relationship to Pruning" section in `snapshots/README.md` for more details.
diff --git a/cosmos-sdk-store/pruning/export_test.go b/cosmos-sdk-store/pruning/export_test.go
new file mode 100755
index 000000000..676ff132f
--- /dev/null
+++ b/cosmos-sdk-store/pruning/export_test.go
@@ -0,0 +1,8 @@
+package pruning
+
+var (
+	PruneSnapshotHeightsKey = pruneSnapshotHeightsKey
+
+	Int64SliceToBytes          = int64SliceToBytes
+	LoadPruningSnapshotHeights = loadPruningSnapshotHeights
+)
diff --git a/cosmos-sdk-store/pruning/manager.go b/cosmos-sdk-store/pruning/manager.go
new file mode 100755
index 000000000..9a99e4915
--- /dev/null
+++ b/cosmos-sdk-store/pruning/manager.go
@@ -0,0 +1,191 @@
+package pruning
+
+import (
+	"encoding/binary"
+	"fmt"
+	"sort"
+	"sync"
+
+	dbm "github.com/cosmos/cosmos-db"
+
+	"cosmossdk.io/log"
+	"cosmossdk.io/store/pruning/types"
+)
+
+// Manager is an abstraction to handle the logic needed for
+// determining when to prune old heights of the store
+// based on the strategy described by the pruning options.
+type Manager struct {
+	db               dbm.DB
+	logger           log.Logger
+	opts             types.PruningOptions
+	snapshotInterval uint64
+	// Snapshots are taken in a separate goroutine from regular execution
+	// and can be delivered asynchronously via HandleSnapshotHeight.
+	// Therefore, we sync access to pruneSnapshotHeights with this mutex.
+	pruneSnapshotHeightsMx sync.RWMutex
+	// These are the heights that are multiples of snapshotInterval and kept for state sync snapshots.
+	// A height is added for pruning once its snapshot is complete.
+	pruneSnapshotHeights []int64
+}
+
+// NegativeHeightsError is returned when a negative height is provided to the manager.
+type NegativeHeightsError struct {
+	Height int64
+}
+
+var _ error = &NegativeHeightsError{}
+
+func (e *NegativeHeightsError) Error() string {
+	return fmt.Sprintf("failed to get pruned heights: %d", e.Height)
+}
+
+var pruneSnapshotHeightsKey = []byte("s/prunesnapshotheights")
+
+// NewManager returns a new Manager with the given db and logger.
+// The returned manager uses a pruning strategy of "nothing", which
+// keeps all heights. Users of the Manager may change the strategy
+// by calling SetOptions.
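Tying the README's strategy strings to the concrete options they produce, here is a minimal, self-contained sketch using only the constructors defined in `pruning/types/options.go` later in this patch:

```go
package main

import (
	"fmt"

	types "cosmossdk.io/store/pruning/types"
)

func main() {
	// pruning = "default" in app.toml:
	def := types.NewPruningOptionsFromString("default")
	fmt.Println(def.KeepRecent, def.Interval) // 362880 10

	// pruning = "custom" with pruning-keep-recent = 100 and pruning-interval = 15:
	custom := types.NewCustomPruningOptions(100, 15)
	fmt.Println(custom.Validate()) // <nil>: both values clear the minimums
}
```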
+func NewManager(db dbm.DB, logger log.Logger) *Manager {
+	return &Manager{
+		db:                   db,
+		logger:               logger,
+		opts:                 types.NewPruningOptions(types.PruningNothing),
+		pruneSnapshotHeights: []int64{0},
+	}
+}
+
+// SetOptions sets the pruning strategy on the manager.
+func (m *Manager) SetOptions(opts types.PruningOptions) {
+	m.opts = opts
+}
+
+// GetOptions fetches the pruning strategy from the manager.
+func (m *Manager) GetOptions() types.PruningOptions {
+	return m.opts
+}
+
+// HandleSnapshotHeight persists the snapshot height to be pruned at the next appropriate
+// height defined by the pruning strategy. It flushes the update to disk and panics if the flush fails.
+// The input height must be greater than 0, and the pruning strategy must not be set to pruning nothing.
+// If either of these conditions is not met, this function does nothing.
+func (m *Manager) HandleSnapshotHeight(height int64) {
+	if m.opts.GetPruningStrategy() == types.PruningNothing || height <= 0 {
+		return
+	}
+
+	m.pruneSnapshotHeightsMx.Lock()
+	defer m.pruneSnapshotHeightsMx.Unlock()
+
+	m.logger.Debug("HandleSnapshotHeight", "height", height)
+	m.pruneSnapshotHeights = append(m.pruneSnapshotHeights, height)
+	sort.Slice(m.pruneSnapshotHeights, func(i, j int) bool { return m.pruneSnapshotHeights[i] < m.pruneSnapshotHeights[j] })
+	k := 1
+	for ; k < len(m.pruneSnapshotHeights); k++ {
+		if m.pruneSnapshotHeights[k] != m.pruneSnapshotHeights[k-1]+int64(m.snapshotInterval) {
+			break
+		}
+	}
+	m.pruneSnapshotHeights = m.pruneSnapshotHeights[k-1:]
+
+	// flush the updates to disk so that they are not lost if a crash happens.
+	if err := m.db.SetSync(pruneSnapshotHeightsKey, int64SliceToBytes(m.pruneSnapshotHeights)); err != nil {
+		panic(err)
+	}
+}
+
+// SetSnapshotInterval sets the interval at which snapshots are taken.
+func (m *Manager) SetSnapshotInterval(snapshotInterval uint64) {
+	m.snapshotInterval = snapshotInterval
+}
+
+// GetPruningHeight returns the height up to which the store can prune at the
+// given height; it returns 0 when nothing can be pruned.
+func (m *Manager) GetPruningHeight(height int64) int64 {
+	if m.opts.GetPruningStrategy() == types.PruningNothing {
+		return 0
+	}
+	if m.opts.Interval <= 0 {
+		return 0
+	}
+
+	if height%int64(m.opts.Interval) != 0 || height <= int64(m.opts.KeepRecent) {
+		return 0
+	}
+
+	// Consider the snapshot height
+	pruneHeight := height - 1 - int64(m.opts.KeepRecent) // we should keep the current height at least
+
+	m.pruneSnapshotHeightsMx.RLock()
+	defer m.pruneSnapshotHeightsMx.RUnlock()
+
+	// snapshotInterval is zero, indicating that all heights can be pruned
+	if m.snapshotInterval <= 0 {
+		return pruneHeight
+	}
+
+	if len(m.pruneSnapshotHeights) == 0 { // the length should always be greater than zero
+		return 0
+	}
+
+	// the snapshot at `m.pruneSnapshotHeights[0]` has already been taken,
+	// so we can prune up to `m.pruneSnapshotHeights[0] + int64(m.snapshotInterval) - 1`
+	snHeight := m.pruneSnapshotHeights[0] + int64(m.snapshotInterval) - 1
+	if snHeight < pruneHeight {
+		return snHeight
+	}
+	return pruneHeight
+}
+
+// LoadSnapshotHeights loads the snapshot heights from the database as part of
+// crash recovery.
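A worked example of how `GetPruningHeight` interacts with completed snapshots (a sketch built only from the APIs in this file; the heights are chosen for illustration):

```go
package main

import (
	"fmt"

	db "github.com/cosmos/cosmos-db"

	"cosmossdk.io/log"
	"cosmossdk.io/store/pruning"
	types "cosmossdk.io/store/pruning/types"
)

func main() {
	m := pruning.NewManager(db.NewMemDB(), log.NewNopLogger())
	m.SetOptions(types.NewCustomPruningOptions(100, 15)) // keep-recent=100, interval=15
	m.SetSnapshotInterval(10)

	// Height 150 is a multiple of the interval and above keep-recent, so the
	// candidate is 150-1-100 = 49. But no snapshot has completed yet (the
	// tracked heights start at [0]), so pruning is capped at 0+10-1 = 9.
	fmt.Println(m.GetPruningHeight(150)) // 9

	// As snapshots complete in order, the cap advances past the candidate.
	for _, h := range []int64{10, 20, 30, 40} {
		m.HandleSnapshotHeight(h)
	}
	fmt.Println(m.GetPruningHeight(150)) // 49
}
```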
+func (m *Manager) LoadSnapshotHeights(db dbm.DB) error { + if m.opts.GetPruningStrategy() == types.PruningNothing { + return nil + } + + loadedPruneSnapshotHeights, err := loadPruningSnapshotHeights(db) + if err != nil { + return err + } + + if len(loadedPruneSnapshotHeights) > 0 { + m.pruneSnapshotHeightsMx.Lock() + defer m.pruneSnapshotHeightsMx.Unlock() + m.pruneSnapshotHeights = loadedPruneSnapshotHeights + } + + return nil +} + +func loadPruningSnapshotHeights(db dbm.DB) ([]int64, error) { + bz, err := db.Get(pruneSnapshotHeightsKey) + if err != nil { + return nil, fmt.Errorf("failed to get post-snapshot pruned heights: %w", err) + } + if len(bz) == 0 { + return []int64{}, nil + } + + pruneSnapshotHeights := make([]int64, len(bz)/8) + i, offset := 0, 0 + for offset < len(bz) { + h := int64(binary.BigEndian.Uint64(bz[offset : offset+8])) + if h < 0 { + return nil, &NegativeHeightsError{Height: h} + } + pruneSnapshotHeights[i] = h + i++ + offset += 8 + } + + return pruneSnapshotHeights, nil +} + +func int64SliceToBytes(slice []int64) []byte { + bz := make([]byte, 0, len(slice)*8) + for _, ph := range slice { + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, uint64(ph)) + bz = append(bz, buf...) + } + return bz +} diff --git a/cosmos-sdk-store/pruning/manager_test.go b/cosmos-sdk-store/pruning/manager_test.go new file mode 100755 index 000000000..006891de8 --- /dev/null +++ b/cosmos-sdk-store/pruning/manager_test.go @@ -0,0 +1,303 @@ +package pruning_test + +import ( + "errors" + "fmt" + "testing" + + db "github.com/cosmos/cosmos-db" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "cosmossdk.io/log" + "cosmossdk.io/store/mock" + "cosmossdk.io/store/pruning" + "cosmossdk.io/store/pruning/types" +) + +const dbErr = "db error" + +func TestNewManager(t *testing.T) { + manager := pruning.NewManager(db.NewMemDB(), log.NewNopLogger()) + require.NotNil(t, manager) + require.Equal(t, types.PruningNothing, manager.GetOptions().GetPruningStrategy()) +} + +func TestStrategies(t *testing.T) { + testcases := map[string]struct { + strategy types.PruningOptions + snapshotInterval uint64 + strategyToAssert types.PruningStrategy + isValid bool + }{ + "prune nothing - no snapshot": { + strategy: types.NewPruningOptions(types.PruningNothing), + strategyToAssert: types.PruningNothing, + }, + "prune nothing - snapshot": { + strategy: types.NewPruningOptions(types.PruningNothing), + strategyToAssert: types.PruningNothing, + snapshotInterval: 100, + }, + "prune default - no snapshot": { + strategy: types.NewPruningOptions(types.PruningDefault), + strategyToAssert: types.PruningDefault, + }, + "prune default - snapshot": { + strategy: types.NewPruningOptions(types.PruningDefault), + strategyToAssert: types.PruningDefault, + snapshotInterval: 100, + }, + "prune everything - no snapshot": { + strategy: types.NewPruningOptions(types.PruningEverything), + strategyToAssert: types.PruningEverything, + }, + "prune everything - snapshot": { + strategy: types.NewPruningOptions(types.PruningEverything), + strategyToAssert: types.PruningEverything, + snapshotInterval: 100, + }, + "custom 100-10-15": { + strategy: types.NewCustomPruningOptions(100, 15), + snapshotInterval: 10, + strategyToAssert: types.PruningCustom, + }, + "custom 10-10-15": { + strategy: types.NewCustomPruningOptions(10, 15), + snapshotInterval: 10, + strategyToAssert: types.PruningCustom, + }, + "custom 100-0-15": { + strategy: types.NewCustomPruningOptions(100, 15), + snapshotInterval: 0, + strategyToAssert: 
types.PruningCustom, + }, + } + + for name, tc := range testcases { + tc := tc // Local copy to avoid shadowing. + t.Run(name, func(t *testing.T) { + t.Parallel() + + manager := pruning.NewManager(db.NewMemDB(), log.NewNopLogger()) + require.NotNil(t, manager) + + curStrategy := tc.strategy + manager.SetSnapshotInterval(tc.snapshotInterval) + + pruneStrategy := curStrategy.GetPruningStrategy() + require.Equal(t, tc.strategyToAssert, pruneStrategy) + + // Validate strategy parameters + switch pruneStrategy { + case types.PruningDefault: + require.Equal(t, uint64(362880), curStrategy.KeepRecent) + require.Equal(t, uint64(10), curStrategy.Interval) + case types.PruningNothing: + require.Equal(t, uint64(0), curStrategy.KeepRecent) + require.Equal(t, uint64(0), curStrategy.Interval) + case types.PruningEverything: + require.Equal(t, uint64(2), curStrategy.KeepRecent) + require.Equal(t, uint64(10), curStrategy.Interval) + default: + // + } + + manager.SetOptions(curStrategy) + require.Equal(t, tc.strategy, manager.GetOptions()) + + curKeepRecent := curStrategy.KeepRecent + snHeight := int64(tc.snapshotInterval - 1) + for curHeight := int64(0); curHeight < 110000; curHeight++ { + if tc.snapshotInterval != 0 { + if curHeight > int64(tc.snapshotInterval) && curHeight%int64(tc.snapshotInterval) == int64(tc.snapshotInterval)-1 { + manager.HandleSnapshotHeight(curHeight - int64(tc.snapshotInterval) + 1) + snHeight = curHeight + } + } + + pruningHeightActual := manager.GetPruningHeight(curHeight) + curHeightStr := fmt.Sprintf("height: %d", curHeight) + + switch curStrategy.GetPruningStrategy() { + case types.PruningNothing: + require.Equal(t, int64(0), pruningHeightActual, curHeightStr) + default: + if curHeight > int64(curKeepRecent) && curHeight%int64(curStrategy.Interval) == 0 { + pruningHeightExpected := curHeight - int64(curKeepRecent) - 1 + if tc.snapshotInterval > 0 && snHeight < pruningHeightExpected { + pruningHeightExpected = snHeight + } + require.Equal(t, pruningHeightExpected, pruningHeightActual, curHeightStr) + } else { + require.Equal(t, int64(0), pruningHeightActual, curHeightStr) + } + } + } + }) + } +} + +func TestPruningHeight_Inputs(t *testing.T) { + keepRecent := int64(types.NewPruningOptions(types.PruningEverything).KeepRecent) + interval := int64(types.NewPruningOptions(types.PruningEverything).Interval) + + testcases := map[string]struct { + height int64 + expectedResult int64 + strategy types.PruningStrategy + }{ + "currentHeight is negative - prune everything - invalid currentHeight": { + -1, + 0, + types.PruningEverything, + }, + "currentHeight is zero - prune everything - invalid currentHeight": { + 0, + 0, + types.PruningEverything, + }, + "currentHeight is positive but within keep recent- prune everything - not kept": { + keepRecent, + 0, + types.PruningEverything, + }, + "currentHeight is positive and equal to keep recent+1 - no kept": { + keepRecent + 1, + 0, + types.PruningEverything, + }, + "currentHeight is positive and greater than keep recent+1 but not multiple of interval - no kept": { + keepRecent + 2, + 0, + types.PruningEverything, + }, + "currentHeight is positive and greater than keep recent+1 and multiple of interval - kept": { + interval, + interval - keepRecent - 1, + types.PruningEverything, + }, + "pruning nothing, currentHeight is positive and greater than keep recent - not kept": { + interval, + 0, + types.PruningNothing, + }, + } + + for name, tc := range testcases { + t.Run(name, func(t *testing.T) { + manager := pruning.NewManager(db.NewMemDB(), 
log.NewNopLogger())
+			require.NotNil(t, manager)
+			manager.SetOptions(types.NewPruningOptions(tc.strategy))
+
+			pruningHeightActual := manager.GetPruningHeight(tc.height)
+			require.Equal(t, tc.expectedResult, pruningHeightActual)
+		})
+	}
+}
+
+func TestHandleSnapshotHeight_DbErr_Panic(t *testing.T) {
+	ctrl := gomock.NewController(t)
+
+	// Setup
+	dbMock := mock.NewMockDB(ctrl)
+
+	dbMock.EXPECT().SetSync(gomock.Any(), gomock.Any()).Return(errors.New(dbErr)).Times(1)
+
+	manager := pruning.NewManager(dbMock, log.NewNopLogger())
+	manager.SetOptions(types.NewPruningOptions(types.PruningEverything))
+	require.NotNil(t, manager)
+
+	defer func() {
+		if r := recover(); r == nil {
+			t.Fail()
+		}
+	}()
+
+	manager.HandleSnapshotHeight(10)
+}
+
+func TestHandleSnapshotHeight_LoadFromDisk(t *testing.T) {
+	snapshotInterval := uint64(10)
+
+	// Setup
+	db := db.NewMemDB()
+	manager := pruning.NewManager(db, log.NewNopLogger())
+	require.NotNil(t, manager)
+
+	manager.SetOptions(types.NewPruningOptions(types.PruningEverything))
+	manager.SetSnapshotInterval(snapshotInterval)
+
+	expected := 0
+	for snapshotHeight := int64(-1); snapshotHeight < 100; snapshotHeight++ {
+		snapshotHeightStr := fmt.Sprintf("snapshot height: %d", snapshotHeight)
+		if snapshotHeight > int64(snapshotInterval) && snapshotHeight%int64(snapshotInterval) == 1 {
+			// Test flush
+			manager.HandleSnapshotHeight(snapshotHeight - 1)
+			expected = 1
+		}
+
+		loadedSnapshotHeights, err := pruning.LoadPruningSnapshotHeights(db)
+		require.NoError(t, err)
+		require.Equal(t, expected, len(loadedSnapshotHeights), snapshotHeightStr)
+
+		// Test load back
+		err = manager.LoadSnapshotHeights(db)
+		require.NoError(t, err)
+
+		loadedSnapshotHeights, err = pruning.LoadPruningSnapshotHeights(db)
+		require.NoError(t, err)
+		require.Equal(t, expected, len(loadedSnapshotHeights), snapshotHeightStr)
+	}
+}
+
+func TestLoadPruningSnapshotHeights(t *testing.T) {
+	var (
+		manager = pruning.NewManager(db.NewMemDB(), log.NewNopLogger())
+		err     error
+	)
+	require.NotNil(t, manager)
+
+	// must not be PruningNothing
+	manager.SetOptions(types.NewPruningOptions(types.PruningDefault))
+
+	testcases := map[string]struct {
+		getFlushedPruningSnapshotHeights func() []int64
+		expectedResult                   error
+	}{
+		"negative snapshotPruningHeight - error": {
+			getFlushedPruningSnapshotHeights: func() []int64 {
+				return []int64{5, -2, 3}
+			},
+			expectedResult: &pruning.NegativeHeightsError{Height: -2},
+		},
+		"non-negative - success": {
+			getFlushedPruningSnapshotHeights: func() []int64 {
+				return []int64{5, 0, 3}
+			},
+		},
+	}
+
+	for name, tc := range testcases {
+		t.Run(name, func(t *testing.T) {
+			db := db.NewMemDB()
+
+			if tc.getFlushedPruningSnapshotHeights != nil {
+				err = db.Set(pruning.PruneSnapshotHeightsKey, pruning.Int64SliceToBytes(tc.getFlushedPruningSnapshotHeights()))
+				require.NoError(t, err)
+			}
+
+			err = manager.LoadSnapshotHeights(db)
+			require.Equal(t, tc.expectedResult, err)
+		})
+	}
+}
+
+func TestLoadSnapshotHeights_PruneNothing(t *testing.T) {
+	manager := pruning.NewManager(db.NewMemDB(), log.NewNopLogger())
+	require.NotNil(t, manager)
+
+	manager.SetOptions(types.NewPruningOptions(types.PruningNothing))
+
+	require.Nil(t, manager.LoadSnapshotHeights(db.NewMemDB()))
+}
diff --git a/cosmos-sdk-store/pruning/types/options.go b/cosmos-sdk-store/pruning/types/options.go
new file mode 100755
index 000000000..229dbed98
--- /dev/null
+++ b/cosmos-sdk-store/pruning/types/options.go
@@ -0,0 +1,130 @@
+package types
+
+import (
+	"errors"
+	"fmt"
+)
+
+// 
PruningOptions defines the pruning strategy used when determining which
+// heights are removed from disk when committing state.
+type PruningOptions struct {
+	// KeepRecent defines how many recent heights to keep on disk.
+	KeepRecent uint64
+
+	// Interval defines when the pruned heights are removed from disk.
+	Interval uint64
+
+	// Strategy defines the kind of pruning strategy. See below for more information on each.
+	Strategy PruningStrategy
+}
+
+type PruningStrategy int
+
+// Pruning option string constants
+const (
+	PruningOptionDefault    = "default"
+	PruningOptionEverything = "everything"
+	PruningOptionNothing    = "nothing"
+	PruningOptionCustom     = "custom"
+)
+
+const (
+	// PruningDefault defines a pruning strategy where the last 362880 heights are
+	// kept and to-be-pruned heights are pruned at every 10th height.
+	// The last 362880 heights (approximately 3.5 weeks' worth of state) are kept assuming a typical
+	// block time of 6s. If these values do not match the application's requirements, use the "custom" option.
+	PruningDefault PruningStrategy = iota
+	// PruningEverything defines a pruning strategy where all committed heights are
+	// deleted, storing only the current height and the last 2 states. To-be-pruned heights are
+	// pruned at every 10th height.
+	PruningEverything
+	// PruningNothing defines a pruning strategy where all heights are kept on disk.
+	// This is the only strategy where KeepEvery=1 is allowed with state-sync snapshots disabled.
+	PruningNothing
+	// PruningCustom defines a pruning strategy where the user specifies the pruning.
+	PruningCustom
+	// PruningUndefined defines an undefined pruning strategy. It is returned by stores that do not support pruning.
+	PruningUndefined
+)
+
+const (
+	pruneEverythingKeepRecent = 2
+	pruneEverythingInterval   = 10
+)
+
+var (
+	ErrPruningIntervalZero       = errors.New("'pruning-interval' must not be 0. If you want to disable pruning, select pruning = \"nothing\"")
+	ErrPruningIntervalTooSmall   = fmt.Errorf("'pruning-interval' must not be less than %d. For the most aggressive pruning, select pruning = \"everything\"", pruneEverythingInterval)
+	ErrPruningKeepRecentTooSmall = fmt.Errorf("'pruning-keep-recent' must not be less than %d. For the most aggressive pruning, select pruning = \"everything\"", pruneEverythingKeepRecent)
+)
+
+func NewPruningOptions(pruningStrategy PruningStrategy) PruningOptions {
+	switch pruningStrategy {
+	case PruningDefault:
+		return PruningOptions{
+			KeepRecent: 362880,
+			Interval:   10,
+			Strategy:   PruningDefault,
+		}
+	case PruningEverything:
+		return PruningOptions{
+			KeepRecent: pruneEverythingKeepRecent,
+			Interval:   pruneEverythingInterval,
+			Strategy:   PruningEverything,
+		}
+	case PruningNothing:
+		return PruningOptions{
+			KeepRecent: 0,
+			Interval:   0,
+			Strategy:   PruningNothing,
+		}
+	default:
+		return PruningOptions{
+			Strategy: PruningCustom,
+		}
+	}
+}
+
+func NewCustomPruningOptions(keepRecent, interval uint64) PruningOptions {
+	return PruningOptions{
+		KeepRecent: keepRecent,
+		Interval:   interval,
+		Strategy:   PruningCustom,
+	}
+}
+
+func (po PruningOptions) GetPruningStrategy() PruningStrategy {
+	return po.Strategy
+}
+
+func (po PruningOptions) Validate() error {
+	if po.Strategy == PruningNothing {
+		return nil
+	}
+	if po.Interval == 0 {
+		return ErrPruningIntervalZero
+	}
+	if po.Interval < pruneEverythingInterval {
+		return ErrPruningIntervalTooSmall
+	}
+	if po.KeepRecent < pruneEverythingKeepRecent {
+		return ErrPruningKeepRecentTooSmall
+	}
+	return nil
+}
+
+func NewPruningOptionsFromString(strategy string) PruningOptions {
+	switch strategy {
+	case PruningOptionEverything:
+		return NewPruningOptions(PruningEverything)
+
+	case PruningOptionNothing:
+		return NewPruningOptions(PruningNothing)
+
+	case PruningOptionDefault:
+		return NewPruningOptions(PruningDefault)
+
+	default:
+		return NewPruningOptions(PruningDefault)
+	}
+}
diff --git a/cosmos-sdk-store/pruning/types/options_test.go b/cosmos-sdk-store/pruning/types/options_test.go
new file mode 100755
index 000000000..abc6bf39e
--- /dev/null
+++ b/cosmos-sdk-store/pruning/types/options_test.go
@@ -0,0 +1,65 @@
+package types
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestPruningOptions_Validate(t *testing.T) {
+	testCases := []struct {
+		opts      PruningOptions
+		expectErr error
+	}{
+		{NewPruningOptions(PruningDefault), nil},
+		{NewPruningOptions(PruningEverything), nil},
+		{NewPruningOptions(PruningNothing), nil},
+		{NewPruningOptions(PruningCustom), ErrPruningIntervalZero},
+		{NewCustomPruningOptions(2, 10), nil},
+		{NewCustomPruningOptions(100, 15), nil},
+		{NewCustomPruningOptions(1, 10), ErrPruningKeepRecentTooSmall},
+		{NewCustomPruningOptions(2, 9), ErrPruningIntervalTooSmall},
+		{NewCustomPruningOptions(2, 0), ErrPruningIntervalZero},
+		{NewCustomPruningOptions(2, 0), ErrPruningIntervalZero},
+	}
+
+	for _, tc := range testCases {
+		err := tc.opts.Validate()
+		require.Equal(t, tc.expectErr, err, "options: %v, err: %s", tc.opts, err)
+	}
+}
+
+func TestPruningOptions_GetStrategy(t *testing.T) {
+	testCases := []struct {
+		opts             PruningOptions
+		expectedStrategy PruningStrategy
+	}{
+		{NewPruningOptions(PruningDefault), PruningDefault},
+		{NewPruningOptions(PruningEverything), PruningEverything},
+		{NewPruningOptions(PruningNothing), PruningNothing},
+		{NewPruningOptions(PruningCustom), PruningCustom},
+		{NewCustomPruningOptions(2, 10), PruningCustom},
+	}
+
+	for _, tc := range testCases {
+		actualStrategy := tc.opts.GetPruningStrategy()
+		require.Equal(t, tc.expectedStrategy, actualStrategy)
+	}
+}
+
+func TestNewPruningOptionsFromString(t *testing.T) {
+	testCases := []struct {
+		optString string
+		expect    PruningOptions
+	}{
+		{PruningOptionDefault, 
NewPruningOptions(PruningDefault)}, + {PruningOptionEverything, NewPruningOptions(PruningEverything)}, + {PruningOptionNothing, NewPruningOptions(PruningNothing)}, + {"invalid", NewPruningOptions(PruningDefault)}, + } + + for _, tc := range testCases { + actual := NewPruningOptionsFromString(tc.optString) + require.Equal(t, tc.expect, actual) + } +} diff --git a/cosmos-sdk-store/reexport.go b/cosmos-sdk-store/reexport.go new file mode 100755 index 000000000..9865cb9b0 --- /dev/null +++ b/cosmos-sdk-store/reexport.go @@ -0,0 +1,29 @@ +package store + +import ( + "cosmossdk.io/store/types" +) + +// Import cosmos-sdk/types/store.go for convenience. +type ( + Store = types.Store + Committer = types.Committer + CommitStore = types.CommitStore + MultiStore = types.MultiStore + CacheMultiStore = types.CacheMultiStore + CommitMultiStore = types.CommitMultiStore + KVStore = types.KVStore + Iterator = types.Iterator + CacheKVStore = types.CacheKVStore + CommitKVStore = types.CommitKVStore + CacheWrapper = types.CacheWrapper + CacheWrap = types.CacheWrap + CommitID = types.CommitID + Key = types.StoreKey + Type = types.StoreType + Queryable = types.Queryable + TraceContext = types.TraceContext + Gas = types.Gas + GasMeter = types.GasMeter + GasConfig = types.GasConfig +) diff --git a/cosmos-sdk-store/rootmulti/dbadapter.go b/cosmos-sdk-store/rootmulti/dbadapter.go new file mode 100755 index 000000000..65cd41c66 --- /dev/null +++ b/cosmos-sdk-store/rootmulti/dbadapter.go @@ -0,0 +1,49 @@ +package rootmulti + +import ( + "cosmossdk.io/store/dbadapter" + pruningtypes "cosmossdk.io/store/pruning/types" + "cosmossdk.io/store/types" +) + +var commithash = []byte("FAKE_HASH") + +var ( + _ types.KVStore = (*commitDBStoreAdapter)(nil) + _ types.Committer = (*commitDBStoreAdapter)(nil) +) + +//---------------------------------------- +// commitDBStoreWrapper should only be used for simulation/debugging, +// as it doesn't compute any commit hash, and it cannot load older state. + +// Wrapper type for dbm.Db with implementation of KVStore +type commitDBStoreAdapter struct { + dbadapter.Store +} + +func (cdsa commitDBStoreAdapter) Commit() types.CommitID { + return types.CommitID{ + Version: -1, + Hash: commithash, + } +} + +func (cdsa commitDBStoreAdapter) LastCommitID() types.CommitID { + return types.CommitID{ + Version: -1, + Hash: commithash, + } +} + +func (cdsa commitDBStoreAdapter) WorkingHash() []byte { + return commithash +} + +func (cdsa commitDBStoreAdapter) SetPruning(_ pruningtypes.PruningOptions) {} + +// GetPruning is a no-op as pruning options cannot be directly set on this store. +// They must be set on the root commit multi-store. +func (cdsa commitDBStoreAdapter) GetPruning() pruningtypes.PruningOptions { + return pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined) +} diff --git a/cosmos-sdk-store/rootmulti/proof.go b/cosmos-sdk-store/rootmulti/proof.go new file mode 100755 index 000000000..78217a160 --- /dev/null +++ b/cosmos-sdk-store/rootmulti/proof.go @@ -0,0 +1,27 @@ +package rootmulti + +import ( + "github.com/cometbft/cometbft/crypto/merkle" + + storetypes "cosmossdk.io/store/types" +) + +// RequireProof returns whether proof is required for the subpath. +func RequireProof(subpath string) bool { + // XXX: create a better convention. + // Currently, only when query subpath is "/key", will proof be included in + // response. If there are some changes about proof building in iavlstore.go, + // we must change code here to keep consistency with iavlStore#Query. 
+ return subpath == "/key" +} + +//----------------------------------------------------------------------------- + +// XXX: This should be managed by the rootMultiStore which may want to register +// more proof ops? +func DefaultProofRuntime() (prt *merkle.ProofRuntime) { + prt = merkle.NewProofRuntime() + prt.RegisterOpDecoder(storetypes.ProofOpIAVLCommitment, storetypes.CommitmentOpDecoder) + prt.RegisterOpDecoder(storetypes.ProofOpSimpleMerkleCommitment, storetypes.CommitmentOpDecoder) + return +} diff --git a/cosmos-sdk-store/rootmulti/proof_test.go b/cosmos-sdk-store/rootmulti/proof_test.go new file mode 100755 index 000000000..d573937c3 --- /dev/null +++ b/cosmos-sdk-store/rootmulti/proof_test.go @@ -0,0 +1,152 @@ +package rootmulti + +import ( + "testing" + + dbm "github.com/cosmos/cosmos-db" + "github.com/stretchr/testify/require" + + "cosmossdk.io/log" + "cosmossdk.io/store/iavl" + "cosmossdk.io/store/metrics" + "cosmossdk.io/store/types" +) + +func TestVerifyIAVLStoreQueryProof(t *testing.T) { + // Create main tree for testing. + db := dbm.NewMemDB() + iStore, err := iavl.LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), types.CommitID{}, iavl.DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics()) + store := iStore.(*iavl.Store) + require.Nil(t, err) + store.Set([]byte("MYKEY"), []byte("MYVALUE")) + cid := store.Commit() + + // Get Proof + res, err := store.Query(&types.RequestQuery{ + Path: "/key", // required path to get key/value+proof + Data: []byte("MYKEY"), + Prove: true, + }) + require.NoError(t, err) + require.NotNil(t, res.ProofOps) + + // Verify proof. + prt := DefaultProofRuntime() + err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY", []byte("MYVALUE")) + require.Nil(t, err) + + // Verify (bad) proof. + err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY_NOT", []byte("MYVALUE")) + require.NotNil(t, err) + + // Verify (bad) proof. + err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY/MYKEY", []byte("MYVALUE")) + require.NotNil(t, err) + + // Verify (bad) proof. + err = prt.VerifyValue(res.ProofOps, cid.Hash, "MYKEY", []byte("MYVALUE")) + require.NotNil(t, err) + + // Verify (bad) proof. + err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY", []byte("MYVALUE_NOT")) + require.NotNil(t, err) + + // Verify (bad) proof. + err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY", []byte(nil)) + require.NotNil(t, err) +} + +func TestVerifyMultiStoreQueryProof(t *testing.T) { + // Create main tree for testing. + db := dbm.NewMemDB() + store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) + iavlStoreKey := types.NewKVStoreKey("iavlStoreKey") + + store.MountStoreWithDB(iavlStoreKey, types.StoreTypeIAVL, nil) + require.NoError(t, store.LoadVersion(0)) + + iavlStore := store.GetCommitStore(iavlStoreKey).(*iavl.Store) + iavlStore.Set([]byte("MYKEY"), []byte("MYVALUE")) + cid := store.Commit() + + // Get Proof + res, err := store.Query(&types.RequestQuery{ + Path: "/iavlStoreKey/key", // required path to get key/value+proof + Data: []byte("MYKEY"), + Prove: true, + }) + require.NoError(t, err) + require.NotNil(t, res.ProofOps) + + // Verify proof. + prt := DefaultProofRuntime() + err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY", []byte("MYVALUE")) + require.Nil(t, err) + + // Verify proof. + err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY", []byte("MYVALUE")) + require.Nil(t, err) + + // Verify (bad) proof. 
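+	// (wrong key: "MYKEY_NOT" was never written, so a value proof for it must fail)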
+ err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY_NOT", []byte("MYVALUE")) + require.NotNil(t, err) + + // Verify (bad) proof. + err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY/MYKEY", []byte("MYVALUE")) + require.NotNil(t, err) + + // Verify (bad) proof. + err = prt.VerifyValue(res.ProofOps, cid.Hash, "iavlStoreKey/MYKEY", []byte("MYVALUE")) + require.NotNil(t, err) + + // Verify (bad) proof. + err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY", []byte("MYVALUE")) + require.NotNil(t, err) + + // Verify (bad) proof. + err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY", []byte("MYVALUE_NOT")) + require.NotNil(t, err) + + // Verify (bad) proof. + err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY", []byte(nil)) + require.NotNil(t, err) +} + +func TestVerifyMultiStoreQueryProofAbsence(t *testing.T) { + // Create main tree for testing. + db := dbm.NewMemDB() + store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) + iavlStoreKey := types.NewKVStoreKey("iavlStoreKey") + + store.MountStoreWithDB(iavlStoreKey, types.StoreTypeIAVL, nil) + err := store.LoadVersion(0) + require.NoError(t, err) + + iavlStore := store.GetCommitStore(iavlStoreKey).(*iavl.Store) + iavlStore.Set([]byte("MYKEY"), []byte("MYVALUE")) + cid := store.Commit() // Commit with empty iavl store. + + // Get Proof + res, err := store.Query(&types.RequestQuery{ + Path: "/iavlStoreKey/key", // required path to get key/value+proof + Data: []byte("MYABSENTKEY"), + Prove: true, + }) + require.NoError(t, err) + require.NotNil(t, res.ProofOps) + + // Verify proof. + prt := DefaultProofRuntime() + err = prt.VerifyAbsence(res.ProofOps, cid.Hash, "/iavlStoreKey/MYABSENTKEY") + require.Nil(t, err) + + // Verify (bad) proof. + prt = DefaultProofRuntime() + err = prt.VerifyAbsence(res.ProofOps, cid.Hash, "/MYABSENTKEY") + require.NotNil(t, err) + + // Verify (bad) proof. 
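+	// (a value proof for an absent key must fail; only VerifyAbsence succeeds here)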
+ prt = DefaultProofRuntime() + err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYABSENTKEY", []byte("")) + require.NotNil(t, err) +} diff --git a/cosmos-sdk-store/rootmulti/snapshot_test.go b/cosmos-sdk-store/rootmulti/snapshot_test.go new file mode 100755 index 000000000..635be9297 --- /dev/null +++ b/cosmos-sdk-store/rootmulti/snapshot_test.go @@ -0,0 +1,321 @@ +package rootmulti_test + +import ( + "crypto/sha256" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "io" + "math/rand" + "testing" + + dbm "github.com/cosmos/cosmos-db" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "cosmossdk.io/log" + "cosmossdk.io/store/iavl" + "cosmossdk.io/store/metrics" + "cosmossdk.io/store/rootmulti" + "cosmossdk.io/store/snapshots" + snapshottypes "cosmossdk.io/store/snapshots/types" + "cosmossdk.io/store/types" +) + +func newMultiStoreWithGeneratedData(db dbm.DB, stores uint8, storeKeys uint64) *rootmulti.Store { + multiStore := rootmulti.NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) + r := rand.New(rand.NewSource(49872768940)) // Fixed seed for deterministic tests + + keys := []*types.KVStoreKey{} + for i := uint8(0); i < stores; i++ { + key := types.NewKVStoreKey(fmt.Sprintf("store%v", i)) + multiStore.MountStoreWithDB(key, types.StoreTypeIAVL, nil) + keys = append(keys, key) + } + err := multiStore.LoadLatestVersion() + if err != nil { + panic(err) + } + + for _, key := range keys { + store := multiStore.GetCommitKVStore(key).(*iavl.Store) + for i := uint64(0); i < storeKeys; i++ { + k := make([]byte, 8) + v := make([]byte, 1024) + binary.BigEndian.PutUint64(k, i) + _, err := r.Read(v) + if err != nil { + panic(err) + } + store.Set(k, v) + } + } + + multiStore.Commit() + err = multiStore.LoadLatestVersion() + if err != nil { + panic(err) + } + + return multiStore +} + +func newMultiStoreWithMixedMounts(db dbm.DB) *rootmulti.Store { + store := rootmulti.NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) + store.MountStoreWithDB(types.NewKVStoreKey("iavl1"), types.StoreTypeIAVL, nil) + store.MountStoreWithDB(types.NewKVStoreKey("iavl2"), types.StoreTypeIAVL, nil) + store.MountStoreWithDB(types.NewKVStoreKey("iavl3"), types.StoreTypeIAVL, nil) + store.MountStoreWithDB(types.NewTransientStoreKey("trans1"), types.StoreTypeTransient, nil) + if err := store.LoadLatestVersion(); err != nil { + panic(err) + } + return store +} + +func newMultiStoreWithMixedMountsAndBasicData(db dbm.DB) *rootmulti.Store { + store := newMultiStoreWithMixedMounts(db) + store1 := store.GetStoreByName("iavl1").(types.CommitKVStore) + store2 := store.GetStoreByName("iavl2").(types.CommitKVStore) + trans1 := store.GetStoreByName("trans1").(types.KVStore) + + store1.Set([]byte("a"), []byte{1}) + store1.Set([]byte("b"), []byte{1}) + store2.Set([]byte("X"), []byte{255}) + store2.Set([]byte("A"), []byte{101}) + trans1.Set([]byte("x1"), []byte{91}) + store.Commit() + + store1.Set([]byte("b"), []byte{2}) + store1.Set([]byte("c"), []byte{3}) + store2.Set([]byte("B"), []byte{102}) + store.Commit() + + store2.Set([]byte("C"), []byte{103}) + store2.Delete([]byte("X")) + trans1.Set([]byte("x2"), []byte{92}) + store.Commit() + + return store +} + +func assertStoresEqual(t *testing.T, expect, actual types.CommitKVStore, msgAndArgs ...interface{}) { + t.Helper() + assert.Equal(t, expect.LastCommitID(), actual.LastCommitID()) + expectIter := expect.Iterator(nil, nil) + expectMap := map[string][]byte{} + for ; expectIter.Valid(); expectIter.Next() { + 
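+		// collect every key/value pair from the expected store for comparison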
expectMap[string(expectIter.Key())] = expectIter.Value()
+	}
+	require.NoError(t, expectIter.Error())
+
+	actualIter := actual.Iterator(nil, nil)
+	actualMap := map[string][]byte{}
+	for ; actualIter.Valid(); actualIter.Next() {
+		actualMap[string(actualIter.Key())] = actualIter.Value()
+	}
+	require.NoError(t, actualIter.Error())
+
+	assert.Equal(t, expectMap, actualMap, msgAndArgs...)
+}
+
+func TestMultistoreSnapshot_Checksum(t *testing.T) {
+	// Chunks from different nodes must fit together, so all nodes must produce identical chunks.
+	// This checksum test makes sure that the byte stream remains identical. If the test fails
+	// without having changed the data (e.g. because the Protobuf or zlib encoding changes),
+	// snapshottypes.CurrentFormat must be bumped.
+	store := newMultiStoreWithGeneratedData(dbm.NewMemDB(), 5, 10000)
+	version := uint64(store.LastCommitID().Version)
+
+	testcases := []struct {
+		format      uint32
+		chunkHashes []string
+	}{
+		{1, []string{
+			"503e5b51b657055b77e88169fadae543619368744ad15f1de0736c0a20482f24",
+			"e1a0daaa738eeb43e778aefd2805e3dd720798288a410b06da4b8459c4d8f72e",
+			"aa048b4ee0f484965d7b3b06822cf0772cdcaad02f3b1b9055e69f2cb365ef3c",
+			"7921eaa3ed4921341e504d9308a9877986a879fe216a099c86e8db66fcba4c63",
+			"a4a864e6c02c9fca5837ec80dc84f650b25276ed7e4820cf7516ced9f9901b86",
+			"980925390cc50f14998ecb1e87de719ca9dd7e72f5fefbe445397bf670f36c31",
+		}},
+	}
+	for _, tc := range testcases {
+		tc := tc
+		t.Run(fmt.Sprintf("Format %v", tc.format), func(t *testing.T) {
+			ch := make(chan io.ReadCloser)
+			go func() {
+				streamWriter := snapshots.NewStreamWriter(ch)
+				defer streamWriter.Close()
+				require.NotNil(t, streamWriter)
+				err := store.Snapshot(version, streamWriter)
+				require.NoError(t, err)
+			}()
+			hashes := []string{}
+			hasher := sha256.New()
+			for chunk := range ch {
+				hasher.Reset()
+				_, err := io.Copy(hasher, chunk)
+				require.NoError(t, err)
+				hashes = append(hashes, hex.EncodeToString(hasher.Sum(nil)))
+			}
+			assert.Equal(t, tc.chunkHashes, hashes,
+				"Snapshot output for format %v has changed", tc.format)
+		})
+	}
+}
+
+func TestMultistoreSnapshot_Errors(t *testing.T) {
+	store := newMultiStoreWithMixedMountsAndBasicData(dbm.NewMemDB())
+
+	testcases := map[string]struct {
+		height     uint64
+		expectType error
+	}{
+		"0 height":       {0, nil},
+		"unknown height": {9, nil},
+	}
+	for name, tc := range testcases {
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			err := store.Snapshot(tc.height, nil)
+			require.Error(t, err)
+			if tc.expectType != nil {
+				assert.True(t, errors.Is(err, tc.expectType))
+			}
+		})
+	}
+}
+
+func TestMultistoreSnapshotRestore(t *testing.T) {
+	source := newMultiStoreWithMixedMountsAndBasicData(dbm.NewMemDB())
+	target := newMultiStoreWithMixedMounts(dbm.NewMemDB())
+	version := uint64(source.LastCommitID().Version)
+	require.EqualValues(t, 3, version)
+	dummyExtensionItem := snapshottypes.SnapshotItem{
+		Item: &snapshottypes.SnapshotItem_Extension{
+			Extension: &snapshottypes.SnapshotExtensionMeta{
+				Name:   "test",
+				Format: 1,
+			},
+		},
+	}
+
+	chunks := make(chan io.ReadCloser, 100)
+	go func() {
+		streamWriter := snapshots.NewStreamWriter(chunks)
+		require.NotNil(t, streamWriter)
+		defer streamWriter.Close()
+		err := source.Snapshot(version, streamWriter)
+		require.NoError(t, err)
+		// write an extension metadata
+		err = streamWriter.WriteMsg(&dummyExtensionItem)
+		require.NoError(t, err)
+	}()
+
+	streamReader, err := snapshots.NewStreamReader(chunks)
+	require.NoError(t, err)
+	nextItem, err := target.Restore(version,
snapshottypes.CurrentFormat, streamReader) + require.NoError(t, err) + require.Equal(t, *dummyExtensionItem.GetExtension(), *nextItem.GetExtension()) + + assert.Equal(t, source.LastCommitID(), target.LastCommitID()) + for _, key := range source.StoreKeysByName() { + sourceStore := source.GetStoreByName(key.Name()).(types.CommitKVStore) + targetStore := target.GetStoreByName(key.Name()).(types.CommitKVStore) + switch sourceStore.GetStoreType() { + case types.StoreTypeTransient: + assert.False(t, targetStore.Iterator(nil, nil).Valid(), + "transient store %v not empty", key.Name()) + default: + assertStoresEqual(t, sourceStore, targetStore, "store %q not equal", key.Name()) + } + } +} + +func benchmarkMultistoreSnapshot(b *testing.B, stores uint8, storeKeys uint64) { + b.Helper() + b.Skip("Noisy with slow setup time, please see https://github.com/cosmos/cosmos-sdk/issues/8855.") + + b.ReportAllocs() + b.StopTimer() + source := newMultiStoreWithGeneratedData(dbm.NewMemDB(), stores, storeKeys) + version := source.LastCommitID().Version + require.EqualValues(b, 1, version) + b.StartTimer() + + for i := 0; i < b.N; i++ { + target := rootmulti.NewStore(dbm.NewMemDB(), log.NewNopLogger(), metrics.NewNoOpMetrics()) + for _, key := range source.StoreKeysByName() { + target.MountStoreWithDB(key, types.StoreTypeIAVL, nil) + } + err := target.LoadLatestVersion() + require.NoError(b, err) + require.EqualValues(b, 0, target.LastCommitID().Version) + + chunks := make(chan io.ReadCloser) + go func() { + streamWriter := snapshots.NewStreamWriter(chunks) + require.NotNil(b, streamWriter) + err := source.Snapshot(uint64(version), streamWriter) + require.NoError(b, err) + }() + for reader := range chunks { + _, err := io.Copy(io.Discard, reader) + require.NoError(b, err) + err = reader.Close() + require.NoError(b, err) + } + } +} + +func benchmarkMultistoreSnapshotRestore(b *testing.B, stores uint8, storeKeys uint64) { + b.Helper() + b.Skip("Noisy with slow setup time, please see https://github.com/cosmos/cosmos-sdk/issues/8855.") + + b.ReportAllocs() + b.StopTimer() + source := newMultiStoreWithGeneratedData(dbm.NewMemDB(), stores, storeKeys) + version := uint64(source.LastCommitID().Version) + require.EqualValues(b, 1, version) + b.StartTimer() + + for i := 0; i < b.N; i++ { + target := rootmulti.NewStore(dbm.NewMemDB(), log.NewNopLogger(), metrics.NewNoOpMetrics()) + for _, key := range source.StoreKeysByName() { + target.MountStoreWithDB(key, types.StoreTypeIAVL, nil) + } + err := target.LoadLatestVersion() + require.NoError(b, err) + require.EqualValues(b, 0, target.LastCommitID().Version) + + chunks := make(chan io.ReadCloser) + go func() { + writer := snapshots.NewStreamWriter(chunks) + require.NotNil(b, writer) + err := source.Snapshot(version, writer) + require.NoError(b, err) + }() + reader, err := snapshots.NewStreamReader(chunks) + require.NoError(b, err) + _, err = target.Restore(version, snapshottypes.CurrentFormat, reader) + require.NoError(b, err) + require.Equal(b, source.LastCommitID(), target.LastCommitID()) + } +} + +func BenchmarkMultistoreSnapshot100K(b *testing.B) { + benchmarkMultistoreSnapshot(b, 10, 10000) +} + +func BenchmarkMultistoreSnapshot1M(b *testing.B) { + benchmarkMultistoreSnapshot(b, 10, 100000) +} + +func BenchmarkMultistoreSnapshotRestore100K(b *testing.B) { + benchmarkMultistoreSnapshotRestore(b, 10, 10000) +} + +func BenchmarkMultistoreSnapshotRestore1M(b *testing.B) { + benchmarkMultistoreSnapshotRestore(b, 10, 100000) +} diff --git a/cosmos-sdk-store/rootmulti/store.go 
b/cosmos-sdk-store/rootmulti/store.go
new file mode 100755
index 000000000..04b41eb35
--- /dev/null
+++ b/cosmos-sdk-store/rootmulti/store.go
@@ -0,0 +1,1256 @@
+package rootmulti
+
+import (
+	"crypto/sha256"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"sort"
+	"strings"
+	"sync"
+
+	cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+	dbm "github.com/cosmos/cosmos-db"
+	protoio "github.com/cosmos/gogoproto/io"
+	gogotypes "github.com/cosmos/gogoproto/types"
+	iavltree "github.com/cosmos/iavl"
+
+	errorsmod "cosmossdk.io/errors"
+	"cosmossdk.io/log"
+	"cosmossdk.io/store/cachemulti"
+	"cosmossdk.io/store/dbadapter"
+	"cosmossdk.io/store/iavl"
+	"cosmossdk.io/store/listenkv"
+	"cosmossdk.io/store/mem"
+	"cosmossdk.io/store/metrics"
+	"cosmossdk.io/store/pruning"
+	pruningtypes "cosmossdk.io/store/pruning/types"
+	snapshottypes "cosmossdk.io/store/snapshots/types"
+	"cosmossdk.io/store/tracekv"
+	"cosmossdk.io/store/transient"
+	"cosmossdk.io/store/types"
+)
+
+const (
+	latestVersionKey = "s/latest"
+	commitInfoKeyFmt = "s/%d" // s/<version>
+)
+
+const iavlDisableFastNodeDefault = false
+
+// keysFromStoreKeyMap returns a slice of keys for the provided map lexically sorted by StoreKey.Name()
+func keysFromStoreKeyMap[V any](m map[types.StoreKey]V) []types.StoreKey {
+	keys := make([]types.StoreKey, 0, len(m))
+	for key := range m {
+		keys = append(keys, key)
+	}
+	sort.Slice(keys, func(i, j int) bool {
+		ki, kj := keys[i], keys[j]
+		return ki.Name() < kj.Name()
+	})
+	return keys
+}
+
+// Store is composed of many CommitStores. Name contrasts with
+// cacheMultiStore which is used for branching other MultiStores. It implements
+// the CommitMultiStore interface.
+type Store struct {
+	db                  dbm.DB
+	logger              log.Logger
+	lastCommitInfo      *types.CommitInfo
+	pruningManager      *pruning.Manager
+	iavlCacheSize       int
+	iavlDisableFastNode bool
+	storesParams        map[types.StoreKey]storeParams
+	stores              map[types.StoreKey]types.CommitKVStore
+	keysByName          map[string]types.StoreKey
+	initialVersion      int64
+	removalMap          map[types.StoreKey]bool
+	traceWriter         io.Writer
+	traceContext        types.TraceContext
+	traceContextMutex   sync.Mutex
+	interBlockCache     types.MultiStorePersistentCache
+	listeners           map[types.StoreKey]*types.MemoryListener
+	metrics             metrics.StoreMetrics
+	commitHeader        cmtproto.Header
+}
+
+var (
+	_ types.CommitMultiStore = (*Store)(nil)
+	_ types.Queryable        = (*Store)(nil)
+)
+
+// NewStore returns a reference to a new Store object with the provided DB. The
+// store will be created with a PruneNothing pruning strategy by default. After
+// a store is created, KVStores must be mounted and finally LoadLatestVersion or
+// LoadVersion must be called.
+func NewStore(db dbm.DB, logger log.Logger, metricGatherer metrics.StoreMetrics) *Store {
+	return &Store{
+		db:                  db,
+		logger:              logger,
+		iavlCacheSize:       iavl.DefaultIAVLCacheSize,
+		iavlDisableFastNode: iavlDisableFastNodeDefault,
+		storesParams:        make(map[types.StoreKey]storeParams),
+		stores:              make(map[types.StoreKey]types.CommitKVStore),
+		keysByName:          make(map[string]types.StoreKey),
+		listeners:           make(map[types.StoreKey]*types.MemoryListener),
+		removalMap:          make(map[types.StoreKey]bool),
+		pruningManager:      pruning.NewManager(db, logger),
+		metrics:             metricGatherer,
+	}
+}
+
+// GetPruning fetches the pruning strategy from the root store.
+func (rs *Store) GetPruning() pruningtypes.PruningOptions {
+	return rs.pruningManager.GetOptions()
+}
+
+// SetPruning sets the pruning strategy on the root store and all the sub-stores.
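+//
+// Illustrative call sequence (hypothetical "bank" store; mount and load first,
+// per the note below):
+//
+//	rs.MountStoreWithDB(types.NewKVStoreKey("bank"), types.StoreTypeIAVL, nil)
+//	_ = rs.LoadLatestVersion()
+//	rs.SetPruning(pruningtypes.NewCustomPruningOptions(100, 10)) // keep 100 versions, prune every 10 blocks
+//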
+// Note, calling SetPruning on the root store prior to LoadVersion or +// LoadLatestVersion performs a no-op as the stores aren't mounted yet. +func (rs *Store) SetPruning(pruningOpts pruningtypes.PruningOptions) { + rs.pruningManager.SetOptions(pruningOpts) +} + +// SetMetrics sets the metrics gatherer for the store package +func (rs *Store) SetMetrics(metrics metrics.StoreMetrics) { + rs.metrics = metrics +} + +// SetSnapshotInterval sets the interval at which the snapshots are taken. +// It is used by the store to determine which heights to retain until after the snapshot is complete. +func (rs *Store) SetSnapshotInterval(snapshotInterval uint64) { + rs.pruningManager.SetSnapshotInterval(snapshotInterval) +} + +func (rs *Store) SetIAVLCacheSize(cacheSize int) { + rs.iavlCacheSize = cacheSize +} + +func (rs *Store) SetIAVLDisableFastNode(disableFastNode bool) { + rs.iavlDisableFastNode = disableFastNode +} + +// GetStoreType implements Store. +func (rs *Store) GetStoreType() types.StoreType { + return types.StoreTypeMulti +} + +// MountStoreWithDB implements CommitMultiStore. +func (rs *Store) MountStoreWithDB(key types.StoreKey, typ types.StoreType, db dbm.DB) { + if key == nil { + panic("MountIAVLStore() key cannot be nil") + } + if _, ok := rs.storesParams[key]; ok { + panic(fmt.Sprintf("store duplicate store key %v", key)) + } + if _, ok := rs.keysByName[key.Name()]; ok { + panic(fmt.Sprintf("store duplicate store key name %v", key)) + } + rs.storesParams[key] = newStoreParams(key, db, typ, 0) + rs.keysByName[key.Name()] = key +} + +// GetCommitStore returns a mounted CommitStore for a given StoreKey. If the +// store is wrapped in an inter-block cache, it will be unwrapped before returning. +func (rs *Store) GetCommitStore(key types.StoreKey) types.CommitStore { + return rs.GetCommitKVStore(key) +} + +// GetCommitKVStore returns a mounted CommitKVStore for a given StoreKey. If the +// store is wrapped in an inter-block cache, it will be unwrapped before returning. +func (rs *Store) GetCommitKVStore(key types.StoreKey) types.CommitKVStore { + // If the Store has an inter-block cache, first attempt to lookup and unwrap + // the underlying CommitKVStore by StoreKey. If it does not exist, fallback to + // the main mapping of CommitKVStores. + if rs.interBlockCache != nil { + if store := rs.interBlockCache.Unwrap(key); store != nil { + return store + } + } + + return rs.stores[key] +} + +// StoreKeysByName returns mapping storeNames -> StoreKeys +func (rs *Store) StoreKeysByName() map[string]types.StoreKey { + return rs.keysByName +} + +// LoadLatestVersionAndUpgrade implements CommitMultiStore +func (rs *Store) LoadLatestVersionAndUpgrade(upgrades *types.StoreUpgrades) error { + ver := GetLatestVersion(rs.db) + return rs.loadVersion(ver, upgrades) +} + +// LoadVersionAndUpgrade allows us to rename substores while loading an older version +func (rs *Store) LoadVersionAndUpgrade(ver int64, upgrades *types.StoreUpgrades) error { + return rs.loadVersion(ver, upgrades) +} + +// LoadLatestVersion implements CommitMultiStore. +func (rs *Store) LoadLatestVersion() error { + ver := GetLatestVersion(rs.db) + return rs.loadVersion(ver, nil) +} + +// LoadVersion implements CommitMultiStore. 
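+//
+// For example (illustrative), loading a historical height for inspection:
+//
+//	if err := rs.LoadVersion(1000); err != nil {
+//		// the height was pruned or was never committed
+//	}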
+func (rs *Store) LoadVersion(ver int64) error { + return rs.loadVersion(ver, nil) +} + +func (rs *Store) loadVersion(ver int64, upgrades *types.StoreUpgrades) error { + infos := make(map[string]types.StoreInfo) + + rs.logger.Debug("loadVersion", "ver", ver) + cInfo := &types.CommitInfo{} + + // load old data if we are not version 0 + if ver != 0 { + var err error + cInfo, err = rs.GetCommitInfo(ver) + if err != nil { + return err + } + + // convert StoreInfos slice to map + for _, storeInfo := range cInfo.StoreInfos { + infos[storeInfo.Name] = storeInfo + } + } + + // load each Store (note this doesn't panic on unmounted keys now) + newStores := make(map[types.StoreKey]types.CommitKVStore) + + storesKeys := make([]types.StoreKey, 0, len(rs.storesParams)) + + for key := range rs.storesParams { + storesKeys = append(storesKeys, key) + } + + if upgrades != nil { + // deterministic iteration order for upgrades + // (as the underlying store may change and + // upgrades make store changes where the execution order may matter) + sort.Slice(storesKeys, func(i, j int) bool { + return storesKeys[i].Name() < storesKeys[j].Name() + }) + } + + for _, key := range storesKeys { + storeParams := rs.storesParams[key] + commitID := rs.getCommitID(infos, key.Name()) + rs.logger.Debug("loadVersion commitID", "key", key, "ver", ver, "hash", fmt.Sprintf("%x", commitID.Hash)) + + // If it has been added, set the initial version + if upgrades.IsAdded(key.Name()) || upgrades.RenamedFrom(key.Name()) != "" { + storeParams.initialVersion = uint64(ver) + 1 + } else if commitID.Version != ver && storeParams.typ == types.StoreTypeIAVL { + return fmt.Errorf("version of store %s mismatch root store's version; expected %d got %d; new stores should be added using StoreUpgrades", key.Name(), ver, commitID.Version) + } + + store, err := rs.loadCommitStoreFromParams(key, commitID, storeParams) + if err != nil { + return errorsmod.Wrap(err, "failed to load store") + } + + newStores[key] = store + + // If it was deleted, remove all data + if upgrades.IsDeleted(key.Name()) { + if err := deleteKVStore(store.(types.KVStore)); err != nil { + return errorsmod.Wrapf(err, "failed to delete store %s", key.Name()) + } + rs.removalMap[key] = true + } else if oldName := upgrades.RenamedFrom(key.Name()); oldName != "" { + // handle renames specially + // make an unregistered key to satisfy loadCommitStore params + oldKey := types.NewKVStoreKey(oldName) + oldParams := newStoreParams(oldKey, storeParams.db, storeParams.typ, 0) + + // load from the old name + oldStore, err := rs.loadCommitStoreFromParams(oldKey, rs.getCommitID(infos, oldName), oldParams) + if err != nil { + return errorsmod.Wrapf(err, "failed to load old store %s", oldName) + } + + // move all data + if err := moveKVStoreData(oldStore.(types.KVStore), store.(types.KVStore)); err != nil { + return errorsmod.Wrapf(err, "failed to move store %s -> %s", oldName, key.Name()) + } + + // add the old key so its deletion is committed + newStores[oldKey] = oldStore + // this will ensure it's not perpetually stored in commitInfo + rs.removalMap[oldKey] = true + } + } + + rs.lastCommitInfo = cInfo + rs.stores = newStores + + // load any snapshot heights we missed from disk to be pruned on the next run + if err := rs.pruningManager.LoadSnapshotHeights(rs.db); err != nil { + return err + } + + return nil +} + +func (rs *Store) getCommitID(infos map[string]types.StoreInfo, name string) types.CommitID { + info, ok := infos[name] + if !ok { + return types.CommitID{} + } + + return 
info.CommitId +} + +func deleteKVStore(kv types.KVStore) error { + // Note that we cannot write while iterating, so load all keys here, delete below + var keys [][]byte + itr := kv.Iterator(nil, nil) + for itr.Valid() { + keys = append(keys, itr.Key()) + itr.Next() + } + if err := itr.Close(); err != nil { + return err + } + + for _, k := range keys { + kv.Delete(k) + } + return nil +} + +// we simulate move by a copy and delete +func moveKVStoreData(oldDB, newDB types.KVStore) error { + // we read from one and write to another + itr := oldDB.Iterator(nil, nil) + for itr.Valid() { + newDB.Set(itr.Key(), itr.Value()) + itr.Next() + } + if err := itr.Close(); err != nil { + return err + } + + // then delete the old store + return deleteKVStore(oldDB) +} + +// PruneSnapshotHeight prunes the given height according to the prune strategy. +// If the strategy is PruneNothing, this is a no-op. +// For other strategies, this height is persisted until the snapshot is operated. +func (rs *Store) PruneSnapshotHeight(height int64) { + rs.pruningManager.HandleSnapshotHeight(height) +} + +// SetInterBlockCache sets the Store's internal inter-block (persistent) cache. +// When this is defined, all CommitKVStores will be wrapped with their respective +// inter-block cache. +func (rs *Store) SetInterBlockCache(c types.MultiStorePersistentCache) { + rs.interBlockCache = c +} + +// SetTracer sets the tracer for the MultiStore that the underlying +// stores will utilize to trace operations. A MultiStore is returned. +func (rs *Store) SetTracer(w io.Writer) types.MultiStore { + rs.traceWriter = w + return rs +} + +// SetTracingContext updates the tracing context for the MultiStore by merging +// the given context with the existing context by key. Any existing keys will +// be overwritten. It is implied that the caller should update the context when +// necessary between tracing operations. It returns a modified MultiStore. +func (rs *Store) SetTracingContext(tc types.TraceContext) types.MultiStore { + rs.traceContextMutex.Lock() + defer rs.traceContextMutex.Unlock() + rs.traceContext = rs.traceContext.Merge(tc) + + return rs +} + +func (rs *Store) getTracingContext() types.TraceContext { + rs.traceContextMutex.Lock() + defer rs.traceContextMutex.Unlock() + + if rs.traceContext == nil { + return nil + } + + ctx := types.TraceContext{} + for k, v := range rs.traceContext { + ctx[k] = v + } + + return ctx +} + +// TracingEnabled returns if tracing is enabled for the MultiStore. +func (rs *Store) TracingEnabled() bool { + return rs.traceWriter != nil +} + +// AddListeners adds a listener for the KVStore belonging to the provided StoreKey +func (rs *Store) AddListeners(keys []types.StoreKey) { + for i := range keys { + listener := rs.listeners[keys[i]] + if listener == nil { + rs.listeners[keys[i]] = types.NewMemoryListener() + } + } +} + +// ListeningEnabled returns if listening is enabled for a specific KVStore +func (rs *Store) ListeningEnabled(key types.StoreKey) bool { + if ls, ok := rs.listeners[key]; ok { + return ls != nil + } + return false +} + +// PopStateCache returns the accumulated state change messages from the CommitMultiStore +// Calling PopStateCache destroys only the currently accumulated state in each listener +// not the state in the store itself. This is a mutating and destructive operation. +// This method has been synchronized. 
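+//
+// Illustrative flow (hypothetical "bank" key; writes reach the listener via
+// the listenkv wrapper returned by GetKVStore):
+//
+//	key := types.NewKVStoreKey("bank")
+//	rs.AddListeners([]types.StoreKey{key})
+//	rs.GetKVStore(key).Set([]byte("k"), []byte("v"))
+//	pairs := rs.PopStateCache() // drained and sorted by StoreKey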
+func (rs *Store) PopStateCache() []*types.StoreKVPair {
+	var cache []*types.StoreKVPair
+	for key := range rs.listeners {
+		ls := rs.listeners[key]
+		if ls != nil {
+			cache = append(cache, ls.PopStateCache()...)
+		}
+	}
+	sort.SliceStable(cache, func(i, j int) bool {
+		return cache[i].StoreKey < cache[j].StoreKey
+	})
+	return cache
+}
+
+// LatestVersion returns the latest version in the store
+func (rs *Store) LatestVersion() int64 {
+	return rs.LastCommitID().Version
+}
+
+// LastCommitID implements Committer/CommitStore.
+func (rs *Store) LastCommitID() types.CommitID {
+	if rs.lastCommitInfo == nil {
+		emptyHash := sha256.Sum256([]byte{})
+		appHash := emptyHash[:]
+		return types.CommitID{
+			Version: GetLatestVersion(rs.db),
+			Hash:    appHash, // set empty apphash to sha256([]byte{}) if info is nil
+		}
+	}
+	if len(rs.lastCommitInfo.CommitID().Hash) == 0 {
+		emptyHash := sha256.Sum256([]byte{})
+		appHash := emptyHash[:]
+		return types.CommitID{
+			Version: rs.lastCommitInfo.Version,
+			Hash:    appHash, // set empty apphash to sha256([]byte{}) if hash is nil
+		}
+	}
+
+	return rs.lastCommitInfo.CommitID()
+}
+
+// Commit implements Committer/CommitStore.
+func (rs *Store) Commit() types.CommitID {
+	var previousHeight, version int64
+	if rs.lastCommitInfo.GetVersion() == 0 && rs.initialVersion > 1 {
+		// This case means that no commit has been made in the store, so we
+		// start from initialVersion.
+		version = rs.initialVersion
+	} else {
+		// This case can mean two things:
+		// - either there was already a previous commit in the store, in which
+		//   case we increment the version from there,
+		// - or there was no previous commit, and initial version was not set,
+		//   in which case we start at version 1.
+		previousHeight = rs.lastCommitInfo.GetVersion()
+		version = previousHeight + 1
+	}
+
+	if rs.commitHeader.Height != version {
+		rs.logger.Debug("commit header and version mismatch", "header_height", rs.commitHeader.Height, "version", version)
+	}
+
+	rs.lastCommitInfo = commitStores(version, rs.stores, rs.removalMap)
+	rs.lastCommitInfo.Timestamp = rs.commitHeader.Time
+	defer rs.flushMetadata(rs.db, version, rs.lastCommitInfo)
+
+	// remove remnants of removed stores
+	for sk := range rs.removalMap {
+		if _, ok := rs.stores[sk]; ok {
+			delete(rs.stores, sk)
+			delete(rs.storesParams, sk)
+			delete(rs.keysByName, sk.Name())
+		}
+	}
+
+	// reset the removalMap
+	rs.removalMap = make(map[types.StoreKey]bool)
+
+	if err := rs.handlePruning(version); err != nil {
+		rs.logger.Error(
+			"failed to prune store, please check your pruning configuration",
+			"err", err,
+		)
+	}
+
+	return types.CommitID{
+		Version: version,
+		Hash:    rs.lastCommitInfo.Hash(),
+	}
+}
+
+// WorkingHash returns the current hash of the store.
+// It is used to get the current app hash before commit.
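+//
+// WorkingHash is expected to match the hash of the next Commit (compare
+// TestHashStableWithEmptyCommit in store_test.go):
+//
+//	wh := rs.WorkingHash()
+//	id := rs.Commit()
+//	// bytes.Equal(wh, id.Hash) == true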
+func (rs *Store) WorkingHash() []byte {
+	storeInfos := make([]types.StoreInfo, 0, len(rs.stores))
+	storeKeys := keysFromStoreKeyMap(rs.stores)
+
+	for _, key := range storeKeys {
+		store := rs.stores[key]
+
+		if store.GetStoreType() != types.StoreTypeIAVL {
+			continue
+		}
+
+		if !rs.removalMap[key] {
+			si := types.StoreInfo{
+				Name: key.Name(),
+				CommitId: types.CommitID{
+					Hash: store.WorkingHash(),
+				},
+			}
+			storeInfos = append(storeInfos, si)
+		}
+	}
+
+	sort.SliceStable(storeInfos, func(i, j int) bool {
+		return storeInfos[i].Name < storeInfos[j].Name
+	})
+
+	hash := types.CommitInfo{StoreInfos: storeInfos}.Hash()
+
+	// Emit the per-store hashes through the structured logger rather than
+	// printing to stdout, so the diagnostic output can be filtered by level.
+	if bz, err := json.MarshalIndent(storeInfos, "", "  "); err != nil {
+		rs.logger.Error("failed to marshal store infos", "err", err)
+	} else {
+		rs.logger.Debug("calculated working hash", "store_infos", string(bz), "app_hash", fmt.Sprintf("%X", hash))
+	}
+
+	return hash
+}
+
+// CacheWrap implements CacheWrapper/Store/CommitStore.
+func (rs *Store) CacheWrap() types.CacheWrap {
+	return rs.CacheMultiStore().(types.CacheWrap)
+}
+
+// CacheWrapWithTrace implements the CacheWrapper interface.
+func (rs *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap {
+	return rs.CacheWrap()
+}
+
+// CacheMultiStore creates ephemeral branch of the multi-store and returns a CacheMultiStore.
+// It implements the MultiStore interface.
+func (rs *Store) CacheMultiStore() types.CacheMultiStore {
+	stores := make(map[types.StoreKey]types.CacheWrapper)
+	for k, v := range rs.stores {
+		store := types.KVStore(v)
+		// Wire the listenkv.Store to allow listeners to observe the writes from the cache store,
+		// set same listeners on cache store will observe duplicated writes.
+		if rs.ListeningEnabled(k) {
+			store = listenkv.NewStore(store, k, rs.listeners[k])
+		}
+		stores[k] = store
+	}
+	return cachemulti.NewStore(rs.db, stores, rs.keysByName, rs.traceWriter, rs.getTracingContext())
+}
+
+// CacheMultiStoreWithVersion is analogous to CacheMultiStore except that it
+// attempts to load stores at a given version (height). Module stores that
+// cannot be loaded at that version (for example, stores that were not written
+// at that height or were added later) are bypassed rather than failing the
+// whole query. This should only be used for querying and iterating at past
+// heights.
+func (rs *Store) CacheMultiStoreWithVersion(version int64) (types.CacheMultiStore, error) {
+	cachedStores := make(map[types.StoreKey]types.CacheWrapper)
+	for key, store := range rs.stores {
+		var cacheStore types.KVStore
+		switch store.GetStoreType() {
+		case types.StoreTypeIAVL:
+			// If the store is wrapped with an inter-block cache, we must first unwrap
+			// it to get the underlying IAVL store.
+			store = rs.GetCommitKVStore(key)
+
+			// Attempt to lazy-load an already saved IAVL store version.
+			// GetImmutable returns an error if the version does not exist or
+			// has been pruned.
+			var err error
+			cacheStore, err = store.(*iavl.Store).GetImmutable(version)
+			// Not every module store is written at every version, so a missing
+			// version is expected here: log the failure and bypass this store
+			// instead of failing the whole multistore query.
+			if err != nil {
+				rs.logger.Error("[*] Cache for", "module store", key.Name(), "error", err.Error())
+				continue
+			}
+
+		default:
+			cacheStore = store
+		}
+
+		// Wire the listenkv.Store to allow listeners to observe the writes from the cache store,
+		// set same listeners on cache store will observe duplicated writes.
+		if rs.ListeningEnabled(key) {
+			cacheStore = listenkv.NewStore(cacheStore, key, rs.listeners[key])
+		}
+
+		cachedStores[key] = cacheStore
+	}
+
+	return cachemulti.NewStore(rs.db, cachedStores, rs.keysByName, rs.traceWriter, rs.getTracingContext()), nil
+}
+
+// GetStore returns a mounted Store for a given StoreKey. If the StoreKey does
+// not exist, it will panic. If the Store is wrapped in an inter-block cache, it
+// will be unwrapped prior to being returned.
+//
+// TODO: This isn't used directly upstream. Consider returning the Store as-is
+// instead of unwrapping.
+func (rs *Store) GetStore(key types.StoreKey) types.Store {
+	store := rs.GetCommitKVStore(key)
+	if store == nil {
+		panic(fmt.Sprintf("store does not exist for key: %s", key.Name()))
+	}
+
+	return store
+}
+
+// GetKVStore returns a mounted KVStore for a given StoreKey. If tracing is
+// enabled on the KVStore, a wrapped TraceKVStore will be returned with the root
+// store's tracer, otherwise, the original KVStore will be returned.
+//
+// NOTE: The returned KVStore may be wrapped in an inter-block cache if it is
+// set on the root store.
+func (rs *Store) GetKVStore(key types.StoreKey) types.KVStore {
+	s := rs.stores[key]
+	if s == nil {
+		panic(fmt.Sprintf("store does not exist for key: %s", key.Name()))
+	}
+	store := types.KVStore(s)
+
+	if rs.TracingEnabled() {
+		store = tracekv.NewStore(store, rs.traceWriter, rs.getTracingContext())
+	}
+	if rs.ListeningEnabled(key) {
+		store = listenkv.NewStore(store, key, rs.listeners[key])
+	}
+
+	return store
+}
+
+func (rs *Store) handlePruning(version int64) error {
+	pruneHeight := rs.pruningManager.GetPruningHeight(version)
+	rs.logger.Debug("prune start", "height", version)
+	defer rs.logger.Debug("prune end", "height", version)
+	return rs.PruneStores(pruneHeight)
+}
+
+// PruneStores prunes all history up to the specific height of the multi store.
+func (rs *Store) PruneStores(pruningHeight int64) (err error) {
+	if pruningHeight <= 0 {
+		rs.logger.Debug("pruning skipped, height is less than or equal to 0")
+		return nil
+	}
+
+	rs.logger.Debug("pruning store", "height", pruningHeight)
+
+	for key, store := range rs.stores {
+		rs.logger.Debug("pruning store", "key", key) // Also log store.name (a private variable)?
+
+		// If the store is wrapped with an inter-block cache, we must first unwrap
+		// it to get the underlying IAVL store.
+		if store.GetStoreType() != types.StoreTypeIAVL {
+			continue
+		}
+
+		store = rs.GetCommitKVStore(key)
+
+		err := store.(*iavl.Store).DeleteVersionsTo(pruningHeight)
+		if err == nil {
+			continue
+		}
+
+		if errors.Is(err, iavltree.ErrVersionDoesNotExist) {
+			return err
+		}
+
+		rs.logger.Error("failed to prune store", "key", key, "err", err)
+	}
+	return nil
+}
+
+// GetStoreByName performs a lookup of a StoreKey given a store name typically
+// provided in a path. The StoreKey is then used to perform a lookup and return
+// a Store. If the Store is wrapped in an inter-block cache, it will be unwrapped
+// prior to being returned. If the StoreKey does not exist, nil is returned.
+func (rs *Store) GetStoreByName(name string) types.Store {
+	key := rs.keysByName[name]
+	if key == nil {
+		return nil
+	}
+
+	return rs.GetCommitKVStore(key)
+}
+
+// Query calls substore.Query with the same `req` where `req.Path` is
+// modified to remove the substore prefix.
+// Ie. `req.Path` here is `/<storeName>/<subpath>`, and trimmed to `/<subpath>` for the substore.
+// TODO: add proof for `multistore -> substore`.
+func (rs *Store) Query(req *types.RequestQuery) (*types.ResponseQuery, error) {
+	path := req.Path
+	storeName, subpath, err := parsePath(path)
+	if err != nil {
+		return &types.ResponseQuery{}, err
+	}
+
+	store := rs.GetStoreByName(storeName)
+	if store == nil {
+		return &types.ResponseQuery{}, errorsmod.Wrapf(types.ErrUnknownRequest, "no such store: %s", storeName)
+	}
+
+	queryable, ok := store.(types.Queryable)
+	if !ok {
+		return &types.ResponseQuery{}, errorsmod.Wrapf(types.ErrUnknownRequest, "store %s (type %T) doesn't support queries", storeName, store)
+	}
+
+	// trim the path and make the query
+	req.Path = subpath
+	res, err := queryable.Query(req)
+
+	if !req.Prove || !RequireProof(subpath) {
+		return res, err
+	}
+
+	if res.ProofOps == nil || len(res.ProofOps.Ops) == 0 {
+		return &types.ResponseQuery{}, errorsmod.Wrap(types.ErrInvalidRequest, "proof is unexpectedly empty; ensure height has not been pruned")
+	}
+
+	// If the request's height is the latest height we've committed, then utilize
+	// the store's lastCommitInfo as this commit info may not be flushed to disk.
+	// Otherwise, we query for the commit info from disk.
+	var commitInfo *types.CommitInfo
+
+	if res.Height == rs.lastCommitInfo.Version {
+		commitInfo = rs.lastCommitInfo
+	} else {
+		commitInfo, err = rs.GetCommitInfo(res.Height)
+		if err != nil {
+			return &types.ResponseQuery{}, err
+		}
+	}
+
+	// Restore origin path and append proof op.
+	res.ProofOps.Ops = append(res.ProofOps.Ops, commitInfo.ProofOp(storeName))
+
+	return res, nil
+}
+
+// SetInitialVersion sets the initial version of the IAVL tree. It is used when
+// starting a new chain at an arbitrary height.
+func (rs *Store) SetInitialVersion(version int64) error {
+	rs.initialVersion = version
+
+	// Loop through all the stores, if it's an IAVL store, then set initial
+	// version on it.
+	for key, store := range rs.stores {
+		if store.GetStoreType() == types.StoreTypeIAVL {
+			// If the store is wrapped with an inter-block cache, we must first unwrap
+			// it to get the underlying IAVL store.
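+			// (GetCommitKVStore falls back to rs.stores when no inter-block
+			// cache is configured, so the unwrap below always yields the
+			// mounted store.)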
+			store = rs.GetCommitKVStore(key)
+			store.(types.StoreWithInitialVersion).SetInitialVersion(version)
+		}
+	}
+
+	return nil
+}
+
+// parsePath expects a format like /<storeName>[/<subpath>]
+// Must start with /, subpath may be empty
+// Returns error if it doesn't start with /
+func parsePath(path string) (storeName, subpath string, err error) {
+	if !strings.HasPrefix(path, "/") {
+		return storeName, subpath, errorsmod.Wrapf(types.ErrUnknownRequest, "invalid path: %s", path)
+	}
+
+	paths := strings.SplitN(path[1:], "/", 2)
+	storeName = paths[0]
+
+	if len(paths) == 2 {
+		subpath = "/" + paths[1]
+	}
+
+	return storeName, subpath, nil
+}
+
+//---------------------- Snapshotting ------------------
+
+// Snapshot implements snapshottypes.Snapshotter. The snapshot output for a given format must be
+// identical across nodes such that chunks from different sources fit together. If the output for a
+// given format changes (at the byte level), the snapshot format must be bumped - see
+// TestMultistoreSnapshot_Checksum test.
+func (rs *Store) Snapshot(height uint64, protoWriter protoio.Writer) error {
+	if height == 0 {
+		return errorsmod.Wrap(types.ErrLogic, "cannot snapshot height 0")
+	}
+	if height > uint64(GetLatestVersion(rs.db)) {
+		return errorsmod.Wrapf(types.ErrLogic, "cannot snapshot future height %v", height)
+	}
+
+	// Collect stores to snapshot (only IAVL stores are supported)
+	type namedStore struct {
+		*iavl.Store
+		name string
+	}
+	stores := []namedStore{}
+	keys := keysFromStoreKeyMap(rs.stores)
+	for _, key := range keys {
+		switch store := rs.GetCommitKVStore(key).(type) {
+		case *iavl.Store:
+			stores = append(stores, namedStore{name: key.Name(), Store: store})
+		case *transient.Store, *mem.Store:
+			// Non-persisted stores shouldn't be snapshotted
+			continue
+		default:
+			return errorsmod.Wrapf(types.ErrLogic,
+				"don't know how to snapshot store %q of type %T", key.Name(), store)
+		}
+	}
+	sort.Slice(stores, func(i, j int) bool {
+		return strings.Compare(stores[i].name, stores[j].name) == -1
+	})
+
+	// Export each IAVL store. Stores are serialized as a stream of SnapshotItem Protobuf
+	// messages. The first item contains a SnapshotStore with store metadata (i.e. name),
+	// and the following messages contain a SnapshotNode (i.e. an ExportNode). Store changes
+	// are demarcated by new SnapshotStore items.
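+	//
+	// Illustrative stream shape for two stores (hypothetical names):
+	//
+	//	SnapshotItem{Store: "acc"}
+	//	SnapshotItem{IAVL: node}   // repeated, one per exported node
+	//	SnapshotItem{Store: "bank"}
+	//	SnapshotItem{IAVL: node}   // ...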
+ for _, store := range stores { + rs.logger.Debug("starting snapshot", "store", store.name, "height", height) + exporter, err := store.Export(int64(height)) + if err != nil { + rs.logger.Error("snapshot failed; exporter error", "store", store.name, "err", err) + return err + } + + err = func() error { + defer exporter.Close() + + err := protoWriter.WriteMsg(&snapshottypes.SnapshotItem{ + Item: &snapshottypes.SnapshotItem_Store{ + Store: &snapshottypes.SnapshotStoreItem{ + Name: store.name, + }, + }, + }) + if err != nil { + rs.logger.Error("snapshot failed; item store write failed", "store", store.name, "err", err) + return err + } + + nodeCount := 0 + for { + node, err := exporter.Next() + if err == iavltree.ErrorExportDone { + rs.logger.Debug("snapshot Done", "store", store.name, "nodeCount", nodeCount) + break + } else if err != nil { + return err + } + err = protoWriter.WriteMsg(&snapshottypes.SnapshotItem{ + Item: &snapshottypes.SnapshotItem_IAVL{ + IAVL: &snapshottypes.SnapshotIAVLItem{ + Key: node.Key, + Value: node.Value, + Height: int32(node.Height), + Version: node.Version, + }, + }, + }) + if err != nil { + return err + } + nodeCount++ + } + + return nil + }() + + if err != nil { + return err + } + } + + return nil +} + +// Restore implements snapshottypes.Snapshotter. +// returns next snapshot item and error. +func (rs *Store) Restore( + height uint64, format uint32, protoReader protoio.Reader, +) (snapshottypes.SnapshotItem, error) { + // Import nodes into stores. The first item is expected to be a SnapshotItem containing + // a SnapshotStoreItem, telling us which store to import into. The following items will contain + // SnapshotNodeItem (i.e. ExportNode) until we reach the next SnapshotStoreItem or EOF. + var importer *iavltree.Importer + var snapshotItem snapshottypes.SnapshotItem +loop: + for { + snapshotItem = snapshottypes.SnapshotItem{} + err := protoReader.ReadMsg(&snapshotItem) + if err == io.EOF { + break + } else if err != nil { + return snapshottypes.SnapshotItem{}, errorsmod.Wrap(err, "invalid protobuf message") + } + + switch item := snapshotItem.Item.(type) { + case *snapshottypes.SnapshotItem_Store: + if importer != nil { + err = importer.Commit() + if err != nil { + return snapshottypes.SnapshotItem{}, errorsmod.Wrap(err, "IAVL commit failed") + } + importer.Close() + } + store, ok := rs.GetStoreByName(item.Store.Name).(*iavl.Store) + if !ok || store == nil { + return snapshottypes.SnapshotItem{}, errorsmod.Wrapf(types.ErrLogic, "cannot import into non-IAVL store %q", item.Store.Name) + } + importer, err = store.Import(int64(height)) + if err != nil { + return snapshottypes.SnapshotItem{}, errorsmod.Wrap(err, "import failed") + } + defer importer.Close() + // Importer height must reflect the node height (which usually matches the block height, but not always) + rs.logger.Debug("restoring snapshot", "store", item.Store.Name) + + case *snapshottypes.SnapshotItem_IAVL: + if importer == nil { + rs.logger.Error("failed to restore; received IAVL node item before store item") + return snapshottypes.SnapshotItem{}, errorsmod.Wrap(types.ErrLogic, "received IAVL node item before store item") + } + if item.IAVL.Height > math.MaxInt8 { + return snapshottypes.SnapshotItem{}, errorsmod.Wrapf(types.ErrLogic, "node height %v cannot exceed %v", + item.IAVL.Height, math.MaxInt8) + } + node := &iavltree.ExportNode{ + Key: item.IAVL.Key, + Value: item.IAVL.Value, + Height: int8(item.IAVL.Height), + Version: item.IAVL.Version, + } + // Protobuf does not differentiate between 
[]byte{} and nil, but fortunately IAVL does
+			// not allow nil keys nor nil values for leaf nodes, so we can always set them to empty.
+			if node.Key == nil {
+				node.Key = []byte{}
+			}
+			if node.Height == 0 && node.Value == nil {
+				node.Value = []byte{}
+			}
+			err := importer.Add(node)
+			if err != nil {
+				return snapshottypes.SnapshotItem{}, errorsmod.Wrap(err, "IAVL node import failed")
+			}
+
+		default:
+			break loop
+		}
+	}
+
+	if importer != nil {
+		err := importer.Commit()
+		if err != nil {
+			return snapshottypes.SnapshotItem{}, errorsmod.Wrap(err, "IAVL commit failed")
+		}
+		importer.Close()
+	}
+
+	rs.flushMetadata(rs.db, int64(height), rs.buildCommitInfo(int64(height)))
+	return snapshotItem, rs.LoadLatestVersion()
+}
+
+func (rs *Store) loadCommitStoreFromParams(key types.StoreKey, id types.CommitID, params storeParams) (types.CommitKVStore, error) {
+	var db dbm.DB
+
+	if params.db != nil {
+		db = dbm.NewPrefixDB(params.db, []byte("s/_/"))
+	} else {
+		prefix := "s/k:" + params.key.Name() + "/"
+		db = dbm.NewPrefixDB(rs.db, []byte(prefix))
+	}
+
+	switch params.typ {
+	case types.StoreTypeMulti:
+		panic("recursive MultiStores not yet supported")
+
+	case types.StoreTypeIAVL:
+		var store types.CommitKVStore
+		var err error
+
+		if params.initialVersion == 0 {
+			store, err = iavl.LoadStore(db, rs.logger, key, id, rs.iavlCacheSize, rs.iavlDisableFastNode, rs.metrics)
+		} else {
+			store, err = iavl.LoadStoreWithInitialVersion(db, rs.logger, key, id, params.initialVersion, rs.iavlCacheSize, rs.iavlDisableFastNode, rs.metrics)
+		}
+
+		if err != nil {
+			return nil, err
+		}
+
+		if rs.interBlockCache != nil {
+			// Wrap and get a CommitKVStore with inter-block caching. Note, this should
+			// only wrap the primary CommitKVStore, not any store that is already
+			// branched as that will create unexpected behavior.
+			store = rs.interBlockCache.GetStoreCache(key, store)
+		}
+
+		return store, err
+
+	case types.StoreTypeDB:
+		return commitDBStoreAdapter{Store: dbadapter.Store{DB: db}}, nil
+
+	case types.StoreTypeTransient:
+		_, ok := key.(*types.TransientStoreKey)
+		if !ok {
+			return nil, fmt.Errorf("invalid StoreKey for StoreTypeTransient: %s", key.String())
+		}
+
+		return transient.NewStore(), nil
+
+	case types.StoreTypeMemory:
+		if _, ok := key.(*types.MemoryStoreKey); !ok {
+			return nil, fmt.Errorf("unexpected key type for a MemoryStoreKey; got: %s", key.String())
+		}
+
+		return mem.NewStore(), nil
+
+	default:
+		panic(fmt.Sprintf("unrecognized store type %v", params.typ))
+	}
+}
+
+func (rs *Store) buildCommitInfo(version int64) *types.CommitInfo {
+	keys := keysFromStoreKeyMap(rs.stores)
+	storeInfos := []types.StoreInfo{}
+	for _, key := range keys {
+		store := rs.stores[key]
+		storeType := store.GetStoreType()
+		if storeType == types.StoreTypeTransient || storeType == types.StoreTypeMemory {
+			continue
+		}
+		storeInfos = append(storeInfos, types.StoreInfo{
+			Name:     key.Name(),
+			CommitId: store.LastCommitID(),
+		})
+	}
+	return &types.CommitInfo{
+		Version:    version,
+		StoreInfos: storeInfos,
+	}
+}
+
+// RollbackToVersion deletes the versions after `target` and updates the latest version.
+func (rs *Store) RollbackToVersion(target int64) error {
+	if target <= 0 {
+		return fmt.Errorf("invalid rollback height target: %d", target)
+	}
+
+	for key, store := range rs.stores {
+		if store.GetStoreType() == types.StoreTypeIAVL {
+			// If the store is wrapped with an inter-block cache, we must first unwrap
+			// it to get the underlying IAVL store.
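+			// (iavl's LoadVersionForOverwriting loads `target` and discards all
+			// newer versions, so the next Commit re-writes target+1.)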
+ store = rs.GetCommitKVStore(key) + err := store.(*iavl.Store).LoadVersionForOverwriting(target) + if err != nil { + return err + } + } + } + + rs.flushMetadata(rs.db, target, rs.buildCommitInfo(target)) + + return rs.LoadLatestVersion() +} + +// SetCommitHeader sets the commit block header of the store. +func (rs *Store) SetCommitHeader(h cmtproto.Header) { + rs.commitHeader = h +} + +// GetCommitInfo attempts to retrieve CommitInfo for a given version/height. It +// will return an error if no CommitInfo exists, we fail to unmarshal the record +// or if we cannot retrieve the object from the DB. +func (rs *Store) GetCommitInfo(ver int64) (*types.CommitInfo, error) { + cInfoKey := fmt.Sprintf(commitInfoKeyFmt, ver) + + bz, err := rs.db.Get([]byte(cInfoKey)) + if err != nil { + return nil, errorsmod.Wrap(err, "failed to get commit info") + } else if bz == nil { + return nil, errors.New("no commit info found") + } + + cInfo := &types.CommitInfo{} + if err = cInfo.Unmarshal(bz); err != nil { + return nil, errorsmod.Wrap(err, "failed unmarshal commit info") + } + + return cInfo, nil +} + +func (rs *Store) flushMetadata(db dbm.DB, version int64, cInfo *types.CommitInfo) { + rs.logger.Debug("flushing metadata", "height", version) + batch := db.NewBatch() + defer func() { + _ = batch.Close() + }() + + if cInfo != nil { + flushCommitInfo(batch, version, cInfo) + } else { + rs.logger.Debug("commitInfo is nil, not flushed", "height", version) + } + + flushLatestVersion(batch, version) + + if err := batch.WriteSync(); err != nil { + panic(fmt.Errorf("error on batch write %w", err)) + } + rs.logger.Debug("flushing metadata finished", "height", version) +} + +type storeParams struct { + key types.StoreKey + db dbm.DB + typ types.StoreType + initialVersion uint64 +} + +func newStoreParams(key types.StoreKey, db dbm.DB, typ types.StoreType, initialVersion uint64) storeParams { + return storeParams{ + key: key, + db: db, + typ: typ, + initialVersion: initialVersion, + } +} + +func GetLatestVersion(db dbm.DB) int64 { + bz, err := db.Get([]byte(latestVersionKey)) + if err != nil { + panic(err) + } else if bz == nil { + return 0 + } + + var latestVersion int64 + + if err := gogotypes.StdInt64Unmarshal(&latestVersion, bz); err != nil { + panic(err) + } + + return latestVersion +} + +// Commits each store and returns a new commitInfo. +func commitStores(version int64, storeMap map[types.StoreKey]types.CommitKVStore, removalMap map[types.StoreKey]bool) *types.CommitInfo { + storeInfos := make([]types.StoreInfo, 0, len(storeMap)) + storeKeys := keysFromStoreKeyMap(storeMap) + + for _, key := range storeKeys { + store := storeMap[key] + last := store.LastCommitID() + + // If a commit event execution is interrupted, a new iavl store's version + // will be larger than the RMS's metadata, when the block is replayed, we + // should avoid committing that iavl store again. 
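+		// e.g. (illustrative): the multistore metadata records version 10, but
+		// an interrupted commit already wrote iavl version 11; on replay we
+		// reuse that existing version instead of committing it twice.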
+ var commitID types.CommitID + if last.Version >= version { + last.Version = version + commitID = last + } else { + commitID = store.Commit() + } + + storeType := store.GetStoreType() + if storeType == types.StoreTypeTransient || storeType == types.StoreTypeMemory { + continue + } + + if !removalMap[key] { + si := types.StoreInfo{} + si.Name = key.Name() + si.CommitId = commitID + storeInfos = append(storeInfos, si) + } + } + + sort.SliceStable(storeInfos, func(i, j int) bool { + return strings.Compare(storeInfos[i].Name, storeInfos[j].Name) < 0 + }) + + return &types.CommitInfo{ + Version: version, + StoreInfos: storeInfos, + } +} + +func flushCommitInfo(batch dbm.Batch, version int64, cInfo *types.CommitInfo) { + bz, err := cInfo.Marshal() + if err != nil { + panic(err) + } + + cInfoKey := fmt.Sprintf(commitInfoKeyFmt, version) + err = batch.Set([]byte(cInfoKey), bz) + if err != nil { + panic(err) + } +} + +func flushLatestVersion(batch dbm.Batch, version int64) { + bz, err := gogotypes.StdInt64Marshal(version) + if err != nil { + panic(err) + } + + err = batch.Set([]byte(latestVersionKey), bz) + if err != nil { + panic(err) + } +} diff --git a/cosmos-sdk-store/rootmulti/store_test.go b/cosmos-sdk-store/rootmulti/store_test.go new file mode 100755 index 000000000..2702f3e08 --- /dev/null +++ b/cosmos-sdk-store/rootmulti/store_test.go @@ -0,0 +1,992 @@ +package rootmulti + +import ( + "bytes" + "crypto/sha256" + "fmt" + "testing" + "time" + + dbm "github.com/cosmos/cosmos-db" + "github.com/stretchr/testify/require" + + "cosmossdk.io/errors" + "cosmossdk.io/log" + "cosmossdk.io/store/cachemulti" + "cosmossdk.io/store/iavl" + sdkmaps "cosmossdk.io/store/internal/maps" + "cosmossdk.io/store/metrics" + pruningtypes "cosmossdk.io/store/pruning/types" + "cosmossdk.io/store/types" +) + +func TestStoreType(t *testing.T) { + db := dbm.NewMemDB() + store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) + store.MountStoreWithDB(types.NewKVStoreKey("store1"), types.StoreTypeIAVL, db) +} + +func TestGetCommitKVStore(t *testing.T) { + var db dbm.DB = dbm.NewMemDB() + ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningDefault)) + err := ms.LoadLatestVersion() + require.Nil(t, err) + + key := ms.keysByName["store1"] + + store1 := ms.GetCommitKVStore(key) + require.NotNil(t, store1) + require.IsType(t, &iavl.Store{}, store1) + + store2 := ms.GetCommitStore(key) + require.NotNil(t, store2) + require.IsType(t, &iavl.Store{}, store2) +} + +func TestStoreMount(t *testing.T) { + db := dbm.NewMemDB() + store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) + + key1 := types.NewKVStoreKey("store1") + key2 := types.NewKVStoreKey("store2") + dup1 := types.NewKVStoreKey("store1") + + require.NotPanics(t, func() { store.MountStoreWithDB(key1, types.StoreTypeIAVL, db) }) + require.NotPanics(t, func() { store.MountStoreWithDB(key2, types.StoreTypeIAVL, db) }) + + require.Panics(t, func() { store.MountStoreWithDB(key1, types.StoreTypeIAVL, db) }) + require.Panics(t, func() { store.MountStoreWithDB(nil, types.StoreTypeIAVL, db) }) + require.Panics(t, func() { store.MountStoreWithDB(dup1, types.StoreTypeIAVL, db) }) +} + +func TestCacheMultiStore(t *testing.T) { + var db dbm.DB = dbm.NewMemDB() + ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) + + cacheMulti := ms.CacheMultiStore() + require.IsType(t, cachemulti.Store{}, cacheMulti) +} + +func TestCacheMultiStoreWithVersion(t *testing.T) { + var db dbm.DB = 
dbm.NewMemDB() + ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) + err := ms.LoadLatestVersion() + require.Nil(t, err) + + emptyHash := sha256.Sum256([]byte{}) + appHash := emptyHash[:] + commitID := types.CommitID{Hash: appHash} + checkStore(t, ms, commitID, commitID) + + k, v := []byte("wind"), []byte("blows") + + store1 := ms.GetStoreByName("store1").(types.KVStore) + store1.Set(k, v) + + cID := ms.Commit() + require.Equal(t, int64(1), cID.Version) + + // require no failure when given an invalid or pruned version + _, err = ms.CacheMultiStoreWithVersion(cID.Version + 1) + require.Error(t, err) + + // require a valid version can be cache-loaded + cms, err := ms.CacheMultiStoreWithVersion(cID.Version) + require.NoError(t, err) + + // require a valid key lookup yields the correct value + kvStore := cms.GetKVStore(ms.keysByName["store1"]) + require.NotNil(t, kvStore) + require.Equal(t, kvStore.Get(k), v) + + // add new module stores (store4 and store5) to multi stores and commit + ms.MountStoreWithDB(types.NewKVStoreKey("store4"), types.StoreTypeIAVL, nil) + ms.MountStoreWithDB(types.NewKVStoreKey("store5"), types.StoreTypeIAVL, nil) + err = ms.LoadLatestVersionAndUpgrade(&types.StoreUpgrades{Added: []string{"store4", "store5"}}) + require.NoError(t, err) + ms.Commit() + + // cache multistore of version before adding store4 should works + _, err = ms.CacheMultiStoreWithVersion(1) + require.NoError(t, err) + + // require we cannot commit (write) to a cache-versioned multi-store + require.Panics(t, func() { + kvStore.Set(k, []byte("newValue")) + cms.Write() + }) +} + +func TestHashStableWithEmptyCommit(t *testing.T) { + var db dbm.DB = dbm.NewMemDB() + ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) + err := ms.LoadLatestVersion() + require.Nil(t, err) + + emptyHash := sha256.Sum256([]byte{}) + appHash := emptyHash[:] + commitID := types.CommitID{Hash: appHash} + checkStore(t, ms, commitID, commitID) + + k, v := []byte("wind"), []byte("blows") + + store1 := ms.GetStoreByName("store1").(types.KVStore) + store1.Set(k, v) + + workingHash := ms.WorkingHash() + cID := ms.Commit() + require.Equal(t, int64(1), cID.Version) + hash := cID.Hash + require.Equal(t, workingHash, hash) + + // make an empty commit, it should update version, but not affect hash + workingHash = ms.WorkingHash() + cID = ms.Commit() + require.Equal(t, workingHash, cID.Hash) + require.Equal(t, int64(2), cID.Version) + require.Equal(t, hash, cID.Hash) +} + +func TestMultistoreCommitLoad(t *testing.T) { + var db dbm.DB = dbm.NewMemDB() + store := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) + err := store.LoadLatestVersion() + require.Nil(t, err) + + emptyHash := sha256.Sum256([]byte{}) + appHash := emptyHash[:] + // New store has empty last commit. + commitID := types.CommitID{Hash: appHash} + checkStore(t, store, commitID, commitID) + + // Make sure we can get stores by name. + s1 := store.GetStoreByName("store1") + require.NotNil(t, s1) + s3 := store.GetStoreByName("store3") + require.NotNil(t, s3) + s77 := store.GetStoreByName("store77") + require.Nil(t, s77) + + // Make a few commits and check them. 
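+ // Each iteration checks the invariant that WorkingHash (the pre-commit
+ // app hash) equals the hash returned by the subsequent Commit.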
+ nCommits := int64(3) + for i := int64(0); i < nCommits; i++ { + workingHash := store.WorkingHash() + commitID = store.Commit() + require.Equal(t, workingHash, commitID.Hash) + expectedCommitID := getExpectedCommitID(store, i+1) + checkStore(t, store, expectedCommitID, commitID) + } + + // Load the latest multistore again and check version. + store = newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) + err = store.LoadLatestVersion() + require.Nil(t, err) + commitID = getExpectedCommitID(store, nCommits) + checkStore(t, store, commitID, commitID) + + // Commit and check version. + workingHash := store.WorkingHash() + commitID = store.Commit() + require.Equal(t, workingHash, commitID.Hash) + expectedCommitID := getExpectedCommitID(store, nCommits+1) + checkStore(t, store, expectedCommitID, commitID) + + // Load an older multistore and check version. + ver := nCommits - 1 + store = newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) + err = store.LoadVersion(ver) + require.Nil(t, err) + commitID = getExpectedCommitID(store, ver) + checkStore(t, store, commitID, commitID) +} + +func TestMultistoreLoadWithUpgrade(t *testing.T) { + var db dbm.DB = dbm.NewMemDB() + store := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) + err := store.LoadLatestVersion() + require.Nil(t, err) + + // write some data in all stores + k1, v1 := []byte("first"), []byte("store") + s1, _ := store.GetStoreByName("store1").(types.KVStore) + require.NotNil(t, s1) + s1.Set(k1, v1) + + k2, v2 := []byte("second"), []byte("restore") + s2, _ := store.GetStoreByName("store2").(types.KVStore) + require.NotNil(t, s2) + s2.Set(k2, v2) + + k3, v3 := []byte("third"), []byte("dropped") + s3, _ := store.GetStoreByName("store3").(types.KVStore) + require.NotNil(t, s3) + s3.Set(k3, v3) + + s4, _ := store.GetStoreByName("store4").(types.KVStore) + require.Nil(t, s4) + + // do one commit + workingHash := store.WorkingHash() + commitID := store.Commit() + require.Equal(t, workingHash, commitID.Hash) + expectedCommitID := getExpectedCommitID(store, 1) + checkStore(t, store, expectedCommitID, commitID) + + ci, err := store.GetCommitInfo(1) + require.NoError(t, err) + require.Equal(t, int64(1), ci.Version) + require.Equal(t, 3, len(ci.StoreInfos)) + checkContains(t, ci.StoreInfos, []string{"store1", "store2", "store3"}) + + // Load without changes and make sure it is sensible + store = newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) + + err = store.LoadLatestVersion() + require.Nil(t, err) + commitID = getExpectedCommitID(store, 1) + checkStore(t, store, commitID, commitID) + + // let's query data to see it was saved properly + s2, _ = store.GetStoreByName("store2").(types.KVStore) + require.NotNil(t, s2) + require.Equal(t, v2, s2.Get(k2)) + + // now, let's load with upgrades... 
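+ // (newMultiStoreWithModifiedMounts renames store2 to restore2, deletes
+ // store3, and adds store4; see its definition near the end of this file)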
+ restore, upgrades := newMultiStoreWithModifiedMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) + err = restore.LoadLatestVersionAndUpgrade(upgrades) + require.Nil(t, err) + + // s1 was not changed + s1, _ = restore.GetStoreByName("store1").(types.KVStore) + require.NotNil(t, s1) + require.Equal(t, v1, s1.Get(k1)) + + // store3 is mounted, but data deleted are gone + s3, _ = restore.GetStoreByName("store3").(types.KVStore) + require.NotNil(t, s3) + require.Nil(t, s3.Get(k3)) // data was deleted + + // store4 is mounted, with empty data + s4, _ = restore.GetStoreByName("store4").(types.KVStore) + require.NotNil(t, s4) + + iterator := s4.Iterator(nil, nil) + + values := 0 + for ; iterator.Valid(); iterator.Next() { + values++ + } + require.Zero(t, values) + + require.NoError(t, iterator.Close()) + + // write something inside store4 + k4, v4 := []byte("fourth"), []byte("created") + s4.Set(k4, v4) + + // store2 is no longer mounted + st2 := restore.GetStoreByName("store2") + require.Nil(t, st2) + + // restore2 has the old data + rs2, _ := restore.GetStoreByName("restore2").(types.KVStore) + require.NotNil(t, rs2) + require.Equal(t, v2, rs2.Get(k2)) + + // store this migrated data, and load it again without migrations + migratedID := restore.Commit() + require.Equal(t, migratedID.Version, int64(2)) + + reload, _ := newMultiStoreWithModifiedMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) + // unmount store3 since store3 was deleted + unmountStore(reload, "store3") + + rs3, _ := reload.GetStoreByName("store3").(types.KVStore) + require.Nil(t, rs3) + + err = reload.LoadLatestVersion() + require.Nil(t, err) + require.Equal(t, migratedID, reload.LastCommitID()) + + // query this new store + rl1, _ := reload.GetStoreByName("store1").(types.KVStore) + require.NotNil(t, rl1) + require.Equal(t, v1, rl1.Get(k1)) + + rl2, _ := reload.GetStoreByName("restore2").(types.KVStore) + require.NotNil(t, rl2) + require.Equal(t, v2, rl2.Get(k2)) + + rl4, _ := reload.GetStoreByName("store4").(types.KVStore) + require.NotNil(t, rl4) + require.Equal(t, v4, rl4.Get(k4)) + + // check commitInfo in storage + ci, err = reload.GetCommitInfo(2) + require.NoError(t, err) + require.Equal(t, int64(2), ci.Version) + require.Equal(t, 3, len(ci.StoreInfos), ci.StoreInfos) + checkContains(t, ci.StoreInfos, []string{"store1", "restore2", "store4"}) +} + +func TestParsePath(t *testing.T) { + _, _, err := parsePath("foo") + require.Error(t, err) + + store, subpath, err := parsePath("/foo") + require.NoError(t, err) + require.Equal(t, store, "foo") + require.Equal(t, subpath, "") + + store, subpath, err = parsePath("/fizz/bang/baz") + require.NoError(t, err) + require.Equal(t, store, "fizz") + require.Equal(t, subpath, "/bang/baz") + + substore, subsubpath, err := parsePath(subpath) + require.NoError(t, err) + require.Equal(t, substore, "bang") + require.Equal(t, subsubpath, "/baz") +} + +func TestMultiStoreRestart(t *testing.T) { + db := dbm.NewMemDB() + pruning := pruningtypes.NewCustomPruningOptions(2, 1) + multi := newMultiStoreWithMounts(db, pruning) + err := multi.LoadLatestVersion() + require.Nil(t, err) + + initCid := multi.LastCommitID() + + k, v := "wind", "blows" + k2, v2 := "water", "flows" + k3, v3 := "fire", "burns" + + for i := 1; i < 3; i++ { + // Set and commit data in one store. + store1 := multi.GetStoreByName("store1").(types.KVStore) + store1.Set([]byte(k), []byte(fmt.Sprintf("%s:%d", v, i))) + + // ... and another. 
+ store2 := multi.GetStoreByName("store2").(types.KVStore) + store2.Set([]byte(k2), []byte(fmt.Sprintf("%s:%d", v2, i))) + + // ... and another. + store3 := multi.GetStoreByName("store3").(types.KVStore) + store3.Set([]byte(k3), []byte(fmt.Sprintf("%s:%d", v3, i))) + + multi.Commit() + + cinfo, err := multi.GetCommitInfo(int64(i)) + require.NoError(t, err) + require.Equal(t, int64(i), cinfo.Version) + } + + // Set and commit data in one store. + store1 := multi.GetStoreByName("store1").(types.KVStore) + store1.Set([]byte(k), []byte(fmt.Sprintf("%s:%d", v, 3))) + + // ... and another. + store2 := multi.GetStoreByName("store2").(types.KVStore) + store2.Set([]byte(k2), []byte(fmt.Sprintf("%s:%d", v2, 3))) + + multi.Commit() + + flushedCinfo, err := multi.GetCommitInfo(3) + require.Nil(t, err) + require.NotEqual(t, initCid, flushedCinfo, "CID is different after flush to disk") + + // ... and another. + store3 := multi.GetStoreByName("store3").(types.KVStore) + store3.Set([]byte(k3), []byte(fmt.Sprintf("%s:%d", v3, 3))) + + multi.Commit() + + postFlushCinfo, err := multi.GetCommitInfo(4) + require.NoError(t, err) + require.Equal(t, int64(4), postFlushCinfo.Version, "Commit changed after in-memory commit") + + multi = newMultiStoreWithMounts(db, pruning) + err = multi.LoadLatestVersion() + require.Nil(t, err) + + reloadedCid := multi.LastCommitID() + require.Equal(t, int64(4), reloadedCid.Version, "Reloaded CID is not the same as last flushed CID") + + // Check that store1 and store2 retained date from 3rd commit + store1 = multi.GetStoreByName("store1").(types.KVStore) + val := store1.Get([]byte(k)) + require.Equal(t, []byte(fmt.Sprintf("%s:%d", v, 3)), val, "Reloaded value not the same as last flushed value") + + store2 = multi.GetStoreByName("store2").(types.KVStore) + val2 := store2.Get([]byte(k2)) + require.Equal(t, []byte(fmt.Sprintf("%s:%d", v2, 3)), val2, "Reloaded value not the same as last flushed value") + + // Check that store3 still has data from last commit even though update happened on 2nd commit + store3 = multi.GetStoreByName("store3").(types.KVStore) + val3 := store3.Get([]byte(k3)) + require.Equal(t, []byte(fmt.Sprintf("%s:%d", v3, 3)), val3, "Reloaded value not the same as last flushed value") +} + +func TestMultiStoreQuery(t *testing.T) { + db := dbm.NewMemDB() + multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) + err := multi.LoadLatestVersion() + require.Nil(t, err) + + k, v := []byte("wind"), []byte("blows") + k2, v2 := []byte("water"), []byte("flows") + // v3 := []byte("is cold") + + // Commit the multistore. + _ = multi.Commit() + + // Make sure we can get by name. + garbage := multi.GetStoreByName("bad-name") + require.Nil(t, garbage) + + // Set and commit data in one store. + store1 := multi.GetStoreByName("store1").(types.KVStore) + store1.Set(k, v) + + // ... and another. + store2 := multi.GetStoreByName("store2").(types.KVStore) + store2.Set(k2, v2) + + // Commit the multistore. + cid := multi.Commit() + ver := cid.Version + + // Reload multistore from database + multi = newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) + err = multi.LoadLatestVersion() + require.Nil(t, err) + + // Test bad path. 
+ query := types.RequestQuery{Path: "/key", Data: k, Height: ver} + _, err = multi.Query(&query) + codespace, code, _ := errors.ABCIInfo(err, false) + require.EqualValues(t, types.ErrUnknownRequest.ABCICode(), code) + require.EqualValues(t, types.ErrUnknownRequest.Codespace(), codespace) + + query.Path = "h897fy32890rf63296r92" + _, err = multi.Query(&query) + codespace, code, _ = errors.ABCIInfo(err, false) + require.EqualValues(t, types.ErrUnknownRequest.ABCICode(), code) + require.EqualValues(t, types.ErrUnknownRequest.Codespace(), codespace) + + // Test invalid store name. + query.Path = "/garbage/key" + _, err = multi.Query(&query) + codespace, code, _ = errors.ABCIInfo(err, false) + require.EqualValues(t, types.ErrUnknownRequest.ABCICode(), code) + require.EqualValues(t, types.ErrUnknownRequest.Codespace(), codespace) + + // Test valid query with data. + query.Path = "/store1/key" + qres, err := multi.Query(&query) + require.NoError(t, err) + require.Equal(t, v, qres.Value) + + // Test valid but empty query. + query.Path = "/store2/key" + query.Prove = true + qres, err = multi.Query(&query) + require.NoError(t, err) + require.Nil(t, qres.Value) + + // Test store2 data. + // Since we are using the request as a reference, the path will be modified. + query.Data = k2 + query.Path = "/store2/key" + qres, err = multi.Query(&query) + require.NoError(t, err) + require.Equal(t, v2, qres.Value) +} + +func TestMultiStore_Pruning(t *testing.T) { + testCases := []struct { + name string + numVersions int64 + po pruningtypes.PruningOptions + deleted []int64 + saved []int64 + }{ + {"prune nothing", 10, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), nil, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}, + {"prune everything", 12, pruningtypes.NewPruningOptions(pruningtypes.PruningEverything), []int64{1, 2, 3, 4, 5, 6, 7}, []int64{8, 9, 10, 11, 12}}, + {"prune some; no batch", 10, pruningtypes.NewCustomPruningOptions(2, 1), []int64{1, 2, 3, 4, 6, 5, 7}, []int64{8, 9, 10}}, + {"prune some; small batch", 10, pruningtypes.NewCustomPruningOptions(2, 3), []int64{1, 2, 3, 4, 5, 6}, []int64{7, 8, 9, 10}}, + {"prune some; large batch", 10, pruningtypes.NewCustomPruningOptions(2, 11), nil, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + db := dbm.NewMemDB() + ms := newMultiStoreWithMounts(db, tc.po) + require.NoError(t, ms.LoadLatestVersion()) + + for i := int64(0); i < tc.numVersions; i++ { + ms.Commit() + } + + for _, v := range tc.saved { + _, err := ms.CacheMultiStoreWithVersion(v) + require.NoError(t, err, "expected no error when loading height: %d", v) + } + + for _, v := range tc.deleted { + _, err := ms.CacheMultiStoreWithVersion(v) + require.Error(t, err, "expected error when loading height: %d", v) + } + }) + } +} + +func TestMultiStore_Pruning_SameHeightsTwice(t *testing.T) { + const ( + numVersions int64 = 10 + keepRecent uint64 = 2 + interval uint64 = 10 + ) + + db := dbm.NewMemDB() + + ms := newMultiStoreWithMounts(db, pruningtypes.NewCustomPruningOptions(keepRecent, interval)) + require.NoError(t, ms.LoadLatestVersion()) + + var lastCommitInfo types.CommitID + for i := int64(0); i < numVersions; i++ { + lastCommitInfo = ms.Commit() + } + + require.Equal(t, numVersions, lastCommitInfo.Version) + + for v := int64(1); v < numVersions-int64(keepRecent); v++ { + err := ms.LoadVersion(v) + require.Error(t, err, "expected error when loading pruned height: %d", v) + } + + for v := (numVersions - 
int64(keepRecent)); v < numVersions; v++ { + err := ms.LoadVersion(v) + require.NoError(t, err, "expected no error when loading height: %d", v) + } + + // Get latest + err := ms.LoadVersion(numVersions - 1) + require.NoError(t, err) + + // Ensure already pruned snapshot heights were loaded + require.NoError(t, ms.pruningManager.LoadSnapshotHeights(db)) + + // Test pruning the same heights again + lastCommitInfo = ms.Commit() + require.Equal(t, numVersions, lastCommitInfo.Version) + + // Ensure that can commit one more height with no panic + lastCommitInfo = ms.Commit() + require.Equal(t, numVersions+1, lastCommitInfo.Version) +} + +func TestMultiStore_PruningRestart(t *testing.T) { + db := dbm.NewMemDB() + ms := newMultiStoreWithMounts(db, pruningtypes.NewCustomPruningOptions(2, 11)) + require.NoError(t, ms.LoadLatestVersion()) + + // Commit enough to build up heights to prune, where on the next block we should + // batch delete. + for i := int64(0); i < 10; i++ { + ms.Commit() + } + + actualHeightToPrune := ms.pruningManager.GetPruningHeight(ms.LatestVersion()) + require.Equal(t, int64(0), actualHeightToPrune) + + // "restart" + ms = newMultiStoreWithMounts(db, pruningtypes.NewCustomPruningOptions(2, 11)) + err := ms.LoadLatestVersion() + require.NoError(t, err) + + actualHeightToPrune = ms.pruningManager.GetPruningHeight(ms.LatestVersion()) + require.Equal(t, int64(0), actualHeightToPrune) + + // commit one more block and ensure the heights have been pruned + ms.Commit() + + actualHeightToPrune = ms.pruningManager.GetPruningHeight(ms.LatestVersion()) + require.Equal(t, int64(8), actualHeightToPrune) + + for v := int64(1); v <= actualHeightToPrune; v++ { + _, err := ms.CacheMultiStoreWithVersion(v) + require.Error(t, err, "expected error when loading height: %d", v) + } +} + +// TestUnevenStoresHeightCheck tests if loading root store correctly errors when +// there's any module store with the wrong height +func TestUnevenStoresHeightCheck(t *testing.T) { + var db dbm.DB = dbm.NewMemDB() + store := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) + err := store.LoadLatestVersion() + require.Nil(t, err) + + // commit to increment store's height + store.Commit() + + // mount store4 to root store + store.MountStoreWithDB(types.NewKVStoreKey("store4"), types.StoreTypeIAVL, nil) + + // load the stores without upgrades + err = store.LoadLatestVersion() + require.Error(t, err) + + // now, let's load with upgrades... 
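+ // declaring store4 as Added lets LoadLatestVersionAndUpgrade accept its
+ // height of 0 while the other stores are already at a later height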
+ upgrades := &types.StoreUpgrades{ + Added: []string{"store4"}, + } + err = store.LoadLatestVersionAndUpgrade(upgrades) + require.Nil(t, err) +} + +func TestSetInitialVersion(t *testing.T) { + db := dbm.NewMemDB() + multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) + + require.NoError(t, multi.LoadLatestVersion()) + + err := multi.SetInitialVersion(5) + require.NoError(t, err) + require.Equal(t, int64(5), multi.initialVersion) + + multi.Commit() + require.Equal(t, int64(5), multi.LastCommitID().Version) + + ckvs := multi.GetCommitKVStore(multi.keysByName["store1"]) + iavlStore, ok := ckvs.(*iavl.Store) + require.True(t, ok) + require.True(t, iavlStore.VersionExists(5)) +} + +func TestAddListenersAndListeningEnabled(t *testing.T) { + db := dbm.NewMemDB() + multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) + testKey := types.NewKVStoreKey("listening_test_key") + enabled := multi.ListeningEnabled(testKey) + require.False(t, enabled) + + wrongTestKey := types.NewKVStoreKey("wrong_listening_test_key") + multi.AddListeners([]types.StoreKey{testKey}) + enabled = multi.ListeningEnabled(wrongTestKey) + require.False(t, enabled) + + enabled = multi.ListeningEnabled(testKey) + require.True(t, enabled) +} + +func TestCacheWraps(t *testing.T) { + db := dbm.NewMemDB() + multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) + + cacheWrapper := multi.CacheWrap() + require.IsType(t, cachemulti.Store{}, cacheWrapper) + + cacheWrappedWithTrace := multi.CacheWrapWithTrace(nil, nil) + require.IsType(t, cachemulti.Store{}, cacheWrappedWithTrace) +} + +func TestTraceConcurrency(t *testing.T) { + db := dbm.NewMemDB() + multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) + err := multi.LoadLatestVersion() + require.NoError(t, err) + + b := &bytes.Buffer{} + key := multi.keysByName["store1"] + tc := types.TraceContext(map[string]interface{}{"blockHeight": 64}) + + multi.SetTracer(b) + multi.SetTracingContext(tc) + + cms := multi.CacheMultiStore() + store1 := cms.GetKVStore(key) + cw := store1.CacheWrapWithTrace(b, tc) + _ = cw + require.NotNil(t, store1) + + stop := make(chan struct{}) + stopW := make(chan struct{}) + + go func(stop chan struct{}) { + for { + select { + case <-stop: + return + default: + store1.Set([]byte{1}, []byte{1}) + cms.Write() + } + } + }(stop) + + go func(stop chan struct{}) { + for { + select { + case <-stop: + return + default: + multi.SetTracingContext(tc) + } + } + }(stopW) + + time.Sleep(3 * time.Second) + stop <- struct{}{} + stopW <- struct{}{} +} + +func TestCommitOrdered(t *testing.T) { + var db dbm.DB = dbm.NewMemDB() + multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) + err := multi.LoadLatestVersion() + require.Nil(t, err) + + emptyHash := sha256.Sum256([]byte{}) + appHash := emptyHash[:] + commitID := types.CommitID{Hash: appHash} + checkStore(t, multi, commitID, commitID) + + k, v := []byte("wind"), []byte("blows") + k2, v2 := []byte("water"), []byte("flows") + k3, v3 := []byte("fire"), []byte("burns") + + store1 := multi.GetStoreByName("store1").(types.KVStore) + store1.Set(k, v) + + store2 := multi.GetStoreByName("store2").(types.KVStore) + store2.Set(k2, v2) + + store3 := multi.GetStoreByName("store3").(types.KVStore) + store3.Set(k3, v3) + + typeID := multi.Commit() + require.Equal(t, int64(1), typeID.Version) + + ci, err := 
multi.GetCommitInfo(1) + require.NoError(t, err) + require.Equal(t, int64(1), ci.Version) + require.Equal(t, 3, len(ci.StoreInfos)) + for i, s := range ci.StoreInfos { + require.Equal(t, s.Name, fmt.Sprintf("store%d", i+1)) + } +} + +//----------------------------------------------------------------------- +// utils + +var ( + testStoreKey1 = types.NewKVStoreKey("store1") + testStoreKey2 = types.NewKVStoreKey("store2") + testStoreKey3 = types.NewKVStoreKey("store3") +) + +func newMultiStoreWithMounts(db dbm.DB, pruningOpts pruningtypes.PruningOptions) *Store { + store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) + store.SetPruning(pruningOpts) + + store.MountStoreWithDB(testStoreKey1, types.StoreTypeIAVL, nil) + store.MountStoreWithDB(testStoreKey2, types.StoreTypeIAVL, nil) + store.MountStoreWithDB(testStoreKey3, types.StoreTypeIAVL, nil) + + return store +} + +func newMultiStoreWithModifiedMounts(db dbm.DB, pruningOpts pruningtypes.PruningOptions) (*Store, *types.StoreUpgrades) { + store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) + store.SetPruning(pruningOpts) + + store.MountStoreWithDB(types.NewKVStoreKey("store1"), types.StoreTypeIAVL, nil) + store.MountStoreWithDB(types.NewKVStoreKey("restore2"), types.StoreTypeIAVL, nil) + store.MountStoreWithDB(types.NewKVStoreKey("store3"), types.StoreTypeIAVL, nil) + store.MountStoreWithDB(types.NewKVStoreKey("store4"), types.StoreTypeIAVL, nil) + + upgrades := &types.StoreUpgrades{ + Added: []string{"store4"}, + Renamed: []types.StoreRename{{ + OldKey: "store2", + NewKey: "restore2", + }}, + Deleted: []string{"store3"}, + } + + return store, upgrades +} + +func unmountStore(rootStore *Store, storeKeyName string) { + sk := rootStore.keysByName[storeKeyName] + delete(rootStore.stores, sk) + delete(rootStore.storesParams, sk) + delete(rootStore.keysByName, storeKeyName) +} + +func checkStore(t *testing.T, store *Store, expect, got types.CommitID) { + t.Helper() + require.Equal(t, expect, got) + require.Equal(t, expect, store.LastCommitID()) +} + +func checkContains(tb testing.TB, info []types.StoreInfo, wanted []string) { + tb.Helper() + + for _, want := range wanted { + checkHas(tb, info, want) + } +} + +func checkHas(tb testing.TB, info []types.StoreInfo, want string) { + tb.Helper() + for _, i := range info { + if i.Name == want { + return + } + } + tb.Fatalf("storeInfo doesn't contain %s", want) +} + +func getExpectedCommitID(store *Store, ver int64) types.CommitID { + return types.CommitID{ + Version: ver, + Hash: hashStores(store.stores), + } +} + +func hashStores(stores map[types.StoreKey]types.CommitKVStore) []byte { + m := make(map[string][]byte, len(stores)) + for key, store := range stores { + name := key.Name() + m[name] = types.StoreInfo{ + Name: name, + CommitId: store.LastCommitID(), + }.GetHash() + } + return sdkmaps.HashFromMap(m) +} + +type MockListener struct { + stateCache []types.StoreKVPair +} + +func (tl *MockListener) OnWrite(storeKey types.StoreKey, key, value []byte, delete bool) error { + tl.stateCache = append(tl.stateCache, types.StoreKVPair{ + StoreKey: storeKey.Name(), + Key: key, + Value: value, + Delete: delete, + }) + return nil +} + +func TestStateListeners(t *testing.T) { + var db dbm.DB = dbm.NewMemDB() + ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) + require.Empty(t, ms.listeners) + + ms.AddListeners([]types.StoreKey{testStoreKey1}) + require.Equal(t, 1, len(ms.listeners)) + + require.NoError(t, ms.LoadLatestVersion()) + 
cacheMulti := ms.CacheMultiStore() + + store := cacheMulti.GetKVStore(testStoreKey1) + store.Set([]byte{1}, []byte{1}) + require.Empty(t, ms.PopStateCache()) + + // writes are observed when cache store commit. + cacheMulti.Write() + require.Equal(t, 1, len(ms.PopStateCache())) + + // test no listening on unobserved store + store = cacheMulti.GetKVStore(testStoreKey2) + store.Set([]byte{1}, []byte{1}) + require.Empty(t, ms.PopStateCache()) + + // writes are not observed when cache store commit + cacheMulti.Write() + require.Empty(t, ms.PopStateCache()) +} + +type commitKVStoreStub struct { + types.CommitKVStore + Committed int +} + +func (stub *commitKVStoreStub) Commit() types.CommitID { + commitID := stub.CommitKVStore.Commit() + stub.Committed++ + return commitID +} + +func prepareStoreMap() (map[types.StoreKey]types.CommitKVStore, error) { + var db dbm.DB = dbm.NewMemDB() + store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) + store.MountStoreWithDB(types.NewKVStoreKey("iavl1"), types.StoreTypeIAVL, nil) + store.MountStoreWithDB(types.NewKVStoreKey("iavl2"), types.StoreTypeIAVL, nil) + store.MountStoreWithDB(types.NewTransientStoreKey("trans1"), types.StoreTypeTransient, nil) + if err := store.LoadLatestVersion(); err != nil { + return nil, err + } + return map[types.StoreKey]types.CommitKVStore{ + testStoreKey1: &commitKVStoreStub{ + CommitKVStore: store.GetStoreByName("iavl1").(types.CommitKVStore), + }, + testStoreKey2: &commitKVStoreStub{ + CommitKVStore: store.GetStoreByName("iavl2").(types.CommitKVStore), + }, + testStoreKey3: &commitKVStoreStub{ + CommitKVStore: store.GetStoreByName("trans1").(types.CommitKVStore), + }, + }, nil +} + +func TestCommitStores(t *testing.T) { + testCases := []struct { + name string + committed int + exptectCommit int + }{ + { + "when upgrade not get interrupted", + 0, + 1, + }, + { + "when upgrade get interrupted once", + 1, + 0, + }, + { + "when upgrade get interrupted twice", + 2, + 0, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + storeMap, err := prepareStoreMap() + require.NoError(t, err) + store := storeMap[testStoreKey1].(*commitKVStoreStub) + for i := tc.committed; i > 0; i-- { + store.Commit() + } + store.Committed = 0 + var version int64 = 1 + removalMap := map[types.StoreKey]bool{} + res := commitStores(version, storeMap, removalMap) + for _, s := range res.StoreInfos { + require.Equal(t, version, s.CommitId.Version) + } + require.Equal(t, version, res.Version) + require.Equal(t, tc.exptectCommit, store.Committed) + }) + } +} diff --git a/cosmos-sdk-store/snapshots/README.md b/cosmos-sdk-store/snapshots/README.md new file mode 100755 index 000000000..6de723246 --- /dev/null +++ b/cosmos-sdk-store/snapshots/README.md @@ -0,0 +1,284 @@ +# State Sync Snapshotting + +The `snapshots` package implements automatic support for CometBFT state sync +in Cosmos SDK-based applications. State sync allows a new node joining a network +to simply fetch a recent snapshot of the application state instead of fetching +and applying all historical blocks. This can reduce the time needed to join the +network by several orders of magnitude (e.g. weeks to minutes), but the node +will not contain historical data from previous heights. 
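+
+Before diving into the details, here is a minimal sketch of how an application
+might wire this package together. It is illustrative only: it relies on the
+`snapshots.NewStore` and `snapshots.NewManager` APIs that appear later in this
+document, and constructs the options struct directly from its exported fields.
+
+```go
+package main
+
+import (
+	db "github.com/cosmos/cosmos-db"
+
+	"cosmossdk.io/log"
+	"cosmossdk.io/store/snapshots"
+	snapshottypes "cosmossdk.io/store/snapshots/types"
+)
+
+// setupSnapshots builds a snapshot manager for a multistore; the interval and
+// keep-recent values mirror the state-sync.snapshot-* settings discussed below.
+func setupSnapshots(multistore snapshottypes.Snapshotter, dir string) (*snapshots.Manager, error) {
+	// Snapshot metadata lives in a DB, with binary chunk files under dir. A
+	// real node uses LevelDB at <node_home>/data/snapshots/metadata.db; an
+	// in-memory DB keeps this sketch self-contained.
+	store, err := snapshots.NewStore(db.NewMemDB(), dir)
+	if err != nil {
+		return nil, err
+	}
+	// Take a snapshot every 1000 heights and keep the 2 most recent.
+	opts := snapshottypes.SnapshotOptions{Interval: 1000, KeepRecent: 2}
+	return snapshots.NewManager(store, opts, multistore, nil, log.NewNopLogger()), nil
+}
+```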
+
+This document describes the Cosmos SDK implementation of the ABCI state sync
+interface; for more information on CometBFT state sync in general, see:
+
+* [CometBFT State Sync for Developers](https://medium.com/cometbft/cometbft-core-state-sync-for-developers-70a96ba3ee35)
+* [ABCI State Sync Spec](https://docs.cometbft.com/v0.37/spec/p2p/messages/state-sync)
+* [ABCI State Sync Method/Type Reference](https://docs.cometbft.com/v0.37/spec/p2p/messages/state-sync)
+
+## Overview
+
+For an overview of how Cosmos SDK state sync is set up and configured by
+developers and end-users, see the
+[Cosmos SDK State Sync Guide](https://blog.cosmos.network/cosmos-sdk-state-sync-guide-99e4cf43be2f).
+
+Briefly, the Cosmos SDK takes state snapshots at regular height intervals given
+by `state-sync.snapshot-interval` and stores them as binary files in the
+filesystem under `/data/snapshots/`, with metadata in a LevelDB database
+`/data/snapshots/metadata.db`. The number of recent snapshots to keep is given
+by `state-sync.snapshot-keep-recent`.
+
+Snapshots are taken asynchronously, i.e. new blocks are applied concurrently
+with snapshots being taken. This is possible because IAVL supports querying
+immutable historical heights. However, it requires heights that are multiples
+of `state-sync.snapshot-interval` to be kept until after the snapshot is
+complete, to prevent a height from being removed while it is still being
+snapshotted.
+
+When a remote node is state syncing, CometBFT calls the ABCI method
+`ListSnapshots` to list available local snapshots and `LoadSnapshotChunk` to
+load a binary snapshot chunk. When the local node is being state synced,
+CometBFT calls `OfferSnapshot` to offer a discovered remote snapshot to the
+local application and `ApplySnapshotChunk` to apply a binary snapshot chunk to
+the local application. See the resources linked above for more details on these
+methods and how CometBFT performs state sync.
+
+The Cosmos SDK does not currently do any incremental verification of snapshots
+during restoration, i.e. only after the entire snapshot has been restored will
+CometBFT compare the app hash against the trusted hash from the chain. Cosmos
+SDK snapshots and chunks do contain hashes as checksums to guard against IO
+corruption and non-determinism, but these are not tied to the chain state and
+can be trivially forged by an adversary. This was considered out of scope for
+the initial implementation, but can be added later without changes to the
+ABCI state sync protocol.
+
+## Relationship to Pruning
+
+Snapshot settings are optional. However, if set, they affect how pruning is
+done: heights that are multiples of `state-sync.snapshot-interval` are
+persisted until after the snapshot is complete.
+
+If pruning is enabled (not `pruning = "nothing"`), we avoid pruning heights
+that are multiples of `state-sync.snapshot-interval` in the regular pruning
+logic determined by the pruning settings and applied after every `Commit()`.
+This is done to prevent a height from being removed before its snapshot is
+complete. Therefore, we keep such heights until the snapshot is done; at that
+point, the height is sent to the `pruning.Manager` to be pruned according to
+the pruning settings after the next `Commit()`.
+
+To illustrate, assume that we are currently at height 960 with
+`pruning-keep-recent = 50`, `pruning-interval = 10`, and
+`state-sync.snapshot-interval = 100`. Let's assume that the snapshot that was
+triggered at height `900` **just finished**. Then, we can prune height `900`
+right away (that is, when we call `Commit()` at height 960), because 900 is
+less than `960 - 50 = 910`.
+
+Let's now assume that all conditions stay the same but the snapshot at height
+900 is **not complete yet**. Then, we cannot prune it, to avoid deleting a
+height that is still being snapshotted. Therefore, we keep track of this height
+until the snapshot is complete. Height 900 will be pruned at the first height h
+that satisfies the following conditions:
+
+* the snapshot is complete,
+* h is a multiple of `pruning-interval`, and
+* the snapshot height is less than h - `pruning-keep-recent`.
+
+Note that in both examples, if we let the current height be C and the previous
+height P = C - 1, then every height h with
+
+P - `pruning-keep-recent` - `pruning-interval` <= h <= P - `pruning-keep-recent`
+
+can be pruned. In our first example, all heights 899-909 fall in this range and
+are pruned at height 960, as long as h is not a snapshot height (e.g. 900).
+
+That is, we always use the current height to determine at which height to prune
+(960), while we use the previous height to determine which heights are to be
+pruned (from `959 - 50 - 10 = 899` through `959 - 50 = 909`).
+
+## Configuration
+
+* `state-sync.snapshot-interval`
+  * the interval at which to take snapshots.
+  * a value of 0 disables snapshots.
+  * if pruning is enabled, heights that are multiples of this interval are
+    pruned only after their snapshot is complete.
+
+* `state-sync.snapshot-keep-recent`
+  * the number of recent snapshots to keep.
+  * 0 means keep all.
+
+## Snapshot Metadata
+
+The ABCI Protobuf type for a snapshot is listed below (refer to the ABCI spec
+for field details):
+
+```protobuf
+message Snapshot {
+  uint64 height = 1; // The height at which the snapshot was taken
+  uint32 format = 2; // The application-specific snapshot format
+  uint32 chunks = 3; // Number of chunks in the snapshot
+  bytes hash = 4;    // Arbitrary snapshot hash, equal only if identical
+  bytes metadata = 5; // Arbitrary application metadata
+}
+```
+
+Because the `metadata` field is application-specific, the Cosmos SDK uses a
+similar type `cosmos.base.snapshots.v1beta1.Snapshot` with its own metadata
+representation:
+
+```protobuf
+// Snapshot contains CometBFT state sync snapshot info.
+message Snapshot {
+  uint64 height = 1;
+  uint32 format = 2;
+  uint32 chunks = 3;
+  bytes hash = 4;
+  Metadata metadata = 5 [(gogoproto.nullable) = false];
+}
+
+// Metadata contains SDK-specific snapshot metadata.
+message Metadata {
+  repeated bytes chunk_hashes = 1; // SHA-256 chunk hashes
+}
+```
+
+The `format` is currently `1`, defined in `snapshots.types.CurrentFormat`. It
+must be increased whenever the binary snapshot format changes, and it may be
+useful to support past formats in newer versions.
+
+The `hash` is a SHA-256 hash of the entire binary snapshot, used to guard
+against IO corruption and non-determinism across nodes. Note that this is not
+tied to the chain state, and can be trivially forged (but CometBFT will always
+compare the final app hash against the chain app hash). Similarly, the
+`chunk_hashes` are SHA-256 checksums of each binary chunk.
+
+The `metadata` field is Protobuf-serialized before it is placed into the ABCI
+snapshot.
+
+## Snapshot Format
+
+The current version `1` snapshot format is a zlib-compressed, length-prefixed
+Protobuf stream of `cosmos.base.store.v1beta1.SnapshotItem` messages, split
+into chunks at exact 10 MB boundaries.
+ +```protobuf +// SnapshotItem is an item contained in a rootmulti.Store snapshot. +message SnapshotItem { + // item is the specific type of snapshot item. + oneof item { + SnapshotStoreItem store = 1; + SnapshotIAVLItem iavl = 2 [(gogoproto.customname) = "IAVL"]; + } +} + +// SnapshotStoreItem contains metadata about a snapshotted store. +message SnapshotStoreItem { + string name = 1; +} + +// SnapshotIAVLItem is an exported IAVL node. +message SnapshotIAVLItem { + bytes key = 1; + bytes value = 2; + int64 version = 3; + int32 height = 4; +} +``` + +Snapshots are generated by `rootmulti.Store.Snapshot()` as follows: + +1. Set up a `protoio.NewDelimitedWriter` that writes length-prefixed serialized + `SnapshotItem` Protobuf messages. + 1. Iterate over each IAVL store in lexicographical order by store name. + 2. Emit a `SnapshotStoreItem` containing the store name. + 3. Start an IAVL export for the store using + [`iavl.ImmutableTree.Export()`](https://pkg.go.dev/github.com/cosmos/iavl#ImmutableTree.Export). + 4. Iterate over each IAVL node. + 5. Emit a `SnapshotIAVLItem` for the IAVL node. +2. Pass the serialized Protobuf output stream to a zlib compression writer. +3. Split the zlib output stream into chunks at exactly every 10th megabyte. + +Snapshots are restored via `rootmulti.Store.Restore()` as the inverse of the above, using +[`iavl.MutableTree.Import()`](https://pkg.go.dev/github.com/cosmos/iavl#MutableTree.Import) +to reconstruct each IAVL tree. + +## Snapshot Storage + +Snapshot storage is managed by `snapshots.Store`, with metadata in a `db.DB` +database and binary chunks in the filesystem. Note that this is only used to +store locally taken snapshots that are being offered to other nodes. When the +local node is being state synced, CometBFT will take care of buffering and +storing incoming snapshot chunks before they are applied to the application. + +Metadata is generally stored in a LevelDB database at +`/data/snapshots/metadata.db`. It contains serialized +`cosmos.base.snapshots.v1beta1.Snapshot` Protobuf messages with a key given by +the concatenation of a key prefix, the big-endian height, and the big-endian +format. Chunk data is stored as regular files under +`/data/snapshots///`. + +The `snapshots.Store` API is based on streaming IO, and integrates easily with +the `snapshots.types.Snapshotter` snapshot/restore interface implemented by +`rootmulti.Store`. The `Store.Save()` method stores a snapshot given as a +`<- chan io.ReadCloser` channel of binary chunk streams, and `Store.Load()` loads +the snapshot as a channel of binary chunk streams -- the same stream types used +by `Snapshotter.Snapshot()` and `Snapshotter.Restore()` to take and restore +snapshots using streaming IO. + +The store also provides many other methods such as `List()` to list stored +snapshots, `LoadChunk()` to load a single snapshot chunk, and `Prune()` to prune +old snapshots. + +## Taking Snapshots + +`snapshots.Manager` is a high-level snapshot manager that integrates a +`snapshots.types.Snapshotter` (i.e. the `rootmulti.Store` snapshot +functionality) and a `snapshots.Store`, providing an API that maps easily onto +the ABCI state sync API. The `Manager` will also make sure only one operation +is in progress at a time, e.g. to prevent multiple snapshots being taken +concurrently. + +During `BaseApp.Commit`, once a state transition has been committed, the height +is checked against the `state-sync.snapshot-interval` setting. 
If the committed +height should be snapshotted, a goroutine `BaseApp.snapshot()` is spawned that +calls `snapshots.Manager.Create()` to create the snapshot. Once a snapshot is +complete and if pruning is enabled, the snapshot height is pruned away by the manager +with the call `PruneSnapshotHeight(...)` to the `snapshots.types.Snapshotter`. + +`Manager.Create()` will do some basic pre-flight checks, and then start +generating a snapshot by calling `rootmulti.Store.Snapshot()`. The chunk stream +is passed into `snapshots.Store.Save()`, which stores the chunks in the +filesystem and records the snapshot metadata in the snapshot database. + +Once the snapshot has been generated, `BaseApp.snapshot()` then removes any +old snapshots based on the `state-sync.snapshot-keep-recent` setting. + +## Serving Snapshots + +When a remote node is discovering snapshots for state sync, CometBFT will +call the `ListSnapshots` ABCI method to list the snapshots present on the +local node. This is dispatched to `snapshots.Manager.List()`, which in turn +dispatches to `snapshots.Store.List()`. + +When a remote node is fetching snapshot chunks during state sync, CometBFT +will call the `LoadSnapshotChunk` ABCI method to fetch a chunk from the local +node. This dispatches to `snapshots.Manager.LoadChunk()`, which in turn +dispatches to `snapshots.Store.LoadChunk()`. + +## Restoring Snapshots + +When the operator has configured the local CometBFT node to run state sync +(see the resources listed in the introduction for details on CometBFT state +sync), it will discover snapshots across the P2P network and offer their +metadata in turn to the local application via the `OfferSnapshot` ABCI call. + +`BaseApp.OfferSnapshot()` attempts to start a restore operation by calling +`snapshots.Manager.Restore()`. This may fail, e.g. if the snapshot format is +unknown (it may have been generated by a different version of the Cosmos SDK), +in which case CometBFT will offer other discovered snapshots. + +If the snapshot is accepted, `Manager.Restore()` will record that a restore +operation is in progress, and spawn a separate goroutine that runs a synchronous +`rootmulti.Store.Restore()` snapshot restoration which will be fed snapshot +chunks until it is complete. + +CometBFT will then start fetching and buffering chunks, providing them in +order via ABCI `ApplySnapshotChunk` calls. These dispatch to +`Manager.RestoreChunk()`, which passes the chunks to the ongoing restore +process, checking if errors have been encountered yet (e.g. due to checksum +mismatches or invalid IAVL data). Once the final chunk is passed, +`Manager.RestoreChunk()` will wait for the restore process to complete before +returning. + +Once the restore is completed, CometBFT will go on to call the `Info` ABCI +call to fetch the app hash, and compare this against the trusted chain app +hash at the snapshot height to verify the restored state. If it matches, +CometBFT goes on to process blocks. diff --git a/cosmos-sdk-store/snapshots/chunk.go b/cosmos-sdk-store/snapshots/chunk.go new file mode 100755 index 000000000..fdf8cbd4b --- /dev/null +++ b/cosmos-sdk-store/snapshots/chunk.go @@ -0,0 +1,185 @@ +package snapshots + +import ( + "io" + "math" + + "cosmossdk.io/errors" + snapshottypes "cosmossdk.io/store/snapshots/types" + storetypes "cosmossdk.io/store/types" +) + +// ChunkWriter reads an input stream, splits it into fixed-size chunks, and writes them to a +// sequence of io.ReadClosers via a channel. 
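+// In the snapshot pipeline it sits at the bottom of the writer stack
+// (protobuf writer -> zlib -> bufio -> ChunkWriter), so chunk boundaries fall
+// at exact byte offsets of the compressed stream.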
+type ChunkWriter struct { + ch chan<- io.ReadCloser + pipe *io.PipeWriter + chunkSize uint64 + written uint64 + closed bool +} + +// NewChunkWriter creates a new ChunkWriter. If chunkSize is 0, no chunking will be done. +func NewChunkWriter(ch chan<- io.ReadCloser, chunkSize uint64) *ChunkWriter { + return &ChunkWriter{ + ch: ch, + chunkSize: chunkSize, + } +} + +// chunk creates a new chunk. +func (w *ChunkWriter) chunk() error { + if w.pipe != nil { + err := w.pipe.Close() + if err != nil { + return err + } + } + pr, pw := io.Pipe() + w.ch <- pr + w.pipe = pw + w.written = 0 + return nil +} + +// Close implements io.Closer. +func (w *ChunkWriter) Close() error { + if !w.closed { + w.closed = true + close(w.ch) + var err error + if w.pipe != nil { + err = w.pipe.Close() + } + return err + } + return nil +} + +// CloseWithError closes the writer and sends an error to the reader. +func (w *ChunkWriter) CloseWithError(err error) { + if !w.closed { + if w.pipe == nil { + // create a dummy pipe just to propagate the error to the reader, it always returns nil + _ = w.chunk() + } + w.closed = true + close(w.ch) + _ = w.pipe.CloseWithError(err) // CloseWithError always returns nil + } +} + +// Write implements io.Writer. +func (w *ChunkWriter) Write(data []byte) (int, error) { + if w.closed { + return 0, errors.Wrap(storetypes.ErrLogic, "cannot write to closed ChunkWriter") + } + nTotal := 0 + for len(data) > 0 { + if w.pipe == nil || (w.written >= w.chunkSize && w.chunkSize > 0) { + err := w.chunk() + if err != nil { + return nTotal, err + } + } + + var writeSize uint64 + if w.chunkSize == 0 { + writeSize = uint64(len(data)) + } else { + writeSize = w.chunkSize - w.written + } + if writeSize > uint64(len(data)) { + writeSize = uint64(len(data)) + } + + n, err := w.pipe.Write(data[:writeSize]) + w.written += uint64(n) + nTotal += n + if err != nil { + return nTotal, err + } + data = data[writeSize:] + } + return nTotal, nil +} + +// ChunkReader reads chunks from a channel of io.ReadClosers and outputs them as an io.Reader +type ChunkReader struct { + ch <-chan io.ReadCloser + reader io.ReadCloser +} + +// NewChunkReader creates a new ChunkReader. +func NewChunkReader(ch <-chan io.ReadCloser) *ChunkReader { + return &ChunkReader{ch: ch} +} + +// next fetches the next chunk from the channel, or returns io.EOF if there are no more chunks. +func (r *ChunkReader) next() error { + reader, ok := <-r.ch + if !ok { + return io.EOF + } + r.reader = reader + return nil +} + +// Close implements io.ReadCloser. +func (r *ChunkReader) Close() error { + var err error + if r.reader != nil { + err = r.reader.Close() + r.reader = nil + } + for reader := range r.ch { + if e := reader.Close(); e != nil && err == nil { + err = e + } + } + return err +} + +// Read implements io.Reader. +func (r *ChunkReader) Read(p []byte) (int, error) { + if r.reader == nil { + err := r.next() + if err != nil { + return 0, err + } + } + n, err := r.reader.Read(p) + if err == io.EOF { + err = r.reader.Close() + r.reader = nil + if err != nil { + return 0, err + } + return r.Read(p) + } + return n, err +} + +// DrainChunks drains and closes all remaining chunks from a chunk channel. 
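+// It can be used, for example, to unblock a sender goroutine that is still
+// pushing chunks into the channel after the receiver has stopped consuming.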
+func DrainChunks(chunks <-chan io.ReadCloser) { + for chunk := range chunks { + _ = chunk.Close() + } +} + +// ValidRestoreHeight will check height is valid for snapshot restore or not +func ValidRestoreHeight(format uint32, height uint64) error { + if format != snapshottypes.CurrentFormat { + return errors.Wrapf(snapshottypes.ErrUnknownFormat, "format %v", format) + } + + if height == 0 { + return errors.Wrap(storetypes.ErrLogic, "cannot restore snapshot at height 0") + } + if height > uint64(math.MaxInt64) { + return errors.Wrapf(snapshottypes.ErrInvalidMetadata, + "snapshot height %v cannot exceed %v", height, int64(math.MaxInt64)) + } + + return nil +} diff --git a/cosmos-sdk-store/snapshots/chunk_test.go b/cosmos-sdk-store/snapshots/chunk_test.go new file mode 100755 index 000000000..df524cdf3 --- /dev/null +++ b/cosmos-sdk-store/snapshots/chunk_test.go @@ -0,0 +1,164 @@ +package snapshots_test + +import ( + "bytes" + "errors" + "io" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "cosmossdk.io/store/snapshots" +) + +func TestChunkWriter(t *testing.T) { + ch := make(chan io.ReadCloser, 100) + go func() { + chunkWriter := snapshots.NewChunkWriter(ch, 2) + + n, err := chunkWriter.Write([]byte{1, 2, 3}) + require.NoError(t, err) + assert.Equal(t, 3, n) + + n, err = chunkWriter.Write([]byte{4, 5, 6}) + require.NoError(t, err) + assert.Equal(t, 3, n) + + n, err = chunkWriter.Write([]byte{7, 8, 9}) + require.NoError(t, err) + assert.Equal(t, 3, n) + + err = chunkWriter.Close() + require.NoError(t, err) + + // closed writer should error + _, err = chunkWriter.Write([]byte{10}) + require.Error(t, err) + + // closing again should be fine + err = chunkWriter.Close() + require.NoError(t, err) + }() + + assert.Equal(t, [][]byte{{1, 2}, {3, 4}, {5, 6}, {7, 8}, {9}}, readChunks(ch)) + + // 0-sized chunks should return the whole body as one chunk + ch = make(chan io.ReadCloser, 100) + go func() { + chunkWriter := snapshots.NewChunkWriter(ch, 0) + _, err := chunkWriter.Write([]byte{1, 2, 3}) + require.NoError(t, err) + _, err = chunkWriter.Write([]byte{4, 5, 6}) + require.NoError(t, err) + err = chunkWriter.Close() + require.NoError(t, err) + }() + assert.Equal(t, [][]byte{{1, 2, 3, 4, 5, 6}}, readChunks(ch)) + + // closing with error should return the error + theErr := errors.New("boom") + ch = make(chan io.ReadCloser, 100) + go func() { + chunkWriter := snapshots.NewChunkWriter(ch, 2) + _, err := chunkWriter.Write([]byte{1, 2, 3}) + require.NoError(t, err) + chunkWriter.CloseWithError(theErr) + }() + chunk, err := io.ReadAll(<-ch) + require.NoError(t, err) + assert.Equal(t, []byte{1, 2}, chunk) + _, err = io.ReadAll(<-ch) + require.Error(t, err) + assert.Equal(t, theErr, err) + assert.Empty(t, ch) + + // closing immediately should return no chunks + ch = make(chan io.ReadCloser, 100) + chunkWriter := snapshots.NewChunkWriter(ch, 2) + err = chunkWriter.Close() + require.NoError(t, err) + assert.Empty(t, ch) +} + +func TestChunkReader(t *testing.T) { + ch := makeChunks([][]byte{ + {1, 2, 3}, + {4}, + {}, + {5, 6}, + }) + chunkReader := snapshots.NewChunkReader(ch) + + buf := []byte{0, 0, 0, 0} + n, err := chunkReader.Read(buf) + require.NoError(t, err) + assert.Equal(t, 3, n) + assert.Equal(t, []byte{1, 2, 3, 0}, buf) + + buf = []byte{0, 0, 0, 0} + n, err = chunkReader.Read(buf) + require.NoError(t, err) + assert.Equal(t, 1, n) + assert.Equal(t, []byte{4, 0, 0, 0}, buf) + + buf = []byte{0, 0, 0, 0} + n, err = chunkReader.Read(buf) + require.NoError(t, 
err) + assert.Equal(t, 2, n) + assert.Equal(t, []byte{5, 6, 0, 0}, buf) + + buf = []byte{0, 0, 0, 0} + _, err = chunkReader.Read(buf) + require.Error(t, err) + assert.Equal(t, io.EOF, err) + + err = chunkReader.Close() + require.NoError(t, err) + + err = chunkReader.Close() // closing twice should be fine + require.NoError(t, err) + + // Empty channel should be fine + ch = makeChunks(nil) + chunkReader = snapshots.NewChunkReader(ch) + buf = make([]byte, 4) + _, err = chunkReader.Read(buf) + require.Error(t, err) + assert.Equal(t, io.EOF, err) + + // Using a pipe that closes with an error should return the error + theErr := errors.New("boom") + pr, pw := io.Pipe() + pch := make(chan io.ReadCloser, 1) + pch <- pr + _ = pw.CloseWithError(theErr) + + chunkReader = snapshots.NewChunkReader(pch) + buf = make([]byte, 4) + _, err = chunkReader.Read(buf) + require.Error(t, err) + assert.Equal(t, theErr, err) + + // Closing the reader should close the writer + pr, pw = io.Pipe() + pch = make(chan io.ReadCloser, 2) + pch <- io.NopCloser(bytes.NewBuffer([]byte{1, 2, 3})) + pch <- pr + close(pch) + + go func() { + chunkReader := snapshots.NewChunkReader(pch) + buf := []byte{0, 0, 0, 0} + _, err := chunkReader.Read(buf) + require.NoError(t, err) + assert.Equal(t, []byte{1, 2, 3, 0}, buf) + + err = chunkReader.Close() + require.NoError(t, err) + }() + + _, err = pw.Write([]byte{9, 9, 9}) + require.Error(t, err) + assert.Equal(t, err, io.ErrClosedPipe) +} diff --git a/cosmos-sdk-store/snapshots/helpers_test.go b/cosmos-sdk-store/snapshots/helpers_test.go new file mode 100755 index 000000000..d337b53ab --- /dev/null +++ b/cosmos-sdk-store/snapshots/helpers_test.go @@ -0,0 +1,337 @@ +package snapshots_test + +import ( + "bufio" + "bytes" + "compress/zlib" + "crypto/sha256" + "errors" + "io" + "os" + "testing" + "time" + + db "github.com/cosmos/cosmos-db" + protoio "github.com/cosmos/gogoproto/io" + "github.com/stretchr/testify/require" + + errorsmod "cosmossdk.io/errors" + "cosmossdk.io/log" + "cosmossdk.io/store/snapshots" + snapshottypes "cosmossdk.io/store/snapshots/types" + "cosmossdk.io/store/types" +) + +func checksums(slice [][]byte) [][]byte { + hasher := sha256.New() + checksums := make([][]byte, len(slice)) + for i, chunk := range slice { + hasher.Write(chunk) + checksums[i] = hasher.Sum(nil) + hasher.Reset() + } + return checksums +} + +func hash(chunks [][]byte) []byte { + hasher := sha256.New() + for _, chunk := range chunks { + hasher.Write(chunk) + } + return hasher.Sum(nil) +} + +func makeChunks(chunks [][]byte) <-chan io.ReadCloser { + ch := make(chan io.ReadCloser, len(chunks)) + for _, chunk := range chunks { + ch <- io.NopCloser(bytes.NewReader(chunk)) + } + close(ch) + return ch +} + +func readChunks(chunks <-chan io.ReadCloser) [][]byte { + bodies := [][]byte{} + for chunk := range chunks { + body, err := io.ReadAll(chunk) + if err != nil { + panic(err) + } + bodies = append(bodies, body) + } + return bodies +} + +// snapshotItems serialize a array of bytes as SnapshotItem_ExtensionPayload, and return the chunks. 
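+// The writer stack below deliberately mirrors the production snapshot code:
+// protobuf writer -> zlib (level 7) -> bufio -> ChunkWriter, with 10 MB chunks.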
+func snapshotItems(items [][]byte, ext snapshottypes.ExtensionSnapshotter) [][]byte { + // copy the same parameters from the code + snapshotChunkSize := uint64(10e6) + snapshotBufferSize := int(snapshotChunkSize) + + ch := make(chan io.ReadCloser) + go func() { + chunkWriter := snapshots.NewChunkWriter(ch, snapshotChunkSize) + bufWriter := bufio.NewWriterSize(chunkWriter, snapshotBufferSize) + zWriter, _ := zlib.NewWriterLevel(bufWriter, 7) + protoWriter := protoio.NewDelimitedWriter(zWriter) + for _, item := range items { + _ = snapshottypes.WriteExtensionPayload(protoWriter, item) + } + // write extension metadata + _ = protoWriter.WriteMsg(&snapshottypes.SnapshotItem{ + Item: &snapshottypes.SnapshotItem_Extension{ + Extension: &snapshottypes.SnapshotExtensionMeta{ + Name: ext.SnapshotName(), + Format: ext.SnapshotFormat(), + }, + }, + }) + _ = ext.SnapshotExtension(0, func(payload []byte) error { + return snapshottypes.WriteExtensionPayload(protoWriter, payload) + }) + _ = protoWriter.Close() + _ = bufWriter.Flush() + _ = chunkWriter.Close() + }() + + var chunks [][]byte + for chunkBody := range ch { + chunk, err := io.ReadAll(chunkBody) + if err != nil { + panic(err) + } + chunks = append(chunks, chunk) + } + + return chunks +} + +type mockSnapshotter struct { + items [][]byte + prunedHeights map[int64]struct{} + snapshotInterval uint64 +} + +func (m *mockSnapshotter) Restore( + height uint64, format uint32, protoReader protoio.Reader, +) (snapshottypes.SnapshotItem, error) { + if format == 0 { + return snapshottypes.SnapshotItem{}, snapshottypes.ErrUnknownFormat + } + if m.items != nil { + return snapshottypes.SnapshotItem{}, errors.New("already has contents") + } + + var item snapshottypes.SnapshotItem + m.items = [][]byte{} + for { + item.Reset() + err := protoReader.ReadMsg(&item) + if err == io.EOF { + break + } else if err != nil { + return snapshottypes.SnapshotItem{}, errorsmod.Wrap(err, "invalid protobuf message") + } + payload := item.GetExtensionPayload() + if payload == nil { + break + } + m.items = append(m.items, payload.Payload) + } + + return item, nil +} + +func (m *mockSnapshotter) Snapshot(height uint64, protoWriter protoio.Writer) error { + for _, item := range m.items { + if err := snapshottypes.WriteExtensionPayload(protoWriter, item); err != nil { + return err + } + } + return nil +} + +func (m *mockSnapshotter) SnapshotFormat() uint32 { + return snapshottypes.CurrentFormat +} + +func (m *mockSnapshotter) SupportedFormats() []uint32 { + return []uint32{snapshottypes.CurrentFormat} +} + +func (m *mockSnapshotter) PruneSnapshotHeight(height int64) { + m.prunedHeights[height] = struct{}{} +} + +func (m *mockSnapshotter) GetSnapshotInterval() uint64 { + return m.snapshotInterval +} + +func (m *mockSnapshotter) SetSnapshotInterval(snapshotInterval uint64) { + m.snapshotInterval = snapshotInterval +} + +type mockErrorSnapshotter struct{} + +var _ snapshottypes.Snapshotter = (*mockErrorSnapshotter)(nil) + +func (m *mockErrorSnapshotter) Snapshot(height uint64, protoWriter protoio.Writer) error { + return errors.New("mock snapshot error") +} + +func (m *mockErrorSnapshotter) Restore( + height uint64, format uint32, protoReader protoio.Reader, +) (snapshottypes.SnapshotItem, error) { + return snapshottypes.SnapshotItem{}, errors.New("mock restore error") +} + +func (m *mockErrorSnapshotter) SnapshotFormat() uint32 { + return snapshottypes.CurrentFormat +} + +func (m *mockErrorSnapshotter) SupportedFormats() []uint32 { + return []uint32{snapshottypes.CurrentFormat} +} + 
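+// chunkRoundTrip is an illustrative helper showing the intended pairing of
+// ChunkWriter and ChunkReader; it is not exercised by the tests in this file.
+// It pushes data through a ChunkWriter and reassembles the resulting chunk
+// streams with a ChunkReader, mirroring how snapshot chunks are produced and
+// consumed elsewhere in this package.
+func chunkRoundTrip(data []byte, chunkSize uint64) ([]byte, error) {
+	ch := make(chan io.ReadCloser, 100)
+	go func() {
+		cw := snapshots.NewChunkWriter(ch, chunkSize)
+		if _, err := cw.Write(data); err != nil {
+			cw.CloseWithError(err)
+			return
+		}
+		_ = cw.Close()
+	}()
+	// ChunkReader stitches the chunk streams back into one contiguous reader.
+	return io.ReadAll(snapshots.NewChunkReader(ch))
+}
+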
+func (m *mockErrorSnapshotter) PruneSnapshotHeight(height int64) { +} + +func (m *mockErrorSnapshotter) GetSnapshotInterval() uint64 { + return 0 +} + +func (m *mockErrorSnapshotter) SetSnapshotInterval(snapshotInterval uint64) { +} + +// setupBusyManager creates a manager with an empty store that is busy creating a snapshot at height 1. +// The snapshot will complete when the returned closer is called. +func setupBusyManager(t *testing.T) *snapshots.Manager { + t.Helper() + store, err := snapshots.NewStore(db.NewMemDB(), t.TempDir()) + require.NoError(t, err) + hung := newHungSnapshotter() + hung.SetSnapshotInterval(opts.Interval) + mgr := snapshots.NewManager(store, opts, hung, nil, log.NewNopLogger()) + require.Equal(t, opts.Interval, hung.snapshotInterval) + + // Channel to ensure the test doesn't finish until the goroutine is done. + // Without this, there are intermittent test failures about + // the t.TempDir() cleanup failing due to the directory not being empty. + done := make(chan struct{}) + + go func() { + defer close(done) + _, err := mgr.Create(1) + require.NoError(t, err) + _, didPruneHeight := hung.prunedHeights[1] + require.True(t, didPruneHeight) + }() + time.Sleep(10 * time.Millisecond) + + t.Cleanup(func() { + <-done + }) + + t.Cleanup(hung.Close) + + return mgr +} + +// hungSnapshotter can be used to test operations in progress. Call close to end the snapshot. +type hungSnapshotter struct { + ch chan struct{} + prunedHeights map[int64]struct{} + snapshotInterval uint64 +} + +func newHungSnapshotter() *hungSnapshotter { + return &hungSnapshotter{ + ch: make(chan struct{}), + prunedHeights: make(map[int64]struct{}), + } +} + +func (m *hungSnapshotter) Close() { + close(m.ch) +} + +func (m *hungSnapshotter) Snapshot(height uint64, protoWriter protoio.Writer) error { + <-m.ch + return nil +} + +func (m *hungSnapshotter) PruneSnapshotHeight(height int64) { + m.prunedHeights[height] = struct{}{} +} + +func (m *hungSnapshotter) SetSnapshotInterval(snapshotInterval uint64) { + m.snapshotInterval = snapshotInterval +} + +func (m *hungSnapshotter) Restore( + height uint64, format uint32, protoReader protoio.Reader, +) (snapshottypes.SnapshotItem, error) { + panic("not implemented") +} + +type extSnapshotter struct { + state []uint64 +} + +func newExtSnapshotter(count int) *extSnapshotter { + state := make([]uint64, 0, count) + for i := 0; i < count; i++ { + state = append(state, uint64(i)) + } + return &extSnapshotter{ + state, + } +} + +func (s *extSnapshotter) SnapshotName() string { + return "mock" +} + +func (s *extSnapshotter) SnapshotFormat() uint32 { + return 1 +} + +func (s *extSnapshotter) SupportedFormats() []uint32 { + return []uint32{1} +} + +func (s *extSnapshotter) SnapshotExtension(height uint64, payloadWriter snapshottypes.ExtensionPayloadWriter) error { + for _, i := range s.state { + if err := payloadWriter(types.Uint64ToBigEndian(i)); err != nil { + return err + } + } + return nil +} + +func (s *extSnapshotter) RestoreExtension(height uint64, format uint32, payloadReader snapshottypes.ExtensionPayloadReader) error { + for { + payload, err := payloadReader() + if err == io.EOF { + break + } else if err != nil { + return err + } + s.state = append(s.state, types.BigEndianToUint64(payload)) + } + // finalize restoration + return nil +} + +// GetTempDir returns a writable temporary director for the test to use. 
+func GetTempDir(tb testing.TB) string {
+	tb.Helper()
+	// os.MkdirTemp() is used instead of testing.T.TempDir()
+	// see https://github.com/cosmos/cosmos-sdk/pull/8475 and
+	// https://github.com/cosmos/cosmos-sdk/pull/10341 for
+	// this change's rationale.
+	tempdir, err := os.MkdirTemp("", "")
+	require.NoError(tb, err)
+	tb.Cleanup(func() { _ = os.RemoveAll(tempdir) })
+	return tempdir
+}
diff --git a/cosmos-sdk-store/snapshots/manager.go b/cosmos-sdk-store/snapshots/manager.go
new file mode 100755
index 000000000..3bedcd3ae
--- /dev/null
+++ b/cosmos-sdk-store/snapshots/manager.go
@@ -0,0 +1,558 @@
+package snapshots
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"os"
+	"sort"
+	"sync"
+
+	errorsmod "cosmossdk.io/errors"
+	"cosmossdk.io/log"
+	"cosmossdk.io/store/snapshots/types"
+	storetypes "cosmossdk.io/store/types"
+)
+
+// Manager manages snapshot and restore operations for an app, making sure only a single
+// long-running operation is in progress at any given time, and provides convenience methods
+// mirroring the ABCI interface.
+//
+// Although the ABCI interface (and this manager) passes chunks as byte slices, the internal
+// snapshot/restore APIs use IO streams (i.e. chan io.ReadCloser), for two reasons:
+//
+// 1. In the future, ABCI should support streaming. Consider e.g. InitChain during chain
+// upgrades, which currently passes the entire chain state as an in-memory byte slice.
+// https://github.com/tendermint/tendermint/issues/5184
+//
+// 2. io.ReadCloser streams automatically propagate IO errors, and can pass arbitrary
+// errors via io.Pipe.CloseWithError().
+type Manager struct {
+	extensions map[string]types.ExtensionSnapshotter
+	// store is the snapshot store where all completed snapshots are persisted.
+	store *Store
+	opts  types.SnapshotOptions
+	// multistore is the store from which snapshots are taken.
+	multistore types.Snapshotter
+	logger     log.Logger
+
+	mtx               sync.Mutex
+	operation         operation
+	chRestore         chan<- uint32
+	chRestoreDone     <-chan restoreDone
+	restoreSnapshot   *types.Snapshot
+	restoreChunkIndex uint32
+}
+
+// operation represents a Manager operation. Only one operation can be in progress at a time.
+type operation string
+
+// restoreDone represents the result of a restore operation.
+type restoreDone struct {
+	complete bool  // if true, restore completed successfully (not prematurely)
+	err      error // if non-nil, restore errored
+}
+
+const (
+	opNone     operation = ""
+	opSnapshot operation = "snapshot"
+	opPrune    operation = "prune"
+	opRestore  operation = "restore"
+
+	chunkBufferSize   = 4
+	chunkIDBufferSize = 1024
+
+	snapshotMaxItemSize = int(64e6) // SDK has no key/value size limit, so we set an arbitrary limit
+)
+
+var ErrOptsZeroSnapshotInterval = errors.New("snapshot-interval must not be 0")
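+
+// Only one operation may run at a time: begin reserves the operation slot or
+// fails with storetypes.ErrConflict, and end releases it. The helper below is
+// a hypothetical usage sketch (not part of the upstream API) showing the
+// intended begin/defer-end pattern:
+func exampleOperationGuard(m *Manager) error {
+	if err := m.begin(opPrune); err != nil {
+		return err // e.g. "a snapshot operation is in progress"
+	}
+	defer m.end()
+	// ... do the guarded work while holding the operation slot ...
+	return nil
+}
+
+// NewManager creates a new manager.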
+func NewManager(store *Store, opts types.SnapshotOptions, multistore types.Snapshotter, extensions map[string]types.ExtensionSnapshotter, logger log.Logger) *Manager {
+	if extensions == nil {
+		extensions = map[string]types.ExtensionSnapshotter{}
+	}
+	return &Manager{
+		store:      store,
+		opts:       opts,
+		multistore: multistore,
+		extensions: extensions,
+		logger:     logger,
+	}
+}
+
+// RegisterExtensions registers extension snapshotters with the manager.
+func (m *Manager) RegisterExtensions(extensions ...types.ExtensionSnapshotter) error {
+	if m.extensions == nil {
+		m.extensions = make(map[string]types.ExtensionSnapshotter, len(extensions))
+	}
+	for _, extension := range extensions {
+		name := extension.SnapshotName()
+		if _, ok := m.extensions[name]; ok {
+			return fmt.Errorf("duplicated snapshotter name: %s", name)
+		}
+		if !IsFormatSupported(extension, extension.SnapshotFormat()) {
+			return fmt.Errorf("snapshotter %s does not support its own snapshot format %d", name, extension.SnapshotFormat())
+		}
+		m.extensions[name] = extension
+	}
+	return nil
+}
+
+// begin starts an operation, or errors if one is in progress. It manages the mutex itself.
+func (m *Manager) begin(op operation) error {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	return m.beginLocked(op)
+}
+
+// beginLocked begins an operation while already holding the mutex.
+func (m *Manager) beginLocked(op operation) error {
+	if op == opNone {
+		return errorsmod.Wrap(storetypes.ErrLogic, "can't begin a none operation")
+	}
+	if m.operation != opNone {
+		return errorsmod.Wrapf(storetypes.ErrConflict, "a %v operation is in progress", m.operation)
+	}
+	m.operation = op
+	return nil
+}
+
+// end ends the current operation.
+func (m *Manager) end() {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	m.endLocked()
+}
+
+// endLocked ends the current operation while already holding the mutex.
+func (m *Manager) endLocked() {
+	m.operation = opNone
+	if m.chRestore != nil {
+		close(m.chRestore)
+		m.chRestore = nil
+	}
+	m.chRestoreDone = nil
+	m.restoreSnapshot = nil
+	m.restoreChunkIndex = 0
+}
+
+// GetInterval returns the snapshot interval, represented in heights.
+func (m *Manager) GetInterval() uint64 {
+	return m.opts.Interval
+}
+
+// GetKeepRecent returns the snapshot keep-recent setting, represented in heights.
+func (m *Manager) GetKeepRecent() uint32 {
+	return m.opts.KeepRecent
+}
+
+// GetSnapshotBlockRetentionHeights returns the number of heights needed
+// for block retention. Blocks since the oldest available snapshot must be
+// available for state sync nodes to catch up (oldest because a node may be
+// restoring an old snapshot while a new snapshot was taken).
+func (m *Manager) GetSnapshotBlockRetentionHeights() int64 {
+	return int64(m.opts.Interval * uint64(m.opts.KeepRecent))
+}
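+
+// For example, with Interval=1000 and KeepRecent=2 a node must retain the last
+// 2000 blocks: a peer may still be restoring the snapshot taken at height
+// N-2000 while the snapshot at height N is being created. A minimal
+// illustration (hypothetical helper, not part of the upstream API):
+func exampleRetention() int64 {
+	m := &Manager{opts: types.NewSnapshotOptions(1000, 2)}
+	return m.GetSnapshotBlockRetentionHeights() // 1000 * 2 = 2000
+}
+
+// Create creates a snapshot and returns its metadata.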
+func (m *Manager) Create(height uint64) (*types.Snapshot, error) {
+	if m == nil {
+		return nil, errorsmod.Wrap(storetypes.ErrLogic, "no snapshot store configured")
+	}
+
+	defer m.multistore.PruneSnapshotHeight(int64(height))
+
+	err := m.begin(opSnapshot)
+	if err != nil {
+		return nil, err
+	}
+	defer m.end()
+
+	latest, err := m.store.GetLatest()
+	if err != nil {
+		return nil, errorsmod.Wrap(err, "failed to examine latest snapshot")
+	}
+	if latest != nil && latest.Height >= height {
+		return nil, errorsmod.Wrapf(storetypes.ErrConflict,
+			"a more recent snapshot already exists at height %v", latest.Height)
+	}
+
+	// Spawn goroutine to generate snapshot chunks and pass their io.ReadClosers through a channel
+	ch := make(chan io.ReadCloser)
+	go m.createSnapshot(height, ch)
+
+	return m.store.Save(height, types.CurrentFormat, ch)
+}
+
+// createSnapshot does the heavy work of snapshotting after the request validations have passed;
+// the produced chunks are written to the channel.
+func (m *Manager) createSnapshot(height uint64, ch chan<- io.ReadCloser) {
+	streamWriter := NewStreamWriter(ch)
+	if streamWriter == nil {
+		return
+	}
+	defer func() {
+		if err := streamWriter.Close(); err != nil {
+			streamWriter.CloseWithError(err)
+		}
+	}()
+
+	if err := m.multistore.Snapshot(height, streamWriter); err != nil {
+		streamWriter.CloseWithError(err)
+		return
+	}
+	for _, name := range m.sortedExtensionNames() {
+		extension := m.extensions[name]
+		// write extension metadata
+		err := streamWriter.WriteMsg(&types.SnapshotItem{
+			Item: &types.SnapshotItem_Extension{
+				Extension: &types.SnapshotExtensionMeta{
+					Name:   name,
+					Format: extension.SnapshotFormat(),
+				},
+			},
+		})
+		if err != nil {
+			streamWriter.CloseWithError(err)
+			return
+		}
+		payloadWriter := func(payload []byte) error {
+			return types.WriteExtensionPayload(streamWriter, payload)
+		}
+		if err := extension.SnapshotExtension(height, payloadWriter); err != nil {
+			streamWriter.CloseWithError(err)
+			return
+		}
+	}
+}
+
+// List lists snapshots, mirroring ABCI ListSnapshots. It can be concurrent with other operations.
+func (m *Manager) List() ([]*types.Snapshot, error) {
+	return m.store.List()
+}
+
+// LoadChunk loads a chunk into a byte slice, mirroring ABCI LoadChunk. It can be called
+// concurrently with other operations. If the chunk does not exist, nil is returned.
+func (m *Manager) LoadChunk(height uint64, format, chunk uint32) ([]byte, error) {
+	reader, err := m.store.LoadChunk(height, format, chunk)
+	if err != nil {
+		return nil, err
+	}
+	if reader == nil {
+		return nil, nil
+	}
+	defer reader.Close()
+
+	return io.ReadAll(reader)
+}
+
+// Prune prunes snapshots, if no other operations are in progress.
+func (m *Manager) Prune(retain uint32) (uint64, error) {
+	err := m.begin(opPrune)
+	if err != nil {
+		return 0, err
+	}
+	defer m.end()
+	return m.store.Prune(retain)
+}
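+
+// Serving state sync to peers composes List and LoadChunk, mirroring the ABCI
+// ListSnapshots/LoadSnapshotChunk handlers. A condensed sketch (hypothetical
+// helper with minimal error handling, not part of the upstream API):
+func serveChunk(m *Manager, height uint64, format, chunk uint32) ([]byte, error) {
+	snapshotList, err := m.List()
+	if err != nil {
+		return nil, err
+	}
+	for _, s := range snapshotList {
+		if s.Height == height && s.Format == format {
+			// nil means the chunk is unknown to this node
+			return m.LoadChunk(height, format, chunk)
+		}
+	}
+	return nil, errorsmod.Wrapf(storetypes.ErrLogic, "no snapshot for height %v format %v", height, format)
+}
+
+// Restore begins an async snapshot restoration, mirroring ABCI OfferSnapshot. Chunks must be fed
+// via RestoreChunk() until the restore is complete or a chunk fails.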
+func (m *Manager) Restore(snapshot types.Snapshot) error {
+	if snapshot.Chunks == 0 {
+		return errorsmod.Wrap(types.ErrInvalidMetadata, "no chunks")
+	}
+	if uint32(len(snapshot.Metadata.ChunkHashes)) != snapshot.Chunks {
+		return errorsmod.Wrapf(types.ErrInvalidMetadata, "snapshot has %v chunk hashes, but %v chunks",
+			uint32(len(snapshot.Metadata.ChunkHashes)),
+			snapshot.Chunks)
+	}
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	// check that the multistore supports the snapshot format preemptively
+	if snapshot.Format != types.CurrentFormat {
+		return errorsmod.Wrapf(types.ErrUnknownFormat, "snapshot format %v", snapshot.Format)
+	}
+	if snapshot.Height == 0 {
+		return errorsmod.Wrap(storetypes.ErrLogic, "cannot restore snapshot at height 0")
+	}
+	if snapshot.Height > uint64(math.MaxInt64) {
+		return errorsmod.Wrapf(types.ErrInvalidMetadata,
+			"snapshot height %v cannot exceed %v", snapshot.Height, int64(math.MaxInt64))
+	}
+
+	err := m.beginLocked(opRestore)
+	if err != nil {
+		return err
+	}
+
+	// Start an asynchronous snapshot restoration, passing chunks and completion status via channels.
+	chChunkIDs := make(chan uint32, chunkIDBufferSize)
+	chDone := make(chan restoreDone, 1)
+
+	dir := m.store.pathSnapshot(snapshot.Height, snapshot.Format)
+	if err := os.MkdirAll(dir, 0o750); err != nil {
+		return errorsmod.Wrapf(err, "failed to create snapshot directory %q", dir)
+	}
+
+	chChunks := m.loadChunkStream(snapshot.Height, snapshot.Format, chChunkIDs)
+
+	go func() {
+		err := m.doRestoreSnapshot(snapshot, chChunks)
+		chDone <- restoreDone{
+			complete: err == nil,
+			err:      err,
+		}
+		close(chDone)
+	}()
+
+	m.chRestore = chChunkIDs
+	m.chRestoreDone = chDone
+	m.restoreSnapshot = &snapshot
+	m.restoreChunkIndex = 0
+	return nil
+}
+
+func (m *Manager) loadChunkStream(height uint64, format uint32, chunkIDs <-chan uint32) <-chan io.ReadCloser {
+	chunks := make(chan io.ReadCloser, chunkBufferSize)
+	go func() {
+		defer close(chunks)
+
+		for chunkID := range chunkIDs {
+			chunk, err := m.store.loadChunkFile(height, format, chunkID)
+			if err != nil {
+				m.logger.Error("load chunk file failed", "height", height, "format", format, "chunk", chunkID, "err", err)
+				break
+			}
+			chunks <- chunk
+		}
+	}()
+
+	return chunks
+}
+
+// doRestoreSnapshot does the heavy work of snapshot restoration after the preliminary checks on the request have passed.
+func (m *Manager) doRestoreSnapshot(snapshot types.Snapshot, chChunks <-chan io.ReadCloser) error {
+	dir := m.store.pathSnapshot(snapshot.Height, snapshot.Format)
+	if err := os.MkdirAll(dir, 0o750); err != nil {
+		return errorsmod.Wrapf(err, "failed to create snapshot directory %q", dir)
+	}
+
+	var nextItem types.SnapshotItem
+	streamReader, err := NewStreamReader(chChunks)
+	if err != nil {
+		return err
+	}
+	defer streamReader.Close()
+
+	// payloadReader reads an extension payload for an extension snapshotter; it returns `io.EOF` at extension boundaries.
+	payloadReader := func() ([]byte, error) {
+		nextItem.Reset()
+		if err := streamReader.ReadMsg(&nextItem); err != nil {
+			return nil, err
+		}
+		payload := nextItem.GetExtensionPayload()
+		if payload == nil {
+			return nil, io.EOF
+		}
+		return payload.Payload, nil
+	}
+
+	nextItem, err = m.multistore.Restore(snapshot.Height, snapshot.Format, streamReader)
+	if err != nil {
+		return errorsmod.Wrap(err, "multistore restore")
+	}
+
+	for {
+		if nextItem.Item == nil {
+			// end of stream
+			break
+		}
+		metadata := nextItem.GetExtension()
+		if metadata == nil {
+			return errorsmod.Wrapf(storetypes.ErrLogic, "unknown snapshot item %T", nextItem.Item)
+		}
+		extension, ok := m.extensions[metadata.Name]
+		if !ok {
+			return errorsmod.Wrapf(storetypes.ErrLogic, "unknown extension snapshotter %s", metadata.Name)
+		}
+		if !IsFormatSupported(extension, metadata.Format) {
+			return errorsmod.Wrapf(types.ErrUnknownFormat, "format %v for extension %s", metadata.Format, metadata.Name)
+		}
+
+		if err := extension.RestoreExtension(snapshot.Height, metadata.Format, payloadReader); err != nil {
+			return errorsmod.Wrapf(err, "extension %s restore", metadata.Name)
+		}
+
+		if nextItem.GetExtensionPayload() != nil {
+			// err is nil at this point, so wrap ErrLogic to actually surface the failure
+			return errorsmod.Wrapf(storetypes.ErrLogic, "extension %s did not exhaust its payload stream", metadata.Name)
+		}
+	}
+	return nil
+}
+
+// RestoreChunk adds a chunk to an active snapshot restoration, mirroring ABCI ApplySnapshotChunk.
+// Chunks must be fed until the restore is complete (returning true) or a chunk errors.
+func (m *Manager) RestoreChunk(chunk []byte) (bool, error) {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	if m.operation != opRestore {
+		return false, errorsmod.Wrap(storetypes.ErrLogic, "no restore operation in progress")
+	}
+
+	if int(m.restoreChunkIndex) >= len(m.restoreSnapshot.Metadata.ChunkHashes) {
+		return false, errorsmod.Wrap(storetypes.ErrLogic, "received unexpected chunk")
+	}
+
+	// Check if any errors have occurred yet.
+	select {
+	case done := <-m.chRestoreDone:
+		m.endLocked()
+		if done.err != nil {
+			return false, done.err
+		}
+		return false, errorsmod.Wrap(storetypes.ErrLogic, "restore ended unexpectedly")
+	default:
+	}
+
+	// Verify the chunk hash.
+	hash := sha256.Sum256(chunk)
+	expected := m.restoreSnapshot.Metadata.ChunkHashes[m.restoreChunkIndex]
+	if !bytes.Equal(hash[:], expected) {
+		return false, errorsmod.Wrapf(types.ErrChunkHashMismatch,
+			"expected %x, got %x", expected, hash)
+	}
+
+	if err := m.store.saveChunkContent(chunk, m.restoreChunkIndex, m.restoreSnapshot); err != nil {
+		return false, errorsmod.Wrapf(err, "save chunk content %d", m.restoreChunkIndex)
+	}
+
+	// Pass the chunk to the restore, and wait for completion if it was the final one.
+	m.chRestore <- m.restoreChunkIndex
+	m.restoreChunkIndex++
+
+	if int(m.restoreChunkIndex) >= len(m.restoreSnapshot.Metadata.ChunkHashes) {
+		close(m.chRestore)
+		m.chRestore = nil
+
+		// The chunks have all been written to files, so we can save the snapshot to the db
+		// even if the restoration has not completed yet.
+		if err := m.store.saveSnapshot(m.restoreSnapshot); err != nil {
+			return false, errorsmod.Wrap(err, "save restoring snapshot")
+		}
+
+		done := <-m.chRestoreDone
+		m.endLocked()
+		if done.err != nil {
+			return false, done.err
+		}
+		if !done.complete {
+			return false, errorsmod.Wrap(storetypes.ErrLogic, "restore ended prematurely")
+		}
+
+		return true, nil
+	}
+	return false, nil
+}
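+
+// A caller typically feeds chunks in index order until RestoreChunk reports
+// completion. A minimal driver sketch (hypothetical helper, not part of the
+// upstream API; assumes the chunks arrive already ordered):
+func feedChunks(m *Manager, chunks [][]byte) error {
+	for _, c := range chunks {
+		done, err := m.RestoreChunk(c)
+		if err != nil {
+			return err // e.g. types.ErrChunkHashMismatch
+		}
+		if done {
+			return nil
+		}
+	}
+	return errors.New("ran out of chunks before the restore completed")
+}
+
+// RestoreLocalSnapshot restores app state from a local snapshot.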
+func (m *Manager) RestoreLocalSnapshot(height uint64, format uint32) error {
+	snapshot, ch, err := m.store.Load(height, format)
+	if err != nil {
+		return err
+	}
+
+	if snapshot == nil {
+		return fmt.Errorf("snapshot doesn't exist, height: %d, format: %d", height, format)
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	err = m.beginLocked(opRestore)
+	if err != nil {
+		return err
+	}
+	defer m.endLocked()
+
+	return m.doRestoreSnapshot(*snapshot, ch)
+}
+
+// sortedExtensionNames sorts extension names for deterministic iteration.
+func (m *Manager) sortedExtensionNames() []string {
+	names := make([]string, 0, len(m.extensions))
+	for name := range m.extensions {
+		names = append(names, name)
+	}
+
+	sort.Strings(names)
+	return names
+}
+
+// IsFormatSupported reports whether the snapshotter supports restoration from the given format.
+func IsFormatSupported(snapshotter types.ExtensionSnapshotter, format uint32) bool {
+	for _, i := range snapshotter.SupportedFormats() {
+		if i == format {
+			return true
+		}
+	}
+	return false
+}
+
+// SnapshotIfApplicable takes a snapshot of the current state if we are on a snapshot height.
+// It also prunes any old snapshots.
+func (m *Manager) SnapshotIfApplicable(height int64) {
+	if m == nil {
+		return
+	}
+	if !m.shouldTakeSnapshot(height) {
+		m.logger.Debug("snapshot is skipped", "height", height)
+		return
+	}
+	// create the snapshot in a separate goroutine so block processing is not blocked
+	go m.snapshot(height)
+}
+
+// shouldTakeSnapshot returns true if a snapshot should be taken at the given height.
+func (m *Manager) shouldTakeSnapshot(height int64) bool {
+	return m.opts.Interval > 0 && uint64(height)%m.opts.Interval == 0
+}
+
+func (m *Manager) snapshot(height int64) {
+	m.logger.Info("creating state snapshot", "height", height)
+
+	if height <= 0 {
+		m.logger.Error("snapshot height must be positive", "height", height)
+		return
+	}
+
+	snapshot, err := m.Create(uint64(height))
+	if err != nil {
+		m.logger.Error("failed to create state snapshot", "height", height, "err", err)
+		return
+	}
+
+	m.logger.Info("completed state snapshot", "height", height, "format", snapshot.Format)
+
+	if m.opts.KeepRecent > 0 {
+		m.logger.Debug("pruning state snapshots")
+
+		pruned, err := m.Prune(m.opts.KeepRecent)
+		if err != nil {
+			m.logger.Error("failed to prune state snapshots", "err", err)
+			return
+		}
+
+		m.logger.Debug("pruned state snapshots", "pruned", pruned)
+	}
+}
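+
+// With the options used in this package's tests (Interval=1500), snapshots are
+// attempted at heights 1500, 3000, 4500, and so on. A tiny illustration of the
+// modulo rule above (hypothetical helper, not part of the upstream API):
+func exampleSnapshotHeights(interval uint64, upTo int64) []int64 {
+	m := &Manager{opts: types.SnapshotOptions{Interval: interval}}
+	var heights []int64
+	for h := int64(1); h <= upTo; h++ {
+		if m.shouldTakeSnapshot(h) {
+			heights = append(heights, h)
+		}
+	}
+	return heights // e.g. interval=1500, upTo=5000 -> [1500 3000 4500]
+}
+
+// Close closes the snapshot database.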
+func (m *Manager) Close() error { + return m.store.db.Close() +} diff --git a/cosmos-sdk-store/snapshots/manager_test.go b/cosmos-sdk-store/snapshots/manager_test.go new file mode 100755 index 000000000..49f31e862 --- /dev/null +++ b/cosmos-sdk-store/snapshots/manager_test.go @@ -0,0 +1,258 @@ +package snapshots_test + +import ( + "errors" + "testing" + + db "github.com/cosmos/cosmos-db" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "cosmossdk.io/log" + "cosmossdk.io/store/snapshots" + "cosmossdk.io/store/snapshots/types" +) + +var opts = types.NewSnapshotOptions(1500, 2) + +func TestManager_List(t *testing.T) { + store := setupStore(t) + snapshotter := &mockSnapshotter{} + snapshotter.SetSnapshotInterval(opts.Interval) + manager := snapshots.NewManager(store, opts, snapshotter, nil, log.NewNopLogger()) + require.Equal(t, opts.Interval, snapshotter.GetSnapshotInterval()) + + mgrList, err := manager.List() + require.NoError(t, err) + storeList, err := store.List() + require.NoError(t, err) + + require.NotEmpty(t, storeList) + assert.Equal(t, storeList, mgrList) + + // list should not block or error on busy managers + manager = setupBusyManager(t) + list, err := manager.List() + require.NoError(t, err) + assert.Equal(t, []*types.Snapshot{}, list) + + require.NoError(t, manager.Close()) +} + +func TestManager_LoadChunk(t *testing.T) { + store := setupStore(t) + manager := snapshots.NewManager(store, opts, &mockSnapshotter{}, nil, log.NewNopLogger()) + + // Existing chunk should return body + chunk, err := manager.LoadChunk(2, 1, 1) + require.NoError(t, err) + assert.Equal(t, []byte{2, 1, 1}, chunk) + + // Missing chunk should return nil + chunk, err = manager.LoadChunk(2, 1, 9) + require.NoError(t, err) + assert.Nil(t, chunk) + + // LoadChunk should not block or error on busy managers + manager = setupBusyManager(t) + chunk, err = manager.LoadChunk(2, 1, 0) + require.NoError(t, err) + assert.Nil(t, chunk) +} + +func TestManager_Take(t *testing.T) { + store := setupStore(t) + items := [][]byte{ + {1, 2, 3}, + {4, 5, 6}, + {7, 8, 9}, + } + snapshotter := &mockSnapshotter{ + items: items, + prunedHeights: make(map[int64]struct{}), + } + extSnapshotter := newExtSnapshotter(10) + + expectChunks := snapshotItems(items, extSnapshotter) + manager := snapshots.NewManager(store, opts, snapshotter, nil, log.NewNopLogger()) + err := manager.RegisterExtensions(extSnapshotter) + require.NoError(t, err) + + // nil manager should return error + _, err = (*snapshots.Manager)(nil).Create(1) + require.Error(t, err) + + // creating a snapshot at a lower height than the latest should error + _, err = manager.Create(3) + require.Error(t, err) + _, didPruneHeight := snapshotter.prunedHeights[3] + require.True(t, didPruneHeight) + + // creating a snapshot at a higher height should be fine, and should return it + snapshot, err := manager.Create(5) + require.NoError(t, err) + _, didPruneHeight = snapshotter.prunedHeights[5] + require.True(t, didPruneHeight) + + assert.Equal(t, &types.Snapshot{ + Height: 5, + Format: snapshotter.SnapshotFormat(), + Chunks: 1, + Hash: []uint8{0xc5, 0xf7, 0xfe, 0xea, 0xd3, 0x4d, 0x3e, 0x87, 0xff, 0x41, 0xa2, 0x27, 0xfa, 0xcb, 0x38, 0x17, 0xa, 0x5, 0xeb, 0x27, 0x4e, 0x16, 0x5e, 0xf3, 0xb2, 0x8b, 0x47, 0xd1, 0xe6, 0x94, 0x7e, 0x8b}, + Metadata: types.Metadata{ + ChunkHashes: checksums(expectChunks), + }, + }, snapshot) + + storeSnapshot, chunks, err := store.Load(snapshot.Height, snapshot.Format) + require.NoError(t, err) + assert.Equal(t, snapshot, 
storeSnapshot) + assert.Equal(t, expectChunks, readChunks(chunks)) + + // creating a snapshot while a different snapshot is being created should error + manager = setupBusyManager(t) + _, err = manager.Create(9) + require.Error(t, err) +} + +func TestManager_Prune(t *testing.T) { + store := setupStore(t) + snapshotter := &mockSnapshotter{} + snapshotter.SetSnapshotInterval(opts.Interval) + manager := snapshots.NewManager(store, opts, snapshotter, nil, log.NewNopLogger()) + + pruned, err := manager.Prune(2) + require.NoError(t, err) + assert.EqualValues(t, 1, pruned) + + list, err := manager.List() + require.NoError(t, err) + assert.Len(t, list, 3) + + // Prune should error while a snapshot is being taken + manager = setupBusyManager(t) + _, err = manager.Prune(2) + require.Error(t, err) +} + +func TestManager_Restore(t *testing.T) { + store := setupStore(t) + target := &mockSnapshotter{ + prunedHeights: make(map[int64]struct{}), + } + extSnapshotter := newExtSnapshotter(0) + manager := snapshots.NewManager(store, opts, target, nil, log.NewNopLogger()) + err := manager.RegisterExtensions(extSnapshotter) + require.NoError(t, err) + + expectItems := [][]byte{ + {1, 2, 3}, + {4, 5, 6}, + {7, 8, 9}, + } + + chunks := snapshotItems(expectItems, newExtSnapshotter(10)) + + // Restore errors on invalid format + err = manager.Restore(types.Snapshot{ + Height: 3, + Format: 0, + Hash: []byte{1, 2, 3}, + Chunks: uint32(len(chunks)), + Metadata: types.Metadata{ChunkHashes: checksums(chunks)}, + }) + require.Error(t, err) + require.ErrorIs(t, err, types.ErrUnknownFormat) + + // Restore errors on no chunks + err = manager.Restore(types.Snapshot{Height: 3, Format: types.CurrentFormat, Hash: []byte{1, 2, 3}}) + require.Error(t, err) + + // Restore errors on chunk and chunkhashes mismatch + err = manager.Restore(types.Snapshot{ + Height: 3, + Format: types.CurrentFormat, + Hash: []byte{1, 2, 3}, + Chunks: 4, + Metadata: types.Metadata{ChunkHashes: checksums(chunks)}, + }) + require.Error(t, err) + + // Starting a restore works + err = manager.Restore(types.Snapshot{ + Height: 3, + Format: types.CurrentFormat, + Hash: []byte{1, 2, 3}, + Chunks: 1, + Metadata: types.Metadata{ChunkHashes: checksums(chunks)}, + }) + require.NoError(t, err) + + // While the restore is in progress, any other operations fail + _, err = manager.Create(4) + require.Error(t, err) + _, didPruneHeight := target.prunedHeights[4] + require.True(t, didPruneHeight) + + _, err = manager.Prune(1) + require.Error(t, err) + + // Feeding an invalid chunk should error due to invalid checksum, but not abort restoration. + _, err = manager.RestoreChunk([]byte{9, 9, 9}) + require.Error(t, err) + require.True(t, errors.Is(err, types.ErrChunkHashMismatch)) + + // Feeding the chunks should work + for i, chunk := range chunks { + done, err := manager.RestoreChunk(chunk) + require.NoError(t, err) + if i == len(chunks)-1 { + assert.True(t, done) + } else { + assert.False(t, done) + } + } + + assert.Equal(t, expectItems, target.items) + assert.Equal(t, 10, len(extSnapshotter.state)) + + // The snapshot is saved in local snapshot store + snapshots, err := store.List() + require.NoError(t, err) + snapshot := snapshots[0] + require.Equal(t, uint64(3), snapshot.Height) + require.Equal(t, types.CurrentFormat, snapshot.Format) + + // Starting a new restore should fail now, because the target already has contents. 
+ err = manager.Restore(types.Snapshot{ + Height: 3, + Format: types.CurrentFormat, + Hash: []byte{1, 2, 3}, + Chunks: 3, + Metadata: types.Metadata{ChunkHashes: checksums(chunks)}, + }) + require.Error(t, err) + + // But if we clear out the target we should be able to start a new restore. This time we'll + // fail it with a checksum error. That error should stop the operation, so that we can do + // a prune operation right after. + target.items = nil + err = manager.Restore(types.Snapshot{ + Height: 3, + Format: types.CurrentFormat, + Hash: []byte{1, 2, 3}, + Chunks: 1, + Metadata: types.Metadata{ChunkHashes: checksums(chunks)}, + }) + require.NoError(t, err) +} + +func TestManager_TakeError(t *testing.T) { + snapshotter := &mockErrorSnapshotter{} + store, err := snapshots.NewStore(db.NewMemDB(), GetTempDir(t)) + require.NoError(t, err) + manager := snapshots.NewManager(store, opts, snapshotter, nil, log.NewNopLogger()) + + _, err = manager.Create(1) + require.Error(t, err) +} diff --git a/cosmos-sdk-store/snapshots/store.go b/cosmos-sdk-store/snapshots/store.go new file mode 100755 index 000000000..2f08a6e6c --- /dev/null +++ b/cosmos-sdk-store/snapshots/store.go @@ -0,0 +1,369 @@ +package snapshots + +import ( + "crypto/sha256" + "encoding/binary" + "hash" + "io" + "math" + "os" + "path/filepath" + "strconv" + "sync" + + db "github.com/cosmos/cosmos-db" + "github.com/cosmos/gogoproto/proto" + + "cosmossdk.io/errors" + "cosmossdk.io/store/snapshots/types" + storetypes "cosmossdk.io/store/types" +) + +const ( + // keyPrefixSnapshot is the prefix for snapshot database keys + keyPrefixSnapshot byte = 0x01 +) + +// Store is a snapshot store, containing snapshot metadata and binary chunks. +type Store struct { + db db.DB + dir string + + mtx sync.Mutex + saving map[uint64]bool // heights currently being saved +} + +// NewStore creates a new snapshot store. +func NewStore(db db.DB, dir string) (*Store, error) { + if dir == "" { + return nil, errors.Wrap(storetypes.ErrLogic, "snapshot directory not given") + } + err := os.MkdirAll(dir, 0o755) + if err != nil { + return nil, errors.Wrapf(err, "failed to create snapshot directory %q", dir) + } + + return &Store{ + db: db, + dir: dir, + saving: make(map[uint64]bool), + }, nil +} + +// Delete deletes a snapshot. +func (s *Store) Delete(height uint64, format uint32) error { + s.mtx.Lock() + saving := s.saving[height] + s.mtx.Unlock() + if saving { + return errors.Wrapf(storetypes.ErrConflict, + "snapshot for height %v format %v is currently being saved", height, format) + } + err := s.db.DeleteSync(encodeKey(height, format)) + if err != nil { + return errors.Wrapf(err, "failed to delete snapshot for height %v format %v", + height, format) + } + err = os.RemoveAll(s.pathSnapshot(height, format)) + return errors.Wrapf(err, "failed to delete snapshot chunks for height %v format %v", + height, format) +} + +// Get fetches snapshot info from the database. 
+func (s *Store) Get(height uint64, format uint32) (*types.Snapshot, error) {
+	bytes, err := s.db.Get(encodeKey(height, format))
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to fetch snapshot metadata for height %v format %v",
+			height, format)
+	}
+	if bytes == nil {
+		return nil, nil
+	}
+	snapshot := &types.Snapshot{}
+	err = proto.Unmarshal(bytes, snapshot)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to decode snapshot metadata for height %v format %v",
+			height, format)
+	}
+	if snapshot.Metadata.ChunkHashes == nil {
+		snapshot.Metadata.ChunkHashes = [][]byte{}
+	}
+	return snapshot, nil
+}
+
+// GetLatest fetches the latest snapshot from the database, if any.
+func (s *Store) GetLatest() (*types.Snapshot, error) {
+	iter, err := s.db.ReverseIterator(encodeKey(0, 0), encodeKey(uint64(math.MaxUint64), math.MaxUint32))
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to find latest snapshot")
+	}
+	defer iter.Close()
+
+	var snapshot *types.Snapshot
+	if iter.Valid() {
+		snapshot = &types.Snapshot{}
+		err := proto.Unmarshal(iter.Value(), snapshot)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to decode latest snapshot")
+		}
+	}
+	err = iter.Error()
+	return snapshot, errors.Wrap(err, "failed to find latest snapshot")
+}
+
+// List lists snapshots, in reverse order (newest first).
+func (s *Store) List() ([]*types.Snapshot, error) {
+	iter, err := s.db.ReverseIterator(encodeKey(0, 0), encodeKey(uint64(math.MaxUint64), math.MaxUint32))
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to list snapshots")
+	}
+	defer iter.Close()
+
+	snapshots := make([]*types.Snapshot, 0)
+	for ; iter.Valid(); iter.Next() {
+		snapshot := &types.Snapshot{}
+		err := proto.Unmarshal(iter.Value(), snapshot)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to decode snapshot info")
+		}
+		snapshots = append(snapshots, snapshot)
+	}
+	return snapshots, iter.Error()
+}
+
+// Load loads a snapshot (both metadata and binary chunks). The chunks must be consumed and closed.
+// Returns nil if the snapshot does not exist.
+func (s *Store) Load(height uint64, format uint32) (*types.Snapshot, <-chan io.ReadCloser, error) {
+	snapshot, err := s.Get(height, format)
+	if snapshot == nil || err != nil {
+		return nil, nil, err
+	}
+
+	ch := make(chan io.ReadCloser)
+	go func() {
+		defer close(ch)
+		for i := uint32(0); i < snapshot.Chunks; i++ {
+			pr, pw := io.Pipe()
+			ch <- pr
+			chunk, err := s.loadChunkFile(height, format, i)
+			if err != nil {
+				_ = pw.CloseWithError(err)
+				return
+			}
+			defer chunk.Close()
+			_, err = io.Copy(pw, chunk)
+			if err != nil {
+				_ = pw.CloseWithError(err)
+				return
+			}
+			chunk.Close()
+			pw.Close()
+		}
+	}()
+
+	return snapshot, ch, nil
+}
+
+// LoadChunk loads a chunk from disk, or returns nil if it does not exist. The caller must call
+// Close() on it when done.
+func (s *Store) LoadChunk(height uint64, format, chunk uint32) (io.ReadCloser, error) {
+	path := s.PathChunk(height, format, chunk)
+	file, err := os.Open(path)
+	if os.IsNotExist(err) {
+		return nil, nil
+	}
+	return file, err
+}
+
+// loadChunkFile loads a chunk from disk, and errors if it does not exist.
+func (s *Store) loadChunkFile(height uint64, format, chunk uint32) (io.ReadCloser, error) {
+	path := s.PathChunk(height, format, chunk)
+	return os.Open(path)
+}
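+
+// Snapshot keys are big-endian (height, format) tuples (see encodeKey below),
+// so their byte order equals their numeric order and a reverse iterator yields
+// the newest snapshot first. A small check of that property (illustrative
+// only, not part of the upstream API):
+func exampleKeyOrder() bool {
+	a := encodeKey(2, 1)
+	b := encodeKey(10, 0)
+	return string(a) < string(b) // height 2 sorts before height 10
+}
+
+// Prune removes old snapshots. Snapshots for the given number of most recent
+// heights (regardless of format) are retained.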
+func (s *Store) Prune(retain uint32) (uint64, error) { + iter, err := s.db.ReverseIterator(encodeKey(0, 0), encodeKey(uint64(math.MaxUint64), math.MaxUint32)) + if err != nil { + return 0, errors.Wrap(err, "failed to prune snapshots") + } + defer iter.Close() + + pruned := uint64(0) + prunedHeights := make(map[uint64]bool) + skip := make(map[uint64]bool) + for ; iter.Valid(); iter.Next() { + height, format, err := decodeKey(iter.Key()) + if err != nil { + return 0, errors.Wrap(err, "failed to prune snapshots") + } + if skip[height] || uint32(len(skip)) < retain { + skip[height] = true + continue + } + err = s.Delete(height, format) + if err != nil { + return 0, errors.Wrap(err, "failed to prune snapshots") + } + pruned++ + prunedHeights[height] = true + } + // Since Delete() deletes a specific format, while we want to prune a height, we clean up + // the height directory as well + for height, ok := range prunedHeights { + if ok { + err = os.Remove(s.pathHeight(height)) + if err != nil { + return 0, errors.Wrapf(err, "failed to remove snapshot directory for height %v", height) + } + } + } + return pruned, iter.Error() +} + +// Save saves a snapshot to disk, returning it. +func (s *Store) Save( + height uint64, format uint32, chunks <-chan io.ReadCloser, +) (*types.Snapshot, error) { + defer DrainChunks(chunks) + if height == 0 { + return nil, errors.Wrap(storetypes.ErrLogic, "snapshot height cannot be 0") + } + + s.mtx.Lock() + saving := s.saving[height] + s.saving[height] = true + s.mtx.Unlock() + if saving { + return nil, errors.Wrapf(storetypes.ErrConflict, + "a snapshot for height %v is already being saved", height) + } + defer func() { + s.mtx.Lock() + delete(s.saving, height) + s.mtx.Unlock() + }() + + exists, err := s.db.Has(encodeKey(height, format)) + if err != nil { + return nil, err + } + if exists { + return nil, errors.Wrapf(storetypes.ErrConflict, + "snapshot already exists for height %v format %v", height, format) + } + + snapshot := &types.Snapshot{ + Height: height, + Format: format, + } + + dirCreated := false + index := uint32(0) + snapshotHasher := sha256.New() + chunkHasher := sha256.New() + for chunkBody := range chunks { + // Only create the snapshot directory on encountering the first chunk. + // If the directory disappears during chunk saving, + // the whole operation will fail anyway. + if !dirCreated { + dir := s.pathSnapshot(height, format) + if err := os.MkdirAll(dir, 0o755); err != nil { + return nil, errors.Wrapf(err, "failed to create snapshot directory %q", dir) + } + + dirCreated = true + } + + if err := s.saveChunk(chunkBody, index, snapshot, chunkHasher, snapshotHasher); err != nil { + return nil, err + } + index++ + } + snapshot.Chunks = index + snapshot.Hash = snapshotHasher.Sum(nil) + return snapshot, s.saveSnapshot(snapshot) +} + +// saveChunk saves the given chunkBody with the given index to its appropriate path on disk. +// The hash of the chunk is appended to the snapshot's metadata, +// and the overall snapshot hash is updated with the chunk content too. 
+func (s *Store) saveChunk(chunkBody io.ReadCloser, index uint32, snapshot *types.Snapshot, chunkHasher, snapshotHasher hash.Hash) error { + defer chunkBody.Close() + + path := s.PathChunk(snapshot.Height, snapshot.Format, index) + chunkFile, err := os.Create(path) + if err != nil { + return errors.Wrapf(err, "failed to create snapshot chunk file %q", path) + } + defer chunkFile.Close() + + chunkHasher.Reset() + if _, err := io.Copy(io.MultiWriter(chunkFile, chunkHasher, snapshotHasher), chunkBody); err != nil { + return errors.Wrapf(err, "failed to generate snapshot chunk %d", index) + } + + if err := chunkFile.Close(); err != nil { + return errors.Wrapf(err, "failed to close snapshot chunk file %d", index) + } + + if err := chunkBody.Close(); err != nil { + return errors.Wrapf(err, "failed to close snapshot chunk body %d", index) + } + + snapshot.Metadata.ChunkHashes = append(snapshot.Metadata.ChunkHashes, chunkHasher.Sum(nil)) + return nil +} + +// saveChunkContent save the chunk to disk +func (s *Store) saveChunkContent(chunk []byte, index uint32, snapshot *types.Snapshot) error { + path := s.PathChunk(snapshot.Height, snapshot.Format, index) + return os.WriteFile(path, chunk, 0o600) +} + +// saveSnapshot saves snapshot metadata to the database. +func (s *Store) saveSnapshot(snapshot *types.Snapshot) error { + value, err := proto.Marshal(snapshot) + if err != nil { + return errors.Wrap(err, "failed to encode snapshot metadata") + } + err = s.db.SetSync(encodeKey(snapshot.Height, snapshot.Format), value) + return errors.Wrap(err, "failed to store snapshot") +} + +// pathHeight generates the path to a height, containing multiple snapshot formats. +func (s *Store) pathHeight(height uint64) string { + return filepath.Join(s.dir, strconv.FormatUint(height, 10)) +} + +// pathSnapshot generates a snapshot path, as a specific format under a height. +func (s *Store) pathSnapshot(height uint64, format uint32) string { + return filepath.Join(s.pathHeight(height), strconv.FormatUint(uint64(format), 10)) +} + +// PathChunk generates a snapshot chunk path. +func (s *Store) PathChunk(height uint64, format, chunk uint32) string { + return filepath.Join(s.pathSnapshot(height, format), strconv.FormatUint(uint64(chunk), 10)) +} + +// decodeKey decodes a snapshot key. +func decodeKey(k []byte) (uint64, uint32, error) { + if len(k) != 13 { + return 0, 0, errors.Wrapf(storetypes.ErrLogic, "invalid snapshot key with length %v", len(k)) + } + if k[0] != keyPrefixSnapshot { + return 0, 0, errors.Wrapf(storetypes.ErrLogic, "invalid snapshot key prefix %x", k[0]) + } + height := binary.BigEndian.Uint64(k[1:9]) + format := binary.BigEndian.Uint32(k[9:13]) + return height, format, nil +} + +// encodeKey encodes a snapshot key. 
+func encodeKey(height uint64, format uint32) []byte { + k := make([]byte, 13) + k[0] = keyPrefixSnapshot + binary.BigEndian.PutUint64(k[1:], height) + binary.BigEndian.PutUint32(k[9:], format) + return k +} diff --git a/cosmos-sdk-store/snapshots/store_test.go b/cosmos-sdk-store/snapshots/store_test.go new file mode 100755 index 000000000..f4ff0ef74 --- /dev/null +++ b/cosmos-sdk-store/snapshots/store_test.go @@ -0,0 +1,333 @@ +package snapshots_test + +import ( + "bytes" + "errors" + "io" + "testing" + "time" + + db "github.com/cosmos/cosmos-db" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "cosmossdk.io/store/snapshots" + "cosmossdk.io/store/snapshots/types" +) + +func setupStore(t *testing.T) *snapshots.Store { + t.Helper() + store, err := snapshots.NewStore(db.NewMemDB(), GetTempDir(t)) + require.NoError(t, err) + + _, err = store.Save(1, 1, makeChunks([][]byte{ + {1, 1, 0}, {1, 1, 1}, + })) + require.NoError(t, err) + _, err = store.Save(2, 1, makeChunks([][]byte{ + {2, 1, 0}, {2, 1, 1}, + })) + require.NoError(t, err) + _, err = store.Save(2, 2, makeChunks([][]byte{ + {2, 2, 0}, {2, 2, 1}, {2, 2, 2}, + })) + require.NoError(t, err) + _, err = store.Save(3, 2, makeChunks([][]byte{ + {3, 2, 0}, {3, 2, 1}, {3, 2, 2}, + })) + require.NoError(t, err) + + return store +} + +func TestNewStore(t *testing.T) { + tempdir := GetTempDir(t) + _, err := snapshots.NewStore(db.NewMemDB(), tempdir) + + require.NoError(t, err) +} + +func TestNewStore_ErrNoDir(t *testing.T) { + _, err := snapshots.NewStore(db.NewMemDB(), "") + require.Error(t, err) +} + +func TestStore_Delete(t *testing.T) { + store := setupStore(t) + // Deleting a snapshot should remove it + err := store.Delete(2, 2) + require.NoError(t, err) + + snapshot, err := store.Get(2, 2) + require.NoError(t, err) + assert.Nil(t, snapshot) + + snapshots, err := store.List() + require.NoError(t, err) + assert.Len(t, snapshots, 3) + + // Deleting it again should not error + err = store.Delete(2, 2) + require.NoError(t, err) + + // Deleting a snapshot being saved should error + ch := make(chan io.ReadCloser) + go func() { + _, err := store.Save(9, 1, ch) + require.NoError(t, err) + }() + + time.Sleep(10 * time.Millisecond) + err = store.Delete(9, 1) + require.Error(t, err) + + // But after it's saved it should work + close(ch) + time.Sleep(10 * time.Millisecond) + err = store.Delete(9, 1) + require.NoError(t, err) +} + +func TestStore_Get(t *testing.T) { + store := setupStore(t) + + // Loading a missing snapshot should return nil + snapshot, err := store.Get(9, 9) + require.NoError(t, err) + assert.Nil(t, snapshot) + + // Loading a snapshot should returns its metadata + snapshot, err = store.Get(2, 1) + require.NoError(t, err) + assert.Equal(t, &types.Snapshot{ + Height: 2, + Format: 1, + Chunks: 2, + Hash: hash([][]byte{{2, 1, 0}, {2, 1, 1}}), + Metadata: types.Metadata{ + ChunkHashes: checksums([][]byte{ + {2, 1, 0}, {2, 1, 1}, + }), + }, + }, snapshot) +} + +func TestStore_GetLatest(t *testing.T) { + store := setupStore(t) + // Loading a missing snapshot should return nil + snapshot, err := store.GetLatest() + require.NoError(t, err) + assert.Equal(t, &types.Snapshot{ + Height: 3, + Format: 2, + Chunks: 3, + Hash: hash([][]byte{ + {3, 2, 0}, + {3, 2, 1}, + {3, 2, 2}, + }), + Metadata: types.Metadata{ + ChunkHashes: checksums([][]byte{ + {3, 2, 0}, + {3, 2, 1}, + {3, 2, 2}, + }), + }, + }, snapshot) +} + +func TestStore_List(t *testing.T) { + store := setupStore(t) + snapshots, err := store.List() + 
require.NoError(t, err) + + require.Equal(t, []*types.Snapshot{ + { + Height: 3, Format: 2, Chunks: 3, Hash: hash([][]byte{{3, 2, 0}, {3, 2, 1}, {3, 2, 2}}), + Metadata: types.Metadata{ChunkHashes: checksums([][]byte{{3, 2, 0}, {3, 2, 1}, {3, 2, 2}})}, + }, + { + Height: 2, Format: 2, Chunks: 3, Hash: hash([][]byte{{2, 2, 0}, {2, 2, 1}, {2, 2, 2}}), + Metadata: types.Metadata{ChunkHashes: checksums([][]byte{{2, 2, 0}, {2, 2, 1}, {2, 2, 2}})}, + }, + { + Height: 2, Format: 1, Chunks: 2, Hash: hash([][]byte{{2, 1, 0}, {2, 1, 1}}), + Metadata: types.Metadata{ChunkHashes: checksums([][]byte{{2, 1, 0}, {2, 1, 1}})}, + }, + { + Height: 1, Format: 1, Chunks: 2, Hash: hash([][]byte{{1, 1, 0}, {1, 1, 1}}), + Metadata: types.Metadata{ChunkHashes: checksums([][]byte{{1, 1, 0}, {1, 1, 1}})}, + }, + }, snapshots) +} + +func TestStore_Load(t *testing.T) { + store := setupStore(t) + // Loading a missing snapshot should return nil + snapshot, chunks, err := store.Load(9, 9) + require.NoError(t, err) + assert.Nil(t, snapshot) + assert.Nil(t, chunks) + + // Loading a snapshot should returns its metadata and chunks + snapshot, chunks, err = store.Load(2, 1) + require.NoError(t, err) + assert.Equal(t, &types.Snapshot{ + Height: 2, + Format: 1, + Chunks: 2, + Hash: hash([][]byte{{2, 1, 0}, {2, 1, 1}}), + Metadata: types.Metadata{ + ChunkHashes: checksums([][]byte{ + {2, 1, 0}, {2, 1, 1}, + }), + }, + }, snapshot) + + for i := uint32(0); i < snapshot.Chunks; i++ { + reader, ok := <-chunks + require.True(t, ok) + chunk, err := io.ReadAll(reader) + require.NoError(t, err) + err = reader.Close() + require.NoError(t, err) + assert.Equal(t, []byte{2, 1, byte(i)}, chunk) + } + assert.Empty(t, chunks) +} + +func TestStore_LoadChunk(t *testing.T) { + store := setupStore(t) + // Loading a missing snapshot should return nil + chunk, err := store.LoadChunk(9, 9, 0) + require.NoError(t, err) + assert.Nil(t, chunk) + + // Loading a missing chunk index should return nil + chunk, err = store.LoadChunk(2, 1, 2) + require.NoError(t, err) + require.Nil(t, chunk) + + // Loading a chunk should returns a content reader + chunk, err = store.LoadChunk(2, 1, 0) + require.NoError(t, err) + require.NotNil(t, chunk) + body, err := io.ReadAll(chunk) + require.NoError(t, err) + assert.Equal(t, []byte{2, 1, 0}, body) + err = chunk.Close() + require.NoError(t, err) +} + +func TestStore_Prune(t *testing.T) { + store := setupStore(t) + // Pruning too many snapshots should be fine + pruned, err := store.Prune(4) + require.NoError(t, err) + assert.EqualValues(t, 0, pruned) + + snapshots, err := store.List() + require.NoError(t, err) + assert.Len(t, snapshots, 4) + + // Pruning until the last two heights should leave three snapshots (for two heights) + pruned, err = store.Prune(2) + require.NoError(t, err) + assert.EqualValues(t, 1, pruned) + + snapshots, err = store.List() + require.NoError(t, err) + require.Equal(t, []*types.Snapshot{ + { + Height: 3, Format: 2, Chunks: 3, Hash: hash([][]byte{{3, 2, 0}, {3, 2, 1}, {3, 2, 2}}), + Metadata: types.Metadata{ChunkHashes: checksums([][]byte{{3, 2, 0}, {3, 2, 1}, {3, 2, 2}})}, + }, + { + Height: 2, Format: 2, Chunks: 3, Hash: hash([][]byte{{2, 2, 0}, {2, 2, 1}, {2, 2, 2}}), + Metadata: types.Metadata{ChunkHashes: checksums([][]byte{{2, 2, 0}, {2, 2, 1}, {2, 2, 2}})}, + }, + { + Height: 2, Format: 1, Chunks: 2, Hash: hash([][]byte{{2, 1, 0}, {2, 1, 1}}), + Metadata: types.Metadata{ChunkHashes: checksums([][]byte{{2, 1, 0}, {2, 1, 1}})}, + }, + }, snapshots) + + // Pruning all heights should also be 
fine + pruned, err = store.Prune(0) + require.NoError(t, err) + assert.EqualValues(t, 3, pruned) + + snapshots, err = store.List() + require.NoError(t, err) + assert.Empty(t, snapshots) +} + +func TestStore_Save(t *testing.T) { + store := setupStore(t) + // Saving a snapshot should work + snapshot, err := store.Save(4, 1, makeChunks([][]byte{{1}, {2}})) + require.NoError(t, err) + assert.Equal(t, &types.Snapshot{ + Height: 4, + Format: 1, + Chunks: 2, + Hash: hash([][]byte{{1}, {2}}), + Metadata: types.Metadata{ + ChunkHashes: checksums([][]byte{{1}, {2}}), + }, + }, snapshot) + loaded, err := store.Get(snapshot.Height, snapshot.Format) + require.NoError(t, err) + assert.Equal(t, snapshot, loaded) + + // Saving an existing snapshot should error + _, err = store.Save(4, 1, makeChunks([][]byte{{1}, {2}})) + require.Error(t, err) + + // Saving at height 0 should error + _, err = store.Save(0, 1, makeChunks([][]byte{{1}, {2}})) + require.Error(t, err) + + // Saving at format 0 should be fine + _, err = store.Save(1, 0, makeChunks([][]byte{{1}, {2}})) + require.NoError(t, err) + + // Saving a snapshot with no chunks should be fine, as should loading it + _, err = store.Save(5, 1, makeChunks([][]byte{})) + require.NoError(t, err) + snapshot, chunks, err := store.Load(5, 1) + require.NoError(t, err) + assert.Equal(t, &types.Snapshot{Height: 5, Format: 1, Hash: hash([][]byte{}), Metadata: types.Metadata{ChunkHashes: [][]byte{}}}, snapshot) + assert.Empty(t, chunks) + + // Saving a snapshot should error if a chunk reader returns an error, and it should empty out + // the channel + someErr := errors.New("boom") + pr, pw := io.Pipe() + err = pw.CloseWithError(someErr) + require.NoError(t, err) + + ch := make(chan io.ReadCloser, 2) + ch <- pr + ch <- io.NopCloser(bytes.NewBuffer([]byte{0xff})) + close(ch) + + _, err = store.Save(6, 1, ch) + require.Error(t, err) + require.True(t, errors.Is(err, someErr)) + assert.Empty(t, ch) + + // Saving a snapshot should error if a snapshot is already in progress for the same height, + // regardless of format. However, a different height should succeed. + ch = make(chan io.ReadCloser) + go func() { + _, err := store.Save(7, 1, ch) + require.NoError(t, err) + }() + time.Sleep(10 * time.Millisecond) + _, err = store.Save(7, 2, makeChunks(nil)) + require.Error(t, err) + _, err = store.Save(8, 1, makeChunks(nil)) + require.NoError(t, err) + close(ch) +} diff --git a/cosmos-sdk-store/snapshots/stream.go b/cosmos-sdk-store/snapshots/stream.go new file mode 100755 index 000000000..e010f9224 --- /dev/null +++ b/cosmos-sdk-store/snapshots/stream.go @@ -0,0 +1,113 @@ +package snapshots + +import ( + "bufio" + "compress/zlib" + "io" + + protoio "github.com/cosmos/gogoproto/io" + "github.com/cosmos/gogoproto/proto" + + "cosmossdk.io/errors" +) + +const ( + // Do not change chunk size without new snapshot format (must be uniform across nodes) + snapshotChunkSize = uint64(10e6) + snapshotBufferSize = int(snapshotChunkSize) + // Do not change compression level without new snapshot format (must be uniform across nodes) + snapshotCompressionLevel = 7 +) + +// StreamWriter set up a stream pipeline to serialize snapshot nodes: +// Exported Items -> delimited Protobuf -> zlib -> buffer -> chunkWriter -> chan io.ReadCloser +type StreamWriter struct { + chunkWriter *ChunkWriter + bufWriter *bufio.Writer + zWriter *zlib.Writer + protoWriter protoio.WriteCloser +} + +// NewStreamWriter set up a stream pipeline to serialize snapshot DB records. 
+func NewStreamWriter(ch chan<- io.ReadCloser) *StreamWriter {
+	chunkWriter := NewChunkWriter(ch, snapshotChunkSize)
+	bufWriter := bufio.NewWriterSize(chunkWriter, snapshotBufferSize)
+	zWriter, err := zlib.NewWriterLevel(bufWriter, snapshotCompressionLevel)
+	if err != nil {
+		chunkWriter.CloseWithError(errors.Wrap(err, "zlib failure"))
+		return nil
+	}
+	protoWriter := protoio.NewDelimitedWriter(zWriter)
+	return &StreamWriter{
+		chunkWriter: chunkWriter,
+		bufWriter:   bufWriter,
+		zWriter:     zWriter,
+		protoWriter: protoWriter,
+	}
+}
+
+// WriteMsg implements the protoio.Writer interface.
+func (sw *StreamWriter) WriteMsg(msg proto.Message) error {
+	return sw.protoWriter.WriteMsg(msg)
+}
+
+// Close implements the io.Closer interface.
+func (sw *StreamWriter) Close() error {
+	if err := sw.protoWriter.Close(); err != nil {
+		sw.chunkWriter.CloseWithError(err)
+		return err
+	}
+	if err := sw.bufWriter.Flush(); err != nil {
+		sw.chunkWriter.CloseWithError(err)
+		return err
+	}
+	return sw.chunkWriter.Close()
+}
+
+// CloseWithError passes the error on to the chunkWriter.
+func (sw *StreamWriter) CloseWithError(err error) {
+	sw.chunkWriter.CloseWithError(err)
+}
+
+// StreamReader sets up a restore stream pipeline:
+// chan io.ReadCloser -> chunkReader -> zlib -> delimited Protobuf -> ExportNode
+type StreamReader struct {
+	chunkReader *ChunkReader
+	zReader     io.ReadCloser
+	protoReader protoio.ReadCloser
+}
+
+// NewStreamReader sets up a restore stream pipeline.
+func NewStreamReader(chunks <-chan io.ReadCloser) (*StreamReader, error) {
+	chunkReader := NewChunkReader(chunks)
+	zReader, err := zlib.NewReader(chunkReader)
+	if err != nil {
+		return nil, errors.Wrap(err, "zlib failure")
+	}
+	protoReader := protoio.NewDelimitedReader(zReader, snapshotMaxItemSize)
+	return &StreamReader{
+		chunkReader: chunkReader,
+		zReader:     zReader,
+		protoReader: protoReader,
+	}, nil
+}
+
+// ReadMsg implements the protoio.Reader interface.
+func (sr *StreamReader) ReadMsg(msg proto.Message) error {
+	return sr.protoReader.ReadMsg(msg)
+}
+
+// Close implements the io.Closer interface; it returns the last error encountered while closing the readers.
+func (sr *StreamReader) Close() error {
+	var err error
+	if err1 := sr.protoReader.Close(); err1 != nil {
+		err = err1
+	}
+	if err2 := sr.zReader.Close(); err2 != nil {
+		err = err2
+	}
+	if err3 := sr.chunkReader.Close(); err3 != nil {
+		err = err3
+	}
+	return err
+}
diff --git a/cosmos-sdk-store/snapshots/types/convert.go b/cosmos-sdk-store/snapshots/types/convert.go
new file mode 100755
index 000000000..90deead3b
--- /dev/null
+++ b/cosmos-sdk-store/snapshots/types/convert.go
@@ -0,0 +1,39 @@
+package types
+
+import (
+	abci "github.com/cometbft/cometbft/abci/types"
+	proto "github.com/cosmos/gogoproto/proto"
+
+	"cosmossdk.io/errors"
+)
+
+// SnapshotFromABCI converts an ABCI snapshot to a snapshot. Mainly to decode the SDK metadata.
+func SnapshotFromABCI(in *abci.Snapshot) (Snapshot, error) {
+	snapshot := Snapshot{
+		Height: in.Height,
+		Format: in.Format,
+		Chunks: in.Chunks,
+		Hash:   in.Hash,
+	}
+	err := proto.Unmarshal(in.Metadata, &snapshot.Metadata)
+	if err != nil {
+		return Snapshot{}, errors.Wrap(err, "failed to unmarshal snapshot metadata")
+	}
+	return snapshot, nil
+}
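+
+// Round-tripping through the ABCI representation preserves the SDK-specific
+// metadata, which travels inside the opaque abci.Snapshot.Metadata bytes.
+// A minimal sketch (hypothetical helper, not part of the upstream API):
+func exampleRoundTrip(s Snapshot) (Snapshot, error) {
+	abciSnapshot, err := s.ToABCI()
+	if err != nil {
+		return Snapshot{}, err
+	}
+	return SnapshotFromABCI(&abciSnapshot)
+}
+
+// ToABCI converts a Snapshot to its ABCI representation. Mainly to encode the SDK metadata.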
+func (s Snapshot) ToABCI() (abci.Snapshot, error) { + out := abci.Snapshot{ + Height: s.Height, + Format: s.Format, + Chunks: s.Chunks, + Hash: s.Hash, + } + var err error + out.Metadata, err = proto.Marshal(&s.Metadata) + if err != nil { + return abci.Snapshot{}, errors.Wrap(err, "failed to marshal snapshot metadata") + } + return out, nil +} diff --git a/cosmos-sdk-store/snapshots/types/errors.go b/cosmos-sdk-store/snapshots/types/errors.go new file mode 100755 index 000000000..c1b5db532 --- /dev/null +++ b/cosmos-sdk-store/snapshots/types/errors.go @@ -0,0 +1,19 @@ +package types + +import ( + "errors" +) + +var ( + // ErrUnknownFormat is returned when an unknown format is used. + ErrUnknownFormat = errors.New("unknown snapshot format") + + // ErrChunkHashMismatch is returned when chunk hash verification failed. + ErrChunkHashMismatch = errors.New("chunk hash verification failed") + + // ErrInvalidMetadata is returned when the snapshot metadata is invalid. + ErrInvalidMetadata = errors.New("invalid snapshot metadata") + + // ErrInvalidSnapshotVersion is returned when the snapshot version is invalid + ErrInvalidSnapshotVersion = errors.New("invalid snapshot version") +) diff --git a/cosmos-sdk-store/snapshots/types/format.go b/cosmos-sdk-store/snapshots/types/format.go new file mode 100755 index 000000000..317b6a6e3 --- /dev/null +++ b/cosmos-sdk-store/snapshots/types/format.go @@ -0,0 +1,6 @@ +package types + +// CurrentFormat is the currently used format for snapshots. Snapshots using the same format +// must be identical across all nodes for a given height, so this must be bumped when the binary +// snapshot output changes. +const CurrentFormat uint32 = 3 diff --git a/cosmos-sdk-store/snapshots/types/options.go b/cosmos-sdk-store/snapshots/types/options.go new file mode 100755 index 000000000..9c6ec79a1 --- /dev/null +++ b/cosmos-sdk-store/snapshots/types/options.go @@ -0,0 +1,18 @@ +package types + +// SnapshotOptions defines the snapshot strategy used when determining which +// heights are snapshotted for state sync. +type SnapshotOptions struct { + // Interval defines at which heights the snapshot is taken. + Interval uint64 + + // KeepRecent defines how many snapshots to keep in heights. + KeepRecent uint32 +} + +func NewSnapshotOptions(interval uint64, keepRecent uint32) SnapshotOptions { + return SnapshotOptions{ + Interval: interval, + KeepRecent: keepRecent, + } +} diff --git a/cosmos-sdk-store/snapshots/types/snapshot.pb.go b/cosmos-sdk-store/snapshots/types/snapshot.pb.go new file mode 100755 index 000000000..686e11905 --- /dev/null +++ b/cosmos-sdk-store/snapshots/types/snapshot.pb.go @@ -0,0 +1,2014 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cosmos/store/snapshots/v1/snapshot.proto + +package types + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Snapshot contains Tendermint state sync snapshot info. 
+type Snapshot struct { + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Format uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"` + Chunks uint32 `protobuf:"varint,3,opt,name=chunks,proto3" json:"chunks,omitempty"` + Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` + Metadata Metadata `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_3d5cca1aa5b69183, []int{0} +} +func (m *Snapshot) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Snapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_Snapshot.Merge(m, src) +} +func (m *Snapshot) XXX_Size() int { + return m.Size() +} +func (m *Snapshot) XXX_DiscardUnknown() { + xxx_messageInfo_Snapshot.DiscardUnknown(m) +} + +var xxx_messageInfo_Snapshot proto.InternalMessageInfo + +func (m *Snapshot) GetHeight() uint64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *Snapshot) GetFormat() uint32 { + if m != nil { + return m.Format + } + return 0 +} + +func (m *Snapshot) GetChunks() uint32 { + if m != nil { + return m.Chunks + } + return 0 +} + +func (m *Snapshot) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *Snapshot) GetMetadata() Metadata { + if m != nil { + return m.Metadata + } + return Metadata{} +} + +// Metadata contains SDK-specific snapshot metadata. +type Metadata struct { + ChunkHashes [][]byte `protobuf:"bytes,1,rep,name=chunk_hashes,json=chunkHashes,proto3" json:"chunk_hashes,omitempty"` +} + +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { + return fileDescriptor_3d5cca1aa5b69183, []int{1} +} +func (m *Metadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Metadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metadata.Merge(m, src) +} +func (m *Metadata) XXX_Size() int { + return m.Size() +} +func (m *Metadata) XXX_DiscardUnknown() { + xxx_messageInfo_Metadata.DiscardUnknown(m) +} + +var xxx_messageInfo_Metadata proto.InternalMessageInfo + +func (m *Metadata) GetChunkHashes() [][]byte { + if m != nil { + return m.ChunkHashes + } + return nil +} + +// SnapshotItem is an item contained in a rootmulti.Store snapshot. +// +// Since: cosmos-sdk 0.46 +type SnapshotItem struct { + // item is the specific type of snapshot item. 
+ // + // Types that are valid to be assigned to Item: + // + // *SnapshotItem_Store + // *SnapshotItem_IAVL + // *SnapshotItem_Extension + // *SnapshotItem_ExtensionPayload + Item isSnapshotItem_Item `protobuf_oneof:"item"` +} + +func (m *SnapshotItem) Reset() { *m = SnapshotItem{} } +func (m *SnapshotItem) String() string { return proto.CompactTextString(m) } +func (*SnapshotItem) ProtoMessage() {} +func (*SnapshotItem) Descriptor() ([]byte, []int) { + return fileDescriptor_3d5cca1aa5b69183, []int{2} +} +func (m *SnapshotItem) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SnapshotItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SnapshotItem.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SnapshotItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_SnapshotItem.Merge(m, src) +} +func (m *SnapshotItem) XXX_Size() int { + return m.Size() +} +func (m *SnapshotItem) XXX_DiscardUnknown() { + xxx_messageInfo_SnapshotItem.DiscardUnknown(m) +} + +var xxx_messageInfo_SnapshotItem proto.InternalMessageInfo + +type isSnapshotItem_Item interface { + isSnapshotItem_Item() + MarshalTo([]byte) (int, error) + Size() int +} + +type SnapshotItem_Store struct { + Store *SnapshotStoreItem `protobuf:"bytes,1,opt,name=store,proto3,oneof" json:"store,omitempty"` +} +type SnapshotItem_IAVL struct { + IAVL *SnapshotIAVLItem `protobuf:"bytes,2,opt,name=iavl,proto3,oneof" json:"iavl,omitempty"` +} +type SnapshotItem_Extension struct { + Extension *SnapshotExtensionMeta `protobuf:"bytes,3,opt,name=extension,proto3,oneof" json:"extension,omitempty"` +} +type SnapshotItem_ExtensionPayload struct { + ExtensionPayload *SnapshotExtensionPayload `protobuf:"bytes,4,opt,name=extension_payload,json=extensionPayload,proto3,oneof" json:"extension_payload,omitempty"` +} + +func (*SnapshotItem_Store) isSnapshotItem_Item() {} +func (*SnapshotItem_IAVL) isSnapshotItem_Item() {} +func (*SnapshotItem_Extension) isSnapshotItem_Item() {} +func (*SnapshotItem_ExtensionPayload) isSnapshotItem_Item() {} + +func (m *SnapshotItem) GetItem() isSnapshotItem_Item { + if m != nil { + return m.Item + } + return nil +} + +func (m *SnapshotItem) GetStore() *SnapshotStoreItem { + if x, ok := m.GetItem().(*SnapshotItem_Store); ok { + return x.Store + } + return nil +} + +func (m *SnapshotItem) GetIAVL() *SnapshotIAVLItem { + if x, ok := m.GetItem().(*SnapshotItem_IAVL); ok { + return x.IAVL + } + return nil +} + +func (m *SnapshotItem) GetExtension() *SnapshotExtensionMeta { + if x, ok := m.GetItem().(*SnapshotItem_Extension); ok { + return x.Extension + } + return nil +} + +func (m *SnapshotItem) GetExtensionPayload() *SnapshotExtensionPayload { + if x, ok := m.GetItem().(*SnapshotItem_ExtensionPayload); ok { + return x.ExtensionPayload + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*SnapshotItem) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*SnapshotItem_Store)(nil), + (*SnapshotItem_IAVL)(nil), + (*SnapshotItem_Extension)(nil), + (*SnapshotItem_ExtensionPayload)(nil), + } +} + +// SnapshotStoreItem contains metadata about a snapshotted store. 
+// +// Since: cosmos-sdk 0.46 +type SnapshotStoreItem struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (m *SnapshotStoreItem) Reset() { *m = SnapshotStoreItem{} } +func (m *SnapshotStoreItem) String() string { return proto.CompactTextString(m) } +func (*SnapshotStoreItem) ProtoMessage() {} +func (*SnapshotStoreItem) Descriptor() ([]byte, []int) { + return fileDescriptor_3d5cca1aa5b69183, []int{3} +} +func (m *SnapshotStoreItem) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SnapshotStoreItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SnapshotStoreItem.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SnapshotStoreItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_SnapshotStoreItem.Merge(m, src) +} +func (m *SnapshotStoreItem) XXX_Size() int { + return m.Size() +} +func (m *SnapshotStoreItem) XXX_DiscardUnknown() { + xxx_messageInfo_SnapshotStoreItem.DiscardUnknown(m) +} + +var xxx_messageInfo_SnapshotStoreItem proto.InternalMessageInfo + +func (m *SnapshotStoreItem) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// SnapshotIAVLItem is an exported IAVL node. +// +// Since: cosmos-sdk 0.46 +type SnapshotIAVLItem struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + // version is block height + Version int64 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"` + // height is depth of the tree. + Height int32 `protobuf:"varint,4,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *SnapshotIAVLItem) Reset() { *m = SnapshotIAVLItem{} } +func (m *SnapshotIAVLItem) String() string { return proto.CompactTextString(m) } +func (*SnapshotIAVLItem) ProtoMessage() {} +func (*SnapshotIAVLItem) Descriptor() ([]byte, []int) { + return fileDescriptor_3d5cca1aa5b69183, []int{4} +} +func (m *SnapshotIAVLItem) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SnapshotIAVLItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SnapshotIAVLItem.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SnapshotIAVLItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_SnapshotIAVLItem.Merge(m, src) +} +func (m *SnapshotIAVLItem) XXX_Size() int { + return m.Size() +} +func (m *SnapshotIAVLItem) XXX_DiscardUnknown() { + xxx_messageInfo_SnapshotIAVLItem.DiscardUnknown(m) +} + +var xxx_messageInfo_SnapshotIAVLItem proto.InternalMessageInfo + +func (m *SnapshotIAVLItem) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *SnapshotIAVLItem) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *SnapshotIAVLItem) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *SnapshotIAVLItem) GetHeight() int32 { + if m != nil { + return m.Height + } + return 0 +} + +// SnapshotExtensionMeta contains metadata about an external snapshotter. 
+// +// Since: cosmos-sdk 0.46 +type SnapshotExtensionMeta struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Format uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"` +} + +func (m *SnapshotExtensionMeta) Reset() { *m = SnapshotExtensionMeta{} } +func (m *SnapshotExtensionMeta) String() string { return proto.CompactTextString(m) } +func (*SnapshotExtensionMeta) ProtoMessage() {} +func (*SnapshotExtensionMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_3d5cca1aa5b69183, []int{5} +} +func (m *SnapshotExtensionMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SnapshotExtensionMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SnapshotExtensionMeta.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SnapshotExtensionMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_SnapshotExtensionMeta.Merge(m, src) +} +func (m *SnapshotExtensionMeta) XXX_Size() int { + return m.Size() +} +func (m *SnapshotExtensionMeta) XXX_DiscardUnknown() { + xxx_messageInfo_SnapshotExtensionMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_SnapshotExtensionMeta proto.InternalMessageInfo + +func (m *SnapshotExtensionMeta) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *SnapshotExtensionMeta) GetFormat() uint32 { + if m != nil { + return m.Format + } + return 0 +} + +// SnapshotExtensionPayload contains payloads of an external snapshotter. +// +// Since: cosmos-sdk 0.46 +type SnapshotExtensionPayload struct { + Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` +} + +func (m *SnapshotExtensionPayload) Reset() { *m = SnapshotExtensionPayload{} } +func (m *SnapshotExtensionPayload) String() string { return proto.CompactTextString(m) } +func (*SnapshotExtensionPayload) ProtoMessage() {} +func (*SnapshotExtensionPayload) Descriptor() ([]byte, []int) { + return fileDescriptor_3d5cca1aa5b69183, []int{6} +} +func (m *SnapshotExtensionPayload) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SnapshotExtensionPayload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SnapshotExtensionPayload.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SnapshotExtensionPayload) XXX_Merge(src proto.Message) { + xxx_messageInfo_SnapshotExtensionPayload.Merge(m, src) +} +func (m *SnapshotExtensionPayload) XXX_Size() int { + return m.Size() +} +func (m *SnapshotExtensionPayload) XXX_DiscardUnknown() { + xxx_messageInfo_SnapshotExtensionPayload.DiscardUnknown(m) +} + +var xxx_messageInfo_SnapshotExtensionPayload proto.InternalMessageInfo + +func (m *SnapshotExtensionPayload) GetPayload() []byte { + if m != nil { + return m.Payload + } + return nil +} + +func init() { + proto.RegisterType((*Snapshot)(nil), "cosmos.store.snapshots.v1.Snapshot") + proto.RegisterType((*Metadata)(nil), "cosmos.store.snapshots.v1.Metadata") + proto.RegisterType((*SnapshotItem)(nil), "cosmos.store.snapshots.v1.SnapshotItem") + proto.RegisterType((*SnapshotStoreItem)(nil), "cosmos.store.snapshots.v1.SnapshotStoreItem") + proto.RegisterType((*SnapshotIAVLItem)(nil), "cosmos.store.snapshots.v1.SnapshotIAVLItem") + 
proto.RegisterType((*SnapshotExtensionMeta)(nil), "cosmos.store.snapshots.v1.SnapshotExtensionMeta") + proto.RegisterType((*SnapshotExtensionPayload)(nil), "cosmos.store.snapshots.v1.SnapshotExtensionPayload") +} + +func init() { + proto.RegisterFile("cosmos/store/snapshots/v1/snapshot.proto", fileDescriptor_3d5cca1aa5b69183) +} + +var fileDescriptor_3d5cca1aa5b69183 = []byte{ + // 496 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x41, 0x6f, 0xd3, 0x30, + 0x14, 0x8e, 0xd7, 0xb4, 0x74, 0x2f, 0x41, 0xea, 0xac, 0x81, 0x02, 0x87, 0x2c, 0x84, 0x03, 0x91, + 0x80, 0x94, 0x65, 0x1c, 0xb9, 0x50, 0x98, 0x94, 0x09, 0x90, 0x26, 0x4f, 0xe2, 0xc0, 0x65, 0xf2, + 0x56, 0xd3, 0x44, 0x6d, 0xe2, 0xaa, 0xf6, 0x22, 0xfa, 0x2f, 0xf8, 0x23, 0xfc, 0x8f, 0x1d, 0x77, + 0xe4, 0x34, 0x50, 0xfb, 0x47, 0x90, 0xed, 0x26, 0xa0, 0x6d, 0x45, 0xdb, 0xed, 0x7d, 0x2f, 0xdf, + 0xf7, 0xf9, 0xf9, 0xcb, 0x33, 0x44, 0xa7, 0x5c, 0x14, 0x5c, 0xf4, 0x85, 0xe4, 0x33, 0xd6, 0x17, + 0x25, 0x9d, 0x8a, 0x8c, 0x4b, 0xd1, 0xaf, 0x76, 0x1b, 0x10, 0x4f, 0x67, 0x5c, 0x72, 0xfc, 0xc8, + 0x30, 0x63, 0xcd, 0x8c, 0x1b, 0x66, 0x5c, 0xed, 0x3e, 0xde, 0x1e, 0xf1, 0x11, 0xd7, 0xac, 0xbe, + 0xaa, 0x8c, 0x20, 0xfc, 0x81, 0xa0, 0x7b, 0xb4, 0xa2, 0xe1, 0x87, 0xd0, 0xc9, 0x58, 0x3e, 0xca, + 0xa4, 0x87, 0x02, 0x14, 0xd9, 0x64, 0x85, 0x54, 0xff, 0x2b, 0x9f, 0x15, 0x54, 0x7a, 0x1b, 0x01, + 0x8a, 0xee, 0x93, 0x15, 0x52, 0xfd, 0xd3, 0xec, 0xac, 0x1c, 0x0b, 0xaf, 0x65, 0xfa, 0x06, 0x61, + 0x0c, 0x76, 0x46, 0x45, 0xe6, 0xd9, 0x01, 0x8a, 0x5c, 0xa2, 0x6b, 0xbc, 0x0f, 0xdd, 0x82, 0x49, + 0x3a, 0xa4, 0x92, 0x7a, 0xed, 0x00, 0x45, 0x4e, 0xf2, 0x34, 0x5e, 0x3b, 0x6c, 0xfc, 0x69, 0x45, + 0x1d, 0xd8, 0xe7, 0x97, 0x3b, 0x16, 0x69, 0xa4, 0xe1, 0x4b, 0xe8, 0xd6, 0xdf, 0xf0, 0x13, 0x70, + 0xf5, 0x81, 0xc7, 0xea, 0x00, 0x26, 0x3c, 0x14, 0xb4, 0x22, 0x97, 0x38, 0xba, 0x97, 0xea, 0x56, + 0xf8, 0x6b, 0x03, 0xdc, 0xfa, 0x7a, 0x07, 0x92, 0x15, 0xf8, 0x3d, 0xb4, 0xf5, 0x71, 0xfa, 0x86, + 0x4e, 0xf2, 0xe2, 0x3f, 0x33, 0xd4, 0xba, 0x23, 0xf5, 0x49, 0x89, 0x53, 0x8b, 0x18, 0x31, 0xfe, + 0x00, 0x76, 0x4e, 0xab, 0x89, 0x8e, 0xc3, 0x49, 0x9e, 0xdf, 0xc2, 0xe4, 0xe0, 0xed, 0xe7, 0x8f, + 0xca, 0x63, 0xd0, 0x5d, 0x5c, 0xee, 0xd8, 0x0a, 0xa5, 0x16, 0xd1, 0x26, 0xf8, 0x10, 0x36, 0xd9, + 0x37, 0xc9, 0x4a, 0x91, 0xf3, 0x52, 0x07, 0xe9, 0x24, 0xaf, 0x6e, 0xe1, 0xb8, 0x5f, 0x6b, 0x54, + 0x1e, 0xa9, 0x45, 0xfe, 0x9a, 0xe0, 0x13, 0xd8, 0x6a, 0xc0, 0xf1, 0x94, 0xce, 0x27, 0x9c, 0x0e, + 0xf5, 0xcf, 0x70, 0x92, 0xbd, 0xbb, 0x38, 0x1f, 0x1a, 0x69, 0x6a, 0x91, 0x1e, 0xbb, 0xd2, 0x1b, + 0x74, 0xc0, 0xce, 0x25, 0x2b, 0xc2, 0x67, 0xb0, 0x75, 0x2d, 0x28, 0xb5, 0x00, 0x25, 0x2d, 0x4c, + 0xc8, 0x9b, 0x44, 0xd7, 0xe1, 0x04, 0x7a, 0x57, 0xc3, 0xc0, 0x3d, 0x68, 0x8d, 0xd9, 0x5c, 0xd3, + 0x5c, 0xa2, 0x4a, 0xbc, 0x0d, 0xed, 0x8a, 0x4e, 0xce, 0x98, 0x8e, 0xd6, 0x25, 0x06, 0x60, 0x0f, + 0xee, 0x55, 0x6c, 0xd6, 0x04, 0xd4, 0x22, 0x35, 0xfc, 0x67, 0x65, 0xd5, 0xfd, 0xda, 0xf5, 0xca, + 0x86, 0xef, 0xe0, 0xc1, 0x8d, 0x41, 0xdd, 0x34, 0xda, 0xba, 0xfd, 0x0e, 0x5f, 0x83, 0xb7, 0x2e, + 0x13, 0x35, 0x52, 0x9d, 0xac, 0x19, 0xbf, 0x86, 0x83, 0x37, 0xe7, 0x0b, 0x1f, 0x5d, 0x2c, 0x7c, + 0xf4, 0x7b, 0xe1, 0xa3, 0xef, 0x4b, 0xdf, 0xba, 0x58, 0xfa, 0xd6, 0xcf, 0xa5, 0x6f, 0x7d, 0x09, + 0x4d, 0xf6, 0x62, 0x38, 0x8e, 0x73, 0x7e, 0xed, 0x35, 0xcb, 0xf9, 0x94, 0x89, 0x93, 0x8e, 0x7e, + 0x97, 0x7b, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xef, 0xe9, 0x8e, 0x10, 0xf4, 0x03, 0x00, 0x00, +} + +func (m *Snapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA 
= make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSnapshot(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x22 + } + if m.Chunks != 0 { + i = encodeVarintSnapshot(dAtA, i, uint64(m.Chunks)) + i-- + dAtA[i] = 0x18 + } + if m.Format != 0 { + i = encodeVarintSnapshot(dAtA, i, uint64(m.Format)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintSnapshot(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Metadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChunkHashes) > 0 { + for iNdEx := len(m.ChunkHashes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ChunkHashes[iNdEx]) + copy(dAtA[i:], m.ChunkHashes[iNdEx]) + i = encodeVarintSnapshot(dAtA, i, uint64(len(m.ChunkHashes[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SnapshotItem) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotItem) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SnapshotItem) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Item != nil { + { + size := m.Item.Size() + i -= size + if _, err := m.Item.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *SnapshotItem_Store) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SnapshotItem_Store) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Store != nil { + { + size, err := m.Store.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSnapshot(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *SnapshotItem_IAVL) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SnapshotItem_IAVL) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.IAVL != nil { + { + size, err := m.IAVL.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSnapshot(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *SnapshotItem_Extension) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + 
+func (m *SnapshotItem_Extension) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Extension != nil { + { + size, err := m.Extension.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSnapshot(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *SnapshotItem_ExtensionPayload) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SnapshotItem_ExtensionPayload) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExtensionPayload != nil { + { + size, err := m.ExtensionPayload.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSnapshot(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *SnapshotStoreItem) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotStoreItem) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SnapshotStoreItem) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SnapshotIAVLItem) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotIAVLItem) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SnapshotIAVLItem) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Height != 0 { + i = encodeVarintSnapshot(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x20 + } + if m.Version != 0 { + i = encodeVarintSnapshot(dAtA, i, uint64(m.Version)) + i-- + dAtA[i] = 0x18 + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SnapshotExtensionMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotExtensionMeta) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SnapshotExtensionMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Format != 0 { + i = encodeVarintSnapshot(dAtA, i, uint64(m.Format)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SnapshotExtensionPayload) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err 
+ } + return dAtA[:n], nil +} + +func (m *SnapshotExtensionPayload) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SnapshotExtensionPayload) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Payload) > 0 { + i -= len(m.Payload) + copy(dAtA[i:], m.Payload) + i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Payload))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintSnapshot(dAtA []byte, offset int, v uint64) int { + offset -= sovSnapshot(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Snapshot) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovSnapshot(uint64(m.Height)) + } + if m.Format != 0 { + n += 1 + sovSnapshot(uint64(m.Format)) + } + if m.Chunks != 0 { + n += 1 + sovSnapshot(uint64(m.Chunks)) + } + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovSnapshot(uint64(l)) + } + l = m.Metadata.Size() + n += 1 + l + sovSnapshot(uint64(l)) + return n +} + +func (m *Metadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ChunkHashes) > 0 { + for _, b := range m.ChunkHashes { + l = len(b) + n += 1 + l + sovSnapshot(uint64(l)) + } + } + return n +} + +func (m *SnapshotItem) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Item != nil { + n += m.Item.Size() + } + return n +} + +func (m *SnapshotItem_Store) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Store != nil { + l = m.Store.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + return n +} +func (m *SnapshotItem_IAVL) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IAVL != nil { + l = m.IAVL.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + return n +} +func (m *SnapshotItem_Extension) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Extension != nil { + l = m.Extension.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + return n +} +func (m *SnapshotItem_ExtensionPayload) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExtensionPayload != nil { + l = m.ExtensionPayload.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + return n +} +func (m *SnapshotStoreItem) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovSnapshot(uint64(l)) + } + return n +} + +func (m *SnapshotIAVLItem) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovSnapshot(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovSnapshot(uint64(l)) + } + if m.Version != 0 { + n += 1 + sovSnapshot(uint64(m.Version)) + } + if m.Height != 0 { + n += 1 + sovSnapshot(uint64(m.Height)) + } + return n +} + +func (m *SnapshotExtensionMeta) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovSnapshot(uint64(l)) + } + if m.Format != 0 { + n += 1 + sovSnapshot(uint64(m.Format)) + } + return n +} + +func (m *SnapshotExtensionPayload) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Payload) + if l > 0 { + n += 1 + l + sovSnapshot(uint64(l)) + } + return n +} + +func sovSnapshot(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozSnapshot(x uint64) (n int) { + return 
sovSnapshot(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Snapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + } + m.Format = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Format |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) + } + m.Chunks = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Chunks |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSnapshot + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSnapshot + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSnapshot(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSnapshot + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Metadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChunkHashes", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSnapshot + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChunkHashes = append(m.ChunkHashes, make([]byte, postIndex-iNdEx)) + copy(m.ChunkHashes[len(m.ChunkHashes)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSnapshot(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSnapshot + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotItem) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotItem: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotItem: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Store", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSnapshot + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SnapshotStoreItem{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Item = &SnapshotItem_Store{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IAVL", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSnapshot + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SnapshotIAVLItem{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Item = &SnapshotItem_IAVL{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extension", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSnapshot + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SnapshotExtensionMeta{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Item = &SnapshotItem_Extension{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtensionPayload", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSnapshot + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SnapshotExtensionPayload{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Item = &SnapshotItem_ExtensionPayload{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSnapshot(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSnapshot + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotStoreItem) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotStoreItem: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotStoreItem: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSnapshot + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSnapshot(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSnapshot + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotIAVLItem) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotIAVLItem: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotIAVLItem: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSnapshot + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSnapshot + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSnapshot(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSnapshot + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotExtensionMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotExtensionMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotExtensionMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSnapshot + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + } + m.Format = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Format |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSnapshot(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSnapshot + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotExtensionPayload) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotExtensionPayload: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotExtensionPayload: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSnapshot + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = append(m.Payload[:0], dAtA[iNdEx:postIndex]...) + if m.Payload == nil { + m.Payload = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSnapshot(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSnapshot + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSnapshot(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnapshot + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnapshot + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnapshot + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthSnapshot + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupSnapshot + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthSnapshot + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthSnapshot = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSnapshot = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupSnapshot = fmt.Errorf("proto: unexpected end of group") +) diff --git a/cosmos-sdk-store/snapshots/types/snapshotter.go b/cosmos-sdk-store/snapshots/types/snapshotter.go new file mode 100755 index 000000000..de9fcfe3d --- /dev/null +++ b/cosmos-sdk-store/snapshots/types/snapshotter.go @@ -0,0 +1,56 @@ +package types + +import ( + protoio "github.com/cosmos/gogoproto/io" +) + +// Snapshotter is something that can create and restore snapshots, consisting of streamed binary +// chunks - all of which must be read from the 
channel and closed. If an unsupported format is
+// given, it must return ErrUnknownFormat (possibly wrapped with fmt.Errorf).
+type Snapshotter interface {
+	// Snapshot writes snapshot items into the protobuf writer.
+	Snapshot(height uint64, protoWriter protoio.Writer) error
+
+	// PruneSnapshotHeight prunes the given height according to the prune strategy.
+	// If the strategy is PruneNothing, this is a no-op.
+	// For any other strategy, this height is persisted until it is
+	// less than <current height> - KeepRecent and <current height> % Interval == 0.
+	PruneSnapshotHeight(height int64)
+
+	// SetSnapshotInterval sets the interval at which snapshots are taken.
+	// It is used by the store that implements the Snapshotter interface
+	// to determine which heights to retain until after the snapshot is complete.
+	SetSnapshotInterval(snapshotInterval uint64)
+
+	// Restore restores a state snapshot, taking a reader of the protobuf message stream as input.
+	Restore(height uint64, format uint32, protoReader protoio.Reader) (SnapshotItem, error)
+}
+
+// ExtensionPayloadReader reads extension payloads; it returns io.EOF when it
+// reaches either the end of the stream or the extension boundaries.
+type ExtensionPayloadReader = func() ([]byte, error)
+
+// ExtensionPayloadWriter is a helper to write extension payloads to the underlying stream.
+type ExtensionPayloadWriter = func([]byte) error
+
+// ExtensionSnapshotter is an extension Snapshotter that is appended to the snapshot stream.
+// An ExtensionSnapshotter has a unique name and manages its own internal formats.
+type ExtensionSnapshotter interface {
+	// SnapshotName returns the name of the snapshotter; it should be unique in the manager.
+	SnapshotName() string
+
+	// SnapshotFormat returns the default format the extension snapshotter uses to encode its
+	// payloads when taking a snapshot.
+	// It is defined within the extension, separate from the global format for the whole state-sync snapshot.
+	SnapshotFormat() uint32
+
+	// SupportedFormats returns a list of formats it can restore from.
+	SupportedFormats() []uint32
+
+	// SnapshotExtension writes extension payloads into the underlying protobuf stream.
+	SnapshotExtension(height uint64, payloadWriter ExtensionPayloadWriter) error
+
+	// RestoreExtension restores an extension state snapshot;
+	// the payload reader returns `io.EOF` when it reaches the extension boundaries.
+	RestoreExtension(height uint64, format uint32, payloadReader ExtensionPayloadReader) error
+}
diff --git a/cosmos-sdk-store/snapshots/types/util.go b/cosmos-sdk-store/snapshots/types/util.go
new file mode 100755
index 000000000..861647088
--- /dev/null
+++ b/cosmos-sdk-store/snapshots/types/util.go
@@ -0,0 +1,16 @@
+package types
+
+import (
+	protoio "github.com/cosmos/gogoproto/io"
+)
+
+// WriteExtensionPayload writes an extension payload for the current extension snapshotter.
+func WriteExtensionPayload(protoWriter protoio.Writer, payload []byte) error {
+	return protoWriter.WriteMsg(&SnapshotItem{
+		Item: &SnapshotItem_ExtensionPayload{
+			ExtensionPayload: &SnapshotExtensionPayload{
+				Payload: payload,
+			},
+		},
+	})
+}
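For illustration, a minimal `ExtensionSnapshotter` built on the interfaces above might look like the following sketch. The `ExampleSnapshotter` type, its single-blob state, and its format number are hypothetical, invented for this example; only the interface methods, `ErrUnknownFormat`, and the payload reader/writer types come from the files in this diff. The manager's `payloadWriter` is typically backed by `WriteExtensionPayload` from `util.go` above.

```go
package example

import (
	"errors"
	"io"

	snapshottypes "cosmossdk.io/store/snapshots/types"
)

// ExampleSnapshotter is a hypothetical extension snapshotter that snapshots a
// single opaque blob. Only the method set is prescribed by ExtensionSnapshotter.
type ExampleSnapshotter struct {
	state []byte
}

// Compile-time check that the sketch satisfies the interface.
var _ snapshottypes.ExtensionSnapshotter = (*ExampleSnapshotter)(nil)

func (s *ExampleSnapshotter) SnapshotName() string       { return "example" }
func (s *ExampleSnapshotter) SnapshotFormat() uint32     { return 1 }
func (s *ExampleSnapshotter) SupportedFormats() []uint32 { return []uint32{1} }

// SnapshotExtension writes the extension state as a single payload; each call
// to payloadWriter emits one SnapshotItem_ExtensionPayload message.
func (s *ExampleSnapshotter) SnapshotExtension(height uint64, payloadWriter snapshottypes.ExtensionPayloadWriter) error {
	return payloadWriter(s.state)
}

// RestoreExtension reads payloads until io.EOF marks the extension boundary.
func (s *ExampleSnapshotter) RestoreExtension(height uint64, format uint32, payloadReader snapshottypes.ExtensionPayloadReader) error {
	if format != 1 {
		return snapshottypes.ErrUnknownFormat
	}
	for {
		payload, err := payloadReader()
		if errors.Is(err, io.EOF) {
			return nil
		} else if err != nil {
			return err
		}
		s.state = append(s.state[:0], payload...)
	}
}
```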
diff --git a/cosmos-sdk-store/sonar-project.properties b/cosmos-sdk-store/sonar-project.properties
new file mode 100755
index 000000000..666c3c9c0
--- /dev/null
+++ b/cosmos-sdk-store/sonar-project.properties
@@ -0,0 +1,14 @@
+sonar.projectKey=cosmos-sdk-store
+sonar.organization=cosmos
+
+sonar.projectName=Cosmos SDK - Store
+sonar.project.monorepo.enabled=true
+
+sonar.sources=.
+sonar.exclusions=**/*_test.go
+sonar.tests=.
+sonar.test.inclusions=**/*_test.go
+sonar.go.coverage.reportPaths=coverage.out
+
+sonar.sourceEncoding=UTF-8
+sonar.scm.provider=git
\ No newline at end of file
diff --git a/cosmos-sdk-store/store.go b/cosmos-sdk-store/store.go
new file mode 100755
index 000000000..e67457546
--- /dev/null
+++ b/cosmos-sdk-store/store.go
@@ -0,0 +1,19 @@
+package store
+
+import (
+	dbm "github.com/cosmos/cosmos-db"
+
+	"cosmossdk.io/log"
+	"cosmossdk.io/store/cache"
+	"cosmossdk.io/store/metrics"
+	"cosmossdk.io/store/rootmulti"
+	"cosmossdk.io/store/types"
+)
+
+func NewCommitMultiStore(db dbm.DB, logger log.Logger, metricGatherer metrics.StoreMetrics) types.CommitMultiStore {
+	return rootmulti.NewStore(db, logger, metricGatherer)
+}
+
+func NewCommitKVStoreCacheManager() types.MultiStorePersistentCache {
+	return cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize)
+}
diff --git a/cosmos-sdk-store/streaming/README.md b/cosmos-sdk-store/streaming/README.md
new file mode 100755
index 000000000..faa304dec
--- /dev/null
+++ b/cosmos-sdk-store/streaming/README.md
@@ -0,0 +1,30 @@
+# Cosmos-SDK Plugins
+
+This package contains an extensible plugin system for the Cosmos-SDK. The plugin system leverages the [hashicorp/go-plugin](https://github.com/hashicorp/go-plugin) system. This system is designed to work over RPC.
+
+Although `go-plugin` is built to work over RPC, it is currently only designed to work over a local network.
+
+## Prerequisites
+
+For an overview of the features supported by the `go-plugin` system, please see https://github.com/hashicorp/go-plugin. The `go-plugin` documentation is located [here](https://github.com/hashicorp/go-plugin/tree/master/docs). You can also directly visit any of the links below:
+
+* [Writing plugins without Go](https://github.com/hashicorp/go-plugin/blob/master/docs/guide-plugin-write-non-go.md)
+* [Go Plugin Tutorial](https://github.com/hashicorp/go-plugin/blob/master/docs/extensive-go-plugin-tutorial.md)
+* [Plugin Internals](https://github.com/hashicorp/go-plugin/blob/master/docs/internals.md)
+* [Plugin Architecture](https://www.youtube.com/watch?v=SRvm3zQQc1Q) (start here)
+
+## Exposing plugins
+
+To expose plugins to the plugin system, you will need to:
+
+1. Implement the gRPC message protocol service of the plugin
+2. Build the plugin binary
+3. Export it
+
+Read the plugin documentation in the [Streaming Plugins](#streaming-plugins) section for examples of how to build a plugin.
+
+## Streaming Plugins
+
+List of supported streaming plugins:
+
+* [ABCI State Streaming Plugin](abci/README.md)
diff --git a/cosmos-sdk-store/streaming/abci/README.md b/cosmos-sdk-store/streaming/abci/README.md
new file mode 100755
index 000000000..08aaf12e8
--- /dev/null
+++ b/cosmos-sdk-store/streaming/abci/README.md
@@ -0,0 +1,210 @@
+# ABCI and State Streaming Plugin (gRPC)
+
+The `BaseApp` package contains the interface for an [ABCIListener](https://github.com/cosmos/cosmos-sdk/blob/main/baseapp/streaming.go)
+service used to write state changes out from individual KVStores to external systems,
+as described in [ADR-038](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-038-state-listening.md).
+
+Specific `ABCIListener` service implementations are written and loaded as [hashicorp/go-plugin](https://github.com/hashicorp/go-plugin) plugins.
+
+## Implementation
+
+In this section we describe the implementation of the `ABCIListener` interface as a gRPC service.
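+
+For orientation, the interface being implemented looks roughly like the sketch below. This is
+not copied from `baseapp/streaming.go`; it is reconstructed from the `ListenFinalizeBlock` and
+`ListenCommit` signatures used by the example plugins later in this document, so treat the
+definition in `baseapp/streaming.go` as authoritative.
+
+```go
+package baseapp
+
+import (
+	"context"
+
+	abci "github.com/cometbft/cometbft/abci/types"
+
+	store "cosmossdk.io/store/types"
+)
+
+// ABCIListener is the interface streaming plugins implement (sketch).
+type ABCIListener interface {
+	// ListenFinalizeBlock is called with the FinalizeBlock request and response
+	// once a block has been processed.
+	ListenFinalizeBlock(ctx context.Context, req abci.RequestFinalizeBlock, res abci.ResponseFinalizeBlock) error
+
+	// ListenCommit is called with the Commit response and the full set of KV
+	// store changes written during the block.
+	ListenCommit(ctx context.Context, res abci.ResponseCommit, changeSet []*store.StoreKVPair) error
+}
+```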
+
+### Service Protocol
+
+The companion service protocol for the `ABCIListener` interface is described below.
+See [proto/cosmos/store/streaming/abci/grpc.proto](https://github.com/cosmos/cosmos-sdk/blob/main/proto/cosmos/store/streaming/abci/grpc.proto) for full details.
+
+```protobuf reference
+https://github.com/cosmos/cosmos-sdk/blob/6cee22df52eb0cbb30e351fbb41f66d26c1f8300/proto/cosmos/store/streaming/abci/grpc.proto#L1-L36
+```
+
+### Generating the Code
+
+To generate the stubs that the local client implementation can call, run the following command:
+
+```shell
+make proto-gen
+```
+
+For other languages you'll need to [download](https://github.com/cosmos/cosmos-sdk/blob/main/third_party/proto/README.md)
+the Cosmos SDK protos into your project and compile them. For language-specific compilation instructions, visit
+[https://github.com/grpc](https://github.com/grpc) and look in the `examples` folder of your
+language of choice (`https://github.com/grpc/grpc-{language}/tree/master/examples`), and see [https://grpc.io](https://grpc.io)
+for the documentation.
+
+### gRPC Client and Server
+
+Implementing the ABCIListener gRPC client and server is a simple and straightforward process.
+
+To create the client and server we create a `ListenerGRPCPlugin` struct that implements the
+`plugin.GRPCPlugin` interface and an `Impl` property that will contain a concrete implementation
+of the `ABCIListener` plugin written in Go.
+
+#### The Interface
+
+The `BaseApp` `ABCIListener` interface defines the plugin's capabilities.
+
+Boilerplate RPC implementation example of the `ABCIListener` interface. ([store/streaming/abci/grpc.go](https://github.com/cosmos/cosmos-sdk/blob/main/store/streaming/abci/grpc.go))
+
+```go reference
+https://github.com/cosmos/cosmos-sdk/blob/f851e188b3b9d46e7c63fa514ad137e6d558fdd9/store/streaming/abci/grpc.go#L13-L79
+```
+
+Our `ABCIListener` service plugin. ([store/streaming/plugins/abci/v1/interface.go](interface.go))
+
+```go reference
+https://github.com/cosmos/cosmos-sdk/blob/f851e188b3b9d46e7c63fa514ad137e6d558fdd9/store/streaming/abci/interface.go#L13-L45
+```
+
+#### Plugin Implementation
+
+Plugin implementations can live in a completely separate package but will need access
+to the `ABCIListener` interface. One thing to note here is that plugin implementations
+defined in the `ListenerGRPCPlugin.Impl` property are **only** required when building
+plugins in Go; they are compiled into the plugin binary, and the `GRPCServer.Impl` calls
+methods on this out-of-process plugin.
+
+For Go plugins this is all that is required to process data that is sent over gRPC.
+This provides the advantage of writing quick plugins that push data to different
+external systems (e.g. a database, a file, Kafka) without the need to implement
+the gRPC server endpoints yourself.
+
+```go
+// MyPlugin is the implementation of the ABCIListener interface
+// For Go plugins this is all that is required to process data sent over gRPC.
+type MyPlugin struct {
+	...
+}
+
+func (a MyPlugin) ListenFinalizeBlock(ctx context.Context, req abci.RequestFinalizeBlock, res abci.ResponseFinalizeBlock) error {
+	// process data
+	return nil
+}
+
+func (a MyPlugin) ListenCommit(ctx context.Context, res abci.ResponseCommit, changeSet []*store.StoreKVPair) error {
+	// process data
+	return nil
+}
+
+func main() {
+	plugin.Serve(&plugin.ServeConfig{
+		HandshakeConfig: v1.Handshake,
+		Plugins: map[string]plugin.Plugin{
+			"abci": &ABCIListenerGRPCPlugin{Impl: &MyPlugin{}},
+		},
+
+		// A non-nil value here enables gRPC serving for this streaming...
+		GRPCServer: plugin.DefaultGRPCServer,
+	})
+}
+```
+
+## Plugin Loading System
+
+The SDK provides a general-purpose plugin loading system that can load not just
+the `ABCIListener` service plugin but other protocol services as well. You can take a look
+at how plugins are loaded by the SDK in [store/streaming/streaming.go](https://github.com/cosmos/cosmos-sdk/blob/main/store/streaming/streaming.go).
+
+You'll need to add this to your `app.go`:
+
+```go
+// app.go
+
+func NewApp(...) *App {
+
+	...
+
+	// register streaming services
+	streamingCfg := cast.ToStringMap(appOpts.Get(baseapp.StreamingTomlKey))
+	for service := range streamingCfg {
+		pluginKey := fmt.Sprintf("%s.%s.%s", baseapp.StreamingTomlKey, service, baseapp.StreamingABCIPluginTomlKey)
+		pluginName := strings.TrimSpace(cast.ToString(appOpts.Get(pluginKey)))
+		if len(pluginName) > 0 {
+			logLevel := cast.ToString(appOpts.Get(flags.FlagLogLevel))
+			plugin, err := streaming.NewStreamingPlugin(pluginName, logLevel)
+			if err != nil {
+				tmos.Exit(err.Error())
+			}
+			if err := baseapp.RegisterStreamingPlugin(bApp, appOpts, keys, plugin); err != nil {
+				tmos.Exit(err.Error())
+			}
+		}
+	}
+
+	...
+}
+```
+
+## Configuration
+
+Update the streaming section in `app.toml`:
+
+```toml
+# Streaming allows nodes to stream state to external systems
+[streaming]
+
+# streaming.abci specifies the configuration for the ABCI Listener streaming service
+[streaming.abci]
+
+# List of kv store keys to stream out via gRPC
+# Set to ["*"] to expose all keys.
+keys = ["*"]
+
+# The plugin name used for streaming via gRPC
+# Supported plugins: abci
+plugin = "abci"
+
+# stop-node-on-err specifies whether to stop the node when the plugin returns an error
+stop-node-on-err = true
+```
+
+## Updating the protocol
+
+If you update the protocol buffers file, you can regenerate the file and plugins using the
+following commands from the project root directory. You do not need to run this if you're
+just trying the examples; you can skip ahead to the [Testing](#testing) section.
+
+```shell
+make proto-gen
+```
+
+* stdout plugin; from inside the `store/` dir, run:
+
+```shell
+go build -o streaming/abci/examples/stdout/stdout streaming/abci/examples/stdout/stdout.go
+```
+
+* file plugin (writes to `~/`); from inside the `store/` dir, run:
+
+```shell
+go build -o streaming/abci/examples/file/file streaming/abci/examples/file/file.go
+```
+
+### Testing
+
+Export a plugin from one of the Go or Python examples.
+
+* stdout plugin
+
+```shell
+export COSMOS_SDK_ABCI="{path to}/cosmos-sdk/store/streaming/abci/examples/stdout/stdout"
+```
+
+* file plugin (writes to ~/)
+
+```shell
+export COSMOS_SDK_ABCI="{path to}/cosmos-sdk/store/streaming/abci/examples/file/file"
+```
+
+where `{path to}` is the parent path to the `cosmos-sdk` repo on your system.
Test:

```shell
make test-sim-nondeterminism-streaming
```

The plugin system looks for the plugin binary in the `COSMOS_SDK_{PLUGIN_NAME}` environment
variable set above and errors out if it cannot find it. `{PLUGIN_NAME}` is the uppercased
value of the `streaming.abci.plugin` TOML configuration setting.
diff --git a/cosmos-sdk-store/streaming/abci/examples/file/.gitignore b/cosmos-sdk-store/streaming/abci/examples/file/.gitignore
new file mode 100755
index 000000000..bc8ff7906
--- /dev/null
+++ b/cosmos-sdk-store/streaming/abci/examples/file/.gitignore
@@ -0,0 +1,2 @@
# ignore the file plugin binary
file
\ No newline at end of file
diff --git a/cosmos-sdk-store/streaming/abci/examples/file/README.md b/cosmos-sdk-store/streaming/abci/examples/file/README.md
new file mode 100755
index 000000000..27e5f8956
--- /dev/null
+++ b/cosmos-sdk-store/streaming/abci/examples/file/README.md
@@ -0,0 +1,17 @@
# File Plugin

The file plugin is an example plugin written in Go. It is intended for local testing and should not be used in production environments.

## Build

To build the plugin run the following commands:

```shell
cd store
```

```shell
go build -o streaming/abci/examples/file/file streaming/abci/examples/file/file.go
```

* The plugin will write files to the user's home directory `~/`.
diff --git a/cosmos-sdk-store/streaming/abci/examples/file/file.go b/cosmos-sdk-store/streaming/abci/examples/file/file.go
new file mode 100755
index 000000000..150b8cafc
--- /dev/null
+++ b/cosmos-sdk-store/streaming/abci/examples/file/file.go
@@ -0,0 +1,81 @@
package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	abci "github.com/cometbft/cometbft/abci/types"
	"github.com/hashicorp/go-plugin"

	streamingabci "cosmossdk.io/store/streaming/abci"
	store "cosmossdk.io/store/types"
)

// FilePlugin is the implementation of the baseapp.ABCIListener interface.
// For Go plugins this is all that is required to process data sent over gRPC.
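// It appends each message to .txt files (finalize-block-req, finalize-block-res,
// commit-res, state-change) in the user's home directory.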
type FilePlugin struct {
	BlockHeight int64
}

func (a *FilePlugin) writeToFile(file string, data []byte) error {
	home, err := os.UserHomeDir()
	if err != nil {
		return err
	}

	filename := fmt.Sprintf("%s/%s.txt", home, file)
	f, err := os.OpenFile(filepath.Clean(filename), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o600)
	if err != nil {
		return err
	}

	if _, err := f.Write(data); err != nil {
		f.Close() // ignore error; Write error takes precedence
		return err
	}

	if err := f.Close(); err != nil {
		return err
	}

	return nil
}

func (a *FilePlugin) ListenFinalizeBlock(ctx context.Context, req abci.RequestFinalizeBlock, res abci.ResponseFinalizeBlock) error {
	a.BlockHeight = req.Height // track the height so ListenCommit can report it
	d1 := []byte(fmt.Sprintf("%d:::%v\n", a.BlockHeight, req))
	d2 := []byte(fmt.Sprintf("%d:::%v\n", a.BlockHeight, res)) // write the response, not the request
	if err := a.writeToFile("finalize-block-req", d1); err != nil {
		return err
	}
	if err := a.writeToFile("finalize-block-res", d2); err != nil {
		return err
	}
	return nil
}

func (a *FilePlugin) ListenCommit(ctx context.Context, res abci.ResponseCommit, changeSet []*store.StoreKVPair) error {
	fmt.Printf("listen-commit: block_height=%d data=%v", a.BlockHeight, changeSet)
	d1 := []byte(fmt.Sprintf("%d:::%v\n", a.BlockHeight, res))
	d2 := []byte(fmt.Sprintf("%d:::%v\n", a.BlockHeight, changeSet))
	if err := a.writeToFile("commit-res", d1); err != nil {
		return err
	}
	if err := a.writeToFile("state-change", d2); err != nil {
		return err
	}
	return nil
}

func main() {
	plugin.Serve(&plugin.ServeConfig{
		HandshakeConfig: streamingabci.Handshake,
		Plugins: map[string]plugin.Plugin{
			"abci": &streamingabci.ListenerGRPCPlugin{Impl: &FilePlugin{}},
		},

		// A non-nil value here enables gRPC serving for this plugin
		GRPCServer: plugin.DefaultGRPCServer,
	})
}
diff --git a/cosmos-sdk-store/streaming/abci/examples/stdout/stdout b/cosmos-sdk-store/streaming/abci/examples/stdout/stdout
new file mode 100755
index 000000000..93f61a7b9
Binary files /dev/null and b/cosmos-sdk-store/streaming/abci/examples/stdout/stdout differ
diff --git a/cosmos-sdk-store/streaming/abci/examples/stdout/stdout.go b/cosmos-sdk-store/streaming/abci/examples/stdout/stdout.go
new file mode 100755
index 000000000..f1327a586
--- /dev/null
+++ b/cosmos-sdk-store/streaming/abci/examples/stdout/stdout.go
@@ -0,0 +1,43 @@
package main

import (
	"context"
	"fmt"

	abci "github.com/cometbft/cometbft/abci/types"
	"github.com/hashicorp/go-plugin"

	streamingabci "cosmossdk.io/store/streaming/abci"
	store "cosmossdk.io/store/types"
)

// StdoutPlugin is the implementation of the ABCIListener interface.
// For Go plugins this is all that is required to process data sent over gRPC.
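// BlockHeight is captured in ListenFinalizeBlock so that ListenCommit can
// report it; ResponseCommit does not carry the block height itself.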
type StdoutPlugin struct {
	BlockHeight int64
}

func (a *StdoutPlugin) ListenFinalizeBlock(ctx context.Context, req abci.RequestFinalizeBlock, res abci.ResponseFinalizeBlock) error {
	a.BlockHeight = req.Height
	// process tx messages (e.g. send to an external system)
	fmt.Printf("listen-finalize-block: block-height=%d req=%v res=%v", a.BlockHeight, req, res)
	return nil
}

func (a *StdoutPlugin) ListenCommit(ctx context.Context, res abci.ResponseCommit, changeSet []*store.StoreKVPair) error {
	// process block commit messages (e.g. send to an external system)
	fmt.Printf("listen-commit: block_height=%d res=%v data=%v", a.BlockHeight, res, changeSet)
	return nil
}

func main() {
	plugin.Serve(&plugin.ServeConfig{
		HandshakeConfig: streamingabci.Handshake,
		Plugins: map[string]plugin.Plugin{
			"abci": &streamingabci.ListenerGRPCPlugin{Impl: &StdoutPlugin{}},
		},

		// A non-nil value here enables gRPC serving for this plugin
		GRPCServer: plugin.DefaultGRPCServer,
	})
}
diff --git a/cosmos-sdk-store/streaming/abci/grpc.go b/cosmos-sdk-store/streaming/abci/grpc.go
new file mode 100755
index 000000000..5984e8f95
--- /dev/null
+++ b/cosmos-sdk-store/streaming/abci/grpc.go
@@ -0,0 +1,79 @@
package abci

import (
	"context"
	"os"

	abci "github.com/cometbft/cometbft/abci/types"
	"github.com/hashicorp/go-plugin"

	storetypes "cosmossdk.io/store/types"
)

var _ storetypes.ABCIListener = (*GRPCClient)(nil)

// GRPCClient is an implementation of the ABCIListener interface that talks over RPC.
type GRPCClient struct {
	client ABCIListenerServiceClient
}

// ListenFinalizeBlock listens to finalize-block requests and responses.
// In addition, it retrieves a types.Context from a context.Context instance.
// It panics if a types.Context was not properly attached.
// When the node is configured to stop on listening errors,
// it will terminate immediately and exit with a non-zero code.
func (m *GRPCClient) ListenFinalizeBlock(goCtx context.Context, req abci.RequestFinalizeBlock, res abci.ResponseFinalizeBlock) error {
	ctx := goCtx.(storetypes.Context)
	sm := ctx.StreamingManager()
	request := &ListenFinalizeBlockRequest{Req: &req, Res: &res}
	_, err := m.client.ListenFinalizeBlock(goCtx, request)
	if err != nil && sm.StopNodeOnErr {
		ctx.Logger().Error("FinalizeBlock listening hook failed", "height", ctx.BlockHeight(), "err", err)
		cleanupAndExit()
	}
	return err
}

// ListenCommit listens to commit responses and state changes for the current block.
// In addition, it retrieves a types.Context from a context.Context instance.
// It panics if a types.Context was not properly attached.
// When the node is configured to stop on listening errors,
// it will terminate immediately and exit with a non-zero code.
func (m *GRPCClient) ListenCommit(goCtx context.Context, res abci.ResponseCommit, changeSet []*storetypes.StoreKVPair) error {
	ctx := goCtx.(storetypes.Context)
	sm := ctx.StreamingManager()
	request := &ListenCommitRequest{BlockHeight: ctx.BlockHeight(), Res: &res, ChangeSet: changeSet}
	_, err := m.client.ListenCommit(goCtx, request)
	if err != nil && sm.StopNodeOnErr {
		ctx.Logger().Error("Commit listening hook failed", "height", ctx.BlockHeight(), "err", err)
		cleanupAndExit()
	}
	return err
}

func cleanupAndExit() {
	plugin.CleanupClients()
	os.Exit(1)
}

var _ ABCIListenerServiceServer = (*GRPCServer)(nil)

// GRPCServer is the gRPC server that GRPCClient talks to.
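// It unwraps each incoming request and forwards it to the concrete
// ABCIListener implementation supplied in Impl.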
+type GRPCServer struct { + // This is the real implementation + Impl storetypes.ABCIListener +} + +func (m GRPCServer) ListenFinalizeBlock(ctx context.Context, request *ListenFinalizeBlockRequest) (*ListenFinalizeBlockResponse, error) { + if err := m.Impl.ListenFinalizeBlock(ctx, *request.Req, *request.Res); err != nil { + return nil, err + } + return &ListenFinalizeBlockResponse{}, nil +} + +func (m GRPCServer) ListenCommit(ctx context.Context, request *ListenCommitRequest) (*ListenCommitResponse, error) { + if err := m.Impl.ListenCommit(ctx, *request.Res, request.ChangeSet); err != nil { + return nil, err + } + return &ListenCommitResponse{}, nil +} diff --git a/cosmos-sdk-store/streaming/abci/grpc.pb.go b/cosmos-sdk-store/streaming/abci/grpc.pb.go new file mode 100755 index 000000000..77ae842ad --- /dev/null +++ b/cosmos-sdk-store/streaming/abci/grpc.pb.go @@ -0,0 +1,1047 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cosmos/store/streaming/abci/grpc.proto + +package abci + +import ( + context "context" + types1 "cosmossdk.io/store/types" + fmt "fmt" + types "github.com/cometbft/cometbft/abci/types" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// ListenEndBlockRequest is the request type for the ListenEndBlock RPC method +type ListenFinalizeBlockRequest struct { + Req *types.RequestFinalizeBlock `protobuf:"bytes,1,opt,name=req,proto3" json:"req,omitempty"` + Res *types.ResponseFinalizeBlock `protobuf:"bytes,2,opt,name=res,proto3" json:"res,omitempty"` +} + +func (m *ListenFinalizeBlockRequest) Reset() { *m = ListenFinalizeBlockRequest{} } +func (m *ListenFinalizeBlockRequest) String() string { return proto.CompactTextString(m) } +func (*ListenFinalizeBlockRequest) ProtoMessage() {} +func (*ListenFinalizeBlockRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b98083eb9315fb6, []int{0} +} +func (m *ListenFinalizeBlockRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListenFinalizeBlockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListenFinalizeBlockRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListenFinalizeBlockRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListenFinalizeBlockRequest.Merge(m, src) +} +func (m *ListenFinalizeBlockRequest) XXX_Size() int { + return m.Size() +} +func (m *ListenFinalizeBlockRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListenFinalizeBlockRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListenFinalizeBlockRequest proto.InternalMessageInfo + +func (m *ListenFinalizeBlockRequest) GetReq() *types.RequestFinalizeBlock { + if m != nil { + return m.Req + } + return nil +} + +func (m *ListenFinalizeBlockRequest) GetRes() 
*types.ResponseFinalizeBlock { + if m != nil { + return m.Res + } + return nil +} + +// ListenEndBlockResponse is the response type for the ListenEndBlock RPC method +type ListenFinalizeBlockResponse struct { +} + +func (m *ListenFinalizeBlockResponse) Reset() { *m = ListenFinalizeBlockResponse{} } +func (m *ListenFinalizeBlockResponse) String() string { return proto.CompactTextString(m) } +func (*ListenFinalizeBlockResponse) ProtoMessage() {} +func (*ListenFinalizeBlockResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7b98083eb9315fb6, []int{1} +} +func (m *ListenFinalizeBlockResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListenFinalizeBlockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListenFinalizeBlockResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListenFinalizeBlockResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListenFinalizeBlockResponse.Merge(m, src) +} +func (m *ListenFinalizeBlockResponse) XXX_Size() int { + return m.Size() +} +func (m *ListenFinalizeBlockResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListenFinalizeBlockResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListenFinalizeBlockResponse proto.InternalMessageInfo + +// ListenCommitRequest is the request type for the ListenCommit RPC method +type ListenCommitRequest struct { + // explicitly pass in block height as ResponseCommit does not contain this info + BlockHeight int64 `protobuf:"varint,1,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` + Res *types.ResponseCommit `protobuf:"bytes,2,opt,name=res,proto3" json:"res,omitempty"` + ChangeSet []*types1.StoreKVPair `protobuf:"bytes,3,rep,name=change_set,json=changeSet,proto3" json:"change_set,omitempty"` +} + +func (m *ListenCommitRequest) Reset() { *m = ListenCommitRequest{} } +func (m *ListenCommitRequest) String() string { return proto.CompactTextString(m) } +func (*ListenCommitRequest) ProtoMessage() {} +func (*ListenCommitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b98083eb9315fb6, []int{2} +} +func (m *ListenCommitRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListenCommitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListenCommitRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListenCommitRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListenCommitRequest.Merge(m, src) +} +func (m *ListenCommitRequest) XXX_Size() int { + return m.Size() +} +func (m *ListenCommitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListenCommitRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListenCommitRequest proto.InternalMessageInfo + +func (m *ListenCommitRequest) GetBlockHeight() int64 { + if m != nil { + return m.BlockHeight + } + return 0 +} + +func (m *ListenCommitRequest) GetRes() *types.ResponseCommit { + if m != nil { + return m.Res + } + return nil +} + +func (m *ListenCommitRequest) GetChangeSet() []*types1.StoreKVPair { + if m != nil { + return m.ChangeSet + } + return nil +} + +// ListenCommitResponse is the response type for the ListenCommit RPC method +type ListenCommitResponse struct { +} + +func (m 
*ListenCommitResponse) Reset() { *m = ListenCommitResponse{} } +func (m *ListenCommitResponse) String() string { return proto.CompactTextString(m) } +func (*ListenCommitResponse) ProtoMessage() {} +func (*ListenCommitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7b98083eb9315fb6, []int{3} +} +func (m *ListenCommitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListenCommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListenCommitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListenCommitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListenCommitResponse.Merge(m, src) +} +func (m *ListenCommitResponse) XXX_Size() int { + return m.Size() +} +func (m *ListenCommitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListenCommitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListenCommitResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ListenFinalizeBlockRequest)(nil), "cosmos.store.streaming.abci.ListenFinalizeBlockRequest") + proto.RegisterType((*ListenFinalizeBlockResponse)(nil), "cosmos.store.streaming.abci.ListenFinalizeBlockResponse") + proto.RegisterType((*ListenCommitRequest)(nil), "cosmos.store.streaming.abci.ListenCommitRequest") + proto.RegisterType((*ListenCommitResponse)(nil), "cosmos.store.streaming.abci.ListenCommitResponse") +} + +func init() { + proto.RegisterFile("cosmos/store/streaming/abci/grpc.proto", fileDescriptor_7b98083eb9315fb6) +} + +var fileDescriptor_7b98083eb9315fb6 = []byte{ + // 409 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x93, 0x31, 0x6f, 0xda, 0x40, + 0x14, 0xc7, 0x31, 0x96, 0x2a, 0xf5, 0x60, 0x3a, 0xaa, 0x0a, 0x19, 0xd5, 0x05, 0xab, 0x45, 0x4c, + 0xe7, 0x9a, 0x0e, 0x20, 0x75, 0x69, 0x41, 0xaa, 0x5a, 0xb5, 0x43, 0x05, 0x52, 0x87, 0x2c, 0xc8, + 0x36, 0x4f, 0xe6, 0x04, 0xf6, 0x99, 0xbb, 0x0b, 0x52, 0xf2, 0x09, 0xb2, 0x25, 0x4b, 0x3e, 0x46, + 0xbe, 0x47, 0x46, 0xc6, 0x8c, 0x11, 0x7c, 0x91, 0xc8, 0x77, 0x84, 0x60, 0x05, 0xa2, 0x30, 0xf2, + 0xee, 0xff, 0x7b, 0xef, 0x77, 0xbc, 0x33, 0x6a, 0x86, 0x4c, 0xc4, 0x4c, 0xb8, 0x42, 0x32, 0x0e, + 0xae, 0x90, 0x1c, 0xfc, 0x98, 0x26, 0x91, 0xeb, 0x07, 0x21, 0x75, 0x23, 0x9e, 0x86, 0x24, 0xe5, + 0x4c, 0x32, 0x5c, 0xd3, 0x39, 0xa2, 0x72, 0x64, 0x9b, 0x23, 0x59, 0xce, 0xaa, 0x49, 0x48, 0xc6, + 0xc0, 0x63, 0x9a, 0x48, 0x0d, 0xca, 0xb3, 0x14, 0x84, 0x26, 0xad, 0x4f, 0xb9, 0x09, 0x0b, 0x2f, + 0x00, 0xe9, 0x7b, 0xee, 0x8c, 0x0a, 0x09, 0x49, 0xd6, 0x41, 0xa5, 0x9c, 0x4b, 0x03, 0x59, 0x7f, + 0x55, 0xed, 0x27, 0x4d, 0xfc, 0x19, 0x3d, 0x87, 0xde, 0x8c, 0x85, 0xd3, 0x01, 0xcc, 0x4f, 0x41, + 0x48, 0xdc, 0x41, 0x26, 0x87, 0x79, 0xd5, 0xa8, 0x1b, 0xad, 0x52, 0xfb, 0x33, 0x79, 0x9a, 0xa7, + 0x04, 0xc8, 0x26, 0x96, 0x47, 0x33, 0x02, 0x77, 0x33, 0x50, 0x54, 0x8b, 0x0a, 0x6c, 0xee, 0x01, + 0x45, 0xca, 0x12, 0x01, 0xcf, 0x48, 0xe1, 0x7c, 0x40, 0xb5, 0xbd, 0x42, 0x1a, 0x70, 0x6e, 0x0c, + 0x54, 0xd1, 0xe7, 0x7d, 0x16, 0xc7, 0x54, 0x3e, 0x9a, 0x36, 0x50, 0x39, 0xc8, 0x82, 0xa3, 0x09, + 0xd0, 0x68, 0x22, 0x95, 0xb2, 0x39, 0x28, 0xa9, 0xda, 0x2f, 0x55, 0xc2, 0xde, 0xae, 0xd3, 0xc7, + 0x83, 0x4e, 0x9b, 0xbe, 0x59, 0x16, 0x7f, 0x47, 0x28, 0x9c, 0xf8, 0x49, 0x04, 0x23, 0x01, 0xb2, + 0x6a, 0xd6, 0xcd, 0x56, 0xa9, 0xdd, 0x20, 0xb9, 0x9d, 0x6c, 0xfe, 0x59, 0x32, 0xcc, 0x7e, 0xfd, + 0xf9, 0xff, 0xcf, 
0xa7, 0x7c, 0xf0, 0x56, 0x43, 0x43, 0x90, 0xce, 0x7b, 0xf4, 0x2e, 0xaf, 0xab, + 0x87, 0xb4, 0xaf, 0x8b, 0xa8, 0xf2, 0xa3, 0xd7, 0xff, 0xad, 0x0f, 0x81, 0x0f, 0x81, 0x2f, 0x68, + 0x08, 0xf8, 0x62, 0x7b, 0xbf, 0xdc, 0xfd, 0x71, 0x87, 0xbc, 0xf0, 0x12, 0xc8, 0xe1, 0x15, 0x5a, + 0xdd, 0xe3, 0x41, 0xad, 0x88, 0x05, 0x2a, 0xef, 0xaa, 0xe3, 0x2f, 0xaf, 0xe8, 0x94, 0x5b, 0x8a, + 0xe5, 0x1d, 0x41, 0xe8, 0xa1, 0xbd, 0x6f, 0xb7, 0x2b, 0xdb, 0x58, 0xae, 0x6c, 0xe3, 0x7e, 0x65, + 0x1b, 0x57, 0x6b, 0xbb, 0xb0, 0x5c, 0xdb, 0x85, 0xbb, 0xb5, 0x5d, 0x38, 0x69, 0xe8, 0x5e, 0x62, + 0x3c, 0x25, 0x94, 0xed, 0xfd, 0x70, 0x82, 0x37, 0xea, 0x51, 0x7f, 0x7d, 0x08, 0x00, 0x00, 0xff, + 0xff, 0xa8, 0x04, 0x3e, 0xdb, 0x5e, 0x03, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ABCIListenerServiceClient is the client API for ABCIListenerService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ABCIListenerServiceClient interface { + // ListenFinalizeBlock is the corresponding endpoint for ABCIListener.ListenEndBlock + ListenFinalizeBlock(ctx context.Context, in *ListenFinalizeBlockRequest, opts ...grpc.CallOption) (*ListenFinalizeBlockResponse, error) + // ListenCommit is the corresponding endpoint for ABCIListener.ListenCommit + ListenCommit(ctx context.Context, in *ListenCommitRequest, opts ...grpc.CallOption) (*ListenCommitResponse, error) +} + +type aBCIListenerServiceClient struct { + cc grpc1.ClientConn +} + +func NewABCIListenerServiceClient(cc grpc1.ClientConn) ABCIListenerServiceClient { + return &aBCIListenerServiceClient{cc} +} + +func (c *aBCIListenerServiceClient) ListenFinalizeBlock(ctx context.Context, in *ListenFinalizeBlockRequest, opts ...grpc.CallOption) (*ListenFinalizeBlockResponse, error) { + out := new(ListenFinalizeBlockResponse) + err := c.cc.Invoke(ctx, "/cosmos.store.streaming.abci.ABCIListenerService/ListenFinalizeBlock", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIListenerServiceClient) ListenCommit(ctx context.Context, in *ListenCommitRequest, opts ...grpc.CallOption) (*ListenCommitResponse, error) { + out := new(ListenCommitResponse) + err := c.cc.Invoke(ctx, "/cosmos.store.streaming.abci.ABCIListenerService/ListenCommit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ABCIListenerServiceServer is the server API for ABCIListenerService service. +type ABCIListenerServiceServer interface { + // ListenFinalizeBlock is the corresponding endpoint for ABCIListener.ListenEndBlock + ListenFinalizeBlock(context.Context, *ListenFinalizeBlockRequest) (*ListenFinalizeBlockResponse, error) + // ListenCommit is the corresponding endpoint for ABCIListener.ListenCommit + ListenCommit(context.Context, *ListenCommitRequest) (*ListenCommitResponse, error) +} + +// UnimplementedABCIListenerServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedABCIListenerServiceServer struct { +} + +func (*UnimplementedABCIListenerServiceServer) ListenFinalizeBlock(ctx context.Context, req *ListenFinalizeBlockRequest) (*ListenFinalizeBlockResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListenFinalizeBlock not implemented") +} +func (*UnimplementedABCIListenerServiceServer) ListenCommit(ctx context.Context, req *ListenCommitRequest) (*ListenCommitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListenCommit not implemented") +} + +func RegisterABCIListenerServiceServer(s grpc1.Server, srv ABCIListenerServiceServer) { + s.RegisterService(&_ABCIListenerService_serviceDesc, srv) +} + +func _ABCIListenerService_ListenFinalizeBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListenFinalizeBlockRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIListenerServiceServer).ListenFinalizeBlock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cosmos.store.streaming.abci.ABCIListenerService/ListenFinalizeBlock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIListenerServiceServer).ListenFinalizeBlock(ctx, req.(*ListenFinalizeBlockRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIListenerService_ListenCommit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListenCommitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIListenerServiceServer).ListenCommit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cosmos.store.streaming.abci.ABCIListenerService/ListenCommit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIListenerServiceServer).ListenCommit(ctx, req.(*ListenCommitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ABCIListenerService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "cosmos.store.streaming.abci.ABCIListenerService", + HandlerType: (*ABCIListenerServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListenFinalizeBlock", + Handler: _ABCIListenerService_ListenFinalizeBlock_Handler, + }, + { + MethodName: "ListenCommit", + Handler: _ABCIListenerService_ListenCommit_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "cosmos/store/streaming/abci/grpc.proto", +} + +func (m *ListenFinalizeBlockRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenFinalizeBlockRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListenFinalizeBlockRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Res != nil { + { + size, err := m.Res.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGrpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Req != nil { + { + size, err := m.Req.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGrpc(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListenFinalizeBlockResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenFinalizeBlockResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListenFinalizeBlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *ListenCommitRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenCommitRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListenCommitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChangeSet) > 0 { + for iNdEx := len(m.ChangeSet) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ChangeSet[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGrpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Res != nil { + { + size, err := m.Res.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGrpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.BlockHeight != 0 { + i = encodeVarintGrpc(dAtA, i, uint64(m.BlockHeight)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ListenCommitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenCommitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListenCommitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintGrpc(dAtA []byte, offset int, v uint64) int { + offset -= sovGrpc(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ListenFinalizeBlockRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Req != nil { + l = m.Req.Size() + n += 1 + l + sovGrpc(uint64(l)) + } + if m.Res != nil { + l = m.Res.Size() + n += 1 + l + sovGrpc(uint64(l)) + } + return n +} + +func (m *ListenFinalizeBlockResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *ListenCommitRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BlockHeight != 0 { + n += 1 + sovGrpc(uint64(m.BlockHeight)) + } + if m.Res != nil { + l = m.Res.Size() + n += 1 + l + sovGrpc(uint64(l)) + } + if len(m.ChangeSet) > 0 { + for _, e := range m.ChangeSet { + l = e.Size() + n += 1 + l + sovGrpc(uint64(l)) + } + } + return n +} + +func (m *ListenCommitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovGrpc(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGrpc(x uint64) (n int) { + return sovGrpc(uint64((x << 1) ^ 
uint64((int64(x) >> 63)))) +} +func (m *ListenFinalizeBlockRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGrpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListenFinalizeBlockRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListenFinalizeBlockRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Req", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGrpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGrpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGrpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Req == nil { + m.Req = &types.RequestFinalizeBlock{} + } + if err := m.Req.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Res", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGrpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGrpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGrpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Res == nil { + m.Res = &types.ResponseFinalizeBlock{} + } + if err := m.Res.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGrpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGrpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListenFinalizeBlockResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGrpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListenFinalizeBlockResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListenFinalizeBlockResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGrpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGrpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *ListenCommitRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGrpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListenCommitRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListenCommitRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockHeight", wireType) + } + m.BlockHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGrpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlockHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Res", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGrpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGrpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGrpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Res == nil { + m.Res = &types.ResponseCommit{} + } + if err := m.Res.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChangeSet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGrpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGrpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGrpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChangeSet = append(m.ChangeSet, &types1.StoreKVPair{}) + if err := m.ChangeSet[len(m.ChangeSet)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGrpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGrpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListenCommitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGrpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListenCommitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListenCommitResponse: illegal tag %d 
(wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGrpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGrpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGrpc(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGrpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGrpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGrpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGrpc + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGrpc + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGrpc + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGrpc = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGrpc = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGrpc = fmt.Errorf("proto: unexpected end of group") +) diff --git a/cosmos-sdk-store/streaming/abci/interface.go b/cosmos-sdk-store/streaming/abci/interface.go new file mode 100755 index 000000000..cecc1b0ad --- /dev/null +++ b/cosmos-sdk-store/streaming/abci/interface.go @@ -0,0 +1,45 @@ +// Package abci contains shared data between the host and plugins. +package abci + +import ( + "context" + + "github.com/hashicorp/go-plugin" + "google.golang.org/grpc" + + storetypes "cosmossdk.io/store/types" +) + +// Handshake is a common handshake that is shared by streaming and host. +// This prevents users from executing bad plugins or executing a plugin +// directory. It is a UX feature, not a security feature. +var Handshake = plugin.HandshakeConfig{ + // This isn't required when using VersionedPlugins + ProtocolVersion: 1, + MagicCookieKey: "ABCI_LISTENER_PLUGIN", + MagicCookieValue: "ef78114d-7bdf-411c-868f-347c99a78345", +} + +var _ plugin.GRPCPlugin = (*ListenerGRPCPlugin)(nil) + +// ListenerGRPCPlugin is the implementation of plugin.GRPCPlugin, so we can serve/consume this. +type ListenerGRPCPlugin struct { + // GRPCPlugin must still implement the Plugin interface + plugin.Plugin + // Concrete implementation, written in Go. This is only used for plugins + // that are written in Go. 
+ Impl storetypes.ABCIListener +} + +func (p *ListenerGRPCPlugin) GRPCServer(_ *plugin.GRPCBroker, s *grpc.Server) error { + RegisterABCIListenerServiceServer(s, &GRPCServer{Impl: p.Impl}) + return nil +} + +func (p *ListenerGRPCPlugin) GRPCClient( + _ context.Context, + _ *plugin.GRPCBroker, + c *grpc.ClientConn, +) (interface{}, error) { + return &GRPCClient{client: NewABCIListenerServiceClient(c)}, nil +} diff --git a/cosmos-sdk-store/streaming/streaming.go b/cosmos-sdk-store/streaming/streaming.go new file mode 100755 index 000000000..f553fd16a --- /dev/null +++ b/cosmos-sdk-store/streaming/streaming.go @@ -0,0 +1,79 @@ +package streaming + +import ( + "fmt" + "os" + "os/exec" + "strings" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin" + + streamingabci "cosmossdk.io/store/streaming/abci" +) + +const pluginEnvKeyPrefix = "COSMOS_SDK" + +// HandshakeMap contains a map of each supported streaming's handshake config +var HandshakeMap = map[string]plugin.HandshakeConfig{ + "abci": streamingabci.Handshake, +} + +// PluginMap contains a map of supported gRPC plugins +var PluginMap = map[string]plugin.Plugin{ + "abci": &streamingabci.ListenerGRPCPlugin{}, +} + +func GetPluginEnvKey(name string) string { + return fmt.Sprintf("%s_%s", pluginEnvKeyPrefix, strings.ToUpper(name)) +} + +func NewStreamingPlugin(name, logLevel string) (interface{}, error) { + logger := hclog.New(&hclog.LoggerOptions{ + Output: hclog.DefaultOutput, + Level: toHclogLevel(logLevel), + Name: fmt.Sprintf("plugin.%s", name), + }) + + // We're a host. Start by launching the streaming process. + env := os.Getenv(GetPluginEnvKey(name)) + client := plugin.NewClient(&plugin.ClientConfig{ + HandshakeConfig: HandshakeMap[name], + Managed: true, + Plugins: PluginMap, + // For verifying the integrity of executables see SecureConfig documentation + // https://pkg.go.dev/github.com/hashicorp/go-plugin#SecureConfig + //#nosec G204 -- Required to load plugins + Cmd: exec.Command("sh", "-c", env), + Logger: logger, + AllowedProtocols: []plugin.Protocol{ + plugin.ProtocolNetRPC, plugin.ProtocolGRPC, + }, + }) + + // Connect via RPC + rpcClient, err := client.Client() + if err != nil { + return nil, err + } + + // Request streaming plugin + return rpcClient.Dispense(name) +} + +func toHclogLevel(s string) hclog.Level { + switch s { + case "trace": + return hclog.Trace + case "debug": + return hclog.Debug + case "info": + return hclog.Info + case "warn": + return hclog.Warn + case "error": + return hclog.Error + default: + return hclog.DefaultLevel + } +} diff --git a/cosmos-sdk-store/streaming/streaming_test.go b/cosmos-sdk-store/streaming/streaming_test.go new file mode 100755 index 000000000..e6124838b --- /dev/null +++ b/cosmos-sdk-store/streaming/streaming_test.go @@ -0,0 +1,178 @@ +package streaming + +import ( + "context" + "fmt" + "os" + "runtime" + "testing" + "time" + + abci "github.com/cometbft/cometbft/abci/types" + tmproto "github.com/cometbft/cometbft/proto/tendermint/types" + "github.com/cosmos/gogoproto/proto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "cosmossdk.io/log" + storetypes "cosmossdk.io/store/types" +) + +type PluginTestSuite struct { + suite.Suite + + loggerCtx MockContext + + workDir string + + finalizeBlockReq abci.RequestFinalizeBlock + finalizeBlockRes abci.ResponseFinalizeBlock + commitRes abci.ResponseCommit + + changeSet []*storetypes.StoreKVPair +} + +func (s *PluginTestSuite) SetupTest() { + if 
runtime.GOOS != "linux" { + s.T().Skip("only run on linux") + } + + path, err := os.Getwd() + if err != nil { + s.T().Fail() + } + s.workDir = path + + pluginVersion := "abci" + // to write data to files, replace stdout/stdout => file/file + pluginPath := fmt.Sprintf("%s/abci/examples/stdout/stdout", s.workDir) + if err := os.Setenv(GetPluginEnvKey(pluginVersion), pluginPath); err != nil { + s.T().Fail() + } + + raw, err := NewStreamingPlugin(pluginVersion, "trace") + require.NoError(s.T(), err, "load", "streaming", "unexpected error") + + abciListener, ok := raw.(storetypes.ABCIListener) + require.True(s.T(), ok, "should pass type check") + + header := tmproto.Header{Height: 1, Time: time.Now()} + logger := log.NewNopLogger() + streamingService := storetypes.StreamingManager{ + ABCIListeners: []storetypes.ABCIListener{abciListener}, + StopNodeOnErr: true, + } + s.loggerCtx = NewMockContext(header, logger, streamingService) + + // test abci message types + + s.finalizeBlockReq = abci.RequestFinalizeBlock{ + Height: s.loggerCtx.BlockHeight(), + Txs: [][]byte{{1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}}, + Misbehavior: []abci.Misbehavior{}, + Hash: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, + DecidedLastCommit: abci.CommitInfo{}, + } + s.finalizeBlockRes = abci.ResponseFinalizeBlock{ + Events: []abci.Event{}, + ConsensusParamUpdates: &tmproto.ConsensusParams{}, + ValidatorUpdates: []abci.ValidatorUpdate{}, + TxResults: []*abci.ExecTxResult{{ + Events: []abci.Event{}, + Code: 1, + Codespace: "mockCodeSpace", + Data: []byte{5, 6, 7, 8}, + GasUsed: 2, + GasWanted: 3, + Info: "mockInfo", + Log: "mockLog", + }}, + } + s.commitRes = abci.ResponseCommit{} + + // test store kv pair types + for range [2000]int{} { + s.changeSet = append(s.changeSet, &storetypes.StoreKVPair{ + StoreKey: "mockStore", + Delete: false, + Key: []byte{1, 2, 3}, + Value: []byte{3, 2, 1}, + }) + } +} + +func TestPluginTestSuite(t *testing.T) { + suite.Run(t, new(PluginTestSuite)) +} + +func (s *PluginTestSuite) TestABCIGRPCPlugin() { + s.T().Run("Should successfully load streaming", func(t *testing.T) { + abciListeners := s.loggerCtx.StreamingManager().ABCIListeners + for _, abciListener := range abciListeners { + for i := range [50]int{} { + + err := abciListener.ListenFinalizeBlock(s.loggerCtx, s.finalizeBlockReq, s.finalizeBlockRes) + assert.NoError(t, err, "ListenEndBlock") + + err = abciListener.ListenCommit(s.loggerCtx, s.commitRes, s.changeSet) + assert.NoError(t, err, "ListenCommit") + + s.updateHeight(int64(i + 1)) + } + } + }) +} + +func (s *PluginTestSuite) updateHeight(n int64) { + header := s.loggerCtx.BlockHeader() + header.Height = n + s.loggerCtx = NewMockContext(header, s.loggerCtx.Logger(), s.loggerCtx.StreamingManager()) +} + +var ( + _ context.Context = MockContext{} + _ storetypes.Context = MockContext{} +) + +type MockContext struct { + baseCtx context.Context + header tmproto.Header + logger log.Logger + streamingManager storetypes.StreamingManager +} + +func (m MockContext) BlockHeight() int64 { return m.header.Height } +func (m MockContext) Logger() log.Logger { return m.logger } +func (m MockContext) StreamingManager() storetypes.StreamingManager { return m.streamingManager } + +func (m MockContext) BlockHeader() tmproto.Header { + msg := proto.Clone(&m.header).(*tmproto.Header) + return *msg +} + +func NewMockContext(header tmproto.Header, logger log.Logger, sm storetypes.StreamingManager) MockContext { + header.Time = header.Time.UTC() + return MockContext{ + baseCtx: context.Background(), + header: 
header, + logger: logger, + streamingManager: sm, + } +} + +func (m MockContext) Deadline() (deadline time.Time, ok bool) { + return m.baseCtx.Deadline() +} + +func (m MockContext) Done() <-chan struct{} { + return m.baseCtx.Done() +} + +func (m MockContext) Err() error { + return m.baseCtx.Err() +} + +func (m MockContext) Value(key any) any { + return m.baseCtx.Value(key) +} diff --git a/cosmos-sdk-store/tracekv/store.go b/cosmos-sdk-store/tracekv/store.go new file mode 100755 index 000000000..ba6df431d --- /dev/null +++ b/cosmos-sdk-store/tracekv/store.go @@ -0,0 +1,202 @@ +package tracekv + +import ( + "encoding/base64" + "encoding/json" + "io" + + "cosmossdk.io/errors" + "cosmossdk.io/store/types" +) + +const ( + writeOp operation = "write" + readOp operation = "read" + deleteOp operation = "delete" + iterKeyOp operation = "iterKey" + iterValueOp operation = "iterValue" +) + +type ( + // Store implements the KVStore interface with tracing enabled. + // Operations are traced on each core KVStore call and written to the + // underlying io.writer. + // + // TODO: Should we use a buffered writer and implement Commit on + // Store? + Store struct { + parent types.KVStore + writer io.Writer + context types.TraceContext + } + + // operation represents an IO operation + operation string + + // traceOperation implements a traced KVStore operation + traceOperation struct { + Operation operation `json:"operation"` + Key string `json:"key"` + Value string `json:"value"` + Metadata map[string]interface{} `json:"metadata"` + } +) + +// NewStore returns a reference to a new traceKVStore given a parent +// KVStore implementation and a buffered writer. +func NewStore(parent types.KVStore, writer io.Writer, tc types.TraceContext) *Store { + return &Store{parent: parent, writer: writer, context: tc} +} + +// Get implements the KVStore interface. It traces a read operation and +// delegates a Get call to the parent KVStore. +func (tkv *Store) Get(key []byte) []byte { + value := tkv.parent.Get(key) + + writeOperation(tkv.writer, readOp, tkv.context, key, value) + return value +} + +// Set implements the KVStore interface. It traces a write operation and +// delegates the Set call to the parent KVStore. +func (tkv *Store) Set(key, value []byte) { + types.AssertValidKey(key) + writeOperation(tkv.writer, writeOp, tkv.context, key, value) + tkv.parent.Set(key, value) +} + +// Delete implements the KVStore interface. It traces a write operation and +// delegates the Delete call to the parent KVStore. +func (tkv *Store) Delete(key []byte) { + writeOperation(tkv.writer, deleteOp, tkv.context, key, nil) + tkv.parent.Delete(key) +} + +// Has implements the KVStore interface. It delegates the Has call to the +// parent KVStore. +func (tkv *Store) Has(key []byte) bool { + return tkv.parent.Has(key) +} + +// Iterator implements the KVStore interface. It delegates the Iterator call +// to the parent KVStore. +func (tkv *Store) Iterator(start, end []byte) types.Iterator { + return tkv.iterator(start, end, true) +} + +// ReverseIterator implements the KVStore interface. It delegates the +// ReverseIterator call to the parent KVStore. +func (tkv *Store) ReverseIterator(start, end []byte) types.Iterator { + return tkv.iterator(start, end, false) +} + +// iterator facilitates iteration over a KVStore. It delegates the necessary +// calls to it's parent KVStore. 
+func (tkv *Store) iterator(start, end []byte, ascending bool) types.Iterator { + var parent types.Iterator + + if ascending { + parent = tkv.parent.Iterator(start, end) + } else { + parent = tkv.parent.ReverseIterator(start, end) + } + + return newTraceIterator(tkv.writer, parent, tkv.context) +} + +type traceIterator struct { + parent types.Iterator + writer io.Writer + context types.TraceContext +} + +func newTraceIterator(w io.Writer, parent types.Iterator, tc types.TraceContext) types.Iterator { + return &traceIterator{writer: w, parent: parent, context: tc} +} + +// Domain implements the Iterator interface. +func (ti *traceIterator) Domain() (start, end []byte) { + return ti.parent.Domain() +} + +// Valid implements the Iterator interface. +func (ti *traceIterator) Valid() bool { + return ti.parent.Valid() +} + +// Next implements the Iterator interface. +func (ti *traceIterator) Next() { + ti.parent.Next() +} + +// Key implements the Iterator interface. +func (ti *traceIterator) Key() []byte { + key := ti.parent.Key() + + writeOperation(ti.writer, iterKeyOp, ti.context, key, nil) + return key +} + +// Value implements the Iterator interface. +func (ti *traceIterator) Value() []byte { + value := ti.parent.Value() + + writeOperation(ti.writer, iterValueOp, ti.context, nil, value) + return value +} + +// Close implements the Iterator interface. +func (ti *traceIterator) Close() error { + return ti.parent.Close() +} + +// Error delegates the Error call to the parent iterator. +func (ti *traceIterator) Error() error { + return ti.parent.Error() +} + +// GetStoreType implements the KVStore interface. It returns the underlying +// KVStore type. +func (tkv *Store) GetStoreType() types.StoreType { + return tkv.parent.GetStoreType() +} + +// CacheWrap implements the KVStore interface. It panics because a Store +// cannot be branched. +func (tkv *Store) CacheWrap() types.CacheWrap { + panic("cannot CacheWrap a TraceKVStore") +} + +// CacheWrapWithTrace implements the KVStore interface. It panics as a +// Store cannot be branched. +func (tkv *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap { + panic("cannot CacheWrapWithTrace a TraceKVStore") +} + +// writeOperation writes a KVStore operation to the underlying io.Writer as +// JSON-encoded data where the key/value pair is base64 encoded. 
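+// For example, a Set of key "key" / value "value" under a TraceContext of
+// {"blockHeight": 64} produces the line:
+//   {"operation":"write","key":"a2V5","value":"dmFsdWU=","metadata":{"blockHeight":64}}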
+func writeOperation(w io.Writer, op operation, tc types.TraceContext, key, value []byte) { + traceOp := traceOperation{ + Operation: op, + Key: base64.StdEncoding.EncodeToString(key), + Value: base64.StdEncoding.EncodeToString(value), + } + + if tc != nil { + traceOp.Metadata = tc + } + + raw, err := json.Marshal(traceOp) + if err != nil { + panic(errors.Wrap(err, "failed to serialize trace operation")) + } + + if _, err := w.Write(raw); err != nil { + panic(errors.Wrap(err, "failed to write trace operation")) + } + + _, err = io.WriteString(w, "\n") + if err != nil { + panic(errors.Wrap(err, "failed to write newline")) + } +} diff --git a/cosmos-sdk-store/tracekv/store_test.go b/cosmos-sdk-store/tracekv/store_test.go new file mode 100755 index 000000000..2c42734ba --- /dev/null +++ b/cosmos-sdk-store/tracekv/store_test.go @@ -0,0 +1,292 @@ +package tracekv_test + +import ( + "bytes" + "fmt" + "io" + "testing" + + dbm "github.com/cosmos/cosmos-db" + "github.com/stretchr/testify/require" + + "cosmossdk.io/store/dbadapter" + "cosmossdk.io/store/internal/kv" + "cosmossdk.io/store/prefix" + "cosmossdk.io/store/tracekv" + "cosmossdk.io/store/types" +) + +func bz(s string) []byte { return []byte(s) } + +func keyFmt(i int) []byte { return bz(fmt.Sprintf("key%0.8d", i)) } +func valFmt(i int) []byte { return bz(fmt.Sprintf("value%0.8d", i)) } + +var kvPairs = []kv.Pair{ + {Key: keyFmt(1), Value: valFmt(1)}, + {Key: keyFmt(2), Value: valFmt(2)}, + {Key: keyFmt(3), Value: valFmt(3)}, +} + +func newTraceKVStore(w io.Writer) *tracekv.Store { + store := newEmptyTraceKVStore(w) + + for _, kvPair := range kvPairs { + store.Set(kvPair.Key, kvPair.Value) + } + + return store +} + +func newEmptyTraceKVStore(w io.Writer) *tracekv.Store { + memDB := dbadapter.Store{DB: dbm.NewMemDB()} + tc := types.TraceContext(map[string]interface{}{"blockHeight": 64}) + + return tracekv.NewStore(memDB, w, tc) +} + +func TestTraceKVStoreGet(t *testing.T) { + testCases := []struct { + key []byte + expectedValue []byte + expectedOut string + }{ + { + key: kvPairs[0].Key, + expectedValue: kvPairs[0].Value, + expectedOut: "{\"operation\":\"read\",\"key\":\"a2V5MDAwMDAwMDE=\",\"value\":\"dmFsdWUwMDAwMDAwMQ==\",\"metadata\":{\"blockHeight\":64}}\n", + }, + { + key: []byte("does-not-exist"), + expectedValue: nil, + expectedOut: "{\"operation\":\"read\",\"key\":\"ZG9lcy1ub3QtZXhpc3Q=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n", + }, + } + + for _, tc := range testCases { + var buf bytes.Buffer + + store := newTraceKVStore(&buf) + buf.Reset() + value := store.Get(tc.key) + + require.Equal(t, tc.expectedValue, value) + require.Equal(t, tc.expectedOut, buf.String()) + } +} + +func TestTraceKVStoreSet(t *testing.T) { + testCases := []struct { + key []byte + value []byte + expectedOut string + }{ + { + key: kvPairs[0].Key, + value: kvPairs[0].Value, + expectedOut: "{\"operation\":\"write\",\"key\":\"a2V5MDAwMDAwMDE=\",\"value\":\"dmFsdWUwMDAwMDAwMQ==\",\"metadata\":{\"blockHeight\":64}}\n", + }, + { + key: kvPairs[1].Key, + value: kvPairs[1].Value, + expectedOut: "{\"operation\":\"write\",\"key\":\"a2V5MDAwMDAwMDI=\",\"value\":\"dmFsdWUwMDAwMDAwMg==\",\"metadata\":{\"blockHeight\":64}}\n", + }, + { + key: kvPairs[2].Key, + value: kvPairs[2].Value, + expectedOut: "{\"operation\":\"write\",\"key\":\"a2V5MDAwMDAwMDM=\",\"value\":\"dmFsdWUwMDAwMDAwMw==\",\"metadata\":{\"blockHeight\":64}}\n", + }, + } + + for _, tc := range testCases { + var buf bytes.Buffer + + store := newEmptyTraceKVStore(&buf) + buf.Reset() + store.Set(tc.key, 
tc.value) + + require.Equal(t, tc.expectedOut, buf.String()) + } + + var buf bytes.Buffer + store := newEmptyTraceKVStore(&buf) + require.Panics(t, func() { store.Set([]byte(""), []byte("value")) }, "setting an empty key should panic") + require.Panics(t, func() { store.Set(nil, []byte("value")) }, "setting a nil key should panic") +} + +func TestTraceKVStoreDelete(t *testing.T) { + testCases := []struct { + key []byte + expectedOut string + }{ + { + key: kvPairs[0].Key, + expectedOut: "{\"operation\":\"delete\",\"key\":\"a2V5MDAwMDAwMDE=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n", + }, + } + + for _, tc := range testCases { + var buf bytes.Buffer + + store := newTraceKVStore(&buf) + buf.Reset() + store.Delete(tc.key) + + require.Equal(t, tc.expectedOut, buf.String()) + } +} + +func TestTraceKVStoreHas(t *testing.T) { + testCases := []struct { + key []byte + expected bool + }{ + { + key: kvPairs[0].Key, + expected: true, + }, + } + + for _, tc := range testCases { + var buf bytes.Buffer + + store := newTraceKVStore(&buf) + buf.Reset() + ok := store.Has(tc.key) + + require.Equal(t, tc.expected, ok) + } +} + +func TestTestTraceKVStoreIterator(t *testing.T) { + var buf bytes.Buffer + + store := newTraceKVStore(&buf) + iterator := store.Iterator(nil, nil) + + s, e := iterator.Domain() + require.Equal(t, []byte(nil), s) + require.Equal(t, []byte(nil), e) + + testCases := []struct { + expectedKey []byte + expectedValue []byte + expectedKeyOut string + expectedvalueOut string + }{ + { + expectedKey: kvPairs[0].Key, + expectedValue: kvPairs[0].Value, + expectedKeyOut: "{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDE=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n", + expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMQ==\",\"metadata\":{\"blockHeight\":64}}\n", + }, + { + expectedKey: kvPairs[1].Key, + expectedValue: kvPairs[1].Value, + expectedKeyOut: "{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDI=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n", + expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMg==\",\"metadata\":{\"blockHeight\":64}}\n", + }, + { + expectedKey: kvPairs[2].Key, + expectedValue: kvPairs[2].Value, + expectedKeyOut: "{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDM=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n", + expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMw==\",\"metadata\":{\"blockHeight\":64}}\n", + }, + } + + for _, tc := range testCases { + buf.Reset() + ka := iterator.Key() + require.Equal(t, tc.expectedKeyOut, buf.String()) + + buf.Reset() + va := iterator.Value() + require.Equal(t, tc.expectedvalueOut, buf.String()) + + require.Equal(t, tc.expectedKey, ka) + require.Equal(t, tc.expectedValue, va) + + iterator.Next() + } + + require.False(t, iterator.Valid()) + require.Panics(t, iterator.Next) + require.NoError(t, iterator.Close()) +} + +func TestTestTraceKVStoreReverseIterator(t *testing.T) { + var buf bytes.Buffer + + store := newTraceKVStore(&buf) + iterator := store.ReverseIterator(nil, nil) + + s, e := iterator.Domain() + require.Equal(t, []byte(nil), s) + require.Equal(t, []byte(nil), e) + + testCases := []struct { + expectedKey []byte + expectedValue []byte + expectedKeyOut string + expectedvalueOut string + }{ + { + expectedKey: kvPairs[2].Key, + expectedValue: kvPairs[2].Value, + expectedKeyOut: 
"{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDM=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n", + expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMw==\",\"metadata\":{\"blockHeight\":64}}\n", + }, + { + expectedKey: kvPairs[1].Key, + expectedValue: kvPairs[1].Value, + expectedKeyOut: "{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDI=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n", + expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMg==\",\"metadata\":{\"blockHeight\":64}}\n", + }, + { + expectedKey: kvPairs[0].Key, + expectedValue: kvPairs[0].Value, + expectedKeyOut: "{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDE=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n", + expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMQ==\",\"metadata\":{\"blockHeight\":64}}\n", + }, + } + + for _, tc := range testCases { + buf.Reset() + ka := iterator.Key() + require.Equal(t, tc.expectedKeyOut, buf.String()) + + buf.Reset() + va := iterator.Value() + require.Equal(t, tc.expectedvalueOut, buf.String()) + + require.Equal(t, tc.expectedKey, ka) + require.Equal(t, tc.expectedValue, va) + + iterator.Next() + } + + require.False(t, iterator.Valid()) + require.Panics(t, iterator.Next) + require.NoError(t, iterator.Close()) +} + +func TestTraceKVStorePrefix(t *testing.T) { + store := newEmptyTraceKVStore(nil) + pStore := prefix.NewStore(store, []byte("trace_prefix")) + require.IsType(t, prefix.Store{}, pStore) +} + +func TestTraceKVStoreGetStoreType(t *testing.T) { + memDB := dbadapter.Store{DB: dbm.NewMemDB()} + store := newEmptyTraceKVStore(nil) + require.Equal(t, memDB.GetStoreType(), store.GetStoreType()) +} + +func TestTraceKVStoreCacheWrap(t *testing.T) { + store := newEmptyTraceKVStore(nil) + require.Panics(t, func() { store.CacheWrap() }) +} + +func TestTraceKVStoreCacheWrapWithTrace(t *testing.T) { + store := newEmptyTraceKVStore(nil) + require.Panics(t, func() { store.CacheWrapWithTrace(nil, nil) }) +} diff --git a/cosmos-sdk-store/transient/store.go b/cosmos-sdk-store/transient/store.go new file mode 100755 index 000000000..6f393279f --- /dev/null +++ b/cosmos-sdk-store/transient/store.go @@ -0,0 +1,53 @@ +package transient + +import ( + dbm "github.com/cosmos/cosmos-db" + + "cosmossdk.io/store/dbadapter" + pruningtypes "cosmossdk.io/store/pruning/types" + "cosmossdk.io/store/types" +) + +var ( + _ types.Committer = (*Store)(nil) + _ types.KVStore = (*Store)(nil) +) + +// Store is a wrapper for a MemDB with Commiter implementation +type Store struct { + dbadapter.Store +} + +// Constructs new MemDB adapter +func NewStore() *Store { + return &Store{Store: dbadapter.Store{DB: dbm.NewMemDB()}} +} + +// Implements CommitStore +// Commit cleans up Store. +func (ts *Store) Commit() (id types.CommitID) { + ts.Store = dbadapter.Store{DB: dbm.NewMemDB()} + return +} + +func (ts *Store) SetPruning(_ pruningtypes.PruningOptions) {} + +// GetPruning is a no-op as pruning options cannot be directly set on this store. +// They must be set on the root commit multi-store. +func (ts *Store) GetPruning() pruningtypes.PruningOptions { + return pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined) +} + +// Implements CommitStore +func (ts *Store) LastCommitID() types.CommitID { + return types.CommitID{} +} + +func (ts *Store) WorkingHash() []byte { + return []byte{} +} + +// Implements Store. 
+// GetStoreType implements Store.
+func (ts *Store) GetStoreType() types.StoreType {
+	return types.StoreTypeTransient
+}
diff --git a/cosmos-sdk-store/transient/store_test.go b/cosmos-sdk-store/transient/store_test.go
new file mode 100755
index 000000000..341ef41cc
--- /dev/null
+++ b/cosmos-sdk-store/transient/store_test.go
@@ -0,0 +1,34 @@
+package transient_test
+
+import (
+	"bytes"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	pruningtypes "cosmossdk.io/store/pruning/types"
+	"cosmossdk.io/store/transient"
+)
+
+var k, v = []byte("hello"), []byte("world")
+
+func TestTransientStore(t *testing.T) {
+	tstore := transient.NewStore()
+
+	require.Nil(t, tstore.Get(k))
+
+	tstore.Set(k, v)
+
+	require.Equal(t, v, tstore.Get(k))
+
+	tstore.Commit()
+
+	require.Nil(t, tstore.Get(k))
+
+	// no-op
+	tstore.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined))
+
+	emptyCommitID := tstore.LastCommitID()
+	require.Equal(t, emptyCommitID.Version, int64(0))
+	require.True(t, bytes.Equal(emptyCommitID.Hash, nil))
+}
diff --git a/cosmos-sdk-store/types/codec.go b/cosmos-sdk-store/types/codec.go
new file mode 100755
index 000000000..4a5f42487
--- /dev/null
+++ b/cosmos-sdk-store/types/codec.go
@@ -0,0 +1,89 @@
+package types
+
+import (
+	"encoding/binary"
+	fmt "fmt"
+
+	proto "github.com/cosmos/gogoproto/proto"
+)
+
+// Codec defines an interface needed for the store package to marshal data.
+type Codec interface {
+	// Marshal returns the binary encoding of v.
+	Marshal(proto.Message) ([]byte, error)
+
+	// MarshalLengthPrefixed returns the binary encoding of v with a bytes length prefix.
+	MarshalLengthPrefixed(proto.Message) ([]byte, error)
+
+	// Unmarshal parses the data encoded with the Marshal method and stores the
+	// result in the value pointed to by v.
+	Unmarshal(bz []byte, ptr proto.Message) error
+
+	// UnmarshalLengthPrefixed parses the data encoded with the MarshalLengthPrefixed
+	// method and stores the result in the value pointed to by v.
+	UnmarshalLengthPrefixed(bz []byte, ptr proto.Message) error
+}
+
+// ============= TestCodec =============
+// TestCodec defines a codec that utilizes Protobuf for both binary and JSON
+// encoding.
+type TestCodec struct{}
+
+var _ Codec = &TestCodec{}
+
+func NewTestCodec() Codec {
+	return &TestCodec{}
+}
+
+// Marshal implements BinaryMarshaler.Marshal method.
+// NOTE: this function must be used with a concrete type which
+// implements proto.Message. For interface values, use codec.MarshalInterface instead.
+func (pc *TestCodec) Marshal(o proto.Message) ([]byte, error) {
+	// Size() check can catch the typed nil value.
+	if o == nil || proto.Size(o) == 0 {
+		// return empty bytes instead of nil, because nil has special meaning in places like store.Set
+		return []byte{}, nil
+	}
+	return proto.Marshal(o)
+}
+
+// MarshalLengthPrefixed implements BinaryMarshaler.MarshalLengthPrefixed method.
+func (pc *TestCodec) MarshalLengthPrefixed(o proto.Message) ([]byte, error) {
+	bz, err := pc.Marshal(o)
+	if err != nil {
+		return nil, err
+	}
+
+	var sizeBuf [binary.MaxVarintLen64]byte
+	n := binary.PutUvarint(sizeBuf[:], uint64(len(bz)))
+	return append(sizeBuf[:n], bz...), nil
+}
+
+// Unmarshal implements BinaryMarshaler.Unmarshal method.
+// NOTE: this function must be used with a concrete type which
+// implements proto.Message.
+// For interface values, use codec.UnmarshalInterface instead.
+func (pc *TestCodec) Unmarshal(bz []byte, ptr proto.Message) error {
+	err := proto.Unmarshal(bz, ptr)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// UnmarshalLengthPrefixed implements BinaryMarshaler.UnmarshalLengthPrefixed method.
+func (pc *TestCodec) UnmarshalLengthPrefixed(bz []byte, ptr proto.Message) error {
+	size, n := binary.Uvarint(bz)
+	if n < 0 {
+		return fmt.Errorf("invalid number of bytes read from length-prefixed encoding: %d", n)
+	}
+
+	if size > uint64(len(bz)-n) {
+		return fmt.Errorf("not enough bytes to read; want: %v, got: %v", size, len(bz)-n)
+	} else if size < uint64(len(bz)-n) {
+		return fmt.Errorf("too many bytes to read; want: %v, got: %v", size, len(bz)-n)
+	}
+
+	bz = bz[n:]
+	return proto.Unmarshal(bz, ptr)
+}
diff --git a/cosmos-sdk-store/types/commit_info.go b/cosmos-sdk-store/types/commit_info.go
new file mode 100755
index 000000000..249d0986d
--- /dev/null
+++ b/cosmos-sdk-store/types/commit_info.go
@@ -0,0 +1,62 @@
+package types
+
+import (
+	"crypto/sha256"
+
+	cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto"
+
+	"cosmossdk.io/store/internal/maps"
+)
+
+// GetHash returns the hash of the store's CommitID.
+// This is used in CommitInfo.Hash().
+//
+// When we commit to this in a merkle proof, we create a map of storeInfo.Name -> storeInfo.GetHash()
+// and build a merkle proof from that.
+// This is then chained with the substore proof, so we prove the root hash from the substore before this
+// and need to pass that (unmodified) as the leaf value of the multistore proof.
+func (si StoreInfo) GetHash() []byte {
+	return si.CommitId.Hash
+}
+
+func (ci CommitInfo) toMap() map[string][]byte {
+	m := make(map[string][]byte, len(ci.StoreInfos))
+	for _, storeInfo := range ci.StoreInfos {
+		m[storeInfo.Name] = storeInfo.GetHash()
+	}
+
+	return m
+}
+
+// Hash returns the simple merkle root hash of the stores sorted by name.
+func (ci CommitInfo) Hash() []byte {
+	// we need a special case for the empty set, as SimpleProofsFromMap requires at least one entry
+	if len(ci.StoreInfos) == 0 {
+		emptyHash := sha256.Sum256([]byte{})
+		return emptyHash[:]
+	}
+
+	rootHash, _, _ := maps.ProofsFromMap(ci.toMap())
+
+	if len(rootHash) == 0 {
+		emptyHash := sha256.Sum256([]byte{})
+		return emptyHash[:]
+	}
+
+	return rootHash
+}
+
+func (ci CommitInfo) ProofOp(storeName string) cmtprotocrypto.ProofOp {
+	ret, err := ProofOpFromMap(ci.toMap(), storeName)
+	if err != nil {
+		panic(err)
+	}
+	return ret
+}
+
+func (ci CommitInfo) CommitID() CommitID {
+	return CommitID{
+		Version: ci.Version,
+		Hash:    ci.Hash(),
+	}
+}
diff --git a/cosmos-sdk-store/types/commit_info.pb.go b/cosmos-sdk-store/types/commit_info.pb.go
new file mode 100755
index 000000000..81220a79c
--- /dev/null
+++ b/cosmos-sdk-store/types/commit_info.pb.go
@@ -0,0 +1,864 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: cosmos/store/v1beta1/commit_info.proto
+
+package types
+
+import (
+	fmt "fmt"
+	_ "github.com/cosmos/gogoproto/gogoproto"
+	proto "github.com/cosmos/gogoproto/proto"
+	github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types"
+	_ "google.golang.org/protobuf/types/known/timestamppb"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+	time "time"
+)
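Because UnmarshalLengthPrefixed rejects both short and over-long payloads, a round trip must consume the buffer exactly. A standalone sketch of the codec in use (StoreKVPair, defined later in this changeset, serves as a convenient proto.Message):

```go
package main

import (
	"fmt"

	storetypes "cosmossdk.io/store/types"
)

func main() {
	cdc := storetypes.NewTestCodec()
	pair := &storetypes.StoreKVPair{StoreKey: "bank", Key: []byte("k"), Value: []byte("v")}

	// MarshalLengthPrefixed prepends a uvarint byte length to the raw
	// proto encoding.
	bz, err := cdc.MarshalLengthPrefixed(pair)
	if err != nil {
		panic(err)
	}

	var out storetypes.StoreKVPair
	// UnmarshalLengthPrefixed errors unless the payload length matches the
	// prefix exactly.
	if err := cdc.UnmarshalLengthPrefixed(bz, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.StoreKey) // bank
}
```

+// Reference imports to suppress errors if they are not otherwise used.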
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// CommitInfo defines commit information used by the multi-store when committing +// a version/height. +type CommitInfo struct { + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + StoreInfos []StoreInfo `protobuf:"bytes,2,rep,name=store_infos,json=storeInfos,proto3" json:"store_infos"` + Timestamp time.Time `protobuf:"bytes,3,opt,name=timestamp,proto3,stdtime" json:"timestamp"` +} + +func (m *CommitInfo) Reset() { *m = CommitInfo{} } +func (m *CommitInfo) String() string { return proto.CompactTextString(m) } +func (*CommitInfo) ProtoMessage() {} +func (*CommitInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_5f8c656cdef8c524, []int{0} +} +func (m *CommitInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CommitInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CommitInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CommitInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommitInfo.Merge(m, src) +} +func (m *CommitInfo) XXX_Size() int { + return m.Size() +} +func (m *CommitInfo) XXX_DiscardUnknown() { + xxx_messageInfo_CommitInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_CommitInfo proto.InternalMessageInfo + +func (m *CommitInfo) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *CommitInfo) GetStoreInfos() []StoreInfo { + if m != nil { + return m.StoreInfos + } + return nil +} + +func (m *CommitInfo) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +// StoreInfo defines store-specific commit information. It contains a reference +// between a store name and the commit ID. 
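An aside before the generated StoreInfo type below: CommitInfo.Hash() from commit_info.go merkleizes the store-name-to-hash map, and CommitID() pairs that root with the version. A standalone sketch:

```go
package main

import (
	"crypto/sha256"
	"fmt"

	storetypes "cosmossdk.io/store/types"
)

func main() {
	// Pretend this is the IAVL root hash of a single module store.
	leaf := sha256.Sum256([]byte("iavl root"))

	ci := storetypes.CommitInfo{
		Version: 7,
		StoreInfos: []storetypes.StoreInfo{
			{Name: "bank", CommitId: storetypes.CommitID{Version: 7, Hash: leaf[:]}},
		},
	}

	// Hash() builds a simple merkle tree over name -> hash; CommitID()
	// attaches the version to that root.
	fmt.Printf("%X\n", ci.CommitID().Hash)

	// Per commit_info.go above, an empty CommitInfo hashes to sha256("").
	fmt.Printf("%X\n", storetypes.CommitInfo{}.Hash())
}
```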
+type StoreInfo struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + CommitId CommitID `protobuf:"bytes,2,opt,name=commit_id,json=commitId,proto3" json:"commit_id"` +} + +func (m *StoreInfo) Reset() { *m = StoreInfo{} } +func (m *StoreInfo) String() string { return proto.CompactTextString(m) } +func (*StoreInfo) ProtoMessage() {} +func (*StoreInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_5f8c656cdef8c524, []int{1} +} +func (m *StoreInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StoreInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StoreInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StoreInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_StoreInfo.Merge(m, src) +} +func (m *StoreInfo) XXX_Size() int { + return m.Size() +} +func (m *StoreInfo) XXX_DiscardUnknown() { + xxx_messageInfo_StoreInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_StoreInfo proto.InternalMessageInfo + +func (m *StoreInfo) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *StoreInfo) GetCommitId() CommitID { + if m != nil { + return m.CommitId + } + return CommitID{} +} + +// CommitID defines the commitment information when a specific store is +// committed. +type CommitID struct { + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` +} + +func (m *CommitID) Reset() { *m = CommitID{} } +func (*CommitID) ProtoMessage() {} +func (*CommitID) Descriptor() ([]byte, []int) { + return fileDescriptor_5f8c656cdef8c524, []int{2} +} +func (m *CommitID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CommitID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CommitID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CommitID) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommitID.Merge(m, src) +} +func (m *CommitID) XXX_Size() int { + return m.Size() +} +func (m *CommitID) XXX_DiscardUnknown() { + xxx_messageInfo_CommitID.DiscardUnknown(m) +} + +var xxx_messageInfo_CommitID proto.InternalMessageInfo + +func (m *CommitID) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *CommitID) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func init() { + proto.RegisterType((*CommitInfo)(nil), "cosmos.store.v1beta1.CommitInfo") + proto.RegisterType((*StoreInfo)(nil), "cosmos.store.v1beta1.StoreInfo") + proto.RegisterType((*CommitID)(nil), "cosmos.store.v1beta1.CommitID") +} + +func init() { + proto.RegisterFile("cosmos/store/v1beta1/commit_info.proto", fileDescriptor_5f8c656cdef8c524) +} + +var fileDescriptor_5f8c656cdef8c524 = []byte{ + // 336 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0xb1, 0x4e, 0xf2, 0x50, + 0x14, 0xc7, 0x7b, 0xa1, 0xf9, 0x3e, 0x7a, 0x70, 0xba, 0x61, 0x68, 0x18, 0x6e, 0x09, 0x83, 0x61, + 0xba, 0x0d, 0xb8, 0x39, 0x98, 0x58, 0x8d, 0x09, 0x6b, 0x75, 0x72, 0x31, 0x2d, 0x5c, 0x4a, 0xa3, + 0xed, 0x21, 0xdc, 0x2b, 0x89, 0x6f, 0xc1, 0xe8, 0xe8, 0x33, 0xf8, 0x14, 0x8c, 0x8c, 0x4e, 0x6a, + 
0xe0, 0x45, 0x4c, 0x4f, 0x5b, 0x5c, 0x88, 0xdb, 0x39, 0xed, 0xef, 0x9c, 0xff, 0xaf, 0xa7, 0x70, + 0x3a, 0x41, 0x9d, 0xa1, 0xf6, 0xb5, 0xc1, 0xa5, 0xf2, 0x57, 0xc3, 0x58, 0x99, 0x68, 0xe8, 0x4f, + 0x30, 0xcb, 0x52, 0xf3, 0x90, 0xe6, 0x33, 0x94, 0x8b, 0x25, 0x1a, 0xe4, 0x9d, 0x92, 0x93, 0xc4, + 0xc9, 0x8a, 0xeb, 0x76, 0x12, 0x4c, 0x90, 0x00, 0xbf, 0xa8, 0x4a, 0xb6, 0xeb, 0x25, 0x88, 0xc9, + 0x93, 0xf2, 0xa9, 0x8b, 0x9f, 0x67, 0xbe, 0x49, 0x33, 0xa5, 0x4d, 0x94, 0x2d, 0x4a, 0xa0, 0xff, + 0xce, 0x00, 0xae, 0x28, 0x62, 0x9c, 0xcf, 0x90, 0xbb, 0xf0, 0x7f, 0xa5, 0x96, 0x3a, 0xc5, 0xdc, + 0x65, 0x3d, 0x36, 0x68, 0x86, 0x75, 0xcb, 0x6f, 0xa0, 0x4d, 0x81, 0x64, 0xa2, 0xdd, 0x46, 0xaf, + 0x39, 0x68, 0x8f, 0x3c, 0x79, 0xcc, 0x45, 0xde, 0x16, 0x5d, 0xb1, 0x2f, 0xb0, 0x37, 0x9f, 0x9e, + 0x15, 0x82, 0xae, 0x1f, 0x68, 0x1e, 0x80, 0x73, 0x70, 0x70, 0x9b, 0x3d, 0x36, 0x68, 0x8f, 0xba, + 0xb2, 0xb4, 0x94, 0xb5, 0xa5, 0xbc, 0xab, 0x89, 0xa0, 0x55, 0x2c, 0x58, 0x7f, 0x79, 0x2c, 0xfc, + 0x1d, 0xeb, 0xc7, 0xe0, 0x1c, 0x22, 0x38, 0x07, 0x3b, 0x8f, 0x32, 0x45, 0xbe, 0x4e, 0x48, 0x35, + 0xbf, 0x04, 0xa7, 0xbe, 0xdb, 0xd4, 0x6d, 0x50, 0x88, 0x38, 0xae, 0x5a, 0x7d, 0xfb, 0x75, 0x65, + 0xda, 0x2a, 0xc7, 0xc6, 0xd3, 0xfe, 0x05, 0xb4, 0xea, 0x77, 0x7f, 0x5c, 0x85, 0x83, 0x3d, 0x8f, + 0xf4, 0x9c, 0x32, 0x4e, 0x42, 0xaa, 0xcf, 0xed, 0xd7, 0x37, 0xcf, 0x0a, 0x46, 0x9b, 0x9d, 0x60, + 0xdb, 0x9d, 0x60, 0xdf, 0x3b, 0xc1, 0xd6, 0x7b, 0x61, 0x6d, 0xf7, 0xc2, 0xfa, 0xd8, 0x0b, 0xeb, + 0xde, 0x2d, 0x45, 0xf4, 0xf4, 0x51, 0xa6, 0x58, 0xfd, 0x6d, 0xf3, 0xb2, 0x50, 0x3a, 0xfe, 0x47, + 0x07, 0x38, 0xfb, 0x09, 0x00, 0x00, 0xff, 0xff, 0x67, 0xb7, 0x0d, 0x59, 0x0a, 0x02, 0x00, 0x00, +} + +func (m *CommitInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommitInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + n1, err1 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp):]) + if err1 != nil { + return 0, err1 + } + i -= n1 + i = encodeVarintCommitInfo(dAtA, i, uint64(n1)) + i-- + dAtA[i] = 0x1a + if len(m.StoreInfos) > 0 { + for iNdEx := len(m.StoreInfos) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.StoreInfos[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCommitInfo(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Version != 0 { + i = encodeVarintCommitInfo(dAtA, i, uint64(m.Version)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StoreInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StoreInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StoreInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.CommitId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCommitInfo(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], 
m.Name) + i = encodeVarintCommitInfo(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CommitID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommitID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommitID) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintCommitInfo(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x12 + } + if m.Version != 0 { + i = encodeVarintCommitInfo(dAtA, i, uint64(m.Version)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintCommitInfo(dAtA []byte, offset int, v uint64) int { + offset -= sovCommitInfo(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CommitInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Version != 0 { + n += 1 + sovCommitInfo(uint64(m.Version)) + } + if len(m.StoreInfos) > 0 { + for _, e := range m.StoreInfos { + l = e.Size() + n += 1 + l + sovCommitInfo(uint64(l)) + } + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovCommitInfo(uint64(l)) + return n +} + +func (m *StoreInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovCommitInfo(uint64(l)) + } + l = m.CommitId.Size() + n += 1 + l + sovCommitInfo(uint64(l)) + return n +} + +func (m *CommitID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Version != 0 { + n += 1 + sovCommitInfo(uint64(m.Version)) + } + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovCommitInfo(uint64(l)) + } + return n +} + +func sovCommitInfo(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozCommitInfo(x uint64) (n int) { + return sovCommitInfo(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *CommitInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommitInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommitInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommitInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommitInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoreInfos", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommitInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCommitInfo + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCommitInfo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StoreInfos = append(m.StoreInfos, StoreInfo{}) + if err := m.StoreInfos[len(m.StoreInfos)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommitInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCommitInfo + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCommitInfo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCommitInfo(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCommitInfo + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StoreInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommitInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StoreInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StoreInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommitInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCommitInfo + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCommitInfo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommitId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommitInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCommitInfo + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCommitInfo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CommitId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + 
return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCommitInfo(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCommitInfo + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommitID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommitInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommitID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommitID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommitInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommitInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCommitInfo + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCommitInfo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCommitInfo(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCommitInfo + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipCommitInfo(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCommitInfo + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCommitInfo + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCommitInfo + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthCommitInfo + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupCommitInfo + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthCommitInfo + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthCommitInfo = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowCommitInfo = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupCommitInfo = fmt.Errorf("proto: unexpected end of group") +) diff --git a/cosmos-sdk-store/types/context.go b/cosmos-sdk-store/types/context.go new file mode 100755 index 000000000..2daccef91 --- /dev/null +++ b/cosmos-sdk-store/types/context.go @@ -0,0 +1,13 @@ +package types + +import ( + "cosmossdk.io/log" +) + +// Context is an interface used by an App to pass context information +// needed to process store streaming requests. +type Context interface { + BlockHeight() int64 + Logger() log.Logger + StreamingManager() StreamingManager +} diff --git a/cosmos-sdk-store/types/errors.go b/cosmos-sdk-store/types/errors.go new file mode 100755 index 000000000..db86a3cc6 --- /dev/null +++ b/cosmos-sdk-store/types/errors.go @@ -0,0 +1,28 @@ +package types + +import ( + "cosmossdk.io/errors" +) + +const StoreCodespace = "store" + +var ( + // ErrInvalidProof is returned when a proof is invalid + ErrInvalidProof = errors.Register(StoreCodespace, 2, "invalid proof") + // ErrTxDecode is returned if we cannot parse a transaction + ErrTxDecode = errors.Register(StoreCodespace, 3, "tx parse error") + + // ErrUnknownRequest to doc + ErrUnknownRequest = errors.Register(StoreCodespace, 4, "unknown request") + + // ErrLogic defines an internal logic error, e.g. an invariant or assertion + // that is violated. It is a programmer error, not a user-facing error. + ErrLogic = errors.Register(StoreCodespace, 5, "internal logic error") + + // ErrConflict defines a conflict error, e.g. 
when two goroutines try to access
+	// the same resource and one of them fails.
+	ErrConflict = errors.Register(StoreCodespace, 6, "conflict")
+	// ErrInvalidRequest defines an ABCI typed error where the request contains
+	// invalid data.
+	ErrInvalidRequest = errors.Register(StoreCodespace, 7, "invalid request")
+)
diff --git a/cosmos-sdk-store/types/gas.go b/cosmos-sdk-store/types/gas.go
new file mode 100755
index 000000000..644844cf1
--- /dev/null
+++ b/cosmos-sdk-store/types/gas.go
@@ -0,0 +1,255 @@
+package types
+
+import (
+	"fmt"
+	"math"
+)
+
+// Gas consumption descriptors.
+const (
+	GasIterNextCostFlatDesc = "IterNextFlat"
+	GasValuePerByteDesc     = "ValuePerByte"
+	GasWritePerByteDesc     = "WritePerByte"
+	GasReadPerByteDesc      = "ReadPerByte"
+	GasWriteCostFlatDesc    = "WriteFlat"
+	GasReadCostFlatDesc     = "ReadFlat"
+	GasHasDesc              = "Has"
+	GasDeleteDesc           = "Delete"
+)
+
+// Gas is the unit of gas measured by the SDK.
+type Gas = uint64
+
+// ErrorNegativeGasConsumed defines an error thrown when the amount of gas refunded
+// results in a negative gas consumed amount.
+type ErrorNegativeGasConsumed struct {
+	Descriptor string
+}
+
+// ErrorOutOfGas defines an error thrown when an action results in out of gas.
+type ErrorOutOfGas struct {
+	Descriptor string
+}
+
+// ErrorGasOverflow defines an error thrown when an action causes gas
+// consumption to overflow an unsigned integer.
+type ErrorGasOverflow struct {
+	Descriptor string
+}
+
+// GasMeter defines an interface for tracking gas consumption.
+type GasMeter interface {
+	GasConsumed() Gas
+	GasConsumedToLimit() Gas
+	GasRemaining() Gas
+	Limit() Gas
+	ConsumeGas(amount Gas, descriptor string)
+	RefundGas(amount Gas, descriptor string)
+	IsPastLimit() bool
+	IsOutOfGas() bool
+	String() string
+}
+
+type basicGasMeter struct {
+	limit    Gas
+	consumed Gas
+}
+
+// NewGasMeter returns a reference to a new basicGasMeter.
+func NewGasMeter(limit Gas) GasMeter {
+	return &basicGasMeter{
+		limit:    limit,
+		consumed: 0,
+	}
+}
+
+// GasConsumed returns the gas consumed from the GasMeter.
+func (g *basicGasMeter) GasConsumed() Gas {
+	return g.consumed
+}
+
+// GasRemaining returns the gas left in the GasMeter.
+func (g *basicGasMeter) GasRemaining() Gas {
+	if g.IsPastLimit() {
+		return 0
+	}
+	return g.limit - g.consumed
+}
+
+// Limit returns the gas limit of the GasMeter.
+func (g *basicGasMeter) Limit() Gas {
+	return g.limit
+}
+
+// GasConsumedToLimit returns the gas limit if gas consumed is past the limit,
+// otherwise it returns the consumed gas.
+//
+// NOTE: This behavior is only called when recovering from panic when
+// BlockGasMeter consumes gas past the limit.
+func (g *basicGasMeter) GasConsumedToLimit() Gas {
+	if g.IsPastLimit() {
+		return g.limit
+	}
+	return g.consumed
+}
+
+// addUint64Overflow performs the addition operation on two uint64 integers and
+// returns a boolean on whether or not the result overflows.
+func addUint64Overflow(a, b uint64) (uint64, bool) {
+	if math.MaxUint64-a < b {
+		return 0, true
+	}
+
+	return a + b, false
+}
+
+// ConsumeGas adds the given amount of gas to the gas consumed and panics if it
+// overflows the gas counter or exceeds the limit.
+func (g *basicGasMeter) ConsumeGas(amount Gas, descriptor string) {
+	var overflow bool
+	g.consumed, overflow = addUint64Overflow(g.consumed, amount)
+	if overflow {
+		g.consumed = math.MaxUint64
+		panic(ErrorGasOverflow{descriptor})
+	}
+
+	if g.consumed > g.limit {
+		panic(ErrorOutOfGas{descriptor})
+	}
+}
+
+// RefundGas will deduct the given amount from the gas consumed. If the amount
+// is greater than the gas consumed, the function will panic.
+//
+// Use case: This functionality enables refunding gas to the transaction or block gas pools so that
+// EVM-compatible chains can fully support the go-ethereum StateDB interface.
+// See https://github.com/cosmos/cosmos-sdk/pull/9403 for reference.
+func (g *basicGasMeter) RefundGas(amount Gas, descriptor string) {
+	if g.consumed < amount {
+		panic(ErrorNegativeGasConsumed{Descriptor: descriptor})
+	}
+
+	g.consumed -= amount
+}
+
+// IsPastLimit returns true if gas consumed is past limit, otherwise it returns false.
+func (g *basicGasMeter) IsPastLimit() bool {
+	return g.consumed > g.limit
+}
+
+// IsOutOfGas returns true if gas consumed is greater than or equal to gas limit, otherwise it returns false.
+func (g *basicGasMeter) IsOutOfGas() bool {
+	return g.consumed >= g.limit
+}
+
+// String returns the BasicGasMeter's gas limit and gas consumed.
+func (g *basicGasMeter) String() string {
+	return fmt.Sprintf("BasicGasMeter:\n  limit: %d\n  consumed: %d", g.limit, g.consumed)
+}
+
+type infiniteGasMeter struct {
+	consumed Gas
+}
+
+// NewInfiniteGasMeter returns a new gas meter without a limit.
+func NewInfiniteGasMeter() GasMeter {
+	return &infiniteGasMeter{
+		consumed: 0,
+	}
+}
+
+// GasConsumed returns the gas consumed from the GasMeter.
+func (g *infiniteGasMeter) GasConsumed() Gas {
+	return g.consumed
+}
+
+// GasConsumedToLimit returns the gas consumed from the GasMeter since the gas is not confined to a limit.
+// NOTE: This behavior is only called when recovering from panic when BlockGasMeter consumes gas past the limit.
+func (g *infiniteGasMeter) GasConsumedToLimit() Gas {
+	return g.consumed
+}
+
+// GasRemaining returns MaxUint64 since the limit is not confined in infiniteGasMeter.
+func (g *infiniteGasMeter) GasRemaining() Gas {
+	return math.MaxUint64
+}
+
+// Limit returns MaxUint64 since the limit is not confined in infiniteGasMeter.
+func (g *infiniteGasMeter) Limit() Gas {
+	return math.MaxUint64
+}
+
+// ConsumeGas adds the given amount of gas to the gas consumed and panics on
+// integer overflow.
+func (g *infiniteGasMeter) ConsumeGas(amount Gas, descriptor string) {
+	var overflow bool
+	// TODO: Should we set the consumed field after overflow checking?
+	g.consumed, overflow = addUint64Overflow(g.consumed, amount)
+	if overflow {
+		panic(ErrorGasOverflow{descriptor})
+	}
+}
+
+// RefundGas will deduct the given amount from the gas consumed. If the amount
+// is greater than the gas consumed, the function will panic.
+//
+// Use case: This functionality enables refunding gas to the transaction or block gas pools so that
+// EVM-compatible chains can fully support the go-ethereum StateDB interface.
+// See https://github.com/cosmos/cosmos-sdk/pull/9403 for reference.
+func (g *infiniteGasMeter) RefundGas(amount Gas, descriptor string) {
+	if g.consumed < amount {
+		panic(ErrorNegativeGasConsumed{Descriptor: descriptor})
+	}
+
+	g.consumed -= amount
+}
+
+// IsPastLimit returns false since the gas limit is not confined.
+func (g *infiniteGasMeter) IsPastLimit() bool {
+	return false
+}
+
+// IsOutOfGas returns false since the gas limit is not confined.
+func (g *infiniteGasMeter) IsOutOfGas() bool {
+	return false
+}
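Taken together, the two meters differ only in limit handling: the basic meter panics past its limit, the infinite meter only on integer overflow, and refunds simply decrement the counter. A minimal usage sketch (standalone, not part of the diff):

```go
package main

import (
	"fmt"

	storetypes "cosmossdk.io/store/types"
)

func main() {
	meter := storetypes.NewGasMeter(100)
	meter.ConsumeGas(60, "read")
	meter.RefundGas(10, "partial refund")
	fmt.Println(meter.GasConsumed(), meter.GasRemaining()) // 50 50

	// Crossing the limit panics with ErrorOutOfGas; callers recover and
	// inspect the descriptor to report what ran out of gas.
	defer func() {
		if oog, ok := recover().(storetypes.ErrorOutOfGas); ok {
			fmt.Println("out of gas during:", oog.Descriptor)
		}
	}()
	meter.ConsumeGas(60, "write")
}
```

+// String returns the InfiniteGasMeter's gas consumed.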
+func (g *infiniteGasMeter) String() string { + return fmt.Sprintf("InfiniteGasMeter:\n consumed: %d", g.consumed) +} + +// GasConfig defines gas cost for each operation on KVStores +type GasConfig struct { + HasCost Gas + DeleteCost Gas + ReadCostFlat Gas + ReadCostPerByte Gas + WriteCostFlat Gas + WriteCostPerByte Gas + IterNextCostFlat Gas +} + +// KVGasConfig returns a default gas config for KVStores. +func KVGasConfig() GasConfig { + return GasConfig{ + HasCost: 100, + DeleteCost: 100, + ReadCostFlat: 100, + ReadCostPerByte: 1, + WriteCostFlat: 200, + WriteCostPerByte: 5, + IterNextCostFlat: 5, + } +} + +// TransientGasConfig returns a default gas config for TransientStores. +func TransientGasConfig() GasConfig { + return GasConfig{ + HasCost: 10, + DeleteCost: 10, + ReadCostFlat: 10, + ReadCostPerByte: 0, + WriteCostFlat: 20, + WriteCostPerByte: 1, + IterNextCostFlat: 1, + } +} diff --git a/cosmos-sdk-store/types/gas_test.go b/cosmos-sdk-store/types/gas_test.go new file mode 100755 index 000000000..f4b5a6abe --- /dev/null +++ b/cosmos-sdk-store/types/gas_test.go @@ -0,0 +1,123 @@ +package types + +import ( + "math" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestInfiniteGasMeter(t *testing.T) { + t.Parallel() + meter := NewInfiniteGasMeter() + require.Equal(t, uint64(math.MaxUint64), meter.Limit()) + require.Equal(t, uint64(math.MaxUint64), meter.GasRemaining()) + require.Equal(t, uint64(0), meter.GasConsumed()) + require.Equal(t, uint64(0), meter.GasConsumedToLimit()) + meter.ConsumeGas(10, "consume 10") + require.Equal(t, uint64(math.MaxUint64), meter.GasRemaining()) + require.Equal(t, uint64(10), meter.GasConsumed()) + require.Equal(t, uint64(10), meter.GasConsumedToLimit()) + meter.RefundGas(1, "refund 1") + require.Equal(t, uint64(math.MaxUint64), meter.GasRemaining()) + require.Equal(t, uint64(9), meter.GasConsumed()) + require.False(t, meter.IsPastLimit()) + require.False(t, meter.IsOutOfGas()) + meter.ConsumeGas(Gas(math.MaxUint64/2), "consume half max uint64") + require.Panics(t, func() { meter.ConsumeGas(Gas(math.MaxUint64/2)+2, "panic") }) + require.Panics(t, func() { meter.RefundGas(meter.GasConsumed()+1, "refund greater than consumed") }) +} + +func TestGasMeter(t *testing.T) { + t.Parallel() + cases := []struct { + limit Gas + usage []Gas + }{ + {10, []Gas{1, 2, 3, 4}}, + {1000, []Gas{40, 30, 20, 10, 900}}, + {100000, []Gas{99999, 1}}, + {100000000, []Gas{50000000, 40000000, 10000000}}, + {65535, []Gas{32768, 32767}}, + {65536, []Gas{32768, 32767, 1}}, + } + + for tcnum, tc := range cases { + meter := NewGasMeter(tc.limit) + used := uint64(0) + + for unum, usage := range tc.usage { + usage := usage + used += usage + require.NotPanics(t, func() { meter.ConsumeGas(usage, "") }, "Not exceeded limit but panicked. tc #%d, usage #%d", tcnum, unum) + require.Equal(t, used, meter.GasConsumed(), "Gas consumption not match. tc #%d, usage #%d", tcnum, unum) + require.Equal(t, tc.limit-used, meter.GasRemaining(), "Gas left not match. tc #%d, usage #%d", tcnum, unum) + require.Equal(t, used, meter.GasConsumedToLimit(), "Gas consumption (to limit) not match. 
tc #%d, usage #%d", tcnum, unum) + require.False(t, meter.IsPastLimit(), "Not exceeded limit but got IsPastLimit() true") + if unum < len(tc.usage)-1 { + require.False(t, meter.IsOutOfGas(), "Not yet at limit but got IsOutOfGas() true") + } else { + require.True(t, meter.IsOutOfGas(), "At limit but got IsOutOfGas() false") + } + } + + require.Panics(t, func() { meter.ConsumeGas(1, "") }, "Exceeded but not panicked. tc #%d", tcnum) + require.Equal(t, meter.GasConsumedToLimit(), meter.Limit(), "Gas consumption (to limit) not match limit") + require.Equal(t, meter.GasConsumed(), meter.Limit()+1, "Gas consumption not match limit+1") + require.Equal(t, uint64(0), meter.GasRemaining()) + + require.NotPanics(t, func() { meter.RefundGas(1, "refund 1") }) + require.Equal(t, meter.GasConsumed(), meter.Limit(), "Gas consumption not match with limit") + require.Equal(t, uint64(0), meter.GasRemaining()) + require.Panics(t, func() { meter.RefundGas(meter.GasConsumed()+1, "refund greater than consumed") }) + + require.NotPanics(t, func() { meter.RefundGas(meter.GasConsumed(), "refund consumed gas") }) + require.Equal(t, meter.Limit(), meter.GasRemaining()) + + meter2 := NewGasMeter(math.MaxUint64) + require.Equal(t, uint64(math.MaxUint64), meter2.GasRemaining()) + meter2.ConsumeGas(Gas(math.MaxUint64/2), "consume half max uint64") + require.Equal(t, Gas(math.MaxUint64-(math.MaxUint64/2)), meter2.GasRemaining()) + require.Panics(t, func() { meter2.ConsumeGas(Gas(math.MaxUint64/2)+2, "panic") }) + } +} + +func TestAddUint64Overflow(t *testing.T) { + t.Parallel() + testCases := []struct { + a, b uint64 + result uint64 + overflow bool + }{ + {0, 0, 0, false}, + {100, 100, 200, false}, + {math.MaxUint64 / 2, math.MaxUint64/2 + 1, math.MaxUint64, false}, + {math.MaxUint64 / 2, math.MaxUint64/2 + 2, 0, true}, + } + + for i, tc := range testCases { + res, overflow := addUint64Overflow(tc.a, tc.b) + require.Equal( + t, tc.overflow, overflow, + "invalid overflow result; tc: #%d, a: %d, b: %d", i, tc.a, tc.b, + ) + require.Equal( + t, tc.result, res, + "invalid uint64 result; tc: #%d, a: %d, b: %d", i, tc.a, tc.b, + ) + } +} + +func TestTransientGasConfig(t *testing.T) { + t.Parallel() + config := TransientGasConfig() + require.Equal(t, config, GasConfig{ + HasCost: 100, + DeleteCost: 100, + ReadCostFlat: 100, + ReadCostPerByte: 0, + WriteCostFlat: 200, + WriteCostPerByte: 3, + IterNextCostFlat: 3, + }) +} diff --git a/cosmos-sdk-store/types/iterator.go b/cosmos-sdk-store/types/iterator.go new file mode 100755 index 000000000..a328e87a6 --- /dev/null +++ b/cosmos-sdk-store/types/iterator.go @@ -0,0 +1,60 @@ +package types + +import ( + "fmt" +) + +// KVStorePrefixIteratorPaginated returns iterator over items in the selected page. +// Items iterated and skipped in ascending order. +func KVStorePrefixIteratorPaginated(kvs KVStore, prefix []byte, page, limit uint) Iterator { + pi := &PaginatedIterator{ + Iterator: KVStorePrefixIterator(kvs, prefix), + page: page, + limit: limit, + } + pi.skip() + return pi +} + +// KVStoreReversePrefixIteratorPaginated returns iterator over items in the selected page. +// Items iterated and skipped in descending order. +func KVStoreReversePrefixIteratorPaginated(kvs KVStore, prefix []byte, page, limit uint) Iterator { + pi := &PaginatedIterator{ + Iterator: KVStoreReversePrefixIterator(kvs, prefix), + page: page, + limit: limit, + } + pi.skip() + return pi +} + +// PaginatedIterator is a wrapper around Iterator that iterates over values starting for given page and limit. 
+type PaginatedIterator struct { + Iterator + + page, limit uint // provided during initialization + iterated uint // incremented in a call to Next +} + +func (pi *PaginatedIterator) skip() { + for i := (pi.page - 1) * pi.limit; i > 0 && pi.Iterator.Valid(); i-- { + pi.Iterator.Next() + } +} + +// Next will panic after limit is reached. +func (pi *PaginatedIterator) Next() { + if !pi.Valid() { + panic(fmt.Sprintf("PaginatedIterator reached limit %d", pi.limit)) + } + pi.Iterator.Next() + pi.iterated++ +} + +// Valid if below limit and underlying iterator is valid. +func (pi *PaginatedIterator) Valid() bool { + if pi.iterated >= pi.limit { + return false + } + return pi.Iterator.Valid() +} diff --git a/cosmos-sdk-store/types/iterator_test.go b/cosmos-sdk-store/types/iterator_test.go new file mode 100755 index 000000000..a804b092c --- /dev/null +++ b/cosmos-sdk-store/types/iterator_test.go @@ -0,0 +1,122 @@ +package types_test + +import ( + "testing" + + dbm "github.com/cosmos/cosmos-db" + "github.com/stretchr/testify/require" + + "cosmossdk.io/log" + "cosmossdk.io/store/iavl" + "cosmossdk.io/store/metrics" + "cosmossdk.io/store/types" +) + +func newMemTestKVStore(t *testing.T) types.KVStore { + t.Helper() + db := dbm.NewMemDB() + store, err := iavl.LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), types.CommitID{}, iavl.DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics()) + require.NoError(t, err) + return store +} + +func TestPaginatedIterator(t *testing.T) { + kvs := newMemTestKVStore(t) + total := 10 + lth := total - 1 + asc := make([][]byte, total) + desc := make([][]byte, total) + // store returns values in lexicographic order (or reverse lex order) + for i := 0; i < total; i++ { + key := []byte{byte(i)} + kvs.Set(key, key) + asc[i] = key + desc[lth-i] = key + } + type testCase struct { + desc string + page, limit uint + result [][]byte + reverse bool + } + for _, tc := range []testCase{ + { + desc: "FirstChunk", + page: 1, + limit: 4, + result: asc[:4], + }, + { + desc: "SecondChunk", + page: 2, + limit: 4, + result: asc[4:8], + }, + { + desc: "ThirdChunkHalf", + page: 3, + limit: 4, + result: asc[8:], + }, + { + desc: "OverLimit", + page: 10, + limit: 10, + result: [][]byte{}, + }, + { + desc: "ZeroLimit", + page: 1, + result: [][]byte{}, + }, + { + desc: "ReverseFirstChunk", + page: 1, + limit: 6, + result: desc[:6], + reverse: true, + }, + { + desc: "ReverseSecondChunk", + page: 2, + limit: 6, + result: desc[6:], + reverse: true, + }, + } { + tc := tc + t.Run(tc.desc, func(t *testing.T) { + var iter types.Iterator + if tc.reverse { + iter = types.KVStoreReversePrefixIteratorPaginated(kvs, nil, tc.page, tc.limit) + } else { + iter = types.KVStorePrefixIteratorPaginated(kvs, nil, tc.page, tc.limit) + } + defer iter.Close() + + result := [][]byte{} + for ; iter.Valid(); iter.Next() { + result = append(result, iter.Key()) + } + + require.Equal(t, tc.result, result) + require.False(t, iter.Valid()) + }) + } +} + +func TestPaginatedIteratorPanicIfInvalid(t *testing.T) { + kvs := newMemTestKVStore(t) + + iter := types.KVStorePrefixIteratorPaginated(kvs, nil, 1, 1) + defer iter.Close() + require.False(t, iter.Valid()) + require.Panics(t, func() { iter.Next() }) // "iterator is empty" + + kvs.Set([]byte{1}, []byte{}) + + iter = types.KVStorePrefixIteratorPaginated(kvs, nil, 1, 0) + defer iter.Close() + require.False(t, iter.Valid()) + require.Panics(t, func() { iter.Next() }) // "not empty but limit is zero" +} diff --git a/cosmos-sdk-store/types/listening.go 
b/cosmos-sdk-store/types/listening.go
new file mode 100755
index 000000000..75828793f
--- /dev/null
+++ b/cosmos-sdk-store/types/listening.go
@@ -0,0 +1,28 @@
+package types
+
+// MemoryListener listens to state writes and accumulates the records in memory.
+type MemoryListener struct {
+	stateCache []*StoreKVPair
+}
+
+// NewMemoryListener creates a listener that accumulates state writes in memory.
+func NewMemoryListener() *MemoryListener {
+	return &MemoryListener{}
+}
+
+// OnWrite records a single state write in the listener's cache.
+func (fl *MemoryListener) OnWrite(storeKey StoreKey, key, value []byte, delete bool) {
+	fl.stateCache = append(fl.stateCache, &StoreKVPair{
+		StoreKey: storeKey.Name(),
+		Delete:   delete,
+		Key:      key,
+		Value:    value,
+	})
+}
+
+// PopStateCache returns the currently cached state writes and resets the cache to nil.
+func (fl *MemoryListener) PopStateCache() []*StoreKVPair {
+	res := fl.stateCache
+	fl.stateCache = nil
+	return res
+}
diff --git a/cosmos-sdk-store/types/listening.pb.go b/cosmos-sdk-store/types/listening.pb.go
new file mode 100755
index 000000000..aab2ad57e
--- /dev/null
+++ b/cosmos-sdk-store/types/listening.pb.go
@@ -0,0 +1,784 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: cosmos/store/v1beta1/listening.proto
+
+package types
+
+import (
+	fmt "fmt"
+	types "github.com/cometbft/cometbft/abci/types"
+	proto "github.com/cosmos/gogoproto/proto"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// StoreKVPair is a KVStore KVPair used for listening to state changes (Sets and Deletes) +// It optionally includes the StoreKey for the originating KVStore and a Boolean flag to distinguish between Sets and +// Deletes +// +// Since: cosmos-sdk 0.43 +type StoreKVPair struct { + StoreKey string `protobuf:"bytes,1,opt,name=store_key,json=storeKey,proto3" json:"store_key,omitempty"` + Delete bool `protobuf:"varint,2,opt,name=delete,proto3" json:"delete,omitempty"` + Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *StoreKVPair) Reset() { *m = StoreKVPair{} } +func (m *StoreKVPair) String() string { return proto.CompactTextString(m) } +func (*StoreKVPair) ProtoMessage() {} +func (*StoreKVPair) Descriptor() ([]byte, []int) { + return fileDescriptor_b6caeb9d7b7c7c10, []int{0} +} +func (m *StoreKVPair) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StoreKVPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StoreKVPair.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StoreKVPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_StoreKVPair.Merge(m, src) +} +func (m *StoreKVPair) XXX_Size() int { + return m.Size() +} +func (m *StoreKVPair) XXX_DiscardUnknown() { + xxx_messageInfo_StoreKVPair.DiscardUnknown(m) +} + +var xxx_messageInfo_StoreKVPair proto.InternalMessageInfo + +func (m *StoreKVPair) GetStoreKey() string { + if m != nil { + return m.StoreKey + } + return "" +} + +func (m *StoreKVPair) GetDelete() bool { + if m != nil { + return m.Delete + } + return false +} + +func (m *StoreKVPair) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *StoreKVPair) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +// BlockMetadata contains all the abci event data of a block +// the file streamer dump them into files together with the state changes. 
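An aside before the generated BlockMetadata type below: the MemoryListener from listening.go simply buffers these StoreKVPair records until drained. A minimal sketch:

```go
package main

import (
	"fmt"

	storetypes "cosmossdk.io/store/types"
)

func main() {
	l := storetypes.NewMemoryListener()
	key := storetypes.NewKVStoreKey("bank")

	l.OnWrite(key, []byte("balance/alice"), []byte("100"), false)
	l.OnWrite(key, []byte("stale"), nil, true) // a delete

	// PopStateCache drains the buffered writes and resets the listener.
	for _, pair := range l.PopStateCache() {
		fmt.Println(pair.StoreKey, string(pair.Key), pair.Delete)
	}
	fmt.Println(len(l.PopStateCache())) // 0
}
```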
+type BlockMetadata struct { + ResponseCommit *types.ResponseCommit `protobuf:"bytes,6,opt,name=response_commit,json=responseCommit,proto3" json:"response_commit,omitempty"` + RequestFinalizeBlock *types.RequestFinalizeBlock `protobuf:"bytes,7,opt,name=request_finalize_block,json=requestFinalizeBlock,proto3" json:"request_finalize_block,omitempty"` + ResponseFinalizeBlock *types.ResponseFinalizeBlock `protobuf:"bytes,8,opt,name=response_finalize_block,json=responseFinalizeBlock,proto3" json:"response_finalize_block,omitempty"` +} + +func (m *BlockMetadata) Reset() { *m = BlockMetadata{} } +func (m *BlockMetadata) String() string { return proto.CompactTextString(m) } +func (*BlockMetadata) ProtoMessage() {} +func (*BlockMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_b6caeb9d7b7c7c10, []int{1} +} +func (m *BlockMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlockMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockMetadata.Merge(m, src) +} +func (m *BlockMetadata) XXX_Size() int { + return m.Size() +} +func (m *BlockMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_BlockMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockMetadata proto.InternalMessageInfo + +func (m *BlockMetadata) GetResponseCommit() *types.ResponseCommit { + if m != nil { + return m.ResponseCommit + } + return nil +} + +func (m *BlockMetadata) GetRequestFinalizeBlock() *types.RequestFinalizeBlock { + if m != nil { + return m.RequestFinalizeBlock + } + return nil +} + +func (m *BlockMetadata) GetResponseFinalizeBlock() *types.ResponseFinalizeBlock { + if m != nil { + return m.ResponseFinalizeBlock + } + return nil +} + +func init() { + proto.RegisterType((*StoreKVPair)(nil), "cosmos.store.v1beta1.StoreKVPair") + proto.RegisterType((*BlockMetadata)(nil), "cosmos.store.v1beta1.BlockMetadata") +} + +func init() { + proto.RegisterFile("cosmos/store/v1beta1/listening.proto", fileDescriptor_b6caeb9d7b7c7c10) +} + +var fileDescriptor_b6caeb9d7b7c7c10 = []byte{ + // 374 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xdf, 0x8a, 0xd3, 0x40, + 0x18, 0xc5, 0x3b, 0xc9, 0x34, 0xce, 0xce, 0xfa, 0x27, 0x0c, 0x75, 0x0d, 0x2e, 0xc4, 0xb0, 0xa8, + 0xe4, 0x2a, 0x61, 0xd7, 0x37, 0xa8, 0x20, 0x92, 0x22, 0x48, 0x04, 0x2f, 0x14, 0x0c, 0x93, 0xe4, + 0x53, 0xc6, 0x26, 0x99, 0x3a, 0x33, 0x2d, 0xd4, 0xa7, 0xf0, 0x61, 0x7c, 0x08, 0x2f, 0x7b, 0xe9, + 0xa5, 0xb4, 0x2f, 0x22, 0x99, 0x04, 0xa5, 0x52, 0xef, 0x72, 0x0e, 0xbf, 0xef, 0xe4, 0xc0, 0x19, + 0xfa, 0xb8, 0x92, 0xba, 0x95, 0x3a, 0xd5, 0x46, 0x2a, 0x48, 0x37, 0xd7, 0x25, 0x18, 0x7e, 0x9d, + 0x36, 0x42, 0x1b, 0xe8, 0x44, 0xf7, 0x29, 0x59, 0x29, 0x69, 0x24, 0x9b, 0x0d, 0x54, 0x62, 0xa9, + 0x64, 0xa4, 0x1e, 0x5e, 0x1a, 0xe8, 0x6a, 0x50, 0xad, 0xe8, 0x4c, 0xca, 0xcb, 0x4a, 0xa4, 0x66, + 0xbb, 0x02, 0x3d, 0x9c, 0x5c, 0x7d, 0xa6, 0xe7, 0x6f, 0x7a, 0x7a, 0xf1, 0xf6, 0x35, 0x17, 0x8a, + 0x5d, 0xd2, 0x33, 0x7b, 0x5c, 0x2c, 0x61, 0x1b, 0xa0, 0x08, 0xc5, 0x67, 0x39, 0xb1, 0xc6, 0x02, + 0xb6, 0xec, 0x82, 0x7a, 0x35, 0x34, 0x60, 0x20, 0x70, 0x22, 0x14, 0x93, 0x7c, 0x54, 0xcc, 0xa7, + 0x6e, 0x8f, 0xbb, 0x11, 0x8a, 0x6f, 0xe7, 0xfd, 0x27, 0x9b, 0xd1, 0xe9, 0x86, 0x37, 0x6b, 0x08, + 
0xb0, 0xf5, 0x06, 0x71, 0xf5, 0xdd, 0xa1, 0x77, 0xe6, 0x8d, 0xac, 0x96, 0xaf, 0xc0, 0xf0, 0x9a, + 0x1b, 0xce, 0x5e, 0xd2, 0x7b, 0x0a, 0xf4, 0x4a, 0x76, 0x1a, 0x8a, 0x4a, 0xb6, 0xad, 0x30, 0x81, + 0x17, 0xa1, 0xf8, 0xfc, 0xe6, 0x51, 0xf2, 0xb7, 0x74, 0xd2, 0x97, 0x4e, 0xf2, 0x91, 0x7b, 0x6e, + 0xb1, 0xfc, 0xae, 0x3a, 0xd2, 0xec, 0x3d, 0xbd, 0x50, 0xf0, 0x65, 0x0d, 0xda, 0x14, 0x1f, 0x45, + 0xc7, 0x1b, 0xf1, 0x15, 0x8a, 0xb2, 0xff, 0x57, 0x70, 0xcb, 0x06, 0x3e, 0x39, 0x11, 0x68, 0xf1, + 0x17, 0x23, 0x6d, 0x8b, 0xe5, 0x33, 0x75, 0xc2, 0x65, 0x1f, 0xe8, 0x83, 0x3f, 0x35, 0xff, 0x49, + 0x27, 0x36, 0xfd, 0xe9, 0x7f, 0xeb, 0x1e, 0xc7, 0xdf, 0x57, 0xa7, 0xec, 0x0c, 0x13, 0xe4, 0x3b, + 0x19, 0x26, 0x8e, 0xef, 0x66, 0x98, 0xb8, 0x3e, 0xce, 0x30, 0xc1, 0xfe, 0x34, 0xc3, 0x64, 0xea, + 0x7b, 0xf3, 0x9b, 0x1f, 0xfb, 0x10, 0xed, 0xf6, 0x21, 0xfa, 0xb5, 0x0f, 0xd1, 0xb7, 0x43, 0x38, + 0xd9, 0x1d, 0xc2, 0xc9, 0xcf, 0x43, 0x38, 0x79, 0x17, 0x0c, 0x7b, 0xeb, 0x7a, 0x99, 0x08, 0x39, + 0xbe, 0x0d, 0x3b, 0x6e, 0xe9, 0xd9, 0x75, 0x9f, 0xfd, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x68, 0x61, + 0xc9, 0x0c, 0x38, 0x02, 0x00, 0x00, +} + +func (m *StoreKVPair) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StoreKVPair) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StoreKVPair) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintListening(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x22 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintListening(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x1a + } + if m.Delete { + i-- + if m.Delete { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.StoreKey) > 0 { + i -= len(m.StoreKey) + copy(dAtA[i:], m.StoreKey) + i = encodeVarintListening(dAtA, i, uint64(len(m.StoreKey))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BlockMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockMetadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ResponseFinalizeBlock != nil { + { + size, err := m.ResponseFinalizeBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintListening(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.RequestFinalizeBlock != nil { + { + size, err := m.RequestFinalizeBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintListening(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.ResponseCommit != nil { + { + size, err := m.ResponseCommit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintListening(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} + +func encodeVarintListening(dAtA []byte, offset int, v uint64) int { + offset -= sovListening(v) + base := offset + for v >= 
1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *StoreKVPair) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.StoreKey) + if l > 0 { + n += 1 + l + sovListening(uint64(l)) + } + if m.Delete { + n += 2 + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovListening(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovListening(uint64(l)) + } + return n +} + +func (m *BlockMetadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ResponseCommit != nil { + l = m.ResponseCommit.Size() + n += 1 + l + sovListening(uint64(l)) + } + if m.RequestFinalizeBlock != nil { + l = m.RequestFinalizeBlock.Size() + n += 1 + l + sovListening(uint64(l)) + } + if m.ResponseFinalizeBlock != nil { + l = m.ResponseFinalizeBlock.Size() + n += 1 + l + sovListening(uint64(l)) + } + return n +} + +func sovListening(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozListening(x uint64) (n int) { + return sovListening(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *StoreKVPair) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StoreKVPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StoreKVPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoreKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthListening + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthListening + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StoreKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Delete", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Delete = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthListening + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthListening + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthListening + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthListening + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipListening(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthListening + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseCommit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthListening + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthListening + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResponseCommit == nil { + m.ResponseCommit = &types.ResponseCommit{} + } + if err := m.ResponseCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestFinalizeBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthListening + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthListening + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestFinalizeBlock == nil { + m.RequestFinalizeBlock = &types.RequestFinalizeBlock{} + } + if err := m.RequestFinalizeBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseFinalizeBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthListening + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthListening + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResponseFinalizeBlock == nil { + m.ResponseFinalizeBlock = &types.ResponseFinalizeBlock{} + } + if err := m.ResponseFinalizeBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipListening(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthListening + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipListening(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowListening + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowListening + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowListening + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthListening + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupListening + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthListening + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthListening = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowListening = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupListening = fmt.Errorf("proto: unexpected end of group") +) diff --git a/cosmos-sdk-store/types/listening_test.go b/cosmos-sdk-store/types/listening_test.go new file mode 100755 index 000000000..034d2a496 --- /dev/null +++ b/cosmos-sdk-store/types/listening_test.go @@ -0,0 +1,42 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewStoreKVPairWriteListener(t *testing.T) { + listener := NewMemoryListener() + require.IsType(t, &MemoryListener{}, listener) +} + +func TestOnWrite(t *testing.T) { + listener := NewMemoryListener() + + testStoreKey := NewKVStoreKey("test_key") + testKey := []byte("testing123") + testValue := []byte("testing321") + + // test set + listener.OnWrite(testStoreKey, testKey, testValue, false) + outputKVPair := listener.PopStateCache()[0] + expectedOutputKVPair := &StoreKVPair{ + Key: testKey, + Value: testValue, + StoreKey: testStoreKey.Name(), + Delete: false, + } + require.EqualValues(t, expectedOutputKVPair, outputKVPair) + + // test 
delete + listener.OnWrite(testStoreKey, testKey, testValue, true) + outputKVPair = listener.PopStateCache()[0] + expectedOutputKVPair = &StoreKVPair{ + Key: testKey, + Value: testValue, + StoreKey: testStoreKey.Name(), + Delete: true, + } + require.EqualValues(t, expectedOutputKVPair, outputKVPair) +} diff --git a/cosmos-sdk-store/types/proof.go b/cosmos-sdk-store/types/proof.go new file mode 100755 index 000000000..b1f4a115e --- /dev/null +++ b/cosmos-sdk-store/types/proof.go @@ -0,0 +1,174 @@ +package types + +import ( + "fmt" + + "github.com/cometbft/cometbft/crypto/merkle" + cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto" + ics23 "github.com/cosmos/ics23/go" + + errorsmod "cosmossdk.io/errors" + sdkmaps "cosmossdk.io/store/internal/maps" + sdkproofs "cosmossdk.io/store/internal/proofs" +) + +const ( + ProofOpIAVLCommitment = "ics23:iavl" + ProofOpSimpleMerkleCommitment = "ics23:simple" + ProofOpSMTCommitment = "ics23:smt" +) + +// CommitmentOp implements merkle.ProofOperator by wrapping an ics23 CommitmentProof +// It also contains a Key field to determine which key the proof is proving. +// NOTE: CommitmentProof currently can either be ExistenceProof or NonexistenceProof +// +// Type and Spec are classified by the kind of merkle proof it represents allowing +// the code to be reused by more types. Spec is never on the wire, but mapped from type in the code. +type CommitmentOp struct { + Type string + Spec *ics23.ProofSpec + Key []byte + Proof *ics23.CommitmentProof +} + +var _ merkle.ProofOperator = CommitmentOp{} + +func NewIavlCommitmentOp(key []byte, proof *ics23.CommitmentProof) CommitmentOp { + return CommitmentOp{ + Type: ProofOpIAVLCommitment, + Spec: ics23.IavlSpec, + Key: key, + Proof: proof, + } +} + +func NewSimpleMerkleCommitmentOp(key []byte, proof *ics23.CommitmentProof) CommitmentOp { + return CommitmentOp{ + Type: ProofOpSimpleMerkleCommitment, + Spec: ics23.TendermintSpec, + Key: key, + Proof: proof, + } +} + +func NewSmtCommitmentOp(key []byte, proof *ics23.CommitmentProof) CommitmentOp { + return CommitmentOp{ + Type: ProofOpSMTCommitment, + Spec: ics23.SmtSpec, + Key: key, + Proof: proof, + } +} + +// CommitmentOpDecoder takes a merkle.ProofOp and attempts to decode it into a CommitmentOp ProofOperator +// The proofOp.Data is just a marshaled CommitmentProof. The Key of the CommitmentOp is extracted +// from the unmarshalled proof. +func CommitmentOpDecoder(pop cmtprotocrypto.ProofOp) (merkle.ProofOperator, error) { + var spec *ics23.ProofSpec + switch pop.Type { + case ProofOpIAVLCommitment: + spec = ics23.IavlSpec + case ProofOpSimpleMerkleCommitment: + spec = ics23.TendermintSpec + case ProofOpSMTCommitment: + spec = ics23.SmtSpec + default: + return nil, errorsmod.Wrapf(ErrInvalidProof, "unexpected ProofOp.Type; got %s, want supported ics23 subtypes 'ProofOpSimpleMerkleCommitment', 'ProofOpIAVLCommitment', or 'ProofOpSMTCommitment'", pop.Type) + } + + proof := &ics23.CommitmentProof{} + err := proof.Unmarshal(pop.Data) + if err != nil { + return nil, err + } + + op := CommitmentOp{ + Type: pop.Type, + Key: pop.Key, + Spec: spec, + Proof: proof, + } + return op, nil +} + +func (op CommitmentOp) GetKey() []byte { + return op.Key +} + +// Run takes in a list of arguments and attempts to run the proof op against these arguments +// Returns the root wrapped in [][]byte if the proof op succeeds with given args. If not, +// it will return an error. 
+// +// CommitmentOp will accept args of length 1 or length 0 +// If length 1 args is passed in, then CommitmentOp will attempt to prove the existence of the key +// with the value provided by args[0] using the embedded CommitmentProof and return the CommitmentRoot of the proof +// If length 0 args is passed in, then CommitmentOp will attempt to prove the absence of the key +// in the CommitmentOp and return the CommitmentRoot of the proof +func (op CommitmentOp) Run(args [][]byte) ([][]byte, error) { + // calculate root from proof + root, err := op.Proof.Calculate() + if err != nil { + return nil, errorsmod.Wrapf(ErrInvalidProof, "could not calculate root for proof: %v", err) + } + // Only support an existence proof or nonexistence proof (batch proofs currently unsupported) + switch len(args) { + case 0: + // Args are nil, so we verify the absence of the key. + absent := ics23.VerifyNonMembership(op.Spec, root, op.Proof, op.Key) + if !absent { + return nil, errorsmod.Wrapf(ErrInvalidProof, "proof did not verify absence of key: %s", string(op.Key)) + } + + case 1: + // Args is length 1, verify existence of key with value args[0] + if !ics23.VerifyMembership(op.Spec, root, op.Proof, op.Key, args[0]) { + return nil, errorsmod.Wrapf(ErrInvalidProof, "proof did not verify existence of key %s with given value %x", op.Key, args[0]) + } + default: + return nil, errorsmod.Wrapf(ErrInvalidProof, "args must be length 0 or 1, got: %d", len(args)) + } + + return [][]byte{root}, nil +} + +// ProofOp implements ProofOperator interface and converts a CommitmentOp +// into a merkle.ProofOp format that can later be decoded by CommitmentOpDecoder +// back into a CommitmentOp for proof verification +func (op CommitmentOp) ProofOp() cmtprotocrypto.ProofOp { + bz, err := op.Proof.Marshal() + if err != nil { + panic(err.Error()) + } + return cmtprotocrypto.ProofOp{ + Type: op.Type, + Key: op.Key, + Data: bz, + } +} + +// ProofOpFromMap generates a single proof from a map and converts it to a ProofOp. +func ProofOpFromMap(cmap map[string][]byte, storeName string) (ret cmtprotocrypto.ProofOp, err error) { + _, proofs, _ := sdkmaps.ProofsFromMap(cmap) + + proof := proofs[storeName] + if proof == nil { + err = fmt.Errorf("ProofOp for %s but not registered store name", storeName) + return + } + + // convert merkle.SimpleProof to CommitmentProof + existProof, err := sdkproofs.ConvertExistenceProof(proof, []byte(storeName), cmap[storeName]) + if err != nil { + err = fmt.Errorf("could not convert simple proof to existence proof: %w", err) + return + } + + commitmentProof := &ics23.CommitmentProof{ + Proof: &ics23.CommitmentProof_Exist{ + Exist: existProof, + }, + } + + ret = NewSimpleMerkleCommitmentOp([]byte(storeName), commitmentProof).ProofOp() + return +} diff --git a/cosmos-sdk-store/types/store.go b/cosmos-sdk-store/types/store.go new file mode 100755 index 000000000..898017995 --- /dev/null +++ b/cosmos-sdk-store/types/store.go @@ -0,0 +1,534 @@ +package types + +import ( + "fmt" + "io" + + "github.com/cometbft/cometbft/proto/tendermint/crypto" + dbm "github.com/cosmos/cosmos-db" + + "cosmossdk.io/store/metrics" + pruningtypes "cosmossdk.io/store/pruning/types" + snapshottypes "cosmossdk.io/store/snapshots/types" +) + +type Store interface { + GetStoreType() StoreType + CacheWrapper +} + +// something that can persist to disk +type Committer interface { + Commit() CommitID + LastCommitID() CommitID + + // WorkingHash returns the hash of the KVStore's state before commit. 
+	WorkingHash() []byte
+
+	SetPruning(pruningtypes.PruningOptions)
+	GetPruning() pruningtypes.PruningOptions
+}
+
+// Stores of MultiStore must implement CommitStore.
+type CommitStore interface {
+	Committer
+	Store
+}
+
+// Queryable allows a Store to expose internal state to the abci.Query
+// interface. Multistore can route requests to the proper Store.
+//
+// This is an optional, but useful extension to any CommitStore
+type Queryable interface {
+	Query(*RequestQuery) (*ResponseQuery, error)
+}
+
+type RequestQuery struct {
+	Data   []byte
+	Path   string
+	Height int64
+	Prove  bool
+}
+
+type ResponseQuery struct {
+	Code      uint32
+	Log       string
+	Info      string
+	Index     int64
+	Key       []byte
+	Value     []byte
+	ProofOps  *crypto.ProofOps
+	Height    int64
+	Codespace string
+}
+
+//----------------------------------------
+// MultiStore
+
+// StoreUpgrades defines a series of transformations to apply to the multistore db upon load
+type StoreUpgrades struct {
+	Added   []string      `json:"added"`
+	Renamed []StoreRename `json:"renamed"`
+	Deleted []string      `json:"deleted"`
+}
+
+// StoreRename defines a name change of a sub-store.
+// All data previously under a PrefixStore with OldKey will be copied
+// to a PrefixStore with NewKey, then deleted from OldKey store.
+type StoreRename struct {
+	OldKey string `json:"old_key"`
+	NewKey string `json:"new_key"`
+}
+
+// IsAdded returns true if the given key should be added
+func (s *StoreUpgrades) IsAdded(key string) bool {
+	if s == nil {
+		return false
+	}
+	for _, added := range s.Added {
+		if key == added {
+			return true
+		}
+	}
+	return false
+}
+
+// IsDeleted returns true if the given key should be deleted
+func (s *StoreUpgrades) IsDeleted(key string) bool {
+	if s == nil {
+		return false
+	}
+	for _, d := range s.Deleted {
+		if d == key {
+			return true
+		}
+	}
+	return false
+}
+
+// RenamedFrom returns the oldKey if it was renamed
+// Returns "" if it was not renamed
+func (s *StoreUpgrades) RenamedFrom(key string) string {
+	if s == nil {
+		return ""
+	}
+	for _, re := range s.Renamed {
+		if re.NewKey == key {
+			return re.OldKey
+		}
+	}
+	return ""
+}
+
+type MultiStore interface {
+	Store
+
+	// Branches MultiStore into a cached storage object.
+	// NOTE: Caller should probably not call .Write() on each, but
+	// call CacheMultiStore.Write().
+	CacheMultiStore() CacheMultiStore
+
+	// CacheMultiStoreWithVersion branches the underlying MultiStore where
+	// each store is loaded at a specific version (height).
+	CacheMultiStoreWithVersion(version int64) (CacheMultiStore, error)
+
+	// Convenience for fetching substores.
+	// If the store does not exist, panics.
+	GetStore(StoreKey) Store
+	GetKVStore(StoreKey) KVStore
+
+	// TracingEnabled returns if tracing is enabled for the MultiStore.
+	TracingEnabled() bool
+
+	// SetTracer sets the tracer for the MultiStore that the underlying
+	// stores will utilize to trace operations. The modified MultiStore is
+	// returned.
+	SetTracer(w io.Writer) MultiStore
+
+	// SetTracingContext sets the tracing context for a MultiStore. It is
+	// implied that the caller should update the context when necessary between
+	// tracing operations. The modified MultiStore is returned.
+	SetTracingContext(TraceContext) MultiStore
+
+	// LatestVersion returns the latest version in the store
+	LatestVersion() int64
+}
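For illustration, a minimal sketch (helper and names invented, not part of the vendored code) of how a caller typically uses `CacheMultiStoreWithVersion` to read state at an old height:

```go
package example

import (
	storetypes "cosmossdk.io/store/types"
)

// queryAtHeight branches the multistore at `height` and reads one key from
// the named substore; the branch is a cache, so live state is never mutated.
func queryAtHeight(ms storetypes.MultiStore, storeKey storetypes.StoreKey, height int64, key []byte) ([]byte, error) {
	cms, err := ms.CacheMultiStoreWithVersion(height)
	if err != nil {
		return nil, err // fails if the requested height was pruned or never persisted
	}
	return cms.GetKVStore(storeKey).Get(key), nil
}
```

+// From MultiStore.CacheMultiStore()....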
+type CacheMultiStore interface { + MultiStore + Write() // Writes operations to underlying KVStore +} + +// CommitMultiStore is an interface for a MultiStore without cache capabilities. +type CommitMultiStore interface { + Committer + MultiStore + snapshottypes.Snapshotter + + // Mount a store of type using the given db. + // If db == nil, the new store will use the CommitMultiStore db. + MountStoreWithDB(key StoreKey, typ StoreType, db dbm.DB) + + // Panics on a nil key. + GetCommitStore(key StoreKey) CommitStore + + // Panics on a nil key. + GetCommitKVStore(key StoreKey) CommitKVStore + + // Load the latest persisted version. Called once after all calls to + // Mount*Store() are complete. + LoadLatestVersion() error + + // LoadLatestVersionAndUpgrade will load the latest version, but also + // rename/delete/create sub-store keys, before registering all the keys + // in order to handle breaking formats in migrations + LoadLatestVersionAndUpgrade(upgrades *StoreUpgrades) error + + // LoadVersionAndUpgrade will load the named version, but also + // rename/delete/create sub-store keys, before registering all the keys + // in order to handle breaking formats in migrations + LoadVersionAndUpgrade(ver int64, upgrades *StoreUpgrades) error + + // Load a specific persisted version. When you load an old version, or when + // the last commit attempt didn't complete, the next commit after loading + // must be idempotent (return the same commit id). Otherwise the behavior is + // undefined. + LoadVersion(ver int64) error + + // Set an inter-block (persistent) cache that maintains a mapping from + // StoreKeys to CommitKVStores. + SetInterBlockCache(MultiStorePersistentCache) + + // SetInitialVersion sets the initial version of the IAVL tree. It is used when + // starting a new chain at an arbitrary height. + SetInitialVersion(version int64) error + + // SetIAVLCacheSize sets the cache size of the IAVL tree. + SetIAVLCacheSize(size int) + + // SetIAVLDisableFastNode enables/disables fastnode feature on iavl. + SetIAVLDisableFastNode(disable bool) + + // RollbackToVersion rollback the db to specific version(height). + RollbackToVersion(version int64) error + + // ListeningEnabled returns if listening is enabled for the KVStore belonging the provided StoreKey + ListeningEnabled(key StoreKey) bool + + // AddListeners adds a listener for the KVStore belonging to the provided StoreKey + AddListeners(keys []StoreKey) + + // PopStateCache returns the accumulated state change messages from the CommitMultiStore + PopStateCache() []*StoreKVPair + + // SetMetrics sets the metrics for the KVStore + SetMetrics(metrics metrics.StoreMetrics) +} + +//---------subsp------------------------------- +// KVStore + +// BasicKVStore is a simple interface to get/set data +type BasicKVStore interface { + // Get returns nil if key doesn't exist. Panics on nil key. + Get(key []byte) []byte + + // Has checks if a key exists. Panics on nil key. + Has(key []byte) bool + + // Set sets the key. Panics on nil key or value. + Set(key, value []byte) + + // Delete deletes the key. Panics on nil key. + Delete(key []byte) +} + +// KVStore additionally provides iteration and deletion +type KVStore interface { + Store + BasicKVStore + + // Iterator over a domain of keys in ascending order. End is exclusive. + // Start must be less than end, or the Iterator is invalid. + // Iterator must be closed by caller. 
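As a sketch of how `StoreUpgrades` feeds into `LoadLatestVersionAndUpgrade` (module names are invented for illustration, not part of the diff):

```go
package example

import (
	storetypes "cosmossdk.io/store/types"
)

// openWithUpgrades loads the latest version while adding, renaming, and
// deleting substores, so format-breaking migrations happen at load time.
func openWithUpgrades(cms storetypes.CommitMultiStore) error {
	upgrades := &storetypes.StoreUpgrades{
		Added:   []string{"newmodule"},
		Renamed: []storetypes.StoreRename{{OldKey: "oldname", NewKey: "newname"}},
		Deleted: []string{"legacymodule"},
	}
	return cms.LoadLatestVersionAndUpgrade(upgrades)
}
```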
+ // To iterate over entire domain, use store.Iterator(nil, nil) + // CONTRACT: No writes may happen within a domain while an iterator exists over it. + // Exceptionally allowed for cachekv.Store, safe to write in the modules. + Iterator(start, end []byte) Iterator + + // Iterator over a domain of keys in descending order. End is exclusive. + // Start must be less than end, or the Iterator is invalid. + // Iterator must be closed by caller. + // CONTRACT: No writes may happen within a domain while an iterator exists over it. + // Exceptionally allowed for cachekv.Store, safe to write in the modules. + ReverseIterator(start, end []byte) Iterator +} + +// Iterator is an alias db's Iterator for convenience. +type Iterator = dbm.Iterator + +// CacheKVStore branches a KVStore and provides read cache functionality. +// After calling .Write() on the CacheKVStore, all previously created +// CacheKVStores on the object expire. +type CacheKVStore interface { + KVStore + + // Writes operations to underlying KVStore + Write() +} + +// CommitKVStore is an interface for MultiStore. +type CommitKVStore interface { + Committer + KVStore +} + +//---------------------------------------- +// CacheWrap + +// CacheWrap is the most appropriate interface for store ephemeral branching and cache. +// For example, IAVLStore.CacheWrap() returns a CacheKVStore. CacheWrap should not return +// a Committer, since Commit ephemeral store make no sense. It can return KVStore, +// HeapStore, SpaceStore, etc. +type CacheWrap interface { + // Write syncs with the underlying store. + Write() + + // CacheWrap recursively wraps again. + CacheWrap() CacheWrap + + // CacheWrapWithTrace recursively wraps again with tracing enabled. + CacheWrapWithTrace(w io.Writer, tc TraceContext) CacheWrap +} + +type CacheWrapper interface { + // CacheWrap branches a store. + CacheWrap() CacheWrap + + // CacheWrapWithTrace branches a store with tracing enabled. + CacheWrapWithTrace(w io.Writer, tc TraceContext) CacheWrap +} + +func (cid CommitID) IsZero() bool { + return cid.Version == 0 && len(cid.Hash) == 0 +} + +func (cid CommitID) String() string { + return fmt.Sprintf("CommitID{%v:%X}", cid.Hash, cid.Version) +} + +//---------------------------------------- +// Store types + +// kind of store +type StoreType int + +const ( + StoreTypeMulti StoreType = iota + StoreTypeDB + StoreTypeIAVL + StoreTypeTransient + StoreTypeMemory + StoreTypeSMT + StoreTypePersistent +) + +func (st StoreType) String() string { + switch st { + case StoreTypeMulti: + return "StoreTypeMulti" + + case StoreTypeDB: + return "StoreTypeDB" + + case StoreTypeIAVL: + return "StoreTypeIAVL" + + case StoreTypeTransient: + return "StoreTypeTransient" + + case StoreTypeMemory: + return "StoreTypeMemory" + + case StoreTypeSMT: + return "StoreTypeSMT" + + case StoreTypePersistent: + return "StoreTypePersistent" + } + + return "unknown store type" +} + +//---------------------------------------- +// Keys for accessing substores + +// StoreKey is a key used to index stores in a MultiStore. +type StoreKey interface { + Name() string + String() string +} + +// CapabilityKey represent the Cosmos SDK keys for object-capability +// generation in the IBC protocol as defined in https://github.com/cosmos/ibc/tree/master/spec/core/ics-005-port-allocation#data-structures +type CapabilityKey StoreKey + +// KVStoreKey is used for accessing substores. +// Only the pointer value should ever be used - it functions as a capabilities key. 
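A minimal sketch of the iterator contract spelled out above (helper name invented):

```go
package example

import (
	storetypes "cosmossdk.io/store/types"
)

// countEntries iterates the entire domain in ascending order. The caller
// must Close the iterator, and must not write to the store while it is open.
func countEntries(store storetypes.KVStore) int {
	it := store.Iterator(nil, nil) // nil, nil == the whole key domain
	defer it.Close()

	n := 0
	for ; it.Valid(); it.Next() {
		n++ // no store.Set/Delete in here, per the CONTRACT comment
	}
	return n
}
```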
+type KVStoreKey struct { + name string +} + +// NewKVStoreKey returns a new pointer to a KVStoreKey. +// Use a pointer so keys don't collide. +func NewKVStoreKey(name string) *KVStoreKey { + if name == "" { + panic("empty key name not allowed") + } + return &KVStoreKey{ + name: name, + } +} + +// NewKVStoreKeys returns a map of new pointers to KVStoreKey's. +// The function will panic if there is a potential conflict in names (see `assertNoPrefix` +// function for more details). +func NewKVStoreKeys(names ...string) map[string]*KVStoreKey { + assertNoCommonPrefix(names) + keys := make(map[string]*KVStoreKey, len(names)) + for _, n := range names { + keys[n] = NewKVStoreKey(n) + } + + return keys +} + +func (key *KVStoreKey) Name() string { + return key.name +} + +func (key *KVStoreKey) String() string { + return fmt.Sprintf("KVStoreKey{%p, %s}", key, key.name) +} + +// TransientStoreKey is used for indexing transient stores in a MultiStore +type TransientStoreKey struct { + name string +} + +// Constructs new TransientStoreKey +// Must return a pointer according to the ocap principle +func NewTransientStoreKey(name string) *TransientStoreKey { + return &TransientStoreKey{ + name: name, + } +} + +// Implements StoreKey +func (key *TransientStoreKey) Name() string { + return key.name +} + +// Implements StoreKey +func (key *TransientStoreKey) String() string { + return fmt.Sprintf("TransientStoreKey{%p, %s}", key, key.name) +} + +// MemoryStoreKey defines a typed key to be used with an in-memory KVStore. +type MemoryStoreKey struct { + name string +} + +func NewMemoryStoreKey(name string) *MemoryStoreKey { + return &MemoryStoreKey{name: name} +} + +// Name returns the name of the MemoryStoreKey. +func (key *MemoryStoreKey) Name() string { + return key.name +} + +// String returns a stringified representation of the MemoryStoreKey. +func (key *MemoryStoreKey) String() string { + return fmt.Sprintf("MemoryStoreKey{%p, %s}", key, key.name) +} + +//---------------------------------------- + +// TraceContext contains TraceKVStore context data. It will be written with +// every trace operation. +type TraceContext map[string]interface{} + +// Clone clones tc into another instance of TraceContext. +func (tc TraceContext) Clone() TraceContext { + ret := TraceContext{} + for k, v := range tc { + ret[k] = v + } + + return ret +} + +// Merge merges value of newTc into tc. +func (tc TraceContext) Merge(newTc TraceContext) TraceContext { + if tc == nil { + tc = TraceContext{} + } + + for k, v := range newTc { + tc[k] = v + } + + return tc +} + +// MultiStorePersistentCache defines an interface which provides inter-block +// (persistent) caching capabilities for multiple CommitKVStores based on StoreKeys. +type MultiStorePersistentCache interface { + // Wrap and return the provided CommitKVStore with an inter-block (persistent) + // cache. + GetStoreCache(key StoreKey, store CommitKVStore) CommitKVStore + + // Return the underlying CommitKVStore for a StoreKey. + Unwrap(key StoreKey) CommitKVStore + + // Reset the entire set of internal caches. + Reset() +} + +// StoreWithInitialVersion is a store that can have an arbitrary initial +// version. +type StoreWithInitialVersion interface { + // SetInitialVersion sets the initial version of the IAVL tree. It is used when + // starting a new chain at an arbitrary height. 
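To make the key-name collision rule concrete, a small sketch (store names invented); `NewKVStoreKeys` panics via `assertNoCommonPrefix` when one name is a prefix of another:

```go
package main

import (
	storetypes "cosmossdk.io/store/types"
)

func main() {
	// Fine: neither name is a prefix of the other.
	keys := storetypes.NewKVStoreKeys("bank", "staking")
	_ = keys["bank"].Name() // "bank"

	// Would panic: "bank" is a prefix of "bankv2".
	// _ = storetypes.NewKVStoreKeys("bank", "bankv2")
}
```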
+ SetInitialVersion(version int64) +} + +// NewTransientStoreKeys constructs a new map of TransientStoreKey's +// Must return pointers according to the ocap principle +// The function will panic if there is a potential conflict in names +// see `assertNoCommonPrefix` function for more details. +func NewTransientStoreKeys(names ...string) map[string]*TransientStoreKey { + assertNoCommonPrefix(names) + keys := make(map[string]*TransientStoreKey) + for _, n := range names { + keys[n] = NewTransientStoreKey(n) + } + + return keys +} + +// NewMemoryStoreKeys constructs a new map matching store key names to their +// respective MemoryStoreKey references. +// The function will panic if there is a potential conflict in names (see `assertNoPrefix` +// function for more details). +func NewMemoryStoreKeys(names ...string) map[string]*MemoryStoreKey { + assertNoCommonPrefix(names) + keys := make(map[string]*MemoryStoreKey) + for _, n := range names { + keys[n] = NewMemoryStoreKey(n) + } + + return keys +} diff --git a/cosmos-sdk-store/types/store_test.go b/cosmos-sdk-store/types/store_test.go new file mode 100755 index 000000000..b6304d131 --- /dev/null +++ b/cosmos-sdk-store/types/store_test.go @@ -0,0 +1,240 @@ +package types + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + "gotest.tools/v3/assert" +) + +func TestStoreUpgrades(t *testing.T) { + t.Parallel() + type toAdd struct { + key string + } + type toDelete struct { + key string + delete bool + } + type toRename struct { + newkey string + result string + } + + cases := map[string]struct { + upgrades *StoreUpgrades + expectAdd []toAdd + expectDelete []toDelete + expectRename []toRename + }{ + "empty upgrade": { + expectDelete: []toDelete{{"foo", false}}, + expectRename: []toRename{{"foo", ""}}, + }, + "simple matches": { + upgrades: &StoreUpgrades{ + Deleted: []string{"foo"}, + Renamed: []StoreRename{{"bar", "baz"}}, + }, + expectDelete: []toDelete{{"foo", true}, {"bar", false}, {"baz", false}}, + expectRename: []toRename{{"foo", ""}, {"bar", ""}, {"baz", "bar"}}, + }, + "many data points": { + upgrades: &StoreUpgrades{ + Added: []string{"foo", "bar", "baz"}, + Deleted: []string{"one", "two", "three", "four", "five"}, + Renamed: []StoreRename{{"old", "new"}, {"white", "blue"}, {"black", "orange"}, {"fun", "boring"}}, + }, + expectAdd: []toAdd{{"foo"}, {"bar"}, {"baz"}}, + expectDelete: []toDelete{{"four", true}, {"six", false}, {"baz", false}}, + expectRename: []toRename{{"white", ""}, {"blue", "white"}, {"boring", "fun"}, {"missing", ""}}, + }, + } + + for name, tc := range cases { + tc := tc + t.Run(name, func(t *testing.T) { + for _, r := range tc.expectAdd { + assert.Equal(t, tc.upgrades.IsAdded(r.key), true) + } + for _, d := range tc.expectDelete { + assert.Equal(t, tc.upgrades.IsDeleted(d.key), d.delete) + } + for _, r := range tc.expectRename { + assert.Equal(t, tc.upgrades.RenamedFrom(r.newkey), r.result) + } + }) + } +} + +func TestCommitID(t *testing.T) { + t.Parallel() + require.True(t, CommitID{}.IsZero()) + require.False(t, CommitID{Version: int64(1)}.IsZero()) + require.False(t, CommitID{Hash: []byte("x")}.IsZero()) + require.Equal(t, "CommitID{[120 120 120 120]:64}", CommitID{Version: int64(100), Hash: []byte("xxxx")}.String()) +} + +func TestKVStoreKey(t *testing.T) { + t.Parallel() + key := NewKVStoreKey("test") + require.Equal(t, "test", key.name) + require.Equal(t, key.name, key.Name()) + require.Equal(t, fmt.Sprintf("KVStoreKey{%p, test}", key), key.String()) +} + +func TestNilKVStoreKey(t 
*testing.T) { + t.Parallel() + + require.Panics(t, func() { + _ = NewKVStoreKey("") + }, "setting an empty key should panic") +} + +func TestTransientStoreKey(t *testing.T) { + t.Parallel() + key := NewTransientStoreKey("test") + require.Equal(t, "test", key.name) + require.Equal(t, key.name, key.Name()) + require.Equal(t, fmt.Sprintf("TransientStoreKey{%p, test}", key), key.String()) +} + +func TestMemoryStoreKey(t *testing.T) { + t.Parallel() + key := NewMemoryStoreKey("test") + require.Equal(t, "test", key.name) + require.Equal(t, key.name, key.Name()) + require.Equal(t, fmt.Sprintf("MemoryStoreKey{%p, test}", key), key.String()) +} + +func TestTraceContext_Clone(t *testing.T) { + tests := []struct { + name string + tc TraceContext + want TraceContext + }{ + { + "nil TraceContext yields empty TraceContext", + nil, + TraceContext{}, + }, + { + "non-nil TraceContext yields equal TraceContext", + TraceContext{ + "value": 42, + }, + TraceContext{ + "value": 42, + }, + }, + { + "non-nil TraceContext yields equal TraceContext, for more than one key", + TraceContext{ + "value": 42, + "another": 24, + "weird": "string", + }, + TraceContext{ + "value": 42, + "another": 24, + "weird": "string", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require.Equal(t, tt.want, tt.tc.Clone()) + }) + } +} + +func TestTraceContext_Clone_is_deep(t *testing.T) { + original := TraceContext{ + "value": 42, + "another": 24, + "weird": "string", + } + + clone := original.Clone() + + clone["other"] = true + + require.NotEqual(t, original, clone) +} + +func TestTraceContext_Merge(t *testing.T) { + tests := []struct { + name string + tc TraceContext + other TraceContext + want TraceContext + }{ + { + "tc is nil, other is empty, yields an empty TraceContext", + nil, + TraceContext{}, + TraceContext{}, + }, + { + "tc is nil, other is nil, yields an empty TraceContext", + nil, + nil, + TraceContext{}, + }, + { + "tc is not nil, other is nil, yields tc", + TraceContext{ + "data": 42, + }, + nil, + TraceContext{ + "data": 42, + }, + }, + { + "tc is not nil, other is not nil, yields tc + other", + TraceContext{ + "data": 42, + }, + TraceContext{ + "data2": 42, + }, + TraceContext{ + "data": 42, + "data2": 42, + }, + }, + { + "tc is not nil, other is not nil, other updates value in tc, yields tc updated with value from other", + TraceContext{ + "data": 42, + }, + TraceContext{ + "data": 24, + }, + TraceContext{ + "data": 24, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require.Equal(t, tt.want, tt.tc.Merge(tt.other)) + }) + } +} + +func TestNewTransientStoreKeys(t *testing.T) { + assert.DeepEqual(t, map[string]*TransientStoreKey{}, NewTransientStoreKeys()) + assert.DeepEqual(t, 1, len(NewTransientStoreKeys("one"))) +} + +func TestNewInfiniteGasMeter(t *testing.T) { + gm := NewInfiniteGasMeter() + require.NotNil(t, gm) +} + +func TestStoreTypes(t *testing.T) { + assert.DeepEqual(t, InclusiveEndBytes([]byte("endbytes")), InclusiveEndBytes([]byte("endbytes"))) +} diff --git a/cosmos-sdk-store/types/streaming.go b/cosmos-sdk-store/types/streaming.go new file mode 100755 index 000000000..68a5c9223 --- /dev/null +++ b/cosmos-sdk-store/types/streaming.go @@ -0,0 +1,28 @@ +package types + +import ( + "context" + + abci "github.com/cometbft/cometbft/abci/types" +) + +// ABCIListener is the interface that we're exposing as a streaming service. +// It hooks into the ABCI message processing of the BaseApp. 
+// The error results are propagated to the consensus state machine;
+// if you don't want to affect consensus, handle the errors internally and always return `nil` in these APIs.
+type ABCIListener interface {
+	// ListenFinalizeBlock updates the streaming service with the latest FinalizeBlock messages
+	ListenFinalizeBlock(ctx context.Context, req abci.RequestFinalizeBlock, res abci.ResponseFinalizeBlock) error
+	// ListenCommit updates the streaming service with the latest Commit messages and state changes
+	ListenCommit(ctx context.Context, res abci.ResponseCommit, changeSet []*StoreKVPair) error
+}
+
+// StreamingManager is the struct that maintains a list of ABCIListeners and configuration settings.
+type StreamingManager struct {
+	// ABCIListeners for hooking into the ABCI message processing of the BaseApp
+	// and exposing the requests and responses to external consumers
+	ABCIListeners []ABCIListener
+
+	// StopNodeOnErr halts the node when ABCI streaming service listening results in an error.
+	StopNodeOnErr bool
+}
diff --git a/cosmos-sdk-store/types/utils.go b/cosmos-sdk-store/types/utils.go
new file mode 100755
index 000000000..a54d2746f
--- /dev/null
+++ b/cosmos-sdk-store/types/utils.go
@@ -0,0 +1,94 @@
+package types
+
+import (
+	"encoding/binary"
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// KVStorePrefixIterator iterates over all the keys with a certain prefix in ascending order
+func KVStorePrefixIterator(kvs KVStore, prefix []byte) Iterator {
+	return kvs.Iterator(prefix, PrefixEndBytes(prefix))
+}
+
+// KVStoreReversePrefixIterator iterates over all the keys with a certain prefix in descending order.
+func KVStoreReversePrefixIterator(kvs KVStore, prefix []byte) Iterator {
+	return kvs.ReverseIterator(prefix, PrefixEndBytes(prefix))
+}
+
+// PrefixEndBytes returns the []byte that would end a
+// range query for all []byte with a certain prefix
+// Deals with last byte of prefix being FF without overflowing
+func PrefixEndBytes(prefix []byte) []byte {
+	if len(prefix) == 0 {
+		return nil
+	}
+
+	end := make([]byte, len(prefix))
+	copy(end, prefix)
+
+	for {
+		if end[len(end)-1] != byte(255) {
+			end[len(end)-1]++
+			break
+		}
+
+		end = end[:len(end)-1]
+
+		if len(end) == 0 {
+			end = nil
+			break
+		}
+	}
+
+	return end
+}
+
+// InclusiveEndBytes returns the []byte that would end a
+// range query such that the input would be included
+func InclusiveEndBytes(inclusiveBytes []byte) []byte {
+	return append(inclusiveBytes, byte(0x00))
+}
+
+// assertNoCommonPrefix will panic if there are two keys: k1 and k2 in keys, such that
+// k1 is a prefix of k2
+func assertNoCommonPrefix(keys []string) {
+	sorted := make([]string, len(keys))
+	copy(sorted, keys)
+	sort.Strings(sorted)
+	for i := 1; i < len(sorted); i++ {
+		if strings.HasPrefix(sorted[i], sorted[i-1]) {
+			panic(fmt.Sprint("Potential key collision between KVStores:", sorted[i], " - ", sorted[i-1]))
+		}
+	}
+}
+
+// Uint64ToBigEndian - marshals uint64 to a bigendian byte slice so it can be sorted
+func Uint64ToBigEndian(i uint64) []byte {
+	b := make([]byte, 8)
+	binary.BigEndian.PutUint64(b, i)
+	return b
+}
+
+// BigEndianToUint64 returns a uint64 from big endian encoded bytes. If encoding
+// is empty, zero is returned.
+func BigEndianToUint64(bz []byte) uint64 {
+	if len(bz) == 0 {
+		return 0
+	}
+
+	return binary.BigEndian.Uint64(bz)
+}
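A sketch of the `PrefixEndBytes` arithmetic in use (helper name invented): for prefix `{0x61, 0xFF}` the end key is `{0x62}`, since the trailing `0xFF` is dropped and the preceding byte incremented, while an all-`0xFF` prefix yields `nil` (iterate to the end of the domain):

```go
package example

import (
	storetypes "cosmossdk.io/store/types"
)

// keysByPrefix collects every key that starts with prefix, using the
// end-exclusive range [prefix, PrefixEndBytes(prefix)).
func keysByPrefix(store storetypes.KVStore, prefix []byte) [][]byte {
	it := storetypes.KVStorePrefixIterator(store, prefix)
	defer it.Close()

	var keys [][]byte
	for ; it.Valid(); it.Next() {
		keys = append(keys, it.Key())
	}
	return keys
}
```

+// SliceContains implements a generic function for checking if a slice contains
+// a certain value.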
+func SliceContains[T comparable](elements []T, v T) bool {
+	for _, s := range elements {
+		if v == s {
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/cosmos-sdk-store/types/utils_test.go b/cosmos-sdk-store/types/utils_test.go
new file mode 100755
index 000000000..d05d9df6d
--- /dev/null
+++ b/cosmos-sdk-store/types/utils_test.go
@@ -0,0 +1,37 @@
+package types_test
+
+import (
+	"testing"
+
+	"gotest.tools/v3/assert"
+
+	"cosmossdk.io/store/types"
+)
+
+func TestPrefixEndBytes(t *testing.T) {
+	t.Parallel()
+	testCases := []struct {
+		prefix   []byte
+		expected []byte
+	}{
+		{[]byte{byte(55), byte(255), byte(255), byte(0)}, []byte{byte(55), byte(255), byte(255), byte(1)}},
+		{[]byte{byte(55), byte(255), byte(255), byte(15)}, []byte{byte(55), byte(255), byte(255), byte(16)}},
+		{[]byte{byte(55), byte(200), byte(255)}, []byte{byte(55), byte(201)}},
+		{[]byte{byte(55), byte(255), byte(255)}, []byte{byte(56)}},
+		{[]byte{byte(255), byte(255), byte(255)}, nil},
+		{[]byte{byte(255)}, nil},
+		{nil, nil},
+	}
+
+	for _, test := range testCases {
+		end := types.PrefixEndBytes(test.prefix)
+		assert.DeepEqual(t, test.expected, end)
+	}
+}
+
+func TestInclusiveEndBytes(t *testing.T) {
+	t.Parallel()
+	assert.DeepEqual(t, []byte{0x00}, types.InclusiveEndBytes(nil))
+	bs := []byte("test")
+	assert.DeepEqual(t, append(bs, byte(0x00)), types.InclusiveEndBytes(bs))
+}
diff --git a/cosmos-sdk-store/types/validity.go b/cosmos-sdk-store/types/validity.go
new file mode 100755
index 000000000..a1fbaba99
--- /dev/null
+++ b/cosmos-sdk-store/types/validity.go
@@ -0,0 +1,28 @@
+package types
+
+var (
+	// 128K - 1
+	MaxKeyLength = (1 << 17) - 1
+	// 2G - 1
+	MaxValueLength = (1 << 31) - 1
+)
+
+// AssertValidKey checks if the key is valid (key is not nil, not empty and within length limit)
+func AssertValidKey(key []byte) {
+	if len(key) == 0 {
+		panic("key is nil or empty")
+	}
+	if len(key) > MaxKeyLength {
+		panic("key is too large")
+	}
+}
+
+// AssertValidValue checks if the value is valid (value is not nil and within length limit)
+func AssertValidValue(value []byte) {
+	if value == nil {
+		panic("value is nil")
+	}
+	if len(value) > MaxValueLength {
+		panic("value is too large")
+	}
+}
diff --git a/cosmos-sdk-store/types/validity_test.go b/cosmos-sdk-store/types/validity_test.go
new file mode 100755
index 000000000..56e679136
--- /dev/null
+++ b/cosmos-sdk-store/types/validity_test.go
@@ -0,0 +1,23 @@
+package types_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"cosmossdk.io/store/types"
+)
+
+func TestAssertValidKey(t *testing.T) {
+	t.Parallel()
+	require.NotPanics(t, func() { types.AssertValidKey([]byte{0x01}) })
+	require.Panics(t, func() { types.AssertValidKey([]byte{}) })
+	require.Panics(t, func() { types.AssertValidKey(nil) })
+}
+
+func TestAssertValidValue(t *testing.T) {
+	t.Parallel()
+	require.NotPanics(t, func() { types.AssertValidValue([]byte{}) })
+	require.NotPanics(t, func() { types.AssertValidValue([]byte{0x01}) })
+	require.Panics(t, func() { types.AssertValidValue(nil) })
+}
diff --git a/cosmos-sdk-store/wrapper/wrapper.go b/cosmos-sdk-store/wrapper/wrapper.go
new file mode 100755
index 000000000..5ccb4aef4
--- /dev/null
+++ b/cosmos-sdk-store/wrapper/wrapper.go
@@ -0,0 +1,34 @@
+package wrapper
+
+import (
+	dbm "github.com/cosmos/cosmos-db"
+	idb "github.com/cosmos/iavl/db"
+)
+
+var _ idb.DB = &DBWrapper{}
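A usage sketch for the wrapper defined just below (assuming an in-memory cosmos-db backend; constructing the IAVL tree on top of it is elided):

```go
package main

import (
	dbm "github.com/cosmos/cosmos-db"

	"cosmossdk.io/store/wrapper"
)

func main() {
	// Adapt a cosmos-db backend to the iavl.DB interface; an IAVL tree can
	// then be built on top of `db`.
	db := wrapper.NewDBWrapper(dbm.NewMemDB())

	batch := db.NewBatch() // an idb.Batch backed by the wrapped dbm.DB
	_ = batch.Set([]byte("key"), []byte("value"))
	_ = batch.Write()
}
```

+// DBWrapper is a simple wrapper of dbm.DB that implements the iavl.DB interface.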
+type DBWrapper struct { + dbm.DB +} + +// NewDBWrapper creates a new DBWrapper instance. +func NewDBWrapper(db dbm.DB) *DBWrapper { + return &DBWrapper{db} +} + +func (dbw *DBWrapper) NewBatch() idb.Batch { + return dbw.DB.NewBatch() +} + +func (dbw *DBWrapper) NewBatchWithSize(size int) idb.Batch { + return dbw.DB.NewBatchWithSize(size) +} + +func (dbw *DBWrapper) Iterator(start, end []byte) (idb.Iterator, error) { + return dbw.DB.Iterator(start, end) +} + +func (dbw *DBWrapper) ReverseIterator(start, end []byte) (idb.Iterator, error) { + return dbw.DB.ReverseIterator(start, end) +} diff --git a/deployment/dockerfiles/Dockerfile b/deployment/dockerfiles/Dockerfile index cd634e298..e3b7eee29 100644 --- a/deployment/dockerfiles/Dockerfile +++ b/deployment/dockerfiles/Dockerfile @@ -126,6 +126,7 @@ COPY eip191 eip191 COPY Makefile . RUN true COPY client client +COPY cosmos-sdk-store cosmos-sdk-store RUN ln -s /usr/lib/x86_64-linux-gnu/liblz4.so /usr/local/lib/liblz4.so && ln -s /usr/lib/x86_64-linux-gnu/libzstd.so /usr/local/lib/libzstd.so diff --git a/go-cosmwasm/cmd/main.go b/go-cosmwasm/cmd/main.go index 6f81da7d9..b223b7c1a 100644 --- a/go-cosmwasm/cmd/main.go +++ b/go-cosmwasm/cmd/main.go @@ -22,7 +22,7 @@ func main() { panic(err) } - wasmer, err := wasm.NewWasmer("tmp", "staking,stargate,ibc3", 0, 15) + wasmer, err := wasm.NewWasmer("tmp", "staking,stargate,ibc3", 0, 15, true) if err != nil { panic(err) } diff --git a/go-cosmwasm/lib.go b/go-cosmwasm/lib.go index 6c3b36953..d73c50eb2 100644 --- a/go-cosmwasm/lib.go +++ b/go-cosmwasm/lib.go @@ -40,14 +40,16 @@ type Wasmer struct { // cacheSize sets the size of an optional in-memory LRU cache for prepared VMs. // They allow popular contracts to be executed very rapidly (no loading overhead), // but require ~32-64MB each in memory usage. 
-func NewWasmer(dataDir string, supportedFeatures string, cacheSize uint64, moduleCacheSize uint16) (*Wasmer, error) {
+func NewWasmer(dataDir string, supportedFeatures string, cacheSize uint64, moduleCacheSize uint16, initEnclave bool) (*Wasmer, error) {
 	cache, err := api.InitCache(dataDir, supportedFeatures, cacheSize)
 	if err != nil {
 		return nil, err
 	}
 
-	err = api.InitEnclaveRuntime(moduleCacheSize)
-	if err != nil {
-		return nil, err
+	if initEnclave {
+		err = api.InitEnclaveRuntime(moduleCacheSize)
+		if err != nil {
+			return nil, err
+		}
 	}
 
 	return &Wasmer{cache: cache}, nil
diff --git a/go.mod b/go.mod
index 69283b40c..8ebc5d25b 100644
--- a/go.mod
+++ b/go.mod
@@ -4,7 +4,7 @@ go 1.21
 replace (
 	cosmossdk.io/api => github.com/scrtlabs/cosmos-sdk-api v0.7.5-secret.1
-	cosmossdk.io/store => github.com/scrtlabs/cosmos-sdk-store v1.1.0-secret.1
+	cosmossdk.io/store => ./cosmos-sdk-store
 	github.com/cometbft/cometbft => github.com/scrtlabs/tendermint v0.38.10-0.20240924173150-b47eda4ca72b
 	github.com/cosmos/cosmos-sdk => github.com/scrtlabs/cosmos-sdk v0.46.0-beta2.0.20240917201403-3c75382e4a9d
 	github.com/cosmos/iavl => github.com/scrtlabs/iavl v1.1.2-secret.1
diff --git a/go.sum b/go.sum
index 3a2d9edca..140b0c937 100644
--- a/go.sum
+++ b/go.sum
@@ -958,8 +958,6 @@ github.com/scrtlabs/cosmos-sdk v0.46.0-beta2.0.20240917201403-3c75382e4a9d h1:IW
 github.com/scrtlabs/cosmos-sdk v0.46.0-beta2.0.20240917201403-3c75382e4a9d/go.mod h1:9oxg/QW7VVnOzIip9DRJNAmSnzjSFwX3b350xv94D1I=
 github.com/scrtlabs/cosmos-sdk-api v0.7.5-secret.1 h1:4GLC5nv9pkCEUD4HpSpsnuDMYPT5Bly+IKPi/7H/ylk=
 github.com/scrtlabs/cosmos-sdk-api v0.7.5-secret.1/go.mod h1:IcxpYS5fMemZGqyYtErK7OqvdM0C8kdW3dq8Q/XIG38=
-github.com/scrtlabs/cosmos-sdk-store v1.1.0-secret.1 h1:ZpCTh28SXGD2V6rjkU8Qz4Dol/tMQOYUvAMoN2d5Jic=
-github.com/scrtlabs/cosmos-sdk-store v1.1.0-secret.1/go.mod h1:oZfW/4Fc/zYqu3JmQcQdUJ3fqu5vnYTn3LZFFy8P8ng=
 github.com/scrtlabs/iavl v1.1.2-secret.1 h1:JX5h2U5Q/GxfVhUAm3rDgbaY2Rko7meCbVT2aJDigxw=
 github.com/scrtlabs/iavl v1.1.2-secret.1/go.mod h1:jLeUvm6bGT1YutCaL2fIar/8vGUE8cPZvh/gXEWDaDM=
 github.com/scrtlabs/tendermint v0.38.10-0.20240924173150-b47eda4ca72b h1:vBdj5WibgXocSZLOST1NAr7V+c20ZHURJbdjuj47q/s=
diff --git a/x/compute/internal/keeper/keeper.go b/x/compute/internal/keeper/keeper.go
index a7482d17c..d3d1b16b9 100644
--- a/x/compute/internal/keeper/keeper.go
+++ b/x/compute/internal/keeper/keeper.go
@@ -126,7 +126,7 @@ func NewKeeper(
 	customPlugins *QueryPlugins,
 	lastMsgManager *baseapp.LastMsgMarkerContainer,
 ) Keeper {
-	wasmer, err := wasm.NewWasmer(filepath.Join(homeDir, "wasm"), supportedFeatures, wasmConfig.CacheSize, wasmConfig.EnclaveCacheSize)
+	wasmer, err := wasm.NewWasmer(filepath.Join(homeDir, "wasm"), supportedFeatures, wasmConfig.CacheSize, wasmConfig.EnclaveCacheSize, wasmConfig.InitEnclave)
 	if err != nil {
 		panic(err)
 	}
diff --git a/x/compute/internal/types/types.go b/x/compute/internal/types/types.go
index 6575c5af9..232732e78 100644
--- a/x/compute/internal/types/types.go
+++ b/x/compute/internal/types/types.go
@@ -245,6 +245,9 @@ type WasmConfig struct {
 	SmartQueryGasLimit uint64
 	CacheSize          uint64
 	EnclaveCacheSize   uint16
+	// InitEnclave must always be true, except when we create a temporary app
+	// to extract autoCLIOpts from it
+	InitEnclave bool
 }
 
 // DefaultWasmConfig returns the default settings for WasmConfig
@@ -253,6 +256,7 @@ func DefaultWasmConfig() *WasmConfig {
 		SmartQueryGasLimit: defaultQueryGasLimit,
 		CacheSize:          defaultLRUCacheSize,
 		EnclaveCacheSize:   defaultEnclaveLRUCacheSize,
+		InitEnclave:        true,
 	}
 }
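Finally, a sketch of calling the new `NewWasmer` signature with enclave initialization disabled (import path assumed for illustration; mirrors the `true` call in go-cosmwasm/cmd/main.go):

```go
package main

import (
	wasm "github.com/scrtlabs/SecretNetwork/go-cosmwasm" // import path assumed
)

func main() {
	// Passing false as the final argument skips api.InitEnclaveRuntime,
	// for contexts such as a temporary app where no SGX enclave is available.
	wasmer, err := wasm.NewWasmer("tmp", "staking,stargate,ibc3", 0, 15, false)
	if err != nil {
		panic(err)
	}
	_ = wasmer
}
```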