From 9b3fb0c65a75864a7f34f3f8e8aab8d6717cf155 Mon Sep 17 00:00:00 2001 From: marbar3778 Date: Wed, 27 Nov 2024 20:29:42 +0100 Subject: [PATCH 01/21] remove ss from store/v2 --- store/v2/migration/manager.go | 5 +- store/v2/migration/manager_test.go | 8 +- store/v2/pruning/manager.go | 20 +- store/v2/pruning/manager_test.go | 35 +- store/v2/root/factory.go | 58 +- store/v2/root/migrate_test.go | 13 +- store/v2/root/store.go | 47 +- store/v2/root/store_mock_test.go | 3 +- store/v2/root/store_test.go | 55 +- store/v2/root/upgrade_test.go | 14 +- store/v2/storage/README.md | 107 -- store/v2/storage/database.go | 27 - store/v2/storage/pebbledb/batch.go | 97 -- store/v2/storage/pebbledb/comparator.go | 242 ---- store/v2/storage/pebbledb/comparator_test.go | 58 - store/v2/storage/pebbledb/db.go | 528 --------- store/v2/storage/pebbledb/db_test.go | 28 - store/v2/storage/pebbledb/iterator.go | 437 -------- store/v2/storage/rocksdb/batch.go | 67 -- store/v2/storage/rocksdb/comparator.go | 76 -- store/v2/storage/rocksdb/db.go | 251 ----- store/v2/storage/rocksdb/db_noflag.go | 70 -- store/v2/storage/rocksdb/db_test.go | 90 -- store/v2/storage/rocksdb/iterator.go | 159 --- store/v2/storage/rocksdb/opts.go | 125 --- store/v2/storage/sqlite/batch.go | 104 -- store/v2/storage/sqlite/db.go | 360 ------ store/v2/storage/sqlite/db_test.go | 200 ---- store/v2/storage/sqlite/iterator.go | 183 --- store/v2/storage/storage_bench_test.go | 187 ---- store/v2/storage/storage_test_suite.go | 1056 ------------------ store/v2/storage/store.go | 161 --- store/v2/storage/util/iterator.go | 53 - store/v2/store.go | 2 - 34 files changed, 40 insertions(+), 4886 deletions(-) delete mode 100644 store/v2/storage/README.md delete mode 100644 store/v2/storage/database.go delete mode 100644 store/v2/storage/pebbledb/batch.go delete mode 100644 store/v2/storage/pebbledb/comparator.go delete mode 100644 store/v2/storage/pebbledb/comparator_test.go delete mode 100644 store/v2/storage/pebbledb/db.go delete 
mode 100644 store/v2/storage/pebbledb/db_test.go delete mode 100644 store/v2/storage/pebbledb/iterator.go delete mode 100644 store/v2/storage/rocksdb/batch.go delete mode 100644 store/v2/storage/rocksdb/comparator.go delete mode 100644 store/v2/storage/rocksdb/db.go delete mode 100644 store/v2/storage/rocksdb/db_noflag.go delete mode 100644 store/v2/storage/rocksdb/db_test.go delete mode 100644 store/v2/storage/rocksdb/iterator.go delete mode 100644 store/v2/storage/rocksdb/opts.go delete mode 100644 store/v2/storage/sqlite/batch.go delete mode 100644 store/v2/storage/sqlite/db.go delete mode 100644 store/v2/storage/sqlite/db_test.go delete mode 100644 store/v2/storage/sqlite/iterator.go delete mode 100644 store/v2/storage/storage_bench_test.go delete mode 100644 store/v2/storage/storage_test_suite.go delete mode 100644 store/v2/storage/store.go delete mode 100644 store/v2/storage/util/iterator.go diff --git a/store/v2/migration/manager.go b/store/v2/migration/manager.go index d5118a6313e8..186fb204fea4 100644 --- a/store/v2/migration/manager.go +++ b/store/v2/migration/manager.go @@ -16,7 +16,6 @@ import ( "cosmossdk.io/store/v2/internal/encoding" "cosmossdk.io/store/v2/snapshots" snapshotstypes "cosmossdk.io/store/v2/snapshots/types" - "cosmossdk.io/store/v2/storage" ) const ( @@ -39,7 +38,6 @@ type Manager struct { logger log.Logger snapshotsManager *snapshots.Manager - stateStorage *storage.StorageStore stateCommitment *commitment.CommitStore db corestore.KVStoreWithBatch @@ -53,11 +51,10 @@ type Manager struct { // NewManager returns a new Manager. // // NOTE: `sc` can be `nil` if don't want to migrate the commitment. 
-func NewManager(db corestore.KVStoreWithBatch, sm *snapshots.Manager, ss *storage.StorageStore, sc *commitment.CommitStore, logger log.Logger) *Manager { +func NewManager(db corestore.KVStoreWithBatch, sm *snapshots.Manager, sc *commitment.CommitStore, logger log.Logger) *Manager { return &Manager{ logger: logger, snapshotsManager: sm, - stateStorage: ss, stateCommitment: sc, db: db, } diff --git a/store/v2/migration/manager_test.go b/store/v2/migration/manager_test.go index 07a5b15b8350..99de9fbe547c 100644 --- a/store/v2/migration/manager_test.go +++ b/store/v2/migration/manager_test.go @@ -13,8 +13,6 @@ import ( "cosmossdk.io/store/v2/commitment/iavl" dbm "cosmossdk.io/store/v2/db" "cosmossdk.io/store/v2/snapshots" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/pebbledb" ) var storeKeys = []string{"store1", "store2"} @@ -37,10 +35,6 @@ func setupMigrationManager(t *testing.T, noCommitStore bool) (*Manager, *commitm snapshotsManager := snapshots.NewManager(snapshotsStore, snapshots.NewSnapshotOptions(1500, 2), commitStore, nil, nil, coretesting.NewNopLogger()) - storageDB, err := pebbledb.New(t.TempDir()) - require.NoError(t, err) - newStorageStore := storage.NewStorageStore(storageDB, coretesting.NewNopLogger()) // for store/v2 - db1 := dbm.NewMemDB() multiTrees1 := make(map[string]commitment.Tree) for _, storeKey := range storeKeys { @@ -54,7 +48,7 @@ func setupMigrationManager(t *testing.T, noCommitStore bool) (*Manager, *commitm newCommitStore = nil } - return NewManager(db, snapshotsManager, newStorageStore, newCommitStore, coretesting.NewNopLogger()), commitStore + return NewManager(db, snapshotsManager, newCommitStore, coretesting.NewNopLogger()), commitStore } func TestMigrateState(t *testing.T) { diff --git a/store/v2/pruning/manager.go b/store/v2/pruning/manager.go index 4e61a7459d08..e21fe1ce1952 100644 --- a/store/v2/pruning/manager.go +++ b/store/v2/pruning/manager.go @@ -10,19 +10,13 @@ type Manager struct { scPruner 
store.Pruner // scPruningOption are the pruning options for the SC. scPruningOption *store.PruningOption - // ssPruner is the pruner for the SS. - ssPruner store.Pruner - // ssPruningOption are the pruning options for the SS. - ssPruningOption *store.PruningOption } // NewManager creates a new Pruning Manager. -func NewManager(scPruner, ssPruner store.Pruner, scPruningOption, ssPruningOption *store.PruningOption) *Manager { +func NewManager(scPruner store.Pruner, scPruningOption *store.PruningOption) *Manager { return &Manager{ scPruner: scPruner, scPruningOption: scPruningOption, - ssPruner: ssPruner, - ssPruningOption: ssPruningOption, } } @@ -39,15 +33,6 @@ func (m *Manager) Prune(version uint64) error { } } - // Prune the SS. - if m.ssPruningOption != nil { - if prune, pruneTo := m.ssPruningOption.ShouldPrune(version); prune { - if err := m.ssPruner.Prune(pruneTo); err != nil { - return err - } - } - } - return nil } @@ -55,9 +40,6 @@ func (m *Manager) signalPruning(pause bool) { if scPausablePruner, ok := m.scPruner.(store.PausablePruner); ok { scPausablePruner.PausePruning(pause) } - if ssPausablePruner, ok := m.ssPruner.(store.PausablePruner); ok { - ssPausablePruner.PausePruning(pause) - } } func (m *Manager) PausePruning() { diff --git a/store/v2/pruning/manager_test.go b/store/v2/pruning/manager_test.go index 0a0333451ae5..66cadb353598 100644 --- a/store/v2/pruning/manager_test.go +++ b/store/v2/pruning/manager_test.go @@ -14,8 +14,6 @@ import ( "cosmossdk.io/store/v2/commitment" "cosmossdk.io/store/v2/commitment/iavl" dbm "cosmossdk.io/store/v2/db" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/sqlite" ) var storeKeys = []string{"store1", "store2", "store3"} @@ -25,7 +23,6 @@ type PruningManagerTestSuite struct { manager *Manager sc *commitment.CommitStore - ss *storage.StorageStore } func TestPruningManagerTestSuite(t *testing.T) { @@ -45,12 +42,8 @@ func (s *PruningManagerTestSuite) SetupTest() { s.sc, err = 
commitment.NewCommitStore(multiTrees, nil, mdb, nopLog) s.Require().NoError(err) - sqliteDB, err := sqlite.New(s.T().TempDir()) - s.Require().NoError(err) - s.ss = storage.NewStorageStore(sqliteDB, nopLog) - scPruningOption := store.NewPruningOptionWithCustom(0, 1) // prune all - ssPruningOption := store.NewPruningOptionWithCustom(5, 10) // prune some - s.manager = NewManager(s.sc, s.ss, scPruningOption, ssPruningOption) + scPruningOption := store.NewPruningOptionWithCustom(0, 1) // prune all + s.manager = NewManager(s.sc, scPruningOption) } func (s *PruningManagerTestSuite) TestPrune() { @@ -68,8 +61,6 @@ func (s *PruningManagerTestSuite) TestPrune() { _, err := s.sc.Commit(version) s.Require().NoError(err) - s.Require().NoError(s.ss.ApplyChangeset(cs)) - s.Require().NoError(s.manager.Prune(version)) } @@ -87,23 +78,6 @@ func (s *PruningManagerTestSuite) TestPrune() { } s.Require().Eventually(checkSCPrune, 10*time.Second, 1*time.Second) - // check the storage store - _, pruneVersion := s.manager.ssPruningOption.ShouldPrune(toVersion) - for version := uint64(1); version <= toVersion; version++ { - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - key := []byte(fmt.Sprintf("key-%d-%d", version, i)) - value, err := s.ss.Get([]byte(storeKey), version, key) - if version <= pruneVersion { - s.Require().Nil(value) - s.Require().Error(err) - } else { - s.Require().NoError(err) - s.Require().Equal([]byte(fmt.Sprintf("value-%d-%d", version, i)), value) - } - } - } - } } func TestPruningOption(t *testing.T) { @@ -164,8 +138,6 @@ func (s *PruningManagerTestSuite) TestSignalCommit() { _, err := s.sc.Commit(1) s.Require().NoError(err) - s.Require().NoError(s.ss.ApplyChangeset(cs)) - // commit version 2 for _, storeKey := range storeKeys { cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", 2, 0)), []byte(fmt.Sprintf("value-%d-%d", 2, 0)), false) @@ -179,8 +151,6 @@ func (s *PruningManagerTestSuite) TestSignalCommit() { _, err = s.sc.Commit(2) 
s.Require().NoError(err) - s.Require().NoError(s.ss.ApplyChangeset(cs)) - // try prune before signaling commit has finished s.Require().NoError(s.manager.Prune(2)) @@ -238,7 +208,6 @@ func (s *PruningManagerTestSuite) TestSignalCommit() { _, err := s.sc.Commit(version) s.Require().NoError(err) - s.Require().NoError(s.ss.ApplyChangeset(cs)) err = s.manager.ResumePruning(version) s.Require().NoError(err) } diff --git a/store/v2/root/factory.go b/store/v2/root/factory.go index 2511a53b434e..36eadf2382bc 100644 --- a/store/v2/root/factory.go +++ b/store/v2/root/factory.go @@ -3,7 +3,6 @@ package root import ( "errors" "fmt" - "os" "cosmossdk.io/core/log" corestore "cosmossdk.io/core/store" @@ -14,30 +13,20 @@ import ( "cosmossdk.io/store/v2/db" "cosmossdk.io/store/v2/internal" "cosmossdk.io/store/v2/pruning" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/pebbledb" - "cosmossdk.io/store/v2/storage/rocksdb" - "cosmossdk.io/store/v2/storage/sqlite" ) type ( - SSType string SCType string ) const ( - SSTypeSQLite SSType = "sqlite" - SSTypePebble SSType = "pebble" - SSTypeRocks SSType = "rocksdb" SCTypeIavl SCType = "iavl" SCTypeIavlV2 SCType = "iavl-v2" ) // Options are the options for creating a root store. type Options struct { - SSType SSType `mapstructure:"ss-type" toml:"ss-type" comment:"State storage database type. Currently we support: \"sqlite\", \"pebble\" and \"rocksdb\""` SCType SCType `mapstructure:"sc-type" toml:"sc-type" comment:"State commitment database type. 
Currently we support: \"iavl\" and \"iavl-v2\""` - SSPruningOption *store.PruningOption `mapstructure:"ss-pruning-option" toml:"ss-pruning-option" comment:"Pruning options for state storage"` SCPruningOption *store.PruningOption `mapstructure:"sc-pruning-option" toml:"sc-pruning-option" comment:"Pruning options for state commitment"` IavlConfig *iavl.Config `mapstructure:"iavl-config" toml:"iavl-config"` } @@ -54,16 +43,11 @@ type FactoryOptions struct { // DefaultStoreOptions returns the default options for creating a root store. func DefaultStoreOptions() Options { return Options{ - SSType: SSTypeSQLite, SCType: SCTypeIavl, SCPruningOption: &store.PruningOption{ KeepRecent: 2, Interval: 100, }, - SSPruningOption: &store.PruningOption{ - KeepRecent: 2, - Interval: 100, - }, IavlConfig: &iavl.Config{ CacheSize: 100_000, SkipFastStorageUpgrade: true, @@ -77,45 +61,11 @@ func DefaultStoreOptions() Options { // necessary, but demonstrates the required steps and configuration to create a root store. 
func CreateRootStore(opts *FactoryOptions) (store.RootStore, error) { var ( - ssDb storage.Database - ss *storage.StorageStore - sc *commitment.CommitStore - err error - ensureDir = func(dir string) error { - if err := os.MkdirAll(dir, 0o0755); err != nil { - return fmt.Errorf("failed to create directory %s: %w", dir, err) - } - return nil - } + sc *commitment.CommitStore + err error ) storeOpts := opts.Options - switch storeOpts.SSType { - case SSTypeSQLite: - dir := fmt.Sprintf("%s/data/ss/sqlite", opts.RootDir) - if err = ensureDir(dir); err != nil { - return nil, err - } - ssDb, err = sqlite.New(dir) - case SSTypePebble: - dir := fmt.Sprintf("%s/data/ss/pebble", opts.RootDir) - if err = ensureDir(dir); err != nil { - return nil, err - } - ssDb, err = pebbledb.New(dir) - case SSTypeRocks: - dir := fmt.Sprintf("%s/data/ss/rocksdb", opts.RootDir) - if err = ensureDir(dir); err != nil { - return nil, err - } - ssDb, err = rocksdb.New(dir) - default: - return nil, fmt.Errorf("unknown storage type: %s", opts.Options.SSType) - } - if err != nil { - return nil, err - } - ss = storage.NewStorageStore(ssDb, opts.Logger) metadata := commitment.NewMetadataStore(opts.SCRawDB) latestVersion, err := metadata.GetLatestVersion() @@ -176,6 +126,6 @@ func CreateRootStore(opts *FactoryOptions) (store.RootStore, error) { return nil, err } - pm := pruning.NewManager(sc, ss, storeOpts.SCPruningOption, storeOpts.SSPruningOption) - return New(opts.SCRawDB, opts.Logger, ss, sc, pm, nil, nil) + pm := pruning.NewManager(sc, storeOpts.SCPruningOption) + return New(opts.SCRawDB, opts.Logger, sc, pm, nil, nil) } diff --git a/store/v2/root/migrate_test.go b/store/v2/root/migrate_test.go index 2d83038b812e..437746392968 100644 --- a/store/v2/root/migrate_test.go +++ b/store/v2/root/migrate_test.go @@ -17,8 +17,6 @@ import ( "cosmossdk.io/store/v2/migration" "cosmossdk.io/store/v2/pruning" "cosmossdk.io/store/v2/snapshots" - "cosmossdk.io/store/v2/storage" - 
"cosmossdk.io/store/v2/storage/sqlite" ) var storeKeys = []string{"store1", "store2", "store3"} @@ -61,11 +59,6 @@ func (s *MigrateStoreTestSuite) SetupTest() { s.Require().NoError(err) } - // create a new storage and commitment stores - sqliteDB, err := sqlite.New(s.T().TempDir()) - s.Require().NoError(err) - ss := storage.NewStorageStore(sqliteDB, testLog) - multiTrees1 := make(map[string]commitment.Tree) for _, storeKey := range storeKeys { multiTrees1[storeKey] = iavl.NewIavlTree(dbm.NewMemDB(), nopLog, iavl.DefaultConfig()) @@ -76,11 +69,11 @@ func (s *MigrateStoreTestSuite) SetupTest() { snapshotsStore, err := snapshots.NewStore(s.T().TempDir()) s.Require().NoError(err) snapshotManager := snapshots.NewManager(snapshotsStore, snapshots.NewSnapshotOptions(1500, 2), orgSC, nil, nil, testLog) - migrationManager := migration.NewManager(dbm.NewMemDB(), snapshotManager, ss, sc, testLog) - pm := pruning.NewManager(sc, ss, nil, nil) + migrationManager := migration.NewManager(dbm.NewMemDB(), snapshotManager, sc, testLog) + pm := pruning.NewManager(sc, nil) // assume no storage store, simulate the migration process - s.rootStore, err = New(dbm.NewMemDB(), testLog, ss, orgSC, pm, migrationManager, nil) + s.rootStore, err = New(dbm.NewMemDB(), testLog, orgSC, pm, migrationManager, nil) s.Require().NoError(err) } diff --git a/store/v2/root/store.go b/store/v2/root/store.go index 59363e2fb35b..80c5f6bab1b6 100644 --- a/store/v2/root/store.go +++ b/store/v2/root/store.go @@ -34,9 +34,6 @@ type Store struct { // holds the db instance for closing it dbCloser io.Closer - // stateStorage reflects the state storage backend - stateStorage store.VersionedWriter - // stateCommitment reflects the state commitment (SC) backend stateCommitment store.Committer @@ -67,7 +64,6 @@ type Store struct { func New( dbCloser io.Closer, logger corelog.Logger, - ss store.VersionedWriter, sc store.Committer, pm *pruning.Manager, mm *migration.Manager, @@ -76,7 +72,6 @@ func New( return &Store{ 
dbCloser: dbCloser, logger: logger, - stateStorage: ss, stateCommitment: sc, pruningManager: pm, migrationManager: mm, @@ -88,11 +83,9 @@ func New( // Close closes the store and resets all internal fields. Note, Close() is NOT // idempotent and should only be called once. func (s *Store) Close() (err error) { - err = errors.Join(err, s.stateStorage.Close()) err = errors.Join(err, s.stateCommitment.Close()) err = errors.Join(err, s.dbCloser.Close()) - s.stateStorage = nil s.stateCommitment = nil s.lastCommitInfo = nil @@ -113,13 +106,6 @@ func (s *Store) SetInitialVersion(v uint64) error { // and the version exists in the state commitment, since the state storage will be // synced during migration. func (s *Store) getVersionedReader(version uint64) (store.VersionedReader, error) { - isExist, err := s.stateStorage.VersionExists(version) - if err != nil { - return nil, err - } - if isExist { - return s.stateStorage, nil - } if vReader, ok := s.stateCommitment.(store.VersionedReader); ok { isExist, err := vReader.VersionExists(version) @@ -154,10 +140,6 @@ func (s *Store) StateAt(v uint64) (corestore.ReaderMap, error) { return NewReaderMap(v, vReader), err } -func (s *Store) GetStateStorage() store.VersionedWriter { - return s.stateStorage -} - func (s *Store) GetStateCommitment() store.Committer { return s.stateCommitment } @@ -282,14 +264,15 @@ func (s *Store) LoadVersionAndUpgrade(version uint64, upgrades *corestore.StoreU return err } - // if the state storage implements the UpgradableDatabase interface, prune the - // deleted store keys - upgradableDatabase, ok := s.stateStorage.(store.UpgradableDatabase) - if ok { - if err := upgradableDatabase.PruneStoreKeys(upgrades.Deleted, version); err != nil { - return fmt.Errorf("failed to prune store keys %v: %w", upgrades.Deleted, err) - } - } + //TODO why are we not pruning sc keys? 
+ // // if the state storage implements the UpgradableDatabase interface, prune the + // // deleted store keys + // upgradableDatabase, ok := s.stateStorage.(store.UpgradableDatabase) + // if ok { + // if err := upgradableDatabase.PruneStoreKeys(upgrades.Deleted, version); err != nil { + // return fmt.Errorf("failed to prune store keys %v: %w", upgrades.Deleted, err) + // } + // } return nil } @@ -348,18 +331,6 @@ func (s *Store) Commit(cs *corestore.Changeset) ([]byte, error) { eg := new(errgroup.Group) - // if migrating the changeset will be sent to migration manager to fill SS - // otherwise commit to SS async here - if !s.isMigrating { - eg.Go(func() error { - if err := s.stateStorage.ApplyChangeset(cs); err != nil { - return fmt.Errorf("failed to commit SS: %w", err) - } - - return nil - }) - } - // commit SC async var cInfo *proof.CommitInfo eg.Go(func() error { diff --git a/store/v2/root/store_mock_test.go b/store/v2/root/store_mock_test.go index 4b43d52f7f7e..9afa2d34969d 100644 --- a/store/v2/root/store_mock_test.go +++ b/store/v2/root/store_mock_test.go @@ -17,11 +17,10 @@ import ( func newTestRootStore(ss store.VersionedWriter, sc store.Committer) *Store { noopLog := coretesting.NewNopLogger() - pm := pruning.NewManager(sc.(store.Pruner), ss.(store.Pruner), nil, nil) + pm := pruning.NewManager(sc.(store.Pruner), nil) return &Store{ logger: noopLog, telemetry: metrics.Metrics{}, - stateStorage: ss, stateCommitment: sc, pruningManager: pm, isMigrating: false, diff --git a/store/v2/root/store_test.go b/store/v2/root/store_test.go index 10a3a049d3c0..1261c7e6abd9 100644 --- a/store/v2/root/store_test.go +++ b/store/v2/root/store_test.go @@ -16,8 +16,6 @@ import ( dbm "cosmossdk.io/store/v2/db" "cosmossdk.io/store/v2/proof" "cosmossdk.io/store/v2/pruning" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/sqlite" ) const ( @@ -47,18 +45,14 @@ func TestStorageTestSuite(t *testing.T) { func (s *RootStoreTestSuite) SetupTest() { noopLog := 
coretesting.NewNopLogger() - sqliteDB, err := sqlite.New(s.T().TempDir()) - s.Require().NoError(err) - ss := storage.NewStorageStore(sqliteDB, noopLog) - tree := iavl.NewIavlTree(dbm.NewMemDB(), noopLog, iavl.DefaultConfig()) tree2 := iavl.NewIavlTree(dbm.NewMemDB(), noopLog, iavl.DefaultConfig()) tree3 := iavl.NewIavlTree(dbm.NewMemDB(), noopLog, iavl.DefaultConfig()) sc, err := commitment.NewCommitStore(map[string]commitment.Tree{testStoreKey: tree, testStoreKey2: tree2, testStoreKey3: tree3}, nil, dbm.NewMemDB(), noopLog) s.Require().NoError(err) - pm := pruning.NewManager(sc, ss, nil, nil) - rs, err := New(dbm.NewMemDB(), noopLog, ss, sc, pm, nil, nil) + pm := pruning.NewManager(sc, nil) + rs, err := New(dbm.NewMemDB(), noopLog, sc, pm, nil, nil) s.Require().NoError(err) s.rootStore = rs @@ -67,10 +61,6 @@ func (s *RootStoreTestSuite) SetupTest() { func (s *RootStoreTestSuite) newStoreWithPruneConfig(config *store.PruningOption) { noopLog := coretesting.NewNopLogger() - sqliteDB, err := sqlite.New(s.T().TempDir()) - s.Require().NoError(err) - ss := storage.NewStorageStore(sqliteDB, noopLog) - mdb := dbm.NewMemDB() multiTrees := make(map[string]commitment.Tree) for _, storeKey := range testStoreKeys { @@ -81,18 +71,18 @@ func (s *RootStoreTestSuite) newStoreWithPruneConfig(config *store.PruningOption sc, err := commitment.NewCommitStore(multiTrees, nil, dbm.NewMemDB(), noopLog) s.Require().NoError(err) - pm := pruning.NewManager(sc, ss, config, config) + pm := pruning.NewManager(sc, config) - rs, err := New(dbm.NewMemDB(), noopLog, ss, sc, pm, nil, nil) + rs, err := New(dbm.NewMemDB(), noopLog, sc, pm, nil, nil) s.Require().NoError(err) s.rootStore = rs } -func (s *RootStoreTestSuite) newStoreWithBackendMount(ss store.VersionedWriter, sc store.Committer, pm *pruning.Manager) { +func (s *RootStoreTestSuite) newStoreWithBackendMount(sc store.Committer, pm *pruning.Manager) { noopLog := coretesting.NewNopLogger() - rs, err := New(dbm.NewMemDB(), noopLog, ss, sc, 
pm, nil, nil) + rs, err := New(dbm.NewMemDB(), noopLog, sc, pm, nil, nil) s.Require().NoError(err) s.rootStore = rs @@ -107,10 +97,6 @@ func (s *RootStoreTestSuite) TestGetStateCommitment() { s.Require().Equal(s.rootStore.GetStateCommitment(), s.rootStore.(*Store).stateCommitment) } -func (s *RootStoreTestSuite) TestGetStateStorage() { - s.Require().Equal(s.rootStore.GetStateStorage(), s.rootStore.(*Store).stateStorage) -} - func (s *RootStoreTestSuite) TestSetInitialVersion() { initialVersion := uint64(5) s.Require().NoError(s.rootStore.SetInitialVersion(initialVersion)) @@ -535,17 +521,14 @@ func (s *RootStoreTestSuite) TestMultiStore_PruningRestart() { mdb1 := dbm.NewMemDB() mdb2 := dbm.NewMemDB() - sqliteDB, err := sqlite.New(s.T().TempDir()) - s.Require().NoError(err) - ss := storage.NewStorageStore(sqliteDB, noopLog) tree := iavl.NewIavlTree(mdb1, noopLog, iavl.DefaultConfig()) sc, err := commitment.NewCommitStore(map[string]commitment.Tree{testStoreKey: tree}, nil, mdb2, noopLog) s.Require().NoError(err) - pm := pruning.NewManager(sc, ss, pruneOpt, pruneOpt) + pm := pruning.NewManager(sc, pruneOpt) - s.newStoreWithBackendMount(ss, sc, pm) + s.newStoreWithBackendMount(sc, pm) s.Require().NoError(s.rootStore.LoadLatestVersion()) // Commit enough to build up heights to prune, where on the next block we should @@ -565,18 +548,13 @@ func (s *RootStoreTestSuite) TestMultiStore_PruningRestart() { s.Require().False(ok) s.Require().Equal(uint64(0), actualHeightToPrune) - // "restart" - sqliteDB, err = sqlite.New(s.T().TempDir()) - s.Require().NoError(err) - ss = storage.NewStorageStore(sqliteDB, noopLog) - tree = iavl.NewIavlTree(mdb1, noopLog, iavl.DefaultConfig()) sc, err = commitment.NewCommitStore(map[string]commitment.Tree{testStoreKey: tree}, nil, mdb2, noopLog) s.Require().NoError(err) - pm = pruning.NewManager(sc, ss, pruneOpt, pruneOpt) + pm = pruning.NewManager(sc, pruneOpt) - s.newStoreWithBackendMount(ss, sc, pm) + s.newStoreWithBackendMount(sc, pm) err = 
s.rootStore.LoadLatestVersion() s.Require().NoError(err) @@ -616,11 +594,6 @@ func (s *RootStoreTestSuite) TestMultiStore_PruningRestart() { func (s *RootStoreTestSuite) TestMultiStoreRestart() { noopLog := coretesting.NewNopLogger() - sqliteDB, err := sqlite.New(s.T().TempDir()) - s.Require().NoError(err) - - ss := storage.NewStorageStore(sqliteDB, noopLog) - mdb1 := dbm.NewMemDB() mdb2 := dbm.NewMemDB() multiTrees := make(map[string]commitment.Tree) @@ -632,9 +605,9 @@ func (s *RootStoreTestSuite) TestMultiStoreRestart() { sc, err := commitment.NewCommitStore(multiTrees, nil, mdb2, noopLog) s.Require().NoError(err) - pm := pruning.NewManager(sc, ss, nil, nil) + pm := pruning.NewManager(sc, nil) - s.newStoreWithBackendMount(ss, sc, pm) + s.newStoreWithBackendMount(sc, pm) s.Require().NoError(s.rootStore.LoadLatestVersion()) // perform changes @@ -719,9 +692,9 @@ func (s *RootStoreTestSuite) TestMultiStoreRestart() { sc, err = commitment.NewCommitStore(multiTrees, nil, mdb2, noopLog) s.Require().NoError(err) - pm = pruning.NewManager(sc, ss, nil, nil) + pm = pruning.NewManager(sc, nil) - s.newStoreWithBackendMount(ss, sc, pm) + s.newStoreWithBackendMount(sc, pm) err = s.rootStore.LoadLatestVersion() s.Require().Nil(err) diff --git a/store/v2/root/upgrade_test.go b/store/v2/root/upgrade_test.go index 400ddb2c4d65..fcbb2a5cb9d5 100644 --- a/store/v2/root/upgrade_test.go +++ b/store/v2/root/upgrade_test.go @@ -14,8 +14,6 @@ import ( "cosmossdk.io/store/v2/commitment/iavl" dbm "cosmossdk.io/store/v2/db" "cosmossdk.io/store/v2/pruning" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/sqlite" ) type UpgradeStoreTestSuite struct { @@ -43,14 +41,10 @@ func (s *UpgradeStoreTestSuite) SetupTest() { multiTrees[storeKey], _ = newTreeFn(storeKey) } - // create storage and commitment stores - sqliteDB, err := sqlite.New(s.T().TempDir()) - s.Require().NoError(err) - ss := storage.NewStorageStore(sqliteDB, testLog) sc, err := commitment.NewCommitStore(multiTrees, 
nil, s.commitDB, testLog) s.Require().NoError(err) - pm := pruning.NewManager(sc, ss, nil, nil) - s.rootStore, err = New(s.commitDB, testLog, ss, sc, pm, nil, nil) + pm := pruning.NewManager(sc, nil) + s.rootStore, err = New(s.commitDB, testLog, sc, pm, nil, nil) s.Require().NoError(err) // commit changeset @@ -91,8 +85,8 @@ func (s *UpgradeStoreTestSuite) loadWithUpgrades(upgrades *corestore.StoreUpgrad sc, err := commitment.NewCommitStore(multiTrees, oldTrees, s.commitDB, testLog) s.Require().NoError(err) - pm := pruning.NewManager(sc, s.rootStore.GetStateStorage().(store.Pruner), nil, nil) - s.rootStore, err = New(s.commitDB, testLog, s.rootStore.GetStateStorage(), sc, pm, nil, nil) + pm := pruning.NewManager(sc, nil) + s.rootStore, err = New(s.commitDB, testLog, sc, pm, nil, nil) s.Require().NoError(err) } diff --git a/store/v2/storage/README.md b/store/v2/storage/README.md deleted file mode 100644 index aaffab357c30..000000000000 --- a/store/v2/storage/README.md +++ /dev/null @@ -1,107 +0,0 @@ -# State Storage (SS) - -The `storage` package contains the state storage (SS) implementation. Specifically, -it contains RocksDB, PebbleDB, and SQLite (Btree) backend implementations of the -`VersionedWriter` interface. - -The goal of SS is to provide a modular storage backend, i.e. multiple implementations, -to facilitate storing versioned raw key/value pairs in a fast embedded database, -although an embedded database is not required, i.e. you could use a replicated -RDBMS system. - -The responsibility and functions of SS include the following: - -* Provide fast and efficient queries for versioned raw key/value pairs -* Provide versioned CRUD operations -* Provide versioned batching functionality -* Provide versioned iteration (forward and reverse) functionality -* Provide pruning functionality - -All of the functionality provided by an SS backend should work under a versioned -scheme, i.e. 
a user should be able to get, store, and iterate over keys for the -latest and historical versions efficiently. - -## Backends - -### RocksDB - -The RocksDB implementation is a CGO-based SS implementation. It fully supports -the `VersionedWriter` API and is arguably the most efficient implementation. It -also supports versioning out-of-the-box using User-defined Timestamps in -ColumnFamilies (CF). However, it requires the CGO dependency which can complicate -an app’s build process. - -### PebbleDB - -The PebbleDB implementation is a native Go SS implementation that is primarily an -alternative to RocksDB. Since it does not support CF, results in the fact that we -need to implement versioning (MVCC) ourselves. This comes with added implementation -complexity and potential performance overhead. However, it is a pure Go implementation -and does not require CGO. - -### SQLite (Btree) - -The SQLite implementation is another CGO-based SS implementation. It fully supports -the `VersionedWriter` API. The implementation is relatively straightforward and -easy to understand as it’s entirely SQL-based. However, benchmarks show that this -options is least performant, even for reads. This SS backend has a lot of promise, -but needs more benchmarking and potential SQL optimizations, like dedicated tables -for certain aspects of state, e.g. latest state, to be extremely performant. - -## Benchmarks - -Benchmarks for basic operations on all supported native SS implementations can -be found in `store/storage/storage_bench_test.go`. 
- -At the time of writing, the following benchmarks were performed: - -```shell -name time/op -Get/backend_rocksdb_versiondb_opts-10 7.41µs ± 0% -Get/backend_pebbledb_default_opts-10 6.17µs ± 0% -Get/backend_btree_sqlite-10 29.1µs ± 0% -ApplyChangeset/backend_pebbledb_default_opts-10 5.73ms ± 0% -ApplyChangeset/backend_btree_sqlite-10 56.9ms ± 0% -ApplyChangeset/backend_rocksdb_versiondb_opts-10 4.07ms ± 0% -Iterate/backend_pebbledb_default_opts-10 1.04s ± 0% -Iterate/backend_btree_sqlite-10 1.59s ± 0% -Iterate/backend_rocksdb_versiondb_opts-10 778ms ± 0% -``` - -## Pruning - -Pruning is the process of efficiently managing and removing outdated or redundant -data from the State Storage (SS). To facilitate this, the SS backend must implement -the `Pruner` interface, allowing the `PruningManager` to execute data pruning operations -according to the specified `PruningOption`. - -## State Sync - -State storage (SS) does not have a direct notion of state sync. Rather, `snapshots.Manager` -is responsible for creating and restoring snapshots of the entire state. The -`snapshots.Manager` has a `StorageSnapshotter` field which is fulfilled by the -`StorageStore` type, specifically it implements the `Restore` method. The `Restore` -method reads off of a provided channel and writes key/value pairs directly to a -batch object which is committed to the underlying SS engine. - -## Non-Consensus Data - - - -## Usage - -An SS backend is meant to be used within a broader store implementation, as it -only stores data for direct and historical query purposes. We define a `Database` -interface in the `storage` package which is mean to be represent a `VersionedWriter` -with only the necessary methods. The `StorageStore` interface is meant to wrap or -accept this `Database` type, e.g. RocksDB. - -The `StorageStore` interface is an abstraction or wrapper around the backing SS -engine can be seen as the main entry point to using SS. 
- -Higher up the stack, there should exist a `root.Store` implementation. The `root.Store` -is meant to encapsulate both an SS backend and an SC backend. The SS backend is -defined by this `StorageStore` implementation. - -In short, initialize your SS engine of choice and then provide that to `NewStorageStore` -which will further be provided to `root.Store` as the SS backend. diff --git a/store/v2/storage/database.go b/store/v2/storage/database.go deleted file mode 100644 index e969a9ee6338..000000000000 --- a/store/v2/storage/database.go +++ /dev/null @@ -1,27 +0,0 @@ -package storage - -import ( - "io" - - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" -) - -// Database is an interface that wraps the storage database methods. A wrapper -// is useful for instances where you want to perform logic that is identical for all SS -// backends, such as restoring snapshots. -type Database interface { - NewBatch(version uint64) (store.Batch, error) - Has(storeKey []byte, version uint64, key []byte) (bool, error) - Get(storeKey []byte, version uint64, key []byte) ([]byte, error) - GetLatestVersion() (uint64, error) - SetLatestVersion(version uint64) error - VersionExists(version uint64) (bool, error) - - Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) - ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) - - Prune(version uint64) error - - io.Closer -} diff --git a/store/v2/storage/pebbledb/batch.go b/store/v2/storage/pebbledb/batch.go deleted file mode 100644 index fdd58f447435..000000000000 --- a/store/v2/storage/pebbledb/batch.go +++ /dev/null @@ -1,97 +0,0 @@ -package pebbledb - -import ( - "encoding/binary" - "errors" - "fmt" - - "github.com/cockroachdb/pebble" - - "cosmossdk.io/store/v2" -) - -var _ store.Batch = (*Batch)(nil) - -type Batch struct { - storage *pebble.DB - batch *pebble.Batch - version uint64 - sync bool - size int -} - -const ( - oneIf64Bit = ^uint(0) 
>> 63 - maxUint32OrInt = (1<<31)< maxUint32OrInt { - // 4 GB is huge, probably genesis; flush and reset - if err := b.batch.Commit(&pebble.WriteOptions{Sync: b.sync}); err != nil { - return fmt.Errorf("max batch size exceed: failed to write PebbleDB batch: %w", err) - } - b.batch.Reset() - b.size = 0 - } - - if err := b.batch.Set(prefixedKey, prefixedVal, nil); err != nil { - return fmt.Errorf("failed to write PebbleDB batch: %w", err) - } - b.size += size - - return nil -} - -func (b *Batch) Set(storeKey, key, value []byte) error { - return b.set(storeKey, 0, key, value) -} - -func (b *Batch) Delete(storeKey, key []byte) error { - return b.set(storeKey, b.version, key, []byte(tombstoneVal)) -} - -func (b *Batch) Write() (err error) { - defer func() { - err = errors.Join(err, b.batch.Close()) - }() - - return b.batch.Commit(&pebble.WriteOptions{Sync: b.sync}) -} diff --git a/store/v2/storage/pebbledb/comparator.go b/store/v2/storage/pebbledb/comparator.go deleted file mode 100644 index 24f5e05a6214..000000000000 --- a/store/v2/storage/pebbledb/comparator.go +++ /dev/null @@ -1,242 +0,0 @@ -package pebbledb - -import ( - "bytes" - "encoding/binary" - "fmt" - - "github.com/cockroachdb/pebble" -) - -// MVCCComparer returns a PebbleDB Comparer with encoding and decoding routines -// for MVCC control, used to compare and store versioned keys. 
-// -// Note: This Comparer implementation is largely based on PebbleDB's internal -// MVCC example, which can be found here: -// https://github.com/cockroachdb/pebble/blob/master/cmd/pebble/mvcc.go -var MVCCComparer = &pebble.Comparer{ - Name: "ss_pebbledb_comparator", - - Compare: MVCCKeyCompare, - - AbbreviatedKey: func(k []byte) uint64 { - key, _, ok := SplitMVCCKey(k) - if !ok { - return 0 - } - - return pebble.DefaultComparer.AbbreviatedKey(key) - }, - - Equal: func(a, b []byte) bool { - return MVCCKeyCompare(a, b) == 0 - }, - - Separator: func(dst, a, b []byte) []byte { - aKey, _, ok := SplitMVCCKey(a) - if !ok { - return append(dst, a...) - } - - bKey, _, ok := SplitMVCCKey(b) - if !ok { - return append(dst, a...) - } - - // if the keys are the same just return a - if bytes.Equal(aKey, bKey) { - return append(dst, a...) - } - - n := len(dst) - - // MVCC key comparison uses bytes.Compare on the roachpb.Key, which is the - // same semantics as pebble.DefaultComparer, so reuse the latter's Separator - // implementation. - dst = pebble.DefaultComparer.Separator(dst, aKey, bKey) - - // Did we pick a separator different than aKey? If we did not, we can't do - // better than a. - buf := dst[n:] - if bytes.Equal(aKey, buf) { - return append(dst[:n], a...) - } - - // The separator is > aKey, so we only need to add the timestamp sentinel. - return append(dst, 0) - }, - - ImmediateSuccessor: func(dst, a []byte) []byte { - // The key `a` is guaranteed to be a bare prefix: It's a key without a version - // — just a trailing 0-byte to signify the length of the version. For example - // the user key "foo" is encoded as: "foo\0". We need to encode the immediate - // successor to "foo", which in the natural byte ordering is "foo\0". Append - // a single additional zero, to encode the user key "foo\0" with a zero-length - // version. 
- return append(append(dst, a...), 0) - }, - - Successor: func(dst, a []byte) []byte { - aKey, _, ok := SplitMVCCKey(a) - if !ok { - return append(dst, a...) - } - - n := len(dst) - - // MVCC key comparison uses bytes.Compare on the roachpb.Key, which is the - // same semantics as pebble.DefaultComparer, so reuse the latter's Successor - // implementation. - dst = pebble.DefaultComparer.Successor(dst, aKey) - - // Did we pick a successor different than aKey? If we did not, we can't do - // better than a. - buf := dst[n:] - if bytes.Equal(aKey, buf) { - return append(dst[:n], a...) - } - - // The successor is > aKey, so we only need to add the timestamp sentinel. - return append(dst, 0) - }, - - FormatKey: func(k []byte) fmt.Formatter { - return mvccKeyFormatter{key: k} - }, - - Split: func(k []byte) int { - key, _, ok := SplitMVCCKey(k) - if !ok { - return len(k) - } - - // This matches the behavior of libroach/KeyPrefix. RocksDB requires that - // keys generated via a SliceTransform be comparable with normal encoded - // MVCC keys. Encoded MVCC keys have a suffix indicating the number of - // bytes of timestamp data. MVCC keys without a timestamp have a suffix of - // 0. We're careful in EncodeKey to make sure that the user-key always has - // a trailing 0. If there is no timestamp this falls out naturally. If - // there is a timestamp we prepend a 0 to the encoded timestamp data. - return len(key) + 1 - }, -} - -type mvccKeyFormatter struct { - key []byte -} - -func (f mvccKeyFormatter) Format(s fmt.State, verb rune) { - k, vBz, ok := SplitMVCCKey(f.key) - if ok { - v, _ := decodeUint64Ascending(vBz) - fmt.Fprintf(s, "%s/%d", k, v) - } else { - fmt.Fprintf(s, "%s", f.key) - } -} - -// SplitMVCCKey accepts an MVCC key and returns the "user" key, the MVCC version, -// and a boolean indicating if the provided key is an MVCC key. 
-// -// Note, internally, we must make a copy of the provided mvccKey argument, which -// typically comes from the Key() method as it's not safe. -func SplitMVCCKey(mvccKey []byte) (key, version []byte, ok bool) { - if len(mvccKey) == 0 { - return nil, nil, false - } - - mvccKeyCopy := bytes.Clone(mvccKey) - - n := len(mvccKeyCopy) - 1 - tsLen := int(mvccKeyCopy[n]) - if n < tsLen { - return nil, nil, false - } - - key = mvccKeyCopy[:n-tsLen] - if tsLen > 0 { - version = mvccKeyCopy[n-tsLen+1 : n] - } - - return key, version, true -} - -// MVCCKeyCompare compares two MVCC keys. -func MVCCKeyCompare(a, b []byte) int { - aEnd := len(a) - 1 - bEnd := len(b) - 1 - if aEnd < 0 || bEnd < 0 { - // This should never happen unless there is some sort of corruption of - // the keys. This is a little bizarre, but the behavior exactly matches - // engine/db.cc:DBComparator. - return bytes.Compare(a, b) - } - - // Compute the index of the separator between the key and the timestamp. - aSep := aEnd - int(a[aEnd]) - bSep := bEnd - int(b[bEnd]) - if aSep < 0 || bSep < 0 { - // This should never happen unless there is some sort of corruption of - // the keys. This is a little bizarre, but the behavior exactly matches - // engine/db.cc:DBComparator. - return bytes.Compare(a, b) - } - - // compare the "user key" part of the key - if c := bytes.Compare(a[:aSep], b[:bSep]); c != 0 { - return c - } - - // compare the timestamp part of the key - aTS := a[aSep:aEnd] - bTS := b[bSep:bEnd] - if len(aTS) == 0 { - if len(bTS) == 0 { - return 0 - } - return -1 - } else if len(bTS) == 0 { - return 1 - } - - return bytes.Compare(aTS, bTS) -} - -// MVCCEncode encodes a key and version into an MVCC format. -// The format is: \x00[]<#version-bytes> -// If the version is 0, only the key and a null byte are encoded. -func MVCCEncode(key []byte, version uint64) (dst []byte) { - dst = append(dst, key...) 
- dst = append(dst, 0) - - if version != 0 { - extra := byte(1 + 8) - dst = encodeUint64Ascending(dst, version) - dst = append(dst, extra) - } - - return dst -} - -// encodeUint64Ascending encodes the uint64 value using a big-endian 8 byte -// representation. The bytes are appended to the supplied buffer and -// the final buffer is returned. -func encodeUint64Ascending(dst []byte, v uint64) []byte { - return append( - dst, - byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), - byte(v>>24), byte(v>>16), byte(v>>8), byte(v), - ) -} - -// decodeUint64Ascending decodes a uint64 from the input buffer, treating -// the input as a big-endian 8 byte uint64 representation. The decoded uint64 is -// returned. -func decodeUint64Ascending(b []byte) (uint64, error) { - if len(b) < 8 { - return 0, fmt.Errorf("insufficient bytes to decode uint64 int value; expected 8; got %d", len(b)) - } - - v := binary.BigEndian.Uint64(b) - return v, nil -} diff --git a/store/v2/storage/pebbledb/comparator_test.go b/store/v2/storage/pebbledb/comparator_test.go deleted file mode 100644 index 1affd81b408c..000000000000 --- a/store/v2/storage/pebbledb/comparator_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package pebbledb - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestMVCCKey(t *testing.T) { - for i := uint64(1); i < 1001; i++ { - keyA := MVCCEncode([]byte("key001"), i) - - key, vBz, ok := SplitMVCCKey(keyA) - - version, err := decodeUint64Ascending(vBz) - require.NoError(t, err) - require.True(t, ok) - require.Equal(t, i, version) - require.Equal(t, []byte("key001"), key) - } -} - -func TestMVCCKeyCompare(t *testing.T) { - testCases := []struct { - keyA []byte - keyB []byte - expected int - }{ - { - // same key, same version - keyA: MVCCEncode([]byte("key001"), 1), - keyB: MVCCEncode([]byte("key001"), 1), - expected: 0, - }, - { - // same key, different version - keyA: MVCCEncode([]byte("key001"), 1), - keyB: MVCCEncode([]byte("key001"), 2), - expected: -1, - }, - { 
- // same key, different version (inverse) - keyA: MVCCEncode([]byte("key001"), 2), - keyB: MVCCEncode([]byte("key001"), 1), - expected: 1, - }, - { - // different key, same version - keyA: MVCCEncode([]byte("key001"), 1), - keyB: MVCCEncode([]byte("key009"), 1), - expected: -1, - }, - } - - for _, tc := range testCases { - require.Equalf(t, tc.expected, MVCCKeyCompare(tc.keyA, tc.keyB), "keyA: %s, keyB: %s", tc.keyA, tc.keyB) - } -} diff --git a/store/v2/storage/pebbledb/db.go b/store/v2/storage/pebbledb/db.go deleted file mode 100644 index 20fc3f11c7f1..000000000000 --- a/store/v2/storage/pebbledb/db.go +++ /dev/null @@ -1,528 +0,0 @@ -package pebbledb - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "math" - "slices" - - "github.com/cockroachdb/pebble" - - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" - storeerrors "cosmossdk.io/store/v2/errors" - "cosmossdk.io/store/v2/internal/encoding" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/util" -) - -const ( - VersionSize = 8 - // PruneCommitBatchSize defines the size, in number of key/value pairs, to prune - // in a single batch. - PruneCommitBatchSize = 50 - // batchBufferSize defines the maximum size of a batch before it is committed. - batchBufferSize = 100_000 - - StorePrefixTpl = "s/k:%s/" // s/k: - removedStoreKeyPrefix = "s/_removed_key" // NB: removedStoreKeys key must be lexically smaller than StorePrefixTpl - latestVersionKey = "s/_latest" // NB: latestVersionKey key must be lexically smaller than StorePrefixTpl - pruneHeightKey = "s/_prune_height" // NB: pruneHeightKey key must be lexically smaller than StorePrefixTpl - tombstoneVal = "TOMBSTONE" -) - -var ( - _ storage.Database = (*Database)(nil) - _ store.UpgradableDatabase = (*Database)(nil) -) - -type Database struct { - storage *pebble.DB - - // earliestVersion defines the earliest version set in the database, which is - // only updated when the database is pruned. 
- earliestVersion uint64 - - // Sync is whether to sync writes through the OS buffer cache and down onto - // the actual disk, if applicable. Setting Sync is required for durability of - // individual write operations but can result in slower writes. - // - // If false, and the process or machine crashes, then a recent write may be - // lost. This is due to the recently written data being buffered inside the - // process running Pebble. This differs from the semantics of a write system - // call in which the data is buffered in the OS buffer cache and would thus - // survive a process crash. - sync bool -} - -func New(dataDir string) (*Database, error) { - opts := &pebble.Options{ - Comparer: MVCCComparer, - } - opts = opts.EnsureDefaults() - - db, err := pebble.Open(dataDir, opts) - if err != nil { - return nil, fmt.Errorf("failed to open PebbleDB: %w", err) - } - - earliestVersion, err := getEarliestVersion(db) - if err != nil { - return nil, fmt.Errorf("failed to get the earliest version: %w", err) - } - - return &Database{ - storage: db, - earliestVersion: earliestVersion, - sync: true, - }, nil -} - -func NewWithDB(storage *pebble.DB, sync bool) *Database { - earliestVersion, err := getEarliestVersion(storage) - if err != nil { - panic(fmt.Errorf("failed to get the earliest version: %w", err)) - } - - return &Database{ - storage: storage, - earliestVersion: earliestVersion, - sync: sync, - } -} - -func (db *Database) SetSync(sync bool) { - db.sync = sync -} - -func (db *Database) Close() error { - err := db.storage.Close() - db.storage = nil - return err -} - -func (db *Database) NewBatch(version uint64) (store.Batch, error) { - b, err := NewBatch(db.storage, version, db.sync) - if err != nil { - return nil, err - } - - return b, nil -} - -func (db *Database) SetLatestVersion(version uint64) error { - var ts [VersionSize]byte - binary.LittleEndian.PutUint64(ts[:], version) - - return db.storage.Set([]byte(latestVersionKey), ts[:], &pebble.WriteOptions{Sync: 
db.sync}) -} - -func (db *Database) GetLatestVersion() (uint64, error) { - bz, closer, err := db.storage.Get([]byte(latestVersionKey)) - if err != nil { - if errors.Is(err, pebble.ErrNotFound) { - // in case of a fresh database - return 0, nil - } - - return 0, err - } - - if len(bz) == 0 { - return 0, closer.Close() - } - - return binary.LittleEndian.Uint64(bz), closer.Close() -} - -func (db *Database) VersionExists(version uint64) (bool, error) { - latestVersion, err := db.GetLatestVersion() - if err != nil { - return false, err - } - - return latestVersion >= version && version >= db.earliestVersion, nil -} - -func (db *Database) setPruneHeight(pruneVersion uint64) error { - db.earliestVersion = pruneVersion + 1 - - var ts [VersionSize]byte - binary.LittleEndian.PutUint64(ts[:], pruneVersion) - - return db.storage.Set([]byte(pruneHeightKey), ts[:], &pebble.WriteOptions{Sync: db.sync}) -} - -func (db *Database) Has(storeKey []byte, version uint64, key []byte) (bool, error) { - val, err := db.Get(storeKey, version, key) - if err != nil { - return false, err - } - - return val != nil, nil -} - -func (db *Database) Get(storeKey []byte, targetVersion uint64, key []byte) ([]byte, error) { - if targetVersion < db.earliestVersion { - return nil, storeerrors.ErrVersionPruned{EarliestVersion: db.earliestVersion, RequestedVersion: targetVersion} - } - - prefixedVal, err := getMVCCSlice(db.storage, storeKey, key, targetVersion) - if err != nil { - if errors.Is(err, storeerrors.ErrRecordNotFound) { - return nil, nil - } - - return nil, fmt.Errorf("failed to perform PebbleDB read: %w", err) - } - - valBz, tombBz, ok := SplitMVCCKey(prefixedVal) - if !ok { - return nil, fmt.Errorf("invalid PebbleDB MVCC value: %s", prefixedVal) - } - - // A tombstone of zero or a target version that is less than the tombstone - // version means the key is not deleted at the target version. 
- if len(tombBz) == 0 { - return valBz, nil - } - - tombstone, err := decodeUint64Ascending(tombBz) - if err != nil { - return nil, fmt.Errorf("failed to decode value tombstone: %w", err) - } - - // A tombstone of zero or a target version that is less than the tombstone - // version means the key is not deleted at the target version. - if targetVersion < tombstone { - return valBz, nil - } - - // the value is considered deleted - return nil, nil -} - -// Prune removes all versions of all keys that are <= the given version. -// -// Note, the implementation of this method is inefficient and can be potentially -// time consuming given the size of the database and when the last pruning occurred -// (if any). This is because the implementation iterates over all keys in the -// database in order to delete them. -// -// See: https://github.com/cockroachdb/cockroach/blob/33623e3ee420174a4fd3226d1284b03f0e3caaac/pkg/storage/mvcc.go#L3182 -func (db *Database) Prune(version uint64) (err error) { - itr, err := db.storage.NewIter(&pebble.IterOptions{LowerBound: []byte("s/k:")}) - if err != nil { - return err - } - defer itr.Close() - - batch := db.storage.NewBatch() - defer func() { - err = errors.Join(err, batch.Close()) - }() - - var ( - batchCounter int - prevKey, prevKeyPrefixed, prevPrefixedVal []byte - prevKeyVersion uint64 - ) - - for itr.First(); itr.Valid(); { - prefixedKey := slices.Clone(itr.Key()) - - keyBz, verBz, ok := SplitMVCCKey(prefixedKey) - if !ok { - return fmt.Errorf("invalid PebbleDB MVCC key: %s", prefixedKey) - } - - var keyVersion uint64 - // handle version 0 (no version prefix) - if len(verBz) > 0 { - keyVersion, err = decodeUint64Ascending(verBz) - if err != nil { - return fmt.Errorf("failed to decode key version: %w", err) - } - } - // seek to next key if we are at a version which is higher than prune height - if keyVersion > version { - itr.NextPrefix() - continue - } - - // Delete a key if another entry for that key exists a larger version than - 
// the original but <= to the prune height. We also delete a key if it has - // been tombstoned and its version is <= to the prune height. - if prevKeyVersion <= version && (bytes.Equal(prevKey, keyBz) || valTombstoned(prevPrefixedVal)) { - if err := batch.Delete(prevKeyPrefixed, nil); err != nil { - return err - } - - batchCounter++ - if batchCounter >= PruneCommitBatchSize { - if err := batch.Commit(&pebble.WriteOptions{Sync: db.sync}); err != nil { - return err - } - - batchCounter = 0 - batch.Reset() - } - } - - prevKey = keyBz - prevKeyVersion = keyVersion - prevKeyPrefixed = prefixedKey - value, err := itr.ValueAndErr() - if err != nil { - return err - } - prevPrefixedVal = slices.Clone(value) - - itr.Next() - } - - // commit any leftover delete ops in batch - if batchCounter > 0 { - if err := batch.Commit(&pebble.WriteOptions{Sync: db.sync}); err != nil { - return err - } - } - - if err := db.deleteRemovedStoreKeys(version); err != nil { - return err - } - - return db.setPruneHeight(version) -} - -func (db *Database) Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, storeerrors.ErrKeyEmpty - } - - if start != nil && end != nil && bytes.Compare(start, end) > 0 { - return nil, storeerrors.ErrStartAfterEnd - } - - lowerBound := MVCCEncode(prependStoreKey(storeKey, start), 0) - - var upperBound []byte - if end != nil { - upperBound = MVCCEncode(prependStoreKey(storeKey, end), 0) - } - - itr, err := db.storage.NewIter(&pebble.IterOptions{LowerBound: lowerBound, UpperBound: upperBound}) - if err != nil { - return nil, err - } - - return newPebbleDBIterator(itr, storePrefix(storeKey), start, end, version, db.earliestVersion, false), nil -} - -func (db *Database) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - 
return nil, storeerrors.ErrKeyEmpty - } - - if start != nil && end != nil && bytes.Compare(start, end) > 0 { - return nil, storeerrors.ErrStartAfterEnd - } - - lowerBound := MVCCEncode(prependStoreKey(storeKey, start), 0) - - var upperBound []byte - if end != nil { - upperBound = MVCCEncode(prependStoreKey(storeKey, end), 0) - } - - itr, err := db.storage.NewIter(&pebble.IterOptions{LowerBound: lowerBound, UpperBound: upperBound}) - if err != nil { - return nil, err - } - - return newPebbleDBIterator(itr, storePrefix(storeKey), start, end, version, db.earliestVersion, true), nil -} - -func (db *Database) PruneStoreKeys(storeKeys []string, version uint64) (err error) { - batch := db.storage.NewBatch() - defer func() { - err = errors.Join(err, batch.Close()) - }() - - for _, storeKey := range storeKeys { - if err := batch.Set([]byte(fmt.Sprintf("%s%s", encoding.BuildPrefixWithVersion(removedStoreKeyPrefix, version), storeKey)), []byte{}, nil); err != nil { - return err - } - } - - return batch.Commit(&pebble.WriteOptions{Sync: db.sync}) -} - -func storePrefix(storeKey []byte) []byte { - return []byte(fmt.Sprintf(StorePrefixTpl, storeKey)) -} - -func prependStoreKey(storeKey, key []byte) []byte { - return []byte(fmt.Sprintf("%s%s", storePrefix(storeKey), key)) -} - -// getEarliestVersion returns the earliest version set in the database. -// It is calculated by prune height + 1. If the prune height is not set, it -// returns 0. 
-func getEarliestVersion(storage *pebble.DB) (uint64, error) { - bz, closer, err := storage.Get([]byte(pruneHeightKey)) - if err != nil { - if errors.Is(err, pebble.ErrNotFound) { - // in cases where pruning was never triggered - return 0, nil - } - - return 0, err - } - - if len(bz) == 0 { - return 0, closer.Close() - } - - return binary.LittleEndian.Uint64(bz) + 1, closer.Close() -} - -func valTombstoned(value []byte) bool { - if value == nil { - return false - } - - _, tombBz, ok := SplitMVCCKey(value) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC value. - panic(fmt.Sprintf("invalid PebbleDB MVCC value: %s", value)) - } - - // If the tombstone suffix is empty, we consider this a zero value and thus it - // is not tombstoned. - if len(tombBz) == 0 { - return false - } - - return true -} - -func getMVCCSlice(db *pebble.DB, storeKey, key []byte, version uint64) ([]byte, error) { - // end domain is exclusive, so we need to increment the version by 1 - if version < math.MaxUint64 { - version++ - } - - itr, err := db.NewIter(&pebble.IterOptions{ - LowerBound: MVCCEncode(prependStoreKey(storeKey, key), 0), - UpperBound: MVCCEncode(prependStoreKey(storeKey, key), version), - }) - if err != nil { - return nil, err - } - - defer itr.Close() - - if !itr.Last() { - return nil, storeerrors.ErrRecordNotFound - } - - _, vBz, ok := SplitMVCCKey(itr.Key()) - if !ok { - return nil, fmt.Errorf("invalid PebbleDB MVCC key: %s", itr.Key()) - } - - var keyVersion uint64 - // handle version 0 (no version prefix) - if len(vBz) > 0 { - keyVersion, err = decodeUint64Ascending(vBz) - if err != nil { - return nil, fmt.Errorf("failed to decode key version: %w", err) - } - } - if keyVersion > version { - return nil, fmt.Errorf("key version too large: %d", keyVersion) - } - - value, err := itr.ValueAndErr() - return slices.Clone(value), err -} - -func (db *Database) deleteRemovedStoreKeys(version uint64) (err error) { - batch := 
db.storage.NewBatch() - defer func() { - err = errors.Join(err, batch.Close()) - }() - - end := encoding.BuildPrefixWithVersion(removedStoreKeyPrefix, version+1) - storeKeyIter, err := db.storage.NewIter(&pebble.IterOptions{LowerBound: []byte(removedStoreKeyPrefix), UpperBound: end}) - if err != nil { - return err - } - defer storeKeyIter.Close() - - storeKeys := make(map[string]uint64) - prefixLen := len(end) - for storeKeyIter.First(); storeKeyIter.Valid(); storeKeyIter.Next() { - verBz := storeKeyIter.Key()[len(removedStoreKeyPrefix):prefixLen] - v, err := decodeUint64Ascending(verBz) - if err != nil { - return err - } - storeKey := string(storeKeyIter.Key()[prefixLen:]) - if ev, ok := storeKeys[storeKey]; ok { - if ev < v { - storeKeys[storeKey] = v - } - } else { - storeKeys[storeKey] = v - } - if err := batch.Delete(storeKeyIter.Key(), nil); err != nil { - return err - } - } - - for storeKey, v := range storeKeys { - if err := func() error { - storeKey := []byte(storeKey) - itr, err := db.storage.NewIter(&pebble.IterOptions{LowerBound: storePrefix(storeKey), UpperBound: storePrefix(util.CopyIncr(storeKey))}) - if err != nil { - return err - } - defer itr.Close() - - for itr.First(); itr.Valid(); itr.Next() { - itrKey := itr.Key() - _, verBz, ok := SplitMVCCKey(itrKey) - if !ok { - return fmt.Errorf("invalid PebbleDB MVCC key: %s", itrKey) - } - keyVersion, err := decodeUint64Ascending(verBz) - if err != nil { - return err - } - if keyVersion > v { - // skip keys that are newer than the version - continue - } - if err := batch.Delete(itr.Key(), nil); err != nil { - return err - } - if batch.Len() >= batchBufferSize { - if err := batch.Commit(&pebble.WriteOptions{Sync: db.sync}); err != nil { - return err - } - batch.Reset() - } - } - return nil - }(); err != nil { - return err - } - } - - return batch.Commit(&pebble.WriteOptions{Sync: true}) -} diff --git a/store/v2/storage/pebbledb/db_test.go b/store/v2/storage/pebbledb/db_test.go deleted file mode 100644 
index 0ef4c8ca9f25..000000000000 --- a/store/v2/storage/pebbledb/db_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package pebbledb - -import ( - "testing" - - "github.com/stretchr/testify/suite" - - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/store/v2/storage" -) - -func TestStorageTestSuite(t *testing.T) { - s := &storage.StorageTestSuite{ - NewDB: func(dir string) (*storage.StorageStore, error) { - db, err := New(dir) - if err == nil && db != nil { - // We set sync=false just to speed up CI tests. Operators should take - // careful consideration when setting this value in production environments. - db.SetSync(false) - } - - return storage.NewStorageStore(db, coretesting.NewNopLogger()), err - }, - EmptyBatchSize: 12, - } - - suite.Run(t, s) -} diff --git a/store/v2/storage/pebbledb/iterator.go b/store/v2/storage/pebbledb/iterator.go deleted file mode 100644 index 2401ab4ef000..000000000000 --- a/store/v2/storage/pebbledb/iterator.go +++ /dev/null @@ -1,437 +0,0 @@ -package pebbledb - -import ( - "bytes" - "fmt" - "slices" - - "github.com/cockroachdb/pebble" - - corestore "cosmossdk.io/core/store" -) - -var _ corestore.Iterator = (*iterator)(nil) - -// iterator implements the store.Iterator interface. It wraps a PebbleDB iterator -// with added MVCC key handling logic. The iterator will iterate over the key space -// in the provided domain for a given version. If a key has been written at the -// provided version, that key/value pair will be iterated over. Otherwise, the -// latest version for that key/value pair will be iterated over s.t. it's less -// than the provided version. 
-type iterator struct { - source *pebble.Iterator - prefix, start, end []byte - version uint64 - valid bool - reverse bool -} - -func newPebbleDBIterator(src *pebble.Iterator, prefix, mvccStart, mvccEnd []byte, version, earliestVersion uint64, reverse bool) *iterator { - if version < earliestVersion { - return &iterator{ - source: src, - prefix: prefix, - start: mvccStart, - end: mvccEnd, - version: version, - valid: false, - reverse: reverse, - } - } - - // move the underlying PebbleDB iterator to the first key - var valid bool - if reverse { - valid = src.Last() - } else { - valid = src.First() - } - - itr := &iterator{ - source: src, - prefix: prefix, - start: mvccStart, - end: mvccEnd, - version: version, - valid: valid, - reverse: reverse, - } - - if valid { - currKey, currKeyVersion, ok := SplitMVCCKey(itr.source.Key()) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC value. - panic(fmt.Sprintf("invalid PebbleDB MVCC value: %s", itr.source.Key())) - } - - curKeyVersionDecoded, err := decodeUint64Ascending(currKeyVersion) - if err != nil { - itr.valid = false - return itr - } - - // We need to check whether initial key iterator visits has a version <= requested - // version. If larger version, call next to find another key which does. - if curKeyVersionDecoded > itr.version { - itr.Next() - } else { - // If version is less, seek to the largest version of that key <= requested - // iterator version. It is guaranteed this won't move the iterator to a key - // that is invalid since curKeyVersionDecoded <= requested iterator version, - // so there exists at least one version of currKey SeekLT may move to. - itr.valid = itr.source.SeekLT(MVCCEncode(currKey, itr.version+1)) - } - - // The cursor might now be pointing at a key/value pair that is tombstoned. - // If so, we must move the cursor. - if itr.valid && itr.cursorTombstoned() { - itr.Next() - } - } - return itr -} - -// Domain returns the domain of the iterator. 
The caller must not modify the -// return values. -func (itr *iterator) Domain() ([]byte, []byte) { - return itr.start, itr.end -} - -func (itr *iterator) Key() []byte { - itr.assertIsValid() - - key, _, ok := SplitMVCCKey(itr.source.Key()) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC key. - panic(fmt.Sprintf("invalid PebbleDB MVCC key: %s", itr.source.Key())) - } - - keyCopy := slices.Clone(key) - return keyCopy[len(itr.prefix):] -} - -func (itr *iterator) Value() []byte { - itr.assertIsValid() - - val, _, ok := SplitMVCCKey(itr.source.Value()) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC value. - panic(fmt.Sprintf("invalid PebbleDB MVCC value: %s", itr.source.Key())) - } - - return slices.Clone(val) -} - -func (itr *iterator) Next() { - if itr.reverse { - itr.nextReverse() - } else { - itr.nextForward() - } -} - -func (itr *iterator) Valid() bool { - // once invalid, forever invalid - if !itr.valid || !itr.source.Valid() { - itr.valid = false - return itr.valid - } - - // if source has error, consider it invalid - if err := itr.source.Error(); err != nil { - itr.valid = false - return itr.valid - } - - // if key is at the end or past it, consider it invalid - if end := itr.end; end != nil { - if bytes.Compare(end, itr.Key()) <= 0 { - itr.valid = false - return itr.valid - } - } - - return true -} - -func (itr *iterator) Error() error { - return itr.source.Error() -} - -func (itr *iterator) Close() error { - err := itr.source.Close() - itr.source = nil - itr.valid = false - - return err -} - -func (itr *iterator) assertIsValid() { - if !itr.valid { - panic("iterator is invalid") - } -} - -// cursorTombstoned checks if the current cursor is pointing at a key/value pair -// that is tombstoned. If the cursor is tombstoned, is returned, otherwise -// is returned. 
In the case where the iterator is valid but the key/value -// pair is tombstoned, the caller should call Next(). Note, this method assumes -// the caller assures the iterator is valid first! -func (itr *iterator) cursorTombstoned() bool { - _, tombBz, ok := SplitMVCCKey(itr.source.Value()) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC value. - panic(fmt.Sprintf("invalid PebbleDB MVCC value: %s", itr.source.Key())) - } - - // If the tombstone suffix is empty, we consider this a zero value and thus it - // is not tombstoned. - if len(tombBz) == 0 { - return false - } - - // If the tombstone suffix is non-empty and greater than the target version, - // the value is not tombstoned. - tombstone, err := decodeUint64Ascending(tombBz) - if err != nil { - panic(fmt.Errorf("failed to decode value tombstone: %w", err)) - } - if tombstone > itr.version { - return false - } - - return true -} - -func (itr *iterator) DebugRawIterate() { - valid := itr.source.Valid() - if valid { - // The first key may not represent the desired target version, so move the - // cursor to the correct location. 
- firstKey, _, _ := SplitMVCCKey(itr.source.Key()) - valid = itr.source.SeekLT(MVCCEncode(firstKey, itr.version+1)) - } - - var err error - for valid { - key, vBz, ok := SplitMVCCKey(itr.source.Key()) - if !ok { - panic(fmt.Sprintf("invalid PebbleDB MVCC key: %s", itr.source.Key())) - } - - var version uint64 - // handle version 0 (no version prefix) - if len(vBz) > 0 { - version, err = decodeUint64Ascending(vBz) - if err != nil { - panic(fmt.Errorf("failed to decode key version: %w", err)) - } - } - - val, tombBz, ok := SplitMVCCKey(itr.source.Value()) - if !ok { - panic(fmt.Sprintf("invalid PebbleDB MVCC value: %s", itr.source.Value())) - } - - var tombstone uint64 - if len(tombBz) > 0 { - tombstone, err = decodeUint64Ascending(vBz) - if err != nil { - panic(fmt.Errorf("failed to decode value tombstone: %w", err)) - } - } - - fmt.Printf("KEY: %s, VALUE: %s, VERSION: %d, TOMBSTONE: %d\n", key, val, version, tombstone) - - var next bool - if itr.reverse { - next = itr.source.SeekLT(MVCCEncode(key, 0)) - } else { - next = itr.source.NextPrefix() - } - - if next { - nextKey, _, ok := SplitMVCCKey(itr.source.Key()) - if !ok { - panic(fmt.Sprintf("invalid PebbleDB MVCC key: %s", itr.source.Key())) - } - - // the next key must have itr.prefix as the prefix - if !bytes.HasPrefix(nextKey, itr.prefix) { - valid = false - } else { - valid = itr.source.SeekLT(MVCCEncode(nextKey, itr.version+1)) - } - } else { - valid = false - } - } -} - -func (itr *iterator) nextForward() { - if !itr.source.Valid() { - itr.valid = false - return - } - - currKey, _, ok := SplitMVCCKey(itr.source.Key()) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC key. - panic(fmt.Sprintf("invalid PebbleDB MVCC key: %s", itr.source.Key())) - } - - next := itr.source.NextPrefix() - - // First move the iterator to the next prefix, which may not correspond to the - // desired version for that key, e.g. 
if the key was written at a later version, - // so we seek back to the latest desired version, s.t. the version is <= itr.version. - if next { - nextKey, _, ok := SplitMVCCKey(itr.source.Key()) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC key. - itr.valid = false - return - } - - if !bytes.HasPrefix(nextKey, itr.prefix) { - // the next key must have itr.prefix as the prefix - itr.valid = false - return - } - - // Move the iterator to the closest version to the desired version, so we - // append the current iterator key to the prefix and seek to that key. - itr.valid = itr.source.SeekLT(MVCCEncode(nextKey, itr.version+1)) - - tmpKey, tmpKeyVersion, ok := SplitMVCCKey(itr.source.Key()) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC key. - itr.valid = false - return - } - - // There exists cases where the SeekLT() call moved us back to the same key - // we started at, so we must move to next key, i.e. two keys forward. - if bytes.Equal(tmpKey, currKey) { - if itr.source.NextPrefix() { - itr.nextForward() - - _, tmpKeyVersion, ok = SplitMVCCKey(itr.source.Key()) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC key. - itr.valid = false - return - } - - } else { - itr.valid = false - return - } - } - - // We need to verify that every Next call either moves the iterator to a key - // whose version is less than or equal to requested iterator version, or - // exhausts the iterator. - tmpKeyVersionDecoded, err := decodeUint64Ascending(tmpKeyVersion) - if err != nil { - itr.valid = false - return - } - - // If iterator is at a entry whose version is higher than requested version, - // call nextForward again. - if tmpKeyVersionDecoded > itr.version { - itr.nextForward() - } - - // The cursor might now be pointing at a key/value pair that is tombstoned. - // If so, we must move the cursor. 
- if itr.valid && itr.cursorTombstoned() { - itr.nextForward() - } - - return - } - - itr.valid = false -} - -func (itr *iterator) nextReverse() { - if !itr.source.Valid() { - itr.valid = false - return - } - - currKey, _, ok := SplitMVCCKey(itr.source.Key()) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC key. - panic(fmt.Sprintf("invalid PebbleDB MVCC key: %s", itr.source.Key())) - } - - next := itr.source.SeekLT(MVCCEncode(currKey, 0)) - - // First move the iterator to the next prefix, which may not correspond to the - // desired version for that key, e.g. if the key was written at a later version, - // so we seek back to the latest desired version, s.t. the version is <= itr.version. - if next { - nextKey, _, ok := SplitMVCCKey(itr.source.Key()) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC key. - itr.valid = false - return - } - - if !bytes.HasPrefix(nextKey, itr.prefix) { - // the next key must have itr.prefix as the prefix - itr.valid = false - return - } - - // Move the iterator to the closest version to the desired version, so we - // append the current iterator key to the prefix and seek to that key. - itr.valid = itr.source.SeekLT(MVCCEncode(nextKey, itr.version+1)) - - _, tmpKeyVersion, ok := SplitMVCCKey(itr.source.Key()) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC key. - itr.valid = false - return - } - - // We need to verify that every Next call either moves the iterator to a key - // whose version is less than or equal to requested iterator version, or - // exhausts the iterator. - tmpKeyVersionDecoded, err := decodeUint64Ascending(tmpKeyVersion) - if err != nil { - itr.valid = false - return - } - - // If iterator is at a entry whose version is higher than requested version, - // call nextReverse again. 
- if tmpKeyVersionDecoded > itr.version { - itr.nextReverse() - } - - // The cursor might now be pointing at a key/value pair that is tombstoned. - // If so, we must move the cursor. - if itr.valid && itr.cursorTombstoned() { - itr.nextReverse() - } - - return - } - - itr.valid = false -} diff --git a/store/v2/storage/rocksdb/batch.go b/store/v2/storage/rocksdb/batch.go deleted file mode 100644 index 826b81778a87..000000000000 --- a/store/v2/storage/rocksdb/batch.go +++ /dev/null @@ -1,67 +0,0 @@ -//go:build rocksdb -// +build rocksdb - -package rocksdb - -import ( - "encoding/binary" - - "github.com/linxGnu/grocksdb" - - "cosmossdk.io/store/v2" -) - -var _ store.Batch = (*Batch)(nil) - -type Batch struct { - version uint64 - ts [TimestampSize]byte - storage *grocksdb.DB - cfHandle *grocksdb.ColumnFamilyHandle - batch *grocksdb.WriteBatch -} - -// NewBatch creates a new versioned batch used for batch writes. The caller -// must ensure to call Write() on the returned batch to commit the changes and to -// destroy the batch when done. 
-func NewBatch(db *Database, version uint64) Batch { - var ts [TimestampSize]byte - binary.LittleEndian.PutUint64(ts[:], version) - - batch := grocksdb.NewWriteBatch() - batch.Put([]byte(latestVersionKey), ts[:]) - - return Batch{ - version: version, - ts: ts, - storage: db.storage, - cfHandle: db.cfHandle, - batch: batch, - } -} - -func (b Batch) Size() int { - return len(b.batch.Data()) -} - -func (b Batch) Reset() error { - b.batch.Clear() - return nil -} - -func (b Batch) Set(storeKey, key, value []byte) error { - prefixedKey := prependStoreKey(storeKey, key) - b.batch.PutCFWithTS(b.cfHandle, prefixedKey, b.ts[:], value) - return nil -} - -func (b Batch) Delete(storeKey, key []byte) error { - prefixedKey := prependStoreKey(storeKey, key) - b.batch.DeleteCFWithTS(b.cfHandle, prefixedKey, b.ts[:]) - return nil -} - -func (b Batch) Write() error { - defer b.batch.Destroy() - return b.storage.Write(defaultWriteOpts, b.batch) -} diff --git a/store/v2/storage/rocksdb/comparator.go b/store/v2/storage/rocksdb/comparator.go deleted file mode 100644 index 5da27d9121f9..000000000000 --- a/store/v2/storage/rocksdb/comparator.go +++ /dev/null @@ -1,76 +0,0 @@ -//go:build rocksdb -// +build rocksdb - -package rocksdb - -import ( - "bytes" - "encoding/binary" - - "github.com/linxGnu/grocksdb" -) - -// CreateTSComparator should behavior identical with RocksDB builtin timestamp comparator. -// We also use the same builtin comparator name so the builtin tools `ldb`/`sst_dump` -// can work with the database. -func CreateTSComparator() *grocksdb.Comparator { - return grocksdb.NewComparatorWithTimestamp( - "leveldb.BytewiseComparator.u64ts", - TimestampSize, - compare, - compareTS, - compareWithoutTS, - ) -} - -// compareTS compares timestamp as little endian encoded integers. -// -// NOTICE: The behavior must be identical to RocksDB builtin comparator -// "leveldb.BytewiseComparator.u64ts". 
-func compareTS(bz1, bz2 []byte) int { - ts1 := binary.LittleEndian.Uint64(bz1) - ts2 := binary.LittleEndian.Uint64(bz2) - - switch { - case ts1 < ts2: - return -1 - - case ts1 > ts2: - return 1 - - default: - return 0 - } -} - -// compare compares two internal keys with timestamp suffix, larger timestamp -// comes first. -// -// NOTICE: The behavior must be identical to RocksDB builtin comparator -// "leveldb.BytewiseComparator.u64ts". -func compare(a, b []byte) int { - ret := compareWithoutTS(a, true, b, true) - if ret != 0 { - return ret - } - - // Compare timestamp. For the same user key with different timestamps, larger - // (newer) timestamp comes first, which means seek operation will try to find - // a version less than or equal to the target version. - return -compareTS(a[len(a)-TimestampSize:], b[len(b)-TimestampSize:]) -} - -// compareWithoutTS compares two internal keys without the timestamp part. -// -// NOTICE: the behavior must be identical to RocksDB builtin comparator -// "leveldb.BytewiseComparator.u64ts". 
-func compareWithoutTS(a []byte, aHasTS bool, b []byte, bHasTS bool) int { - if aHasTS { - a = a[:len(a)-TimestampSize] - } - if bHasTS { - b = b[:len(b)-TimestampSize] - } - - return bytes.Compare(a, b) -} diff --git a/store/v2/storage/rocksdb/db.go b/store/v2/storage/rocksdb/db.go deleted file mode 100644 index 248b014f7b4e..000000000000 --- a/store/v2/storage/rocksdb/db.go +++ /dev/null @@ -1,251 +0,0 @@ -//go:build rocksdb -// +build rocksdb - -package rocksdb - -import ( - "bytes" - "encoding/binary" - "fmt" - "slices" - - "github.com/linxGnu/grocksdb" - - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/errors" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/util" -) - -const ( - TimestampSize = 8 - - StorePrefixTpl = "s/k:%s/" - latestVersionKey = "s/latest" -) - -var ( - _ storage.Database = (*Database)(nil) - _ store.UpgradableDatabase = (*Database)(nil) - - defaultWriteOpts = grocksdb.NewDefaultWriteOptions() - defaultReadOpts = grocksdb.NewDefaultReadOptions() -) - -type Database struct { - storage *grocksdb.DB - cfHandle *grocksdb.ColumnFamilyHandle - - // tsLow reflects the full_history_ts_low CF value, which is earliest version - // supported - tsLow uint64 -} - -func New(dataDir string) (*Database, error) { - storage, cfHandle, err := OpenRocksDB(dataDir) - if err != nil { - return nil, fmt.Errorf("failed to open RocksDB: %w", err) - } - - slice, err := storage.GetFullHistoryTsLow(cfHandle) - if err != nil { - return nil, fmt.Errorf("failed to get full_history_ts_low: %w", err) - } - - var tsLow uint64 - tsLowBz := copyAndFreeSlice(slice) - if len(tsLowBz) > 0 { - tsLow = binary.LittleEndian.Uint64(tsLowBz) - } - - return &Database{ - storage: storage, - cfHandle: cfHandle, - tsLow: tsLow, - }, nil -} - -func NewWithDB(storage *grocksdb.DB, cfHandle *grocksdb.ColumnFamilyHandle) (*Database, error) { - slice, err := storage.GetFullHistoryTsLow(cfHandle) - if err != nil { - return nil, 
fmt.Errorf("failed to get full_history_ts_low: %w", err) - } - - var tsLow uint64 - tsLowBz := copyAndFreeSlice(slice) - if len(tsLowBz) > 0 { - tsLow = binary.LittleEndian.Uint64(tsLowBz) - } - - return &Database{ - storage: storage, - cfHandle: cfHandle, - tsLow: tsLow, - }, nil -} - -func (db *Database) Close() error { - db.storage.Close() - - db.storage = nil - db.cfHandle = nil - - return nil -} - -func (db *Database) NewBatch(version uint64) (store.Batch, error) { - return NewBatch(db, version), nil -} - -func (db *Database) getSlice(storeKey []byte, version uint64, key []byte) (*grocksdb.Slice, error) { - if version < db.tsLow { - return nil, errors.ErrVersionPruned{EarliestVersion: db.tsLow, RequestedVersion: version} - } - - return db.storage.GetCF( - newTSReadOptions(version), - db.cfHandle, - prependStoreKey(storeKey, key), - ) -} - -func (db *Database) SetLatestVersion(version uint64) error { - var ts [TimestampSize]byte - binary.LittleEndian.PutUint64(ts[:], version) - - return db.storage.Put(defaultWriteOpts, []byte(latestVersionKey), ts[:]) -} - -func (db *Database) GetLatestVersion() (uint64, error) { - bz, err := db.storage.GetBytes(defaultReadOpts, []byte(latestVersionKey)) - if err != nil { - return 0, err - } - - if len(bz) == 0 { - // in case of a fresh database - return 0, nil - } - - return binary.LittleEndian.Uint64(bz), nil -} - -func (db *Database) VersionExists(version uint64) (bool, error) { - latestVersion, err := db.GetLatestVersion() - if err != nil { - return false, err - } - - return latestVersion >= version && version >= db.tsLow, nil -} - -func (db *Database) Has(storeKey []byte, version uint64, key []byte) (bool, error) { - slice, err := db.getSlice(storeKey, version, key) - if err != nil { - return false, err - } - - return slice.Exists(), nil -} - -func (db *Database) Get(storeKey []byte, version uint64, key []byte) ([]byte, error) { - slice, err := db.getSlice(storeKey, version, key) - if err != nil { - return nil, 
fmt.Errorf("failed to get RocksDB slice: %w", err) - } - - return copyAndFreeSlice(slice), nil -} - -// Prune prunes all versions up to and including the provided version argument. -// Internally, this performs a manual compaction, the data with older timestamp -// will be GCed by compaction. -func (db *Database) Prune(version uint64) error { - tsLow := version + 1 // we increment by 1 to include the provided version - - var ts [TimestampSize]byte - binary.LittleEndian.PutUint64(ts[:], tsLow) - compactOpts := grocksdb.NewCompactRangeOptions() - compactOpts.SetFullHistoryTsLow(ts[:]) - db.storage.CompactRangeCFOpt(db.cfHandle, grocksdb.Range{}, compactOpts) - - db.tsLow = tsLow - return nil -} - -func (db *Database) Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, errors.ErrKeyEmpty - } - - if start != nil && end != nil && bytes.Compare(start, end) > 0 { - return nil, errors.ErrStartAfterEnd - } - - prefix := storePrefix(storeKey) - start, end = util.IterateWithPrefix(prefix, start, end) - - itr := db.storage.NewIteratorCF(newTSReadOptions(version), db.cfHandle) - return newRocksDBIterator(itr, prefix, start, end, false), nil -} - -func (db *Database) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, errors.ErrKeyEmpty - } - - if start != nil && end != nil && bytes.Compare(start, end) > 0 { - return nil, errors.ErrStartAfterEnd - } - - prefix := storePrefix(storeKey) - start, end = util.IterateWithPrefix(prefix, start, end) - - itr := db.storage.NewIteratorCF(newTSReadOptions(version), db.cfHandle) - return newRocksDBIterator(itr, prefix, start, end, true), nil -} - -// PruneStoreKeys will do nothing for RocksDB, it will be pruned by compaction -// when the version is pruned -func (db *Database) 
PruneStoreKeys(_ []string, _ uint64) error { - return nil -} - -// newTSReadOptions returns ReadOptions used in the RocksDB column family read. -func newTSReadOptions(version uint64) *grocksdb.ReadOptions { - var ts [TimestampSize]byte - binary.LittleEndian.PutUint64(ts[:], version) - - readOpts := grocksdb.NewDefaultReadOptions() - readOpts.SetTimestamp(ts[:]) - - return readOpts -} - -func storePrefix(storeKey []byte) []byte { - return []byte(fmt.Sprintf(StorePrefixTpl, storeKey)) -} - -func prependStoreKey(storeKey, key []byte) []byte { - return []byte(fmt.Sprintf("%s%s", storePrefix(storeKey), key)) -} - -// copyAndFreeSlice will copy a given RocksDB slice and free it. If the slice does -// not exist, will be returned. -func copyAndFreeSlice(s *grocksdb.Slice) []byte { - defer s.Free() - if !s.Exists() { - return nil - } - - return slices.Clone(s.Data()) -} - -func readOnlySlice(s *grocksdb.Slice) []byte { - if !s.Exists() { - return nil - } - - return s.Data() -} diff --git a/store/v2/storage/rocksdb/db_noflag.go b/store/v2/storage/rocksdb/db_noflag.go deleted file mode 100644 index 93bc3090f284..000000000000 --- a/store/v2/storage/rocksdb/db_noflag.go +++ /dev/null @@ -1,70 +0,0 @@ -//go:build !rocksdb -// +build !rocksdb - -package rocksdb - -import ( - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/storage" -) - -var ( - _ storage.Database = (*Database)(nil) - _ store.UpgradableDatabase = (*Database)(nil) -) - -type Database struct{} - -func New(dataDir string) (*Database, error) { - return &Database{}, nil -} - -func (db *Database) Close() error { - return nil -} - -func (db *Database) NewBatch(version uint64) (store.Batch, error) { - panic("rocksdb requires a build flag") -} - -func (db *Database) SetLatestVersion(version uint64) error { - panic("rocksdb requires a build flag") -} - -func (db *Database) GetLatestVersion() (uint64, error) { - panic("rocksdb requires a build flag") -} - -func (db *Database) 
VersionExists(version uint64) (bool, error) { - panic("rocksdb requires a build flag") -} - -func (db *Database) Has(storeKey []byte, version uint64, key []byte) (bool, error) { - panic("rocksdb requires a build flag") -} - -func (db *Database) Get(storeKey []byte, version uint64, key []byte) ([]byte, error) { - panic("rocksdb requires a build flag") -} - -// Prune prunes all versions up to and including the provided version argument. -// Internally, this performs a manual compaction, the data with older timestamp -// will be GCed by compaction. -func (db *Database) Prune(version uint64) error { - panic("rocksdb requires a build flag") -} - -func (db *Database) Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) { - panic("rocksdb requires a build flag") -} - -func (db *Database) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) { - panic("rocksdb requires a build flag") -} - -// PruneStoreKeys will do nothing for RocksDB, it will be pruned by compaction -// when the version is pruned -func (db *Database) PruneStoreKeys(_ []string, _ uint64) error { - return nil -} diff --git a/store/v2/storage/rocksdb/db_test.go b/store/v2/storage/rocksdb/db_test.go deleted file mode 100644 index a77afbb3a8fd..000000000000 --- a/store/v2/storage/rocksdb/db_test.go +++ /dev/null @@ -1,90 +0,0 @@ -//go:build rocksdb -// +build rocksdb - -package rocksdb - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/store/v2/storage" -) - -var storeKey1 = []byte("store1") - -func TestStorageTestSuite(t *testing.T) { - s := &storage.StorageTestSuite{ - NewDB: func(dir string) (*storage.StorageStore, error) { - db, err := New(dir) - return storage.NewStorageStore(db, coretesting.NewNopLogger()), err - }, - EmptyBatchSize: 12, - SkipTests: []string{"TestUpgradable_Prune"}, - } - suite.Run(t, s) -} 
- -func TestDatabase_ReverseIterator(t *testing.T) { - db, err := New(t.TempDir()) - require.NoError(t, err) - defer db.Close() - - batch := NewBatch(db, 1) - for i := 0; i < 100; i++ { - key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 - val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 - - require.NoError(t, batch.Set(storeKey1, []byte(key), []byte(val))) - } - - require.NoError(t, batch.Write()) - - // reverse iterator without an end key - iter, err := db.ReverseIterator(storeKey1, 1, []byte("key000"), nil) - require.NoError(t, err) - - defer iter.Close() - - i, count := 99, 0 - for ; iter.Valid(); iter.Next() { - require.Equal(t, []byte(fmt.Sprintf("key%03d", i)), iter.Key()) - require.Equal(t, []byte(fmt.Sprintf("val%03d", i)), iter.Value()) - - i-- - count++ - } - require.Equal(t, 100, count) - require.NoError(t, iter.Error()) - - // seek past domain, which should make the iterator invalid and produce an error - require.False(t, iter.Valid()) - - // reverse iterator with a start and end domain - iter2, err := db.ReverseIterator(storeKey1, 1, []byte("key010"), []byte("key019")) - require.NoError(t, err) - - defer iter2.Close() - - i, count = 18, 0 - for ; iter2.Valid(); iter2.Next() { - require.Equal(t, []byte(fmt.Sprintf("key%03d", i)), iter2.Key()) - require.Equal(t, []byte(fmt.Sprintf("val%03d", i)), iter2.Value()) - - i-- - count++ - } - require.Equal(t, 9, count) - require.NoError(t, iter2.Error()) - - // seek past domain, which should make the iterator invalid and produce an error - require.False(t, iter2.Valid()) - - // start must be <= end - iter3, err := db.ReverseIterator(storeKey1, 1, []byte("key020"), []byte("key019")) - require.Error(t, err) - require.Nil(t, iter3) -} diff --git a/store/v2/storage/rocksdb/iterator.go b/store/v2/storage/rocksdb/iterator.go deleted file mode 100644 index 9a09dc92c5c8..000000000000 --- a/store/v2/storage/rocksdb/iterator.go +++ /dev/null @@ -1,159 +0,0 @@ -//go:build rocksdb -// +build 
rocksdb - -package rocksdb - -import ( - "bytes" - - "github.com/linxGnu/grocksdb" - - corestore "cosmossdk.io/core/store" -) - -var _ corestore.Iterator = (*iterator)(nil) - -type iterator struct { - source *grocksdb.Iterator - prefix, start, end []byte - reverse bool - invalid bool -} - -func newRocksDBIterator(source *grocksdb.Iterator, prefix, start, end []byte, reverse bool) *iterator { - if reverse { - if end == nil { - source.SeekToLast() - } else { - source.Seek(end) - - if source.Valid() { - eoaKey := readOnlySlice(source.Key()) // end or after key - if bytes.Compare(end, eoaKey) <= 0 { - source.Prev() - } - } else { - source.SeekToLast() - } - } - } else { - if start == nil { - source.SeekToFirst() - } else { - source.Seek(start) - } - } - - return &iterator{ - source: source, - prefix: prefix, - start: start, - end: end, - reverse: reverse, - invalid: !source.Valid(), - } -} - -// Domain returns the domain of the iterator. The caller must not modify the -// return values. -func (itr *iterator) Domain() ([]byte, []byte) { - start := itr.start - if start != nil { - start = start[len(itr.prefix):] - if len(start) == 0 { - start = nil - } - } - - end := itr.end - if end != nil { - end = end[len(itr.prefix):] - if len(end) == 0 { - end = nil - } - } - - return start, end -} - -func (itr *iterator) Valid() bool { - // once invalid, forever invalid - if itr.invalid { - return false - } - - // if source has error, consider it invalid - if err := itr.source.Err(); err != nil { - itr.invalid = true - return false - } - - // if source is invalid, consider it invalid - if !itr.source.Valid() { - itr.invalid = true - return false - } - - // if key is at the end or past it, consider it invalid - start := itr.start - end := itr.end - key := readOnlySlice(itr.source.Key()) - - if itr.reverse { - if start != nil && bytes.Compare(key, start) < 0 { - itr.invalid = true - return false - } - } else { - if end != nil && bytes.Compare(end, key) <= 0 { - itr.invalid = true - 
return false - } - } - - return true -} - -func (itr *iterator) Key() []byte { - itr.assertIsValid() - return copyAndFreeSlice(itr.source.Key())[len(itr.prefix):] -} - -func (itr *iterator) Value() []byte { - itr.assertIsValid() - return copyAndFreeSlice(itr.source.Value()) -} - -func (itr *iterator) Timestamp() []byte { - return itr.source.Timestamp().Data() -} - -func (itr iterator) Next() { - if itr.invalid { - return - } - - if itr.reverse { - itr.source.Prev() - } else { - itr.source.Next() - } -} - -func (itr *iterator) Error() error { - return itr.source.Err() -} - -func (itr *iterator) Close() error { - itr.source.Close() - itr.source = nil - itr.invalid = true - - return nil -} - -func (itr *iterator) assertIsValid() { - if itr.invalid { - panic("iterator is invalid") - } -} diff --git a/store/v2/storage/rocksdb/opts.go b/store/v2/storage/rocksdb/opts.go deleted file mode 100644 index bf2272c17c21..000000000000 --- a/store/v2/storage/rocksdb/opts.go +++ /dev/null @@ -1,125 +0,0 @@ -//go:build rocksdb -// +build rocksdb - -package rocksdb - -import ( - "encoding/binary" - "runtime" - - "github.com/linxGnu/grocksdb" -) - -const ( - // CFNameStateStorage defines the RocksDB column family name for versioned state - // storage. - CFNameStateStorage = "state_storage" - - // CFNameDefault defines the RocksDB column family name for the default column. - CFNameDefault = "default" -) - -// NewRocksDBOpts returns the options used for the RocksDB column family for use -// in state storage. -// -// FIXME: We do not enable dict compression for SSTFileWriter, because otherwise -// the file writer won't report correct file size. 
-// Ref: https://github.com/facebook/rocksdb/issues/11146 -func NewRocksDBOpts(sstFileWriter bool) *grocksdb.Options { - opts := grocksdb.NewDefaultOptions() - opts.SetCreateIfMissing(true) - opts.SetComparator(CreateTSComparator()) - opts.IncreaseParallelism(runtime.NumCPU()) - opts.OptimizeLevelStyleCompaction(512 * 1024 * 1024) - opts.SetTargetFileSizeMultiplier(2) - opts.SetLevelCompactionDynamicLevelBytes(true) - - // block based table options - bbto := grocksdb.NewDefaultBlockBasedTableOptions() - - // 1G block cache - bbto.SetBlockSize(32 * 1024) - bbto.SetBlockCache(grocksdb.NewLRUCache(1 << 30)) - - bbto.SetFilterPolicy(grocksdb.NewRibbonHybridFilterPolicy(9.9, 1)) - bbto.SetIndexType(grocksdb.KBinarySearchWithFirstKey) - bbto.SetOptimizeFiltersForMemory(true) - opts.SetBlockBasedTableFactory(bbto) - - // Improve sst file creation speed: compaction or sst file writer. - opts.SetCompressionOptionsParallelThreads(4) - - if !sstFileWriter { - // compression options at bottommost level - opts.SetBottommostCompression(grocksdb.ZSTDCompression) - - compressOpts := grocksdb.NewDefaultCompressionOptions() - compressOpts.MaxDictBytes = 112640 // 110k - compressOpts.Level = 12 - - opts.SetBottommostCompressionOptions(compressOpts, true) - opts.SetBottommostCompressionOptionsZstdMaxTrainBytes(compressOpts.MaxDictBytes*100, true) - } - - return opts -} - -// OpenRocksDB opens a RocksDB database connection for versioned reading and writing. -// It also returns a column family handle for versioning using user-defined timestamps. -// The default column family is used for metadata, specifically key/value pairs -// that are stored on another column family named with "state_storage", which has -// user-defined timestamp enabled. 
-func OpenRocksDB(dataDir string) (*grocksdb.DB, *grocksdb.ColumnFamilyHandle, error) { - opts := grocksdb.NewDefaultOptions() - opts.SetCreateIfMissing(true) - opts.SetCreateIfMissingColumnFamilies(true) - - db, cfHandles, err := grocksdb.OpenDbColumnFamilies( - opts, - dataDir, - []string{ - CFNameDefault, - CFNameStateStorage, - }, - []*grocksdb.Options{ - opts, - NewRocksDBOpts(false), - }, - ) - if err != nil { - return nil, nil, err - } - - return db, cfHandles[1], nil -} - -// OpenRocksDBAndTrimHistory opens a RocksDB handle similar to `OpenRocksDB`, -// but it also trims the versions newer than target one, such that it can be used -// for rollback. -func OpenRocksDBAndTrimHistory(dataDir string, version int64) (*grocksdb.DB, *grocksdb.ColumnFamilyHandle, error) { - var ts [TimestampSize]byte - binary.LittleEndian.PutUint64(ts[:], uint64(version)) - - opts := grocksdb.NewDefaultOptions() - opts.SetCreateIfMissing(true) - opts.SetCreateIfMissingColumnFamilies(true) - - db, cfHandles, err := grocksdb.OpenDbAndTrimHistory( - opts, - dataDir, - []string{ - CFNameDefault, - CFNameStateStorage, - }, - []*grocksdb.Options{ - opts, - NewRocksDBOpts(false), - }, - ts[:], - ) - if err != nil { - return nil, nil, err - } - - return db, cfHandles[1], nil -} diff --git a/store/v2/storage/sqlite/batch.go b/store/v2/storage/sqlite/batch.go deleted file mode 100644 index 783b597e04af..000000000000 --- a/store/v2/storage/sqlite/batch.go +++ /dev/null @@ -1,104 +0,0 @@ -package sqlite - -import ( - "database/sql" - "fmt" - - "cosmossdk.io/store/v2" -) - -var _ store.Batch = (*Batch)(nil) - -type batchAction int - -const ( - batchActionSet batchAction = 0 - batchActionDel batchAction = 1 -) - -type batchOp struct { - action batchAction - storeKey []byte - key, value []byte -} - -type Batch struct { - db *sql.DB - tx *sql.Tx - ops []batchOp - size int - version uint64 -} - -func NewBatch(db *sql.DB, version uint64) (*Batch, error) { - tx, err := db.Begin() - if err != nil { - 
return nil, fmt.Errorf("failed to create SQL transaction: %w", err) - } - - return &Batch{ - db: db, - tx: tx, - ops: make([]batchOp, 0), - version: version, - }, nil -} - -func (b *Batch) Size() int { - return b.size -} - -func (b *Batch) Reset() error { - b.ops = nil - b.ops = make([]batchOp, 0) - b.size = 0 - - tx, err := b.db.Begin() - if err != nil { - return err - } - - b.tx = tx - return nil -} - -func (b *Batch) Set(storeKey, key, value []byte) error { - b.size += len(key) + len(value) - b.ops = append(b.ops, batchOp{action: batchActionSet, storeKey: storeKey, key: key, value: value}) - return nil -} - -func (b *Batch) Delete(storeKey, key []byte) error { - b.size += len(key) - b.ops = append(b.ops, batchOp{action: batchActionDel, storeKey: storeKey, key: key}) - return nil -} - -func (b *Batch) Write() error { - _, err := b.tx.Exec(reservedUpsertStmt, reservedStoreKey, keyLatestHeight, b.version, 0, b.version) - if err != nil { - return fmt.Errorf("failed to exec SQL statement: %w", err) - } - - for _, op := range b.ops { - switch op.action { - case batchActionSet: - _, err := b.tx.Exec(upsertStmt, op.storeKey, op.key, op.value, b.version, op.value) - if err != nil { - return fmt.Errorf("failed to exec SQL statement: %w", err) - } - - case batchActionDel: - _, err := b.tx.Exec(delStmt, b.version, op.storeKey, op.key, b.version) - if err != nil { - return fmt.Errorf("failed to exec SQL statement: %w", err) - } - } - } - - if err := b.tx.Commit(); err != nil { - return fmt.Errorf("failed to write SQL transaction: %w", err) - } - - return nil -} diff --git a/store/v2/storage/sqlite/db.go b/store/v2/storage/sqlite/db.go deleted file mode 100644 index 925648928601..000000000000 --- a/store/v2/storage/sqlite/db.go +++ /dev/null @@ -1,360 +0,0 @@ -package sqlite - -import ( - "bytes" - "database/sql" - "errors" - "fmt" - "path/filepath" - "strings" - - _ "github.com/mattn/go-sqlite3" - - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" - storeerrors 
"cosmossdk.io/store/v2/errors" - "cosmossdk.io/store/v2/storage" -) - -const ( - driverName = "sqlite3" - dbName = "ss.db?cache=shared&mode=rwc&_journal_mode=WAL" - reservedStoreKey = "_RESERVED_" - keyLatestHeight = "latest_height" - keyPruneHeight = "prune_height" - valueRemovedStore = "removed_store" - - reservedUpsertStmt = ` - INSERT INTO state_storage(store_key, key, value, version) - VALUES(?, ?, ?, ?) - ON CONFLICT(store_key, key, version) DO UPDATE SET - value = ?; - ` - upsertStmt = ` - INSERT INTO state_storage(store_key, key, value, version) - VALUES(?, ?, ?, ?) - ON CONFLICT(store_key, key, version) DO UPDATE SET - value = ?; - ` - delStmt = ` - UPDATE state_storage SET tombstone = ? - WHERE id = ( - SELECT id FROM state_storage WHERE store_key = ? AND key = ? AND version <= ? ORDER BY version DESC LIMIT 1 - ) AND tombstone = 0; - ` -) - -var ( - _ storage.Database = (*Database)(nil) - _ store.UpgradableDatabase = (*Database)(nil) -) - -type Database struct { - storage *sql.DB - - // earliestVersion defines the earliest version set in the database, which is - // only updated when the database is pruned. 
- earliestVersion uint64 -} - -func New(dataDir string) (*Database, error) { - storage, err := sql.Open(driverName, filepath.Join(dataDir, dbName)) - if err != nil { - return nil, fmt.Errorf("failed to open sqlite DB: %w", err) - } - - stmt := ` - CREATE TABLE IF NOT EXISTS state_storage ( - id integer not null primary key, - store_key varchar not null, - key varchar not null, - value varchar not null, - version integer unsigned not null, - tombstone integer unsigned default 0, - unique (store_key, key, version) - ); - - CREATE UNIQUE INDEX IF NOT EXISTS idx_store_key_version ON state_storage (store_key, key, version); - ` - _, err = storage.Exec(stmt) - if err != nil { - return nil, fmt.Errorf("failed to exec SQL statement: %w", err) - } - - pruneHeight, err := getPruneHeight(storage) - if err != nil { - return nil, fmt.Errorf("failed to get prune height: %w", err) - } - - return &Database{ - storage: storage, - earliestVersion: pruneHeight, - }, nil -} - -func (db *Database) Close() error { - err := db.storage.Close() - db.storage = nil - return err -} - -func (db *Database) NewBatch(version uint64) (store.Batch, error) { - return NewBatch(db.storage, version) -} - -func (db *Database) GetLatestVersion() (uint64, error) { - stmt, err := db.storage.Prepare(` - SELECT value - FROM state_storage - WHERE store_key = ? AND key = ? 
- `) - if err != nil { - return 0, fmt.Errorf("failed to prepare SQL statement: %w", err) - } - - defer stmt.Close() - - var latestHeight uint64 - if err := stmt.QueryRow(reservedStoreKey, keyLatestHeight).Scan(&latestHeight); err != nil { - if errors.Is(err, sql.ErrNoRows) { - // in case of a fresh database - return 0, nil - } - - return 0, fmt.Errorf("failed to query row: %w", err) - } - - return latestHeight, nil -} - -func (db *Database) VersionExists(v uint64) (bool, error) { - latestVersion, err := db.GetLatestVersion() - if err != nil { - return false, err - } - - return latestVersion >= v && v >= db.earliestVersion, nil -} - -func (db *Database) SetLatestVersion(version uint64) error { - _, err := db.storage.Exec(reservedUpsertStmt, reservedStoreKey, keyLatestHeight, version, 0, version) - if err != nil { - return fmt.Errorf("failed to exec SQL statement: %w", err) - } - - return nil -} - -func (db *Database) Has(storeKey []byte, version uint64, key []byte) (bool, error) { - val, err := db.Get(storeKey, version, key) - if err != nil { - return false, err - } - - return val != nil, nil -} - -func (db *Database) Get(storeKey []byte, targetVersion uint64, key []byte) ([]byte, error) { - if targetVersion < db.earliestVersion { - return nil, storeerrors.ErrVersionPruned{EarliestVersion: db.earliestVersion, RequestedVersion: targetVersion} - } - - stmt, err := db.storage.Prepare(` - SELECT value, tombstone FROM state_storage - WHERE store_key = ? AND key = ? AND version <= ? 
- ORDER BY version DESC LIMIT 1; - `) - if err != nil { - return nil, fmt.Errorf("failed to prepare SQL statement: %w", err) - } - - defer stmt.Close() - - var ( - value []byte - tomb uint64 - ) - if err := stmt.QueryRow(storeKey, key, targetVersion).Scan(&value, &tomb); err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, nil - } - - return nil, fmt.Errorf("failed to query row: %w", err) - } - - // A tombstone of zero or a target version that is less than the tombstone - // version means the key is not deleted at the target version. - if tomb == 0 || targetVersion < tomb { - return value, nil - } - - // the value is considered deleted - return nil, nil -} - -// Prune removes all versions of all keys that are <= the given version. It keeps -// the latest (non-tombstoned) version of each key/value tuple to handle queries -// above the prune version. This is analogous to RocksDB full_history_ts_low. -// -// We perform the prune by deleting all versions of a key, excluding reserved keys, -// that are <= the given version, except for the latest version of the key. -func (db *Database) Prune(version uint64) error { - tx, err := db.storage.Begin() - if err != nil { - return fmt.Errorf("failed to create SQL transaction: %w", err) - } - defer func() { - if err != nil { - err = tx.Rollback() - } - }() - - // prune all keys of old versions - pruneStmt := `DELETE FROM state_storage - WHERE version < ( - SELECT max(version) FROM state_storage t2 WHERE - t2.store_key = state_storage.store_key AND - t2.key = state_storage.key AND - t2.version <= ? - ) AND store_key != ?; - ` - if _, err := tx.Exec(pruneStmt, version, reservedStoreKey); err != nil { - return fmt.Errorf("failed to exec SQL statement: %w", err) - } - - // prune removed stores - pruneRemovedStoreKeysStmt := `DELETE FROM state_storage AS s - WHERE EXISTS ( - SELECT 1 FROM - ( - SELECT key, MAX(version) AS max_version - FROM state_storage - WHERE store_key = ? AND value = ? AND version <= ? 
- GROUP BY key - ) AS t - WHERE s.store_key = t.key AND s.version <= t.max_version LIMIT 1 - ); - ` - if _, err := tx.Exec(pruneRemovedStoreKeysStmt, reservedStoreKey, valueRemovedStore, version, version); err != nil { - return fmt.Errorf("failed to exec SQL statement: %w", err) - } - - // delete the removedKeys - if _, err := tx.Exec("DELETE FROM state_storage WHERE store_key = ? AND value = ? AND version <= ?", reservedStoreKey, valueRemovedStore, version); err != nil { - return fmt.Errorf("failed to exec SQL statement: %w", err) - } - - // set the prune height so we can return for queries below this height - if _, err := tx.Exec(reservedUpsertStmt, reservedStoreKey, keyPruneHeight, version, 0, version); err != nil { - return fmt.Errorf("failed to exec SQL statement: %w", err) - } - - if err := tx.Commit(); err != nil { - return fmt.Errorf("failed to write SQL transaction: %w", err) - } - - db.earliestVersion = version + 1 - return nil -} - -func (db *Database) Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, storeerrors.ErrKeyEmpty - } - - if start != nil && end != nil && bytes.Compare(start, end) > 0 { - return nil, storeerrors.ErrStartAfterEnd - } - - return newIterator(db, storeKey, version, start, end, false) -} - -func (db *Database) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, storeerrors.ErrKeyEmpty - } - - if start != nil && end != nil && bytes.Compare(start, end) > 0 { - return nil, storeerrors.ErrStartAfterEnd - } - - return newIterator(db, storeKey, version, start, end, true) -} - -func (db *Database) PruneStoreKeys(storeKeys []string, version uint64) (err error) { - tx, err := db.storage.Begin() - if err != nil { - return fmt.Errorf("failed to create SQL transaction: %w", err) - } - 
defer func() { - if err != nil { - err = tx.Rollback() - } - }() - - // flush removed store keys - flushRemovedStoreKeyStmt := `INSERT INTO state_storage(store_key, key, value, version) - VALUES (?, ?, ?, ?)` - for _, storeKey := range storeKeys { - if _, err := tx.Exec(flushRemovedStoreKeyStmt, reservedStoreKey, []byte(storeKey), valueRemovedStore, version); err != nil { - return fmt.Errorf("failed to exec SQL statement: %w", err) - } - } - - return tx.Commit() -} - -func (db *Database) PrintRowsDebug() { - stmt, err := db.storage.Prepare("SELECT store_key, key, value, version, tombstone FROM state_storage") - if err != nil { - panic(fmt.Errorf("failed to prepare SQL statement: %w", err)) - } - - defer stmt.Close() - - rows, err := stmt.Query() - if err != nil { - panic(fmt.Errorf("failed to execute SQL query: %w", err)) - } - - var sb strings.Builder - for rows.Next() { - var ( - storeKey []byte - key []byte - value []byte - version uint64 - tomb uint64 - ) - if err := rows.Scan(&storeKey, &key, &value, &version, &tomb); err != nil { - panic(fmt.Sprintf("failed to scan row: %s", err)) - } - - sb.WriteString(fmt.Sprintf("STORE_KEY: %s, KEY: %s, VALUE: %s, VERSION: %d, TOMBSTONE: %d\n", storeKey, key, value, version, tomb)) - } - if err := rows.Err(); err != nil { - panic(fmt.Errorf("received unexpected error: %w", err)) - } - - fmt.Println(strings.TrimSpace(sb.String())) -} - -func getPruneHeight(storage *sql.DB) (uint64, error) { - stmt, err := storage.Prepare(`SELECT value FROM state_storage WHERE store_key = ? 
AND key = ?`) - if err != nil { - return 0, fmt.Errorf("failed to prepare SQL statement: %w", err) - } - - defer stmt.Close() - - var value uint64 - if err := stmt.QueryRow(reservedStoreKey, keyPruneHeight).Scan(&value); err != nil { - if errors.Is(err, sql.ErrNoRows) { - return 0, nil - } - - return 0, fmt.Errorf("failed to query row: %w", err) - } - - return value, nil -} diff --git a/store/v2/storage/sqlite/db_test.go b/store/v2/storage/sqlite/db_test.go deleted file mode 100644 index fc4c27e6bfd9..000000000000 --- a/store/v2/storage/sqlite/db_test.go +++ /dev/null @@ -1,200 +0,0 @@ -package sqlite - -import ( - "fmt" - "sync" - "testing" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/store/v2/storage" -) - -var storeKey1 = []byte("store1") - -func TestStorageTestSuite(t *testing.T) { - s := &storage.StorageTestSuite{ - NewDB: func(dir string) (*storage.StorageStore, error) { - db, err := New(dir) - return storage.NewStorageStore(db, coretesting.NewNopLogger()), err - }, - EmptyBatchSize: 0, - } - suite.Run(t, s) -} - -func TestDatabase_ReverseIterator(t *testing.T) { - db, err := New(t.TempDir()) - require.NoError(t, err) - defer db.Close() - - batch, err := db.NewBatch(1) - require.NoError(t, err) - for i := 0; i < 100; i++ { - key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 - val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 - - require.NoError(t, batch.Set(storeKey1, []byte(key), []byte(val))) - } - - require.NoError(t, batch.Write()) - - // reverse iterator without an end key - iter, err := db.ReverseIterator(storeKey1, 1, []byte("key000"), nil) - require.NoError(t, err) - - defer iter.Close() - - i, count := 99, 0 - for ; iter.Valid(); iter.Next() { - require.Equal(t, []byte(fmt.Sprintf("key%03d", i)), iter.Key()) - require.Equal(t, []byte(fmt.Sprintf("val%03d", i)), iter.Value()) - - i-- - count++ - } - require.Equal(t, 100, count) - 
require.NoError(t, iter.Error()) - - // seek past domain, which should make the iterator invalid and produce an error - require.False(t, iter.Valid()) - - // reverse iterator with a start and end domain - iter2, err := db.ReverseIterator(storeKey1, 1, []byte("key010"), []byte("key019")) - require.NoError(t, err) - - defer iter2.Close() - - i, count = 18, 0 - for ; iter2.Valid(); iter2.Next() { - require.Equal(t, []byte(fmt.Sprintf("key%03d", i)), iter2.Key()) - require.Equal(t, []byte(fmt.Sprintf("val%03d", i)), iter2.Value()) - - i-- - count++ - } - require.Equal(t, 9, count) - require.NoError(t, iter2.Error()) - - // seek past domain, which should make the iterator invalid and produce an error - require.False(t, iter2.Valid()) - - // start must be <= end - iter3, err := db.ReverseIterator(storeKey1, 1, []byte("key020"), []byte("key019")) - require.Error(t, err) - require.Nil(t, iter3) -} - -func TestParallelWrites(t *testing.T) { - db, err := New(t.TempDir()) - require.NoError(t, err) - defer db.Close() - - latestVersion := 10 - kvCount := 100 - - wg := sync.WaitGroup{} - triggerStartCh := make(chan bool) - - // start 10 goroutines that write to the database - for i := 0; i < latestVersion; i++ { - wg.Add(1) - go func(i int) { - <-triggerStartCh - defer wg.Done() - batch, err := db.NewBatch(uint64(i + 1)) - require.NoError(t, err) - for j := 0; j < kvCount; j++ { - key := fmt.Sprintf("key-%d-%03d", i, j) - val := fmt.Sprintf("val-%d-%03d", i, j) - - require.NoError(t, batch.Set(storeKey1, []byte(key), []byte(val))) - } - - require.NoError(t, batch.Write()) - }(i) - - } - - // start the goroutines - close(triggerStartCh) - wg.Wait() - - // check that all the data is there - for i := 0; i < latestVersion; i++ { - for j := 0; j < kvCount; j++ { - version := uint64(i + 1) - key := fmt.Sprintf("key-%d-%03d", i, j) - val := fmt.Sprintf("val-%d-%03d", i, j) - - v, err := db.Get(storeKey1, version, []byte(key)) - require.NoError(t, err) - require.Equal(t, []byte(val), v) 
- } - } -} - -func TestParallelWriteAndPruning(t *testing.T) { - db, err := New(t.TempDir()) - require.NoError(t, err) - defer db.Close() - - latestVersion := 100 - kvCount := 100 - prunePeriod := 5 - - wg := sync.WaitGroup{} - triggerStartCh := make(chan bool) - - // start a goroutine that write to the database - wg.Add(1) - go func() { - <-triggerStartCh - defer wg.Done() - for i := 0; i < latestVersion; i++ { - batch, err := db.NewBatch(uint64(i + 1)) - require.NoError(t, err) - for j := 0; j < kvCount; j++ { - key := fmt.Sprintf("key-%d-%03d", i, j) - val := fmt.Sprintf("val-%d-%03d", i, j) - - require.NoError(t, batch.Set(storeKey1, []byte(key), []byte(val))) - } - - require.NoError(t, batch.Write()) - } - }() - // start a goroutine that prunes the database - wg.Add(1) - go func() { - <-triggerStartCh - defer wg.Done() - for i := 10; i < latestVersion; i += prunePeriod { - for { - v, err := db.GetLatestVersion() - require.NoError(t, err) - if v > uint64(i) { - require.NoError(t, db.Prune(v-1)) - break - } - } - } - }() - - // start the goroutines - close(triggerStartCh) - wg.Wait() - - // check if the data is pruned - version := uint64(latestVersion - prunePeriod) - val, err := db.Get(storeKey1, version, []byte(fmt.Sprintf("key-%d-%03d", version-1, 0))) - require.Error(t, err) - require.Nil(t, val) - - version = uint64(latestVersion) - val, err = db.Get(storeKey1, version, []byte(fmt.Sprintf("key-%d-%03d", version-1, 0))) - require.NoError(t, err) - require.Equal(t, []byte(fmt.Sprintf("val-%d-%03d", version-1, 0)), val) -} diff --git a/store/v2/storage/sqlite/iterator.go b/store/v2/storage/sqlite/iterator.go deleted file mode 100644 index daf7e073db3c..000000000000 --- a/store/v2/storage/sqlite/iterator.go +++ /dev/null @@ -1,183 +0,0 @@ -package sqlite - -import ( - "bytes" - "database/sql" - "fmt" - "slices" - "strings" - - corestore "cosmossdk.io/core/store" -) - -var _ corestore.Iterator = (*iterator)(nil) - -type iterator struct { - statement *sql.Stmt - 
rows *sql.Rows - key, val []byte - start, end []byte - valid bool - err error -} - -func newIterator(db *Database, storeKey []byte, targetVersion uint64, start, end []byte, reverse bool) (*iterator, error) { - if targetVersion < db.earliestVersion { - return &iterator{ - start: start, - end: end, - valid: false, - }, nil - } - - var ( - keyClause = []string{"store_key = ?", "version <= ?"} - queryArgs []any - ) - - switch { - case len(start) > 0 && len(end) > 0: - keyClause = append(keyClause, "key >= ?", "key < ?") - queryArgs = []any{storeKey, targetVersion, start, end, targetVersion} - - case len(start) > 0 && len(end) == 0: - keyClause = append(keyClause, "key >= ?") - queryArgs = []any{storeKey, targetVersion, start, targetVersion} - - case len(start) == 0 && len(end) > 0: - keyClause = append(keyClause, "key < ?") - queryArgs = []any{storeKey, targetVersion, end, targetVersion} - - default: - queryArgs = []any{storeKey, targetVersion, targetVersion} - } - - orderBy := "ASC" - if reverse { - orderBy = "DESC" - } - - // Note, this is not susceptible to SQL injection because placeholders are used - // for parts of the query outside the store's direct control. - stmt, err := db.storage.Prepare(fmt.Sprintf(` - SELECT x.key, x.value - FROM ( - SELECT key, value, version, tombstone, - row_number() OVER (PARTITION BY key ORDER BY version DESC) AS _rn - FROM state_storage WHERE %s - ) x - WHERE x._rn = 1 AND (x.tombstone = 0 OR x.tombstone > ?) ORDER BY x.key %s; - `, strings.Join(keyClause, " AND "), orderBy)) - if err != nil { - return nil, fmt.Errorf("failed to prepare SQL statement: %w", err) - } - - rows, err := stmt.Query(queryArgs...) 
- if err != nil { - _ = stmt.Close() - return nil, fmt.Errorf("failed to execute SQL query: %w", err) - } - - itr := &iterator{ - statement: stmt, - rows: rows, - start: start, - end: end, - valid: rows.Next(), - } - if !itr.valid { - itr.err = fmt.Errorf("iterator invalid: %w", sql.ErrNoRows) - return itr, nil - } - - // read the first row - itr.parseRow() - if !itr.valid { - return itr, nil - } - - return itr, nil -} - -func (itr *iterator) Close() (err error) { - if itr.statement != nil { - err = itr.statement.Close() - } - - itr.valid = false - itr.statement = nil - itr.rows = nil - - return err -} - -// Domain returns the domain of the iterator. The caller must not modify the -// return values. -func (itr *iterator) Domain() ([]byte, []byte) { - return itr.start, itr.end -} - -func (itr *iterator) Key() []byte { - itr.assertIsValid() - return slices.Clone(itr.key) -} - -func (itr *iterator) Value() []byte { - itr.assertIsValid() - return slices.Clone(itr.val) -} - -func (itr *iterator) Valid() bool { - if !itr.valid || itr.rows.Err() != nil { - itr.valid = false - return itr.valid - } - - // if key is at the end or past it, consider it invalid - if end := itr.end; end != nil { - if bytes.Compare(end, itr.Key()) <= 0 { - itr.valid = false - return itr.valid - } - } - - return true -} - -func (itr *iterator) Next() { - if itr.rows.Next() { - itr.parseRow() - return - } - - itr.valid = false -} - -func (itr *iterator) Error() error { - if err := itr.rows.Err(); err != nil { - return err - } - - return itr.err -} - -func (itr *iterator) parseRow() { - var ( - key []byte - value []byte - ) - if err := itr.rows.Scan(&key, &value); err != nil { - itr.err = fmt.Errorf("failed to scan row: %w", err) - itr.valid = false - return - } - - itr.key = key - itr.val = value -} - -func (itr *iterator) assertIsValid() { - if !itr.valid { - panic("iterator is invalid") - } -} diff --git a/store/v2/storage/storage_bench_test.go b/store/v2/storage/storage_bench_test.go deleted 
file mode 100644 index 2e2030bec6bb..000000000000 --- a/store/v2/storage/storage_bench_test.go +++ /dev/null @@ -1,187 +0,0 @@ -//go:build rocksdb -// +build rocksdb - -package storage_test - -import ( - "bytes" - "fmt" - "math/rand" - "sort" - "testing" - - "github.com/stretchr/testify/require" - - corestore "cosmossdk.io/core/store" - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/pebbledb" - "cosmossdk.io/store/v2/storage/rocksdb" - "cosmossdk.io/store/v2/storage/sqlite" -) - -var storeKey1 = []byte("store1") - -var ( - backends = map[string]func(dataDir string) (store.VersionedWriter, error){ - "rocksdb_versiondb_opts": func(dataDir string) (store.VersionedWriter, error) { - db, err := rocksdb.New(dataDir) - return storage.NewStorageStore(db, coretesting.NewNopLogger()), err - }, - "pebbledb_default_opts": func(dataDir string) (store.VersionedWriter, error) { - db, err := pebbledb.New(dataDir) - if err == nil && db != nil { - db.SetSync(false) - } - - return storage.NewStorageStore(db, coretesting.NewNopLogger()), err - }, - "btree_sqlite": func(dataDir string) (store.VersionedWriter, error) { - db, err := sqlite.New(dataDir) - return storage.NewStorageStore(db, coretesting.NewNopLogger()), err - }, - } - rng = rand.New(rand.NewSource(567320)) -) - -func BenchmarkGet(b *testing.B) { - numKeyVals := 1_000_000 - keys := make([][]byte, numKeyVals) - vals := make([][]byte, numKeyVals) - for i := 0; i < numKeyVals; i++ { - key := make([]byte, 128) - val := make([]byte, 128) - - _, err := rng.Read(key) - require.NoError(b, err) - _, err = rng.Read(val) - require.NoError(b, err) - - keys[i] = key - vals[i] = val - } - - for ty, fn := range backends { - db, err := fn(b.TempDir()) - require.NoError(b, err) - defer func() { - _ = db.Close() - }() - - cs := corestore.NewChangesetWithPairs(1, map[string]corestore.KVPairs{string(storeKey1): {}}) - for i := 0; i < numKeyVals; i++ { - 
cs.AddKVPair(storeKey1, corestore.KVPair{Key: keys[i], Value: vals[i]}) - } - - require.NoError(b, db.ApplyChangeset(cs)) - - b.Run(fmt.Sprintf("backend_%s", ty), func(b *testing.B) { - b.ResetTimer() - - for i := 0; i < b.N; i++ { - b.StopTimer() - key := keys[rng.Intn(len(keys))] - - b.StartTimer() - _, err = db.Get(storeKey1, 1, key) - require.NoError(b, err) - } - }) - } -} - -func BenchmarkApplyChangeset(b *testing.B) { - for ty, fn := range backends { - db, err := fn(b.TempDir()) - require.NoError(b, err) - defer func() { - _ = db.Close() - }() - - b.Run(fmt.Sprintf("backend_%s", ty), func(b *testing.B) { - b.ResetTimer() - - for i := 0; i < b.N; i++ { - b.StopTimer() - - ver := uint64(b.N + 1) - cs := corestore.NewChangesetWithPairs(ver, map[string]corestore.KVPairs{string(storeKey1): {}}) - for j := 0; j < 1000; j++ { - key := make([]byte, 128) - val := make([]byte, 128) - - _, err = rng.Read(key) - require.NoError(b, err) - _, err = rng.Read(val) - require.NoError(b, err) - - cs.AddKVPair(storeKey1, corestore.KVPair{Key: key, Value: val}) - } - - b.StartTimer() - require.NoError(b, db.ApplyChangeset(cs)) - } - }) - } -} - -func BenchmarkIterate(b *testing.B) { - numKeyVals := 1_000_000 - keys := make([][]byte, numKeyVals) - vals := make([][]byte, numKeyVals) - for i := 0; i < numKeyVals; i++ { - key := make([]byte, 128) - val := make([]byte, 128) - - _, err := rng.Read(key) - require.NoError(b, err) - _, err = rng.Read(val) - require.NoError(b, err) - - keys[i] = key - vals[i] = val - - } - - for ty, fn := range backends { - db, err := fn(b.TempDir()) - require.NoError(b, err) - defer func() { - _ = db.Close() - }() - - b.StopTimer() - - cs := corestore.NewChangesetWithPairs(1, map[string]corestore.KVPairs{string(storeKey1): {}}) - for i := 0; i < numKeyVals; i++ { - cs.AddKVPair(storeKey1, corestore.KVPair{Key: keys[i], Value: vals[i]}) - } - - require.NoError(b, db.ApplyChangeset(cs)) - - sort.Slice(keys, func(i, j int) bool { - return 
bytes.Compare(keys[i], keys[j]) < 0 - }) - - b.Run(fmt.Sprintf("backend_%s", ty), func(b *testing.B) { - b.ResetTimer() - - for i := 0; i < b.N; i++ { - b.StopTimer() - - itr, err := db.Iterator(storeKey1, 1, keys[0], nil) - require.NoError(b, err) - - b.StartTimer() - - for ; itr.Valid(); itr.Next() { - _ = itr.Key() - _ = itr.Value() - } - - require.NoError(b, itr.Error()) - } - }) - } -} diff --git a/store/v2/storage/storage_test_suite.go b/store/v2/storage/storage_test_suite.go deleted file mode 100644 index 4d38efe7931e..000000000000 --- a/store/v2/storage/storage_test_suite.go +++ /dev/null @@ -1,1056 +0,0 @@ -package storage - -import ( - "fmt" - "slices" - "testing" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" -) - -const ( - storeKey1 = "store1" -) - -var storeKey1Bytes = []byte(storeKey1) - -// StorageTestSuite defines a reusable test suite for all storage backends. -type StorageTestSuite struct { - suite.Suite - - NewDB func(dir string) (*StorageStore, error) - EmptyBatchSize int - SkipTests []string -} - -func (s *StorageTestSuite) TestDatabase_Close() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - s.Require().NoError(db.Close()) - - // close should not be idempotent - s.Require().Panics(func() { _ = db.Close() }) -} - -func (s *StorageTestSuite) TestDatabase_LatestVersion() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - lv, err := db.GetLatestVersion() - s.Require().NoError(err) - s.Require().Zero(lv) - - for i := uint64(1); i <= 1001; i++ { - err = db.SetLatestVersion(i) - s.Require().NoError(err) - - lv, err = db.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(i, lv) - } -} - -func (s *StorageTestSuite) TestDatabase_VersionedKeys() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - for i := uint64(1); i <= 100; i++ { - 
s.Require().NoError(db.ApplyChangeset(corestore.NewChangesetWithPairs( - i, - map[string]corestore.KVPairs{ - storeKey1: {{Key: []byte("key"), Value: []byte(fmt.Sprintf("value%03d", i))}}, - }, - ))) - } - - for i := uint64(1); i <= 100; i++ { - bz, err := db.Get(storeKey1Bytes, i, []byte("key")) - s.Require().NoError(err) - s.Require().Equal(fmt.Sprintf("value%03d", i), string(bz)) - } -} - -func (s *StorageTestSuite) TestDatabase_GetVersionedKey() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - // store a key at version 1 - s.Require().NoError(db.ApplyChangeset(corestore.NewChangesetWithPairs( - 1, - map[string]corestore.KVPairs{ - storeKey1: {{Key: []byte("key"), Value: []byte("value001")}}, - }, - ))) - - // assume chain progresses to version 10 w/o any changes to key - bz, err := db.Get(storeKey1Bytes, 10, []byte("key")) - s.Require().NoError(err) - s.Require().Equal([]byte("value001"), bz) - - ok, err := db.Has(storeKey1Bytes, 10, []byte("key")) - s.Require().NoError(err) - s.Require().True(ok) - - // chain progresses to version 11 with an update to key - s.Require().NoError(db.ApplyChangeset(corestore.NewChangesetWithPairs( - 11, - map[string]corestore.KVPairs{ - storeKey1: {{Key: []byte("key"), Value: []byte("value011")}}, - }, - ))) - - bz, err = db.Get(storeKey1Bytes, 10, []byte("key")) - s.Require().NoError(err) - s.Require().Equal([]byte("value001"), bz) - - ok, err = db.Has(storeKey1Bytes, 10, []byte("key")) - s.Require().NoError(err) - s.Require().True(ok) - - for i := uint64(11); i <= 14; i++ { - bz, err = db.Get(storeKey1Bytes, i, []byte("key")) - s.Require().NoError(err) - s.Require().Equal([]byte("value011"), bz) - - ok, err = db.Has(storeKey1Bytes, i, []byte("key")) - s.Require().NoError(err) - s.Require().True(ok) - } - - // chain progresses to version 15 with a delete to key - s.Require().NoError(db.ApplyChangeset(corestore.NewChangesetWithPairs( - 15, - map[string]corestore.KVPairs{storeKey1: {{Key: 
[]byte("key"), Remove: true}}}, - ))) - - // all queries up to version 14 should return the latest value - for i := uint64(1); i <= 14; i++ { - bz, err = db.Get(storeKey1Bytes, i, []byte("key")) - s.Require().NoError(err) - s.Require().NotNil(bz) - - ok, err = db.Has(storeKey1Bytes, i, []byte("key")) - s.Require().NoError(err) - s.Require().True(ok) - } - - // all queries after version 15 should return nil - for i := uint64(15); i <= 17; i++ { - bz, err = db.Get(storeKey1Bytes, i, []byte("key")) - s.Require().NoError(err) - s.Require().Nil(bz) - - ok, err = db.Has(storeKey1Bytes, i, []byte("key")) - s.Require().NoError(err) - s.Require().False(ok) - } -} - -func (s *StorageTestSuite) TestDatabase_ApplyChangeset() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - cs := corestore.NewChangesetWithPairs(1, map[string]corestore.KVPairs{storeKey1: {}}) - for i := 0; i < 100; i++ { - cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(fmt.Sprintf("key%03d", i)), Value: []byte("value")}) - } - - for i := 0; i < 100; i++ { - if i%10 == 0 { - cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(fmt.Sprintf("key%03d", i)), Remove: true}) - } - } - - s.Require().NoError(db.ApplyChangeset(cs)) - - lv, err := db.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(1), lv) - - for i := 0; i < 1; i++ { - ok, err := db.Has(storeKey1Bytes, 1, []byte(fmt.Sprintf("key%03d", i))) - s.Require().NoError(err) - - if i%10 == 0 { - s.Require().False(ok) - } else { - s.Require().True(ok) - } - } -} - -func (s *StorageTestSuite) TestDatabase_IteratorEmptyDomain() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - iter, err := db.Iterator(storeKey1Bytes, 1, []byte{}, []byte{}) - s.Require().Error(err) - s.Require().Nil(iter) -} - -func (s *StorageTestSuite) TestDatabase_IteratorClose() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - iter, err 
:= db.Iterator(storeKey1Bytes, 1, []byte("key000"), nil) - s.Require().NoError(err) - iter.Close() - - s.Require().False(iter.Valid()) -} - -func (s *StorageTestSuite) TestDatabase_IteratorDomain() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - testCases := map[string]struct { - start, end []byte - }{ - "start without end domain": { - start: []byte("key010"), - }, - "start and end domain": { - start: []byte("key010"), - end: []byte("key020"), - }, - } - - for name, tc := range testCases { - s.Run(name, func() { - iter, err := db.Iterator(storeKey1Bytes, 1, tc.start, tc.end) - s.Require().NoError(err) - - defer iter.Close() - - start, end := iter.Domain() - s.Require().Equal(tc.start, start) - s.Require().Equal(tc.end, end) - }) - } -} - -func (s *StorageTestSuite) TestDatabase_Iterator() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - cs := corestore.NewChangesetWithPairs(1, map[string]corestore.KVPairs{storeKey1: {}}) - for i := 0; i < 100; i++ { - key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 - val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 - - cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(key), Value: []byte(val), Remove: false}) - } - - s.Require().NoError(db.ApplyChangeset(cs)) - - // iterator without an end key over multiple versions - for v := uint64(1); v < 5; v++ { - itr, err := db.Iterator(storeKey1Bytes, v, []byte("key000"), nil) - s.Require().NoError(err) - - var i, count int - for ; itr.Valid(); itr.Next() { - s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) - s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) - - i++ - count++ - } - s.Require().NoError(itr.Error()) - s.Require().Equal(100, count) - - // seek past domain, which should make the iterator invalid and produce an error - s.Require().False(itr.Valid()) - - err = itr.Close() - s.Require().NoError(err, "Failed to 
close iterator") - } - - // iterator with a start and end domain over multiple versions - for v := uint64(1); v < 5; v++ { - itr2, err := db.Iterator(storeKey1Bytes, v, []byte("key010"), []byte("key019")) - s.Require().NoError(err) - - i, count := 10, 0 - for ; itr2.Valid(); itr2.Next() { - s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr2.Key()) - s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr2.Value()) - - i++ - count++ - } - s.Require().Equal(9, count) - s.Require().NoError(itr2.Error()) - - // seek past domain, which should make the iterator invalid and produce an error - s.Require().False(itr2.Valid()) - - err = itr2.Close() - if err != nil { - return - } - } - - // start must be <= end - iter3, err := db.Iterator(storeKey1Bytes, 1, []byte("key020"), []byte("key019")) - s.Require().Error(err) - s.Require().Nil(iter3) -} - -func (s *StorageTestSuite) TestDatabase_Iterator_RangedDeletes() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - s.Require().NoError(db.ApplyChangeset(corestore.NewChangesetWithPairs( - 1, - map[string]corestore.KVPairs{ - storeKey1: { - {Key: []byte("key001"), Value: []byte("value001"), Remove: false}, - {Key: []byte("key002"), Value: []byte("value001"), Remove: false}, - }, - }, - ))) - - s.Require().NoError(db.ApplyChangeset(corestore.NewChangesetWithPairs( - 5, - map[string]corestore.KVPairs{ - storeKey1: {{Key: []byte("key002"), Value: []byte("value002"), Remove: false}}, - }, - ))) - - s.Require().NoError(db.ApplyChangeset(corestore.NewChangesetWithPairs( - 10, - map[string]corestore.KVPairs{ - storeKey1: {{Key: []byte("key002"), Remove: true}}, - }, - ))) - - itr, err := db.Iterator(storeKey1Bytes, 11, []byte("key001"), nil) - s.Require().NoError(err) - - defer itr.Close() - - // there should only be one valid key in the iterator -- key001 - var count int - for ; itr.Valid(); itr.Next() { - s.Require().Equal([]byte("key001"), itr.Key()) - count++ - } - s.Require().Equal(1, 
count) - s.Require().NoError(itr.Error()) -} - -func (s *StorageTestSuite) TestDatabase_IteratorMultiVersion() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - // for versions 1-49, set all 10 keys - for v := uint64(1); v < 50; v++ { - cs := corestore.NewChangesetWithPairs(v, map[string]corestore.KVPairs{storeKey1: {}}) - for i := 0; i < 10; i++ { - key := fmt.Sprintf("key%03d", i) - val := fmt.Sprintf("val%03d-%03d", i, v) - - cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(key), Value: []byte(val)}) - } - - s.Require().NoError(db.ApplyChangeset(cs)) - } - - // for versions 50-100, only update even keys - for v := uint64(50); v <= 100; v++ { - cs := corestore.NewChangesetWithPairs(v, map[string]corestore.KVPairs{storeKey1: {}}) - for i := 0; i < 10; i++ { - if i%2 == 0 { - key := fmt.Sprintf("key%03d", i) - val := fmt.Sprintf("val%03d-%03d", i, v) - - cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(key), Value: []byte(val), Remove: false}) - } - } - - s.Require().NoError(db.ApplyChangeset(cs)) - } - - itr, err := db.Iterator(storeKey1Bytes, 69, []byte("key000"), nil) - s.Require().NoError(err) - - defer itr.Close() - - // All keys should be present; All odd keys should have a value that reflects - // version 49, and all even keys should have a value that reflects the desired - // version, 69. 
- var i, count int - for ; itr.Valid(); itr.Next() { - s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) - - if i%2 == 0 { - s.Require().Equal([]byte(fmt.Sprintf("val%03d-%03d", i, 69)), itr.Value()) - } else { - s.Require().Equal([]byte(fmt.Sprintf("val%03d-%03d", i, 49)), itr.Value()) - } - - i = (i + 1) % 10 - count++ - } - - s.Require().NoError(itr.Error()) - s.Require().Equal(10, count) -} - -func (s *StorageTestSuite) TestDatabaseIterator_SkipVersion() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - - defer db.Close() - - dbApplyChangeset(s.T(), db, 58827506, storeKey1, [][]byte{[]byte("keyC")}, [][]byte{[]byte("value003")}) - dbApplyChangeset(s.T(), db, 58827506, storeKey1, [][]byte{[]byte("keyE")}, [][]byte{[]byte("value000")}) - dbApplyChangeset(s.T(), db, 58827506, storeKey1, [][]byte{[]byte("keyF")}, [][]byte{[]byte("value000")}) - dbApplyChangeset(s.T(), db, 58833605, storeKey1, [][]byte{[]byte("keyC")}, [][]byte{[]byte("value004")}) - dbApplyChangeset(s.T(), db, 58833606, storeKey1, [][]byte{[]byte("keyD")}, [][]byte{[]byte("value006")}) - - itr, err := db.Iterator(storeKey1Bytes, 58831525, []byte("key"), nil) - s.Require().NoError(err) - defer itr.Close() - - count := make(map[string]struct{}) - for ; itr.Valid(); itr.Next() { - count[string(itr.Key())] = struct{}{} - } - - s.Require().NoError(itr.Error()) - s.Require().Equal(3, len(count)) -} - -func (s *StorageTestSuite) TestDatabaseIterator_ForwardIteration() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - dbApplyChangeset(s.T(), db, 8, storeKey1, [][]byte{[]byte("keyA")}, [][]byte{[]byte("value001")}) - dbApplyChangeset(s.T(), db, 9, storeKey1, [][]byte{[]byte("keyB")}, [][]byte{[]byte("value002")}) - dbApplyChangeset(s.T(), db, 10, storeKey1, [][]byte{[]byte("keyC")}, [][]byte{[]byte("value003")}) - dbApplyChangeset(s.T(), db, 11, storeKey1, [][]byte{[]byte("keyD")}, [][]byte{[]byte("value004")}) - - 
dbApplyChangeset(s.T(), db, 2, storeKey1, [][]byte{[]byte("keyD")}, [][]byte{[]byte("value007")}) - dbApplyChangeset(s.T(), db, 3, storeKey1, [][]byte{[]byte("keyE")}, [][]byte{[]byte("value008")}) - dbApplyChangeset(s.T(), db, 4, storeKey1, [][]byte{[]byte("keyF")}, [][]byte{[]byte("value009")}) - dbApplyChangeset(s.T(), db, 5, storeKey1, [][]byte{[]byte("keyH")}, [][]byte{[]byte("value010")}) - - itr, err := db.Iterator(storeKey1Bytes, 6, nil, []byte("keyZ")) - s.Require().NoError(err) - - defer itr.Close() - count := 0 - for ; itr.Valid(); itr.Next() { - count++ - } - - s.Require().NoError(itr.Error()) - s.Require().Equal(4, count) -} - -func (s *StorageTestSuite) TestDatabaseIterator_ForwardIterationHigher() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - dbApplyChangeset(s.T(), db, 9, storeKey1, [][]byte{[]byte("keyB")}, [][]byte{[]byte("value002")}) - dbApplyChangeset(s.T(), db, 10, storeKey1, [][]byte{[]byte("keyC")}, [][]byte{[]byte("value003")}) - dbApplyChangeset(s.T(), db, 11, storeKey1, [][]byte{[]byte("keyD")}, [][]byte{[]byte("value004")}) - - dbApplyChangeset(s.T(), db, 12, storeKey1, [][]byte{[]byte("keyD")}, [][]byte{[]byte("value007")}) - dbApplyChangeset(s.T(), db, 13, storeKey1, [][]byte{[]byte("keyE")}, [][]byte{[]byte("value008")}) - dbApplyChangeset(s.T(), db, 14, storeKey1, [][]byte{[]byte("keyF")}, [][]byte{[]byte("value009")}) - dbApplyChangeset(s.T(), db, 15, storeKey1, [][]byte{[]byte("keyH")}, [][]byte{[]byte("value010")}) - - itr, err := db.Iterator(storeKey1Bytes, 6, nil, []byte("keyZ")) - s.Require().NoError(err) - - defer itr.Close() - - count := 0 - for ; itr.Valid(); itr.Next() { - count++ - } - - s.Require().Equal(0, count) -} - -func (s *StorageTestSuite) TestDatabaseIterator_WithDelete() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - dbApplyChangeset(s.T(), db, 1, storeKey1, [][]byte{[]byte("keyA")}, [][]byte{[]byte("value001")}) - 
dbApplyChangeset(s.T(), db, 2, storeKey1, [][]byte{[]byte("keyA")}, [][]byte{nil}) // delete - - itr, err := db.Iterator(storeKey1Bytes, 1, nil, nil) - s.Require().NoError(err) - - count := 0 - for ; itr.Valid(); itr.Next() { - count++ - } - s.Require().Equal(1, count) - - itr, err = db.Iterator(storeKey1Bytes, 2, nil, nil) - s.Require().NoError(err) - - count = 0 - for ; itr.Valid(); itr.Next() { - count++ - } - s.Require().Equal(0, count) -} - -func (s *StorageTestSuite) TestDatabase_IteratorNoDomain() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - // for versions 1-50, set all 10 keys - for v := uint64(1); v <= 50; v++ { - cs := corestore.NewChangesetWithPairs(v, map[string]corestore.KVPairs{storeKey1: {}}) - for i := 0; i < 10; i++ { - key := fmt.Sprintf("key%03d", i) - val := fmt.Sprintf("val%03d-%03d", i, v) - - cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(key), Value: []byte(val), Remove: false}) - } - - s.Require().NoError(db.ApplyChangeset(cs)) - } - - // create an iterator over the entire domain - itr, err := db.Iterator(storeKey1Bytes, 50, nil, nil) - s.Require().NoError(err) - - defer itr.Close() - - var i, count int - for ; itr.Valid(); itr.Next() { - s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) - s.Require().Equal([]byte(fmt.Sprintf("val%03d-%03d", i, 50)), itr.Value()) - - i++ - count++ - } - s.Require().NoError(itr.Error()) - s.Require().Equal(10, count) -} - -func (s *StorageTestSuite) TestDatabase_Prune() { - if slices.Contains(s.SkipTests, s.T().Name()) { - s.T().SkipNow() - } - - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - // for versions 1-50, set 10 keys - for v := uint64(1); v <= 50; v++ { - cs := corestore.NewChangesetWithPairs(v, map[string]corestore.KVPairs{storeKey1: {}}) - for i := 0; i < 10; i++ { - key := fmt.Sprintf("key%03d", i) - val := fmt.Sprintf("val%03d-%03d", i, v) - - 
cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(key), Value: []byte(val)}) - } - - s.Require().NoError(db.ApplyChangeset(cs)) - } - - // prune the first 25 versions - s.Require().NoError(db.Prune(25)) - - latestVersion, err := db.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(50), latestVersion) - - // Ensure all keys are no longer present up to and including version 25 and - // all keys are present after version 25. - for v := uint64(1); v <= 50; v++ { - for i := 0; i < 10; i++ { - key := fmt.Sprintf("key%03d", i) - val := fmt.Sprintf("val%03d-%03d", i, v) - - bz, err := db.Get(storeKey1Bytes, v, []byte(key)) - if v <= 25 { - s.Require().Error(err) - s.Require().Nil(bz) - } else { - s.Require().NoError(err) - s.Require().Equal([]byte(val), bz) - } - } - } - - itr, err := db.Iterator(storeKey1Bytes, 25, []byte("key000"), nil) - s.Require().NoError(err) - s.Require().False(itr.Valid()) - - // prune the latest version which should prune the entire dataset - s.Require().NoError(db.Prune(50)) - - for v := uint64(1); v <= 50; v++ { - for i := 0; i < 10; i++ { - key := fmt.Sprintf("key%03d", i) - - bz, err := db.Get(storeKey1Bytes, v, []byte(key)) - s.Require().Error(err) - s.Require().Nil(bz) - } - } -} - -func (s *StorageTestSuite) TestDatabase_Prune_KeepRecent() { - if slices.Contains(s.SkipTests, s.T().Name()) { - s.T().SkipNow() - } - - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - key := []byte("key") - - // write a key at three different versions - s.Require().NoError(db.ApplyChangeset(corestore.NewChangesetWithPairs( - 1, - map[string]corestore.KVPairs{storeKey1: {{Key: key, Value: []byte("val001"), Remove: false}}}, - ))) - s.Require().NoError(db.ApplyChangeset(corestore.NewChangesetWithPairs( - 100, - map[string]corestore.KVPairs{storeKey1: {{Key: key, Value: []byte("val100"), Remove: false}}}, - ))) - s.Require().NoError(db.ApplyChangeset(corestore.NewChangesetWithPairs( - 200, - 
map[string]corestore.KVPairs{storeKey1: {{Key: key, Value: []byte("val200"), Remove: false}}}, - ))) - - // prune version 50 - s.Require().NoError(db.Prune(50)) - - // ensure queries for versions 50 and older return nil - bz, err := db.Get(storeKey1Bytes, 49, key) - s.Require().Error(err) - s.Require().Nil(bz) - - itr, err := db.Iterator(storeKey1Bytes, 49, nil, nil) - s.Require().NoError(err) - s.Require().False(itr.Valid()) - - defer itr.Close() - - // ensure the value previously at version 1 is still there for queries greater than 50 - bz, err = db.Get(storeKey1Bytes, 51, key) - s.Require().NoError(err) - s.Require().Equal([]byte("val001"), bz) - - // ensure the correct value at a greater height - bz, err = db.Get(storeKey1Bytes, 200, key) - s.Require().NoError(err) - s.Require().Equal([]byte("val200"), bz) - - // prune latest height and ensure we have the previous version when querying above it - s.Require().NoError(db.Prune(200)) - - bz, err = db.Get(storeKey1Bytes, 201, key) - s.Require().NoError(err) - s.Require().Equal([]byte("val200"), bz) -} - -func (s *StorageTestSuite) TestDatabase_Restore() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - toVersion := uint64(10) - keyCount := 10 - - // for versions 1-10, set 10 keys - for v := uint64(1); v <= toVersion; v++ { - cs := corestore.NewChangesetWithPairs(v, map[string]corestore.KVPairs{storeKey1: {}}) - for i := 0; i < keyCount; i++ { - key := fmt.Sprintf("key%03d", i) - val := fmt.Sprintf("val%03d-%03d", i, v) - - cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(key), Value: []byte(val)}) - } - - s.Require().NoError(db.ApplyChangeset(cs)) - } - - latestVersion, err := db.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(10), latestVersion) - - chStorage := make(chan *corestore.StateChanges, 5) - - go func() { - for i := uint64(11); i <= 15; i++ { - kvPairs := []corestore.KVPair{} - for j := 0; j < keyCount; j++ { - key := 
fmt.Sprintf("key%03d-%03d", j, i) - val := fmt.Sprintf("val%03d-%03d", j, i) - - kvPairs = append(kvPairs, corestore.KVPair{Key: []byte(key), Value: []byte(val)}) - } - chStorage <- &corestore.StateChanges{ - Actor: storeKey1Bytes, - StateChanges: kvPairs, - } - } - close(chStorage) - }() - - // restore with snapshot version smaller than latest version - // should return an error - err = db.Restore(9, chStorage) - s.Require().Error(err) - - // restore - err = db.Restore(11, chStorage) - s.Require().NoError(err) - - // check the storage - for i := uint64(11); i <= 15; i++ { - for j := 0; j < keyCount; j++ { - key := fmt.Sprintf("key%03d-%03d", j, i) - val := fmt.Sprintf("val%03d-%03d", j, i) - - v, err := db.Get(storeKey1Bytes, 11, []byte(key)) - s.Require().NoError(err) - s.Require().Equal([]byte(val), v) - } - } -} - -func (s *StorageTestSuite) TestUpgradable() { - ss, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer ss.Close() - - // Ensure the database is upgradable. 
- if _, ok := ss.db.(store.UpgradableDatabase); !ok { - s.T().Skip("database is not upgradable") - } - - storeKeys := []string{"store1", "store2", "store3"} - uptoVersion := uint64(50) - keyCount := 10 - for _, storeKey := range storeKeys { - for v := uint64(1); v <= uptoVersion; v++ { - keys := make([][]byte, keyCount) - vals := make([][]byte, keyCount) - for i := 0; i < keyCount; i++ { - keys[i] = []byte(fmt.Sprintf("key%03d", i)) - vals[i] = []byte(fmt.Sprintf("val%03d-%03d", i, v)) - } - dbApplyChangeset(s.T(), ss, v, storeKey, keys, vals) - } - } - - // prune storekeys (`store2`, `store3`) - removedStoreKeys := []string{storeKeys[1], storeKeys[2]} - err = ss.PruneStoreKeys(removedStoreKeys, uptoVersion) - s.Require().NoError(err) - // should be able to query before Prune for removed storeKeys - for _, storeKey := range removedStoreKeys { - for v := uint64(1); v <= uptoVersion; v++ { - for i := 0; i < keyCount; i++ { - bz, err := ss.Get([]byte(storeKey), v, []byte(fmt.Sprintf("key%03d", i))) - s.Require().NoError(err) - s.Require().Equal([]byte(fmt.Sprintf("val%03d-%03d", i, v)), bz) - } - } - } - s.Require().NoError(ss.Prune(uptoVersion)) - // should not be able to query after Prune - // skip the test of RocksDB - if !slices.Contains(s.SkipTests, "TestUpgradable_Prune") { - for _, storeKey := range removedStoreKeys { - // it will return error ErrVersionPruned - for v := uint64(1); v <= uptoVersion; v++ { - for i := 0; i < keyCount; i++ { - _, err := ss.Get([]byte(storeKey), v, []byte(fmt.Sprintf("key%03d", i))) - s.Require().Error(err) - } - } - v := uptoVersion + 1 - for i := 0; i < keyCount; i++ { - val, err := ss.Get([]byte(storeKey), v, []byte(fmt.Sprintf("key%03d", i))) - s.Require().NoError(err) - s.Require().Nil(val) - } - } - } -} - -func (s *StorageTestSuite) TestRemovingOldStoreKey() { - ss, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer ss.Close() - - // Ensure the database is upgradable. 
- if _, ok := ss.db.(store.UpgradableDatabase); !ok { - s.T().Skip("database is not upgradable") - } - - storeKeys := []string{"store1", "store2", "store3"} - uptoVersion := uint64(50) - keyCount := 10 - for _, storeKey := range storeKeys { - for v := uint64(1); v <= uptoVersion; v++ { - keys := make([][]byte, keyCount) - vals := make([][]byte, keyCount) - for i := 0; i < keyCount; i++ { - keys[i] = []byte(fmt.Sprintf("key%03d-%03d", i, v)) - vals[i] = []byte(fmt.Sprintf("val%03d-%03d", i, v)) - } - dbApplyChangeset(s.T(), ss, v, storeKey, keys, vals) - } - } - - // remove `store1` and `store3` - removedStoreKeys := []string{storeKeys[0], storeKeys[2]} - err = ss.PruneStoreKeys(removedStoreKeys, uptoVersion) - s.Require().NoError(err) - // should be able to query before Prune for removed storeKeys - for _, storeKey := range removedStoreKeys { - for v := uint64(1); v <= uptoVersion; v++ { - for i := 0; i < keyCount; i++ { - bz, err := ss.Get([]byte(storeKey), v, []byte(fmt.Sprintf("key%03d-%03d", i, v))) - s.Require().NoError(err) - s.Require().Equal([]byte(fmt.Sprintf("val%03d-%03d", i, v)), bz) - } - } - } - // add `store1` back - newStoreKeys := []string{storeKeys[0], storeKeys[1]} - newVersion := uptoVersion + 10 - for _, storeKey := range newStoreKeys { - for v := uptoVersion + 1; v <= newVersion; v++ { - keys := make([][]byte, keyCount) - vals := make([][]byte, keyCount) - for i := 0; i < keyCount; i++ { - keys[i] = []byte(fmt.Sprintf("key%03d-%03d", i, v)) - vals[i] = []byte(fmt.Sprintf("val%03d-%03d", i, v)) - } - dbApplyChangeset(s.T(), ss, v, storeKey, keys, vals) - } - } - - s.Require().NoError(ss.Prune(newVersion)) - // skip the test of RocksDB - if !slices.Contains(s.SkipTests, "TestUpgradable_Prune") { - for _, storeKey := range removedStoreKeys { - queryVersion := newVersion + 1 - // should not be able to query after Prune during 1 ~ uptoVersion - for v := uint64(1); v <= uptoVersion; v++ { - for i := 0; i < keyCount; i++ { - val, err := 
ss.Get([]byte(storeKey), queryVersion, []byte(fmt.Sprintf("key%03d", i))) - s.Require().NoError(err) - s.Require().Nil(val) - } - } - // should be able to query after Prune during uptoVersion + 1 ~ newVersion - // for `store1` added back - for v := uptoVersion + 1; v <= newVersion; v++ { - for i := 0; i < keyCount; i++ { - val, err := ss.Get([]byte(storeKey), queryVersion, []byte(fmt.Sprintf("key%03d-%03d", i, v))) - s.Require().NoError(err) - if storeKey == storeKeys[0] { - // `store1` is added back - s.Require().Equal([]byte(fmt.Sprintf("val%03d-%03d", i, v)), val) - } else { - // `store3` is removed - s.Require().Nil(val) - } - } - } - } - } -} - -// TestVersionExists tests the VersionExists method of the Database struct. -func (s *StorageTestSuite) TestVersionExists() { - // Define test cases - testCases := []struct { - name string - setup func(t *testing.T, db *StorageStore) - version uint64 - expectedExists bool - expectError bool - }{ - { - name: "Fresh database: version 0 exists", - setup: func(t *testing.T, db *StorageStore) { - t.Helper() - // No setup needed for fresh database - }, - version: 0, - expectedExists: true, - expectError: false, - }, - { - name: "Fresh database: version 1 exists", - setup: func(t *testing.T, db *StorageStore) { - t.Helper() - // No setup needed for fresh database - }, - version: 1, - expectedExists: false, - expectError: false, - }, - { - name: "After setting latest version to 10, version 5 exists", - setup: func(t *testing.T, db *StorageStore) { - t.Helper() - err := db.SetLatestVersion(10) - if err != nil { - t.Fatalf("Setting latest version should not error: %v", err) - } - }, - version: 5, - expectedExists: true, // Since pruning hasn't occurred, earliestVersion is still 0 - expectError: false, - }, - { - name: "After setting latest version to 10 and pruning to 5, version 4 does not exist", - setup: func(t *testing.T, db *StorageStore) { - t.Helper() - err := db.SetLatestVersion(10) - if err != nil { - t.Fatalf("Setting 
latest version should not error: %v", err) - } - - err = db.Prune(5) - if err != nil { - t.Fatalf("Pruning to version 5 should not error: %v", err) - } - }, - version: 4, - expectedExists: false, - expectError: false, - }, - { - name: "After setting latest version to 10 and pruning to 5, version 5 does not exist", - setup: func(t *testing.T, db *StorageStore) { - t.Helper() - err := db.SetLatestVersion(10) - if err != nil { - t.Fatalf("Setting latest version should not error: %v", err) - } - - err = db.Prune(5) - if err != nil { - t.Fatalf("Pruning to version 5 should not error: %v", err) - } - }, - version: 5, - expectedExists: false, - expectError: false, - }, - { - name: "After setting latest version to 10 and pruning to 5, version 6 exists", - setup: func(t *testing.T, db *StorageStore) { - t.Helper() - err := db.SetLatestVersion(10) - if err != nil { - t.Fatalf("Setting latest version should not error: %v", err) - } - - err = db.Prune(5) - if err != nil { - t.Fatalf("Pruning to version 5 should not error: %v", err) - } - }, - version: 6, - expectedExists: true, - expectError: false, - }, - { - name: "After pruning to 0, all versions >=1 exist", - setup: func(t *testing.T, db *StorageStore) { - t.Helper() - err := db.SetLatestVersion(10) - if err != nil { - t.Fatalf("Setting latest version should not error: %v", err) - } - // Prune to version 0 - err = db.Prune(0) - if err != nil { - t.Fatalf("Pruning to version 0 should not error: %v", err) - } - }, - version: 1, - expectedExists: true, - expectError: false, - }, - } - - // Iterate over each test case - for _, tc := range testCases { - s.T().Run(tc.name, func(t *testing.T) { - // Initialize the database for each test - db, err := s.NewDB(t.TempDir()) - require.NoError(t, err, "Failed to initialize the database") - defer db.Close() - - // Setup test environment - tc.setup(t, db) - - // Call VersionExists and check the result - exists, err := db.VersionExists(tc.version) - if tc.expectError { - require.Error(t, 
err, "Expected error but got none") - } else { - require.NoError(t, err, "Did not expect an error but got one") - require.Equal(t, tc.expectedExists, exists, "Version existence mismatch") - } - }) - } -} - -func dbApplyChangeset( - t *testing.T, - db store.VersionedWriter, - version uint64, - storeKey string, - keys, vals [][]byte, -) { - t.Helper() - - require.Greater(t, version, uint64(0)) - require.Equal(t, len(keys), len(vals)) - - cs := corestore.NewChangeset(version) - for i := 0; i < len(keys); i++ { - remove := false - if vals[i] == nil { - remove = true - } - - cs.AddKVPair([]byte(storeKey), corestore.KVPair{Key: keys[i], Value: vals[i], Remove: remove}) - } - - require.NoError(t, db.ApplyChangeset(cs)) -} diff --git a/store/v2/storage/store.go b/store/v2/storage/store.go deleted file mode 100644 index 5ca5a30132b7..000000000000 --- a/store/v2/storage/store.go +++ /dev/null @@ -1,161 +0,0 @@ -package storage - -import ( - "errors" - "fmt" - - "cosmossdk.io/core/log" - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/snapshots" -) - -const ( - // TODO: it is a random number, need to be tuned - defaultBatchBufferSize = 100000 -) - -var ( - _ store.VersionedWriter = (*StorageStore)(nil) - _ snapshots.StorageSnapshotter = (*StorageStore)(nil) - _ store.Pruner = (*StorageStore)(nil) - _ store.UpgradableDatabase = (*StorageStore)(nil) -) - -// StorageStore is a wrapper around the store.VersionedWriter interface. -type StorageStore struct { - logger log.Logger - db Database -} - -// NewStorageStore returns a reference to a new StorageStore. -func NewStorageStore(db Database, logger log.Logger) *StorageStore { - return &StorageStore{ - logger: logger, - db: db, - } -} - -// Has returns true if the key exists in the store. -func (ss *StorageStore) Has(storeKey []byte, version uint64, key []byte) (bool, error) { - return ss.db.Has(storeKey, version, key) -} - -// Get returns the value associated with the given key. 
-func (ss *StorageStore) Get(storeKey []byte, version uint64, key []byte) ([]byte, error) { - return ss.db.Get(storeKey, version, key) -} - -// ApplyChangeset applies the given changeset to the storage. -func (ss *StorageStore) ApplyChangeset(cs *corestore.Changeset) error { - b, err := ss.db.NewBatch(cs.Version) - if err != nil { - return err - } - - for _, pairs := range cs.Changes { - for _, kvPair := range pairs.StateChanges { - if kvPair.Remove { - if err := b.Delete(pairs.Actor, kvPair.Key); err != nil { - return err - } - } else { - if err := b.Set(pairs.Actor, kvPair.Key, kvPair.Value); err != nil { - return err - } - } - } - } - - if err := b.Write(); err != nil { - return err - } - - return nil -} - -// GetLatestVersion returns the latest version of the store. -func (ss *StorageStore) GetLatestVersion() (uint64, error) { - return ss.db.GetLatestVersion() -} - -// SetLatestVersion sets the latest version of the store. -func (ss *StorageStore) SetLatestVersion(version uint64) error { - return ss.db.SetLatestVersion(version) -} - -// VersionExists returns true if the given version exists in the store. -func (ss *StorageStore) VersionExists(version uint64) (bool, error) { - return ss.db.VersionExists(version) -} - -// Iterator returns an iterator over the specified domain and prefix. -func (ss *StorageStore) Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) { - return ss.db.Iterator(storeKey, version, start, end) -} - -// ReverseIterator returns an iterator over the specified domain and prefix in reverse. -func (ss *StorageStore) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) { - return ss.db.ReverseIterator(storeKey, version, start, end) -} - -// Prune prunes the store up to the given version. -func (ss *StorageStore) Prune(version uint64) error { - return ss.db.Prune(version) -} - -// Restore restores the store from the given channel. 
-func (ss *StorageStore) Restore(version uint64, chStorage <-chan *corestore.StateChanges) error { - latestVersion, err := ss.db.GetLatestVersion() - if err != nil { - return fmt.Errorf("failed to get latest version: %w", err) - } - if version <= latestVersion { - return fmt.Errorf("the snapshot version %d is not greater than latest version %d", version, latestVersion) - } - - b, err := ss.db.NewBatch(version) - if err != nil { - return err - } - - for kvPair := range chStorage { - for _, kv := range kvPair.StateChanges { - if err := b.Set(kvPair.Actor, kv.Key, kv.Value); err != nil { - return err - } - if b.Size() > defaultBatchBufferSize { - if err := b.Write(); err != nil { - return err - } - if err := b.Reset(); err != nil { - return err - } - } - } - } - - if b.Size() > 0 { - if err := b.Write(); err != nil { - return err - } - } - - return nil -} - -// PruneStoreKeys prunes the store keys which implements the store.UpgradableDatabase -// interface. -func (ss *StorageStore) PruneStoreKeys(storeKeys []string, version uint64) error { - gdb, ok := ss.db.(store.UpgradableDatabase) - if !ok { - return errors.New("db does not implement UpgradableDatabase interface") - } - - return gdb.PruneStoreKeys(storeKeys, version) -} - -// Close closes the store. -func (ss *StorageStore) Close() error { - return ss.db.Close() -} diff --git a/store/v2/storage/util/iterator.go b/store/v2/storage/util/iterator.go deleted file mode 100644 index fe207314c717..000000000000 --- a/store/v2/storage/util/iterator.go +++ /dev/null @@ -1,53 +0,0 @@ -package util - -// IterateWithPrefix returns the begin and end keys for an iterator over a domain -// and prefix. 
-func IterateWithPrefix(prefix, begin, end []byte) ([]byte, []byte) { - if len(prefix) == 0 { - return begin, end - } - - begin = cloneAppend(prefix, begin) - - if end == nil { - end = CopyIncr(prefix) - } else { - end = cloneAppend(prefix, end) - } - - return begin, end -} - -func cloneAppend(front, tail []byte) (res []byte) { - res = make([]byte, len(front)+len(tail)) - - n := copy(res, front) - copy(res[n:], tail) - - return res -} - -func CopyIncr(bz []byte) []byte { - if len(bz) == 0 { - panic("copyIncr expects non-zero bz length") - } - - ret := make([]byte, len(bz)) - copy(ret, bz) - - for i := len(bz) - 1; i >= 0; i-- { - if ret[i] < byte(0xFF) { - ret[i]++ - return ret - } - - ret[i] = byte(0x00) - - if i == 0 { - // overflow - return nil - } - } - - return nil -} diff --git a/store/v2/store.go b/store/v2/store.go index 124d7de579a1..6b7f90302082 100644 --- a/store/v2/store.go +++ b/store/v2/store.go @@ -57,8 +57,6 @@ type RootStore interface { // Backend defines the interface for the RootStore backends. type Backend interface { - // GetStateStorage returns the SS backend. - GetStateStorage() VersionedWriter // GetStateCommitment returns the SC backend. 
GetStateCommitment() Committer From 3647b6b21c8d52a4fc25050b9072ead9f1122115 Mon Sep 17 00:00:00 2001 From: marbar3778 Date: Wed, 27 Nov 2024 20:31:23 +0100 Subject: [PATCH 02/21] fix migration --- store/v2/migration/manager.go | 6 ------ store/v2/migration/manager_test.go | 21 --------------------- 2 files changed, 27 deletions(-) diff --git a/store/v2/migration/manager.go b/store/v2/migration/manager.go index 186fb204fea4..ec6cd686905d 100644 --- a/store/v2/migration/manager.go +++ b/store/v2/migration/manager.go @@ -102,9 +102,6 @@ func (m *Manager) Migrate(height uint64) error { chStorage := make(chan *corestore.StateChanges, defaultStorageBufferSize) eg := new(errgroup.Group) - eg.Go(func() error { - return m.stateStorage.Restore(height, chStorage) - }) eg.Go(func() error { defer close(chStorage) if m.stateCommitment != nil { @@ -248,9 +245,6 @@ func (m *Manager) Sync() error { return fmt.Errorf("failed to commit changeset to commitment: %w", err) } } - if err := m.stateStorage.ApplyChangeset(cs); err != nil { - return fmt.Errorf("failed to write changeset to storage: %w", err) - } m.mtx.Lock() m.migratedVersion = version diff --git a/store/v2/migration/manager_test.go b/store/v2/migration/manager_test.go index 99de9fbe547c..62fedd4a2cb7 100644 --- a/store/v2/migration/manager_test.go +++ b/store/v2/migration/manager_test.go @@ -99,16 +99,6 @@ func TestMigrateState(t *testing.T) { require.Nil(t, val) } - // check the storage - for version := uint64(1); version < toVersion; version++ { - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - val, err := m.stateStorage.Get([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i))) - require.NoError(t, err) - require.Equal(t, []byte(fmt.Sprintf("value-%d-%d", version, i)), val) - } - } - } }) } } @@ -189,17 +179,6 @@ func TestStartMigrateState(t *testing.T) { require.Nil(t, val) } - // check the storage - for version := uint64(1); version < toVersion; version++ { - for _, 
storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - val, err := m.stateStorage.Get([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i))) - require.NoError(t, err) - require.Equal(t, []byte(fmt.Sprintf("value-%d-%d", version, i)), val) - } - } - } - // check if migration db write change set to storage for version := uint64(1); version < toVersion; version++ { buf := make([]byte, 8) From 1e73de65859bd06f943a1f07d4bde03cf5c6f78c Mon Sep 17 00:00:00 2001 From: marbar3778 Date: Wed, 27 Nov 2024 22:51:46 +0100 Subject: [PATCH 03/21] make more fixes in baseapp --- runtime/v2/go.mod | 1 - runtime/v2/go.sum | 2 -- server/v2/cometbft/abci_test.go | 7 ++-- server/v2/cometbft/go.mod | 1 - server/v2/cometbft/go.sum | 2 -- .../v2/cometbft/internal/mock/mock_reader.go | 8 ++--- .../v2/cometbft/internal/mock/mock_store.go | 23 ++----------- server/v2/cometbft/server.go | 2 -- server/v2/go.mod | 1 - server/v2/go.sum | 2 -- server/v2/store/snapshot.go | 7 ++-- simapp/v2/go.mod | 1 - simapp/v2/go.sum | 2 -- store/v2/go.mod | 1 - store/v2/go.sum | 2 -- store/v2/migration/manager_test.go | 2 +- store/v2/root/migrate_test.go | 4 +-- store/v2/root/store.go | 26 ++------------- store/v2/snapshots/helpers_test.go | 18 +---------- store/v2/snapshots/manager.go | 29 ++++------------- store/v2/snapshots/manager_test.go | 32 ++++++------------- tests/go.mod | 1 - tests/go.sum | 2 -- 23 files changed, 38 insertions(+), 138 deletions(-) diff --git a/runtime/v2/go.mod b/runtime/v2/go.mod index e9653bd73ece..07d7b2a19a9e 100644 --- a/runtime/v2/go.mod +++ b/runtime/v2/go.mod @@ -65,7 +65,6 @@ require ( github.com/linxGnu/grocksdb v1.9.3 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-sqlite3 v1.14.22 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/onsi/gomega v1.28.1 // indirect github.com/pkg/errors v0.9.1 // indirect diff --git 
a/runtime/v2/go.sum b/runtime/v2/go.sum index ffa2e8b402ff..a1c16a156978 100644 --- a/runtime/v2/go.sum +++ b/runtime/v2/go.sum @@ -164,8 +164,6 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= -github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/server/v2/cometbft/abci_test.go b/server/v2/cometbft/abci_test.go index c86ca226391b..aa78ac8e017f 100644 --- a/server/v2/cometbft/abci_test.go +++ b/server/v2/cometbft/abci_test.go @@ -581,7 +581,7 @@ func TestConsensus_Query(t *testing.T) { c := setUpConsensus(t, 100_000, cometmock.MockMempool[mock.Tx]{}) // Write data to state storage - err := c.store.GetStateStorage().ApplyChangeset(&store.Changeset{ + err := c.store.GetStateCommitment().WriteChangeset(&store.Changeset{ Version: 1, Changes: []store.StateChanges{ { @@ -597,6 +597,8 @@ func TestConsensus_Query(t *testing.T) { }, }) require.NoError(t, err) + _, err = c.store.GetStateCommitment().Commit(1) + require.NoError(t, err) _, err = c.InitChain(context.Background(), &abciproto.InitChainRequest{ Time: time.Now(), @@ -681,9 +683,8 @@ func setUpConsensus(t *testing.T, gasLimit uint64, mempool mempool.Mempool[mock. 
) require.NoError(t, err) - ss := cometmock.NewMockStorage(log.NewNopLogger(), t.TempDir()) sc := cometmock.NewMockCommiter(log.NewNopLogger(), string(actorName), "stf") - mockStore := cometmock.NewMockStore(ss, sc) + mockStore := cometmock.NewMockStore(sc) am := appmanager.New(appmanager.Config{ ValidateTxGasLimit: gasLimit, diff --git a/server/v2/cometbft/go.mod b/server/v2/cometbft/go.mod index 08759950339c..deb7ac9f081a 100644 --- a/server/v2/cometbft/go.mod +++ b/server/v2/cometbft/go.mod @@ -134,7 +134,6 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-sqlite3 v1.14.22 // indirect github.com/minio/highwayhash v1.0.3 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mtibben/percent v0.2.1 // indirect diff --git a/server/v2/cometbft/go.sum b/server/v2/cometbft/go.sum index 173298ead2e9..cecc6fedcbd5 100644 --- a/server/v2/cometbft/go.sum +++ b/server/v2/cometbft/go.sum @@ -351,8 +351,6 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= -github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/minio/highwayhash v1.0.3 h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD6Q= github.com/minio/highwayhash v1.0.3/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ= diff --git a/server/v2/cometbft/internal/mock/mock_reader.go b/server/v2/cometbft/internal/mock/mock_reader.go index 
9911ee55eb81..46c1d422c648 100644 --- a/server/v2/cometbft/internal/mock/mock_reader.go +++ b/server/v2/cometbft/internal/mock/mock_reader.go @@ -39,7 +39,7 @@ func NewMockReader(v uint64, rs *MockStore, actor []byte) *MockReader { } func (roa *MockReader) Has(key []byte) (bool, error) { - val, err := roa.store.GetStateStorage().Has(roa.actor, roa.version, key) + val, err := roa.store.GetStateCommitment().Has(roa.actor, roa.version, key) if err != nil { return false, err } @@ -48,7 +48,7 @@ func (roa *MockReader) Has(key []byte) (bool, error) { } func (roa *MockReader) Get(key []byte) ([]byte, error) { - result, err := roa.store.GetStateStorage().Get(roa.actor, roa.version, key) + result, err := roa.store.GetStateCommitment().Get(roa.actor, roa.version, key) if err != nil { return nil, err } @@ -57,9 +57,9 @@ func (roa *MockReader) Get(key []byte) ([]byte, error) { } func (roa *MockReader) Iterator(start, end []byte) (corestore.Iterator, error) { - return roa.store.GetStateStorage().Iterator(roa.actor, roa.version, start, end) + return roa.store.GetStateCommitment().Iterator(roa.actor, roa.version, start, end) } func (roa *MockReader) ReverseIterator(start, end []byte) (corestore.Iterator, error) { - return roa.store.GetStateStorage().ReverseIterator(roa.actor, roa.version, start, end) + return roa.store.GetStateCommitment().ReverseIterator(roa.actor, roa.version, start, end) } diff --git a/server/v2/cometbft/internal/mock/mock_store.go b/server/v2/cometbft/internal/mock/mock_store.go index e9f7be3edcb3..6f2de4747762 100644 --- a/server/v2/cometbft/internal/mock/mock_store.go +++ b/server/v2/cometbft/internal/mock/mock_store.go @@ -11,21 +11,12 @@ import ( "cosmossdk.io/store/v2/commitment/iavl" dbm "cosmossdk.io/store/v2/db" "cosmossdk.io/store/v2/proof" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/sqlite" ) type MockStore struct { - Storage storev2.VersionedWriter Committer storev2.Committer } -func NewMockStorage(logger log.Logger, dir 
string) storev2.VersionedWriter { - storageDB, _ := sqlite.New(dir) - ss := storage.NewStorageStore(storageDB, logger) - return ss -} - func NewMockCommiter(logger log.Logger, actors ...string) storev2.Committer { treeMap := make(map[string]commitment.Tree) for _, actor := range actors { @@ -36,8 +27,8 @@ func NewMockCommiter(logger log.Logger, actors ...string) storev2.Committer { return sc } -func NewMockStore(ss storev2.VersionedWriter, sc storev2.Committer) *MockStore { - return &MockStore{Storage: ss, Committer: sc} +func NewMockStore(sc storev2.Committer) *MockStore { + return &MockStore{Committer: sc} } func (s *MockStore) GetLatestVersion() (uint64, error) { @@ -59,12 +50,8 @@ func (s *MockStore) StateLatest() (uint64, corestore.ReaderMap, error) { } func (s *MockStore) Commit(changeset *corestore.Changeset) (corestore.Hash, error) { - err := s.Storage.ApplyChangeset(changeset) - if err != nil { - return []byte{}, err - } - err = s.Committer.WriteChangeset(changeset) + err := s.Committer.WriteChangeset(changeset) if err != nil { return []byte{}, err } @@ -81,10 +68,6 @@ func (s *MockStore) StateAt(version uint64) (corestore.ReaderMap, error) { return NewMockReaderMap(version, s), nil } -func (s *MockStore) GetStateStorage() storev2.VersionedWriter { - return s.Storage -} - func (s *MockStore) GetStateCommitment() storev2.Committer { return s.Committer } diff --git a/server/v2/cometbft/server.go b/server/v2/cometbft/server.go index e844df3c80c3..231841ba4d74 100644 --- a/server/v2/cometbft/server.go +++ b/server/v2/cometbft/server.go @@ -126,7 +126,6 @@ func New[T transaction.Tx]( indexEvents[e] = struct{}{} } - ss := store.GetStateStorage().(snapshots.StorageSnapshotter) sc := store.GetStateCommitment().(snapshots.CommitSnapshotter) snapshotStore, err := GetSnapshotStore(srv.config.ConfigTomlConfig.RootDir) @@ -154,7 +153,6 @@ func New[T transaction.Tx]( snapshotStore, srv.serverOptions.SnapshotOptions(cfg), sc, - ss, nil, // extensions snapshotter 
registered below logger, ) diff --git a/server/v2/go.mod b/server/v2/go.mod index 2905d7e23c1c..4003974e038f 100644 --- a/server/v2/go.mod +++ b/server/v2/go.mod @@ -83,7 +83,6 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-sqlite3 v1.14.22 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/oklog/run v1.1.0 // indirect github.com/onsi/gomega v1.28.1 // indirect diff --git a/server/v2/go.sum b/server/v2/go.sum index e494b8b56be5..c0446b0564f5 100644 --- a/server/v2/go.sum +++ b/server/v2/go.sum @@ -227,8 +227,6 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= -github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= diff --git a/server/v2/store/snapshot.go b/server/v2/store/snapshot.go index c858d47757a9..bf9e5ddb3827 100644 --- a/server/v2/store/snapshot.go +++ b/server/v2/store/snapshot.go @@ -375,10 +375,11 @@ func createSnapshotsManager( } sm := snapshots.NewManager( - snapshotStore, snapshots.NewSnapshotOptions(interval, uint32(keepRecent)), + snapshotStore, + snapshots.NewSnapshotOptions(interval, uint32(keepRecent)), store.GetStateCommitment().(snapshots.CommitSnapshotter), - 
store.GetStateStorage().(snapshots.StorageSnapshotter), - nil, logger) + nil, + logger) return sm, nil } diff --git a/simapp/v2/go.mod b/simapp/v2/go.mod index a74d0400734a..27fb079657da 100644 --- a/simapp/v2/go.mod +++ b/simapp/v2/go.mod @@ -172,7 +172,6 @@ require ( github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect - github.com/mattn/go-sqlite3 v1.14.22 // indirect github.com/mdp/qrterminal/v3 v3.2.0 // indirect github.com/minio/highwayhash v1.0.3 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect diff --git a/simapp/v2/go.sum b/simapp/v2/go.sum index f0dfce3659ea..2949992b4d1c 100644 --- a/simapp/v2/go.sum +++ b/simapp/v2/go.sum @@ -666,8 +666,6 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= -github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mdp/qrterminal/v3 v3.2.0 h1:qteQMXO3oyTK4IHwj2mWsKYYRBOp1Pj2WRYFYYNTCdk= github.com/mdp/qrterminal/v3 v3.2.0/go.mod h1:XGGuua4Lefrl7TLEsSONiD+UEjQXJZ4mPzF+gWYIJkk= diff --git a/store/v2/go.mod b/store/v2/go.mod index 8c32d86dd0d7..12f14cbd351b 100644 --- a/store/v2/go.mod +++ b/store/v2/go.mod @@ -15,7 +15,6 @@ require ( github.com/google/btree v1.1.2 github.com/hashicorp/go-metrics v0.5.3 github.com/linxGnu/grocksdb v1.9.3 - github.com/mattn/go-sqlite3 v1.14.22 github.com/spf13/cast v1.7.0 github.com/stretchr/testify v1.10.0 github.com/syndtr/goleveldb 
v1.0.1-0.20220721030215-126854af5e6d diff --git a/store/v2/go.sum b/store/v2/go.sum index 4a42918d7265..dca51bd89319 100644 --- a/store/v2/go.sum +++ b/store/v2/go.sum @@ -155,8 +155,6 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= -github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/store/v2/migration/manager_test.go b/store/v2/migration/manager_test.go index 62fedd4a2cb7..b1e9abaa15ab 100644 --- a/store/v2/migration/manager_test.go +++ b/store/v2/migration/manager_test.go @@ -33,7 +33,7 @@ func setupMigrationManager(t *testing.T, noCommitStore bool) (*Manager, *commitm snapshotsStore, err := snapshots.NewStore(t.TempDir()) require.NoError(t, err) - snapshotsManager := snapshots.NewManager(snapshotsStore, snapshots.NewSnapshotOptions(1500, 2), commitStore, nil, nil, coretesting.NewNopLogger()) + snapshotsManager := snapshots.NewManager(snapshotsStore, snapshots.NewSnapshotOptions(1500, 2), commitStore, nil, coretesting.NewNopLogger()) db1 := dbm.NewMemDB() multiTrees1 := make(map[string]commitment.Tree) diff --git a/store/v2/root/migrate_test.go b/store/v2/root/migrate_test.go index 437746392968..3b431bdb24f6 100644 --- a/store/v2/root/migrate_test.go +++ 
b/store/v2/root/migrate_test.go @@ -68,7 +68,7 @@ func (s *MigrateStoreTestSuite) SetupTest() { snapshotsStore, err := snapshots.NewStore(s.T().TempDir()) s.Require().NoError(err) - snapshotManager := snapshots.NewManager(snapshotsStore, snapshots.NewSnapshotOptions(1500, 2), orgSC, nil, nil, testLog) + snapshotManager := snapshots.NewManager(snapshotsStore, snapshots.NewSnapshotOptions(1500, 2), orgSC, nil, testLog) migrationManager := migration.NewManager(dbm.NewMemDB(), snapshotManager, sc, testLog) pm := pruning.NewManager(sc, nil) @@ -108,7 +108,7 @@ func (s *MigrateStoreTestSuite) TestMigrateState() { s.Require().NoError(err) // check if the migration is completed - ver, err := s.rootStore.GetStateStorage().GetLatestVersion() + ver, err := s.rootStore.GetLatestVersion() s.Require().NoError(err) if ver == latestVersion { break diff --git a/store/v2/root/store.go b/store/v2/root/store.go index 80c5f6bab1b6..438d2b9e725f 100644 --- a/store/v2/root/store.go +++ b/store/v2/root/store.go @@ -180,29 +180,9 @@ func (s *Store) Query(storeKey []byte, version uint64, key []byte, prove bool) ( defer s.telemetry.MeasureSince(now, "root_store", "query") } - var val []byte - var err error - if s.isMigrating { // if we're migrating, we need to query the SC backend - val, err = s.stateCommitment.Get(storeKey, version, key) - if err != nil { - return store.QueryResult{}, fmt.Errorf("failed to query SC store: %w", err) - } - } else { - val, err = s.stateStorage.Get(storeKey, version, key) - if err != nil { - return store.QueryResult{}, fmt.Errorf("failed to query SS store: %w", err) - } - if val == nil { - // fallback to querying SC backend if not found in SS backend - // - // Note, this should only used during migration, i.e. while SS and IAVL v2 - // are being asynchronously synced. 
- bz, scErr := s.stateCommitment.Get(storeKey, version, key) - if scErr != nil { - return store.QueryResult{}, fmt.Errorf("failed to query SC store: %w", scErr) - } - val = bz - } + val, err := s.stateCommitment.Get(storeKey, version, key) + if err != nil { + return store.QueryResult{}, fmt.Errorf("failed to query SC store: %w", err) } result := store.QueryResult{ diff --git a/store/v2/snapshots/helpers_test.go b/store/v2/snapshots/helpers_test.go index 657fd5c6f1ba..5e272e36523b 100644 --- a/store/v2/snapshots/helpers_test.go +++ b/store/v2/snapshots/helpers_test.go @@ -169,22 +169,6 @@ func (m *mockCommitSnapshotter) SupportedFormats() []uint32 { return []uint32{snapshotstypes.CurrentFormat} } -type mockStorageSnapshotter struct { - items map[string][]byte -} - -func (m *mockStorageSnapshotter) Restore(version uint64, chStorage <-chan *corestore.StateChanges) error { - // mock consuming chStorage to check if the loop closed properly - // - // ref: https://github.com/cosmos/cosmos-sdk/pull/21106 - for change := range chStorage { - for _, kv := range change.StateChanges { - m.items[string(kv.Key)] = kv.Value - } - } - return nil -} - type mockErrorCommitSnapshotter struct{} var _ snapshots.CommitSnapshotter = (*mockErrorCommitSnapshotter)(nil) @@ -214,7 +198,7 @@ func setupBusyManager(t *testing.T) *snapshots.Manager { store, err := snapshots.NewStore(t.TempDir()) require.NoError(t, err) hung := newHungCommitSnapshotter() - mgr := snapshots.NewManager(store, opts, hung, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger()) + mgr := snapshots.NewManager(store, opts, hung, nil, coretesting.NewNopLogger()) // Channel to ensure the test doesn't finish until the goroutine is done. 
// Without this, there are intermittent test failures about diff --git a/store/v2/snapshots/manager.go b/store/v2/snapshots/manager.go index 85d2cf26be25..afee21e398cb 100644 --- a/store/v2/snapshots/manager.go +++ b/store/v2/snapshots/manager.go @@ -38,8 +38,6 @@ type Manager struct { opts SnapshotOptions // commitSnapshotter is the snapshotter for the commitment state. commitSnapshotter CommitSnapshotter - // storageSnapshotter is the snapshotter for the storage state. - storageSnapshotter StorageSnapshotter logger corelog.Logger @@ -76,17 +74,16 @@ const ( var ErrOptsZeroSnapshotInterval = errors.New("snapshot-interval must not be 0") // NewManager creates a new manager. -func NewManager(store *Store, opts SnapshotOptions, commitSnapshotter CommitSnapshotter, storageSnapshotter StorageSnapshotter, extensions map[string]ExtensionSnapshotter, logger corelog.Logger) *Manager { +func NewManager(store *Store, opts SnapshotOptions, commitSnapshotter CommitSnapshotter, extensions map[string]ExtensionSnapshotter, logger corelog.Logger) *Manager { if extensions == nil { extensions = map[string]ExtensionSnapshotter{} } return &Manager{ - store: store, - opts: opts, - commitSnapshotter: commitSnapshotter, - storageSnapshotter: storageSnapshotter, - extensions: extensions, - logger: logger, + store: store, + opts: opts, + commitSnapshotter: commitSnapshotter, + extensions: extensions, + logger: logger, } } @@ -401,15 +398,6 @@ func (m *Manager) doRestoreSnapshot(snapshot types.Snapshot, chChunks <-chan io. // chStorage is the channel to pass the KV pairs to the storage snapshotter. 
chStorage := make(chan *corestore.StateChanges, defaultStorageChannelBufferSize) - storageErrs := make(chan error, 1) - go func() { - defer close(storageErrs) - err := m.storageSnapshotter.Restore(snapshot.Height, chStorage) - if err != nil { - storageErrs <- err - } - }() - nextItem, err = m.commitSnapshotter.Restore(snapshot.Height, snapshot.Format, streamReader, chStorage) if err != nil { return errorsmod.Wrap(err, "multistore restore") @@ -445,11 +433,6 @@ func (m *Manager) doRestoreSnapshot(snapshot types.Snapshot, chChunks <-chan io. } } - // wait for storage snapshotter to complete - if err := <-storageErrs; err != nil { - return errorsmod.Wrap(err, "storage snapshotter") - } - return nil } diff --git a/store/v2/snapshots/manager_test.go b/store/v2/snapshots/manager_test.go index 2ecec5660066..e374b4c75cd0 100644 --- a/store/v2/snapshots/manager_test.go +++ b/store/v2/snapshots/manager_test.go @@ -2,7 +2,6 @@ package snapshots_test import ( "errors" - "fmt" "testing" "time" @@ -19,8 +18,7 @@ var opts = snapshots.NewSnapshotOptions(1500, 2) func TestManager_List(t *testing.T) { store := setupStore(t) commitSnapshotter := &mockCommitSnapshotter{} - storageSnapshotter := &mockStorageSnapshotter{} - manager := snapshots.NewManager(store, opts, commitSnapshotter, storageSnapshotter, nil, coretesting.NewNopLogger()) + manager := snapshots.NewManager(store, opts, commitSnapshotter, nil, coretesting.NewNopLogger()) mgrList, err := manager.List() require.NoError(t, err) @@ -41,7 +39,7 @@ func TestManager_List(t *testing.T) { func TestManager_LoadChunk(t *testing.T) { store := setupStore(t) - manager := snapshots.NewManager(store, opts, &mockCommitSnapshotter{}, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger()) + manager := snapshots.NewManager(store, opts, &mockCommitSnapshotter{}, nil, coretesting.NewNopLogger()) // Existing chunk should return body chunk, err := manager.LoadChunk(2, 1, 1) @@ -73,7 +71,7 @@ func TestManager_Take(t *testing.T) { 
extSnapshotter := newExtSnapshotter(10) expectChunks := snapshotItems(items, extSnapshotter) - manager := snapshots.NewManager(store, opts, commitSnapshotter, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger()) + manager := snapshots.NewManager(store, opts, commitSnapshotter, nil, coretesting.NewNopLogger()) err := manager.RegisterExtensions(extSnapshotter) require.NoError(t, err) @@ -112,7 +110,7 @@ func TestManager_Take(t *testing.T) { func TestManager_Prune(t *testing.T) { store := setupStore(t) - manager := snapshots.NewManager(store, opts, &mockCommitSnapshotter{}, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger()) + manager := snapshots.NewManager(store, opts, &mockCommitSnapshotter{}, nil, coretesting.NewNopLogger()) pruned, err := manager.Prune(2) require.NoError(t, err) @@ -131,9 +129,8 @@ func TestManager_Prune(t *testing.T) { func TestManager_Restore(t *testing.T) { store := setupStore(t) target := &mockCommitSnapshotter{} - storageSnapshotter := &mockStorageSnapshotter{items: map[string][]byte{}} extSnapshotter := newExtSnapshotter(0) - manager := snapshots.NewManager(store, opts, target, storageSnapshotter, nil, coretesting.NewNopLogger()) + manager := snapshots.NewManager(store, opts, target, nil, coretesting.NewNopLogger()) err := manager.RegisterExtensions(extSnapshotter) require.NoError(t, err) @@ -206,14 +203,6 @@ func TestManager_Restore(t *testing.T) { assert.Equal(t, expectItems, target.items) assert.Equal(t, 10, len(extSnapshotter.state)) - // make sure storageSnapshotter items are properly stored - for i, item := range target.items { - key := fmt.Sprintf("key-%d", i) - chunk := storageSnapshotter.items[key] - require.NotNil(t, chunk) - require.Equal(t, item, chunk) - } - // The snapshot is saved in local snapshot store snapshots, err := store.List() require.NoError(t, err) @@ -260,7 +249,7 @@ func TestManager_TakeError(t *testing.T) { snapshotter := &mockErrorCommitSnapshotter{} store, err := snapshots.NewStore(t.TempDir()) 
require.NoError(t, err) - manager := snapshots.NewManager(store, opts, snapshotter, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger()) + manager := snapshots.NewManager(store, opts, snapshotter, nil, coretesting.NewNopLogger()) _, err = manager.Create(1) require.Error(t, err) @@ -276,12 +265,11 @@ func TestSnapshot_Take_Restore(t *testing.T) { commitSnapshotter := &mockCommitSnapshotter{ items: items, } - storageSnapshotter := &mockStorageSnapshotter{items: map[string][]byte{}} extSnapshotter := newExtSnapshotter(10) expectChunks := snapshotItems(items, extSnapshotter) - manager := snapshots.NewManager(store, opts, commitSnapshotter, storageSnapshotter, nil, coretesting.NewNopLogger()) + manager := snapshots.NewManager(store, opts, commitSnapshotter, nil, coretesting.NewNopLogger()) err := manager.RegisterExtensions(extSnapshotter) require.NoError(t, err) @@ -367,7 +355,7 @@ func TestSnapshot_Take_Prune(t *testing.T) { extSnapshotter := newExtSnapshotter(10) expectChunks := snapshotItems(items, extSnapshotter) - manager := snapshots.NewManager(store, opts, commitSnapshotter, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger()) + manager := snapshots.NewManager(store, opts, commitSnapshotter, nil, coretesting.NewNopLogger()) err := manager.RegisterExtensions(extSnapshotter) require.NoError(t, err) @@ -446,7 +434,7 @@ func TestSnapshot_Pruning_Take_Snapshot_Parallel(t *testing.T) { extSnapshotter := newExtSnapshotter(10) expectChunks := snapshotItems(items, extSnapshotter) - manager := snapshots.NewManager(store, opts, commitSnapshotter, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger()) + manager := snapshots.NewManager(store, opts, commitSnapshotter, nil, coretesting.NewNopLogger()) err := manager.RegisterExtensions(extSnapshotter) require.NoError(t, err) @@ -518,7 +506,7 @@ func TestSnapshot_SnapshotIfApplicable(t *testing.T) { snapshotOpts := snapshots.NewSnapshotOptions(1, 1) - manager := snapshots.NewManager(store, snapshotOpts, 
commitSnapshotter, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger()) + manager := snapshots.NewManager(store, snapshotOpts, commitSnapshotter, nil, coretesting.NewNopLogger()) err := manager.RegisterExtensions(extSnapshotter) require.NoError(t, err) diff --git a/tests/go.mod b/tests/go.mod index fa985d660cdf..55e1e5db5601 100644 --- a/tests/go.mod +++ b/tests/go.mod @@ -177,7 +177,6 @@ require ( github.com/manifoldco/promptui v0.9.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-sqlite3 v1.14.22 // indirect github.com/minio/highwayhash v1.0.3 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-testing-interface v1.14.1 // indirect diff --git a/tests/go.sum b/tests/go.sum index b09c17f396f5..c7b1b68ca0c4 100644 --- a/tests/go.sum +++ b/tests/go.sum @@ -666,8 +666,6 @@ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= -github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/minio/highwayhash v1.0.3 h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD6Q= github.com/minio/highwayhash v1.0.3/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ= From bbc9fcd16bddef2a38ee28cd7691208b5eed18e8 Mon Sep 17 00:00:00 2001 From: marbar3778 Date: Thu, 28 Nov 2024 11:30:29 +0100 Subject: [PATCH 04/21] test cleanups --- store/v2/migration/README.md | 2 +- store/v2/migration/manager.go | 70 ++----- store/v2/migration/manager_test.go 
| 243 +++++++++++------------ store/v2/root/migrate_test.go | 302 ++++++++++++++--------------- store/v2/root/store.go | 10 - store/v2/root/store_mock_test.go | 21 +- store/v2/snapshots/manager_test.go | 1 + 7 files changed, 285 insertions(+), 364 deletions(-) diff --git a/store/v2/migration/README.md b/store/v2/migration/README.md index 9db8c9874a8c..88b395f63f75 100644 --- a/store/v2/migration/README.md +++ b/store/v2/migration/README.md @@ -108,4 +108,4 @@ This limitation should be clearly understood before starting the migration proce especially if the node relies on historical data for any operations. If historical queries are required, users must fully migrate all historical data to `store/v2`. -Alternatively, keeping store/v1 accessible for historical queries could be an option. \ No newline at end of file +Alternatively, keeping store/v1 accessible for historical queries could be an option. diff --git a/store/v2/migration/manager.go b/store/v2/migration/manager.go index ec6cd686905d..9d569371deff 100644 --- a/store/v2/migration/manager.go +++ b/store/v2/migration/manager.go @@ -4,8 +4,7 @@ import ( "encoding/binary" "errors" "fmt" - "io" - "sync" + "sync/atomic" "time" "golang.org/x/sync/errgroup" @@ -15,7 +14,6 @@ import ( "cosmossdk.io/store/v2/commitment" "cosmossdk.io/store/v2/internal/encoding" "cosmossdk.io/store/v2/snapshots" - snapshotstypes "cosmossdk.io/store/v2/snapshots/types" ) const ( @@ -40,9 +38,9 @@ type Manager struct { stateCommitment *commitment.CommitStore - db corestore.KVStoreWithBatch - mtx sync.Mutex // mutex for migratedVersion - migratedVersion uint64 + db corestore.KVStoreWithBatch + + migratedVersion atomic.Uint64 chChangeset <-chan *VersionedChangeset chDone <-chan struct{} @@ -90,10 +88,10 @@ func (m *Manager) GetStateCommitment() *commitment.CommitStore { // Migrate migrates the whole state at the given height to the new store/v2. 
func (m *Manager) Migrate(height uint64) error { + fmt.Println(1, "migration start") // create the migration stream and snapshot, // which acts as protoio.Reader and snapshots.WriteCloser. ms := NewMigrationStream(defaultChannelBufferSize) - if err := m.snapshotsManager.CreateMigration(height, ms); err != nil { return err } @@ -104,49 +102,9 @@ func (m *Manager) Migrate(height uint64) error { eg := new(errgroup.Group) eg.Go(func() error { defer close(chStorage) - if m.stateCommitment != nil { - if _, err := m.stateCommitment.Restore(height, 0, ms, chStorage); err != nil { - return err - } - } else { // there is no commitment migration, just consume the stream to restore the state storage - var storeKey []byte - loop: - for { - snapshotItem := snapshotstypes.SnapshotItem{} - err := ms.ReadMsg(&snapshotItem) - if errors.Is(err, io.EOF) { - break - } - if err != nil { - return fmt.Errorf("failed to read snapshot item: %w", err) - } - switch item := snapshotItem.Item.(type) { - case *snapshotstypes.SnapshotItem_Store: - storeKey = []byte(item.Store.Name) - case *snapshotstypes.SnapshotItem_IAVL: - if item.IAVL.Height == 0 { // only restore the leaf nodes - key := item.IAVL.Key - if key == nil { - key = []byte{} - } - value := item.IAVL.Value - if value == nil { - value = []byte{} - } - chStorage <- &corestore.StateChanges{ - Actor: storeKey, - StateChanges: []corestore.KVPair{ - { - Key: key, - Value: value, - }, - }, - } - } - default: - break loop - } - } + fmt.Println(2, "restore") + if _, err := m.stateCommitment.Restore(height, 0, ms, chStorage); err != nil { + return err } return nil }) @@ -155,9 +113,7 @@ func (m *Manager) Migrate(height uint64) error { return err } - m.mtx.Lock() - m.migratedVersion = height - m.mtx.Unlock() + m.migratedVersion.Add(height) return nil } @@ -201,9 +157,7 @@ func (m *Manager) writeChangeset() error { // GetMigratedVersion returns the migrated version. // It is used to check the migrated version in the RootStore. 
func (m *Manager) GetMigratedVersion() uint64 { - m.mtx.Lock() - defer m.mtx.Unlock() - return m.migratedVersion + return m.migratedVersion.Load() } // Sync catches up the Changesets which are committed while the migration is in progress. @@ -246,9 +200,7 @@ func (m *Manager) Sync() error { } } - m.mtx.Lock() - m.migratedVersion = version - m.mtx.Unlock() + m.migratedVersion.Add(version) version += 1 } diff --git a/store/v2/migration/manager_test.go b/store/v2/migration/manager_test.go index b1e9abaa15ab..1b562a9a2aa5 100644 --- a/store/v2/migration/manager_test.go +++ b/store/v2/migration/manager_test.go @@ -17,7 +17,7 @@ import ( var storeKeys = []string{"store1", "store2"} -func setupMigrationManager(t *testing.T, noCommitStore bool) (*Manager, *commitment.CommitStore) { +func setupMigrationManager(t *testing.T) (*Manager, *commitment.CommitStore) { t.Helper() db := dbm.NewMemDB() @@ -26,7 +26,6 @@ func setupMigrationManager(t *testing.T, noCommitStore bool) (*Manager, *commitm prefixDB := dbm.NewPrefixDB(db, []byte(storeKey)) multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, coretesting.NewNopLogger(), iavl.DefaultConfig()) } - commitStore, err := commitment.NewCommitStore(multiTrees, nil, db, coretesting.NewNopLogger()) require.NoError(t, err) @@ -44,150 +43,138 @@ func setupMigrationManager(t *testing.T, noCommitStore bool) (*Manager, *commitm newCommitStore, err := commitment.NewCommitStore(multiTrees1, nil, db1, coretesting.NewNopLogger()) // for store/v2 require.NoError(t, err) - if noCommitStore { - newCommitStore = nil - } return NewManager(db, snapshotsManager, newCommitStore, coretesting.NewNopLogger()), commitStore } func TestMigrateState(t *testing.T) { - for _, noCommitStore := range []bool{false, true} { - t.Run(fmt.Sprintf("Migrate noCommitStore=%v", noCommitStore), func(t *testing.T) { - m, orgCommitStore := setupMigrationManager(t, noCommitStore) - - // apply changeset - toVersion := uint64(100) - keyCount := 10 - for version := uint64(1); 
version <= toVersion; version++ { - cs := corestore.NewChangeset(version) - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) - } - } - require.NoError(t, orgCommitStore.WriteChangeset(cs)) - _, err := orgCommitStore.Commit(version) - require.NoError(t, err) + fmt.Println("start") + m, orgCommitStore := setupMigrationManager(t) + // apply changeset + toVersion := uint64(100) + keyCount := 10 + for version := uint64(1); version <= toVersion; version++ { + cs := corestore.NewChangeset(version) + for _, storeKey := range storeKeys { + for i := 0; i < keyCount; i++ { + cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) } + } + require.NoError(t, orgCommitStore.WriteChangeset(cs)) + _, err := orgCommitStore.Commit(version) + require.NoError(t, err) + } - err := m.Migrate(toVersion - 1) - require.NoError(t, err) - - // expecting error for conflicting process, since Migrate trigger snapshotter create migration, - // which start a snapshot process already. 
- _, err = m.snapshotsManager.Create(toVersion - 1) - require.Error(t, err) - - if m.stateCommitment != nil { - // check the migrated state - for version := uint64(1); version < toVersion; version++ { - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - val, err := m.stateCommitment.Get([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i))) - require.NoError(t, err) - require.Equal(t, []byte(fmt.Sprintf("value-%d-%d", version, i)), val) - } - } - } - // check the latest state - val, err := m.stateCommitment.Get([]byte("store1"), toVersion-1, []byte("key-100-1")) - require.NoError(t, err) - require.Nil(t, val) - val, err = m.stateCommitment.Get([]byte("store2"), toVersion-1, []byte("key-100-0")) + err := m.Migrate(toVersion - 1) + require.NoError(t, err) + + // expecting error for conflicting process, since Migrate trigger snapshotter create migration, + // which start a snapshot process already. + _, err = m.snapshotsManager.Create(toVersion - 1) + fmt.Println(1) + require.Error(t, err) + + // check the migrated state + for version := uint64(1); version < toVersion; version++ { + for _, storeKey := range storeKeys { + for i := 0; i < keyCount; i++ { + val, err := m.stateCommitment.Get([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i))) require.NoError(t, err) - require.Nil(t, val) + require.Equal(t, []byte(fmt.Sprintf("value-%d-%d", version, i)), val) } - - }) + } + + // check the latest state + val, err := m.stateCommitment.Get([]byte("store1"), toVersion-1, []byte("key-100-1")) + require.NoError(t, err) + require.Nil(t, val) + val, err = m.stateCommitment.Get([]byte("store2"), toVersion-1, []byte("key-100-0")) + require.NoError(t, err) + require.Nil(t, val) } } func TestStartMigrateState(t *testing.T) { - for _, noCommitStore := range []bool{false, true} { - t.Run(fmt.Sprintf("Migrate noCommitStore=%v", noCommitStore), func(t *testing.T) { - m, orgCommitStore := setupMigrationManager(t, 
noCommitStore) - - chDone := make(chan struct{}) - chChangeset := make(chan *VersionedChangeset, 1) - - // apply changeset - toVersion := uint64(10) - keyCount := 5 - changesets := []corestore.Changeset{} - - for version := uint64(1); version <= toVersion; version++ { - cs := corestore.NewChangeset(version) - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) - } - } - changesets = append(changesets, *cs) - require.NoError(t, orgCommitStore.WriteChangeset(cs)) - _, err := orgCommitStore.Commit(version) - require.NoError(t, err) + m, orgCommitStore := setupMigrationManager(t) + + chDone := make(chan struct{}) + chChangeset := make(chan *VersionedChangeset, 1) + + // apply changeset + toVersion := uint64(10) + keyCount := 5 + changesets := []corestore.Changeset{} + + for version := uint64(1); version <= toVersion; version++ { + cs := corestore.NewChangeset(version) + for _, storeKey := range storeKeys { + for i := 0; i < keyCount; i++ { + cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) } + } + changesets = append(changesets, *cs) + require.NoError(t, orgCommitStore.WriteChangeset(cs)) + _, err := orgCommitStore.Commit(version) + require.NoError(t, err) + } - // feed changesets to channel - go func() { - for version := uint64(1); version <= toVersion; version++ { - chChangeset <- &VersionedChangeset{ - Version: version, - Changeset: &changesets[version-1], - } - } - }() - - // check if migrate process complete - go func() { - for { - migrateVersion := m.GetMigratedVersion() - if migrateVersion == toVersion-1 { - break - } - } + // feed changesets to channel + go func() { + for version := uint64(1); version <= toVersion; version++ { + chChangeset <- &VersionedChangeset{ + Version: version, + Changeset: &changesets[version-1], + } + } + }() + + // 
check if migrate process complete + go func() { + for { + migrateVersion := m.GetMigratedVersion() + if migrateVersion == toVersion-1 { + break + } + } + + chDone <- struct{}{} + }() - chDone <- struct{}{} - }() - - err := m.Start(toVersion-1, chChangeset, chDone) - require.NoError(t, err) - - // expecting error for conflicting process, since Migrate trigger snapshotter create migration, - // which start a snapshot process already. - _, err = m.snapshotsManager.Create(toVersion - 1) - require.Error(t, err) - - if m.stateCommitment != nil { - // check the migrated state - for version := uint64(1); version < toVersion; version++ { - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - val, err := m.stateCommitment.Get([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i))) - require.NoError(t, err) - require.Equal(t, []byte(fmt.Sprintf("value-%d-%d", version, i)), val) - } - } + err := m.Start(toVersion-1, chChangeset, chDone) + require.NoError(t, err) + + // expecting error for conflicting process, since Migrate trigger snapshotter create migration, + // which start a snapshot process already. 
+ _, err = m.snapshotsManager.Create(toVersion - 1) + require.Error(t, err) + + if m.stateCommitment != nil { + // check the migrated state + for version := uint64(1); version < toVersion; version++ { + for _, storeKey := range storeKeys { + for i := 0; i < keyCount; i++ { + val, err := m.stateCommitment.Get([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i))) + require.NoError(t, err) + require.Equal(t, []byte(fmt.Sprintf("value-%d-%d", version, i)), val) } - // check the latest state - val, err := m.stateCommitment.Get([]byte("store1"), toVersion-1, []byte("key-100-1")) - require.NoError(t, err) - require.Nil(t, val) - val, err = m.stateCommitment.Get([]byte("store2"), toVersion-1, []byte("key-100-0")) - require.NoError(t, err) - require.Nil(t, val) } + } + // check the latest state + val, err := m.stateCommitment.Get([]byte("store1"), toVersion-1, []byte("key-100-1")) + require.NoError(t, err) + require.Nil(t, val) + val, err = m.stateCommitment.Get([]byte("store2"), toVersion-1, []byte("key-100-0")) + require.NoError(t, err) + require.Nil(t, val) + } - // check if migration db write change set to storage - for version := uint64(1); version < toVersion; version++ { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, version) - csKey := []byte(fmt.Sprintf(migrateChangesetKeyFmt, buf)) - csVal, err := m.db.Get(csKey) - require.NoError(t, err) - require.NotEmpty(t, csVal) - } - }) + // check if migration db write change set to storage + for version := uint64(1); version < toVersion; version++ { + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, version) + csKey := []byte(fmt.Sprintf(migrateChangesetKeyFmt, buf)) + csVal, err := m.db.Get(csKey) + require.NoError(t, err) + require.NotEmpty(t, csVal) } } diff --git a/store/v2/root/migrate_test.go b/store/v2/root/migrate_test.go index 3b431bdb24f6..85dd8738e884 100644 --- a/store/v2/root/migrate_test.go +++ b/store/v2/root/migrate_test.go @@ -1,156 +1,156 @@ package root -import ( 
- "fmt" - "testing" - "time" - - "github.com/stretchr/testify/suite" - - corestore "cosmossdk.io/core/store" - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/log" - "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/commitment" - "cosmossdk.io/store/v2/commitment/iavl" - dbm "cosmossdk.io/store/v2/db" - "cosmossdk.io/store/v2/migration" - "cosmossdk.io/store/v2/pruning" - "cosmossdk.io/store/v2/snapshots" -) +// import ( +// "fmt" +// "testing" +// "time" + +// "github.com/stretchr/testify/suite" + +// corestore "cosmossdk.io/core/store" +// coretesting "cosmossdk.io/core/testing" +// "cosmossdk.io/log" +// "cosmossdk.io/store/v2" +// "cosmossdk.io/store/v2/commitment" +// "cosmossdk.io/store/v2/commitment/iavl" +// dbm "cosmossdk.io/store/v2/db" +// "cosmossdk.io/store/v2/migration" +// "cosmossdk.io/store/v2/pruning" +// "cosmossdk.io/store/v2/snapshots" +// ) var storeKeys = []string{"store1", "store2", "store3"} -type MigrateStoreTestSuite struct { - suite.Suite - - rootStore store.RootStore -} - -func TestMigrateStoreTestSuite(t *testing.T) { - suite.Run(t, &MigrateStoreTestSuite{}) -} - -func (s *MigrateStoreTestSuite) SetupTest() { - testLog := log.NewTestLogger(s.T()) - nopLog := coretesting.NewNopLogger() - - mdb := dbm.NewMemDB() - multiTrees := make(map[string]commitment.Tree) - for _, storeKey := range storeKeys { - prefixDB := dbm.NewPrefixDB(mdb, []byte(storeKey)) - multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, nopLog, iavl.DefaultConfig()) - } - orgSC, err := commitment.NewCommitStore(multiTrees, nil, mdb, testLog) - s.Require().NoError(err) - - // apply changeset against the original store - toVersion := uint64(200) - keyCount := 10 - for version := uint64(1); version <= toVersion; version++ { - cs := corestore.NewChangeset(version) - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) - } - } 
- s.Require().NoError(orgSC.WriteChangeset(cs)) - _, err = orgSC.Commit(version) - s.Require().NoError(err) - } - - multiTrees1 := make(map[string]commitment.Tree) - for _, storeKey := range storeKeys { - multiTrees1[storeKey] = iavl.NewIavlTree(dbm.NewMemDB(), nopLog, iavl.DefaultConfig()) - } - sc, err := commitment.NewCommitStore(multiTrees1, nil, dbm.NewMemDB(), testLog) - s.Require().NoError(err) - - snapshotsStore, err := snapshots.NewStore(s.T().TempDir()) - s.Require().NoError(err) - snapshotManager := snapshots.NewManager(snapshotsStore, snapshots.NewSnapshotOptions(1500, 2), orgSC, nil, testLog) - migrationManager := migration.NewManager(dbm.NewMemDB(), snapshotManager, sc, testLog) - pm := pruning.NewManager(sc, nil) - - // assume no storage store, simulate the migration process - s.rootStore, err = New(dbm.NewMemDB(), testLog, orgSC, pm, migrationManager, nil) - s.Require().NoError(err) -} - -func (s *MigrateStoreTestSuite) TestMigrateState() { - err := s.rootStore.LoadLatestVersion() - s.Require().NoError(err) - originalLatestVersion, err := s.rootStore.GetLatestVersion() - s.Require().NoError(err) - - // check if the Query fallback to the original SC - for version := uint64(1); version <= originalLatestVersion; version++ { - for _, storeKey := range storeKeys { - for i := 0; i < 10; i++ { - res, err := s.rootStore.Query([]byte(storeKey), version, []byte(fmt.Sprintf("key-%d-%d", version, i)), true) - s.Require().NoError(err) - s.Require().Equal([]byte(fmt.Sprintf("value-%d-%d", version, i)), res.Value) - } - } - } - - // continue to apply changeset against the original store - latestVersion := originalLatestVersion + 1 - keyCount := 10 - for ; latestVersion < 2*originalLatestVersion; latestVersion++ { - cs := corestore.NewChangeset(latestVersion) - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", latestVersion, i)), []byte(fmt.Sprintf("value-%d-%d", latestVersion, i)), 
false) - } - } - _, err = s.rootStore.Commit(cs) - s.Require().NoError(err) - - // check if the migration is completed - ver, err := s.rootStore.GetLatestVersion() - s.Require().NoError(err) - if ver == latestVersion { - break - } - - // add some delay to simulate the consensus process - time.Sleep(100 * time.Millisecond) - } - - // check if the migration is successful - version, err := s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(latestVersion, version) - - // query against the migrated store - for version := uint64(1); version <= latestVersion; version++ { - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - targetVersion := version - if version < originalLatestVersion { - targetVersion = originalLatestVersion - } - res, err := s.rootStore.Query([]byte(storeKey), targetVersion, []byte(fmt.Sprintf("key-%d-%d", version, i)), true) - s.Require().NoError(err) - s.Require().Equal([]byte(fmt.Sprintf("value-%d-%d", version, i)), res.Value) - } - } - } - - // apply changeset against the migrated store - for version := latestVersion + 1; version <= latestVersion+10; version++ { - cs := corestore.NewChangeset(version) - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) - } - } - _, err = s.rootStore.Commit(cs) - s.Require().NoError(err) - } - - version, err = s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(latestVersion+10, version) -} +// type MigrateStoreTestSuite struct { +// suite.Suite + +// rootStore store.RootStore +// } + +// func TestMigrateStoreTestSuite(t *testing.T) { +// suite.Run(t, &MigrateStoreTestSuite{}) +// } + +// func (s *MigrateStoreTestSuite) SetupTest() { +// testLog := log.NewTestLogger(s.T()) +// nopLog := coretesting.NewNopLogger() + +// mdb := dbm.NewMemDB() +// multiTrees := make(map[string]commitment.Tree) +// for 
_, storeKey := range storeKeys { +// prefixDB := dbm.NewPrefixDB(mdb, []byte(storeKey)) +// multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, nopLog, iavl.DefaultConfig()) +// } +// orgSC, err := commitment.NewCommitStore(multiTrees, nil, mdb, testLog) +// s.Require().NoError(err) + +// // apply changeset against the original store +// toVersion := uint64(200) +// keyCount := 10 +// for version := uint64(1); version <= toVersion; version++ { +// cs := corestore.NewChangeset(version) +// for _, storeKey := range storeKeys { +// for i := 0; i < keyCount; i++ { +// cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) +// } +// } +// s.Require().NoError(orgSC.WriteChangeset(cs)) +// _, err = orgSC.Commit(version) +// s.Require().NoError(err) +// } + +// multiTrees1 := make(map[string]commitment.Tree) +// for _, storeKey := range storeKeys { +// multiTrees1[storeKey] = iavl.NewIavlTree(dbm.NewMemDB(), nopLog, iavl.DefaultConfig()) +// } +// sc, err := commitment.NewCommitStore(multiTrees1, nil, dbm.NewMemDB(), testLog) +// s.Require().NoError(err) + +// snapshotsStore, err := snapshots.NewStore(s.T().TempDir()) +// s.Require().NoError(err) +// snapshotManager := snapshots.NewManager(snapshotsStore, snapshots.NewSnapshotOptions(1500, 2), orgSC, nil, testLog) +// migrationManager := migration.NewManager(dbm.NewMemDB(), snapshotManager, sc, testLog) +// pm := pruning.NewManager(sc, nil) + +// // assume no storage store, simulate the migration process +// s.rootStore, err = New(dbm.NewMemDB(), testLog, orgSC, pm, migrationManager, nil) +// s.Require().NoError(err) +// } + +// func (s *MigrateStoreTestSuite) TestMigrateState() { +// err := s.rootStore.LoadLatestVersion() +// s.Require().NoError(err) +// originalLatestVersion, err := s.rootStore.GetLatestVersion() +// s.Require().NoError(err) + +// // check if the Query fallback to the original SC +// for version := uint64(1); version <= 
originalLatestVersion; version++ { +// for _, storeKey := range storeKeys { +// for i := 0; i < 10; i++ { +// res, err := s.rootStore.Query([]byte(storeKey), version, []byte(fmt.Sprintf("key-%d-%d", version, i)), true) +// s.Require().NoError(err) +// s.Require().Equal([]byte(fmt.Sprintf("value-%d-%d", version, i)), res.Value) +// } +// } +// } + +// // continue to apply changeset against the original store +// latestVersion := originalLatestVersion + 1 +// keyCount := 10 +// for ; latestVersion < 2*originalLatestVersion; latestVersion++ { +// cs := corestore.NewChangeset(latestVersion) +// for _, storeKey := range storeKeys { +// for i := 0; i < keyCount; i++ { +// cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", latestVersion, i)), []byte(fmt.Sprintf("value-%d-%d", latestVersion, i)), false) +// } +// } +// _, err = s.rootStore.Commit(cs) +// s.Require().NoError(err) + +// // check if the migration is completed +// ver, err := s.rootStore.GetLatestVersion() +// s.Require().NoError(err) +// if ver == latestVersion { +// break +// } + +// // add some delay to simulate the consensus process +// time.Sleep(100 * time.Millisecond) +// } + +// // check if the migration is successful +// version, err := s.rootStore.GetLatestVersion() +// s.Require().NoError(err) +// s.Require().Equal(latestVersion, version) + +// // query against the migrated store +// for version := uint64(1); version <= latestVersion; version++ { +// for _, storeKey := range storeKeys { +// for i := 0; i < keyCount; i++ { +// targetVersion := version +// if version < originalLatestVersion { +// targetVersion = originalLatestVersion +// } +// res, err := s.rootStore.Query([]byte(storeKey), targetVersion, []byte(fmt.Sprintf("key-%d-%d", version, i)), true) +// s.Require().NoError(err) +// s.Require().Equal([]byte(fmt.Sprintf("value-%d-%d", version, i)), res.Value) +// } +// } +// } + +// // apply changeset against the migrated store +// for version := latestVersion + 1; version <= 
latestVersion+10; version++ { +// cs := corestore.NewChangeset(version) +// for _, storeKey := range storeKeys { +// for i := 0; i < keyCount; i++ { +// cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) +// } +// } +// _, err = s.rootStore.Commit(cs) +// s.Require().NoError(err) +// } + +// version, err = s.rootStore.GetLatestVersion() +// s.Require().NoError(err) +// s.Require().Equal(latestVersion+10, version) +// } diff --git a/store/v2/root/store.go b/store/v2/root/store.go index 438d2b9e725f..e234cb8aedb7 100644 --- a/store/v2/root/store.go +++ b/store/v2/root/store.go @@ -244,16 +244,6 @@ func (s *Store) LoadVersionAndUpgrade(version uint64, upgrades *corestore.StoreU return err } - //TODO why are we not pruning sc keys? - // // if the state storage implements the UpgradableDatabase interface, prune the - // // deleted store keys - // upgradableDatabase, ok := s.stateStorage.(store.UpgradableDatabase) - // if ok { - // if err := upgradableDatabase.PruneStoreKeys(upgrades.Deleted, version); err != nil { - // return fmt.Errorf("failed to prune store keys %v: %w", upgrades.Deleted, err) - // } - // } - return nil } diff --git a/store/v2/root/store_mock_test.go b/store/v2/root/store_mock_test.go index 9afa2d34969d..57b4940571f8 100644 --- a/store/v2/root/store_mock_test.go +++ b/store/v2/root/store_mock_test.go @@ -15,7 +15,7 @@ import ( "cosmossdk.io/store/v2/pruning" ) -func newTestRootStore(ss store.VersionedWriter, sc store.Committer) *Store { +func newTestRootStore(sc store.Committer) *Store { noopLog := coretesting.NewNopLogger() pm := pruning.NewManager(sc.(store.Pruner), nil) return &Store{ @@ -29,9 +29,8 @@ func newTestRootStore(ss store.VersionedWriter, sc store.Committer) *Store { func TestGetLatestState(t *testing.T) { ctrl := gomock.NewController(t) - ss := mock.NewMockStateStorage(ctrl) sc := mock.NewMockStateCommitter(ctrl) - rs := newTestRootStore(ss, sc) + rs := 
newTestRootStore(sc) // Get the latest version sc.EXPECT().GetLatestVersion().Return(uint64(0), errors.New("error")) @@ -45,30 +44,24 @@ func TestGetLatestState(t *testing.T) { func TestQuery(t *testing.T) { ctrl := gomock.NewController(t) - ss := mock.NewMockStateStorage(ctrl) sc := mock.NewMockStateCommitter(ctrl) - rs := newTestRootStore(ss, sc) + rs := newTestRootStore(sc) // Query without Proof - ss.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("error")) _, err := rs.Query(nil, 0, nil, false) require.Error(t, err) - ss.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) sc.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("error")) _, err = rs.Query(nil, 0, nil, false) require.Error(t, err) - ss.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) sc.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return([]byte("value"), nil) v, err := rs.Query(nil, 0, nil, false) require.NoError(t, err) require.Equal(t, []byte("value"), v.Value) - ss.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return([]byte("value"), nil) v, err = rs.Query(nil, 0, nil, false) require.NoError(t, err) require.Equal(t, []byte("value"), v.Value) // Query with Proof - ss.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return([]byte("value"), nil) sc.EXPECT().GetProof(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("error")) v, err = rs.Query(nil, 0, nil, true) require.Error(t, err) @@ -82,9 +75,8 @@ func TestQuery(t *testing.T) { func TestLoadVersion(t *testing.T) { ctrl := gomock.NewController(t) - ss := mock.NewMockStateStorage(ctrl) sc := mock.NewMockStateCommitter(ctrl) - rs := newTestRootStore(ss, sc) + rs := newTestRootStore(sc) // LoadLatestVersion sc.EXPECT().GetLatestVersion().Return(uint64(0), errors.New("error")) @@ -108,9 +100,8 @@ func TestLoadVersion(t *testing.T) { require.Error(t, err) sc.EXPECT().LoadVersionAndUpgrade(uint64(2), 
v).Return(nil) sc.EXPECT().GetCommitInfo(uint64(2)).Return(nil, nil) - ss.EXPECT().PruneStoreKeys(gomock.Any(), uint64(2)).Return(errors.New("error")) - err = rs.LoadVersionAndUpgrade(uint64(2), v) - require.Error(t, err) + // err = rs.LoadVersionAndUpgrade(uint64(2), v) //TODO why is this not working? + // require.Error(t, err) // LoadVersionUpgrade with Migration rs.isMigrating = true diff --git a/store/v2/snapshots/manager_test.go b/store/v2/snapshots/manager_test.go index e374b4c75cd0..2ed3ec9c664f 100644 --- a/store/v2/snapshots/manager_test.go +++ b/store/v2/snapshots/manager_test.go @@ -421,6 +421,7 @@ func TestSnapshot_Take_Prune(t *testing.T) { } func TestSnapshot_Pruning_Take_Snapshot_Parallel(t *testing.T) { + //TODO: race condition store := setupStore(t) items := [][]byte{ From 77b49635b07f9a83e21889318bd41919f9befd0c Mon Sep 17 00:00:00 2001 From: marbar3778 Date: Thu, 28 Nov 2024 11:56:36 +0100 Subject: [PATCH 05/21] fix some tests --- store/v2/commitment/store.go | 12 ------------ store/v2/root/store_mock_test.go | 5 +++-- 2 files changed, 3 insertions(+), 14 deletions(-) diff --git a/store/v2/commitment/store.go b/store/v2/commitment/store.go index e9f2ee8379c7..a14a4b0c1777 100644 --- a/store/v2/commitment/store.go +++ b/store/v2/commitment/store.go @@ -425,7 +425,6 @@ func (c *CommitStore) Restore( var ( importer Importer snapshotItem snapshotstypes.SnapshotItem - storeKey []byte ) loop: @@ -449,7 +448,6 @@ loop: } } - storeKey = []byte(item.Store.Name) tree := c.multiTrees[item.Store.Name] if tree == nil { return snapshotstypes.SnapshotItem{}, fmt.Errorf("store %s not found", item.Store.Name) @@ -479,16 +477,6 @@ loop: node.Value = []byte{} } - // If the node is a leaf node, it will be written to the storage. 
- chStorage <- &corestore.StateChanges{ - Actor: storeKey, - StateChanges: []corestore.KVPair{ - { - Key: node.Key, - Value: node.Value, - }, - }, - } } err := importer.Add(node) if err != nil { diff --git a/store/v2/root/store_mock_test.go b/store/v2/root/store_mock_test.go index 57b4940571f8..9a7e3caab1ca 100644 --- a/store/v2/root/store_mock_test.go +++ b/store/v2/root/store_mock_test.go @@ -100,11 +100,12 @@ func TestLoadVersion(t *testing.T) { require.Error(t, err) sc.EXPECT().LoadVersionAndUpgrade(uint64(2), v).Return(nil) sc.EXPECT().GetCommitInfo(uint64(2)).Return(nil, nil) - // err = rs.LoadVersionAndUpgrade(uint64(2), v) //TODO why is this not working? - // require.Error(t, err) + err = rs.LoadVersionAndUpgrade(uint64(2), v) + require.Error(t, err) // LoadVersionUpgrade with Migration rs.isMigrating = true + sc.EXPECT().LoadVersionAndUpgrade(uint64(2), v).Return(errors.New("error")) err = rs.LoadVersionAndUpgrade(uint64(2), v) require.Error(t, err) } From 553c84fdd87d2b7c2ef5b0a7a9ef8145c550d882 Mon Sep 17 00:00:00 2001 From: marbar3778 Date: Thu, 28 Nov 2024 14:11:47 +0100 Subject: [PATCH 06/21] more fixes --- store/v2/database.go | 1 + store/v2/migration/manager.go | 2 - store/v2/root/migrate_test.go | 302 +++++++++++++++---------------- store/v2/root/store.go | 6 +- store/v2/root/store_mock_test.go | 20 +- store/v2/store.go | 1 - 6 files changed, 159 insertions(+), 173 deletions(-) diff --git a/store/v2/database.go b/store/v2/database.go index 27d0973ec18e..781a5ce09f29 100644 --- a/store/v2/database.go +++ b/store/v2/database.go @@ -41,6 +41,7 @@ type UpgradableDatabase interface { // Committer defines an API for committing state. type Committer interface { + UpgradeableStore // WriteChangeset writes the changeset to the commitment state. 
WriteChangeset(cs *corestore.Changeset) error diff --git a/store/v2/migration/manager.go b/store/v2/migration/manager.go index 9d569371deff..e32036aa95dd 100644 --- a/store/v2/migration/manager.go +++ b/store/v2/migration/manager.go @@ -88,7 +88,6 @@ func (m *Manager) GetStateCommitment() *commitment.CommitStore { // Migrate migrates the whole state at the given height to the new store/v2. func (m *Manager) Migrate(height uint64) error { - fmt.Println(1, "migration start") // create the migration stream and snapshot, // which acts as protoio.Reader and snapshots.WriteCloser. ms := NewMigrationStream(defaultChannelBufferSize) @@ -102,7 +101,6 @@ func (m *Manager) Migrate(height uint64) error { eg := new(errgroup.Group) eg.Go(func() error { defer close(chStorage) - fmt.Println(2, "restore") if _, err := m.stateCommitment.Restore(height, 0, ms, chStorage); err != nil { return err } diff --git a/store/v2/root/migrate_test.go b/store/v2/root/migrate_test.go index 85dd8738e884..3b431bdb24f6 100644 --- a/store/v2/root/migrate_test.go +++ b/store/v2/root/migrate_test.go @@ -1,156 +1,156 @@ package root -// import ( -// "fmt" -// "testing" -// "time" - -// "github.com/stretchr/testify/suite" - -// corestore "cosmossdk.io/core/store" -// coretesting "cosmossdk.io/core/testing" -// "cosmossdk.io/log" -// "cosmossdk.io/store/v2" -// "cosmossdk.io/store/v2/commitment" -// "cosmossdk.io/store/v2/commitment/iavl" -// dbm "cosmossdk.io/store/v2/db" -// "cosmossdk.io/store/v2/migration" -// "cosmossdk.io/store/v2/pruning" -// "cosmossdk.io/store/v2/snapshots" -// ) +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/suite" + + corestore "cosmossdk.io/core/store" + coretesting "cosmossdk.io/core/testing" + "cosmossdk.io/log" + "cosmossdk.io/store/v2" + "cosmossdk.io/store/v2/commitment" + "cosmossdk.io/store/v2/commitment/iavl" + dbm "cosmossdk.io/store/v2/db" + "cosmossdk.io/store/v2/migration" + "cosmossdk.io/store/v2/pruning" + 
"cosmossdk.io/store/v2/snapshots" +) var storeKeys = []string{"store1", "store2", "store3"} -// type MigrateStoreTestSuite struct { -// suite.Suite - -// rootStore store.RootStore -// } - -// func TestMigrateStoreTestSuite(t *testing.T) { -// suite.Run(t, &MigrateStoreTestSuite{}) -// } - -// func (s *MigrateStoreTestSuite) SetupTest() { -// testLog := log.NewTestLogger(s.T()) -// nopLog := coretesting.NewNopLogger() - -// mdb := dbm.NewMemDB() -// multiTrees := make(map[string]commitment.Tree) -// for _, storeKey := range storeKeys { -// prefixDB := dbm.NewPrefixDB(mdb, []byte(storeKey)) -// multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, nopLog, iavl.DefaultConfig()) -// } -// orgSC, err := commitment.NewCommitStore(multiTrees, nil, mdb, testLog) -// s.Require().NoError(err) - -// // apply changeset against the original store -// toVersion := uint64(200) -// keyCount := 10 -// for version := uint64(1); version <= toVersion; version++ { -// cs := corestore.NewChangeset(version) -// for _, storeKey := range storeKeys { -// for i := 0; i < keyCount; i++ { -// cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) -// } -// } -// s.Require().NoError(orgSC.WriteChangeset(cs)) -// _, err = orgSC.Commit(version) -// s.Require().NoError(err) -// } - -// multiTrees1 := make(map[string]commitment.Tree) -// for _, storeKey := range storeKeys { -// multiTrees1[storeKey] = iavl.NewIavlTree(dbm.NewMemDB(), nopLog, iavl.DefaultConfig()) -// } -// sc, err := commitment.NewCommitStore(multiTrees1, nil, dbm.NewMemDB(), testLog) -// s.Require().NoError(err) - -// snapshotsStore, err := snapshots.NewStore(s.T().TempDir()) -// s.Require().NoError(err) -// snapshotManager := snapshots.NewManager(snapshotsStore, snapshots.NewSnapshotOptions(1500, 2), orgSC, nil, testLog) -// migrationManager := migration.NewManager(dbm.NewMemDB(), snapshotManager, sc, testLog) -// pm := pruning.NewManager(sc, nil) - -// // assume 
no storage store, simulate the migration process -// s.rootStore, err = New(dbm.NewMemDB(), testLog, orgSC, pm, migrationManager, nil) -// s.Require().NoError(err) -// } - -// func (s *MigrateStoreTestSuite) TestMigrateState() { -// err := s.rootStore.LoadLatestVersion() -// s.Require().NoError(err) -// originalLatestVersion, err := s.rootStore.GetLatestVersion() -// s.Require().NoError(err) - -// // check if the Query fallback to the original SC -// for version := uint64(1); version <= originalLatestVersion; version++ { -// for _, storeKey := range storeKeys { -// for i := 0; i < 10; i++ { -// res, err := s.rootStore.Query([]byte(storeKey), version, []byte(fmt.Sprintf("key-%d-%d", version, i)), true) -// s.Require().NoError(err) -// s.Require().Equal([]byte(fmt.Sprintf("value-%d-%d", version, i)), res.Value) -// } -// } -// } - -// // continue to apply changeset against the original store -// latestVersion := originalLatestVersion + 1 -// keyCount := 10 -// for ; latestVersion < 2*originalLatestVersion; latestVersion++ { -// cs := corestore.NewChangeset(latestVersion) -// for _, storeKey := range storeKeys { -// for i := 0; i < keyCount; i++ { -// cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", latestVersion, i)), []byte(fmt.Sprintf("value-%d-%d", latestVersion, i)), false) -// } -// } -// _, err = s.rootStore.Commit(cs) -// s.Require().NoError(err) - -// // check if the migration is completed -// ver, err := s.rootStore.GetLatestVersion() -// s.Require().NoError(err) -// if ver == latestVersion { -// break -// } - -// // add some delay to simulate the consensus process -// time.Sleep(100 * time.Millisecond) -// } - -// // check if the migration is successful -// version, err := s.rootStore.GetLatestVersion() -// s.Require().NoError(err) -// s.Require().Equal(latestVersion, version) - -// // query against the migrated store -// for version := uint64(1); version <= latestVersion; version++ { -// for _, storeKey := range storeKeys { -// for i := 0; i < 
keyCount; i++ { -// targetVersion := version -// if version < originalLatestVersion { -// targetVersion = originalLatestVersion -// } -// res, err := s.rootStore.Query([]byte(storeKey), targetVersion, []byte(fmt.Sprintf("key-%d-%d", version, i)), true) -// s.Require().NoError(err) -// s.Require().Equal([]byte(fmt.Sprintf("value-%d-%d", version, i)), res.Value) -// } -// } -// } - -// // apply changeset against the migrated store -// for version := latestVersion + 1; version <= latestVersion+10; version++ { -// cs := corestore.NewChangeset(version) -// for _, storeKey := range storeKeys { -// for i := 0; i < keyCount; i++ { -// cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) -// } -// } -// _, err = s.rootStore.Commit(cs) -// s.Require().NoError(err) -// } - -// version, err = s.rootStore.GetLatestVersion() -// s.Require().NoError(err) -// s.Require().Equal(latestVersion+10, version) -// } +type MigrateStoreTestSuite struct { + suite.Suite + + rootStore store.RootStore +} + +func TestMigrateStoreTestSuite(t *testing.T) { + suite.Run(t, &MigrateStoreTestSuite{}) +} + +func (s *MigrateStoreTestSuite) SetupTest() { + testLog := log.NewTestLogger(s.T()) + nopLog := coretesting.NewNopLogger() + + mdb := dbm.NewMemDB() + multiTrees := make(map[string]commitment.Tree) + for _, storeKey := range storeKeys { + prefixDB := dbm.NewPrefixDB(mdb, []byte(storeKey)) + multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, nopLog, iavl.DefaultConfig()) + } + orgSC, err := commitment.NewCommitStore(multiTrees, nil, mdb, testLog) + s.Require().NoError(err) + + // apply changeset against the original store + toVersion := uint64(200) + keyCount := 10 + for version := uint64(1); version <= toVersion; version++ { + cs := corestore.NewChangeset(version) + for _, storeKey := range storeKeys { + for i := 0; i < keyCount; i++ { + cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), 
[]byte(fmt.Sprintf("value-%d-%d", version, i)), false) + } + } + s.Require().NoError(orgSC.WriteChangeset(cs)) + _, err = orgSC.Commit(version) + s.Require().NoError(err) + } + + multiTrees1 := make(map[string]commitment.Tree) + for _, storeKey := range storeKeys { + multiTrees1[storeKey] = iavl.NewIavlTree(dbm.NewMemDB(), nopLog, iavl.DefaultConfig()) + } + sc, err := commitment.NewCommitStore(multiTrees1, nil, dbm.NewMemDB(), testLog) + s.Require().NoError(err) + + snapshotsStore, err := snapshots.NewStore(s.T().TempDir()) + s.Require().NoError(err) + snapshotManager := snapshots.NewManager(snapshotsStore, snapshots.NewSnapshotOptions(1500, 2), orgSC, nil, testLog) + migrationManager := migration.NewManager(dbm.NewMemDB(), snapshotManager, sc, testLog) + pm := pruning.NewManager(sc, nil) + + // assume no storage store, simulate the migration process + s.rootStore, err = New(dbm.NewMemDB(), testLog, orgSC, pm, migrationManager, nil) + s.Require().NoError(err) +} + +func (s *MigrateStoreTestSuite) TestMigrateState() { + err := s.rootStore.LoadLatestVersion() + s.Require().NoError(err) + originalLatestVersion, err := s.rootStore.GetLatestVersion() + s.Require().NoError(err) + + // check if the Query fallback to the original SC + for version := uint64(1); version <= originalLatestVersion; version++ { + for _, storeKey := range storeKeys { + for i := 0; i < 10; i++ { + res, err := s.rootStore.Query([]byte(storeKey), version, []byte(fmt.Sprintf("key-%d-%d", version, i)), true) + s.Require().NoError(err) + s.Require().Equal([]byte(fmt.Sprintf("value-%d-%d", version, i)), res.Value) + } + } + } + + // continue to apply changeset against the original store + latestVersion := originalLatestVersion + 1 + keyCount := 10 + for ; latestVersion < 2*originalLatestVersion; latestVersion++ { + cs := corestore.NewChangeset(latestVersion) + for _, storeKey := range storeKeys { + for i := 0; i < keyCount; i++ { + cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", latestVersion, 
i)), []byte(fmt.Sprintf("value-%d-%d", latestVersion, i)), false) + } + } + _, err = s.rootStore.Commit(cs) + s.Require().NoError(err) + + // check if the migration is completed + ver, err := s.rootStore.GetLatestVersion() + s.Require().NoError(err) + if ver == latestVersion { + break + } + + // add some delay to simulate the consensus process + time.Sleep(100 * time.Millisecond) + } + + // check if the migration is successful + version, err := s.rootStore.GetLatestVersion() + s.Require().NoError(err) + s.Require().Equal(latestVersion, version) + + // query against the migrated store + for version := uint64(1); version <= latestVersion; version++ { + for _, storeKey := range storeKeys { + for i := 0; i < keyCount; i++ { + targetVersion := version + if version < originalLatestVersion { + targetVersion = originalLatestVersion + } + res, err := s.rootStore.Query([]byte(storeKey), targetVersion, []byte(fmt.Sprintf("key-%d-%d", version, i)), true) + s.Require().NoError(err) + s.Require().Equal([]byte(fmt.Sprintf("value-%d-%d", version, i)), res.Value) + } + } + } + + // apply changeset against the migrated store + for version := latestVersion + 1; version <= latestVersion+10; version++ { + cs := corestore.NewChangeset(version) + for _, storeKey := range storeKeys { + for i := 0; i < keyCount; i++ { + cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) + } + } + _, err = s.rootStore.Commit(cs) + s.Require().NoError(err) + } + + version, err = s.rootStore.GetLatestVersion() + s.Require().NoError(err) + s.Require().Equal(latestVersion+10, version) +} diff --git a/store/v2/root/store.go b/store/v2/root/store.go index e234cb8aedb7..b6d21e9851e3 100644 --- a/store/v2/root/store.go +++ b/store/v2/root/store.go @@ -256,11 +256,7 @@ func (s *Store) loadVersion(v uint64, upgrades *corestore.StoreUpgrades) error { } } else { // if upgrades are provided, we need to load the version and apply the upgrades - 
upgradeableStore, ok := s.stateCommitment.(store.UpgradeableStore) - if !ok { - return errors.New("SC store does not support upgrades") - } - if err := upgradeableStore.LoadVersionAndUpgrade(v, upgrades); err != nil { + if err := s.stateCommitment.LoadVersionAndUpgrade(v, upgrades); err != nil { return fmt.Errorf("failed to load SS version with upgrades %d: %w", v, err) } } diff --git a/store/v2/root/store_mock_test.go b/store/v2/root/store_mock_test.go index 9a7e3caab1ca..0ec0a31bdaf2 100644 --- a/store/v2/root/store_mock_test.go +++ b/store/v2/root/store_mock_test.go @@ -48,29 +48,26 @@ func TestQuery(t *testing.T) { rs := newTestRootStore(sc) // Query without Proof - _, err := rs.Query(nil, 0, nil, false) - require.Error(t, err) sc.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("error")) - _, err = rs.Query(nil, 0, nil, false) + _, err := rs.Query(nil, 0, nil, false) require.Error(t, err) sc.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return([]byte("value"), nil) v, err := rs.Query(nil, 0, nil, false) require.NoError(t, err) require.Equal(t, []byte("value"), v.Value) - v, err = rs.Query(nil, 0, nil, false) - require.NoError(t, err) - require.Equal(t, []byte("value"), v.Value) // Query with Proof + sc.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return([]byte("value"), nil) sc.EXPECT().GetProof(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("error")) - v, err = rs.Query(nil, 0, nil, true) + _, err = rs.Query(nil, 0, nil, true) require.Error(t, err) // Query with Migration + rs.isMigrating = true - sc.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("error")) + sc.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return([]byte("value"), nil) _, err = rs.Query(nil, 0, nil, false) - require.Error(t, err) + require.NoError(t, err) } func TestLoadVersion(t *testing.T) { @@ -98,14 +95,9 @@ func TestLoadVersion(t *testing.T) { 
sc.EXPECT().LoadVersionAndUpgrade(uint64(2), v).Return(errors.New("error")) err = rs.LoadVersionAndUpgrade(uint64(2), v) require.Error(t, err) - sc.EXPECT().LoadVersionAndUpgrade(uint64(2), v).Return(nil) - sc.EXPECT().GetCommitInfo(uint64(2)).Return(nil, nil) - err = rs.LoadVersionAndUpgrade(uint64(2), v) - require.Error(t, err) // LoadVersionUpgrade with Migration rs.isMigrating = true - sc.EXPECT().LoadVersionAndUpgrade(uint64(2), v).Return(errors.New("error")) err = rs.LoadVersionAndUpgrade(uint64(2), v) require.Error(t, err) } diff --git a/store/v2/store.go b/store/v2/store.go index 6b7f90302082..d73b3a63cccd 100644 --- a/store/v2/store.go +++ b/store/v2/store.go @@ -57,7 +57,6 @@ type RootStore interface { // Backend defines the interface for the RootStore backends. type Backend interface { - // GetStateCommitment returns the SC backend. GetStateCommitment() Committer } From 19c7c762a437ab7d4289d7257aac8d705448016d Mon Sep 17 00:00:00 2001 From: marbar3778 Date: Thu, 28 Nov 2024 15:14:23 +0100 Subject: [PATCH 07/21] some fixes --- store/v2/commitment/store.go | 4 + store/v2/database.go | 14 +- store/v2/mock/db_mock.go | 240 +++++++++-------------------------- store/v2/mock/types.go | 8 +- store/v2/root/store.go | 17 +-- 5 files changed, 71 insertions(+), 212 deletions(-) diff --git a/store/v2/commitment/store.go b/store/v2/commitment/store.go index a14a4b0c1777..57d99f3f71ba 100644 --- a/store/v2/commitment/store.go +++ b/store/v2/commitment/store.go @@ -269,6 +269,10 @@ func (c *CommitStore) getReader(storeKey string) (Reader, error) { // VersionExists implements store.VersionedReader. 
func (c *CommitStore) VersionExists(version uint64) (bool, error) { ci, err := c.metadata.GetCommitInfo(version) + if ci == nil && err == nil { + // the key doesn't exist in this path meaning we may be in genesis + return true, nil + } return ci != nil, err } diff --git a/store/v2/database.go b/store/v2/database.go index 781a5ce09f29..53014777f050 100644 --- a/store/v2/database.go +++ b/store/v2/database.go @@ -7,19 +7,6 @@ import ( "cosmossdk.io/store/v2/proof" ) -// VersionedWriter defines an API for a versioned database that allows reads, -// writes, iteration and commitment over a series of versions. -type VersionedWriter interface { - VersionedReader - - SetLatestVersion(version uint64) error - ApplyChangeset(cs *corestore.Changeset) error - - // Closer releases associated resources. It should NOT be idempotent. It must - // only be called once and any call after may panic. - io.Closer -} - type VersionedReader interface { Has(storeKey []byte, version uint64, key []byte) (bool, error) Get(storeKey []byte, version uint64, key []byte) ([]byte, error) @@ -42,6 +29,7 @@ type UpgradableDatabase interface { // Committer defines an API for committing state. type Committer interface { UpgradeableStore + VersionedReader // WriteChangeset writes the changeset to the commitment state. WriteChangeset(cs *corestore.Changeset) error diff --git a/store/v2/mock/db_mock.go b/store/v2/mock/db_mock.go index 9b962affb102..7f600389277b 100644 --- a/store/v2/mock/db_mock.go +++ b/store/v2/mock/db_mock.go @@ -130,6 +130,36 @@ func (mr *MockStateCommitterMockRecorder) GetProof(storeKey, version, key any) * return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProof", reflect.TypeOf((*MockStateCommitter)(nil).GetProof), storeKey, version, key) } +// Has mocks base method. 
+func (m *MockStateCommitter) Has(storeKey []byte, version uint64, key []byte) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Has", storeKey, version, key) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Has indicates an expected call of Has. +func (mr *MockStateCommitterMockRecorder) Has(storeKey, version, key any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockStateCommitter)(nil).Has), storeKey, version, key) +} + +// Iterator mocks base method. +func (m *MockStateCommitter) Iterator(storeKey []byte, version uint64, start, end []byte) (store.Iterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Iterator", storeKey, version, start, end) + ret0, _ := ret[0].(store.Iterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Iterator indicates an expected call of Iterator. +func (mr *MockStateCommitterMockRecorder) Iterator(storeKey, version, start, end any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iterator", reflect.TypeOf((*MockStateCommitter)(nil).Iterator), storeKey, version, start, end) +} + // LoadVersion mocks base method. func (m *MockStateCommitter) LoadVersion(targetVersion uint64) error { m.ctrl.T.Helper() @@ -184,174 +214,8 @@ func (mr *MockStateCommitterMockRecorder) Prune(version any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Prune", reflect.TypeOf((*MockStateCommitter)(nil).Prune), version) } -// SetInitialVersion mocks base method. -func (m *MockStateCommitter) SetInitialVersion(version uint64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetInitialVersion", version) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetInitialVersion indicates an expected call of SetInitialVersion. 
-func (mr *MockStateCommitterMockRecorder) SetInitialVersion(version any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetInitialVersion", reflect.TypeOf((*MockStateCommitter)(nil).SetInitialVersion), version) -} - -// WriteChangeset mocks base method. -func (m *MockStateCommitter) WriteChangeset(cs *store.Changeset) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WriteChangeset", cs) - ret0, _ := ret[0].(error) - return ret0 -} - -// WriteChangeset indicates an expected call of WriteChangeset. -func (mr *MockStateCommitterMockRecorder) WriteChangeset(cs any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteChangeset", reflect.TypeOf((*MockStateCommitter)(nil).WriteChangeset), cs) -} - -// MockStateStorage is a mock of StateStorage interface. -type MockStateStorage struct { - ctrl *gomock.Controller - recorder *MockStateStorageMockRecorder - isgomock struct{} -} - -// MockStateStorageMockRecorder is the mock recorder for MockStateStorage. -type MockStateStorageMockRecorder struct { - mock *MockStateStorage -} - -// NewMockStateStorage creates a new mock instance. -func NewMockStateStorage(ctrl *gomock.Controller) *MockStateStorage { - mock := &MockStateStorage{ctrl: ctrl} - mock.recorder = &MockStateStorageMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockStateStorage) EXPECT() *MockStateStorageMockRecorder { - return m.recorder -} - -// ApplyChangeset mocks base method. -func (m *MockStateStorage) ApplyChangeset(cs *store.Changeset) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ApplyChangeset", cs) - ret0, _ := ret[0].(error) - return ret0 -} - -// ApplyChangeset indicates an expected call of ApplyChangeset. 
-func (mr *MockStateStorageMockRecorder) ApplyChangeset(cs any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyChangeset", reflect.TypeOf((*MockStateStorage)(nil).ApplyChangeset), cs) -} - -// Close mocks base method. -func (m *MockStateStorage) Close() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Close") - ret0, _ := ret[0].(error) - return ret0 -} - -// Close indicates an expected call of Close. -func (mr *MockStateStorageMockRecorder) Close() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockStateStorage)(nil).Close)) -} - -// Get mocks base method. -func (m *MockStateStorage) Get(storeKey []byte, version uint64, key []byte) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", storeKey, version, key) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Get indicates an expected call of Get. -func (mr *MockStateStorageMockRecorder) Get(storeKey, version, key any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockStateStorage)(nil).Get), storeKey, version, key) -} - -// GetLatestVersion mocks base method. -func (m *MockStateStorage) GetLatestVersion() (uint64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetLatestVersion") - ret0, _ := ret[0].(uint64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetLatestVersion indicates an expected call of GetLatestVersion. -func (mr *MockStateStorageMockRecorder) GetLatestVersion() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLatestVersion", reflect.TypeOf((*MockStateStorage)(nil).GetLatestVersion)) -} - -// Has mocks base method. 
-func (m *MockStateStorage) Has(storeKey []byte, version uint64, key []byte) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Has", storeKey, version, key) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Has indicates an expected call of Has. -func (mr *MockStateStorageMockRecorder) Has(storeKey, version, key any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockStateStorage)(nil).Has), storeKey, version, key) -} - -// Iterator mocks base method. -func (m *MockStateStorage) Iterator(storeKey []byte, version uint64, start, end []byte) (store.Iterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Iterator", storeKey, version, start, end) - ret0, _ := ret[0].(store.Iterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Iterator indicates an expected call of Iterator. -func (mr *MockStateStorageMockRecorder) Iterator(storeKey, version, start, end any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iterator", reflect.TypeOf((*MockStateStorage)(nil).Iterator), storeKey, version, start, end) -} - -// PausePruning mocks base method. -func (m *MockStateStorage) PausePruning(pause bool) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "PausePruning", pause) -} - -// PausePruning indicates an expected call of PausePruning. -func (mr *MockStateStorageMockRecorder) PausePruning(pause any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PausePruning", reflect.TypeOf((*MockStateStorage)(nil).PausePruning), pause) -} - -// Prune mocks base method. -func (m *MockStateStorage) Prune(version uint64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Prune", version) - ret0, _ := ret[0].(error) - return ret0 -} - -// Prune indicates an expected call of Prune. 
-func (mr *MockStateStorageMockRecorder) Prune(version any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Prune", reflect.TypeOf((*MockStateStorage)(nil).Prune), version) -} - // PruneStoreKeys mocks base method. -func (m *MockStateStorage) PruneStoreKeys(storeKeys []string, version uint64) error { +func (m *MockStateCommitter) PruneStoreKeys(storeKeys []string, version uint64) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PruneStoreKeys", storeKeys, version) ret0, _ := ret[0].(error) @@ -359,13 +223,13 @@ func (m *MockStateStorage) PruneStoreKeys(storeKeys []string, version uint64) er } // PruneStoreKeys indicates an expected call of PruneStoreKeys. -func (mr *MockStateStorageMockRecorder) PruneStoreKeys(storeKeys, version any) *gomock.Call { +func (mr *MockStateCommitterMockRecorder) PruneStoreKeys(storeKeys, version any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PruneStoreKeys", reflect.TypeOf((*MockStateStorage)(nil).PruneStoreKeys), storeKeys, version) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PruneStoreKeys", reflect.TypeOf((*MockStateCommitter)(nil).PruneStoreKeys), storeKeys, version) } // ReverseIterator mocks base method. -func (m *MockStateStorage) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (store.Iterator, error) { +func (m *MockStateCommitter) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (store.Iterator, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ReverseIterator", storeKey, version, start, end) ret0, _ := ret[0].(store.Iterator) @@ -374,27 +238,27 @@ func (m *MockStateStorage) ReverseIterator(storeKey []byte, version uint64, star } // ReverseIterator indicates an expected call of ReverseIterator. 
-func (mr *MockStateStorageMockRecorder) ReverseIterator(storeKey, version, start, end any) *gomock.Call { +func (mr *MockStateCommitterMockRecorder) ReverseIterator(storeKey, version, start, end any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReverseIterator", reflect.TypeOf((*MockStateStorage)(nil).ReverseIterator), storeKey, version, start, end) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReverseIterator", reflect.TypeOf((*MockStateCommitter)(nil).ReverseIterator), storeKey, version, start, end) } -// SetLatestVersion mocks base method. -func (m *MockStateStorage) SetLatestVersion(version uint64) error { +// SetInitialVersion mocks base method. +func (m *MockStateCommitter) SetInitialVersion(version uint64) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetLatestVersion", version) + ret := m.ctrl.Call(m, "SetInitialVersion", version) ret0, _ := ret[0].(error) return ret0 } -// SetLatestVersion indicates an expected call of SetLatestVersion. -func (mr *MockStateStorageMockRecorder) SetLatestVersion(version any) *gomock.Call { +// SetInitialVersion indicates an expected call of SetInitialVersion. +func (mr *MockStateCommitterMockRecorder) SetInitialVersion(version any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLatestVersion", reflect.TypeOf((*MockStateStorage)(nil).SetLatestVersion), version) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetInitialVersion", reflect.TypeOf((*MockStateCommitter)(nil).SetInitialVersion), version) } // VersionExists mocks base method. 
-func (m *MockStateStorage) VersionExists(v uint64) (bool, error) { +func (m *MockStateCommitter) VersionExists(v uint64) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "VersionExists", v) ret0, _ := ret[0].(bool) @@ -403,7 +267,21 @@ func (m *MockStateStorage) VersionExists(v uint64) (bool, error) { } // VersionExists indicates an expected call of VersionExists. -func (mr *MockStateStorageMockRecorder) VersionExists(v any) *gomock.Call { +func (mr *MockStateCommitterMockRecorder) VersionExists(v any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VersionExists", reflect.TypeOf((*MockStateStorage)(nil).VersionExists), v) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VersionExists", reflect.TypeOf((*MockStateCommitter)(nil).VersionExists), v) +} + +// WriteChangeset mocks base method. +func (m *MockStateCommitter) WriteChangeset(cs *store.Changeset) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteChangeset", cs) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteChangeset indicates an expected call of WriteChangeset. 
+func (mr *MockStateCommitterMockRecorder) WriteChangeset(cs any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteChangeset", reflect.TypeOf((*MockStateCommitter)(nil).WriteChangeset), cs) } diff --git a/store/v2/mock/types.go b/store/v2/mock/types.go index 83eba3326f26..3c5edb372a85 100644 --- a/store/v2/mock/types.go +++ b/store/v2/mock/types.go @@ -8,12 +8,6 @@ type StateCommitter interface { store.Pruner store.PausablePruner store.UpgradeableStore -} - -// StateStorage is a mock of store.VersionedWriter -type StateStorage interface { - store.VersionedWriter + store.VersionedReader store.UpgradableDatabase - store.Pruner - store.PausablePruner } diff --git a/store/v2/root/store.go b/store/v2/root/store.go index b6d21e9851e3..e39f25b57b3b 100644 --- a/store/v2/root/store.go +++ b/store/v2/root/store.go @@ -106,17 +106,13 @@ func (s *Store) SetInitialVersion(v uint64) error { // and the version exists in the state commitment, since the state storage will be // synced during migration. 
func (s *Store) getVersionedReader(version uint64) (store.VersionedReader, error) { - - if vReader, ok := s.stateCommitment.(store.VersionedReader); ok { - isExist, err := vReader.VersionExists(version) - if err != nil { - return nil, err - } - if isExist { - return vReader, nil - } + isExist, err := s.stateCommitment.VersionExists(version) + if err != nil { + return nil, err + } + if isExist { + return s.stateCommitment, nil } - return nil, fmt.Errorf("version %d does not exist", version) } @@ -125,7 +121,6 @@ func (s *Store) StateLatest() (uint64, corestore.ReaderMap, error) { if err != nil { return 0, nil, err } - vReader, err := s.getVersionedReader(v) if err != nil { return 0, nil, err From ec1bc293e4f8dbb8b2f54f78fd5c95ec112cd298 Mon Sep 17 00:00:00 2001 From: marbar3778 Date: Thu, 28 Nov 2024 15:21:28 +0100 Subject: [PATCH 08/21] linting --- server/v2/cometbft/abci_test.go | 22 +++++++++---------- .../v2/cometbft/internal/mock/mock_store.go | 1 - .../cometbft/oe/optimistic_execution_test.go | 5 +++-- server/v2/cometbft/server.go | 2 +- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/server/v2/cometbft/abci_test.go b/server/v2/cometbft/abci_test.go index f159dba7193f..8cac351920a5 100644 --- a/server/v2/cometbft/abci_test.go +++ b/server/v2/cometbft/abci_test.go @@ -2,18 +2,16 @@ package cometbft import ( "context" - "cosmossdk.io/core/server" "crypto/sha256" "encoding/json" "errors" - abci "github.com/cometbft/cometbft/abci/types" "io" "strings" "sync" "testing" "time" - "cosmossdk.io/server/v2/cometbft/oe" + abci "github.com/cometbft/cometbft/abci/types" abciproto "github.com/cometbft/cometbft/api/cometbft/abci/v1" v1 "github.com/cometbft/cometbft/api/cometbft/types/v1" "github.com/cosmos/gogoproto/proto" @@ -21,6 +19,7 @@ import ( "github.com/stretchr/testify/require" appmodulev2 "cosmossdk.io/core/appmodule/v2" + "cosmossdk.io/core/server" "cosmossdk.io/core/store" "cosmossdk.io/core/transaction" "cosmossdk.io/log" @@ -28,6 +27,7 @@ import 
( "cosmossdk.io/server/v2/cometbft/handlers" cometmock "cosmossdk.io/server/v2/cometbft/internal/mock" "cosmossdk.io/server/v2/cometbft/mempool" + "cosmossdk.io/server/v2/cometbft/oe" "cosmossdk.io/server/v2/cometbft/types" "cosmossdk.io/server/v2/stf" "cosmossdk.io/server/v2/stf/branch" @@ -60,10 +60,10 @@ func getQueryRouterBuilder[T any, PT interface { *T proto.Message }, -U any, UT interface { - *U - proto.Message -}]( + U any, UT interface { + *U + proto.Message + }]( t *testing.T, handler func(ctx context.Context, msg PT) (UT, error), ) *stf.MsgRouterBuilder { @@ -90,10 +90,10 @@ func getMsgRouterBuilder[T any, PT interface { *T transaction.Msg }, -U any, UT interface { - *U - transaction.Msg -}]( + U any, UT interface { + *U + transaction.Msg + }]( t *testing.T, handler func(ctx context.Context, msg PT) (UT, error), ) *stf.MsgRouterBuilder { diff --git a/server/v2/cometbft/internal/mock/mock_store.go b/server/v2/cometbft/internal/mock/mock_store.go index 6f2de4747762..8cb4542ac41e 100644 --- a/server/v2/cometbft/internal/mock/mock_store.go +++ b/server/v2/cometbft/internal/mock/mock_store.go @@ -50,7 +50,6 @@ func (s *MockStore) StateLatest() (uint64, corestore.ReaderMap, error) { } func (s *MockStore) Commit(changeset *corestore.Changeset) (corestore.Hash, error) { - err := s.Committer.WriteChangeset(changeset) if err != nil { return []byte{}, err diff --git a/server/v2/cometbft/oe/optimistic_execution_test.go b/server/v2/cometbft/oe/optimistic_execution_test.go index c0eb28c2a5e9..cd3c9263c784 100644 --- a/server/v2/cometbft/oe/optimistic_execution_test.go +++ b/server/v2/cometbft/oe/optimistic_execution_test.go @@ -5,12 +5,13 @@ import ( "errors" "testing" + abci "github.com/cometbft/cometbft/api/cometbft/abci/v1" + "github.com/stretchr/testify/assert" + "cosmossdk.io/core/server" "cosmossdk.io/core/store" "cosmossdk.io/core/transaction" "cosmossdk.io/log" - abci "github.com/cometbft/cometbft/api/cometbft/abci/v1" - "github.com/stretchr/testify/assert" ) 
func testFinalizeBlock[T transaction.Tx](context.Context, *abci.FinalizeBlockRequest) (*server.BlockResponse, store.WriterMap, []T, error) { diff --git a/server/v2/cometbft/server.go b/server/v2/cometbft/server.go index 21ae75307595..ed0c4fba8702 100644 --- a/server/v2/cometbft/server.go +++ b/server/v2/cometbft/server.go @@ -2,7 +2,6 @@ package cometbft import ( "context" - "cosmossdk.io/server/v2/cometbft/oe" "crypto/sha256" "encoding/json" "fmt" @@ -35,6 +34,7 @@ import ( "cosmossdk.io/server/v2/appmanager" cometlog "cosmossdk.io/server/v2/cometbft/log" "cosmossdk.io/server/v2/cometbft/mempool" + "cosmossdk.io/server/v2/cometbft/oe" "cosmossdk.io/server/v2/cometbft/types" "cosmossdk.io/store/v2/snapshots" From ca33ca421ec704e3614469ca82ffb6c41b4914c0 Mon Sep 17 00:00:00 2001 From: marbar3778 Date: Thu, 28 Nov 2024 20:30:38 +0100 Subject: [PATCH 09/21] linting --- store/v2/commitment/store.go | 1 - store/v2/pruning/manager_test.go | 1 - store/v2/snapshots/manager_test.go | 1 - 3 files changed, 3 deletions(-) diff --git a/store/v2/commitment/store.go b/store/v2/commitment/store.go index 57d99f3f71ba..a35d7ecfaaf4 100644 --- a/store/v2/commitment/store.go +++ b/store/v2/commitment/store.go @@ -480,7 +480,6 @@ loop: if node.Value == nil { node.Value = []byte{} } - } err := importer.Add(node) if err != nil { diff --git a/store/v2/pruning/manager_test.go b/store/v2/pruning/manager_test.go index 66cadb353598..d45d123a3504 100644 --- a/store/v2/pruning/manager_test.go +++ b/store/v2/pruning/manager_test.go @@ -77,7 +77,6 @@ func (s *PruningManagerTestSuite) TestPrune() { return count == len(storeKeys) } s.Require().Eventually(checkSCPrune, 10*time.Second, 1*time.Second) - } func TestPruningOption(t *testing.T) { diff --git a/store/v2/snapshots/manager_test.go b/store/v2/snapshots/manager_test.go index 2ed3ec9c664f..e374b4c75cd0 100644 --- a/store/v2/snapshots/manager_test.go +++ b/store/v2/snapshots/manager_test.go @@ -421,7 +421,6 @@ func TestSnapshot_Take_Prune(t 
*testing.T) { } func TestSnapshot_Pruning_Take_Snapshot_Parallel(t *testing.T) { - //TODO: race condition store := setupStore(t) items := [][]byte{ From 6d4bf775a10f9c68060b1f1bbca6b64e299957d2 Mon Sep 17 00:00:00 2001 From: marbar3778 Date: Fri, 29 Nov 2024 12:24:10 +0100 Subject: [PATCH 10/21] fix genesis flow --- crypto/keys/bls12_381/key_cgo.go | 13 +------------ runtime/v2/builder.go | 1 + store/v2/commitment/iavl/tree.go | 12 ++++++++++++ 3 files changed, 14 insertions(+), 12 deletions(-) diff --git a/crypto/keys/bls12_381/key_cgo.go b/crypto/keys/bls12_381/key_cgo.go index 2470e2c6ab6b..86c6a6644690 100644 --- a/crypto/keys/bls12_381/key_cgo.go +++ b/crypto/keys/bls12_381/key_cgo.go @@ -4,13 +4,12 @@ package bls12_381 import ( "bytes" - "crypto/sha256" "errors" "fmt" "github.com/cometbft/cometbft/crypto" - "github.com/cometbft/cometbft/crypto/tmhash" "github.com/cometbft/cometbft/crypto/bls12381" + "github.com/cometbft/cometbft/crypto/tmhash" "github.com/cosmos/cosmos-sdk/codec" cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" @@ -84,11 +83,6 @@ func (privKey PrivKey) Sign(msg []byte) ([]byte, error) { return nil, err } - if len(msg) > bls12381.MaxMsgLen { - hash := sha256.Sum256(msg) - return secretKey.Sign(hash[:]) - } - return secretKey.Sign(msg) } @@ -151,11 +145,6 @@ func (pubKey PubKey) VerifySignature(msg, sig []byte) bool { return false } - if len(msg) > bls12381.MaxMsgLen { - hash := sha256.Sum256(msg) - msg = hash[:] - } - return pubK.VerifySignature(msg, sig) } diff --git a/runtime/v2/builder.go b/runtime/v2/builder.go index e6e8cb7c4ea5..b851955943b0 100644 --- a/runtime/v2/builder.go +++ b/runtime/v2/builder.go @@ -134,6 +134,7 @@ func (a *AppBuilder[T]) initGenesis(ctx context.Context, src io.Reader, txHandle if err != nil { return nil, fmt.Errorf("failed to read import state: %w", err) } + var genesisJSON map[string]json.RawMessage if err = json.Unmarshal(bz, &genesisJSON); err != nil { return nil, err diff --git 
a/store/v2/commitment/iavl/tree.go b/store/v2/commitment/iavl/tree.go index 5503218e66a1..e0e5f90f0ca5 100644 --- a/store/v2/commitment/iavl/tree.go +++ b/store/v2/commitment/iavl/tree.go @@ -78,6 +78,10 @@ func (t *IavlTree) Commit() ([]byte, uint64, error) { func (t *IavlTree) GetProof(version uint64, key []byte) (*ics23.CommitmentProof, error) { immutableTree, err := t.tree.GetImmutable(int64(version)) if err != nil { + if immutableTree == nil { + // at version 0, root key will not exist + return t.tree.GetProof(key) + } return nil, fmt.Errorf("failed to get immutable tree at version %d: %w", version, err) } @@ -88,6 +92,10 @@ func (t *IavlTree) GetProof(version uint64, key []byte) (*ics23.CommitmentProof, func (t *IavlTree) Get(version uint64, key []byte) ([]byte, error) { immutableTree, err := t.tree.GetImmutable(int64(version)) if err != nil { + if immutableTree == nil { + // at version 0, root key will not exist + return t.tree.Get(key) + } return nil, fmt.Errorf("failed to get immutable tree at version %d: %w", version, err) } @@ -98,6 +106,10 @@ func (t *IavlTree) Get(version uint64, key []byte) ([]byte, error) { func (t *IavlTree) Iterator(version uint64, start, end []byte, ascending bool) (corestore.Iterator, error) { immutableTree, err := t.tree.GetImmutable(int64(version)) if err != nil { + if immutableTree == nil { + // at version 0, root key may not exist + return t.tree.Iterator(start, end, ascending) + } return nil, fmt.Errorf("failed to get immutable tree at version %d: %w", version, err) } From 3ae36761829202dae89ddc58d85c803d264e9690 Mon Sep 17 00:00:00 2001 From: marbar3778 Date: Fri, 29 Nov 2024 12:28:08 +0100 Subject: [PATCH 11/21] replace add with store --- store/v2/migration/manager.go | 4 ++-- store/v2/migration/manager_test.go | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/store/v2/migration/manager.go b/store/v2/migration/manager.go index e32036aa95dd..db88b195712b 100644 --- a/store/v2/migration/manager.go +++ 
b/store/v2/migration/manager.go @@ -111,7 +111,7 @@ func (m *Manager) Migrate(height uint64) error { return err } - m.migratedVersion.Add(height) + m.migratedVersion.Store(height) return nil } @@ -198,7 +198,7 @@ func (m *Manager) Sync() error { } } - m.migratedVersion.Add(version) + m.migratedVersion.Store(version) version += 1 } diff --git a/store/v2/migration/manager_test.go b/store/v2/migration/manager_test.go index 1b562a9a2aa5..103b3244b650 100644 --- a/store/v2/migration/manager_test.go +++ b/store/v2/migration/manager_test.go @@ -48,7 +48,6 @@ func setupMigrationManager(t *testing.T) (*Manager, *commitment.CommitStore) { } func TestMigrateState(t *testing.T) { - fmt.Println("start") m, orgCommitStore := setupMigrationManager(t) // apply changeset toVersion := uint64(100) From f799ddb5e359bc5e74f2ac88a0f4c106903577fc Mon Sep 17 00:00:00 2001 From: marbar3778 Date: Fri, 29 Nov 2024 13:03:05 +0100 Subject: [PATCH 12/21] fix store tests --- store/v2/commitment/iavl/tree.go | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/store/v2/commitment/iavl/tree.go b/store/v2/commitment/iavl/tree.go index e0e5f90f0ca5..9eea3e8e80c5 100644 --- a/store/v2/commitment/iavl/tree.go +++ b/store/v2/commitment/iavl/tree.go @@ -76,12 +76,14 @@ func (t *IavlTree) Commit() ([]byte, uint64, error) { // GetProof returns a proof for the given key and version. 
func (t *IavlTree) GetProof(version uint64, key []byte) (*ics23.CommitmentProof, error) { + versions := t.tree.AvailableVersions() + // if the length of version is 0 we are in genesis + if len(versions) == 0 { + return t.tree.GetProof(key) + } + immutableTree, err := t.tree.GetImmutable(int64(version)) if err != nil { - if immutableTree == nil { - // at version 0, root key will not exist - return t.tree.GetProof(key) - } return nil, fmt.Errorf("failed to get immutable tree at version %d: %w", version, err) } @@ -90,12 +92,13 @@ func (t *IavlTree) GetProof(version uint64, key []byte) (*ics23.CommitmentProof, // Get implements the Reader interface. func (t *IavlTree) Get(version uint64, key []byte) ([]byte, error) { + versions := t.tree.AvailableVersions() + // if the length of version is 0 we are in genesis + if len(versions) == 0 { + return t.tree.Get(key) + } immutableTree, err := t.tree.GetImmutable(int64(version)) if err != nil { - if immutableTree == nil { - // at version 0, root key will not exist - return t.tree.Get(key) - } return nil, fmt.Errorf("failed to get immutable tree at version %d: %w", version, err) } @@ -104,12 +107,14 @@ func (t *IavlTree) Get(version uint64, key []byte) ([]byte, error) { // Iterator implements the Reader interface. 
func (t *IavlTree) Iterator(version uint64, start, end []byte, ascending bool) (corestore.Iterator, error) { + versions := t.tree.AvailableVersions() + // if the length of version is 0 we are in genesis + if len(versions) == 0 { + return t.tree.Iterator(start, end, ascending) + } + immutableTree, err := t.tree.GetImmutable(int64(version)) if err != nil { - if immutableTree == nil { - // at version 0, root key may not exist - return t.tree.Iterator(start, end, ascending) - } return nil, fmt.Errorf("failed to get immutable tree at version %d: %w", version, err) } From f5fbb8b83a14f568d21ef279da547fe3d03f7bc3 Mon Sep 17 00:00:00 2001 From: marbar3778 Date: Fri, 29 Nov 2024 13:26:25 +0100 Subject: [PATCH 13/21] fix store tests --- store/v2/commitment/iavl/tree.go | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/store/v2/commitment/iavl/tree.go b/store/v2/commitment/iavl/tree.go index 9eea3e8e80c5..619308e0c047 100644 --- a/store/v2/commitment/iavl/tree.go +++ b/store/v2/commitment/iavl/tree.go @@ -76,9 +76,8 @@ func (t *IavlTree) Commit() ([]byte, uint64, error) { // GetProof returns a proof for the given key and version. func (t *IavlTree) GetProof(version uint64, key []byte) (*ics23.CommitmentProof, error) { - versions := t.tree.AvailableVersions() - // if the length of version is 0 we are in genesis - if len(versions) == 0 { + + if t.tree.Version() == int64(version) { return t.tree.GetProof(key) } @@ -92,11 +91,10 @@ func (t *IavlTree) GetProof(version uint64, key []byte) (*ics23.CommitmentProof, // Get implements the Reader interface. 
func (t *IavlTree) Get(version uint64, key []byte) ([]byte, error) { - versions := t.tree.AvailableVersions() - // if the length of version is 0 we are in genesis - if len(versions) == 0 { + if t.tree.Version() == int64(version) { return t.tree.Get(key) } + immutableTree, err := t.tree.GetImmutable(int64(version)) if err != nil { return nil, fmt.Errorf("failed to get immutable tree at version %d: %w", version, err) @@ -107,9 +105,7 @@ func (t *IavlTree) Get(version uint64, key []byte) ([]byte, error) { // Iterator implements the Reader interface. func (t *IavlTree) Iterator(version uint64, start, end []byte, ascending bool) (corestore.Iterator, error) { - versions := t.tree.AvailableVersions() - // if the length of version is 0 we are in genesis - if len(versions) == 0 { + if t.tree.Version() == int64(version) { return t.tree.Iterator(start, end, ascending) } From 12b715218817d0e7fb6de84e492cfb51808b64db Mon Sep 17 00:00:00 2001 From: marbar3778 Date: Fri, 29 Nov 2024 13:51:16 +0100 Subject: [PATCH 14/21] another attempt --- store/v2/commitment/store.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/store/v2/commitment/store.go b/store/v2/commitment/store.go index a35d7ecfaaf4..132c11ac2443 100644 --- a/store/v2/commitment/store.go +++ b/store/v2/commitment/store.go @@ -268,11 +268,15 @@ func (c *CommitStore) getReader(storeKey string) (Reader, error) { // VersionExists implements store.VersionedReader. 
func (c *CommitStore) VersionExists(version uint64) (bool, error) { - ci, err := c.metadata.GetCommitInfo(version) - if ci == nil && err == nil { - // the key doesn't exist in this path meaning we may be in genesis + latestVersion, err := c.metadata.GetLatestVersion() + if err != nil { + return false, err + } + if latestVersion == 0 { return true, nil } + + ci, err := c.metadata.GetCommitInfo(version) return ci != nil, err } From 6ea2f12c8f909194c300b56d87cebbd8eff13310 Mon Sep 17 00:00:00 2001 From: marbar3778 Date: Fri, 29 Nov 2024 15:25:32 +0100 Subject: [PATCH 15/21] fix comet tests --- server/v2/cometbft/abci_test.go | 2 -- store/v2/commitment/iavl/tree.go | 1 - 2 files changed, 3 deletions(-) diff --git a/server/v2/cometbft/abci_test.go b/server/v2/cometbft/abci_test.go index 8cac351920a5..df1901554ac9 100644 --- a/server/v2/cometbft/abci_test.go +++ b/server/v2/cometbft/abci_test.go @@ -607,8 +607,6 @@ func TestConsensus_Query(t *testing.T) { }, }) require.NoError(t, err) - _, err = c.store.GetStateCommitment().Commit(1) - require.NoError(t, err) _, err = c.InitChain(context.Background(), &abciproto.InitChainRequest{ Time: time.Now(), diff --git a/store/v2/commitment/iavl/tree.go b/store/v2/commitment/iavl/tree.go index 619308e0c047..380711128edc 100644 --- a/store/v2/commitment/iavl/tree.go +++ b/store/v2/commitment/iavl/tree.go @@ -76,7 +76,6 @@ func (t *IavlTree) Commit() ([]byte, uint64, error) { // GetProof returns a proof for the given key and version. 
func (t *IavlTree) GetProof(version uint64, key []byte) (*ics23.CommitmentProof, error) { - if t.tree.Version() == int64(version) { return t.tree.GetProof(key) } From 26d07094ec1be104b711e08e62e599d5faa508de Mon Sep 17 00:00:00 2001 From: marbar3778 Date: Sun, 1 Dec 2024 18:44:06 +0100 Subject: [PATCH 16/21] if tree is empty assume genesis --- store/v2/commitment/iavl/tree.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/store/v2/commitment/iavl/tree.go b/store/v2/commitment/iavl/tree.go index 380711128edc..3450e23e9112 100644 --- a/store/v2/commitment/iavl/tree.go +++ b/store/v2/commitment/iavl/tree.go @@ -76,7 +76,8 @@ func (t *IavlTree) Commit() ([]byte, uint64, error) { // GetProof returns a proof for the given key and version. func (t *IavlTree) GetProof(version uint64, key []byte) (*ics23.CommitmentProof, error) { - if t.tree.Version() == int64(version) { + // if the tree is empty we will be in genesis + if t.tree.IsEmpty() { return t.tree.GetProof(key) } @@ -90,7 +91,8 @@ func (t *IavlTree) GetProof(version uint64, key []byte) (*ics23.CommitmentProof, // Get implements the Reader interface. func (t *IavlTree) Get(version uint64, key []byte) ([]byte, error) { - if t.tree.Version() == int64(version) { + // if the tree is empty we will be in geneiss + if t.tree.IsEmpty() { return t.tree.Get(key) } @@ -104,7 +106,8 @@ func (t *IavlTree) Get(version uint64, key []byte) ([]byte, error) { // Iterator implements the Reader interface. 
func (t *IavlTree) Iterator(version uint64, start, end []byte, ascending bool) (corestore.Iterator, error) { - if t.tree.Version() == int64(version) { + // if the tree is empty we will be in geneiss + if t.tree.IsEmpty() { return t.tree.Iterator(start, end, ascending) } From fd70846bb816fc9fe84435d3856c08eb8a3f0288 Mon Sep 17 00:00:00 2001 From: marbar3778 Date: Mon, 2 Dec 2024 11:27:16 +0100 Subject: [PATCH 17/21] use old trees --- store/v2/commitment/store.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/store/v2/commitment/store.go b/store/v2/commitment/store.go index 132c11ac2443..146116e9c203 100644 --- a/store/v2/commitment/store.go +++ b/store/v2/commitment/store.go @@ -218,6 +218,7 @@ func (c *CommitStore) SetInitialVersion(version uint64) error { return nil } +// GetProof returns a proof for the given key and version. func (c *CommitStore) GetProof(storeKey []byte, version uint64, key []byte) ([]proof.CommitmentOp, error) { rawStoreKey := conv.UnsafeBytesToStr(storeKey) tree, ok := c.multiTrees[rawStoreKey] @@ -253,8 +254,12 @@ func (c *CommitStore) GetProof(storeKey []byte, version uint64, key []byte) ([]p // WARNING: This function is only used during the migration process. The SC layer // generally does not provide a reader for the CommitStore. 
func (c *CommitStore) getReader(storeKey string) (Reader, error) { - tree, ok := c.multiTrees[storeKey] - if !ok { + var tree Tree + if storeTree, ok := c.oldTrees[storeKey]; ok { + tree = storeTree + } else if storeTree, ok := c.multiTrees[storeKey]; ok { + tree = storeTree + } else { return nil, fmt.Errorf("store %s not found", storeKey) } From 7207c8148427fac065c3ef2a019d21ddcbfb9d21 Mon Sep 17 00:00:00 2001 From: marbar3778 Date: Mon, 2 Dec 2024 12:02:00 +0100 Subject: [PATCH 18/21] check store2 --- store/iavl/store_test.go | 2 - store/v2/commitment/iavl/tree.go | 27 +++++++--- store/v2/commitment/store.go | 3 +- store/v2/commitment/store_test_suite.go | 70 +++++++++++++++---------- store/v2/migration/manager.go | 6 +-- store/v2/root/upgrade_test.go | 2 +- store/v2/snapshots/helpers_test.go | 21 ++------ store/v2/snapshots/manager.go | 7 +-- store/v2/snapshots/snapshotter.go | 9 +--- 9 files changed, 72 insertions(+), 75 deletions(-) diff --git a/store/iavl/store_test.go b/store/iavl/store_test.go index d0339c59a107..3a6050e2453e 100644 --- a/store/iavl/store_test.go +++ b/store/iavl/store_test.go @@ -79,8 +79,6 @@ func TestLoadStore(t *testing.T) { cIDHp := types.CommitID{Version: verHp, Hash: hash} require.Nil(t, err) - // TODO: Prune this height - // Create current height Hc updated, err = tree.Set([]byte("hello"), []byte("ciao")) require.NoError(t, err) diff --git a/store/v2/commitment/iavl/tree.go b/store/v2/commitment/iavl/tree.go index 3450e23e9112..1722a9016baf 100644 --- a/store/v2/commitment/iavl/tree.go +++ b/store/v2/commitment/iavl/tree.go @@ -76,8 +76,13 @@ func (t *IavlTree) Commit() ([]byte, uint64, error) { // GetProof returns a proof for the given key and version. 
func (t *IavlTree) GetProof(version uint64, key []byte) (*ics23.CommitmentProof, error) { - // if the tree is empty we will be in genesis - if t.tree.IsEmpty() { + // the mutable tree is empty at genesis & when the key is removed, but the immutable tree is not + // by checking the latest version we can determine if we are in genesis or have a key that has been removed + lv, err := t.tree.GetLatestVersion() + if err != nil { + return nil, err + } + if lv == 0 { return t.tree.GetProof(key) } @@ -91,8 +96,13 @@ func (t *IavlTree) GetProof(version uint64, key []byte) (*ics23.CommitmentProof, // Get implements the Reader interface. func (t *IavlTree) Get(version uint64, key []byte) ([]byte, error) { - // if the tree is empty we will be in geneiss - if t.tree.IsEmpty() { + // the mutable tree is empty at genesis & when the key is removed, but the immutable tree is not + // by checking the latest version we can determine if we are in genesis or have a key that has been removed + lv, err := t.tree.GetLatestVersion() + if err != nil { + return nil, err + } + if lv == 0 { return t.tree.Get(key) } @@ -106,8 +116,13 @@ func (t *IavlTree) Get(version uint64, key []byte) ([]byte, error) { // Iterator implements the Reader interface. 
func (t *IavlTree) Iterator(version uint64, start, end []byte, ascending bool) (corestore.Iterator, error) { - // if the tree is empty we will be in geneiss - if t.tree.IsEmpty() { + // the mutable tree is empty at genesis & when the key is removed, but the immutable tree is not + // by checking the latest version we can determine if we are in genesis or have a key that has been removed + lv, err := t.tree.GetLatestVersion() + if err != nil { + return nil, err + } + if lv == 0 { return t.tree.Iterator(start, end, ascending) } diff --git a/store/v2/commitment/store.go b/store/v2/commitment/store.go index 146116e9c203..62760c9c51b3 100644 --- a/store/v2/commitment/store.go +++ b/store/v2/commitment/store.go @@ -433,7 +433,6 @@ func (c *CommitStore) Restore( version uint64, format uint32, protoReader protoio.Reader, - chStorage chan<- *corestore.StateChanges, ) (snapshotstypes.SnapshotItem, error) { var ( importer Importer @@ -460,7 +459,6 @@ loop: return snapshotstypes.SnapshotItem{}, fmt.Errorf("failed to close importer: %w", err) } } - tree := c.multiTrees[item.Store.Name] if tree == nil { return snapshotstypes.SnapshotItem{}, fmt.Errorf("store %s not found", item.Store.Name) @@ -489,6 +487,7 @@ loop: if node.Value == nil { node.Value = []byte{} } + } err := importer.Add(node) if err != nil { diff --git a/store/v2/commitment/store_test_suite.go b/store/v2/commitment/store_test_suite.go index afb468de8595..0beca9278991 100644 --- a/store/v2/commitment/store_test_suite.go +++ b/store/v2/commitment/store_test_suite.go @@ -4,7 +4,6 @@ import ( "bytes" "fmt" "io" - "sync" "github.com/stretchr/testify/suite" @@ -31,26 +30,66 @@ type CommitStoreTestSuite struct { NewStore func(db corestore.KVStoreWithBatch, storeKeys, oldStoreKeys []string, logger corelog.Logger) (*CommitStore, error) } +// TestStore_Snapshotter tests the snapshot functionality of the CommitStore. +// This test verifies that the store can correctly create snapshots and restore from them. 
+// The test follows these steps: +// +// 1. Setup & Data Population: +// - Creates a new CommitStore with two stores (store1 and store2) +// - Writes 10 versions of data (version 1-10) +// - For each version, writes 10 key-value pairs to each store +// - Total data: 2 stores * 10 versions * 10 pairs = 200 key-value pairs +// - Keys are formatted as "key-{version}-{index}" +// - Values are formatted as "value-{version}-{index}" +// - Each version is committed to get a CommitInfo +// +// 2. Snapshot Creation: +// - Creates a dummy extension item for metadata testing +// - Sets up a new target store for restoration +// - Creates a channel for snapshot chunks +// - Launches a goroutine to: +// - Create a snapshot writer +// - Take a snapshot at version 10 +// - Write extension metadata +// +// 3. Snapshot Restoration: +// - Creates a snapshot reader from the chunks +// - Sets up a channel for state changes during restoration +// - Launches a goroutine to collect restored key-value pairs +// - Restores the snapshot into the target store +// - Verifies the extension metadata was preserved +// +// 4. 
Verification: +// - Confirms all 200 key-value pairs were restored correctly +// - Verifies the format: "{storeKey}_key-{version}-{index}" -> "value-{version}-{index}" +// - Checks that the restored store's Merkle tree hashes match the original +// - Ensures store integrity by comparing CommitInfo hashes func (s *CommitStoreTestSuite) TestStore_Snapshotter() { + // Initialize a new CommitStore with two stores storeKeys := []string{storeKey1, storeKey2} commitStore, err := s.NewStore(dbm.NewMemDB(), storeKeys, nil, coretesting.NewNopLogger()) s.Require().NoError(err) + // We'll create 10 versions of data latestVersion := uint64(10) kvCount := 10 var cInfo *proof.CommitInfo + + // For each version 1-10 for i := uint64(1); i <= latestVersion; i++ { + // Create KV pairs for each store kvPairs := make(map[string]corestore.KVPairs) for _, storeKey := range storeKeys { kvPairs[storeKey] = corestore.KVPairs{} + // Create 10 KV pairs for this store for j := 0; j < kvCount; j++ { key := []byte(fmt.Sprintf("key-%d-%d", i, j)) value := []byte(fmt.Sprintf("value-%d-%d", i, j)) kvPairs[storeKey] = append(kvPairs[storeKey], corestore.KVPair{Key: key, Value: value}) } } + // Write and commit the changes for this version s.Require().NoError(commitStore.WriteChangeset(corestore.NewChangesetWithPairs(i, kvPairs))) - cInfo, err = commitStore.Commit(i) s.Require().NoError(err) } @@ -84,34 +123,11 @@ func (s *CommitStoreTestSuite) TestStore_Snapshotter() { streamReader, err := snapshots.NewStreamReader(chunks) s.Require().NoError(err) - chStorage := make(chan *corestore.StateChanges, 100) - leaves := make(map[string]string) - wg := sync.WaitGroup{} - wg.Add(1) - go func() { - for kv := range chStorage { - for _, actor := range kv.StateChanges { - leaves[fmt.Sprintf("%s_%s", kv.Actor, actor.Key)] = string(actor.Value) - } - } - wg.Done() - }() - nextItem, err := targetStore.Restore(latestVersion, snapshotstypes.CurrentFormat, streamReader, chStorage) + + nextItem, err := 
targetStore.Restore(latestVersion, snapshotstypes.CurrentFormat, streamReader) s.Require().NoError(err) s.Require().Equal(*dummyExtensionItem.GetExtension(), *nextItem.GetExtension()) - close(chStorage) - wg.Wait() - s.Require().Equal(len(storeKeys)*kvCount*int(latestVersion), len(leaves)) - for _, storeKey := range storeKeys { - for i := 1; i <= int(latestVersion); i++ { - for j := 0; j < kvCount; j++ { - key := fmt.Sprintf("%s_key-%d-%d", storeKey, i, j) - s.Require().Equal(leaves[key], fmt.Sprintf("value-%d-%d", i, j)) - } - } - } - // check the restored tree hash targetCommitInfo, err := targetStore.GetCommitInfo(latestVersion) s.Require().NoError(err) diff --git a/store/v2/migration/manager.go b/store/v2/migration/manager.go index db88b195712b..a0b191ac7369 100644 --- a/store/v2/migration/manager.go +++ b/store/v2/migration/manager.go @@ -95,13 +95,9 @@ func (m *Manager) Migrate(height uint64) error { return err } - // restore the snapshot - chStorage := make(chan *corestore.StateChanges, defaultStorageBufferSize) - eg := new(errgroup.Group) eg.Go(func() error { - defer close(chStorage) - if _, err := m.stateCommitment.Restore(height, 0, ms, chStorage); err != nil { + if _, err := m.stateCommitment.Restore(height, 0, ms); err != nil { return err } return nil diff --git a/store/v2/root/upgrade_test.go b/store/v2/root/upgrade_test.go index fcbb2a5cb9d5..1bcee4149b48 100644 --- a/store/v2/root/upgrade_test.go +++ b/store/v2/root/upgrade_test.go @@ -106,7 +106,7 @@ func (s *UpgradeStoreTestSuite) TestLoadVersionAndUpgrade() { keyCount := 10 // check old store keys are queryable - oldStoreKeys := []string{"store1", "store3"} + oldStoreKeys := []string{"store1", "store2", "store3"} for _, storeKey := range oldStoreKeys { for version := uint64(1); version <= v; version++ { for i := 0; i < keyCount; i++ { diff --git a/store/v2/snapshots/helpers_test.go b/store/v2/snapshots/helpers_test.go index 5e272e36523b..40090c896817 100644 --- a/store/v2/snapshots/helpers_test.go 
+++ b/store/v2/snapshots/helpers_test.go @@ -14,7 +14,6 @@ import ( protoio "github.com/cosmos/gogoproto/io" "github.com/stretchr/testify/require" - corestore "cosmossdk.io/core/store" coretesting "cosmossdk.io/core/testing" "cosmossdk.io/store/v2/snapshots" snapshotstypes "cosmossdk.io/store/v2/snapshots/types" @@ -109,7 +108,7 @@ type mockCommitSnapshotter struct { } func (m *mockCommitSnapshotter) Restore( - height uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *corestore.StateChanges, + height uint64, format uint32, protoReader protoio.Reader, ) (snapshotstypes.SnapshotItem, error) { if format == 0 { return snapshotstypes.SnapshotItem{}, snapshotstypes.ErrUnknownFormat @@ -120,7 +119,6 @@ func (m *mockCommitSnapshotter) Restore( var item snapshotstypes.SnapshotItem m.items = [][]byte{} - keyCount := 0 for { item.Reset() err := protoReader.ReadMsg(&item) @@ -134,19 +132,6 @@ func (m *mockCommitSnapshotter) Restore( break } m.items = append(m.items, payload.Payload) - // mock feeding chStorage to check if the loop closed properly - // - // ref: https://github.com/cosmos/cosmos-sdk/pull/21106 - chStorage <- &corestore.StateChanges{ - Actor: []byte("actor"), - StateChanges: []corestore.KVPair{ - { - Key: []byte(fmt.Sprintf("key-%d", keyCount)), - Value: payload.Payload, - }, - }, - } - keyCount++ } return item, nil @@ -178,7 +163,7 @@ func (m *mockErrorCommitSnapshotter) Snapshot(height uint64, protoWriter protoio } func (m *mockErrorCommitSnapshotter) Restore( - height uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *corestore.StateChanges, + height uint64, format uint32, protoReader protoio.Reader, ) (snapshotstypes.SnapshotItem, error) { return snapshotstypes.SnapshotItem{}, errors.New("mock restore error") } @@ -242,7 +227,7 @@ func (m *hungCommitSnapshotter) Snapshot(height uint64, protoWriter protoio.Writ } func (m *hungCommitSnapshotter) Restore( - height uint64, format uint32, protoReader protoio.Reader, chStorage 
chan<- *corestore.StateChanges, + height uint64, format uint32, protoReader protoio.Reader, ) (snapshotstypes.SnapshotItem, error) { panic("not implemented") } diff --git a/store/v2/snapshots/manager.go b/store/v2/snapshots/manager.go index afee21e398cb..a0d7895513d8 100644 --- a/store/v2/snapshots/manager.go +++ b/store/v2/snapshots/manager.go @@ -12,7 +12,6 @@ import ( "sync" corelog "cosmossdk.io/core/log" - corestore "cosmossdk.io/core/store" errorsmod "cosmossdk.io/errors/v2" storeerrors "cosmossdk.io/store/v2/errors" "cosmossdk.io/store/v2/snapshots/types" @@ -395,14 +394,10 @@ func (m *Manager) doRestoreSnapshot(snapshot types.Snapshot, chChunks <-chan io. return payload.Payload, nil } - // chStorage is the channel to pass the KV pairs to the storage snapshotter. - chStorage := make(chan *corestore.StateChanges, defaultStorageChannelBufferSize) - - nextItem, err = m.commitSnapshotter.Restore(snapshot.Height, snapshot.Format, streamReader, chStorage) + nextItem, err = m.commitSnapshotter.Restore(snapshot.Height, snapshot.Format, streamReader) if err != nil { return errorsmod.Wrap(err, "multistore restore") } - close(chStorage) for { if nextItem.Item == nil { diff --git a/store/v2/snapshots/snapshotter.go b/store/v2/snapshots/snapshotter.go index 9b054060a36d..f3f4d33f1cf5 100644 --- a/store/v2/snapshots/snapshotter.go +++ b/store/v2/snapshots/snapshotter.go @@ -3,7 +3,6 @@ package snapshots import ( protoio "github.com/cosmos/gogoproto/io" - corestore "cosmossdk.io/core/store" "cosmossdk.io/store/v2/snapshots/types" ) @@ -14,13 +13,7 @@ type CommitSnapshotter interface { Snapshot(version uint64, protoWriter protoio.Writer) error // Restore restores the commitment state from the snapshot reader. - Restore(version uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *corestore.StateChanges) (types.SnapshotItem, error) -} - -// StorageSnapshotter defines an API for restoring snapshots of the storage state. 
-type StorageSnapshotter interface { - // Restore restores the storage state from the given channel. - Restore(version uint64, chStorage <-chan *corestore.StateChanges) error + Restore(version uint64, format uint32, protoReader protoio.Reader) (types.SnapshotItem, error) } // ExtensionPayloadReader read extension payloads, From 10192af51920960be8a9058e280205aa6289bb3a Mon Sep 17 00:00:00 2001 From: marbar3778 Date: Mon, 2 Dec 2024 12:57:36 +0100 Subject: [PATCH 19/21] fix linting --- server/v2/cometbft/abci_test.go | 1 + server/v2/stf/branch/bench_test.go | 1 + store/v2/commitment/store.go | 1 - tests/integration/accounts/base_account_test.go | 4 ++++ tests/integration/accounts/bundler_test.go | 1 + tests/integration/v2/auth/app_test.go | 2 -- 6 files changed, 7 insertions(+), 3 deletions(-) diff --git a/server/v2/cometbft/abci_test.go b/server/v2/cometbft/abci_test.go index df1901554ac9..c2bd81d65f23 100644 --- a/server/v2/cometbft/abci_test.go +++ b/server/v2/cometbft/abci_test.go @@ -785,6 +785,7 @@ func TestOptimisticExecution(t *testing.T) { Txs: ppReq.Txs, } fbResp, err := c.FinalizeBlock(context.Background(), fbReq) + require.Nil(t, fbResp) require.Error(t, err) require.ErrorContains(t, err, "test error") // from optimisticMockFunc require.Equal(t, 1, calledTimes) diff --git a/server/v2/stf/branch/bench_test.go b/server/v2/stf/branch/bench_test.go index f275e8d4352f..67122b59b66f 100644 --- a/server/v2/stf/branch/bench_test.go +++ b/server/v2/stf/branch/bench_test.go @@ -105,6 +105,7 @@ func Benchmark_Iterate(b *testing.B) { // makeBranchStack creates a branch stack of the given size and initializes it with unique key-value pairs. 
func makeBranchStack(b *testing.B, stackSize int) Store[store.KVStore] { + b.Helper() parent := coretesting.NewMemKV() branch := NewStore[store.KVStore](parent) for i := 1; i < stackSize; i++ { diff --git a/store/v2/commitment/store.go b/store/v2/commitment/store.go index 62760c9c51b3..fbd6f38f7648 100644 --- a/store/v2/commitment/store.go +++ b/store/v2/commitment/store.go @@ -487,7 +487,6 @@ loop: if node.Value == nil { node.Value = []byte{} } - } err := importer.Add(node) if err != nil { diff --git a/tests/integration/accounts/base_account_test.go b/tests/integration/accounts/base_account_test.go index a50975b8ff79..8db4cbf1a9e3 100644 --- a/tests/integration/accounts/base_account_test.go +++ b/tests/integration/accounts/base_account_test.go @@ -51,12 +51,14 @@ func TestBaseAccount(t *testing.T) { } func sendTx(t *testing.T, ctx sdk.Context, app *simapp.SimApp, sender []byte, msg sdk.Msg) { + t.Helper() tx := sign(t, ctx, app, sender, privKey, msg) _, _, err := app.SimDeliver(app.TxEncode, tx) require.NoError(t, err) } func sign(t *testing.T, ctx sdk.Context, app *simapp.SimApp, from sdk.AccAddress, privKey cryptotypes.PrivKey, msg sdk.Msg) sdk.Tx { + t.Helper() r := rand.New(rand.NewSource(0)) accNum, err := app.AccountsKeeper.AccountByNumber.Get(ctx, from) @@ -81,12 +83,14 @@ func sign(t *testing.T, ctx sdk.Context, app *simapp.SimApp, from sdk.AccAddress } func bechify(t *testing.T, app *simapp.SimApp, addr []byte) string { + t.Helper() bech32, err := app.AuthKeeper.AddressCodec().BytesToString(addr) require.NoError(t, err) return bech32 } func fundAccount(t *testing.T, app *simapp.SimApp, ctx sdk.Context, addr sdk.AccAddress, amt string) { + t.Helper() require.NoError(t, testutil.FundAccount(ctx, app.BankKeeper, addr, coins(t, amt))) } diff --git a/tests/integration/accounts/bundler_test.go b/tests/integration/accounts/bundler_test.go index 1b94ddd78fa1..2fb88983ddf6 100644 --- a/tests/integration/accounts/bundler_test.go +++ 
b/tests/integration/accounts/bundler_test.go @@ -209,6 +209,7 @@ func TestMsgServer_ExecuteBundle(t *testing.T) { } func makeTx(t *testing.T, msg gogoproto.Message, sig []byte, xt *account_abstractionv1.TxExtension) []byte { + t.Helper() anyMsg, err := codectypes.NewAnyWithValue(msg) require.NoError(t, err) diff --git a/tests/integration/v2/auth/app_test.go b/tests/integration/v2/auth/app_test.go index 5c3c5a7e95cf..82a2c3b8631f 100644 --- a/tests/integration/v2/auth/app_test.go +++ b/tests/integration/v2/auth/app_test.go @@ -20,7 +20,6 @@ import ( _ "cosmossdk.io/x/consensus" // import as blank for app wiring _ "cosmossdk.io/x/staking" // import as blank for app wirings - "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/tests/integration/v2" "github.com/cosmos/cosmos-sdk/testutil/configurator" _ "github.com/cosmos/cosmos-sdk/x/auth" // import as blank for app wiring @@ -33,7 +32,6 @@ import ( type suite struct { app *integration.App - cdc codec.Codec ctx context.Context authKeeper authkeeper.AccountKeeper From 88c74c125924dd91c6e2ec37c12f254cf52f2e9f Mon Sep 17 00:00:00 2001 From: marbar3778 Date: Mon, 2 Dec 2024 13:17:50 +0100 Subject: [PATCH 20/21] fix linting --- store/v2/migration/manager.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/store/v2/migration/manager.go b/store/v2/migration/manager.go index a0b191ac7369..5365e8eb6a11 100644 --- a/store/v2/migration/manager.go +++ b/store/v2/migration/manager.go @@ -19,8 +19,6 @@ import ( const ( // defaultChannelBufferSize is the default buffer size for the migration stream. defaultChannelBufferSize = 1024 - // defaultStorageBufferSize is the default buffer size for the storage snapshotter. 
-	defaultStorageBufferSize = 1024
 
 	migrateChangesetKeyFmt = "m/cs_%x" // m/cs_
 )

From e2a8fd5ecfed4bbed80f9d2554ad3e8ad482a120 Mon Sep 17 00:00:00 2001
From: marbar3778
Date: Tue, 3 Dec 2024 11:28:33 +0100
Subject: [PATCH 21/21] address comments

---
 store/v2/commitment/iavl/tree.go | 6 +++---
 store/v2/commitment/store.go     | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/store/v2/commitment/iavl/tree.go b/store/v2/commitment/iavl/tree.go
index 5fbaf0d4d5eb..4aaac08ab8bf 100644
--- a/store/v2/commitment/iavl/tree.go
+++ b/store/v2/commitment/iavl/tree.go
@@ -83,7 +83,7 @@ func (t *IavlTree) Commit() ([]byte, uint64, error) {
 
 // GetProof returns a proof for the given key and version.
 func (t *IavlTree) GetProof(version uint64, key []byte) (*ics23.CommitmentProof, error) {
-	// the mutable tree is empty at genesis & when the key is removed, but the immutable tree is not
+	// the mutable tree is empty at genesis & when the storekey is removed, but the immutable tree is not empty when the storekey is removed
 	// by checking the latest version we can determine if we are in genesis or have a key that has been removed
 	lv, err := t.tree.GetLatestVersion()
 	if err != nil {
@@ -103,7 +103,7 @@ func (t *IavlTree) GetProof(version uint64, key []byte) (*ics23.CommitmentProof,
 
 // Get implements the Reader interface.
 func (t *IavlTree) Get(version uint64, key []byte) ([]byte, error) {
-	// the mutable tree is empty at genesis & when the key is removed, but the immutable tree is not
+	// the mutable tree is empty at genesis & when the storekey is removed, but the immutable tree is not empty when the storekey is removed
 	// by checking the latest version we can determine if we are in genesis or have a key that has been removed
 	lv, err := t.tree.GetLatestVersion()
 	if err != nil {
@@ -123,7 +123,7 @@ func (t *IavlTree) Get(version uint64, key []byte) ([]byte, error) {
 
 // Iterator implements the Reader interface.
func (t *IavlTree) Iterator(version uint64, start, end []byte, ascending bool) (corestore.Iterator, error) { - // the mutable tree is empty at genesis & when the key is removed, but the immutable tree is not + // the mutable tree is empty at genesis & when the storekey is removed, but the immutable tree is not empty when the storekey is removed // by checking the latest version we can determine if we are in genesis or have a key that has been removed lv, err := t.tree.GetLatestVersion() if err != nil { diff --git a/store/v2/commitment/store.go b/store/v2/commitment/store.go index 3803a506bf63..aa383b57ae56 100644 --- a/store/v2/commitment/store.go +++ b/store/v2/commitment/store.go @@ -293,7 +293,7 @@ func (c *CommitStore) VersionExists(version uint64) (bool, error) { return false, err } if latestVersion == 0 { - return true, nil + return version == 0, nil } ci, err := c.metadata.GetCommitInfo(version)