diff --git a/go.mod b/go.mod
index 1088b5270..71e2c014c 100644
--- a/go.mod
+++ b/go.mod
@@ -49,10 +49,11 @@ require (
 	golang.org/x/sync v0.8.0
 )
 
+require gopkg.in/yaml.v3 v3.0.1 // indirect
+
 require (
 	github.com/hashicorp/go-hclog v1.6.3 // indirect
 	github.com/jackc/pgxlisten v0.0.0-20241005155529-9d952acd6a6c // indirect
-	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
 
 require (
diff --git a/internal/storage/bucket/bucket.go b/internal/storage/bucket/bucket.go
index 8db0c7dda..32545970a 100644
--- a/internal/storage/bucket/bucket.go
+++ b/internal/storage/bucket/bucket.go
@@ -13,7 +13,7 @@ import (
 )
 
 // stateless version (+1 regarding directory name, as migrations start from 1 in the lib)
-const MinimalSchemaVersion = 12
+const MinimalSchemaVersion = 15
 
 type Bucket struct {
 	name string
diff --git a/internal/storage/bucket/migrations.go b/internal/storage/bucket/migrations.go
index cca904cd5..1bbfd067b 100644
--- a/internal/storage/bucket/migrations.go
+++ b/internal/storage/bucket/migrations.go
@@ -13,11 +13,11 @@ var MigrationsFS embed.FS
 func GetMigrator(db *bun.DB, name string) *migrations.Migrator {
 	migrator := migrations.NewMigrator(db, migrations.WithSchema(name))
 
-	migrations, err := migrations.CollectMigrations(MigrationsFS, name)
+	_migrations, err := migrations.CollectMigrations(MigrationsFS, name)
 	if err != nil {
 		panic(err)
 	}
-	migrator.RegisterMigrations(migrations...)
+	migrator.RegisterMigrations(_migrations...)
 
 	return migrator
 }
@@ -27,4 +27,4 @@ func migrate(ctx context.Context, tracer trace.Tracer, db *bun.DB, name string)
 	defer span.End()
 
 	return GetMigrator(db, name).Up(ctx)
-}
+}
\ No newline at end of file
diff --git a/internal/storage/bucket/migrations/1-fix-trigger/up.sql b/internal/storage/bucket/migrations/1-fix-trigger/up.sql
index 73866f58c..cbef57036 100644
--- a/internal/storage/bucket/migrations/1-fix-trigger/up.sql
+++ b/internal/storage/bucket/migrations/1-fix-trigger/up.sql
@@ -29,4 +29,4 @@ begin
             posting ->> 'destination', posting ->> 'asset', (posting ->> 'amount')::numeric, false, _destination_exists);
     end;
-$$ set search_path from current;
+$$ set search_path from current;
\ No newline at end of file
diff --git a/internal/storage/bucket/migrations/11-make-stateless/up.sql b/internal/storage/bucket/migrations/11-make-stateless/up.sql
index 182ce53b4..251d1448a 100644
--- a/internal/storage/bucket/migrations/11-make-stateless/up.sql
+++ b/internal/storage/bucket/migrations/11-make-stateless/up.sql
@@ -200,8 +200,6 @@ execute procedure set_compat_on_transactions_metadata();
 alter table transactions
     add column post_commit_volumes jsonb,
--- todo: set in subsequent migration `default transaction_date()`,
--- otherwise the function is called for every existing lines
     add column inserted_at timestamp without time zone,
     alter column timestamp set default transaction_date()
     -- todo: we should change the type of this column, but actually it cause a full lock of the table
@@ -363,7 +361,6 @@ from (select row_number() over () as number, v.value
           select null) v) data
 $$ set search_path from current;
 
--- todo(next-minor): remove that on future version when the table will have this default value (need to fill nulls before)
 create or replace function set_transaction_inserted_at() returns trigger
     security definer
     language plpgsql
diff --git a/internal/storage/bucket/migrations/16-moves-fill-transaction-id/notes.yaml b/internal/storage/bucket/migrations/16-moves-fill-transaction-id/notes.yaml
new file mode 100644
index 000000000..4e7ed8eef
--- /dev/null
+++ b/internal/storage/bucket/migrations/16-moves-fill-transaction-id/notes.yaml
@@ -0,0 +1 @@
+name: Fill transaction ids of table moves
diff --git a/internal/storage/bucket/migrations/16-moves-fill-transaction-id/up.sql b/internal/storage/bucket/migrations/16-moves-fill-transaction-id/up.sql
new file mode 100644
index 000000000..5c486cf6e
--- /dev/null
+++ b/internal/storage/bucket/migrations/16-moves-fill-transaction-id/up.sql
@@ -0,0 +1,44 @@
+do $$
+    declare
+        _batch_size integer := 100;
+        _max integer;
+    begin
+        set search_path = '{{.Schema}}';
+
+        create index moves_transactions_id on moves(transactions_id);
+
+        select count(seq)
+        from moves
+        where transactions_id is null
+        into _max;
+
+        perform pg_notify('migrations-{{ .Schema }}', 'init: ' || _max);
+        loop
+
+            with _outdated_moves as (
+                select *
+                from moves
+                where transactions_id is null
+                limit _batch_size
+            )
+            update moves
+            set transactions_id = (
+                select id
+                from transactions
+                where seq = moves.transactions_seq
+            )
+            from _outdated_moves
+            where moves.seq in (_outdated_moves.seq);
+
+            exit when not found;
+
+            perform pg_notify('migrations-{{ .Schema }}', 'continue: ' || _batch_size);
+
+            commit ;
+        end loop;
+
+        alter table moves
+            alter column transactions_id set not null;
+    end
+$$
+language plpgsql;
\ No newline at end of file
diff --git a/internal/storage/bucket/migrations/16-moves-fill-transaction-id/up_tests_after.sql b/internal/storage/bucket/migrations/16-moves-fill-transaction-id/up_tests_after.sql
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/storage/bucket/migrations/16-moves-fill-transaction-id/up_tests_before.sql b/internal/storage/bucket/migrations/16-moves-fill-transaction-id/up_tests_before.sql
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/storage/bucket/migrations/17-transactions-fill-inserted-at/notes.yaml b/internal/storage/bucket/migrations/17-transactions-fill-inserted-at/notes.yaml
new file mode 100644
index 000000000..69c43fb23
--- /dev/null
+++ b/internal/storage/bucket/migrations/17-transactions-fill-inserted-at/notes.yaml
@@ -0,0 +1 @@
+name: Fill inserted_at column of transactions table
diff --git a/internal/storage/bucket/migrations/17-transactions-fill-inserted-at/up.sql b/internal/storage/bucket/migrations/17-transactions-fill-inserted-at/up.sql
new file mode 100644
index 000000000..6adca135f
--- /dev/null
+++ b/internal/storage/bucket/migrations/17-transactions-fill-inserted-at/up.sql
@@ -0,0 +1,59 @@
+do $$
+    declare
+        _batch_size integer := 100;
+        _date timestamp without time zone;
+        _count integer := 0;
+    begin
+        --todo: take explicit advisory lock to avoid concurrent migrations when the service is killed
+        set search_path = '{{.Schema}}';
+
+        -- select the date where the "11-make-stateless" migration has been applied
+        select tstamp into _date
+        from _system.goose_db_version
+        where version_id = 12;
+
+        create temporary table logs_transactions as
+        select id, ledger, date, (data->'transaction'->>'id')::bigint as transaction_id
+        from logs
+        where date <= _date;
+
+        create index on logs_transactions (ledger, transaction_id) include (id, date);
+
+        select count(*) into _count
+        from logs_transactions;
+
+        perform pg_notify('migrations-{{ .Schema }}', 'init: ' || _count);
+
+        for i in 0.._count by _batch_size loop
+            -- disable triggers
+            set session_replication_role = replica;
+
+            with _rows as (
+                select *
+                from logs_transactions
+                order by ledger, transaction_id
+                offset i
+                limit _batch_size
+            )
+            update transactions
+            set inserted_at = _rows.date
+            from _rows
+            where transactions.ledger = _rows.ledger and transactions.id = _rows.transaction_id;
+
+            -- enable triggers
+            set session_replication_role = default;
+
+            commit;
+
+            perform pg_notify('migrations-{{ .Schema }}', 'continue: ' || _batch_size);
+        end loop;
+
+        drop table logs_transactions;
+
+        alter table transactions
+            alter column inserted_at set default transaction_date();
+
+        drop trigger set_transaction_inserted_at on transactions;
+        drop function set_transaction_inserted_at;
+    end
+$$;
\ No newline at end of file
diff --git a/internal/storage/bucket/migrations/17-transactions-fill-inserted-at/up_tests_after.sql b/internal/storage/bucket/migrations/17-transactions-fill-inserted-at/up_tests_after.sql
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/storage/bucket/migrations/17-transactions-fill-inserted-at/up_tests_before.sql b/internal/storage/bucket/migrations/17-transactions-fill-inserted-at/up_tests_before.sql
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/storage/bucket/migrations/18-transactions-fill-pcv/notes.yaml b/internal/storage/bucket/migrations/18-transactions-fill-pcv/notes.yaml
new file mode 100644
index 000000000..4a8274783
--- /dev/null
+++ b/internal/storage/bucket/migrations/18-transactions-fill-pcv/notes.yaml
@@ -0,0 +1 @@
+name: Fill post_commit_volumes column of transactions table
diff --git a/internal/storage/bucket/migrations/18-transactions-fill-pcv/up.sql b/internal/storage/bucket/migrations/18-transactions-fill-pcv/up.sql
new file mode 100644
index 000000000..39dd9e9f4
--- /dev/null
+++ b/internal/storage/bucket/migrations/18-transactions-fill-pcv/up.sql
@@ -0,0 +1,62 @@
+do $$
+    declare
+        _batch_size integer := 100;
+        _count integer;
+    begin
+        set search_path = '{{.Schema}}';
+
+        select count(id)
+        from transactions
+        where post_commit_volumes is null
+        into _count;
+
+        perform pg_notify('migrations-{{ .Schema }}', 'init: ' || _count);
+
+        loop
+            -- disable triggers
+            set session_replication_role = replica;
+
+            with _outdated_transactions as (
+                select id
+                from transactions
+                where post_commit_volumes is null
+                limit _batch_size
+            )
+            update transactions
+            set post_commit_volumes = (
+                select public.aggregate_objects(post_commit_volumes::jsonb) as post_commit_volumes
+                from (
+                    select accounts_address, json_build_object(accounts_address, post_commit_volumes) post_commit_volumes
+                    from (
+                        select accounts_address, json_build_object(asset, post_commit_volumes) as post_commit_volumes
+                        from (
+                            select distinct on (accounts_address, asset)
+                                accounts_address,
+                                asset,
+                                first_value(post_commit_volumes) over (
+                                    partition by accounts_address, asset
+                                    order by seq desc
+                                ) as post_commit_volumes
+                            from moves
+                            where transactions_id = transactions.id and ledger = transactions.ledger
+                        ) moves
+                    ) values
+                ) values
+            )
+            from _outdated_transactions
+            where transactions.id in (_outdated_transactions.id);
+
+            -- enable triggers
+            set session_replication_role = default;
+
+            exit when not found;
+
+            commit;
+
+            perform pg_notify('migrations-{{ .Schema }}', 'continue: ' || _batch_size);
+        end loop;
+
+        alter table transactions
+            alter column post_commit_volumes set not null;
+    end
+$$;
\ No newline at end of file
diff --git a/internal/storage/bucket/migrations/18-transactions-fill-pcv/up_tests_after.sql b/internal/storage/bucket/migrations/18-transactions-fill-pcv/up_tests_after.sql
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/storage/bucket/migrations/18-transactions-fill-pcv/up_tests_before.sql b/internal/storage/bucket/migrations/18-transactions-fill-pcv/up_tests_before.sql
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/storage/bucket/migrations/19-accounts-volumes-fill-history/notes.yaml b/internal/storage/bucket/migrations/19-accounts-volumes-fill-history/notes.yaml
new file mode 100644
index 000000000..35624b619
--- /dev/null
+++ b/internal/storage/bucket/migrations/19-accounts-volumes-fill-history/notes.yaml
@@ -0,0 +1 @@
+name: Populate accounts_volumes table with historic data
diff --git a/internal/storage/bucket/migrations/19-accounts-volumes-fill-history/up.sql b/internal/storage/bucket/migrations/19-accounts-volumes-fill-history/up.sql
new file mode 100644
index 000000000..f77f2a0ec
--- /dev/null
+++ b/internal/storage/bucket/migrations/19-accounts-volumes-fill-history/up.sql
@@ -0,0 +1,54 @@
+do $$
+    declare
+        _count integer;
+        _batch_size integer := 100;
+    begin
+        set search_path = '{{.Schema}}';
+
+        create temporary table tmp_volumes as
+        select distinct on (ledger, accounts_address, asset)
+            ledger,
+            accounts_address,
+            asset,
+            first_value(post_commit_volumes) over (
+                partition by ledger, accounts_address, asset
+                order by seq desc
+            ) as post_commit_volumes
+        from moves
+        where not exists(
+            select
+            from accounts_volumes
+            where ledger = moves.ledger
+                and asset = moves.asset
+                and accounts_address = moves.accounts_address
+        );
+
+        select count(*)
+        from tmp_volumes
+        into _count;
+
+        perform pg_notify('migrations-{{ .Schema }}', 'init: ' || _count);
+
+        raise info '_count: %', _count;
+
+        for i in 0.._count by _batch_size loop
+            with _rows as (
+                select *
+                from tmp_volumes
+                offset i
+                limit _batch_size
+            )
+            insert into accounts_volumes (ledger, accounts_address, asset, input, output)
+            select ledger, accounts_address, asset, (post_commit_volumes).inputs, (post_commit_volumes).outputs
+            from _rows
+            on conflict do nothing; -- can be inserted by a concurrent transaction
+
+            commit;
+
+            perform pg_notify('migrations-{{ .Schema }}', 'continue: ' || _batch_size);
+
+        end loop;
+
+        drop table tmp_volumes;
+    end
+$$;
\ No newline at end of file
diff --git a/internal/storage/bucket/migrations/19-accounts-volumes-fill-history/up_tests_after.sql b/internal/storage/bucket/migrations/19-accounts-volumes-fill-history/up_tests_after.sql
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/storage/bucket/migrations/19-accounts-volumes-fill-history/up_tests_before.sql b/internal/storage/bucket/migrations/19-accounts-volumes-fill-history/up_tests_before.sql
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/storage/bucket/migrations/2-fix-volumes-aggregation/up.sql b/internal/storage/bucket/migrations/2-fix-volumes-aggregation/up.sql
index 9cdc09172..986b42575 100644
--- a/internal/storage/bucket/migrations/2-fix-volumes-aggregation/up.sql
+++ b/internal/storage/bucket/migrations/2-fix-volumes-aggregation/up.sql
@@ -22,5 +22,4 @@ with all_assets as (select v.v as asset
              ) m on true)
 select moves.asset, moves.post_commit_volumes
 from moves
-$$ set search_path from current;
-
+$$ set search_path from current;
\ No newline at end of file
diff --git a/internal/storage/bucket/migrations/20-transactions-metadata-fill-transaction-id/notes.yaml b/internal/storage/bucket/migrations/20-transactions-metadata-fill-transaction-id/notes.yaml
new file mode 100644
index 000000000..449dcfd17
--- /dev/null
+++ b/internal/storage/bucket/migrations/20-transactions-metadata-fill-transaction-id/notes.yaml
@@ -0,0 +1 @@
+name: Fill transactions_id column of transactions_metadata table
diff --git a/internal/storage/bucket/migrations/20-transactions-metadata-fill-transaction-id/up.sql b/internal/storage/bucket/migrations/20-transactions-metadata-fill-transaction-id/up.sql
new file mode 100644
index 000000000..7823fa915
--- /dev/null
+++ b/internal/storage/bucket/migrations/20-transactions-metadata-fill-transaction-id/up.sql
@@ -0,0 +1,44 @@
+
+do $$
+    declare
+        _batch_size integer := 100;
+        _count integer;
+    begin
+        set search_path = '{{.Schema}}';
+
+        select count(seq)
+        from transactions_metadata
+        where transactions_id is null
+        into _count;
+
+        perform pg_notify('migrations-{{ .Schema }}', 'init: ' || _count);
+
+        loop
+            with _outdated_transactions_metadata as (
+                select seq
+                from transactions_metadata
+                where transactions_id is null
+                limit _batch_size
+            )
+            update transactions_metadata
+            set transactions_id = (
+                select id
+                from transactions
+                where transactions_metadata.transactions_seq = seq
+            )
+            from _outdated_transactions_metadata
+            where transactions_metadata.seq in (_outdated_transactions_metadata.seq);
+
+            exit when not found;
+
+            commit;
+
+            perform pg_notify('migrations-{{ .Schema }}', 'continue: ' || _batch_size);
+
+        end loop;
+
+        alter table transactions_metadata
+            alter column transactions_id set not null ;
+    end
+$$;
+
diff --git a/internal/storage/bucket/migrations/20-transactions-metadata-fill-transaction-id/up_tests_after.sql b/internal/storage/bucket/migrations/20-transactions-metadata-fill-transaction-id/up_tests_after.sql
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/storage/bucket/migrations/20-transactions-metadata-fill-transaction-id/up_tests_before.sql b/internal/storage/bucket/migrations/20-transactions-metadata-fill-transaction-id/up_tests_before.sql
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/storage/bucket/migrations/21-accounts-metadata-fill-address/notes.yaml b/internal/storage/bucket/migrations/21-accounts-metadata-fill-address/notes.yaml
new file mode 100644
index 000000000..f599539a8
--- /dev/null
+++ b/internal/storage/bucket/migrations/21-accounts-metadata-fill-address/notes.yaml
@@ -0,0 +1 @@
+name: Fill accounts_address column of accounts_metadata table
diff --git a/internal/storage/bucket/migrations/21-accounts-metadata-fill-address/up.sql b/internal/storage/bucket/migrations/21-accounts-metadata-fill-address/up.sql
new file mode 100644
index 000000000..752ef3cfd
--- /dev/null
+++ b/internal/storage/bucket/migrations/21-accounts-metadata-fill-address/up.sql
@@ -0,0 +1,44 @@
+
+do $$
+    declare
+        _batch_size integer := 100;
+        _count integer;
+    begin
+        set search_path = '{{.Schema}}';
+
+        select count(seq)
+        from accounts_metadata
+        where accounts_address is null
+        into _count;
+
+        perform pg_notify('migrations-{{ .Schema }}', 'init: ' || _count);
+
+        loop
+            with _outdated_accounts_metadata as (
+                select seq
+                from accounts_metadata
+                where accounts_address is null
+                limit _batch_size
+            )
+            update accounts_metadata
+            set accounts_address = (
+                select address
+                from accounts
+                where accounts_metadata.accounts_seq = seq
+            )
+            from _outdated_accounts_metadata
+            where accounts_metadata.seq in (_outdated_accounts_metadata.seq);
+
+            exit when not found;
+
+            commit;
+
+            perform pg_notify('migrations-{{ .Schema }}', 'continue: ' || _batch_size);
+
+        end loop;
+
+        alter table accounts_metadata
+            alter column accounts_address set not null ;
+    end
+$$;
+
diff --git a/internal/storage/bucket/migrations/21-accounts-metadata-fill-address/up_tests_after.sql b/internal/storage/bucket/migrations/21-accounts-metadata-fill-address/up_tests_after.sql
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/storage/bucket/migrations/21-accounts-metadata-fill-address/up_tests_before.sql b/internal/storage/bucket/migrations/21-accounts-metadata-fill-address/up_tests_before.sql
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/storage/bucket/migrations/22-logs-fill-memento/notes.yaml b/internal/storage/bucket/migrations/22-logs-fill-memento/notes.yaml
new file mode 100644
index 000000000..1f7fd9415
--- /dev/null
+++ b/internal/storage/bucket/migrations/22-logs-fill-memento/notes.yaml
@@ -0,0 +1 @@
+name: Fill memento column of logs table
diff --git a/internal/storage/bucket/migrations/22-logs-fill-memento/up.sql b/internal/storage/bucket/migrations/22-logs-fill-memento/up.sql
new file mode 100644
index 000000000..7923084b3
--- /dev/null
+++ b/internal/storage/bucket/migrations/22-logs-fill-memento/up.sql
@@ -0,0 +1,38 @@
+do $$
+    declare
+        _batch_size integer := 100;
+        _count integer;
+    begin
+        set search_path = '{{.Schema}}';
+
+        select count(seq)
+        from logs
+        where memento is null
+        into _count;
+
+        perform pg_notify('migrations-{{ .Schema }}', 'init: ' || _count);
+
+        loop
+            with _outdated_logs as (
+                select seq
+                from logs
+                where memento is null
+                limit _batch_size
+            )
+            update logs
+            set memento = convert_to(data::varchar, 'LATIN1')::bytea
+            from _outdated_logs
+            where logs.seq in (_outdated_logs.seq);
+
+            exit when not found;
+
+            commit;
+
+            perform pg_notify('migrations-{{ .Schema }}', 'continue: ' || _batch_size);
+        end loop;
+
+        alter table logs
+            alter column memento set not null;
+    end
+$$;
+
diff --git a/internal/storage/bucket/migrations/22-logs-fill-memento/up_tests_after.sql b/internal/storage/bucket/migrations/22-logs-fill-memento/up_tests_after.sql
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/storage/bucket/migrations/22-logs-fill-memento/up_tests_before.sql b/internal/storage/bucket/migrations/22-logs-fill-memento/up_tests_before.sql
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/storage/ledger/balances.go b/internal/storage/ledger/balances.go
index 566e45d76..b5fafad91 100644
--- a/internal/storage/ledger/balances.go
+++ b/internal/storage/ledger/balances.go
@@ -154,7 +154,7 @@ func (s *Store) selectAccountWithAggregatedVolumes(date *time.Time, useInsertion
 		TableExpr("(?) values", selectAccountWithAssetAndVolumes).
 		Group("accounts_address").
 		Column("accounts_address").
-		ColumnExpr("aggregate_objects(json_build_object(asset, json_build_object('input', (volumes).inputs, 'output', (volumes).outputs))::jsonb) as " + alias)
+		ColumnExpr("public.aggregate_objects(json_build_object(asset, json_build_object('input', (volumes).inputs, 'output', (volumes).outputs))::jsonb) as " + alias)
 }
 
 func (s *Store) SelectAggregatedBalances(date *time.Time, useInsertionDate bool, builder query.Builder) *bun.SelectQuery {
diff --git a/internal/storage/ledger/transactions.go b/internal/storage/ledger/transactions.go
index a8b522e7d..e08d573a7 100644
--- a/internal/storage/ledger/transactions.go
+++ b/internal/storage/ledger/transactions.go
@@ -401,11 +401,6 @@ func (s *Store) InsertTransaction(ctx context.Context, tx *ledger.Transaction) e
 			if err.(postgres.ErrConstraintsFailed).GetConstraint() == "transactions_reference" {
 				return nil, ledgercontroller.NewErrTransactionReferenceConflict(tx.Reference)
 			}
-		case errors.Is(err, postgres.ErrRaisedException{}):
-			// todo(next-minor): remove this test
-			if err.(postgres.ErrRaisedException).GetMessage() == "duplicate reference" {
-				return nil, ledgercontroller.NewErrTransactionReferenceConflict(tx.Reference)
-			}
 		default:
 			return nil, err
 		}
diff --git a/internal/storage/ledger/transactions_test.go b/internal/storage/ledger/transactions_test.go
index 6bb0312e3..469804f65 100644
--- a/internal/storage/ledger/transactions_test.go
+++ b/internal/storage/ledger/transactions_test.go
@@ -7,9 +7,6 @@ import (
 	"database/sql"
 	"fmt"
 	"github.com/alitto/pond"
-	"github.com/formancehq/ledger/internal/storage/bucket"
-	ledgerstore "github.com/formancehq/ledger/internal/storage/ledger"
-	"github.com/google/uuid"
 	"math/big"
 	"slices"
 	"testing"
@@ -606,84 +603,6 @@ func TestTransactionsInsert(t *testing.T) {
 		require.Error(t, err)
 		require.True(t, errors.Is(err, ledgercontroller.ErrTransactionReferenceConflict{}))
 	})
-	// todo(next-minor): remove this test
-	t.Run("check reference conflict with minimal store version", func(t *testing.T) {
-		t.Parallel()
-
-		driver := newDriver(t)
-		ledgerName := uuid.NewString()[:8]
-
-		l := ledger.MustNewWithDefault(ledgerName)
-		l.Bucket = ledgerName
-
-		migrator := bucket.GetMigrator(driver.GetDB(), ledgerName)
-		for i := 0; i < bucket.MinimalSchemaVersion; i++ {
-			require.NoError(t, migrator.UpByOne(ctx))
-		}
-
-		b := bucket.New(driver.GetDB(), ledgerName)
-		err := b.AddLedger(ctx, l, driver.GetDB())
-		require.NoError(t, err)
-
-		store := ledgerstore.New(driver.GetDB(), b, l)
-
-		const nbTry = 100
-
-		for i := 0; i < nbTry; i++ {
-			errChan := make(chan error, 2)
-
-			// Create a simple tx
-			tx1 := ledger.Transaction{
-				TransactionData: ledger.TransactionData{
-					Timestamp: now,
-					Reference: fmt.Sprintf("foo:%d", i),
-					Postings: []ledger.Posting{
-						ledger.NewPosting("world", "bank", "USD/2", big.NewInt(100)),
-					},
-				},
-			}
-			go func() {
-				errChan <- store.InsertTransaction(ctx, &tx1)
-			}()
-
-			// Create another tx with the same reference
-			tx2 := ledger.Transaction{
-				TransactionData: ledger.TransactionData{
-					Timestamp: now,
-					Reference: fmt.Sprintf("foo:%d", i),
-					Postings: []ledger.Posting{
-						ledger.NewPosting("world", "bank", "USD/2", big.NewInt(100)),
-					},
-				},
-			}
-			go func() {
-				errChan <- store.InsertTransaction(ctx, &tx2)
-			}()
-
-			select {
-			case err1 := <-errChan:
-				if err1 != nil {
-					require.True(t, errors.Is(err1, ledgercontroller.ErrTransactionReferenceConflict{}))
-					select {
-					case err2 := <-errChan:
-						require.NoError(t, err2)
-					case <-time.After(time.Second):
-						require.Fail(t, "should have received an error")
-					}
-				} else {
-					select {
-					case err2 := <-errChan:
-						require.Error(t, err2)
-						require.True(t, errors.Is(err2, ledgercontroller.ErrTransactionReferenceConflict{}))
-					case <-time.After(time.Second):
-						require.Fail(t, "should have received an error")
-					}
-				}
-			case <-time.After(time.Second):
-				require.Fail(t, "should have received an error")
-			}
-		}
-	})
 
 	t.Run("check denormalization", func(t *testing.T) {
 		t.Parallel()
diff --git a/internal/storage/module.go b/internal/storage/module.go
index 1897c97e6..7902459d6 100644
--- a/internal/storage/module.go
+++ b/internal/storage/module.go
@@ -6,6 +6,7 @@ import (
 	"github.com/formancehq/go-libs/v2/logging"
 	"github.com/formancehq/ledger/internal/storage/driver"
 	"go.uber.org/fx"
+	"time"
 )
 
 func NewFXModule(autoUpgrade bool) fx.Option {
@@ -44,6 +45,8 @@ func NewFXModule(autoUpgrade bool) fx.Option {
 					return
 				}
 				logging.FromContext(ctx).Errorf("Upgrading buckets: %s", err)
+				// todo: make configurable
+				<-time.After(5 * time.Second)
 				continue
 			}
 			return