
Commit

XX
chrzaszcz committed Apr 8, 2024
1 parent 4b69fce commit bca629b
Showing 9 changed files with 97 additions and 66 deletions.
104 changes: 58 additions & 46 deletions big_tests/tests/cets_disco_SUITE.erl
@@ -22,6 +22,7 @@ file_cases() ->

rdbms_cases() ->
[rdbms_backend,
rdbms_backend_supports_cluster_change,
rdbms_backend_supports_auto_cleaning,
rdbms_backend_node_doesnt_remove_itself,
rdbms_backend_db_queries,
@@ -71,7 +72,7 @@ end_per_testcase(Name, Config) when Name == address_please_returns_ip;
Name == address_please_returns_ip_127_0_0_1_from_db ->
stop_cets_discovery(),
Config;
end_per_testcase(_CaseName, Config) ->
end_per_testcase(_CaseName, _Config) ->
unmock(mim()),
unmock(mim2()).

@@ -91,21 +92,33 @@ rdbms_backend(_Config) ->
Opts1 = #{cluster_name => CN, node_name_to_insert => <<"test1">>},
Opts2 = #{cluster_name => CN, node_name_to_insert => <<"test2">>},

State1 = disco_init(mim(), Opts1),
{{ok, Nodes1_2}, State1_2} = disco_get_nodes(mim(), State1),
?assertMatch(#{last_query_info := #{already_registered := false}}, State1_2),
?assertEqual([], Nodes1_2),
init_and_get_nodes(mim(), Opts1, []),

%% "test2" node can see "test1" on initial registration
State2 = disco_init(mim2(), Opts2),
{{ok, Nodes2_2}, State2_2} = disco_get_nodes(mim2(), State2),
?assertMatch(#{last_query_info := #{already_registered := false}}, State2_2),
?assertEqual([test1], Nodes2_2),
State2 = init_and_get_nodes(mim2(), Opts2, [test1]),

%% "test2" node can see "test1" on update
{{ok, Nodes2_3}, State2_3} = disco_get_nodes(mim2(), State2_2),
?assertEqual(lists:sort([test1, test2]), lists:sort(Nodes2_3)),
?assertMatch(#{last_query_info := #{already_registered := true}}, State2_3).
get_nodes(mim2(), State2, [test1, test2]).

rdbms_backend_supports_cluster_change(_Config) ->
CN1 = random_cluster_name(?FUNCTION_NAME),
CN2 = <<CN1/binary, "_new">>,
Opts1 = #{cluster_name => CN1, node_name_to_insert => <<"test1">>},
Opts2 = #{cluster_name => CN1, node_name_to_insert => <<"test2">>},

%% Nodes test1 and test2 are in CN1, and they become connected
State1 = init_and_get_nodes(mim(), Opts1, []),
State2 = init_and_get_nodes(mim2(), Opts2, [test1]),
get_nodes(mim(), State1, [test1, test2]),

%% Node test1 moves to CN2, and the nodes are disconnected
NewState1 = init_and_get_nodes(mim(), Opts1#{cluster_name := CN2}, []),
get_nodes(mim2(), State2, [test2]),
NewState1A = get_nodes(mim(), NewState1, [test1]),

%% Node test2 moves to CN2, and the nodes are connected again
init_and_get_nodes(mim2(), Opts2#{cluster_name := CN2}, [test1]),
get_nodes(mim(), NewState1A, [test1, test2]).

rdbms_backend_supports_auto_cleaning(_Config) ->
Timestamp = month_ago(),
@@ -115,24 +128,17 @@ rdbms_backend_supports_auto_cleaning(_Config) ->
Opts2 = #{cluster_name => CN, node_name_to_insert => <<"test2">>},

%% test1 row is written with an old (mocked) timestamp
State1 = disco_init(mim(), Opts1),
{{ok, Nodes1_2}, State1_2} = disco_get_nodes(mim(), State1),
{{ok, Nodes1_3}, State1_3} = disco_get_nodes(mim(), State1_2),
?assertEqual([], Nodes1_2),
?assertEqual([test1], Nodes1_3),
?assertMatch(#{last_query_info := #{timestamp := Timestamp}}, State1_2),
?assertMatch(#{last_query_info := #{timestamp := Timestamp}}, State1_3),
State1 = init_and_get_nodes(mim(), Opts1, []),
?assertMatch(#{last_query_info := #{timestamp := Timestamp}}, State1),
State1A = get_nodes(mim(), State1, [test1]),
?assertMatch(#{last_query_info := #{timestamp := Timestamp}}, State1A),

%% test2 would clean test1 registration
%% We don't mock on mim2 node, so timestamps would differ
State2 = disco_init(mim2(), Opts2),
{{ok, Nodes2_2}, State2_2} = disco_get_nodes(mim2(), State2),
?assertEqual([], Nodes2_2),
?assertMatch(#{last_query_info := #{run_cleaning_result := {removed, [<<"test1">>]}}},
State2_2),
{{ok, Nodes2_3}, State2_3} = disco_get_nodes(mim2(), State2),
?assertEqual([test2], Nodes2_3),
#{last_query_info := #{last_rows := SelectedRows}} = State2_3,
State2 = init_and_get_nodes(mim2(), Opts2, []),
?assertMatch(#{last_query_info := #{run_cleaning_result := {removed, [<<"test1">>]}}}, State2),
State2A = get_nodes(mim2(), State2, [test2]),
#{last_query_info := #{last_rows := SelectedRows}} = State2A,
?assertMatch(1, length(SelectedRows)).

rdbms_backend_node_doesnt_remove_itself(_Config) ->
@@ -143,31 +149,24 @@ rdbms_backend_node_doesnt_remove_itself(_Config) ->
Opts2 = #{cluster_name => CN, node_name_to_insert => <<"test2">>},

%% test1 row is written with an old (mocked) timestamp
State1 = disco_init(mim(), Opts1),
{{ok, Nodes1_2}, State1_2} = disco_get_nodes(mim(), State1),
?assertEqual([], Nodes1_2),
?assertMatch(#{last_query_info := #{timestamp := Timestamp}}, State1_2),
State1 = init_and_get_nodes(mim(), Opts1, []),
?assertMatch(#{last_query_info := #{timestamp := Timestamp}}, State1),

unmock_timestamp(mim()),
%% test1 row is not removed and timestamp is updated
{{ok, Nodes1_3}, State1_3} = disco_get_nodes(mim(), State1_2),
?assertNotMatch(#{last_query_info := #{timestamp := Timestamp}}, State1_3),
?assertMatch(#{last_query_info := #{run_cleaning_result := {removed, []}}},
State1_3),
?assertEqual([test1], Nodes1_3),
State1A = get_nodes(mim(), State1, [test1]),
?assertNotMatch(#{last_query_info := #{timestamp := Timestamp}}, State1A),
?assertMatch(#{last_query_info := #{run_cleaning_result := {removed, []}}}, State1A),

State2 = disco_init(mim2(), Opts2),
{{ok, Nodes2_2}, State2_2} = disco_get_nodes(mim2(), State2),
?assertEqual([test1], Nodes2_2),
?assertMatch(#{last_query_info := #{run_cleaning_result := {removed, []}}},
State2_2).
State2 = init_and_get_nodes(mim2(), Opts2, [test1]),
?assertMatch(#{last_query_info := #{run_cleaning_result := {removed, []}}}, State2).

rdbms_backend_db_queries(_Config) ->
CN = random_cluster_name(?FUNCTION_NAME),
TS = rpc(mim(), mongoose_rdbms_timestamp, select, []),
TS2 = TS + 100,

%% Make sure "test1" and "test2" are not in the table
%% make sure "test1" and "test2" are not in the table
?assertEqual({updated, 1}, delete_node_from_db(<<"test1">>)),
?assertEqual({updated, 1}, delete_node_from_db(<<"test2">>)),

@@ -185,7 +184,7 @@ rdbms_backend_db_queries(_Config) ->
{selected, SelectedNodes1} = select(CN),
?assertEqual(lists:sort([{<<"test1">>, 1, <<>>, TS}, {<<"test2">>, 2, <<>>, TS}]),
lists:sort(SelectedNodes1)),
?assertEqual({updated, 1}, update_existing(CN, <<"test1">>, <<>>, TS2)),
?assertEqual({updated, 1}, update_existing(<<"test1">>, <<>>, TS2)),
{selected, SelectedNodes2} = select(CN),
?assertEqual(lists:sort([{<<"test1">>, 1, <<>>, TS2}, {<<"test2">>, 2, <<>>, TS}]),
lists:sort(SelectedNodes2)),
@@ -256,6 +255,19 @@ address_please_returns_ip_127_0_0_1_from_db(Config) ->
%% Helpers
%%--------------------------------------------------------------------

init_and_get_nodes(RPCNode, Opts, ExpectedNodes) ->
StateIn = disco_init(RPCNode, Opts),
get_nodes(RPCNode, StateIn, ExpectedNodes, false).

get_nodes(RPCNode, StateIn, ExpectedNodes) ->
get_nodes(RPCNode, StateIn, ExpectedNodes, true).

get_nodes(RPCNode, StateIn, ExpectedNodes, AlreadyRegistered) ->
{{ok, Nodes}, State} = disco_get_nodes(RPCNode, StateIn),
?assertEqual(lists:sort(ExpectedNodes), lists:sort(Nodes)),
?assertMatch(#{last_query_info := #{already_registered := AlreadyRegistered}}, State),
State.

disco_init(Node, Opts) ->
State = rpc(Node, mongoose_cets_discovery_rdbms, init, [Opts]),
log_disco_request(?FUNCTION_NAME, Node, Opts, State),
@@ -318,9 +330,9 @@ select(CN) ->
ct:log("select(~p) = ~p", [CN, Ret]),
Ret.

update_existing(CN, BinNode, Address, TS) ->
Ret = rpc(mim(), mongoose_cets_discovery_rdbms, update_existing, [CN, BinNode, Address, TS]),
ct:log("select(~p, ~p, ~p, ~p) = ~p", [CN, BinNode, Address, TS, Ret]),
update_existing(BinNode, Address, TS) ->
Ret = rpc(mim(), mongoose_cets_discovery_rdbms, update_existing, [BinNode, Address, TS]),
ct:log("select(~p, ~p, ~p) = ~p", [BinNode, Address, TS, Ret]),
Ret.

delete_node_from_db(BinNode) ->
3 changes: 1 addition & 2 deletions big_tests/tests/graphql_cets_SUITE.erl
@@ -251,9 +251,8 @@ register_bad_node() ->
{updated, 1} = rpc(mim(), mongoose_cets_discovery_rdbms, insert_new, InsertArgs).

ensure_bad_node_unregistered() ->
ClusterName = <<"mim">>,
Node = <<"badnode@localhost">>,
DeleteArgs = [ClusterName, Node],
DeleteArgs = [Node],
%% Ensure the node is removed
{updated, _} = rpc(mim(), mongoose_cets_discovery_rdbms, delete_node_from_db, DeleteArgs).

24 changes: 21 additions & 3 deletions priv/migrations/mssql_6.2.0_x.x.x.sql
@@ -1,15 +1,33 @@
-- Update roster schema
DROP INDEX i_rosteru_server_user_jid ON rosterusers;
DROP INDEX i_rosteru_server_user ON rosterusers;
DROP INDEX i_rosteru_jid ON rosterusers;
ALTER TABLE rosterusers
DROP CONSTRAINT rosterusers$i_rosteru_server_user_jid;
ALTER TABLE rosterusers
ADD CONSTRAINT PK_rosterusers PRIMARY KEY CLUSTERED (server ASC, username ASC, jid ASC);

DROP INDEX i_rosteru_jid ON rostergroups;
DROP INDEX i_rosterg_server_user_jid ON rostergroups;
ALTER TABLE rostergroups
ALTER COLUMN grp VARCHAR(250) NOT NULL;
ALTER TABLE rostergroups
ALTER COLUMN grp VARCHAR(250),
ADD CONSTRAINT PK_rostergroups PRIMARY KEY CLUSTERED (server ASC, username ASC, jid ASC, grp ASC);

-- Store information whether the message is of type "groupchat" in the user's archive
ALTER TABLE mam_message
ADD is_groupchat smallint NOT NULL DEFAULT 0;

-- Create table for mod_caps
CREATE TABLE caps (
node varchar(250) NOT NULL,
sub_node varchar(250) NOT NULL,
features text NOT NULL,
PRIMARY KEY (node, sub_node)
);

-- Delete PK constraint before replacing it with a new one
DECLARE @pk VARCHAR(max) = (SELECT CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS
WHERE TABLE_NAME='discovery_nodes' AND CONSTRAINT_TYPE='PRIMARY KEY');
EXEC('ALTER TABLE discovery_nodes DROP CONSTRAINT ' + @pk);

-- In case of duplicates, you need to remove stale rows manually or wait for cleanup
ALTER TABLE discovery_nodes ADD PRIMARY KEY (node_name);
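If any node_name is still registered under more than one cluster_name, adding the single-column primary key will fail. A pre-check along these lines can help; this is a sketch using only the discovery_nodes columns from this schema, and the same query works on MSSQL, MySQL and PostgreSQL:

-- Sketch: list node_name values registered more than once; such rows must be
-- cleaned up (manually or by the automatic cleaner) before the new key is added.
SELECT node_name, COUNT(*) AS copies
FROM discovery_nodes
GROUP BY node_name
HAVING COUNT(*) > 1;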
4 changes: 4 additions & 0 deletions priv/migrations/mysql_6.2.0_x.x.x.sql
@@ -18,3 +18,7 @@ CREATE TABLE caps (
features text NOT NULL,
PRIMARY KEY (node, sub_node)
);

-- In case of duplicates, you need to remove stale rows manually or wait for cleanup
ALTER TABLE discovery_nodes
DROP PRIMARY KEY, ADD PRIMARY KEY (node_name);
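For manual cleanup on MySQL, one option is to keep only the newest registration per node_name. A hedged sketch, assuming the duplicates have distinct updated_timestamp values (ties would still need to be resolved by hand):

-- Sketch: delete older duplicates, keeping the most recent row per node_name
DELETE stale
FROM discovery_nodes AS stale
JOIN discovery_nodes AS newer
  ON newer.node_name = stale.node_name
 AND newer.updated_timestamp > stale.updated_timestamp;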
7 changes: 3 additions & 4 deletions priv/migrations/pgsql_6.2.0_x.x.x.sql
@@ -19,7 +19,6 @@ CREATE TABLE caps (
PRIMARY KEY (node, sub_node)
);

ALTER TABLE discovery_nodes DROP CONSTRAINT discovery_nodes_pkey;
-- In case of duplicates, remove stale rows manually or wait for cleanup
ALTER TABLE discovery_nodes ADD PRIMARY KEY (node_name);
CREATE UNIQUE INDEX i_discovery_nodes_node_name ON discovery_nodes USING BTREE(cluster_name, node_name);
-- In case of duplicates, you need to remove stale rows manually or wait for cleanup
ALTER TABLE discovery_nodes
DROP CONSTRAINT discovery_nodes_pkey, ADD PRIMARY KEY (node_name);
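Once the PostgreSQL migration has run, the resulting key can be double-checked against the system catalogs; a small verification sketch:

-- Sketch: confirm discovery_nodes now has a primary key on node_name only
SELECT conname, pg_get_constraintdef(oid)
FROM pg_constraint
WHERE conrelid = 'discovery_nodes'::regclass
  AND contype = 'p';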
2 changes: 1 addition & 1 deletion priv/mssql2012.sql
@@ -758,7 +758,7 @@ CREATE TABLE discovery_nodes (
node_num INT NOT NULL,
address varchar(250) NOT NULL DEFAULT '', -- empty means we should ask DNS
updated_timestamp BIGINT NOT NULL, -- in seconds
PRIMARY KEY (cluster_name, node_name)
PRIMARY KEY (node_name)
);
CREATE UNIQUE INDEX i_discovery_nodes_node_num ON discovery_nodes(cluster_name, node_num);

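With the reduced key, a node is identified by node_name alone. An illustrative row matching the column comments above (values are made up; an empty address means the IP is resolved via DNS, and updated_timestamp is in seconds):

-- Sketch with illustrative values only
INSERT INTO discovery_nodes (cluster_name, node_name, node_num, address, updated_timestamp)
    VALUES ('mim', 'mongooseim@localhost', 1, '', 1712534400);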
2 changes: 1 addition & 1 deletion priv/mysql.sql
@@ -547,7 +547,7 @@ CREATE TABLE discovery_nodes (
node_num INT UNSIGNED NOT NULL,
address varchar(250) NOT NULL DEFAULT '', -- empty means we should ask DNS
updated_timestamp BIGINT NOT NULL, -- in seconds
PRIMARY KEY (cluster_name, node_name)
PRIMARY KEY (node_name)
);
CREATE UNIQUE INDEX i_discovery_nodes_node_num USING BTREE ON discovery_nodes(cluster_name, node_num);

1 change: 0 additions & 1 deletion priv/pg.sql
@@ -492,7 +492,6 @@ CREATE TABLE discovery_nodes (
PRIMARY KEY (node_name)
);
CREATE UNIQUE INDEX i_discovery_nodes_node_num ON discovery_nodes USING BTREE(cluster_name, node_num);
CREATE UNIQUE INDEX i_discovery_nodes_node_name ON discovery_nodes USING BTREE(cluster_name, node_name);

CREATE TABLE caps (
node varchar(250) NOT NULL,
16 changes: 8 additions & 8 deletions src/mongoose_cets_discovery_rdbms.erl
@@ -4,8 +4,8 @@
-export([init/1, get_nodes/1]).

%% these functions are exported for testing purposes only.
-export([select/1, insert_new/5, update_existing/4, delete_node_from_db/1]).
-ignore_xref([select/1, insert_new/5, update_existing/4, delete_node_from_db/1]).
-export([select/1, insert_new/5, update_existing/3, delete_node_from_db/1]).
-ignore_xref([select/1, insert_new/5, update_existing/3, delete_node_from_db/1]).

-include("mongoose_logger.hrl").

@@ -73,7 +73,7 @@ try_register(ClusterName, Node, State = #{node_ip_binary := Address})
NodeNum =
case AlreadyRegistered of
true ->
update_existing(ClusterName, Node, Address, Timestamp),
update_existing(Node, Address, Timestamp),

Codecov (codecov/patch): added line 76 in src/mongoose_cets_discovery_rdbms.erl was not covered by tests.
{value, {_, Num, _Addr, _TS}} = lists:keysearch(Node, 1, Rows),
Num;
false ->
@@ -124,9 +124,9 @@ prepare() ->
[cluster_name, node_name, node_num, address, updated_timestamp],
insert_new()),
mongoose_rdbms:prepare(cets_disco_update_existing, T,
[updated_timestamp, address, cluster_name, node_name], update_existing()),
[updated_timestamp, address, node_name], update_existing()),
mongoose_rdbms:prepare(cets_delete_node_from_db, T,
[cluster_name, node_name], delete_node_from_db()).
[node_name], delete_node_from_db()).

select() ->
<<"SELECT node_name, node_num, address, updated_timestamp FROM discovery_nodes WHERE cluster_name = ?">>.
@@ -143,10 +143,10 @@ insert_new(ClusterName, NodeName, NodeNum, Address, UpdatedTimestamp) ->
[ClusterName, NodeName, NodeNum, Address, UpdatedTimestamp]).

update_existing() ->
<<"UPDATE discovery_nodes SET updated_timestamp = ?, address = ? WHERE cluster_name = ? AND node_name = ?">>.
<<"UPDATE discovery_nodes SET updated_timestamp = ?, address = ? WHERE node_name = ?">>.

Codecov (codecov/patch): added line 146 in src/mongoose_cets_discovery_rdbms.erl was not covered by tests.

update_existing(ClusterName, NodeName, Address, UpdatedTimestamp) ->
mongoose_rdbms:execute(global, cets_disco_update_existing, [UpdatedTimestamp, Address, ClusterName, NodeName]).
update_existing(NodeName, Address, UpdatedTimestamp) ->
mongoose_rdbms:execute(global, cets_disco_update_existing, [UpdatedTimestamp, Address, NodeName]).

Codecov (codecov/patch): added line 149 in src/mongoose_cets_discovery_rdbms.erl was not covered by tests.

delete_node_from_db() ->
<<"DELETE FROM discovery_nodes WHERE node_name = ?">>.

Codecov (codecov/patch): added line 152 in src/mongoose_cets_discovery_rdbms.erl was not covered by tests.
