Add the cluster commands back
jacekwegr committed Nov 17, 2023
1 parent 995f495 commit 3b3d515
Showing 7 changed files with 284 additions and 53 deletions.
230 changes: 224 additions & 6 deletions big_tests/tests/cluster_commands_SUITE.erl
@@ -23,6 +23,8 @@
remove_node_from_cluster/2,
require_rpc_nodes/1,
rpc/4]).
-import(mongooseimctl_helper, [mongooseimctl/3, rpc_call/3]).
-import(domain_helper, [host_type/1]).

-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
@@ -35,20 +37,37 @@

all() ->
[{group, clustered},
{group, clustering_two}].
{group, clustering_two},
{group, clustering_three}].

groups() ->
[{clustered, [], [one_to_one_message]},
{clustering_two, [], clustering_two_tests()}].
{clustering_two, [], clustering_two_tests()},
{clustering_three, [], clustering_three_tests()}].

suite() ->
require_rpc_nodes([mim, mim2, mim3]) ++ escalus:suite().

clustering_two_tests() ->
[leave_using_rpc,
[join_successful_prompt,
join_successful_force,
leave_successful_prompt,
leave_successful_force,
join_unsuccessful,
leave_unsuccessful,
leave_but_no_cluster,
join_twice,
leave_using_rpc,
leave_twice,
join_twice_using_rpc,
join_twice_in_parallel_using_rpc].

clustering_three_tests() ->
[cluster_of_three,
leave_the_three,
%remove_dead_from_cluster, % TODO: Breaks cover
remove_alive_from_cluster].

%%--------------------------------------------------------------------
%% Init & teardown
%%--------------------------------------------------------------------
@@ -71,7 +90,7 @@ init_per_suite(Config) ->
end_per_suite(Config) ->
escalus:end_per_suite(Config).

init_per_group(Group, Config) when Group == clustered ->
init_per_group(Group, Config) when Group == clustered orelse Group == mnesia ->
Node2 = mim2(),
Config1 = add_node_to_cluster(Node2, Config),
case is_sm_distributed() of
@@ -96,7 +115,7 @@ init_per_group(Group, _Config) when Group == clustering_two orelse Group == clus
init_per_group(_GroupName, Config) ->
escalus:create_users(Config).

end_per_group(Group, Config) when Group == clustered ->
end_per_group(Group, Config) when Group == clustered orelse Group == mnesia ->
escalus:delete_users(Config, escalus:get_users([alice, clusterguy])),
Node2 = mim2(),
remove_node_from_cluster(Node2, Config);
@@ -111,7 +130,27 @@ end_per_group(_GroupName, Config) ->
init_per_testcase(CaseName, Config) ->
escalus:init_per_testcase(CaseName, Config).

end_per_testcase(CaseName, Config) when CaseName == join_twice_using_rpc
end_per_testcase(cluster_of_three, Config) ->
Node2 = mim2(),
Node3 = mim3(),
remove_node_from_cluster(Node2, Config),
remove_node_from_cluster(Node3, Config),
escalus:end_per_testcase(cluster_of_three, Config);

end_per_testcase(CaseName, Config) when CaseName == remove_alive_from_cluster
orelse CaseName == remove_dead_from_cluster ->
Node3 = mim3(),
Node2 = mim2(),
remove_node_from_cluster(Node3, Config),
remove_node_from_cluster(Node2, Config),
escalus:end_per_testcase(CaseName, Config);

end_per_testcase(CaseName, Config) when CaseName == join_successful_prompt
orelse CaseName == join_successful_force
orelse CaseName == leave_unsuccessful_prompt
orelse CaseName == leave_unsuccessful_force
orelse CaseName == join_twice
orelse CaseName == join_twice_using_rpc
orelse CaseName == join_twice_in_parallel_using_rpc ->
Node2 = mim2(),
remove_node_from_cluster(Node2, Config),
@@ -148,6 +187,87 @@ one_to_one_message(ConfigIn) ->
%% Manage cluster commands tests
%%--------------------------------------------------------------------


join_successful_prompt(Config) ->
%% given
#{node := Node2} = RPCSpec2 = mim2(),
%% when
{_, OpCode} =
mongooseimctl_interactive("join_cluster", [atom_to_list(Node2)], "yes\n", Config),
%% then
distributed_helper:verify_result(RPCSpec2, add),
?eq(0, OpCode).

join_successful_force(Config) ->
%% given
#{node := Node2} = RPCSpec2 = mim2(),
%% when
{_, OpCode} = mongooseimctl_force("join_cluster", [atom_to_list(Node2)], "--force", Config),
%% then
distributed_helper:verify_result(RPCSpec2, add),
?eq(0, OpCode).

leave_successful_prompt(Config) ->
%% given
Node2 = mim2(),
add_node_to_cluster(Node2, Config),
%% when
{_, OpCode} = mongooseimctl_interactive("leave_cluster", [], "yes\n", Config),
%% then
distributed_helper:verify_result(Node2, remove),
?eq(0, OpCode).

leave_successful_force(Config) ->
%% given
Node2 = mim2(),
add_node_to_cluster(Node2, Config),
%% when
{_, OpCode} = mongooseimctl_force("leave_cluster", [], "-f", Config),
%% then
distributed_helper:verify_result(Node2, remove),
?eq(0, OpCode).

join_unsuccessful(Config) ->
%% given
Node2 = mim2(),
%% when
{_, OpCode} = mongooseimctl_interactive("join_cluster", [], "no\n", Config),
%% then
distributed_helper:verify_result(Node2, remove),
?ne(0, OpCode).

leave_unsuccessful(Config) ->
%% given
Node2 = mim(),
add_node_to_cluster(Node2, Config),
%% when
{_, OpCode} = mongooseimctl_interactive("leave_cluster", [], "no\n", Config),
%% then
distributed_helper:verify_result(Node2, add),
?ne(0, OpCode).

leave_but_no_cluster(Config) ->
%% given
Node2 = mim2(),
%% when
{_, OpCode} = mongooseimctl_interactive("leave_cluster", [], "yes\n", Config),
%% then
distributed_helper:verify_result(Node2, remove),
?ne(0, OpCode).

join_twice(Config) ->
%% given
#{node := Node2} = RPCSpec2 = mim2(),
%% when
{_, OpCode1} = mongooseimctl_interactive("join_cluster",
[atom_to_list(Node2)], "yes\n", Config),
{_, OpCode2} = mongooseimctl_interactive("join_cluster",
[atom_to_list(Node2)], "yes\n", Config),
%% then
distributed_helper:verify_result(RPCSpec2, add),
?eq(0, OpCode1),
?ne(0, OpCode2).

%% This function checks that it's ok to call mongoose_cluster:join/1 twice
join_twice_using_rpc(_Config) ->
%% given
@@ -194,6 +314,98 @@ leave_using_rpc(Config) ->
distributed_helper:verify_result(Node2, remove),
ok.

leave_twice(Config) ->
%% given
Node2 = mim2(),
add_node_to_cluster(Node2, Config),
%% when
{_, OpCode1} = mongooseimctl_force("leave_cluster", [], "--force", Config),
{_, OpCode2} = mongooseimctl_force("leave_cluster", [], "-f", Config),
%% then
distributed_helper:verify_result(Node2, remove),
?eq(0, OpCode1),
?ne(0, OpCode2).

cluster_of_three(Config) ->
%% given
#{node := ClusterMemberNodeName} = ClusterMember = mim(),
#{node := Node2Nodename} = Node2 = mim2(),
#{node := Node3Nodename} = Node3 = mim3(),
%% when
{_, OpCode1} = mongooseimctl_force(Node2Nodename, "join_cluster",
[atom_to_list(ClusterMemberNodeName)], "-f", Config),
{_, OpCode2} = mongooseimctl_force(Node3Nodename, "join_cluster",
[atom_to_list(ClusterMemberNodeName)], "-f", Config),
%% then
?eq(0, OpCode1),
?eq(0, OpCode2),
nodes_clustered(Node2, ClusterMember, true),
nodes_clustered(Node3, ClusterMember, true),
nodes_clustered(Node2, Node3, true).

leave_the_three(Config) ->
%% given
Timeout = timer:seconds(60),
#{node := ClusterMemberNode} = ClusterMember = mim(),
#{node := Node2Nodename} = Node2 = mim2(),
#{node := Node3Nodename} = Node3 = mim3(),
ok = rpc(Node2#{timeout => Timeout}, mongoose_cluster, join, [ClusterMemberNode]),
ok = rpc(Node3#{timeout => Timeout}, mongoose_cluster, join, [ClusterMemberNode]),
%% when
{_, OpCode1} = mongooseimctl_interactive(Node2Nodename, "leave_cluster", [], "yes\n", Config),
nodes_clustered(Node2, ClusterMember, false),
nodes_clustered(Node3, ClusterMember, true),
{_, OpCode2} = mongooseimctl_interactive(Node3Nodename, "leave_cluster", [], "yes\n", Config),
%% then
nodes_clustered(Node3, ClusterMember, false),
nodes_clustered(Node2, Node3, false),
?eq(0, OpCode1),
?eq(0, OpCode2).

remove_dead_from_cluster(Config) ->
% given
Timeout = timer:seconds(60),
#{node := Node1Nodename} = Node1 = mim(),
#{node := _Node2Nodename} = Node2 = mim2(),
#{node := Node3Nodename} = Node3 = mim3(),
ok = rpc(Node2#{timeout => Timeout}, mongoose_cluster, join, [Node1Nodename]),
ok = rpc(Node3#{timeout => Timeout}, mongoose_cluster, join, [Node1Nodename]),
%% when
distributed_helper:stop_node(Node3Nodename, Config),
{_, OpCode1} = mongooseimctl_interactive(Node1, "remove_from_cluster",
[atom_to_list(Node3Nodename)], "yes\n", Config),
%% then
?eq(0, OpCode1),
% Node3 is down, hence it's not in the mnesia cluster
have_node_in_mnesia(Node1, Node2, true),
have_node_in_mnesia(Node1, Node3, false),
have_node_in_mnesia(Node2, Node3, false),
% after the node comes back up, the nodes are clustered again
distributed_helper:start_node(Node3Nodename, Config),
have_node_in_mnesia(Node1, Node3, true),
have_node_in_mnesia(Node2, Node3, true).

remove_alive_from_cluster(Config) ->
% given
Timeout = timer:seconds(60),
#{node := Node1Name} = Node1 = mim(),
#{node := Node2Name} = Node2 = mim2(),
Node3 = mim3(),
ok = rpc(Node2#{timeout => Timeout}, mongoose_cluster, join, [Node1Name]),
ok = rpc(Node3#{timeout => Timeout}, mongoose_cluster, join, [Node1Name]),
%% when
%% Node2 is still running
{_, OpCode1} = mongooseimctl_force(Node1Name, "remove_from_cluster",
[atom_to_list(Node2Name)], "-f", Config),
%% then
?eq(0, OpCode1),
% Node2 was removed while still alive, so it's no longer in the mnesia cluster
have_node_in_mnesia(Node1, Node3, true),
have_node_in_mnesia(Node1, Node2, false),
have_node_in_mnesia(Node3, Node2, false).



%% Helpers
mongooseimctl_interactive(C, A, R, Config) ->
#{node := DefaultNode} = mim(),
@@ -210,6 +422,12 @@ normalize_args(Args) ->
Arg
end, Args).

mongooseimctl_force(Command, Args, ForceFlag, Config) ->
#{node := DefaultNode} = mim(),
mongooseimctl_force(DefaultNode, Command, Args, ForceFlag, Config).
mongooseimctl_force(Node, Cmd, Args, ForceFlag, Config) ->
mongooseimctl_helper:mongooseimctl(Node, Cmd, [ForceFlag | Args], Config).

ctl_path_atom(NodeName) ->
CtlString = atom_to_list(NodeName) ++ "_ctl",
list_to_atom(CtlString).
3 changes: 2 additions & 1 deletion big_tests/tests/graphql_account_SUITE.erl
@@ -490,8 +490,9 @@ admin_import_users_http(Config) ->
<<"notAllowed">> => null},
get_ok_value([data, account, importUsers], Resp2)),
Domain = domain_helper:domain(),
JID = mongoose_helper:make_jid(<<"john">>, Domain),
mongoose_helper:wait_until(fun() ->
rpc(mim(), mongoose_account_api, check_account, [<<"john">>, Domain])
rpc(mim(), mongoose_account_api, check_account, [JID])
end,
{ok, io_lib:format("User ~s exists", [<<"john@", Domain/binary>>])},
#{time_left => timer:seconds(20),
2 changes: 1 addition & 1 deletion doc/migrations/6.1.0_6.2.0.md
@@ -7,4 +7,4 @@ If you want to switch to CETS, see [`internal_databases`](../configuration/inter

# Transition to New CLI Commands

Legacy CLI commands previously marked as deprecated have now been removed. The users are encouraged to explore the new GraphQL-based CLI. It is recommended to transition to the new CLI commands **prior to the next system upgrade**. The configuration options `general.mongooseimctl_access_commands` and `services.service_admin_extra` related to the legacy CLI were also removed.
Legacy CLI commands previously marked as deprecated have now been removed. The users are encouraged to explore the new GraphQL-based CLI. It is recommended to transition to the new CLI commands **prior to the next system upgrade**. The configuration options `general.mongooseimctl_access_commands` and `services.service_admin_extra` related to the legacy CLI were also removed. **You need to remove them** from your configuration file unless you have already done so.
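To verify whether these legacy options are still present before upgrading, a quick check from the shell is enough. This is a minimal sketch; the config path is an assumption — point it at your actual `mongooseim.toml`:

```sh
# Hedged sketch: look for leftover legacy-CLI options in the configuration file.
# The path below is an assumption - adjust it to your installation.
grep -nE 'mongooseimctl_access_commands|service_admin_extra' etc/mongooseim.toml
```

Any matching sections should be deleted before starting the upgraded node.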
4 changes: 4 additions & 0 deletions rel/files/mongooseimctl
@@ -176,6 +176,10 @@ help ()
echo " debug Attach an interactive Erlang shell to a running MongooseIM node"
echo " live Start MongooseIM node in live (interactive) mode"
echo " foreground Start MongooseIM node in foreground (non-interactive) mode"
echo "MongooseIM cluster management commands:"
echo " join_cluster other_node_name Add current node to cluster"
echo " leave_cluster Make the current node leave the cluster"
echo " remove_from_cluster other_node_name Remove dead node from the cluster"
echo "Extra Commands:"
echo " bootstrap Executes MongooseIM init scripts (used for initial configuration)"
echo " print_install_dir Prints path to MongooseIM release directory"
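The help entries added above correspond to the cluster-management commands restored by this commit and exercised in the test suite. A hedged usage sketch — the node names are placeholders, and `join_cluster`/`leave_cluster` prompt for confirmation unless `--force`/`-f` is passed, as the tests above show:

```sh
# Join the current node to the cluster that mongooseim@host1 belongs to
mongooseimctl join_cluster mongooseim@host1

# Make the current node leave its cluster without an interactive prompt
mongooseimctl leave_cluster --force

# From a remaining cluster member, remove a stopped node
mongooseimctl remove_from_cluster mongooseim@host2
```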
