diff --git a/big_tests/tests/ejabberd_node_utils.erl b/big_tests/tests/ejabberd_node_utils.erl
index 60cd4b8bc8..4980cb9679 100644
--- a/big_tests/tests/ejabberd_node_utils.erl
+++ b/big_tests/tests/ejabberd_node_utils.erl
@@ -19,6 +19,7 @@
 -export([init/1, init/2,
          node_cwd/2,
          restart_application/1, restart_application/2,
+         ensure_started_application/1, ensure_started_application/2,
          call_fun/3, call_fun/4,
          call_ctl/2, call_ctl/3,
          call_ctl_with_args/3,
@@ -26,6 +27,7 @@
          backup_config_file/1, backup_config_file/2,
          restore_config_file/1, restore_config_file/2,
          modify_config_file/2, modify_config_file/4,
+         replace_config_file/1, replace_config_file/2,
          get_cwd/2]).
 
 -include_lib("common_test/include/ct.hrl").
@@ -73,9 +75,18 @@ restart_application(ApplicationName) ->
 
 -spec restart_application(node(), atom()) -> ok.
 restart_application(Node, ApplicationName) ->
-    ok = ejabberd_node_utils:call_fun(Node, application, stop, [ApplicationName]),
-    ok = ejabberd_node_utils:call_fun(Node, application, start, [ApplicationName]).
+    ok = call_fun(Node, application, stop, [ApplicationName]),
+    ok = call_fun(Node, application, start, [ApplicationName]).
 
+-spec ensure_started_application(atom()) -> ok.
+ensure_started_application(ApplicationName) ->
+    Node = distributed_helper:mim(),
+    ensure_started_application(Node#{timeout => timer:seconds(30)}, ApplicationName).
+
+-spec ensure_started_application(distributed_helper:node_spec(), atom()) -> ok.
+ensure_started_application(Node, ApplicationName) ->
+    call_fun(Node, application, stop, [ApplicationName]),
+    ok = call_fun(Node, application, start, [ApplicationName]).
 
 -spec backup_config_file(ct_config()) -> ct_config().
 backup_config_file(Config) ->
@@ -166,7 +177,17 @@ modify_config_file(Host, VarsToChange, Config, Format) ->
     RPCSpec = distributed_helper:Host(),
     NewCfgPath = update_config_path(RPCSpec, Format),
-    ok = ejabberd_node_utils:call_fun(RPCSpec, file, write_file, [NewCfgPath, TemplatedConfig]).
+ ok = call_fun(RPCSpec, file, write_file, [NewCfgPath, TemplatedConfig]). + +-spec replace_config_file(binary()) -> ok. +replace_config_file(TomlContent) -> + Node = distributed_helper:mim(), + replace_config_file(Node, TomlContent). + +-spec replace_config_file(distributed_helper:node_spec(), binary()) -> ok. +replace_config_file(RPCSpec, TomlContent) -> + NewCfgPath = update_config_path(RPCSpec, toml), + ok = call_fun(RPCSpec, file, write_file, [NewCfgPath, TomlContent]). read_vars(File) -> {ok, Terms} = file:consult(File), @@ -204,10 +225,10 @@ update_config_path(RPCSpec, Format) -> end. get_config_path(RPCSpec) -> - ejabberd_node_utils:call_fun(RPCSpec, os, getenv, ["EJABBERD_CONFIG_PATH"]). + call_fun(RPCSpec, os, getenv, ["EJABBERD_CONFIG_PATH"]). set_config_path(RPCSpec, Path) -> - ejabberd_node_utils:call_fun(RPCSpec, os, putenv, ["EJABBERD_CONFIG_PATH", Path]). + call_fun(RPCSpec, os, putenv, ["EJABBERD_CONFIG_PATH", Path]). vars_file(toml) -> "vars-toml.config". diff --git a/big_tests/tests/persistent_cluster_id_SUITE.erl b/big_tests/tests/persistent_cluster_id_SUITE.erl index 0478deb79a..d3010205ac 100644 --- a/big_tests/tests/persistent_cluster_id_SUITE.erl +++ b/big_tests/tests/persistent_cluster_id_SUITE.erl @@ -16,6 +16,7 @@ %% test cases -export([ + can_start_with_cluster_id_in_cets_only/1, all_nodes_in_the_cluster_have_the_same_cluster_id/1, id_persists_after_restart/1, same_cluster_id_in_backend_and_mnesia/1, @@ -30,6 +31,7 @@ all() -> [ + {group, cets}, {group, mnesia}, {group, rdbms} ]. @@ -46,6 +48,7 @@ tests() -> groups() -> [ + {cets, [], [can_start_with_cluster_id_in_cets_only]}, {mnesia, [], [all_nodes_in_the_cluster_have_the_same_cluster_id]}, {rdbms, [], tests()} ]. @@ -65,15 +68,21 @@ end_per_suite(_Config) -> group(_Groupname) -> []. 
-init_per_group(mnesia, Config) ->
-    case not mongoose_helper:is_rdbms_enabled(host_type()) of
+init_per_group(rdbms, Config) ->
+    case mongoose_helper:is_rdbms_enabled(host_type()) of
         true -> Config;
+        false -> {skip, require_rdbms}
+    end;
+init_per_group(cets, Config) ->
+    case not mongoose_helper:is_rdbms_enabled(host_type()) of
+        true ->
+            Config;
         false -> {skip, require_no_rdbms}
     end;
-init_per_group(_Groupname, Config) ->
-    case mongoose_helper:is_rdbms_enabled(host_type()) of
+init_per_group(mnesia, Config) ->
+    case not mongoose_helper:is_rdbms_enabled(host_type()) of
         true -> Config;
-        false -> {skip, require_rdbms}
+        false -> {skip, require_no_rdbms}
     end.
 
 end_per_group(_Groupname, _Config) ->
@@ -82,12 +91,19 @@ end_per_group(_Groupname, _Config) ->
 %%%===================================================================
 %%% Testcase specific setup/teardown
 %%%===================================================================
+init_per_testcase(can_start_with_cluster_id_in_cets_only, Config) ->
+    Config1 = ejabberd_node_utils:init(Config),
+    ejabberd_node_utils:backup_config_file(Config1),
+    Config1;
 init_per_testcase(all_nodes_in_the_cluster_have_the_same_cluster_id, Config) ->
    distributed_helper:add_node_to_cluster(mim2(), Config),
    Config;
 init_per_testcase(_TestCase, Config) ->
    Config.
 
+end_per_testcase(can_start_with_cluster_id_in_cets_only, Config) -> + ejabberd_node_utils:restore_config_file(Config), + ejabberd_node_utils:ensure_started_application(mongooseim); end_per_testcase(all_nodes_in_the_cluster_have_the_same_cluster_id, Config) -> distributed_helper:remove_node_from_cluster(mim2(), Config), Config; @@ -97,6 +113,20 @@ end_per_testcase(_TestCase, _Config) -> %%%=================================================================== %%% Individual Test Cases (from groups() definition) %%%=================================================================== + +can_start_with_cluster_id_in_cets_only(_Config) -> + Toml = "[general] + hosts = [\"example.com\"] + default_server_domain = \"example.com\" + sm_backend = \"cets\" + s2s_backend = \"cets\" + component_backend = \"cets\" + [internal_databases.cets] + backend = \"file\" + node_list_file = \"etc/cets_disco.txt\"", + ejabberd_node_utils:replace_config_file(Toml), + ejabberd_node_utils:restart_application(mongooseim). + all_nodes_in_the_cluster_have_the_same_cluster_id(_Config) -> {ok, ID_mim1} = mongoose_helper:successful_rpc( mim(), mongoose_cluster_id, get_cached_cluster_id, []), diff --git a/doc/authentication-methods/dummy.md b/doc/authentication-methods/dummy.md index 61b2a4207c..ae838e2a8f 100644 --- a/doc/authentication-methods/dummy.md +++ b/doc/authentication-methods/dummy.md @@ -28,6 +28,6 @@ where `Base` is `base_time` and `Variance` is `variance`, as configured below. 
```toml [auth.dummy] - base = 5 + base_time = 5 variance = 10 ``` diff --git a/rel/files/mongooseim b/rel/files/mongooseim index a3da0ec141..cf6659e40d 100755 --- a/rel/files/mongooseim +++ b/rel/files/mongooseim @@ -18,7 +18,7 @@ EJABBERD_STATUS_PATH="{{mongooseim_status_dir}}/status" export EJABBERD_STATUS_PATH="$EJABBERD_STATUS_PATH" EJABBERD_SO_PATH=`ls -dt "$RUNNER_BASE_DIR"/lib/mongooseim-*/priv/lib | head -1` -EJABBERD_CONFIG_PATH="$RUNNER_ETC_DIR"/mongooseim.${MONGOOSEIM_CONFIG_FORMAT:-toml} +EJABBERD_CONFIG_PATH=${EJABBERD_CONFIG_PATH:-$RUNNER_ETC_DIR/mongooseim.${MONGOOSEIM_CONFIG_FORMAT:-toml}} export EJABBERD_SO_PATH export EJABBERD_CONFIG_PATH diff --git a/src/mongoose_cluster_id.erl b/src/mongoose_cluster_id.erl index 82d7399720..bf523aaad3 100644 --- a/src/mongoose_cluster_id.erl +++ b/src/mongoose_cluster_id.erl @@ -2,23 +2,18 @@ -include("mongoose.hrl"). --export([ - start/0, - get_cached_cluster_id/0, - get_backend_cluster_id/0 - ]). +-export([start/0, get_cached_cluster_id/0]). % For testing purposes only --export([clean_table/0, clean_cache/0]). +-export([clean_table/0, clean_cache/0, get_backend_cluster_id/0]). -ignore_xref([clean_table/0, clean_cache/0, get_backend_cluster_id/0]). -record(mongoose_cluster_id, {key :: atom(), value :: cluster_id()}). -type cluster_id() :: binary(). -type maybe_cluster_id() :: {ok, cluster_id()} | {error, any()}. --type mongoose_backend() :: rdbms - | mnesia - | cets. +-type persistent_backend() :: rdbms | {error, none}. +-type volatile_backend() :: mnesia | cets. -spec start() -> maybe_cluster_id(). start() -> @@ -26,23 +21,23 @@ start() -> %% Currently, we have to do an SQL query each time we restart MongooseIM %% application in the tests. 
init_cache(), - Backend = which_backend_available(), - IntBackend = which_volatile_backend_available(), - maybe_prepare_queries(Backend), + PersistentBackend = which_persistent_backend_enabled(), + VolatileBackend = which_volatile_backend_available(), + maybe_prepare_queries(PersistentBackend), cets_long:run_tracked(#{task => wait_for_any_backend, - backend => Backend, volatile_backend => IntBackend}, - fun() -> wait_for_any_backend(Backend, IntBackend) end), - CachedRes = get_cached_cluster_id(IntBackend), - BackendRes = get_backend_cluster_id(), + backend => PersistentBackend, volatile_backend => VolatileBackend}, + fun() -> wait_for_any_backend(PersistentBackend, VolatileBackend) end), + CachedRes = get_cached_cluster_id(VolatileBackend), + BackendRes = get_backend_cluster_id(PersistentBackend), case {CachedRes, BackendRes} of {{ok, ID}, {ok, ID}} -> {ok, ID}; {{ok, ID}, {error, _}} -> - set_new_cluster_id(ID, Backend); + persist_cluster_id(ID, PersistentBackend); {{error, _}, {ok, ID}} -> - set_new_cluster_id(ID, IntBackend); + cache_cluster_id(ID, VolatileBackend); {{error, _}, {error, _}} -> - make_and_set_new_cluster_id(); + make_and_set_new_cluster_id(PersistentBackend, VolatileBackend); {{ok, CachedID}, {ok, BackendID}} -> ?LOG_ERROR(#{what => cluster_id_setup_conflict, text => <<"Mnesia and Backend have different cluster IDs">>, @@ -53,12 +48,14 @@ start() -> %% If RDBMS is available before CETS - it is enough for us to continue %% the starting procedure -wait_for_any_backend(Backend, IntBackend) -> +wait_for_any_backend(PersistentBackend, VolatileBackend) -> Alias = erlang:alias([reply]), - Pids = lists:append([wait_for_backend_promise(B, Alias) || B <- lists:sort([Backend, IntBackend])]), + Pids = lists:append([wait_for_backend_promise(B, Alias) + || B <- lists:sort([PersistentBackend, VolatileBackend])]), wait_for_first_reply(Alias), %% Interrupt other waiting calls to reduce the logging noise [erlang:exit(Pid, shutdown) || Pid <- Pids], + 
clear_pending_replies(Alias), ok. wait_for_first_reply(Alias) -> @@ -67,9 +64,13 @@ wait_for_first_reply(Alias) -> ok end. -wait_for_backend_promise(mnesia, Alias) -> - Alias ! {ready, Alias}, - []; +clear_pending_replies(Alias) -> + receive + {ready, Alias} -> clear_pending_replies(Alias) + after + 0 -> ok + end. + wait_for_backend_promise(cets, Alias) -> [spawn(fun() -> %% We have to do it, because we want to read from across the cluster @@ -81,7 +82,10 @@ wait_for_backend_promise(rdbms, Alias) -> [spawn(fun() -> cets_long:run_tracked(#{task => wait_for_rdbms}, fun() -> wait_for_rdbms() end), Alias ! {ready, Alias} - end)]. + end)]; +wait_for_backend_promise(_, Alias) -> + Alias ! {ready, Alias}, + []. wait_for_rdbms() -> case get_backend_cluster_id(rdbms) of @@ -121,16 +125,14 @@ get_cached_cluster_id(cets) -> %% ==================================================================== -spec get_backend_cluster_id() -> maybe_cluster_id(). get_backend_cluster_id() -> - get_backend_cluster_id(which_backend_available()). - --spec set_new_cluster_id(cluster_id()) -> maybe_cluster_id(). -set_new_cluster_id(ID) -> - set_new_cluster_id(ID, which_backend_available()). + get_backend_cluster_id(which_persistent_backend_enabled()). --spec make_and_set_new_cluster_id() -> maybe_cluster_id(). -make_and_set_new_cluster_id() -> - NewID = make_cluster_id(), - set_new_cluster_id(NewID). +-spec make_and_set_new_cluster_id(persistent_backend(), volatile_backend()) -> + maybe_cluster_id(). +make_and_set_new_cluster_id(PersistentBackend, VolatileBackend) -> + NewID = make_cluster_id(PersistentBackend), + persist_cluster_id(NewID, PersistentBackend), + cache_cluster_id(NewID, VolatileBackend). %% ==================================================================== %% Internal functions @@ -149,31 +151,36 @@ init_cache(cets) -> cets:start(cets_cluster_id, #{}), cets_discovery:add_table(mongoose_cets_discovery, cets_cluster_id). --spec maybe_prepare_queries(mongoose_backend()) -> ok. 
-maybe_prepare_queries(mnesia) -> ok; +-spec maybe_prepare_queries(persistent_backend()) -> any(). maybe_prepare_queries(rdbms) -> mongoose_rdbms:prepare(cluster_insert_new, mongoose_cluster_id, [v], <<"INSERT INTO mongoose_cluster_id(k,v) VALUES ('cluster_id', ?)">>), mongoose_rdbms:prepare(cluster_select, mongoose_cluster_id, [], - <<"SELECT v FROM mongoose_cluster_id WHERE k='cluster_id'">>), + <<"SELECT v FROM mongoose_cluster_id WHERE k='cluster_id'">>); +maybe_prepare_queries(_) -> ok. -spec execute_cluster_insert_new(binary()) -> mongoose_rdbms:query_result(). execute_cluster_insert_new(ID) -> mongoose_rdbms:execute_successfully(global, cluster_insert_new, [ID]). --spec make_cluster_id() -> cluster_id(). -make_cluster_id() -> - uuid:uuid_to_string(uuid:get_v4(), binary_standard). +%% If there's no persistent backend, cluster IDs will be recreated on every cluster restart, +%% hence prefix them as ephemeral to re-classify them later. +-spec make_cluster_id(persistent_backend()) -> cluster_id(). +make_cluster_id(rdbms) -> + uuid:uuid_to_string(uuid:get_v4(), binary_standard); +make_cluster_id({error, none}) -> + <<"ephemeral-", (uuid:uuid_to_string(uuid:get_v4(), binary_standard))/binary>>. -%% Which backend is enabled --spec which_backend_available() -> mongoose_backend(). -which_backend_available() -> +%% Which persistent backend is enabled +-spec which_persistent_backend_enabled() -> persistent_backend(). +which_persistent_backend_enabled() -> case mongoose_wpool:get_pool_settings(rdbms, global, default) of - undefined -> which_volatile_backend_available(); + undefined -> {error, none}; _ -> rdbms end. +-spec which_volatile_backend_available() -> volatile_backend(). which_volatile_backend_available() -> case mongoose_config:get_opt(internal_databases) of #{cets := _} -> @@ -182,11 +189,10 @@ which_volatile_backend_available() -> mnesia end. --spec set_new_cluster_id(cluster_id(), mongoose_backend()) -> ok | {error, any()}. 
-set_new_cluster_id(ID, rdbms) -> +-spec persist_cluster_id(cluster_id(), persistent_backend()) -> maybe_cluster_id(). +persist_cluster_id(ID, rdbms) -> try execute_cluster_insert_new(ID) of {updated, 1} -> - set_new_cluster_id(ID, which_volatile_backend_available()), {ok, ID} catch Class:Reason:Stacktrace -> @@ -196,7 +202,11 @@ set_new_cluster_id(ID, rdbms) -> class => Class, reason => Reason, stacktrace => Stacktrace}), {error, {Class, Reason}} end; -set_new_cluster_id(ID, mnesia) -> +persist_cluster_id(ID, {error, none}) -> + {ok, ID}. + +-spec cache_cluster_id(cluster_id(), volatile_backend()) -> maybe_cluster_id(). +cache_cluster_id(ID, mnesia) -> T = fun() -> mnesia:write(#mongoose_cluster_id{key = cluster_id, value = ID}) end, case mnesia:transaction(T) of {atomic, ok} -> @@ -204,12 +214,12 @@ set_new_cluster_id(ID, mnesia) -> {aborted, Reason} -> {error, Reason} end; -set_new_cluster_id(ID, cets) -> +cache_cluster_id(ID, cets) -> cets:insert_serial(cets_cluster_id, {cluster_id, ID}), {ok, ID}. %% Get cluster ID --spec get_backend_cluster_id(mongoose_backend()) -> maybe_cluster_id(). +-spec get_backend_cluster_id(persistent_backend()) -> maybe_cluster_id(). get_backend_cluster_id(rdbms) -> try mongoose_rdbms:execute_successfully(global, cluster_select, []) of {selected, [{ID}]} -> {ok, ID}; @@ -221,15 +231,13 @@ get_backend_cluster_id(rdbms) -> class => Class, reason => Reason, stacktrace => Stacktrace}), {error, {Class, Reason}} end; -get_backend_cluster_id(mnesia) -> - get_cached_cluster_id(mnesia); -get_backend_cluster_id(cets) -> - get_cached_cluster_id(cets). +get_backend_cluster_id({error, none}) -> + {error, no_value_in_backend}. clean_table() -> - clean_table(which_backend_available()). + clean_table(which_persistent_backend_enabled()). --spec clean_table(mongoose_backend()) -> ok | {error, any()}. +-spec clean_table(persistent_backend()) -> ok | {error, any()}. 
clean_table(rdbms) -> SQLQuery = [<<"TRUNCATE TABLE mongoose_cluster_id;">>], try mongoose_rdbms:sql_query(global, SQLQuery) of