From 12f325c89caa65dbba346cacbd07d2db439aa2b4 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Tue, 31 May 2022 10:29:09 +0200 Subject: [PATCH 1/9] Add graphql api for metrics --- big_tests/default.spec | 1 + big_tests/dynamic_domains.spec | 1 + big_tests/tests/graphql_metric_SUITE.erl | 104 ++++++++++ priv/graphql/schemas/admin/admin_schema.gql | 2 + priv/graphql/schemas/admin/metric.gql | 182 ++++++++++++++++++ .../admin/mongoose_graphql_admin_query.erl | 4 +- .../mongoose_graphql_metric_admin_query.erl | 145 ++++++++++++++ src/graphql/mongoose_graphql.erl | 1 + src/graphql/mongoose_graphql_enum.erl | 6 +- src/graphql/mongoose_graphql_union.erl | 25 ++- 10 files changed, 466 insertions(+), 5 deletions(-) create mode 100644 big_tests/tests/graphql_metric_SUITE.erl create mode 100644 priv/graphql/schemas/admin/metric.gql create mode 100644 src/graphql/admin/mongoose_graphql_metric_admin_query.erl diff --git a/big_tests/default.spec b/big_tests/default.spec index a7371fa2070..35b7903a2fa 100644 --- a/big_tests/default.spec +++ b/big_tests/default.spec @@ -37,6 +37,7 @@ {suites, "tests", graphql_stanza_SUITE}. {suites, "tests", graphql_vcard_SUITE}. {suites, "tests", graphql_http_upload_SUITE}. +{suites, "tests", graphql_metric_SUITE}. {suites, "tests", inbox_SUITE}. {suites, "tests", inbox_extensions_SUITE}. {suites, "tests", jingle_SUITE}. diff --git a/big_tests/dynamic_domains.spec b/big_tests/dynamic_domains.spec index 1f3db9a14a6..09207261d54 100644 --- a/big_tests/dynamic_domains.spec +++ b/big_tests/dynamic_domains.spec @@ -53,6 +53,7 @@ {suites, "tests", graphql_stanza_SUITE}. {suites, "tests", graphql_vcard_SUITE}. {suites, "tests", graphql_http_upload_SUITE}. +{suites, "tests", graphql_metric_SUITE}. {suites, "tests", inbox_SUITE}. 
diff --git a/big_tests/tests/graphql_metric_SUITE.erl b/big_tests/tests/graphql_metric_SUITE.erl new file mode 100644 index 00000000000..908d3bb0cfe --- /dev/null +++ b/big_tests/tests/graphql_metric_SUITE.erl @@ -0,0 +1,104 @@ +-module(graphql_metric_SUITE). + + -include_lib("common_test/include/ct.hrl"). + -include_lib("eunit/include/eunit.hrl"). + -include_lib("exml/include/exml.hrl"). + + -compile([export_all, nowarn_export_all]). + +-import(distributed_helper, [mim/0, require_rpc_nodes/1, rpc/4]). +-import(graphql_helper, [execute_auth/2, init_admin_handler/1]). + +suite() -> + require_rpc_nodes([mim]) ++ escalus:suite(). + +all() -> + [{group, metrics}]. + +groups() -> + [{metrics, [], metrics_handler()}]. + +metrics_handler() -> + [get_metrics, + get_metrics_as_dicts, + get_metrics_as_dicts_with_key_one]. + +init_per_suite(Config) -> + escalus:init_per_suite(init_admin_handler(Config)). + +end_per_suite(Config) -> + escalus_fresh:clean(), + escalus:end_per_suite(Config). + +init_per_testcase(CaseName, Config) -> + escalus:init_per_testcase(CaseName, Config). + +end_per_testcase(CaseName, Config) -> + escalus:end_per_testcase(CaseName, Config). + +get_metrics(Config) -> + Vars = #{}, + Result = execute_auth(#{query => get_all_metrics_call(), variables => Vars, + operationName => <<"Q1">>}, Config), + ct:fail(Result), + ParsedResult = ok_result(<<"metric">>, <<"getMetrics">>, Result), + ?assertEqual([], ParsedResult). + +get_metrics_as_dicts(Config) -> + Vars = #{}, + Result = execute_auth(#{query => get_all_metrics_as_dicts_call(), variables => Vars, + operationName => <<"Q1">>}, Config), + ct:fail(Result), + ParsedResult = ok_result(<<"metric">>, <<"getMetricsAsDicts">>, Result), + ?assertEqual([], ParsedResult). 
+ +get_metrics_as_dicts_with_key_one(Config) -> + Vars = #{}, + Result = execute_auth(#{query => get_all_metrics_as_dicts_with_key_one_call(), + variables => Vars, + operationName => <<"Q1">>}, Config), + ct:fail(Result), + ParsedResult = ok_result(<<"metric">>, <<"getMetricsAsDicts">>, Result), + ?assertEqual([], ParsedResult). + +get_all_metrics_call() -> + <<"query Q1 + {metric + {getMetrics { + ... on HistogramMetric + { name type n mean min max median p50 p75 p90 p95 p99 p999 } + ... on CounterMetric + { name type value ms_since_reset } + ... on SpiralMetric + { name type one count } + ... on GaugeMetric + { name type value } + ... on MergedInetStatsMetric + { name type connections recv_cnt recv_max recv_oct + send_cnt send_max send_oct send_pend } + ... on VMStatsMemoryMetric + { name type total processes_used atom_used binary ets system } + ... on VMSystemInfoMetric + { name type port_count port_limit process_count process_limit ets_limit } + ... on ProbeQueuesMetric + { name type fsm regular total } + } + } + }">>. + +get_all_metrics_as_dicts_call() -> + <<"query Q1 + {metric + {getMetricsAsDicts { name dict { key value }}}}">>. + +get_all_metrics_as_dicts_with_key_one_call() -> + <<"query Q1 + {metric + {getMetricsAsDicts(filterKeys: [\"one\"]) { name dict { key value }}}}">>. + +%% Helpers +ok_result(What1, What2, {{<<"200">>, <<"OK">>}, #{<<"data">> := Data}}) -> + maps:get(What2, maps:get(What1, Data)). + +error_result(ErrorNumber, {{<<"200">>, <<"OK">>}, #{<<"errors">> := Errors}}) -> + lists:nth(ErrorNumber, Errors).
diff --git a/priv/graphql/schemas/admin/admin_schema.gql b/priv/graphql/schemas/admin/admin_schema.gql index 08f258006d9..cc9016f5a72 100644 --- a/priv/graphql/schemas/admin/admin_schema.gql +++ b/priv/graphql/schemas/admin/admin_schema.gql @@ -30,6 +30,8 @@ type AdminQuery{ vcard: VcardAdminQuery "Private storage management" private: PrivateAdminQuery + "Metrics management" + metric: MetricAdminQuery } """ diff --git a/priv/graphql/schemas/admin/metric.gql b/priv/graphql/schemas/admin/metric.gql new file mode 100644 index 00000000000..11e8eaec20a --- /dev/null +++ b/priv/graphql/schemas/admin/metric.gql @@ -0,0 +1,182 @@ +""" +Result of a metric +""" + +enum MetricType { + histogram + counter + spiral + gauge + merged_inet_stats + vm_stats_memory + vm_system_info + probe_queues +} + +union MetricResult = HistogramMetric | CounterMetric | SpiralMetric + | GaugeMetric | MergedInetStatsMetric + | VMStatsMemoryMetric | VMSystemInfoMetric + | ProbeQueuesMetric + +type HistogramMetric { + "Metric name" + name: [String] + "Metric type" + type: MetricType + "The number of values used in the calculation" + n: Int + "Mean value" + mean: Int + "Min value" + min: Int + "Max value" + max: Int + median: Int + "50th percentile" + p50: Int + "75th percentile" + p75: Int + "90th percentile" + p90: Int + "95th percentile" + p95: Int + "99th percentile" + p99: Int + "99.9th percentile" + p999: Int +} + +type CounterMetric { + "Metric name" + name: [String] + "Metric type" + type: MetricType + "The metric value" + value: Int + "Time since last reset" + ms_since_reset: Int +} + +type GaugeMetric { + "Metric name" + name: [String] + "Metric type" + type: MetricType + "The metric value" + value: Int +} + +type SpiralMetric { + "Metric name" + name: [String] + "Metric type" + type: MetricType + "One minute value" + one: Int + "Total value" + count: Int +} + +type MergedInetStatsMetric { + "Metric name" + name: [String] + "Metric type" + type: MetricType + "Number of connections" + 
connections: Int + "Number of packets received by the socket" + recv_cnt: Int + "Size of the largest packet, in bytes, received by the socket" + recv_max: Int + "Number of bytes received by the socket" + recv_oct: Int + "Number of packets sent from the socket" + send_cnt: Int + "Size of the largest packet, in bytes, sent from the socket" + send_max: Int + "Number of bytes sent from the socket" + send_oct: Int + "Number of bytes waiting to be sent by the socket" + send_pend: Int +} + +type VMStatsMemoryMetric { + "Metric name" + name: [String] + "Metric type" + type: MetricType + "The total amount of memory in bytes currently allocated (processes_used + system)" + total: Int + "The total amount of memory in bytes allocated for Erlang processes" + processes_used: Int + "The total amount of memory in bytes allocated for atoms" + atom_used: Int + "The total amount of memory in bytes allocated for binaries" + binary: Int + "The total amount of memory in bytes allocated for ETS tables" + ets: Int + "The total amount of memory in bytes allocated for the emulator" + system: Int +} + +type VMSystemInfoMetric { + "Metric name" + name: [String] + "Metric type" + type: MetricType + "Current number of open Erlang ports" + port_count: Int + "Maximum allowed number of open Erlang ports" + port_limit: Int + "Current number of Erlang processes" + process_count: Int + "Maximum allowed number of Erlang processes" + process_limit: Int + "Maximum number of ETS tables" + ets_limit: Int +} + +type ProbeQueuesMetric { + "Metric name" + name: [String] + "Metric type" + type: MetricType + "Number of messages in p1_fsm queue" + fsm: Int + "Number of messages in the erlang process message queues" + regular: Int + "Total number of messages (fsm + regular)" + total: Int +} + +type MetricDictEntry { + "The name of the metric key (i.e. 
one, count, value)" + key: String + "Metric value" + value: Int +} + +type MetricDictResult { + "Metric name" + name: [String] + "A list of keys and values" + dict: [MetricDictEntry] +} + +""" +Allow admin to get the metric values +""" +type MetricAdminQuery @protected{ + """ + Match metrics using a name pattern and return the metric values. + Return all metrics if the name is not provided. + Name is a list of name segments or an underscore (i.e. path). + """ + getMetrics(name: [String]): [MetricResult] + """ + Get metrics without using graphql unions. + Optionally returns only specified keys + (i.e. filterKeys: ["one"] only returns key "one", but not key "count") + """ + getMetricsAsDicts(name: [String], filterKeys: [String]): [MetricDictResult] +} diff --git a/src/graphql/admin/mongoose_graphql_admin_query.erl b/src/graphql/admin/mongoose_graphql_admin_query.erl index 4543b86c5bc..5ce01ef7e56 100644 --- a/src/graphql/admin/mongoose_graphql_admin_query.erl +++ b/src/graphql/admin/mongoose_graphql_admin_query.erl @@ -28,4 +28,6 @@ execute(_Ctx, _Obj, <<"session">>, _Args) -> execute(_Ctx, _Obj, <<"stanza">>, _Args) -> {ok, #{}}; execute(_Ctx, _Obj, <<"vcard">>, _Args) -> - {ok, vcard}. + {ok, vcard}; +execute(_Ctx, _Obj, <<"metric">>, _Args) -> + {ok, metric}. diff --git a/src/graphql/admin/mongoose_graphql_metric_admin_query.erl b/src/graphql/admin/mongoose_graphql_metric_admin_query.erl new file mode 100644 index 00000000000..ba5a1bc6d9e --- /dev/null +++ b/src/graphql/admin/mongoose_graphql_metric_admin_query.erl @@ -0,0 +1,145 @@ +-module(mongoose_graphql_metric_admin_query). +-behaviour(mongoose_graphql). + +-export([execute/4]). + +-ignore_xref([execute/4]). + +-include("../mongoose_graphql_types.hrl"). + +-import(mongoose_graphql_helper, [make_error/2, format_result/2]). + +-type metric_result() :: term(). 
+ +execute(_Ctx, _Obj, <<"getMetrics">>, Args) -> + get_metrics(Args); +execute(_Ctx, _Obj, <<"getMetricsAsDicts">>, Args) -> + get_metrics_as_dicts(Args). + +-spec get_metrics(mongoose_graphql:args()) -> + {ok, [metric_result()]} | {error, resolver_error()}. +get_metrics(Args) -> + Name = maps:get(<<"name">>, Args, []), + Values = exometer:get_values(prepare_name(Name)), + {ok, lists:map(fun make_metric_result/1, Values)}. + +get_metrics_as_dicts(Args) -> + Name = maps:get(<<"name">>, Args, []), + FilterKeys = prepare_keys(maps:get(<<"filterKeys">>, Args, null)), + Values = exometer:get_values(prepare_name(Name)), + {ok, [make_metric_dict_result(V, FilterKeys) || V <- Values]}. + +prepare_keys([]) -> + null; +prepare_keys(null) -> + null; +prepare_keys(Keys) -> + lists:map(fun prepare_key/1, Keys). + +prepare_key(X) when is_binary(X) -> + binary_to_atom(X); +prepare_key(X) when is_integer(X) -> %% For percentiles + X. + +prepare_name(null) -> + []; +prepare_name([<<"global">> | T]) -> + [global | prepare_name2(T)]; +prepare_name([H | T]) -> + [binary_to_atom(H) | prepare_name2(T)]; +prepare_name([]) -> + []. + +prepare_name2(Segments) -> + lists:map(fun binary_to_atom/1, Segments). + +make_metric_result({Name, Dict}) -> + PreparedName = format_name(Name), + Map = format_dict(Dict), + {ok, Map#{<<"name">> => PreparedName}}. + +make_metric_dict_result({Name, Dict}, FilterKeys) -> + PreparedName = format_name(Name), + {ok, #{<<"name">> => PreparedName, <<"dict">> => format_dict_entries(Dict, FilterKeys)}}. + +format_dict_entries(Dict, FilterKeys) -> + [{ok, #{<<"key">> => Key, <<"value">> => Value}} + || {Key, Value} <- filter_keys(Dict, FilterKeys)]. + +filter_keys(Dict, null) -> + Dict; +filter_keys(Dict, FilterKeys) -> + [KV || KV = {Key, _} <- Dict, lists:member(Key, FilterKeys)]. + +format_name(Name) -> + lists:map(fun format_name_segment/1, Name). 
+ +format_name_segment(Segment) when is_atom(Segment) -> + {ok, atom_to_binary(Segment)}; +format_name_segment(Segment) when is_binary(Segment) -> + {ok, Segment}. + +format_dict(Dict) -> + format_dict2(maps:from_list(Dict)). + +format_dict2(#{one := _} = Dict) -> + format_spiral(Dict); +format_dict2(#{ms_since_reset := _} = Dict) -> + format_counter(Dict); +format_dict2(#{value := _} = Dict) -> + format_gauge(Dict); +format_dict2(#{median := _} = Dict) -> + format_histogram(Dict); +format_dict2(#{connections := _, recv_cnt := _} = Dict) -> + format_merged_inet_stats(Dict); +format_dict2(#{processes_used := _} = Dict) -> + format_vm_stats_memory(Dict); +format_dict2(#{port_count := _} = Dict) -> + format_vm_system_info(Dict); +format_dict2(#{fsm := _, regular := _} = Dict) -> + format_probe_queues(Dict). + +format_spiral(#{one := One, count := Count}) -> + #{<<"type">> => <<"spiral">>, <<"one">> => One, <<"count">> => Count}. + +format_counter(#{value := Value, ms_since_reset := MS}) -> + #{<<"type">> => <<"counter">>, <<"value">> => Value, <<"ms_since_reset">> => MS}. + +format_gauge(#{value := Value}) -> + #{<<"type">> => <<"gauge">>, <<"value">> => Value}. + +format_histogram(#{n := N, mean := Mean, min := Min, max := Max, median := Median, + 50 := P50, 75 := P75, 90 := P90, 95 := P95, + 99 := P99, 999 := P999}) -> + #{<<"type">> => <<"histogram">>, <<"n">> => N, <<"mean">> => Mean, + <<"min">> => Min, <<"max">> => Max, <<"median">> => Median, + <<"p50">> => P50, <<"p75">> => P75, <<"p90">> => P90, <<"p95">> => P95, + <<"p99">> => P99, <<"p999">> => P999}. 
+ +format_merged_inet_stats(#{connections := Cons, + recv_cnt := RCnt, recv_max := RMax, recv_oct := ROct, + send_cnt := SCnt, send_max := SMax, send_oct := SOct, + send_pend := SPend}) -> + %% Metrics from a pool of connections + #{<<"type">> => <<"merged_inet_stats">>, <<"connections">> => Cons, + <<"recv_cnt">> => RCnt, recv_max => RMax, recv_oct => ROct, + <<"send_cnt">> => SCnt, send_max => SMax, send_oct => SOct, + <<"send_pend">> => SPend}. + +format_vm_stats_memory(#{total := Total, processes_used := P, + atom_used := A, binary := B, ets := E, system := S}) -> + #{<<"type">> => <<"vm_stats_memory">>, + <<"total">> => Total, <<"processes_used">> => P, <<"atom_used">> => A, + <<"binary">> => B, <<"ets">> => E, <<"system">> => S}. + +format_vm_system_info(#{port_count := PortCount, port_limit := PortLimit, + process_count := ProcessCount, process_limit := ProcessLimit, + ets_limit := EtsLimit}) -> + #{<<"type">> => <<"vm_system_info">>, + <<"port_count">> => PortCount, <<"port_limit">> => PortLimit, + <<"process_count">> => ProcessCount, <<"process_limit">> => ProcessLimit, + <<"ets_limit">> => EtsLimit}. + +format_probe_queues(#{fsm := FSM, regular := Regular, total := Total}) -> + #{<<"type">> => <<"probe_queues">>, + <<"fsm">> => FSM, <<"regular">> => Regular, <<"total">> => Total}. 
diff --git a/src/graphql/mongoose_graphql.erl b/src/graphql/mongoose_graphql.erl index 1931c6bbcf5..f93cbcaed0a 100644 --- a/src/graphql/mongoose_graphql.erl +++ b/src/graphql/mongoose_graphql.erl @@ -152,6 +152,7 @@ admin_mapping_rules() -> 'HttpUploadAdminMutation' => mongoose_graphql_http_upload_admin_mutation, 'RosterAdminMutation' => mongoose_graphql_roster_admin_mutation, 'Domain' => mongoose_graphql_domain, + 'MetricAdminQuery' => mongoose_graphql_metric_admin_query, default => mongoose_graphql_default}, interfaces => #{default => mongoose_graphql_default}, scalars => #{default => mongoose_graphql_scalar}, diff --git a/src/graphql/mongoose_graphql_enum.erl b/src/graphql/mongoose_graphql_enum.erl index d1e73a792da..948184bada6 100644 --- a/src/graphql/mongoose_graphql_enum.erl +++ b/src/graphql/mongoose_graphql_enum.erl @@ -32,7 +32,8 @@ input(<<"MUCAffiliation">>, <<"OUTCAST">>) -> {ok, outcast}; input(<<"MUCAffiliation">>, <<"ADMIN">>) -> {ok, admin}; input(<<"MUCAffiliation">>, <<"OWNER">>) -> {ok, owner}; input(<<"PrivacyClassificationTags">>, Name) -> {ok, Name}; -input(<<"TelephoneTags">>, Name) -> {ok, Name}. +input(<<"TelephoneTags">>, Name) -> {ok, Name}; +input(<<"MetricType">>, Name) -> {ok, Name}. output(<<"PresenceShow">>, Show) -> {ok, list_to_binary(string:to_upper(binary_to_list(Show)))}; @@ -67,4 +68,5 @@ output(<<"MUCAffiliation">>, Aff) -> output(<<"AddressTags">>, Name) -> {ok, Name}; output(<<"EmailTags">>, Name) -> {ok, Name}; output(<<"PrivacyClassificationTags">>, Name) -> {ok, Name}; -output(<<"TelephoneTags">>, Name) -> {ok, Name}. +output(<<"TelephoneTags">>, Name) -> {ok, Name}; +output(<<"MetricType">>, Type) -> {ok, Type}. diff --git a/src/graphql/mongoose_graphql_union.erl b/src/graphql/mongoose_graphql_union.erl index 357e8301605..813fa76327f 100644 --- a/src/graphql/mongoose_graphql_union.erl +++ b/src/graphql/mongoose_graphql_union.erl @@ -1,12 +1,33 @@ -module(mongoose_graphql_union). - -export([execute/1]). 
-ignore_xref([execute/1]). +-include("mongoose_logger.hrl"). + execute(#{<<"type">> := _, <<"binValue">> := _}) -> {ok, <<"ImageData">>}; execute(#{<<"extValue">> := _}) -> {ok, <<"External">>}; execute(#{<<"phonetic">> := _}) -> {ok, <<"Phonetic">>}; execute(#{<<"binValue">> := _}) -> {ok, <<"BinValue">>}; execute(#{<<"vcard">> := _}) -> {ok, <<"AgentVcard">>}; -execute(_Otherwise) -> {error, unknown_type}. +execute(#{<<"type">> := <<"histogram">>, <<"name">> := _, <<"p50">> := _}) -> + {ok, <<"HistogramMetric">>}; +execute(#{<<"type">> := <<"spiral">>, <<"name">> := _, <<"one">> := _}) -> + {ok, <<"SpiralMetric">>}; +execute(#{<<"type">> := <<"counter">>, <<"name">> := _, <<"ms_since_reset">> := _}) -> + {ok, <<"CounterMetric">>}; +execute(#{<<"type">> := <<"gauge">>, <<"name">> := _, <<"value">> := _}) -> + {ok, <<"GaugeMetric">>}; +execute(#{<<"type">> := <<"merged_inet_stats">>, <<"connections">> := _}) -> + {ok, <<"MergedInetStatsMetric">>}; +execute(#{<<"type">> := <<"merged_inet_stats">>, <<"connections">> := _}) -> + {ok, <<"MergedInetStatsMetric">>}; +execute(#{<<"type">> := <<"vm_stats_memory">>, <<"processes_used">> := _}) -> + {ok, <<"VMStatsMemoryMetric">>}; +execute(#{<<"type">> := <<"vm_system_info">>, <<"port_count">> := _}) -> + {ok, <<"VMSystemInfoMetric">>}; +execute(#{<<"type">> := <<"probe_queues">>, <<"fsm">> := _}) -> + {ok, <<"ProbeQueuesMetric">>}; +execute(Value) -> + ?LOG_ERROR(#{what => graphql_unknown_type, value => Value}), + {error, unknown_type}. 
From d5597a9f696d23f2d82097484e2a467d4ff48578 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Wed, 1 Jun 2022 21:49:11 +0200 Subject: [PATCH 2/9] Add tests --- big_tests/tests/graphql_metric_SUITE.erl | 156 +++++++++++++++--- priv/graphql/schemas/admin/metric.gql | 15 +- .../mongoose_graphql_metric_admin_query.erl | 53 +++++- 3 files changed, 191 insertions(+), 33 deletions(-) diff --git a/big_tests/tests/graphql_metric_SUITE.erl b/big_tests/tests/graphql_metric_SUITE.erl index 908d3bb0cfe..fd7c5f793da 100644 --- a/big_tests/tests/graphql_metric_SUITE.erl +++ b/big_tests/tests/graphql_metric_SUITE.erl @@ -1,16 +1,19 @@ -module(graphql_metric_SUITE). - -include_lib("common_test/include/ct.hrl"). - -include_lib("eunit/include/eunit.hrl"). - -include_lib("exml/include/exml.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("exml/include/exml.hrl"). - -compile([export_all, nowarn_export_all]). +-compile([export_all, nowarn_export_all]). --import(distributed_helper, [mim/0, require_rpc_nodes/1, rpc/4]). +-import(distributed_helper, [require_rpc_nodes/1, rpc/4]). -import(graphql_helper, [execute_auth/2, init_admin_handler/1]). suite() -> - require_rpc_nodes([mim]) ++ escalus:suite(). + MIM2NodeName = maps:get(node, distributed_helper:mim2()), + %% Ensure nodes are connected + mongoose_helper:successful_rpc(net_kernel, connect_node, [MIM2NodeName]), + require_rpc_nodes([mim, mim2]) ++ escalus:suite(). all() -> [{group, metrics}]. @@ -20,8 +23,12 @@ groups() -> metrics_handler() -> [get_metrics, + get_global_erlang_metrics, + get_vm_stats_memory, get_metrics_as_dicts, - get_metrics_as_dicts_with_key_one]. + get_metrics_as_dicts_with_key_one, + get_cluster_metrics, + get_mim2_cluster_metrics]. init_per_suite(Config) -> escalus:init_per_suite(init_admin_handler(Config)). @@ -37,34 +44,128 @@ end_per_testcase(CaseName, Config) -> escalus:end_per_testcase(CaseName, Config). 
get_metrics(Config) -> - Vars = #{}, - Result = execute_auth(#{query => get_all_metrics_call(), variables => Vars, - operationName => <<"Q1">>}, Config), - ct:fail(Result), + %% Get all metrics + Result = execute_auth(#{query => get_all_metrics_call(), + variables => #{}, operationName => <<"Q1">>}, Config), + ParsedResult = ok_result(<<"metric">>, <<"getMetrics">>, Result), + Map = maps:from_list([{Name, X} || X = #{<<"name">> := Name} <- ParsedResult]), + ReadsKey = [<<"global">>, <<"backends">>, <<"mod_roster">>, <<"read_roster_version">>], + Reads = maps:get(ReadsKey, Map), + %% Histogram integer keys have p prefix + check_histogram_p(Reads), + #{<<"type">> := <<"histogram">>} = Reads. + +get_global_erlang_metrics(Config) -> + %% Filter by name works + Result = execute_auth(#{query => get_metrics_call_with_args(<<"(name: [\"global\", \"erlang\"])">>), + variables => #{}, operationName => <<"Q1">>}, Config), + ParsedResult = ok_result(<<"metric">>, <<"getMetrics">>, Result), + Map = maps:from_list([{Name, X} || X = #{<<"name">> := Name} <- ParsedResult]), + Info = maps:get([<<"global">>,<<"erlang">>, <<"system_info">>], Map), + #{<<"type">> := <<"vm_system_info">>} = Info, + Keys = [<<"ets_limit">>, <<"port_count">>, <<"port_limit">>, + <<"process_count">>, <<"process_limit">>], + [true = is_integer(maps:get(Key, Info)) || Key <- Keys], + ReadsKey = [<<"global">>, <<"backends">>, <<"mod_roster">>, <<"read_roster_version">>], + %% Other metrics are filtered out + undef = maps:get(ReadsKey, Map, undef). + +get_vm_stats_memory(Config) -> + Result = execute_auth(#{query => get_metrics_call_with_args(<<"(name: [\"global\"])">>), + variables => #{}, operationName => <<"Q1">>}, Config), ParsedResult = ok_result(<<"metric">>, <<"getMetrics">>, Result), - ?assertEqual([], ParsedResult). 
+ Map = maps:from_list([{Name, X} || X = #{<<"name">> := Name} <- ParsedResult]), + Mem = maps:get([<<"global">>, <<"erlang">>, <<"memory">>], Map), + #{<<"type">> := <<"vm_stats_memory">>} = Mem, + Keys = [<<"atom_used">>, <<"binary">>, <<"ets">>, + <<"processes_used">>, <<"system">>, <<"total">>], + [true = is_integer(maps:get(Key, Mem)) || Key <- Keys]. get_metrics_as_dicts(Config) -> - Vars = #{}, - Result = execute_auth(#{query => get_all_metrics_as_dicts_call(), variables => Vars, + Result = execute_auth(#{query => get_all_metrics_as_dicts_call(), variables => #{}, operationName => <<"Q1">>}, Config), - ct:fail(Result), ParsedResult = ok_result(<<"metric">>, <<"getMetricsAsDicts">>, Result), - ?assertEqual([], ParsedResult). + check_node_result_is_valid(ParsedResult, false). get_metrics_as_dicts_with_key_one(Config) -> - Vars = #{}, Result = execute_auth(#{query => get_all_metrics_as_dicts_with_key_one_call(), - variables => Vars, + variables => #{}, operationName => <<"Q1">>}, Config), - ct:fail(Result), ParsedResult = ok_result(<<"metric">>, <<"getMetricsAsDicts">>, Result), - ?assertEqual([], ParsedResult). + Map = dict_objects_to_map(ParsedResult), + SentName = [domain_helper:host_type(), <<"xmppStanzaSent">>], + [#{<<"key">> := <<"one">>, <<"value">> := One}] = maps:get(SentName, Map), + true = is_integer(One). + +get_cluster_metrics(Config) -> + %% We will have at least these two nodes + Node1 = atom_to_binary(maps:get(node, distributed_helper:mim())), + Node2 = atom_to_binary(maps:get(node, distributed_helper:mim2())), + Result = execute_auth(#{query => get_all_cluster_metrics_as_dicts_call(), + variables => #{}, + operationName => <<"Q1">>}, Config), + ParsedResult = ok_result(<<"metric">>, <<"getClusterMetricsAsDicts">>, Result), + #{Node1 := Res1, Node2 := Res2} = node_objects_to_map(ParsedResult), + check_node_result_is_valid(Res1, false), + check_node_result_is_valid(Res2, true). 
+ +get_mim2_cluster_metrics(Config) -> + Node = atom_to_binary(maps:get(node, distributed_helper:mim2())), + Result = execute_auth(#{query => get_node_cluster_metrics_as_dicts_call(Node), + variables => #{}, + operationName => <<"Q1">>}, Config), + ParsedResult = ok_result(<<"metric">>, <<"getClusterMetricsAsDicts">>, Result), + [#{<<"node">> := Node, <<"result">> := ResList}] = ParsedResult, + check_node_result_is_valid(ResList, true). + +check_node_result_is_valid(ResList, MetricsAreGlobal) -> + %% Check that result contains something + Map = dict_objects_to_map(ResList), + SentName = case MetricsAreGlobal of + true -> [<<"global">>, <<"xmppStanzaSent">>]; + false -> [domain_helper:host_type(), <<"xmppStanzaSent">>] + end, + [#{<<"key">> := <<"count">>, <<"value">> := Count}, + #{<<"key">> := <<"one">>, <<"value">> := One}] = + maps:get(SentName, Map), + true = is_integer(Count), + true = is_integer(One), + [#{<<"key">> := <<"value">>,<<"value">> := V}] = + maps:get([<<"global">>,<<"uniqueSessionCount">>], Map), + true = is_integer(V), + HistObjects = maps:get([<<"global">>, <<"data">>, <<"xmpp">>, + <<"sent">>, <<"compressed_size">>], Map), + check_histogram(kv_objects_to_map(HistObjects)). + +check_histogram(Map) -> + Keys = [<<"n">>, <<"mean">>, <<"min">>, <<"max">>, <<"median">>, + <<"50">>, <<"75">>, <<"90">>, <<"95">>, <<"99">>, <<"999">>], + [true = is_integer(maps:get(Key, Map)) || Key <- Keys]. + +check_histogram_p(Map) -> + Keys = [<<"n">>, <<"mean">>, <<"min">>, <<"max">>, <<"median">>, + <<"p50">>, <<"p75">>, <<"p90">>, <<"p95">>, <<"p99">>, <<"p999">>], + [true = is_integer(maps:get(Key, Map)) || Key <- Keys]. + +dict_objects_to_map(List) -> + KV = [{Name, Dict} || #{<<"name">> := Name, <<"dict">> := Dict} <- List], + maps:from_list(KV). + +node_objects_to_map(List) -> + KV = [{Name, Value} || #{<<"node">> := Name, <<"result">> := Value} <- List], + maps:from_list(KV). 
+ +kv_objects_to_map(List) -> + KV = [{Key, Value} || #{<<"key">> := Key, <<"value">> := Value} <- List], + maps:from_list(KV). get_all_metrics_call() -> + get_metrics_call_with_args(<<>>). + +get_metrics_call_with_args(Args) -> <<"query Q1 {metric - {getMetrics { + {getMetrics", Args/binary, " { ... on HistogramMetric { name type n mean min max median p50 p75 p90 p95 p99 p999 } ... on CounterMetric @@ -94,7 +195,18 @@ get_all_metrics_as_dicts_call() -> get_all_metrics_as_dicts_with_key_one_call() -> <<"query Q1 {metric - {getMetricsAsDicts(filterKeys: [\"one\"]) { name dict { key value }}}}">>. + {getMetricsAsDicts(keys: [\"one\"]) { name dict { key value }}}}">>. + +get_all_cluster_metrics_as_dicts_call() -> + <<"query Q1 + {metric + {getClusterMetricsAsDicts {node result { name dict { key value }}}}}">>. + +get_node_cluster_metrics_as_dicts_call(NodeBin) -> + <<"query Q1 + {metric + {getClusterMetricsAsDicts(nodes: [\"", NodeBin/binary, "\"]) " + "{node result { name dict { key value }}}}}">>. %% Helpers ok_result(What1, What2, {{<<"200">>, <<"OK">>}, #{<<"data">> := Data}}) -> diff --git a/priv/graphql/schemas/admin/metric.gql b/priv/graphql/schemas/admin/metric.gql index 11e8eaec20a..a1644d80a0a 100644 --- a/priv/graphql/schemas/admin/metric.gql +++ b/priv/graphql/schemas/admin/metric.gql @@ -163,6 +163,11 @@ type MetricDictResult { dict: [MetricDictEntry] } +type MetricNodeResult { + node: String + result: [MetricDictResult] +} + """ Allow admin to get the metric values """ @@ -176,7 +181,13 @@ type MetricAdminQuery @protected{ """ Get metrics without using graphql unions. Optionally returns only specified keys - (i.e. filterKeys: ["one"] only returns key "one", but not key "count") + (i.e. keys: ["one"] only returns key "one", but not key "count") + """ + getMetricsAsDicts(name: [String], keys: [String]): [MetricDictResult] + + """ + Gather metrics from multiple nodes. + Optionally returns only from specified nodes. 
""" - getMetricsAsDicts(name: [String], filterKeys: [String]): [MetricDictResult] + getClusterMetricsAsDicts(name: [String], keys: [String], nodes: [String]): [MetricNodeResult] } diff --git a/src/graphql/admin/mongoose_graphql_metric_admin_query.erl b/src/graphql/admin/mongoose_graphql_metric_admin_query.erl index ba5a1bc6d9e..c852ad95f96 100644 --- a/src/graphql/admin/mongoose_graphql_metric_admin_query.erl +++ b/src/graphql/admin/mongoose_graphql_metric_admin_query.erl @@ -6,6 +6,7 @@ -ignore_xref([execute/4]). -include("../mongoose_graphql_types.hrl"). +-include("mongoose_logger.hrl"). -import(mongoose_graphql_helper, [make_error/2, format_result/2]). @@ -14,7 +15,9 @@ execute(_Ctx, _Obj, <<"getMetrics">>, Args) -> get_metrics(Args); execute(_Ctx, _Obj, <<"getMetricsAsDicts">>, Args) -> - get_metrics_as_dicts(Args). + get_metrics_as_dicts(Args); +execute(_Ctx, _Obj, <<"getClusterMetricsAsDicts">>, Args) -> + get_cluster_metrics_as_dicts(Args). -spec get_metrics(mongoose_graphql:args()) -> {ok, [metric_result()]} | {error, resolver_error()}. @@ -25,9 +28,29 @@ get_metrics(Args) -> get_metrics_as_dicts(Args) -> Name = maps:get(<<"name">>, Args, []), - FilterKeys = prepare_keys(maps:get(<<"filterKeys">>, Args, null)), + Keys = prepare_keys(maps:get(<<"keys">>, Args, null)), Values = exometer:get_values(prepare_name(Name)), - {ok, [make_metric_dict_result(V, FilterKeys) || V <- Values]}. + {ok, [make_metric_dict_result(V, Keys) || V <- Values]}. 
+ +get_cluster_metrics_as_dicts(Args) -> + Name = maps:get(<<"name">>, Args, []), + PrepName = prepare_name(Name), + Keys = prepare_keys(maps:get(<<"keys">>, Args, null)), + Nodes = prepare_nodes(maps:get(<<"nodes">>, Args, null)), + AllNodes = [node()|nodes()], + F = fun(Node) -> rpc:call(Node, exometer, get_values, [PrepName]) end, + FilteredNodes = filter_nodes(AllNodes, Nodes), + Results = mongoose_lib:pmap(F, FilteredNodes), + Zip = lists:zip(FilteredNodes, Results), + {ok, [make_node_result(Node, Result, Keys) || {Node, Result} <- Zip]}. + +make_node_result(Node, {ok, Values}, Keys) -> + {ok, #{<<"node">> => Node, + <<"result">> => [make_metric_dict_result(V, Keys) || V <- Values]}}; +make_node_result(Node, Other, _Keys) -> + ?LOG_ERROR(#{what => metric_get_failed, + remote_node => Node, reason => Other}), + {error, <<"Failed to get metrics">>}. prepare_keys([]) -> null; @@ -36,6 +59,18 @@ prepare_keys(null) -> prepare_keys(Keys) -> lists:map(fun prepare_key/1, Keys). +prepare_nodes([]) -> + null; +prepare_nodes(null) -> + null; +prepare_nodes(Nodes) -> + lists:map(fun binary_to_atom/1, Nodes). + +filter_nodes(AllNodes, null) -> + AllNodes; +filter_nodes(AllNodes, AllowedNodes) -> + [Node || Node <- AllNodes, lists:member(Node, AllowedNodes)]. + prepare_key(X) when is_binary(X) -> binary_to_atom(X); prepare_key(X) when is_integer(X) -> %% For percentiles @@ -58,18 +93,18 @@ make_metric_result({Name, Dict}) -> Map = format_dict(Dict), {ok, Map#{<<"name">> => PreparedName}}. -make_metric_dict_result({Name, Dict}, FilterKeys) -> +make_metric_dict_result({Name, Dict}, Keys) -> PreparedName = format_name(Name), - {ok, #{<<"name">> => PreparedName, <<"dict">> => format_dict_entries(Dict, FilterKeys)}}. + {ok, #{<<"name">> => PreparedName, <<"dict">> => format_dict_entries(Dict, Keys)}}. -format_dict_entries(Dict, FilterKeys) -> +format_dict_entries(Dict, Keys) -> [{ok, #{<<"key">> => Key, <<"value">> => Value}} - || {Key, Value} <- filter_keys(Dict, FilterKeys)]. 
+ || {Key, Value} <- filter_keys(Dict, Keys)]. filter_keys(Dict, null) -> Dict; -filter_keys(Dict, FilterKeys) -> - [KV || KV = {Key, _} <- Dict, lists:member(Key, FilterKeys)]. +filter_keys(Dict, Keys) -> + [KV || KV = {Key, _} <- Dict, lists:member(Key, Keys)]. format_name(Name) -> lists:map(fun format_name_segment/1, Name). From 89805da39d2336e43005a215d51d10129472c5ae Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 2 Jun 2022 11:19:42 +0200 Subject: [PATCH 3/9] Add type tests --- big_tests/tests/graphql_metric_SUITE.erl | 93 ++++++++++++++++--- .../mongoose_graphql_metric_admin_query.erl | 4 +- 2 files changed, 80 insertions(+), 17 deletions(-) diff --git a/big_tests/tests/graphql_metric_SUITE.erl b/big_tests/tests/graphql_metric_SUITE.erl index fd7c5f793da..f01fe7d9ece 100644 --- a/big_tests/tests/graphql_metric_SUITE.erl +++ b/big_tests/tests/graphql_metric_SUITE.erl @@ -22,12 +22,16 @@ groups() -> [{metrics, [], metrics_handler()}]. metrics_handler() -> - [get_metrics, - get_global_erlang_metrics, + [get_all_metrics, + get_by_name_global_erlang_metrics, + get_process_queue_length, + get_inet_stats, get_vm_stats_memory, get_metrics_as_dicts, + get_by_name_metrics_as_dicts, get_metrics_as_dicts_with_key_one, get_cluster_metrics, + get_by_name_cluster_metrics_as_dicts, get_mim2_cluster_metrics]. init_per_suite(Config) -> @@ -43,7 +47,7 @@ init_per_testcase(CaseName, Config) -> end_per_testcase(CaseName, Config) -> escalus:end_per_testcase(CaseName, Config). -get_metrics(Config) -> +get_all_metrics(Config) -> %% Get all metrics Result = execute_auth(#{query => get_all_metrics_call(), variables => #{}, operationName => <<"Q1">>}, Config), @@ -53,15 +57,17 @@ get_metrics(Config) -> Reads = maps:get(ReadsKey, Map), %% Histogram integer keys have p prefix check_histogram_p(Reads), + %% HistogramMetric type #{<<"type">> := <<"histogram">>} = Reads. 
-get_global_erlang_metrics(Config) -> +get_by_name_global_erlang_metrics(Config) -> %% Filter by name works Result = execute_auth(#{query => get_metrics_call_with_args(<<"(name: [\"global\", \"erlang\"])">>), variables => #{}, operationName => <<"Q1">>}, Config), ParsedResult = ok_result(<<"metric">>, <<"getMetrics">>, Result), Map = maps:from_list([{Name, X} || X = #{<<"name">> := Name} <- ParsedResult]), Info = maps:get([<<"global">>,<<"erlang">>, <<"system_info">>], Map), + %% VMSystemInfoMetric type #{<<"type">> := <<"vm_system_info">>} = Info, Keys = [<<"ets_limit">>, <<"port_count">>, <<"port_limit">>, <<"process_count">>, <<"process_limit">>], @@ -70,12 +76,38 @@ get_global_erlang_metrics(Config) -> %% Other metrics are filtered out undef = maps:get(ReadsKey, Map, undef). +get_process_queue_length(Config) -> + Result = execute_auth(#{query => get_metrics_call_with_args( + <<"(name: [\"global\", \"processQueueLengths\"])">>), + variables => #{}, operationName => <<"Q1">>}, Config), + ParsedResult = ok_result(<<"metric">>, <<"getMetrics">>, Result), + Map = maps:from_list([{Name, X} || X = #{<<"name">> := Name} <- ParsedResult]), + Lens = maps:get([<<"global">>, <<"processQueueLengths">>], Map), + %% ProbeQueuesMetric type + #{<<"type">> := <<"probe_queues">>} = Lens, + Keys = [<<"fsm">>, <<"regular">>, <<"total">>], + [true = is_integer(maps:get(Key, Lens)) || Key <- Keys]. 
+ +get_inet_stats(Config) -> + Result = execute_auth(#{query => get_metrics_call_with_args( + <<"(name: [\"global\", \"data\", \"dist\"])">>), + variables => #{}, operationName => <<"Q1">>}, Config), + ParsedResult = ok_result(<<"metric">>, <<"getMetrics">>, Result), + Map = maps:from_list([{Name, X} || X = #{<<"name">> := Name} <- ParsedResult]), + Stats = maps:get([<<"global">>, <<"data">>, <<"dist">>], Map), + %% MergedInetStatsMetric type + #{<<"type">> := <<"merged_inet_stats">>} = Stats, + Keys = [<<"connections">>, <<"recv_cnt">>, <<"recv_max">>, <<"recv_oct">>, + <<"send_cnt">>, <<"send_max">>, <<"send_oct">>, <<"send_pend">>], + [true = is_integer(maps:get(Key, Stats)) || Key <- Keys]. + get_vm_stats_memory(Config) -> Result = execute_auth(#{query => get_metrics_call_with_args(<<"(name: [\"global\"])">>), variables => #{}, operationName => <<"Q1">>}, Config), ParsedResult = ok_result(<<"metric">>, <<"getMetrics">>, Result), Map = maps:from_list([{Name, X} || X = #{<<"name">> := Name} <- ParsedResult]), Mem = maps:get([<<"global">>, <<"erlang">>, <<"memory">>], Map), + %% VMStatsMemoryMetric type #{<<"type">> := <<"vm_stats_memory">>} = Mem, Keys = [<<"atom_used">>, <<"binary">>, <<"ets">>, <<"processes_used">>, <<"system">>, <<"total">>], @@ -87,6 +119,16 @@ get_metrics_as_dicts(Config) -> ParsedResult = ok_result(<<"metric">>, <<"getMetricsAsDicts">>, Result), check_node_result_is_valid(ParsedResult, false). +get_by_name_metrics_as_dicts(Config) -> + Args = <<"(name: [\"_\", \"xmppStanzaSent\"])">>, + Result = execute_auth(#{query => get_by_args_metrics_as_dicts_call(Args), + variables => #{}, operationName => <<"Q1">>}, Config), + ParsedResult = ok_result(<<"metric">>, <<"getMetricsAsDicts">>, Result), + %% Only xmppStanzaSent type + lists:foreach(fun(#{<<"dict">> := Dict, <<"name">> := [_, <<"xmppStanzaSent">>]}) -> + check_spiral_dict(Dict) + end, ParsedResult). 
+ get_metrics_as_dicts_with_key_one(Config) -> Result = execute_auth(#{query => get_all_metrics_as_dicts_with_key_one_call(), variables => #{}, @@ -109,6 +151,21 @@ get_cluster_metrics(Config) -> check_node_result_is_valid(Res1, false), check_node_result_is_valid(Res2, true). +get_by_name_cluster_metrics_as_dicts(Config) -> + Args = <<"(name: [\"_\", \"xmppStanzaSent\"])">>, + Result = execute_auth(#{query => get_by_args_cluster_metrics_as_dicts_call(Args), + variables => #{}, operationName => <<"Q1">>}, Config), + NodeResult = ok_result(<<"metric">>, <<"getClusterMetricsAsDicts">>, Result), + Map = node_objects_to_map(NodeResult), + %% Contains data for at least two nodes + true = maps:size(Map) > 1, + %% Only xmppStanzaSent type + maps:map(fun(_Node, NodeRes) -> + lists:foreach(fun(#{<<"dict">> := Dict, + <<"name">> := [_, <<"xmppStanzaSent">>]}) -> + check_spiral_dict(Dict) + end, NodeRes) end, Map). + get_mim2_cluster_metrics(Config) -> Node = atom_to_binary(maps:get(node, distributed_helper:mim2())), Result = execute_auth(#{query => get_node_cluster_metrics_as_dicts_call(Node), @@ -125,11 +182,7 @@ check_node_result_is_valid(ResList, MetricsAreGlobal) -> true -> [<<"global">>, <<"xmppStanzaSent">>]; false -> [domain_helper:host_type(), <<"xmppStanzaSent">>] end, - [#{<<"key">> := <<"count">>, <<"value">> := Count}, - #{<<"key">> := <<"one">>, <<"value">> := One}] = - maps:get(SentName, Map), - true = is_integer(Count), - true = is_integer(One), + check_spiral_dict(maps:get(SentName, Map)), [#{<<"key">> := <<"value">>,<<"value">> := V}] = maps:get([<<"global">>,<<"uniqueSessionCount">>], Map), true = is_integer(V), @@ -188,9 +241,12 @@ get_metrics_call_with_args(Args) -> }">>. get_all_metrics_as_dicts_call() -> + get_by_args_metrics_as_dicts_call(<<>>). + +get_by_args_metrics_as_dicts_call(Args) -> <<"query Q1 {metric - {getMetricsAsDicts { name dict { key value }}}}">>. + {getMetricsAsDicts", Args/binary, " { name dict { key value }}}}">>. 
get_all_metrics_as_dicts_with_key_one_call() -> <<"query Q1 @@ -198,15 +254,16 @@ get_all_metrics_as_dicts_with_key_one_call() -> {getMetricsAsDicts(keys: [\"one\"]) { name dict { key value }}}}">>. get_all_cluster_metrics_as_dicts_call() -> + get_by_args_cluster_metrics_as_dicts_call(<<>>). + +get_by_args_cluster_metrics_as_dicts_call(Args) -> <<"query Q1 {metric - {getClusterMetricsAsDicts {node result { name dict { key value }}}}}">>. + {getClusterMetricsAsDicts", Args/binary, + " {node result { name dict { key value }}}}}">>. get_node_cluster_metrics_as_dicts_call(NodeBin) -> - <<"query Q1 - {metric - {getClusterMetricsAsDicts(nodes: [\"", NodeBin/binary, "\"]) " - "{node result { name dict { key value }}}}}">>. + get_by_args_cluster_metrics_as_dicts_call(<<"(nodes: [\"", NodeBin/binary, "\"])">>). %% Helpers ok_result(What1, What2, {{<<"200">>, <<"OK">>}, #{<<"data">> := Data}}) -> @@ -214,3 +271,9 @@ ok_result(What1, What2, {{<<"200">>, <<"OK">>}, #{<<"data">> := Data}}) -> error_result(ErrorNumber, {{<<"200">>, <<"OK">>}, #{<<"errors">> := Errors}}) -> lists:nth(ErrorNumber, Errors). + +check_spiral_dict(Dict) -> + [#{<<"key">> := <<"count">>, <<"value">> := Count}, + #{<<"key">> := <<"one">>, <<"value">> := One}] = Dict, + true = is_integer(Count), + true = is_integer(One). 
diff --git a/src/graphql/admin/mongoose_graphql_metric_admin_query.erl b/src/graphql/admin/mongoose_graphql_metric_admin_query.erl index c852ad95f96..a582ec4c85c 100644 --- a/src/graphql/admin/mongoose_graphql_metric_admin_query.erl +++ b/src/graphql/admin/mongoose_graphql_metric_admin_query.erl @@ -157,8 +157,8 @@ format_merged_inet_stats(#{connections := Cons, send_pend := SPend}) -> %% Metrics from a pool of connections #{<<"type">> => <<"merged_inet_stats">>, <<"connections">> => Cons, - <<"recv_cnt">> => RCnt, recv_max => RMax, recv_oct => ROct, - <<"send_cnt">> => SCnt, send_max => SMax, send_oct => SOct, + <<"recv_cnt">> => RCnt, <<"recv_max">> => RMax, <<"recv_oct">> => ROct, + <<"send_cnt">> => SCnt, <<"send_max">> => SMax, <<"send_oct">> => SOct, <<"send_pend">> => SPend}. format_vm_stats_memory(#{total := Total, processes_used := P, From ce38c118911a4a5692824166ddf5b75916b13d4e Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 2 Jun 2022 11:35:52 +0200 Subject: [PATCH 4/9] Add get_all_metrics_check_by_type testcase --- big_tests/tests/graphql_metric_SUITE.erl | 59 +++++++++++++++++------- 1 file changed, 43 insertions(+), 16 deletions(-) diff --git a/big_tests/tests/graphql_metric_SUITE.erl b/big_tests/tests/graphql_metric_SUITE.erl index f01fe7d9ece..122b60a15b8 100644 --- a/big_tests/tests/graphql_metric_SUITE.erl +++ b/big_tests/tests/graphql_metric_SUITE.erl @@ -23,6 +23,7 @@ groups() -> metrics_handler() -> [get_all_metrics, + get_all_metrics_check_by_type, get_by_name_global_erlang_metrics, get_process_queue_length, get_inet_stats, @@ -60,6 +61,37 @@ get_all_metrics(Config) -> %% HistogramMetric type #{<<"type">> := <<"histogram">>} = Reads. +get_all_metrics_check_by_type(Config) -> + %% Get all metrics + Result = execute_auth(#{query => get_all_metrics_call(), + variables => #{}, operationName => <<"Q1">>}, Config), + ParsedResult = ok_result(<<"metric">>, <<"getMetrics">>, Result), + lists:foreach(fun check_metric_by_type/1, ParsedResult). 
+ +check_metric_by_type(#{<<"type">> := Type} = Map) -> + values_are_integers(Map, type_to_keys(Type)). + +type_to_keys(<<"histogram">>) -> + [<<"n">>, <<"mean">>, <<"min">>, <<"max">>, <<"median">>, + <<"p50">>, <<"p75">>, <<"p90">>, <<"p95">>, <<"p99">>, <<"p999">>]; +type_to_keys(<<"counter">>) -> + [<<"value">>, <<"ms_since_reset">>]; +type_to_keys(<<"spiral">>) -> + [<<"one">>, <<"count">>]; +type_to_keys(<<"gauge">>) -> + [<<"value">>]; +type_to_keys(<<"merged_inet_stats">>) -> + [<<"connections">>, <<"recv_cnt">>, <<"recv_max">>, <<"recv_oct">>, + <<"send_cnt">>, <<"send_max">>, <<"send_oct">>, <<"send_pend">>]; +type_to_keys(<<"vm_stats_memory">>) -> + [<<"atom_used">>, <<"binary">>, <<"ets">>, + <<"processes_used">>, <<"system">>, <<"total">>]; +type_to_keys(<<"vm_system_info">>) -> + [<<"ets_limit">>, <<"port_count">>, <<"port_limit">>, + <<"process_count">>, <<"process_limit">>]; +type_to_keys(<<"probe_queues">>) -> + [<<"fsm">>, <<"regular">>, <<"total">>]. + get_by_name_global_erlang_metrics(Config) -> %% Filter by name works Result = execute_auth(#{query => get_metrics_call_with_args(<<"(name: [\"global\", \"erlang\"])">>), @@ -69,9 +101,7 @@ get_by_name_global_erlang_metrics(Config) -> Info = maps:get([<<"global">>,<<"erlang">>, <<"system_info">>], Map), %% VMSystemInfoMetric type #{<<"type">> := <<"vm_system_info">>} = Info, - Keys = [<<"ets_limit">>, <<"port_count">>, <<"port_limit">>, - <<"process_count">>, <<"process_limit">>], - [true = is_integer(maps:get(Key, Info)) || Key <- Keys], + check_metric_by_type(Info), ReadsKey = [<<"global">>, <<"backends">>, <<"mod_roster">>, <<"read_roster_version">>], %% Other metrics are filtered out undef = maps:get(ReadsKey, Map, undef). 
@@ -85,8 +115,7 @@ get_process_queue_length(Config) -> Lens = maps:get([<<"global">>, <<"processQueueLengths">>], Map), %% ProbeQueuesMetric type #{<<"type">> := <<"probe_queues">>} = Lens, - Keys = [<<"fsm">>, <<"regular">>, <<"total">>], - [true = is_integer(maps:get(Key, Lens)) || Key <- Keys]. + check_metric_by_type(Lens). get_inet_stats(Config) -> Result = execute_auth(#{query => get_metrics_call_with_args( @@ -97,9 +126,7 @@ get_inet_stats(Config) -> Stats = maps:get([<<"global">>, <<"data">>, <<"dist">>], Map), %% MergedInetStatsMetric type #{<<"type">> := <<"merged_inet_stats">>} = Stats, - Keys = [<<"connections">>, <<"recv_cnt">>, <<"recv_max">>, <<"recv_oct">>, - <<"send_cnt">>, <<"send_max">>, <<"send_oct">>, <<"send_pend">>], - [true = is_integer(maps:get(Key, Stats)) || Key <- Keys]. + check_metric_by_type(Stats). get_vm_stats_memory(Config) -> Result = execute_auth(#{query => get_metrics_call_with_args(<<"(name: [\"global\"])">>), @@ -109,9 +136,7 @@ get_vm_stats_memory(Config) -> Mem = maps:get([<<"global">>, <<"erlang">>, <<"memory">>], Map), %% VMStatsMemoryMetric type #{<<"type">> := <<"vm_stats_memory">>} = Mem, - Keys = [<<"atom_used">>, <<"binary">>, <<"ets">>, - <<"processes_used">>, <<"system">>, <<"total">>], - [true = is_integer(maps:get(Key, Mem)) || Key <- Keys]. + check_metric_by_type(Mem). get_metrics_as_dicts(Config) -> Result = execute_auth(#{query => get_all_metrics_as_dicts_call(), variables => #{}, @@ -193,12 +218,11 @@ check_node_result_is_valid(ResList, MetricsAreGlobal) -> check_histogram(Map) -> Keys = [<<"n">>, <<"mean">>, <<"min">>, <<"max">>, <<"median">>, <<"50">>, <<"75">>, <<"90">>, <<"95">>, <<"99">>, <<"999">>], - [true = is_integer(maps:get(Key, Map)) || Key <- Keys]. + values_are_integers(Map, Keys). check_histogram_p(Map) -> - Keys = [<<"n">>, <<"mean">>, <<"min">>, <<"max">>, <<"median">>, - <<"p50">>, <<"p75">>, <<"p90">>, <<"p95">>, <<"p99">>, <<"p999">>], - [true = is_integer(maps:get(Key, Map)) || Key <- Keys]. 
+ Keys = type_to_keys(<<"histogram">>), + values_are_integers(Map, Keys). dict_objects_to_map(List) -> KV = [{Name, Dict} || #{<<"name">> := Name, <<"dict">> := Dict} <- List], @@ -235,7 +259,7 @@ get_metrics_call_with_args(Args) -> ... on VMSystemInfoMetric { name type port_count port_limit process_count process_limit ets_limit } ... on ProbeQueuesMetric - { name type type fsm regular total } + { name type fsm regular total } } } }">>. @@ -277,3 +301,6 @@ check_spiral_dict(Dict) -> #{<<"key">> := <<"one">>, <<"value">> := One}] = Dict, true = is_integer(Count), true = is_integer(One). + +values_are_integers(Map, Keys) -> + lists:foreach(fun(Key) -> true = is_integer(maps:get(Key, Map)) end, Keys). From b9bcf48e41eb1578a26119b1003c8a71f1aa3206 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 2 Jun 2022 15:47:07 +0200 Subject: [PATCH 5/9] Fix RDBMS metrics --- big_tests/tests/graphql_metric_SUITE.erl | 6 +++++ priv/graphql/schemas/admin/metric.gql | 26 ++++++++++++++++++- .../mongoose_graphql_metric_admin_query.erl | 12 ++++++++- src/graphql/mongoose_graphql_union.erl | 2 ++ 4 files changed, 44 insertions(+), 2 deletions(-) diff --git a/big_tests/tests/graphql_metric_SUITE.erl b/big_tests/tests/graphql_metric_SUITE.erl index 122b60a15b8..83691d96eb6 100644 --- a/big_tests/tests/graphql_metric_SUITE.erl +++ b/big_tests/tests/graphql_metric_SUITE.erl @@ -83,6 +83,9 @@ type_to_keys(<<"gauge">>) -> type_to_keys(<<"merged_inet_stats">>) -> [<<"connections">>, <<"recv_cnt">>, <<"recv_max">>, <<"recv_oct">>, <<"send_cnt">>, <<"send_max">>, <<"send_oct">>, <<"send_pend">>]; +type_to_keys(<<"rdbms_stats">>) -> + [<<"workers">>, <<"recv_cnt">>, <<"recv_max">>, <<"recv_oct">>, + <<"send_cnt">>, <<"send_max">>, <<"send_oct">>, <<"send_pend">>]; type_to_keys(<<"vm_stats_memory">>) -> [<<"atom_used">>, <<"binary">>, <<"ets">>, <<"processes_used">>, <<"system">>, <<"total">>]; @@ -254,6 +257,9 @@ get_metrics_call_with_args(Args) -> ... 
on MergedInetStatsMetric { name type connections recv_cnt recv_max recv_oct send_cnt send_max send_oct send_pend } + ... on RDBMSStatsMetric + { name type workers recv_cnt recv_max recv_oct + send_cnt send_max send_oct send_pend } ... on VMStatsMemoryMetric { name type total processes_used atom_used binary ets system } ... on VMSystemInfoMetric diff --git a/priv/graphql/schemas/admin/metric.gql b/priv/graphql/schemas/admin/metric.gql index a1644d80a0a..12007cea355 100644 --- a/priv/graphql/schemas/admin/metric.gql +++ b/priv/graphql/schemas/admin/metric.gql @@ -8,13 +8,14 @@ enum MetricType { spiral gauge merged_inet_stats + rdbms_stats vm_stats_memory vm_system_info probe_queues } union MetricResult = HistogramMetric | CounterMetric | SpiralMetric - | GaugeMetric | MergedInetStatsMetric + | GaugeMetric | MergedInetStatsMetric | RDBMSStatsMetric | VMStatsMemoryMetric | VMSystemInfoMetric | ProbeQueuesMetric @@ -100,6 +101,29 @@ type MergedInetStatsMetric { send_pend: Int } +type RDBMSStatsMetric { + "Metric name" + name: [String] + "Metric type" + type: MetricType + "Number of workers" + workers: Int + "Number of packets received by the socket" + recv_cnt: Int + "Size of the largest packet, in bytes, received by the socket" + recv_max: Int + "Number of bytes received by the socket" + recv_oct: Int + "Number of packets sent from the socket" + send_cnt: Int + "Size of the largest packet, in bytes, sent from the socket" + send_max: Int + "Number of bytes sent from the socket" + send_oct: Int + "Number of bytes waiting to be sent by the socket" + send_pend: Int +} + type VMStatsMemoryMetric { "Metric name" name: [String] diff --git a/src/graphql/admin/mongoose_graphql_metric_admin_query.erl b/src/graphql/admin/mongoose_graphql_metric_admin_query.erl index a582ec4c85c..e7229d4261a 100644 --- a/src/graphql/admin/mongoose_graphql_metric_admin_query.erl +++ b/src/graphql/admin/mongoose_graphql_metric_admin_query.erl @@ -132,7 +132,9 @@ format_dict2(#{processes_used := _} = 
Dict) -> format_dict2(#{port_count := _} = Dict) -> format_vm_system_info(Dict); format_dict2(#{fsm := _, regular := _} = Dict) -> - format_probe_queues(Dict). + format_probe_queues(Dict); +format_dict2(#{recv_cnt := _, workers := _} = Dict) -> + format_rdbms_stats(Dict). format_spiral(#{one := One, count := Count}) -> #{<<"type">> => <<"spiral">>, <<"one">> => One, <<"count">> => Count}. @@ -161,6 +163,14 @@ format_merged_inet_stats(#{connections := Cons, <<"send_cnt">> => SCnt, <<"send_max">> => SMax, <<"send_oct">> => SOct, <<"send_pend">> => SPend}. +format_rdbms_stats(#{recv_cnt := RCnt, recv_max := RMax, recv_oct := ROct, + send_cnt := SCnt,send_max := SMax, send_oct := SOct, + send_pend := SPend, workers := Workers}) -> + #{<<"type">> => <<"rdbms_stats">>, <<"workers">> => Workers, + <<"recv_cnt">> => RCnt, <<"recv_max">> => RMax, <<"recv_oct">> => ROct, + <<"send_cnt">> => SCnt, <<"send_max">> => SMax, <<"send_oct">> => SOct, + <<"send_pend">> => SPend}. + format_vm_stats_memory(#{total := Total, processes_used := P, atom_used := A, binary := B, ets := E, system := S}) -> #{<<"type">> => <<"vm_stats_memory">>, diff --git a/src/graphql/mongoose_graphql_union.erl b/src/graphql/mongoose_graphql_union.erl index 813fa76327f..ba3521c425e 100644 --- a/src/graphql/mongoose_graphql_union.erl +++ b/src/graphql/mongoose_graphql_union.erl @@ -28,6 +28,8 @@ execute(#{<<"type">> := <<"vm_system_info">>, <<"port_count">> := _}) -> {ok, <<"VMSystemInfoMetric">>}; execute(#{<<"type">> := <<"probe_queues">>, <<"fsm">> := _}) -> {ok, <<"ProbeQueuesMetric">>}; +execute(#{<<"type">> := <<"rdbms_stats">>, <<"workers">> := _}) -> + {ok, <<"RDBMSStatsMetric">>}; execute(Value) -> ?LOG_ERROR(#{what => graphql_unknown_type, value => Value}), {error, unknown_type}. 
From f4f4847c850c42cba5ae043fc87e5a8f519d4e60 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 9 Jun 2022 11:31:22 +0200 Subject: [PATCH 6/9] Add metric_host_type option So we can test metrics in an easy way --- big_tests/dynamic_domains.config | 1 + big_tests/test.config | 1 + big_tests/tests/domain_helper.erl | 4 ++++ big_tests/tests/graphql_metric_SUITE.erl | 4 ++-- 4 files changed, 8 insertions(+), 2 deletions(-) diff --git a/big_tests/dynamic_domains.config b/big_tests/dynamic_domains.config index 6f55ba42706..ca044e64672 100644 --- a/big_tests/dynamic_domains.config +++ b/big_tests/dynamic_domains.config @@ -4,6 +4,7 @@ {hosts, [{mim, [{node, mongooseim@localhost}, {domain, <<"domain.example.com">>}, {host_type, <<"test type">>}, + {metric_host_type, <<"test_type">>}, {secondary_domain, <<"domain.example.org">>}, {secondary_host_type, <<"test type">>}, {dynamic_domains, [{<<"test type">>, [<<"domain.example.com">>, <<"domain.example.org">>]}, diff --git a/big_tests/test.config b/big_tests/test.config index b3442e46018..55b36f93b41 100644 --- a/big_tests/test.config +++ b/big_tests/test.config @@ -19,6 +19,7 @@ {hosts, [{mim, [{node, mongooseim@localhost}, {domain, <<"localhost">>}, {host_type, <<"localhost">>}, + {metric_host_type, <<"localhost">>}, {vars, "mim1"}, {cluster, mim}, {secondary_domain, <<"localhost.bis">>}, diff --git a/big_tests/tests/domain_helper.erl b/big_tests/tests/domain_helper.erl index 8a81a8f4475..baacc8bcbf0 100644 --- a/big_tests/tests/domain_helper.erl +++ b/big_tests/tests/domain_helper.erl @@ -11,6 +11,7 @@ host_types/1, host_type/0, host_type/1, + metric_host_type/0, domain_to_host_type/2, domain/0, domain/1, @@ -34,6 +35,9 @@ domain() -> host_type(NodeKey) -> get_or_fail({hosts, NodeKey, host_type}). +metric_host_type() -> + get_or_fail({hosts, mim, metric_host_type}). + domain_to_host_type(Node, Domain) -> {ok, HostType} = rpc(Node, mongoose_domain_core, get_host_type, [Domain]), HostType. 
diff --git a/big_tests/tests/graphql_metric_SUITE.erl b/big_tests/tests/graphql_metric_SUITE.erl index 83691d96eb6..bf8093523ba 100644 --- a/big_tests/tests/graphql_metric_SUITE.erl +++ b/big_tests/tests/graphql_metric_SUITE.erl @@ -163,7 +163,7 @@ get_metrics_as_dicts_with_key_one(Config) -> operationName => <<"Q1">>}, Config), ParsedResult = ok_result(<<"metric">>, <<"getMetricsAsDicts">>, Result), Map = dict_objects_to_map(ParsedResult), - SentName = [domain_helper:host_type(), <<"xmppStanzaSent">>], + SentName = [domain_helper:metric_host_type(), <<"xmppStanzaSent">>], [#{<<"key">> := <<"one">>, <<"value">> := One}] = maps:get(SentName, Map), true = is_integer(One). @@ -208,7 +208,7 @@ check_node_result_is_valid(ResList, MetricsAreGlobal) -> Map = dict_objects_to_map(ResList), SentName = case MetricsAreGlobal of true -> [<<"global">>, <<"xmppStanzaSent">>]; - false -> [domain_helper:host_type(), <<"xmppStanzaSent">>] + false -> [domain_helper:metric_host_type(), <<"xmppStanzaSent">>] end, check_spiral_dict(maps:get(SentName, Map)), [#{<<"key">> := <<"value">>,<<"value">> := V}] = From dd78bb3cef4dc525358c0e537f31f9e6aa166658 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Mon, 13 Jun 2022 10:21:39 +0200 Subject: [PATCH 7/9] Fix review comments --- big_tests/dynamic_domains.config | 1 - big_tests/test.config | 1 - big_tests/tests/domain_helper.erl | 4 ---- big_tests/tests/graphql_metric_SUITE.erl | 12 ++++++++---- big_tests/tests/mam_helper.erl | 4 ++-- priv/graphql/schemas/admin/metric.gql | 1 + .../admin/mongoose_graphql_metric_admin_query.erl | 11 ++--------- src/graphql/mongoose_graphql_union.erl | 7 +------ 8 files changed, 14 insertions(+), 27 deletions(-) diff --git a/big_tests/dynamic_domains.config b/big_tests/dynamic_domains.config index ca044e64672..6f55ba42706 100644 --- a/big_tests/dynamic_domains.config +++ b/big_tests/dynamic_domains.config @@ -4,7 +4,6 @@ {hosts, [{mim, [{node, mongooseim@localhost}, {domain, <<"domain.example.com">>}, 
{host_type, <<"test type">>}, - {metric_host_type, <<"test_type">>}, {secondary_domain, <<"domain.example.org">>}, {secondary_host_type, <<"test type">>}, {dynamic_domains, [{<<"test type">>, [<<"domain.example.com">>, <<"domain.example.org">>]}, diff --git a/big_tests/test.config b/big_tests/test.config index 55b36f93b41..b3442e46018 100644 --- a/big_tests/test.config +++ b/big_tests/test.config @@ -19,7 +19,6 @@ {hosts, [{mim, [{node, mongooseim@localhost}, {domain, <<"localhost">>}, {host_type, <<"localhost">>}, - {metric_host_type, <<"localhost">>}, {vars, "mim1"}, {cluster, mim}, {secondary_domain, <<"localhost.bis">>}, diff --git a/big_tests/tests/domain_helper.erl b/big_tests/tests/domain_helper.erl index baacc8bcbf0..8a81a8f4475 100644 --- a/big_tests/tests/domain_helper.erl +++ b/big_tests/tests/domain_helper.erl @@ -11,7 +11,6 @@ host_types/1, host_type/0, host_type/1, - metric_host_type/0, domain_to_host_type/2, domain/0, domain/1, @@ -35,9 +34,6 @@ domain() -> host_type(NodeKey) -> get_or_fail({hosts, NodeKey, host_type}). -metric_host_type() -> - get_or_fail({hosts, mim, metric_host_type}). - domain_to_host_type(Node, Domain) -> {ok, HostType} = rpc(Node, mongoose_domain_core, get_host_type, [Domain]), HostType. 
diff --git a/big_tests/tests/graphql_metric_SUITE.erl b/big_tests/tests/graphql_metric_SUITE.erl index bf8093523ba..635494b6b66 100644 --- a/big_tests/tests/graphql_metric_SUITE.erl +++ b/big_tests/tests/graphql_metric_SUITE.erl @@ -101,7 +101,7 @@ get_by_name_global_erlang_metrics(Config) -> variables => #{}, operationName => <<"Q1">>}, Config), ParsedResult = ok_result(<<"metric">>, <<"getMetrics">>, Result), Map = maps:from_list([{Name, X} || X = #{<<"name">> := Name} <- ParsedResult]), - Info = maps:get([<<"global">>,<<"erlang">>, <<"system_info">>], Map), + Info = maps:get([<<"global">>, <<"erlang">>, <<"system_info">>], Map), %% VMSystemInfoMetric type #{<<"type">> := <<"vm_system_info">>} = Info, check_metric_by_type(Info), @@ -152,6 +152,7 @@ get_by_name_metrics_as_dicts(Config) -> Result = execute_auth(#{query => get_by_args_metrics_as_dicts_call(Args), variables => #{}, operationName => <<"Q1">>}, Config), ParsedResult = ok_result(<<"metric">>, <<"getMetricsAsDicts">>, Result), + [_|_] = ParsedResult, %% Only xmppStanzaSent type lists:foreach(fun(#{<<"dict">> := Dict, <<"name">> := [_, <<"xmppStanzaSent">>]}) -> check_spiral_dict(Dict) @@ -163,7 +164,7 @@ get_metrics_as_dicts_with_key_one(Config) -> operationName => <<"Q1">>}, Config), ParsedResult = ok_result(<<"metric">>, <<"getMetricsAsDicts">>, Result), Map = dict_objects_to_map(ParsedResult), - SentName = [domain_helper:metric_host_type(), <<"xmppStanzaSent">>], + SentName = [metric_host_type(), <<"xmppStanzaSent">>], [#{<<"key">> := <<"one">>, <<"value">> := One}] = maps:get(SentName, Map), true = is_integer(One). 
@@ -188,7 +189,7 @@ get_by_name_cluster_metrics_as_dicts(Config) -> %% Contains data for at least two nodes true = maps:size(Map) > 1, %% Only xmppStanzaSent type - maps:map(fun(_Node, NodeRes) -> + maps:map(fun(_Node, [_|_] = NodeRes) -> lists:foreach(fun(#{<<"dict">> := Dict, <<"name">> := [_, <<"xmppStanzaSent">>]}) -> check_spiral_dict(Dict) @@ -208,7 +209,7 @@ check_node_result_is_valid(ResList, MetricsAreGlobal) -> Map = dict_objects_to_map(ResList), SentName = case MetricsAreGlobal of true -> [<<"global">>, <<"xmppStanzaSent">>]; - false -> [domain_helper:metric_host_type(), <<"xmppStanzaSent">>] + false -> [metric_host_type(), <<"xmppStanzaSent">>] end, check_spiral_dict(maps:get(SentName, Map)), [#{<<"key">> := <<"value">>,<<"value">> := V}] = @@ -310,3 +311,6 @@ check_spiral_dict(Dict) -> values_are_integers(Map, Keys) -> lists:foreach(fun(Key) -> true = is_integer(maps:get(Key, Map)) end, Keys). + +metric_host_type() -> + binary:replace(domain_helper:host_type(), <<" ">>, <<"_">>, [global]). diff --git a/big_tests/tests/mam_helper.erl b/big_tests/tests/mam_helper.erl index 10ae4f28925..4b450fa0b5f 100644 --- a/big_tests/tests/mam_helper.erl +++ b/big_tests/tests/mam_helper.erl @@ -655,9 +655,9 @@ send_muc_rsm_messages(Config) -> escalus:wait_for_stanzas(Alice, 3), %% Alice sends messages to Bob. 
- lists:foreach(fun(N) -> + lists:foreach(fun(NN) -> escalus:send(Alice, escalus_stanza:groupchat_to( - RoomAddr, generate_message_text(N))) + RoomAddr, generate_message_text(NN))) end, lists:seq(1, N)), assert_list_size(N, escalus:wait_for_stanzas(Bob, N)), assert_list_size(N, escalus:wait_for_stanzas(Alice, N)), diff --git a/priv/graphql/schemas/admin/metric.gql b/priv/graphql/schemas/admin/metric.gql index 12007cea355..4f439aa79cc 100644 --- a/priv/graphql/schemas/admin/metric.gql +++ b/priv/graphql/schemas/admin/metric.gql @@ -32,6 +32,7 @@ type HistogramMetric { min: Int "Max value" max: Int + "Median value" median: Int "50th percentile" p50: Int diff --git a/src/graphql/admin/mongoose_graphql_metric_admin_query.erl b/src/graphql/admin/mongoose_graphql_metric_admin_query.erl index e7229d4261a..72220f68efe 100644 --- a/src/graphql/admin/mongoose_graphql_metric_admin_query.erl +++ b/src/graphql/admin/mongoose_graphql_metric_admin_query.erl @@ -78,14 +78,7 @@ prepare_key(X) when is_integer(X) -> %% For percentiles prepare_name(null) -> []; -prepare_name([<<"global">> | T]) -> - [global | prepare_name2(T)]; -prepare_name([H | T]) -> - [binary_to_atom(H) | prepare_name2(T)]; -prepare_name([]) -> - []. - -prepare_name2(Segments) -> +prepare_name(Segments) -> lists:map(fun binary_to_atom/1, Segments). make_metric_result({Name, Dict}) -> @@ -164,7 +157,7 @@ format_merged_inet_stats(#{connections := Cons, <<"send_pend">> => SPend}. 
format_rdbms_stats(#{recv_cnt := RCnt, recv_max := RMax, recv_oct := ROct, - send_cnt := SCnt,send_max := SMax, send_oct := SOct, + send_cnt := SCnt, send_max := SMax, send_oct := SOct, send_pend := SPend, workers := Workers}) -> #{<<"type">> => <<"rdbms_stats">>, <<"workers">> => Workers, <<"recv_cnt">> => RCnt, <<"recv_max">> => RMax, <<"recv_oct">> => ROct, diff --git a/src/graphql/mongoose_graphql_union.erl b/src/graphql/mongoose_graphql_union.erl index ba3521c425e..acce89eb71d 100644 --- a/src/graphql/mongoose_graphql_union.erl +++ b/src/graphql/mongoose_graphql_union.erl @@ -20,8 +20,6 @@ execute(#{<<"type">> := <<"gauge">>, <<"name">> := _, <<"value">> := _}) -> {ok, <<"GaugeMetric">>}; execute(#{<<"type">> := <<"merged_inet_stats">>, <<"connections">> := _}) -> {ok, <<"MergedInetStatsMetric">>}; -execute(#{<<"type">> := <<"merged_inet_stats">>, <<"connections">> := _}) -> - {ok, <<"MergedInetStatsMetric">>}; execute(#{<<"type">> := <<"vm_stats_memory">>, <<"processes_used">> := _}) -> {ok, <<"VMStatsMemoryMetric">>}; execute(#{<<"type">> := <<"vm_system_info">>, <<"port_count">> := _}) -> @@ -29,7 +27,4 @@ execute(#{<<"type">> := <<"vm_system_info">>, <<"port_count">> := _}) -> execute(#{<<"type">> := <<"probe_queues">>, <<"fsm">> := _}) -> {ok, <<"ProbeQueuesMetric">>}; execute(#{<<"type">> := <<"rdbms_stats">>, <<"workers">> := _}) -> - {ok, <<"RDBMSStatsMetric">>}; -execute(Value) -> - ?LOG_ERROR(#{what => graphql_unknown_type, value => Value}), - {error, unknown_type}. + {ok, <<"RDBMSStatsMetric">>}. 
%% GraphQL admin resolver for the "metric" category.
%% This layer only decodes GraphQL argument maps and delegates the
%% actual metric querying to mongoose_metrics_api.
execute(_Ctx, _Obj, <<"getMetrics">>, Args) ->
    mongoose_metrics_api:get_metrics(get_name(Args));
execute(_Ctx, _Obj, <<"getMetricsAsDicts">>, Args) ->
    mongoose_metrics_api:get_metrics_as_dicts(get_name(Args), get_keys2(Args));
execute(_Ctx, _Obj, <<"getClusterMetricsAsDicts">>, Args) ->
    mongoose_metrics_api:get_cluster_metrics_as_dicts(get_name(Args),
                                                      get_keys2(Args),
                                                      get_nodes(Args)).

%% get_keys is a BIF, so we have a name conflict
%% (get_keys/1 is auto-imported from the erlang module, hence the "2" suffix).
get_keys2(Args) ->
    [prepare_key(Key) || Key <- get_list(<<"keys">>, Args)].

%% A key is either an atom datapoint name (mean, median, ...) or an
%% integer percentile label (50, 75, 90, ...), which is kept as-is.
%% NOTE(review): binary_to_atom/1 creates new atoms from client-supplied
%% input; presumably acceptable for an admin-only API — confirm.
prepare_key(Key) when is_binary(Key) ->
    binary_to_atom(Key);
prepare_key(Key) when is_integer(Key) -> %% For percentiles
    Key.

%% Metric name path, e.g. [<<"global">>, <<"xmppStanzaSent">>]
%% becomes [global, xmppStanzaSent]. [] means "all metrics".
get_name(Args) ->
    [binary_to_atom(Segment) || Segment <- get_list(<<"name">>, Args)].

%% Node filter for the cluster-wide query; [] means "all nodes".
get_nodes(Args) ->
    [binary_to_atom(Node) || Node <- get_list(<<"nodes">>, Args)].
%% Fetches a list-valued argument from the GraphQL args map.
%% Both an absent key and an explicit GraphQL 'null' mean
%% "no value given" and are normalised to the empty list.
get_list(Key, Args) ->
    case maps:get(Key, Args, []) of
        null -> [];
        List -> List
    end.

%% ---- new file: src/metrics/mongoose_metrics_api.erl ----

%% Metric querying logic shared between the GraphQL admin resolver
%% and any future admin interfaces.
-module(mongoose_metrics_api).
-export([get_metrics/1,
         get_metrics_as_dicts/2,
         get_cluster_metrics_as_dicts/3]).

-include("mongoose_logger.hrl").

-type name() :: [atom() | integer()].
-type key() :: atom().

%% Returns every metric under the Name prefix ([] matches all metrics),
%% formatted as typed maps for the GraphQL MetricResult union.
-spec get_metrics(Name :: name()) -> {ok, list()}.
get_metrics(Name) ->
    Values = exometer:get_values(Name),
    {ok, [make_metric_result(V) || V <- Values]}.

%% Same metrics, but as generic key/value dicts, optionally restricted
%% to the given datapoint Keys ([] keeps every datapoint).
-spec get_metrics_as_dicts(Name :: name(), Keys :: [key()]) -> {ok, list()}.
get_metrics_as_dicts(Name, Keys) ->
    Values = exometer:get_values(Name),
    {ok, [make_metric_dict_result(V, Keys) || V <- Values]}.

%% Collects the dict-style metrics from the selected cluster nodes
%% ([] selects every known node) in parallel via RPC. A failing node
%% yields an error entry in its result slot instead of failing the
%% whole query.
-spec get_cluster_metrics_as_dicts(Name :: name(), Keys :: [key()],
                                   Nodes :: [node()]) -> {ok, list()}.
get_cluster_metrics_as_dicts(Name, Keys, Nodes) ->
    SelectedNodes = existing_nodes(Nodes),
    GetValues = fun(Node) -> rpc:call(Node, exometer, get_values, [Name]) end,
    Results = mongoose_lib:pmap(GetValues, SelectedNodes),
    Zipped = lists:zip(SelectedNodes, Results),
    {ok, [make_node_result(Node, Result, Keys) || {Node, Result} <- Zipped]}.
%% Builds the per-node entry of the cluster query result.
%% A successful pmap slot becomes {ok, Map}; anything else is logged
%% and replaced by an {error, Binary} member, so a single bad node
%% does not fail the whole query.
%% NOTE(review): assumes mongoose_lib:pmap/2 wraps each result in
%% {ok, _}; an rpc {badrpc, _} inside a successful slot would crash
%% make_metric_dict_result — confirm pmap's error semantics.
make_node_result(Node, {ok, Values}, Keys) ->
    {ok, #{<<"node">> => Node,
           <<"result">> => [make_metric_dict_result(V, Keys) || V <- Values]}};
make_node_result(Node, Other, _Keys) ->
    ?LOG_ERROR(#{what => metric_get_failed,
                 remote_node => Node, reason => Other}),
    {error, <<"Failed to get metrics">>}.

%% An empty Keys list means "keep every datapoint".
filter_keys(Dict, []) ->
    Dict;
filter_keys(Dict, Keys) ->
    [Pair || {Key, _} = Pair <- Dict, lists:member(Key, Keys)].

%% Restricts the requested node list to nodes actually known to this
%% VM (this node included); [] selects every known node.
existing_nodes(Nodes) ->
    filter_nodes([node() | nodes()], Nodes).

filter_nodes(AllNodes, []) ->
    AllNodes;
filter_nodes(AllNodes, AllowedNodes) ->
    [Node || Node <- AllNodes, lists:member(Node, AllowedNodes)].

%% {Name, DataPoints} -> typed GraphQL union value with the name attached.
make_metric_result({Name, Dict}) ->
    Formatted = format_dict(Dict),
    {ok, Formatted#{<<"name">> => format_name(Name)}}.

%% {Name, DataPoints} -> name plus a generic key/value dict,
%% optionally filtered by Keys.
make_metric_dict_result({Name, Dict}, Keys) ->
    {ok, #{<<"name">> => format_name(Name),
           <<"dict">> => format_dict_entries(Dict, Keys)}}.

format_dict_entries(Dict, Keys) ->
    [{ok, #{<<"key">> => Key, <<"value">> => Value}}
     || {Key, Value} <- filter_keys(Dict, Keys)].

%% Name segments become binaries wrapped in {ok, _}, as the GraphQL
%% executor expects for list members.
format_name(Name) ->
    [format_name_segment(Segment) || Segment <- Name].

format_name_segment(Segment) when is_atom(Segment) ->
    {ok, atom_to_binary(Segment)};
format_name_segment(Segment) when is_binary(Segment) ->
    {ok, Segment}.

%% Converts an exometer datapoint proplist into the typed map for the
%% MetricResult GraphQL union; the concrete type is inferred from the
%% datapoint keys (see format_dict2/1).
format_dict(Dict) ->
    format_dict2(maps:from_list(Dict)).
%% Infers the metric type from the datapoint keys and dispatches to
%% the matching formatter. Clause order matters:
%%  - a counter dict carries both 'value' and 'ms_since_reset', so the
%%    counter clause must precede the gauge ('value') clause;
%%  - merged_inet_stats must precede rdbms_stats, since both carry
%%    'recv_cnt'.
format_dict2(#{one := _} = Dict) ->
    format_spiral(Dict);
format_dict2(#{ms_since_reset := _} = Dict) ->
    format_counter(Dict);
format_dict2(#{value := _} = Dict) ->
    format_gauge(Dict);
format_dict2(#{median := _} = Dict) ->
    format_histogram(Dict);
format_dict2(#{connections := _, recv_cnt := _} = Dict) ->
    format_merged_inet_stats(Dict);
format_dict2(#{processes_used := _} = Dict) ->
    format_vm_stats_memory(Dict);
format_dict2(#{port_count := _} = Dict) ->
    format_vm_system_info(Dict);
format_dict2(#{fsm := _, regular := _} = Dict) ->
    format_probe_queues(Dict);
format_dict2(#{recv_cnt := _, workers := _} = Dict) ->
    format_rdbms_stats(Dict).

%% Spiral: event rate over the last minute plus a total count.
format_spiral(#{one := One, count := Count}) ->
    #{<<"type">> => <<"spiral">>, <<"one">> => One, <<"count">> => Count}.

format_counter(#{value := Value, ms_since_reset := MsSinceReset}) ->
    #{<<"type">> => <<"counter">>,
      <<"value">> => Value,
      <<"ms_since_reset">> => MsSinceReset}.

format_gauge(#{value := Value}) ->
    #{<<"type">> => <<"gauge">>, <<"value">> => Value}.

%% Integer keys 50..999 are exometer percentile labels; they are
%% renamed to pNN for GraphQL field names.
format_histogram(#{n := N, mean := Mean, min := Min, max := Max, median := Median,
                   50 := P50, 75 := P75, 90 := P90, 95 := P95,
                   99 := P99, 999 := P999}) ->
    #{<<"type">> => <<"histogram">>, <<"n">> => N, <<"mean">> => Mean,
      <<"min">> => Min, <<"max">> => Max, <<"median">> => Median,
      <<"p50">> => P50, <<"p75">> => P75, <<"p90">> => P90, <<"p95">> => P95,
      <<"p99">> => P99, <<"p999">> => P999}.

%% Metrics from a pool of connections
format_merged_inet_stats(#{connections := Connections,
                           recv_cnt := RecvCnt, recv_max := RecvMax,
                           recv_oct := RecvOct,
                           send_cnt := SendCnt, send_max := SendMax,
                           send_oct := SendOct, send_pend := SendPend}) ->
    #{<<"type">> => <<"merged_inet_stats">>, <<"connections">> => Connections,
      <<"recv_cnt">> => RecvCnt, <<"recv_max">> => RecvMax,
      <<"recv_oct">> => RecvOct,
      <<"send_cnt">> => SendCnt, <<"send_max">> => SendMax,
      <<"send_oct">> => SendOct, <<"send_pend">> => SendPend}.
%% RDBMS pool stats: the same inet counters as merged_inet_stats plus
%% the number of pool workers.
format_rdbms_stats(#{recv_cnt := RecvCnt, recv_max := RecvMax, recv_oct := RecvOct,
                     send_cnt := SendCnt, send_max := SendMax, send_oct := SendOct,
                     send_pend := SendPend, workers := Workers}) ->
    #{<<"type">> => <<"rdbms_stats">>, <<"workers">> => Workers,
      <<"recv_cnt">> => RecvCnt, <<"recv_max">> => RecvMax,
      <<"recv_oct">> => RecvOct,
      <<"send_cnt">> => SendCnt, <<"send_max">> => SendMax,
      <<"send_oct">> => SendOct, <<"send_pend">> => SendPend}.

%% VM memory breakdown (bytes).
format_vm_stats_memory(#{total := Total, processes_used := ProcessesUsed,
                         atom_used := AtomUsed, binary := Binary,
                         ets := Ets, system := System}) ->
    #{<<"type">> => <<"vm_stats_memory">>,
      <<"total">> => Total, <<"processes_used">> => ProcessesUsed,
      <<"atom_used">> => AtomUsed,
      <<"binary">> => Binary, <<"ets">> => Ets, <<"system">> => System}.

%% VM resource counters and their configured limits.
format_vm_system_info(#{port_count := PortCount, port_limit := PortLimit,
                        process_count := ProcessCount,
                        process_limit := ProcessLimit,
                        ets_limit := EtsLimit}) ->
    #{<<"type">> => <<"vm_system_info">>,
      <<"port_count">> => PortCount, <<"port_limit">> => PortLimit,
      <<"process_count">> => ProcessCount, <<"process_limit">> => ProcessLimit,
      <<"ets_limit">> => EtsLimit}.

%% Message-queue length probe, split by process kind.
format_probe_queues(#{fsm := Fsm, regular := Regular, total := Total}) ->
    #{<<"type">> => <<"probe_queues">>,
      <<"fsm">> => Fsm, <<"regular">> => Regular, <<"total">> => Total}.
From 27ad8005dadc3c3aba539ccc92d6389c22b7cfae Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Tue, 14 Jun 2022 17:39:35 +0200 Subject: [PATCH 9/9] Add types --- .../mongoose_graphql_metric_admin_query.erl | 1 + src/metrics/mongoose_metrics_api.erl | 18 ++++++++++++++---- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/src/graphql/admin/mongoose_graphql_metric_admin_query.erl b/src/graphql/admin/mongoose_graphql_metric_admin_query.erl index d501895f923..c2fdebaf76d 100644 --- a/src/graphql/admin/mongoose_graphql_metric_admin_query.erl +++ b/src/graphql/admin/mongoose_graphql_metric_admin_query.erl @@ -20,6 +20,7 @@ execute(_Ctx, _Obj, <<"getClusterMetricsAsDicts">>, Args) -> Nodes = get_nodes(Args), mongoose_metrics_api:get_cluster_metrics_as_dicts(Name, Keys, Nodes). +%% get_keys is a BIF, so we have a name conflict get_keys2(Args) -> Keys = get_list(<<"keys">>, Args), lists:map(fun prepare_key/1, Keys). diff --git a/src/metrics/mongoose_metrics_api.erl b/src/metrics/mongoose_metrics_api.erl index 07258e15b95..53fa08941a5 100644 --- a/src/metrics/mongoose_metrics_api.erl +++ b/src/metrics/mongoose_metrics_api.erl @@ -7,19 +7,29 @@ -type name() :: [atom() | integer()]. -type key() :: atom(). - --spec get_metrics(Name :: name()) -> {ok, list()}. +-type metric_result() :: + {ok, #{binary() => binary() | non_neg_integer()}}. +-type dict_result() :: #{binary() => binary() | non_neg_integer()}. +-type metric_dict_result() :: + {ok, #{binary() => binary() | [dict_result()]}}. +-type metric_node_dict_result() :: + {ok, #{binary() => binary() | [metric_dict_result()]}} + | {error, binary()}. + +-spec get_metrics(Name :: name()) -> {ok, [metric_result()]}. get_metrics(Name) -> Values = exometer:get_values(Name), {ok, lists:map(fun make_metric_result/1, Values)}. --spec get_metrics_as_dicts(Name :: name(), Keys :: [key()]) -> {ok, list()}. +-spec get_metrics_as_dicts(Name :: name(), Keys :: [key()]) -> + {ok, [metric_dict_result()]}. 
get_metrics_as_dicts(Name, Keys) -> Values = exometer:get_values(Name), {ok, [make_metric_dict_result(V, Keys) || V <- Values]}. -spec get_cluster_metrics_as_dicts(Name :: name(), Keys :: [key()], - Nodes :: [node()]) -> {ok, list()}. + Nodes :: [node()]) -> + {ok, [metric_node_dict_result()]}. get_cluster_metrics_as_dicts(Name, Keys, Nodes) -> Nodes2 = existing_nodes(Nodes), F = fun(Node) -> rpc:call(Node, exometer, get_values, [Name]) end,