From d5ca2d6ec6a603e4267f5285c6699011fd0b5de3 Mon Sep 17 00:00:00 2001
From: eslam-gomaa
Date: Mon, 24 Jun 2024 00:21:58 +0300
Subject: [PATCH 1/3] organize cli interface

---
 dashboard.yaml                    |  62 +-
 dashboard2.yaml                   | 191 ++++++
 kptop_tool.py                     |  33 +-
 kubePtop/cli_args.py              | 180 ++++++
 kubePtop/dashboard_monitor.py     | 978 ++----------------------------
 kubePtop/dashboard_yaml_loader.py |  45 ++
 6 files changed, 525 insertions(+), 964 deletions(-)
 create mode 100644 dashboard2.yaml
 create mode 100644 kubePtop/cli_args.py
 create mode 100644 kubePtop/dashboard_yaml_loader.py

diff --git a/dashboard.yaml b/dashboard.yaml
index f7d7e62..cab4ba4 100644
--- a/dashboard.yaml
+++ b/dashboard.yaml
@@ -16,10 +16,10 @@ dashboard:
           split_mode: column
           split:
             left_a:
-              size: 20
+              size: 25
               ratio: 1
             left_b:
-              size: 30
+              size: 25
               ratio: 1
             left_c:
               size: 0
               ratio: 1
@@ -68,20 +68,27 @@ dashboard:
     - name: topic
      default: .*
       cliArgument:
         enable: true
-        short: -T
+        short: -t
         required: true
+        description: "Kafka Topic Name"
     - name: namespace
       default: .*
       cliArgument:
         enable: true
-        short: -N
+        short: -n
+        required: false
+    - name: kafka-topics-namespace
+      default: kafka-resources
+      cliArgument:
+        enable: true
+        short: -tn
+        required: false
+    - name: pod
+      default: .*
+      cliArgument:
+        enable: true
+        short: -po
         required: false
-
-  # cliArguments:
-  #   topic:
-  #     arg: "--topic"
-  #     required: False
-  #     default: .*
 
   visualization:
     - name: Kafka Data In per second.
@@ -97,7 +104,7 @@ dashboard:
         width: 80
         maxHeight: 17
         maxWidth: 45
-        updateIntervalSeconds: 1
+        updateIntervalSeconds: 5
       historyData:
         enable: true
         time: 5m
@@ -108,19 +115,19 @@ dashboard:
       type: asciiGraph # || progressBarList || asciiText
       metricUnit: kb/s
       metric: >
-        topk(20, sum(irate(kafka_server_brokertopicmetrics_bytesout_total[5m])) by (strimzi_io_cluster, topic)) / 1024
+        topk(20, sum(irate(kafka_server_brokertopicmetrics_bytesout_total{topic=~"$topic"}[5m])) by (strimzi_io_cluster, topic)) / 1024
       custom_key: "🥕 {{topic}}"
       asciiGraphOptions:
         height: 0
         width: 80
         maxHeight: 17
         maxWidth: 45
-        updateIntervalSeconds: 10
+        updateIntervalSeconds: 5
       historyData:
         enable: true
         time: 5m
 
-    - name: Kafka brokers memory usage
+    - name: Kafka pods memory usage (Sorted by higher memory usage)
       enable: true
       box: left_a
       type: progressBarList
@@ -128,26 +135,26 @@ dashboard:
       metricUnit: mb
       metrics:
         total_value_metric: |
          # Memory limits
-          sum(container_spec_memory_limit_bytes{namespace="kafka", pod=~".*"}) by (pod, topology_ebs_csi_aws_com_zone)
+          sum(container_spec_memory_limit_bytes{namespace="$namespace", pod=~"$pod"}) by (pod, topology_ebs_csi_aws_com_zone)
         usage_value_metric: |
          # memory usage sorted by higher usage
-          sort_desc(sum(container_memory_usage_bytes{namespace="kafka", pod=~".*(zookeeper|brokers).*"}) by (pod, topology_ebs_csi_aws_com_zone))
+          sort_desc(sum(container_memory_usage_bytes{namespace="$namespace", pod=~"$pod"}) by (pod, topology_ebs_csi_aws_com_zone))
       custom_key: "{{pod}}"
       progressBarListOptions:
         maxItemsCount: 10
         lineBreak: true
         showBarPercentage: true
         barWidth: 25
-        updateIntervalSeconds: 2
+        updateIntervalSeconds: 5
 
-    - name: Kafka Pods Table
+    - name: Kafka Pods memory usage (simpleTable example)
       enable: true
       box: left_b
 
       type: simpleTable
       metricUnit: byte
       metric: |
-        sum(container_memory_usage_bytes{namespace="kafka", pod=~".*(zookeeper|brokers|kafka).*"}) by (pod, namespace, karpenter_sh_capacity_type, topology_kubernetes_io_zone,node_kubernetes_io_instance_type)
+        sum(container_memory_usage_bytes{namespace="$namespace", pod=~"$pod"}) by (pod, namespace, karpenter_sh_capacity_type, topology_kubernetes_io_zone,node_kubernetes_io_instance_type)
       # custom_key: |
       #   f"{labels['pod']}"
       simpleTableOptions:
         tableType: plain # https://github.com/astanin/python-tabulate?tab=readme-ov-file#table-format
         showValue: true
         headersUppercase: true
         auto_convert_value_from_byte: true
         showTableIndex: true
-        updateIntervalSeconds: 2
+        updateIntervalSeconds: 5
 
@@ -156,38 +163,37 @@ dashboard:
-    - name: Kafka Used Storage Table
+    - name: Kafka pods details (advancedTable example)
       enable: true
       box: left_c
-
       type: advancedTable
       metricUnit: byte
       columns:
         memory usage:
           metric: |
-            sort_desc(sum(container_memory_usage_bytes{namespace="kafka", pod=~".*broker.*"}) by (pod, topology_ebs_csi_aws_com_zone))
+            sort_desc(sum(container_memory_usage_bytes{namespace="$namespace", pod=~"$pod"}) by (pod, topology_ebs_csi_aws_com_zone))
           metricUnit: byte
         memory limit:
           metric: |
-            sort_desc(sum(container_spec_memory_limit_bytes{namespace="kafka", pod=~".*broker.*"}) by (pod, topology_ebs_csi_aws_com_zone))
+            sort_desc(sum(container_spec_memory_limit_bytes{namespace="$namespace", pod=~"$pod"}) by (pod, topology_ebs_csi_aws_com_zone))
           metricUnit: byte
         memory cache:
           metric: |
-            sort_desc(sum(container_memory_cache{namespace="kafka", pod=~".*(zookeeper|brokers).*"}) by (pod, topology_ebs_csi_aws_com_zone))
+            sort_desc(sum(container_memory_cache{namespace="$namespace", pod=~"$pod"}) by (pod, topology_ebs_csi_aws_com_zone))
           metricUnit: byte
         memory swap:
           metric: |
-            sum(container_memory_swap{namespace="kafka", pod=~".*broker.*"}) by (pod, topology_ebs_csi_aws_com_zone)
+            sum(container_memory_swap{namespace="$namespace", pod=~"$pod"}) by (pod, topology_ebs_csi_aws_com_zone)
           metricUnit: byte
         file descriptors:
           metric: |
-            sort_desc(sum(container_file_descriptors{namespace="kafka", pod=~".*broker.*"}) by (pod, topology_ebs_csi_aws_com_zone))
+            sort_desc(sum(container_file_descriptors{namespace="$namespace", pod=~"$pod"}) by (pod, topology_ebs_csi_aws_com_zone))
           metricUnit: counter
         up time:
           metric: |
-            sum(time() - kube_pod_start_time{namespace="kafka", pod=~".*(zookeeper|brokers).*"}) by (pod)
+            sum(time() - kube_pod_start_time{namespace="$namespace", pod=~"$pod"}) by (pod)
           metricUnit: seconds
       custom_key: "{{pod}}" # "{{pod}} - {{topology_ebs_csi_aws_com_zone}}"
       advancedTableOptions:
diff --git a/dashboard2.yaml b/dashboard2.yaml
new file mode 100644
index 0000000..2fe8933
--- /dev/null
+++ b/dashboard2.yaml
@@ -0,0 +1,191 @@
+dashboard:
+  name: Kafka cluster
+  description: Staging Kafka dashboard
+  layout:
+    split_mode: row # row
+    header:
+      enable: false
+      size: 3
+      ratio: 1
+    body:
+      boxes:
+        left:
+          enable: true
+          size: 0
+          ratio: 1
+          split_mode: column
+          split:
+            # left_a:
+            #   size: 20
+            #   ratio: 1
+            # left_b:
+            #   size: 30
+            #   ratio: 1
+            # left_c:
+            #   size: 0
+            #   ratio: 1
+            # left_d:
+            #   size: 0
+            #   ratio: 1
+        middle:
+          enable: false
+          size: 0
+          ratio: 1
+          split_mode: column
+          split:
+            middle_a:
+              size: 0
+              ratio: 1
+            middle_b:
+              size: 0
+              ratio: 1
+            middle_c:
+              size: 0
+              ratio: 1
+        right:
+          enable: false
+          size: 0
+          ratio: 1
+          split_mode: column
+          split:
+            right_a:
+              size: 0
+              ratio: 1
+            right_b:
+              size: 0
+              ratio: 1
+
+  defaultDataSource:
+    type: prometheus
+    endpoint: ""
+    secure: false
+    basicAuthEnabled: false
+    basicAuthUserNameVariable: ""
+    basicAuthPasswordVariable: ""
+
+  variables:
+    - name: topic
+      default: .*
+      cliArgument:
+        enable: true
+        short: -T
+        required: true
+    - name: my-namespace
+      default: .*
+      cliArgument:
+        enable: true
+        short: -N
+        required: false
+
+  visualization:
+    # - name: Kafka Data In per second.
+    #   box: right_a
+    #   enable: true
+    #   type: asciiGraph # || progressBar || asciiText || markdown || markdown Table ||Table
+    #   metricUnit: kb/s # byte_to_kb_mb_gb_tb # dynamic_byte_convert
+    #   metric: >
+    #     topk(20, sum(irate(kafka_server_brokertopicmetrics_bytesin_total{topic=~"$topic"}[5m])) by (strimzi_io_cluster, topic)) / 1024
+    #   custom_key: "🍅 {{topic}}"
+    #   asciiGraphOptions:
+    #     height: 0
+    #     width: 80
+    #     maxHeight: 17
+    #     maxWidth: 45
+    #   updateIntervalSeconds: 1
+    #   historyData:
+    #     enable: true
+    #     time: 5m
+
+    # - name: Kafka Data Out per second.
+    #   enable: true
+    #   box: right_b
+    #   type: asciiGraph # || progressBarList || asciiText
+    #   metricUnit: kb/s
+    #   metric: >
+    #     topk(20, sum(irate(kafka_server_brokertopicmetrics_bytesout_total[5m])) by (strimzi_io_cluster, topic)) / 1024
+    #   custom_key: "🥕 {{topic}}"
+    #   asciiGraphOptions:
+    #     height: 0
+    #     width: 80
+    #     maxHeight: 17
+    #     maxWidth: 45
+    #   updateIntervalSeconds: 10
+    #   historyData:
+    #     enable: true
+    #     time: 5m
+
+    # - name: Kafka brokers memory usage
+    #   enable: true
+    #   box: left_a
+    #   type: progressBarList
+    #   metricUnit: mb
+    #   metrics:
+    #     total_value_metric: |
+    #       # Memory limits
+    #       sum(container_spec_memory_limit_bytes{namespace="kafka", pod=~".*"}) by (pod, topology_ebs_csi_aws_com_zone)
+    #     usage_value_metric: |
+    #       # memory usage sorted by higher usage
+    #       sort_desc(sum(container_memory_usage_bytes{namespace="kafka", pod=~".*(zookeeper|brokers).*"}) by (pod, topology_ebs_csi_aws_com_zone))
+    #     custom_key: "{{pod}}"
+    #   progressBarListOptions:
+    #     maxItemsCount: 10
+    #     lineBreak: true
+    #     showBarPercentage: true
+    #     barWidth: 25
+    #   updateIntervalSeconds: 2
+
+    # - name: Kafka Pods Table
+    #   enable: true
+    #   box: left_b
+
+    #   type: simpleTable
+    #   metricUnit: byte
+    #   metric: |
+    #     sum(container_memory_usage_bytes{namespace="kafka", pod=~".*(zookeeper|brokers|kafka).*"}) by (pod, namespace, karpenter_sh_capacity_type, topology_kubernetes_io_zone,node_kubernetes_io_instance_type)
+    #   # custom_key: |
+    #   #   f"{labels['pod']}"
+    #   simpleTableOptions:
+    #     tableType: plain # https://github.com/astanin/python-tabulate?tab=readme-ov-file#table-format
+    #     showValue: true
+    #     headersUppercase: true
+    #     auto_convert_value_from_byte: true
+    #     showTableIndex: true
+    #   updateIntervalSeconds: 2
+
+    - name: List pods details
+      enable: true
+      box: left
+      type: advancedTable
+      metricUnit: byte
+      columns:
+        memory usage:
+          metric: |
+            sort_desc(sum(container_memory_usage_bytes{namespace="kafka", pod=~".*(zookeeper|brokers).*"}) by (pod, topology_ebs_csi_aws_com_zone))
+          metricUnit: byte
+        memory limit:
+          metric: |
+            sort_desc(sum(container_spec_memory_limit_bytes{namespace="kafka", pod=~".*(zookeeper|brokers).*"}) by (pod, topology_ebs_csi_aws_com_zone))
+          metricUnit: byte
+        memory cache:
+          metric: |
+            sort_desc(sum(container_memory_cache{namespace="kafka", pod=~".*(zookeeper|brokers).*"}) by (pod, topology_ebs_csi_aws_com_zone))
+          metricUnit: byte
+        memory swap:
+          metric: |
+            sum(container_memory_swap{namespace="kafka", pod=~".*(zookeeper|brokers).*"}) by (pod, topology_ebs_csi_aws_com_zone)
+          metricUnit: byte
+        file descriptors:
+          metric: |
+            sort_desc(sum(container_file_descriptors{namespace="kafka", pod=~".*(zookeeper|brokers).*"}) by (pod, topology_ebs_csi_aws_com_zone))
+          metricUnit: counter
+        up time:
+          metric: |
+            sum(time() - kube_pod_start_time{namespace="kafka", pod=~".*(zookeeper|brokers).*"}) by (pod)
+          metricUnit: seconds
+      custom_key: "{{pod}}" # "{{pod}} - {{topology_ebs_csi_aws_com_zone}}"
+      advancedTableOptions:
+        tableType: grid # https://github.com/astanin/python-tabulate?tab=readme-ov-file#table-format
+        headersUppercase: true
+        autoConvertValue: true
+        showTableIndex: true
+        updateIntervalSeconds: 3
diff --git a/kptop_tool.py b/kptop_tool.py
index 1b75679..77eb404 100644
--- a/kptop_tool.py
+++ b/kptop_tool.py
@@ -1,20 +1,39 @@
-def run():
-    # It runs at the initilization
-    from kubePtop.cli import Cli
+# def run():
+#     # It runs at initialization
+#     from kubePtop.cli import Cli
 # run()
-from kubePtop.dashboard_monitor import customDashboardMonitoring
+# from kubePtop.dashboard_monitor import customDashboardMonitoring
 from kubePtop.read_env import ReadEnv
 env = ReadEnv()
 env.read_env()
-import rich
+from kubePtop.cli_args import Cli
+cli = Cli()
+
+# import rich
+
+# test = customDashboardMonitoring()
+
+# Read cli
+# if --dashboard
+#    # .parse_dashboard['data'].get('dashboard').get('variables', {})
+#
+# elif --command
+# else -> print help
+
+
 
-test = customDashboardMonitoring()
 # rich.print(test._find_variables_in_query('topk(20, sum(irate(kafka_server_brokertopicmetrics_bytesin_total{topic=~"$topic", namespace=~"$namespace"}[5m])) by (strimzi_io_cluster, topic)) / 1024'))
 # exit(1)
-rich.print(test.build_custom_dashboard("./dashboard.yaml"))
+# rich.print(test.build_custom_dashboard("./dashboard.yaml"))
 # rich.print(test.nodeManagedK8sInfo('.*'))
 # print(test.topNode())
 # test.topNodeTable(option="cloud")
diff --git a/kubePtop/cli_args.py b/kubePtop/cli_args.py
new file mode 100644
index 0000000..ddd65d4
--- /dev/null
+++ b/kubePtop/cli_args.py
@@ -0,0 +1,180 @@
+import argparse
+from logging import Logger
+import yaml
+import rich
+import os
+import logging
+from kubePtop.dashboard_monitor import customDashboardMonitoring
+from kubePtop.dashboard_yaml_loader import dashboardYamlLoader
+dashboard_yaml_loader = dashboardYamlLoader()
+custom_dashboard_monitoring = customDashboardMonitoring()
+class Cli():
+    def __init__(self):
+        self.default_cli_args = [
+            {
+                "name": "dashboard",
+                "default": "",
+                "cliArgument": {
+                    "enable": True,
+                    "short": "-D",
+                    "required": False,
+                    "description": "dashboard name to display"
+                }
+            },
+            {
+                "name": "command",
+                "default": "",
+                "cliArgument": {
+                    "enable": True,
+                    "short": "-C",
+                    "required": False,
+                    "description": "command name to display"
+                }
+            },
+            {
+                "name": "vhelp",
+                "default": ".*",
+                "cliArgument": {
+                    "enable": True,
+                    "short": "-vh",
+                    "required": False,
+                    "description": "List the variable CLI arguments of the dashboard/command manifest"
+                }
+            },
+        ]
+        self.variables = {}
+        self.build_variables()
+
+    # def parsed_dashboard_yaml_file(self, yaml_file):
+    #     out = {
+    #         "success": False,
+    #         "data": None,
+    #         "fail_reason": ""
+    #     }
+
+    #     # Check if the file does NOT exist
+    #     if not os.path.isfile(yaml_file):
+    #         out['fail_reason'] = f"Dashboard File '{yaml_file}' does NOT exist"
+    #         return out
+
+    #     # Read the file
+    #     try:
+    #         with open(yaml_file, 'r') as file:
+    #             content = file.read()
+    #             out['data'] = yaml.safe_load(content)
+    #     except Exception as e:
+    #         out['fail_reason'] = f"Failed to open the dashboard file '{yaml_file}' > {e}"
+    #         return out
+
+    #     out['success'] = True
+    #     return out
+
+    def build_parser(self, variables):
+        parser = argparse.ArgumentParser(description='Process some CLI arguments.')
+        for var in variables:
+            if var['cliArgument']['enable']:
+                if var['name'] == 'vhelp':
+                    parser.add_argument(
+                        f"--{var['name']}",
+                        var['cliArgument']['short'],
+                        required=var['cliArgument']['required'],
+                        action='store_true',
+                        help=var['cliArgument'].get('description', f'Specify the {var["name"]} variable value - default: "{var["default"]}"')
+                    )
+                else:
+                    parser.add_argument(
+                        f"--{var['name']}",
+                        var['cliArgument']['short'],
+                        required=var['cliArgument']['required'],
+                        default=var['default'],
+                        help=var['cliArgument'].get('description', f'Specify the {var["name"]} variable value - default: "{var["default"]}"')
+                    )
+        return parser
+
+    def build_variables(self):
+        initial_parser = self.build_parser(self.default_cli_args)
+        # rich.print(initial_parser)
+        initial_args, unknown_args = initial_parser.parse_known_args()
+
+        if initial_args.command and initial_args.dashboard:
+            rich.print("\n[yellow bold]Can NOT specify '--dashboard' & '--command' together\n")
+            initial_parser.print_help()
+            exit(1)
+
+        elif initial_args.dashboard:
+            parsed_dashboard = dashboard_yaml_loader.load_dashboard_data(dashboard_name=initial_args.dashboard)
+            if not parsed_dashboard['success']:
+                logging.error(f"Failed to load dashboard: '{initial_args.dashboard}'")
+                logging.error(parsed_dashboard['fail_reason'])
+                exit(1)
+
+            variables = parsed_dashboard['data'].get('dashboard').get('variables', {})
+            # Combine default CLI args and dashboard variables
+            all_variables = self.default_cli_args + variables
+            # Rebuild the parser with all variables
+            final_parser = self.build_parser(all_variables)
+            # Parse all arguments with the final parser
+            final_args, unknown_args = final_parser.parse_known_args()
+            # Store the arguments in the variables dictionary
+            args_dict = vars(final_args)
+
+            if args_dict['vhelp']:
+                final_parser.print_help()
+                exit(0)
+
+            for arg, value in args_dict.items():
+                self.variables[arg] = value
+
+            args = final_parser.parse_args()
+            args_dict = vars(args)
+            for arg, value in args_dict.items():
+                if value == 'ALL':
+                    value = ".*"
+                self.variables[arg] = value
+
+            custom_dashboard_monitoring.build_custom_dashboard(dashboard_data=parsed_dashboard, dashboard_variables=self.variables)
+
+        elif initial_args.command:
+            print('command')
+        else:
+            pass
+
+    def argparse(self):
+        pass
+
+# cli = Cli()
+# cli.build_variables([])
+# cli.build_variables()
+# def run():
+#     cli = Cli()
diff --git a/kubePtop/dashboard_monitor.py b/kubePtop/dashboard_monitor.py
index 319491d..fd1d175 100644
--- a/kubePtop/dashboard_monitor.py
+++ b/kubePtop/dashboard_monitor.py
@@ -1,8 +1,7 @@
+import argparse
+import re
 from math import floor
 import time
-# from tabulate import tabulate
-# # import textwrap
-# from datetime import datetime, timezone
 import threading
 import rich
 from rich.live import Live
@@ -36,23 +35,11 @@ class customDashboardMonitoring(PrometheusNodeMetrics):
     def __init__(self):
         super().__init__()
         self.layout_list = []
-        # self.lock = threading.Lock()
         self.data = {
             "graphs": {}
         }
         self.layout = None
         self.variables = {}
-        self.default_cli_args = [
-            {
-                "name": "dashboard",
-                "default": None,
-                "cliArgument": {
-                    "enable": True,
-                    "short": "-D",
-                    "required": True
-                }
-            }
-        ]
 
     def run_query(self, query):
         if GlobalAttrs.env_connection_method == 'prometheus_endpoint':
@@ -62,43 +49,69 @@ def run_query(self, query):
             self.K8s_authenticate()
             return self.run_query_pod_portForward(query)
 
-    def build_variables(self, variables):
-        import argparse
+        return query
+
+    def build_parser(self, variables):
         parser = argparse.ArgumentParser(description='Process some CLI arguments.')
-        variables += self.default_cli_args
         for var in variables:
-            if var['cliArgument']['enable']:
+            if var['cliArgument']['enable']:
+                if var['name'] == 'vhelp':
+                    parser.add_argument(
+                        f"--{var['name']}",
+                        var['cliArgument']['short'],
+                        required=var['cliArgument']['required'],
+                        action='store_true',
+                        help=var['cliArgument'].get('description', f'Specify the {var["name"]} variable value - default: "{var["default"]}"')
+                    )
+                else:
                     parser.add_argument(
                         f"--{var['name']}",
                         var['cliArgument']['short'],
                         required=var['cliArgument']['required'],
                         default=var['default'],
-                        help=f'Specify the {var["name"]} variable value - default: "{var["default"]}"'
+                        help=var['cliArgument'].get('description', f'Specify the {var["name"]} variable value - default: "{var["default"]}"')
                     )
+        return parser
+
+    def build_variables(self, initial_args, variables):
+        # Combine default CLI args and dashboard variables
+        all_variables = initial_args + variables
+
+        # Rebuild the parser with all variables
+        final_parser = self.build_parser(all_variables)
+
+        # Parse all arguments with the final parser, ignoring unknown args
+        final_args, unknown_args = final_parser.parse_known_args()
+        rich.print("Parsed arguments:", final_args)
+        rich.print("Unknown arguments:", unknown_args)
+
+        # Store the arguments in the variables dictionary
+        args_dict = vars(final_args)
+        if args_dict.get('vhelp'):
+            final_parser.print_help()
+            return
 
-        args = parser.parse_args()
-        args_dict = vars(args)
         for arg, value in args_dict.items():
             self.variables[arg] = value
-        return args
 
+        for arg, value in args_dict.items():
+            if value == 'ALL':
+                value = ".*"
+            self.variables[arg] = value
 
-    def build_custom_dashboard(self, yaml_file):
+        return final_args
 
-        # Parse the file
-        parse_dashboard = self.parse_dashboard_yaml_file(yaml_file)
-        if not parse_dashboard['success']:
-            logging.error(f"Failed to load dashboard file: '{yaml_file}'")
-            logging.error([parse_dashboard['fail_reason']])
+    def build_custom_dashboard(self, dashboard_data, dashboard_variables):
 
         # Build the Layout structure
-        self.make_layout(layout_structure_dct=parse_dashboard['data'])
+        self.make_layout(layout_structure_dct=dashboard_data['data'])
 
         # Build the dashboard variables
-        self.build_variables(variables=parse_dashboard['data'].get('dashboard').get('variables', {}))
+        # self.build_variables(variables=dashboard_data['data'].get('dashboard').get('variables', {}), initial_args=initial_args)
 
         # visualize the metrics on the layout
-        self.update_layout_visualization(layout_structure_dct=parse_dashboard['data'])
+        self.variables = dashboard_variables
+        self.update_layout_visualization(layout_structure_dct=dashboard_data['data'])
 
 
     def update_layout_visualization(self, layout_structure_dct):
@@ -151,9 +164,6 @@ def update_layout_visualization(self, layout_structure_dct):
                 rich.print("Ok")
                 exit(0)
 
-        # rich.print(self.layout)
-
-
     def parse_dashboard_yaml_file(self, yaml_file):
         out = {
             "success": False,
             "data": None,
             "fail_reason": ""
         }
@@ -175,8 +185,6 @@ def parse_dashboard_yaml_file(self, yaml_file):
             out['fail_reason'] = f"Failed to open the dashboard file '{yaml_file}' > {e}"
             return out
 
-        # Validate the input Yaml
-
         out['success'] = True
         return out
 
@@ -210,22 +218,6 @@ def make_layout(self, layout_structure_dct, print_layout=False) -> Layout:
         middle_enable = layout_dct['body']['boxes'].get('middle', {}).get('enable', False)
         split_mode = layout_dct.get('split_mode', 'row')
 
-        # if left_enable and right_enable and middle_enable:
-        #     layout["body"].split_row(Layout(name="left", size=layout_dct['body']['boxes']['left']['size']), Layout(name="middle", size=layout_dct['body']['boxes']['middle']['size']), Layout(name="right", size=layout_dct['body']['boxes']['right']['size']))
-        #     self.layout_list = ['left', 'middle', 'right']
-
-        # elif left_enable and not right_enable and not middle_enable:
-        #     layout["body"].split_row(Layout(name="left", size=layout_dct['body']['boxes']['left']['size']))
-        #     self.layout_list = ['left']
-
-        # elif right_enable and not left_enable and not middle_enable:
-        #     layout["body"].split_row(Layout(name="right", size=layout_dct['body']['boxes']['right']['size']))
-        #     self.layout_list = ['right']
-
-        # elif middle_enable and not left_enable and not right_enable:
-        #     layout["body"].split_row(Layout(name="middle", size=layout_dct['body']['boxes']['middle']['size']))
-        #     self.layout_list = ['middle']
-
         # If all are enabled
         if left_enable and right_enable and middle_enable:
             if split_mode == 'row':
@@ -378,7 +370,6 @@ def get_metric_data(self, metric, custom_key=None, evaluate_cli_argument_variabl
             metric = self.replace_cli_argument_variable(metric)
             out['metric'] = metric
         # Run Metric
-        # rich.print(metric_)
         metric_result = self.run_query(metric)
 
         if not metric_result.get('status') == 'success':
@@ -422,11 +413,8 @@ def get_metric_data(self, metric, custom_key=None, evaluate_cli_argument_variabl
         return out
 
     def _find_variables_in_query(self, query):
-        import re
         # Find all variables in the form $VARIABLE
         variables = re.findall(r'\$\w+', query)
-        # Remove the $ from the variable names
-        # variable_names = [variable[1:] for variable in variables]
         return variables
 
     def replace_cli_argument_variable(self, query):
@@ -440,8 +428,7 @@ def replace_cli_argument_variable(self, query):
         for variable in found_variables:
             if variable[1:] in self.variables:
                 # Replace the variable in the query string
-                updated_query = query.replace(variable, self.variables[variable[1:]])
-                return updated_query
+                query = query.replace(variable, self.variables[variable[1:]])
 
         return query
 
@@ -514,11 +501,11 @@ def build_progress_bar_list(self, name, layout_box_name, progress_bar_list_optio
             # Get usage data
             total_data = self.get_metric_data(total_value_metric, custom_key=custom_key)
             if not total_data['success']:
-                self.layout[layout_box_name].update(Panel(f"[red]Failed to get data from query 'total_value_metric': [bold]{metric_data_dct['fail_reason']}[/bold][/red]\n\n[bold]METRIC:[/bold]\n[grey53]{metric_data_dct['metric']}", title=f"[b]{name}", padding=(1, 1), expand=True, safe_box=True, highlight=True, height=0))
+                self.layout[layout_box_name].update(Panel(f"[red]Failed to get data from query 'total_value_metric': [bold]{total_data['fail_reason']}[/bold][/red]\n\n[bold]METRIC:[/bold]\n[grey53]{total_data['metric']}", title=f"[b]{name}", padding=(1, 1), expand=True, safe_box=True, highlight=True, height=0))
 
             usage_data = self.get_metric_data(usage_value_metric, custom_key=custom_key)
             if not usage_data['success']:
-                self.layout[layout_box_name].update(Panel(f"[red]Failed to get data from query 'total_value_metric': [bold]{metric_data_dct['fail_reason']}[/bold][/red]\n\n[bold]METRIC:[/bold]\n[grey53]{metric_data_dct['metric']}", title=f"[b]{name}", padding=(1, 1), expand=True, safe_box=True, highlight=True, height=0))
+                self.layout[layout_box_name].update(Panel(f"[red]Failed to get data from query 'usage_value_metric': [bold]{usage_data['fail_reason']}[/bold][/red]\n\n[bold]METRIC:[/bold]\n[grey53]{usage_data['metric']}", title=f"[b]{name}", padding=(1, 1), expand=True, safe_box=True, highlight=True, height=0))
 
             data = {}
             for k, v in usage_data['data'].items():
@@ -688,7 +675,7 @@ def build_advanced_table(self, name, layout_box_name, advanced_table_options, me
                 metric_data = self.get_metric_data(column_info['metric'], custom_key=custom_key)
                 if not metric_data['success']:
-                    self.layout[layout_box_name].update(Panel(f"[red]Failed to get data from query 'total_value_metric': [bold]{metric_data_dct['fail_reason']}[/bold][/red]\n\n[bold]METRIC:[/bold]\n[grey53]{metric_data_dct['metric']}", title=f"[b]{name}", padding=(1, 1), expand=True, safe_box=True, highlight=True, height=0))
+                    self.layout[layout_box_name].update(Panel(f"[red]Failed to get data from the column metric query: [bold]{metric_data['fail_reason']}[/bold][/red]\n\n[bold]METRIC:[/bold]\n[grey53]{metric_data['metric']}", title=f"[b]{name}", padding=(1, 1), expand=True, safe_box=True, highlight=True, height=0))
 
                 for name, value in metric_data['data'].items():
                     value_ = float(value['value'])
@@ -709,870 +696,3 @@ def build_advanced_table(self, name, layout_box_name, advanced_table_options, me
             data_group = Group(out)
             self.layout[layout_box_name].update(Panel(data_group, title=f"[b]{box_name}", padding=(1, 1), expand=True, safe_box=True, highlight=True, height=0))
             time.sleep(update_interval_)
-
-
-    # def node_monitor_dashboard_default(self, node_name):
-    #     # Print loading because the layout may take few seconds to start (Probably due to slow connection)
-    #     rich.print("[blink]Loading ...", end="\r")
-
-    #     def make_layout() -> Layout:
-    #         """
-    #         The layout structure
-    #         """
-    #         layout = Layout(name="root")
-
-    #         layout.split(
-    #             Layout(name="header", size=3),
-    #             # Layout(name="header2", size=7, ratio=1),
-    #             Layout(name="main", ratio=1),
-    #             # Layout(name="footer", size=6, ratio=1)
-    #         )
-    #         layout["main"].split_row(
-    #             # Layout(name="side",),
-    #             Layout(name="body", ratio=3, minimum_size=100,),
-    #         )
-    #         # layout["side"].split(Layout(name="box1"))  # , Layout(name="box2")
-    #         # layout["body"].split(Layout(name="head", size=5, ratio=2), Layout(name="body1"))  # , Layout(name="box2")
-    #         layout["body"].split_row(Layout(name="body1", size=45), Layout(name="body2"),)  # , Layout(name="box2")
-    #         layout['body1'].split_column(Layout(name="body1_a"), Layout(name="body1_b", size=11))
-    #         layout["body2"].split(Layout(name="body2_a", ratio=1), Layout(name="body2_b", ratio=1))  # , Layout(name="box2")
-    #         layout['body2_b'].split_row(Layout(name="body2_b_a", ratio=1), Layout(name="body2_b_b", ratio=1))
-
-    #         return layout
-
-    #     class Header():
-    #         """
-    #         Display header with clock.
- # """ - # def __rich__(self) -> Panel: - # grid = Table.grid(expand=True) - # grid.add_column(justify="center", ratio=1) - # grid.add_column(justify="right") - # grid.add_row( - # f"[b]Node: [/b] {node_name} ", - # datetime.now().ctime().replace(":", "[blink]:[/]"), - # ) - # return Panel(grid, style="green") - - # class Node_Resources_Progress(PrometheusNodeMetrics): - # def __init__(self): - # super().__init__() - # self.progress_start() - - # def progress_start(self): - # # node_metrics_json = self.nodeMetrics(node=node_name) - # # node_mem_metrics_json = node_metrics_json.get('memory') - # # node_cpu_metrics_json = node_metrics_json.get('cpu') - # # node_fs_metrics_json = node_metrics_json.get('fs') - - - # self.progress_threads_status = Progress( - # TextColumn("[progress.description]{task.description}"), - # BarColumn(bar_width=20), - # # TextColumn("[progress.percentage]{task.percentage:>3.0f}"), - # TextColumn("{task.fields[status]}"), - # ) - # self.task_thread_refresh = self.progress_threads_status.add_task(description=f"[white]Interval Refresh", status=f"unknown") - # self.task_prometheus_server_connection = self.progress_threads_status.add_task(description=f"[white]Prometheus", status=f"unknown") - - # self.progress_mem_total = Progress( - # TextColumn("[progress.description]{task.description}"), - # BarColumn(bar_width=20), - # # TextColumn("[progress.percentage]{task.percentage:>3.0f}"), - # TextColumn("{task.fields[status]}"), - # ) - # # if node_mem_metrics_json.get('MemTotalBytes').get('success'): - # self.task_mem_total = self.progress_mem_total.add_task(description=f"[white]Mem Total ", status="Loading") - - # self.progress_mem = Progress(TextColumn("[progress.description]{task.description}"), - # BarColumn(bar_width=20), - # TaskProgressColumn(), - # TextColumn("{task.fields[status]}"), - # ) - - # # if (node_mem_metrics_json.get('MemTotalBytes').get('success') and node_mem_metrics_json.get('MemAvailableBytes').get('success')): - # self.task_mem_used = self.progress_mem.add_task(completed=0, description=f"[white]Mem used", total=100, status="Loading") - # # if node_mem_metrics_json.get('MemAvailableBytes').get('success'): - # # self.task_mem_available = self.progress_mem.add_task(completed=0, description=f"[white]Mem available", total=100, status="Loading") - # # if node_mem_metrics_json.get('MemFreeBytes').get('success'): - # self.task_mem_free = self.progress_mem.add_task(completed=0, description=f"[white]Mem free", total=100, status="Loading") - # # if node_mem_metrics_json.get('MemCachedBytes').get('success'): - # self.task_mem_cached = self.progress_mem.add_task(completed=0, description=f"[white]Mem cached ", total=100, status="Loading") - # # if node_mem_metrics_json.get('MemBuffersBytes').get('success'): - # self.task_mem_buffer = self.progress_mem.add_task(completed=0, description=f"[white]Mem buffer ", total=100, status="Loading") - - # self.progress_swap = Progress(TextColumn("[progress.description]{task.description}"), - # BarColumn(bar_width=20), - # TaskProgressColumn(), - # TextColumn("{task.fields[status]}"), - # ) - # # if node_mem_metrics_json.get('MemSwapTotalBytes').get('success'): - # self.task_swap_total = self.progress_swap.add_task(completed=0, description=f"[white]Swap Total ", total=100, status="Loading") - # # if node_mem_metrics_json.get('MemSwapTotalBytes').get('success'): - # self.task_swap_free = self.progress_swap.add_task(completed=0, description=f"[white]Swap free ", total=100, status="Loading") - # # if 
node_mem_metrics_json.get('MemSwapCachedBytes').get('success'): - # self.task_swap_cached = self.progress_swap.add_task(completed=0, description=f"[white]Swap cached ", total=100, status="Loading") - - # self.progress_cpu_used_avg = Progress(TextColumn("[progress.description]{task.description}"), - # BarColumn(bar_width=20), - # TaskProgressColumn(), - # TextColumn("{task.fields[status]}"), - # ) - # # if node_cpu_metrics_json.get('cpuUsageAVG').get('success'): - # self.task_cpu_used_avg = self.progress_cpu_used_avg.add_task(description="CPU used AVG[10m]", completed=0, total=100, status="Loading") - - # self.progress_cpu = Progress(TextColumn("[progress.description]{task.description}"), - # BarColumn(bar_width=20), - # # TaskProgressColumn(), - # TextColumn("{task.fields[status]}"), - # ) - # # if node_cpu_metrics_json.get('cpuLoadAvg1m').get('success'): - # self.task_cpu_load1avg = self.progress_cpu.add_task(description=f"[white]CPU load avg 1m ", status="Loading") - # self.task_cpu_load5avg = self.progress_cpu.add_task(description=f"[white]CPU load avg 5m ", status="Loading") - # self.task_cpu_load15avg = self.progress_cpu.add_task(description=f"[white]CPU load avg 15m ", status="Loading") - - - # self.progress_fs_total = Progress(TextColumn("[progress.description]{task.description}"), - # BarColumn(bar_width=20), - # # TaskProgressColumn(), - # TextColumn("{task.fields[status]}"), - # ) - # # if node_fs_metrics_json.get('nodeFsSize').get('success'): - # self.task_fs_size_total = self.progress_fs_total.add_task(description=f"[white]FS Total ", status="Loading") - - # self.progress_fs = Progress(TextColumn("[progress.description]{task.description}"), - # BarColumn(bar_width=20), - # TaskProgressColumn(), - # TextColumn("{task.fields[status]}"), - # ) - # # if node_fs_metrics_json.get('nodeFsUsed').get('success'): - # self.task_fs_used = self.progress_fs.add_task(completed=0, description=f"[white]FS used ", total=100, status="Loading") - - # # if node_fs_metrics_json.get('nodeFsAvailable').get('success'): - # self.task_fs_available = self.progress_fs.add_task(completed=0, description=f"[white]FS available ", total=100, status="Loading") - - - - # self.group_memory = Group ( - # self.progress_mem_total, - # self.progress_mem, - # Rule(style='#AAAAAA'), - # self.progress_swap, - # ) - - # self.group_cpu = Group ( - # self.progress_cpu_used_avg, - # self.progress_cpu - # ) - - # self.group_fs = Group ( - # self.progress_fs_total, - # self.progress_fs - # ) - - # def update(self): - # time.sleep(3) - # while True: - # Logging.log.info("Getting node metrics to update the dashboard") - # node_metrics_json = self.nodeMetrics(node=node_name) - # Logging.log.debug("Node metrics Json:") - # Logging.log.debug(node_metrics_json) - # node_mem_metrics_json = node_metrics_json.get('memory') - # node_cpu_metrics_json = node_metrics_json.get('cpu') - # node_fs_metrics_json = node_metrics_json.get('fs') - - # self.progress_mem_total.update(self.task_mem_total, description=f"[white]Mem Total ", status=f" {helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemTotalBytes').get('result'))}") - # self.progress_mem.update(self.task_mem_used, completed=node_mem_metrics_json.get('MemTotalBytes').get('result') - (node_mem_metrics_json.get('MemFreeBytes').get('result') + node_mem_metrics_json.get('MemBuffersBytes').get('result') + node_mem_metrics_json.get('MemCachedBytes').get('result')), description=f"[white]Mem used", total=node_mem_metrics_json.get('MemTotalBytes').get('result'), 
status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemTotalBytes').get('result') - (node_mem_metrics_json.get('MemFreeBytes').get('result') + node_mem_metrics_json.get('MemBuffersBytes').get('result') + node_mem_metrics_json.get('MemCachedBytes').get('result')))}") - # # self.progress_mem.update(self.task_mem_available, completed=node_mem_metrics_json.get('MemAvailableBytes').get('result'), description=f"[white]Mem available", total=node_mem_metrics_json.get('MemTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemAvailableBytes').get('result'))}") - # self.progress_mem.update(self.task_mem_free, completed=node_mem_metrics_json.get('MemFreeBytes').get('result'), description=f"[white]Mem free", total=node_mem_metrics_json.get('MemTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemFreeBytes').get('result'))}") - # self.progress_mem.update(self.task_mem_cached, completed=node_mem_metrics_json.get('MemCachedBytes').get('result'), description=f"[white]Mem cached ", total=node_mem_metrics_json.get('MemTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemCachedBytes').get('result'))}") - # self.progress_mem.update(self.task_mem_buffer, completed=node_mem_metrics_json.get('MemBuffersBytes').get('result'), description=f"[white]Mem buffer ", total=node_mem_metrics_json.get('MemTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemBuffersBytes').get('result'))}") - - # self.progress_swap.update(self.task_swap_total, completed=node_mem_metrics_json.get('MemSwapTotalBytes').get('result'), description=f"[white]Swap Total ", total=node_mem_metrics_json.get('MemSwapTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemSwapTotalBytes').get('result'))}") - # self.progress_swap.update(self.task_swap_free, completed=node_mem_metrics_json.get('MemSwapFreeBytes').get('result'), description=f"[white]Swap free ", total=node_mem_metrics_json.get('MemSwapTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemSwapFreeBytes').get('result'))}") - # self.progress_swap.update(self.task_swap_cached, completed=node_mem_metrics_json.get('MemSwapCachedBytes').get('result'), description=f"[white]Swap cached ", total=node_mem_metrics_json.get('MemSwapTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemSwapCachedBytes').get('result'))}") - - # self.progress_cpu_used_avg.update(self.task_cpu_used_avg, completed=(node_cpu_metrics_json.get('cpuUsageAVG').get('result') / 2), description=f"[white]CPU used AVG[10m] ", total=100, status="") - # self.progress_cpu.update(self.task_cpu_load1avg, description=f"[white]CPU load avg 1m ", status=node_cpu_metrics_json.get('cpuLoadAvg1m').get('result')) - # self.progress_cpu.update(self.task_cpu_load5avg, description=f"[white]CPU load avg 5m ", status=node_cpu_metrics_json.get('cpuLoadAvg5m').get('result')) - # self.progress_cpu.update(self.task_cpu_load15avg, description=f"[white]CPU load avg 15m ", status=node_cpu_metrics_json.get('cpuLoadAvg15m').get('result')) - - # self.progress_fs_total.update(self.task_fs_size_total, description=f"[white]FS Total ", status=helper_.bytes_to_kb_mb_gb(node_fs_metrics_json.get('nodeFsSize').get('result'))) - # self.progress_fs.update(self.task_fs_used, completed=node_fs_metrics_json.get('nodeFsUsed').get('result'), description=f"[white]FS used ", 
total=node_fs_metrics_json.get('nodeFsSize').get('result'), status=helper_.bytes_to_kb_mb_gb(node_fs_metrics_json.get('nodeFsUsed').get('result'))) - # self.progress_fs.update(self.task_fs_available, completed=node_fs_metrics_json.get('nodeFsAvailable').get('result'), description=f"[white]FS available ", total=node_fs_metrics_json.get('nodeFsSize').get('result'), status=helper_.bytes_to_kb_mb_gb(node_fs_metrics_json.get('nodeFsAvailable').get('result'))) - - # if GlobalAttrs.debug: - # Logging.log.debug(f"Waiting for interval '{GlobalAttrs.live_update_interval}' before the next update") - # time.sleep(GlobalAttrs.live_update_interval) - - # def check_thread_node_resources(self, restart=True): - # while True: - # def thread_status(): - # status = "" - # if self.thread_node_resources.is_alive(): - # status = f"alive [green]✔️" - # else: - # status = "dead [red]❌" - # if restart: - # # Restart thread - # self.start_threads() - # return status - - # self.progress_threads_status.update(task_id=self.task_thread_refresh, status=thread_status()) - # time.sleep(5) - - # class ValidatePrometheuesConnection(PrometheusNodeMetrics): - # def __init__(self): - # super().__init__() - # self.result = {} - - # def run(self): - # while True: - # time.sleep(5) - # self.result = self.verify_prometheus_connection() - # if GlobalAttrs.debug: - # print("DEBUG -- Function: ValidatePrometheuesConnection") - # Logging.log.info("Function: ValidatePrometheuesConnection") - # Logging.log.info("Function: ValidatePrometheuesConnection, waiting for internal '5s' ") - - # def check_thread_prometheus_server_connection(self): - # while True: - - # def thread_status(): - # result = self.vlaidate_prometheus_server.result - # # if self.thread_check_thread_prometheus_server_connection.is_alive(): - # if result.get('connected') is None: - # status = f"waiting [green]✔️" - # elif result.get('connected'): - # status = f"connected [green]✔️" - # else: - # status = f"{result.get('reason')} [red]❌" - - # return status - - # self.progress_threads_status.update(task_id=self.task_prometheus_server_connection, status=f"{thread_status()} ({self.vlaidate_prometheus_server.result.get('status_code')})") - # time.sleep(5) - - # def start_threads(self): - # self.thread_node_resources = threading.Thread(target=self.update) - # self.thread_node_resources.daemon = True - # self.thread_node_resources.start() - # Logging.log.debug("Started Thread: thread_node_resources") - - # self.vlaidate_prometheus_server = self.ValidatePrometheuesConnection() - # self.thread_prometheus_server_connection = threading.Thread(target=self.vlaidate_prometheus_server.run) - # self.thread_prometheus_server_connection.daemon = True - # self.thread_prometheus_server_connection.start() - # Logging.log.debug("Started Thread: thread_prometheus_server_connection") - - # def watch_threads(self): - # self.thread_check_thread_node_resources = threading.Thread(target=self.check_thread_node_resources) - # self.thread_check_thread_node_resources.daemon = True - # self.thread_check_thread_node_resources.start() - - # self.thread_check_thread_prometheus_server_connection = threading.Thread(target=self.check_thread_prometheus_server_connection) - # self.thread_check_thread_prometheus_server_connection.daemon = True - # self.thread_check_thread_prometheus_server_connection.start() - - - # try: - # node_metrics = PrometheusNodeMetrics() - # node_resources_progress = Node_Resources_Progress() - - # progress_table = Table.grid(expand=True) - # progress_table.add_row( - # 
Panel(node_resources_progress.group_cpu, title="[b]CPU", padding=(1, 2)), - # ) - # progress_table.add_row( - # Panel(node_resources_progress.group_memory, title="[b]Memory", padding=(1, 2)), - # ) - # progress_table.add_row( - # Panel(node_resources_progress.group_fs, title='[b]FS "/"', padding=(1, 2)), - # ) - # progress_table.add_row( - # Panel(node_resources_progress.progress_threads_status, title="[b]Threads Status",padding=(1, 2), subtitle=""), - # ) - - - # layout = make_layout() - # layout["header"].update(Header()) - # layout["body1_a"].update(progress_table) - # layout['body1_b'].update(Panel("Made with [red]❤️[/red]", title='[b]Unused Space', padding=(1, 2),)) - - - # layout["body2_a"].update(Panel("Loading ...", title="[b]Top Pods in Memory Usage", padding=(1, 1))) - - # node_resources_progress.start_threads() - # node_resources_progress.watch_threads() - - # update_disk_read_bytes_graph = True - # disk_read_bytes_graph = AsciiGraph() - # disk_read_bytes = self.nodeDiskReadBytes(node_name) - # if GlobalAttrs.debug: - # Logging.log.debug(f"Getting Pod 'disk_read_bytes' metrics; Result:\n{disk_read_bytes}") - # else: - # Logging.log.info("Getting Pod 'disk_read_bytes' metrics") - # if disk_read_bytes.get('success'): - # disk_read_bytes_graph.create_graph(disk_read_bytes.get('result').keys(), height=5, width=GlobalAttrs.graphs_width, format='{:8.0f} kb/s') - # else: - # disk_read_bytes_graph.graph = disk_read_bytes.get('fail_reason') - # update_disk_read_bytes_graph = False - - # update_network_received_bytes_graph = True - # network_received_bytes_graph = AsciiGraph() - # network_received_bytes = self.nodeNetworkReceiveBytes(node_name) - # if GlobalAttrs.debug: - # Logging.log.debug(f"Getting Pod 'network_received_bytes' metrics; Result:\n{network_received_bytes}") - # else: - # Logging.log.info("Getting Pod 'network_received_bytes' metrics") - # if network_received_bytes.get('success'): - # network_received_bytes_graph.create_graph(network_received_bytes.get('result').keys(), height=5, width=GlobalAttrs.graphs_width, format='{:8.0f} kb/s') - # else: - # network_received_bytes_graph.graph = network_received_bytes.get('fail_reason') - # update_network_received_bytes_graph = False - - # update_network_transmit_bytes_graph = True - # network_transmit_bytes_graph = AsciiGraph() - # network_transmit_bytes = self.nodeNetworkTransmitBytes(node_name) - # if GlobalAttrs.debug: - # Logging.log.debug(f"Getting Pod 'network_transmit_bytes' metrics; Result:\n{network_transmit_bytes}") - # else: - # Logging.log.info("Getting Pod 'network_transmit_bytes' metrics") - # if network_transmit_bytes.get('success'): - # network_transmit_bytes_graph.create_graph(network_transmit_bytes.get('result').keys(), height=5, width=GlobalAttrs.graphs_width, format='{:8.0f} kb/s') - # else: - # network_transmit_bytes_graph.graph = network_transmit_bytes.get('fail_reason') - # update_network_transmit_bytes_graph = False - - - - # update_disk_written_bytes_graph = True - # disk_written_bytes_graph = AsciiGraph() - # disk_written_bytes = self.nodeDiskWrittenBytes(node_name) - # if disk_written_bytes.get('success'): - # disk_written_bytes_graph.create_graph(disk_written_bytes.get('result').keys(), height=5, width=GlobalAttrs.graphs_width, format='{:8.0f} kb/s') - # else: - # disk_written_bytes_graph.graph = disk_written_bytes.get('fail_reason') - # update_disk_written_bytes_graph = False - - # layout["body2_b_b"].update(Panel(Markdown("Loading ..."), title="[b]Network IO", padding=(1, 1))) - # 
layout["body2_b_a"].update(Panel(Markdown("Loading ..."), title="[b]Disk IO", padding=(1, 1))) - - # group_network_io = Group( - # Markdown("Bytes Received", justify='center'), - # Text.from_ansi(network_received_bytes_graph.graph + f"\n {network_received_bytes_graph.colors_description_str}"), - # Rule(style='#AAAAAA'), - # Markdown("Bytes Transmitted", justify='center'), - # Text.from_ansi(network_transmit_bytes_graph.graph + f"\n {network_transmit_bytes_graph.colors_description_str}") - # ) - - # group_disk_io = Group( - # Markdown("Bytes Read", justify='center'), - # Text.from_ansi(disk_read_bytes_graph.graph + f"\n {disk_read_bytes_graph.colors_description_str}"), - # Rule(style='#AAAAAA'), - # Markdown("Bytes Written", justify='center'), - # Text.from_ansi(disk_written_bytes_graph.graph + f"\n {disk_written_bytes_graph.colors_description_str}") - # ) - - # Logging.log.info("Starting the Layout.") - # with Live(layout, auto_refresh=True, screen=True, refresh_per_second=GlobalAttrs.live_update_interval): - # while True: - # pod_memory_usage = node_metrics.PodMemTopUsage(node=node_name) - # layout["body2_a"].update(Panel(pod_memory_usage, title="[b]Top Pods in Memory Usage", padding=(1, 1))) - # Logging.log.info("Updating the Layout with 'Top Pods in Memory Usage'") - # Logging.log.debug(f"Result:\n{pod_memory_usage}") - - # if update_network_received_bytes_graph: - # network_received_bytes = self.nodeNetworkReceiveBytes(node_name) - # Logging.log.info("Updating Node 'network_received_bytes' metrics") - # Logging.log.debug(network_received_bytes) - # for device, value in network_received_bytes.get('result').items(): - # network_received_bytes_graph.update_lst(device, helper_.bytes_to_kb(value)) - - # if update_network_transmit_bytes_graph: - # Logging.log.info("Updating Node 'network_transmit_bytes' metrics") - # Logging.log.debug(network_transmit_bytes) - # network_transmit_bytes = self.nodeNetworkTransmitBytes(node_name) - # for device, value in network_transmit_bytes.get('result').items(): - # network_transmit_bytes_graph.update_lst(device, helper_.bytes_to_kb(value)) - - # if update_disk_read_bytes_graph: - # disk_read_bytes = self.nodeDiskReadBytes(node_name) - # Logging.log.info("Updating Node 'disk_read_bytes' metrics") - # Logging.log.debug(disk_read_bytes) - # for device, value in disk_read_bytes.get('result').items(): - # disk_read_bytes_graph.update_lst(device, helper_.bytes_to_kb(value)) - - # if update_disk_written_bytes_graph: - # disk_written_bytes = self.nodeDiskWrittenBytes(node_name) - # Logging.log.info("Updating Node 'disk_written_bytes' metrics") - # Logging.log.debug(disk_written_bytes) - # for device, value in disk_written_bytes.get('result').items(): - # disk_written_bytes_graph.update_lst(device, helper_.bytes_to_kb(value)) - - # if update_network_received_bytes_graph or update_network_transmit_bytes_graph: - # group_network_io = Group( - # Markdown("Bytes Received", justify='center'), - # Text.from_ansi(network_received_bytes_graph.graph + f"\n {network_received_bytes_graph.colors_description_str}"), - # Rule(style='#AAAAAA'), - # Markdown("Bytes Transmitted", justify='center'), - # Text.from_ansi(network_transmit_bytes_graph.graph + f"\n {network_transmit_bytes_graph.colors_description_str}") - # ) - - # if update_disk_read_bytes_graph or update_disk_written_bytes_graph: - # group_disk_io = Group( - # Markdown("Bytes Read", justify='center'), - # Text.from_ansi(disk_read_bytes_graph.graph + f"\n {disk_read_bytes_graph.colors_description_str}"), - # 
Rule(style='#AAAAAA'), - # Markdown("Bytes Written", justify='center'), - # Text.from_ansi(disk_written_bytes_graph.graph + f"\n {disk_written_bytes_graph.colors_description_str}") - # ) - - # layout["body2_b_b"].update(Panel(group_network_io, title="[b]Network IO", padding=(1, 1))) - # layout["body2_b_a"].update(Panel(group_disk_io, title="[b]Disk IO", padding=(1, 1))) - - # Logging.log.info(f"waiting for the update interval '{GlobalAttrs.live_update_interval}' before updating the Layout ") - # time.sleep(GlobalAttrs.live_update_interval) - # Logging.log.info(f"Updating the layout") - - # except Exception as e: - # rich.print("\n[yellow]ERROR -- " + str(e)) - # rich.print("\n[underline bold]Exception:") - # traceback.print_exc() - # exit(1) - # except KeyboardInterrupt: - # print(" ", end="\r") - # rich.print("Ok") - # exit(0) - - - # def node_monitor_dashboard_pvc(self, node_name): - # # Print loading because the layout may take few seconds to start (Probably due to slow connection) - # rich.print("[blink]Loading ...", end="\r") - - # def make_layout() -> Layout: - # """ - # The layout structure - # """ - # layout = Layout(name="root") - - # layout.split( - # Layout(name="header", size=3), - # Layout(name="main", ratio=1), - # ) - # layout["main"].split_row( - # Layout(name="body", ratio=3, minimum_size=100,), - # ) - - # layout["body"].split_column(Layout(name="body1", size=23), Layout(name="body2"),) # , Layout(name="box2") - # return layout - - # class Header(): - # """ - # Display header with clock. - # """ - # def __rich__(self) -> Panel: - # grid = Table.grid(expand=True) - # grid.add_column(justify="center", ratio=1) - # grid.add_column(justify="right") - # grid.add_row( - # f"[b]Node: [/b] {node_name} ", - # datetime.now().ctime().replace(":", "[blink]:[/]"), - # ) - # return Panel(grid, style="green") - - # class Node_Resources_Progress(PrometheusNodeMetrics): - # def __init__(self): - # super().__init__() - # self.progress_start() - - # def progress_start(self): - # # node_metrics_json = self.nodeMetrics(node=node_name) - # # node_mem_metrics_json = node_metrics_json.get('memory') - # # node_cpu_metrics_json = node_metrics_json.get('cpu') - # # node_fs_metrics_json = node_metrics_json.get('fs') - - - # self.progress_threads_status = Progress( - # TextColumn("[progress.description]{task.description}"), - # BarColumn(bar_width=20), - # # TextColumn("[progress.percentage]{task.percentage:>3.0f}"), - # TextColumn("{task.fields[status]}"), - # ) - # self.task_thread_refresh = self.progress_threads_status.add_task(description=f"[white]Metrics Refresh", status=f"unknown") - # self.task_prometheus_server_connection = self.progress_threads_status.add_task(description=f"[white]Prometheus", status=f"unknown") - - # self.progress_mem_total = Progress( - # TextColumn("[progress.description]{task.description}"), - # BarColumn(bar_width=20), - # # TextColumn("[progress.percentage]{task.percentage:>3.0f}"), - # TextColumn("{task.fields[status]}"), - # ) - # # if node_mem_metrics_json.get('MemTotalBytes').get('success'): - # self.task_mem_total = self.progress_mem_total.add_task(description=f"[white]Mem Total ", status="Loading") - - # self.progress_mem = Progress(TextColumn("[progress.description]{task.description}"), - # BarColumn(bar_width=20), - # TaskProgressColumn(), - # TextColumn("{task.fields[status]}"), - # ) - - # # if (node_mem_metrics_json.get('MemTotalBytes').get('success') and node_mem_metrics_json.get('MemAvailableBytes').get('success')): - # self.task_mem_used = 
self.progress_mem.add_task(completed=0, description=f"[white]Mem used", total=100, status="Loading")
-        # # if node_mem_metrics_json.get('MemAvailableBytes').get('success'):
-        # #     self.task_mem_available = self.progress_mem.add_task(completed=0, description=f"[white]Mem available", total=100, status="Loading")
-        # # if node_mem_metrics_json.get('MemFreeBytes').get('success'):
-        # self.task_mem_free = self.progress_mem.add_task(completed=0, description=f"[white]Mem free", total=100, status="Loading")
-        # # if node_mem_metrics_json.get('MemCachedBytes').get('success'):
-        # self.task_mem_cached = self.progress_mem.add_task(completed=0, description=f"[white]Mem cached ", total=100, status="Loading")
-        # # if node_mem_metrics_json.get('MemBuffersBytes').get('success'):
-        # self.task_mem_buffer = self.progress_mem.add_task(completed=0, description=f"[white]Mem buffer ", total=100, status="Loading")
-
-        # self.progress_swap = Progress(TextColumn("[progress.description]{task.description}"),
-        #     BarColumn(bar_width=20),
-        #     TaskProgressColumn(),
-        #     TextColumn("{task.fields[status]}"),
-        # )
-        # # if node_mem_metrics_json.get('MemSwapTotalBytes').get('success'):
-        # self.task_swap_total = self.progress_swap.add_task(completed=0, description=f"[white]Swap Total ", total=100, status="Loading")
-        # # if node_mem_metrics_json.get('MemSwapTotalBytes').get('success'):
-        # self.task_swap_free = self.progress_swap.add_task(completed=0, description=f"[white]Swap free ", total=100, status="Loading")
-        # # if node_mem_metrics_json.get('MemSwapCachedBytes').get('success'):
-        # self.task_swap_cached = self.progress_swap.add_task(completed=0, description=f"[white]Swap cached ", total=100, status="Loading")
-
-        # self.progress_cpu_used_avg = Progress(TextColumn("[progress.description]{task.description}"),
-        #     BarColumn(bar_width=20),
-        #     TaskProgressColumn(),
-        #     TextColumn("{task.fields[status]}"),
-        # )
-        # # if node_cpu_metrics_json.get('cpuUsageAVG').get('success'):
-        # self.task_cpu_used_avg = self.progress_cpu_used_avg.add_task(description="CPU used AVG[10m]", completed=0, total=100, status="Loading")
-
-        # self.progress_cpu = Progress(TextColumn("[progress.description]{task.description}"),
-        #     BarColumn(bar_width=20),
-        #     # TaskProgressColumn(),
-        #     TextColumn("{task.fields[status]}"),
-        # )
-        # # if node_cpu_metrics_json.get('cpuLoadAvg1m').get('success'):
-        # self.task_cpu_load1avg = self.progress_cpu.add_task(description=f"[white]CPU load avg 1m ", status="Loading")
-        # self.task_cpu_load5avg = self.progress_cpu.add_task(description=f"[white]CPU load avg 5m ", status="Loading")
-        # self.task_cpu_load15avg = self.progress_cpu.add_task(description=f"[white]CPU load avg 15m ", status="Loading")
-
-
-        # self.progress_fs_total = Progress(TextColumn("[progress.description]{task.description}"),
-        #     BarColumn(bar_width=20),
-        #     # TaskProgressColumn(),
-        #     TextColumn("{task.fields[status]}"),
-        # )
-        # # if node_fs_metrics_json.get('nodeFsSize').get('success'):
-        # self.task_fs_size_total = self.progress_fs_total.add_task(description=f"[white]FS Total ", status="Loading")
-
-        # self.progress_fs = Progress(TextColumn("[progress.description]{task.description}"),
-        #     BarColumn(bar_width=20),
-        #     TaskProgressColumn(),
-        #     TextColumn("{task.fields[status]}"),
-        # )
-        # # if node_fs_metrics_json.get('nodeFsUsed').get('success'):
-        # self.task_fs_used = self.progress_fs.add_task(completed=0, description=f"[white]FS used ", total=100, status="Loading")
-        # # if node_fs_metrics_json.get('nodeFsAvailable').get('success'):
-        # self.task_fs_available = self.progress_fs.add_task(completed=0, description=f"[white]FS available ", total=100, status="Loading")
-
-
-
-        # self.group_memory = Group (
-        #     self.progress_mem_total,
-        #     self.progress_mem,
-        #     Rule(style='#AAAAAA'),
-        #     self.progress_swap,
-        # )
-
-        # self.group_cpu = Group (
-        #     self.progress_cpu_used_avg,
-        #     self.progress_cpu
-        # )
-
-        # self.group_fs = Group (
-        #     self.progress_fs_total,
-        #     self.progress_fs
-        # )
-
-    # def update(self):
-    #     time.sleep(3)
-    #     while True:
-    #         Logging.log.info("Getting node metrics to update the dashboard")
-    #         node_metrics_json = self.nodeMetrics(node=node_name)
-    #         if GlobalAttrs.debug:
-    #             Logging.log.info("Node metrics Json:")
-    #             Logging.log.debug(node_metrics_json)
-    #         node_mem_metrics_json = node_metrics_json.get('memory')
-    #         node_cpu_metrics_json = node_metrics_json.get('cpu')
-    #         node_fs_metrics_json = node_metrics_json.get('fs')
-
-    #         self.progress_mem_total.update(self.task_mem_total, description=f"[white]Mem Total ", status=f" {helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemTotalBytes').get('result'))}")
-    #         self.progress_mem.update(self.task_mem_used, completed=node_mem_metrics_json.get('MemTotalBytes').get('result') - (node_mem_metrics_json.get('MemFreeBytes').get('result')), description=f"[white]Mem used", total=node_mem_metrics_json.get('MemTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemTotalBytes').get('result') - (node_mem_metrics_json.get('MemFreeBytes').get('result') + node_mem_metrics_json.get('MemBuffersBytes').get('result') + node_mem_metrics_json.get('MemCachedBytes').get('result')))}")
-    #         # self.progress_mem.update(self.task_mem_available, completed=node_mem_metrics_json.get('MemAvailableBytes').get('result'), description=f"[white]Mem available", total=node_mem_metrics_json.get('MemTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemAvailableBytes').get('result'))}")
-    #         self.progress_mem.update(self.task_mem_free, completed=node_mem_metrics_json.get('MemFreeBytes').get('result'), description=f"[white]Mem free", total=node_mem_metrics_json.get('MemTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemFreeBytes').get('result'))}")
-    #         self.progress_mem.update(self.task_mem_cached, completed=node_mem_metrics_json.get('MemCachedBytes').get('result'), description=f"[white]Mem cached ", total=node_mem_metrics_json.get('MemTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemCachedBytes').get('result'))}")
-    #         self.progress_mem.update(self.task_mem_buffer, completed=node_mem_metrics_json.get('MemBuffersBytes').get('result'), description=f"[white]Mem buffer ", total=node_mem_metrics_json.get('MemTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemBuffersBytes').get('result'))}")
-
-    #         self.progress_swap.update(self.task_swap_total, completed=node_mem_metrics_json.get('MemSwapTotalBytes').get('result'), description=f"[white]Swap Total ", total=node_mem_metrics_json.get('MemSwapTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemSwapTotalBytes').get('result'))}")
-    #         self.progress_swap.update(self.task_swap_free, completed=node_mem_metrics_json.get('MemSwapFreeBytes').get('result'), description=f"[white]Swap free ", total=node_mem_metrics_json.get('MemSwapTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemSwapFreeBytes').get('result'))}")
-    #         self.progress_swap.update(self.task_swap_cached, completed=node_mem_metrics_json.get('MemSwapCachedBytes').get('result'), description=f"[white]Swap cached ", total=node_mem_metrics_json.get('MemSwapTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemSwapCachedBytes').get('result'))}")
-
-    #         self.progress_cpu_used_avg.update(self.task_cpu_used_avg, completed=(node_cpu_metrics_json.get('cpuUsageAVG').get('result') / 2), description=f"[white]CPU used AVG[10m] ", total=100, status="")
-    #         self.progress_cpu.update(self.task_cpu_load1avg, description=f"[white]CPU load avg 1m ", status=node_cpu_metrics_json.get('cpuLoadAvg1m').get('result'))
-    #         self.progress_cpu.update(self.task_cpu_load5avg, description=f"[white]CPU load avg 5m ", status=node_cpu_metrics_json.get('cpuLoadAvg5m').get('result'))
-    #         self.progress_cpu.update(self.task_cpu_load15avg, description=f"[white]CPU load avg 15m ", status=node_cpu_metrics_json.get('cpuLoadAvg15m').get('result'))
-
-    #         self.progress_fs_total.update(self.task_fs_size_total, description=f"[white]FS Total ", status=helper_.bytes_to_kb_mb_gb(node_fs_metrics_json.get('nodeFsSize').get('result')))
-    #         self.progress_fs.update(self.task_fs_used, completed=node_fs_metrics_json.get('nodeFsUsed').get('result'), description=f"[white]FS used ", total=node_fs_metrics_json.get('nodeFsSize').get('result'), status=helper_.bytes_to_kb_mb_gb(node_fs_metrics_json.get('nodeFsUsed').get('result')))
-    #         self.progress_fs.update(self.task_fs_available, completed=node_fs_metrics_json.get('nodeFsAvailable').get('result'), description=f"[white]FS available ", total=node_fs_metrics_json.get('nodeFsSize').get('result'), status=helper_.bytes_to_kb_mb_gb(node_fs_metrics_json.get('nodeFsAvailable').get('result')))
-
-    #         Logging.log.debug(f"Waiting for interval '{GlobalAttrs.live_update_interval}' before the next update")
-    #         time.sleep(GlobalAttrs.live_update_interval)
-
-    # def check_thread_node_resources(self, restart=True):
-    #     while True:
-    #         def thread_status():
-    #             status = ""
-    #             if self.thread_node_resources.is_alive():
-    #                 status = f"alive [green]✔️"
-    #             else:
-    #                 status = "dead [red]❌"
-    #                 if restart:
-    #                     # Restart thread
-    #                     self.start_threads()
-    #             return status
-
-    #         self.progress_threads_status.update(task_id=self.task_thread_refresh, status=thread_status())
-    #         time.sleep(5)
-
-    # class ValidatePrometheuesConnection(PrometheusNodeMetrics):
-    #     def __init__(self):
-    #         super().__init__()
-    #         self.result = {}
-
-    #     def run(self):
-    #         while True:
-    #             time.sleep(5)
-    #             self.result = self.verify_prometheus_connection()
-    #             if GlobalAttrs.debug:
-    #                 print("DEBUG -- Function: ValidatePrometheuesConnection")
-    #                 Logging.log.info("Function: ValidatePrometheuesConnection")
-    #             Logging.log.info("Function: ValidatePrometheuesConnection, waiting for internal '5s' ")
-
-    # def check_thread_prometheus_server_connection(self):
-    #     while True:
-
-    #         def thread_status():
-    #             result = self.vlaidate_prometheus_server.result
-    #             # if self.thread_check_thread_prometheus_server_connection.is_alive():
-    #             if result.get('connected') is None:
-    #                 status = f"waiting [green]✔️"
-    #             elif result.get('connected'):
-    #                 status = f"connected [green]✔️"
-    #             else:
-    #                 status = f"{result.get('reason')} [red]❌"
-
-    #             return status
-
-    #         self.progress_threads_status.update(task_id=self.task_prometheus_server_connection, status=f"{thread_status()} ({self.vlaidate_prometheus_server.result.get('status_code')})")
-    #         time.sleep(5)
-
-    # def start_threads(self):
-    #     self.thread_node_resources = threading.Thread(target=self.update)
-    #     self.thread_node_resources.daemon = True
-    #     self.thread_node_resources.start()
-    #     Logging.log.debug("Started Thread: thread_node_resources")
-
-    #     self.vlaidate_prometheus_server = self.ValidatePrometheuesConnection()
-    #     self.thread_prometheus_server_connection = threading.Thread(target=self.vlaidate_prometheus_server.run)
-    #     self.thread_prometheus_server_connection.daemon = True
-    #     self.thread_prometheus_server_connection.start()
-    #     Logging.log.debug("Started Thread: thread_prometheus_server_connection")
-
-    # def watch_threads(self):
-    #     self.thread_check_thread_node_resources = threading.Thread(target=self.check_thread_node_resources)
-    #     self.thread_check_thread_node_resources.daemon = True
-    #     self.thread_check_thread_node_resources.start()
-
-    #     self.thread_check_thread_prometheus_server_connection = threading.Thread(target=self.check_thread_prometheus_server_connection)
-    #     self.thread_check_thread_prometheus_server_connection.daemon = True
-    #     self.thread_check_thread_prometheus_server_connection.start()
-
-
-    # try:
-    #     # node_metrics = PrometheusNodeMetrics()
-    #     node_resources_progress = Node_Resources_Progress()
-
-    #     progress_table = Table.grid(expand=True)
-    #     progress_table.add_row(
-    #         Panel(node_resources_progress.group_cpu, title="[b]CPU", padding=(1, 2)),
-    #     )
-    #     progress_table.add_row(
-    #         Panel(node_resources_progress.group_memory, title="[b]Memory", padding=(1, 2)),
-    #     )
-    #     progress_table.add_row(
-    #         Panel(node_resources_progress.group_fs, title='[b]FS "/"', padding=(1, 2)),
-    #     )
-    #     progress_table.add_row(
-    #         Panel(node_resources_progress.progress_threads_status, title="[b]Threads Status",padding=(1, 2), subtitle=""),
-    #     )
-
-
-
-    #     layout = make_layout()
-    #     layout["header"].update(Header())
-    #     # layout["body1_a"].update(progress_table)
-
-
-    #     # layout["body2_a"].update(Panel("Loading ...", title="[b]Top Pods in Memory Usage", padding=(1, 1)))
-
-    #     node_resources_progress.start_threads()
-    #     node_resources_progress.watch_threads()
-
-    #     update_disk_read_bytes_graph = True
-    #     disk_read_bytes_graph = AsciiGraph()
-    #     disk_read_bytes = self.nodeDiskReadBytes(node_name)
-    #     if GlobalAttrs.debug:
-    #         Logging.log.debug(f"Getting Pod 'disk_read_bytes' metrics; Result:\n{disk_read_bytes}")
-    #     else:
-    #         Logging.log.info("Getting Pod 'disk_read_bytes' metrics")
-    #     if disk_read_bytes.get('success'):
-    #         disk_read_bytes_graph.create_graph(disk_read_bytes.get('result').keys(), height=5, width=GlobalAttrs.graphs_width, format='{:8.0f} kb/s')
-    #     else:
-    #         disk_read_bytes_graph.graph = disk_read_bytes.get('fail_reason')
-    #         update_disk_read_bytes_graph = False
-
-    #     update_network_received_bytes_graph = True
-    #     network_received_bytes_graph = AsciiGraph()
-    #     network_received_bytes = self.nodeNetworkReceiveBytes(node_name)
-    #     if GlobalAttrs.debug:
-    #         Logging.log.debug(f"Getting Pod 'network_received_bytes' metrics; Result:\n{network_received_bytes}")
-    #     else:
-    #         Logging.log.info("Getting Pod 'network_received_bytes' metrics")
-    #     if network_received_bytes.get('success'):
-    #         network_received_bytes_graph.create_graph(network_received_bytes.get('result').keys(), height=5, width=GlobalAttrs.graphs_width, format='{:8.0f} kb/s')
-    #     else:
-    #         network_received_bytes_graph.graph = network_received_bytes.get('fail_reason')
-    #         update_network_received_bytes_graph = False
-
-    #     update_network_transmit_bytes_graph = True
-    #     network_transmit_bytes_graph = AsciiGraph()
-    #     network_transmit_bytes = self.nodeNetworkTransmitBytes(node_name)
-    #     if GlobalAttrs.debug:
-    #         Logging.log.debug(f"Getting Pod 'network_transmit_bytes' metrics; Result:\n{network_transmit_bytes}")
-    #     else:
-    #         Logging.log.info("Getting Pod 'network_transmit_bytes' metrics")
-    #     if network_transmit_bytes.get('success'):
-    #         network_transmit_bytes_graph.create_graph(network_transmit_bytes.get('result').keys(), height=5, width=GlobalAttrs.graphs_width, format='{:8.0f} kb/s')
-    #     else:
-    #         network_transmit_bytes_graph.graph = network_transmit_bytes.get('fail_reason')
-    #         update_network_transmit_bytes_graph = False
-
-
-
-    #     update_disk_written_bytes_graph = True
-    #     disk_written_bytes_graph = AsciiGraph()
-    #     disk_written_bytes = self.nodeDiskWrittenBytes(node_name)
-    #     if disk_written_bytes.get('success'):
-    #         disk_written_bytes_graph.create_graph(disk_written_bytes.get('result').keys(), height=5, width=GlobalAttrs.graphs_width, format='{:8.0f} kb/s')
-    #     else:
-    #         disk_written_bytes_graph.graph = disk_written_bytes.get('fail_reason')
-    #         update_disk_written_bytes_graph = False
-
-    #     # layout["body2_b_b"].update(Panel(Markdown("Loading ..."), title="[b]Network IO", padding=(1, 1)))
-    #     # layout["body2_b_a"].update(Panel(Markdown("Loading ..."), title="[b]Disk IO", padding=(1, 1)))
-
-    #     group_network_io = Group(
-    #         Markdown("Bytes Received", justify='center'),
-    #         Text.from_ansi(network_received_bytes_graph.graph + f"\n {network_received_bytes_graph.colors_description_str}"),
-    #         Rule(style='#AAAAAA'),
-    #         Markdown("Bytes Transmitted", justify='center'),
-    #         Text.from_ansi(network_transmit_bytes_graph.graph + f"\n {network_transmit_bytes_graph.colors_description_str}")
-    #     )
-
-    #     group_disk_io = Group(
-    #         Markdown("Bytes Read", justify='center'),
-    #         Text.from_ansi(disk_read_bytes_graph.graph + f"\n {disk_read_bytes_graph.colors_description_str}"),
-    #         Rule(style='#AAAAAA'),
-    #         Markdown("Bytes Written", justify='center'),
-    #         Text.from_ansi(disk_written_bytes_graph.graph + f"\n {disk_written_bytes_graph.colors_description_str}")
-    #     )
-
-    #     Logging.log.info("Starting the Layout.")
-    #     with Live(layout, auto_refresh=True, screen=True, refresh_per_second=GlobalAttrs.live_update_interval):
-    #         while True:
-    #             # pod_memory_usage = node_metrics.PodMemTopUsage(node=node_name)
-    #             # layout["body2_a"].update(Panel(pod_memory_usage, title="[b]Top Pods in Memory Usage", padding=(1, 1)))
-    #             # Logging.log.info("Updating the Layout with 'Top Pods in Memory Usage'")
-    #             # Logging.log.info(f"Result:\n{pod_memory_usage}")
-
-    #             # if update_network_received_bytes_graph:
-    #             #     network_received_bytes = self.nodeNetworkReceiveBytes(node_name)
-    #             #     Logging.log.info("Updating Node 'network_received_bytes' metrics")
-    #             #     Logging.log.info(network_received_bytes)
-    #             #     for device, value in network_received_bytes.get('result').items():
-    #             #         network_received_bytes_graph.update_lst(device, helper_.bytes_to_kb(value))
-
-    #             # if update_network_transmit_bytes_graph:
-    #             #     Logging.log.info("Updating Node 'network_transmit_bytes' metrics")
-    #             #     Logging.log.info(network_transmit_bytes)
-    #             #     network_transmit_bytes = self.nodeNetworkTransmitBytes(node_name)
-    #             #     for device, value in network_transmit_bytes.get('result').items():
-    #             #         network_transmit_bytes_graph.update_lst(device, helper_.bytes_to_kb(value))
-
-    #             # if update_disk_read_bytes_graph:
-    #             #     disk_read_bytes = self.nodeDiskReadBytes(node_name)
-    #             #     Logging.log.info("Updating Node 'disk_read_bytes' metrics")
-    #             #     Logging.log.info(disk_read_bytes)
-    #             #     for device, value in disk_read_bytes.get('result').items():
-    #             #         disk_read_bytes_graph.update_lst(device, helper_.bytes_to_kb(value))
-
-    #             # if update_disk_written_bytes_graph:
-    #             #     disk_written_bytes = self.nodeDiskWrittenBytes(node_name)
-    #             #     Logging.log.info("Updating Node 'disk_written_bytes' metrics")
-    #             #     Logging.log.info(disk_written_bytes)
-    #             #     for device, value in disk_written_bytes.get('result').items():
-    #             #         disk_written_bytes_graph.update_lst(device, helper_.bytes_to_kb(value))
-
-    #             # if update_network_received_bytes_graph or update_network_transmit_bytes_graph:
-    #             #     group_network_io = Group(
-    #             #         Markdown("Bytes Received", justify='center'),
-    #             #         Text.from_ansi(network_received_bytes_graph.graph + f"\n {network_received_bytes_graph.colors_description_str}"),
-    #             #         Rule(style='#AAAAAA'),
-    #             #         Markdown("Bytes Transmitted", justify='center'),
-    #             #         Text.from_ansi(network_transmit_bytes_graph.graph + f"\n {network_transmit_bytes_graph.colors_description_str}")
-    #             #     )
-
-    #             # if update_disk_read_bytes_graph or update_disk_written_bytes_graph:
-    #             #     group_disk_io = Group(
-    #             #         Markdown("Bytes Read", justify='center'),
-    #             #         Text.from_ansi(disk_read_bytes_graph.graph + f"\n {disk_read_bytes_graph.colors_description_str}"),
-    #             #         Rule(style='#AAAAAA'),
-    #             #         Markdown("Bytes Written", justify='center'),
-    #             #         Text.from_ansi(disk_written_bytes_graph.graph + f"\n {disk_written_bytes_graph.colors_description_str}")
-    #             #     )
-
-    #             # layout["body2_b_b"].update(Panel(group_network_io, title="[b]Network IO", padding=(1, 1)))
-    #             # layout["body2_b_a"].update(Panel(group_disk_io, title="[b]Disk IO", padding=(1, 1)))
-
-    #             Logging.log.info(f"waiting for the update interval '{GlobalAttrs.live_update_interval}' before updating the Layout ")
-    #             time.sleep(GlobalAttrs.live_update_interval)
-    #             Logging.log.info(f"Updating the layout")
-
-    # except Exception as e:
-    #     rich.print("\n[yellow]ERROR -- " + str(e))
-    #     rich.print("\n[underline bold]Exception:")
-    #     traceback.print_exc()
-    #     exit(1)
-    # except KeyboardInterrupt:
-    #     print(" ", end="\r")
-    #     rich.print("Ok")
-    #     exit(0)
-
-
-    # def node_monitor_dashboard_memory(self, node_name):
-    #     print("not implemented yet.")
-    #     exit(0)
diff --git a/kubePtop/dashboard_yaml_loader.py b/kubePtop/dashboard_yaml_loader.py
new file mode 100644
index 0000000..e6c39eb
--- /dev/null
+++ b/kubePtop/dashboard_yaml_loader.py
@@ -0,0 +1,45 @@
+import os
+import yaml
+
+class dashboardYamlLoader:
+    def __init__(self) -> None:
+        pass
+
+    def dashboard_yaml_schema_validation(self, yaml):
+        pass
+
+
+
+    def load_dashboard_data(self, dashboard_name):
+        out = {
+            "success": False,
+            "data": None,
+            "fail_reason": ""
+        }
+
+        # Check if the yaml file exists in the dashboards directory
+        ## If so, return the file path
+        ### The dashboard dir is taken as ENV
+        yaml_file = dashboard_name
+        # Check if the file does NOT exist
+        if not os.path.isfile(yaml_file):
+            out['fail_reason'] = f"Dashboard File '{yaml_file}' does NOT exist"
+            return out
+
+        # Read the file
+        try:
+            with open(yaml_file, 'r') as file:
+                content = file.read()
+            out['data'] = yaml.safe_load(content)
+        except Exception as e:
+            out['fail_reason'] = f"Failed to open the dashboard file '{yaml_file}' > {e}"
+            return out
+
+        # Yaml Schema validation
+
+
+        # Loading variables args
+
+
+        out['success'] = True
+        return out
From 2372cd4413c3e71504f7c05e131fe184b60a679c Mon Sep 17 00:00:00 2001
From: eslam-gomaa
Date: Mon, 24 Jun 2024 19:08:06 +0300
Subject: [PATCH 2/3] dashboard structure is complete

---
 dashboard.yaml | 46 +++++++++++++++++++++++-----------------------
 1 file changed, 23 insertions(+), 23 deletions(-)

diff --git a/dashboard.yaml b/dashboard.yaml
index cab4ba4..6f6b624 100644
--- a/dashboard.yaml
+++ b/dashboard.yaml
@@ -2,7 +2,8 @@ dashboard:
   name: Kafka cluster
   description: Staging Kafka dashboard
   layout:
-    split_mode: row # row
+    splitMode: row # row
+    fullScreen: true
     header:
       enable: false
       size: 3
       ratio: 1
@@ -55,13 +56,13 @@
         size: 0
         ratio: 1
 
-  defaultDataSource:
-    type: prometheus
-    endpoint: ""
-    secure: false
-    basicAuthEnabled: false
-    basicAuthUserNameVariable: ""
-    basicAuthPasswordVariable: ""
+  # defaultDataSource:
+  #   type: prometheus
+  #   endpoint: ""
+  #   secure: false
+  #   basicAuthEnabled: false
+  #   basicAuthUserNameVariable: ""
+  #   basicAuthPasswordVariable: ""
 
   variables:
     - name: topic
@@ -95,7 +96,7 @@
       box: right_a
       enable: true
      type: asciiGraph # || progressBar || asciiText || markdown || markdown Table ||Table
-      metricUnit: kb/s # byte_to_kb_mb_gb_tb # dynamic_byte_convert
+      metricUnit: kb # byte_to_kb_mb_gb_tb # dynamic_byte_convert
       metric: >
        topk(20, sum(irate(kafka_server_brokertopicmetrics_bytesin_total{topic=~"$topic"}[5m])) by (strimzi_io_cluster, topic)) / 1024
       custom_key: "🍅 {{topic}}"
@@ -113,7 +114,7 @@
      enable: true
       box: right_b
       type: asciiGraph # || progressBarList || asciiText
-      metricUnit: kb/s
+      metricUnit: kb
       metric: >
        topk(20, sum(irate(kafka_server_brokertopicmetrics_bytesout_total{topic=~"$topic"}[5m])) by (strimzi_io_cluster, topic)) / 1024
       custom_key: "🥕 {{topic}}"
@@ -123,9 +124,9 @@
         maxHeight: 17
         maxWidth: 45
       updateIntervalSeconds: 5
-      historyData:
-        enable: true
-        time: 5m
+      # historyData:
+      #   enable: true
+      #   time: 5m
 
     - name: Kafka pods memory usage (Sorted by higher memory usage)
       enable: true
@@ -150,7 +151,6 @@
 
     - name: Kafka Pods memory usage (simpleTable example)
       enable: true
       box: left_b
-
       type: simpleTable
       metricUnit: byte
@@ -161,37 +161,37 @@
         tableType: plain # https://github.com/astanin/python-tabulate?tab=readme-ov-file#table-format
         showValue: true
         headersUppercase: true
-        auto_convert_value_from_byte: true
+        autoConvertValue: true
         showTableIndex: true
       updateIntervalSeconds: 5
-
     - name: Kafka pods details (advancedTable example)
       enable: true
       box: left_c
       type: advancedTable
       metricUnit: byte
-      columns:
-        memory usage:
+      advancedTableColumns:
+        - memory usage:
           metric: |
            sort_desc(sum(container_memory_usage_bytes{namespace="$namespace", pod=~"$pod"}) by (pod, topology_ebs_csi_aws_com_zone))
          metricUnit: byte
-        memory limit:
+        - memory limit:
           metric: |
            sort_desc(sum(container_spec_memory_limit_bytes{namespace="$namespace", pod=~"$pod"}) by (pod, topology_ebs_csi_aws_com_zone))
          metricUnit: byte
-        memory cache:
+        - memory cache:
          metric: |
            sort_desc(sum(container_memory_cache{namespace="$namespace", pod=~"$pod"}) by (pod, topology_ebs_csi_aws_com_zone))
          metricUnit: byte
-        memory swap:
+        - memory swap:
          metric: |
            sum(container_memory_swap{namespace="$namespace", pod=~"$pod"}) by (pod, topology_ebs_csi_aws_com_zone)
          metricUnit: byte
+          autoConvertValue: true
-        file descriptors:
+        - file descriptors:
          metric: |
            sort_desc(sum(container_file_descriptors{namespace="$namespace", pod=~"$pod"}) by (pod, topology_ebs_csi_aws_com_zone))
          metricUnit: counter
-        up time:
+        - up time:
          metric: |
            sum(time() - kube_pod_start_time{namespace="$namespace", pod=~"$pod"}) by (pod)
          metricUnit: seconds
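With this patch every PromQL query carries "$variable" placeholders ($topic, $namespace, $pod) instead of hard-coded label values. A self-contained sketch of the substitution step, assuming a plain string replacement (the tool's own implementation, e.g. _find_variables_in_query(), may differ in detail):

    import re

    def expand_query(query: str, variables: dict) -> str:
        # Replace each $name with its value; unknown names are left as-is.
        # NOTE: \w+ would not cover hyphenated variable names; a broader
        # pattern would be needed for those.
        return re.sub(r'\$(\w+)',
                      lambda m: str(variables.get(m.group(1), m.group(0))),
                      query)

    q = 'sum(container_memory_usage_bytes{namespace="$namespace", pod=~"$pod"}) by (pod)'
    print(expand_query(q, {'namespace': 'kafka', 'pod': '.*brokers.*'}))
    # sum(container_memory_usage_bytes{namespace="kafka", pod=~".*brokers.*"}) by (pod)

Because the values are spliced into regex matchers (=~), a variable default of ".*" keeps the query valid when the corresponding CLI argument is omitted.
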
From 9bd6c4dcaa1d2d22cef00948e28b80cc52ffa205 Mon Sep 17 00:00:00 2001
From: eslam-gomaa
Date: Mon, 24 Jun 2024 19:08:41 +0300
Subject: [PATCH 3/3] yaml validation for dashboards is complete

---
 kptop_tool.py                     |  35 --
 kubePtop/dashboard_monitor.py     |  20 +-
 kubePtop/dashboard_yaml_loader.py | 551 +++++++++++++++++++++++++++++-
 kubePtop/requirements.txt         |   1 +
 4 files changed, 557 insertions(+), 50 deletions(-)

diff --git a/kptop_tool.py b/kptop_tool.py
index 77eb404..5bee3b6 100644
--- a/kptop_tool.py
+++ b/kptop_tool.py
@@ -1,40 +1,5 @@
-# def run():
-#     # It runs at the initilization
-#     from kubePtop.cli import Cli
-
-
-# run()
-
-# from kubePtop.dashboard_monitor import customDashboardMonitoring
 from kubePtop.read_env import ReadEnv
 env = ReadEnv()
 env.read_env()
 from kubePtop.cli_args import Cli
 cli = Cli()
-
-
-# import rich
-
-# test = customDashboardMonitoring()
-
-# Read cli
-# if --dashboard
-#     # .parse_dashboard['data'].get('dashboard').get('variables', {})
-#
-# elif --command
-# else -> print help
-
-
-
-
-
-
-
-
-# rich.print(test._find_variables_in_query('topk(20, sum(irate(kafka_server_brokertopicmetrics_bytesin_total{topic=~"$topic", namespace=~"$namespace"}[5m])) by (strimzi_io_cluster, topic)) / 1024'))
-# exit(1)
-# rich.print(test.build_custom_dashboard("./dashboard.yaml"))
-# rich.print(test.nodeManagedK8sInfo('.*'))
-# print(test.topNode())
-# test.topNodeTable(option="cloud")
-# test.topNodeJson('ip-10-129-143-105.eu-west-1.compute.internal')
diff --git a/kubePtop/dashboard_monitor.py b/kubePtop/dashboard_monitor.py
index fd1d175..45bc1c8 100644
--- a/kubePtop/dashboard_monitor.py
+++ b/kubePtop/dashboard_monitor.py
@@ -106,9 +106,6 @@ def build_custom_dashboard(self, dashboard_data, dashboard_variables):
         # Build the Layout structure
         self.make_layout(layout_structure_dct=dashboard_data['data'])
 
-        # Build the dashboard variables
-        # self.build_variables(variables=dashboard_data['data'].get('dashboard').get('variables', {}), inital_args=inital_args)
-
         # vistualize the metrics on the layout
         self.variables = dashboard_variables
         self.update_layout_visualization(layout_structure_dct=dashboard_data['data'])
@@ -137,7 +134,7 @@ def update_layout_visualization(self, layout_structure_dct):
 
             elif visualization['type'] == 'advancedTable':
                 if 'custom_key' in visualization:
-                    progress_bar_list = self.build_advanced_table_handler(name=visualization['name'], layout_box_name=visualization['box'], advanced_table_options=visualization.get('advancedTableOptions', {}), metric_unit=visualization['metricUnit'], columns=visualization['columns'], custom_key=visualization['custom_key'])
+                    progress_bar_list = self.build_advanced_table_handler(name=visualization['name'], layout_box_name=visualization['box'], advanced_table_options=visualization.get('advancedTableOptions', {}), metric_unit=visualization['metricUnit'], columns=visualization['advancedTableColumns'], custom_key=visualization['custom_key'])
                 else:
                     progress_bar_list = self.build_advanced_table_handler(name=visualization['name'], layout_box_name=visualization['box'], advanced_table_options=visualization.get('advancedTableOptions', {}), metric_unit=visualization['metricUnit'], columns=visualization['columns'])
@@ -147,9 +144,8 @@
         import traceback
         Logging.log.info("Starting the Layout.")
         try:
-            with Live(self.layout, auto_refresh=True, screen=True, refresh_per_second=GlobalAttrs.live_update_interval):
+            with Live(self.layout, auto_refresh=True, screen=layout_structure_dct['dashboard']['layout'].get('fullScreen', True), refresh_per_second=GlobalAttrs.live_update_interval):
                 while True:
-                    # rich.print(self.layout)
                     Logging.log.info(f"waiting for the update interval '{GlobalAttrs.live_update_interval}' before updating the Layout ")
                     time.sleep(GlobalAttrs.live_update_interval)
                     Logging.log.info(f"Updating the layout")
@@ -662,7 +658,13 @@ def build_advanced_table(self, name, layout_box_name, advanced_table_options, me
         box_name = name
         data = {}
 
-        header = list(columns.keys())
+        columns_dct = {}
+        for column in columns:
+            key = next(iter(column))
+            new_dct = {k: v for k, v in column.items() if k != key}
+            columns_dct[key] = new_dct
+
+        header = list(columns_dct.keys())
         header.insert(0, 'name')
 
         if header_upper_case_:
@@ -671,7 +673,7 @@ def build_advanced_table(self, name, layout_box_name, advanced_table_options, me
 
         while True:
             table = [header]
-            for column, column_info in columns.items():
+            for column, column_info in columns_dct.items():
                 metric_data = self.get_metric_data(column_info['metric'], custom_key=custom_key)
 
                 if not metric_data['success']:
@@ -689,7 +691,7 @@ def build_advanced_table(self, name, layout_box_name, advanced_table_options, me
                     }
 
             for name, value in data.items():
-                row = [name] + [value.get(col, '?') for col in columns.keys()] # Ensure order matches headers
+                row = [name] + [value.get(col, '?') for col in columns_dct.keys()] # Ensure order matches headers
                 table.append(row)
 
             out = tabulate(table, headers='firstrow', tablefmt=table_type_, showindex=show_table_index_)
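The columns_dct loop added above is the glue between the new advancedTableColumns YAML shape (a list of single-key-plus-options dicts) and the table builder, which wants a plain name-to-options mapping. Isolated here with dummy data for illustration:

    # Each list item: first key = the column name, remaining keys = its options.
    columns = [
        {'memory usage': None,
         'metric': 'sort_desc(sum(container_memory_usage_bytes{}) by (pod))',
         'metricUnit': 'byte'},
        {'up time': None,
         'metric': 'sum(time() - kube_pod_start_time{}) by (pod)',
         'metricUnit': 'seconds'},
    ]

    columns_dct = {}
    for column in columns:
        key = next(iter(column))  # relies on dicts preserving insertion order (Python 3.7+)
        columns_dct[key] = {k: v for k, v in column.items() if k != key}

    print(list(columns_dct))       # ['memory usage', 'up time']
    print(columns_dct['up time'])  # {'metric': '...', 'metricUnit': 'seconds'}

Keeping the YAML as a list (rather than a mapping) preserves the author's column order in the rendered table.
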
diff --git a/kubePtop/dashboard_yaml_loader.py b/kubePtop/dashboard_yaml_loader.py
index e6c39eb..ea3ef71 100644
--- a/kubePtop/dashboard_yaml_loader.py
+++ b/kubePtop/dashboard_yaml_loader.py
@@ -1,13 +1,554 @@
 import os
 import yaml
-
+import json
+from cerberus import Validator
+import rich
 class dashboardYamlLoader:
     def __init__(self) -> None:
         pass
 
-    def dashboard_yaml_schema_validation(self, yaml):
-        pass
+    def validate_dashboard_schema(self, dashboard_yaml_data):
+        schema_dct = {
+
+            "dashboard": {
+                'type': 'dict',
+                'required': True,
+                'schema': {
+                    "name": {
+                        'type': 'string',
+                        'required': True,
+                    },
+                    "description": {
+                        'type': 'string',
+                        'required': False,
+                        'default': ""
+                    },
+                    "layout": {
+                        'type': 'dict',
+                        'required': True,
+                        'schema': {
+                            'splitMode': {
+                                "type": "string",
+                                'required': False,
+                                'default': 'row',
+                                'allowed': [
+                                    'row',
+                                    'column'
+                                ]
+                            },
+                            'fullScreen': {
+                                "type": "boolean",
+                                'required': False,
+                                'default': True,
+                                'allowed': [
+                                    True,
+                                    False
+                                ]
+                            },
+                            "header": {
+                                'type': 'dict',
+                                'required': False,
+                                'schema': {
+                                    'enable': {
+                                        "type": "boolean",
+                                        'required': False,
+                                        'default': False
+                                    },
+                                    'size': {
+                                        "type": "integer",
+                                        'required': False,
+                                        'default': 0
+                                    },
+                                    'ratio': {
+                                        "type": "integer",
+                                        'required': True,
+                                        'default': 1
+                                    },
+                                }
+                            },
+                            'body': {
+                                'type': 'dict',
+                                'schema': {
+                                    'boxes': {
+                                        'type': 'dict',
+                                        'schema': {
+                                            'left': {
+                                                'type': 'dict',
+                                                'schema': {
+                                                    'enable': {
+                                                        'type': 'boolean',
+                                                        'required': True
+                                                    },
+                                                    'size': {
+                                                        'type': 'integer',
+                                                        'required': False,
+                                                        'default': 0
+                                                    },
+                                                    'ratio': {
+                                                        'type': 'integer',
+                                                        'required': False,
+                                                        'default': 1
+                                                    },
+                                                    'split_mode': {
+                                                        'type': 'string',
+                                                        'required': False,
+                                                        'default': 'row',
+                                                        'allowed': [
+                                                            'row',
+                                                            'column'
+                                                        ]
+                                                    },
+                                                    'split': {
+                                                        'type': 'dict',
+                                                        'required': False,
+                                                        'allow_unknown': True,
+                                                        'schema': {
+                                                            'size': {
+                                                                'type': 'integer',
+                                                                'required': False,
+                                                                'default': 0
+                                                            },
+                                                            'ratio': {
+                                                                'type': 'integer',
+                                                                'required': False,
+                                                                'default': 1
+                                                            }
+                                                        }
+                                                    }
+                                                }
+                                            },
+                                            'middle': {
+                                                'type': 'dict',
+                                                'schema': {
+                                                    'enable': {
+                                                        'type': 'boolean',
+                                                        'required': True
+                                                    },
+                                                    'size': {
+                                                        'type': 'integer',
+                                                        'required': False,
+                                                        'default': 0
+                                                    },
+                                                    'ratio': {
+                                                        'type': 'integer',
+                                                        'required': False,
+                                                        'default': 1
+                                                    },
+                                                    'split_mode': {
+                                                        'type': 'string',
+                                                        'required': False,
+                                                        'default': 'row',
+                                                        'allowed': [
+                                                            'row',
+                                                            'column'
+                                                        ]
+                                                    },
+                                                    'split': {
+                                                        'type': 'dict',
+                                                        'required': False,
+                                                        'allow_unknown': True,
+                                                        'schema': {
+                                                            'size': {
+                                                                'type': 'integer',
+                                                                'required': False,
+                                                                'default': 0
+                                                            },
+                                                            'ratio': {
+                                                                'type': 'integer',
+                                                                'required': False,
+                                                                'default': 1
+                                                            }
+                                                        }
+                                                    }
+                                                }
+                                            },
+                                            'right': {
+                                                'type': 'dict',
+                                                'schema': {
+                                                    'enable': {
+                                                        'type': 'boolean',
+                                                        'required': True
+                                                    },
+                                                    'size': {
+                                                        'type': 'integer',
+                                                        'required': False,
+                                                        'default': 0
+                                                    },
+                                                    'ratio': {
+                                                        'type': 'integer',
+                                                        'required': False,
+                                                        'default': 1
+                                                    },
+                                                    'split_mode': {
+                                                        'type': 'string',
+                                                        'required': False,
+                                                        'default': 'row',
+                                                        'allowed': [
+                                                            'row',
+                                                            'column'
+                                                        ]
+                                                    },
+                                                    'split': {
+                                                        'type': 'dict',
+                                                        'required': False,
+                                                        'allow_unknown': True,
+                                                        'schema': {
+                                                            'size': {
+                                                                'type': 'integer',
+                                                                'required': False,
+                                                                'default': 0
+                                                            },
+                                                            'ratio': {
+                                                                'type': 'integer',
+                                                                'required': False,
+                                                                'default': 1
+                                                            }
+                                                        }
+                                                    }
+                                                }
+                                            }
+                                        }
+                                    }
+                                }
+                            },
+                        }
+                    },
+                    'variables': {
+                        'type': 'list',
+                        'schema': {
+                            'type': 'dict',
+                            'schema': {
+                                'name': {
+                                    'type': 'string',
+                                    'required': True
+                                },
+                                'default': {
+                                    'type': 'string',
+                                    'required': True,
+                                },
+                                'cliArgument': {
+                                    'type': 'dict',
+                                    'schema': {
+                                        'enable': {
+                                            'type': 'boolean',
+                                            'required': True
+                                        },
+                                        'short': {
+                                            'type': 'string',
+                                            'required': True,
+                                            'regex': r'^-\w{1,2}$'
+                                        },
+                                        'required': {
+                                            'type': 'boolean',
+                                            'default': True,
+                                            'required': False
+                                        },
+                                        'description': {
+                                            'type': 'string',
+                                            'required': False
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    },
+                    'visualization': {
+                        'type': 'list',
+                        'schema': {
+                            'type': 'dict',
+                            'allow_unknown': True,
+                            'schema': {
+                                'name': {
+                                    'type': 'string',
+                                    'required': True
+                                },
+                                'box': {
+                                    'type': 'string',
+                                    'required': True
+                                },
+                                'enable': {
+                                    'type': 'boolean',
+                                    'required': False,
+                                    'default': True
+                                },
+                                'type': {
+                                    'type': 'string',
+                                    'required': True,
+                                    'allowed': [
+                                        'asciiGraph',
+                                        'progressBarList',
+                                        'simpleTable',
+                                        'advancedTable',
+                                    ]
+                                },
+                                'metricUnit': {
+                                    'type': 'string',
+                                    'required': True,
+                                    'allowed': [
+                                        'kb',
+                                        'byte',
+                                        'mb',
+                                        'seconds'
+                                    ]
+                                },
+                                'metric': {
+                                    'type': 'string'
+                                },
+                                'custom_key': {
+                                    'type': 'string',
+                                    'required': False,
+                                },
+                                'asciiGraphOptions': {
+                                    'type': 'dict',
+                                    'schema': {
+                                        'height': {
+                                            'type': 'integer',
+                                            'required': False,
+                                            'default': 0,
+                                        },
+                                        'width': {
+                                            'type': 'integer',
+                                            'required': False,
+                                            'default': 80,
+                                        },
+                                        'maxHeight': {
+                                            'type': 'integer',
+                                            'required': False,
+                                            'default': 17,
+                                        },
+                                        'maxWidth': {
+                                            'type': 'integer',
+                                            'required': False,
+                                            'default': 45,
+                                        },
+                                        'updateIntervalSeconds': {
+                                            'type': 'integer',
+                                            'required': False,
+                                            'default': 5,
+                                        }
+                                    }
+                                },
+                                'progressBarListOptions': {
+                                    'type': 'dict',
+                                    'schema': {
+                                        'maxItemsCount': {
+                                            'type': 'integer',
+                                            'required': False,
+                                            'default': 0,
+                                        },
+                                        'lineBreak': {
+                                            'type': 'boolean',
+                                            'required': False,
+                                            'default': True,
+                                        },
+                                        'showBarPercentage': {
+                                            'type': 'boolean',
+                                            'required': False,
+                                            'default': True,
+                                        },
+                                        'barWidth': {
+                                            'type': 'integer',
+                                            'required': False,
+                                            'default': 25,
+                                        },
+                                        'updateIntervalSeconds': {
+                                            'type': 'integer',
+                                            'required': False,
+                                            'default': 5,
+                                        }
+                                    }
+                                },
+                                'simpleTableOptions': {
+                                    'type': 'dict',
+                                    'schema': {
+                                        'tableType': {
+                                            'type': 'string',
+                                            'required': False,
+                                            'default': 'plain',
+                                            'allowed': [
+                                                'plain',
+                                                'simple',
+                                                'github',
+                                                'grid',
+                                                'simple_grid',
+                                                'rounded_grid',
+                                                'heavy_grid',
+                                                'mixed_grid',
+                                                'double_grid',
+                                                'fancy_grid',
+                                                'outline',
+                                                'simple_outline',
+                                                'rounded_outline',
+                                                'heavy_outline',
+                                                'mixed_outline',
+                                                'double_outline',
+                                                'fancy_outline',
+                                                'pipe',
+                                                'orgtbl',
+                                                'asciidoc',
+                                                'jira',
+                                                'presto',
+                                                'pretty',
+                                                'psql',
+                                                'rst',
+                                                'mediawiki',
+                                                'moinmoin',
+                                                'youtrack',
+                                                'html',
+                                                'unsafehtml',
+                                                'latex',
+                                                'latex_raw',
+                                                'latex_booktabs',
+                                                'latex_longtable',
+                                                'textile',
+                                                'tsv',
+                                            ]
+                                        },
+                                        'showValue': {
+                                            'type': 'boolean',
+                                            'required': True,
+                                            'default': True,
+                                        },
+                                        'headersUppercase': {
+                                            'type': 'boolean',
+                                            'required': True,
+                                            'default': True,
+                                        },
+                                        'autoConvertValue': {
+                                            'type': 'boolean',
+                                            'required': True,
+                                            'default': False,
+                                        },
+                                        'showTableIndex': {
+                                            'type': 'boolean',
+                                            'required': True,
+                                            'default': False,
+                                        },
+                                        'updateIntervalSeconds': {
+                                            'type': 'integer',
+                                            'required': True,
+                                            'default': 5,
+                                        }
+                                    }
+                                },
+                                'advancedTableColumns': {
+                                    'type': 'list',
+                                    'required': False,
+                                    'schema': {
+                                        'type': 'dict',
+                                        'required': True,
+                                        'allow_unknown': True,
+                                        'schema': {
+                                            'metric': {
+                                                'type': 'string',
+                                                'required': True
+                                            },
+                                            'metricUnit': {
+                                                'type': 'string',
+                                                'required': True,
+                                                'allowed': [
+                                                    'kb',
+                                                    'byte',
+                                                    'mb',
+                                                    'seconds',
+                                                    'counter'
+                                                ]
+                                            }
+                                        }
+                                    }
+                                },
+                                'advancedTableOptions': {
+                                    'type': 'dict',
+                                    'schema': {
+                                        'tableType': {
+                                            'type': 'string',
+                                            'required': False,
+                                            'default': 'plain',
+                                            'allowed': [
+                                                'plain',
+                                                'simple',
+                                                'github',
+                                                'grid',
+                                                'simple_grid',
+                                                'rounded_grid',
+                                                'heavy_grid',
+                                                'mixed_grid',
+                                                'double_grid',
+                                                'fancy_grid',
+                                                'outline',
+                                                'simple_outline',
+                                                'rounded_outline',
+                                                'heavy_outline',
+                                                'mixed_outline',
+                                                'double_outline',
+                                                'fancy_outline',
+                                                'pipe',
+                                                'orgtbl',
+                                                'asciidoc',
+                                                'jira',
+                                                'presto',
+                                                'pretty',
+                                                'psql',
+                                                'rst',
+                                                'mediawiki',
+                                                'moinmoin',
+                                                'youtrack',
+                                                'html',
+                                                'unsafehtml',
+                                                'latex',
+                                                'latex_raw',
+                                                'latex_booktabs',
+                                                'latex_longtable',
+                                                'textile',
+                                                'tsv',
+                                            ]
+                                        },
+                                        'headersUppercase': {
+                                            'type': 'boolean',
+                                            'required': True,
+                                            'default': True,
+                                        },
+                                        'autoConvertValue': {
+                                            'type': 'boolean',
+                                            'required': True,
+                                            'default': False,
+                                        },
+                                        'showTableIndex': {
+                                            'type': 'boolean',
+                                            'required': True,
+                                            'default': False,
+                                        },
+                                        'updateIntervalSeconds': {
+                                            'type': 'integer',
+                                            'required': True,
+                                            'default': 5,
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            },
+        }
+        # return schema_dct
+        v = Validator(schema_dct)
+        v.validate(dashboard_yaml_data)
+        if v.errors:
+            import re
+            yaml_errors = yaml.dump(v.errors, default_flow_style=False)
+
+            # Regex to find "unknown field" and "must be of"
+            yaml_errors = re.sub(r'(unknown field)', '[red]\\1[/red]', yaml_errors)
+            yaml_errors = re.sub(r'(must be of.*)', '[red]\\1[/red]', yaml_errors)
+            yaml_errors = re.sub(r'(unallowed value.*)', '[red]\\1[/red]', yaml_errors)
+            yaml_errors = re.sub(r'(required field.*)', '[red]\\1[/red]', yaml_errors)
+            yaml_errors = re.sub(r'(null value not allowe.*)', '[red]\\1[/red]', yaml_errors)
+
+            print("ERROR -- Please fix the following in the dashboard YAML file:\n")
+            rich.print("[bold]" + yaml_errors)
+            exit(1)
 
 
 
     def load_dashboard_data(self, dashboard_name):
@@ -36,9 +577,7 @@ def load_dashboard_data(self, dashboard_name):
             return out
 
         # Yaml Schema validation
-
-
-        # Loading variables args
+        self.validate_dashboard_schema(out['data'])
 
         out['success'] = True
         return out
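For readers unfamiliar with Cerberus: Validator(schema).validate(document) returns a boolean and leaves a nested error tree in .errors, which is what the yaml.dump()/re.sub() block above colorizes. A trimmed, self-contained example using just the splitMode rule from the schema (Validator, validate and errors are the standard Cerberus 1.3.x API):

    import yaml
    from cerberus import Validator

    schema_excerpt = {
        'dashboard': {
            'type': 'dict',
            'required': True,
            'schema': {
                'name': {'type': 'string', 'required': True},
                'layout': {
                    'type': 'dict',
                    'required': True,
                    'schema': {
                        'splitMode': {'type': 'string', 'allowed': ['row', 'column']},
                    },
                },
            },
        },
    }

    doc = yaml.safe_load("""
    dashboard:
      name: Kafka cluster
      layout:
        splitMode: diagonal
    """)

    v = Validator(schema_excerpt)
    print(v.validate(doc))  # False
    print(v.errors)
    # {'dashboard': [{'layout': [{'splitMode': ['unallowed value diagonal']}]}]}
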
diff --git a/kubePtop/requirements.txt b/kubePtop/requirements.txt
index b90cdfc..370aac3 100644
--- a/kubePtop/requirements.txt
+++ b/kubePtop/requirements.txt
@@ -4,3 +4,4 @@ tabulate==0.9.0
 argparse
 asciichartpy==1.5.25
 kubernetes==26.1.0
+cerberus==1.3.5
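Finally, a sketch of how a validated 'variables' section can translate into CLI flags, which is the point of this series. This is an illustration only, with hypothetical example values; the real wiring lives in kubePtop/cli_args.py (imported by kptop_tool.py above), whose body is not part of this excerpt:

    import argparse

    def build_parser(variables):
        parser = argparse.ArgumentParser(description='kptop custom dashboard')
        for var in variables:
            cli = var.get('cliArgument', {})
            if not cli.get('enable'):
                continue
            parser.add_argument(
                cli['short'],            # e.g. '-t', constrained by the schema regex ^-\w{1,2}$
                f"--{var['name']}",      # long flag derived from the variable name
                default=var.get('default'),
                required=cli.get('required', False),
                help=cli.get('description', ''),
            )
        return parser

    # Hypothetical variables, shaped like the 'variables' schema above.
    variables = [
        {'name': 'topic', 'default': '.*',
         'cliArgument': {'enable': True, 'short': '-t', 'required': True,
                         'description': 'Kafka topic to filter on'}},
        {'name': 'namespace', 'default': '.*',
         'cliArgument': {'enable': True, 'short': '-n', 'required': False}},
    ]

    args = build_parser(variables).parse_args(['-t', 'payments'])
    print(vars(args))  # {'topic': 'payments', 'namespace': '.*'}

The variable defaults double as argparse defaults, so omitted flags fall back to the ".*" regex and the PromQL queries keep matching everything.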