From c3c92a47451241a85f2a071d19d61a648e312767 Mon Sep 17 00:00:00 2001
From: abdosi <58047199+abdosi@users.noreply.github.com>
Date: Mon, 30 Jan 2023 17:52:50 -0800
Subject: [PATCH 01/66] Skip saidump for Spine Router as this can take more than 5 sec (#2637)

To address sonic-net/sonic-buildimage#13561,
skip saidump on T2 platforms for the time being.
---
 scripts/generate_dump | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/scripts/generate_dump b/scripts/generate_dump
index 7587e9fa3a..7c94806943 100755
--- a/scripts/generate_dump
+++ b/scripts/generate_dump
@@ -1560,6 +1560,7 @@ main() {
     save_cmd "show reboot-cause" reboot.cause

     local asic="$(/usr/local/bin/sonic-cfggen -y /etc/sonic/sonic_version.yml -v asic_type)"
+    local device_type=`sonic-db-cli CONFIG_DB hget 'DEVICE_METADATA|localhost' type`

     # 1st counter snapshot early. Need 2 snapshots to make sense of counters trend.
     save_counter_snapshot $asic 1
@@ -1643,7 +1644,9 @@ main() {
     save_cmd "hdparm -i /dev/sda" "hdparm"
     save_cmd "ps -AwwL -o user,pid,lwp,ppid,nlwp,pcpu,pri,nice,vsize,rss,tty,stat,wchan:12,start,bsdtime,command" "ps.extended"

-    save_saidump
+    if [[ "$device_type" != "SpineRouter" ]]; then
+        save_saidump
+    fi

     if [ "$asic" = "barefoot" ]; then
         collect_barefoot

From 9ee6ac29bbd8a181d2a7e02cef082b6badcd5a27 Mon Sep 17 00:00:00 2001
From: Yaqiang Zhu
Date: Mon, 30 Jan 2023 21:07:12 -0800
Subject: [PATCH 02/66] [doc] Update docs for dhcp_relay config cli (#2598)

What I did
Updated docs about dhcp_relay config cli

How I did it
Updated docs about dhcp_relay config cli

Signed-off-by: Yaqiang Zhu
---
 doc/Command-Reference.md | 68 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 68 insertions(+)

diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md
index 063db2cc4e..f2a4ada15c 100644
--- a/doc/Command-Reference.md
+++ b/doc/Command-Reference.md
@@ -2349,6 +2349,74 @@ This command is used to delete a configured DHCP Relay Destination IP address or
   Restarting DHCP relay service...
   ```

+**config dhcp_relay ipv4 helper add/del**
+
+This command is used to add or delete IPv4 DHCP Relay helper addresses on a VLAN. Note that multiple DHCP Relay helper addresses can be added to or removed from a VLAN interface in a single command.
+
+- Usage:
+  ```
+  config dhcp_relay ipv4 helper (add | del) <vlan_id> <helper_address(es)>
+  ```
+
+- Example:
+  ```
+  admin@sonic:~$ sudo config dhcp_relay ipv4 helper add 1000 7.7.7.7
+  Added DHCP relay address [7.7.7.7] to Vlan1000
+  Restarting DHCP relay service...
+  ```
+
+  ```
+  admin@sonic:~$ sudo config dhcp_relay ipv4 helper add 1000 7.7.7.7 1.1.1.1
+  Added DHCP relay address [7.7.7.7, 1.1.1.1] to Vlan1000
+  Restarting DHCP relay service...
+  ```
+
+  ```
+  admin@sonic:~$ sudo config dhcp_relay ipv4 helper del 1000 7.7.7.7
+  Removed DHCP relay address [7.7.7.7] from Vlan1000
+  Restarting DHCP relay service...
+  ```
+
+  ```
+  admin@sonic:~$ sudo config dhcp_relay ipv4 helper del 1000 7.7.7.7 1.1.1.1
+  Removed DHCP relay address [7.7.7.7, 1.1.1.1] from Vlan1000
+  Restarting DHCP relay service...
+  ```
+
+**config dhcp_relay ipv6 destination add/del**
+
+This command is used to add or delete IPv6 DHCP Relay destination addresses on a VLAN. Note that multiple DHCP Relay destination addresses can be added to or removed from a VLAN interface in a single command.
+
+- Usage:
+  ```
+  config dhcp_relay ipv6 destination (add | del) <vlan_id> <destination_address(es)>
+  ```
+
+- Example:
+  ```
+  admin@sonic:~$ sudo config dhcp_relay ipv6 destination add 1000 fc02:2000::1
+  Added DHCP relay address [fc02:2000::1] to Vlan1000
+  Restarting DHCP relay service...
+ ``` + + ``` + admin@sonic:~$ sudo config dhcp_relay ipv6 destination add 1000 fc02:2000::1 fc02:2000::2 + Added DHCP relay address [fc02:2000::1, fc02:2000::2] to Vlan1000 + Restarting DHCP relay service... + ``` + + ``` + admin@sonic:~$ sudo config dhcp_relay ipv6 destination del 1000 fc02:2000::1 + Removed DHCP relay address [fc02:2000::1] from Vlan1000 + Restarting DHCP relay service... + ``` + + ``` + admin@sonic:~$ sudo config dhcp_relay ipv6 destination del 1000 fc02:2000::1 fc02:2000::2 + Removed DHCP relay address [fc02:2000::1, fc02:2000::2] from Vlan1000 + Restarting DHCP relay service... + ``` + Go Back To [Beginning of the document](#) or [Beginning of this section](#dhcp-relay) From 8239e9ab234000b2c1c0eebe5196c05856a75792 Mon Sep 17 00:00:00 2001 From: kartik-arista <61531803+kartik-arista@users.noreply.github.com> Date: Tue, 31 Jan 2023 10:19:26 -0800 Subject: [PATCH 03/66] Making 'show feature autorestart' more resilient to missing auto_restart config in CONFIG_DB (#2592) Fixes BUG 762723 --- show/feature.py | 4 ++-- tests/feature_test.py | 45 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 2 deletions(-) diff --git a/show/feature.py b/show/feature.py index 547d8d1729..60ff80321a 100644 --- a/show/feature.py +++ b/show/feature.py @@ -156,11 +156,11 @@ def feature_autorestart(db, feature_name): feature_table = db.cfgdb.get_table('FEATURE') if feature_name: if feature_table and feature_name in feature_table: - body.append([feature_name, feature_table[feature_name]['auto_restart']]) + body.append([feature_name, feature_table[ feature_name ].get('auto_restart', 'unknown')]) else: click.echo("Can not find feature {}".format(feature_name)) sys.exit(1) else: for name in natsorted(list(feature_table.keys())): - body.append([name, feature_table[name]['auto_restart']]) + body.append([name, feature_table[ name ].get('auto_restart', 'unknown')]) click.echo(tabulate(body, header)) diff --git a/tests/feature_test.py b/tests/feature_test.py index fa5c2870ea..8706e2a92f 100644 --- a/tests/feature_test.py +++ b/tests/feature_test.py @@ -130,6 +130,32 @@ telemetry enabled """ +show_feature_autorestart_missing_output="""\ +Feature AutoRestart +---------- -------------- +bar unknown +bgp enabled +database always_enabled +dhcp_relay enabled +lldp enabled +nat enabled +pmon enabled +radv enabled +restapi enabled +sflow enabled +snmp enabled +swss enabled +syncd enabled +teamd enabled +telemetry enabled +""" + +show_feature_autorestart_bar_missing_output="""\ +Feature AutoRestart +--------- ------------- +bar unknown +""" + show_feature_bgp_autorestart_output="""\ Feature AutoRestart --------- ------------- @@ -277,6 +303,25 @@ def test_show_unknown_autorestart_status(self, get_cmd_module): print(result.output) assert result.exit_code == 1 + def test_show_feature_autorestart_missing(self, get_cmd_module): + (config, show) = get_cmd_module + db = Db() + dbconn = db.db + db.cfgdb.set_entry("FEATURE", "bar", { "state": "enabled" }) + runner = CliRunner() + + result = runner.invoke(show.cli.commands["feature"].commands["autorestart"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == show_feature_autorestart_missing_output + + result = runner.invoke(show.cli.commands["feature"].commands["autorestart"], ["bar"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == show_feature_autorestart_bar_missing_output + def 
test_config_bgp_feature_state(self, get_cmd_module):
         (config, show) = get_cmd_module
         db = Db()

From 1b71985e350a3c57b4fb2f4fe83e330fcbeeee31 Mon Sep 17 00:00:00 2001
From: wenyiz2021 <91497961+wenyiz2021@users.noreply.github.com>
Date: Wed, 1 Feb 2023 09:33:14 -0800
Subject: [PATCH 04/66] [masic support] 'show run bgp' support for multi-asic (#2427)

Support 'show run bgp' for multi-asics
Add mock tables and UTs for single-asic, multi-asic, bgp not running cases
---
 show/main.py | 38 ++-
 tests/conftest.py | 17 ++
 .../asic0/show_not_running_bgp.txt | 1 +
 tests/mock_tables/asic0/show_run_bgp.txt | 12 +
 tests/mock_tables/asic1/show_run_bgp.txt | 12 +
 tests/mock_tables/show_run_bgp.txt | 64 +++++
 tests/show_run_bgp_test.py | 228 ++++++++++++++++++
 7 files changed, 368 insertions(+), 4 deletions(-)
 create mode 100644 tests/mock_tables/asic0/show_not_running_bgp.txt
 create mode 100644 tests/mock_tables/asic0/show_run_bgp.txt
 create mode 100644 tests/mock_tables/asic1/show_run_bgp.txt
 create mode 100644 tests/mock_tables/show_run_bgp.txt
 create mode 100644 tests/show_run_bgp_test.py

diff --git a/show/main.py b/show/main.py
index 0c9fd46703..e8c607facc 100755
--- a/show/main.py
+++ b/show/main.py
@@ -1439,10 +1439,40 @@ def ports(portname, verbose):
 # 'bgp' subcommand ("show runningconfiguration bgp")
 @runningconfiguration.command()
 @click.option('--verbose', is_flag=True, help="Enable verbose output")
-def bgp(verbose):
-    """Show BGP running configuration"""
-    cmd = 'sudo {} -c "show running-config"'.format(constants.RVTYSH_COMMAND)
-    run_command(cmd, display_cmd=verbose)
+@click.option('--namespace', '-n', 'namespace', required=False, default=None, type=str, show_default=False,
+              help='Option needed for multi-asic only: provide namespace name',
+              callback=multi_asic_util.multi_asic_namespace_validation_callback)
+def bgp(namespace, verbose):
+    """
+    Show BGP running configuration
+    Note:
+    multi-asic can run 'show run bgp' to show all asics, or 'show run bgp -n <namespace>' for a single asic;
+    single-asic can only run 'show run bgp', '-n' is not available
+    """
+
+    if multi_asic.is_multi_asic():
+        if namespace and namespace not in multi_asic.get_namespace_list():
+            ctx = click.get_current_context()
+            ctx.fail("invalid value for -n/--namespace option. 
provide namespace from list {}".format(multi_asic.get_namespace_list())) + if not multi_asic.is_multi_asic() and namespace: + ctx = click.get_current_context() + ctx.fail("-n/--namespace is not available for single asic") + + output = "" + cmd = "show running-config bgp" + import utilities_common.bgp_util as bgp_util + if multi_asic.is_multi_asic(): + if not namespace: + ns_list = multi_asic.get_namespace_list() + for ns in ns_list: + output += "\n------------Showing running config bgp on {}------------\n".format(ns) + output += bgp_util.run_bgp_show_command(cmd, ns) + else: + output += "\n------------Showing running config bgp on {}------------\n".format(namespace) + output += bgp_util.run_bgp_show_command(cmd, namespace) + else: + output += bgp_util.run_bgp_show_command(cmd) + print(output) # 'interfaces' subcommand ("show runningconfiguration interfaces") diff --git a/tests/conftest.py b/tests/conftest.py index 96b80df3e1..bf4c2a401f 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -169,6 +169,9 @@ def setup_single_bgp_instance(request): elif request.param == 'v6': bgp_mocked_json = os.path.join( test_path, 'mock_tables', 'ipv6_bgp_summary.json') + elif request.param == 'show_run_bgp': + bgp_mocked_json = os.path.join( + test_path, 'mock_tables', 'show_run_bgp.txt') elif request.param == 'ip_route': bgp_mocked_json = 'ip_route.json' elif request.param == 'ip_specific_route': @@ -193,6 +196,13 @@ def mock_show_bgp_summary(vtysh_cmd, bgp_namespace, vtysh_shell_cmd=constants.RV return mock_frr_data return "" + def mock_show_run_bgp(request): + if os.path.isfile(bgp_mocked_json): + with open(bgp_mocked_json) as json_data: + mock_frr_data = json_data.read() + return mock_frr_data + return "" + def mock_run_bgp_command_for_static(vtysh_cmd, bgp_namespace="", vtysh_shell_cmd=constants.RVTYSH_COMMAND): if vtysh_cmd == "show ip route vrf all static": return config_int_ip_common.show_ip_route_with_static_expected_output @@ -239,6 +249,9 @@ def mock_run_bgp_command(vtysh_cmd, bgp_namespace, vtysh_shell_cmd=constants.RVT elif request.param == "show_bgp_summary_no_neigh": bgp_util.run_bgp_command = mock.MagicMock( return_value=mock_show_bgp_summary_no_neigh("", "")) + elif request.param.startswith('show_run_bgp'): + bgp_util.run_bgp_command = mock.MagicMock( + return_value=mock_show_run_bgp(request)) else: bgp_util.run_bgp_command = mock.MagicMock( return_value=mock_show_bgp_summary("", "")) @@ -270,6 +283,10 @@ def setup_multi_asic_bgp_instance(request): m_asic_json_file = 'ip_special_recursive_route.json' elif request.param == 'ip_route_summary': m_asic_json_file = 'ip_route_summary.txt' + elif request.param == 'show_run_bgp': + m_asic_json_file = 'show_run_bgp.txt' + elif request.param == 'show_not_running_bgp': + m_asic_json_file = 'show_not_running_bgp.txt' elif request.param.startswith('bgp_v4_network') or \ request.param.startswith('bgp_v6_network') or \ request.param.startswith('bgp_v4_neighbor') or \ diff --git a/tests/mock_tables/asic0/show_not_running_bgp.txt b/tests/mock_tables/asic0/show_not_running_bgp.txt new file mode 100644 index 0000000000..b156e857f1 --- /dev/null +++ b/tests/mock_tables/asic0/show_not_running_bgp.txt @@ -0,0 +1 @@ +Error response from daemon: Container 70e3d3bafd1ab5faf796892acff3e2ccbea3dcd5dcfefcc34f25f7cc916b67bb is not running diff --git a/tests/mock_tables/asic0/show_run_bgp.txt b/tests/mock_tables/asic0/show_run_bgp.txt new file mode 100644 index 0000000000..e5c9a9982c --- /dev/null +++ b/tests/mock_tables/asic0/show_run_bgp.txt @@ -0,0 +1,12 @@ 
+neighbor 10.0.0.1 remote-as 65200 +neighbor 10.0.0.1 peer-group TIER2_V4 +neighbor 10.0.0.1 description ARISTA01T2 +neighbor 10.0.0.5 remote-as 65200 +neighbor 10.0.0.5 peer-group TIER2_V4 +neighbor 10.0.0.5 description ARISTA03T2 +neighbor fc00::2 remote-as 65200 +neighbor fc00::2 peer-group TIER2_V6 +neighbor fc00::2 description ARISTA01T2 +neighbor fc00::6 remote-as 65200 +neighbor fc00::6 peer-group TIER2_V6 +neighbor fc00::6 description ARISTA03T2 diff --git a/tests/mock_tables/asic1/show_run_bgp.txt b/tests/mock_tables/asic1/show_run_bgp.txt new file mode 100644 index 0000000000..de81748cc6 --- /dev/null +++ b/tests/mock_tables/asic1/show_run_bgp.txt @@ -0,0 +1,12 @@ +neighbor 10.0.0.9 remote-as 65200 +neighbor 10.0.0.9 peer-group TIER2_V4 +neighbor 10.0.0.9 description ARISTA05T2 +neighbor 10.0.0.13 remote-as 65200 +neighbor 10.0.0.13 peer-group TIER2_V4 +neighbor 10.0.0.13 description ARISTA07T2 +neighbor fc00::a remote-as 65200 +neighbor fc00::a peer-group TIER2_V6 +neighbor fc00::a description ARISTA05T2 +neighbor fc00::e remote-as 65200 +neighbor fc00::e peer-group TIER2_V6 +neighbor fc00::e description ARISTA07T2 diff --git a/tests/mock_tables/show_run_bgp.txt b/tests/mock_tables/show_run_bgp.txt new file mode 100644 index 0000000000..9a3ae8b13e --- /dev/null +++ b/tests/mock_tables/show_run_bgp.txt @@ -0,0 +1,64 @@ +router bgp 65100 +bgp router-id 10.1.0.32 +bgp log-neighbor-changes +no bgp ebgp-requires-policy +no bgp default ipv4-unicast +bgp graceful-restart restart-time 240 +bgp graceful-restart select-defer-time 45 +bgp graceful-restart +bgp graceful-restart preserve-fw-state +bgp bestpath as-path multipath-relax +neighbor BGPSLBPassive peer-group +neighbor BGPSLBPassive remote-as 65432 +neighbor BGPSLBPassive passive +neighbor BGPSLBPassive ebgp-multihop 255 +neighbor BGPSLBPassive update-source 10.1.0.32 +neighbor BGPVac peer-group +neighbor BGPVac remote-as 65432 +neighbor BGPVac passive +neighbor BGPVac ebgp-multihop 255 +neighbor BGPVac update-source 10.1.0.32 +neighbor PEER_V4 peer-group +neighbor PEER_V6 peer-group +neighbor 10.0.0.57 remote-as 64600 +neighbor 10.0.0.57 peer-group PEER_V4 +neighbor 10.0.0.57 description ARISTA01T1 +neighbor 10.0.0.57 timers 3 10 +neighbor 10.0.0.57 timers connect 10 +neighbor 10.0.0.59 remote-as 64600 +neighbor 10.0.0.59 peer-group PEER_V4 +neighbor 10.0.0.59 description ARISTA02T1 +neighbor 10.0.0.59 timers 3 10 +neighbor 10.0.0.59 timers connect 10 +neighbor 10.0.0.61 remote-as 64600 +neighbor 10.0.0.61 peer-group PEER_V4 +neighbor 10.0.0.61 description ARISTA03T1 +neighbor 10.0.0.61 timers 3 10 +neighbor 10.0.0.61 timers connect 10 +neighbor 10.0.0.63 remote-as 64600 +neighbor 10.0.0.63 peer-group PEER_V4 +neighbor 10.0.0.63 description ARISTA04T1 +neighbor 10.0.0.63 timers 3 10 +neighbor 10.0.0.63 timers connect 10 +neighbor fc00::72 remote-as 64600 +neighbor fc00::72 peer-group PEER_V6 +neighbor fc00::72 description ARISTA01T1 +neighbor fc00::72 timers 3 10 +neighbor fc00::72 timers connect 10 +neighbor fc00::76 remote-as 64600 +neighbor fc00::76 peer-group PEER_V6 +neighbor fc00::76 description ARISTA02T1 +neighbor fc00::76 timers 3 10 +neighbor fc00::76 timers connect 10 +neighbor fc00::7a remote-as 64600 +neighbor fc00::7a peer-group PEER_V6 +neighbor fc00::7a description ARISTA03T1 +neighbor fc00::7a timers 3 10 +neighbor fc00::7a timers connect 10 +neighbor fc00::7e remote-as 64600 +neighbor fc00::7e peer-group PEER_V6 +neighbor fc00::7e description ARISTA04T1 +neighbor fc00::7e timers 3 10 +neighbor fc00::7e timers 
connect 10 +bgp listen range 10.255.0.0/25 peer-group BGPSLBPassive +bgp listen range 192.168.0.0/21 peer-group BGPVac diff --git a/tests/show_run_bgp_test.py b/tests/show_run_bgp_test.py new file mode 100644 index 0000000000..4d3ff843a0 --- /dev/null +++ b/tests/show_run_bgp_test.py @@ -0,0 +1,228 @@ +import os +import pytest +import importlib +from click.testing import CliRunner + +from utilities_common import multi_asic +from utilities_common import constants + +from unittest.mock import patch + +from sonic_py_common import device_info +import show.main as show + + +show_run_bgp_sasic = \ +"""router bgp 65100 +bgp router-id 10.1.0.32 +bgp log-neighbor-changes +no bgp ebgp-requires-policy +no bgp default ipv4-unicast +bgp graceful-restart restart-time 240 +bgp graceful-restart select-defer-time 45 +bgp graceful-restart +bgp graceful-restart preserve-fw-state +bgp bestpath as-path multipath-relax +neighbor BGPSLBPassive peer-group +neighbor BGPSLBPassive remote-as 65432 +neighbor BGPSLBPassive passive +neighbor BGPSLBPassive ebgp-multihop 255 +neighbor BGPSLBPassive update-source 10.1.0.32 +neighbor BGPVac peer-group +neighbor BGPVac remote-as 65432 +neighbor BGPVac passive +neighbor BGPVac ebgp-multihop 255 +neighbor BGPVac update-source 10.1.0.32 +neighbor PEER_V4 peer-group +neighbor PEER_V6 peer-group +neighbor 10.0.0.57 remote-as 64600 +neighbor 10.0.0.57 peer-group PEER_V4 +neighbor 10.0.0.57 description ARISTA01T1 +neighbor 10.0.0.57 timers 3 10 +neighbor 10.0.0.57 timers connect 10 +neighbor 10.0.0.59 remote-as 64600 +neighbor 10.0.0.59 peer-group PEER_V4 +neighbor 10.0.0.59 description ARISTA02T1 +neighbor 10.0.0.59 timers 3 10 +neighbor 10.0.0.59 timers connect 10 +neighbor 10.0.0.61 remote-as 64600 +neighbor 10.0.0.61 peer-group PEER_V4 +neighbor 10.0.0.61 description ARISTA03T1 +neighbor 10.0.0.61 timers 3 10 +neighbor 10.0.0.61 timers connect 10 +neighbor 10.0.0.63 remote-as 64600 +neighbor 10.0.0.63 peer-group PEER_V4 +neighbor 10.0.0.63 description ARISTA04T1 +neighbor 10.0.0.63 timers 3 10 +neighbor 10.0.0.63 timers connect 10 +neighbor fc00::72 remote-as 64600 +neighbor fc00::72 peer-group PEER_V6 +neighbor fc00::72 description ARISTA01T1 +neighbor fc00::72 timers 3 10 +neighbor fc00::72 timers connect 10 +neighbor fc00::76 remote-as 64600 +neighbor fc00::76 peer-group PEER_V6 +neighbor fc00::76 description ARISTA02T1 +neighbor fc00::76 timers 3 10 +neighbor fc00::76 timers connect 10 +neighbor fc00::7a remote-as 64600 +neighbor fc00::7a peer-group PEER_V6 +neighbor fc00::7a description ARISTA03T1 +neighbor fc00::7a timers 3 10 +neighbor fc00::7a timers connect 10 +neighbor fc00::7e remote-as 64600 +neighbor fc00::7e peer-group PEER_V6 +neighbor fc00::7e description ARISTA04T1 +neighbor fc00::7e timers 3 10 +neighbor fc00::7e timers connect 10 +bgp listen range 10.255.0.0/25 peer-group BGPSLBPassive +bgp listen range 192.168.0.0/21 peer-group BGPVac + +""" + +show_run_bgp_masic = \ +""" +------------Showing running config bgp on asic0------------ +neighbor 10.0.0.1 remote-as 65200 +neighbor 10.0.0.1 peer-group TIER2_V4 +neighbor 10.0.0.1 description ARISTA01T2 +neighbor 10.0.0.5 remote-as 65200 +neighbor 10.0.0.5 peer-group TIER2_V4 +neighbor 10.0.0.5 description ARISTA03T2 +neighbor fc00::2 remote-as 65200 +neighbor fc00::2 peer-group TIER2_V6 +neighbor fc00::2 description ARISTA01T2 +neighbor fc00::6 remote-as 65200 +neighbor fc00::6 peer-group TIER2_V6 +neighbor fc00::6 description ARISTA03T2 + +------------Showing running config bgp on asic1------------ +neighbor 
10.0.0.9 remote-as 65200 +neighbor 10.0.0.9 peer-group TIER2_V4 +neighbor 10.0.0.9 description ARISTA05T2 +neighbor 10.0.0.13 remote-as 65200 +neighbor 10.0.0.13 peer-group TIER2_V4 +neighbor 10.0.0.13 description ARISTA07T2 +neighbor fc00::a remote-as 65200 +neighbor fc00::a peer-group TIER2_V6 +neighbor fc00::a description ARISTA05T2 +neighbor fc00::e remote-as 65200 +neighbor fc00::e peer-group TIER2_V6 +neighbor fc00::e description ARISTA07T2 + +""" + +show_run_bgp_masic_asic0 = \ +""" +------------Showing running config bgp on asic0------------ +neighbor 10.0.0.1 remote-as 65200 +neighbor 10.0.0.1 peer-group TIER2_V4 +neighbor 10.0.0.1 description ARISTA01T2 +neighbor 10.0.0.5 remote-as 65200 +neighbor 10.0.0.5 peer-group TIER2_V4 +neighbor 10.0.0.5 description ARISTA03T2 +neighbor fc00::2 remote-as 65200 +neighbor fc00::2 peer-group TIER2_V6 +neighbor fc00::2 description ARISTA01T2 +neighbor fc00::6 remote-as 65200 +neighbor fc00::6 peer-group TIER2_V6 +neighbor fc00::6 description ARISTA03T2 + +""" + +show_run_bgp_not_running = \ +""" +------------Showing running config bgp on asic0------------ +Error response from daemon: Container 70e3d3bafd1ab5faf796892acff3e2ccbea3dcd5dcfefcc34f25f7cc916b67bb is not running + +""" + +class TestShowRunBgpSingleAsic(object): + @classmethod + def setup_class(cls): + print("SETUP") + from .mock_tables import mock_single_asic + importlib.reload(mock_single_asic) + from .mock_tables import dbconnector + dbconnector.load_namespace_config() + + @pytest.mark.parametrize('setup_single_bgp_instance', + [ + 'show_run_bgp', + ], + indirect=['setup_single_bgp_instance']) + + def test_show_run_bgp_single(self, + setup_single_bgp_instance): + runner = CliRunner() + result = runner.invoke(show.cli.commands["runningconfiguration"].commands["bgp"], []) + print("{}".format(result.output)) + assert result.exit_code == 0 + assert result.output == show_run_bgp_sasic + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ['UTILITIES_UNIT_TESTING'] = "0" + from .mock_tables import mock_single_asic + importlib.reload(mock_single_asic) + from .mock_tables import dbconnector + dbconnector.load_database_config() + + +class TestShowRunBgpMultiAsic(object): + @classmethod + def setup_class(cls): + print("SETUP") + from .mock_tables import mock_multi_asic + importlib.reload(mock_multi_asic) + from .mock_tables import dbconnector + dbconnector.load_namespace_config() + + @pytest.mark.parametrize('setup_multi_asic_bgp_instance', + [ + 'show_run_bgp', + ], + indirect=['setup_multi_asic_bgp_instance']) + def test_show_run_bgp_all_asics(self, + setup_multi_asic_bgp_instance): + runner = CliRunner() + result = runner.invoke(show.cli.commands["runningconfiguration"].commands["bgp"], []) + print("{}".format(result.output)) + assert result.exit_code == 0 + assert result.output == show_run_bgp_masic + + + @pytest.mark.parametrize('setup_multi_asic_bgp_instance', + [ + 'show_run_bgp', + ], + indirect=['setup_multi_asic_bgp_instance']) + def test_show_run_bgp_asic0(self, + setup_multi_asic_bgp_instance): + runner = CliRunner() + result = runner.invoke(show.cli.commands["runningconfiguration"].commands["bgp"], ["-nasic0"]) + print("{}".format(result.output)) + assert result.exit_code == 0 + assert result.output == show_run_bgp_masic_asic0 + + @pytest.mark.parametrize('setup_multi_asic_bgp_instance', + [ + 'show_not_running_bgp', + ], + indirect=['setup_multi_asic_bgp_instance']) + def test_bgp0_not_running(self, + setup_multi_asic_bgp_instance): + runner = CliRunner() 
+        result = runner.invoke(show.cli.commands["runningconfiguration"].commands["bgp"], ["-nasic0"])
+        print("{}".format(result.output))
+        assert result.exit_code == 0
+        assert result.output == show_run_bgp_not_running
+
+    @classmethod
+    def teardown_class(cls):
+        print("TEARDOWN")
+        from .mock_tables import mock_single_asic
+        importlib.reload(mock_single_asic)
+        from .mock_tables import dbconnector
+        dbconnector.load_database_config()

From 79ffd9fde54aa8247714ef8734a22c3ece018cac Mon Sep 17 00:00:00 2001
From: longhuan-cisco <84595962+longhuan-cisco@users.noreply.github.com>
Date: Wed, 1 Feb 2023 11:12:41 -0800
Subject: [PATCH 05/66] Add Transceiver PM basic CLI support to show output from TRANSCEIVER_PM table for ZR (#2615)

* Transceiver PM basic CLI support to show output from TRANSCEIVER_PM table

* Fix alert typo

* Fix display format and add cd short link

* Add doc for pm

* Update Command-Reference.md
---
 doc/Command-Reference.md | 26 +++++-
 scripts/sfpshow | 129 ++++++++++++++++++++++++++++
 show/interfaces/__init__.py | 23 +++++
 tests/mock_tables/state_db.json | 146 ++++++++++++++++++++++++++++++++
 tests/sfp_test.py | 57 +++++++++++--
 5 files changed, 373 insertions(+), 8 deletions(-)

diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md
index f2a4ada15c..6882e48e65 100644
--- a/doc/Command-Reference.md
+++ b/doc/Command-Reference.md
@@ -927,7 +927,7 @@ This command displays information for all the interfaces for the transceiver req
 - Usage:
   ```
-  show interfaces transceiver (eeprom [-d|--dom] | lpmode | presence | error-status [-hw|--fetch-from-hardware]) [<interface_name>]
+  show interfaces transceiver (eeprom [-d|--dom] | lpmode | presence | error-status [-hw|--fetch-from-hardware] | pm) [<interface_name>]
   ```

 - Example (Decode and display information stored on the EEPROM of SFP transceiver connected to Ethernet0):
@@ -990,6 +990,30 @@ This command displays information for all the interfaces for the transceiver req
   Ethernet100 OK
   ```

+- Example (Display performance monitoring info of SFP transceiver connected to Ethernet100):
+  ```
+  admin@sonic:~$ show interfaces transceiver pm Ethernet100
+  Ethernet100:
+      Parameter        Unit    Min       Avg       Max       Threshold    Threshold    Threshold     Threshold    Threshold    Threshold
+                                                             High         High         Crossing      Low          Low          Crossing
+                                                             Alarm        Warning      Alert-High    Alarm        Warning      Alert-Low
+      ---------------  ------  --------  --------  --------  -----------  -----------  ------------  -----------  -----------  -----------
+      Tx Power         dBm     -8.22     -8.23     -8.24     -5.0         -6.0         False         -16.99       -16.003      False
+      Rx Total Power   dBm     -10.61    -10.62    -10.62    2.0          0.0          False         -21.0        -18.0        False
+      Rx Signal Power  dBm     -40.0     0.0       40.0      13.0         10.0         True          -18.0        -15.0        True
+      CD-short link    ps/nm   0.0       0.0       0.0       1000.0       500.0        False         -1000.0      -500.0       False
+      PDL              dB      0.5       0.6       0.6       4.0          4.0          False         0.0          0.0          False
+      OSNR             dB      36.5      36.5      36.5      99.0         99.0         False         0.0          0.0          False
+      eSNR             dB      30.5      30.5      30.5      99.0         99.0         False         0.0          0.0          False
+      CFO              MHz     54.0      70.0      121.0     3800.0       3800.0       False         -3800.0      -3800.0      False
+      DGD              ps      5.37      5.56      5.81      7.0          7.0          False         0.0          0.0          False
+      SOPMD            ps^2    0.0       0.0       0.0       655.35       655.35       False         0.0          0.0          False
+      SOP ROC          krad/s  1.0       1.0       2.0       N/A          N/A          N/A           N/A          N/A          N/A
+      Pre-FEC BER      N/A     4.58E-04  4.66E-04  5.76E-04  1.25E-02     1.10E-02     0.0           0.0          0.0          0.0
+      Post-FEC BER     N/A     0.0       0.0       0.0       1000.0       1.0          False         0.0          0.0          False
+      EVM              %       100.0     100.0     100.0     N/A          N/A          N/A           N/A          N/A          N/A
+  ```
+
 Go Back To [Beginning of the document](#) or [Beginning of this section](#basic-show-commands)

 ## AAA & TACACS+

diff --git a/scripts/sfpshow b/scripts/sfpshow
index 0787688903..7b3c0caca0 100755
--- a/scripts/sfpshow
+++ b/scripts/sfpshow
@@ -202,6 
+202,36 @@ QSFP_DD_DOM_VALUE_UNIT_MAP = { 'voltage': 'Volts' } +ZR_PM_HEADER = ['Parameter', 'Unit', 'Min', 'Avg', 'Max', + 'Threshold\nHigh\nAlarm', 'Threshold\nHigh\nWarning', + 'Threshold\nCrossing\nAlert-High', + 'Threshold\nLow\nAlarm', 'Threshold\nLow\nWarning', + 'Threshold\nCrossing\nAlert-Low'] + +ZR_PM_VALUE_KEY_SUFFIXS = ['min', 'avg', 'max'] + +ZR_PM_THRESHOLD_KEY_SUFFIXS = ['highalarm', + 'highwarning', 'lowalarm', 'lowwarning'] + +# mapping from parameter_name to [unit, parameter_key_prefix] +ZR_PM_INFO_MAP = { + 'Tx Power': ['dBm', 'tx_power'], + 'Rx Total Power': ['dBm', 'rx_tot_power'], + 'Rx Signal Power': ['dBm', 'rx_sig_power'], + 'CD-short link': ['ps/nm', 'cd'], + 'PDL': ['dB', 'pdl'], + 'OSNR': ['dB', 'osnr'], + 'eSNR': ['dB', 'esnr'], + 'CFO': ['MHz', 'cfo'], + 'DGD': ['ps', 'dgd'], + 'SOPMD': ['ps^2', 'sopmd'], + 'SOP ROC': ['krad/s', 'soproc'], + 'Pre-FEC BER': ['N/A', 'prefec_ber'], + 'Post-FEC BER': ['N/A', 'uncorr_frames'], + 'EVM': ['%', 'evm'] +} + +ZR_PM_NOT_APPLICABLE_STR = 'Transceiver performance monitoring not applicable' def display_invalid_intf_eeprom(intf_name): output = intf_name + ': SFP EEPROM Not detected\n' @@ -215,6 +245,10 @@ def display_invalid_intf_presence(intf_name): click.echo(tabulate(port_table, header)) +def display_invalid_intf_pm(intf_name): + output = intf_name + ': %s\n' % ZR_PM_NOT_APPLICABLE_STR + click.echo(output) + class SFPShow(object): def __init__(self, intf_name, namespace_option, dump_dom=False): super(SFPShow, self).__init__() @@ -223,6 +257,7 @@ class SFPShow(object): self.dump_dom = dump_dom self.table = [] self.intf_eeprom: Dict[str, str] = {} + self.intf_pm: Dict[str, str] = {} self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace_option) # Convert dict values to cli output string @@ -402,6 +437,66 @@ class SFPShow(object): return output + def convert_pm_prefix_to_threshold_prefix(self, pm_prefix): + if pm_prefix == 'uncorr_frames': + return 'postfecber' + elif pm_prefix == 'cd': + return 'cdshort' + else: + return pm_prefix.replace('_', '') + + def beautify_pm_field(self, prefix, field): + if field is None: + return 'N/A' + elif prefix in {'prefec_ber'}: + return "{:.2E}".format(field) if field != 0 else '0.0' + else: + return str(field) + + def convert_interface_sfp_pm_to_cli_output_string(self, state_db, interface_name): + sfp_pm_dict = state_db.get_all( + self.db.STATE_DB, 'TRANSCEIVER_PM|{}'.format(interface_name)) + sfp_threshold_dict = state_db.get_all( + state_db.STATE_DB, 'TRANSCEIVER_DOM_THRESHOLD|{}'.format(interface_name)) + table = [] + indent_num = 4 + indent = ' ' * indent_num + if sfp_pm_dict: + output = '\n' + indent + for param_name, (unit, prefix) in ZR_PM_INFO_MAP.items(): + row = [param_name, unit] + values = [] + for suffix in ZR_PM_VALUE_KEY_SUFFIXS: + key = prefix + '_' + suffix + values.append( + float(sfp_pm_dict[key]) if key in sfp_pm_dict else None) + + thresholds = [] + for suffix in ZR_PM_THRESHOLD_KEY_SUFFIXS: + key = self.convert_pm_prefix_to_threshold_prefix( + prefix) + suffix + thresholds.append( + float(sfp_threshold_dict[key]) if key in sfp_threshold_dict else None) + + tca_high, tca_low = None, None + if values[2] is not None and thresholds[0] is not None: + # TCA-High: max > high_alarm + tca_high = values[2] > thresholds[0] + if values[0] is not None and thresholds[2] is not None: + # TCA-low: min < low_alarm + tca_low = values[0] < thresholds[2] + + for field in values + thresholds[:2] + [tca_high] + thresholds[2:] + [tca_low]: + 
row.append(self.beautify_pm_field(prefix, field)) + table.append(row) + + output += tabulate(table, + ZR_PM_HEADER, disable_numparse=True).replace('\n', '\n' + indent) + output += '\n' + else: + output = ZR_PM_NOT_APPLICABLE_STR + '\n' + return output + @multi_asic_util.run_on_multi_asic def get_eeprom(self): if self.intf_name is not None: @@ -441,6 +536,19 @@ class SFPShow(object): self.table += port_table + @multi_asic_util.run_on_multi_asic + def get_pm(self): + if self.intf_name is not None: + self.intf_pm[self.intf_name] = self.convert_interface_sfp_pm_to_cli_output_string( + self.db, self.intf_name) + else: + port_table_keys = self.db.keys(self.db.APPL_DB, "PORT_TABLE:*") + for i in port_table_keys: + interface = re.split(':', i, maxsplit=1)[-1].strip() + if interface and interface.startswith(front_panel_prefix()) and not interface.startswith((backplane_prefix(), inband_prefix(), recirc_prefix())): + self.intf_pm[interface] = self.convert_interface_sfp_pm_to_cli_output_string( + self.db, interface) + def display_eeprom(self): click.echo("\n".join([f"{k}: {v}" for k, v in natsorted(self.intf_eeprom.items())])) @@ -449,6 +557,9 @@ class SFPShow(object): sorted_port_table = natsorted(self.table) click.echo(tabulate(sorted_port_table, header)) + def display_pm(self): + click.echo( + "\n".join([f"{k}: {v}" for k, v in natsorted(self.intf_pm.items())])) # This is our main entrypoint - the main 'sfpshow' command @@ -494,6 +605,24 @@ def presence(port, namespace): sfp.get_presence() sfp.display_presence() +# 'pm' subcommand + + +@cli.command() +@click.option('-p', '--port', metavar='', help="Display SFP PM for port only") +@click.option('-n', '--namespace', default=None, help="Display interfaces for specific namespace") +def pm(port, namespace): + if port and multi_asic.is_multi_asic() and namespace is None: + try: + namespace = multi_asic.get_namespace_for_port(port) + except Exception: + display_invalid_intf_pm(port) + sys.exit(1) + + sfp = SFPShow(port, namespace) + sfp.get_pm() + sfp.display_pm() + if __name__ == "__main__": cli() diff --git a/show/interfaces/__init__.py b/show/interfaces/__init__.py index 3e82a68e66..0b172d6982 100644 --- a/show/interfaces/__init__.py +++ b/show/interfaces/__init__.py @@ -446,6 +446,29 @@ def eeprom(interfacename, dump_dom, namespace, verbose): clicommon.run_command(cmd, display_cmd=verbose) +@transceiver.command() +@click.argument('interfacename', required=False) +@click.option('--namespace', '-n', 'namespace', default=None, show_default=True, + type=click.Choice(multi_asic_util.multi_asic_ns_choices()), help='Namespace name or all') +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def pm(interfacename, namespace, verbose): + """Show interface transceiver performance monitoring information""" + + ctx = click.get_current_context() + + cmd = "sfpshow pm" + + if interfacename is not None: + interfacename = try_convert_interfacename_from_alias( + ctx, interfacename) + + cmd += " -p {}".format(interfacename) + + if namespace is not None: + cmd += " -n {}".format(namespace) + + clicommon.run_command(cmd, display_cmd=verbose) + @transceiver.command() @click.argument('interfacename', required=False) @click.option('--verbose', is_flag=True, help="Enable verbose output") diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json index 12552997b9..9fda6fa50f 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -227,6 +227,152 @@ "nominal_bit_rate": "Not supported for CMIS cables", 
"application_advertisement": "{1: {'host_electrical_interface_id': '400G CR8', 'module_media_interface_id': 'Copper cable', 'media_lane_count': 8, 'host_lane_count': 8, 'host_lane_assignment_options': 1, 'media_lane_assignment_options': 2}, 2: {'host_electrical_interface_id': '200GBASE-CR4 (Clause 136)'}}" }, + "TRANSCEIVER_DOM_THRESHOLD|Ethernet44":{ + "temphighalarm": "80.0", + "templowalarm": "-5.0", + "temphighwarning": "75.0", + "templowwarning": "0.0", + "vcchighalarm": "3.45", + "vcclowalarm": "3.1", + "vcchighwarning": "3.4", + "vcclowwarning": "3.15", + "rxpowerhighalarm": "2.0", + "rxpowerlowalarm": "-21.024", + "rxpowerhighwarning": "0.0", + "rxpowerlowwarning": "-18.013", + "txpowerhighalarm": "-5.0", + "txpowerlowalarm": "-16.99", + "txpowerhighwarning": "-6.0", + "txpowerlowwarning": "-16.003", + "txbiashighalarm": "450.0", + "txbiaslowalarm": "100.0", + "txbiashighwarning": "420.0", + "txbiaslowwarning": "110.0", + "lasertemphighalarm": "80.0", + "lasertemplowalarm": "-5.0", + "lasertemphighwarning": "75.0", + "lasertemplowwarning": "0.0", + "prefecberhighalarm": "0.0125", + "prefecberlowalarm": "0.0", + "prefecberhighwarning": "0.011000000000000001", + "prefecberlowwarning": "0.0", + "postfecberhighalarm": "1000", + "postfecberlowalarm": "0.0", + "postfecberhighwarning": "1.0", + "postfecberlowwarning": "0.0", + "biasxihighalarm": "99.00053406576639", + "biasxilowalarm": "0.9994659342336156", + "biasxihighwarning": "94.99961852445259", + "biasxilowwarning": "5.000381475547417", + "biasxqhighalarm": "99.00053406576639", + "biasxqlowalarm": "0.9994659342336156", + "biasxqhighwarning": "94.99961852445259", + "biasxqlowwarning": "5.000381475547417", + "biasxphighalarm": "99.00053406576639", + "biasxplowalarm": "0.9994659342336156", + "biasxphighwarning": "94.99961852445259", + "biasxplowwarning": "5.000381475547417", + "biasyihighalarm": "99.00053406576639", + "biasyilowalarm": "0.9994659342336156", + "biasyihighwarning": "94.99961852445259", + "biasyilowwarning": "5.000381475547417", + "biasyqhighalarm": "99.00053406576639", + "biasyqlowalarm": "0.9994659342336156", + "biasyqhighwarning": "94.99961852445259", + "biasyqlowwarning": "5.000381475547417", + "biasyphighalarm": "99.00053406576639", + "biasyplowalarm": "0.9994659342336156", + "biasyphighwarning": "94.99961852445259", + "biasyplowwarning": "5.000381475547417", + "cdshorthighalarm": "1000", + "cdshortlowalarm": "-1000", + "cdshorthighwarning": "500", + "cdshortlowwarning": "-500", + "cdlonghighalarm": "400000", + "cdlonglowalarm": "-400000", + "cdlonghighwarning": "200000", + "cdlonglowwarning": "-200000", + "dgdhighalarm": "7.0", + "dgdlowalarm": "0.0", + "dgdhighwarning": "7.0", + "dgdlowwarning": "0.0", + "sopmdhighalarm": "655.35", + "sopmdlowalarm": "0.0", + "sopmdhighwarning": "655.35", + "sopmdlowwarning": "0.0", + "pdlhighalarm": "4.0", + "pdllowalarm": "0.0", + "pdlhighwarning": "4.0", + "pdllowwarning": "0.0", + "osnrhighalarm": "99.0", + "osnrlowalarm": "0.0", + "osnrhighwarning": "99.0", + "osnrlowwarning": "0.0", + "esnrhighalarm": "99.0", + "esnrlowalarm": "0.0", + "esnrhighwarning": "99.0", + "esnrlowwarning": "0.0", + "cfohighalarm": "3800", + "cfolowalarm": "-3800", + "cfohighwarning": "3800", + "cfolowwarning": "-3800", + "txcurrpowerhighalarm": "-5.0", + "txcurrpowerlowalarm": "-17.0", + "txcurrpowerhighwarning": "-6.0", + "txcurrpowerlowwarning": "-16.0", + "rxtotpowerhighalarm": "2.0", + "rxtotpowerlowalarm": "-21.0", + "rxtotpowerhighwarning": "0.0", + "rxtotpowerlowwarning": "-18.0", + 
"rxsigpowerhighalarm": "13.0", + "rxsigpowerlowalarm": "-18.0", + "rxsigpowerhighwarning": "10.0", + "rxsigpowerlowwarning": "-15.0" + }, + "TRANSCEIVER_PM|Ethernet44":{ + "prefec_ber_avg": "0.00046578129838019075", + "prefec_ber_min": "0.00045750117895600233", + "prefec_ber_max": "0.000575639239547097", + "uncorr_frames_avg": "0.0", + "uncorr_frames_min": "0.0", + "uncorr_frames_max": "0.0", + "cd_avg": "0", + "cd_min": "0", + "cd_max": "0", + "dgd_avg": "5.56", + "dgd_min": "5.37", + "dgd_max": "5.81", + "sopmd_avg": "0.0", + "sopmd_min": "0.0", + "sopmd_max": "0.0", + "pdl_avg": "0.6", + "pdl_min": "0.5", + "pdl_max": "0.6", + "osnr_avg": "36.5", + "osnr_min": "36.5", + "osnr_max": "36.5", + "esnr_avg": "30.5", + "esnr_min": "30.5", + "esnr_max": "30.5", + "cfo_avg": "70", + "cfo_min": "54", + "cfo_max": "121", + "evm_avg": "100.0", + "evm_min": "100.0", + "evm_max": "100.0", + "soproc_avg": "1", + "soproc_min": "1", + "soproc_max": "2", + "tx_power_avg": "-8.23", + "tx_power_min": "-8.22", + "tx_power_max": "-8.24", + "rx_tot_power_avg": "-10.62", + "rx_tot_power_min": "-10.61", + "rx_tot_power_max": "-10.62", + "rx_sig_power_avg": "0", + "rx_sig_power_min": "-40", + "rx_sig_power_max": "40" + }, "TRANSCEIVER_STATUS|Ethernet0": { "status": "67", "error": "Blocking Error|High temperature" diff --git a/tests/sfp_test.py b/tests/sfp_test.py index 6d5d9fa7af..5e2c74265a 100644 --- a/tests/sfp_test.py +++ b/tests/sfp_test.py @@ -193,6 +193,28 @@ Vendor SN: INKAO2900002A """ +test_qsfp_dd_pm_output = """\ +Ethernet44: + Parameter Unit Min Avg Max Threshold Threshold Threshold Threshold Threshold Threshold + High High Crossing Low Low Crossing + Alarm Warning Alert-High Alarm Warning Alert-Low + --------------- ------ -------- -------- -------- ----------- ----------- ------------ ----------- ----------- ----------- + Tx Power dBm -8.22 -8.23 -8.24 -5.0 -6.0 False -16.99 -16.003 False + Rx Total Power dBm -10.61 -10.62 -10.62 2.0 0.0 False -21.0 -18.0 False + Rx Signal Power dBm -40.0 0.0 40.0 13.0 10.0 True -18.0 -15.0 True + CD-short link ps/nm 0.0 0.0 0.0 1000.0 500.0 False -1000.0 -500.0 False + PDL dB 0.5 0.6 0.6 4.0 4.0 False 0.0 0.0 False + OSNR dB 36.5 36.5 36.5 99.0 99.0 False 0.0 0.0 False + eSNR dB 30.5 30.5 30.5 99.0 99.0 False 0.0 0.0 False + CFO MHz 54.0 70.0 121.0 3800.0 3800.0 False -3800.0 -3800.0 False + DGD ps 5.37 5.56 5.81 7.0 7.0 False 0.0 0.0 False + SOPMD ps^2 0.0 0.0 0.0 655.35 655.35 False 0.0 0.0 False + SOP ROC krad/s 1.0 1.0 2.0 N/A N/A N/A N/A N/A N/A + Pre-FEC BER N/A 4.58E-04 4.66E-04 5.76E-04 1.25E-02 1.10E-02 0.0 0.0 0.0 0.0 + Post-FEC BER N/A 0.0 0.0 0.0 1000.0 1.0 False 0.0 0.0 False + EVM % 100.0 100.0 100.0 N/A N/A N/A N/A N/A N/A +""" + test_sfp_eeprom_dom_all_output = """\ Ethernet0: SFP EEPROM detected Application Advertisement: N/A @@ -341,6 +363,14 @@ Ethernet64 Present """ +test_qsfp_dd_pm_all_output = """\ +Ethernet0: Transceiver performance monitoring not applicable + +Ethernet4: Transceiver performance monitoring not applicable + +Ethernet64: Transceiver performance monitoring not applicable +""" + class TestSFP(object): @classmethod def setup_class(cls): @@ -441,6 +471,17 @@ def test_rj45_eeprom(self): expected = "Ethernet36: SFP EEPROM is not applicable for RJ45 port" assert result_lines == expected + def test_qsfp_dd_pm(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["pm"], ["Ethernet44"]) + assert result.exit_code == 0 + assert "\n".join([ l.rstrip() for l in 
result.output.split('\n')]) == test_qsfp_dd_pm_output + + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["pm"], ["Ethernet200"]) + result_lines = result.output.strip('\n') + expected = "Ethernet200: Transceiver performance monitoring not applicable" + assert result_lines == expected + @classmethod def teardown_class(cls): print("TEARDOWN") @@ -497,15 +538,11 @@ def test_sfp_eeprom_with_ns(self): expected = "Ethernet200: SFP EEPROM Not detected" assert result_lines == expected - def test_sfp_eeprom_with_ns(self): + def test_qsfp_dd_pm_with_ns(self): runner = CliRunner() - result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["eeprom"], ["Ethernet0 -n asic0"]) - assert result.exit_code == 0 - assert "\n".join([ l.rstrip() for l in result.output.split('\n')]) == test_sfp_eeprom_output - - result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["eeprom"], ["Ethernet200 -n asic0"]) + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["pm"], ["Ethernet0 -n asic0"]) result_lines = result.output.strip('\n') - expected = "Ethernet200: SFP EEPROM Not detected" + expected = "Ethernet0: Transceiver performance monitoring not applicable" assert result_lines == expected def test_sfp_eeprom_all(self): @@ -527,6 +564,12 @@ def test_is_rj45_port(self): sys.modules.pop('sonic_platform') assert platform_sfputil_helper.is_rj45_port("Ethernet0") == False + def test_qsfp_dd_pm_all(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["pm"]) + assert result.exit_code == 0 + assert "\n".join([ l.rstrip() for l in result.output.split('\n')]) == test_qsfp_dd_pm_all_output + @classmethod def teardown_class(cls): print("TEARDOWN") From 5d23934f93c9795d3f2154da83b9ed2579029d71 Mon Sep 17 00:00:00 2001 From: jfeng-arista <98421150+jfeng-arista@users.noreply.github.com> Date: Wed, 1 Feb 2023 11:29:49 -0800 Subject: [PATCH 06/66] [chassis][voq] Add asic id for linecards so "show fabric counters queue/port" can work. (#2499) * Add asic id for linecards so "show fabric counters queue/port" can work. * Add test coverage --------- Signed-off-by: Jie Feng --- scripts/fabricstat | 20 +++-- tests/fabricstat_test.py | 30 +++++++ tests/mock_tables/counters_db.json | 140 +++++++++++++++++++++++++++++ tests/mock_tables/state_db.json | 34 +++++++ 4 files changed, 218 insertions(+), 6 deletions(-) diff --git a/scripts/fabricstat b/scripts/fabricstat index e5c7d09f3b..fcc0983ade 100755 --- a/scripts/fabricstat +++ b/scripts/fabricstat @@ -14,7 +14,7 @@ import utilities_common.multi_asic as multi_asic_util # mock the redis for unit test purposes # try: - if os.environ["UTILITIES_UNIT_TESTING"] == "2": + if os.environ["UTILITIES_UNIT_TESTING"] == "1" or os.environ["UTILITIES_UNIT_TESTING"] == "2": modules_path = os.path.join(os.path.dirname(__file__), "..") tests_path = os.path.join(modules_path, "tests") sys.path.insert(0, modules_path) @@ -122,17 +122,21 @@ class FabricPortStat(FabricStat): table = [] header = None - asic = multi_asic.get_asic_id_from_name(self.namespace) + # Default ASIC name is 0 for single-ASIC systems. For multi-ASIC systems, + # derive name from namespace. 
+ asic_name = '0' + if self.namespace: + asic_name = multi_asic.get_asic_id_from_name(self.namespace) for key, data in cnstat_dict.items(): port_id = key[len(PORT_NAME_PREFIX):] if errors_only: header = portstat_header_errors_only - table.append((asic, port_id, self.get_port_state(key), + table.append((asic_name, port_id, self.get_port_state(key), data.crc, data.fec_correctable, data.fec_uncorrectable, data.symbol_err)) else: header = portstat_header_all - table.append((asic, port_id, self.get_port_state(key), + table.append((asic_name, port_id, self.get_port_state(key), data.in_cell, data.in_octet, data.out_cell, data.out_octet, data.crc, data.fec_correctable, data.fec_uncorrectable, data.symbol_err)) @@ -168,11 +172,15 @@ class FabricQueueStat(FabricStat): return table = [] - asic = multi_asic.get_asic_id_from_name(self.namespace) + # Default ASIC name is 0 for single-ASIC systems. For multi-ASIC systems, + # derive name from namespace. + asic_name = '0' + if self.namespace: + asic_name = multi_asic.get_asic_id_from_name(self.namespace) for key, data in cnstat_dict.items(): port_name, queue_id = key.split(':') port_id = port_name[len(PORT_NAME_PREFIX):] - table.append((asic, port_id, self.get_port_state(port_name), queue_id, + table.append((asic_name, port_id, self.get_port_state(port_name), queue_id, data.curbyte, data.curlevel, data.watermarklevel)) print(tabulate(table, queuestat_header, tablefmt='simple', stralign='right')) diff --git a/tests/fabricstat_test.py b/tests/fabricstat_test.py index fb76bb41d7..7c2174b761 100644 --- a/tests/fabricstat_test.py +++ b/tests/fabricstat_test.py @@ -120,6 +120,36 @@ 7 0 93 up """ +class TestFabricStat(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "1" + + def test_single_show_fabric_counters(self): + from .mock_tables import mock_single_asic + import importlib + importlib.reload(mock_single_asic) + from .mock_tables import dbconnector + dbconnector.load_database_config + dbconnector.load_namespace_config() + + return_code, result = get_result_and_return_code('fabricstat') + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == multi_asic_fabric_counters_asic0 + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ["PATH"] = os.pathsep.join( + os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ["UTILITIES_UNIT_TESTING"] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + + class TestMultiAsicFabricStat(object): @classmethod def setup_class(cls): diff --git a/tests/mock_tables/counters_db.json b/tests/mock_tables/counters_db.json index 03b29cdded..f2caba2449 100644 --- a/tests/mock_tables/counters_db.json +++ b/tests/mock_tables/counters_db.json @@ -1626,6 +1626,146 @@ "oid:0x1500000000067d": "SAI_QUEUE_TYPE_UNICAST_VOQ", "oid:0x1500000000067e": "SAI_QUEUE_TYPE_UNICAST_VOQ" }, + "COUNTERS_FABRIC_PORT_NAME_MAP" : { + "PORT0": "oid:0x1000000000143", + "PORT1": "oid:0x1000000000144", + "PORT2": "oid:0x1000000000145", + "PORT3": "oid:0x1000000000146", + "PORT4": "oid:0x1000000000147", + "PORT5": "oid:0x1000000000148", + "PORT6": "oid:0x1000000000149", + "PORT7": "oid:0x100000000014a" + }, + "COUNTERS:oid:0x1000000000143": { + "SAI_PORT_STAT_IF_OUT_FABRIC_DATA_UNITS": "0", + "SAI_PORT_STAT_IF_OUT_OCTETS": "0", + "SAI_PORT_STAT_IF_IN_OCTETS": "1113", + "SAI_PORT_STAT_IF_IN_ERRORS": "0", + "SAI_PORT_STAT_IF_IN_FABRIC_DATA_UNITS": "6", + 
"SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES": "5", + "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES": "1759692040", + "SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS": "5" + }, + "COUNTERS:oid:0x1000000000144": { + "SAI_PORT_STAT_IF_OUT_FABRIC_DATA_UNITS": "0", + "SAI_PORT_STAT_IF_OUT_OCTETS": "0", + "SAI_PORT_STAT_IF_IN_OCTETS": "0", + "SAI_PORT_STAT_IF_IN_ERRORS": "0", + "SAI_PORT_STAT_IF_IN_FABRIC_DATA_UNITS": "0", + "SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES": "0", + "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES": "58977677898", + "SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS": "0" + }, + "COUNTERS:oid:0x1000000000145": { + "SAI_PORT_STAT_IF_OUT_FABRIC_DATA_UNITS": "0", + "SAI_PORT_STAT_IF_OUT_OCTETS": "0", + "SAI_PORT_STAT_IF_IN_OCTETS": "371", + "SAI_PORT_STAT_IF_IN_ERRORS": "0", + "SAI_PORT_STAT_IF_IN_FABRIC_DATA_UNITS": "2", + "SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES": "0", + "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES": "1769448760", + "SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS": "0" + }, + "COUNTERS:oid:0x1000000000146": { + "SAI_PORT_STAT_IF_OUT_FABRIC_DATA_UNITS": "0", + "SAI_PORT_STAT_IF_OUT_OCTETS": "0", + "SAI_PORT_STAT_IF_IN_OCTETS": "0", + "SAI_PORT_STAT_IF_IN_ERRORS": "0", + "SAI_PORT_STAT_IF_IN_FABRIC_DATA_UNITS": "0", + "SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES": "0", + "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES": "58976477608", + "SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS": "0" + }, + "COUNTERS:oid:0x1000000000147": { + "SAI_PORT_STAT_IF_OUT_FABRIC_DATA_UNITS": "0", + "SAI_PORT_STAT_IF_OUT_OCTETS": "0", + "SAI_PORT_STAT_IF_IN_OCTETS": "1855", + "SAI_PORT_STAT_IF_IN_ERRORS": "0", + "SAI_PORT_STAT_IF_IN_FABRIC_DATA_UNITS": "10", + "SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES": "73", + "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES": "1763293100", + "SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS": "73" + }, + "COUNTERS:oid:0x1000000000148": { + "SAI_PORT_STAT_IF_OUT_FABRIC_DATA_UNITS": "0", + "SAI_PORT_STAT_IF_OUT_OCTETS": "0", + "SAI_PORT_STAT_IF_IN_OCTETS": "0", + "SAI_PORT_STAT_IF_IN_ERRORS": "0", + "SAI_PORT_STAT_IF_IN_FABRIC_DATA_UNITS": "0", + "SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES": "44196", + "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES": "58975150569", + "SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS": "0" + }, + "COUNTERS:oid:0x1000000000149": { + "SAI_PORT_STAT_IF_OUT_FABRIC_DATA_UNITS": "0", + "SAI_PORT_STAT_IF_OUT_OCTETS": "0", + "SAI_PORT_STAT_IF_IN_OCTETS": "742", + "SAI_PORT_STAT_IF_IN_ERRORS": "0", + "SAI_PORT_STAT_IF_IN_FABRIC_DATA_UNITS": "4", + "SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES": "10", + "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES": "1763174090", + "SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS": "0" + }, + "COUNTERS:oid:0x100000000014a": { + "SAI_PORT_STAT_IF_OUT_FABRIC_DATA_UNITS": "0", + "SAI_PORT_STAT_IF_OUT_OCTETS": "0", + "SAI_PORT_STAT_IF_IN_OCTETS": "1855", + "SAI_PORT_STAT_IF_IN_ERRORS": "0", + "SAI_PORT_STAT_IF_IN_FABRIC_DATA_UNITS": "10", + "SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES": "187", + "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES": "1768439529", + "SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS": "1331" + }, + "COUNTERS_FABRIC_QUEUE_NAME_MAP" : { + "PORT0:0": "oid:0x15000000000186", + "PORT1:0": "oid:0x15000000000187", + "PORT2:0": "oid:0x15000000000188", + "PORT3:0": "oid:0x15000000000189", + "PORT4:0": "oid:0x1500000000018a", + "PORT5:0": "oid:0x1500000000018b", + "PORT6:0": "oid:0x1500000000018c", + "PORT7:0": "oid:0x1500000000018d" + }, + "COUNTERS:oid:0x15000000000186": { + "SAI_QUEUE_STAT_WATERMARK_LEVEL": "20", + 
"SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES": "763", + "SAI_QUEUE_STAT_CURR_OCCUPANCY_LEVEL": "12" + }, + "COUNTERS:oid:0x15000000000187": { + "SAI_QUEUE_STAT_WATERMARK_LEVEL": "0", + "SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES": "0", + "SAI_QUEUE_STAT_CURR_OCCUPANCY_LEVEL": "0" + }, + "COUNTERS:oid:0x15000000000188": { + "SAI_QUEUE_STAT_WATERMARK_LEVEL": "8", + "SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES": "104", + "SAI_QUEUE_STAT_CURR_OCCUPANCY_LEVEL": "8" + }, + "COUNTERS:oid:0x15000000000189": { + "SAI_QUEUE_STAT_WATERMARK_LEVEL": "0", + "SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES": "0", + "SAI_QUEUE_STAT_CURR_OCCUPANCY_LEVEL": "0" + }, + "COUNTERS:oid:0x1500000000018a": { + "SAI_QUEUE_STAT_WATERMARK_LEVEL": "22", + "SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES": "1147", + "SAI_QUEUE_STAT_CURR_OCCUPANCY_LEVEL": "14" + }, + "COUNTERS:oid:0x1500000000018b": { + "SAI_QUEUE_STAT_WATERMARK_LEVEL": "0", + "SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES": "0", + "SAI_QUEUE_STAT_CURR_OCCUPANCY_LEVEL": "0" + }, + "COUNTERS:oid:0x1500000000018c": { + "SAI_QUEUE_STAT_WATERMARK_LEVEL": "10", + "SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES": "527", + "SAI_QUEUE_STAT_CURR_OCCUPANCY_LEVEL": "8" + }, + "COUNTERS:oid:0x1500000000018d": { + "SAI_QUEUE_STAT_WATERMARK_LEVEL": "17", + "SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES": "1147", + "SAI_QUEUE_STAT_CURR_OCCUPANCY_LEVEL": "14" + }, "COUNTERS_DEBUG_NAME_PORT_STAT_MAP": { "DEBUG_0": "SAI_PORT_STAT_IN_DROP_REASON_RANGE_BASE", "DEBUG_2": "SAI_PORT_STAT_OUT_CONFIGURED_DROP_REASONS_1_DROPPED_PKTS" diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json index 9fda6fa50f..d32836aecd 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -1121,5 +1121,39 @@ }, "ADVERTISE_NETWORK_TABLE|fccc:a250:a251::a6:1/128": { "profile": "" + }, + "FABRIC_PORT_TABLE|PORT0" : { + "STATUS": "up", + "REMOTE_MOD": "0", + "REMOTE_PORT": "79" + }, + "FABRIC_PORT_TABLE|PORT1" : { + "STATUS": "down" + }, + "FABRIC_PORT_TABLE|PORT2" : { + "STATUS": "up", + "REMOTE_MOD": "0", + "REMOTE_PORT": "94" + }, + "FABRIC_PORT_TABLE|PORT3" : { + "STATUS": "down" + }, + "FABRIC_PORT_TABLE|PORT4" : { + "STATUS": "up", + "REMOTE_MOD": "0", + "REMOTE_PORT": "85" + }, + "FABRIC_PORT_TABLE|PORT5" : { + "STATUS": "down" + }, + "FABRIC_PORT_TABLE|PORT6" : { + "STATUS": "up", + "REMOTE_MOD": "0", + "REMOTE_PORT": "84" + }, + "FABRIC_PORT_TABLE|PORT7" : { + "STATUS": "up", + "REMOTE_MOD": "0", + "REMOTE_PORT": "93" } } From c57c3fadc493d219027c91ff7c26e3d810ef3693 Mon Sep 17 00:00:00 2001 From: mihirpat1 <112018033+mihirpat1@users.noreply.github.com> Date: Wed, 1 Feb 2023 13:48:57 -0800 Subject: [PATCH 07/66] show logging CLI support for logs stored in tmpfs (#2641) * show logging CLI support for logs stored in tmpfs Signed-off-by: Mihir Patel * Fixed testcase failures * Reverted unwanted change in a file * Added testcase for syslog.1 in log.tmpfs directory * mend --------- Signed-off-by: Mihir Patel --- show/main.py | 12 +++++--- tests/show_test.py | 69 +++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 76 insertions(+), 5 deletions(-) diff --git a/show/main.py b/show/main.py index e8c607facc..117958f83a 100755 --- a/show/main.py +++ b/show/main.py @@ -1257,14 +1257,18 @@ def table(verbose): @click.option('--verbose', is_flag=True, help="Enable verbose output") def logging(process, lines, follow, verbose): """Show system log""" + if os.path.exists("/var/log.tmpfs"): + log_path = "/var/log.tmpfs" + else: + log_path = "/var/log" if follow: - cmd = "sudo tail -F /var/log/syslog" + cmd = "sudo tail -F 
{}/syslog".format(log_path) run_command(cmd, display_cmd=verbose) else: - if os.path.isfile("/var/log/syslog.1"): - cmd = "sudo cat /var/log/syslog.1 /var/log/syslog" + if os.path.isfile("{}/syslog.1".format(log_path)): + cmd = "sudo cat {}/syslog.1 {}/syslog".format(log_path, log_path) else: - cmd = "sudo cat /var/log/syslog" + cmd = "sudo cat {}/syslog".format(log_path) if process is not None: cmd += " | grep '{}'".format(process) diff --git a/tests/show_test.py b/tests/show_test.py index 87c1b5a17e..114dbc3c6c 100644 --- a/tests/show_test.py +++ b/tests/show_test.py @@ -1,9 +1,12 @@ import os import sys +import pytest import show.main as show from click.testing import CliRunner from unittest import mock -from unittest.mock import call, MagicMock +from unittest.mock import call, MagicMock, patch + +EXPECTED_BASE_COMMAND = 'sudo ' test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) @@ -49,3 +52,67 @@ def teardown_class(cls): print("TEARDOWN") os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) os.environ["UTILITIES_UNIT_TESTING"] = "0" + +@patch('show.main.run_command') +@pytest.mark.parametrize( + "cli_arguments,expected", + [ + ([], 'cat /var/log/syslog'), + (['xcvrd'], "cat /var/log/syslog | grep 'xcvrd'"), + (['-l', '10'], 'cat /var/log/syslog | tail -10'), + (['-f'], 'tail -F /var/log/syslog'), + ] +) +def test_show_logging_default(run_command, cli_arguments, expected): + runner = CliRunner() + result = runner.invoke(show.cli.commands["logging"], cli_arguments) + run_command.assert_called_with(EXPECTED_BASE_COMMAND + expected, display_cmd=False) + +@patch('show.main.run_command') +@patch('os.path.isfile', MagicMock(return_value=True)) +@pytest.mark.parametrize( + "cli_arguments,expected", + [ + ([], 'cat /var/log/syslog.1 /var/log/syslog'), + (['xcvrd'], "cat /var/log/syslog.1 /var/log/syslog | grep 'xcvrd'"), + (['-l', '10'], 'cat /var/log/syslog.1 /var/log/syslog | tail -10'), + (['-f'], 'tail -F /var/log/syslog'), + ] +) +def test_show_logging_syslog_1(run_command, cli_arguments, expected): + runner = CliRunner() + result = runner.invoke(show.cli.commands["logging"], cli_arguments) + run_command.assert_called_with(EXPECTED_BASE_COMMAND + expected, display_cmd=False) + +@patch('show.main.run_command') +@patch('os.path.exists', MagicMock(return_value=True)) +@pytest.mark.parametrize( + "cli_arguments,expected", + [ + ([], 'cat /var/log.tmpfs/syslog'), + (['xcvrd'], "cat /var/log.tmpfs/syslog | grep 'xcvrd'"), + (['-l', '10'], 'cat /var/log.tmpfs/syslog | tail -10'), + (['-f'], 'tail -F /var/log.tmpfs/syslog'), + ] +) +def test_show_logging_tmpfs(run_command, cli_arguments, expected): + runner = CliRunner() + result = runner.invoke(show.cli.commands["logging"], cli_arguments) + run_command.assert_called_with(EXPECTED_BASE_COMMAND + expected, display_cmd=False) + +@patch('show.main.run_command') +@patch('os.path.isfile', MagicMock(return_value=True)) +@patch('os.path.exists', MagicMock(return_value=True)) +@pytest.mark.parametrize( + "cli_arguments,expected", + [ + ([], 'cat /var/log.tmpfs/syslog.1 /var/log.tmpfs/syslog'), + (['xcvrd'], "cat /var/log.tmpfs/syslog.1 /var/log.tmpfs/syslog | grep 'xcvrd'"), + (['-l', '10'], 'cat /var/log.tmpfs/syslog.1 /var/log.tmpfs/syslog | tail -10'), + (['-f'], 'tail -F /var/log.tmpfs/syslog'), + ] +) +def test_show_logging_tmpfs_syslog_1(run_command, cli_arguments, expected): + runner = CliRunner() + result = runner.invoke(show.cli.commands["logging"], cli_arguments) + 
run_command.assert_called_with(EXPECTED_BASE_COMMAND + expected, display_cmd=False) From a2520e60d471ebc4791a50d5bb9ca9691bd5c456 Mon Sep 17 00:00:00 2001 From: siqbal1986 Date: Mon, 6 Feb 2023 12:00:09 -0800 Subject: [PATCH 08/66] Fixed a bug in "show vnet routes all" causing screen overrun. (#2644) Signed-off-by: siqbal1486 --- show/vnet.py | 40 ++++++++++++++++++++++----- tests/mock_tables/appl_db.json | 4 +-- tests/mock_tables/state_db.json | 2 +- tests/show_vnet_test.py | 45 +++++++++++++++++++++++++++++++ tests/show_vnet_vxlan_cli_test.py | 22 ++------------- 5 files changed, 84 insertions(+), 29 deletions(-) diff --git a/show/vnet.py b/show/vnet.py index ba6f81ce8d..239e6d2206 100644 --- a/show/vnet.py +++ b/show/vnet.py @@ -333,6 +333,29 @@ def routes(): """Show vnet routes related information""" pass +def pretty_print(table, r, epval, mac_addr, vni, state): + endpoints = epval.split(',') + row_width = 3 + max_len = 0 + for ep in endpoints: + max_len = len(ep) if len(ep) > max_len else max_len + if max_len > 15: + row_width = 2 + iter = 0 + while iter < len(endpoints): + if iter +row_width > len(endpoints): + r.append(",".join(endpoints[iter:])) + else: + r.append(",".join(endpoints[iter:iter + row_width])) + if iter == 0: + r.append(mac_addr) + r.append(vni) + r.append(state) + else: + r.extend(["", "", ""]) + iter += row_width + table.append(r) + r = ["",""] @routes.command() def all(): @@ -373,12 +396,17 @@ def all(): state_db_key = '|'.join(k.split(":",2)) val = appl_db.get_all(appl_db.APPL_DB, k) val_state = state_db.get_all(state_db.STATE_DB, state_db_key) - r.append(val.get('endpoint')) - r.append(val.get('mac_address')) - r.append(val.get('vni')) - if val_state: - r.append(val_state.get('state')) - table.append(r) + epval = val.get('endpoint') + if len(epval) < 40: + r.append(epval) + r.append(val.get('mac_address')) + r.append(val.get('vni')) + if val_state: + r.append(val_state.get('state')) + table.append(r) + continue + state = val_state.get('state') if val_state else "" + pretty_print(table, r, epval, val.get('mac_address'), val.get('vni'), state ) click.echo(tabulate(table, header)) diff --git a/tests/mock_tables/appl_db.json b/tests/mock_tables/appl_db.json index 8554a07eaf..e330bdaddc 100644 --- a/tests/mock_tables/appl_db.json +++ b/tests/mock_tables/appl_db.json @@ -329,8 +329,8 @@ "endpoint_monitor":"100.251.7.1" }, "VNET_ROUTE_TUNNEL_TABLE:Vnet_v6_in_v6-0:fddd:a156:a251::a6:1/128": { - "endpoint": "fddd:a100:a251::a10:1,fddd:a101:a251::a10:1", - "endpoint_monitor":"fddd:a100:a251::a10:1,fddd:a101:a251::a10:1" + "endpoint": "fddd:a100:a251::a10:1,fddd:a101:a251::a10:1,fddd:a102:a251::a10:1,fddd:a103:a251::a10:1", + "endpoint_monitor":"fddd:a100:a251::a10:1,fddd:a101:a251::a10:1,fddd:a102:a251::a10:1,fddd:a103:a251::a10:1" }, "VNET_ROUTE_TUNNEL_TABLE:test_v4_in_v4-0:160.162.191.1/32": { "endpoint":"100.251.7.1", diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json index d32836aecd..8462248a92 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -1086,7 +1086,7 @@ "state":"active" }, "VNET_ROUTE_TUNNEL_TABLE|Vnet_v6_in_v6-0|fddd:a156:a251::a6:1/128": { - "active_endpoints":"fddd:a100:a251::a10:1,fddd:a101:a251::a10:1", + "active_endpoints":"fddd:a100:a251::a10:1,fddd:a101:a251::a10:1,fddd:a102:a251::a10:1,fddd:a103:a251::a10:1", "state":"active" }, "BFD_SESSION_TABLE|default|default|100.251.7.1": { diff --git a/tests/show_vnet_test.py b/tests/show_vnet_test.py index 5317b9b3ff..eff75a583f 100644 --- 
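The pretty_print() helper above folds a long comma-separated endpoint list into rows of two or three endpoints (two whenever any single endpoint exceeds 15 characters), and pads the mac/vni/state columns with empty cells on continuation rows. A usage sketch mirroring the unit tests that follow; it assumes sonic-utilities is importable:

```python
import show.vnet as vnet

table = []
row = ["Vnet_v6_in_v6-0", "fddd:a156:a251::a6:1/128"]
# Four IPv6 endpoints, each longer than 15 chars, so rows wrap at width 2.
epval = ("fddd:a100:a251::a10:1,fddd:a101:a251::a10:1,"
         "fddd:a102:a251::a10:1,fddd:a103:a251::a10:1")
vnet.pretty_print(table, row, epval, "", "", "active")
# table[0] holds the first two endpoints plus the mac/vni/state cells;
# table[1] holds the remaining endpoints with empty trailing cells.
```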
a/tests/show_vnet_test.py +++ b/tests/show_vnet_test.py @@ -2,6 +2,7 @@ from click.testing import CliRunner from utilities_common.db import Db import show.main as show +import show.vnet as vnet class TestShowVnetRoutesAll(object): @classmethod @@ -9,6 +10,49 @@ def setup_class(cls): print("SETUP") os.environ["UTILITIES_UNIT_TESTING"] = "1" + def test_Preety_print(self): + table =[] + row = ["Vnet_v6_in_v6-0", "fddd:a156:a251::a6:1/128"] + mac_addr = "" + vni = "" + state = "active" + epval = "fddd:a100:a251::a10:1,fddd:a101:a251::a10:1" + + vnet.pretty_print(table, row, epval, mac_addr, vni, state) + expected_output = [['Vnet_v6_in_v6-0', 'fddd:a156:a251::a6:1/128', 'fddd:a100:a251::a10:1,fddd:a101:a251::a10:1', '', '', 'active']] + assert table == expected_output + + table =[] + row = ["Vnet_v6_in_v6-0", "fddd:a156:a251::a6:1/128"] + epval = "fddd:a100:a251::a10:1,fddd:a101:a251::a10:1,fddd:a100:a251::a11:1,fddd:a100:a251::a12:1,fddd:a100:a251::a13:1" + vnet.pretty_print(table, row, epval, mac_addr, vni, state) + expected_output = [ + ['Vnet_v6_in_v6-0', 'fddd:a156:a251::a6:1/128', 'fddd:a100:a251::a10:1,fddd:a101:a251::a10:1', '', '', 'active'], + ['', '', 'fddd:a100:a251::a11:1,fddd:a100:a251::a12:1', '', '', ''], + ['', '', 'fddd:a100:a251::a13:1', '', '', ''] + ] + assert table == expected_output + + table =[] + row = ["Vnet_v6_in_v6-0", "fddd:a156:a251::a6:1/128"] + epval = "192.168.1.1,192.168.1.2,192.168.1.3,192.168.1.4,192.168.1.5,192.168.1.6,192.168.1.7,192.168.1.8,192.168.1.9,192.168.1.10,192.168.1.11,192.168.1.12,192.168.1.13,192.168.1.14,192.168.1.15" + vnet.pretty_print(table, row, epval, mac_addr, vni, state) + expected_output =[ + ['Vnet_v6_in_v6-0', 'fddd:a156:a251::a6:1/128', '192.168.1.1,192.168.1.2,192.168.1.3', '', '', 'active'], + ['', '', '192.168.1.4,192.168.1.5,192.168.1.6', '', '', ''], + ['', '', '192.168.1.7,192.168.1.8,192.168.1.9', '', '', ''], + ['', '', '192.168.1.10,192.168.1.11,192.168.1.12', '', '', ''], + ['', '', '192.168.1.13,192.168.1.14,192.168.1.15', '', '', '']] + assert table == expected_output + + table =[] + row = ["Vnet_v6_in_v6-0", "fddd:a156:a251::a6:1/128"] + epval = "192.168.1.1" + vnet.pretty_print(table, row, epval, mac_addr, vni, state) + expected_output =[ + ['Vnet_v6_in_v6-0', 'fddd:a156:a251::a6:1/128', '192.168.1.1', '', '', 'active']] + assert table == expected_output + def test_show_vnet_routes_all_basic(self): runner = CliRunner() db = Db() @@ -22,6 +66,7 @@ def test_show_vnet_routes_all_basic(self): vnet name prefix endpoint mac address vni status --------------- ------------------------ ------------------------------------------- ------------- ----- -------- Vnet_v6_in_v6-0 fddd:a156:a251::a6:1/128 fddd:a100:a251::a10:1,fddd:a101:a251::a10:1 active + fddd:a102:a251::a10:1,fddd:a103:a251::a10:1 test_v4_in_v4-0 160.162.191.1/32 100.251.7.1 active test_v4_in_v4-0 160.163.191.1/32 100.251.7.1 active test_v4_in_v4-0 160.164.191.1/32 100.251.7.1 diff --git a/tests/show_vnet_vxlan_cli_test.py b/tests/show_vnet_vxlan_cli_test.py index f0cee3b257..c9aa5b6223 100644 --- a/tests/show_vnet_vxlan_cli_test.py +++ b/tests/show_vnet_vxlan_cli_test.py @@ -9,32 +9,12 @@ #test_path = os.path.dirname(os.path.abspath(__file__)) - - class TestShowVnet(object): @classmethod def setup_class(cls): print("SETUP") os.environ["UTILITIES_UNIT_TESTING"] = "1" - def test_show_vnet_routes_all_basic(self): - runner = CliRunner() - db = Db() - result = runner.invoke(show.cli.commands['vnet'].commands['routes'].commands['all'], [], obj=db) - assert 
result.exit_code == 0 - expected_output = """\ -vnet name prefix nexthop interface ------------ -------- --------- ----------- - -vnet name prefix endpoint mac address vni status ---------------- ------------------------ ------------------------------------------- ------------- ----- -------- -Vnet_v6_in_v6-0 fddd:a156:a251::a6:1/128 fddd:a100:a251::a10:1,fddd:a101:a251::a10:1 active -test_v4_in_v4-0 160.162.191.1/32 100.251.7.1 active -test_v4_in_v4-0 160.163.191.1/32 100.251.7.1 active -test_v4_in_v4-0 160.164.191.1/32 100.251.7.1 -""" - assert result.output == expected_output - def test_show_vnet_endpoint(self): runner = CliRunner() db = Db() @@ -45,6 +25,8 @@ def test_show_vnet_endpoint(self): --------------------- --------------------- -------------- -------- fddd:a100:a251::a10:1 fddd:a100:a251::a10:1 1 Unknown fddd:a101:a251::a10:1 fddd:a101:a251::a10:1 1 Down +fddd:a102:a251::a10:1 fddd:a102:a251::a10:1 1 Unknown +fddd:a103:a251::a10:1 fddd:a103:a251::a10:1 1 Unknown 100.251.7.1 100.251.7.1 3 Up """ assert result.output == expected_output From f9130d1cc3e376667335919a9c6c95218412d04c Mon Sep 17 00:00:00 2001 From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com> Date: Tue, 7 Feb 2023 18:07:52 +0200 Subject: [PATCH 09/66] [db_migrator] make LOG_LEVEL_DB migration more robust (#2651) It could be that LOG_LEVEL_DB includes some invalid data and/or a KEY_SET that is not cleaned up due to an issue, for example we observed _gearsyncd_KEY_SET set included in the LOG_LEVEL_DB and preserved in warm reboot. However, this key is not of type hash which leads to an exception and migration failure. The migration logic should be more robust allowing users to upgrade even though some daemon has left overs in the LOG_LEVEL_DB or invalid data is written. - What I did To fix migration issue that leads to device configuration being lost. - How I did it Wrap the logic in try/except/finally. - How to verify it 202205 -> 202211/master upgrade. Signed-off-by: Stepan Blyschak --- scripts/db_migrator.py | 20 +++++++++++-------- .../loglevel_db/logger_tables_input.json | 7 +++++-- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/scripts/db_migrator.py b/scripts/db_migrator.py index 6c8ef21b6f..c52e38bd63 100755 --- a/scripts/db_migrator.py +++ b/scripts/db_migrator.py @@ -822,14 +822,18 @@ def version_3_0_5(self): keys = self.loglevelDB.keys(self.loglevelDB.LOGLEVEL_DB, "*") if keys is not None: for key in keys: - if key != "JINJA2_CACHE": - fvs = self.loglevelDB.get_all(self.loglevelDB.LOGLEVEL_DB, key) - component = key.split(":")[1] - loglevel = fvs[loglevel_field] - logoutput = fvs[logoutput_field] - self.configDB.set(self.configDB.CONFIG_DB, '{}|{}'.format(table_name, component), loglevel_field, loglevel) - self.configDB.set(self.configDB.CONFIG_DB, '{}|{}'.format(table_name, component), logoutput_field, logoutput) - self.loglevelDB.delete(self.loglevelDB.LOGLEVEL_DB, key) + try: + if key != "JINJA2_CACHE": + fvs = self.loglevelDB.get_all(self.loglevelDB.LOGLEVEL_DB, key) + component = key.split(":")[1] + loglevel = fvs[loglevel_field] + logoutput = fvs[logoutput_field] + self.configDB.set(self.configDB.CONFIG_DB, '{}|{}'.format(table_name, component), loglevel_field, loglevel) + self.configDB.set(self.configDB.CONFIG_DB, '{}|{}'.format(table_name, component), logoutput_field, logoutput) + except Exception as err: + log.log_warning('Error occured during LOGLEVEL_DB migration for {}. 
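The migration now treats every LOGLEVEL_DB key independently: a malformed entry (for example a leftover KEY_SET of the wrong redis type) is logged and skipped, while the finally block still deletes it so it cannot survive another warm reboot. Distilled shape of the per-key handling; the helper is illustrative, and the 'LOGGER' table name and field tuple are assumptions based on the fields shown in the test input below:

```python
def migrate_loglevel_entry(loglevel_db, config_db, key,
                           table_name='LOGGER',
                           fields=('LOGLEVEL', 'LOGOUTPUT')):
    try:
        if key != "JINJA2_CACHE":
            fvs = loglevel_db.get_all(loglevel_db.LOGLEVEL_DB, key)
            component = key.split(":")[1]
            for field in fields:
                config_db.set(config_db.CONFIG_DB,
                              '{}|{}'.format(table_name, component),
                              field, fvs[field])
    except Exception as err:
        # The real code calls log.log_warning() here.
        print('LOGLEVEL_DB migration error {}, ignoring key {}'.format(err, key))
    finally:
        # Always consume the source key, valid or not.
        loglevel_db.delete(loglevel_db.LOGLEVEL_DB, key)
```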
Ignoring key {}'.format(err, key)) + finally: + self.loglevelDB.delete(self.loglevelDB.LOGLEVEL_DB, key) self.set_version('version_3_0_6') return 'version_3_0_6' diff --git a/tests/db_migrator_input/loglevel_db/logger_tables_input.json b/tests/db_migrator_input/loglevel_db/logger_tables_input.json index 02377ea0a4..ed1bc8057f 100644 --- a/tests/db_migrator_input/loglevel_db/logger_tables_input.json +++ b/tests/db_migrator_input/loglevel_db/logger_tables_input.json @@ -7,5 +7,8 @@ "LOGLEVEL": "SAI_LOG_LEVEL_NOTICE", "LOGOUTPUT": "SYSLOG" }, - "JINJA2_CACHE": {} -} \ No newline at end of file + "JINJA2_CACHE": {}, + "INVALID:INVALID": { + "invalid": "invalid" + } +} From 6e0e1dafb6622fbb1e6ad64ded7ad79936b47ab4 Mon Sep 17 00:00:00 2001 From: Sudharsan Dhamal Gopalarathnam Date: Tue, 7 Feb 2023 12:14:49 -0800 Subject: [PATCH 10/66] [sai_failure_dump]Invoking dump during SAI failure (#2633) * Added logic in techsupport script to collect SAI failure dump --- scripts/generate_dump | 64 ++++++++++++++++++++++++++++++++++--------- 1 file changed, 51 insertions(+), 13 deletions(-) diff --git a/scripts/generate_dump b/scripts/generate_dump index 7c94806943..4400f4e984 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -1053,21 +1053,26 @@ collect_mellanox() { local sai_dump_folder="/tmp/saisdkdump" local sai_dump_filename="${sai_dump_folder}/sai_sdk_dump_$(date +"%m_%d_%Y_%I_%M_%p")" - ${CMD_PREFIX}docker exec syncd mkdir -p $sai_dump_folder - ${CMD_PREFIX}docker exec syncd saisdkdump -f $sai_dump_filename - - if [ $? != 0 ]; then - echo "Failed to collect saisdkdump." - fi + if [[ "$( docker container inspect -f '{{.State.Running}}' syncd )" == "true" ]]; then + if [[ x"$(sonic-db-cli APPL_DB EXISTS PORT_TABLE:PortInitDone)" == x"1" ]]; then + # Run saisdkdump only after the create_switch is known to be successful + ${CMD_PREFIX}docker exec syncd mkdir -p $sai_dump_folder + ${CMD_PREFIX}docker exec syncd saisdkdump -f $sai_dump_filename + + if [ $? != 0 ]; then + echo "Failed to collect saisdkdump." + fi - copy_from_docker syncd $sai_dump_folder $sai_dump_folder - echo "$sai_dump_folder" - for file in `ls $sai_dump_folder`; do - save_file ${sai_dump_folder}/${file} sai_sdk_dump true - done + copy_from_docker syncd $sai_dump_folder $sai_dump_folder + echo "$sai_dump_folder" + for file in `ls $sai_dump_folder`; do + save_file ${sai_dump_folder}/${file} sai_sdk_dump true + done - ${CMD_PREFIX}rm -rf $sai_dump_folder - ${CMD_PREFIX}docker exec syncd rm -rf $sai_dump_folder + ${CMD_PREFIX}rm -rf $sai_dump_folder + ${CMD_PREFIX}docker exec syncd rm -rf $sai_dump_folder + fi + fi # run 'hw-management-generate-dump.sh' script and save the result file HW_DUMP_FILE=/usr/bin/hw-management-generate-dump.sh @@ -1429,6 +1434,38 @@ save_crash_files() { fi } +############################################################################### +# Collect SAI failure dump files under /var/log/sai_failure_dump/. These files are +# created because of the orchagent abort triggered by SAI programming failure +# Globals: +# None +# Arguments: +# None +# Returns: +# None +############################################################################### +save_sai_failure_dump(){ + for file in $(find_files "/var/log/sai_failure_dump/"); do + if $TAR -tf $TARFILE | grep $BASE/log/$(basename $file); then + # if the files are already collected under the log/ dir + # just add a symbolic link + if [ ! 
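collect_mellanox above now refuses to call saisdkdump unless the syncd container is actually running and APPL_DB already holds PORT_TABLE:PortInitDone, i.e. create_switch is known to have succeeded. The same two checks expressed in Python, purely for illustration; docker and sonic-db-cli are assumed to be on PATH:

```python
import subprocess

def syncd_ready_for_saisdkdump():
    # Mirror of: docker container inspect -f '{{.State.Running}}' syncd
    running = subprocess.run(
        ["docker", "container", "inspect", "-f", "{{.State.Running}}", "syncd"],
        capture_output=True, text=True).stdout.strip() == "true"
    # Mirror of: sonic-db-cli APPL_DB EXISTS PORT_TABLE:PortInitDone
    port_init_done = subprocess.run(
        ["sonic-db-cli", "APPL_DB", "EXISTS", "PORT_TABLE:PortInitDone"],
        capture_output=True, text=True).stdout.strip() == "1"
    return running and port_init_done
```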
-z "${file##*.gz}" ]; then + # files saved under log/ are zipped with gz + file=$file.gz + fi + ${CMD_PREFIX}save_symlink ${file} sai_failure_dump log + else + if [ ! -z "${file##*.gz}" ]; then + ${CMD_PREFIX}save_file ${file} sai_failure_dump true + else + ${CMD_PREFIX}save_file ${file} sai_failure_dump false + fi + fi + #Clean up the file once its part of tech support + rm -f $file + done +} + ############################################################################### # Get number of ASICs in the platform # Globals: @@ -1709,6 +1746,7 @@ main() { save_log_files save_crash_files save_warmboot_files + save_sai_failure_dump if [[ "$asic" = "mellanox" ]]; then collect_mellanox_dfw_dumps From 5007f1f0424d517f2f852bba790553535697db42 Mon Sep 17 00:00:00 2001 From: vdahiya12 <67608553+vdahiya12@users.noreply.github.com> Date: Tue, 7 Feb 2023 12:30:18 -0800 Subject: [PATCH 11/66] [show] add support for gRPC show commands for `active-active` (#2629) Signed-off-by: vaibhav-dahiya vdahiya@microsoft.com This PR adds support for show mux hwmode muxdirection as well as show mux grpc muxdirection to show the state of gRPC connected to the SoCs for 'active-active' acble type vdahiya@sonic:~$ show mux grpc muxdirection Port Direction Presence PeerDirection ConnectivityState --------- ----------- ---------- --------------- ------------------- Ethernet0 active False active READY vdahiya@sonic:~$ vdahiya@sonic:~$ show mux grpc muxdirection --json { "HWMODE": { "Ethernet0": { "Direction": "active", "Presence": "False", "PeerDirection": "active", "ConnectivityState": "READY" } } } What I did Added support for the commands. How I did it How to verify it UT and running the changes on Testbed --- show/muxcable.py | 321 ++++++++++++++++++++++++++++---- tests/mock_tables/state_db.json | 18 ++ tests/muxcable_test.py | 187 +++++++++++++++++++ 3 files changed, 492 insertions(+), 34 deletions(-) diff --git a/show/muxcable.py b/show/muxcable.py index d9f0a94f15..b640d32135 100644 --- a/show/muxcable.py +++ b/show/muxcable.py @@ -36,6 +36,21 @@ VENDOR_MODEL_REGEX = re.compile(r"CAC\w{3}321P2P\w{2}MS") +def get_asic_index_for_port(port): + asic_index = None + if platform_sfputil is not None: + asic_index = platform_sfputil_helper.get_asic_id_for_logical_port(port) + if asic_index is None: + # TODO this import is only for unit test purposes, and should be removed once sonic_platform_base + # is fully mocked + import sonic_platform_base.sonic_sfp.sfputilhelper + asic_index = sonic_platform_base.sonic_sfp.sfputilhelper.SfpUtilHelper().get_asic_id_for_logical_port(port) + if asic_index is None: + port_name = platform_sfputil_helper.get_interface_alias(port, db) + click.echo("Got invalid asic index for port {}, cant retreive mux status".format(port_name)) + return 0 + return asic_index + def db_connect(db_name, namespace=EMPTY_NAMESPACE): return swsscommon.DBConnector(db_name, REDIS_TIMEOUT_MSECS, True, namespace) @@ -1239,6 +1254,67 @@ def get_hwmode_mux_direction_port(db, port): return res_dict +def create_active_active_mux_direction_json_result(result, port, db): + + port = platform_sfputil_helper.get_interface_alias(port, db) + result["HWMODE"][port] = {} + res_dict = get_grpc_cached_version_mux_direction_per_port(db, port) + result["HWMODE"][port]["Direction"] = res_dict["self_mux_direction"] + result["HWMODE"][port]["Presence"] = res_dict["presence"] + result["HWMODE"][port]["PeerDirection"] = res_dict["peer_mux_direction"] + result["HWMODE"][port]["ConnectivityState"] = res_dict["grpc_connection_status"] + + rc = 
res_dict["rc"] + + return rc + +def create_active_standby_mux_direction_json_result(result, port, db): + + res_dict = get_hwmode_mux_direction_port(db, port) + port = platform_sfputil_helper.get_interface_alias(port, db) + result["HWMODE"][port] = {} + result["HWMODE"][port]["Direction"] = res_dict[1] + result["HWMODE"][port]["Presence"] = res_dict[2] + + rc = res_dict[0] + + return rc + +def create_active_active_mux_direction_result(body, port, db): + + res_dict = get_grpc_cached_version_mux_direction_per_port(db, port) + temp_list = [] + port = platform_sfputil_helper.get_interface_alias(port, db) + temp_list.append(port) + temp_list.append(res_dict["self_mux_direction"]) + temp_list.append(res_dict["presence"]) + temp_list.append(res_dict["peer_mux_direction"]) + temp_list.append(res_dict["grpc_connection_status"]) + body.append(temp_list) + + rc = res_dict["rc"] + + return rc + +def create_active_standby_mux_direction_result(body, port, db): + + res_dict = get_hwmode_mux_direction_port(db, port) + + temp_list = [] + port = platform_sfputil_helper.get_interface_alias(port, db) + temp_list.append(port) + temp_list.append(res_dict[1]) + temp_list.append(res_dict[2]) + body.append(temp_list) + + rc = res_dict[0] + + delete_all_keys_in_db_table("APPL_DB", "XCVRD_SHOW_HWMODE_DIR_CMD") + delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_DIR_RSP") + delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_DIR_RES") + + return rc + @muxcable.group(cls=clicommon.AbbreviationGroup) def hwmode(): """Shows the muxcable hardware information directly""" @@ -1247,8 +1323,9 @@ def hwmode(): @hwmode.command() @click.argument('port', metavar='', required=False, default=None) +@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, help="display the output in json format") @clicommon.pass_db -def muxdirection(db, port): +def muxdirection(db, port, json_output): """Shows the current direction of the muxcable {active/standy}""" port = platform_sfputil_helper.get_interface_name(port, db) @@ -1256,30 +1333,42 @@ def muxdirection(db, port): delete_all_keys_in_db_table("APPL_DB", "XCVRD_SHOW_HWMODE_DIR_CMD") delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_DIR_RSP") delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_DIR_RES") + per_npu_configdb = {} - if port is not None: + namespaces = multi_asic.get_front_end_namespaces() + for namespace in namespaces: + asic_id = multi_asic.get_asic_index_from_namespace(namespace) + per_npu_configdb[asic_id] = ConfigDBConnector(use_unix_socket_path=False, namespace=namespace) + per_npu_configdb[asic_id].connect() + + if port is not None: + + asic_index = get_asic_index_for_port(port) + cable_type = get_optional_value_for_key_in_config_tbl(per_npu_configdb[asic_index], port, "cable_type", "MUX_CABLE") if check_port_in_mux_cable_table(port) == False: click.echo("Not Y-cable port") return CONFIG_FAIL - res_dict = get_hwmode_mux_direction_port(db, port) - - body = [] - temp_list = [] - headers = ['Port', 'Direction', 'Presence'] - port = platform_sfputil_helper.get_interface_alias(port, db) - temp_list.append(port) - temp_list.append(res_dict[1]) - temp_list.append(res_dict[2]) - body.append(temp_list) - - rc = res_dict[0] - click.echo(tabulate(body, headers=headers)) + if json_output: + result = {} + result ["HWMODE"] = {} + if cable_type == "active-active": + rc = create_active_active_mux_direction_json_result(result, port, db) + else: + rc = False + rc = create_active_standby_mux_direction_json_result(result, port, 
db) + click.echo("{}".format(json.dumps(result, indent=4))) - delete_all_keys_in_db_table("APPL_DB", "XCVRD_SHOW_HWMODE_DIR_CMD") - delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_DIR_RSP") - delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_DIR_RES") + else: + body = [] + if cable_type == "active-active": + headers = ['Port', 'Direction', 'Presence', 'PeerDirection', 'ConnectivityState'] + rc = create_active_active_mux_direction_result(body, port, db) + else: + rc = create_active_standby_mux_direction_result(body, port, db) + headers = ['Port', 'Direction', 'Presence'] + click.echo(tabulate(body, headers=headers)) return rc @@ -1289,8 +1378,12 @@ def muxdirection(db, port): rc_exit = True body = [] + active_active = False + if json_output: + result = {} + result ["HWMODE"] = {} - for port in logical_port_list: + for port in natsorted(logical_port_list): if platform_sfputil is not None: physical_port_list = platform_sfputil_helper.logical_port_name_to_physical_port_list(port) @@ -1316,26 +1409,37 @@ def muxdirection(db, port): if port != logical_port_list_per_port[0]: continue - temp_list = [] + + asic_index = get_asic_index_for_port(port) + cable_type = get_optional_value_for_key_in_config_tbl(per_npu_configdb[asic_index], port, "cable_type", "MUX_CABLE") + if json_output: + if cable_type == "active-active": + rc = create_active_active_mux_direction_json_result(result, port, db) + active_active = True + else: + rc = create_active_standby_mux_direction_json_result(result, port, db) - res_dict = get_hwmode_mux_direction_port(db, port) + else: + if cable_type == 'active-active': + rc = create_active_active_mux_direction_result(body, port, db) + active_active = True + else: + rc = create_active_standby_mux_direction_result(body, port, db) + if rc != 0: + rc_exit = False - port = platform_sfputil_helper.get_interface_alias(port, db) - temp_list.append(port) - temp_list.append(res_dict[1]) - temp_list.append(res_dict[2]) - body.append(temp_list) - rc = res_dict[0] - if rc != 0: - rc_exit = False - headers = ['Port', 'Direction', 'Presence'] - click.echo(tabulate(body, headers=headers)) + if json_output: + click.echo("{}".format(json.dumps(result, indent=4))) + else: + if active_active: + + headers = ['Port', 'Direction', 'Presence', 'PeerDirection', 'ConnectivityState'] + else: + headers = ['Port', 'Direction', 'Presence'] + click.echo(tabulate(body, headers=headers)) - delete_all_keys_in_db_table("APPL_DB", "XCVRD_SHOW_HWMODE_DIR_CMD") - delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_DIR_RSP") - delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_DIR_RES") if rc_exit == False: sys.exit(EXIT_FAIL) @@ -2003,3 +2107,152 @@ def tunnel_route(db, port, json_output): click.echo(tabulate(print_data, headers=headers)) sys.exit(STATUS_SUCCESSFUL) + + +def get_grpc_cached_version_mux_direction_per_port(db, port): + + + state_db = {} + mux_info_dict = {} + mux_info_full_dict = {} + trans_info_full_dict = {} + mux_info_dict["rc"] = False + + # Getting all front asic namespace and correspding config and state DB connector + + namespaces = multi_asic.get_front_end_namespaces() + for namespace in namespaces: + asic_id = multi_asic.get_asic_index_from_namespace(namespace) + state_db[asic_id] = swsscommon.SonicV2Connector(use_unix_socket_path=False, namespace=namespace) + state_db[asic_id].connect(state_db[asic_id].STATE_DB) + + if platform_sfputil is not None: + asic_index = platform_sfputil_helper.get_asic_id_for_logical_port(port) + + if asic_index is None: + # TODO 
this import is only for unit test purposes, and should be removed once sonic_platform_base + # is fully mocked + import sonic_platform_base.sonic_sfp.sfputilhelper + asic_index = sonic_platform_base.sonic_sfp.sfputilhelper.SfpUtilHelper().get_asic_id_for_logical_port(port) + if asic_index is None: + click.echo("Got invalid asic index for port {}, cant retrieve mux cable table entries".format(port)) + return mux_info_dict + + + mux_info_full_dict[asic_index] = state_db[asic_index].get_all( + state_db[asic_index].STATE_DB, 'MUX_CABLE_INFO|{}'.format(port)) + trans_info_full_dict[asic_index] = state_db[asic_index].get_all( + state_db[asic_index].STATE_DB, 'TRANSCEIVER_STATUS|{}'.format(port)) + + res_dir = {} + res_dir = mux_info_full_dict[asic_index] + mux_info_dict["self_mux_direction"] = res_dir.get("self_mux_direction", None) + mux_info_dict["peer_mux_direction"] = res_dir.get("peer_mux_direction", None) + mux_info_dict["grpc_connection_status"] = res_dir.get("grpc_connection_status", None) + + trans_dir = {} + trans_dir = trans_info_full_dict[asic_index] + + status = trans_dir.get("status", "0") + presence = "True" if status == "1" else "False" + + mux_info_dict["presence"] = presence + + mux_info_dict["rc"] = True + + return mux_info_dict + + +@muxcable.group(cls=clicommon.AbbreviationGroup) +def grpc(): + """Shows the muxcable hardware information directly""" + pass + + +@grpc.command() +@click.argument('port', metavar='', required=False, default=None) +@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, help="display the output in json format") +@clicommon.pass_db +def muxdirection(db, port, json_output): + """Shows the current direction of the FPGA facing port on Tx Side {active/standy}""" + + port = platform_sfputil_helper.get_interface_name(port, db) + + + if port is not None: + + if check_port_in_mux_cable_table(port) == False: + click.echo("Not Y-cable port") + return CONFIG_FAIL + + if json_output: + result = {} + result ["HWMODE"] = {} + rc = create_active_active_mux_direction_json_result(result, port, db) + click.echo("{}".format(json.dumps(result, indent=4))) + + else: + body = [] + + headers = ['Port', 'Direction', 'Presence', 'PeerDirection', 'ConnectivityState'] + rc = create_active_active_mux_direction_result(body, port, db) + click.echo(tabulate(body, headers=headers)) + + return rc + + else: + + + logical_port_list = platform_sfputil_helper.get_logical_list() + + rc_exit = True + body = [] + if json_output: + result = {} + result ["HWMODE"] = {} + + for port in natsorted(logical_port_list): + + if platform_sfputil is not None: + physical_port_list = platform_sfputil_helper.logical_port_name_to_physical_port_list(port) + + if not isinstance(physical_port_list, list): + continue + if len(physical_port_list) != 1: + continue + + if not check_port_in_mux_cable_table(port): + continue + + physical_port = physical_port_list[0] + logical_port_list_for_physical_port = platform_sfputil_helper.get_physical_to_logical() + + logical_port_list_per_port = logical_port_list_for_physical_port.get(physical_port, None) + + """ This check is required for checking whether or not this logical port is the one which is + actually mapped to physical port and by convention it is always the first port. 
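get_grpc_cached_version_mux_direction_per_port above reads the cached directions from MUX_CABLE_INFO|&lt;port&gt; and derives presence from TRANSCEIVER_STATUS|&lt;port&gt;, where only a status field of "1" counts as present. In miniature:

```python
# Presence rule used by the cached gRPC lookup above (sketch).
def derive_presence(transceiver_status_fvs):
    return "True" if transceiver_status_fvs.get("status", "0") == "1" else "False"

# E.g. the mock Ethernet0 entry carries status "67", so presence is "False",
# matching the expected all-ports output in the tests below.
```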
+ TODO: this should be removed with more logic to check which logical port maps to actual physical port + being used""" + + if port != logical_port_list_per_port[0]: + continue + + if json_output: + rc = create_active_active_mux_direction_json_result(result, port, db) + else: + rc = create_active_active_mux_direction_result(body, port, db) + + if rc != True: + rc_exit = False + + if json_output: + click.echo("{}".format(json.dumps(result, indent=4))) + else: + headers = ['Port', 'Direction', 'Presence', 'PeerDirection', 'ConnectivityState'] + + click.echo(tabulate(body, headers=headers)) + + if rc_exit == False: + sys.exit(EXIT_FAIL) + + diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json index 8462248a92..97633b3ef8 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -549,6 +549,24 @@ "version_self_next": "0.2MS", "version_self_active": "0.2MS", "version_self_inactive": "0.2MS", + "peer_mux_direction": "active", + "self_mux_direction": "active", + "grpc_connection_status": "READY", + "Value": "AABB" + }, + "MUX_CABLE_INFO|Ethernet4": { + "version_peer_next": "0.2MS", + "version_peer_active": "0.2MS", + "version_peer_inactive": "0.2MS", + "version_nic_next": "0.2MS", + "version_nic_active": "0.2MS", + "version_nic_inactive": "0.2MS", + "version_self_next": "0.2MS", + "version_self_active": "0.2MS", + "version_self_inactive": "0.2MS", + "peer_mux_direction": "active", + "self_mux_direction": "standby", + "grpc_connection_status": "READY", "Value": "AABB" }, "MUX_CABLE_INFO|Ethernet12": { diff --git a/tests/muxcable_test.py b/tests/muxcable_test.py index b8eb3dce62..7e4b4b250c 100644 --- a/tests/muxcable_test.py +++ b/tests/muxcable_test.py @@ -378,6 +378,50 @@ } """ +show_muxcable_grpc_muxdirection_active_expected_output = """\ +Port Direction Presence PeerDirection ConnectivityState +---------- ----------- ---------- --------------- ------------------- +Ethernet12 active True active READY +""" + +show_muxcable_grpc_muxdirection_standby_expected_output = """\ +Port Direction Presence PeerDirection ConnectivityState +--------- ----------- ---------- --------------- ------------------- +Ethernet4 standby True active READY +""" + +show_muxcable_grpc_muxdirection_active_expected_all_output = """\ +Port Direction Presence PeerDirection ConnectivityState +--------- ----------- ---------- --------------- ------------------- +Ethernet0 active False active READY +""" + +show_muxcable_grpc_muxdirection_active_expected_all_output_json = """\ +{ + "HWMODE": { + "Ethernet0": { + "Direction": "active", + "Presence": "False", + "PeerDirection": "active", + "ConnectivityState": "READY" + } + } +} +""" + +show_muxcable_grpc_muxdirection_standby_expected_output_json = """\ +{ + "HWMODE": { + "Ethernet4": { + "Direction": "standby", + "Presence": "True", + "PeerDirection": "active", + "ConnectivityState": "READY" + } + } +} +""" + expected_muxcable_cableinfo_output = """\ Vendor Model -------- --------------- @@ -395,6 +439,17 @@ Ethernet12 active True """ +show_muxcable_hwmode_muxdirection_active_expected_output_json = """\ +{ + "HWMODE": { + "Ethernet12": { + "Direction": "active", + "Presence": "True" + } + } +} +""" + show_muxcable_hwmode_muxdirection_active_expected_output_alias = """\ Port Direction Presence ------ ----------- ---------- @@ -2367,6 +2422,138 @@ def test_config_muxcable_telemetry_enable(self): "enable"], obj=db) assert result.exit_code == 0 + @mock.patch('show.muxcable.delete_all_keys_in_db_table', 
mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.update_and_get_response_for_xcvr_cmd', mock.MagicMock(return_value={0: 0, + 1: "standby"})) + @mock.patch('show.muxcable.get_grpc_cached_version_mux_direction_per_port', mock.MagicMock(return_value={"self_mux_direction": "active", + "peer_mux_direction": "active", + "presence": "True", + "rc": 0, + "grpc_connection_status": "READY"})) + @mock.patch('show.muxcable.check_port_in_mux_cable_table', mock.MagicMock(return_value=True)) + @mock.patch('utilities_common.platform_sfputil_helper.get_logical_list', mock.MagicMock(return_value=["Ethernet0", "Ethernet12"])) + @mock.patch('utilities_common.platform_sfputil_helper.get_asic_id_for_logical_port', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.platform_sfputil', mock.MagicMock(return_value={0: ["Ethernet12", "Ethernet0"]})) + @mock.patch('utilities_common.platform_sfputil_helper.get_physical_to_logical', mock.MagicMock(return_value={0: ["Ethernet12", "Ethernet0"]})) + @mock.patch('utilities_common.platform_sfputil_helper.logical_port_name_to_physical_port_list', mock.MagicMock(return_value=[0])) + @mock.patch('sonic_y_cable.y_cable.check_read_side', mock.MagicMock(return_value=(1))) + @mock.patch('sonic_y_cable.y_cable.check_mux_direction', mock.MagicMock(return_value=(2))) + @mock.patch('re.match', mock.MagicMock(return_value=(True))) + def test_show_muxcable_grpc_muxdirection_port_standby_with_patch(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(show.cli.commands["muxcable"].commands["grpc"].commands["muxdirection"], + ["Ethernet12"], obj=db) + assert result.exit_code == 0 + assert result.output == show_muxcable_grpc_muxdirection_active_expected_output + + @mock.patch('show.muxcable.delete_all_keys_in_db_table', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.update_and_get_response_for_xcvr_cmd', mock.MagicMock(return_value={0: 0, + 1: "standby"})) + @mock.patch('show.muxcable.check_port_in_mux_cable_table', mock.MagicMock(return_value=True)) + @mock.patch('utilities_common.platform_sfputil_helper.get_logical_list', mock.MagicMock(return_value=["Ethernet0", "Ethernet4"])) + @mock.patch('utilities_common.platform_sfputil_helper.get_asic_id_for_logical_port', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.platform_sfputil', mock.MagicMock(return_value={0: ["Ethernet4", "Ethernet0"]})) + @mock.patch('utilities_common.platform_sfputil_helper.get_physical_to_logical', mock.MagicMock(return_value={0: ["Ethernet4", "Ethernet0"]})) + @mock.patch('utilities_common.platform_sfputil_helper.logical_port_name_to_physical_port_list', mock.MagicMock(return_value=[0])) + @mock.patch('sonic_y_cable.y_cable.check_read_side', mock.MagicMock(return_value=(1))) + @mock.patch('sonic_y_cable.y_cable.check_mux_direction', mock.MagicMock(return_value=(2))) + @mock.patch('re.match', mock.MagicMock(return_value=(True))) + def test_show_muxcable_grpc_muxdirection_port_standby(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(show.cli.commands["muxcable"].commands["grpc"].commands["muxdirection"], + ["Ethernet4"], obj=db) + assert result.exit_code == 0 + assert result.output == show_muxcable_grpc_muxdirection_standby_expected_output + + @mock.patch('show.muxcable.delete_all_keys_in_db_table', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.update_and_get_response_for_xcvr_cmd', mock.MagicMock(return_value={0: 0, + 1: "standby"})) + @mock.patch('show.muxcable.check_port_in_mux_cable_table', 
mock.MagicMock(return_value=True)) + @mock.patch('utilities_common.platform_sfputil_helper.get_logical_list', mock.MagicMock(return_value=["Ethernet0", "Ethernet4"])) + @mock.patch('utilities_common.platform_sfputil_helper.get_asic_id_for_logical_port', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.platform_sfputil', mock.MagicMock(return_value={0: ["Ethernet4", "Ethernet0"]})) + @mock.patch('utilities_common.platform_sfputil_helper.get_physical_to_logical', mock.MagicMock(return_value={0: ["Ethernet4", "Ethernet0"]})) + @mock.patch('utilities_common.platform_sfputil_helper.logical_port_name_to_physical_port_list', mock.MagicMock(return_value=[0])) + @mock.patch('sonic_y_cable.y_cable.check_read_side', mock.MagicMock(return_value=(1))) + @mock.patch('sonic_y_cable.y_cable.check_mux_direction', mock.MagicMock(return_value=(2))) + @mock.patch('re.match', mock.MagicMock(return_value=(True))) + def test_show_muxcable_grpc_muxdirection_port_standby_json(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(show.cli.commands["muxcable"].commands["grpc"].commands["muxdirection"], + ["Ethernet4", "--json"], obj=db) + assert result.exit_code == 0 + assert result.output == show_muxcable_grpc_muxdirection_standby_expected_output_json + + @mock.patch('show.muxcable.delete_all_keys_in_db_table', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.update_and_get_response_for_xcvr_cmd', mock.MagicMock(return_value={0: 0, + 1: "standby"})) + @mock.patch('show.muxcable.check_port_in_mux_cable_table', mock.MagicMock(return_value=True)) + @mock.patch('utilities_common.platform_sfputil_helper.get_logical_list', mock.MagicMock(return_value=["Ethernet0", "Ethernet4"])) + @mock.patch('utilities_common.platform_sfputil_helper.get_asic_id_for_logical_port', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.platform_sfputil', mock.MagicMock(return_value={0: ["Ethernet0", "Ethernet4"]})) + @mock.patch('utilities_common.platform_sfputil_helper.get_physical_to_logical', mock.MagicMock(return_value={0: ["Ethernet0", "Ethernet4"]})) + @mock.patch('utilities_common.platform_sfputil_helper.logical_port_name_to_physical_port_list', mock.MagicMock(return_value=[0])) + @mock.patch('sonic_y_cable.y_cable.check_read_side', mock.MagicMock(return_value=(1))) + @mock.patch('sonic_y_cable.y_cable.check_mux_direction', mock.MagicMock(return_value=(2))) + @mock.patch('re.match', mock.MagicMock(return_value=(True))) + def test_show_muxcable_grpc_muxdirection_port_all(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(show.cli.commands["muxcable"].commands["grpc"].commands["muxdirection"], obj=db) + assert result.exit_code == 0 + assert result.output == show_muxcable_grpc_muxdirection_active_expected_all_output + + @mock.patch('show.muxcable.delete_all_keys_in_db_table', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.update_and_get_response_for_xcvr_cmd', mock.MagicMock(return_value={0: 0, + 1: "standby"})) + @mock.patch('show.muxcable.check_port_in_mux_cable_table', mock.MagicMock(return_value=True)) + @mock.patch('utilities_common.platform_sfputil_helper.get_logical_list', mock.MagicMock(return_value=["Ethernet0", "Ethernet4"])) + @mock.patch('utilities_common.platform_sfputil_helper.get_asic_id_for_logical_port', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.platform_sfputil', mock.MagicMock(return_value={0: ["Ethernet0", "Ethernet4"]})) + @mock.patch('utilities_common.platform_sfputil_helper.get_physical_to_logical', 
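Each test above stacks mock.patch decorators, passing a ready-made MagicMock as the replacement object, over the platform helpers, then drives the click command tree with CliRunner and compares against a golden string. The skeleton of that pattern, with an illustrative wrapper name and only one patch shown:

```python
from unittest import mock
from click.testing import CliRunner
from utilities_common.db import Db
import show.main as show

@mock.patch('show.muxcable.check_port_in_mux_cable_table',
            mock.MagicMock(return_value=True))
def invoke_grpc_muxdirection(args):
    # Drive the CLI exactly as a user would and capture exit code + output.
    result = CliRunner().invoke(
        show.cli.commands["muxcable"].commands["grpc"].commands["muxdirection"],
        args, obj=Db())
    return result.exit_code, result.output
```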
mock.MagicMock(return_value={0: ["Ethernet0", "Ethernet4"]})) + @mock.patch('utilities_common.platform_sfputil_helper.logical_port_name_to_physical_port_list', mock.MagicMock(return_value=[0])) + @mock.patch('sonic_y_cable.y_cable.check_read_side', mock.MagicMock(return_value=(1))) + @mock.patch('sonic_y_cable.y_cable.check_mux_direction', mock.MagicMock(return_value=(2))) + @mock.patch('re.match', mock.MagicMock(return_value=(True))) + def test_show_muxcable_grpc_muxdirection_port_all_json(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(show.cli.commands["muxcable"].commands["grpc"].commands["muxdirection"], ["--json"], obj=db) + assert result.exit_code == 0 + assert result.output == show_muxcable_grpc_muxdirection_active_expected_all_output_json + + @mock.patch('show.muxcable.delete_all_keys_in_db_table', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.update_and_get_response_for_xcvr_cmd', mock.MagicMock(return_value={0: 0, + 1: "active"})) + @mock.patch('show.muxcable.get_hwmode_mux_direction_port', mock.MagicMock(return_value={0: 0, + 1: "active", + 2: "True"})) + @mock.patch('show.muxcable.check_port_in_mux_cable_table', mock.MagicMock(return_value=True)) + @mock.patch('utilities_common.platform_sfputil_helper.get_logical_list', mock.MagicMock(return_value=["Ethernet0", "Ethernet12"])) + @mock.patch('utilities_common.platform_sfputil_helper.get_asic_id_for_logical_port', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.platform_sfputil', mock.MagicMock(return_value={0: ["Ethernet12", "Ethernet0"]})) + @mock.patch('utilities_common.platform_sfputil_helper.get_physical_to_logical', mock.MagicMock(return_value={0: ["Ethernet12", "Ethernet0"]})) + @mock.patch('utilities_common.platform_sfputil_helper.logical_port_name_to_physical_port_list', mock.MagicMock(return_value=[0])) + @mock.patch('sonic_y_cable.y_cable.check_read_side', mock.MagicMock(return_value=(1))) + @mock.patch('sonic_y_cable.y_cable.check_mux_direction', mock.MagicMock(return_value=(1))) + @mock.patch('re.match', mock.MagicMock(return_value=(True))) + def test_show_muxcable_hwmode_muxdirection_port_active(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(show.cli.commands["muxcable"].commands["hwmode"].commands["muxdirection"], + ["Ethernet12", "--json"], obj=db) + assert result.exit_code == 0 + assert result.output == show_muxcable_hwmode_muxdirection_active_expected_output_json + @classmethod def teardown_class(cls): os.environ['UTILITIES_UNIT_TESTING'] = "0" From 9e32962c55f2a1823b35673c0a7c7a5d04db5a8e Mon Sep 17 00:00:00 2001 From: mihirpat1 <112018033+mihirpat1@users.noreply.github.com> Date: Wed, 8 Feb 2023 16:39:00 -0800 Subject: [PATCH 12/66] Add transceiver info CLI support to show output from TRANSCEIVER_INFO for ZR (#2630) * Add transceiver info CLI support to show output from TRANSCEIVER_INFO for ZR Signed-off-by: Mihir Patel * Added test case for info CLI * Updated command reference * Resolved merged conflicts * Made convert_sfp_info_to_output_string generic for CMIS and non CMIS and added test case to address PR comment * Resolved test_multi_asic_interface_status_all failure * Addressed PR comments --------- Signed-off-by: Mihir Patel --- doc/Command-Reference.md | 44 ++++++- scripts/sfpshow | 29 ++++- show/interfaces/__init__.py | 22 ++++ tests/mock_tables/asic0/state_db.json | 37 ++++++ tests/mock_tables/asic1/state_db.json | 51 +++++--- tests/mock_tables/state_db.json | 37 ++++++ tests/multi_asic_intfutil_test.py | 24 ++-- tests/sfp_test.py 
| 162 +++++++++++++++++++++----- utilities_common/sfp_helper.py | 23 ++++ 9 files changed, 363 insertions(+), 66 deletions(-) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 6882e48e65..2c3f4fb0b3 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -927,7 +927,7 @@ This command displays information for all the interfaces for the transceiver req - Usage: ``` - show interfaces transceiver (eeprom [-d|--dom] | lpmode | presence | error-status [-hw|--fetch-from-hardware] | pm) [] + show interfaces transceiver (eeprom [-d|--dom] | info | lpmode | presence | error-status [-hw|--fetch-from-hardware] | pm) [] ``` - Example (Decode and display information stored on the EEPROM of SFP transceiver connected to Ethernet0): @@ -965,6 +965,48 @@ This command displays information for all the interfaces for the transceiver req Vcc : 0.0000Volts ``` +- Example (Decode and display information stored on the EEPROM of SFP transceiver connected to Ethernet16): + ``` + admin@sonic:~$ show interfaces transceiver info Ethernet16 + Ethernet16: SFP EEPROM detected + Active Firmware: 61.20 + Active application selected code assigned to host lane 1: 1 + Active application selected code assigned to host lane 2: 1 + Active application selected code assigned to host lane 3: 1 + Active application selected code assigned to host lane 4: 1 + Active application selected code assigned to host lane 5: 1 + Active application selected code assigned to host lane 6: 1 + Active application selected code assigned to host lane 7: 1 + Active application selected code assigned to host lane 8: 1 + Application Advertisement: 400GAUI-8 C2M (Annex 120E) - Host Assign (0x1) - 400ZR, DWDM, amplified - Media Assign (0x1) + 400GAUI-8 C2M (Annex 120E) - Host Assign (0x1) - 400ZR, Single Wavelength, Unamplified - Media Assign (0x1) + 100GAUI-2 C2M (Annex 135G) - Host Assign (0x55) - 400ZR, DWDM, amplified - Media Assign (0x1) + CMIS Rev: 4.1 + Connector: LC + Encoding: N/A + Extended Identifier: Power Class 8 (20.0W Max) + Extended RateSelect Compliance: N/A + Host Lane Count: 8 + Identifier: QSFP-DD Double Density 8X Pluggable Transceiver + Inactive Firmware: 61.20 + Length Cable Assembly(m): 0.0 + Media Interface Technology: 1550 nm DFB + Media Lane Count: 1 + Module Hardware Rev: 49.49 + Nominal Bit Rate(100Mbs): 0 + Specification Compliance: sm_media_interface + Supported Max Laser Frequency: 196100 + Supported Max TX Power: 4.0 + Supported Min Laser Frequency: 191300 + Supported Min TX Power: -22.9 + Vendor Date Code(YYYY-MM-DD Lot): 2020-21-02 17 + Vendor Name: Acacia Comm Inc. 
+ Vendor OUI: 7c-b2-5c + Vendor PN: DP04QSDD-E20-00E + Vendor Rev: 01 + Vendor SN: 210753986 + ``` + - Example (Display status of low-power mode of SFP transceiver connected to Ethernet100): ``` admin@sonic:~$ show interfaces transceiver lpmode Ethernet100 diff --git a/scripts/sfpshow b/scripts/sfpshow index 7b3c0caca0..ac0adf5c6e 100755 --- a/scripts/sfpshow +++ b/scripts/sfpshow @@ -17,7 +17,7 @@ from natsort import natsorted from sonic_py_common.interface import front_panel_prefix, backplane_prefix, inband_prefix, recirc_prefix from sonic_py_common import multi_asic from utilities_common.sfp_helper import covert_application_advertisement_to_output_string -from utilities_common.sfp_helper import QSFP_DATA_MAP +from utilities_common.sfp_helper import QSFP_DATA_MAP, CMIS_DATA_MAP from tabulate import tabulate # Mock the redis DB for unit test purposes @@ -284,14 +284,16 @@ class SFPShow(object): def convert_sfp_info_to_output_string(self, sfp_info_dict): indent = ' ' * 8 output = '' + is_sfp_cmis = 'cmis_rev' in sfp_info_dict - sorted_qsfp_data_map_keys = sorted(QSFP_DATA_MAP, key=QSFP_DATA_MAP.get) - for key in sorted_qsfp_data_map_keys: + data_map = CMIS_DATA_MAP if is_sfp_cmis else QSFP_DATA_MAP + sorted_data_map_keys = sorted(data_map, key=data_map.get) + for key in sorted_data_map_keys: if key == 'cable_type': output += '{}{}: {}\n'.format(indent, sfp_info_dict['cable_type'], sfp_info_dict['cable_length']) elif key == 'cable_length': pass - elif key == 'specification_compliance': + elif key == 'specification_compliance' and not(is_sfp_cmis): if sfp_info_dict['type'] == "QSFP-DD Double Density 8X Pluggable Transceiver": output += '{}{}: {}\n'.format(indent, QSFP_DATA_MAP[key], sfp_info_dict[key]) else: @@ -308,7 +310,7 @@ class SFPShow(object): elif key == 'application_advertisement': output += covert_application_advertisement_to_output_string(indent, sfp_info_dict) else: - output += '{}{}: {}\n'.format(indent, QSFP_DATA_MAP[key], sfp_info_dict[key]) + output += '{}{}: {}\n'.format(indent, data_map[key], sfp_info_dict[key]) return output @@ -587,6 +589,23 @@ def eeprom(port, dump_dom, namespace): sfp.get_eeprom() sfp.display_eeprom() +# 'info' subcommand + +@cli.command() +@click.option('-p', '--port', metavar='', help="Display SFP EEPROM data for port only") +@click.option('-n', '--namespace', default=None, help="Display interfaces for specific namespace") +def info(port, namespace): + if port and multi_asic.is_multi_asic() and namespace is None: + try: + namespace = multi_asic.get_namespace_for_port(port) + except Exception: + display_invalid_intf_eeprom(port) + sys.exit(1) + + sfp = SFPShow(port, namespace) + sfp.get_eeprom() + sfp.display_eeprom() + # 'presence' subcommand diff --git a/show/interfaces/__init__.py b/show/interfaces/__init__.py index 0b172d6982..a7a562446b 100644 --- a/show/interfaces/__init__.py +++ b/show/interfaces/__init__.py @@ -469,6 +469,28 @@ def pm(interfacename, namespace, verbose): clicommon.run_command(cmd, display_cmd=verbose) +@transceiver.command() +@click.argument('interfacename', required=False) +@click.option('--namespace', '-n', 'namespace', default=None, show_default=True, + type=click.Choice(multi_asic_util.multi_asic_ns_choices()), help='Namespace name or all') +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def info(interfacename, namespace, verbose): + """Show interface transceiver information""" + + ctx = click.get_current_context() + + cmd = "sfpshow info" + + if interfacename is not None: + interfacename = 
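convert_sfp_info_to_output_string above keys its behavior off a single probe: a TRANSCEIVER_INFO entry containing cmis_rev is rendered through CMIS_DATA_MAP, anything else through QSFP_DATA_MAP, in both cases iterating keys sorted by their display label. A reduced rendering loop; the cable_type, specification_compliance and application_advertisement special cases noted in the hunk are omitted here:

```python
def render_info(sfp_info_dict, qsfp_data_map, cmis_data_map):
    # CMIS modules are detected by the presence of the 'cmis_rev' field.
    is_cmis = 'cmis_rev' in sfp_info_dict
    data_map = cmis_data_map if is_cmis else qsfp_data_map
    out = []
    # Sorting keys by their mapped value yields labels in alphabetical order,
    # as seen in the CLI output above.
    for key in sorted(data_map, key=data_map.get):
        if key in sfp_info_dict:
            out.append('        {}: {}'.format(data_map[key], sfp_info_dict[key]))
    return '\n'.join(out)
```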
try_convert_interfacename_from_alias(ctx, interfacename) + + cmd += " -p {}".format(interfacename) + + if namespace is not None: + cmd += " -n {}".format(namespace) + + clicommon.run_command(cmd, display_cmd=verbose) + @transceiver.command() @click.argument('interfacename', required=False) @click.option('--verbose', is_flag=True, help="Enable verbose output") diff --git a/tests/mock_tables/asic0/state_db.json b/tests/mock_tables/asic0/state_db.json index 21b4fa0eab..2756404971 100644 --- a/tests/mock_tables/asic0/state_db.json +++ b/tests/mock_tables/asic0/state_db.json @@ -51,6 +51,43 @@ "vcclowalarm": "2.9700", "vcclowwarning": "3.1349" }, + "TRANSCEIVER_INFO|Ethernet48": { + "type" : "QSFP-DD Double Density 8X Pluggable Transceiver", + "hardware_rev" : "1.1", + "serial" : "214455197", + "manufacturer" : "Acacia Comm Inc.", + "model" : "DP04QSDD-E20-001", + "connector" : "LC", + "encoding" : "N/A", + "ext_identifier" : "Power Class 8 (20.0W Max)", + "ext_rateselect_compliance" : "N/A", + "cable_type" : "Length Cable Assembly(m)", + "cable_length" : "0.0", + "nominal_bit_rate" : "0", + "specification_compliance" : "sm_media_interface", + "vendor_date" : "2021-11-19", + "vendor_oui" : "7c-b2-5c", + "application_advertisement" : "{1: {'host_electrical_interface_id': '400GAUI-8 C2M (Annex 120E)', 'module_media_interface_id': '400ZR, DWDM, amplified', 'media_lane_count': 1, 'host_lane_count': 8, 'host_lane_assignment_options': 1, 'media_lane_assignment_options': 1}, 2: {'host_electrical_interface_id': '400GAUI-8 C2M (Annex 120E)', 'module_media_interface_id': '400ZR, Single Wavelength, Unamplified', 'media_lane_count': 1, 'host_lane_count': 8, 'host_lane_assignment_options': 1, 'media_lane_assignment_options': 1}, 3: {'host_electrical_interface_id': '100GAUI-2 C2M (Annex 135G)', 'module_media_interface_id': '400ZR, DWDM, amplified', 'media_lane_count': 1, 'host_lane_count': 2, 'host_lane_assignment_options': 85, 'media_lane_assignment_options': 1}}", + "host_lane_count" : "8", + "media_lane_count" : "1", + "active_apsel_hostlane1" : "1", + "active_apsel_hostlane2" : "1", + "active_apsel_hostlane3" : "1", + "active_apsel_hostlane4" : "1", + "active_apsel_hostlane5" : "1", + "active_apsel_hostlane6" : "1", + "active_apsel_hostlane7" : "1", + "active_apsel_hostlane8" : "1", + "media_interface_technology" : "1550 nm DFB", + "vendor_rev" : "A", + "cmis_rev" : "4.1", + "active_firmware" : "61.20", + "inactive_firmware" : "161.10", + "supported_max_tx_power" : "4.0", + "supported_min_tx_power" : "-22.9", + "supported_max_laser_freq" : "196100", + "supported_min_laser_freq" : "191300" + }, "CHASSIS_INFO|chassis 1": { "psu_num": "2" }, diff --git a/tests/mock_tables/asic1/state_db.json b/tests/mock_tables/asic1/state_db.json index dd775b9b50..7397d25b8f 100644 --- a/tests/mock_tables/asic1/state_db.json +++ b/tests/mock_tables/asic1/state_db.json @@ -1,21 +1,40 @@ { "TRANSCEIVER_INFO|Ethernet64": { - "type": "QSFP28 or later", - "vendor_rev": "AC", - "serial": "MT1706FT02064", - "manufacturer": "Mellanox", - "model": "MFA1A00-C003", - "vendor_oui": "00-02-c9", - "vendor_date": "2017-01-13 ", - "connector": "No separable connector", - "encoding": "64B66B", - "ext_identifier": "Power Class 3(2.5W max), CDR present in Rx Tx", - "ext_rateselect_compliance": "QSFP+ Rate Select Version 1", - "cable_type": "Length Cable Assembly(m)", - "cable_length": "3", - "specification_compliance": "{'10/40G Ethernet Compliance Code': '40G Active Cable (XLPPI)'}", - "nominal_bit_rate": "255", - 
"application_advertisement": "N/A" + "type" : "QSFP-DD Double Density 8X Pluggable Transceiver", + "hardware_rev" : "X.X", + "serial" : "0123456789", + "manufacturer" : "XXXX", + "model" : "XXX", + "connector" : "LC", + "encoding" : "N/A", + "ext_identifier" : "Power Class 8 (20.0W Max)", + "ext_rateselect_compliance" : "N/A", + "cable_type" : "Length Cable Assembly(m)", + "cable_length" : "0.0", + "nominal_bit_rate" : "0", + "specification_compliance" : "sm_media_interface", + "vendor_date" : "2021-11-19", + "vendor_oui" : "XX-XX-XX", + "application_advertisement" : "{1: {'host_electrical_interface_id': '400GAUI-8 C2M (Annex 120E)', 'module_media_interface_id': '400ZR, DWDM, amplified', 'media_lane_count': 1, 'host_lane_count': 8, 'host_lane_assignment_options': 1, 'media_lane_assignment_options': 1}, 2: {'host_electrical_interface_id': '400GAUI-8 C2M (Annex 120E)', 'module_media_interface_id': '400ZR, Single Wavelength, Unamplified', 'media_lane_count': 1, 'host_lane_count': 8, 'host_lane_assignment_options': 1, 'media_lane_assignment_options': 1}, 3: {'host_electrical_interface_id': '100GAUI-2 C2M (Annex 135G)', 'module_media_interface_id': '400ZR, DWDM, amplified', 'media_lane_count': 1, 'host_lane_count': 2, 'host_lane_assignment_options': 85, 'media_lane_assignment_options': 1}}", + "host_lane_count" : "8", + "media_lane_count" : "1", + "active_apsel_hostlane1" : "1", + "active_apsel_hostlane2" : "1", + "active_apsel_hostlane3" : "1", + "active_apsel_hostlane4" : "1", + "active_apsel_hostlane5" : "1", + "active_apsel_hostlane6" : "1", + "active_apsel_hostlane7" : "1", + "active_apsel_hostlane8" : "1", + "media_interface_technology" : "1550 nm DFB", + "vendor_rev" : "XX", + "cmis_rev" : "4.1", + "active_firmware" : "X.X", + "inactive_firmware" : "X.X", + "supported_max_tx_power" : "4.0", + "supported_min_tx_power" : "-22.9", + "supported_max_laser_freq" : "196100", + "supported_min_laser_freq" : "191300" }, "TRANSCEIVER_DOM_SENSOR|Ethernet64": { "temperature": "30.9258", diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json index 97633b3ef8..4cdda56bc8 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -373,6 +373,43 @@ "rx_sig_power_min": "-40", "rx_sig_power_max": "40" }, + "TRANSCEIVER_INFO|Ethernet64": { + "type" : "QSFP-DD Double Density 8X Pluggable Transceiver", + "hardware_rev" : "X.X", + "serial" : "0123456789", + "manufacturer" : "XXXX", + "model" : "XXX", + "connector" : "LC", + "encoding" : "N/A", + "ext_identifier" : "Power Class 8 (20.0W Max)", + "ext_rateselect_compliance" : "N/A", + "cable_type" : "Length Cable Assembly(m)", + "cable_length" : "0.0", + "nominal_bit_rate" : "0", + "specification_compliance" : "sm_media_interface", + "vendor_date" : "2021-11-19", + "vendor_oui" : "XX-XX-XX", + "application_advertisement" : "{1: {'host_electrical_interface_id': '400GAUI-8 C2M (Annex 120E)', 'module_media_interface_id': '400ZR, DWDM, amplified', 'media_lane_count': 1, 'host_lane_count': 8, 'host_lane_assignment_options': 1, 'media_lane_assignment_options': 1}, 2: {'host_electrical_interface_id': '400GAUI-8 C2M (Annex 120E)', 'module_media_interface_id': '400ZR, Single Wavelength, Unamplified', 'media_lane_count': 1, 'host_lane_count': 8, 'host_lane_assignment_options': 1, 'media_lane_assignment_options': 1}, 3: {'host_electrical_interface_id': '100GAUI-2 C2M (Annex 135G)', 'module_media_interface_id': '400ZR, DWDM, amplified', 'media_lane_count': 1, 'host_lane_count': 2, 'host_lane_assignment_options': 85, 
'media_lane_assignment_options': 1}}", + "host_lane_count" : "8", + "media_lane_count" : "1", + "active_apsel_hostlane1" : "1", + "active_apsel_hostlane2" : "1", + "active_apsel_hostlane3" : "1", + "active_apsel_hostlane4" : "1", + "active_apsel_hostlane5" : "1", + "active_apsel_hostlane6" : "1", + "active_apsel_hostlane7" : "1", + "active_apsel_hostlane8" : "1", + "media_interface_technology" : "1550 nm DFB", + "vendor_rev" : "XX", + "cmis_rev" : "4.1", + "active_firmware" : "X.X", + "inactive_firmware" : "X.X", + "supported_max_tx_power" : "4.0", + "supported_min_tx_power" : "-22.9", + "supported_max_laser_freq" : "196100", + "supported_min_laser_freq" : "191300" + }, "TRANSCEIVER_STATUS|Ethernet0": { "status": "67", "error": "Blocking Error|High temperature" diff --git a/tests/multi_asic_intfutil_test.py b/tests/multi_asic_intfutil_test.py index 56e11fa0d3..37e5b5b0f0 100644 --- a/tests/multi_asic_intfutil_test.py +++ b/tests/multi_asic_intfutil_test.py @@ -10,18 +10,18 @@ scripts_path = os.path.join(modules_path, "scripts") intf_status_all = """\ - Interface Lanes Speed MTU FEC Alias Vlan Oper Admin Type Asym PFC ---------------- ------------ ------- ----- ----- -------------- --------------- ------ ------- --------------- ---------- - Ethernet0 33,34,35,36 40G 9100 N/A Ethernet1/1 PortChannel1002 up up QSFP28 or later off - Ethernet4 29,30,31,32 40G 9100 N/A Ethernet1/2 PortChannel1002 up up N/A off - Ethernet64 29,30,31,32 40G 9100 N/A Ethernet1/17 routed up up QSFP28 or later off - Ethernet-BP0 93,94,95,96 40G 9100 N/A Ethernet-BP0 PortChannel4001 up up N/A off - Ethernet-BP4 97,98,99,100 40G 9100 N/A Ethernet-BP4 PortChannel4001 up up N/A off - Ethernet-BP256 61,62,63,64 40G 9100 N/A Ethernet-BP256 PortChannel4009 up up N/A off - Ethernet-BP260 57,58,59,60 40G 9100 N/A Ethernet-BP260 PortChannel4009 up up N/A off -PortChannel1002 N/A 80G 9100 N/A N/A trunk up up N/A N/A -PortChannel4001 N/A 80G 9100 N/A N/A routed up up N/A N/A -PortChannel4009 N/A 80G 9100 N/A N/A routed up up N/A N/A + Interface Lanes Speed MTU FEC Alias Vlan Oper Admin Type Asym PFC +--------------- ------------ ------- ----- ----- -------------- --------------- ------ ------- ----------------------------------------------- ---------- + Ethernet0 33,34,35,36 40G 9100 N/A Ethernet1/1 PortChannel1002 up up QSFP28 or later off + Ethernet4 29,30,31,32 40G 9100 N/A Ethernet1/2 PortChannel1002 up up N/A off + Ethernet64 29,30,31,32 40G 9100 N/A Ethernet1/17 routed up up QSFP-DD Double Density 8X Pluggable Transceiver off + Ethernet-BP0 93,94,95,96 40G 9100 N/A Ethernet-BP0 PortChannel4001 up up N/A off + Ethernet-BP4 97,98,99,100 40G 9100 N/A Ethernet-BP4 PortChannel4001 up up N/A off + Ethernet-BP256 61,62,63,64 40G 9100 N/A Ethernet-BP256 PortChannel4009 up up N/A off + Ethernet-BP260 57,58,59,60 40G 9100 N/A Ethernet-BP260 PortChannel4009 up up N/A off +PortChannel1002 N/A 80G 9100 N/A N/A trunk up up N/A N/A +PortChannel4001 N/A 80G 9100 N/A N/A routed up up N/A N/A +PortChannel4009 N/A 80G 9100 N/A N/A routed up up N/A N/A """ intf_status = """\ Interface Lanes Speed MTU FEC Alias Vlan Oper Admin Type Asym PFC diff --git a/tests/sfp_test.py b/tests/sfp_test.py index 5e2c74265a..b6b94ebff6 100644 --- a/tests/sfp_test.py +++ b/tests/sfp_test.py @@ -215,6 +215,46 @@ EVM % 100.0 100.0 100.0 N/A N/A N/A N/A N/A N/A """ +test_cmis_eeprom_output = """\ +Ethernet64: SFP EEPROM detected + Active Firmware: X.X + Active application selected code assigned to host lane 1: 1 + Active application selected code assigned to host 
lane 2: 1 + Active application selected code assigned to host lane 3: 1 + Active application selected code assigned to host lane 4: 1 + Active application selected code assigned to host lane 5: 1 + Active application selected code assigned to host lane 6: 1 + Active application selected code assigned to host lane 7: 1 + Active application selected code assigned to host lane 8: 1 + Application Advertisement: 400GAUI-8 C2M (Annex 120E) - Host Assign (0x1) - 400ZR, DWDM, amplified - Media Assign (0x1) + 400GAUI-8 C2M (Annex 120E) - Host Assign (0x1) - 400ZR, Single Wavelength, Unamplified - Media Assign (0x1) + 100GAUI-2 C2M (Annex 135G) - Host Assign (0x55) - 400ZR, DWDM, amplified - Media Assign (0x1) + CMIS Rev: 4.1 + Connector: LC + Encoding: N/A + Extended Identifier: Power Class 8 (20.0W Max) + Extended RateSelect Compliance: N/A + Host Lane Count: 8 + Identifier: QSFP-DD Double Density 8X Pluggable Transceiver + Inactive Firmware: X.X + Length Cable Assembly(m): 0.0 + Media Interface Technology: 1550 nm DFB + Media Lane Count: 1 + Module Hardware Rev: X.X + Nominal Bit Rate(100Mbs): 0 + Specification compliance: sm_media_interface + Supported Max Laser Frequency: 196100 + Supported Max TX Power: 4.0 + Supported Min Laser Frequency: 191300 + Supported Min TX Power: -22.9 + Vendor Date Code(YYYY-MM-DD Lot): 2021-11-19 + Vendor Name: XXXX + Vendor OUI: XX-XX-XX + Vendor PN: XXX + Vendor Rev: XX + Vendor SN: 0123456789 +""" + test_sfp_eeprom_dom_all_output = """\ Ethernet0: SFP EEPROM detected Application Advertisement: N/A @@ -267,22 +307,42 @@ Ethernet4: SFP EEPROM Not detected Ethernet64: SFP EEPROM detected - Application Advertisement: N/A - Connector: No separable connector - Encoding: 64B66B - Extended Identifier: Power Class 3(2.5W max), CDR present in Rx Tx - Extended RateSelect Compliance: QSFP+ Rate Select Version 1 - Identifier: QSFP28 or later - Length Cable Assembly(m): 3 - Nominal Bit Rate(100Mbs): 255 - Specification compliance: - 10/40G Ethernet Compliance Code: 40G Active Cable (XLPPI) - Vendor Date Code(YYYY-MM-DD Lot): 2017-01-13 - Vendor Name: Mellanox - Vendor OUI: 00-02-c9 - Vendor PN: MFA1A00-C003 - Vendor Rev: AC - Vendor SN: MT1706FT02064 + Active Firmware: X.X + Active application selected code assigned to host lane 1: 1 + Active application selected code assigned to host lane 2: 1 + Active application selected code assigned to host lane 3: 1 + Active application selected code assigned to host lane 4: 1 + Active application selected code assigned to host lane 5: 1 + Active application selected code assigned to host lane 6: 1 + Active application selected code assigned to host lane 7: 1 + Active application selected code assigned to host lane 8: 1 + Application Advertisement: 400GAUI-8 C2M (Annex 120E) - Host Assign (0x1) - 400ZR, DWDM, amplified - Media Assign (0x1) + 400GAUI-8 C2M (Annex 120E) - Host Assign (0x1) - 400ZR, Single Wavelength, Unamplified - Media Assign (0x1) + 100GAUI-2 C2M (Annex 135G) - Host Assign (0x55) - 400ZR, DWDM, amplified - Media Assign (0x1) + CMIS Rev: 4.1 + Connector: LC + Encoding: N/A + Extended Identifier: Power Class 8 (20.0W Max) + Extended RateSelect Compliance: N/A + Host Lane Count: 8 + Identifier: QSFP-DD Double Density 8X Pluggable Transceiver + Inactive Firmware: X.X + Length Cable Assembly(m): 0.0 + Media Interface Technology: 1550 nm DFB + Media Lane Count: 1 + Module Hardware Rev: X.X + Nominal Bit Rate(100Mbs): 0 + Specification compliance: sm_media_interface + Supported Max Laser Frequency: 196100 + Supported Max TX 
Power: 4.0 + Supported Min Laser Frequency: 191300 + Supported Min TX Power: -22.9 + Vendor Date Code(YYYY-MM-DD Lot): 2021-11-19 + Vendor Name: XXXX + Vendor OUI: XX-XX-XX + Vendor PN: XXX + Vendor Rev: XX + Vendor SN: 0123456789 ChannelMonitorValues: RX1Power: 0.3802dBm RX2Power: -0.4871dBm @@ -337,22 +397,42 @@ Ethernet4: SFP EEPROM Not detected Ethernet64: SFP EEPROM detected - Application Advertisement: N/A - Connector: No separable connector - Encoding: 64B66B - Extended Identifier: Power Class 3(2.5W max), CDR present in Rx Tx - Extended RateSelect Compliance: QSFP+ Rate Select Version 1 - Identifier: QSFP28 or later - Length Cable Assembly(m): 3 - Nominal Bit Rate(100Mbs): 255 - Specification compliance: - 10/40G Ethernet Compliance Code: 40G Active Cable (XLPPI) - Vendor Date Code(YYYY-MM-DD Lot): 2017-01-13 - Vendor Name: Mellanox - Vendor OUI: 00-02-c9 - Vendor PN: MFA1A00-C003 - Vendor Rev: AC - Vendor SN: MT1706FT02064 + Active Firmware: X.X + Active application selected code assigned to host lane 1: 1 + Active application selected code assigned to host lane 2: 1 + Active application selected code assigned to host lane 3: 1 + Active application selected code assigned to host lane 4: 1 + Active application selected code assigned to host lane 5: 1 + Active application selected code assigned to host lane 6: 1 + Active application selected code assigned to host lane 7: 1 + Active application selected code assigned to host lane 8: 1 + Application Advertisement: 400GAUI-8 C2M (Annex 120E) - Host Assign (0x1) - 400ZR, DWDM, amplified - Media Assign (0x1) + 400GAUI-8 C2M (Annex 120E) - Host Assign (0x1) - 400ZR, Single Wavelength, Unamplified - Media Assign (0x1) + 100GAUI-2 C2M (Annex 135G) - Host Assign (0x55) - 400ZR, DWDM, amplified - Media Assign (0x1) + CMIS Rev: 4.1 + Connector: LC + Encoding: N/A + Extended Identifier: Power Class 8 (20.0W Max) + Extended RateSelect Compliance: N/A + Host Lane Count: 8 + Identifier: QSFP-DD Double Density 8X Pluggable Transceiver + Inactive Firmware: X.X + Length Cable Assembly(m): 0.0 + Media Interface Technology: 1550 nm DFB + Media Lane Count: 1 + Module Hardware Rev: X.X + Nominal Bit Rate(100Mbs): 0 + Specification compliance: sm_media_interface + Supported Max Laser Frequency: 196100 + Supported Max TX Power: 4.0 + Supported Min Laser Frequency: 191300 + Supported Min TX Power: -22.9 + Vendor Date Code(YYYY-MM-DD Lot): 2021-11-19 + Vendor Name: XXXX + Vendor OUI: XX-XX-XX + Vendor PN: XXX + Vendor Rev: XX + Vendor SN: 0123456789 """ test_sfp_presence_all_output = """\ @@ -464,6 +544,12 @@ def test_qsfp_dd_eeprom_adv_app(self): print(result.output) assert result.output == test_qsfp_dd_eeprom_adv_app_output + def test_cmis_info(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["info"], ["Ethernet64"]) + assert result.exit_code == 0 + assert result.output == test_cmis_eeprom_output + def test_rj45_eeprom(self): runner = CliRunner() result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["eeprom"], ["Ethernet36"]) @@ -545,12 +631,24 @@ def test_qsfp_dd_pm_with_ns(self): expected = "Ethernet0: Transceiver performance monitoring not applicable" assert result_lines == expected + def test_cmis_sfp_info_with_ns(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["info"], ["Ethernet64 -n asic1"]) + assert result.exit_code == 0 + assert "\n".join([ l.rstrip() for l in 
result.output.split('\n')]) == test_cmis_eeprom_output
+
 def test_sfp_eeprom_all(self):
 runner = CliRunner()
 result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["eeprom"])
 assert result.exit_code == 0
 assert "\n".join([ l.rstrip() for l in result.output.split('\n')]) == test_sfp_eeprom_all_output

+ def test_sfp_info_all(self):
+ runner = CliRunner()
+ result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["info"])
+ assert result.exit_code == 0
+ assert "\n".join([ l.rstrip() for l in result.output.split('\n')]) == test_sfp_eeprom_all_output
+
 def test_sfp_eeprom_dom_all(self):
 runner = CliRunner()
 result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["eeprom"], ["-d"])
diff --git a/utilities_common/sfp_helper.py b/utilities_common/sfp_helper.py
index 6ae9b85a1c..a5bf7839a9 100644
--- a/utilities_common/sfp_helper.py
+++ b/utilities_common/sfp_helper.py
@@ -19,6 +19,29 @@
 'application_advertisement': 'Application Advertisement'
 }

+QSFP_CMIS_DELTA_DATA_MAP = {
+ 'host_lane_count': 'Host Lane Count',
+ 'media_lane_count': 'Media Lane Count',
+ 'active_apsel_hostlane1': 'Active application selected code assigned to host lane 1',
+ 'active_apsel_hostlane2': 'Active application selected code assigned to host lane 2',
+ 'active_apsel_hostlane3': 'Active application selected code assigned to host lane 3',
+ 'active_apsel_hostlane4': 'Active application selected code assigned to host lane 4',
+ 'active_apsel_hostlane5': 'Active application selected code assigned to host lane 5',
+ 'active_apsel_hostlane6': 'Active application selected code assigned to host lane 6',
+ 'active_apsel_hostlane7': 'Active application selected code assigned to host lane 7',
+ 'active_apsel_hostlane8': 'Active application selected code assigned to host lane 8',
+ 'media_interface_technology': 'Media Interface Technology',
+ 'hardware_rev': 'Module Hardware Rev',
+ 'cmis_rev': 'CMIS Rev',
+ 'active_firmware': 'Active Firmware',
+ 'inactive_firmware': 'Inactive Firmware',
+ 'supported_max_tx_power': 'Supported Max TX Power',
+ 'supported_min_tx_power': 'Supported Min TX Power',
+ 'supported_max_laser_freq': 'Supported Max Laser Frequency',
+ 'supported_min_laser_freq': 'Supported Min Laser Frequency'
+}
+
+CMIS_DATA_MAP = {**QSFP_DATA_MAP, **QSFP_CMIS_DELTA_DATA_MAP}

 def covert_application_advertisement_to_output_string(indent, sfp_info_dict):
 key = 'application_advertisement'

From 9126e7f8ab66427096b16c6e305d075767be49eb Mon Sep 17 00:00:00 2001
From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com>
Date: Thu, 9 Feb 2023 05:20:11 +0200
Subject: [PATCH 13/66] [config/show] Add command to control pending FIB suppression (#2495)

* [config/show] Add command to control pending FIB suppression

What I did
I added a command config suppress-fib-pending that allows the user to enable/disable this feature. Once it is enabled, BGP will wait for a route to be programmed to HW before announcing the route to the peers. I also added a corresponding show command that prints the status of this feature.
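For context, the whole feature reduces to a single field on the DEVICE_METADATA|localhost entry in CONFIG_DB, which the diff below writes with mod_entry() and reads back with get_entry(). A minimal sketch of that round trip, assuming swsscommon's ConfigDBConnector is available on the switch (an illustration, not part of the patch):

    from swsscommon.swsscommon import ConfigDBConnector

    config_db = ConfigDBConnector()
    config_db.connect()

    # what `config suppress-fib-pending enabled` stores
    config_db.mod_entry('DEVICE_METADATA', 'localhost', {'suppress-fib-pending': 'enabled'})

    # what `show suppress-fib-pending` prints, defaulting to 'disabled'
    state = config_db.get_entry('DEVICE_METADATA', 'localhost').get('suppress-fib-pending', 'disabled')
    print(state.title())  # -> Enabled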
--- config/main.py | 16 +++++++++++-- doc/Command-Reference.md | 38 ++++++++++++++++++++++++++++++ show/main.py | 11 +++++++++ tests/suppress_pending_fib_test.py | 34 ++++++++++++++++++++++++++ 4 files changed, 97 insertions(+), 2 deletions(-) create mode 100644 tests/suppress_pending_fib_test.py diff --git a/config/main.py b/config/main.py index 5fdc177e2e..6f155ba669 100644 --- a/config/main.py +++ b/config/main.py @@ -1793,7 +1793,7 @@ def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, cfggen_namespace_option = " -n {}".format(namespace) clicommon.run_command(db_migrator + ' -o set_version' + cfggen_namespace_option) - # Keep device isolated with TSA + # Keep device isolated with TSA if traffic_shift_away: clicommon.run_command("TSA", display_cmd=True) if override_config: @@ -2006,9 +2006,21 @@ def synchronous_mode(sync_mode): config reload -y \n Option 2. systemctl restart swss""" % sync_mode) +# +# 'suppress-fib-pending' command ('config suppress-fib-pending ...') +# +@config.command('suppress-fib-pending') +@click.argument('state', metavar='', required=True, type=click.Choice(['enabled', 'disabled'])) +@clicommon.pass_db +def suppress_pending_fib(db, state): + ''' Enable or disable pending FIB suppression. Once enabled, BGP will not advertise routes that are not yet installed in the hardware ''' + + config_db = db.cfgdb + config_db.mod_entry('DEVICE_METADATA' , 'localhost', {"suppress-fib-pending" : state}) + # # 'yang_config_validation' command ('config yang_config_validation ...') -# +# @config.command('yang_config_validation') @click.argument('yang_config_validation', metavar='', required=True) def yang_config_validation(yang_config_validation): diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 2c3f4fb0b3..7ba12dfb8b 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -2055,6 +2055,26 @@ This command displays the routing policy that takes precedence over the other ro Exit routemap ``` +**show suppress-fib-pending** + +This command is used to show the status of suppress pending FIB feature. +When enabled, BGP will not advertise routes which aren't yet offloaded. + +- Usage: + ``` + show suppress-fib-pending + ``` + +- Examples: + ``` + admin@sonic:~$ show suppress-fib-pending + Enabled + ``` + ``` + admin@sonic:~$ show suppress-fib-pending + Disabled + ``` + Go Back To [Beginning of the document](#) or [Beginning of this section](#bgp) ### BGP config commands @@ -2147,6 +2167,24 @@ This command is used to remove particular IPv4 or IPv6 BGP neighbor configuratio admin@sonic:~$ sudo config bgp remove neighbor SONIC02SPINE ``` +**config suppress-fib-pending** + +This command is used to enable or disable announcements of routes not yet installed in the HW. +Once enabled, BGP will not advertise routes which aren't yet offloaded. 
+ +- Usage: + ``` + config suppress-fib-pending + ``` + +- Examples: + ``` + admin@sonic:~$ sudo config suppress-fib-pending enabled + ``` + ``` + admin@sonic:~$ sudo config suppress-fib-pending disabled + ``` + Go Back To [Beginning of the document](#) or [Beginning of this section](#bgp) ## Console diff --git a/show/main.py b/show/main.py index 117958f83a..a60e8411c2 100755 --- a/show/main.py +++ b/show/main.py @@ -2093,6 +2093,17 @@ def peer(db, peer_ip): click.echo(tabulate(bfd_body, bfd_headers)) +# 'suppress-fib-pending' subcommand ("show suppress-fib-pending") +@cli.command('suppress-fib-pending') +@clicommon.pass_db +def suppress_pending_fib(db): + """ Show the status of suppress pending FIB feature """ + + field_values = db.cfgdb.get_entry('DEVICE_METADATA', 'localhost') + state = field_values.get('suppress-fib-pending', 'disabled').title() + click.echo(state) + + # Load plugins and register them helper = util_base.UtilHelper() helper.load_and_register_plugins(plugins, cli) diff --git a/tests/suppress_pending_fib_test.py b/tests/suppress_pending_fib_test.py new file mode 100644 index 0000000000..04064d306e --- /dev/null +++ b/tests/suppress_pending_fib_test.py @@ -0,0 +1,34 @@ +from click.testing import CliRunner + +import config.main as config +import show.main as show +from utilities_common.db import Db + + +class TestSuppressFibPending: + def test_synchronous_mode(self): + runner = CliRunner() + + db = Db() + + result = runner.invoke(config.config.commands['suppress-fib-pending'], ['enabled'], obj=db) + print(result.output) + assert result.exit_code == 0 + assert db.cfgdb.get_entry('DEVICE_METADATA' , 'localhost')['suppress-fib-pending'] == 'enabled' + + result = runner.invoke(show.cli.commands['suppress-fib-pending'], obj=db) + assert result.exit_code == 0 + assert result.output == 'Enabled\n' + + result = runner.invoke(config.config.commands['suppress-fib-pending'], ['disabled'], obj=db) + print(result.output) + assert result.exit_code == 0 + assert db.cfgdb.get_entry('DEVICE_METADATA' , 'localhost')['suppress-fib-pending'] == 'disabled' + + result = runner.invoke(show.cli.commands['suppress-fib-pending'], obj=db) + assert result.exit_code == 0 + assert result.output == 'Disabled\n' + + result = runner.invoke(config.config.commands['suppress-fib-pending'], ['invalid-input'], obj=db) + print(result.output) + assert result.exit_code != 0 From 7e94c5fa9e36e8ce9c462f1a1f8249493364664b Mon Sep 17 00:00:00 2001 From: jingwenxie Date: Fri, 10 Feb 2023 09:13:51 +0800 Subject: [PATCH 14/66] [GCU] protect loopback0 from deletion (#2638) What I did Refer to sonic-net/sonic-buildimage#11171, protect loopback0 from deletion How I did it Add patch checker to fail the validation when remove loopback0 How to verify it Unit test --- generic_config_updater/gu_common.py | 11 +++-- .../generic_config_updater/gu_common_test.py | 42 +++++++++++++++++-- 2 files changed, 47 insertions(+), 6 deletions(-) diff --git a/generic_config_updater/gu_common.py b/generic_config_updater/gu_common.py index 743253ccaf..0d7a5281bb 100644 --- a/generic_config_updater/gu_common.py +++ b/generic_config_updater/gu_common.py @@ -149,9 +149,14 @@ def validate_field_operation(self, old_config, target_config): patch = jsonpatch.JsonPatch.from_diff(old_config, target_config) # illegal_operations_to_fields_map['remove'] yields a list of fields for which `remove` is an illegal operation - illegal_operations_to_fields_map = {'add':[], - 'replace': [], - 'remove': ['/PFC_WD/GLOBAL/POLL_INTERVAL', '/PFC_WD/GLOBAL']} + 
illegal_operations_to_fields_map = {
+ 'add':[],
+ 'replace': [],
+ 'remove': [
+ '/PFC_WD/GLOBAL/POLL_INTERVAL',
+ '/PFC_WD/GLOBAL',
+ '/LOOPBACK_INTERFACE/Loopback0']
+ }
 for operation, field_list in illegal_operations_to_fields_map.items():
 for field in field_list:
 if any(op['op'] == operation and field == op['path'] for op in patch):
diff --git a/tests/generic_config_updater/gu_common_test.py b/tests/generic_config_updater/gu_common_test.py
index dc18323661..7fa471ee3b 100644
--- a/tests/generic_config_updater/gu_common_test.py
+++ b/tests/generic_config_updater/gu_common_test.py
@@ -69,18 +69,54 @@ def setUp(self):
 self.config_wrapper_mock = gu_common.ConfigWrapper()
 self.config_wrapper_mock.get_config_db_as_json=MagicMock(return_value=Files.CONFIG_DB_AS_JSON)

- def test_validate_field_operation_legal(self):
+ def test_validate_field_operation_legal__pfcwd(self):
 old_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "60"}}}
 target_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "40"}}}
 config_wrapper = gu_common.ConfigWrapper()
 config_wrapper.validate_field_operation(old_config, target_config)
-
- def test_validate_field_operation_illegal(self):
+
+ def test_validate_field_operation_legal__rm_loopback1(self):
+ old_config = {
+ "LOOPBACK_INTERFACE": {
+ "Loopback0": {},
+ "Loopback0|10.1.0.32/32": {},
+ "Loopback1": {},
+ "Loopback1|10.1.0.33/32": {}
+ }
+ }
+ target_config = {
+ "LOOPBACK_INTERFACE": {
+ "Loopback0": {},
+ "Loopback0|10.1.0.32/32": {}
+ }
+ }
+ config_wrapper = gu_common.ConfigWrapper()
+ config_wrapper.validate_field_operation(old_config, target_config)
+
+ def test_validate_field_operation_illegal__pfcwd(self):
 old_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": 60}}}
 target_config = {"PFC_WD": {"GLOBAL": {}}}
 config_wrapper = gu_common.ConfigWrapper()
 self.assertRaises(gu_common.IllegalPatchOperationError, config_wrapper.validate_field_operation, old_config, target_config)

+ def test_validate_field_operation_illegal__rm_loopback0(self):
+ old_config = {
+ "LOOPBACK_INTERFACE": {
+ "Loopback0": {},
+ "Loopback0|10.1.0.32/32": {},
+ "Loopback1": {},
+ "Loopback1|10.1.0.33/32": {}
+ }
+ }
+ target_config = {
+ "LOOPBACK_INTERFACE": {
+ "Loopback1": {},
+ "Loopback1|10.1.0.33/32": {}
+ }
+ }
+ config_wrapper = gu_common.ConfigWrapper()
+ self.assertRaises(gu_common.IllegalPatchOperationError, config_wrapper.validate_field_operation, old_config, target_config)
+
 def test_ctor__default_values_set(self):
 config_wrapper = gu_common.ConfigWrapper()

From 784a15ccb51e777a16200ba59a4eab8849f3e9fa Mon Sep 17 00:00:00 2001
From: Yaqiang Zhu
Date: Fri, 10 Feb 2023 17:49:38 +0800
Subject: [PATCH 15/66] [vlan] Refresh dhcpv6_relay config while adding/deleting a vlan (#2660)

What I did
Currently, add/del a vlan doesn't change related dhcpv6_relay config, which is incorrect.
How I did it
1. Add dhcp_relay table init entry while adding vlan
2. Delete dhcp_relay related config while deleting vlan
3. Add unit tests
How to verify it
1. By unit tests
2.
install whl and run cli Signed-off-by: Yaqiang Zhu --- config/vlan.py | 43 ++++++++++++++------- tests/conftest.py | 11 ++++++ tests/mclag_test.py | 4 +- tests/vlan_test.py | 60 ++++++++++++++++++++++++++--- utilities_common/cli.py | 4 +- utilities_common/dhcp_relay_util.py | 20 ++++++++++ 6 files changed, 118 insertions(+), 24 deletions(-) create mode 100644 utilities_common/dhcp_relay_util.py diff --git a/config/vlan.py b/config/vlan.py index 7587e024a4..feb4fd2259 100644 --- a/config/vlan.py +++ b/config/vlan.py @@ -1,5 +1,6 @@ import click import utilities_common.cli as clicommon +import utilities_common.dhcp_relay_util as dhcp_relay_util from jsonpatch import JsonPatchConflict from time import sleep @@ -16,6 +17,11 @@ def vlan(): """VLAN-related configuration tasks""" pass + +def set_dhcp_relay_table(table, config_db, vlan_name, value): + config_db.set_entry(table, vlan_name, value) + + @vlan.command('add') @click.argument('vid', metavar='', required=True, type=int) @clicommon.pass_db @@ -24,7 +30,7 @@ def add_vlan(db, vid): ctx = click.get_current_context() vlan = 'Vlan{}'.format(vid) - + config_db = ValidatedConfigDBConnector(db.cfgdb) if ADHOC_VALIDATION: if not clicommon.is_vlanid_in_range(vid): @@ -32,14 +38,19 @@ def add_vlan(db, vid): if vid == 1: ctx.fail("{} is default VLAN".format(vlan)) # TODO: MISSING CONSTRAINT IN YANG MODEL - + if clicommon.check_if_vlanid_exist(db.cfgdb, vlan): # TODO: MISSING CONSTRAINT IN YANG MODEL ctx.fail("{} already exists".format(vlan)) - - try: - config_db.set_entry('VLAN', vlan, {'vlanid': str(vid)}) - except ValueError: - ctx.fail("Invalid VLAN ID {} (1-4094)".format(vid)) + if clicommon.check_if_vlanid_exist(db.cfgdb, vlan, "DHCP_RELAY"): + ctx.fail("DHCPv6 relay config for {} already exists".format(vlan)) + # set dhcpv4_relay table + set_dhcp_relay_table('VLAN', config_db, vlan, {'vlanid': str(vid)}) + + # set dhcpv6_relay table + set_dhcp_relay_table('DHCP_RELAY', config_db, vlan, {'vlanid': str(vid)}) + # We need to restart dhcp_relay service after dhcpv6_relay config change + dhcp_relay_util.handle_restart_dhcp_relay_service() + @vlan.command('del') @click.argument('vid', metavar='', required=True, type=int) @@ -67,19 +78,23 @@ def del_vlan(db, vid): ctx.fail("{} can not be removed. First remove IP addresses assigned to this VLAN".format(vlan)) keys = [ (k, v) for k, v in db.cfgdb.get_table('VLAN_MEMBER') if k == 'Vlan{}'.format(vid) ] - + if keys: # TODO: MISSING CONSTRAINT IN YANG MODEL ctx.fail("VLAN ID {} can not be removed. First remove all members assigned to this VLAN.".format(vid)) - + vxlan_table = db.cfgdb.get_table('VXLAN_TUNNEL_MAP') for vxmap_key, vxmap_data in vxlan_table.items(): if vxmap_data['vlan'] == 'Vlan{}'.format(vid): ctx.fail("vlan: {} can not be removed. 
First remove vxlan mapping '{}' assigned to VLAN".format(vid, '|'.join(vxmap_key)) ) - - try: - config_db.set_entry('VLAN', 'Vlan{}'.format(vid), None) - except JsonPatchConflict: - ctx.fail("{} does not exist".format(vlan)) + + # set dhcpv4_relay table + set_dhcp_relay_table('VLAN', config_db, vlan, None) + + # set dhcpv6_relay table + set_dhcp_relay_table('DHCP_RELAY', config_db, vlan, None) + # We need to restart dhcp_relay service after dhcpv6_relay config change + dhcp_relay_util.handle_restart_dhcp_relay_service() + def restart_ndppd(): verify_swss_running_cmd = "docker container inspect -f '{{.State.Status}}' swss" diff --git a/tests/conftest.py b/tests/conftest.py index bf4c2a401f..b6b454ba09 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -20,6 +20,7 @@ ) from . import config_int_ip_common import utilities_common.constants as constants +import config.main as config test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) @@ -356,3 +357,13 @@ def setup_fib_commands(): import show.main as show return show + +@pytest.fixture(scope='function') +def mock_restart_dhcp_relay_service(): + print("We are mocking restart dhcp_relay") + origin_func = config.vlan.dhcp_relay_util.handle_restart_dhcp_relay_service + config.vlan.dhcp_relay_util.handle_restart_dhcp_relay_service = mock.MagicMock(return_value=0) + + yield + + config.vlan.dhcp_relay_util.handle_restart_dhcp_relay_service = origin_func diff --git a/tests/mclag_test.py b/tests/mclag_test.py index d68c25a82e..a653174000 100644 --- a/tests/mclag_test.py +++ b/tests/mclag_test.py @@ -448,7 +448,7 @@ def test_mclag_add_member(self): - def test_mclag_add_unique_ip(self): + def test_mclag_add_unique_ip(self, mock_restart_dhcp_relay_service): runner = CliRunner() db = Db() obj = {'db':db.cfgdb} @@ -483,7 +483,7 @@ def test_mclag_add_unique_ip(self): keys = db.cfgdb.get_keys('MCLAG_UNIQUE_IP') assert MCLAG_UNIQUE_IP_VLAN not in keys, "unique ip not conifgured" - def test_mclag_add_unique_ip_non_default_vrf(self): + def test_mclag_add_unique_ip_non_default_vrf(self, mock_restart_dhcp_relay_service): runner = CliRunner() db = Db() obj = {'db':db.cfgdb} diff --git a/tests/vlan_test.py b/tests/vlan_test.py index 66ec3606cf..85673c5020 100644 --- a/tests/vlan_test.py +++ b/tests/vlan_test.py @@ -1,5 +1,6 @@ import os import traceback +import pytest from unittest import mock from click.testing import CliRunner @@ -10,6 +11,18 @@ from importlib import reload import utilities_common.bgp_util as bgp_util +IP_VERSION_PARAMS_MAP = { + "ipv4": { + "table": "VLAN" + }, + "ipv6": { + "table": "DHCP_RELAY" + } +} +DHCP_RELAY_TABLE_ENTRY = { + "vlanid": "1001" +} + show_vlan_brief_output="""\ +-----------+-----------------+-----------------+----------------+-------------+ | VLAN ID | IP Address | Ports | Port Tagging | Proxy ARP | @@ -143,6 +156,8 @@ | 4000 | | PortChannel1001 | tagged | disabled | +-----------+-----------------+-----------------+----------------+-------------+ """ + + class TestVlan(object): _old_run_bgp_command = None @classmethod @@ -319,7 +334,7 @@ def test_config_vlan_add_rif_portchannel_member(self): assert result.exit_code != 0 assert "Error: PortChannel0001 is a router interface!" 
in result.output - def test_config_vlan_with_vxlanmap_del_vlan(self): + def test_config_vlan_with_vxlanmap_del_vlan(self, mock_restart_dhcp_relay_service): runner = CliRunner() db = Db() obj = {'config_db': db.cfgdb} @@ -343,7 +358,7 @@ def test_config_vlan_with_vxlanmap_del_vlan(self): assert result.exit_code != 0 assert "Error: vlan: 1027 can not be removed. First remove vxlan mapping" in result.output - def test_config_vlan_del_vlan(self): + def test_config_vlan_del_vlan(self, mock_restart_dhcp_relay_service): runner = CliRunner() db = Db() obj = {'config_db':db.cfgdb} @@ -401,7 +416,7 @@ def test_config_vlan_del_nonexist_vlan_member(self): assert result.exit_code != 0 assert "Error: Ethernet0 is not a member of Vlan1000" in result.output - def test_config_add_del_vlan_and_vlan_member(self): + def test_config_add_del_vlan_and_vlan_member(self, mock_restart_dhcp_relay_service): runner = CliRunner() db = Db() @@ -444,7 +459,7 @@ def test_config_add_del_vlan_and_vlan_member(self): assert result.exit_code == 0 assert result.output == show_vlan_brief_output - def test_config_add_del_vlan_and_vlan_member_in_alias_mode(self): + def test_config_add_del_vlan_and_vlan_member_in_alias_mode(self, mock_restart_dhcp_relay_service): runner = CliRunner() db = Db() @@ -521,7 +536,7 @@ def test_config_vlan_proxy_arp_with_nonexist_vlan_intf(self): assert result.exit_code != 0 assert "Interface Vlan1001 does not exist" in result.output - def test_config_vlan_proxy_arp_enable(self): + def test_config_vlan_proxy_arp_enable(self, mock_restart_dhcp_relay_service): runner = CliRunner() db = Db() @@ -533,7 +548,7 @@ def test_config_vlan_proxy_arp_enable(self): assert result.exit_code == 0 assert db.cfgdb.get_entry("VLAN_INTERFACE", "Vlan1000") == {"proxy_arp": "enabled"} - def test_config_vlan_proxy_arp_disable(self): + def test_config_vlan_proxy_arp_disable(self, mock_restart_dhcp_relay_service): runner = CliRunner() db = Db() @@ -584,6 +599,39 @@ def test_config_vlan_add_member_of_portchannel(self): assert result.exit_code != 0 assert "Error: Ethernet32 is part of portchannel!" 
in result.output

+ @pytest.mark.parametrize("ip_version", ["ipv4", "ipv6"])
+ def test_config_add_del_vlan_dhcp_relay(self, ip_version, mock_restart_dhcp_relay_service):
+ runner = CliRunner()
+ db = Db()
+
+ # add vlan 1001
+ result = runner.invoke(config.config.commands["vlan"].commands["add"], ["1001"], obj=db)
+ print(result.exit_code)
+ print(result.output)
+ assert result.exit_code == 0
+
+ assert db.cfgdb.get_entry(IP_VERSION_PARAMS_MAP[ip_version]["table"], "Vlan1001") == DHCP_RELAY_TABLE_ENTRY
+
+ # del vlan 1001
+ result = runner.invoke(config.config.commands["vlan"].commands["del"], ["1001"], obj=db)
+ print(result.exit_code)
+ print(result.output)
+
+ assert "Vlan1001" not in db.cfgdb.get_keys(IP_VERSION_PARAMS_MAP[ip_version]["table"])
+
+ @pytest.mark.parametrize("ip_version", ["ipv6"])
+ def test_config_add_exist_vlan_dhcp_relay(self, ip_version):
+ runner = CliRunner()
+ db = Db()
+
+ db.cfgdb.set_entry("DHCP_RELAY", "Vlan1001", {"vlanid": "1001"})
+ # add vlan 1001
+ result = runner.invoke(config.config.commands["vlan"].commands["add"], ["1001"], obj=db)
+ print(result.exit_code)
+ print(result.output)
+ assert result.exit_code != 0
+ assert "DHCPv6 relay config for Vlan1001 already exists" in result.output
+
 @classmethod
 def teardown_class(cls):
 os.environ['UTILITIES_UNIT_TESTING'] = "0"
diff --git a/utilities_common/cli.py b/utilities_common/cli.py
index ca9e061078..45b2cc5f3f 100644
--- a/utilities_common/cli.py
+++ b/utilities_common/cli.py
@@ -251,10 +251,10 @@ def is_vlanid_in_range(vid):
 return False

-def check_if_vlanid_exist(config_db, vlan):
+def check_if_vlanid_exist(config_db, vlan, table_name='VLAN'):
 """Check if vlan id exits in the config db or ot"""

- if len(config_db.get_entry('VLAN', vlan)) != 0:
+ if len(config_db.get_entry(table_name, vlan)) != 0:
 return True

 return False
diff --git a/utilities_common/dhcp_relay_util.py b/utilities_common/dhcp_relay_util.py
new file mode 100644
index 0000000000..b9c0b4e20f
--- /dev/null
+++ b/utilities_common/dhcp_relay_util.py
@@ -0,0 +1,20 @@
+import click
+import utilities_common.cli as clicommon
+
+
+def restart_dhcp_relay_service():
+ """
+ Restart dhcp_relay service
+ """
+ click.echo("Restarting DHCP relay service...")
+ clicommon.run_command("systemctl stop dhcp_relay", display_cmd=False)
+ clicommon.run_command("systemctl reset-failed dhcp_relay", display_cmd=False)
+ clicommon.run_command("systemctl start dhcp_relay", display_cmd=False)
+
+
+def handle_restart_dhcp_relay_service():
+ try:
+ restart_dhcp_relay_service()
+ except SystemExit as e:
+ ctx = click.get_current_context()
+ ctx.fail("Restart service dhcp_relay failed with error {}".format(e))

From ee6d213f768e035340949a7f1b70c564c970dacb Mon Sep 17 00:00:00 2001
From: Vadym Hlushko <62022266+vadymhlushko-mlnx@users.noreply.github.com>
Date: Mon, 13 Feb 2023 13:03:12 +0200
Subject: [PATCH 16/66] [generate_dump] Revert "Revert generate_dump optimization PR's #2599", add fixes for empty /dump folder and symbolic links (#2645)

- What I did
0ee19e5 Revert Revert the show-techsupport optimization PR's #2599
c8940ad Add a fix for the empty /dump folder inside the final tar archive generated by the show techsupport CLI command.
8a8668c Add a fix to not follow the symbolic links to avoid duplicate files inside the final tar archive generated by the show techsupport CLI command.
- How I did it
Modify the scripts/generate_dump script.
- How to verify it
1.
Manual verification do the show techsupport CLI command and save output original.tar.gz (with original generate_dump script) do the show techsupport CLI command and save output fixes.tar.gz (with the generate_dump script modified by this PR) unpack both archives original.tar.gz and fixes.tar.gz compare both directories with ncdu & diff --brief --recursive original fixes Linux utilities 2. Run the community tests sonic-mgmt/tests/show_techsupport Signed-off-by: vadymhlushko-mlnx --- scripts/generate_dump | 277 ++++++++++++++++++++++-------------------- 1 file changed, 145 insertions(+), 132 deletions(-) diff --git a/scripts/generate_dump b/scripts/generate_dump index 4400f4e984..2a7172f4c7 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -106,7 +106,6 @@ save_bcmcmd() { local filename=$2 local filepath="${LOGDIR}/$filename" local do_gzip=${3:-false} - local tarpath="${BASE}/dump/$filename" local timeout_cmd="timeout --foreground ${TIMEOUT_MIN}m" local cmd=$(escape_quotes "$cmd") if [ ! -d $LOGDIR ]; then @@ -141,12 +140,9 @@ save_bcmcmd() { fi if $do_gzip; then gzip ${filepath} 2>/dev/null - tarpath="${tarpath}.gz" filepath="${filepath}.gz" fi - ($TAR $V -rhf $TARFILE -C $DUMPDIR "$tarpath" \ - || abort "${EXT_TAR_FAILED}" "tar append operation failed. Aborting to prevent data loss.") \ - && $RM $V -rf "$filepath" + end_t=$(date +%s%3N) echo "[ save_bcmcmd:$cmd ] : $(($end_t-$start_t)) msec" >> $TECHSUPPORT_TIME_INFO } @@ -180,7 +176,7 @@ save_bcmcmd_all_ns() { } ############################################################################### -# Runs a comamnd and saves its output to the incrementally built tar. +# Runs a comamnd and saves its output to the file. # Command gets timedout if it runs for more than TIMEOUT_MIN minutes. # Globals: # LOGDIR @@ -208,7 +204,6 @@ save_cmd() { local filename=$2 local filepath="${LOGDIR}/$filename" local do_gzip=${3:-false} - local tarpath="${BASE}/dump/$filename" local timeout_cmd="timeout --foreground ${TIMEOUT_MIN}m" local cleanup_method=${4:-dummy_cleanup_method} local redirect='&>' @@ -230,7 +225,6 @@ save_cmd() { # as one argument, e.g. vtysh -c "COMMAND HERE" needs to have # "COMMAND HERE" bunched together as 1 arg to vtysh -c if $do_gzip; then - tarpath="${tarpath}.gz" filepath="${filepath}.gz" # cleanup_method will run in a sub-shell, need declare it first local cmds="$cleanup_method_declration; $cmd $redirect_eval | $cleanup_method | gzip -c > '${filepath}'" @@ -260,13 +254,34 @@ save_cmd() { fi fi - ($TAR $V -rhf $TARFILE -C $DUMPDIR "$tarpath" \ - || abort "${EXT_TAR_FAILED}" "tar append operation failed. Aborting to prevent data loss.") \ - && $RM $V -rf "$filepath" end_t=$(date +%s%3N) echo "[ save_cmd:$cmd ] : $(($end_t-$start_t)) msec" >> $TECHSUPPORT_TIME_INFO } +############################################################################### +# Save all collected data to tar archive. +# Globals: +# DUMPDIR +# TAR +# TARFILE +# V +# BASE +# Arguments: +# None +# Returns: +# None +############################################################################### +save_to_tar() { + trap 'handle_error $? $LINENO' ERR + local start_t=$(date +%s%3N) + local end_t=0 + + $TAR $V -rhf $TARFILE -C $DUMPDIR "$BASE" + + end_t=$(date +%s%3N) + echo "[ save_to_tar ] : $(($end_t-$start_t)) msec" >> $TECHSUPPORT_TIME_INFO +} + ############################################################################### # Dummy cleanup method. 
# Globals: @@ -407,7 +422,7 @@ get_vtysh_namespace() { ############################################################################### # Runs a vtysh command in all namesapces for a multi ASIC platform, and in # default (host) namespace in single ASIC platforms. Saves its output to the -# incrementally built tar. +# file. # Globals: # None # Arguments: @@ -437,7 +452,7 @@ save_vtysh() { } ############################################################################### -# Runs an ip command and saves its output to the incrementally built tar. +# Runs an ip command and saves its output to the file. # Globals: # None # Arguments: @@ -456,7 +471,7 @@ save_ip() { } ############################################################################### -# Runs a bridge command and saves its output to the incrementally built tar. +# Runs a bridge command and saves its output to the file. # Globals: # None # Arguments: @@ -771,8 +786,8 @@ save_proc() { ( [ -e $f ] && $CP $V -r $f $TARDIR/proc ) || echo "$f not found" > $TARDIR/$f fi done - $TAR $V -rhf $TARFILE -C $DUMPDIR --mode=+rw $BASE/proc - $RM $V -rf $TARDIR/proc + + chmod ugo+rw -R $DUMPDIR/$BASE/proc } ############################################################################### @@ -823,9 +838,7 @@ save_proc_stats() { ( $CP $V -r $stats_file $TARDIR/proc_stats ) || echo "$stats_file error" > $TARDIR/$stats_file fi - $TAR $V -rhf $TARFILE -C $DUMPDIR --mode=+rw $BASE/proc_stats - $RM $V -rf $TARDIR/proc_stats - $RM -rf $stats_file + chmod ugo+rw -R $DUMPDIR/$BASE/proc_stats } ############################################################################### @@ -907,6 +920,7 @@ save_platform_info() { # filename: the full path of the file to save # base_dir: the directory in $TARDIR/ to stage the file # do_gzip: (OPTIONAL) true or false. Should the output be gzipped +# do_tar_append: (OPTIONAL) true or false. Should the output be added to final tar archive # Returns: # None ############################################################################### @@ -919,7 +933,7 @@ save_file() { local gz_path="$TARDIR/$supp_dir/$(basename $orig_path)" local tar_path="${BASE}/$supp_dir/$(basename $orig_path)" local do_gzip=${3:-true} - local do_tar_append=${4:-true} + local do_tar_append=${4:-false} if [ ! -d "$TARDIR/$supp_dir" ]; then $MKDIR $V -p "$TARDIR/$supp_dir" fi @@ -945,6 +959,7 @@ save_file() { || abort "${EXT_PROCFS_SAVE_FAILED}" "tar append operation failed. Aborting to prevent data loss.") \ && $RM $V -f "$gz_path" fi + end_t=$(date +%s%3N) echo "[ save_file:$orig_path] : $(($end_t-$start_t)) msec" >> $TECHSUPPORT_TIME_INFO } @@ -1134,9 +1149,9 @@ collect_mellanox_dfw_dumps() { ${CMD_PREFIX}save_symlink ${file} sai_sdk_dump log else if [ ! 
-z "${file##*.gz}" ]; then - ${CMD_PREFIX}save_file ${file} sai_sdk_dump true + ${CMD_PREFIX}save_file ${file} sai_sdk_dump true true else - ${CMD_PREFIX}save_file ${file} sai_sdk_dump false + ${CMD_PREFIX}save_file ${file} sai_sdk_dump false true fi fi done @@ -1296,7 +1311,7 @@ collect_barefoot() { done for file in $(find /tmp/bf_logs -type f); do - save_file "${file}" log true true + save_file "${file}" log true done } @@ -1352,16 +1367,12 @@ save_log_files() { # don't gzip already-gzipped log files :) # do not append the individual files to the main tarball if [ -z "${file##*.gz}" ]; then - save_file $file log false false + save_file $file log false else - save_file $file log true false + save_file $file log true fi done - # Append the log folder to the main tarball - ($TAR $V -rhf $TARFILE -C $DUMPDIR ${BASE}/log \ - || abort "${EXT_TAR_FAILED}" "tar append operation failed. Aborting for safety") \ - && $RM $V -rf $TARDIR/log end_t=$(date +%s%3N) echo "[ TAR /var/log Files ] : $(($end_t-$start_t)) msec" >> $TECHSUPPORT_TIME_INFO @@ -1386,11 +1397,7 @@ save_warmboot_files() { else mkdir -p $TARDIR $CP $V -rf /host/warmboot $TARDIR - - ($TAR $V --warning=no-file-removed -rhf $TARFILE -C $DUMPDIR --mode=+rw \ - $BASE/warmboot \ - || abort "${EXT_TAR_FAILED}" "Tar append operation failed. Aborting for safety.") \ - && $RM $V -rf $TARDIR + chmod ugo+rw -R $DUMPDIR/$BASE/warmboot fi end_t=$(date +%s%3N) echo "[ Warm-boot Files ] : $(($end_t-$start_t)) msec" >> $TECHSUPPORT_TIME_INFO @@ -1456,9 +1463,9 @@ save_sai_failure_dump(){ ${CMD_PREFIX}save_symlink ${file} sai_failure_dump log else if [ ! -z "${file##*.gz}" ]; then - ${CMD_PREFIX}save_file ${file} sai_failure_dump true + ${CMD_PREFIX}save_file ${file} sai_failure_dump true true else - ${CMD_PREFIX}save_file ${file} sai_failure_dump false + ${CMD_PREFIX}save_file ${file} sai_failure_dump false true fi fi #Clean up the file once its part of tech support @@ -1584,102 +1591,120 @@ main() { /proc/pagetypeinfo /proc/partitions /proc/sched_debug /proc/slabinfo \ /proc/softirqs /proc/stat /proc/swaps /proc/sysvipc /proc/timer_list \ /proc/uptime /proc/version /proc/vmallocinfo /proc/vmstat \ - /proc/zoneinfo \ - || abort "${EXT_PROCFS_SAVE_FAILED}" "Proc saving operation failed. Aborting for safety." - save_proc_stats + /proc/zoneinfo & + save_proc_stats & end_t=$(date +%s%3N) echo "[ Capture Proc State ] : $(($end_t-$start_t)) msec" >> $TECHSUPPORT_TIME_INFO + wait # Save all the processes within each docker - save_cmd "show services" services.summary + save_cmd "show services" services.summary & # Save reboot cause information - save_cmd "show reboot-cause" reboot.cause + save_cmd "show reboot-cause" reboot.cause & + wait local asic="$(/usr/local/bin/sonic-cfggen -y /etc/sonic/sonic_version.yml -v asic_type)" local device_type=`sonic-db-cli CONFIG_DB hget 'DEVICE_METADATA|localhost' type` # 1st counter snapshot early. Need 2 snapshots to make sense of counters trend. 
save_counter_snapshot $asic 1 - save_cmd "systemd-analyze blame" "systemd.analyze.blame" - save_cmd "systemd-analyze dump" "systemd.analyze.dump" - save_cmd "systemd-analyze plot" "systemd.analyze.plot.svg" - - save_platform_info - - save_cmd "show vlan brief" "vlan.summary" - save_cmd "show version" "version" - save_cmd "show platform summary" "platform.summary" - save_cmd "cat /host/machine.conf" "machine.conf" - save_cmd "cat /boot/config-$(uname -r)" "boot.conf" - save_cmd "docker stats --no-stream" "docker.stats" - - save_cmd "sensors" "sensors" - save_cmd "lspci -vvv -xx" "lspci" - save_cmd "lsusb -v" "lsusb" - save_cmd "sysctl -a" "sysctl" - - save_ip_info - save_bridge_info - - save_frr_info - save_bgp_info - save_evpn_info - - save_cmd "show interface status -d all" "interface.status" - save_cmd "show interface transceiver presence" "interface.xcvrs.presence" - save_cmd "show interface transceiver eeprom --dom" "interface.xcvrs.eeprom" - save_cmd "show ip interface -d all" "ip.interface" - - save_cmd "lldpctl" "lldpctl" + save_cmd "systemd-analyze blame" "systemd.analyze.blame" & + save_cmd "systemd-analyze dump" "systemd.analyze.dump" & + save_cmd "systemd-analyze plot" "systemd.analyze.plot.svg" & + wait + + save_platform_info & + save_cmd "show vlan brief" "vlan.summary" & + save_cmd "show version" "version" & + save_cmd "show platform summary" "platform.summary" & + wait + + save_cmd "cat /host/machine.conf" "machine.conf" & + save_cmd "cat /boot/config-$(uname -r)" "boot.conf" & + save_cmd "docker stats --no-stream" "docker.stats" & + wait + + save_cmd "sensors" "sensors" & + save_cmd "lspci -vvv -xx" "lspci" & + save_cmd "lsusb -v" "lsusb" & + save_cmd "sysctl -a" "sysctl" & + wait + + save_ip_info & + save_bridge_info & + wait + + save_frr_info & + + save_bgp_info & + save_evpn_info & + wait + + save_cmd "show interface status -d all" "interface.status" & + save_cmd "show interface transceiver presence" "interface.xcvrs.presence" & + save_cmd "show interface transceiver eeprom --dom" "interface.xcvrs.eeprom" & + save_cmd "show ip interface -d all" "ip.interface" & + wait + + save_cmd "lldpctl" "lldpctl" & if [[ ( "$NUM_ASICS" > 1 ) ]]; then for (( i=0; i<$NUM_ASICS; i++ )) do - save_cmd "docker exec lldp$i lldpcli show statistics" "lldp$i.statistics" - save_cmd "docker logs bgp$i" "docker.bgp$i.log" - save_cmd "docker logs swss$i" "docker.swss$i.log" + save_cmd "docker exec lldp$i lldpcli show statistics" "lldp$i.statistics" & + save_cmd "docker logs bgp$i" "docker.bgp$i.log" & + save_cmd "docker logs swss$i" "docker.swss$i.log" & done else - save_cmd "docker exec lldp lldpcli show statistics" "lldp.statistics" - save_cmd "docker logs bgp" "docker.bgp.log" - save_cmd "docker logs swss" "docker.swss.log" + save_cmd "docker exec lldp lldpcli show statistics" "lldp.statistics" & + save_cmd "docker logs bgp" "docker.bgp.log" & + save_cmd "docker logs swss" "docker.swss.log" & fi - - save_cmd "ps aux" "ps.aux" - save_cmd "top -b -n 1" "top" - save_cmd "free" "free" - save_cmd "vmstat 1 5" "vmstat" - save_cmd "vmstat -m" "vmstat.m" - save_cmd "vmstat -s" "vmstat.s" - save_cmd "mount" "mount" - save_cmd "df" "df" - save_cmd "dmesg" "dmesg" - - save_nat_info - save_bfd_info - save_redis_info + wait + + save_cmd "ps aux" "ps.aux" & + save_cmd "top -b -n 1" "top" & + save_cmd "free" "free" & + wait + save_cmd "vmstat 1 5" "vmstat" & + save_cmd "vmstat -m" "vmstat.m" & + save_cmd "vmstat -s" "vmstat.s" & + wait + save_cmd "mount" "mount" & + save_cmd "df" "df" & + save_cmd "dmesg" 
"dmesg" & + wait + + save_nat_info & + save_bfd_info & + wait + save_redis_info & if $DEBUG_DUMP then - save_dump_state_all_ns + save_dump_state_all_ns & fi + wait - save_cmd "docker ps -a" "docker.ps" - save_cmd "docker top pmon" "docker.pmon" + save_cmd "docker ps -a" "docker.ps" & + save_cmd "docker top pmon" "docker.pmon" & if [[ -d ${PLUGINS_DIR} ]]; then local -r dump_plugins="$(find ${PLUGINS_DIR} -type f -executable)" for plugin in $dump_plugins; do # save stdout output of plugin and gzip it - save_cmd "$plugin" "$(basename $plugin)" true + save_cmd "$plugin" "$(basename $plugin)" true & done fi + wait - save_cmd "dpkg -l" "dpkg" - save_cmd "who -a" "who" - save_cmd "swapon -s" "swapon" - save_cmd "hdparm -i /dev/sda" "hdparm" - save_cmd "ps -AwwL -o user,pid,lwp,ppid,nlwp,pcpu,pri,nice,vsize,rss,tty,stat,wchan:12,start,bsdtime,command" "ps.extended" + save_cmd "dpkg -l" "dpkg" & + save_cmd "who -a" "who" & + save_cmd "swapon -s" "swapon" & + wait + save_cmd "hdparm -i /dev/sda" "hdparm" & + save_cmd "ps -AwwL -o user,pid,lwp,ppid,nlwp,pcpu,pri,nice,vsize,rss,tty,stat,wchan:12,start,bsdtime,command" "ps.extended" & + wait if [[ "$device_type" != "SpineRouter" ]]; then save_saidump @@ -1704,9 +1729,6 @@ main() { # 2nd counter snapshot late. Need 2 snapshots to make sense of counters trend. save_counter_snapshot $asic 2 - $RM $V -rf $TARDIR - $MKDIR $V -p $TARDIR - $MKDIR $V -p $LOGDIR # Copying the /etc files to a directory and then tar it $CP -r /etc $TARDIR/etc rm_list=$(find -L $TARDIR/etc -maxdepth 5 -type l) @@ -1718,34 +1740,25 @@ main() { # Remove secret from /etc files before tar remove_secret_from_etc_files $TARDIR - start_t=$(date +%s%3N) - ($TAR $V --warning=no-file-removed -rhf $TARFILE -C $DUMPDIR --mode=+rw \ - --exclude="etc/alternatives" \ - --exclude="*/etc/passwd*" \ - --exclude="*/etc/shadow*" \ - --exclude="*/etc/group*" \ - --exclude="*/etc/gshadow*" \ - --exclude="*/etc/ssh*" \ - --exclude="*get_creds*" \ - --exclude="*snmpd.conf*" \ - --exclude="*/etc/mlnx" \ - --exclude="*/etc/mft" \ - --exclude="*/etc/sonic/*.cer" \ - --exclude="*/etc/sonic/*.crt" \ - --exclude="*/etc/sonic/*.pem" \ - --exclude="*/etc/sonic/*.key" \ - --exclude="*/etc/ssl/*.pem" \ - --exclude="*/etc/ssl/certs/*" \ - --exclude="*/etc/ssl/private/*" \ - $BASE/etc \ - || abort "${EXT_TAR_FAILED}" "Tar append operation failed. Aborting for safety.") \ - && $RM $V -rf $TARDIR - end_t=$(date +%s%3N) - echo "[ TAR /etc Files ] : $(($end_t-$start_t)) msec" >> $TECHSUPPORT_TIME_INFO + # Remove unecessary files + $RM $V -rf $TARDIR/etc/alternatives $TARDIR/etc/passwd* \ + $TARDIR/etc/shadow* $TARDIR/etc/group* $TARDIR/etc/gshadow* \ + $TARDIR/etc/ssh* $TARDIR/etc/mlnx $TARDIR/etc/mft \ + $TARDIR/etc/ssl/certs/* $TARDIR/etc/ssl/private/* + rm_list=$(find -L $TARDIR -type f \( -iname \*.cer -o -iname \*.crt -o \ + -iname \*.pem -o -iname \*.key -o -iname \*snmpd.conf\* -o -iname \*get_creds\* \)) + if [ ! 
-z "$rm_list" ] + then + rm $rm_list + fi + + save_log_files & + save_crash_files & + save_warmboot_files & + wait + + save_to_tar - save_log_files - save_crash_files - save_warmboot_files save_sai_failure_dump if [[ "$asic" = "mellanox" ]]; then @@ -1760,7 +1773,7 @@ main() { ############################################################################### finalize() { # Save techsupport timing profile info - save_file $TECHSUPPORT_TIME_INFO log false + save_file $TECHSUPPORT_TIME_INFO log false true if $DO_COMPRESS; then RC=0 From 2a6a06cfc94853f0a74fa59c506535340649f9c8 Mon Sep 17 00:00:00 2001 From: wenyiz2021 <91497961+wenyiz2021@users.noreply.github.com> Date: Mon, 13 Feb 2023 11:04:58 -0800 Subject: [PATCH 17/66] [portstat CLI] don't print reminder if use json format (#2670) * no print if use json format * add print for chassis --- scripts/portstat | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/scripts/portstat b/scripts/portstat index 0e3b9c438c..27696729e9 100755 --- a/scripts/portstat +++ b/scripts/portstat @@ -17,6 +17,7 @@ from collections import OrderedDict, namedtuple from natsort import natsorted from tabulate import tabulate from sonic_py_common import multi_asic +from sonic_py_common import device_info # mock the redis for unit test purposes # try: @@ -337,8 +338,8 @@ class Portstat(object): print(table_as_json(table, header)) else: print(tabulate(table, header, tablefmt='simple', stralign='right')) - if multi_asic.is_multi_asic(): - print("\nReminder: Please execute 'show interface counters -d all' to include internal links\n") + if multi_asic.is_multi_asic() or device_info.is_chassis(): + print("\nReminder: Please execute 'show interface counters -d all' to include internal links\n") def cnstat_intf_diff_print(self, cnstat_new_dict, cnstat_old_dict, intf_list): """ @@ -555,8 +556,8 @@ class Portstat(object): print(table_as_json(table, header)) else: print(tabulate(table, header, tablefmt='simple', stralign='right')) - if multi_asic.is_multi_asic(): - print("\nReminder: Please execute 'show interface counters -d all' to include internal links\n") + if multi_asic.is_multi_asic() or device_info.is_chassis(): + print("\nReminder: Please execute 'show interface counters -d all' to include internal links\n") def main(): parser = argparse.ArgumentParser(description='Display the ports state and counters', From 556d0c6885bd9bca5d8c78c96b2daab8c83a66cc Mon Sep 17 00:00:00 2001 From: Yaqiang Zhu Date: Tue, 14 Feb 2023 09:18:37 +0800 Subject: [PATCH 18/66] [doc] Add docs for dhcp_relay show/clear cli (#2649) What I did Add docs for dhcp_realy show/clear cli How I did it Add docs for dhcp_realy show/clear cli Signed-off-by: Yaqiang Zhu --- doc/Command-Reference.md | 93 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 7ba12dfb8b..dbc7966c8d 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -40,6 +40,8 @@ * [Console connect commands](#console-connect-commands) * [Console clear commands](#console-clear-commands) * [DHCP Relay](#dhcp-relay) + * [DHCP Relay show commands](#dhcp-relay-show-commands) + * [DHCP Relay clear commands](#dhcp-relay-clear-commands) * [DHCP Relay config commands](#dhcp-relay-config-commands) * [Drop Counters](#drop-counters) * [Drop Counter show commands](#drop-counters-show-commands) @@ -2407,6 +2409,97 @@ Go Back To [Beginning of the document](#) or [Beginning of this section](#consol ## DHCP Relay +### DHCP Relay show 
+
+This sub-section of commands is used to show the DHCP Relay IP address(es) in a VLAN interface and show dhcpv6_relay counters of a VLAN.
+
+**show dhcp_relay ipv4 helper**
+
+This command is used to show ipv4 dhcp_relay helper.
+
+- Usage:
+ ```
+ show dhcp_relay ipv4 helper
+ ```
+
+- Example:
+ ```
+ admin@sonic:~$ show dhcp_relay ipv4 helper
+ -------- ---------
+ Vlan1000 192.0.0.1
+          192.0.0.2
+ -------- ---------
+ ```
+
+**show dhcp_relay ipv6 destination**
+
+This command is used to show ipv6 dhcp_relay destination.
+
+- Usage:
+ ```
+ show dhcp_relay ipv6 destination
+ ```
+
+- Example:
+ ```
+ admin@sonic:~$ show dhcp_relay ipv6 destination
+ -------- ------------
+ Vlan1000 fc02:2000::1
+          fc02:2000::2
+          fc02:2000::3
+          fc02:2000::4
+ -------- ------------
+ ```
+
+**show dhcp_relay ipv6 counters**
+
+This command is used to show ipv6 dhcp_relay counters.
+
+- Usage:
+ ```
+ show dhcp_relay ipv6 counters
+ ```
+
+- Example:
+ ```
+ admin@sonic:~$ show dhcp_relay ipv6 counters
+      Message Type    Vlan1000
+ -------------------  ----------
+             Unknown           0
+             Solicit           0
+           Advertise           0
+             Request           5
+             Confirm           0
+               Renew           0
+              Rebind           0
+               Reply           0
+             Release           0
+             Decline           0
+         Reconfigure           0
+ Information-Request           0
+       Relay-Forward           0
+         Relay-Reply           0
+           Malformed           0
+ ```
+
+### DHCP Relay clear commands
+
+This sub-section of commands is used to clear the DHCP Relay counters.
+
+**sonic-clear dhcp_relay ipv6 counter**
+
+This command is used to clear ipv6 dhcp_relay counters.
+
+- Usage:
+ ```
+ sonic-clear dhcp_relay ipv6 counter [-i ]
+ ```
+
+- Example:
+ ```
+ admin@sonic:~$ sudo sonic-clear dhcp_relay ipv6 counters
+ ```
+
### DHCP Relay config commands

This sub-section of commands is used to add or remove the DHCP Relay Destination IP address(es) for a VLAN interface.

From 36824e40c1592ba274faae6c2bfef38d58b250a7 Mon Sep 17 00:00:00 2001
From: davidpil2002 <91657985+davidpil2002@users.noreply.github.com>
Date: Tue, 14 Feb 2023 11:38:53 +0200
Subject: [PATCH 19/66] Add support of secure warm-boot (#2532)

- What I did
Add support of secure warm-boot to SONiC. Basically, warm boot supports loading a new kernel without doing a full/cold boot, by loading the new kernel and executing it with the kexec Linux command. As a result, even when the Secure Boot feature is enabled, a malicious user can still load an unsigned kernel; to avoid that we added support for secure warm boot. More description of this feature can be found in the Secure Boot HLD: sonic-net/SONiC#1028
- How I did it
In general, Linux supports it, so I enabled this support with the following steps: I added some special flags to the Linux kernel when the user builds sonic-buildimage with the secure boot feature enabled, and I added the "-s" flag to the kexec command. Note: more details in the HLD above.
- How to verify it
* Good flow: manually install a new secure image with sonic-installer (a SONiC image that was built with the Secure Boot flag enabled); after the secure image is installed, do: warm-reboot. Check now that the new kernel is really loaded and switched.
* Bad flow: Do the same steps 1-2 as the good flow but with an insecure image (a SONiC image that was built without Secure Boot enabled). After the insecure image is installed and warm-boot is triggered, you should get an error that the new unsigned kernel from the unsecured image was not loaded.
Automation test - TBD
---
 scripts/fast-reboot | 21 +++++++++++++++++++--
 1 file changed, 19 insertions(+), 2 deletions(-)

diff --git a/scripts/fast-reboot b/scripts/fast-reboot
index bfdc191b78..604fddf9ec 100755
--- a/scripts/fast-reboot
+++ b/scripts/fast-reboot
@@ -442,9 +442,20 @@ function load_aboot_secureboot_kernel() {
 swipath=$next_image kexec=true loadonly=true ENV_EXTRA_CMDLINE="$BOOT_OPTIONS" bash -
}

+function invoke_kexec() {
+ /sbin/kexec -l "$KERNEL_IMAGE" --initrd="$INITRD" --append="$BOOT_OPTIONS" $@
+}
+
 function load_kernel() {
 # Load kernel into the memory
- /sbin/kexec -a -l "$KERNEL_IMAGE" --initrd="$INITRD" --append="$BOOT_OPTIONS"
+ invoke_kexec -a
+}
+
+function load_kernel_secure() {
+ # Load kernel into the memory securely
+ # The -s flag enforces that the newly loaded kernel (vmlinuz) is signed and verified.
+ # The -a flag is not used because it can fall back to an old kexec load that does not support Secure Boot verification
+ invoke_kexec -s
}

 function unload_kernel()
@@ -601,7 +612,13 @@ fi
 if is_secureboot && grep -q aboot_machine= /host/machine.conf; then
 load_aboot_secureboot_kernel
else
- load_kernel
+ # check if secure boot is enabled in UEFI
+ SECURE_UPGRADE_ENABLED=$(bootctl status 2>/dev/null | grep -c "Secure Boot: enabled")
+ if [ ${SECURE_UPGRADE_ENABLED} -eq 1 ]; then
+ load_kernel_secure
+ else
+ load_kernel
+ fi
fi

 init_warm_reboot_states

From 33e85d37f6abd4e7707d4c2436c9014239ce8d06 Mon Sep 17 00:00:00 2001
From: Yaqiang Zhu
Date: Thu, 16 Feb 2023 02:31:01 +0800
Subject: [PATCH 20/66] [dhcp_relay] Remove add field of vlanid to DHCP_RELAY table while add vlan (#2678)

What I did
Stop adding the vlanid field to the DHCP_RELAY table while adding a vlan, since it would cause a conflict with the yang model.
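To make the conflict concrete, here is a hedged sketch of the CONFIG_DB effect (illustrative only; assumes swsscommon's ConfigDBConnector, where passing None to set_entry() removes the entry):

    from swsscommon.swsscommon import ConfigDBConnector

    config_db = ConfigDBConnector()
    config_db.connect()

    # old behavior on `config vlan add 1001`: a vlanid field appears under
    # DHCP_RELAY|Vlan1001, which the DHCPv6 relay yang model rejects
    config_db.set_entry('DHCP_RELAY', 'Vlan1001', {'vlanid': '1001'})

    # new behavior: no DHCP_RELAY fields are written at add time, so
    # get_entry('DHCP_RELAY', 'Vlan1001') comes back empty
    config_db.set_entry('DHCP_RELAY', 'Vlan1001', None)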
How I did it
Stop adding the vlanid field to the DHCP_RELAY table while adding a vlan.
How to verify it
By unit tests

Signed-off-by: Yaqiang Zhu
---
 config/vlan.py | 2 +-
 tests/vlan_test.py | 6 ++----
 2 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/config/vlan.py b/config/vlan.py
index feb4fd2259..f1c6f06d1f 100644
--- a/config/vlan.py
+++ b/config/vlan.py
@@ -47,7 +47,7 @@ def add_vlan(db, vid):
 set_dhcp_relay_table('VLAN', config_db, vlan, {'vlanid': str(vid)})

 # set dhcpv6_relay table
- set_dhcp_relay_table('DHCP_RELAY', config_db, vlan, {'vlanid': str(vid)})
+ set_dhcp_relay_table('DHCP_RELAY', config_db, vlan, None)
 # We need to restart dhcp_relay service after dhcpv6_relay config change
 dhcp_relay_util.handle_restart_dhcp_relay_service()

diff --git a/tests/vlan_test.py b/tests/vlan_test.py
index 85673c5020..f582d0e3ba 100644
--- a/tests/vlan_test.py
+++ b/tests/vlan_test.py
@@ -19,9 +19,6 @@
 "table": "DHCP_RELAY"
 }
}
-DHCP_RELAY_TABLE_ENTRY = {
- "vlanid": "1001"
-}

 show_vlan_brief_output="""\
 +-----------+-----------------+-----------------+----------------+-------------+
@@ -610,7 +607,8 @@ def test_config_add_del_vlan_dhcp_relay(self, ip_version, mock_restart_dhcp_rela
 print(result.output)
 assert result.exit_code == 0

- assert db.cfgdb.get_entry(IP_VERSION_PARAMS_MAP[ip_version]["table"], "Vlan1001") == DHCP_RELAY_TABLE_ENTRY
+ exp_output = {"vlanid": "1001"} if ip_version == "ipv4" else {}
+ assert db.cfgdb.get_entry(IP_VERSION_PARAMS_MAP[ip_version]["table"], "Vlan1001") == exp_output

 # del vlan 1001
 result = runner.invoke(config.config.commands["vlan"].commands["del"], ["1001"], obj=db)

From 54e26359fccf45d2e40800cf5598a725798634cd Mon Sep 17 00:00:00 2001
From: Mai Bui
Date: Fri, 24 Feb 2023 12:26:32 -0500
Subject: [PATCH 21/66] Replace pickle by json (#2636)

Signed-off-by: maipbui
#### What I did
`pickle` can lead to code execution vulnerabilities. Recommend serializing the relevant data as JSON.
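As a short illustration of why the swap matters (not taken from the patch): json.load() only reconstructs plain data, while pickle.load() can execute arbitrary code embedded in a crafted cache file. The counter snapshots these scripts cache are simple dicts of strings, which JSON round-trips losslessly; the file path below is hypothetical:

    import json

    counters = {'rx_p_ok': '100', 'tx_p_ok': '42'}  # hypothetical cached snapshot

    with open('/tmp/cnstat.json', 'w') as f:
        json.dump(counters, f)

    with open('/tmp/cnstat.json') as f:
        assert json.load(f) == counters  # plain data comes back; nothing executes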
#### How I did it Replace `pickle` by `json` #### How to verify it Pass UT Manual test --- scripts/dropstat | 14 +-- scripts/flow_counters_stat | 10 +- scripts/intfstat | 64 +++++----- scripts/pfcstat | 62 +++++----- scripts/pg-drop | 8 +- scripts/portstat | 238 ++++++++++++++++++------------------- scripts/queuestat | 34 +++--- scripts/tunnelstat | 40 +++---- 8 files changed, 235 insertions(+), 235 deletions(-) diff --git a/scripts/dropstat b/scripts/dropstat index f98fc29197..4e9f5bb4d0 100755 --- a/scripts/dropstat +++ b/scripts/dropstat @@ -11,7 +11,7 @@ # - Refactor calls to COUNTERS_DB to reduce redundancy # - Cache DB queries to reduce # of expensive queries -import _pickle as pickle +import json import argparse import os import socket @@ -117,10 +117,10 @@ class DropStat(object): """ try: - pickle.dump(self.get_counts_table(self.gather_counters(std_port_rx_counters + std_port_tx_counters, DEBUG_COUNTER_PORT_STAT_MAP), COUNTERS_PORT_NAME_MAP), - open(self.port_drop_stats_file, 'wb+')) - pickle.dump(self.get_counts(self.gather_counters([], DEBUG_COUNTER_SWITCH_STAT_MAP), self.get_switch_id()), - open(self.switch_drop_stats_file, 'wb+')) + json.dump(self.get_counts_table(self.gather_counters(std_port_rx_counters + std_port_tx_counters, DEBUG_COUNTER_PORT_STAT_MAP), COUNTERS_PORT_NAME_MAP), + open(self.port_drop_stats_file, 'w+')) + json.dump(self.get_counts(self.gather_counters([], DEBUG_COUNTER_SWITCH_STAT_MAP), self.get_switch_id()), + open(self.switch_drop_stats_file, 'w+')) except IOError as e: print(e) sys.exit(e.errno) @@ -135,7 +135,7 @@ class DropStat(object): # Grab the latest clear checkpoint, if it exists if os.path.isfile(self.port_drop_stats_file): - port_drop_ckpt = pickle.load(open(self.port_drop_stats_file, 'rb')) + port_drop_ckpt = json.load(open(self.port_drop_stats_file, 'r')) counters = self.gather_counters(std_port_rx_counters + std_port_tx_counters, DEBUG_COUNTER_PORT_STAT_MAP, group, counter_type) headers = std_port_description_header + self.gather_headers(counters, DEBUG_COUNTER_PORT_STAT_MAP) @@ -162,7 +162,7 @@ class DropStat(object): # Grab the latest clear checkpoint, if it exists if os.path.isfile(self.switch_drop_stats_file): - switch_drop_ckpt = pickle.load(open(self.switch_drop_stats_file, 'rb')) + switch_drop_ckpt = json.load(open(self.switch_drop_stats_file, 'r')) counters = self.gather_counters([], DEBUG_COUNTER_SWITCH_STAT_MAP, group, counter_type) headers = std_switch_description_header + self.gather_headers(counters, DEBUG_COUNTER_SWITCH_STAT_MAP) diff --git a/scripts/flow_counters_stat b/scripts/flow_counters_stat index ac5ef94beb..49b97e335b 100755 --- a/scripts/flow_counters_stat +++ b/scripts/flow_counters_stat @@ -2,7 +2,7 @@ import argparse import os -import _pickle as pickle +import json import sys from natsort import natsorted @@ -185,8 +185,8 @@ class FlowCounterStats(object): if os.path.exists(self.data_file): os.remove(self.data_file) - with open(self.data_file, 'wb') as f: - pickle.dump(data, f) + with open(self.data_file, 'w') as f: + json.dump(data, f) except IOError as e: print('Failed to save statistic - {}'.format(repr(e))) @@ -200,8 +200,8 @@ class FlowCounterStats(object): return None try: - with open(self.data_file, 'rb') as f: - data = pickle.load(f) + with open(self.data_file, 'r') as f: + data = json.load(f) except IOError as e: print('Failed to load statistic - {}'.format(repr(e))) return None diff --git a/scripts/intfstat b/scripts/intfstat index 30cfbf084d..b4a770adeb 100755 --- a/scripts/intfstat +++ b/scripts/intfstat 
@@ -6,7 +6,7 @@ # ##################################################################### -import _pickle as pickle +import json import argparse import datetime import sys @@ -28,7 +28,7 @@ from collections import namedtuple, OrderedDict from natsort import natsorted from tabulate import tabulate from utilities_common.netstat import ns_diff, table_as_json, STATUS_NA, format_brate, format_prate -from utilities_common.cli import UserCache +from utilities_common.cli import json_serial, UserCache from swsscommon.swsscommon import SonicV2Connector nstat_fields = ( @@ -96,7 +96,7 @@ class Intfstat(object): counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, counter_name) if counter_data: fields[pos] = str(counter_data) - cntr = NStats._make(fields) + cntr = NStats._make(fields)._asdict() return cntr def get_rates(table_id): @@ -153,14 +153,14 @@ class Intfstat(object): rates = ratestat_dict.get(key, RateStats._make([STATUS_NA] * len(rates_key_list))) table.append((key, - data.rx_p_ok, + data['rx_p_ok'], format_brate(rates.rx_bps), format_prate(rates.rx_pps), - data.rx_p_err, - data.tx_p_ok, + data['rx_p_err'], + data['tx_p_ok'], format_brate(rates.tx_bps), format_prate(rates.tx_pps), - data.tx_p_err)) + data['tx_p_err'])) if use_json: print(table_as_json(table, header)) @@ -186,24 +186,24 @@ class Intfstat(object): if old_cntr is not None: table.append((key, - ns_diff(cntr.rx_p_ok, old_cntr.rx_p_ok), + ns_diff(cntr['rx_p_ok'], old_cntr['rx_p_ok']), format_brate(rates.rx_bps), format_prate(rates.rx_pps), - ns_diff(cntr.rx_p_err, old_cntr.rx_p_err), - ns_diff(cntr.tx_p_ok, old_cntr.tx_p_ok), + ns_diff(cntr['rx_p_err'], old_cntr['rx_p_err']), + ns_diff(cntr['tx_p_ok'], old_cntr['tx_p_ok']), format_brate(rates.tx_bps), format_prate(rates.tx_pps), - ns_diff(cntr.tx_p_err, old_cntr.tx_p_err))) + ns_diff(cntr['tx_p_err'], old_cntr['tx_p_err']))) else: table.append((key, - cntr.rx_p_ok, + cntr['rx_p_ok'], format_brate(rates.rx_bps), format_prate(rates.rx_pps), - cntr.rx_p_err, - cntr.tx_p_ok, + cntr['rx_p_err'], + cntr['tx_p_ok'], format_brate(rates.tx_bps), format_prate(rates.tx_pps), - cntr.tx_p_err)) + cntr['tx_p_err'])) if use_json: print(table_as_json(table, header)) @@ -229,17 +229,17 @@ class Intfstat(object): if cnstat_old_dict and cnstat_old_dict.get(rif): old_cntr = cnstat_old_dict.get(rif) - body = body % (ns_diff(cntr.rx_p_ok, old_cntr.rx_p_ok), - ns_diff(cntr.rx_b_ok, old_cntr.rx_b_ok), - ns_diff(cntr.rx_p_err, old_cntr.rx_p_err), - ns_diff(cntr.rx_b_err, old_cntr.rx_b_err), - ns_diff(cntr.tx_p_ok, old_cntr.tx_p_ok), - ns_diff(cntr.tx_b_ok, old_cntr.tx_b_ok), - ns_diff(cntr.tx_p_err, old_cntr.tx_p_err), - ns_diff(cntr.tx_b_err, old_cntr.tx_b_err)) + body = body % (ns_diff(cntr['rx_p_ok'], old_cntr['rx_p_ok']), + ns_diff(cntr['rx_b_ok'], old_cntr['rx_b_ok']), + ns_diff(cntr['rx_p_err'], old_cntr['rx_p_err']), + ns_diff(cntr['rx_b_err'], old_cntr['rx_b_err']), + ns_diff(cntr['tx_p_ok'], old_cntr['tx_p_ok']), + ns_diff(cntr['tx_b_ok'], old_cntr['tx_b_ok']), + ns_diff(cntr['tx_p_err'], old_cntr['tx_p_err']), + ns_diff(cntr['tx_b_err'], old_cntr['tx_b_err'])) else: - body = body % (cntr.rx_p_ok, cntr.rx_b_ok, cntr.rx_p_err,cntr.rx_b_err, - cntr.tx_p_ok, cntr.tx_b_ok, cntr.tx_p_err, cntr.tx_b_err) + body = body % (cntr['rx_p_ok'], cntr['rx_b_ok'], cntr['rx_p_err'],cntr['rx_b_err'], + cntr['tx_p_ok'], cntr['tx_b_ok'], cntr['tx_p_err'], cntr['tx_b_err']) print(header) print(body) @@ -305,20 +305,20 @@ def main(): if tag_name is not None: if os.path.isfile(cnstat_fqn_general_file): try: - 
general_data = pickle.load(open(cnstat_fqn_general_file, 'rb')) + general_data = json.load(open(cnstat_fqn_general_file, 'r')) for key, val in cnstat_dict.items(): general_data[key] = val - pickle.dump(general_data, open(cnstat_fqn_general_file, 'wb')) + json.dump(general_data, open(cnstat_fqn_general_file, 'w')) except IOError as e: sys.exit(e.errno) # Add the information also to tag specific file if os.path.isfile(cnstat_fqn_file): - data = pickle.load(open(cnstat_fqn_file, 'rb')) + data = json.load(open(cnstat_fqn_file, 'r')) for key, val in cnstat_dict.items(): data[key] = val - pickle.dump(data, open(cnstat_fqn_file, 'wb')) + json.dump(data, open(cnstat_fqn_file, 'w')) else: - pickle.dump(cnstat_dict, open(cnstat_fqn_file, 'wb')) + json.dump(cnstat_dict, open(cnstat_fqn_file, 'w'), default=json_serial) except IOError as e: sys.exit(e.errno) else: @@ -330,9 +330,9 @@ def main(): try: cnstat_cached_dict = {} if os.path.isfile(cnstat_fqn_file): - cnstat_cached_dict = pickle.load(open(cnstat_fqn_file, 'rb')) + cnstat_cached_dict = json.load(open(cnstat_fqn_file, 'r')) else: - cnstat_cached_dict = pickle.load(open(cnstat_fqn_general_file, 'rb')) + cnstat_cached_dict = json.load(open(cnstat_fqn_general_file, 'r')) print("Last cached time was " + str(cnstat_cached_dict.get('time'))) if interface_name: diff --git a/scripts/pfcstat b/scripts/pfcstat index fb7e6018b6..094c6e9380 100755 --- a/scripts/pfcstat +++ b/scripts/pfcstat @@ -6,7 +6,7 @@ # ##################################################################### -import _pickle as pickle +import json import argparse import datetime import os.path @@ -37,7 +37,7 @@ except KeyError: from utilities_common.netstat import ns_diff, STATUS_NA, format_number_with_comma from utilities_common import multi_asic as multi_asic_util from utilities_common import constants -from utilities_common.cli import UserCache +from utilities_common.cli import json_serial, UserCache PStats = namedtuple("PStats", "pfc0, pfc1, pfc2, pfc3, pfc4, pfc5, pfc6, pfc7") @@ -101,7 +101,7 @@ class Pfcstat(object): fields[pos] = STATUS_NA else: fields[pos] = str(int(counter_data)) - cntr = PStats._make(fields) + cntr = PStats._make(fields)._asdict() return cntr # Get the info from database @@ -144,14 +144,14 @@ class Pfcstat(object): if key == 'time': continue table.append((key, - format_number_with_comma(data.pfc0), - format_number_with_comma(data.pfc1), - format_number_with_comma(data.pfc2), - format_number_with_comma(data.pfc3), - format_number_with_comma(data.pfc4), - format_number_with_comma(data.pfc5), - format_number_with_comma(data.pfc6), - format_number_with_comma(data.pfc7))) + format_number_with_comma(data['pfc0']), + format_number_with_comma(data['pfc1']), + format_number_with_comma(data['pfc2']), + format_number_with_comma(data['pfc3']), + format_number_with_comma(data['pfc4']), + format_number_with_comma(data['pfc5']), + format_number_with_comma(data['pfc6']), + format_number_with_comma(data['pfc7']))) if rx: print(tabulate(table, header_Rx, tablefmt='simple', stralign='right')) @@ -173,24 +173,24 @@ class Pfcstat(object): if old_cntr is not None: table.append((key, - ns_diff(cntr.pfc0, old_cntr.pfc0), - ns_diff(cntr.pfc1, old_cntr.pfc1), - ns_diff(cntr.pfc2, old_cntr.pfc2), - ns_diff(cntr.pfc3, old_cntr.pfc3), - ns_diff(cntr.pfc4, old_cntr.pfc4), - ns_diff(cntr.pfc5, old_cntr.pfc5), - ns_diff(cntr.pfc6, old_cntr.pfc6), - ns_diff(cntr.pfc7, old_cntr.pfc7))) + ns_diff(cntr['pfc0'], old_cntr['pfc0']), + ns_diff(cntr['pfc1'], old_cntr['pfc1']), + ns_diff(cntr['pfc2'], 
old_cntr['pfc2']), + ns_diff(cntr['pfc3'], old_cntr['pfc3']), + ns_diff(cntr['pfc4'], old_cntr['pfc4']), + ns_diff(cntr['pfc5'], old_cntr['pfc5']), + ns_diff(cntr['pfc6'], old_cntr['pfc6']), + ns_diff(cntr['pfc7'], old_cntr['pfc7']))) else: table.append((key, - format_number_with_comma(cntr.pfc0), - format_number_with_comma(cntr.pfc1), - format_number_with_comma(cntr.pfc2), - format_number_with_comma(cntr.pfc3), - format_number_with_comma(cntr.pfc4), - format_number_with_comma(cntr.pfc5), - format_number_with_comma(cntr.pfc6), - format_number_with_comma(cntr.pfc7))) + format_number_with_comma(cntr['pfc0']), + format_number_with_comma(cntr['pfc1']), + format_number_with_comma(cntr['pfc2']), + format_number_with_comma(cntr['pfc3']), + format_number_with_comma(cntr['pfc4']), + format_number_with_comma(cntr['pfc5']), + format_number_with_comma(cntr['pfc6']), + format_number_with_comma(cntr['pfc7']))) if rx: print(tabulate(table, header_Rx, tablefmt='simple', stralign='right')) @@ -256,8 +256,8 @@ Examples: if save_fresh_stats: try: - pickle.dump(cnstat_dict_rx, open(cnstat_fqn_file_rx, 'wb')) - pickle.dump(cnstat_dict_tx, open(cnstat_fqn_file_tx, 'wb')) + json.dump(cnstat_dict_rx, open(cnstat_fqn_file_rx, 'w'), default=json_serial) + json.dump(cnstat_dict_tx, open(cnstat_fqn_file_tx, 'w'), default=json_serial) except IOError as e: print(e.errno, e) sys.exit(e.errno) @@ -271,7 +271,7 @@ Examples: """ if os.path.isfile(cnstat_fqn_file_rx): try: - cnstat_cached_dict = pickle.load(open(cnstat_fqn_file_rx, 'rb')) + cnstat_cached_dict = json.load(open(cnstat_fqn_file_rx, 'r')) print("Last cached time was " + str(cnstat_cached_dict.get('time'))) pfcstat.cnstat_diff_print(cnstat_dict_rx, cnstat_cached_dict, True) except IOError as e: @@ -286,7 +286,7 @@ Examples: """ if os.path.isfile(cnstat_fqn_file_tx): try: - cnstat_cached_dict = pickle.load(open(cnstat_fqn_file_tx, 'rb')) + cnstat_cached_dict = json.load(open(cnstat_fqn_file_tx, 'r')) print("Last cached time was " + str(cnstat_cached_dict.get('time'))) pfcstat.cnstat_diff_print(cnstat_dict_tx, cnstat_cached_dict, False) except IOError as e: diff --git a/scripts/pg-drop b/scripts/pg-drop index 40b4e863d3..7741593081 100755 --- a/scripts/pg-drop +++ b/scripts/pg-drop @@ -5,7 +5,7 @@ # pg-drop is a tool for show/clear ingress pg dropped packet stats. # ##################################################################### -import _pickle as pickle +import json import argparse import os import sys @@ -144,7 +144,7 @@ class PgDropStat(object): port_drop_ckpt = {} # Grab the latest clear checkpoint, if it exists if os.path.isfile(self.port_drop_stats_file): - port_drop_ckpt = pickle.load(open(self.port_drop_stats_file, 'rb')) + port_drop_ckpt = json.load(open(self.port_drop_stats_file, 'r')) # Header list contains the port name followed by the PGs. 
Fields is used to populate the pg values fields = ["0"]* (len(self.header_list) - 1) @@ -216,10 +216,10 @@ class PgDropStat(object): counter_pg_drop_array = [ "SAI_INGRESS_PRIORITY_GROUP_STAT_DROPPED_PACKETS"] try: - pickle.dump(self.get_counts_table( + json.dump(self.get_counts_table( counter_pg_drop_array, COUNTERS_PG_NAME_MAP), - open(self.port_drop_stats_file, 'wb+')) + open(self.port_drop_stats_file, 'w+')) except IOError as e: print(e) sys.exit(e.errno) diff --git a/scripts/portstat b/scripts/portstat index 27696729e9..09ad88b08d 100755 --- a/scripts/portstat +++ b/scripts/portstat @@ -6,7 +6,7 @@ # ##################################################################### -import _pickle as pickle +import json import argparse import datetime import os.path @@ -40,7 +40,7 @@ from utilities_common.intf_filter import parse_interface_in_filter import utilities_common.multi_asic as multi_asic_util from utilities_common.netstat import ns_diff, table_as_json, format_brate, format_prate, format_util, format_number_with_comma -from utilities_common.cli import UserCache +from utilities_common.cli import json_serial, UserCache """ The order and count of statistics mentioned below needs to be in sync with the values in portstat script @@ -181,7 +181,7 @@ class Portstat(object): elif fields[pos] != STATUS_NA: fields[pos] = str(int(fields[pos]) + int(fvs[counter_name])) - cntr = NStats._make(fields) + cntr = NStats._make(fields)._asdict() return cntr def get_rates(table_id): @@ -278,61 +278,61 @@ class Portstat(object): if print_all: header = header_all table.append((key, self.get_port_state(key), - format_number_with_comma(data.rx_ok), + format_number_with_comma(data['rx_ok']), format_brate(rates.rx_bps), format_prate(rates.rx_pps), format_util(rates.rx_bps, port_speed), - format_number_with_comma(data.rx_err), - format_number_with_comma(data.rx_drop), - format_number_with_comma(data.rx_ovr), - format_number_with_comma(data.tx_ok), + format_number_with_comma(data['rx_err']), + format_number_with_comma(data['rx_drop']), + format_number_with_comma(data['rx_ovr']), + format_number_with_comma(data['tx_ok']), format_brate(rates.tx_bps), format_prate(rates.tx_pps), format_util(rates.tx_bps, port_speed), - format_number_with_comma(data.tx_err), - format_number_with_comma(data.tx_drop), - format_number_with_comma(data.tx_ovr))) + format_number_with_comma(data['tx_err']), + format_number_with_comma(data['tx_drop']), + format_number_with_comma(data['tx_ovr']))) elif errors_only: header = header_errors_only table.append((key, self.get_port_state(key), - format_number_with_comma(data.rx_err), - format_number_with_comma(data.rx_drop), - format_number_with_comma(data.rx_ovr), - format_number_with_comma(data.tx_err), - format_number_with_comma(data.tx_drop), - format_number_with_comma(data.tx_ovr))) + format_number_with_comma(data['rx_err']), + format_number_with_comma(data['rx_drop']), + format_number_with_comma(data['rx_ovr']), + format_number_with_comma(data['tx_err']), + format_number_with_comma(data['tx_drop']), + format_number_with_comma(data['tx_ovr']))) elif fec_stats_only: header = header_fec_only table.append((key, self.get_port_state(key), - format_number_with_comma(data.fec_corr), - format_number_with_comma(data.fec_uncorr), - format_number_with_comma(data.fec_symbol_err))) + format_number_with_comma(data['fec_corr']), + format_number_with_comma(data['fec_uncorr']), + format_number_with_comma(data['fec_symbol_err']))) elif rates_only: header = header_rates_only table.append((key, 
self.get_port_state(key), - format_number_with_comma(data.rx_ok), + format_number_with_comma(data['rx_ok']), format_brate(rates.rx_bps), format_prate(rates.rx_pps), format_util(rates.rx_bps, port_speed), - format_number_with_comma(data.tx_ok), + format_number_with_comma(data['tx_ok']), format_brate(rates.tx_bps), format_prate(rates.tx_pps), format_util(rates.tx_bps, port_speed))) else: header = header_std table.append((key, self.get_port_state(key), - format_number_with_comma(data.rx_ok), + format_number_with_comma(data['rx_ok']), format_brate(rates.rx_bps), format_util(rates.rx_bps, port_speed), - format_number_with_comma(data.rx_err), - format_number_with_comma(data.rx_drop), - format_number_with_comma(data.rx_ovr), - format_number_with_comma(data.tx_ok), + format_number_with_comma(data['rx_err']), + format_number_with_comma(data['rx_drop']), + format_number_with_comma(data['rx_ovr']), + format_number_with_comma(data['tx_ok']), format_brate(rates.tx_bps), format_util(rates.tx_bps, port_speed), - format_number_with_comma(data.tx_err), - format_number_with_comma(data.tx_drop), - format_number_with_comma(data.tx_ovr))) + format_number_with_comma(data['tx_err']), + format_number_with_comma(data['tx_drop']), + format_number_with_comma(data['tx_ovr']))) if use_json: print(table_as_json(table, header)) @@ -353,51 +353,51 @@ class Portstat(object): if key in cnstat_old_dict: old_cntr = cnstat_old_dict.get(key) else: - old_cntr = NStats._make([0] * BUCKET_NUM) + old_cntr = NStats._make([0] * BUCKET_NUM)._asdict() if intf_list and key not in intf_list: continue - print("Packets Received 64 Octets..................... {}".format(ns_diff(cntr.rx_64, old_cntr.rx_64))) - print("Packets Received 65-127 Octets................. {}".format(ns_diff(cntr.rx_65_127, old_cntr.rx_65_127))) - print("Packets Received 128-255 Octets................ {}".format(ns_diff(cntr.rx_128_255, old_cntr.rx_128_255))) - print("Packets Received 256-511 Octets................ {}".format(ns_diff(cntr.rx_256_511, old_cntr.rx_256_511))) - print("Packets Received 512-1023 Octets............... {}".format(ns_diff(cntr.rx_512_1023, old_cntr.rx_512_1023))) - print("Packets Received 1024-1518 Octets.............. {}".format(ns_diff(cntr.rx_1024_1518, old_cntr.rx_1024_1518))) - print("Packets Received 1519-2047 Octets.............. {}".format(ns_diff(cntr.rx_1519_2047, old_cntr.rx_1519_2047))) - print("Packets Received 2048-4095 Octets.............. {}".format(ns_diff(cntr.rx_2048_4095, old_cntr.rx_2048_4095))) - print("Packets Received 4096-9216 Octets.............. {}".format(ns_diff(cntr.rx_4096_9216, old_cntr.rx_4096_9216))) - print("Packets Received 9217-16383 Octets............. {}".format(ns_diff(cntr.rx_9217_16383, old_cntr.rx_9217_16383))) + print("Packets Received 64 Octets..................... {}".format(ns_diff(cntr['rx_64'], old_cntr['rx_64']))) + print("Packets Received 65-127 Octets................. {}".format(ns_diff(cntr['rx_65_127'], old_cntr['rx_65_127']))) + print("Packets Received 128-255 Octets................ {}".format(ns_diff(cntr['rx_128_255'], old_cntr['rx_128_255']))) + print("Packets Received 256-511 Octets................ {}".format(ns_diff(cntr['rx_256_511'], old_cntr['rx_256_511']))) + print("Packets Received 512-1023 Octets............... {}".format(ns_diff(cntr['rx_512_1023'], old_cntr['rx_512_1023']))) + print("Packets Received 1024-1518 Octets.............. {}".format(ns_diff(cntr['rx_1024_1518'], old_cntr['rx_1024_1518']))) + print("Packets Received 1519-2047 Octets.............. 
{}".format(ns_diff(cntr['rx_1519_2047'], old_cntr['rx_1519_2047']))) + print("Packets Received 2048-4095 Octets.............. {}".format(ns_diff(cntr['rx_2048_4095'], old_cntr['rx_2048_4095']))) + print("Packets Received 4096-9216 Octets.............. {}".format(ns_diff(cntr['rx_4096_9216'], old_cntr['rx_4096_9216']))) + print("Packets Received 9217-16383 Octets............. {}".format(ns_diff(cntr['rx_9217_16383'], old_cntr['rx_9217_16383']))) print("") - print("Total Packets Received Without Errors.......... {}".format(ns_diff(cntr.rx_all, old_cntr.rx_all))) - print("Unicast Packets Received....................... {}".format(ns_diff(cntr.rx_uca, old_cntr.rx_uca))) - print("Multicast Packets Received..................... {}".format(ns_diff(cntr.rx_mca, old_cntr.rx_mca))) - print("Broadcast Packets Received..................... {}".format(ns_diff(cntr.rx_bca, old_cntr.rx_bca))) + print("Total Packets Received Without Errors.......... {}".format(ns_diff(cntr['rx_all'], old_cntr['rx_all']))) + print("Unicast Packets Received....................... {}".format(ns_diff(cntr['rx_uca'], old_cntr['rx_uca']))) + print("Multicast Packets Received..................... {}".format(ns_diff(cntr['rx_mca'], old_cntr['rx_mca']))) + print("Broadcast Packets Received..................... {}".format(ns_diff(cntr['rx_bca'], old_cntr['rx_bca']))) print("") - print("Jabbers Received............................... {}".format(ns_diff(cntr.rx_jbr, old_cntr.rx_jbr))) - print("Fragments Received............................. {}".format(ns_diff(cntr.rx_frag, old_cntr.rx_frag))) - print("Undersize Received............................. {}".format(ns_diff(cntr.rx_usize, old_cntr.rx_usize))) - print("Overruns Received.............................. {}".format(ns_diff(cntr.rx_ovrrun, old_cntr.rx_ovrrun))) + print("Jabbers Received............................... {}".format(ns_diff(cntr['rx_jbr'], old_cntr['rx_jbr']))) + print("Fragments Received............................. {}".format(ns_diff(cntr['rx_frag'], old_cntr['rx_frag']))) + print("Undersize Received............................. {}".format(ns_diff(cntr['rx_usize'], old_cntr['rx_usize']))) + print("Overruns Received.............................. {}".format(ns_diff(cntr['rx_ovrrun'], old_cntr['rx_ovrrun']))) print("") - print("Packets Transmitted 64 Octets.................. {}".format(ns_diff(cntr.tx_64, old_cntr.tx_64))) - print("Packets Transmitted 65-127 Octets.............. {}".format(ns_diff(cntr.tx_65_127, old_cntr.tx_65_127))) - print("Packets Transmitted 128-255 Octets............. {}".format(ns_diff(cntr.tx_128_255, old_cntr.tx_128_255))) - print("Packets Transmitted 256-511 Octets............. {}".format(ns_diff(cntr.tx_256_511, old_cntr.tx_256_511))) - print("Packets Transmitted 512-1023 Octets............ {}".format(ns_diff(cntr.tx_512_1023, old_cntr.tx_512_1023))) - print("Packets Transmitted 1024-1518 Octets........... {}".format(ns_diff(cntr.tx_1024_1518, old_cntr.tx_1024_1518))) - print("Packets Transmitted 1519-2047 Octets........... {}".format(ns_diff(cntr.tx_1519_2047, old_cntr.tx_1519_2047))) - print("Packets Transmitted 2048-4095 Octets........... {}".format(ns_diff(cntr.tx_2048_4095, old_cntr.tx_2048_4095))) - print("Packets Transmitted 4096-9216 Octets........... {}".format(ns_diff(cntr.tx_4096_9216, old_cntr.tx_4096_9216))) - print("Packets Transmitted 9217-16383 Octets.......... {}".format(ns_diff(cntr.tx_9217_16383, old_cntr.tx_9217_16383))) + print("Packets Transmitted 64 Octets.................. 
{}".format(ns_diff(cntr['tx_64'], old_cntr['tx_64']))) + print("Packets Transmitted 65-127 Octets.............. {}".format(ns_diff(cntr['tx_65_127'], old_cntr['tx_65_127']))) + print("Packets Transmitted 128-255 Octets............. {}".format(ns_diff(cntr['tx_128_255'], old_cntr['tx_128_255']))) + print("Packets Transmitted 256-511 Octets............. {}".format(ns_diff(cntr['tx_256_511'], old_cntr['tx_256_511']))) + print("Packets Transmitted 512-1023 Octets............ {}".format(ns_diff(cntr['tx_512_1023'], old_cntr['tx_512_1023']))) + print("Packets Transmitted 1024-1518 Octets........... {}".format(ns_diff(cntr['tx_1024_1518'], old_cntr['tx_1024_1518']))) + print("Packets Transmitted 1519-2047 Octets........... {}".format(ns_diff(cntr['tx_1519_2047'], old_cntr['tx_1519_2047']))) + print("Packets Transmitted 2048-4095 Octets........... {}".format(ns_diff(cntr['tx_2048_4095'], old_cntr['tx_2048_4095']))) + print("Packets Transmitted 4096-9216 Octets........... {}".format(ns_diff(cntr['tx_4096_9216'], old_cntr['tx_4096_9216']))) + print("Packets Transmitted 9217-16383 Octets.......... {}".format(ns_diff(cntr['tx_9217_16383'], old_cntr['tx_9217_16383']))) print("") - print("Total Packets Transmitted Successfully......... {}".format(ns_diff(cntr.tx_all, old_cntr.tx_all))) - print("Unicast Packets Transmitted.................... {}".format(ns_diff(cntr.tx_uca, old_cntr.tx_uca))) - print("Multicast Packets Transmitted.................. {}".format(ns_diff(cntr.tx_mca, old_cntr.tx_mca))) - print("Broadcast Packets Transmitted.................. {}".format(ns_diff(cntr.tx_bca, old_cntr.tx_bca))) + print("Total Packets Transmitted Successfully......... {}".format(ns_diff(cntr['tx_all'], old_cntr['tx_all']))) + print("Unicast Packets Transmitted.................... {}".format(ns_diff(cntr['tx_uca'], old_cntr['tx_uca']))) + print("Multicast Packets Transmitted.................. {}".format(ns_diff(cntr['tx_mca'], old_cntr['tx_mca']))) + print("Broadcast Packets Transmitted.................. {}".format(ns_diff(cntr['tx_bca'], old_cntr['tx_bca']))) print("Time Since Counters Last Cleared............... 
" + str(cnstat_old_dict.get('time'))) @@ -434,88 +434,88 @@ class Portstat(object): header = header_all if old_cntr is not None: table.append((key, self.get_port_state(key), - ns_diff(cntr.rx_ok, old_cntr.rx_ok), + ns_diff(cntr['rx_ok'], old_cntr['rx_ok']), format_brate(rates.rx_bps), format_prate(rates.rx_pps), format_util(rates.rx_bps, port_speed), - ns_diff(cntr.rx_err, old_cntr.rx_err), - ns_diff(cntr.rx_drop, old_cntr.rx_drop), - ns_diff(cntr.rx_ovr, old_cntr.rx_ovr), - ns_diff(cntr.tx_ok, old_cntr.tx_ok), + ns_diff(cntr['rx_err'], old_cntr['rx_err']), + ns_diff(cntr['rx_drop'], old_cntr['rx_drop']), + ns_diff(cntr['rx_ovr'], old_cntr['rx_ovr']), + ns_diff(cntr['tx_ok'], old_cntr['tx_ok']), format_brate(rates.tx_bps), format_prate(rates.tx_pps), format_util(rates.tx_bps, port_speed), - ns_diff(cntr.tx_err, old_cntr.tx_err), - ns_diff(cntr.tx_drop, old_cntr.tx_drop), - ns_diff(cntr.tx_ovr, old_cntr.tx_ovr))) + ns_diff(cntr['tx_err'], old_cntr['tx_err']), + ns_diff(cntr['tx_drop'], old_cntr['tx_drop']), + ns_diff(cntr['tx_ovr'], old_cntr['tx_ovr']))) else: table.append((key, self.get_port_state(key), - format_number_with_comma(cntr.rx_ok), + format_number_with_comma(cntr['rx_ok']), format_brate(rates.rx_bps), format_prate(rates.rx_pps), format_util(rates.rx_bps, port_speed), - format_number_with_comma(cntr.rx_err), - format_number_with_comma(cntr.rx_drop), - format_number_with_comma(cntr.rx_ovr), - format_number_with_comma(cntr.tx_ok), + format_number_with_comma(cntr['rx_err']), + format_number_with_comma(cntr['rx_drop']), + format_number_with_comma(cntr['rx_ovr']), + format_number_with_comma(cntr['tx_ok']), format_brate(rates.tx_bps), format_prate(rates.tx_pps), format_util(rates.tx_bps, port_speed), - format_number_with_comma(cntr.tx_err), - format_number_with_comma(cntr.tx_drop), - format_number_with_comma(cntr.tx_ovr))) + format_number_with_comma(cntr['tx_err']), + format_number_with_comma(cntr['tx_drop']), + format_number_with_comma(cntr['tx_ovr']))) elif errors_only: header = header_errors_only if old_cntr is not None: table.append((key, self.get_port_state(key), - ns_diff(cntr.rx_err, old_cntr.rx_err), - ns_diff(cntr.rx_drop, old_cntr.rx_drop), - ns_diff(cntr.rx_ovr, old_cntr.rx_ovr), - ns_diff(cntr.tx_err, old_cntr.tx_err), - ns_diff(cntr.tx_drop, old_cntr.tx_drop), - ns_diff(cntr.tx_ovr, old_cntr.tx_ovr))) + ns_diff(cntr['rx_err'], old_cntr['rx_err']), + ns_diff(cntr['rx_drop'], old_cntr['rx_drop']), + ns_diff(cntr['rx_ovr'], old_cntr['rx_ovr']), + ns_diff(cntr['tx_err'], old_cntr['tx_err']), + ns_diff(cntr['tx_drop'], old_cntr['tx_drop']), + ns_diff(cntr['tx_ovr'], old_cntr['tx_ovr']))) else: table.append((key, self.get_port_state(key), - format_number_with_comma(cntr.rx_err), - format_number_with_comma(cntr.rx_drop), - format_number_with_comma(cntr.rx_ovr), - format_number_with_comma(cntr.tx_err), - format_number_with_comma(cntr.tx_drop), - format_number_with_comma(cntr.tx_ovr))) + format_number_with_comma(cntr['rx_err']), + format_number_with_comma(cntr['rx_drop']), + format_number_with_comma(cntr['rx_ovr']), + format_number_with_comma(cntr['tx_err']), + format_number_with_comma(cntr['tx_drop']), + format_number_with_comma(cntr['tx_ovr']))) elif fec_stats_only: header = header_fec_only if old_cntr is not None: table.append((key, self.get_port_state(key), - ns_diff(cntr.fec_corr, old_cntr.fec_corr), - ns_diff(cntr.fec_uncorr, old_cntr.fec_uncorr), - ns_diff(cntr.fec_symbol_err, old_cntr.fec_symbol_err))) + ns_diff(cntr['fec_corr'], old_cntr['fec_corr']), + 
ns_diff(cntr['fec_uncorr'], old_cntr['fec_uncorr']), + ns_diff(cntr['fec_symbol_err'], old_cntr['fec_symbol_err']))) else: table.append((key, self.get_port_state(key), - format_number_with_comma(cntr.fec_corr), - format_number_with_comma(cntr.fec_uncorr), - format_number_with_comma(cntr.fec_symbol_err))) + format_number_with_comma(cntr['fec_corr']), + format_number_with_comma(cntr['fec_uncorr']), + format_number_with_comma(cntr['fec_symbol_err']))) elif rates_only: header = header_rates_only if old_cntr is not None: table.append((key, self.get_port_state(key), - ns_diff(cntr.rx_ok, old_cntr.rx_ok), + ns_diff(cntr['rx_ok'], old_cntr['rx_ok']), format_brate(rates.rx_bps), format_prate(rates.rx_pps), format_util(rates.rx_bps, port_speed), - ns_diff(cntr.tx_ok, old_cntr.tx_ok), + ns_diff(cntr['tx_ok'], old_cntr['tx_ok']), format_brate(rates.tx_bps), format_prate(rates.tx_pps), format_util(rates.tx_bps, port_speed))) else: table.append((key, self.get_port_state(key), - format_number_with_comma(cntr.rx_ok), + format_number_with_comma(cntr['rx_ok']), format_brate(rates.rx_bps), format_prate(rates.rx_pps), format_util(rates.rx_bps, port_speed), - format_number_with_comma(cntr.tx_ok), + format_number_with_comma(cntr['tx_ok']), format_brate(rates.tx_bps), format_prate(rates.tx_pps), format_util(rates.tx_bps, port_speed))) @@ -524,33 +524,33 @@ class Portstat(object): if old_cntr is not None: table.append((key, self.get_port_state(key), - ns_diff(cntr.rx_ok, old_cntr.rx_ok), + ns_diff(cntr['rx_ok'], old_cntr['rx_ok']), format_brate(rates.rx_bps), format_util(rates.rx_bps, port_speed), - ns_diff(cntr.rx_err, old_cntr.rx_err), - ns_diff(cntr.rx_drop, old_cntr.rx_drop), - ns_diff(cntr.rx_ovr, old_cntr.rx_ovr), - ns_diff(cntr.tx_ok, old_cntr.tx_ok), + ns_diff(cntr['rx_err'], old_cntr['rx_err']), + ns_diff(cntr['rx_drop'], old_cntr['rx_drop']), + ns_diff(cntr['rx_ovr'], old_cntr['rx_ovr']), + ns_diff(cntr['tx_ok'], old_cntr['tx_ok']), format_brate(rates.tx_bps), format_util(rates.tx_bps, port_speed), - ns_diff(cntr.tx_err, old_cntr.tx_err), - ns_diff(cntr.tx_drop, old_cntr.tx_drop), - ns_diff(cntr.tx_ovr, old_cntr.tx_ovr))) + ns_diff(cntr['tx_err'], old_cntr['tx_err']), + ns_diff(cntr['tx_drop'], old_cntr['tx_drop']), + ns_diff(cntr['tx_ovr'], old_cntr['tx_ovr']))) else: table.append((key, self.get_port_state(key), - format_number_with_comma(cntr.rx_ok), + format_number_with_comma(cntr['rx_ok']), format_brate(rates.rx_bps), format_util(rates.rx_bps, port_speed), - format_number_with_comma(cntr.rx_err), - format_number_with_comma(cntr.rx_drop), - format_number_with_comma(cntr.rx_ovr), - format_number_with_comma(cntr.tx_ok), + format_number_with_comma(cntr['rx_err']), + format_number_with_comma(cntr['rx_drop']), + format_number_with_comma(cntr['rx_ovr']), + format_number_with_comma(cntr['tx_ok']), format_brate(rates.tx_bps), format_util(rates.tx_bps, port_speed), - format_number_with_comma(cntr.tx_err), - format_number_with_comma(cntr.tx_drop), - format_number_with_comma(cntr.tx_ovr))) + format_number_with_comma(cntr['tx_err']), + format_number_with_comma(cntr['tx_drop']), + format_number_with_comma(cntr['tx_ovr']))) if use_json: print(table_as_json(table, header)) @@ -641,7 +641,7 @@ Examples: if save_fresh_stats: try: - pickle.dump(cnstat_dict, open(cnstat_fqn_file, 'wb')) + json.dump(cnstat_dict, open(cnstat_fqn_file, 'w'), default=json_serial) except IOError as e: sys.exit(e.errno) else: @@ -652,7 +652,7 @@ Examples: cnstat_cached_dict = OrderedDict() if os.path.isfile(cnstat_fqn_file): try: - 
cnstat_cached_dict = pickle.load(open(cnstat_fqn_file, 'rb')) + cnstat_cached_dict = json.load(open(cnstat_fqn_file, 'r')) if not detail: print("Last cached time was " + str(cnstat_cached_dict.get('time'))) portstat.cnstat_diff_print(cnstat_dict, cnstat_cached_dict, ratestat_dict, intf_list, use_json, print_all, errors_only, fec_stats_only, rates_only, detail) diff --git a/scripts/queuestat b/scripts/queuestat index 96a24b51a3..d82e7e4a6a 100755 --- a/scripts/queuestat +++ b/scripts/queuestat @@ -6,7 +6,7 @@ # ##################################################################### -import _pickle as pickle +import json import argparse import datetime import os.path @@ -33,7 +33,7 @@ except KeyError: pass from swsscommon.swsscommon import SonicV2Connector -from utilities_common.cli import UserCache +from utilities_common.cli import json_serial, UserCache from utilities_common import constants import utilities_common.multi_asic as multi_asic_util @@ -186,7 +186,7 @@ class Queuestat(object): fields[pos] = STATUS_NA elif fields[pos] != STATUS_NA: fields[pos] = str(int(counter_data)) - cntr = QueueStats._make(fields) + cntr = QueueStats._make(fields)._asdict() return cntr # Build a dictionary of the stats @@ -211,9 +211,9 @@ class Queuestat(object): if json_opt: json_output[port][key] = data continue - table.append((port, data.queuetype + str(data.queueindex), - data.totalpacket, data.totalbytes, - data.droppacket, data.dropbytes)) + table.append((port, data['queuetype'] + str(data['queueindex']), + data['totalpacket'], data['totalbytes'], + data['droppacket'], data['dropbytes'])) if json_opt: json_output[port].update(build_json(port, table)) @@ -241,15 +241,15 @@ class Queuestat(object): old_cntr = cnstat_old_dict.get(key) if old_cntr is not None: - table.append((port, cntr.queuetype + str(cntr.queueindex), - ns_diff(cntr.totalpacket, old_cntr.totalpacket), - ns_diff(cntr.totalbytes, old_cntr.totalbytes), - ns_diff(cntr.droppacket, old_cntr.droppacket), - ns_diff(cntr.dropbytes, old_cntr.dropbytes))) + table.append((port, cntr['queuetype'] + str(cntr['queueindex']), + ns_diff(cntr['totalpacket'], old_cntr['totalpacket']), + ns_diff(cntr['totalbytes'], old_cntr['totalbytes']), + ns_diff(cntr['droppacket'], old_cntr['droppacket']), + ns_diff(cntr['dropbytes'], old_cntr['dropbytes']))) else: - table.append((port, cntr.queuetype + str(cntr.queueindex), - cntr.totalpacket, cntr.totalbytes, - cntr.droppacket, cntr.dropbytes)) + table.append((port, cntr['queuetype'] + str(cntr['queueindex']), + cntr['totalpacket'], cntr['totalbytes'], + cntr['droppacket'], cntr['dropbytes'])) if json_opt: json_output[port].update(build_json(port, table)) @@ -273,7 +273,7 @@ class Queuestat(object): cnstat_fqn_file_name = cnstat_fqn_file + port if os.path.isfile(cnstat_fqn_file_name): try: - cnstat_cached_dict = pickle.load(open(cnstat_fqn_file_name, 'rb')) + cnstat_cached_dict = json.load(open(cnstat_fqn_file_name, 'r')) if json_opt: json_output[port].update({"cached_time":cnstat_cached_dict.get('time')}) json_output.update(self.cnstat_diff_print(port, cnstat_dict, cnstat_cached_dict, json_opt)) @@ -307,7 +307,7 @@ class Queuestat(object): json_output[port] = {} if os.path.isfile(cnstat_fqn_file_name): try: - cnstat_cached_dict = pickle.load(open(cnstat_fqn_file_name, 'rb')) + cnstat_cached_dict = json.load(open(cnstat_fqn_file_name, 'r')) if json_opt: json_output[port].update({"cached_time":cnstat_cached_dict.get('time')}) json_output.update(self.cnstat_diff_print(port, cnstat_dict, cnstat_cached_dict, json_opt)) @@ 
-330,7 +330,7 @@ class Queuestat(object): for port in natsorted(self.counter_port_name_map): cnstat_dict = self.get_cnstat(self.port_queues_map[port]) try: - pickle.dump(cnstat_dict, open(cnstat_fqn_file + port, 'wb')) + json.dump(cnstat_dict, open(cnstat_fqn_file + port, 'w'), default=json_serial) except IOError as e: print(e.errno, e) sys.exit(e.errno) diff --git a/scripts/tunnelstat b/scripts/tunnelstat index 8b045ec684..3d7423e86b 100755 --- a/scripts/tunnelstat +++ b/scripts/tunnelstat @@ -6,7 +6,7 @@ # ##################################################################### -import _pickle as pickle +import json import argparse import datetime import sys @@ -29,7 +29,7 @@ from collections import namedtuple, OrderedDict from natsort import natsorted from tabulate import tabulate from utilities_common.netstat import ns_diff, table_as_json, STATUS_NA, format_prate -from utilities_common.cli import UserCache +from utilities_common.cli import json_serial, UserCache from swsscommon.swsscommon import SonicV2Connector @@ -80,7 +80,7 @@ class Tunnelstat(object): counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, counter_name) if counter_data: fields[pos] = str(counter_data) - cntr = NStats._make(fields) + cntr = NStats._make(fields)._asdict() return cntr def get_rates(table_id): @@ -149,8 +149,8 @@ class Tunnelstat(object): continue rates = ratestat_dict.get(key, RateStats._make([STATUS_NA] * len(rates_key_list))) - table.append((key, data.rx_p_ok, data.rx_b_ok, format_prate(rates.rx_pps), - data.tx_p_ok, data.tx_b_ok, format_prate(rates.tx_pps))) + table.append((key, data['rx_p_ok'], data['rx_b_ok'], format_prate(rates.rx_pps), + data['tx_p_ok'], data['tx_b_ok'], format_prate(rates.tx_pps))) if use_json: print(table_as_json(table, header)) @@ -175,19 +175,19 @@ class Tunnelstat(object): rates = ratestat_dict.get(key, RateStats._make([STATUS_NA] * len(rates_key_list))) if old_cntr is not None: table.append((key, - ns_diff(cntr.rx_p_ok, old_cntr.rx_p_ok), - ns_diff(cntr.rx_b_ok, old_cntr.rx_b_ok), + ns_diff(cntr['rx_p_ok'], old_cntr['rx_p_ok']), + ns_diff(cntr['rx_b_ok'], old_cntr['rx_b_ok']), format_prate(rates.rx_pps), - ns_diff(cntr.tx_p_ok, old_cntr.tx_p_ok), - ns_diff(cntr.tx_b_ok, old_cntr.tx_b_ok), + ns_diff(cntr['tx_p_ok'], old_cntr['tx_p_ok']), + ns_diff(cntr['tx_b_ok'], old_cntr['tx_b_ok']), format_prate(rates.tx_pps))) else: table.append((key, - cntr.rx_p_ok, - cntr.rx_b_ok, + cntr['rx_p_ok'], + cntr['rx_b_ok'], format_prate(rates.rx_pps), - cntr.tx_p_ok, - cntr.tx_b_ok, + cntr['tx_p_ok'], + cntr['tx_b_ok'], format_prate(rates.tx_pps))) if use_json: print(table_as_json(table, header)) @@ -210,12 +210,12 @@ class Tunnelstat(object): if cnstat_old_dict: old_cntr = cnstat_old_dict.get(tunnel) if old_cntr: - body = body % (ns_diff(cntr.rx_p_ok, old_cntr.rx_p_ok), - ns_diff(cntr.rx_b_ok, old_cntr.rx_b_ok), - ns_diff(cntr.tx_p_ok, old_cntr.tx_p_ok), - ns_diff(cntr.tx_b_ok, old_cntr.tx_b_ok)) + body = body % (ns_diff(cntr['rx_p_ok'], old_cntr['rx_p_ok']), + ns_diff(cntr['rx_b_ok'], old_cntr['rx_b_ok']), + ns_diff(cntr['tx_p_ok'], old_cntr['tx_p_ok']), + ns_diff(cntr['tx_b_ok'], old_cntr['tx_b_ok'])) else: - body = body % (cntr.rx_p_ok, cntr.rx_b_ok, cntr.tx_p_ok, cntr.tx_b_ok) + body = body % (cntr['rx_p_ok'], cntr['rx_b_ok'], cntr['tx_p_ok'], cntr['tx_b_ok']) print(header) print(body) @@ -273,7 +273,7 @@ def main(): if save_fresh_stats: try: - pickle.dump(cnstat_dict, open(cnstat_fqn_file, 'wb')) + json.dump(cnstat_dict, open(cnstat_fqn_file, 'w'), default=json_serial) except 
IOError as e:
            sys.exit(e.errno)
    else:
@@ -283,7 +283,7 @@ def main():
     if wait_time_in_seconds == 0:
         if os.path.isfile(cnstat_fqn_file):
             try:
-                cnstat_cached_dict = pickle.load(open(cnstat_fqn_file, 'rb'))
+                cnstat_cached_dict = json.load(open(cnstat_fqn_file, 'r'))
                 print("Last cached time was " + str(cnstat_cached_dict.get('time')))
                 if tunnel_name:
                     tunnelstat.cnstat_single_tunnel(tunnel_name, cnstat_dict, cnstat_cached_dict)

From eda4e91b8a0c9ca6a4f329609e6499d1e5aad45d Mon Sep 17 00:00:00 2001
From: vdahiya12 <67608553+vdahiya12@users.noreply.github.com>
Date: Fri, 24 Feb 2023 12:46:36 -0800
Subject: [PATCH 22/66] [show][muxcable] add some new commands health,
 reset-cause, queue_info support for muxcable (#2414)

This PR adds support for some utility commands for muxcable.
This includes commands for health, operationtime, queueinfo, resetcause.

vdahiya@sonic:~$ show mux health Ethernet4
PORT       ATTR          HEALTH
---------  ------------  --------
Ethernet4  health_check  Ok

vdahiya@sonic:~$ show mux health Ethernet4 --json
{
    "health_check": "Ok"
}

vdahiya@sonic:~$ show mux operation Ethernet4 --json
{
    "operation_time": "22:22"
}

vdahiya@sonic:~$ show mux operation Ethernet4
PORT       ATTR            OPERATION_TIME
---------  --------------  ----------------
Ethernet4  operation_time  22:22

vdahiya@sonic:~$ show mux resetcause Ethernet4
PORT       ATTR         RESETCAUSE
---------  -----------  ------------
Ethernet4  reset_cause  0

vdahiya@sonic:~$ show mux resetcause Ethernet4 --json
{
    "reset_cause": "0"
}

vdahiya@sonic:~$ show mux queueinfo Ethernet4 --json
{
    "Remote": "{'VSC': {'r_ptr': 0, 'w_ptr': 0, 'total_count': 0, 'free_count': 0, 'buff_addr': 0, 'node_size': 0}, 'UART1': {'r_ptr': 0, 'w_ptr': 0, 'total_count': 0, 'free_count': 0, 'buff_addr': 209870, 'node_size': 1682183}, 'UART2': {'r_ptr': 13262, 'w_ptr': 3, 'total_count': 0, 'free_count': 0, 'buff_addr': 12, 'node_size': 0}}",
    "Local": "{'VSC': {'r_ptr': 0, 'w_ptr': 0, 'total_count': 0, 'free_count': 0, 'buff_addr': 0, 'node_size': 0}, 'UART1': {'r_ptr': 0, 'w_ptr': 0, 'total_count': 0, 'free_count': 0, 'buff_addr': 209870, 'node_size': 1682183}, 'UART2': {'r_ptr': 13262, 'w_ptr': 3, 'total_count': 0, 'free_count': 0, 'buff_addr': 12, 'node_size': 0}}"
}
---
 doc/Command-Reference.md | 148 +++++++++++++++++++++++
 show/muxcable.py         | 245 ++++++++++++++++++++++++++++++++++++---
 tests/muxcable_test.py   | 124 ++++++++++++++++++++
 3 files changed, 503 insertions(+), 14 deletions(-)

diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md
index dbc7966c8d..69f282ccbb 100644
--- a/doc/Command-Reference.md
+++ b/doc/Command-Reference.md
@@ -6126,6 +6126,154 @@ This command displays the eye info in mv(milli volts) of the port user provides
    632     622
 ```
 
+
+**show muxcable health <port>**
+
+This command displays the hardware health of the Y-cable which is connected to the muxcable. The resultant table or json output will show the current hardware health of the cable as Ok, Not Ok, or Unknown.
+
+- Usage:
+  ```
+  show muxcable health [OPTIONS] [PORT]
+  ```
+
+While displaying the muxcable health, users need to provide the following fields
+
+- PORT required - Port name should be a valid port
+- --json optional - -- option to display the result in json format. By default output will be in tabular format.
+
+- Ok means the cable is healthy
+
+In order to determine whether the health of the cable is Ok,
+the following are checked:
+- the vendor name is correct and able to be read
+- the FW for the SerDes is correctly loaded, verified by reading the appropriate register value
+- the counters for UART are displaying healthy status,
+  i.e. error counters and retry counters for UART or internal transfer protocols are below a threshold
+
+
+- Example:
+  ```
+  admin@sonic:~$ show muxcable health Ethernet4
+  PORT       ATTR    HEALTH
+  ---------  ------  --------
+  Ethernet4  health  Ok
+  ```
+  ```
+  admin@sonic:~$ show muxcable health Ethernet4 --json
+  ```
+  ```json
+  {
+      "health": "Ok"
+  }
+
+  ```
+
+
+**show muxcable queueinfo <port>**
+
+This command displays the queue info of the Y-cable which is connected to the muxcable. The resultant table or json output will show the queue info in terms of transactions for the UART stats, in particular those currently relevant to the MCU of the cable.
+
+- Usage:
+  ```
+  show muxcable queueinfo [OPTIONS] [PORT]
+  ```
+
+While displaying the muxcable queueinfo, users need to provide the following fields
+
+- PORT required - Port name should be a valid port
+- --json optional - -- option to display the result in json format. By default output will be in tabular format.
+
+The result will be displayed like this; each item in the dictionary shows the state of an attribute in the queue:
+```
+{'VSC': {'r_ptr': 0, 'w_ptr': 0, 'total_count': 0, 'free_count': 0, 'buff_addr': 0, 'node_size': 0}, 'UART1': {'r_ptr': 0, 'w_ptr': 0, 'total_count': 0, 'free_count': 0, 'buff_addr': 209870, 'node_size': 1682183}, 'UART2': {'r_ptr': 13262, 'w_ptr': 3, 'total_count': 0, 'free_count': 0, 'buff_addr': 12, 'node_size': 0}}
+```
+
+- Example:
+  ```
+  admin@sonic:~$ show muxcable queueinfo Ethernet0
+  PORT       ATTR        VALUE
+  ---------  ----------  -------
+  Ethernet0  uart_stat1  2
+  Ethernet0  uart_stat2  1
+  ```
+  ```
+  admin@sonic:~$ show muxcable queueinfo Ethernet4 --json
+  ```
+  ```json
+  {
+      "uart_stat1": "2",
+      "uart_stat2": "1"
+  }
+  ```
+
+**show muxcable operationtime <port>**
+
+This command displays the operationtime of the Y-cable which is connected to the muxcable. The resultant table or json output will show the current operation time of the cable in `hh:mm:ss` format. Operation time means the time elapsed since the cable was last reseated/reset, and the time would be in the format specified.
+
+- Usage:
+  ```
+  show muxcable operationtime [OPTIONS] [PORT]
+  ```
+
+While displaying the muxcable operationtime, users need to provide the following fields
+
+- PORT required - Port name should be a valid port
+- --json optional - -- option to display the result in json format. By default output will be in tabular format.
+
+
+- Example:
+  ```
+  admin@sonic:~$ show muxcable operationtime Ethernet4
+  PORT       ATTR            OPERATION_TIME
+  ---------  --------------  ----------------
+  Ethernet4  operation_time  00:22:22
+  ```
+  ```
+  admin@sonic:~$ show muxcable operationtime Ethernet4 --json
+  ```
+  ```json
+  {
+      "operation_time": "00:22:22"
+  }
+  ```
+
+**show muxcable resetcause <port>**
+
+This command displays the resetcause of the Y-cable which is connected to the muxcable. The resultant table or json output will show the most recent reset cause of the cable as a string.
+
+- Usage:
+  ```
+  show muxcable resetcause [OPTIONS] [PORT]
+  ```
+
+While displaying the muxcable resetcause, users need to provide the following fields
+
+- PORT required - Port name should be a valid port
+- --json optional - -- option to display the result in json format. 
By default output will be in tabular format.
+
+The reset cause only records the NIC MCU reset status. The NIC MCU will automatically broadcast the reset cause status to each ToR; the corresponding values returned:
+display cold reset if the last reset is a cold reset (e.g. HW/SW reset, power-resetting the cable, or rebooting the NIC server)
+display warm reset if the last reset is a warm reset (e.g. sudo config mux firmware activate....)
+the value is persistent, with no clear on read
+
+- Example:
+  ```
+  admin@sonic:~$ show muxcable resetcause Ethernet4
+  PORT       ATTR         RESETCAUSE
+  ---------  -----------  ------------
+  Ethernet4  reset_cause  warm reset
+  ```
+  ```
+  admin@sonic:~$ show muxcable resetcause Ethernet4 --json
+  ```
+  ```json
+  {
+      "reset_cause": "warm reset"
+  }
+  ```
+
+
 ### Muxcable Config commands

diff --git a/show/muxcable.py b/show/muxcable.py
index b640d32135..837e362789 100644
--- a/show/muxcable.py
+++ b/show/muxcable.py
@@ -35,6 +35,11 @@ VENDOR_NAME = "Credo"
 VENDOR_MODEL_REGEX = re.compile(r"CAC\w{3}321P2P\w{2}MS")
 
+# Define table names that interact with the CLI
+XCVRD_GET_BER_CMD_TABLE = "XCVRD_GET_BER_CMD"
+XCVRD_GET_BER_RSP_TABLE = "XCVRD_GET_BER_RSP"
+XCVRD_GET_BER_RES_TABLE = "XCVRD_GET_BER_RES"
+XCVRD_GET_BER_CMD_ARG_TABLE = "XCVRD_GET_BER_CMD_ARG"
 
 def get_asic_index_for_port(port):
     asic_index = None
@@ -276,9 +281,11 @@ def get_result(port, res_dict, cmd ,result, table_name):
             (status, fvp) = xcvrd_show_fw_res_tbl[asic_index].get(port)
             res_dir = dict(fvp)
 
+    delete_all_keys_in_db_table("STATE_DB", table_name)
+
     return res_dir
 
-def update_and_get_response_for_xcvr_cmd(cmd_name, rsp_name, exp_rsp, cmd_table_name, cmd_arg_table_name, rsp_table_name ,port, cmd_timeout_secs, param_dict= None, arg=None):
+def update_and_get_response_for_xcvr_cmd(cmd_name, rsp_name, exp_rsp, cmd_table_name, cmd_arg_table_name, rsp_table_name , res_table_name, port, cmd_timeout_secs, param_dict= None, arg=None):
 
     res_dict = {}
     state_db, appl_db = {}, {}
@@ -291,6 +298,8 @@ def update_and_get_response_for_xcvr_cmd(cmd_name, rsp_name, exp_rsp, cmd_table_
 
     time_start = time.time()
 
+    delete_all_keys_in_db_tables_helper(cmd_table_name, rsp_table_name, cmd_arg_table_name, res_table_name)
+
     sel = swsscommon.Select()
     namespaces = multi_asic.get_front_end_namespaces()
     for namespace in namespaces:
@@ -405,11 +414,26 @@ def update_and_get_response_for_xcvr_cmd(cmd_name, rsp_name, exp_rsp, cmd_table_
                     firmware_rsp_tbl[asic_index]._del(port)
                     break
 
-    delete_all_keys_in_db_table("STATE_DB", rsp_table_name)
+
+    delete_all_keys_in_db_tables_helper(cmd_table_name, rsp_table_name, cmd_arg_table_name, None)
 
     return res_dict
 
+def delete_all_keys_in_db_tables_helper(cmd_table_name, rsp_table_name, cmd_arg_table_name = None, res_table_name = None):
+
+    delete_all_keys_in_db_table("APPL_DB", cmd_table_name)
+    delete_all_keys_in_db_table("STATE_DB", rsp_table_name)
+    if cmd_arg_table_name is not None:
+        delete_all_keys_in_db_table("APPL_DB", cmd_arg_table_name)
+
+    if res_table_name is not None:
+        delete_all_keys_in_db_table("STATE_DB", res_table_name)
+
+    return 0
+
+
+
 # 'muxcable' command ("show muxcable")
 #
@@ -926,7 +950,7 @@ def berinfo(db, port, target, json_output):
         res_dict[1] = "unknown"
 
     res_dict = update_and_get_response_for_xcvr_cmd(
-        "get_ber", "status", "True", "XCVRD_GET_BER_CMD", "XCVRD_GET_BER_CMD_ARG", "XCVRD_GET_BER_RSP", port, 10, param_dict, "ber")
+        "get_ber", "status", "True", "XCVRD_GET_BER_CMD", "XCVRD_GET_BER_CMD_ARG", "XCVRD_GET_BER_RSP", None, port, 10, param_dict, "ber")
 
     if res_dict[1] == "True":
        result = 
get_result(port, res_dict, "fec" , result, "XCVRD_GET_BER_RES") @@ -978,7 +1002,7 @@ def eyeinfo(db, port, target, json_output): res_dict[1] = "unknown" res_dict = update_and_get_response_for_xcvr_cmd( - "get_ber", "status", "True", "XCVRD_GET_BER_CMD", "XCVRD_GET_BER_CMD_ARG", "XCVRD_GET_BER_RSP", port, 10, param_dict, "eye") + "get_ber", "status", "True", "XCVRD_GET_BER_CMD", "XCVRD_GET_BER_CMD_ARG", "XCVRD_GET_BER_RSP", None, port, 10, param_dict, "eye") if res_dict[1] == "True": result = get_result(port, res_dict, "fec" , result, "XCVRD_GET_BER_RES") @@ -1029,7 +1053,7 @@ def fecstatistics(db, port, target, json_output): res_dict[1] = "unknown" res_dict = update_and_get_response_for_xcvr_cmd( - "get_ber", "status", "True", "XCVRD_GET_BER_CMD", "XCVRD_GET_BER_CMD_ARG", "XCVRD_GET_BER_RSP", port, 10, param_dict, "fec_stats") + "get_ber", "status", "True", "XCVRD_GET_BER_CMD", "XCVRD_GET_BER_CMD_ARG", "XCVRD_GET_BER_RSP", None, port, 10, param_dict, "fec_stats") if res_dict[1] == "True": result = get_result(port, res_dict, "fec" , result, "XCVRD_GET_BER_RES") @@ -1080,7 +1104,7 @@ def pcsstatistics(db, port, target, json_output): res_dict[1] = "unknown" res_dict = update_and_get_response_for_xcvr_cmd( - "get_ber", "status", "True", "XCVRD_GET_BER_CMD", "XCVRD_GET_BER_CMD_ARG", "XCVRD_GET_BER_RSP", port, 10, param_dict, "pcs_stats") + "get_ber", "status", "True", "XCVRD_GET_BER_CMD", "XCVRD_GET_BER_CMD_ARG", "XCVRD_GET_BER_RSP", None, port, 10, param_dict, "pcs_stats") if res_dict[1] == "True": result = get_result(port, res_dict, "fec" , result, "XCVRD_GET_BER_RES") @@ -1129,7 +1153,7 @@ def debugdumpregisters(db, port, option, json_output): res_dict[1] = "unknown" res_dict = update_and_get_response_for_xcvr_cmd( - "get_ber", "status", "True", "XCVRD_GET_BER_CMD", "XCVRD_GET_BER_CMD_ARG", "XCVRD_GET_BER_RSP", port, 100, param_dict, "debug_dump") + "get_ber", "status", "True", "XCVRD_GET_BER_CMD", "XCVRD_GET_BER_CMD_ARG", "XCVRD_GET_BER_RSP", None, port, 100, param_dict, "debug_dump") if res_dict[1] == "True": result = get_result(port, res_dict, "fec" , result, "XCVRD_GET_BER_RES") @@ -1173,7 +1197,7 @@ def alivecablestatus(db, port, json_output): res_dict[1] = "unknown" res_dict = update_and_get_response_for_xcvr_cmd( - "get_ber", "status", "True", "XCVRD_GET_BER_CMD", None, "XCVRD_GET_BER_RSP", port, 10, None, "cable_alive") + "get_ber", "status", "True", "XCVRD_GET_BER_CMD", None, "XCVRD_GET_BER_RSP", None, port, 10, None, "cable_alive") if res_dict[1] == "True": result = get_result(port, res_dict, "fec" , result, "XCVRD_GET_BER_RES") @@ -1245,7 +1269,7 @@ def get_hwmode_mux_direction_port(db, port): if port is not None: res_dict = update_and_get_response_for_xcvr_cmd( - "state", "state", "True", "XCVRD_SHOW_HWMODE_DIR_CMD", "XCVRD_SHOW_HWMODE_DIR_RES", "XCVRD_SHOW_HWMODE_DIR_RSP", port, HWMODE_MUXDIRECTION_TIMEOUT, None, "probe") + "state", "state", "True", "XCVRD_SHOW_HWMODE_DIR_CMD", "XCVRD_SHOW_HWMODE_DIR_RES", "XCVRD_SHOW_HWMODE_DIR_RSP", None, port, HWMODE_MUXDIRECTION_TIMEOUT, None, "probe") result = get_result(port, res_dict, "muxdirection" , result, "XCVRD_SHOW_HWMODE_DIR_RES") @@ -1464,7 +1488,7 @@ def switchmode(db, port): res_dict[0] = CONFIG_FAIL res_dict[1] = "unknown" res_dict = update_and_get_response_for_xcvr_cmd( - "state", "state", "True", "XCVRD_SHOW_HWMODE_SWMODE_CMD", None, "XCVRD_SHOW_HWMODE_SWMODE_RSP", port, 1, None, "probe") + "state", "state", "True", "XCVRD_SHOW_HWMODE_SWMODE_CMD", None, "XCVRD_SHOW_HWMODE_SWMODE_RSP", None, port, 1, None, "probe") body = [] 
temp_list = []
@@ -1520,7 +1544,7 @@ def switchmode(db, port):
             res_dict[0] = CONFIG_FAIL
             res_dict[1] = "unknown"
             res_dict = update_and_get_response_for_xcvr_cmd(
-                "state", "state", "True", "XCVRD_SHOW_HWMODE_SWMODE_CMD", None, "XCVRD_SHOW_HWMODE_SWMODE_RSP", port, 1, None, "probe")
+                "state", "state", "True", "XCVRD_SHOW_HWMODE_SWMODE_CMD", None, "XCVRD_SHOW_HWMODE_SWMODE_RSP", None, port, 1, None, "probe")
             port = platform_sfputil_helper.get_interface_alias(port, db)
             temp_list.append(port)
             temp_list.append(res_dict[1])
@@ -1705,7 +1729,7 @@ def version(db, port, active):
     mux_info_dict["version_self_next"] = "N/A"
 
     res_dict = update_and_get_response_for_xcvr_cmd(
-        "firmware_version", "status", "True", "XCVRD_SHOW_FW_CMD", None, "XCVRD_SHOW_FW_RSP", port, 20, None, "probe")
+        "firmware_version", "status", "True", "XCVRD_SHOW_FW_CMD", None, "XCVRD_SHOW_FW_RSP", None, port, 20, None, "probe")
 
     if res_dict[1] == "True":
         mux_info_dict = get_response_for_version(port, mux_info_dict)
@@ -1874,7 +1898,7 @@ def event_log(db, port, json_output):
         res_dict[1] = "unknown"
 
         res_dict = update_and_get_response_for_xcvr_cmd(
-            "show_event", "status", "True", "XCVRD_EVENT_LOG_CMD", None, "XCVRD_EVENT_LOG_RSP", port, 1000, None, "probe")
+            "show_event", "status", "True", "XCVRD_EVENT_LOG_CMD", None, "XCVRD_EVENT_LOG_RSP", None, port, 1000, None, "probe")
 
         if res_dict[1] == "True":
             result = get_event_logs(port, res_dict, mux_info_dict)
@@ -1916,7 +1940,7 @@ def get_fec_anlt_speed(db, port, json_output):
         res_dict[1] = "unknown"
 
         res_dict = update_and_get_response_for_xcvr_cmd(
-            "get_fec", "status", "True", "XCVRD_GET_FEC_CMD", None, "XCVRD_GET_FEC_RSP", port, 10, None, "probe")
+            "get_fec", "status", "True", "XCVRD_GET_FEC_CMD", None, "XCVRD_GET_FEC_RSP", None, port, 10, None, "probe")
 
         if res_dict[1] == "True":
             result = get_result(port, res_dict, "fec" , result, "XCVRD_GET_FEC_RES")
@@ -2255,4 +2279,197 @@ def muxdirection(db, port, json_output):
     if rc_exit == False:
         sys.exit(EXIT_FAIL)
 
+@muxcable.command()
+@click.argument('port', metavar='<port>', required=True, default=None)
+@click.argument('option', required=False, default=None)
+@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, help="display the output in json format")
+@clicommon.pass_db
+def queueinfo(db, port, option, json_output):
+    """Show muxcable queue info, pre-agreed by vendors"""
+
+    port = platform_sfputil_helper.get_interface_name(port, db)
+
+    if port is not None:
+
+        res_dict = {}
+        result = {}
+        param_dict = {}
+        param_dict["option"] = option
+
+
+        res_dict[0] = CONFIG_FAIL
+        res_dict[1] = "unknown"
+
+        res_dict = update_and_get_response_for_xcvr_cmd(
+            "get_ber", "status", "True", XCVRD_GET_BER_CMD_TABLE, XCVRD_GET_BER_CMD_ARG_TABLE, XCVRD_GET_BER_RSP_TABLE, XCVRD_GET_BER_RES_TABLE, port, 100, param_dict, "queue_info")
+
+        if res_dict[1] == "True":
+            result = get_result(port, res_dict, "fec" , result, XCVRD_GET_BER_RES_TABLE)
+
+
+        port = platform_sfputil_helper.get_interface_alias(port, db)
+
+        if json_output:
+            click.echo("{}".format(json.dumps(result, indent=4)))
+        else:
+            headers = ['PORT', 'ATTR', 'VALUE']
+            res = [[port]+[key] + [val] for key, val in result.items()]
+            click.echo(tabulate(res, headers=headers))
+    else:
+        click.echo("Did not get a valid Port {} for queue info".format(port))
+        sys.exit(CONFIG_FAIL)
+
+
+@muxcable.command()
+@click.argument('port', metavar='<port>', required=True, default=None)
+@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, 
help="display the output in json format") +@clicommon.pass_db +def health(db, port, json_output): + """Show muxcable health information as Ok or Not Ok""" + + """ + in order to detemine whether the health of the cable is Ok + the following are checked + - the vendor name is correct able to be read + - the FW is correctly loaded for SerDes by reading the appropriate register val + - the Counters for UART are displaying healthy status + i.e Error Counters , retry Counters for UART or internal xfer protocols are below a threshold + """ + + port = platform_sfputil_helper.get_interface_name(port, db) + + if port is not None: + + res_dict = {} + result = {} + + + res_dict[0] = CONFIG_FAIL + res_dict[1] = "unknown" + + res_dict = update_and_get_response_for_xcvr_cmd( + "get_ber", "status", "True", XCVRD_GET_BER_CMD_TABLE, None, XCVRD_GET_BER_RSP_TABLE, XCVRD_GET_BER_RES_TABLE, port, 10, None, "health_check") + + if res_dict[1] == "True": + result = get_result(port, res_dict, "fec" , result, XCVRD_GET_BER_RES_TABLE) + + + + port = platform_sfputil_helper.get_interface_alias(port, db) + + cable_health = result.get("health_check", None) + + if cable_health == "False": + result["health_check"] = "Not Ok" + elif cable_health == "True": + result["health_check"] = "Ok" + else: + result["health_check"] = "Unknown" + + + + if json_output: + click.echo("{}".format(json.dumps(result, indent=4))) + else: + headers = ['PORT', 'ATTR', 'HEALTH'] + res = [[port]+[key] + [val] for key, val in result.items()] + click.echo(tabulate(res, headers=headers)) + else: + click.echo("Did not get a valid Port for cable health status".format(port)) + sys.exit(CONFIG_FAIL) + +@muxcable.command() +@click.argument('port', metavar='', required=True, default=None) +@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, help="display the output in json format") +@clicommon.pass_db +def resetcause(db, port, json_output): + """Show muxcable resetcause information """ + + port = platform_sfputil_helper.get_interface_name(port, db) + + """ + the reset cause only records NIC MCU reset status. The NIC MCU will automatically broadcast the reset cause status to each TORs, corresponding values returned + return 0 if the last reset is cold reset (ex. HW/SW reset, power reset the cable, or reboot the NIC server) + return 1 if the last reset is warm reset (ex. sudo config mux firmware activate....) 
+ the value is persistent, no clear on read + """ + if port is not None: + + res_dict = {} + result = {} + + + res_dict[0] = CONFIG_FAIL + res_dict[1] = "unknown" + + res_dict = update_and_get_response_for_xcvr_cmd( + "get_ber", "status", "True", XCVRD_GET_BER_CMD_TABLE, None, XCVRD_GET_BER_RSP_TABLE, XCVRD_GET_BER_RES_TABLE, port, 10, None, "reset_cause") + + if res_dict[1] == "True": + result = get_result(port, res_dict, "fec" , result, XCVRD_GET_BER_RES_TABLE) + + + port = platform_sfputil_helper.get_interface_alias(port, db) + + reset_cause = result.get("reset_cause", None) + + if reset_cause == "0": + result["reset_cause"] = "cold reset" + elif reset_cause == "1": + result["reset_cause"] = "warm reset" + else: + result["reset_cause"] = "Unknown" + + if json_output: + click.echo("{}".format(json.dumps(result, indent=4))) + else: + headers = ['PORT', 'ATTR', 'RESETCAUSE'] + res = [[port]+[key] + [val] for key, val in result.items()] + click.echo(tabulate(res, headers=headers)) + else: + click.echo("Did not get a valid Port for cable resetcause information".format(port)) + sys.exit(CONFIG_FAIL) + +@muxcable.command() +@click.argument('port', metavar='', required=True, default=None) +@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, help="display the output in json format") +@clicommon.pass_db +def operationtime(db, port, json_output): + """Show muxcable operation time hh:mm:ss forrmat""" + + port = platform_sfputil_helper.get_interface_name(port, db) + + if port is not None: + + res_dict = {} + result = {} + + + res_dict[0] = CONFIG_FAIL + res_dict[1] = "unknown" + + res_dict = update_and_get_response_for_xcvr_cmd( + "get_ber", "status", "True", XCVRD_GET_BER_CMD_TABLE, None, XCVRD_GET_BER_RSP_TABLE, XCVRD_GET_BER_RES_TABLE, port, 10, None, "operation_time") + + if res_dict[1] == "True": + result = get_result(port, res_dict, "fec" , result, XCVRD_GET_BER_RES_TABLE) + + + + port = platform_sfputil_helper.get_interface_alias(port, db) + + actual_time = result.get("operation_time", 0) + if actual_time is not None: + time = '{0:02.0f}:{1:02.0f}'.format(*divmod(int(actual_time) * 60, 60)) + result['operation_time'] = time + + if json_output: + click.echo("{}".format(json.dumps(result, indent=4))) + else: + headers = ['PORT', 'ATTR', 'OPERATION_TIME'] + res = [[port]+[key] + [val] for key, val in result.items()] + click.echo(tabulate(res, headers=headers)) + else: + click.echo("Did not get a valid Port for operation time".format(port)) + sys.exit(CONFIG_FAIL) diff --git a/tests/muxcable_test.py b/tests/muxcable_test.py index 7e4b4b250c..0405b27d87 100644 --- a/tests/muxcable_test.py +++ b/tests/muxcable_test.py @@ -613,6 +613,48 @@ Ethernet0 server_ipv4 10.2.1.1 added added """ + +show_muxcable_operationtime_expected_port_output="""\ +PORT ATTR OPERATION_TIME +--------- -------------- ---------------- +Ethernet0 operation_time 200:00 +""" + +show_muxcable_health_expected_port_output="""\ +PORT ATTR HEALTH +--------- ------------ -------- +Ethernet0 health_check Ok +""" + + +show_muxcable_queueinfo_expected_port_output="""\ +PORT ATTR VALUE +--------- ---------- ------- +Ethernet0 uart_stat1 2 +Ethernet0 uart_stat2 1 +""" + +show_muxcable_resetcause_expected_port_output="""\ +PORT ATTR RESETCAUSE +--------- ----------- ------------ +Ethernet0 reset_cause warm reset +""" + + +show_muxcable_health_expected_port_output_json="""\ +{ + "health_check": "Ok" +} +""" + + + +show_muxcable_resetcause_expected_port_output_json="""\ +{ + "reset_cause": "warm reset" +} 
+""" + class TestMuxcable(object): @classmethod def setup_class(cls): @@ -2554,6 +2596,88 @@ def test_show_muxcable_hwmode_muxdirection_port_active(self): assert result.exit_code == 0 assert result.output == show_muxcable_hwmode_muxdirection_active_expected_output_json + + @mock.patch('show.muxcable.delete_all_keys_in_db_table', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.update_and_get_response_for_xcvr_cmd', mock.MagicMock(return_value={0: 0, + 1: "True"})) + @mock.patch('show.muxcable.get_result', mock.MagicMock(return_value={"health_check": "True"})) + def test_show_mux_health(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(show.cli.commands["muxcable"].commands["health"], + ["Ethernet0"], obj=db) + assert result.output == show_muxcable_health_expected_port_output + + + @mock.patch('show.muxcable.delete_all_keys_in_db_table', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.update_and_get_response_for_xcvr_cmd', mock.MagicMock(return_value={0: 0, + 1: "True"})) + @mock.patch('show.muxcable.get_result', mock.MagicMock(return_value={"health_check": "True"})) + def test_show_mux_health_json(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(show.cli.commands["muxcable"].commands["health"], + ["Ethernet0", "--json"], obj=db) + assert result.output == show_muxcable_health_expected_port_output_json + + + + + @mock.patch('show.muxcable.delete_all_keys_in_db_table', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.update_and_get_response_for_xcvr_cmd', mock.MagicMock(return_value={0: 0, + 1: "True"})) + @mock.patch('show.muxcable.get_result', mock.MagicMock(return_value={"operation_time": "200"})) + def test_show_mux_operation_time(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(show.cli.commands["muxcable"].commands["operationtime"], + ["Ethernet0"], obj=db) + assert result.output == show_muxcable_operationtime_expected_port_output + + @mock.patch('show.muxcable.delete_all_keys_in_db_table', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.update_and_get_response_for_xcvr_cmd', mock.MagicMock(return_value={0: 0, + 1: "True"})) + @mock.patch('show.muxcable.get_result', mock.MagicMock(return_value={"uart_stat1": "2", + "uart_stat2": "1"})) + def test_show_mux_queue_info(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(show.cli.commands["muxcable"].commands["queueinfo"], + ["Ethernet0"], obj=db) + assert result.output == show_muxcable_queueinfo_expected_port_output + + + @mock.patch('show.muxcable.delete_all_keys_in_db_table', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.update_and_get_response_for_xcvr_cmd', mock.MagicMock(return_value={0: 0, + 1: "True"})) + @mock.patch('show.muxcable.get_result', mock.MagicMock(return_value={"reset_cause": "1"})) + def test_show_mux_resetcause(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(show.cli.commands["muxcable"].commands["resetcause"], + ["Ethernet0"], obj=db) + assert result.output == show_muxcable_resetcause_expected_port_output + + + + @mock.patch('show.muxcable.delete_all_keys_in_db_table', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.update_and_get_response_for_xcvr_cmd', mock.MagicMock(return_value={0: 0, + 1: "True"})) + @mock.patch('show.muxcable.get_result', mock.MagicMock(return_value={"reset_cause": "1"})) + def test_show_mux_resetcause_json(self): + runner = CliRunner() + db = Db() + + result = 
runner.invoke(show.cli.commands["muxcable"].commands["resetcause"], + ["Ethernet0", "--json"], obj=db) + assert result.output == show_muxcable_resetcause_expected_port_output_json + + @classmethod def teardown_class(cls): os.environ['UTILITIES_UNIT_TESTING'] = "0" From e98011f8d5bfa6ae20433e6ac54fc1597dae61de Mon Sep 17 00:00:00 2001 From: StormLiangMS <89824293+StormLiangMS@users.noreply.github.com> Date: Mon, 27 Feb 2023 11:14:54 +0800 Subject: [PATCH 23/66] Revert "Secure upgrade (#2337)" (#2675) This reverts commit 6fe8599216afb1c302e77c52235c4849be6042b2. --- scripts/verify_image_sign.sh | 75 --------------- scripts/verify_image_sign_common.sh | 34 ------- setup.py | 2 - sonic_installer/bootloader/grub.py | 11 --- sonic_installer/main.py | 12 +-- tests/installer_bootloader_grub_test.py | 8 -- tests/scripts/create_mock_image.sh | 40 -------- .../create_sign_and_verify_test_files.sh | 91 ------------------- tests/scripts/verify_image_sign_test.sh | 29 ------ tests/sign_and_verify_test.py | 70 -------------- tests/test_sonic_installer.py | 9 +- tests/verify_image_sign_test.sh | 29 ------ 12 files changed, 2 insertions(+), 408 deletions(-) delete mode 100644 scripts/verify_image_sign.sh delete mode 100755 scripts/verify_image_sign_common.sh delete mode 100755 tests/scripts/create_mock_image.sh delete mode 100755 tests/scripts/create_sign_and_verify_test_files.sh delete mode 100755 tests/scripts/verify_image_sign_test.sh delete mode 100644 tests/sign_and_verify_test.py delete mode 100755 tests/verify_image_sign_test.sh diff --git a/scripts/verify_image_sign.sh b/scripts/verify_image_sign.sh deleted file mode 100644 index d66148d597..0000000000 --- a/scripts/verify_image_sign.sh +++ /dev/null @@ -1,75 +0,0 @@ -#!/bin/sh -image_file="${1}" -cms_sig_file="sig.cms" -lines_for_lookup=50 -SECURE_UPGRADE_ENABLED=0 -DIR="$(dirname "$0")" -if [ -d "/sys/firmware/efi/efivars" ]; then - if ! [ -n "$(ls -A /sys/firmware/efi/efivars 2>/dev/null)" ]; then - mount -t efivarfs none /sys/firmware/efi/efivars 2>/dev/null - fi - SECURE_UPGRADE_ENABLED=$(bootctl status 2>/dev/null | grep -c "Secure Boot: enabled") -else - echo "efi not supported - exiting without verification" - exit 0 -fi - -. /usr/local/bin/verify_image_sign_common.sh - -if [ ${SECURE_UPGRADE_ENABLED} -eq 0 ]; then - echo "secure boot not enabled - exiting without image verification" - exit 0 -fi - -clean_up () -{ - if [ -d ${EFI_CERTS_DIR} ]; then rm -rf ${EFI_CERTS_DIR}; fi - if [ -d "${TMP_DIR}" ]; then rm -rf ${TMP_DIR}; fi - exit $1 -} - -TMP_DIR=$(mktemp -d) -DATA_FILE="${TMP_DIR}/data.bin" -CMS_SIG_FILE="${TMP_DIR}/${cms_sig_file}" -TAR_SIZE=$(head -n $lines_for_lookup $image_file | grep "payload_image_size=" | cut -d"=" -f2- ) -SHARCH_SIZE=$(sed '/^exit_marker$/q' $image_file | wc -c) -SIG_PAYLOAD_SIZE=$(($TAR_SIZE + $SHARCH_SIZE )) -# Extract cms signature from signed file -# Add extra byte for payload -sed -e '1,/^exit_marker$/d' $image_file | tail -c +$(( $TAR_SIZE + 1 )) > $CMS_SIG_FILE -# Extract image from signed file -head -c $SIG_PAYLOAD_SIZE $image_file > $DATA_FILE -# verify signature with certificate fetched with efi tools -EFI_CERTS_DIR=/tmp/efi_certs -[ -d $EFI_CERTS_DIR ] && rm -rf $EFI_CERTS_DIR -mkdir $EFI_CERTS_DIR -efi-readvar -v db -o $EFI_CERTS_DIR/db_efi >/dev/null || -{ - echo "Error: unable to read certs from efi db: $?" - clean_up 1 -} -# Convert one file to der certificates -sig-list-to-certs $EFI_CERTS_DIR/db_efi $EFI_CERTS_DIR/db >/dev/null|| -{ - echo "Error: convert sig list to certs: $?" 
- clean_up 1 -} -for file in $(ls $EFI_CERTS_DIR | grep "db-"); do - LOG=$(openssl x509 -in $EFI_CERTS_DIR/$file -inform der -out $EFI_CERTS_DIR/cert.pem 2>&1) - if [ $? -ne 0 ]; then - logger "cms_validation: $LOG" - fi - # Verify detached signature - LOG=$(verify_image_sign_common $image_file $DATA_FILE $CMS_SIG_FILE) - VALIDATION_RES=$? - if [ $VALIDATION_RES -eq 0 ]; then - RESULT="CMS Verified OK using efi keys" - echo "verification ok:$RESULT" - # No need to continue. - # Exit without error if any success signature verification. - clean_up 0 - fi -done -echo "Failure: CMS signature Verification Failed: $LOG" - -clean_up 1 \ No newline at end of file diff --git a/scripts/verify_image_sign_common.sh b/scripts/verify_image_sign_common.sh deleted file mode 100755 index ec6511bc6d..0000000000 --- a/scripts/verify_image_sign_common.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash -verify_image_sign_common() { - image_file="${1}" - cms_sig_file="sig.cms" - TMP_DIR=$(mktemp -d) - DATA_FILE="${2}" - CMS_SIG_FILE="${3}" - - openssl version | awk '$2 ~ /(^0\.)|(^1\.(0\.|1\.0))/ { exit 1 }' - if [ $? -eq 0 ]; then - # for version 1.1.1 and later - no_check_time="-no_check_time" - else - # for version older than 1.1.1 use noattr - no_check_time="-noattr" - fi - - # making sure image verification is supported - EFI_CERTS_DIR=/tmp/efi_certs - RESULT="CMS Verification Failure" - LOG=$(openssl cms -verify $no_check_time -noout -CAfile $EFI_CERTS_DIR/cert.pem -binary -in ${CMS_SIG_FILE} -content ${DATA_FILE} -inform pem 2>&1 > /dev/null ) - VALIDATION_RES=$? - if [ $VALIDATION_RES -eq 0 ]; then - RESULT="CMS Verified OK" - if [ -d "${TMP_DIR}" ]; then rm -rf ${TMP_DIR}; fi - echo "verification ok:$RESULT" - # No need to continue. - # Exit without error if any success signature verification. 
- return 0 - fi - - if [ -d "${TMP_DIR}" ]; then rm -rf ${TMP_DIR}; fi - return 1 -} diff --git a/setup.py b/setup.py index 231b80c8ed..70d7473bd7 100644 --- a/setup.py +++ b/setup.py @@ -154,8 +154,6 @@ 'scripts/memory_threshold_check_handler.py', 'scripts/techsupport_cleanup.py', 'scripts/storm_control.py', - 'scripts/verify_image_sign.sh', - 'scripts/verify_image_sign_common.sh', 'scripts/check_db_integrity.py', 'scripts/sysreadyshow' ], diff --git a/sonic_installer/bootloader/grub.py b/sonic_installer/bootloader/grub.py index dcafc3f840..7ab5c6c0bc 100644 --- a/sonic_installer/bootloader/grub.py +++ b/sonic_installer/bootloader/grub.py @@ -153,17 +153,6 @@ def verify_image_platform(self, image_path): # Check if platform is inside image's target platforms return self.platform_in_platforms_asic(platform, image_path) - def verify_image_sign(self, image_path): - click.echo('Verifying image signature') - verification_script_name = 'verify_image_sign.sh' - script_path = os.path.join('/usr', 'local', 'bin', verification_script_name) - if not os.path.exists(script_path): - click.echo("Unable to find verification script in path " + script_path) - return False - verification_result = subprocess.run([script_path, image_path], capture_output=True) - click.echo(str(verification_result.stdout) + " " + str(verification_result.stderr)) - return verification_result.returncode == 0 - @classmethod def detect(cls): return os.path.isfile(os.path.join(HOST_PATH, 'grub/grub.cfg')) diff --git a/sonic_installer/main.py b/sonic_installer/main.py index d78259317e..ce1c15866d 100644 --- a/sonic_installer/main.py +++ b/sonic_installer/main.py @@ -511,8 +511,7 @@ def sonic_installer(): @click.option('-y', '--yes', is_flag=True, callback=abort_if_false, expose_value=False, prompt='New image will be installed, continue?') @click.option('-f', '--force', '--skip-secure-check', is_flag=True, - help="Force installation of an image of a non-secure type than secure running " + - " image, this flag does not affect secure upgrade image verification") + help="Force installation of an image of a non-secure type than secure running image") @click.option('--skip-platform-check', is_flag=True, help="Force installation of an image of a type which is not of the same platform") @click.option('--skip_migration', is_flag=True, @@ -577,14 +576,6 @@ def install(url, force, skip_platform_check=False, skip_migration=False, skip_pa "Aborting...", LOG_ERR) raise click.Abort() - # Calling verification script by default - signature will be checked if enabled in bios - echo_and_log("Verifing image {} signature...".format(binary_image_version)) - if not bootloader.verify_image_sign(image_path): - echo_and_log('Error: Failed verify image signature', LOG_ERR) - raise click.Abort() - else: - echo_and_log('Verification successful') - echo_and_log("Installing image {} and setting it as default...".format(binary_image_version)) with SWAPAllocator(not skip_setup_swap, swap_mem_size, total_mem_threshold, available_mem_threshold): bootloader.install_image(image_path) @@ -967,6 +958,5 @@ def verify_next_image(): sys.exit(1) click.echo('Image successfully verified') - if __name__ == '__main__': sonic_installer() diff --git a/tests/installer_bootloader_grub_test.py b/tests/installer_bootloader_grub_test.py index 10c9dc5ba7..ff35e13b37 100644 --- a/tests/installer_bootloader_grub_test.py +++ b/tests/installer_bootloader_grub_test.py @@ -53,11 +53,3 @@ def test_set_fips_grub(): # Cleanup the _tmp_host folder shutil.rmtree(tmp_host_path) - -def 
test_verify_image(): - - bootloader = grub.GrubBootloader() - image = f'{grub.IMAGE_PREFIX}expeliarmus-{grub.IMAGE_PREFIX}abcde' - - # command should fail - assert not bootloader.verify_image_sign(image) diff --git a/tests/scripts/create_mock_image.sh b/tests/scripts/create_mock_image.sh deleted file mode 100755 index f23032af0d..0000000000 --- a/tests/scripts/create_mock_image.sh +++ /dev/null @@ -1,40 +0,0 @@ -repo_dir=$1 -input_image=$2 -output_file=$3 -cert_file=$4 -key_file=$5 -tmp_dir= -clean_up() -{ - sudo rm -rf $tmp_dir - sudo rm -rf $output_file - exit $1 -} - -DIR="$(dirname "$0")" - -tmp_dir=$(mktemp -d) -sha1=$(cat $input_image | sha1sum | awk '{print $1}') -echo -n "." -cp $repo_dir/installer/sharch_body.sh $output_file || { - echo "Error: Problems copying sharch_body.sh" - clean_up 1 -} -# Replace variables in the sharch template -sed -i -e "s/%%IMAGE_SHA1%%/$sha1/" $output_file -echo -n "." -tar_size="$(wc -c < "${input_image}")" -cat $input_image >> $output_file -sed -i -e "s|%%PAYLOAD_IMAGE_SIZE%%|${tar_size}|" ${output_file} -CMS_SIG="${tmp_dir}/signature.sig" - -echo "$0 CMS signing ${input_image} with ${key_file}. Output file ${output_file}" -. $repo_dir/scripts/sign_image_dev.sh -sign_image_dev ${cert_file} ${key_file} $output_file ${CMS_SIG} || clean_up 1 - -cat ${CMS_SIG} >> ${output_file} -echo "Signature done." -# append signature to binary -sudo rm -rf ${CMS_SIG} -sudo rm -rf $tmp_dir -exit 0 diff --git a/tests/scripts/create_sign_and_verify_test_files.sh b/tests/scripts/create_sign_and_verify_test_files.sh deleted file mode 100755 index 0040c04a7a..0000000000 --- a/tests/scripts/create_sign_and_verify_test_files.sh +++ /dev/null @@ -1,91 +0,0 @@ -repo_dir=$1 -out_dir=$2 -mock_image="mock_img.bin" -output_file=$out_dir/output_file.bin -cert_file=$3 -other_cert_file=$4 -tmp_dir= -clean_up() -{ - sudo rm -rf $tmp_dir - sudo rm -rf $mock_image - exit $1 -} -DIR="$(dirname "$0")" -[ -d $out_dir ] || rm -rf $out_dir -mkdir $out_dir -tmp_dir=$(mktemp -d) -#generate self signed keys and certificate -key_file=$tmp_dir/private-key.pem -pub_key_file=$tmp_dir/public-key.pem -openssl ecparam -name secp256r1 -genkey -noout -out $key_file -openssl ec -in $key_file -pubout -out $pub_key_file -openssl req -new -x509 -key $key_file -out $cert_file -days 360 -subj "/C=US/ST=Test/L=Test/O=Test/CN=Test" -alt_key_file=$tmp_dir/alt-private-key.pem -alt_pub_key_file=$tmp_dir/alt-public-key.pem -openssl ecparam -name secp256r1 -genkey -noout -out $alt_key_file -openssl ec -in $alt_key_file -pubout -out $alt_pub_key_file -openssl req -new -x509 -key $alt_key_file -out $other_cert_file -days 360 -subj "/C=US/ST=Test/L=Test/O=Test/CN=Test" - -echo "this is a mock image\nThis is another line !2#4%6\n" > $mock_image -echo "Created a mock image with following text:" -cat $mock_image -# create signed mock image - -sh $DIR/create_mock_image.sh $repo_dir $mock_image $output_file $cert_file $key_file || { - echo "Error: unable to create mock image" - clean_up 1 -} - -[ -f "$output_file" ] || { - echo "signed mock image not created - exiting without testing" - clean_up 1 -} - -test_image_1=$out_dir/test_image_1.bin -cp -v $output_file $test_image_1 || { - echo "Error: Problems copying image" - clean_up 1 -} - -# test_image_1 = modified image size to something else - should fail on signature verification -image_size=$(sed -n 's/^payload_image_size=\(.*\)/\1/p' < $test_image_1) -sed -i "/payload_image_size=/c\payload_image_size=$(($image_size - 5))" $test_image_1 - 
-test_image_2=$out_dir/test_image_2.bin -cp -v $output_file $test_image_2 || { - echo "Error: Problems copying image" - clean_up 1 -} - -# test_image_2 = modified image sha1 to other sha1 value - should fail on signature verification -im_sha=$(sed -n 's/^payload_sha1=\(.*\)/\1/p' < $test_image_2) -sed -i "/payload_sha1=/c\payload_sha1=2f1bbd5a0d411253103e688e4e66c00c94bedd40" $test_image_2 - -tmp_image=$tmp_dir/"tmp_image.bin" -echo "this is a different image now" >> $mock_image -sh $DIR/create_mock_image.sh $repo_dir $mock_image $tmp_image $cert_file $key_file || { - echo "Error: unable to create mock image" - clean_up 1 -} -# test_image_3 = original mock image with wrong signature -# Extract cms signature from signed file -test_image_3=$out_dir/"test_image_3.bin" -tmp_sig="${tmp_dir}/tmp_sig.sig" -TMP_TAR_SIZE=$(head -n 50 $tmp_image | grep "payload_image_size=" | cut -d"=" -f2- ) -sed -e '1,/^exit_marker$/d' $tmp_image | tail -c +$(( $TMP_TAR_SIZE + 1 )) > $tmp_sig - -TAR_SIZE=$(head -n 50 $output_file | grep "payload_image_size=" | cut -d"=" -f2- ) -SHARCH_SIZE=$(sed '/^exit_marker$/q' $output_file | wc -c) -SIG_PAYLOAD_SIZE=$(($TAR_SIZE + $SHARCH_SIZE )) -head -c $SIG_PAYLOAD_SIZE $output_file > $test_image_3 -sudo rm -rf $tmp_image - -cat ${tmp_sig} >> ${test_image_3} - -# test_image_4 = modified image with original mock image signature -test_image_4=$out_dir/"test_image_4.bin" -head -c $SIG_PAYLOAD_SIZE $output_file > $test_image_4 -echo "this is additional line" >> $test_image_4 -cat ${tmp_sig} >> ${test_image_4} -clean_up 0 \ No newline at end of file diff --git a/tests/scripts/verify_image_sign_test.sh b/tests/scripts/verify_image_sign_test.sh deleted file mode 100755 index f4abd2584f..0000000000 --- a/tests/scripts/verify_image_sign_test.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -image_file="${1}" -cert_path="${2}" -cms_sig_file="sig.cms" -TMP_DIR=$(mktemp -d) -DATA_FILE="${TMP_DIR}/data.bin" -CMS_SIG_FILE="${TMP_DIR}/${cms_sig_file}" -lines_for_lookup=50 - -TAR_SIZE=$(head -n $lines_for_lookup $image_file | grep "payload_image_size=" | cut -d"=" -f2- ) -SHARCH_SIZE=$(sed '/^exit_marker$/q' $image_file | wc -c) -SIG_PAYLOAD_SIZE=$(($TAR_SIZE + $SHARCH_SIZE )) -# Extract cms signature from signed file - exit marker marks last sharch prefix + number of image lines + 1 for next linel -# Add extra byte for payload - extracting image signature from line after data file -sed -e '1,/^exit_marker$/d' $image_file | tail -c +$(( $TAR_SIZE + 1 )) > $CMS_SIG_FILE -# Extract image from signed file -head -c $SIG_PAYLOAD_SIZE $image_file > $DATA_FILE -EFI_CERTS_DIR=/tmp/efi_certs -[ -d $EFI_CERTS_DIR ] && rm -rf $EFI_CERTS_DIR -mkdir $EFI_CERTS_DIR -cp $cert_path $EFI_CERTS_DIR/cert.pem - -DIR="$(dirname "$0")" -. $DIR/verify_image_sign_common.sh -verify_image_sign_common $image_file $DATA_FILE $CMS_SIG_FILE -VERIFICATION_RES=$? 
-if [ -d "${TMP_DIR}" ]; then rm -rf ${TMP_DIR}; fi -[ -d $EFI_CERTS_DIR ] && rm -rf $EFI_CERTS_DIR -exit $VERIFICATION_RES \ No newline at end of file diff --git a/tests/sign_and_verify_test.py b/tests/sign_and_verify_test.py deleted file mode 100644 index 77d58a4ac9..0000000000 --- a/tests/sign_and_verify_test.py +++ /dev/null @@ -1,70 +0,0 @@ - -import subprocess -import os -import sys -import shutil - - -class TestSignVerify(object): - def _run_verification_script_and_check(self, image, cert_file_path, success_str, expected_value=0): - res = subprocess.run(['sh', self._verification_script, image, cert_file_path]) - assert res.returncode == expected_value - print(success_str) - - def test_basic_signature_verification(self): - self._run_verification_script_and_check(os.path.join(self._out_dir_path, 'output_file.bin'), - self._cert_file_path, "test case 1 - basic verify signature - SUCCESS") - - # change image size to something else - should fail on signature verification - def test_modified_image_size(self): - self._run_verification_script_and_check(os.path.join(self._out_dir_path, 'test_image_1.bin'), - self._cert_file_path, "test case 2 - modified image size - SUCCESS", 1) - - def test_modified_image_sha1(self): - self._run_verification_script_and_check(os.path.join(self._out_dir_path, 'test_image_2.bin'), - self._cert_file_path, "test case 3 - modified image sha1 - SUCCESS", 1) - - def test_modified_image_data(self): - self._run_verification_script_and_check(os.path.join(self._out_dir_path, 'test_image_3.bin'), - self._cert_file_path, "test case 4 - modified image data - SUCCESS", 1) - - def test_modified_image_signature(self): - self._run_verification_script_and_check(os.path.join(self._out_dir_path, 'test_image_4.bin'), - self._cert_file_path, "test case 5 - modified image data - SUCCESS", 1) - - def test_verify_image_with_wrong_certificate(self): - self._run_verification_script_and_check(os.path.join(self._out_dir_path, 'output_file.bin'), - self._alt_cert_path, "test case 6 - verify with wrong signature - SUCCESS", 1) - - def __init__(self): - self._test_path = os.path.dirname(os.path.abspath(__file__)) - self._modules_path = os.path.dirname(self._test_path) - self._repo_path = os.path.join(self._modules_path, '../..') - self._test_scripts_path = os.path.join(self._test_path, "scripts") - sys.path.insert(0, self._test_path) - sys.path.insert(0, self._modules_path) - sys.path.insert(0, self._test_scripts_path) - script_path = os.path.join(self._test_scripts_path, 'create_sign_and_verify_test_files.sh') - self._verification_script = os.path.join(self._test_scripts_path, 'verify_image_sign_test.sh') - self._out_dir_path = '/tmp/sign_verify_test' - self._cert_file_path = os.path.join(self._out_dir_path, 'self_certificate.pem') - self._alt_cert_path = os.path.join(self._out_dir_path, 'alt_self_certificate.pem') - create_files_result = subprocess.run(['sh', script_path, self._repo_path, self._out_dir_path, - self._cert_file_path, - self._alt_cert_path]) - print(create_files_result) - assert create_files_result.returncode == 0 - - def __del__(self): - shutil.rmtree(self._out_dir_path) - - -if __name__ == '__main__': - t = TestSignVerify() - t.test_basic_signature_verification() - subprocess.run(['ls', '/tmp/sign_verify_test']) - t.test_modified_image_data() - t.test_modified_image_sha1() - t.test_modified_image_signature() - t.test_modified_image_size() - t.test_verify_image_with_wrong_certificate() diff --git a/tests/test_sonic_installer.py b/tests/test_sonic_installer.py index 
0f8fcdb8ca..c445dfb6e3 100644
--- a/tests/test_sonic_installer.py
+++ b/tests/test_sonic_installer.py
@@ -3,7 +3,6 @@
 from sonic_installer.main import sonic_installer
 from click.testing import CliRunner
 from unittest.mock import patch, Mock, call
-from sonic_installer.bootloader import GrubBootloader

 @patch("sonic_installer.main.SWAPAllocator")
 @patch("sonic_installer.main.get_bootloader")
@@ -32,7 +31,7 @@ def test_install(run_command, run_command_or_raise, get_bootloader, swap, fs):
 mock_bootloader.get_binary_image_version = Mock(return_value=new_image_version)
 mock_bootloader.get_installed_images = Mock(return_value=[current_image_version])
 mock_bootloader.get_image_path = Mock(return_value=new_image_folder)
- mock_bootloader.verify_image_sign = Mock(return_value=True)
+
 @contextmanager
 def rootfs_path_mock(path):
 yield mounted_image_folder
@@ -46,13 +45,7 @@ def rootfs_path_mock(path):
 print(result.output)
 assert result.exit_code == 0
- mock_bootloader_verify_image_sign_fail = mock_bootloader
- mock_bootloader_verify_image_sign_fail.verify_image_sign = Mock(return_value=False)
- get_bootloader.return_value=mock_bootloader_verify_image_sign_fail
- result = runner.invoke(sonic_installer.commands["install"], [sonic_image_filename, "-y"])
- print(result.output)
- assert result.exit_code != 0
 # Assert bootloader install API was called
 mock_bootloader.install_image.assert_called_with(f"./{sonic_image_filename}")
 # Assert all below commands were called, so we ensure that
diff --git a/tests/verify_image_sign_test.sh b/tests/verify_image_sign_test.sh
deleted file mode 100755
index f4abd2584f..0000000000
--- a/tests/verify_image_sign_test.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-image_file="${1}"
-cert_path="${2}"
-cms_sig_file="sig.cms"
-TMP_DIR=$(mktemp -d)
-DATA_FILE="${TMP_DIR}/data.bin"
-CMS_SIG_FILE="${TMP_DIR}/${cms_sig_file}"
-lines_for_lookup=50
-
-TAR_SIZE=$(head -n $lines_for_lookup $image_file | grep "payload_image_size=" | cut -d"=" -f2- )
-SHARCH_SIZE=$(sed '/^exit_marker$/q' $image_file | wc -c)
-SIG_PAYLOAD_SIZE=$(($TAR_SIZE + $SHARCH_SIZE ))
-# Extract cms signature from signed file - exit marker marks last sharch prefix + number of image lines + 1 for next linel
-# Add extra byte for payload - extracting image signature from line after data file
-sed -e '1,/^exit_marker$/d' $image_file | tail -c +$(( $TAR_SIZE + 1 )) > $CMS_SIG_FILE
-# Extract image from signed file
-head -c $SIG_PAYLOAD_SIZE $image_file > $DATA_FILE
-EFI_CERTS_DIR=/tmp/efi_certs
-[ -d $EFI_CERTS_DIR ] && rm -rf $EFI_CERTS_DIR
-mkdir $EFI_CERTS_DIR
-cp $cert_path $EFI_CERTS_DIR/cert.pem
-
-DIR="$(dirname "$0")"
-. $DIR/verify_image_sign_common.sh
-verify_image_sign_common $image_file $DATA_FILE $CMS_SIG_FILE
-VERIFICATION_RES=$?
-if [ -d "${TMP_DIR}" ]; then rm -rf ${TMP_DIR}; fi
-[ -d $EFI_CERTS_DIR ] && rm -rf $EFI_CERTS_DIR
-exit $VERIFICATION_RES
\ No newline at end of file

From 6f84aae74c4d1eaeac4a5e7efd260f5fcd284a77 Mon Sep 17 00:00:00 2001
From: Junchao-Mellanox <57339448+Junchao-Mellanox@users.noreply.github.com>
Date: Mon, 27 Feb 2023 17:49:34 +0800
Subject: [PATCH 24/66] Add begin logs to config reload/config minigraph/warm-reboot/fast-reboot (#2694)

- What I did
Add more logs for config reload/config minigraph/warm-reboot/fast-reboot to identify in the log (notice level) which command was executed that could cause a service impact.

- How I did it
Add more logs for config reload/config minigraph/warm-reboot/fast-reboot.
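For illustration only (not part of the diff below): the pattern added to config/main.py rebuilds the invoked command line from sys.argv and emits it at NOTICE level. A minimal sketch, assuming the sonic_py_common Logger that sonic-utilities already uses; everything beyond sys.argv and log_notice is illustrative:

    import sys
    from sonic_py_common import logger

    log = logger.Logger()
    # Rebuild the CLI invocation, e.g. "config reload -y", and log it at
    # NOTICE level so the service-affecting command is visible in syslog.
    argv_str = ' '.join(['config', *sys.argv[1:]])
    log.log_notice(f"'reload' executing with command: {argv_str}")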
- How to verify it Manual test --- config/main.py | 14 ++++++++------ scripts/fast-reboot | 1 + 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/config/main.py b/config/main.py index 6f155ba669..f2576ac74d 100644 --- a/config/main.py +++ b/config/main.py @@ -1549,7 +1549,8 @@ def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_form if not yes: click.confirm(message, abort=True) - log.log_info("'reload' executing...") + argv_str = ' '.join(['config', *sys.argv[1:]]) + log.log_notice(f"'reload' executing with command: {argv_str}") num_asic = multi_asic.get_num_asics() cfg_files = [] @@ -1569,7 +1570,7 @@ def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_form #Stop services before config push if not no_service_restart: - log.log_info("'reload' stopping services...") + log.log_notice("'reload' stopping services...") _stop_services() # In Single ASIC platforms we have single DB service. In multi-ASIC platforms we have a global DB @@ -1678,7 +1679,7 @@ def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_form # status from all services before we attempt to restart them if not no_service_restart: _reset_failed_services() - log.log_info("'reload' restarting services...") + log.log_notice("'reload' restarting services...") _restart_services() @config.command("load_mgmt_config") @@ -1725,11 +1726,12 @@ def load_mgmt_config(filename): @clicommon.pass_db def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, golden_config_path): """Reconfigure based on minigraph.""" - log.log_info("'load_minigraph' executing...") + argv_str = ' '.join(['config', *sys.argv[1:]]) + log.log_notice(f"'load_minigraph' executing with command: {argv_str}") #Stop services before config push if not no_service_restart: - log.log_info("'load_minigraph' stopping services...") + log.log_notice("'load_minigraph' stopping services...") _stop_services() # For Single Asic platform the namespace list has the empty string @@ -1815,7 +1817,7 @@ def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, if not no_service_restart: _reset_failed_services() #FIXME: After config DB daemon is implemented, we'll no longer need to restart every service. - log.log_info("'load_minigraph' restarting services...") + log.log_notice("'load_minigraph' restarting services...") _restart_services() click.echo("Please note setting loaded from minigraph will be lost after system reboot. 
To preserve setting, run `config save`.") diff --git a/scripts/fast-reboot b/scripts/fast-reboot index 604fddf9ec..5e7cc34bc9 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -522,6 +522,7 @@ then exit "${EXIT_FAILURE}" fi +debug "Starting $REBOOT_TYPE" # re-run the script in background mode with detaching from the terminal session if [[ x"${DETACH}" == x"yes" && x"${ALREADY_DETACHED}" == x"" ]]; then From b81734289b7ca361b365cbf1c845d31b0a81657e Mon Sep 17 00:00:00 2001 From: isabelmsft <67024108+isabelmsft@users.noreply.github.com> Date: Mon, 27 Feb 2023 23:49:01 -0800 Subject: [PATCH 25/66] [GCU] Add Sample Unit Test for RDMA Headroom Pool Size Tuning (#2692) * add rdma gcu unit test * fix comment * clean unused code * clean format * extend to mock patchapplier, in place of changeapplier * replace tabs with spaces --- ...eature_patch_application_test_failure.json | 35 ++++++ ...eature_patch_application_test_success.json | 62 ++++++++++ .../gcu_feature_patch_application_test.py | 117 ++++++++++++++++++ 3 files changed, 214 insertions(+) create mode 100644 tests/generic_config_updater/files/feature_patch_application_test_failure.json create mode 100644 tests/generic_config_updater/files/feature_patch_application_test_success.json create mode 100644 tests/generic_config_updater/gcu_feature_patch_application_test.py diff --git a/tests/generic_config_updater/files/feature_patch_application_test_failure.json b/tests/generic_config_updater/files/feature_patch_application_test_failure.json new file mode 100644 index 0000000000..80c523ddfc --- /dev/null +++ b/tests/generic_config_updater/files/feature_patch_application_test_failure.json @@ -0,0 +1,35 @@ +{ + "RDMA_SHARED_POOL_SIZE_CHANGE__FAILURE": { + "desc": "For RDMA shared pool size tuning- adjust both shared pool and headroom pool", + "current_config": { + "BUFFER_POOL": { + "ingress_lossless_pool": { + "xoff": "4194112", + "type": "ingress", + "mode": "dynamic", + "size": "10875072" + }, + "egress_lossless_pool": { + "type": "egress", + "mode": "static", + "size": "15982720" + }, + "egress_lossy_pool": { + "type": "egress", + "mode": "dynamic", + "size": "9243812" + } + } + }, + "patch": [ + { + "op": "replace", + "path": "/BUFFER_POOL/ingress_lossless_pool/xoff", + "value": "invalid_xoff" + } + ], + "expected_error_substrings": [ + "Given patch will produce invalid config" + ] + } +} diff --git a/tests/generic_config_updater/files/feature_patch_application_test_success.json b/tests/generic_config_updater/files/feature_patch_application_test_success.json new file mode 100644 index 0000000000..7ca6cab4bb --- /dev/null +++ b/tests/generic_config_updater/files/feature_patch_application_test_success.json @@ -0,0 +1,62 @@ +{ + "RDMA_SHARED_POOL_SIZE_CHANGE__SUCCESS": { + "desc": "For RDMA shared pool size tuning- adjust both shared pool and headroom pool", + "current_config": { + "BUFFER_POOL": { + "ingress_lossless_pool": { + "xoff": "4194112", + "type": "ingress", + "mode": "dynamic", + "size": "10875072" + }, + "egress_lossless_pool": { + "type": "egress", + "mode": "static", + "size": "15982720" + }, + "egress_lossy_pool": { + "type": "egress", + "mode": "dynamic", + "size": "9243812" + } + } + }, + "patch": [ + { + "op": "replace", + "path": "/BUFFER_POOL/ingress_lossless_pool/xoff", + "value": "2155712" + }, + { + "op": "replace", + "path": "/BUFFER_POOL/ingress_lossless_pool/size", + "value": "12913472" + }, + { + "op": "replace", + "path": "/BUFFER_POOL/egress_lossy_pool/size", + "value": "5200000" + } + ], + 
"expected_config": { + "BUFFER_POOL": { + "ingress_lossless_pool": { + "xoff": "2155712", + "type": "ingress", + "mode": "dynamic", + "size": "12913472" + }, + "egress_lossless_pool": { + "type": "egress", + "mode": "static", + "size": "15982720" + }, + "egress_lossy_pool": { + "type": "egress", + "mode": "dynamic", + "size": "5200000" + } + } + } + } +} diff --git a/tests/generic_config_updater/gcu_feature_patch_application_test.py b/tests/generic_config_updater/gcu_feature_patch_application_test.py new file mode 100644 index 0000000000..9a52a04732 --- /dev/null +++ b/tests/generic_config_updater/gcu_feature_patch_application_test.py @@ -0,0 +1,117 @@ +import jsonpatch +import unittest +import copy +from unittest.mock import MagicMock, Mock +from mock import patch + +import generic_config_updater.change_applier +import generic_config_updater.patch_sorter as ps +import generic_config_updater.generic_updater as gu +from .gutest_helpers import Files +from generic_config_updater.gu_common import ConfigWrapper, PatchWrapper + +running_config = {} + +def set_entry(config_db, tbl, key, data): + global running_config + if data != None: + if tbl not in running_config: + running_config[tbl] = {} + running_config[tbl][key] = data + else: + assert tbl in running_config + assert key in running_config[tbl] + running_config[tbl].pop(key) + if not running_config[tbl]: + running_config.pop(tbl) + +def get_running_config(): + return running_config + +class TestFeaturePatchApplication(unittest.TestCase): + def setUp(self): + self.config_wrapper = ConfigWrapper() + + def test_feature_patch_application_success(self): + # Format of the JSON file containing the test-cases: + # + # { + # "":{ + # "desc":"", + # "current_config":, + # "patch":, + # "expected_config": + # }, + # . + # . + # . + # } + data = Files.FEATURE_PATCH_APPLICATION_TEST_SUCCESS + + for test_case_name in data: + with self.subTest(name=test_case_name): + self.run_single_success_case_applier(data[test_case_name]) + + def test_feature_patch_application_failure(self): + # Fromat of the JSON file containing the test-cases: + # + # { + # "":{ + # "desc":"", + # "current_config":, + # "patch":, + # "expected_error_substrings": + # }, + # . + # . + # . 
+ # }
+ data = Files.FEATURE_PATCH_APPLICATION_TEST_FAILURE
+
+ for test_case_name in data:
+ with self.subTest(name=test_case_name):
+ self.run_single_failure_case_applier(data[test_case_name])
+
+ def create_patch_applier(self, config):
+ global running_config
+ running_config = copy.deepcopy(config)
+ config_wrapper = self.config_wrapper
+ config_wrapper.get_config_db_as_json = MagicMock(side_effect=get_running_config)
+ change_applier = generic_config_updater.change_applier.ChangeApplier()
+ change_applier._get_running_config = MagicMock(side_effect=get_running_config)
+ patch_wrapper = PatchWrapper(config_wrapper)
+ return gu.PatchApplier(config_wrapper=config_wrapper, patch_wrapper=patch_wrapper, changeapplier=change_applier)
+
+ @patch("generic_config_updater.change_applier.get_config_db")
+ @patch("generic_config_updater.change_applier.set_config")
+ def run_single_success_case_applier(self, data, mock_set, mock_db):
+ current_config = data["current_config"]
+ mock_set.side_effect = set_entry
+ expected_config = data["expected_config"]
+ patch = jsonpatch.JsonPatch(data["patch"])
+ patch_applier = self.create_patch_applier(current_config)
+ patch_applier.apply(patch)
+ result_config = patch_applier.config_wrapper.get_config_db_as_json()
+
+ self.assertEqual(expected_config, result_config)
+
+ @patch("generic_config_updater.change_applier.get_config_db")
+ def run_single_failure_case_applier(self, data, mock_db):
+ current_config = data["current_config"]
+ patch = jsonpatch.JsonPatch(data["patch"])
+ expected_error_substrings = data["expected_error_substrings"]
+
+ try:
+ patch_applier = self.create_patch_applier(current_config)
+ patch_applier.apply(patch)
+ self.fail("An exception was supposed to be thrown")
+ except Exception as ex:
+ notfound_substrings = []
+ error = str(ex)
+
+ for substring in expected_error_substrings:
+ if substring not in error:
+ notfound_substrings.append(substring)
+
+ if notfound_substrings:
+ self.fail(f"Did not find the expected substrings {notfound_substrings} in the error: '{error}'")

From 2680e6f374ecc6bfdcfa1c579c79c7fba41a51e5 Mon Sep 17 00:00:00 2001
From: Yaqiang Zhu
Date: Wed, 1 Mar 2023 10:05:04 +0800
Subject: [PATCH 26/66] [dhcp_relay] Fix dhcp_relay restart error while add/del vlan (#2688)

Why I did
On a device that doesn't have the dhcp_relay service, restarting dhcp_relay after add/del vlan would fail.

How I did it
Add support to check whether the device supports the dhcp_relay service.

How to verify it
1. Unit test
2.
Build and install in device Signed-off-by: Yaqiang Zhu --- config/vlan.py | 39 +++++++++++++----- tests/conftest.py | 10 +++-- tests/vlan_test.py | 98 ++++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 132 insertions(+), 15 deletions(-) diff --git a/config/vlan.py b/config/vlan.py index f1c6f06d1f..33c6145770 100644 --- a/config/vlan.py +++ b/config/vlan.py @@ -8,6 +8,8 @@ from .validated_config_db_connector import ValidatedConfigDBConnector ADHOC_VALIDATION = True +DHCP_RELAY_TABLE = "DHCP_RELAY" +DHCPV6_SERVERS = "dhcpv6_servers" # # 'vlan' group ('config vlan ...') @@ -22,6 +24,11 @@ def set_dhcp_relay_table(table, config_db, vlan_name, value): config_db.set_entry(table, vlan_name, value) +def is_dhcp_relay_running(): + out, _ = clicommon.run_command("systemctl show dhcp_relay.service --property ActiveState --value", return_cmd=True) + return out.strip() == "active" + + @vlan.command('add') @click.argument('vid', metavar='', required=True, type=int) @clicommon.pass_db @@ -46,22 +53,34 @@ def add_vlan(db, vid): # set dhcpv4_relay table set_dhcp_relay_table('VLAN', config_db, vlan, {'vlanid': str(vid)}) - # set dhcpv6_relay table - set_dhcp_relay_table('DHCP_RELAY', config_db, vlan, None) - # We need to restart dhcp_relay service after dhcpv6_relay config change - dhcp_relay_util.handle_restart_dhcp_relay_service() + +def is_dhcpv6_relay_config_exist(db, vlan_name): + keys = db.cfgdb.get_keys(DHCP_RELAY_TABLE) + if len(keys) == 0 or vlan_name not in keys: + return False + + table = db.cfgdb.get_entry("DHCP_RELAY", vlan_name) + dhcpv6_servers = table.get(DHCPV6_SERVERS, []) + if len(dhcpv6_servers) > 0: + return True @vlan.command('del') @click.argument('vid', metavar='', required=True, type=int) +@click.option('--no_restart_dhcp_relay', is_flag=True, type=click.BOOL, required=False, default=False, + help="If no_restart_dhcp_relay is True, do not restart dhcp_relay while del vlan and \ + require dhcpv6 relay of this is empty") @clicommon.pass_db -def del_vlan(db, vid): +def del_vlan(db, vid, no_restart_dhcp_relay): """Delete VLAN""" log.log_info("'vlan del {}' executing...".format(vid)) ctx = click.get_current_context() vlan = 'Vlan{}'.format(vid) + if no_restart_dhcp_relay: + if is_dhcpv6_relay_config_exist(db, vlan): + ctx.fail("Can't delete {} because related DHCPv6 Relay config is exist".format(vlan)) config_db = ValidatedConfigDBConnector(db.cfgdb) if ADHOC_VALIDATION: @@ -90,10 +109,12 @@ def del_vlan(db, vid): # set dhcpv4_relay table set_dhcp_relay_table('VLAN', config_db, vlan, None) - # set dhcpv6_relay table - set_dhcp_relay_table('DHCP_RELAY', config_db, vlan, None) - # We need to restart dhcp_relay service after dhcpv6_relay config change - dhcp_relay_util.handle_restart_dhcp_relay_service() + if not no_restart_dhcp_relay and is_dhcpv6_relay_config_exist(db, vlan): + # set dhcpv6_relay table + set_dhcp_relay_table('DHCP_RELAY', config_db, vlan, None) + # We need to restart dhcp_relay service after dhcpv6_relay config change + if is_dhcp_relay_running(): + dhcp_relay_util.handle_restart_dhcp_relay_service() def restart_ndppd(): diff --git a/tests/conftest.py b/tests/conftest.py index b6b454ba09..6e70f8c9aa 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -361,9 +361,13 @@ def setup_fib_commands(): @pytest.fixture(scope='function') def mock_restart_dhcp_relay_service(): print("We are mocking restart dhcp_relay") - origin_func = config.vlan.dhcp_relay_util.handle_restart_dhcp_relay_service - config.vlan.dhcp_relay_util.handle_restart_dhcp_relay_service = 
mock.MagicMock(return_value=0) + origin_funcs = [] + origin_funcs.append(config.vlan.dhcp_relay_util.restart_dhcp_relay_service) + origin_funcs.append(config.vlan.is_dhcp_relay_running) + config.vlan.dhcp_relay_util.restart_dhcp_relay_service = mock.MagicMock(return_value=0) + config.vlan.is_dhcp_relay_running = mock.MagicMock(return_value=True) yield - config.vlan.dhcp_relay_util.handle_restart_dhcp_relay_service = origin_func + config.vlan.dhcp_relay_util.restart_dhcp_relay_service = origin_funcs[0] + config.vlan.is_dhcp_relay_running = origin_funcs[1] diff --git a/tests/vlan_test.py b/tests/vlan_test.py index f582d0e3ba..19622777a0 100644 --- a/tests/vlan_test.py +++ b/tests/vlan_test.py @@ -597,7 +597,7 @@ def test_config_vlan_add_member_of_portchannel(self): assert "Error: Ethernet32 is part of portchannel!" in result.output @pytest.mark.parametrize("ip_version", ["ipv4", "ipv6"]) - def test_config_add_del_vlan_dhcp_relay(self, ip_version, mock_restart_dhcp_relay_service): + def test_config_add_del_vlan_dhcp_relay_with_empty_entry(self, ip_version, mock_restart_dhcp_relay_service): runner = CliRunner() db = Db() @@ -611,11 +611,103 @@ def test_config_add_del_vlan_dhcp_relay(self, ip_version, mock_restart_dhcp_rela assert db.cfgdb.get_entry(IP_VERSION_PARAMS_MAP[ip_version]["table"], "Vlan1001") == exp_output # del vlan 1001 - result = runner.invoke(config.config.commands["vlan"].commands["del"], ["1001"], obj=db) + with mock.patch("utilities_common.dhcp_relay_util.handle_restart_dhcp_relay_service") as mock_handle_restart: + result = runner.invoke(config.config.commands["vlan"].commands["del"], ["1001"], obj=db) + print(result.exit_code) + print(result.output) + + assert result.exit_code == 0 + assert "Vlan1001" not in db.cfgdb.get_keys(IP_VERSION_PARAMS_MAP[ip_version]["table"]) + assert "Restart service dhcp_relay failed with error" not in result.output + + @pytest.mark.parametrize("ip_version", ["ipv4", "ipv6"]) + def test_config_add_del_vlan_dhcp_relay_with_non_empty_entry(self, ip_version, mock_restart_dhcp_relay_service): + runner = CliRunner() + db = Db() + + # add vlan 1001 + result = runner.invoke(config.config.commands["vlan"].commands["add"], ["1001"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + exp_output = {"vlanid": "1001"} if ip_version == "ipv4" else {} + assert db.cfgdb.get_entry(IP_VERSION_PARAMS_MAP[ip_version]["table"], "Vlan1001") == exp_output + db.cfgdb.set_entry("DHCP_RELAY", "Vlan1001", {"dhcpv6_servers": ["fc02:2000::5"]}) + + # del vlan 1001 + with mock.patch("utilities_common.dhcp_relay_util.handle_restart_dhcp_relay_service") as mock_handle_restart: + result = runner.invoke(config.config.commands["vlan"].commands["del"], ["1001"], obj=db) + print(result.exit_code) + print(result.output) + + assert result.exit_code == 0 + assert "Vlan1001" not in db.cfgdb.get_keys(IP_VERSION_PARAMS_MAP[ip_version]["table"]) + mock_handle_restart.assert_called_once() + assert "Restart service dhcp_relay failed with error" not in result.output + + @pytest.mark.parametrize("ip_version", ["ipv4", "ipv6"]) + def test_config_add_del_vlan_with_dhcp_relay_not_running(self, ip_version): + runner = CliRunner() + db = Db() + + # add vlan 1001 + result = runner.invoke(config.config.commands["vlan"].commands["add"], ["1001"], obj=db) print(result.exit_code) print(result.output) + assert result.exit_code == 0 + + exp_output = {"vlanid": "1001"} if ip_version == "ipv4" else {} + assert 
db.cfgdb.get_entry(IP_VERSION_PARAMS_MAP[ip_version]["table"], "Vlan1001") == exp_output
+
+ # del vlan 1001
+ with mock.patch("utilities_common.dhcp_relay_util.handle_restart_dhcp_relay_service") \
+ as mock_restart_dhcp_relay_service:
+ result = runner.invoke(config.config.commands["vlan"].commands["del"], ["1001"], obj=db)
+ print(result.exit_code)
+ print(result.output)

- assert "Vlan1001" not in db.cfgdb.get_keys(IP_VERSION_PARAMS_MAP[ip_version]["table"])
+ assert result.exit_code == 0
+ assert "Vlan1001" not in db.cfgdb.get_keys(IP_VERSION_PARAMS_MAP[ip_version]["table"])
+ assert mock_restart_dhcp_relay_service.call_count == 0
+ assert "Restarting DHCP relay service..." not in result.output
+ assert "Restart service dhcp_relay failed with error" not in result.output
+
+ def test_config_add_del_vlan_with_not_restart_dhcp_relay_ipv6(self):
+ runner = CliRunner()
+ db = Db()
+
+ # add vlan 1001
+ result = runner.invoke(config.config.commands["vlan"].commands["add"], ["1001"], obj=db)
+ print(result.exit_code)
+ print(result.output)
+ assert result.exit_code == 0
+
+ db.cfgdb.set_entry("DHCP_RELAY", "Vlan1001", {"dhcpv6_servers": ["fc02:2000::5"]})
+
+ # del vlan 1001
+ with mock.patch("utilities_common.dhcp_relay_util.handle_restart_dhcp_relay_service") \
+ as mock_restart_dhcp_relay_service:
+ result = runner.invoke(config.config.commands["vlan"].commands["del"], ["1001", "--no_restart_dhcp_relay"],
+ obj=db)
+ print(result.exit_code)
+ print(result.output)
+
+ assert result.exit_code != 0
+ assert mock_restart_dhcp_relay_service.call_count == 0
+ assert "Can't delete Vlan1001 because related DHCPv6 Relay config is exist" in result.output
+
+ db.cfgdb.set_entry("DHCP_RELAY", "Vlan1001", None)
+ # del vlan 1001
+ with mock.patch("utilities_common.dhcp_relay_util.handle_restart_dhcp_relay_service") \
+ as mock_restart_dhcp_relay_service:
+ result = runner.invoke(config.config.commands["vlan"].commands["del"], ["1001", "--no_restart_dhcp_relay"],
+ obj=db)
+ print(result.exit_code)
+ print(result.output)
+
+ assert result.exit_code == 0
+ assert mock_restart_dhcp_relay_service.call_count == 0

 @pytest.mark.parametrize("ip_version", ["ipv6"])
 def test_config_add_exist_vlan_dhcp_relay(self, ip_version):

From cd519aac344651f604be4c75050d1db4b10307a4 Mon Sep 17 00:00:00 2001
From: Liu Shilong
Date: Thu, 2 Mar 2023 15:36:57 +0800
Subject: [PATCH 27/66] [ci] Fix pipeline issue caused by sonic-slave-* change. (#2709)

What I did
These 3 packages may be purged by default; do not let that block the pipeline.
Download only deb/whl packages to accelerate the download process.
How I did it How to verify it --- azure-pipelines.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 1856c7a0f5..46369c01b2 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -45,11 +45,14 @@ stages: artifact: sonic-buildimage.vs runVersion: 'latestFromBranch' runBranch: 'refs/heads/$(sourceBranch)' + patterns: | + **/*.deb + **/*.whl displayName: "Download artifacts from latest sonic-buildimage build" - script: | set -xe - sudo apt-get -y purge libhiredis-dev libnl-3-dev libnl-route-3-dev + sudo apt-get -y purge libhiredis-dev libnl-3-dev libnl-route-3-dev || true sudo dpkg -i libnl-3-200_*.deb sudo dpkg -i libnl-genl-3-200_*.deb sudo dpkg -i libnl-route-3-200_*.deb From a015834d27f853344bb46b0445fb4dcd44b11cf3 Mon Sep 17 00:00:00 2001 From: Vaibhav Hemant Dixit Date: Fri, 3 Mar 2023 12:45:40 -0800 Subject: [PATCH 28/66] [db_migrator] Add missing attribute 'weight' to route entries in APPL DB (#2691) Fixes: 201911 to 202205 warm upgrade failure in fpmsyncd reconciliation due to missing weight attr in routes. (sonic-net/sonic-buildimage#12625) How I did it Check for missing attribute weight in APPLDB route entries. If found missing this attribute is added with empty value. How to verify it Verified on physical device. 201911 to 202205 upgrade worked fine. --- scripts/db_migrator.py | 19 ++++++++++ .../appl_db/routes_migrate_expected.json | 12 +++++++ .../appl_db/routes_migrate_input.json | 10 ++++++ .../config_db/routes_migrate_input.json | 3 ++ tests/db_migrator_test.py | 35 +++++++++++++++++++ 5 files changed, 79 insertions(+) create mode 100644 tests/db_migrator_input/appl_db/routes_migrate_expected.json create mode 100644 tests/db_migrator_input/appl_db/routes_migrate_input.json create mode 100644 tests/db_migrator_input/config_db/routes_migrate_input.json diff --git a/scripts/db_migrator.py b/scripts/db_migrator.py index c52e38bd63..5c946bbb9f 100755 --- a/scripts/db_migrator.py +++ b/scripts/db_migrator.py @@ -579,6 +579,23 @@ def migrate_port_qos_map_global(self): self.configDB.set_entry('PORT_QOS_MAP', 'global', {"dscp_to_tc_map": dscp_to_tc_map_table_names[0]}) log.log_info("Created entry for global DSCP_TO_TC_MAP {}".format(dscp_to_tc_map_table_names[0])) + def migrate_route_table(self): + """ + Handle route table migration. Migrations handled: + 1. 'weight' attr in ROUTE object was introduced 202205 onwards. 
+ Upgrade from older branch to 202205 will require this 'weight' attr to be added explicitly + """ + route_table = self.appDB.get_table("ROUTE_TABLE") + for route_prefix, route_attr in route_table.items(): + if 'weight' not in route_attr: + if type(route_prefix) == tuple: + # IPv6 route_prefix is returned from db as tuple + route_key = "ROUTE_TABLE:" + ":".join(route_prefix) + else: + # IPv4 route_prefix is returned from db as str + route_key = "ROUTE_TABLE:{}".format(route_prefix) + self.appDB.set(self.appDB.APPL_DB, route_key, 'weight','') + def version_unknown(self): """ version_unknown tracks all SONiC versions that doesn't have a version @@ -899,6 +916,8 @@ def common_migration_ops(self): else: log.log_notice("Asic Type: {}, Hwsku: {}".format(self.asic_type, self.hwsku)) + self.migrate_route_table() + def migrate(self): version = self.get_version() log.log_info('Upgrading from version ' + version) diff --git a/tests/db_migrator_input/appl_db/routes_migrate_expected.json b/tests/db_migrator_input/appl_db/routes_migrate_expected.json new file mode 100644 index 0000000000..5cad371c31 --- /dev/null +++ b/tests/db_migrator_input/appl_db/routes_migrate_expected.json @@ -0,0 +1,12 @@ +{ + "ROUTE_TABLE:192.168.104.0/25": { + "nexthop": "10.0.0.57,10.0.0.59,10.0.0.61,10.0.0.63", + "ifname" : "PortChannel101,PortChannel102,PortChannel103,PortChannel104", + "weight": "" + }, + "ROUTE_TABLE:20c0:fe28:0:80::/64": { + "nexthop": "fc00::72,fc00::76,fc00::7a,fc00::7e", + "ifname" : "PortChannel101,PortChannel102,PortChannel103,PortChannel104", + "weight": "" + } +} diff --git a/tests/db_migrator_input/appl_db/routes_migrate_input.json b/tests/db_migrator_input/appl_db/routes_migrate_input.json new file mode 100644 index 0000000000..7249488cd6 --- /dev/null +++ b/tests/db_migrator_input/appl_db/routes_migrate_input.json @@ -0,0 +1,10 @@ +{ + "ROUTE_TABLE:192.168.104.0/25": { + "nexthop": "10.0.0.57,10.0.0.59,10.0.0.61,10.0.0.63", + "ifname" : "PortChannel101,PortChannel102,PortChannel103,PortChannel104" + }, + "ROUTE_TABLE:20c0:fe28:0:80::/64": { + "nexthop": "fc00::72,fc00::76,fc00::7a,fc00::7e", + "ifname" : "PortChannel101,PortChannel102,PortChannel103,PortChannel104" + } +} diff --git a/tests/db_migrator_input/config_db/routes_migrate_input.json b/tests/db_migrator_input/config_db/routes_migrate_input.json new file mode 100644 index 0000000000..672268b286 --- /dev/null +++ b/tests/db_migrator_input/config_db/routes_migrate_input.json @@ -0,0 +1,3 @@ +{ + "VERSIONS|DATABASE": {"VERSION": "version_1_0_1"} +} diff --git a/tests/db_migrator_test.py b/tests/db_migrator_test.py index 223f5d582e..b5c70fce8e 100644 --- a/tests/db_migrator_test.py +++ b/tests/db_migrator_test.py @@ -518,3 +518,38 @@ def test_migrate_loopback_int(self): expected_keys = expected_appl_db.get_all(expected_appl_db.APPL_DB, key) diff = DeepDiff(resulting_keys, expected_keys, ignore_order=True) assert not diff + +class TestWarmUpgrade_without_route_weights(object): + @classmethod + def setup_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "2" + + @classmethod + def teardown_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "0" + dbconnector.dedicated_dbs['CONFIG_DB'] = None + dbconnector.dedicated_dbs['APPL_DB'] = None + + def test_migrate_weights_for_nexthops(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'routes_migrate_input') + dbconnector.dedicated_dbs['APPL_DB'] = os.path.join(mock_db_path, 'appl_db', 'routes_migrate_input') + + import db_migrator + dbmgtr = 
db_migrator.DBMigrator(None)
+        dbmgtr.migrate()
+        dbconnector.dedicated_dbs['APPL_DB'] = os.path.join(mock_db_path, 'appl_db', 'routes_migrate_expected')
+        expected_db = Db()
+
+        # verify migrated appDB
+        expected_appl_db = SonicV2Connector(host='127.0.0.1')
+        expected_appl_db.connect(expected_appl_db.APPL_DB)
+        expected_keys = expected_appl_db.keys(expected_appl_db.APPL_DB, "ROUTE_TABLE:*")
+        expected_keys.sort()
+        resulting_keys = dbmgtr.appDB.keys(dbmgtr.appDB.APPL_DB, "ROUTE_TABLE:*")
+        resulting_keys.sort()
+        assert expected_keys == resulting_keys
+        for key in expected_keys:
+            resulting_keys = dbmgtr.appDB.get_all(dbmgtr.appDB.APPL_DB, key)
+            expected_keys = expected_appl_db.get_all(expected_appl_db.APPL_DB, key)
+            diff = DeepDiff(resulting_keys, expected_keys, ignore_order=True)
+            assert not diff

From c2bc150a6a05c97362d540c874deff81fad6f870 Mon Sep 17 00:00:00 2001
From: Vaibhav Hemant Dixit
Date: Mon, 6 Mar 2023 10:56:51 -0800
Subject: [PATCH 29/66] [warm/fast-reboot] Backup logs from tmpfs to disk during fast/warm shutdown (#2714)

Goal: Preserve logs during TOR upgrades and shutdown

Need: Below PRs moved logs from disk to tmpfs for specific hwskus. Due to these changes, shutdown path logs are now lost. The logs in the shutdown path are crucial for debug purposes.
sonic-net/sonic-buildimage#13805
sonic-net/sonic-buildimage#13587

How I did it
Check if logs are on tmpfs. If yes, back up logs from /var/log.

How to verify it
Verified on a physical device - logs on tmpfs are backed up for the past 30 minutes.
---
 scripts/fast-reboot | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/scripts/fast-reboot b/scripts/fast-reboot
index 5e7cc34bc9..2fb744f404 100755
--- a/scripts/fast-reboot
+++ b/scripts/fast-reboot
@@ -806,6 +806,17 @@ fi
 # Reboot: explicitly call Linux native reboot under sbin
 debug "Rebooting with ${REBOOT_METHOD} to ${NEXT_SONIC_IMAGE} ..."
+
+LOGS_ON_TMPFS=0
+df --output=fstype /var/log* | grep -c 'tmpfs' || LOGS_ON_TMPFS=$?
+if [[ LOGS_ON_TMPFS -eq 0 ]]; then
+    debug "Backup shutdown logs to /host/logs_before_reboot"
+    mkdir -p /host/logs_before_reboot || /bin/true
+    # maxdepth 2: find files within 2 nested directories (eg. /var/log/ and /var/log/swss/)
+    # mmin 30: find files written in past 30 minutes
+    find /var/log -maxdepth 2 -mmin -30 -type f | xargs -I {} cp {} /host/logs_before_reboot/ || /bin/true
+fi
+
 exec ${REBOOT_METHOD}

 # Should never reach here

From 90d70152c76f40bf7c1f8e2c6aff6eb58b951a05 Mon Sep 17 00:00:00 2001
From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com>
Date: Tue, 7 Mar 2023 20:23:07 +0200
Subject: [PATCH 30/66] [route_check] implement a check for FRR routes not marked offloaded (#2531)

* [route_check] implement a check for FRR routes not marked offloaded

* Implemented route_check functionality that checks "show ip route json" output from FRR and ensures that all routes are marked as offloaded. If some routes are not offloaded for 15 sec, this is considered an issue and mitigation logic is invoked.
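To make the new check concrete, here is a condensed, self-contained sketch of the detection logic described above (an illustration only, not the code from the diff below; get_frr_routes mirrors the patch, find_unoffloaded_bgp_routes is a hypothetical helper name, and the SONiC "show" CLI is assumed to be available):

import json
import subprocess

def get_frr_routes():
    # Query zebra through the SONiC CLI; both outputs are JSON keyed by prefix.
    routes = json.loads(subprocess.check_output('show ip route json', shell=True))
    routes.update(json.loads(subprocess.check_output('show ipv6 route json', shell=True)))
    return routes

def find_unoffloaded_bgp_routes():
    # BGP routes in the default VRF should carry the "offloaded" flag once
    # hardware programming has been confirmed back to zebra.
    missed = []
    for _, entries in get_frr_routes().items():
        for entry in entries:
            if entry.get('protocol') != 'bgp':
                continue
            if entry.get('vrfName') != 'default':
                continue
            if not entry.get('offloaded', False):
                missed.append(entry['prefix'])
    return missed

In the patch itself this loop is retried (FRR_CHECK_RETRIES times, sleeping FRR_WAIT_TIME seconds in between) before the remaining entries are reported and, when suppress-fib-pending is enabled, mitigated by notifying fpmsyncd.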
--- scripts/route_check.py | 117 ++++++++++++++++++++++++++++--- tests/mock_tables/config_db.json | 3 +- tests/route_check_test.py | 24 ++++++- tests/route_check_test_data.py | 104 ++++++++++++++++++++++++++- 4 files changed, 234 insertions(+), 14 deletions(-) diff --git a/scripts/route_check.py b/scripts/route_check.py index c6234bcc9d..4db3f399a2 100755 --- a/scripts/route_check.py +++ b/scripts/route_check.py @@ -11,11 +11,11 @@ How: NOTE: The flow from APPL-DB to ASIC-DB takes non zero milliseconds. 1) Initiate subscribe for ASIC-DB updates. - 2) Read APPL-DB & ASIC-DB + 2) Read APPL-DB & ASIC-DB 3) Get the diff. - 4) If any diff, + 4) If any diff, 4.1) Collect subscribe messages for a second - 4.2) check diff against the subscribe messages + 4.2) check diff against the subscribe messages 5) Rule out local interfaces & default routes 6) If still outstanding diffs, report failure. @@ -29,7 +29,7 @@ down to ensure failure. Analyze the reported failures to match expected. You may use the exit code to verify the result as success or not. - + """ @@ -45,6 +45,7 @@ import time import signal import traceback +import subprocess from swsscommon import swsscommon from utilities_common import chassis @@ -71,6 +72,9 @@ PRINT_MSG_LEN_MAX = 1000 +FRR_CHECK_RETRIES = 3 +FRR_WAIT_TIME = 15 + class Level(Enum): ERR = 'ERR' INFO = 'INFO' @@ -293,7 +297,7 @@ def get_routes(): def get_route_entries(): """ - helper to read present route entries from ASIC-DB and + helper to read present route entries from ASIC-DB and as well initiate selector for ASIC-DB:ASIC-state updates. :return (selector, subscriber, ) """ @@ -309,7 +313,7 @@ def get_route_entries(): res, e = checkout_rt_entry(k) if res: rt.append(e) - + print_message(syslog.LOG_DEBUG, json.dumps({"ASIC_ROUTE_ENTRY": sorted(rt)}, indent=4)) selector = swsscommon.Select() @@ -317,6 +321,31 @@ def get_route_entries(): return (selector, subs, sorted(rt)) +def is_suppress_fib_pending_enabled(): + """ + Returns True if FIB suppression is enabled, False otherwise + """ + cfg_db = swsscommon.ConfigDBConnector() + cfg_db.connect() + + state = cfg_db.get_entry('DEVICE_METADATA', 'localhost').get('suppress-fib-pending') + + return state == 'enabled' + + +def get_frr_routes(): + """ + Read routes from zebra through CLI command + :return frr routes dictionary + """ + + output = subprocess.check_output('show ip route json', shell=True) + routes = json.loads(output) + output = subprocess.check_output('show ipv6 route json', shell=True) + routes.update(json.loads(output)) + return routes + + def get_interfaces(): """ helper to read interface table from APPL-DB. @@ -354,7 +383,7 @@ def filter_out_local_interfaces(keys): chassis_local_intfs = chassis.get_chassis_local_interfaces() local_if_lst.update(set(chassis_local_intfs)) - + db = swsscommon.DBConnector(APPL_DB_NAME, 0) tbl = swsscommon.Table(db, 'ROUTE_TABLE') @@ -493,6 +522,61 @@ def filter_out_standalone_tunnel_routes(routes): return updated_routes +def check_frr_pending_routes(): + """ + Check FRR routes for offload flag presence by executing "show ip route json" + Returns a list of routes that have no offload flag. + """ + + missed_rt = [] + + retries = FRR_CHECK_RETRIES + for i in range(retries): + missed_rt = [] + frr_routes = get_frr_routes() + + for _, entries in frr_routes.items(): + for entry in entries: + if entry['protocol'] != 'bgp': + continue + + # TODO: Also handle VRF routes. 
Currently this script does not check for VRF routes so it would be incorrect for us + # to assume they are installed in ASIC_DB, so we don't handle them. + if entry['vrfName'] != 'default': + continue + + if not entry.get('offloaded', False): + missed_rt.append(entry) + + if not missed_rt: + break + + time.sleep(FRR_WAIT_TIME) + + return missed_rt + + +def mitigate_installed_not_offloaded_frr_routes(missed_frr_rt, rt_appl): + """ + Mitigate installed but not offloaded FRR routes. + + In case route exists in APPL_DB, this function will manually send a notification to fpmsyncd + to trigger the flow that sends offload flag to zebra. + + It is designed to mitigate a problem when orchagent fails to send notification about installed route to fpmsyncd + or fpmsyncd not being able to read the notification or in case zebra fails to receive offload update due to variety of reasons. + All of the above mentioned cases must be considered as a bug, but even in that case we will report an error in the log but + given that this script ensures the route is installed in the hardware it will automitigate such a bug. + """ + db = swsscommon.DBConnector('APPL_STATE_DB', 0) + response_producer = swsscommon.NotificationProducer(db, f'{APPL_DB_NAME}_{swsscommon.APP_ROUTE_TABLE_NAME}_RESPONSE_CHANNEL') + for entry in [entry for entry in missed_frr_rt if entry['prefix'] in rt_appl]: + fvs = swsscommon.FieldValuePairs([('err_str', 'SWSS_RC_SUCCESS'), ('protocol', entry['protocol'])]) + response_producer.send('SWSS_RC_SUCCESS', entry['prefix'], fvs) + + print_message(syslog.LOG_ERR, f'Mitigated route {entry["prefix"]}') + + def get_soc_ips(config_db): mux_table = config_db.get_table('MUX_CABLE') soc_ips = [] @@ -536,7 +620,7 @@ def check_routes(): """ The heart of this script which runs the checks. Read APPL-DB & ASIC-DB, the relevant tables for route checking. - Checkout routes in ASIC-DB to match APPL-DB, discounting local & + Checkout routes in ASIC-DB to match APPL-DB, discounting local & default routes. In case of missed / unexpected entries in ASIC, it might be due to update latency between APPL & ASIC DBs. So collect ASIC-DB subscribe updates for a second, and checkout if you see SET @@ -545,12 +629,16 @@ def check_routes(): If there are still some unjustifiable diffs, between APPL & ASIC DB, related to routes report failure, else all good. + If there are FRR routes that aren't marked offloaded but all APPL & ASIC DB + routes are in sync report failure and perform a mitigation action. + :return (0, None) on sucess, else (-1, results) where results holds the unjustifiable entries. """ intf_appl_miss = [] rt_appl_miss = [] rt_asic_miss = [] + rt_frr_miss = [] results = {} adds = [] @@ -599,11 +687,22 @@ def check_routes(): if rt_asic_miss: results["Unaccounted_ROUTE_ENTRY_TABLE_entries"] = rt_asic_miss + rt_frr_miss = check_frr_pending_routes() + + if rt_frr_miss: + results["missed_FRR_routes"] = rt_frr_miss + if results: print_message(syslog.LOG_WARNING, "Failure results: {", json.dumps(results, indent=4), "}") print_message(syslog.LOG_WARNING, "Failed. 
Look at reported mismatches above") print_message(syslog.LOG_WARNING, "add: ", json.dumps(adds, indent=4)) print_message(syslog.LOG_WARNING, "del: ", json.dumps(deletes, indent=4)) + + if rt_frr_miss and not rt_appl_miss and not rt_asic_miss: + print_message(syslog.LOG_ERR, "Some routes are not set offloaded in FRR but all routes in APPL_DB and ASIC_DB are in sync") + if is_suppress_fib_pending_enabled(): + mitigate_installed_not_offloaded_frr_routes(rt_frr_miss, rt_appl) + return -1, results else: print_message(syslog.LOG_INFO, "All good!") @@ -649,7 +748,7 @@ def main(): return ret, res else: return ret, res - + if __name__ == "__main__": diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index 899dada260..51af58e86d 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -831,7 +831,8 @@ "mac": "1d:34:db:16:a6:00", "platform": "x86_64-mlnx_msn3800-r0", "peer_switch": "sonic-switch", - "type": "ToRRouter" + "type": "ToRRouter", + "suppress-fib-pending": "enabled" }, "SNMP_COMMUNITY|msft": { "TYPE": "RO" diff --git a/tests/route_check_test.py b/tests/route_check_test.py index 85e6a64a95..4d93c74e2d 100644 --- a/tests/route_check_test.py +++ b/tests/route_check_test.py @@ -7,7 +7,7 @@ import time from sonic_py_common import device_info from unittest.mock import MagicMock, patch -from tests.route_check_test_data import APPL_DB, ARGS, ASIC_DB, CONFIG_DB, DEFAULT_CONFIG_DB, DESCR, OP_DEL, OP_SET, PRE, RESULT, RET, TEST_DATA, UPD +from tests.route_check_test_data import APPL_DB, ARGS, ASIC_DB, CONFIG_DB, DEFAULT_CONFIG_DB, DESCR, OP_DEL, OP_SET, PRE, RESULT, RET, TEST_DATA, UPD, FRR_ROUTES import pytest @@ -239,6 +239,7 @@ def setup(self): def init(self): route_check.UNIT_TESTING = 1 + route_check.FRR_WAIT_TIME = 0 @pytest.fixture def force_hang(self): @@ -258,7 +259,8 @@ def mock_dbs(self): patch("route_check.swsscommon.Table") as mock_table, \ patch("route_check.swsscommon.Select") as mock_sel, \ patch("route_check.swsscommon.SubscriberStateTable") as mock_subs, \ - patch("route_check.swsscommon.ConfigDBConnector", return_value=mock_config_db): + patch("route_check.swsscommon.ConfigDBConnector", return_value=mock_config_db), \ + patch("route_check.swsscommon.NotificationProducer"): device_info.get_platform = MagicMock(return_value='unittest') set_mock(mock_table, mock_conn, mock_sel, mock_subs, mock_config_db) yield @@ -272,7 +274,21 @@ def test_route_check(self, mock_dbs, test_num): set_test_case_data(ct_data) logger.info("Running test case {}: {}".format(test_num, ct_data[DESCR])) - with patch('sys.argv', ct_data[ARGS].split()): + with patch('sys.argv', ct_data[ARGS].split()), \ + patch('route_check.subprocess.check_output') as mock_check_output: + + check_frr_patch = patch('route_check.check_frr_pending_routes', lambda: []) + + if FRR_ROUTES in ct_data: + routes = ct_data[FRR_ROUTES] + + def side_effect(*args, **kwargs): + return json.dumps(routes) + + mock_check_output.side_effect = side_effect + else: + check_frr_patch.start() + ret, res = route_check.main() expect_ret = ct_data[RET] if RET in ct_data else 0 expect_res = ct_data[RESULT] if RESULT in ct_data else None @@ -283,6 +299,8 @@ def test_route_check(self, mock_dbs, test_num): assert ret == expect_ret assert res == expect_res + check_frr_patch.stop() + def test_timeout(self, mock_dbs, force_hang): # Test timeout ex_raised = False diff --git a/tests/route_check_test_data.py b/tests/route_check_test_data.py index 9e4cd3a009..b8ba9c521a 100644 --- 
a/tests/route_check_test_data.py +++ b/tests/route_check_test_data.py @@ -6,6 +6,7 @@ CONFIG_DB = 4 PRE = "pre-value" UPD = "update" +FRR_ROUTES = "frr-routes" RESULT = "res" OP_SET = "SET" @@ -359,5 +360,106 @@ } } } - } + }, + "10": { + DESCR: "basic good one, check FRR routes", + ARGS: "route_check -m INFO -i 1000", + PRE: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } + }, + }, + FRR_ROUTES: { + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "vrfName": "default", + "protocol": "bgp", + "offloaded": "true", + }, + ], + "10.10.196.12/31": [ + { + "prefix": "10.10.196.12/31", + "vrfName": "default", + "protocol": "bgp", + "offloaded": "true", + }, + ], + "10.10.196.24/31": [ + { + "protocol": "connected", + }, + ], + }, + }, + "11": { + DESCR: "failure test case, missing FRR routes", + ARGS: "route_check -m INFO -i 1000", + PRE: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } + }, + }, + FRR_ROUTES: { + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "vrfName": "default", + "protocol": "bgp", + "offloaded": "true", + }, + ], + "10.10.196.12/31": [ + { + "prefix": "10.10.196.12/31", + "vrfName": "default", + "protocol": "bgp", + }, + ], + "10.10.196.24/31": [ + { + "protocol": "connected", + }, + ], + }, + RESULT: { + "missed_FRR_routes": [ + {"prefix": "10.10.196.12/31", "vrfName": "default", "protocol": "bgp"} + ], + }, + RET: -1, + }, } From e16bdaae18ae41f43e6bffabbb7a30cc47785c74 Mon Sep 17 00:00:00 2001 From: kellyyeh <42761586+kellyyeh@users.noreply.github.com> Date: Tue, 7 Mar 2023 10:47:13 -0800 Subject: [PATCH 31/66] Fix non-zero status exit on non secure boot system (#2715) What I did Warm-reboot fails on kvm due to non-zero exit upon command bootctl status 2>/dev/null | grep -c "Secure Boot: enabled" How I did it Added || true to return 0 when previous command fails. Added CHECK_SECURE_UPGRADE_ENABLED to check output of previous command Added debug logs How to verify it Run warm-reboot on kvm and physical device when increased verbosity. Expects debug log to indicate secure/non secure boot. 
Successful warm reboot --- scripts/fast-reboot | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/scripts/fast-reboot b/scripts/fast-reboot index 2fb744f404..defde666ee 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -614,11 +614,14 @@ if is_secureboot && grep -q aboot_machine= /host/machine.conf; then load_aboot_secureboot_kernel else # check if secure boot is enable in UEFI - SECURE_UPGRADE_ENABLED=$(bootctl status 2>/dev/null | grep -c "Secure Boot: enabled") - if [ ${SECURE_UPGRADE_ENABLED} -eq 1 ]; then - load_kernel_secure - else + CHECK_SECURE_UPGRADE_ENABLED=0 + SECURE_UPGRADE_ENABLED=$(bootctl status 2>/dev/null | grep -c "Secure Boot: enabled") || CHECK_SECURE_UPGRADE_ENABLED=$? + if [[ CHECK_SECURE_UPGRADE_ENABLED -ne 0 ]]; then + debug "Loading kernel without secure boot" load_kernel + else + debug "Loading kernel with secure boot" + load_kernel_secure fi fi From 2fc2b826cfb37310b4c44c0ecc46d6121dc95417 Mon Sep 17 00:00:00 2001 From: isabelmsft <67024108+isabelmsft@users.noreply.github.com> Date: Tue, 7 Mar 2023 14:42:50 -0800 Subject: [PATCH 32/66] YANG validation for ConfigDB Updates: MIRROR_SESSION use case (#2430) --- config/main.py | 66 ++++++++++++++++------- tests/config_mirror_session_test.py | 82 +++++++++++++++++++++++++++++ tests/config_snmp_test.py | 1 + 3 files changed, 129 insertions(+), 20 deletions(-) diff --git a/config/main.py b/config/main.py index f2576ac74d..384e6f9f68 100644 --- a/config/main.py +++ b/config/main.py @@ -2354,25 +2354,35 @@ def add_erspan(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer session_info['gre_type'] = gre_type session_info = gather_session_info(session_info, policer, queue, src_port, direction) + ctx = click.get_current_context() """ For multi-npu platforms we need to program all front asic namespaces """ namespaces = multi_asic.get_all_namespaces() if not namespaces['front_ns']: - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() - if validate_mirror_session_config(config_db, session_name, None, src_port, direction) is False: - return - config_db.set_entry("MIRROR_SESSION", session_name, session_info) + if ADHOC_VALIDATION: + if validate_mirror_session_config(config_db, session_name, None, src_port, direction) is False: + return + try: + config_db.set_entry("MIRROR_SESSION", session_name, session_info) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) + else: per_npu_configdb = {} for front_asic_namespaces in namespaces['front_ns']: - per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces) + per_npu_configdb[front_asic_namespaces] = ValidatedConfigDBConnector(ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces)) per_npu_configdb[front_asic_namespaces].connect() - if validate_mirror_session_config(per_npu_configdb[front_asic_namespaces], session_name, None, src_port, direction) is False: - return - per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, session_info) + if ADHOC_VALIDATION: + if validate_mirror_session_config(per_npu_configdb[front_asic_namespaces], session_name, None, src_port, direction) is False: + return + try: + per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, session_info) + except ValueError as e: + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) @mirror_session.group(cls=clicommon.AbbreviationGroup, name='span') @click.pass_context @@ -2404,25 +2414,34 @@ def add_span(session_name, dst_port, src_port, direction, queue, policer): } session_info = gather_session_info(session_info, policer, queue, src_port, direction) + ctx = click.get_current_context() """ For multi-npu platforms we need to program all front asic namespaces """ namespaces = multi_asic.get_all_namespaces() if not namespaces['front_ns']: - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() - if validate_mirror_session_config(config_db, session_name, dst_port, src_port, direction) is False: - return - config_db.set_entry("MIRROR_SESSION", session_name, session_info) + if ADHOC_VALIDATION: + if validate_mirror_session_config(config_db, session_name, dst_port, src_port, direction) is False: + return + try: + config_db.set_entry("MIRROR_SESSION", session_name, session_info) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) else: per_npu_configdb = {} for front_asic_namespaces in namespaces['front_ns']: - per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces) + per_npu_configdb[front_asic_namespaces] = ValidatedConfigDBConnector(ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces)) per_npu_configdb[front_asic_namespaces].connect() - if validate_mirror_session_config(per_npu_configdb[front_asic_namespaces], session_name, dst_port, src_port, direction) is False: - return - per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, session_info) + if ADHOC_VALIDATION: + if validate_mirror_session_config(per_npu_configdb[front_asic_namespaces], session_name, dst_port, src_port, direction) is False: + return + try: + per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, session_info) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) @mirror_session.command() @@ -2434,16 +2453,23 @@ def remove(session_name): For multi-npu platforms we need to program all front asic namespaces """ namespaces = multi_asic.get_all_namespaces() + ctx = click.get_current_context() if not namespaces['front_ns']: - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() - config_db.set_entry("MIRROR_SESSION", session_name, None) + try: + config_db.set_entry("MIRROR_SESSION", session_name, None) + except JsonPatchConflict as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) else: per_npu_configdb = {} for front_asic_namespaces in namespaces['front_ns']: - per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces) + per_npu_configdb[front_asic_namespaces] = ValidatedConfigDBConnector(ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces)) per_npu_configdb[front_asic_namespaces].connect() - per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, None) + try: + per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, None) + except JsonPatchConflict as e: + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) # # 'pfcwd' group ('config pfcwd ...') diff --git a/tests/config_mirror_session_test.py b/tests/config_mirror_session_test.py index 5585cab87a..ccbc196b50 100644 --- a/tests/config_mirror_session_test.py +++ b/tests/config_mirror_session_test.py @@ -1,7 +1,11 @@ import pytest import config.main as config +import jsonpatch from unittest import mock from click.testing import CliRunner +from mock import patch +from jsonpatch import JsonPatchConflict +from sonic_py_common import multi_asic ERR_MSG_IP_FAILURE = "does not appear to be an IPv4 or IPv6 network" ERR_MSG_IP_VERSION_FAILURE = "not a valid IPv4 address" @@ -172,7 +176,34 @@ def test_mirror_session_erspan_add(): mocked.assert_called_with("test_session", "100.1.1.1", "2.2.2.2", 8, 63, 0, 0, None, None, None) +@patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) +@patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError)) +def test_mirror_session_erspan_add_invalid_yang_validation(): + config.ADHOC_VALIDATION = False + runner = CliRunner() + result = runner.invoke( + config.config.commands["mirror_session"].commands["erspan"].commands["add"], + ["test_session", "100.1.1.1", "2.2.2.2", "8", "63", "10", "100"]) + print(result.output) + assert "Invalid ConfigDB. Error" in result.output + + +@patch("config.main.ConfigDBConnector", spec=True, connect=mock.Mock()) +@patch("config.main.multi_asic.get_all_namespaces", mock.Mock(return_value={'front_ns': 'sample_ns'})) +@patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) +@patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError)) +def test_mirror_session_erspan_add_multi_asic_invalid_yang_validation(mock_db_connector): + config.ADHOC_VALIDATION = False + runner = CliRunner() + result = runner.invoke( + config.config.commands["mirror_session"].commands["erspan"].commands["add"], + ["test_session", "100.1.1.1", "2.2.2.2", "8", "63", "10", "100"]) + print(result.output) + assert "Invalid ConfigDB. Error" in result.output + + def test_mirror_session_span_add(): + config.ADHOC_VALIDATION = True runner = CliRunner() # Verify invalid queue @@ -273,3 +304,54 @@ def test_mirror_session_span_add(): mocked.assert_called_with("test_session", "Ethernet0", "Ethernet4", "rx", 0, None) + +@patch("config.main.ConfigDBConnector", spec=True, connect=mock.Mock()) +@patch("config.main.multi_asic.get_all_namespaces", mock.Mock(return_value={'front_ns': 'sample_ns'})) +@patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) +@patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError)) +def test_mirror_session_span_add_multi_asic_invalid_yang_validation(mock_db_connector): + config.ADHOC_VALIDATION = False + runner = CliRunner() + result = runner.invoke( + config.config.commands["mirror_session"].commands["span"].commands["add"], + ["test_session", "Ethernet0", "Ethernet4", "rx", "0"]) + print(result.output) + assert "Invalid ConfigDB. 
Error" in result.output
+
+
+@patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True))
+@patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError))
+def test_mirror_session_span_add_invalid_yang_validation():
+    config.ADHOC_VALIDATION = False
+    runner = CliRunner()
+    result = runner.invoke(
+        config.config.commands["mirror_session"].commands["span"].commands["add"],
+        ["test_session", "Ethernet0", "Ethernet4", "rx", "0"])
+    print(result.output)
+    assert "Invalid ConfigDB. Error" in result.output
+
+
+@patch("config.main.multi_asic.get_all_namespaces", mock.Mock(return_value={'front_ns': 'sample_ns'}))
+@patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True))
+@patch("config.main.ConfigDBConnector", spec=True, connect=mock.Mock())
+@patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=JsonPatchConflict))
+def test_mirror_session_remove_multi_asic_invalid_yang_validation(mock_db_connector):
+    config.ADHOC_VALIDATION = False
+    runner = CliRunner()
+    result = runner.invoke(
+        config.config.commands["mirror_session"].commands["remove"],
+        ["mrr_sample"])
+    print(result.output)
+    assert "Invalid ConfigDB. Error" in result.output
+
+
+@patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True))
+@patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=JsonPatchConflict))
+def test_mirror_session_remove_invalid_yang_validation():
+    config.ADHOC_VALIDATION = False
+    runner = CliRunner()
+    result = runner.invoke(
+        config.config.commands["mirror_session"].commands["remove"],
+        ["mrr_sample"])
+    print(result.output)
+    assert "Invalid ConfigDB. Error" in result.output
diff --git a/tests/config_snmp_test.py b/tests/config_snmp_test.py
index 096f21cb80..76f5675690 100644
--- a/tests/config_snmp_test.py
+++ b/tests/config_snmp_test.py
@@ -118,6 +118,7 @@ def setup_class(cls):

     # Add snmp community tests
     def test_config_snmp_community_add_new_community_ro(self):
+        config.ADHOC_VALIDATION = True
         db = Db()
         runner = CliRunner()
         with mock.patch('utilities_common.cli.run_command') as mock_run_command:

From c7aa841632c1083b72a9d0d0b7ab5579d9d90bb0 Mon Sep 17 00:00:00 2001
From: vdahiya12 <67608553+vdahiya12@users.noreply.github.com>
Date: Tue, 7 Mar 2023 15:19:53 -0800
Subject: [PATCH 33/66] [show][muxcable] increase timeout for displaying HW_STATUS (#2712)

What I did
Probing the mux direction does not always return success.

Sample output of: while [ 1 ]; do date; show mux hwmode muxdirection; show mux status; sleep 1; done

Mon 27 Feb 2023 03:12:25 PM UTC
Port Direction Presence
----------- ----------- ----------
Ethernet16 unknown True

PORT STATUS HEALTH HWSTATUS LAST_SWITCHOVER_TIME
----------- -------- -------- ------------ ---------------------------
Ethernet16 standby healthy inconsistent 2023-Feb-25 07:55:18.269177

Increasing the timeout for getting the values back from ycabled to 0.5 seconds removes the inconsistency and displays consistent values, because while telemetry is running, getting the actual mux value can take significantly longer than 0.1 seconds.

PORT STATUS HEALTH HWSTATUS LAST_SWITCHOVER_TIME
----------- -------- -------- ------------ ---------------------------
Ethernet16 standby healthy consistent 2023-Feb-25 07:55:18.269177

How I did it
How to verify it
Manually ran the changes on a setup. Worst-case CLI return time could be 16 seconds for 32 ports (roughly 200 msec per port while telemetry is running), but on average the show command returns in < 1 sec for all 32 ports.

Signed-off-by: vaibhav-dahiya
---
 show/muxcable.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/show/muxcable.py b/show/muxcable.py
index 837e362789..5df4bd8c2a 100644
--- a/show/muxcable.py
+++ b/show/muxcable.py
@@ -20,7 +20,7 @@
 REDIS_TIMEOUT_MSECS = 0
 SELECT_TIMEOUT = 1000
-HWMODE_MUXDIRECTION_TIMEOUT = 0.1
+HWMODE_MUXDIRECTION_TIMEOUT = 0.5

 # The empty namespace refers to linux host namespace.
 EMPTY_NAMESPACE = ''

From 2ef5b31e807048e39fc125370b40b58ba8db8b03 Mon Sep 17 00:00:00 2001
From: isabelmsft <67024108+isabelmsft@users.noreply.github.com>
Date: Wed, 8 Mar 2023 00:19:03 -0800
Subject: [PATCH 34/66] [GCU] Add PFC_WD RDMA validator (#2619)

---
 generic_config_updater/change_applier.py | 2 +-
 .../field_operation_validators.py | 26 ++++++++++++++
 .../gcu_field_operation_validators.conf.json | 20 +++++++++++
 ....json => gcu_services_validator.conf.json} | 0
 generic_config_updater/gu_common.py | 36 +++++++++++++++++++
 setup.py | 2 +-
 .../generic_config_updater/gu_common_test.py | 26 +++++++++-----
 7 files changed, 102 insertions(+), 10 deletions(-)
 create mode 100644 generic_config_updater/field_operation_validators.py
 create mode 100644 generic_config_updater/gcu_field_operation_validators.conf.json
 rename generic_config_updater/{generic_config_updater.conf.json => gcu_services_validator.conf.json} (100%)

diff --git a/generic_config_updater/change_applier.py b/generic_config_updater/change_applier.py
index f5a365d59f..d0818172f8 100644
--- a/generic_config_updater/change_applier.py
+++ b/generic_config_updater/change_applier.py
@@ -9,7 +9,7 @@
 from .gu_common import genericUpdaterLogging

 SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
-UPDATER_CONF_FILE = f"{SCRIPT_DIR}/generic_config_updater.conf.json"
+UPDATER_CONF_FILE = f"{SCRIPT_DIR}/gcu_services_validator.conf.json"
 logger = genericUpdaterLogging.get_logger(title="Change Applier")
 print_to_console = False

diff --git a/generic_config_updater/field_operation_validators.py b/generic_config_updater/field_operation_validators.py
new file mode 100644
index 0000000000..befd4b8749
--- /dev/null
+++ b/generic_config_updater/field_operation_validators.py
@@ -0,0 +1,26 @@
+from sonic_py_common import device_info
+import re
+
+def rdma_config_update_validator():
+    version_info = device_info.get_sonic_version_info()
+    build_version = version_info.get('build_version')
+    asic_type = version_info.get('asic_type')
+
+    if (asic_type != 'mellanox' and asic_type != 'broadcom' and asic_type != 'cisco-8000'):
+        return False
+
+    version_substrings = build_version.split('.')
+    branch_version = None
+
+    for substring in version_substrings:
+        if substring.isdigit() and re.match(r'^\d{8}$', substring):
+            branch_version = substring
+            break
+
+    if branch_version is None:
+        return False
+
+    if asic_type == 'cisco-8000':
+        return branch_version >= "20201200"
+    else:
+        return branch_version >= "20181100"
diff --git a/generic_config_updater/gcu_field_operation_validators.conf.json b/generic_config_updater/gcu_field_operation_validators.conf.json
new file mode 100644
index 0000000000..f12a14d8eb
--- /dev/null
+++ b/generic_config_updater/gcu_field_operation_validators.conf.json @@ -0,0 +1,20 @@ +{ + "README": [ + "field_operation_validators provides, module & method name as ", + " .", + "NOTE: module name could have '.'", + " ", + "The last element separated by '.' is considered as ", + "method name", + "", + "e.g. 'show.acl.test_acl'", + "", + "field_operation_validators for a given table defines a list of validators that all must pass for modification to the specified field and table to be allowed", + "" + ], + "tables": { + "PFC_WD": { + "field_operation_validators": [ "generic_config_updater.field_operation_validators.rdma_config_update_validator" ] + } + } +} diff --git a/generic_config_updater/generic_config_updater.conf.json b/generic_config_updater/gcu_services_validator.conf.json similarity index 100% rename from generic_config_updater/generic_config_updater.conf.json rename to generic_config_updater/gcu_services_validator.conf.json diff --git a/generic_config_updater/gu_common.py b/generic_config_updater/gu_common.py index 0d7a5281bb..e8c66fcbbe 100644 --- a/generic_config_updater/gu_common.py +++ b/generic_config_updater/gu_common.py @@ -1,5 +1,6 @@ import json import jsonpatch +import importlib from jsonpointer import JsonPointer import sonic_yang import sonic_yang_ext @@ -7,11 +8,14 @@ import yang as ly import copy import re +import os from sonic_py_common import logger from enum import Enum YANG_DIR = "/usr/local/yang-models" SYSLOG_IDENTIFIER = "GenericConfigUpdater" +SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) +GCU_FIELD_OP_CONF_FILE = f"{SCRIPT_DIR}/gcu_field_operation_validators.conf.json" class GenericConfigUpdaterError(Exception): pass @@ -162,6 +166,38 @@ def validate_field_operation(self, old_config, target_config): if any(op['op'] == operation and field == op['path'] for op in patch): raise IllegalPatchOperationError("Given patch operation is invalid. Operation: {} is illegal on field: {}".format(operation, field)) + def _invoke_validating_function(cmd): + # cmd is in the format as . + method_name = cmd.split(".")[-1] + module_name = ".".join(cmd.split(".")[0:-1]) + if module_name != "generic_config_updater.field_operation_validators" or "validator" not in method_name: + raise GenericConfigUpdaterError("Attempting to call invalid method {} in module {}. 
Module must be generic_config_updater.field_operation_validators, and method must be a defined validator".format(method_name, module_name)) + module = importlib.import_module(module_name, package=None) + method_to_call = getattr(module, method_name) + return method_to_call() + + if os.path.exists(GCU_FIELD_OP_CONF_FILE): + with open(GCU_FIELD_OP_CONF_FILE, "r") as s: + gcu_field_operation_conf = json.load(s) + else: + raise GenericConfigUpdaterError("GCU field operation validators config file not found") + + for element in patch: + path = element["path"] + match = re.search(r'\/([^\/]+)(\/|$)', path) # This matches the table name in the path, eg if path if /PFC_WD/GLOBAL, the match would be PFC_WD + if match is not None: + table = match.group(1) + else: + raise GenericConfigUpdaterError("Invalid jsonpatch path: {}".format(path)) + validating_functions= set() + tables = gcu_field_operation_conf["tables"] + validating_functions.update(tables.get(table, {}).get("field_operation_validators", [])) + + for function in validating_functions: + if not _invoke_validating_function(function): + raise IllegalPatchOperationError("Modification of {} table is illegal- validating function {} returned False".format(table, function)) + + def validate_lanes(self, config_db): if "PORT" not in config_db: return True, None diff --git a/setup.py b/setup.py index 70d7473bd7..6af8e3394a 100644 --- a/setup.py +++ b/setup.py @@ -64,7 +64,7 @@ 'sonic_cli_gen', ], package_data={ - 'generic_config_updater': ['generic_config_updater.conf.json'], + 'generic_config_updater': ['gcu_services_validator.conf.json', 'gcu_field_operation_validators.conf.json'], 'show': ['aliases.ini'], 'sonic_installer': ['aliases.ini'], 'tests': ['acl_input/*', diff --git a/tests/generic_config_updater/gu_common_test.py b/tests/generic_config_updater/gu_common_test.py index 7fa471ee3b..a319a25ead 100644 --- a/tests/generic_config_updater/gu_common_test.py +++ b/tests/generic_config_updater/gu_common_test.py @@ -3,8 +3,10 @@ import jsonpatch import sonic_yang import unittest -from unittest.mock import MagicMock, Mock, patch +import mock +from unittest.mock import MagicMock, Mock +from mock import patch from .gutest_helpers import create_side_effect_dict, Files import generic_config_updater.gu_common as gu_common @@ -69,11 +71,25 @@ def setUp(self): self.config_wrapper_mock = gu_common.ConfigWrapper() self.config_wrapper_mock.get_config_db_as_json=MagicMock(return_value=Files.CONFIG_DB_AS_JSON) + @patch("sonic_py_common.device_info.get_sonic_version_info", mock.Mock(return_value={"asic_type": "mellanox", "build_version": "SONiC.20181131"})) def test_validate_field_operation_legal__pfcwd(self): old_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "60"}}} target_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "40"}}} config_wrapper = gu_common.ConfigWrapper() config_wrapper.validate_field_operation(old_config, target_config) + + def test_validate_field_operation_illegal__pfcwd(self): + old_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "60"}}} + target_config = {"PFC_WD": {"GLOBAL": {}}} + config_wrapper = gu_common.ConfigWrapper() + self.assertRaises(gu_common.IllegalPatchOperationError, config_wrapper.validate_field_operation, old_config, target_config) + + @patch("sonic_py_common.device_info.get_sonic_version_info", mock.Mock(return_value={"asic_type": "invalid-asic", "build_version": "SONiC.20181131"})) + def test_validate_field_modification_illegal__pfcwd(self): + old_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "60"}}} + target_config 
= {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "80"}}} + config_wrapper = gu_common.ConfigWrapper() + self.assertRaises(gu_common.IllegalPatchOperationError, config_wrapper.validate_field_operation, old_config, target_config) def test_validate_field_operation_legal__rm_loopback1(self): old_config = { @@ -92,13 +108,7 @@ def test_validate_field_operation_legal__rm_loopback1(self): } config_wrapper = gu_common.ConfigWrapper() config_wrapper.validate_field_operation(old_config, target_config) - - def test_validate_field_operation_illegal__pfcwd(self): - old_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": 60}}} - target_config = {"PFC_WD": {"GLOBAL": {}}} - config_wrapper = gu_common.ConfigWrapper() - self.assertRaises(gu_common.IllegalPatchOperationError, config_wrapper.validate_field_operation, old_config, target_config) - + def test_validate_field_operation_illegal__rm_loopback0(self): old_config = { "LOOPBACK_INTERFACE": { From 64d2efd20b528f67c22dcfaf42e6ca5081aba416 Mon Sep 17 00:00:00 2001 From: bingwang-ms <66248323+bingwang-ms@users.noreply.github.com> Date: Wed, 8 Mar 2023 13:28:59 -0800 Subject: [PATCH 35/66] Improve show acl commands (#2667) * Add status for ACL_TABLE and ACL_RULE in STATE_DB --- acl_loader/main.py | 75 ++++++++++++++++---- tests/aclshow_test.py | 7 +- tests/mock_tables/asic0/config_db.json | 11 +++ tests/mock_tables/asic0/state_db.json | 6 ++ tests/mock_tables/asic2/config_db.json | 11 +++ tests/mock_tables/asic2/state_db.json | 6 ++ tests/mock_tables/config_db.json | 11 +++ tests/mock_tables/state_db.json | 6 ++ tests/show_acl_test.py | 95 ++++++++++++++++++++++++++ 9 files changed, 213 insertions(+), 15 deletions(-) create mode 100644 tests/show_acl_test.py diff --git a/acl_loader/main.py b/acl_loader/main.py index c50efec032..2eab089c21 100644 --- a/acl_loader/main.py +++ b/acl_loader/main.py @@ -72,6 +72,10 @@ class AclLoader(object): ACL_TABLE = "ACL_TABLE" ACL_RULE = "ACL_RULE" + CFG_ACL_TABLE = "ACL_TABLE" + STATE_ACL_TABLE = "ACL_TABLE_TABLE" + CFG_ACL_RULE = "ACL_RULE" + STATE_ACL_RULE = "ACL_RULE_TABLE" ACL_TABLE_TYPE_MIRROR = "MIRROR" ACL_TABLE_TYPE_CTRLPLANE = "CTRLPLANE" CFG_MIRROR_SESSION_TABLE = "MIRROR_SESSION" @@ -117,11 +121,16 @@ def __init__(self): self.tables_db_info = {} self.rules_db_info = {} self.rules_info = {} + self.tables_state_info = None + self.rules_state_info = None # Load database config files load_db_config() self.sessions_db_info = {} + self.acl_table_status = {} + self.acl_rule_status = {} + self.configdb = ConfigDBConnector() self.configdb.connect() self.statedb = SonicV2Connector(host="127.0.0.1") @@ -156,6 +165,8 @@ def __init__(self): self.read_rules_info() self.read_sessions_info() self.read_policers_info() + self.acl_table_status = self.read_acl_object_status_info(self.CFG_ACL_TABLE, self.STATE_ACL_TABLE) + self.acl_rule_status = self.read_acl_object_status_info(self.CFG_ACL_RULE, self.STATE_ACL_RULE) def read_tables_info(self): """ @@ -210,7 +221,7 @@ def read_sessions_info(self): for key in self.sessions_db_info: if self.per_npu_statedb: # For multi-npu platforms we will read from all front asic name space - # statedb as the monitor port will be differnt for each asic + # statedb as the monitor port will be different for each asic # and it's status also might be different (ideally should not happen) # We will store them as dict of 'asic' : value self.sessions_db_info[key]["status"] = {} @@ -224,6 +235,35 @@ def read_sessions_info(self): self.sessions_db_info[key]["status"] = state_db_info.get("status", "inactive") if 
state_db_info else "error" self.sessions_db_info[key]["monitor_port"] = state_db_info.get("monitor_port", "") if state_db_info else "" + def read_acl_object_status_info(self, cfg_db_table_name, state_db_table_name): + """ + Read ACL_TABLE status or ACL_RULE status from STATE_DB + """ + if self.per_npu_configdb: + namespace_configdb = list(self.per_npu_configdb.values())[0] + keys = namespace_configdb.get_table(cfg_db_table_name).keys() + else: + keys = self.configdb.get_table(cfg_db_table_name).keys() + + status = {} + for key in keys: + # For ACL_RULE, the key is (acl_table_name, acl_rule_name) + if isinstance(key, tuple): + state_db_key = key[0] + "|" + key[1] + else: + state_db_key = key + status[key] = {} + if self.per_npu_statedb: + status[key]['status'] = {} + for namespace_key, namespace_statedb in self.per_npu_statedb.items(): + state_db_info = namespace_statedb.get_all(self.statedb.STATE_DB, "{}|{}".format(state_db_table_name, state_db_key)) + status[key]['status'][namespace_key] = state_db_info.get("status", "N/A") if state_db_info else "N/A" + else: + state_db_info = self.statedb.get_all(self.statedb.STATE_DB, "{}|{}".format(state_db_table_name, state_db_key)) + status[key]['status'] = state_db_info.get("status", "N/A") if state_db_info else "N/A" + + return status + def get_sessions_db_info(self): return self.sessions_db_info @@ -786,32 +826,36 @@ def show_table(self, table_name): :param table_name: Optional. ACL table name. Filter tables by specified name. :return: """ - header = ("Name", "Type", "Binding", "Description", "Stage") + header = ("Name", "Type", "Binding", "Description", "Stage", "Status") data = [] for key, val in self.get_tables_db_info().items(): if table_name and key != table_name: continue - + stage = val.get("stage", Stage.INGRESS).lower() - + # Get ACL table status from STATE_DB + if key in self.acl_table_status: + status = self.acl_table_status[key]['status'] + else: + status = 'N/A' if val["type"] == AclLoader.ACL_TABLE_TYPE_CTRLPLANE: services = natsorted(val["services"]) - data.append([key, val["type"], services[0], val["policy_desc"], stage]) + data.append([key, val["type"], services[0], val["policy_desc"], stage, status]) if len(services) > 1: for service in services[1:]: - data.append(["", "", service, "", ""]) + data.append(["", "", service, "", "", ""]) else: if not val["ports"]: - data.append([key, val["type"], "", val["policy_desc"], stage]) + data.append([key, val["type"], "", val["policy_desc"], stage, status]) else: ports = natsorted(val["ports"]) - data.append([key, val["type"], ports[0], val["policy_desc"], stage]) + data.append([key, val["type"], ports[0], val["policy_desc"], stage, status]) if len(ports) > 1: for port in ports[1:]: - data.append(["", "", port, "", ""]) + data.append(["", "", port, "", "", ""]) print(tabulate.tabulate(data, headers=header, tablefmt="simple", missingval="")) @@ -873,7 +917,7 @@ def show_rule(self, table_name, rule_id): :param rule_id: Optional. ACL rule name. Filter rule by specified rule name. 
:return: """ - header = ("Table", "Rule", "Priority", "Action", "Match") + header = ("Table", "Rule", "Priority", "Action", "Match", "Status") def pop_priority(val): priority = "N/A" @@ -919,11 +963,16 @@ def pop_matches(val): priority = pop_priority(val) action = pop_action(val) matches = pop_matches(val) - - rule_data = [[tname, rid, priority, action, matches[0]]] + # Get ACL rule status from STATE_DB + status_key = (tname, rid) + if status_key in self.acl_rule_status: + status = self.acl_rule_status[status_key]['status'] + else: + status = "N/A" + rule_data = [[tname, rid, priority, action, matches[0], status]] if len(matches) > 1: for m in matches[1:]: - rule_data.append(["", "", "", "", m]) + rule_data.append(["", "", "", "", m, ""]) raw_data.append([priority, rule_data]) diff --git a/tests/aclshow_test.py b/tests/aclshow_test.py index 90fe46f683..0abe509aad 100644 --- a/tests/aclshow_test.py +++ b/tests/aclshow_test.py @@ -46,6 +46,7 @@ RULE_9 DATAACL 9991 901 900 RULE_10 DATAACL 9989 1001 1000 DEFAULT_RULE DATAACL 1 2 1 +RULE_1 DATAACL_5 9999 N/A N/A RULE_NO_COUNTER DATAACL_NO_COUNTER 9995 N/A N/A RULE_6 EVERFLOW 9994 601 600 RULE_08 EVERFLOW 9992 0 0 @@ -89,8 +90,8 @@ # Expected output for aclshow -r RULE_4,RULE_6 -vv rule4_rule6_verbose_output = '' + \ """Reading ACL info... -Total number of ACL Tables: 11 -Total number of ACL Rules: 20 +Total number of ACL Tables: 12 +Total number of ACL Rules: 21 RULE NAME TABLE NAME PRIO PACKETS COUNT BYTES COUNT ----------- ------------ ------ --------------- ------------- @@ -136,6 +137,7 @@ RULE_9 DATAACL 9991 0 0 RULE_10 DATAACL 9989 0 0 DEFAULT_RULE DATAACL 1 0 0 +RULE_1 DATAACL_5 9999 N/A N/A RULE_NO_COUNTER DATAACL_NO_COUNTER 9995 N/A N/A RULE_6 EVERFLOW 9994 0 0 RULE_08 EVERFLOW 9992 0 0 @@ -161,6 +163,7 @@ RULE_9 DATAACL 9991 0 0 RULE_10 DATAACL 9989 0 0 DEFAULT_RULE DATAACL 1 0 0 +RULE_1 DATAACL_5 9999 N/A N/A RULE_NO_COUNTER DATAACL_NO_COUNTER 9995 100 100 RULE_6 EVERFLOW 9994 0 0 RULE_08 EVERFLOW 9992 0 0 diff --git a/tests/mock_tables/asic0/config_db.json b/tests/mock_tables/asic0/config_db.json index 66b51f4ccb..de20194a64 100644 --- a/tests/mock_tables/asic0/config_db.json +++ b/tests/mock_tables/asic0/config_db.json @@ -246,5 +246,16 @@ "holdtime": "10", "asn": "65200", "keepalive": "3" + }, + "ACL_RULE|DATAACL_5|RULE_1": { + "IP_PROTOCOL": "126", + "PACKET_ACTION": "FORWARD", + "PRIORITY": "9999" + }, + "ACL_TABLE|DATAACL_5": { + "policy_desc": "DATAACL_5", + "ports@": "Ethernet124", + "type": "L3", + "stage": "ingress" } } diff --git a/tests/mock_tables/asic0/state_db.json b/tests/mock_tables/asic0/state_db.json index 2756404971..559af04826 100644 --- a/tests/mock_tables/asic0/state_db.json +++ b/tests/mock_tables/asic0/state_db.json @@ -286,5 +286,11 @@ "STATUS": "up", "REMOTE_MOD": "0", "REMOTE_PORT": "93" + }, + "ACL_TABLE_TABLE|DATAACL_5" : { + "status": "Active" + }, + "ACL_RULE_TABLE|DATAACL_5|RULE_1" : { + "status": "Active" } } diff --git a/tests/mock_tables/asic2/config_db.json b/tests/mock_tables/asic2/config_db.json index 532d85bcbb..bfda10a0d5 100644 --- a/tests/mock_tables/asic2/config_db.json +++ b/tests/mock_tables/asic2/config_db.json @@ -124,5 +124,16 @@ "state": "disabled", "auto_restart": "disabled", "high_mem_alert": "disabled" + }, + "ACL_RULE|DATAACL_5|RULE_1": { + "IP_PROTOCOL": "126", + "PACKET_ACTION": "FORWARD", + "PRIORITY": "9999" + }, + "ACL_TABLE|DATAACL_5": { + "policy_desc": "DATAACL_5", + "ports@": "Ethernet124", + "type": "L3", + "stage": "ingress" } } diff --git 
a/tests/mock_tables/asic2/state_db.json b/tests/mock_tables/asic2/state_db.json index f6e3eee4cf..c6c8c88898 100644 --- a/tests/mock_tables/asic2/state_db.json +++ b/tests/mock_tables/asic2/state_db.json @@ -207,5 +207,11 @@ "speed_target": "50", "led_status": "green", "timestamp": "20200813 01:32:30" + }, + "ACL_TABLE_TABLE|DATAACL_5" : { + "status": "Active" + }, + "ACL_RULE_TABLE|DATAACL_5|RULE_1" : { + "status": "Active" } } diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index 51af58e86d..3a2b681a6e 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -496,6 +496,11 @@ "PACKET_ACTION": "FORWARD", "PRIORITY": "9995" }, + "ACL_RULE|DATAACL_5|RULE_1": { + "IP_PROTOCOL": "126", + "PACKET_ACTION": "FORWARD", + "PRIORITY": "9999" + }, "ACL_TABLE|NULL_ROUTE_V4": { "policy_desc": "DATAACL", "ports@": "PortChannel0002,PortChannel0005,PortChannel0008,PortChannel0011,PortChannel0014,PortChannel0017,PortChannel0020,PortChannel0023", @@ -533,6 +538,12 @@ "type": "L3V6", "stage": "egress" }, + "ACL_TABLE|DATAACL_5": { + "policy_desc": "DATAACL_5", + "ports@": "Ethernet124", + "type": "L3", + "stage": "ingress" + }, "ACL_TABLE|EVERFLOW": { "policy_desc": "EVERFLOW", "ports@": "PortChannel0002,PortChannel0005,PortChannel0008,PortChannel0011,PortChannel0014,PortChannel0017,PortChannel0020,PortChannel0023,Ethernet100,Ethernet104,Ethernet92,Ethernet96,Ethernet84,Ethernet88,Ethernet76,Ethernet80,Ethernet108,Ethernet112,Ethernet64,Ethernet120,Ethernet116,Ethernet124,Ethernet72,Ethernet68", diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json index 4cdda56bc8..cd1a194ba8 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -1210,5 +1210,11 @@ "STATUS": "up", "REMOTE_MOD": "0", "REMOTE_PORT": "93" + }, + "ACL_TABLE_TABLE|DATAACL_5" : { + "status": "Active" + }, + "ACL_RULE_TABLE|DATAACL_5|RULE_1" : { + "status": "Active" } } diff --git a/tests/show_acl_test.py b/tests/show_acl_test.py new file mode 100644 index 0000000000..1b2cdc60a9 --- /dev/null +++ b/tests/show_acl_test.py @@ -0,0 +1,95 @@ +import os +import pytest +from click.testing import CliRunner + +import acl_loader.main as acl_loader_show +from acl_loader import * +from acl_loader.main import * +from importlib import reload + +root_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(root_path) +scripts_path = os.path.join(modules_path, "scripts") + + +@pytest.fixture() +def setup_teardown_single_asic(): + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + yield + os.environ["UTILITIES_UNIT_TESTING"] = "0" + + +@pytest.fixture(scope="class") +def setup_teardown_multi_asic(): + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + from .mock_tables import mock_multi_asic_3_asics + reload(mock_multi_asic_3_asics) + from .mock_tables import dbconnector + dbconnector.load_namespace_config() + yield + os.environ["UTILITIES_UNIT_TESTING"] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + from .mock_tables import mock_single_asic + reload(mock_single_asic) + + +class TestShowACLSingleASIC(object): + def test_show_acl_table(self, setup_teardown_single_asic): + runner = CliRunner() + aclloader = AclLoader() + context = { + "acl_loader": aclloader + } + result = 
runner.invoke(acl_loader_show.cli.commands['show'].commands['table'], ['DATAACL_5'], obj=context)
+        assert result.exit_code == 0
+        # We only care about the third line, which contains the 'Active'
+        result_top = result.output.split('\n')[2]
+        expected_output = "DATAACL_5 L3 Ethernet124 DATAACL_5 ingress Active"
+        assert result_top == expected_output
+
+    def test_show_acl_rule(self, setup_teardown_single_asic):
+        runner = CliRunner()
+        aclloader = AclLoader()
+        context = {
+            "acl_loader": aclloader
+        }
+        result = runner.invoke(acl_loader_show.cli.commands['show'].commands['rule'], ['DATAACL_5'], obj=context)
+        assert result.exit_code == 0
+        # We only care about the third line, which contains the 'Active'
+        result_top = result.output.split('\n')[2]
+        expected_output = "DATAACL_5 RULE_1 9999 FORWARD IP_PROTOCOL: 126 Active"
+        assert result_top == expected_output
+
+
+class TestShowACLMultiASIC(object):
+    def test_show_acl_table(self, setup_teardown_multi_asic):
+        runner = CliRunner()
+        aclloader = AclLoader()
+        context = {
+            "acl_loader": aclloader
+        }
+        result = runner.invoke(acl_loader_show.cli.commands['show'].commands['table'], ['DATAACL_5'], obj=context)
+        assert result.exit_code == 0
+        # We only care about the third line, which contains the 'Active'
+        result_top = result.output.split('\n')[2]
+        expected_output = "DATAACL_5 L3 Ethernet124 DATAACL_5 ingress {'asic0': 'Active', 'asic2': 'Active'}"
+        assert result_top == expected_output
+
+    def test_show_acl_rule(self, setup_teardown_multi_asic):
+        runner = CliRunner()
+        aclloader = AclLoader()
+        context = {
+            "acl_loader": aclloader
+        }
+        result = runner.invoke(acl_loader_show.cli.commands['show'].commands['rule'], ['DATAACL_5'], obj=context)
+        assert result.exit_code == 0
+        # We only care about the third line, which contains the 'Active'
+        result_top = result.output.split('\n')[2]
+        expected_output = "DATAACL_5 RULE_1 9999 FORWARD IP_PROTOCOL: 126 {'asic0': 'Active', 'asic2': 'Active'}"
+        assert result_top == expected_output

From 338d1c05bf067447bdc29013b419d2c51da5c086 Mon Sep 17 00:00:00 2001
From: Liu Shilong
Date: Thu, 9 Mar 2023 06:57:05 +0800
Subject: [PATCH 36/66] Check SONiC dependencies before installation. (#2716)

#### What I did
SONiC related packages shouldn't be installed from PyPI. It is a security compliance requirement. Check SONiC related packages when using setup.py.
---
 setup.py | 31 ++++++++++++++++++++++++++-----
 1 file changed, 26 insertions(+), 5 deletions(-)

diff --git a/setup.py b/setup.py
index 6af8e3394a..f071797280 100644
--- a/setup.py
+++ b/setup.py
@@ -5,9 +5,34 @@
 # under scripts/. Consider stop using scripts and use console_scripts instead
 #
 # https://stackoverflow.com/questions/18787036/difference-between-entry-points-console-scripts-and-scripts-in-setup-py
+from __future__ import print_function
+import sys
 import fastentrypoints

 from setuptools import setup
+import pkg_resources
+from packaging import version
+
+# sonic_dependencies, version requirement only supports '>='
+sonic_dependencies = [
+    'sonic-config-engine',
+    'sonic-platform-common',
+    'sonic-py-common',
+    'sonic-yang-mgmt',
+]
+
+for package in sonic_dependencies:
+    try:
+        package_dist = pkg_resources.get_distribution(package.split(">=")[0])
+    except pkg_resources.DistributionNotFound:
+        print(package + " is not found!", file=sys.stderr)
+        print("Please build and install SONiC python wheels dependencies from sonic-buildimage", file=sys.stderr)
+        exit(1)
+    if ">=" in package:
+        if version.parse(package_dist.version) >= version.parse(package.split(">=")[1]):
+            continue
+        print(package + " version not match!", file=sys.stderr)
+        exit(1)

 setup(
     name='sonic-utilities',
@@ -211,16 +236,12 @@
         'prettyprinter>=0.18.0',
         'pyroute2>=0.5.14, <0.6.1',
         'requests>=2.25.0',
-        'sonic-config-engine',
-        'sonic-platform-common',
-        'sonic-py-common',
-        'sonic-yang-mgmt',
         'tabulate==0.8.2',
         'toposort==1.6',
         'www-authenticate==0.9.2',
         'xmltodict==0.12.0',
         'lazy-object-proxy',
-    ],
+    ] + sonic_dependencies,
     setup_requires= [
         'pytest-runner',
         'wheel'

From 9f83ace943bc4938a0bfc75239ecdac8600e5b56 Mon Sep 17 00:00:00 2001
From: jingwenxie
Date: Thu, 9 Mar 2023 09:12:19 +0800
Subject: [PATCH 37/66] [GCU] Add vlanintf-validator (#2697)

What I did
Fix a bug in GCU VLAN interface modification: ip neigh flush dev should be called after removing an interface IP. The fix follows the config CLI's existing behavior.

How I did it
Added a vlanintf service validator to check whether the extra ip neigh flush step is needed.

How to verify it
GCU E2E test in dualtor testbed.
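To see concretely what the new validator has to handle, here is a minimal sketch of the key handling (hypothetical config values; the actual validator is in the diff below). VLAN_INTERFACE holds both plain interface keys like "Vlan1000" and IP assignment keys like "Vlan1000|192.168.0.1/21", and only deleted IP assignment keys call for a neighbor flush:

# Assumed before/after VLAN_INTERFACE tables, for illustration only.
old = {"Vlan1000": {}, "Vlan1000|192.168.0.1/21": {}}
upd = {"Vlan1000": {}, "Vlan1000|192.168.0.2/21": {}}

# Keep only the two-part keys, split into (iface, iface_ip) tuples.
old_keys = {tuple(k.split("|")) for k in old if len(k.split("|")) == 2}
upd_keys = {tuple(k.split("|")) for k in upd if len(k.split("|")) == 2}

for iface, iface_ip in old_keys - upd_keys:
    # The same flush the config CLI performs when an interface IP is removed.
    print(f"ip neigh flush dev {iface} {iface_ip}")
    # -> ip neigh flush dev Vlan1000 192.168.0.1/21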
--- .../gcu_services_validator.conf.json | 6 +++ generic_config_updater/services_validator.py | 21 ++++++++ .../service_validator_test.py | 51 ++++++++++++++++++- 3 files changed, 77 insertions(+), 1 deletion(-) diff --git a/generic_config_updater/gcu_services_validator.conf.json b/generic_config_updater/gcu_services_validator.conf.json index 907b5a6863..852b587286 100644 --- a/generic_config_updater/gcu_services_validator.conf.json +++ b/generic_config_updater/gcu_services_validator.conf.json @@ -48,6 +48,9 @@ }, "NTP_SERVER": { "services_to_validate": [ "ntp-service" ] + }, + "VLAN_INTERFACE": { + "services_to_validate": [ "vlanintf-service" ] } }, "services": { @@ -71,6 +74,9 @@ }, "ntp-service": { "validate_commands": [ "generic_config_updater.services_validator.ntp_validator" ] + }, + "vlanintf-service": { + "validate_commands": [ "generic_config_updater.services_validator.vlanintf_validator" ] } } } diff --git a/generic_config_updater/services_validator.py b/generic_config_updater/services_validator.py index 44a9e095eb..5d8c1f0d51 100644 --- a/generic_config_updater/services_validator.py +++ b/generic_config_updater/services_validator.py @@ -101,3 +101,24 @@ def caclmgrd_validator(old_config, upd_config, keys): def ntp_validator(old_config, upd_config, keys): return _service_restart("ntp-config") + +def vlanintf_validator(old_config, upd_config, keys): + old_vlan_intf = old_config.get("VLAN_INTERFACE", {}) + upd_vlan_intf = upd_config.get("VLAN_INTERFACE", {}) + + # Get the tuple with format (iface, iface_ip) then check deleted tuple + # Example: + # old_keys = [("Vlan1000", "192.168.0.1")] + # upd_keys = [("Vlan1000", "192.168.0.2")] + old_keys = [ tuple(key.split("|")) + for key in old_vlan_intf if len(key.split("|")) == 2 ] + upd_keys = [ tuple(key.split("|")) + for key in upd_vlan_intf if len(key.split("|")) == 2 ] + + deleted_keys = list(set(old_keys) - set(upd_keys)) + for key in deleted_keys: + iface, iface_ip = key + rc = os.system(f"ip neigh flush dev {iface} {iface_ip}") + if not rc: + return False + return True diff --git a/tests/generic_config_updater/service_validator_test.py b/tests/generic_config_updater/service_validator_test.py index 2f51771d33..f14a3ad7b0 100644 --- a/tests/generic_config_updater/service_validator_test.py +++ b/tests/generic_config_updater/service_validator_test.py @@ -6,7 +6,7 @@ from collections import defaultdict from unittest.mock import patch -from generic_config_updater.services_validator import vlan_validator, rsyslog_validator, caclmgrd_validator +from generic_config_updater.services_validator import vlan_validator, rsyslog_validator, caclmgrd_validator, vlanintf_validator import generic_config_updater.gu_common @@ -152,6 +152,46 @@ def mock_time_sleep_call(sleep_time): { "cmd": "systemctl restart rsyslog", "rc": 1 }, # restart again; fails ] +test_vlanintf_data = [ + { "old": {}, "upd": {}, "cmd": "" }, + { + "old": { "VLAN_INTERFACE": { + "Vlan1000": {}, + "Vlan1000|192.168.0.1/21": {} } }, + "upd": { "VLAN_INTERFACE": { + "Vlan1000": {}, + "Vlan1000|192.168.0.1/21": {} } }, + "cmd": "" + }, + { + "old": { "VLAN_INTERFACE": { + "Vlan1000": {}, + "Vlan1000|192.168.0.1/21": {} } }, + "upd": { "VLAN_INTERFACE": { + "Vlan1000": {}, + "Vlan1000|192.168.0.2/21": {} } }, + "cmd": "ip neigh flush dev Vlan1000 192.168.0.1/21" + }, + { + "old": { "VLAN_INTERFACE": { + "Vlan1000": {}, + "Vlan1000|192.168.0.1/21": {} } }, + "upd": { "VLAN_INTERFACE": { + "Vlan1000": {}, + "Vlan1000|192.168.0.1/21": {}, + "Vlan1000|192.168.0.2/21": {} } }, + "cmd": "" 
+ }, + { + "old": { "VLAN_INTERFACE": { + "Vlan1000": {}, + "Vlan1000|192.168.0.1/21": {} } }, + "upd": {}, + "cmd": "ip neigh flush dev Vlan1000 192.168.0.1/21" + } + ] + + class TestServiceValidator(unittest.TestCase): @patch("generic_config_updater.change_applier.os.system") @@ -177,6 +217,15 @@ def test_change_apply_os_system(self, mock_os_sys): rc = rsyslog_validator("", "", "") assert not rc, "rsyslog_validator expected to fail" + os_system_calls = [] + os_system_call_index = 0 + for entry in test_vlanintf_data: + if entry["cmd"]: + os_system_calls.append({"cmd": entry["cmd"], "rc": 0 }) + msg = "case failed: {}".format(str(entry)) + + vlanintf_validator(entry["old"], entry["upd"], None) + @patch("generic_config_updater.services_validator.time.sleep") def test_change_apply_time_sleep(self, mock_time_sleep): global time_sleep_calls, time_sleep_call_index From 7a604c51671a85470db3d15aaa83b6b39a01531a Mon Sep 17 00:00:00 2001 From: jhli-cisco <93410383+jhli-cisco@users.noreply.github.com> Date: Wed, 8 Mar 2023 18:03:50 -0800 Subject: [PATCH 38/66] update fast-reboot (#2728) --- scripts/fast-reboot | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/scripts/fast-reboot b/scripts/fast-reboot index defde666ee..426c7b2727 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -23,6 +23,7 @@ PLATFORM=$(sonic-cfggen -H -v DEVICE_METADATA.localhost.platform) PLATFORM_PLUGIN="${REBOOT_TYPE}_plugin" LOG_SSD_HEALTH="/usr/local/bin/log_ssd_health" PLATFORM_FWUTIL_AU_REBOOT_HANDLE="platform_fw_au_reboot_handle" +PLATFORM_REBOOT_PRE_CHECK="platform_reboot_pre_check" SSD_FW_UPDATE="ssd-fw-upgrade" SSD_FW_UPDATE_BOOT_OPTION=no TAG_LATEST=yes @@ -179,6 +180,10 @@ function initialize_pre_shutdown() function request_pre_shutdown() { + if [ -x ${DEVPATH}/${PLATFORM}/${PLATFORM_REBOOT_PRE_CHECK} ]; then + debug "Requesting platform reboot pre-check ..." + ${DEVPATH}/${PLATFORM}/${PLATFORM_REBOOT_PRE_CHECK} ${REBOOT_TYPE} + fi debug "Requesting pre-shutdown ..." STATE=$(timeout 5s docker exec syncd /usr/bin/syncd_request_shutdown --pre &> /dev/null; if [[ $? == 124 ]]; then echo "timed out"; fi) if [[ x"${STATE}" == x"timed out" ]]; then From ff6883233a3c86e993add50453c3152745eaff0d Mon Sep 17 00:00:00 2001 From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com> Date: Fri, 10 Mar 2023 04:07:25 +0200 Subject: [PATCH 39/66] [route_check] fix IPv6 address handling (#2722) *In case user has configured an IPv6 address on an interface in CONFIG DB in non simplified form like 2000:31:0:0::1/64 it is present in a simplified form in ASIC_DB. This leads to route_check failure since it just compares strings. --- scripts/route_check.py | 5 +++-- tests/route_check_test_data.py | 18 ++++++++++++++++++ 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/scripts/route_check.py b/scripts/route_check.py index 4db3f399a2..c832b2c6ea 100755 --- a/scripts/route_check.py +++ b/scripts/route_check.py @@ -47,6 +47,7 @@ import traceback import subprocess +from ipaddress import ip_network from swsscommon import swsscommon from utilities_common import chassis @@ -145,7 +146,7 @@ def add_prefix(ip): ip = ip + PREFIX_SEPARATOR + "32" else: ip = ip + PREFIX_SEPARATOR + "128" - return ip + return str(ip_network(ip)) def add_prefix_ifnot(ip): @@ -154,7 +155,7 @@ def add_prefix_ifnot(ip): :param ip: IP to add prefix as string. 
:return ip with prefix
     """
-    return ip if ip.find(PREFIX_SEPARATOR) != -1 else add_prefix(ip)
+    return str(ip_network(ip)) if ip.find(PREFIX_SEPARATOR) != -1 else add_prefix(ip)
 
 
 def is_local(ip):
diff --git a/tests/route_check_test_data.py b/tests/route_check_test_data.py
index b8ba9c521a..7ed1eee41f 100644
--- a/tests/route_check_test_data.py
+++ b/tests/route_check_test_data.py
@@ -462,4 +462,22 @@
         },
         RET: -1,
     },
+    "10": {
+        DESCR: "basic good one with IPv6 address",
+        ARGS: "route_check -m INFO -i 1000",
+        PRE: {
+            APPL_DB: {
+                ROUTE_TABLE: {
+                },
+                INTF_TABLE: {
+                    "PortChannel1013:2000:31:0:0::1/64": {},
+                }
+            },
+            ASIC_DB: {
+                RT_ENTRY_TABLE: {
+                    RT_ENTRY_KEY_PREFIX + "2000:31::1/128" + RT_ENTRY_KEY_SUFFIX: {},
+                }
+            }
+        }
+    },
 }

From e6179afa8771bfa1643243f7ef166dd1dc256b24 Mon Sep 17 00:00:00 2001
From: Aryeh Feigin <101218333+arfeigin@users.noreply.github.com>
Date: Fri, 10 Mar 2023 18:41:30 +0200
Subject: [PATCH 40/66] Remove timer from FAST_REBOOT STATE_DB entry and use
 finalizer (#2621)

This should come along with the sonic-buildimage PR
(sonic-net/sonic-buildimage#13484) implementing the fast-reboot finalizing
logic in the finalize-warmboot script, and with the other submodule PRs
utilizing the change:
sonic-net/sonic-swss-common#742
sonic-net/sonic-platform-daemons#335
sonic-net/sonic-sairedis#1196
This set of PRs solves issue sonic-net/sonic-buildimage#13251.

What I did
Remove the timer used to clear the fast-reboot entry from STATE_DB; instead
it will be cleared by the fast-reboot finalize function implemented inside
the finalize-warmboot script (which will be invoked since fast-reboot uses
the warm-reboot infrastructure). In addition, instead of having "1" as the
value for the fast-reboot entry in STATE_DB and deleting it when done, the
entry is now set to enable/disable according to the context, and all scripts
reading this entry are modified to the new value options.

How I did it
Removed the timer usage in the fast-reboot script and added the fast-reboot
finalize logic to warm-reboot in the linked PR. Use "enable/disable" instead
of "1" as the entry value.

How to verify it
Run fast-reboot and check that the STATE_DB entry for fast-reboot is deleted
after finalizing fast-reboot and not by an expiring timer.

---
 scripts/db_migrator.py                        | 23 +++++++++++--
 scripts/fast-reboot                           |  6 ++--
 .../templates/service_mgmt.sh.j2              |  3 +-
 .../state_db/fast_reboot_expected.json        |  5 +++
 .../state_db/fast_reboot_input.json           |  2 ++
 tests/db_migrator_test.py                     | 32 +++++++++++++++++++
 6 files changed, 65 insertions(+), 6 deletions(-)
 create mode 100644 tests/db_migrator_input/state_db/fast_reboot_expected.json
 create mode 100644 tests/db_migrator_input/state_db/fast_reboot_input.json

diff --git a/scripts/db_migrator.py b/scripts/db_migrator.py
index 5c946bbb9f..b08c397971 100755
--- a/scripts/db_migrator.py
+++ b/scripts/db_migrator.py
@@ -45,7 +45,7 @@ def __init__(self, namespace, socket=None):
         none-zero values.
         build: sequentially increase within a minor version domain.
         """
-        self.CURRENT_VERSION = 'version_4_0_0'
+        self.CURRENT_VERSION = 'version_4_0_1'
 
         self.TABLE_NAME = 'VERSIONS'
         self.TABLE_KEY = 'DATABASE'
@@ -867,9 +867,28 @@ def version_3_0_6(self):
 
     def version_4_0_0(self):
         """
         Version 4_0_0.
- This is the latest version for master branch """ log.log_info('Handling version_4_0_0') + # Update state-db fast-reboot entry to enable if set to enable fast-reboot finalizer when using upgrade with fast-reboot + # since upgrading from previous version FAST_REBOOT table will be deleted when the timer will expire. + # reading FAST_REBOOT table can't be done with stateDB.get as it uses hget behind the scenes and the table structure is + # not using hash and won't work. + # FAST_REBOOT table exists only if fast-reboot was triggered. + keys = self.stateDB.keys(self.stateDB.STATE_DB, "FAST_REBOOT") + if keys is not None: + enable_state = 'true' + else: + enable_state = 'false' + self.stateDB.set(self.stateDB.STATE_DB, 'FAST_RESTART_ENABLE_TABLE|system', 'enable', enable_state) + self.set_version('version_4_0_1') + return 'version_4_0_1' + + def version_4_0_1(self): + """ + Version 4_0_1. + This is the latest version for master branch + """ + log.log_info('Handling version_4_0_1') return None def get_version(self): diff --git a/scripts/fast-reboot b/scripts/fast-reboot index 426c7b2727..fb162ae180 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -149,7 +149,7 @@ function clear_boot() #clear_fast_boot if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then - sonic-db-cli STATE_DB DEL "FAST_REBOOT|system" &>/dev/null || /bin/true + sonic-db-cli STATE_DB HSET "FAST_RESTART_ENABLE_TABLE|system" "enable" "false" &>/dev/null || /bin/true fi } @@ -270,7 +270,7 @@ function backup_database() and not string.match(k, 'WARM_RESTART_ENABLE_TABLE|') \ and not string.match(k, 'VXLAN_TUNNEL_TABLE|') \ and not string.match(k, 'BUFFER_MAX_PARAM_TABLE|') \ - and not string.match(k, 'FAST_REBOOT|') then + and not string.match(k, 'FAST_RESTART_ENABLE_TABLE|') then redis.call('del', k) end end @@ -549,7 +549,7 @@ case "$REBOOT_TYPE" in check_warm_restart_in_progress BOOT_TYPE_ARG=$REBOOT_TYPE trap clear_boot EXIT HUP INT QUIT TERM KILL ABRT ALRM - sonic-db-cli STATE_DB SET "FAST_REBOOT|system" "1" "EX" "210" &>/dev/null + sonic-db-cli STATE_DB HSET "FAST_RESTART_ENABLE_TABLE|system" "enable" "true" &>/dev/null config warm_restart enable system ;; "warm-reboot") diff --git a/sonic-utilities-data/templates/service_mgmt.sh.j2 b/sonic-utilities-data/templates/service_mgmt.sh.j2 index d206049015..5c8f4e4974 100644 --- a/sonic-utilities-data/templates/service_mgmt.sh.j2 +++ b/sonic-utilities-data/templates/service_mgmt.sh.j2 @@ -51,7 +51,8 @@ function check_warm_boot() function check_fast_boot() { - if [[ $($SONIC_DB_CLI STATE_DB GET "FAST_REBOOT|system") == "1" ]]; then + SYSTEM_FAST_REBOOT=`$SONIC_DB_CLI STATE_DB hget "FAST_RESTART_ENABLE_TABLE|system" enable` + if [[ x"${SYSTEM_FAST_REBOOT}" == x"true" ]]; then FAST_BOOT="true" else FAST_BOOT="false" diff --git a/tests/db_migrator_input/state_db/fast_reboot_expected.json b/tests/db_migrator_input/state_db/fast_reboot_expected.json new file mode 100644 index 0000000000..e3a7a5fa14 --- /dev/null +++ b/tests/db_migrator_input/state_db/fast_reboot_expected.json @@ -0,0 +1,5 @@ +{ + "FAST_RESTART_ENABLE_TABLE|system": { + "enable": "false" + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/state_db/fast_reboot_input.json b/tests/db_migrator_input/state_db/fast_reboot_input.json new file mode 100644 index 0000000000..7a73a41bfd --- /dev/null +++ b/tests/db_migrator_input/state_db/fast_reboot_input.json @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/tests/db_migrator_test.py b/tests/db_migrator_test.py index b5c70fce8e..e9c184d160 
100644
--- a/tests/db_migrator_test.py
+++ b/tests/db_migrator_test.py
@@ -451,6 +451,38 @@ def test_move_logger_tables_in_warm_upgrade(self):
         diff = DeepDiff(resulting_table, expected_table, ignore_order=True)
         assert not diff
 
+class TestFastRebootTableModification(object):
+    @classmethod
+    def setup_class(cls):
+        os.environ['UTILITIES_UNIT_TESTING'] = "2"
+
+    @classmethod
+    def teardown_class(cls):
+        os.environ['UTILITIES_UNIT_TESTING'] = "0"
+        dbconnector.dedicated_dbs['STATE_DB'] = None
+
+    def mock_dedicated_state_db(self):
+        dbconnector.dedicated_dbs['STATE_DB'] = os.path.join(mock_db_path, 'state_db')
+
+    def test_rename_fast_reboot_table_check_enable(self):
+        device_info.get_sonic_version_info = get_sonic_version_info_mlnx
+        dbconnector.dedicated_dbs['STATE_DB'] = os.path.join(mock_db_path, 'state_db', 'fast_reboot_input')
+        dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'empty-config-input')
+
+        import db_migrator
+        dbmgtr = db_migrator.DBMigrator(None)
+        dbmgtr.migrate()
+
+        dbconnector.dedicated_dbs['STATE_DB'] = os.path.join(mock_db_path, 'state_db', 'fast_reboot_expected')
+        expected_db = SonicV2Connector(host='127.0.0.1')
+        expected_db.connect(expected_db.STATE_DB)
+
+        resulting_table = dbmgtr.stateDB.get_all(dbmgtr.stateDB.STATE_DB, 'FAST_RESTART_ENABLE_TABLE|system')
+        expected_table = expected_db.get_all(expected_db.STATE_DB, 'FAST_RESTART_ENABLE_TABLE|system')
+
+        diff = DeepDiff(resulting_table, expected_table, ignore_order=True)
+        assert not diff
+
 class TestWarmUpgrade_to_2_0_2(object):
     @classmethod
     def setup_class(cls):

From f7f783bce46a4383260e229dea90834672f03b6f Mon Sep 17 00:00:00 2001
From: Stephen Sun <5379172+stephenxs@users.noreply.github.com>
Date: Tue, 14 Mar 2023 21:01:52 +0800
Subject: [PATCH 41/66] Enhance the logic to wait for all buffer tables to be
 removed in _clear_qos (#2720)

- What I did
This is an enhancement of PR #2503.

- How I did it
On top of waiting for BUFFER_POOL_TABLE to be cleared from APPL_DB, we need
to wait for KEY_SET and DEL_SET as well. KEY_SET and DEL_SET are designed to
accommodate the APPL_DB entries that were updated by manager daemons but
have not yet been handled by the orchagent. In this case, even if the buffer
tables are empty, entries in KEY_SET or DEL_SET will reach the buffer tables
later on, so we need to wait for the key set tables as well.
Do not delay for the traditional buffer manager, because it does not remove
any buffer table.
Provide a CLI option to print a detailed message if any table items still
exist.

- How to verify it
Manual test and unit test.

- Previous command output (if the output of a command-line utility has changed)
Running command: /usr/local/bin/sonic-cfggen -d --write-to-db -t /usr/share/sonic/device/x86_64-mlnx_msn2410-r0/ACS-MSN2410/buffers_dynamic.json.j2,config-db -t /usr/share/sonic/device/x86_64-mlnx_msn2410-r0/ACS-MSN2410/qos.json.j2,config-db -y /etc/sonic/sonic_version.yml

- New command output (if the output of a command-line utility has changed)
New output appears only with the --verbose option; without it, the output is
unchanged:
admin@mtbc-sonic-01-2410:~$ sudo config qos reload --verbose Some entries matching BUFFER_*_TABLE:* still exist: BUFFER_QUEUE_TABLE:Ethernet108:0-2 Some entries matching BUFFER_*_SET still exist: BUFFER_PG_TABLE_KEY_SET Some entries matching BUFFER_*_TABLE:* still exist: BUFFER_QUEUE_TABLE:Ethernet108:0-2 Some entries matching BUFFER_*_SET still exist: BUFFER_PG_TABLE_KEY_SET Some entries matching BUFFER_*_TABLE:* still exist: BUFFER_QUEUE_TABLE:Ethernet108:0-2 Running command: /usr/local/bin/sonic-cfggen -d --write-to-db -t /usr/share/sonic/device/x86_64-mlnx_msn2410-r0/ACS-MSN2410/buffers_dynamic.json.j2,config-db -t /usr/share/sonic/device/x86_64-mlnx_msn2410-r0/ACS-MSN2410/qos.json.j2,config-db -y /etc/sonic/sonic_version.yml --- config/main.py | 33 +++++++++++++++++++++------------ tests/config_test.py | 10 ++++++++-- 2 files changed, 29 insertions(+), 14 deletions(-) diff --git a/config/main.py b/config/main.py index 384e6f9f68..d14d392355 100644 --- a/config/main.py +++ b/config/main.py @@ -743,24 +743,28 @@ def storm_control_delete_entry(port_name, storm_type): return True -def _wait_until_clear(table, interval=0.5, timeout=30): +def _wait_until_clear(tables, interval=0.5, timeout=30, verbose=False): start = time.time() empty = False app_db = SonicV2Connector(host='127.0.0.1') app_db.connect(app_db.APPL_DB) while not empty and time.time() - start < timeout: - current_profiles = app_db.keys(app_db.APPL_DB, table) - if not current_profiles: - empty = True - else: - time.sleep(interval) + non_empty_table_count = 0 + for table in tables: + keys = app_db.keys(app_db.APPL_DB, table) + if keys: + non_empty_table_count += 1 + if verbose: + click.echo("Some entries matching {} still exist: {}".format(table, keys[0])) + time.sleep(interval) + empty = (non_empty_table_count == 0) if not empty: click.echo("Operation not completed successfully, please save and reload configuration.") return empty -def _clear_qos(delay = False): +def _clear_qos(delay=False, verbose=False): QOS_TABLE_NAMES = [ 'PORT_QOS_MAP', 'QUEUE', @@ -797,7 +801,10 @@ def _clear_qos(delay = False): for qos_table in QOS_TABLE_NAMES: config_db.delete_table(qos_table) if delay: - _wait_until_clear("BUFFER_POOL_TABLE:*",interval=0.5, timeout=30) + device_metadata = config_db.get_entry('DEVICE_METADATA', 'localhost') + # Traditional buffer manager do not remove buffer tables in any case, no need to wait. 
+ timeout = 120 if device_metadata and device_metadata.get('buffer_model') == 'dynamic' else 0 + _wait_until_clear(["BUFFER_*_TABLE:*", "BUFFER_*_SET"], interval=0.5, timeout=timeout, verbose=verbose) def _get_sonic_generated_services(num_asic): if not os.path.isfile(SONIC_GENERATED_SERVICE_PATH): @@ -2644,10 +2651,11 @@ def qos(ctx): pass @qos.command('clear') -def clear(): +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def clear(verbose): """Clear QoS configuration""" log.log_info("'qos clear' executing...") - _clear_qos() + _clear_qos(verbose=verbose) def _update_buffer_calculation_model(config_db, model): """Update the buffer calculation model into CONFIG_DB""" @@ -2664,6 +2672,7 @@ def _update_buffer_calculation_model(config_db, model): @click.option('--ports', is_flag=False, required=False, help="List of ports that needs to be updated") @click.option('--no-dynamic-buffer', is_flag=True, help="Disable dynamic buffer calculation") @click.option('--no-delay', is_flag=True, hidden=True) +@click.option('--verbose', is_flag=True, help="Enable verbose output") @click.option( '--json-data', type=click.STRING, help="json string with additional data, valid with --dry-run option" @@ -2672,7 +2681,7 @@ def _update_buffer_calculation_model(config_db, model): '--dry_run', type=click.STRING, help="Dry run, writes config to the given file" ) -def reload(ctx, no_dynamic_buffer, no_delay, dry_run, json_data, ports): +def reload(ctx, no_dynamic_buffer, no_delay, dry_run, json_data, ports, verbose): """Reload QoS configuration""" if ports: log.log_info("'qos reload --ports {}' executing...".format(ports)) @@ -2681,7 +2690,7 @@ def reload(ctx, no_dynamic_buffer, no_delay, dry_run, json_data, ports): log.log_info("'qos reload' executing...") if not dry_run: - _clear_qos(delay = not no_delay) + _clear_qos(delay = not no_delay, verbose=verbose) _, hwsku_path = device_info.get_paths_to_platform_and_hwsku_dirs() sonic_version_file = device_info.get_sonic_version_file() diff --git a/tests/config_test.py b/tests/config_test.py index 5fa50abd00..fff66d47e6 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -693,7 +693,7 @@ def test_qos_wait_until_clear_empty(self): with mock.patch('swsscommon.swsscommon.SonicV2Connector.keys', side_effect=TestConfigQos._keys): TestConfigQos._keys_counter = 1 - empty = _wait_until_clear("BUFFER_POOL_TABLE:*", 0.5,2) + empty = _wait_until_clear(["BUFFER_POOL_TABLE:*"], 0.5,2) assert empty def test_qos_wait_until_clear_not_empty(self): @@ -701,9 +701,15 @@ def test_qos_wait_until_clear_not_empty(self): with mock.patch('swsscommon.swsscommon.SonicV2Connector.keys', side_effect=TestConfigQos._keys): TestConfigQos._keys_counter = 10 - empty = _wait_until_clear("BUFFER_POOL_TABLE:*", 0.5,2) + empty = _wait_until_clear(["BUFFER_POOL_TABLE:*"], 0.5,2) assert not empty + @mock.patch('config.main._wait_until_clear') + def test_qos_clear_no_wait(self, _wait_until_clear): + from config.main import _clear_qos + _clear_qos(True, False) + _wait_until_clear.assert_called_with(['BUFFER_*_TABLE:*', 'BUFFER_*_SET'], interval=0.5, timeout=0, verbose=False) + def test_qos_reload_single( self, get_cmd_module, setup_qos_mock_apis, setup_single_broadcom_asic From 76457141db02b80abc003d00261e4c4635b83676 Mon Sep 17 00:00:00 2001 From: Aryeh Feigin <101218333+arfeigin@users.noreply.github.com> Date: Tue, 14 Mar 2023 22:13:51 +0200 Subject: [PATCH 42/66] Fix fast-reboot DB migration (#2734) Fix DB migrator logic for migrating fast-reboot table, fixing #2621 
in db_migrator.

How I did it
Check whether the fast-reboot table exists in the DB.

How to verify it
Verified manually, migrating after fast-reboot and after cold/warm reboot.
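For illustration, a tiny sketch of the truthiness point behind this fix; it assumes the keys lookup yields a list of matches (possibly empty) or None, depending on the client:

```python
# A lookup that matches nothing may yield an empty list (or None); only a
# truthiness test treats both correctly as "fast-reboot not in progress".
for no_match in ([], None):
    assert not no_match            # new check: reads as "no match"
assert [] is not None              # old check: an empty list slipped through

assert ["FAST_REBOOT|system"]      # truthy only when a real match exists
```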
---
 scripts/db_migrator.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/scripts/db_migrator.py b/scripts/db_migrator.py
index b08c397971..64fddea290 100755
--- a/scripts/db_migrator.py
+++ b/scripts/db_migrator.py
@@ -874,8 +874,8 @@ def version_4_0_0(self):
         # reading FAST_REBOOT table can't be done with stateDB.get as it uses hget behind the scenes and the table structure is
         # not using hash and won't work.
         # FAST_REBOOT table exists only if fast-reboot was triggered.
-        keys = self.stateDB.keys(self.stateDB.STATE_DB, "FAST_REBOOT")
-        if keys is not None:
+        keys = self.stateDB.keys(self.stateDB.STATE_DB, "FAST_REBOOT|system")
+        if keys:
             enable_state = 'true'
         else:
             enable_state = 'false'

From c869c9707a7622f31da7f92a338d7af358461f8a Mon Sep 17 00:00:00 2001
From: Vivek
Date: Tue, 14 Mar 2023 17:55:40 -0700
Subject: [PATCH 43/66] Update the ref guide to reflect the vlan brief output
 (#2731)

What I did
`show vlan brief` will now show only DHCPv4 helper addresses, not the
DHCPv6 destination addresses.

Signed-off-by: Vivek Reddy Karri

---
 doc/Command-Reference.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md
index 69f282ccbb..494773b83c 100644
--- a/doc/Command-Reference.md
+++ b/doc/Command-Reference.md
@@ -9793,7 +9793,7 @@ Go Back To [Beginning of the document](#) or [Beginning of this section](#System
 
 **show vlan brief**
 
-This command displays brief information about all the vlans configured in the device. It displays the vlan ID, IP address (if configured for the vlan), list of vlan member ports, whether the port is tagged or in untagged mode, the DHCP Helper Address, and the proxy ARP status
+This command displays brief information about all the vlans configured in the device. It displays the vlan ID, IP address (if configured for the vlan), list of vlan member ports, whether the port is tagged or in untagged mode, the DHCPv4 Helper Address, and the proxy ARP status
 
 - Usage:
   ```

From 2d95529dce9ef3f23b859ec09135c40d87d4f4d5 Mon Sep 17 00:00:00 2001
From: Neetha John
Date: Thu, 16 Mar 2023 17:31:49 -0700
Subject: [PATCH 44/66] Revert "Update load minigraph to load backend acl
 (#2236)" (#2735)

This reverts commit 1518ca92df1e794222bf45100246c8ef956d7af6.

---
 config/main.py       | 43 ++-----------------------------------------
 tests/config_test.py | 43 -------------------------------------------
 2 files changed, 2 insertions(+), 84 deletions(-)

diff --git a/config/main.py b/config/main.py
index d14d392355..44f633bf44 100644
--- a/config/main.py
+++ b/config/main.py
@@ -1162,41 +1162,6 @@ def validate_gre_type(ctx, _, value):
     except ValueError:
         raise click.UsageError("{} is not a valid GRE type".format(value))
 
-def _is_storage_device(cfg_db):
-    """
-    Check if the device is a storage device or not
-    """
-    device_metadata = cfg_db.get_entry("DEVICE_METADATA", "localhost")
-    return device_metadata.get("storage_device", "Unknown") == "true"
-
-def _is_acl_table_present(cfg_db, acl_table_name):
-    """
-    Check if acl table exists
-    """
-    return acl_table_name in cfg_db.get_keys("ACL_TABLE")
-
-def load_backend_acl(cfg_db, device_type):
-    """
-    Load acl on backend storage device
-    """
-
-    BACKEND_ACL_TEMPLATE_FILE = os.path.join('/', "usr", "share", "sonic", "templates", "backend_acl.j2")
-    BACKEND_ACL_FILE = os.path.join('/', "etc", "sonic", "backend_acl.json")
-
-    if device_type and device_type == "BackEndToRRouter" and _is_storage_device(cfg_db) and _is_acl_table_present(cfg_db, "DATAACL"):
-        if os.path.isfile(BACKEND_ACL_TEMPLATE_FILE):
-            clicommon.run_command(
-                "{} -d -t {},{}".format(
-                    SONIC_CFGGEN_PATH,
-                    BACKEND_ACL_TEMPLATE_FILE,
-                    BACKEND_ACL_FILE
-                ),
-                display_cmd=True
-            )
-        if os.path.isfile(BACKEND_ACL_FILE):
-            clicommon.run_command("acl-loader update incremental {}".format(BACKEND_ACL_FILE), display_cmd=True)
-
-
 # This is our main entrypoint - the main 'config' command
 @click.group(cls=clicommon.AbbreviationGroup, context_settings=CONTEXT_SETTINGS)
 @click.pass_context
@@ -1774,12 +1739,6 @@ def load_minigraph(db, no_service_restart, traffic_shift_away, override_config,
     if os.path.isfile('/etc/sonic/acl.json'):
         clicommon.run_command("acl-loader update full /etc/sonic/acl.json", display_cmd=True)
 
-    # get the device type
-    device_type = _get_device_type()
-
-    # Load backend acl
-    load_backend_acl(db.cfgdb, device_type)
-
     # Load port_config.json
     try:
         load_port_config(db.cfgdb, '/etc/sonic/port_config.json')
@@ -1789,6 +1748,8 @@ def load_minigraph(db, no_service_restart, traffic_shift_away, override_config,
     # generate QoS and Buffer configs
     clicommon.run_command("config qos reload --no-dynamic-buffer --no-delay", display_cmd=True)
 
+    # get the device type
+    device_type = _get_device_type()
     if device_type != 'MgmtToRRouter' and device_type != 'MgmtTsToR' and device_type != 'BmcMgmtToRRouter' and device_type != 'EPMS':
         clicommon.run_command("pfcwd start_default", display_cmd=True)
 
diff --git a/tests/config_test.py b/tests/config_test.py
index fff66d47e6..4ebc14cd14 100644
--- a/tests/config_test.py
+++ b/tests/config_test.py
@@ -354,49 +354,6 @@ def test_load_minigraph_with_port_config(self, get_cmd_module, setup_single_broa
         port_config = [{"PORT": {"Ethernet0": {"admin_status": "up"}}}]
         self.check_port_config(db, config, port_config, "config interface startup Ethernet0")
 
-    def test_load_backend_acl(self, get_cmd_module, setup_single_broadcom_asic):
-        db = Db()
-        db.cfgdb.set_entry("DEVICE_METADATA", "localhost", {"storage_device": "true"})
-        self.check_backend_acl(get_cmd_module, db, device_type='BackEndToRRouter', condition=True)
-
-    def test_load_backend_acl_not_storage(self, get_cmd_module, setup_single_broadcom_asic):
-        db = Db()
-        self.check_backend_acl(get_cmd_module, db, device_type='BackEndToRRouter', condition=False)
-
-    def test_load_backend_acl_storage_leaf(self, get_cmd_module, setup_single_broadcom_asic):
-        db = Db()
-        db.cfgdb.set_entry("DEVICE_METADATA", "localhost", {"storage_device": "true"})
-        self.check_backend_acl(get_cmd_module, db, device_type='BackEndLeafRouter', condition=False)
-
-    def test_load_backend_acl_storage_no_dataacl(self, get_cmd_module, setup_single_broadcom_asic):
-        db = Db()
-        db.cfgdb.set_entry("DEVICE_METADATA", "localhost", {"storage_device": "true"})
-        db.cfgdb.set_entry("ACL_TABLE", "DATAACL", None)
-        self.check_backend_acl(get_cmd_module, db, device_type='BackEndToRRouter', condition=False)
-
-    def check_backend_acl(self, get_cmd_module, db, device_type='BackEndToRRouter', condition=True):
-        def is_file_side_effect(filename):
-            return True if 'backend_acl' in filename else False
-        with mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)):
-            with mock.patch('config.main._get_device_type', mock.MagicMock(return_value=device_type)):
-                with mock.patch(
-                        "utilities_common.cli.run_command",
-                        mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command:
-                    (config, show) = get_cmd_module
-                    runner = CliRunner()
-                    result = runner.invoke(config.config.commands["load_minigraph"], ["-y"], obj=db)
-                    print(result.exit_code)
-                    expected_output = ['Running command: acl-loader update incremental /etc/sonic/backend_acl.json',
-                                       'Running command: /usr/local/bin/sonic-cfggen -d -t /usr/share/sonic/templates/backend_acl.j2,/etc/sonic/backend_acl.json'
-                                      ]
-                    print(result.output)
-                    assert result.exit_code == 0
-                    output = result.output.split('\n')
-                    if condition:
-                        assert set(expected_output).issubset(set(output))
-                    else:
-                        assert not(set(expected_output).issubset(set(output)))
-
     def check_port_config(self, db, config, port_config, expected_output):
         def read_json_file_side_effect(filename):
             return port_config

From f27dea0cfdefbdcfc03d19136e4ae47ea72fd51f Mon Sep 17 00:00:00 2001
From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com>
Date: Fri, 17 Mar 2023 09:10:47 +0200
Subject: [PATCH 45/66] [route_check] remove check-frr_patch mock (#2732)

The test fails with python3.7 (works in 3.9) when stopping a patch that
hasn't been started. We can always mock the check_output call and have the
mock return an empty dictionary when FRR_ROUTES is not defined.
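For illustration, a self-contained sketch of the always-mock pattern described above; the patch target and command here are hypothetical placeholders, not the test's actual wiring:

```python
import json
from unittest.mock import patch

def fake_frr_routes(routes=None):
    # Default to "no FRR_ROUTES defined": the mock then serves an empty dict.
    routes = routes or {}

    def side_effect(*args, **kwargs):
        return json.dumps(routes)

    # The mock is installed unconditionally, so there is nothing to stop later.
    with patch("subprocess.check_output", side_effect=side_effect):
        import subprocess
        # Any command works here; the mock intercepts the call.
        return subprocess.check_output(["vtysh", "-c", "show ip route json"])

assert fake_frr_routes() == "{}"
assert json.loads(fake_frr_routes({"10.0.0.0/24": []})) == {"10.0.0.0/24": []}
```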
#### What I did
Removed the check_frr_patch mock to fix the UT running on python3.7.

#### How I did it
Removed the mock.

#### How to verify it
Run the unit tests in a stretch env.

---
 tests/route_check_test.py | 15 ++++-----------
 1 file changed, 4 insertions(+), 11 deletions(-)

diff --git a/tests/route_check_test.py b/tests/route_check_test.py
index 4d93c74e2d..118e9eab56 100644
--- a/tests/route_check_test.py
+++ b/tests/route_check_test.py
@@ -277,17 +277,12 @@ def test_route_check(self, mock_dbs, test_num):
 
         with patch('sys.argv', ct_data[ARGS].split()), \
                 patch('route_check.subprocess.check_output') as mock_check_output:
 
-            check_frr_patch = patch('route_check.check_frr_pending_routes', lambda: [])
+            routes = ct_data.get(FRR_ROUTES, {})
 
-            if FRR_ROUTES in ct_data:
-                routes = ct_data[FRR_ROUTES]
+            def side_effect(*args, **kwargs):
+                return json.dumps(routes)
 
-                def side_effect(*args, **kwargs):
-                    return json.dumps(routes)
-
-                mock_check_output.side_effect = side_effect
-            else:
-                check_frr_patch.start()
+            mock_check_output.side_effect = side_effect
 
             ret, res = route_check.main()
             expect_ret = ct_data[RET] if RET in ct_data else 0
@@ -299,8 +294,6 @@ def side_effect(*args, **kwargs):
             assert ret == expect_ret
             assert res == expect_res
 
-            check_frr_patch.stop()
-
     def test_timeout(self, mock_dbs, force_hang):
         # Test timeout
         ex_raised = False

From 05fa7513355cf333818c480fade157bdff969811 Mon Sep 17 00:00:00 2001
From: abdosi <58047199+abdosi@users.noreply.github.com>
Date: Fri, 17 Mar 2023 16:27:48 -0700
Subject: [PATCH 46/66] Fix the `show interface counters` throwing exception
 on device with no external interfaces (#2703)

Fix the `show interface counters` exception on devices that do not have
any external ports and where all links are internal (ethernet or fabric),
which is possible in a chassis.

---
 scripts/portstat | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/scripts/portstat b/scripts/portstat
index 09ad88b08d..43746cc1c3 100755
--- a/scripts/portstat
+++ b/scripts/portstat
@@ -333,13 +333,13 @@ class Portstat(object):
                               format_number_with_comma(data['tx_err']),
                               format_number_with_comma(data['tx_drop']),
                               format_number_with_comma(data['tx_ovr'])))
-
-        if use_json:
-            print(table_as_json(table, header))
-        else:
-            print(tabulate(table, header, tablefmt='simple', stralign='right'))
-        if multi_asic.is_multi_asic() or device_info.is_chassis():
-            print("\nReminder: Please execute 'show interface counters -d all' to include internal links\n")
+        if table:
+            if use_json:
+                print(table_as_json(table, header))
+            else:
+                print(tabulate(table, header, tablefmt='simple', stralign='right'))
+            if multi_asic.is_multi_asic() or device_info.is_chassis() and not use_json:
+                print("\nReminder: Please execute 'show interface counters -d all' to include internal links\n")
 
     def cnstat_intf_diff_print(self, cnstat_new_dict, cnstat_old_dict, intf_list):
         """
@@ -551,13 +551,13 @@ class Portstat(object):
                               format_number_with_comma(cntr['tx_err']),
                               format_number_with_comma(cntr['tx_drop']),
                               format_number_with_comma(cntr['tx_ovr'])))
-
-        if use_json:
-            print(table_as_json(table, header))
-        else:
-            print(tabulate(table, header, tablefmt='simple', stralign='right'))
-        if multi_asic.is_multi_asic() or device_info.is_chassis():
-            print("\nReminder: Please execute 'show interface counters -d all' to include internal links\n")
+        if table:
+            if use_json:
+                print(table_as_json(table, header))
+            else:
+                print(tabulate(table, header, tablefmt='simple', stralign='right'))
+            if multi_asic.is_multi_asic()
or device_info.is_chassis() and not use_json: + print("\nReminder: Please execute 'show interface counters -d all' to include internal links\n") def main(): parser = argparse.ArgumentParser(description='Display the ports state and counters', From 10f31ea6fb0876f913cfcfce8c95011e675a99f6 Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Tue, 21 Mar 2023 00:25:39 -0400 Subject: [PATCH 47/66] Revert "Replace pickle by json (#2636)" (#2746) This reverts commit 54e26359fccf45d2e40800cf5598a725798634cd. Due to https://github.com/sonic-net/sonic-buildimage/issues/14089 Signed-off-by: Mai Bui --- scripts/dropstat | 14 +-- scripts/flow_counters_stat | 10 +- scripts/intfstat | 64 +++++----- scripts/pfcstat | 62 +++++----- scripts/pg-drop | 8 +- scripts/portstat | 239 +++++++++++++++++++------------------ scripts/queuestat | 34 +++--- scripts/tunnelstat | 40 +++---- 8 files changed, 236 insertions(+), 235 deletions(-) diff --git a/scripts/dropstat b/scripts/dropstat index 4e9f5bb4d0..f98fc29197 100755 --- a/scripts/dropstat +++ b/scripts/dropstat @@ -11,7 +11,7 @@ # - Refactor calls to COUNTERS_DB to reduce redundancy # - Cache DB queries to reduce # of expensive queries -import json +import _pickle as pickle import argparse import os import socket @@ -117,10 +117,10 @@ class DropStat(object): """ try: - json.dump(self.get_counts_table(self.gather_counters(std_port_rx_counters + std_port_tx_counters, DEBUG_COUNTER_PORT_STAT_MAP), COUNTERS_PORT_NAME_MAP), - open(self.port_drop_stats_file, 'w+')) - json.dump(self.get_counts(self.gather_counters([], DEBUG_COUNTER_SWITCH_STAT_MAP), self.get_switch_id()), - open(self.switch_drop_stats_file, 'w+')) + pickle.dump(self.get_counts_table(self.gather_counters(std_port_rx_counters + std_port_tx_counters, DEBUG_COUNTER_PORT_STAT_MAP), COUNTERS_PORT_NAME_MAP), + open(self.port_drop_stats_file, 'wb+')) + pickle.dump(self.get_counts(self.gather_counters([], DEBUG_COUNTER_SWITCH_STAT_MAP), self.get_switch_id()), + open(self.switch_drop_stats_file, 'wb+')) except IOError as e: print(e) sys.exit(e.errno) @@ -135,7 +135,7 @@ class DropStat(object): # Grab the latest clear checkpoint, if it exists if os.path.isfile(self.port_drop_stats_file): - port_drop_ckpt = json.load(open(self.port_drop_stats_file, 'r')) + port_drop_ckpt = pickle.load(open(self.port_drop_stats_file, 'rb')) counters = self.gather_counters(std_port_rx_counters + std_port_tx_counters, DEBUG_COUNTER_PORT_STAT_MAP, group, counter_type) headers = std_port_description_header + self.gather_headers(counters, DEBUG_COUNTER_PORT_STAT_MAP) @@ -162,7 +162,7 @@ class DropStat(object): # Grab the latest clear checkpoint, if it exists if os.path.isfile(self.switch_drop_stats_file): - switch_drop_ckpt = json.load(open(self.switch_drop_stats_file, 'r')) + switch_drop_ckpt = pickle.load(open(self.switch_drop_stats_file, 'rb')) counters = self.gather_counters([], DEBUG_COUNTER_SWITCH_STAT_MAP, group, counter_type) headers = std_switch_description_header + self.gather_headers(counters, DEBUG_COUNTER_SWITCH_STAT_MAP) diff --git a/scripts/flow_counters_stat b/scripts/flow_counters_stat index 49b97e335b..ac5ef94beb 100755 --- a/scripts/flow_counters_stat +++ b/scripts/flow_counters_stat @@ -2,7 +2,7 @@ import argparse import os -import json +import _pickle as pickle import sys from natsort import natsorted @@ -185,8 +185,8 @@ class FlowCounterStats(object): if os.path.exists(self.data_file): os.remove(self.data_file) - with open(self.data_file, 'w') as f: - json.dump(data, f) + with open(self.data_file, 'wb') as f: + 
pickle.dump(data, f) except IOError as e: print('Failed to save statistic - {}'.format(repr(e))) @@ -200,8 +200,8 @@ class FlowCounterStats(object): return None try: - with open(self.data_file, 'r') as f: - data = json.load(f) + with open(self.data_file, 'rb') as f: + data = pickle.load(f) except IOError as e: print('Failed to load statistic - {}'.format(repr(e))) return None diff --git a/scripts/intfstat b/scripts/intfstat index b4a770adeb..30cfbf084d 100755 --- a/scripts/intfstat +++ b/scripts/intfstat @@ -6,7 +6,7 @@ # ##################################################################### -import json +import _pickle as pickle import argparse import datetime import sys @@ -28,7 +28,7 @@ from collections import namedtuple, OrderedDict from natsort import natsorted from tabulate import tabulate from utilities_common.netstat import ns_diff, table_as_json, STATUS_NA, format_brate, format_prate -from utilities_common.cli import json_serial, UserCache +from utilities_common.cli import UserCache from swsscommon.swsscommon import SonicV2Connector nstat_fields = ( @@ -96,7 +96,7 @@ class Intfstat(object): counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, counter_name) if counter_data: fields[pos] = str(counter_data) - cntr = NStats._make(fields)._asdict() + cntr = NStats._make(fields) return cntr def get_rates(table_id): @@ -153,14 +153,14 @@ class Intfstat(object): rates = ratestat_dict.get(key, RateStats._make([STATUS_NA] * len(rates_key_list))) table.append((key, - data['rx_p_ok'], + data.rx_p_ok, format_brate(rates.rx_bps), format_prate(rates.rx_pps), - data['rx_p_err'], - data['tx_p_ok'], + data.rx_p_err, + data.tx_p_ok, format_brate(rates.tx_bps), format_prate(rates.tx_pps), - data['tx_p_err'])) + data.tx_p_err)) if use_json: print(table_as_json(table, header)) @@ -186,24 +186,24 @@ class Intfstat(object): if old_cntr is not None: table.append((key, - ns_diff(cntr['rx_p_ok'], old_cntr['rx_p_ok']), + ns_diff(cntr.rx_p_ok, old_cntr.rx_p_ok), format_brate(rates.rx_bps), format_prate(rates.rx_pps), - ns_diff(cntr['rx_p_err'], old_cntr['rx_p_err']), - ns_diff(cntr['tx_p_ok'], old_cntr['tx_p_ok']), + ns_diff(cntr.rx_p_err, old_cntr.rx_p_err), + ns_diff(cntr.tx_p_ok, old_cntr.tx_p_ok), format_brate(rates.tx_bps), format_prate(rates.tx_pps), - ns_diff(cntr['tx_p_err'], old_cntr['tx_p_err']))) + ns_diff(cntr.tx_p_err, old_cntr.tx_p_err))) else: table.append((key, - cntr['rx_p_ok'], + cntr.rx_p_ok, format_brate(rates.rx_bps), format_prate(rates.rx_pps), - cntr['rx_p_err'], - cntr['tx_p_ok'], + cntr.rx_p_err, + cntr.tx_p_ok, format_brate(rates.tx_bps), format_prate(rates.tx_pps), - cntr['tx_p_err'])) + cntr.tx_p_err)) if use_json: print(table_as_json(table, header)) @@ -229,17 +229,17 @@ class Intfstat(object): if cnstat_old_dict and cnstat_old_dict.get(rif): old_cntr = cnstat_old_dict.get(rif) - body = body % (ns_diff(cntr['rx_p_ok'], old_cntr['rx_p_ok']), - ns_diff(cntr['rx_b_ok'], old_cntr['rx_b_ok']), - ns_diff(cntr['rx_p_err'], old_cntr['rx_p_err']), - ns_diff(cntr['rx_b_err'], old_cntr['rx_b_err']), - ns_diff(cntr['tx_p_ok'], old_cntr['tx_p_ok']), - ns_diff(cntr['tx_b_ok'], old_cntr['tx_b_ok']), - ns_diff(cntr['tx_p_err'], old_cntr['tx_p_err']), - ns_diff(cntr['tx_b_err'], old_cntr['tx_b_err'])) + body = body % (ns_diff(cntr.rx_p_ok, old_cntr.rx_p_ok), + ns_diff(cntr.rx_b_ok, old_cntr.rx_b_ok), + ns_diff(cntr.rx_p_err, old_cntr.rx_p_err), + ns_diff(cntr.rx_b_err, old_cntr.rx_b_err), + ns_diff(cntr.tx_p_ok, old_cntr.tx_p_ok), + ns_diff(cntr.tx_b_ok, old_cntr.tx_b_ok), + 
ns_diff(cntr.tx_p_err, old_cntr.tx_p_err), + ns_diff(cntr.tx_b_err, old_cntr.tx_b_err)) else: - body = body % (cntr['rx_p_ok'], cntr['rx_b_ok'], cntr['rx_p_err'],cntr['rx_b_err'], - cntr['tx_p_ok'], cntr['tx_b_ok'], cntr['tx_p_err'], cntr['tx_b_err']) + body = body % (cntr.rx_p_ok, cntr.rx_b_ok, cntr.rx_p_err,cntr.rx_b_err, + cntr.tx_p_ok, cntr.tx_b_ok, cntr.tx_p_err, cntr.tx_b_err) print(header) print(body) @@ -305,20 +305,20 @@ def main(): if tag_name is not None: if os.path.isfile(cnstat_fqn_general_file): try: - general_data = json.load(open(cnstat_fqn_general_file, 'r')) + general_data = pickle.load(open(cnstat_fqn_general_file, 'rb')) for key, val in cnstat_dict.items(): general_data[key] = val - json.dump(general_data, open(cnstat_fqn_general_file, 'w')) + pickle.dump(general_data, open(cnstat_fqn_general_file, 'wb')) except IOError as e: sys.exit(e.errno) # Add the information also to tag specific file if os.path.isfile(cnstat_fqn_file): - data = json.load(open(cnstat_fqn_file, 'r')) + data = pickle.load(open(cnstat_fqn_file, 'rb')) for key, val in cnstat_dict.items(): data[key] = val - json.dump(data, open(cnstat_fqn_file, 'w')) + pickle.dump(data, open(cnstat_fqn_file, 'wb')) else: - json.dump(cnstat_dict, open(cnstat_fqn_file, 'w'), default=json_serial) + pickle.dump(cnstat_dict, open(cnstat_fqn_file, 'wb')) except IOError as e: sys.exit(e.errno) else: @@ -330,9 +330,9 @@ def main(): try: cnstat_cached_dict = {} if os.path.isfile(cnstat_fqn_file): - cnstat_cached_dict = json.load(open(cnstat_fqn_file, 'r')) + cnstat_cached_dict = pickle.load(open(cnstat_fqn_file, 'rb')) else: - cnstat_cached_dict = json.load(open(cnstat_fqn_general_file, 'r')) + cnstat_cached_dict = pickle.load(open(cnstat_fqn_general_file, 'rb')) print("Last cached time was " + str(cnstat_cached_dict.get('time'))) if interface_name: diff --git a/scripts/pfcstat b/scripts/pfcstat index 094c6e9380..fb7e6018b6 100755 --- a/scripts/pfcstat +++ b/scripts/pfcstat @@ -6,7 +6,7 @@ # ##################################################################### -import json +import _pickle as pickle import argparse import datetime import os.path @@ -37,7 +37,7 @@ except KeyError: from utilities_common.netstat import ns_diff, STATUS_NA, format_number_with_comma from utilities_common import multi_asic as multi_asic_util from utilities_common import constants -from utilities_common.cli import json_serial, UserCache +from utilities_common.cli import UserCache PStats = namedtuple("PStats", "pfc0, pfc1, pfc2, pfc3, pfc4, pfc5, pfc6, pfc7") @@ -101,7 +101,7 @@ class Pfcstat(object): fields[pos] = STATUS_NA else: fields[pos] = str(int(counter_data)) - cntr = PStats._make(fields)._asdict() + cntr = PStats._make(fields) return cntr # Get the info from database @@ -144,14 +144,14 @@ class Pfcstat(object): if key == 'time': continue table.append((key, - format_number_with_comma(data['pfc0']), - format_number_with_comma(data['pfc1']), - format_number_with_comma(data['pfc2']), - format_number_with_comma(data['pfc3']), - format_number_with_comma(data['pfc4']), - format_number_with_comma(data['pfc5']), - format_number_with_comma(data['pfc6']), - format_number_with_comma(data['pfc7']))) + format_number_with_comma(data.pfc0), + format_number_with_comma(data.pfc1), + format_number_with_comma(data.pfc2), + format_number_with_comma(data.pfc3), + format_number_with_comma(data.pfc4), + format_number_with_comma(data.pfc5), + format_number_with_comma(data.pfc6), + format_number_with_comma(data.pfc7))) if rx: print(tabulate(table, header_Rx, 
tablefmt='simple', stralign='right')) @@ -173,24 +173,24 @@ class Pfcstat(object): if old_cntr is not None: table.append((key, - ns_diff(cntr['pfc0'], old_cntr['pfc0']), - ns_diff(cntr['pfc1'], old_cntr['pfc1']), - ns_diff(cntr['pfc2'], old_cntr['pfc2']), - ns_diff(cntr['pfc3'], old_cntr['pfc3']), - ns_diff(cntr['pfc4'], old_cntr['pfc4']), - ns_diff(cntr['pfc5'], old_cntr['pfc5']), - ns_diff(cntr['pfc6'], old_cntr['pfc6']), - ns_diff(cntr['pfc7'], old_cntr['pfc7']))) + ns_diff(cntr.pfc0, old_cntr.pfc0), + ns_diff(cntr.pfc1, old_cntr.pfc1), + ns_diff(cntr.pfc2, old_cntr.pfc2), + ns_diff(cntr.pfc3, old_cntr.pfc3), + ns_diff(cntr.pfc4, old_cntr.pfc4), + ns_diff(cntr.pfc5, old_cntr.pfc5), + ns_diff(cntr.pfc6, old_cntr.pfc6), + ns_diff(cntr.pfc7, old_cntr.pfc7))) else: table.append((key, - format_number_with_comma(cntr['pfc0']), - format_number_with_comma(cntr['pfc1']), - format_number_with_comma(cntr['pfc2']), - format_number_with_comma(cntr['pfc3']), - format_number_with_comma(cntr['pfc4']), - format_number_with_comma(cntr['pfc5']), - format_number_with_comma(cntr['pfc6']), - format_number_with_comma(cntr['pfc7']))) + format_number_with_comma(cntr.pfc0), + format_number_with_comma(cntr.pfc1), + format_number_with_comma(cntr.pfc2), + format_number_with_comma(cntr.pfc3), + format_number_with_comma(cntr.pfc4), + format_number_with_comma(cntr.pfc5), + format_number_with_comma(cntr.pfc6), + format_number_with_comma(cntr.pfc7))) if rx: print(tabulate(table, header_Rx, tablefmt='simple', stralign='right')) @@ -256,8 +256,8 @@ Examples: if save_fresh_stats: try: - json.dump(cnstat_dict_rx, open(cnstat_fqn_file_rx, 'w'), default=json_serial) - json.dump(cnstat_dict_tx, open(cnstat_fqn_file_tx, 'w'), default=json_serial) + pickle.dump(cnstat_dict_rx, open(cnstat_fqn_file_rx, 'wb')) + pickle.dump(cnstat_dict_tx, open(cnstat_fqn_file_tx, 'wb')) except IOError as e: print(e.errno, e) sys.exit(e.errno) @@ -271,7 +271,7 @@ Examples: """ if os.path.isfile(cnstat_fqn_file_rx): try: - cnstat_cached_dict = json.load(open(cnstat_fqn_file_rx, 'r')) + cnstat_cached_dict = pickle.load(open(cnstat_fqn_file_rx, 'rb')) print("Last cached time was " + str(cnstat_cached_dict.get('time'))) pfcstat.cnstat_diff_print(cnstat_dict_rx, cnstat_cached_dict, True) except IOError as e: @@ -286,7 +286,7 @@ Examples: """ if os.path.isfile(cnstat_fqn_file_tx): try: - cnstat_cached_dict = json.load(open(cnstat_fqn_file_tx, 'r')) + cnstat_cached_dict = pickle.load(open(cnstat_fqn_file_tx, 'rb')) print("Last cached time was " + str(cnstat_cached_dict.get('time'))) pfcstat.cnstat_diff_print(cnstat_dict_tx, cnstat_cached_dict, False) except IOError as e: diff --git a/scripts/pg-drop b/scripts/pg-drop index 7741593081..40b4e863d3 100755 --- a/scripts/pg-drop +++ b/scripts/pg-drop @@ -5,7 +5,7 @@ # pg-drop is a tool for show/clear ingress pg dropped packet stats. # ##################################################################### -import json +import _pickle as pickle import argparse import os import sys @@ -144,7 +144,7 @@ class PgDropStat(object): port_drop_ckpt = {} # Grab the latest clear checkpoint, if it exists if os.path.isfile(self.port_drop_stats_file): - port_drop_ckpt = json.load(open(self.port_drop_stats_file, 'r')) + port_drop_ckpt = pickle.load(open(self.port_drop_stats_file, 'rb')) # Header list contains the port name followed by the PGs. 
Fields is used to populate the pg values fields = ["0"]* (len(self.header_list) - 1) @@ -216,10 +216,10 @@ class PgDropStat(object): counter_pg_drop_array = [ "SAI_INGRESS_PRIORITY_GROUP_STAT_DROPPED_PACKETS"] try: - json.dump(self.get_counts_table( + pickle.dump(self.get_counts_table( counter_pg_drop_array, COUNTERS_PG_NAME_MAP), - open(self.port_drop_stats_file, 'w+')) + open(self.port_drop_stats_file, 'wb+')) except IOError as e: print(e) sys.exit(e.errno) diff --git a/scripts/portstat b/scripts/portstat index 43746cc1c3..399733f69c 100755 --- a/scripts/portstat +++ b/scripts/portstat @@ -6,7 +6,7 @@ # ##################################################################### -import json +import _pickle as pickle import argparse import datetime import os.path @@ -40,7 +40,7 @@ from utilities_common.intf_filter import parse_interface_in_filter import utilities_common.multi_asic as multi_asic_util from utilities_common.netstat import ns_diff, table_as_json, format_brate, format_prate, format_util, format_number_with_comma -from utilities_common.cli import json_serial, UserCache +from utilities_common.cli import UserCache """ The order and count of statistics mentioned below needs to be in sync with the values in portstat script @@ -181,7 +181,7 @@ class Portstat(object): elif fields[pos] != STATUS_NA: fields[pos] = str(int(fields[pos]) + int(fvs[counter_name])) - cntr = NStats._make(fields)._asdict() + cntr = NStats._make(fields) return cntr def get_rates(table_id): @@ -278,61 +278,62 @@ class Portstat(object): if print_all: header = header_all table.append((key, self.get_port_state(key), - format_number_with_comma(data['rx_ok']), + format_number_with_comma(data.rx_ok), format_brate(rates.rx_bps), format_prate(rates.rx_pps), format_util(rates.rx_bps, port_speed), - format_number_with_comma(data['rx_err']), - format_number_with_comma(data['rx_drop']), - format_number_with_comma(data['rx_ovr']), - format_number_with_comma(data['tx_ok']), + format_number_with_comma(data.rx_err), + format_number_with_comma(data.rx_drop), + format_number_with_comma(data.rx_ovr), + format_number_with_comma(data.tx_ok), format_brate(rates.tx_bps), format_prate(rates.tx_pps), format_util(rates.tx_bps, port_speed), - format_number_with_comma(data['tx_err']), - format_number_with_comma(data['tx_drop']), - format_number_with_comma(data['tx_ovr']))) + format_number_with_comma(data.tx_err), + format_number_with_comma(data.tx_drop), + format_number_with_comma(data.tx_ovr))) elif errors_only: header = header_errors_only table.append((key, self.get_port_state(key), - format_number_with_comma(data['rx_err']), - format_number_with_comma(data['rx_drop']), - format_number_with_comma(data['rx_ovr']), - format_number_with_comma(data['tx_err']), - format_number_with_comma(data['tx_drop']), - format_number_with_comma(data['tx_ovr']))) + format_number_with_comma(data.rx_err), + format_number_with_comma(data.rx_drop), + format_number_with_comma(data.rx_ovr), + format_number_with_comma(data.tx_err), + format_number_with_comma(data.tx_drop), + format_number_with_comma(data.tx_ovr))) elif fec_stats_only: header = header_fec_only table.append((key, self.get_port_state(key), - format_number_with_comma(data['fec_corr']), - format_number_with_comma(data['fec_uncorr']), - format_number_with_comma(data['fec_symbol_err']))) + format_number_with_comma(data.fec_corr), + format_number_with_comma(data.fec_uncorr), + format_number_with_comma(data.fec_symbol_err))) elif rates_only: header = header_rates_only table.append((key, 
self.get_port_state(key), - format_number_with_comma(data['rx_ok']), + format_number_with_comma(data.rx_ok), format_brate(rates.rx_bps), format_prate(rates.rx_pps), format_util(rates.rx_bps, port_speed), - format_number_with_comma(data['tx_ok']), + format_number_with_comma(data.tx_ok), format_brate(rates.tx_bps), format_prate(rates.tx_pps), format_util(rates.tx_bps, port_speed))) else: header = header_std table.append((key, self.get_port_state(key), - format_number_with_comma(data['rx_ok']), + format_number_with_comma(data.rx_ok), format_brate(rates.rx_bps), format_util(rates.rx_bps, port_speed), - format_number_with_comma(data['rx_err']), - format_number_with_comma(data['rx_drop']), - format_number_with_comma(data['rx_ovr']), - format_number_with_comma(data['tx_ok']), + format_number_with_comma(data.rx_err), + format_number_with_comma(data.rx_drop), + format_number_with_comma(data.rx_ovr), + format_number_with_comma(data.tx_ok), format_brate(rates.tx_bps), format_util(rates.tx_bps, port_speed), - format_number_with_comma(data['tx_err']), - format_number_with_comma(data['tx_drop']), - format_number_with_comma(data['tx_ovr']))) + + format_number_with_comma(data.tx_err), + format_number_with_comma(data.tx_drop), + format_number_with_comma(data.tx_ovr))) if table: if use_json: print(table_as_json(table, header)) @@ -353,51 +354,51 @@ class Portstat(object): if key in cnstat_old_dict: old_cntr = cnstat_old_dict.get(key) else: - old_cntr = NStats._make([0] * BUCKET_NUM)._asdict() + old_cntr = NStats._make([0] * BUCKET_NUM) if intf_list and key not in intf_list: continue - print("Packets Received 64 Octets..................... {}".format(ns_diff(cntr['rx_64'], old_cntr['rx_64']))) - print("Packets Received 65-127 Octets................. {}".format(ns_diff(cntr['rx_65_127'], old_cntr['rx_65_127']))) - print("Packets Received 128-255 Octets................ {}".format(ns_diff(cntr['rx_128_255'], old_cntr['rx_128_255']))) - print("Packets Received 256-511 Octets................ {}".format(ns_diff(cntr['rx_256_511'], old_cntr['rx_256_511']))) - print("Packets Received 512-1023 Octets............... {}".format(ns_diff(cntr['rx_512_1023'], old_cntr['rx_512_1023']))) - print("Packets Received 1024-1518 Octets.............. {}".format(ns_diff(cntr['rx_1024_1518'], old_cntr['rx_1024_1518']))) - print("Packets Received 1519-2047 Octets.............. {}".format(ns_diff(cntr['rx_1519_2047'], old_cntr['rx_1519_2047']))) - print("Packets Received 2048-4095 Octets.............. {}".format(ns_diff(cntr['rx_2048_4095'], old_cntr['rx_2048_4095']))) - print("Packets Received 4096-9216 Octets.............. {}".format(ns_diff(cntr['rx_4096_9216'], old_cntr['rx_4096_9216']))) - print("Packets Received 9217-16383 Octets............. {}".format(ns_diff(cntr['rx_9217_16383'], old_cntr['rx_9217_16383']))) + print("Packets Received 64 Octets..................... {}".format(ns_diff(cntr.rx_64, old_cntr.rx_64))) + print("Packets Received 65-127 Octets................. {}".format(ns_diff(cntr.rx_65_127, old_cntr.rx_65_127))) + print("Packets Received 128-255 Octets................ {}".format(ns_diff(cntr.rx_128_255, old_cntr.rx_128_255))) + print("Packets Received 256-511 Octets................ {}".format(ns_diff(cntr.rx_256_511, old_cntr.rx_256_511))) + print("Packets Received 512-1023 Octets............... {}".format(ns_diff(cntr.rx_512_1023, old_cntr.rx_512_1023))) + print("Packets Received 1024-1518 Octets.............. 
{}".format(ns_diff(cntr.rx_1024_1518, old_cntr.rx_1024_1518))) + print("Packets Received 1519-2047 Octets.............. {}".format(ns_diff(cntr.rx_1519_2047, old_cntr.rx_1519_2047))) + print("Packets Received 2048-4095 Octets.............. {}".format(ns_diff(cntr.rx_2048_4095, old_cntr.rx_2048_4095))) + print("Packets Received 4096-9216 Octets.............. {}".format(ns_diff(cntr.rx_4096_9216, old_cntr.rx_4096_9216))) + print("Packets Received 9217-16383 Octets............. {}".format(ns_diff(cntr.rx_9217_16383, old_cntr.rx_9217_16383))) print("") - print("Total Packets Received Without Errors.......... {}".format(ns_diff(cntr['rx_all'], old_cntr['rx_all']))) - print("Unicast Packets Received....................... {}".format(ns_diff(cntr['rx_uca'], old_cntr['rx_uca']))) - print("Multicast Packets Received..................... {}".format(ns_diff(cntr['rx_mca'], old_cntr['rx_mca']))) - print("Broadcast Packets Received..................... {}".format(ns_diff(cntr['rx_bca'], old_cntr['rx_bca']))) + print("Total Packets Received Without Errors.......... {}".format(ns_diff(cntr.rx_all, old_cntr.rx_all))) + print("Unicast Packets Received....................... {}".format(ns_diff(cntr.rx_uca, old_cntr.rx_uca))) + print("Multicast Packets Received..................... {}".format(ns_diff(cntr.rx_mca, old_cntr.rx_mca))) + print("Broadcast Packets Received..................... {}".format(ns_diff(cntr.rx_bca, old_cntr.rx_bca))) print("") - print("Jabbers Received............................... {}".format(ns_diff(cntr['rx_jbr'], old_cntr['rx_jbr']))) - print("Fragments Received............................. {}".format(ns_diff(cntr['rx_frag'], old_cntr['rx_frag']))) - print("Undersize Received............................. {}".format(ns_diff(cntr['rx_usize'], old_cntr['rx_usize']))) - print("Overruns Received.............................. {}".format(ns_diff(cntr['rx_ovrrun'], old_cntr['rx_ovrrun']))) + print("Jabbers Received............................... {}".format(ns_diff(cntr.rx_jbr, old_cntr.rx_jbr))) + print("Fragments Received............................. {}".format(ns_diff(cntr.rx_frag, old_cntr.rx_frag))) + print("Undersize Received............................. {}".format(ns_diff(cntr.rx_usize, old_cntr.rx_usize))) + print("Overruns Received.............................. {}".format(ns_diff(cntr.rx_ovrrun, old_cntr.rx_ovrrun))) print("") - print("Packets Transmitted 64 Octets.................. {}".format(ns_diff(cntr['tx_64'], old_cntr['tx_64']))) - print("Packets Transmitted 65-127 Octets.............. {}".format(ns_diff(cntr['tx_65_127'], old_cntr['tx_65_127']))) - print("Packets Transmitted 128-255 Octets............. {}".format(ns_diff(cntr['tx_128_255'], old_cntr['tx_128_255']))) - print("Packets Transmitted 256-511 Octets............. {}".format(ns_diff(cntr['tx_256_511'], old_cntr['tx_256_511']))) - print("Packets Transmitted 512-1023 Octets............ {}".format(ns_diff(cntr['tx_512_1023'], old_cntr['tx_512_1023']))) - print("Packets Transmitted 1024-1518 Octets........... {}".format(ns_diff(cntr['tx_1024_1518'], old_cntr['tx_1024_1518']))) - print("Packets Transmitted 1519-2047 Octets........... {}".format(ns_diff(cntr['tx_1519_2047'], old_cntr['tx_1519_2047']))) - print("Packets Transmitted 2048-4095 Octets........... {}".format(ns_diff(cntr['tx_2048_4095'], old_cntr['tx_2048_4095']))) - print("Packets Transmitted 4096-9216 Octets........... {}".format(ns_diff(cntr['tx_4096_9216'], old_cntr['tx_4096_9216']))) - print("Packets Transmitted 9217-16383 Octets.......... 
{}".format(ns_diff(cntr['tx_9217_16383'], old_cntr['tx_9217_16383']))) + print("Packets Transmitted 64 Octets.................. {}".format(ns_diff(cntr.tx_64, old_cntr.tx_64))) + print("Packets Transmitted 65-127 Octets.............. {}".format(ns_diff(cntr.tx_65_127, old_cntr.tx_65_127))) + print("Packets Transmitted 128-255 Octets............. {}".format(ns_diff(cntr.tx_128_255, old_cntr.tx_128_255))) + print("Packets Transmitted 256-511 Octets............. {}".format(ns_diff(cntr.tx_256_511, old_cntr.tx_256_511))) + print("Packets Transmitted 512-1023 Octets............ {}".format(ns_diff(cntr.tx_512_1023, old_cntr.tx_512_1023))) + print("Packets Transmitted 1024-1518 Octets........... {}".format(ns_diff(cntr.tx_1024_1518, old_cntr.tx_1024_1518))) + print("Packets Transmitted 1519-2047 Octets........... {}".format(ns_diff(cntr.tx_1519_2047, old_cntr.tx_1519_2047))) + print("Packets Transmitted 2048-4095 Octets........... {}".format(ns_diff(cntr.tx_2048_4095, old_cntr.tx_2048_4095))) + print("Packets Transmitted 4096-9216 Octets........... {}".format(ns_diff(cntr.tx_4096_9216, old_cntr.tx_4096_9216))) + print("Packets Transmitted 9217-16383 Octets.......... {}".format(ns_diff(cntr.tx_9217_16383, old_cntr.tx_9217_16383))) print("") - print("Total Packets Transmitted Successfully......... {}".format(ns_diff(cntr['tx_all'], old_cntr['tx_all']))) - print("Unicast Packets Transmitted.................... {}".format(ns_diff(cntr['tx_uca'], old_cntr['tx_uca']))) - print("Multicast Packets Transmitted.................. {}".format(ns_diff(cntr['tx_mca'], old_cntr['tx_mca']))) - print("Broadcast Packets Transmitted.................. {}".format(ns_diff(cntr['tx_bca'], old_cntr['tx_bca']))) + print("Total Packets Transmitted Successfully......... {}".format(ns_diff(cntr.tx_all, old_cntr.tx_all))) + print("Unicast Packets Transmitted.................... {}".format(ns_diff(cntr.tx_uca, old_cntr.tx_uca))) + print("Multicast Packets Transmitted.................. {}".format(ns_diff(cntr.tx_mca, old_cntr.tx_mca))) + print("Broadcast Packets Transmitted.................. {}".format(ns_diff(cntr.tx_bca, old_cntr.tx_bca))) print("Time Since Counters Last Cleared............... 
" + str(cnstat_old_dict.get('time'))) @@ -434,88 +435,88 @@ class Portstat(object): header = header_all if old_cntr is not None: table.append((key, self.get_port_state(key), - ns_diff(cntr['rx_ok'], old_cntr['rx_ok']), + ns_diff(cntr.rx_ok, old_cntr.rx_ok), format_brate(rates.rx_bps), format_prate(rates.rx_pps), format_util(rates.rx_bps, port_speed), - ns_diff(cntr['rx_err'], old_cntr['rx_err']), - ns_diff(cntr['rx_drop'], old_cntr['rx_drop']), - ns_diff(cntr['rx_ovr'], old_cntr['rx_ovr']), - ns_diff(cntr['tx_ok'], old_cntr['tx_ok']), + ns_diff(cntr.rx_err, old_cntr.rx_err), + ns_diff(cntr.rx_drop, old_cntr.rx_drop), + ns_diff(cntr.rx_ovr, old_cntr.rx_ovr), + ns_diff(cntr.tx_ok, old_cntr.tx_ok), format_brate(rates.tx_bps), format_prate(rates.tx_pps), format_util(rates.tx_bps, port_speed), - ns_diff(cntr['tx_err'], old_cntr['tx_err']), - ns_diff(cntr['tx_drop'], old_cntr['tx_drop']), - ns_diff(cntr['tx_ovr'], old_cntr['tx_ovr']))) + ns_diff(cntr.tx_err, old_cntr.tx_err), + ns_diff(cntr.tx_drop, old_cntr.tx_drop), + ns_diff(cntr.tx_ovr, old_cntr.tx_ovr))) else: table.append((key, self.get_port_state(key), - format_number_with_comma(cntr['rx_ok']), + format_number_with_comma(cntr.rx_ok), format_brate(rates.rx_bps), format_prate(rates.rx_pps), format_util(rates.rx_bps, port_speed), - format_number_with_comma(cntr['rx_err']), - format_number_with_comma(cntr['rx_drop']), - format_number_with_comma(cntr['rx_ovr']), - format_number_with_comma(cntr['tx_ok']), + format_number_with_comma(cntr.rx_err), + format_number_with_comma(cntr.rx_drop), + format_number_with_comma(cntr.rx_ovr), + format_number_with_comma(cntr.tx_ok), format_brate(rates.tx_bps), format_prate(rates.tx_pps), format_util(rates.tx_bps, port_speed), - format_number_with_comma(cntr['tx_err']), - format_number_with_comma(cntr['tx_drop']), - format_number_with_comma(cntr['tx_ovr']))) + format_number_with_comma(cntr.tx_err), + format_number_with_comma(cntr.tx_drop), + format_number_with_comma(cntr.tx_ovr))) elif errors_only: header = header_errors_only if old_cntr is not None: table.append((key, self.get_port_state(key), - ns_diff(cntr['rx_err'], old_cntr['rx_err']), - ns_diff(cntr['rx_drop'], old_cntr['rx_drop']), - ns_diff(cntr['rx_ovr'], old_cntr['rx_ovr']), - ns_diff(cntr['tx_err'], old_cntr['tx_err']), - ns_diff(cntr['tx_drop'], old_cntr['tx_drop']), - ns_diff(cntr['tx_ovr'], old_cntr['tx_ovr']))) + ns_diff(cntr.rx_err, old_cntr.rx_err), + ns_diff(cntr.rx_drop, old_cntr.rx_drop), + ns_diff(cntr.rx_ovr, old_cntr.rx_ovr), + ns_diff(cntr.tx_err, old_cntr.tx_err), + ns_diff(cntr.tx_drop, old_cntr.tx_drop), + ns_diff(cntr.tx_ovr, old_cntr.tx_ovr))) else: table.append((key, self.get_port_state(key), - format_number_with_comma(cntr['rx_err']), - format_number_with_comma(cntr['rx_drop']), - format_number_with_comma(cntr['rx_ovr']), - format_number_with_comma(cntr['tx_err']), - format_number_with_comma(cntr['tx_drop']), - format_number_with_comma(cntr['tx_ovr']))) + format_number_with_comma(cntr.rx_err), + format_number_with_comma(cntr.rx_drop), + format_number_with_comma(cntr.rx_ovr), + format_number_with_comma(cntr.tx_err), + format_number_with_comma(cntr.tx_drop), + format_number_with_comma(cntr.tx_ovr))) elif fec_stats_only: header = header_fec_only if old_cntr is not None: table.append((key, self.get_port_state(key), - ns_diff(cntr['fec_corr'], old_cntr['fec_corr']), - ns_diff(cntr['fec_uncorr'], old_cntr['fec_uncorr']), - ns_diff(cntr['fec_symbol_err'], old_cntr['fec_symbol_err']))) + ns_diff(cntr.fec_corr, old_cntr.fec_corr), + 
ns_diff(cntr.fec_uncorr, old_cntr.fec_uncorr), + ns_diff(cntr.fec_symbol_err, old_cntr.fec_symbol_err))) else: table.append((key, self.get_port_state(key), - format_number_with_comma(cntr['fec_corr']), - format_number_with_comma(cntr['fec_uncorr']), - format_number_with_comma(cntr['fec_symbol_err']))) + format_number_with_comma(cntr.fec_corr), + format_number_with_comma(cntr.fec_uncorr), + format_number_with_comma(cntr.fec_symbol_err))) elif rates_only: header = header_rates_only if old_cntr is not None: table.append((key, self.get_port_state(key), - ns_diff(cntr['rx_ok'], old_cntr['rx_ok']), + ns_diff(cntr.rx_ok, old_cntr.rx_ok), format_brate(rates.rx_bps), format_prate(rates.rx_pps), format_util(rates.rx_bps, port_speed), - ns_diff(cntr['tx_ok'], old_cntr['tx_ok']), + ns_diff(cntr.tx_ok, old_cntr.tx_ok), format_brate(rates.tx_bps), format_prate(rates.tx_pps), format_util(rates.tx_bps, port_speed))) else: table.append((key, self.get_port_state(key), - format_number_with_comma(cntr['rx_ok']), + format_number_with_comma(cntr.rx_ok), format_brate(rates.rx_bps), format_prate(rates.rx_pps), format_util(rates.rx_bps, port_speed), - format_number_with_comma(cntr['tx_ok']), + format_number_with_comma(cntr.tx_ok), format_brate(rates.tx_bps), format_prate(rates.tx_pps), format_util(rates.tx_bps, port_speed))) @@ -524,33 +525,33 @@ class Portstat(object): if old_cntr is not None: table.append((key, self.get_port_state(key), - ns_diff(cntr['rx_ok'], old_cntr['rx_ok']), + ns_diff(cntr.rx_ok, old_cntr.rx_ok), format_brate(rates.rx_bps), format_util(rates.rx_bps, port_speed), - ns_diff(cntr['rx_err'], old_cntr['rx_err']), - ns_diff(cntr['rx_drop'], old_cntr['rx_drop']), - ns_diff(cntr['rx_ovr'], old_cntr['rx_ovr']), - ns_diff(cntr['tx_ok'], old_cntr['tx_ok']), + ns_diff(cntr.rx_err, old_cntr.rx_err), + ns_diff(cntr.rx_drop, old_cntr.rx_drop), + ns_diff(cntr.rx_ovr, old_cntr.rx_ovr), + ns_diff(cntr.tx_ok, old_cntr.tx_ok), format_brate(rates.tx_bps), format_util(rates.tx_bps, port_speed), - ns_diff(cntr['tx_err'], old_cntr['tx_err']), - ns_diff(cntr['tx_drop'], old_cntr['tx_drop']), - ns_diff(cntr['tx_ovr'], old_cntr['tx_ovr']))) + ns_diff(cntr.tx_err, old_cntr.tx_err), + ns_diff(cntr.tx_drop, old_cntr.tx_drop), + ns_diff(cntr.tx_ovr, old_cntr.tx_ovr))) else: table.append((key, self.get_port_state(key), - format_number_with_comma(cntr['rx_ok']), + format_number_with_comma(cntr.rx_ok), format_brate(rates.rx_bps), format_util(rates.rx_bps, port_speed), - format_number_with_comma(cntr['rx_err']), - format_number_with_comma(cntr['rx_drop']), - format_number_with_comma(cntr['rx_ovr']), - format_number_with_comma(cntr['tx_ok']), + format_number_with_comma(cntr.rx_err), + format_number_with_comma(cntr.rx_drop), + format_number_with_comma(cntr.rx_ovr), + format_number_with_comma(cntr.tx_ok), format_brate(rates.tx_bps), format_util(rates.tx_bps, port_speed), - format_number_with_comma(cntr['tx_err']), - format_number_with_comma(cntr['tx_drop']), - format_number_with_comma(cntr['tx_ovr']))) + format_number_with_comma(cntr.tx_err), + format_number_with_comma(cntr.tx_drop), + format_number_with_comma(cntr.tx_ovr))) if table: if use_json: print(table_as_json(table, header)) @@ -641,7 +642,7 @@ Examples: if save_fresh_stats: try: - json.dump(cnstat_dict, open(cnstat_fqn_file, 'w'), default=json_serial) + pickle.dump(cnstat_dict, open(cnstat_fqn_file, 'wb')) except IOError as e: sys.exit(e.errno) else: @@ -652,7 +653,7 @@ Examples: cnstat_cached_dict = OrderedDict() if os.path.isfile(cnstat_fqn_file): try: - 
cnstat_cached_dict = json.load(open(cnstat_fqn_file, 'r')) + cnstat_cached_dict = pickle.load(open(cnstat_fqn_file, 'rb')) if not detail: print("Last cached time was " + str(cnstat_cached_dict.get('time'))) portstat.cnstat_diff_print(cnstat_dict, cnstat_cached_dict, ratestat_dict, intf_list, use_json, print_all, errors_only, fec_stats_only, rates_only, detail) diff --git a/scripts/queuestat b/scripts/queuestat index d82e7e4a6a..96a24b51a3 100755 --- a/scripts/queuestat +++ b/scripts/queuestat @@ -6,7 +6,7 @@ # ##################################################################### -import json +import _pickle as pickle import argparse import datetime import os.path @@ -33,7 +33,7 @@ except KeyError: pass from swsscommon.swsscommon import SonicV2Connector -from utilities_common.cli import json_serial, UserCache +from utilities_common.cli import UserCache from utilities_common import constants import utilities_common.multi_asic as multi_asic_util @@ -186,7 +186,7 @@ class Queuestat(object): fields[pos] = STATUS_NA elif fields[pos] != STATUS_NA: fields[pos] = str(int(counter_data)) - cntr = QueueStats._make(fields)._asdict() + cntr = QueueStats._make(fields) return cntr # Build a dictionary of the stats @@ -211,9 +211,9 @@ class Queuestat(object): if json_opt: json_output[port][key] = data continue - table.append((port, data['queuetype'] + str(data['queueindex']), - data['totalpacket'], data['totalbytes'], - data['droppacket'], data['dropbytes'])) + table.append((port, data.queuetype + str(data.queueindex), + data.totalpacket, data.totalbytes, + data.droppacket, data.dropbytes)) if json_opt: json_output[port].update(build_json(port, table)) @@ -241,15 +241,15 @@ class Queuestat(object): old_cntr = cnstat_old_dict.get(key) if old_cntr is not None: - table.append((port, cntr['queuetype'] + str(cntr['queueindex']), - ns_diff(cntr['totalpacket'], old_cntr['totalpacket']), - ns_diff(cntr['totalbytes'], old_cntr['totalbytes']), - ns_diff(cntr['droppacket'], old_cntr['droppacket']), - ns_diff(cntr['dropbytes'], old_cntr['dropbytes']))) + table.append((port, cntr.queuetype + str(cntr.queueindex), + ns_diff(cntr.totalpacket, old_cntr.totalpacket), + ns_diff(cntr.totalbytes, old_cntr.totalbytes), + ns_diff(cntr.droppacket, old_cntr.droppacket), + ns_diff(cntr.dropbytes, old_cntr.dropbytes))) else: - table.append((port, cntr['queuetype'] + str(cntr['queueindex']), - cntr['totalpacket'], cntr['totalbytes'], - cntr['droppacket'], cntr['dropbytes'])) + table.append((port, cntr.queuetype + str(cntr.queueindex), + cntr.totalpacket, cntr.totalbytes, + cntr.droppacket, cntr.dropbytes)) if json_opt: json_output[port].update(build_json(port, table)) @@ -273,7 +273,7 @@ class Queuestat(object): cnstat_fqn_file_name = cnstat_fqn_file + port if os.path.isfile(cnstat_fqn_file_name): try: - cnstat_cached_dict = json.load(open(cnstat_fqn_file_name, 'r')) + cnstat_cached_dict = pickle.load(open(cnstat_fqn_file_name, 'rb')) if json_opt: json_output[port].update({"cached_time":cnstat_cached_dict.get('time')}) json_output.update(self.cnstat_diff_print(port, cnstat_dict, cnstat_cached_dict, json_opt)) @@ -307,7 +307,7 @@ class Queuestat(object): json_output[port] = {} if os.path.isfile(cnstat_fqn_file_name): try: - cnstat_cached_dict = json.load(open(cnstat_fqn_file_name, 'r')) + cnstat_cached_dict = pickle.load(open(cnstat_fqn_file_name, 'rb')) if json_opt: json_output[port].update({"cached_time":cnstat_cached_dict.get('time')}) json_output.update(self.cnstat_diff_print(port, cnstat_dict, cnstat_cached_dict, json_opt)) @@ 
-330,7 +330,7 @@ class Queuestat(object): for port in natsorted(self.counter_port_name_map): cnstat_dict = self.get_cnstat(self.port_queues_map[port]) try: - json.dump(cnstat_dict, open(cnstat_fqn_file + port, 'w'), default=json_serial) + pickle.dump(cnstat_dict, open(cnstat_fqn_file + port, 'wb')) except IOError as e: print(e.errno, e) sys.exit(e.errno) diff --git a/scripts/tunnelstat b/scripts/tunnelstat index 3d7423e86b..8b045ec684 100755 --- a/scripts/tunnelstat +++ b/scripts/tunnelstat @@ -6,7 +6,7 @@ # ##################################################################### -import json +import _pickle as pickle import argparse import datetime import sys @@ -29,7 +29,7 @@ from collections import namedtuple, OrderedDict from natsort import natsorted from tabulate import tabulate from utilities_common.netstat import ns_diff, table_as_json, STATUS_NA, format_prate -from utilities_common.cli import json_serial, UserCache +from utilities_common.cli import UserCache from swsscommon.swsscommon import SonicV2Connector @@ -80,7 +80,7 @@ class Tunnelstat(object): counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, counter_name) if counter_data: fields[pos] = str(counter_data) - cntr = NStats._make(fields)._asdict() + cntr = NStats._make(fields) return cntr def get_rates(table_id): @@ -149,8 +149,8 @@ class Tunnelstat(object): continue rates = ratestat_dict.get(key, RateStats._make([STATUS_NA] * len(rates_key_list))) - table.append((key, data['rx_p_ok'], data['rx_b_ok'], format_prate(rates.rx_pps), - data['tx_p_ok'], data['tx_b_ok'], format_prate(rates.tx_pps))) + table.append((key, data.rx_p_ok, data.rx_b_ok, format_prate(rates.rx_pps), + data.tx_p_ok, data.tx_b_ok, format_prate(rates.tx_pps))) if use_json: print(table_as_json(table, header)) @@ -175,19 +175,19 @@ class Tunnelstat(object): rates = ratestat_dict.get(key, RateStats._make([STATUS_NA] * len(rates_key_list))) if old_cntr is not None: table.append((key, - ns_diff(cntr['rx_p_ok'], old_cntr['rx_p_ok']), - ns_diff(cntr['rx_b_ok'], old_cntr['rx_b_ok']), + ns_diff(cntr.rx_p_ok, old_cntr.rx_p_ok), + ns_diff(cntr.rx_b_ok, old_cntr.rx_b_ok), format_prate(rates.rx_pps), - ns_diff(cntr['tx_p_ok'], old_cntr['tx_p_ok']), - ns_diff(cntr['tx_b_ok'], old_cntr['tx_b_ok']), + ns_diff(cntr.tx_p_ok, old_cntr.tx_p_ok), + ns_diff(cntr.tx_b_ok, old_cntr.tx_b_ok), format_prate(rates.tx_pps))) else: table.append((key, - cntr['rx_p_ok'], - cntr['rx_b_ok'], + cntr.rx_p_ok, + cntr.rx_b_ok, format_prate(rates.rx_pps), - cntr['tx_p_ok'], - cntr['tx_b_ok'], + cntr.tx_p_ok, + cntr.tx_b_ok, format_prate(rates.tx_pps))) if use_json: print(table_as_json(table, header)) @@ -210,12 +210,12 @@ class Tunnelstat(object): if cnstat_old_dict: old_cntr = cnstat_old_dict.get(tunnel) if old_cntr: - body = body % (ns_diff(cntr['rx_p_ok'], old_cntr['rx_p_ok']), - ns_diff(cntr['rx_b_ok'], old_cntr['rx_b_ok']), - ns_diff(cntr['tx_p_ok'], old_cntr['tx_p_ok']), - ns_diff(cntr['tx_b_ok'], old_cntr['tx_b_ok'])) + body = body % (ns_diff(cntr.rx_p_ok, old_cntr.rx_p_ok), + ns_diff(cntr.rx_b_ok, old_cntr.rx_b_ok), + ns_diff(cntr.tx_p_ok, old_cntr.tx_p_ok), + ns_diff(cntr.tx_b_ok, old_cntr.tx_b_ok)) else: - body = body % (cntr['rx_p_ok'], cntr['rx_b_ok'], cntr['tx_p_ok'], cntr['tx_b_ok']) + body = body % (cntr.rx_p_ok, cntr.rx_b_ok, cntr.tx_p_ok, cntr.tx_b_ok) print(header) print(body) @@ -273,7 +273,7 @@ def main(): if save_fresh_stats: try: - json.dump(cnstat_dict, open(cnstat_fqn_file, 'w'), default=json_serial) + pickle.dump(cnstat_dict, open(cnstat_fqn_file, 'wb')) except 
IOError as e: sys.exit(e.errno) else: @@ -283,7 +283,7 @@ def main(): if wait_time_in_seconds == 0: if os.path.isfile(cnstat_fqn_file): try: - cnstat_cached_dict = json.load(open(cnstat_fqn_file, 'r')) + cnstat_cached_dict = pickle.load(open(cnstat_fqn_file, 'rb')) print("Last cached time was " + str(cnstat_cached_dict.get('time'))) if tunnel_name: tunnelstat.cnstat_single_tunnel(tunnel_name, cnstat_dict, cnstat_cached_dict) From 824680ed5de03f1750dbd32b200cd3ec67713533 Mon Sep 17 00:00:00 2001 From: saurabh17g Date: Mon, 27 Mar 2023 00:28:59 -0700 Subject: [PATCH 48/66] Resolved rc!=0 problem by replacing fgrep with awk. Added ipv4 filtering to get only v4 peers in case of show ip bgp neighbors (#2756) Co-authored-by: Saurabh Ghorpade --- scripts/generate_dump | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/generate_dump b/scripts/generate_dump index 2a7172f4c7..79f6ae1b21 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -521,12 +521,12 @@ save_bgp_neighbor() { local asic_id=${1:-""} local ns=$(get_vtysh_namespace $asic_id) - neighbor_list_v4=$(${timeout_cmd} bash -c "vtysh $ns -c 'show ip bgp neighbors' | grep 'BGP neighbor is' | awk -F '[, ]' '{print \$4}'") + neighbor_list_v4=$(${timeout_cmd} bash -c "vtysh $ns -c 'show ip bgp neighbors' | grep 'BGP neighbor is' | awk -F '[, ]' '{print \$4}' | awk /\\\./") for word in $neighbor_list_v4; do save_cmd "vtysh $ns -c \"show ip bgp neighbors $word advertised-routes\"" "ip.bgp.neighbor.$word.adv$asic_id" save_cmd "vtysh $ns -c \"show ip bgp neighbors $word routes\"" "ip.bgp.neighbor.$word.rcv$asic_id" done - neighbor_list_v6=$(${timeout_cmd} bash -c "vtysh $ns -c 'show bgp ipv6 neighbors' | grep 'BGP neighbor is' | awk -F '[, ]' '{print \$4}' | fgrep ':'") + neighbor_list_v6=$(${timeout_cmd} bash -c "vtysh $ns -c 'show bgp ipv6 neighbors' | grep 'BGP neighbor is' | awk -F '[, ]' '{print \$4}' | awk /:/") for word in $neighbor_list_v6; do save_cmd "vtysh $ns -c \"show bgp ipv6 neighbors $word advertised-routes\"" "ipv6.bgp.neighbor.$word.adv$asic_id" save_cmd "vtysh $ns -c \"show bgp ipv6 neighbors $word routes\"" "ipv6.bgp.neighbor.$word.rcv$asic_id" From 79a21ceff25cbe3fda8e09e4ad0db6f750b1be0d Mon Sep 17 00:00:00 2001 From: StormLiangMS <89824293+StormLiangMS@users.noreply.github.com> Date: Tue, 28 Mar 2023 21:40:10 +0800 Subject: [PATCH 49/66] Revert frr route check (#2761) * Revert "[route_check] remove check-frr_patch mock (#2732)" This reverts commit f27dea0cfdefbdcfc03d19136e4ae47ea72fd51f. * Revert "[route_check] fix IPv6 address handling (#2722)" This reverts commit ff6883233a3c86e993add50453c3152745eaff0d. * Revert "[route_check] implement a check for FRR routes not marked offloaded (#2531)" This reverts commit 90d70152c76f40bf7c1f8e2c6aff6eb58b951a05. --- scripts/route_check.py | 122 +++---------------------------- tests/mock_tables/config_db.json | 3 +- tests/route_check_test.py | 17 +---- tests/route_check_test_data.py | 122 +------------------------------ 4 files changed, 16 insertions(+), 248 deletions(-) diff --git a/scripts/route_check.py b/scripts/route_check.py index c832b2c6ea..c6234bcc9d 100755 --- a/scripts/route_check.py +++ b/scripts/route_check.py @@ -11,11 +11,11 @@ How: NOTE: The flow from APPL-DB to ASIC-DB takes non zero milliseconds. 1) Initiate subscribe for ASIC-DB updates. - 2) Read APPL-DB & ASIC-DB + 2) Read APPL-DB & ASIC-DB 3) Get the diff. 
- 4) If any diff, + 4) If any diff, 4.1) Collect subscribe messages for a second - 4.2) check diff against the subscribe messages + 4.2) check diff against the subscribe messages 5) Rule out local interfaces & default routes 6) If still outstanding diffs, report failure. @@ -29,7 +29,7 @@ down to ensure failure. Analyze the reported failures to match expected. You may use the exit code to verify the result as success or not. - + """ @@ -45,9 +45,7 @@ import time import signal import traceback -import subprocess -from ipaddress import ip_network from swsscommon import swsscommon from utilities_common import chassis @@ -73,9 +71,6 @@ PRINT_MSG_LEN_MAX = 1000 -FRR_CHECK_RETRIES = 3 -FRR_WAIT_TIME = 15 - class Level(Enum): ERR = 'ERR' INFO = 'INFO' @@ -146,7 +141,7 @@ def add_prefix(ip): ip = ip + PREFIX_SEPARATOR + "32" else: ip = ip + PREFIX_SEPARATOR + "128" - return str(ip_network(ip)) + return ip def add_prefix_ifnot(ip): @@ -155,7 +150,7 @@ def add_prefix_ifnot(ip): :param ip: IP to add prefix as string. :return ip with prefix """ - return str(ip_network(ip)) if ip.find(PREFIX_SEPARATOR) != -1 else add_prefix(ip) + return ip if ip.find(PREFIX_SEPARATOR) != -1 else add_prefix(ip) def is_local(ip): @@ -298,7 +293,7 @@ def get_routes(): def get_route_entries(): """ - helper to read present route entries from ASIC-DB and + helper to read present route entries from ASIC-DB and as well initiate selector for ASIC-DB:ASIC-state updates. :return (selector, subscriber, ) """ @@ -314,7 +309,7 @@ def get_route_entries(): res, e = checkout_rt_entry(k) if res: rt.append(e) - + print_message(syslog.LOG_DEBUG, json.dumps({"ASIC_ROUTE_ENTRY": sorted(rt)}, indent=4)) selector = swsscommon.Select() @@ -322,31 +317,6 @@ def get_route_entries(): return (selector, subs, sorted(rt)) -def is_suppress_fib_pending_enabled(): - """ - Returns True if FIB suppression is enabled, False otherwise - """ - cfg_db = swsscommon.ConfigDBConnector() - cfg_db.connect() - - state = cfg_db.get_entry('DEVICE_METADATA', 'localhost').get('suppress-fib-pending') - - return state == 'enabled' - - -def get_frr_routes(): - """ - Read routes from zebra through CLI command - :return frr routes dictionary - """ - - output = subprocess.check_output('show ip route json', shell=True) - routes = json.loads(output) - output = subprocess.check_output('show ipv6 route json', shell=True) - routes.update(json.loads(output)) - return routes - - def get_interfaces(): """ helper to read interface table from APPL-DB. @@ -384,7 +354,7 @@ def filter_out_local_interfaces(keys): chassis_local_intfs = chassis.get_chassis_local_interfaces() local_if_lst.update(set(chassis_local_intfs)) - + db = swsscommon.DBConnector(APPL_DB_NAME, 0) tbl = swsscommon.Table(db, 'ROUTE_TABLE') @@ -523,61 +493,6 @@ def filter_out_standalone_tunnel_routes(routes): return updated_routes -def check_frr_pending_routes(): - """ - Check FRR routes for offload flag presence by executing "show ip route json" - Returns a list of routes that have no offload flag. - """ - - missed_rt = [] - - retries = FRR_CHECK_RETRIES - for i in range(retries): - missed_rt = [] - frr_routes = get_frr_routes() - - for _, entries in frr_routes.items(): - for entry in entries: - if entry['protocol'] != 'bgp': - continue - - # TODO: Also handle VRF routes. Currently this script does not check for VRF routes so it would be incorrect for us - # to assume they are installed in ASIC_DB, so we don't handle them. 
- if entry['vrfName'] != 'default': - continue - - if not entry.get('offloaded', False): - missed_rt.append(entry) - - if not missed_rt: - break - - time.sleep(FRR_WAIT_TIME) - - return missed_rt - - -def mitigate_installed_not_offloaded_frr_routes(missed_frr_rt, rt_appl): - """ - Mitigate installed but not offloaded FRR routes. - - In case route exists in APPL_DB, this function will manually send a notification to fpmsyncd - to trigger the flow that sends offload flag to zebra. - - It is designed to mitigate a problem when orchagent fails to send notification about installed route to fpmsyncd - or fpmsyncd not being able to read the notification or in case zebra fails to receive offload update due to variety of reasons. - All of the above mentioned cases must be considered as a bug, but even in that case we will report an error in the log but - given that this script ensures the route is installed in the hardware it will automitigate such a bug. - """ - db = swsscommon.DBConnector('APPL_STATE_DB', 0) - response_producer = swsscommon.NotificationProducer(db, f'{APPL_DB_NAME}_{swsscommon.APP_ROUTE_TABLE_NAME}_RESPONSE_CHANNEL') - for entry in [entry for entry in missed_frr_rt if entry['prefix'] in rt_appl]: - fvs = swsscommon.FieldValuePairs([('err_str', 'SWSS_RC_SUCCESS'), ('protocol', entry['protocol'])]) - response_producer.send('SWSS_RC_SUCCESS', entry['prefix'], fvs) - - print_message(syslog.LOG_ERR, f'Mitigated route {entry["prefix"]}') - - def get_soc_ips(config_db): mux_table = config_db.get_table('MUX_CABLE') soc_ips = [] @@ -621,7 +536,7 @@ def check_routes(): """ The heart of this script which runs the checks. Read APPL-DB & ASIC-DB, the relevant tables for route checking. - Checkout routes in ASIC-DB to match APPL-DB, discounting local & + Checkout routes in ASIC-DB to match APPL-DB, discounting local & default routes. In case of missed / unexpected entries in ASIC, it might be due to update latency between APPL & ASIC DBs. So collect ASIC-DB subscribe updates for a second, and checkout if you see SET @@ -630,16 +545,12 @@ def check_routes(): If there are still some unjustifiable diffs, between APPL & ASIC DB, related to routes report failure, else all good. - If there are FRR routes that aren't marked offloaded but all APPL & ASIC DB - routes are in sync report failure and perform a mitigation action. - :return (0, None) on sucess, else (-1, results) where results holds the unjustifiable entries. """ intf_appl_miss = [] rt_appl_miss = [] rt_asic_miss = [] - rt_frr_miss = [] results = {} adds = [] @@ -688,22 +599,11 @@ def check_routes(): if rt_asic_miss: results["Unaccounted_ROUTE_ENTRY_TABLE_entries"] = rt_asic_miss - rt_frr_miss = check_frr_pending_routes() - - if rt_frr_miss: - results["missed_FRR_routes"] = rt_frr_miss - if results: print_message(syslog.LOG_WARNING, "Failure results: {", json.dumps(results, indent=4), "}") print_message(syslog.LOG_WARNING, "Failed. 
Look at reported mismatches above") print_message(syslog.LOG_WARNING, "add: ", json.dumps(adds, indent=4)) print_message(syslog.LOG_WARNING, "del: ", json.dumps(deletes, indent=4)) - - if rt_frr_miss and not rt_appl_miss and not rt_asic_miss: - print_message(syslog.LOG_ERR, "Some routes are not set offloaded in FRR but all routes in APPL_DB and ASIC_DB are in sync") - if is_suppress_fib_pending_enabled(): - mitigate_installed_not_offloaded_frr_routes(rt_frr_miss, rt_appl) - return -1, results else: print_message(syslog.LOG_INFO, "All good!") @@ -749,7 +649,7 @@ def main(): return ret, res else: return ret, res - + if __name__ == "__main__": diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index 3a2b681a6e..22744365f1 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -842,8 +842,7 @@ "mac": "1d:34:db:16:a6:00", "platform": "x86_64-mlnx_msn3800-r0", "peer_switch": "sonic-switch", - "type": "ToRRouter", - "suppress-fib-pending": "enabled" + "type": "ToRRouter" }, "SNMP_COMMUNITY|msft": { "TYPE": "RO" diff --git a/tests/route_check_test.py b/tests/route_check_test.py index 118e9eab56..85e6a64a95 100644 --- a/tests/route_check_test.py +++ b/tests/route_check_test.py @@ -7,7 +7,7 @@ import time from sonic_py_common import device_info from unittest.mock import MagicMock, patch -from tests.route_check_test_data import APPL_DB, ARGS, ASIC_DB, CONFIG_DB, DEFAULT_CONFIG_DB, DESCR, OP_DEL, OP_SET, PRE, RESULT, RET, TEST_DATA, UPD, FRR_ROUTES +from tests.route_check_test_data import APPL_DB, ARGS, ASIC_DB, CONFIG_DB, DEFAULT_CONFIG_DB, DESCR, OP_DEL, OP_SET, PRE, RESULT, RET, TEST_DATA, UPD import pytest @@ -239,7 +239,6 @@ def setup(self): def init(self): route_check.UNIT_TESTING = 1 - route_check.FRR_WAIT_TIME = 0 @pytest.fixture def force_hang(self): @@ -259,8 +258,7 @@ def mock_dbs(self): patch("route_check.swsscommon.Table") as mock_table, \ patch("route_check.swsscommon.Select") as mock_sel, \ patch("route_check.swsscommon.SubscriberStateTable") as mock_subs, \ - patch("route_check.swsscommon.ConfigDBConnector", return_value=mock_config_db), \ - patch("route_check.swsscommon.NotificationProducer"): + patch("route_check.swsscommon.ConfigDBConnector", return_value=mock_config_db): device_info.get_platform = MagicMock(return_value='unittest') set_mock(mock_table, mock_conn, mock_sel, mock_subs, mock_config_db) yield @@ -274,16 +272,7 @@ def test_route_check(self, mock_dbs, test_num): set_test_case_data(ct_data) logger.info("Running test case {}: {}".format(test_num, ct_data[DESCR])) - with patch('sys.argv', ct_data[ARGS].split()), \ - patch('route_check.subprocess.check_output') as mock_check_output: - - routes = ct_data.get(FRR_ROUTES, {}) - - def side_effect(*args, **kwargs): - return json.dumps(routes) - - mock_check_output.side_effect = side_effect - + with patch('sys.argv', ct_data[ARGS].split()): ret, res = route_check.main() expect_ret = ct_data[RET] if RET in ct_data else 0 expect_res = ct_data[RESULT] if RESULT in ct_data else None diff --git a/tests/route_check_test_data.py b/tests/route_check_test_data.py index 7ed1eee41f..9e4cd3a009 100644 --- a/tests/route_check_test_data.py +++ b/tests/route_check_test_data.py @@ -6,7 +6,6 @@ CONFIG_DB = 4 PRE = "pre-value" UPD = "update" -FRR_ROUTES = "frr-routes" RESULT = "res" OP_SET = "SET" @@ -360,124 +359,5 @@ } } } - }, - "10": { - DESCR: "basic good one, check FRR routes", - ARGS: "route_check -m INFO -i 1000", - PRE: { - APPL_DB: { - ROUTE_TABLE: { - "0.0.0.0/0" : { 
"ifname": "portchannel0" }, - "10.10.196.12/31" : { "ifname": "portchannel0" }, - }, - INTF_TABLE: { - "PortChannel1013:10.10.196.24/31": {}, - "PortChannel1023:2603:10b0:503:df4::5d/126": {}, - "PortChannel1024": {} - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} - } - }, - }, - FRR_ROUTES: { - "0.0.0.0/0": [ - { - "prefix": "0.0.0.0/0", - "vrfName": "default", - "protocol": "bgp", - "offloaded": "true", - }, - ], - "10.10.196.12/31": [ - { - "prefix": "10.10.196.12/31", - "vrfName": "default", - "protocol": "bgp", - "offloaded": "true", - }, - ], - "10.10.196.24/31": [ - { - "protocol": "connected", - }, - ], - }, - }, - "11": { - DESCR: "failure test case, missing FRR routes", - ARGS: "route_check -m INFO -i 1000", - PRE: { - APPL_DB: { - ROUTE_TABLE: { - "0.0.0.0/0" : { "ifname": "portchannel0" }, - "10.10.196.12/31" : { "ifname": "portchannel0" }, - }, - INTF_TABLE: { - "PortChannel1013:10.10.196.24/31": {}, - "PortChannel1023:2603:10b0:503:df4::5d/126": {}, - "PortChannel1024": {} - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} - } - }, - }, - FRR_ROUTES: { - "0.0.0.0/0": [ - { - "prefix": "0.0.0.0/0", - "vrfName": "default", - "protocol": "bgp", - "offloaded": "true", - }, - ], - "10.10.196.12/31": [ - { - "prefix": "10.10.196.12/31", - "vrfName": "default", - "protocol": "bgp", - }, - ], - "10.10.196.24/31": [ - { - "protocol": "connected", - }, - ], - }, - RESULT: { - "missed_FRR_routes": [ - {"prefix": "10.10.196.12/31", "vrfName": "default", "protocol": "bgp"} - ], - }, - RET: -1, - }, - "10": { - DESCR: "basic good one with IPv6 address", - ARGS: "route_check -m INFO -i 1000", - PRE: { - APPL_DB: { - ROUTE_TABLE: { - }, - INTF_TABLE: { - "PortChannel1013:2000:31:0:0::1/64": {}, - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "2000:31::1/128" + RT_ENTRY_KEY_SUFFIX: {}, - } - } - } - }, + } } From 53f611b7910ea2b9ec7d1c564d78f6d27dea6952 Mon Sep 17 00:00:00 2001 From: jingwenxie Date: Wed, 29 Mar 2023 02:14:24 +0800 Subject: [PATCH 50/66] Revert "Convert IPv6 addresses to lowercase in apply-patch (#2299)" (#2758) This reverts commit 28b6ba5fc11f65abaf421c70159a605d233eda41. There are some issues when GCU tries to remove the interface IP in some tests, such as add rack test. In the initial config, the INTERFACE's IPv6 was all loaded in uppercase by default. "INTERFACE": { "Ethernet68": {}, "Ethernet68|10.0.0.34/31": {}, "Ethernet68|FC00::45/126": {}, "Ethernet72": {}, "Ethernet72|10.0.0.36/31": {}, "Ethernet72|FC00::49/126": {}, GCU will never be able to remove that IP because IPv6 was always translated to lowercase due to #2299 . It reported the error can't remove a non-existent object, thus making GCU fail. #2299 is to deal with this issue: https://github.com/sonic-net/sonic-buildimage/issues/11622. Although config CLI always translates uppercase to lowercase when adding an IP, the user can have two choices to remove that IP: One is to use config CLI to remove that IP no matter uppercase or lowercase. 
Another way is to use GCU. In order to use GCU, the user has to check the IP format saved in ConfigDB because GCU operation does differentiate between uppercase and lowercase. #### What I did Revert #2299 --- config/main.py | 14 -------------- tests/ip_config_input/patch_ipv6.test | 6 ------ tests/ip_config_test.py | 20 -------------------- 3 files changed, 40 deletions(-) delete mode 100644 tests/ip_config_input/patch_ipv6.test diff --git a/config/main.py b/config/main.py index 44f633bf44..fa4bd0f0e9 100644 --- a/config/main.py +++ b/config/main.py @@ -1362,20 +1362,6 @@ def apply_patch(ctx, patch_file_path, format, dry_run, ignore_non_yang_tables, i patch_as_json = json.loads(text) patch = jsonpatch.JsonPatch(patch_as_json) - # convert IPv6 addresses to lowercase - for patch_line in patch: - if 'remove' == patch_line['op']: - match = re.search(r"(?P/INTERFACE/\w+\|)(?P([a-fA-F0-9]{0,4}[:~]|::){1,7}[a-fA-F0-9]{0,4})" - "(?P.*)", str.format(patch_line['path'])) - if match: - prefix = match.group('prefix') - ipv6_address_str = match.group('ipv6_address') - suffix = match.group('suffix') - ipv6_address_str = ipv6_address_str.lower() - click.secho("converted ipv6 address to lowercase {} with prefix {} in value: {}" - .format(ipv6_address_str, prefix, patch_line['path'])) - patch_line['path'] = prefix + ipv6_address_str + suffix - config_format = ConfigFormat[format.upper()] GenericUpdater().apply_patch(patch, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_path) diff --git a/tests/ip_config_input/patch_ipv6.test b/tests/ip_config_input/patch_ipv6.test deleted file mode 100644 index 00b43fda4c..0000000000 --- a/tests/ip_config_input/patch_ipv6.test +++ /dev/null @@ -1,6 +0,0 @@ -[ - { - "path": "/INTERFACE/Ethernet12|FC00::1~1126", - "op": "remove" - } -] diff --git a/tests/ip_config_test.py b/tests/ip_config_test.py index f315b11d82..2f262a4a09 100644 --- a/tests/ip_config_test.py +++ b/tests/ip_config_test.py @@ -1,5 +1,3 @@ -import json -import jsonpatch import os import traceback from unittest import mock @@ -14,9 +12,6 @@ from utilities_common.db import Db import utilities_common.bgp_util as bgp_util -test_path = os.path.dirname(os.path.abspath(__file__)) -ip_config_input_path = os.path.join(test_path, "ip_config_input") - ERROR_MSG = "Error: IP address is not valid" INVALID_VRF_MSG ="""\ @@ -243,21 +238,6 @@ def test_add_del_interface_shortened_ipv6_with_leading_zeros(self): assert result.exit_code != 0 assert ('Ethernet68', '3000::1/64') not in db.cfgdb.get_table('INTERFACE') - def test_remove_interface_case_sensitive_mock_ipv6_w_apply_patch(self): - runner = CliRunner() - any_patch_as_json = [{"op": "remove", "path": "/INTERFACE/Ethernet12|FC00::1~1126"}] - any_patch = jsonpatch.JsonPatch(any_patch_as_json) - any_patch_as_text = json.dumps(any_patch_as_json) - ipv6_patch_file = os.path.join(ip_config_input_path, 'patch_ipv6.test') - - # config apply-patch patch - mock_generic_updater = mock.Mock() - with mock.patch('config.main.GenericUpdater', return_value=mock_generic_updater): - with mock.patch('builtins.open', mock.mock_open(read_data=any_patch_as_text)): - result = runner.invoke(config.config.commands["apply-patch"], [ipv6_patch_file], catch_exceptions=False) - print(result.exit_code, result.output) - assert "converted ipv6 address to lowercase fc00::1~1126 with prefix /INTERFACE/Ethernet12| in value: /INTERFACE/Ethernet12|FC00::1~1126" in result.output - def test_intf_vrf_bind_unbind(self): runner = CliRunner() db = Db() From 
832ef9c4c50e8cd3c0c745b34dac39280ada319b Mon Sep 17 00:00:00 2001
From: jingwenxie
Date: Wed, 29 Mar 2023 22:11:08 +0800
Subject: [PATCH 51/66] Fix bug in GCU vlanintf_validator (#2765)

What I did
Fix a bug in vlanintf_validator: os.system() returns 0 on success, so the old
check `if not rc: return False` failed validation exactly when the
`ip neigh flush` command succeeded.

How I did it
Invert the condition so validation fails only when the command returns a
non-zero exit code.

How to verify it
E2E test
---
 generic_config_updater/services_validator.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/generic_config_updater/services_validator.py b/generic_config_updater/services_validator.py
index 5d8c1f0d51..497cb4ee74 100644
--- a/generic_config_updater/services_validator.py
+++ b/generic_config_updater/services_validator.py
@@ -119,6 +119,6 @@ def vlanintf_validator(old_config, upd_config, keys):
     for key in deleted_keys:
         iface, iface_ip = key
         rc = os.system(f"ip neigh flush dev {iface} {iface_ip}")
-        if not rc:
+        if rc:
             return False
     return True

From e6f9f46413dc1dfc9da778984d81ff8ee8234194 Mon Sep 17 00:00:00 2001
From: isabelmsft <67024108+isabelmsft@users.noreply.github.com>
Date: Fri, 31 Mar 2023 16:28:20 -0700
Subject: [PATCH 52/66] [GCU] Performance improvement by making patch sorting
 optional (#2764)

---
 config/validated_config_db_connector.py   |  5 +++--
 generic_config_updater/generic_updater.py | 22 ++++++++++++-------
 .../generic_updater_test.py               |  4 ++--
 3 files changed, 19 insertions(+), 12 deletions(-)

diff --git a/config/validated_config_db_connector.py b/config/validated_config_db_connector.py
index 7f6e230ace..25d8ef5f71 100644
--- a/config/validated_config_db_connector.py
+++ b/config/validated_config_db_connector.py
@@ -94,7 +94,8 @@ def apply_patch(self, gcu_patch, table):

         config_format = ConfigFormat[format.upper()]
         try:
-            GenericUpdater().apply_patch(patch=gcu_patch, config_format=config_format, verbose=False, dry_run=False, ignore_non_yang_tables=False, ignore_paths=None)
+            # Because all writes to ConfigDB through ValidatedConfigDBConnector are simple and don't require sorting, we set sort=False to skip sorting and improve performance
+            GenericUpdater().apply_patch(patch=gcu_patch, config_format=config_format, verbose=False, dry_run=False, ignore_non_yang_tables=False, ignore_paths=None, sort=False)
         except EmptyTableError:
             self.validated_delete_table(table)

@@ -103,7 +104,7 @@ def validated_delete_table(self, table):
         format = ConfigFormat.CONFIGDB.name
         config_format = ConfigFormat[format.upper()]
         try:
-            GenericUpdater().apply_patch(patch=gcu_patch, config_format=config_format, verbose=False, dry_run=False, ignore_non_yang_tables=False, ignore_paths=None)
+            GenericUpdater().apply_patch(patch=gcu_patch, config_format=config_format, verbose=False, dry_run=False, ignore_non_yang_tables=False, ignore_paths=None, sort=False)
         except ValueError as e:
             logger = genericUpdaterLogging.get_logger(title="Patch Applier", print_all_to_console=True)
             logger.log_notice("Unable to remove entry, as doing so will result in invalid config. 
Error: {}".format(e)) diff --git a/generic_config_updater/generic_updater.py b/generic_config_updater/generic_updater.py index aa41853204..f9aab82336 100644 --- a/generic_config_updater/generic_updater.py +++ b/generic_config_updater/generic_updater.py @@ -36,7 +36,7 @@ def __init__(self, self.patchsorter = patchsorter if patchsorter is not None else StrictPatchSorter(self.config_wrapper, self.patch_wrapper) self.changeapplier = changeapplier if changeapplier is not None else ChangeApplier() - def apply(self, patch): + def apply(self, patch, sort=True): self.logger.log_notice("Patch application starting.") self.logger.log_notice(f"Patch: {patch}") @@ -63,11 +63,17 @@ def apply(self, patch): f"Table{'s' if len(empty_tables) != 1 else ''}: {empty_tables_txt}") # Generate list of changes to apply - self.logger.log_notice("Sorting patch updates.") - changes = self.patchsorter.sort(patch) + if sort: + self.logger.log_notice("Sorting patch updates.") + changes = self.patchsorter.sort(patch) + else: + self.logger.log_notice("Converting patch to JsonChange.") + changes = [JsonChange(jsonpatch.JsonPatch([element])) for element in patch] + changes_len = len(changes) - self.logger.log_notice(f"The patch was sorted into {changes_len} " \ - f"change{'s' if changes_len != 1 else ''}{':' if changes_len > 0 else '.'}") + self.logger.log_notice(f"The patch was converted into {changes_len} " \ + f"change{'s' if changes_len != 1 else ''}{':' if changes_len > 0 else '.'}") + for change in changes: self.logger.log_notice(f" * {change}") @@ -284,7 +290,7 @@ def __init__(self, self.config_lock = config_lock - def apply(self, patch): + def apply(self, patch, sort=True): self.execute_write_action(Decorator.apply, self, patch) def replace(self, target_config): @@ -407,9 +413,9 @@ def __init__(self, generic_update_factory=None): self.generic_update_factory = \ generic_update_factory if generic_update_factory is not None else GenericUpdateFactory() - def apply_patch(self, patch, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths): + def apply_patch(self, patch, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths, sort=True): patch_applier = self.generic_update_factory.create_patch_applier(config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths) - patch_applier.apply(patch) + patch_applier.apply(patch, sort) def replace(self, target_config, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths): config_replacer = self.generic_update_factory.create_config_replacer(config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths) diff --git a/tests/generic_config_updater/generic_updater_test.py b/tests/generic_config_updater/generic_updater_test.py index aab2eae275..96c25e3552 100644 --- a/tests/generic_config_updater/generic_updater_test.py +++ b/tests/generic_config_updater/generic_updater_test.py @@ -526,7 +526,7 @@ def setUp(self): def test_apply_patch__creates_applier_and_apply(self): # Arrange patch_applier = Mock() - patch_applier.apply.side_effect = create_side_effect_dict({(str(Files.SINGLE_OPERATION_SONIC_YANG_PATCH),): 0}) + patch_applier.apply.side_effect = create_side_effect_dict({(str(Files.SINGLE_OPERATION_SONIC_YANG_PATCH), "True"): 0}) factory = Mock() factory.create_patch_applier.side_effect = \ @@ -548,7 +548,7 @@ def test_apply_patch__creates_applier_and_apply(self): self.any_ignore_paths) # Assert - patch_applier.apply.assert_has_calls([call(Files.SINGLE_OPERATION_SONIC_YANG_PATCH)]) + 
patch_applier.apply.assert_has_calls([call(Files.SINGLE_OPERATION_SONIC_YANG_PATCH, True)])

     def test_replace__creates_replacer_and_replace(self):
         # Arrange

From eaf46677160946e125f88af9ce9538c373e9e1d7 Mon Sep 17 00:00:00 2001
From: jfeng-arista <98421150+jfeng-arista@users.noreply.github.com>
Date: Mon, 3 Apr 2023 14:34:52 -0700
Subject: [PATCH 53/66] [voq] Add fabric_ns to the ns_list when display_option
 is DISPLAY_ALL. (#2717)

The get_ns_list_based_on_options function only gets front_ns and back_ns in
the current code. This change adds fabric_ns to the ns_list when
display_option is DISPLAY_ALL. This enables the fabric-related tests on VoQ
chassis, e.g. the change in sonic-net/sonic-mgmt#6620
---
 utilities_common/multi_asic.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/utilities_common/multi_asic.py b/utilities_common/multi_asic.py
index 9e213f67f1..b1f24e12e8 100644
--- a/utilities_common/multi_asic.py
+++ b/utilities_common/multi_asic.py
@@ -63,12 +63,13 @@ def get_ns_list_based_on_options(self):
             namespaces = multi_asic.get_all_namespaces()
             if self.namespace_option is None:
                 if self.get_display_option() == constants.DISPLAY_ALL:
-                    ns_list = namespaces['front_ns'] + namespaces['back_ns']
+                    ns_list = namespaces['front_ns'] + namespaces['back_ns'] + namespaces['fabric_ns']
                 else:
                     ns_list = namespaces['front_ns']
             else:
                 if self.namespace_option not in namespaces['front_ns'] and \
-                    self.namespace_option not in namespaces['back_ns']:
+                    self.namespace_option not in namespaces['back_ns'] and \
+                    self.namespace_option not in namespaces['fabric_ns']:
                     raise ValueError(
                         'Unknown Namespace {}'.format(self.namespace_option))
                 ns_list = [self.namespace_option]

From dbb214980ab8c7d626deb51eb7bb2d61e58af55a Mon Sep 17 00:00:00 2001
From: Saikrishna Arcot
Date: Tue, 4 Apr 2023 15:25:53 -0700
Subject: [PATCH 54/66] [ci] Switch to using regular swss-common artifacts
 (#2780)

The swss-common pipeline is now built for Bullseye.

Signed-off-by: Saikrishna Arcot
---
 azure-pipelines.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 46369c01b2..4a00e67440 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -69,7 +69,7 @@ stages:
           source: specific
           project: build
           pipeline: 9
-          artifact: sonic-swss-common.bullseye.amd64
+          artifact: sonic-swss-common
           runVersion: 'latestFromBranch'
           runBranch: 'refs/heads/master'
         displayName: "Download sonic swss common deb packages"

From 1802f34a29ca607aa8df021d2ac89f550485a234 Mon Sep 17 00:00:00 2001
From: Mai Bui
Date: Thu, 6 Apr 2023 13:11:21 -0400
Subject: [PATCH 55/66] [sfputil] replace shell=True (#2727)

Signed-off-by: maipbui
#### What I did
Calling `subprocess()` with `shell=True` is dangerous: passing it anything other than a static string can lead to command injection. 
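To make the risk concrete, here is a minimal sketch (not part of this patch; the `user_input` value is a hypothetical attacker-controlled string, and a POSIX shell is assumed) contrasting the two calling conventions:

```python
import subprocess

# Hypothetical attacker-controlled value, e.g. a port name read from the CLI.
user_input = "Ethernet0; id"

# Dangerous: shell=True hands the whole string to /bin/sh, so the ';' acts as
# a command separator and 'id' runs as an unintended second command.
subprocess.check_output("echo " + user_input, shell=True)

# Safe: an argv list with shell=False (the default) passes user_input to the
# program as a single literal argument; no shell ever parses it.
out = subprocess.check_output(["echo", user_input], universal_newlines=True)
print(out)  # echoes the raw string, ';' and all
```

The list form is the convention the change below adopts.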
#### How I did it `subprocess()` - use `shell=False` instead, use list of strings Ref: [https://semgrep.dev/docs/cheat-sheets/python-command-injection/#mitigation](https://semgrep.dev/docs/cheat-sheets/python-command-injection/#mitigation) #### How to verify it Pass UT Manual test ``` admin@str-s6000-acs-12:~$ sudo sfputil show error-status -hw Port Error Status ----------- -------------- Ethernet0 Ethernet4 OK Ethernet8 OK Ethernet12 OK Ethernet16 OK Ethernet20 OK Ethernet24 OK Ethernet28 OK ``` --- sfputil/main.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sfputil/main.py b/sfputil/main.py index 8992e9238a..726ed2feba 100644 --- a/sfputil/main.py +++ b/sfputil/main.py @@ -906,11 +906,11 @@ def fetch_error_status_from_platform_api(port): " errors=['{}:{}'.format(sfp.index, 'OK (Not implemented)') for sfp in sfp_list]\n" \ "print(errors)\n" - get_error_status_command = "docker exec pmon python3 -c \"{}{}{}\"".format( - init_chassis_code, generate_sfp_list_code, get_error_status_code) + get_error_status_command = ["docker", "exec", "pmon", "python3", "-c", "{}{}{}".format( + init_chassis_code, generate_sfp_list_code, get_error_status_code)] # Fetch error status from pmon docker try: - output = subprocess.check_output(get_error_status_command, shell=True, universal_newlines=True) + output = subprocess.check_output(get_error_status_command, universal_newlines=True) except subprocess.CalledProcessError as e: click.Abort("Error! Unable to fetch error status for SPF modules. Error code = {}, error messages: {}".format(e.returncode, e.output)) return None From eab54667f0637983f76f5894eaa6f73237014a9e Mon Sep 17 00:00:00 2001 From: isabelmsft <67024108+isabelmsft@users.noreply.github.com> Date: Thu, 6 Apr 2023 15:12:12 -0700 Subject: [PATCH 56/66] YANG Validation for MCLAG, NAT, MUXCABLE tables (#2755) --- config/console.py | 61 ++++++--- config/kube.py | 15 +- config/main.py | 61 ++++++--- config/mclag.py | 133 +++++++++++------- config/muxcable.py | 30 ++-- config/nat.py | 311 ++++++++++++++++++++++++++++-------------- tests/config_test.py | 62 +++++++++ tests/console_test.py | 101 ++++++++++++++ tests/kube_test.py | 25 ++++ tests/mclag_test.py | 128 +++++++++++++++-- tests/nat_test.py | 267 ++++++++++++++++++++++++++++++++++++ tests/sflow_test.py | 20 +++ 12 files changed, 1003 insertions(+), 211 deletions(-) create mode 100644 tests/nat_test.py diff --git a/config/console.py b/config/console.py index b28aeda672..1ecf80c381 100644 --- a/config/console.py +++ b/config/console.py @@ -1,6 +1,7 @@ import click import utilities_common.cli as clicommon - +from .validated_config_db_connector import ValidatedConfigDBConnector +from jsonpatch import JsonPatchConflict # # 'console' group ('config console ...') # @@ -16,14 +17,18 @@ def console(): @clicommon.pass_db def enable_console_switch(db): """Enable console switch""" - config_db = db.cfgdb + config_db = ValidatedConfigDBConnector(db.cfgdb) table = "CONSOLE_SWITCH" dataKey1 = 'console_mgmt' dataKey2 = 'enabled' data = { dataKey2 : "yes" } - config_db.mod_entry(table, dataKey1, data) + try: + config_db.mod_entry(table, dataKey1, data) + except ValueError as e: + ctx = click.get_current_context() + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) # # 'console disable' group ('config console disable') @@ -32,14 +37,18 @@ def enable_console_switch(db): @clicommon.pass_db def disable_console_switch(db): """Disable console switch""" - config_db = db.cfgdb + config_db = ValidatedConfigDBConnector(db.cfgdb) table = "CONSOLE_SWITCH" dataKey1 = 'console_mgmt' dataKey2 = 'enabled' data = { dataKey2 : "no" } - config_db.mod_entry(table, dataKey1, data) + try: + config_db.mod_entry(table, dataKey1, data) + except ValueError as e: + ctx = click.get_current_context() + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'console add' group ('config console add ...') @@ -52,7 +61,7 @@ def disable_console_switch(db): @click.option('--devicename', '-d', metavar='', required=False) def add_console_setting(db, linenum, baud, flowcontrol, devicename): """Add Console-realted configuration tasks""" - config_db = db.cfgdb + config_db = ValidatedConfigDBConnector(db.cfgdb) table = "CONSOLE_PORT" dataKey1 = 'baud_rate' @@ -72,7 +81,10 @@ def add_console_setting(db, linenum, baud, flowcontrol, devicename): ctx.fail("Given device name {} has been used. Please enter a valid device name or remove the existing one !!".format(devicename)) console_entry[dataKey3] = devicename - config_db.set_entry(table, linenum, console_entry) + try: + config_db.set_entry(table, linenum, console_entry) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # @@ -83,15 +95,18 @@ def add_console_setting(db, linenum, baud, flowcontrol, devicename): @click.argument('linenum', metavar='', required=True, type=click.IntRange(0, 65535)) def remove_console_setting(db, linenum): """Remove Console-related configuration tasks""" - config_db = db.cfgdb + config_db = ValidatedConfigDBConnector(db.cfgdb) + ctx = click.get_current_context() table = "CONSOLE_PORT" data = config_db.get_entry(table, linenum) if data: - config_db.mod_entry(table, linenum, None) + try: + config_db.set_entry(table, linenum, None) + except JsonPatchConflict as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) else: - ctx = click.get_current_context() ctx.fail("Trying to delete console port setting, which is not present.") # @@ -103,7 +118,7 @@ def remove_console_setting(db, linenum): @click.argument('devicename', metavar='', required=False) def upate_console_remote_device_name(db, linenum, devicename): """Update remote device name for a console line""" - config_db = db.cfgdb + config_db = ValidatedConfigDBConnector(db.cfgdb) ctx = click.get_current_context() table = "CONSOLE_PORT" @@ -117,12 +132,18 @@ def upate_console_remote_device_name(db, linenum, devicename): elif not devicename: # remove configuration key from console setting if user not give a remote device name data.pop(dataKey, None) - config_db.mod_entry(table, linenum, data) + try: + config_db.mod_entry(table, linenum, data) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) elif isExistingSameDevice(config_db, devicename, table): ctx.fail("Given device name {} has been used. Please enter a valid device name or remove the existing one !!".format(devicename)) else: data[dataKey] = devicename - config_db.mod_entry(table, linenum, data) + try: + config_db.mod_entry(table, linenum, data) + except ValueError as e: + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) else: ctx.fail("Trying to update console port setting, which is not present.") @@ -135,7 +156,7 @@ def upate_console_remote_device_name(db, linenum, devicename): @click.argument('baud', metavar='', required=True, type=click.INT) def update_console_baud(db, linenum, baud): """Update baud for a console line""" - config_db = db.cfgdb + config_db = ValidatedConfigDBConnector(db.cfgdb) ctx = click.get_current_context() table = "CONSOLE_PORT" @@ -149,7 +170,10 @@ def update_console_baud(db, linenum, baud): return else: data[dataKey] = baud - config_db.mod_entry(table, linenum, data) + try: + config_db.mod_entry(table, linenum, data) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) else: ctx.fail("Trying to update console port setting, which is not present.") @@ -162,7 +186,7 @@ def update_console_baud(db, linenum, baud): @click.argument('linenum', metavar='', required=True, type=click.IntRange(0, 65535)) def update_console_flow_control(db, mode, linenum): """Update flow control setting for a console line""" - config_db = db.cfgdb + config_db = ValidatedConfigDBConnector(db.cfgdb) ctx = click.get_current_context() table = "CONSOLE_PORT" @@ -177,7 +201,10 @@ def update_console_flow_control(db, mode, linenum): return else: data[dataKey] = innerMode - config_db.mod_entry(table, linenum, data) + try: + config_db.mod_entry(table, linenum, data) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) else: ctx.fail("Trying to update console port setting, which is not present.") diff --git a/config/kube.py b/config/kube.py index 706a5ab260..526a4dd028 100644 --- a/config/kube.py +++ b/config/kube.py @@ -1,6 +1,7 @@ import click from utilities_common.cli import AbbreviationGroup, pass_db +from .validated_config_db_connector import ValidatedConfigDBConnector from .utils import log @@ -21,22 +22,30 @@ KUBE_LABEL_SET_KEY = "SET" def _update_kube_server(db, field, val): - db_data = db.cfgdb.get_entry(KUBE_SERVER_TABLE_NAME, KUBE_SERVER_TABLE_KEY) + config_db = ValidatedConfigDBConnector(db.cfgdb) + db_data = config_db.get_entry(KUBE_SERVER_TABLE_NAME, KUBE_SERVER_TABLE_KEY) def_data = { KUBE_SERVER_IP: "", KUBE_SERVER_PORT: "6443", KUBE_SERVER_INSECURE: "True", KUBE_SERVER_DISABLE: "False" } + ctx = click.get_current_context() for f in def_data: if db_data and f in db_data: if f == field and db_data[f] != val: - db.cfgdb.mod_entry(KUBE_SERVER_TABLE_NAME, KUBE_SERVER_TABLE_KEY, {field: val}) + try: + config_db.mod_entry(KUBE_SERVER_TABLE_NAME, KUBE_SERVER_TABLE_KEY, {field: val}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) log.log_info("modify kubernetes server entry {}={}".format(field,val)) else: # Missing field. Set to default or given value v = val if f == field else def_data[f] - db.cfgdb.mod_entry(KUBE_SERVER_TABLE_NAME, KUBE_SERVER_TABLE_KEY, {f: v}) + try: + config_db.mod_entry(KUBE_SERVER_TABLE_NAME, KUBE_SERVER_TABLE_KEY, {f: v}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) log.log_info("set kubernetes server entry {}={}".format(f,v)) diff --git a/config/main.py b/config/main.py index fa4bd0f0e9..9e77211c1e 100644 --- a/config/main.py +++ b/config/main.py @@ -15,6 +15,7 @@ import copy from jsonpatch import JsonPatchConflict +from jsonpointer import JsonPointerException from collections import OrderedDict from generic_config_updater.generic_updater import GenericUpdater, ConfigFormat from minigraph import parse_device_desc_xml, minigraph_encoder @@ -4149,7 +4150,7 @@ def breakout(ctx, interface_name, mode, verbose, force_remove_dependencies, load raise click.Abort() # Get the config_db connector - config_db = ctx.obj['config_db'] + config_db = ValidatedConfigDBConnector(ctx.obj['config_db']) target_brkout_mode = mode @@ -4228,7 +4229,10 @@ def breakout(ctx, interface_name, mode, verbose, force_remove_dependencies, load if interface_name not in brkout_cfg_keys: click.secho("[ERROR] {} is not present in 'BREAKOUT_CFG' Table!".format(interface_name), fg='red') raise click.Abort() - config_db.set_entry("BREAKOUT_CFG", interface_name, {'brkout_mode': target_brkout_mode}) + try: + config_db.set_entry("BREAKOUT_CFG", interface_name, {'brkout_mode': target_brkout_mode}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) click.secho("Breakout process got successfully completed." .format(interface_name), fg="cyan", underline=True) click.echo("Please note loaded setting will be lost after system reboot. To preserve setting, run `config save`.") @@ -6375,15 +6379,19 @@ def ntp(ctx): @click.pass_context def add_ntp_server(ctx, ntp_ip_address): """ Add NTP server IP """ - if not clicommon.is_ipaddress(ntp_ip_address): - ctx.fail('Invalid ip address') - db = ctx.obj['db'] + if ADHOC_VALIDATION: + if not clicommon.is_ipaddress(ntp_ip_address): + ctx.fail('Invalid IP address') + db = ValidatedConfigDBConnector(ctx.obj['db']) ntp_servers = db.get_table("NTP_SERVER") if ntp_ip_address in ntp_servers: click.echo("NTP server {} is already configured".format(ntp_ip_address)) return else: - db.set_entry('NTP_SERVER', ntp_ip_address, {'NULL': 'NULL'}) + try: + db.set_entry('NTP_SERVER', ntp_ip_address, {'NULL': 'NULL'}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) click.echo("NTP server {} added to configuration".format(ntp_ip_address)) try: click.echo("Restarting ntp-config service...") @@ -6396,12 +6404,16 @@ def add_ntp_server(ctx, ntp_ip_address): @click.pass_context def del_ntp_server(ctx, ntp_ip_address): """ Delete NTP server IP """ - if not clicommon.is_ipaddress(ntp_ip_address): - ctx.fail('Invalid IP address') - db = ctx.obj['db'] + if ADHOC_VALIDATION: + if not clicommon.is_ipaddress(ntp_ip_address): + ctx.fail('Invalid IP address') + db = ValidatedConfigDBConnector(ctx.obj['db']) ntp_servers = db.get_table("NTP_SERVER") if ntp_ip_address in ntp_servers: - db.set_entry('NTP_SERVER', '{}'.format(ntp_ip_address), None) + try: + db.set_entry('NTP_SERVER', '{}'.format(ntp_ip_address), None) + except JsonPatchConflict as e: + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) click.echo("NTP server {} removed from configuration".format(ntp_ip_address)) else: ctx.fail("NTP server {} is not configured.".format(ntp_ip_address)) @@ -6654,16 +6666,19 @@ def add(ctx, name, ipaddr, port, vrf): if not is_valid_collector_info(name, ipaddr, port, vrf): return - config_db = ctx.obj['db'] + config_db = ValidatedConfigDBConnector(ctx.obj['db']) collector_tbl = config_db.get_table('SFLOW_COLLECTOR') if (collector_tbl and name not in collector_tbl and len(collector_tbl) == 2): click.echo("Only 2 collectors can be configured, please delete one") return - - config_db.mod_entry('SFLOW_COLLECTOR', name, - {"collector_ip": ipaddr, "collector_port": port, - "collector_vrf": vrf}) + + try: + config_db.mod_entry('SFLOW_COLLECTOR', name, + {"collector_ip": ipaddr, "collector_port": port, + "collector_vrf": vrf}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) return # @@ -6674,14 +6689,18 @@ def add(ctx, name, ipaddr, port, vrf): @click.pass_context def del_collector(ctx, name): """Delete a sFlow collector""" - config_db = ctx.obj['db'] - collector_tbl = config_db.get_table('SFLOW_COLLECTOR') + config_db = ValidatedConfigDBConnector(ctx.obj['db']) + if ADHOC_VALIDATION: + collector_tbl = config_db.get_table('SFLOW_COLLECTOR') - if name not in collector_tbl: - click.echo("Collector: {} not configured".format(name)) - return + if name not in collector_tbl: + click.echo("Collector: {} not configured".format(name)) + return - config_db.mod_entry('SFLOW_COLLECTOR', name, None) + try: + config_db.set_entry('SFLOW_COLLECTOR', name, None) + except (JsonPatchConflict, JsonPointerException) as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'sflow agent-id' group diff --git a/config/mclag.py b/config/mclag.py index 589bb61a20..abc6ee051a 100644 --- a/config/mclag.py +++ b/config/mclag.py @@ -1,8 +1,12 @@ import click from swsscommon.swsscommon import ConfigDBConnector +from .validated_config_db_connector import ValidatedConfigDBConnector import ipaddress +from jsonpatch import JsonPatchConflict +from jsonpointer import JsonPointerException +ADHOC_VALIDATION = False CFG_PORTCHANNEL_PREFIX = "PortChannel" CFG_PORTCHANNEL_PREFIX_LEN = 11 CFG_PORTCHANNEL_MAX_VAL = 9999 @@ -86,8 +90,7 @@ def is_ipv4_addr_valid(addr): def check_if_interface_is_valid(db, interface_name): from .main import interface_name_is_valid - if interface_name_is_valid(db,interface_name) is False: - ctx.fail("Interface name is invalid. 
Please enter a valid interface name!!") + return interface_name_is_valid(db,interface_name) def get_intf_vrf_bind_unique_ip(db, interface_name, interface_type): intfvrf = db.get_table(interface_type) @@ -121,34 +124,42 @@ def mclag(ctx): @click.pass_context def add_mclag_domain(ctx, domain_id, source_ip_addr, peer_ip_addr, peer_ifname): """Add MCLAG Domain""" - - if not mclag_domain_id_valid(domain_id): - ctx.fail("{} invalid domain ID, valid range is 1 to 4095".format(domain_id)) - if not is_ipv4_addr_valid(source_ip_addr): - ctx.fail("{} invalid local ip address".format(source_ip_addr)) - if not is_ipv4_addr_valid(peer_ip_addr): - ctx.fail("{} invalid peer ip address".format(peer_ip_addr)) - - db = ctx.obj['db'] + if ADHOC_VALIDATION: + if not mclag_domain_id_valid(domain_id): + ctx.fail("{} invalid domain ID, valid range is 1 to 4095".format(domain_id)) + if not is_ipv4_addr_valid(source_ip_addr): + ctx.fail("{} invalid local ip address".format(source_ip_addr)) + if not is_ipv4_addr_valid(peer_ip_addr): + ctx.fail("{} invalid peer ip address".format(peer_ip_addr)) + + db = ValidatedConfigDBConnector(ctx.obj['db']) fvs = {} fvs['source_ip'] = str(source_ip_addr) fvs['peer_ip'] = str(peer_ip_addr) - if peer_ifname is not None: - if (peer_ifname.startswith("Ethernet") is False) and (peer_ifname.startswith("PortChannel") is False): - ctx.fail("peer interface is invalid, should be Ethernet interface or portChannel !!") - if (peer_ifname.startswith("Ethernet") is True) and (check_if_interface_is_valid(db, peer_ifname) is False): - ctx.fail("peer Ethernet interface name is invalid. it is not present in port table of configDb!!") - if (peer_ifname.startswith("PortChannel")) and (is_portchannel_name_valid(peer_ifname) is False): - ctx.fail("peer PortChannel interface name is invalid !!") - fvs['peer_link'] = str(peer_ifname) + if ADHOC_VALIDATION: + if peer_ifname is not None: + if (peer_ifname.startswith("Ethernet") is False) and (peer_ifname.startswith("PortChannel") is False): + ctx.fail("peer interface is invalid, should be Ethernet interface or portChannel !!") + if (peer_ifname.startswith("Ethernet") is True) and (check_if_interface_is_valid(db, peer_ifname) is False): + ctx.fail("peer Ethernet interface name is invalid. it is not present in port table of configDb!!") + if (peer_ifname.startswith("PortChannel")) and (is_portchannel_name_valid(peer_ifname) is False): + ctx.fail("peer PortChannel interface name is invalid !!") + fvs['peer_link'] = str(peer_ifname) mclag_domain_keys = db.get_table('MCLAG_DOMAIN').keys() if len(mclag_domain_keys) == 0: - db.set_entry('MCLAG_DOMAIN', domain_id, fvs) + try: + db.set_entry('MCLAG_DOMAIN', domain_id, fvs) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) else: + domain_id = str(domain_id) if domain_id in mclag_domain_keys: - db.mod_entry('MCLAG_DOMAIN', domain_id, fvs) - else: - ctx.fail("only one mclag Domain can be configured. Already one domain {} configured ".format(mclag_domain_keys[0])) + try: + db.mod_entry('MCLAG_DOMAIN', domain_id, fvs) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) + else: + ctx.fail("only one mclag Domain can be configured. 
 
 #mclag domain delete
@@ -158,15 +169,16 @@ def add_mclag_domain(ctx, domain_id, source_ip_addr, peer_ip_addr, peer_ifname):
 @click.pass_context
 def del_mclag_domain(ctx, domain_id):
     """Delete MCLAG Domain"""
-
-    if not mclag_domain_id_valid(domain_id):
-        ctx.fail("{} invalid domain ID, valid range is 1 to 4095".format(domain_id))
-
-    db = ctx.obj['db']
-    entry = db.get_entry('MCLAG_DOMAIN', domain_id)
-    if entry is None:
-        ctx.fail("MCLAG Domain {} not configured ".format(domain_id))
-        return
+
+    db = ValidatedConfigDBConnector(ctx.obj['db'])
+
+    if ADHOC_VALIDATION:
+        if not mclag_domain_id_valid(domain_id):
+            ctx.fail("{} invalid domain ID, valid range is 1 to 4095".format(domain_id))
+
+    entry = db.get_entry('MCLAG_DOMAIN', domain_id)
+    if entry is None:
+        ctx.fail("MCLAG Domain {} not configured ".format(domain_id))
 
     click.echo("MCLAG Domain delete takes care of deleting all associated MCLAG Interfaces")
@@ -175,11 +187,17 @@ def del_mclag_domain(ctx, domain_id):
     #delete associated mclag interfaces
     for iface_domain_id, iface_name in interface_table_keys:
-        if (int(iface_domain_id) == domain_id):
-            db.set_entry('MCLAG_INTERFACE', (iface_domain_id, iface_name), None )
+        if (int(iface_domain_id) == domain_id):
+            try:
+                db.set_entry('MCLAG_INTERFACE', (iface_domain_id, iface_name), None )
+            except (JsonPointerException, JsonPatchConflict) as e:
+                ctx.fail("Invalid ConfigDB. Error: {}".format(e))
 
     #delete mclag domain
-    db.set_entry('MCLAG_DOMAIN', domain_id, None)
+    try:
+        db.set_entry('MCLAG_DOMAIN', domain_id, None)
+    except (JsonPointerException, JsonPatchConflict) as e:
+        ctx.fail("Invalid ConfigDB. Error: MCLAG_DOMAIN {} failed to be deleted".format(domain_id))
 
 #keepalive timeout config
@@ -260,16 +278,21 @@ def mclag_member(ctx):
 @click.pass_context
 def add_mclag_member(ctx, domain_id, portchannel_names):
     """Add member MCLAG interfaces from MCLAG Domain"""
-    db = ctx.obj['db']
-    entry = db.get_entry('MCLAG_DOMAIN', domain_id)
-    if len(entry) == 0:
-        ctx.fail("MCLAG Domain " + domain_id + " not configured, configure mclag domain first")
+    db = ValidatedConfigDBConnector(ctx.obj['db'])
+    if ADHOC_VALIDATION:
+        entry = db.get_entry('MCLAG_DOMAIN', domain_id)
+        if len(entry) == 0:
+            ctx.fail("MCLAG Domain " + domain_id + " not configured, configure mclag domain first")
 
     portchannel_list = portchannel_names.split(",")
     for portchannel_name in portchannel_list:
-        if is_portchannel_name_valid(portchannel_name) != True:
-            ctx.fail("{} is invalid!, name should have prefix '{}' and suffix '{}'" .format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO))
-        db.set_entry('MCLAG_INTERFACE', (domain_id, portchannel_name), {'if_type':"PortChannel"} )
+        if ADHOC_VALIDATION:
+            if is_portchannel_name_valid(portchannel_name) != True:
+                ctx.fail("{} is invalid!, name should have prefix '{}' and suffix '{}'" .format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO))
+        try:
+            db.set_entry('MCLAG_INTERFACE', (domain_id, portchannel_name), {'if_type':"PortChannel"} )
+        except ValueError as e:
+            ctx.fail("Invalid ConfigDB. Error: {}".format(e))
Error: {}".format(e)) @mclag_member.command('del') @click.argument('domain_id', metavar='', required=True) @@ -277,13 +300,17 @@ def add_mclag_member(ctx, domain_id, portchannel_names): @click.pass_context def del_mclag_member(ctx, domain_id, portchannel_names): """Delete member MCLAG interfaces from MCLAG Domain""" - db = ctx.obj['db'] + db = ValidatedConfigDBConnector(ctx.obj['db']) #split comma seperated portchannel names portchannel_list = portchannel_names.split(",") for portchannel_name in portchannel_list: - if is_portchannel_name_valid(portchannel_name) != True: - ctx.fail("{} is invalid!, name should have prefix '{}' and suffix '{}'" .format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO)) - db.set_entry('MCLAG_INTERFACE', (domain_id, portchannel_name), None ) + if ADHOC_VALIDATION: + if is_portchannel_name_valid(portchannel_name) != True: + ctx.fail("{} is invalid!, name should have prefix '{}' and suffix '{}'" .format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO)) + try: + db.set_entry('MCLAG_INTERFACE', (domain_id, portchannel_name), None ) + except (JsonPatchConflict, JsonPointerException) as e: + ctx.fail("Failed to delete mclag member {} from mclag domain {}".format(portchannel_name, domain_id)) #mclag unique ip config @mclag.group('unique-ip') @@ -297,7 +324,7 @@ def mclag_unique_ip(ctx): @click.pass_context def add_mclag_unique_ip(ctx, interface_names): """Add Unique IP on MCLAG Vlan interface""" - db = ctx.obj['db'] + db = ValidatedConfigDBConnector(ctx.obj['db']) mclag_domain_keys = db.get_table('MCLAG_DOMAIN').keys() if len(mclag_domain_keys) == 0: ctx.fail("MCLAG not configured. MCLAG should be configured.") @@ -318,14 +345,17 @@ def add_mclag_unique_ip(ctx, interface_names): (intf_name, ip) = k if intf_name == interface_name and ip != 0: ctx.fail("%s is configured with IP %s, remove the IP configuration and reconfigure after enabling unique IP configuration."%(str(intf_name), str(ip))) - db.set_entry('MCLAG_UNIQUE_IP', (interface_name), {'unique_ip':"enable"} ) + try: + db.set_entry('MCLAG_UNIQUE_IP', (interface_name), {'unique_ip':"enable"} ) + except ValueError as e: + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) @mclag_unique_ip.command('del') @click.argument('interface_names', metavar='', required=True) @click.pass_context def del_mclag_unique_ip(ctx, interface_names): """Delete Unique IP from MCLAG Vlan interface""" - db = ctx.obj['db'] + db = ValidatedConfigDBConnector(ctx.obj['db']) #split comma seperated interface names interface_list = interface_names.split(",") for interface_name in interface_list: @@ -341,7 +371,10 @@ def del_mclag_unique_ip(ctx, interface_names): (intf_name, ip) = k if intf_name == interface_name and ip != 0: ctx.fail("%s is configured with IP %s, remove the IP configuration and reconfigure after disabling unique IP configuration."%(str(intf_name), str(ip))) - db.set_entry('MCLAG_UNIQUE_IP', (interface_name), None ) + try: + db.set_entry('MCLAG_UNIQUE_IP', (interface_name), None ) + except (JsonPatchConflict, JsonPointerException) as e: + ctx.fail("Failed to delete mclag unique IP from Vlan interface {}".format(interface_name)) ####### diff --git a/config/muxcable.py b/config/muxcable.py index f53eae22e3..ba80cb02af 100644 --- a/config/muxcable.py +++ b/config/muxcable.py @@ -246,7 +246,7 @@ def lookup_statedb_and_update_configdb(db, per_npu_statedb, config_db, port, sta ipv6_value = get_value_for_key_in_config_tbl(config_db, port, "server_ipv6", "MUX_CABLE") soc_ipv4_value = get_optional_value_for_key_in_config_tbl(config_db, port, "soc_ipv4", "MUX_CABLE") cable_type = get_optional_value_for_key_in_config_tbl(config_db, port, "cable_type", "MUX_CABLE") - + ctx = click.get_current_context() state = get_value_for_key_in_dict(muxcable_statedb_dict, port, "state", "MUX_CABLE_TABLE") port_name = platform_sfputil_helper.get_interface_alias(port, db) @@ -255,15 +255,21 @@ def lookup_statedb_and_update_configdb(db, per_npu_statedb, config_db, port, sta port_status_dict[port_name] = 'OK' else: if cable_type is not None or soc_ipv4_value is not None: - config_db.set_entry("MUX_CABLE", port, {"state": state_cfg_val, - "server_ipv4": ipv4_value, - "server_ipv6": ipv6_value, - "soc_ipv4":soc_ipv4_value, - "cable_type": cable_type}) + try: + config_db.set_entry("MUX_CABLE", port, {"state": state_cfg_val, + "server_ipv4": ipv4_value, + "server_ipv6": ipv6_value, + "soc_ipv4":soc_ipv4_value, + "cable_type": cable_type}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) else: - config_db.set_entry("MUX_CABLE", port, {"state": state_cfg_val, - "server_ipv4": ipv4_value, - "server_ipv6": ipv6_value}) + try: + config_db.set_entry("MUX_CABLE", port, {"state": state_cfg_val, + "server_ipv4": ipv4_value, + "server_ipv6": ipv6_value}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) if (str(state_cfg_val) == 'active' and str(state) != 'active') or (str(state_cfg_val) == 'standby' and str(state) != 'standby'): port_status_dict[port_name] = 'INPROGRESS' else: @@ -274,9 +280,13 @@ def update_configdb_pck_loss_data(config_db, port, val): ipv4_value = get_value_for_key_in_config_tbl(config_db, port, "server_ipv4", "MUX_CABLE") ipv6_value = get_value_for_key_in_config_tbl(config_db, port, "server_ipv6", "MUX_CABLE") - config_db.set_entry("MUX_CABLE", port, {"state": configdb_state, + try: + config_db.set_entry("MUX_CABLE", port, {"state": configdb_state, "server_ipv4": ipv4_value, "server_ipv6": ipv6_value, "pck_loss_data_reset": val}) + except ValueError as e: + ctx = click.get_current_context() + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) # 'muxcable' command ("config muxcable mode active|auto") @muxcable.command() diff --git a/config/nat.py b/config/nat.py index 99e21b2750..8d2ad32c22 100644 --- a/config/nat.py +++ b/config/nat.py @@ -1,8 +1,12 @@ import ipaddress import click +from jsonpatch import JsonPatchConflict +from jsonpointer import JsonPointerException from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector +from .validated_config_db_connector import ValidatedConfigDBConnector +ADHOC_VALIDATION = True def is_valid_ipv4_address(address): """Check if the given ipv4 address is valid""" @@ -243,15 +247,15 @@ def static(): @click.option('-twice_nat_id', metavar='', required=False, type=click.IntRange(1, 9999), help="Set the twice nat id") def add_basic(ctx, global_ip, local_ip, nat_type, twice_nat_id): """Add Static NAT-related configutation""" + if ADHOC_VALIDATION: + # Verify the ip address format + if is_valid_ipv4_address(local_ip) is False: + ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) - # Verify the ip address format - if is_valid_ipv4_address(local_ip) is False: - ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) - - if is_valid_ipv4_address(global_ip) is False: - ctx.fail("Given global ip address {} is invalid. Please enter a valid global ip address !!".format(global_ip)) + if is_valid_ipv4_address(global_ip) is False: + ctx.fail("Given global ip address {} is invalid. Please enter a valid global ip address !!".format(global_ip)) - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() entryFound = False @@ -304,13 +308,25 @@ def add_basic(ctx, global_ip, local_ip, nat_type, twice_nat_id): ctx.fail("Same Twice nat id is not allowed for more than 2 entries!!") if nat_type is not None and twice_nat_id is not None: - config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: nat_type, dataKey3: twice_nat_id}) + try: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: nat_type, dataKey3: twice_nat_id}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) elif nat_type is not None: - config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: nat_type}) + try: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: nat_type}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) elif twice_nat_id is not None: - config_db.set_entry(table, key, {dataKey1: local_ip, dataKey3: twice_nat_id}) + try: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey3: twice_nat_id}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) else: - config_db.set_entry(table, key, {dataKey1: local_ip}) + try: + config_db.set_entry(table, key, {dataKey1: local_ip}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat add static tcp' command ('config nat add static tcp ') @@ -325,15 +341,15 @@ def add_basic(ctx, global_ip, local_ip, nat_type, twice_nat_id): @click.option('-twice_nat_id', metavar='', required=False, type=click.IntRange(1, 9999), help="Set the twice nat id") def add_tcp(ctx, global_ip, global_port, local_ip, local_port, nat_type, twice_nat_id): """Add Static TCP Protocol NAPT-related configutation""" + if ADHOC_VALIDATION: + # Verify the ip address format + if is_valid_ipv4_address(local_ip) is False: + ctx.fail("Given local ip address {} is invalid. 
Please enter a valid local ip address !!".format(local_ip)) - # Verify the ip address format - if is_valid_ipv4_address(local_ip) is False: - ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) - - if is_valid_ipv4_address(global_ip) is False: - ctx.fail("Given global ip address {} is invalid. Please enter a valid global ip address !!".format(global_ip)) + if is_valid_ipv4_address(global_ip) is False: + ctx.fail("Given global ip address {} is invalid. Please enter a valid global ip address !!".format(global_ip)) - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() entryFound = False @@ -384,13 +400,25 @@ def add_tcp(ctx, global_ip, global_port, local_ip, local_port, nat_type, twice_n ctx.fail("Same Twice nat id is not allowed for more than 2 entries!!") if nat_type is not None and twice_nat_id is not None: - config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey3: nat_type, dataKey4: twice_nat_id}) + try: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey3: nat_type, dataKey4: twice_nat_id}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) elif nat_type is not None: - config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey3: nat_type}) + try: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey3: nat_type}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) elif twice_nat_id is not None: - config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey4: twice_nat_id}) + try: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey4: twice_nat_id}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) else: - config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port}) + try: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat add static udp' command ('config nat add static udp ') @@ -405,15 +433,16 @@ def add_tcp(ctx, global_ip, global_port, local_ip, local_port, nat_type, twice_n @click.option('-twice_nat_id', metavar='', required=False, type=click.IntRange(1, 9999), help="Set the twice nat id") def add_udp(ctx, global_ip, global_port, local_ip, local_port, nat_type, twice_nat_id): """Add Static UDP Protocol NAPT-related configutation""" + + if ADHOC_VALIDATION: + # Verify the ip address format + if is_valid_ipv4_address(local_ip) is False: + ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) - # Verify the ip address format - if is_valid_ipv4_address(local_ip) is False: - ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) - - if is_valid_ipv4_address(global_ip) is False: - ctx.fail("Given global ip address {} is invalid. Please enter a valid global ip address !!".format(global_ip)) + if is_valid_ipv4_address(global_ip) is False: + ctx.fail("Given global ip address {} is invalid. 
Please enter a valid global ip address !!".format(global_ip)) - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() entryFound = False @@ -464,13 +493,25 @@ def add_udp(ctx, global_ip, global_port, local_ip, local_port, nat_type, twice_n ctx.fail("Same Twice nat id is not allowed for more than 2 entries!!") if nat_type is not None and twice_nat_id is not None: - config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey3: nat_type, dataKey4: twice_nat_id}) + try: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey3: nat_type, dataKey4: twice_nat_id}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) elif nat_type is not None: - config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey3: nat_type}) + try: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey3: nat_type}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) elif twice_nat_id is not None: - config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey4: twice_nat_id}) + try: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey4: twice_nat_id}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) else: - config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port}) + try: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat remove static' group ('config nat remove static ...') @@ -489,15 +530,16 @@ def static(): @click.argument('local_ip', metavar='', required=True) def remove_basic(ctx, global_ip, local_ip): """Remove Static NAT-related configutation""" + + if ADHOC_VALIDATION: + # Verify the ip address format + if is_valid_ipv4_address(local_ip) is False: + ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) - # Verify the ip address format - if is_valid_ipv4_address(local_ip) is False: - ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) - - if is_valid_ipv4_address(global_ip) is False: - ctx.fail("Given global ip address {} is invalid. Please enter a valid global ip address !!".format(global_ip)) + if is_valid_ipv4_address(global_ip) is False: + ctx.fail("Given global ip address {} is invalid. Please enter a valid global ip address !!".format(global_ip)) - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() entryFound = False @@ -508,8 +550,11 @@ def remove_basic(ctx, global_ip, local_ip): data = config_db.get_entry(table, key) if data: if data[dataKey] == local_ip: - config_db.set_entry(table, key, None) - entryFound = True + try: + config_db.set_entry(table, key, None) + entryFound = True + except (JsonPatchConflict, JsonPointerException) as e: + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) if entryFound is False: click.echo("Trying to delete static nat entry, which is not present.") @@ -526,15 +571,16 @@ def remove_basic(ctx, global_ip, local_ip): @click.argument('local_port', metavar='', type=click.IntRange(1, 65535), required=True) def remove_tcp(ctx, global_ip, global_port, local_ip, local_port): """Remove Static TCP Protocol NAPT-related configutation""" + + if ADHOC_VALIDATION: + # Verify the ip address format + if is_valid_ipv4_address(local_ip) is False: + ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) - # Verify the ip address format - if is_valid_ipv4_address(local_ip) is False: - ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) - - if is_valid_ipv4_address(global_ip) is False: - ctx.fail("Given global ip address {} is invalid. Please enter a valid global ip address !!".format(global_ip)) + if is_valid_ipv4_address(global_ip) is False: + ctx.fail("Given global ip address {} is invalid. Please enter a valid global ip address !!".format(global_ip)) - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() entryFound = False @@ -544,8 +590,11 @@ def remove_tcp(ctx, global_ip, global_port, local_ip, local_port): data = config_db.get_entry(table, key) if data: if data['local_ip'] == local_ip and data['local_port'] == str(local_port): - config_db.set_entry(table, key, None) - entryFound = True + try: + config_db.set_entry(table, key, None) + entryFound = True + except (JsonPatchConflict, JsonPointerException) as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) if entryFound is False: click.echo("Trying to delete static napt entry, which is not present.") @@ -561,15 +610,16 @@ def remove_tcp(ctx, global_ip, global_port, local_ip, local_port): @click.argument('local_port', metavar='', type=click.IntRange(1, 65535), required=True) def remove_udp(ctx, global_ip, global_port, local_ip, local_port): """Remove Static UDP Protocol NAPT-related configutation""" + + if ADHOC_VALIDATION: + # Verify the ip address format + if is_valid_ipv4_address(local_ip) is False: + ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) - # Verify the ip address format - if is_valid_ipv4_address(local_ip) is False: - ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) - - if is_valid_ipv4_address(global_ip) is False: - ctx.fail("Given global ip address {} is invalid. Please enter a valid global ip address !!".format(global_ip)) + if is_valid_ipv4_address(global_ip) is False: + ctx.fail("Given global ip address {} is invalid. Please enter a valid global ip address !!".format(global_ip)) - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() entryFound = False @@ -581,8 +631,11 @@ def remove_udp(ctx, global_ip, global_port, local_ip, local_port): data = config_db.get_entry(table, key) if data: if data[dataKey1] == local_ip and data[dataKey2] == str(local_port): - config_db.set_entry(table, key, None) - entryFound = True + try: + config_db.set_entry(table, key, None) + entryFound = True + except (JsonPatchConflict, JsonPointerException) as e: + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) if entryFound is False: click.echo("Trying to delete static napt entry, which is not present.") @@ -595,7 +648,7 @@ def remove_udp(ctx, global_ip, global_port, local_ip, local_port): def remove_static_all(ctx): """Remove all Static related configutation""" - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() tables = ['STATIC_NAT', 'STATIC_NAPT'] @@ -604,7 +657,10 @@ def remove_static_all(ctx): table_dict = config_db.get_table(table_name) if table_dict: for table_key_name in table_dict: - config_db.set_entry(table_name, table_key_name, None) + try: + config_db.set_entry(table_name, table_key_name, None) + except (JsonPatchConflict, JsonPointerException) as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat add pool' command ('config nat add pool ') @@ -664,7 +720,7 @@ def add_pool(ctx, pool_name, global_ip_range, global_port_range): else: global_port_range = "NULL" - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() entryFound = False @@ -711,7 +767,10 @@ def add_pool(ctx, pool_name, global_ip_range, global_port_range): ctx.fail("Given Ip address entry is overlapping with existing Static NAT entry !!") if entryFound == False: - config_db.set_entry(table, key, {dataKey1: global_ip_range, dataKey2 : global_port_range}) + try: + config_db.set_entry(table, key, {dataKey1: global_ip_range, dataKey2 : global_port_range}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat add binding' command ('config nat add binding ') @@ -740,7 +799,7 @@ def add_binding(ctx, binding_name, pool_name, acl_name, nat_type, twice_nat_id): if len(binding_name) > 32: ctx.fail("Invalid binding name. Maximum allowed binding name is 32 characters !!") - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() data = config_db.get_entry(table, key) @@ -773,7 +832,10 @@ def add_binding(ctx, binding_name, pool_name, acl_name, nat_type, twice_nat_id): if count > 1: ctx.fail("Same Twice nat id is not allowed for more than 2 entries!!") - config_db.set_entry(table, key, {dataKey1: acl_name, dataKey2: pool_name, dataKey3: nat_type, dataKey4: twice_nat_id}) + try: + config_db.set_entry(table, key, {dataKey1: acl_name, dataKey2: pool_name, dataKey3: nat_type, dataKey4: twice_nat_id}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat remove pool' command ('config nat remove pool ') @@ -791,7 +853,7 @@ def remove_pool(ctx, pool_name): if len(pool_name) > 32: ctx.fail("Invalid pool name. Maximum allowed pool name is 32 characters !!") - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() data = config_db.get_entry(table, key) @@ -808,7 +870,10 @@ def remove_pool(ctx, pool_name): break if entryFound == False: - config_db.set_entry(table, key, None) + try: + config_db.set_entry(table, key, None) + except (JsonPatchConflict, JsonPointerException) as e: + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) # # 'nat remove pools' command ('config nat remove pools') @@ -818,7 +883,7 @@ def remove_pool(ctx, pool_name): def remove_pools(ctx): """Remove all Pools for Dynamic configutation""" - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() entryFound = False @@ -835,8 +900,11 @@ def remove_pools(ctx): entryFound = True break - if entryFound == False: - config_db.set_entry(pool_table_name, pool_key_name, None) + if entryFound == False: + try: + config_db.set_entry(pool_table_name, pool_key_name, None) + except (JsonPatchConflict, JsonPointerException) as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat remove binding' command ('config nat remove binding ') @@ -854,7 +922,7 @@ def remove_binding(ctx, binding_name): if len(binding_name) > 32: ctx.fail("Invalid binding name. Maximum allowed binding name is 32 characters !!") - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() data = config_db.get_entry(table, key) @@ -863,7 +931,10 @@ def remove_binding(ctx, binding_name): entryFound = True if entryFound == False: - config_db.set_entry(table, key, None) + try: + config_db.set_entry(table, key, None) + except (JsonPatchConflict, JsonPointerException) as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat remove bindings' command ('config nat remove bindings') @@ -873,14 +944,17 @@ def remove_binding(ctx, binding_name): def remove_bindings(ctx): """Remove all Bindings for Dynamic configutation""" - config_db = ConfigDBConnector() + config_db = ValidatedConfigBConnector(ConfigDBConnector()) config_db.connect() binding_table_name = 'NAT_BINDINGS' binding_dict = config_db.get_table(binding_table_name) if binding_dict: for binding_key_name in binding_dict: - config_db.set_entry(binding_table_name, binding_key_name, None) + try: + config_db.set_entry(binding_table_name, binding_key_name, None) + except (JsonPatchConflict, JsonPointerException) as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat add interface' command ('config nat add interface -nat_zone ') @@ -892,7 +966,7 @@ def remove_bindings(ctx): def add_interface(ctx, interface_name, nat_zone): """Add interface related nat configuration""" - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() if nat_interface_name_is_valid(interface_name) is False: @@ -912,7 +986,10 @@ def add_interface(ctx, interface_name, nat_zone): if not interface_table_dict or interface_name not in interface_table_dict: ctx.fail("Interface table is not present. Please configure ip-address on {} and apply the nat zone !!".format(interface_name)) - config_db.mod_entry(interface_table_type, interface_name, {"nat_zone": nat_zone}) + try: + config_db.mod_entry(interface_table_type, interface_name, {"nat_zone": nat_zone}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) # # 'nat remove interface' command ('config nat remove interface ') @@ -922,7 +999,7 @@ def add_interface(ctx, interface_name, nat_zone): @click.argument('interface_name', metavar='', required=True) def remove_interface(ctx, interface_name): """Remove interface related NAT configuration""" - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() if nat_interface_name_is_valid(interface_name) is False: @@ -942,7 +1019,10 @@ def remove_interface(ctx, interface_name): if not interface_table_dict or interface_name not in interface_table_dict: ctx.fail("Interface table is not present. Ignoring the nat zone configuration") - config_db.mod_entry(interface_table_type, interface_name, {"nat_zone": "0"}) + try: + config_db.mod_entry(interface_table_type, interface_name, {"nat_zone": "0"}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat remove interfaces' command ('config nat remove interfaces') @@ -951,7 +1031,7 @@ def remove_interface(ctx, interface_name): @click.pass_context def remove_interfaces(ctx): """Remove all interface related NAT configuration""" - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() tables = ['INTERFACE', 'PORTCHANNEL_INTERFACE', 'VLAN_INTERFACE', 'LOOPBACK_INTERFACE'] @@ -964,7 +1044,10 @@ def remove_interfaces(ctx): if isinstance(table_key_name, str) is False: continue - config_db.set_entry(table_name, table_key_name, nat_config) + try: + config_db.set_entry(table_name, table_key_name, nat_config) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat feature' group ('config nat feature ') @@ -982,9 +1065,12 @@ def feature(): def enable(ctx): """Enbale the NAT feature """ - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() - config_db.mod_entry("NAT_GLOBAL", "Values", {"admin_mode": "enabled"}) + try: + config_db.mod_entry("NAT_GLOBAL", "Values", {"admin_mode": "enabled"}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat feature disable' command ('config nat feature disable>') @@ -993,9 +1079,12 @@ def enable(ctx): @click.pass_context def disable(ctx): """Disable the NAT feature """ - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() - config_db.mod_entry("NAT_GLOBAL", "Values", {"admin_mode": "disabled"}) + try: + config_db.mod_entry("NAT_GLOBAL", "Values", {"admin_mode": "disabled"}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat set timeout' command ('config nat set timeout ') @@ -1005,10 +1094,13 @@ def disable(ctx): @click.argument('seconds', metavar='', type=click.IntRange(300, 432000), required=True) def timeout(ctx, seconds): """Set NAT timeout configuration""" - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() - - config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_timeout": seconds}) + + try: + config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_timeout": seconds}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) # # 'nat set tcp-timeout' command ('config nat set tcp-timeout ') @@ -1018,10 +1110,13 @@ def timeout(ctx, seconds): @click.argument('seconds', metavar='', type=click.IntRange(300, 432000), required=True) def tcp_timeout(ctx, seconds): """Set NAT TCP timeout configuration""" - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() - - config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_tcp_timeout": seconds}) + + try: + config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_tcp_timeout": seconds}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat set udp-timeout' command ('config nat set udp-timeout ') @@ -1031,10 +1126,13 @@ def tcp_timeout(ctx, seconds): @click.argument('seconds', metavar='', type=click.IntRange(120, 600), required=True) def udp_timeout(ctx, seconds): """Set NAT UDP timeout configuration""" - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() - config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_udp_timeout": seconds}) + try: + config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_udp_timeout": seconds}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat reset timeout' command ('config nat reset timeout') @@ -1043,11 +1141,14 @@ def udp_timeout(ctx, seconds): @click.pass_context def timeout(ctx): """Reset NAT timeout configuration to default value (600 seconds)""" - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() seconds = 600 - - config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_timeout": seconds}) + + try: + config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_timeout": seconds}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat reset tcp-timeout' command ('config nat reset tcp-timeout') @@ -1056,11 +1157,14 @@ def timeout(ctx): @click.pass_context def tcp_timeout(ctx): """Reset NAT TCP timeout configuration to default value (86400 seconds)""" - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() seconds = 86400 - - config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_tcp_timeout": seconds}) + + try: + config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_tcp_timeout": seconds}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat reset udp-timeout' command ('config nat reset udp-timeout') @@ -1069,8 +1173,11 @@ def tcp_timeout(ctx): @click.pass_context def udp_timeout(ctx): """Reset NAT UDP timeout configuration to default value (300 seconds)""" - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() seconds = 300 - - config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_udp_timeout": seconds}) + + try: + config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_udp_timeout": seconds}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) diff --git a/tests/config_test.py b/tests/config_test.py index 4ebc14cd14..c1bb86fe40 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -8,6 +8,7 @@ import unittest import ipaddress from unittest import mock +from jsonpatch import JsonPatchConflict import click from click.testing import CliRunner @@ -1873,3 +1874,64 @@ def test_add_loopback_adhoc_validation(self): @classmethod def teardown_class(cls): print("TEARDOWN") + + +class TestConfigNtp(object): + @classmethod + def setup_class(cls): + print("SETUP") + import config.main + importlib.reload(config.main) + + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError)) + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + def test_add_ntp_server_failed_yang_validation(self): + config.ADHOC_VALIDATION = False + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + result = runner.invoke(config.config.commands["ntp"], ["add", "10.10.10.x"], obj=obj) + print(result.exit_code) + print(result.output) + assert "Invalid ConfigDB. Error" in result.output + + def test_add_ntp_server_invalid_ip(self): + config.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + result = runner.invoke(config.config.commands["ntp"], ["add", "10.10.10.x"], obj=obj) + print(result.exit_code) + print(result.output) + assert "Invalid IP address" in result.output + + def test_del_ntp_server_invalid_ip(self): + config.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + result = runner.invoke(config.config.commands["ntp"], ["del", "10.10.10.x"], obj=obj) + print(result.exit_code) + print(result.output) + assert "Invalid IP address" in result.output + + @patch("config.main.ConfigDBConnector.get_table", mock.Mock(return_value="10.10.10.10")) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=JsonPatchConflict)) + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + def test_del_ntp_server_invalid_ip_yang_validation(self): + config.ADHOC_VALIDATION = False + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + result = runner.invoke(config.config.commands["ntp"], ["del", "10.10.10.10"], obj=obj) + print(result.exit_code) + print(result.output) + assert "Invalid ConfigDB. 
Error" in result.output + + @classmethod + def teardown_class(cls): + print("TEARDOWN") diff --git a/tests/console_test.py b/tests/console_test.py index 8161eda7dd..528f5f4ba8 100644 --- a/tests/console_test.py +++ b/tests/console_test.py @@ -1,8 +1,10 @@ import os import sys import subprocess +import jsonpatch import pexpect from unittest import mock +from mock import patch import pytest @@ -14,6 +16,7 @@ from utilities_common.db import Db from consutil.lib import * from sonic_py_common import device_info +from jsonpatch import JsonPatchConflict class TestConfigConsoleCommands(object): @classmethod @@ -28,6 +31,16 @@ def test_enable_console_switch(self): print(result.exit_code) print(sys.stderr, result.output) assert result.exit_code == 0 + + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_enable_console_switch_yang_validation(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(config.config.commands["console"].commands["enable"]) + print(result.exit_code) + assert "Invalid ConfigDB. Error" in result.output def test_disable_console_switch(self): runner = CliRunner() @@ -38,6 +51,17 @@ def test_disable_console_switch(self): print(sys.stderr, result.output) assert result.exit_code == 0 + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_disable_console_switch_yang_validation(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(config.config.commands["console"].commands["disable"]) + print(result.exit_code) + print(sys.stderr, result.output) + assert "Invalid ConfigDB. Error" in result.output + def test_console_add_exists(self): runner = CliRunner() db = Db() @@ -95,6 +119,18 @@ def test_console_add_success(self): print(sys.stderr, result.output) assert result.exit_code == 0 + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError)) + def test_console_add_yang_validation(self): + runner = CliRunner() + db = Db() + + # add a console setting without flow control option + result = runner.invoke(config.config.commands["console"].commands["add"], ["0", '--baud', "9600"], obj=db) + print(result.exit_code) + print(sys.stderr, result.output) + assert "Invalid ConfigDB. 
Error" in result.output + def test_console_del_non_exists(self): runner = CliRunner() db = Db() @@ -117,6 +153,19 @@ def test_console_del_success(self): print(sys.stderr, result.output) assert result.exit_code == 0 + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=JsonPatchConflict)) + def test_console_del_yang_validation(self): + runner = CliRunner() + db = Db() + db.cfgdb.set_entry("CONSOLE_PORT", "1", { "baud_rate" : "9600" }) + + # add a console setting which the port exists + result = runner.invoke(config.config.commands["console"].commands["del"], ["1"], obj=db) + print(result.exit_code) + print(sys.stderr, result.output) + assert "Invalid ConfigDB. Error" in result.output + def test_update_console_remote_device_name_non_exists(self): runner = CliRunner() db = Db() @@ -163,6 +212,19 @@ def test_update_console_remote_device_name_reset(self): print(sys.stderr, result.output) assert result.exit_code == 0 + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_update_console_remote_device_name_reset_yang_validation(self): + runner = CliRunner() + db = Db() + db.cfgdb.set_entry("CONSOLE_PORT", 2, { "remote_device" : "switch1" }) + + # trying to reset a console line remote device configuration which is not exists + result = runner.invoke(config.config.commands["console"].commands["remote_device"], ["2"], obj=db) + print(result.exit_code) + print(sys.stderr, result.output) + assert "Invalid ConfigDB. Error" in result.output + def test_update_console_remote_device_name_success(self): runner = CliRunner() db = Db() @@ -174,6 +236,19 @@ def test_update_console_remote_device_name_success(self): print(sys.stderr, result.output) assert result.exit_code == 0 + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_update_console_remote_device_name_yang_validation(self): + runner = CliRunner() + db = Db() + db.cfgdb.set_entry("CONSOLE_PORT", "1", { "baud_rate" : "9600" }) + + # trying to set a console line remote device configuration + result = runner.invoke(config.config.commands["console"].commands["remote_device"], ["1", "switch1"], obj=db) + print(result.exit_code) + print(sys.stderr, result.output) + assert "Invalid ConfigDB. 
Error" in result.output + def test_update_console_baud_no_change(self): runner = CliRunner() db = Db() @@ -207,6 +282,19 @@ def test_update_console_baud_success(self): print(sys.stderr, result.output) assert result.exit_code == 0 + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_update_console_baud_yang_validation(self): + runner = CliRunner() + db = Db() + db.cfgdb.set_entry("CONSOLE_PORT", "1", { "baud_rate" : "9600" }) + + # trying to set a console line baud + result = runner.invoke(config.config.commands["console"].commands["baud"], ["1", "115200"], obj=db) + print(result.exit_code) + print(sys.stderr, result.output) + assert "Invalid ConfigDB. Error" in result.output + def test_update_console_flow_control_no_change(self): runner = CliRunner() db = Db() @@ -240,6 +328,19 @@ def test_update_console_flow_control_success(self): print(sys.stderr, result.output) assert result.exit_code == 0 + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_update_console_flow_control_yang_validation(self): + runner = CliRunner() + db = Db() + db.cfgdb.set_entry("CONSOLE_PORT", "1", { "baud_rate" : "9600", "flow_control" : "0" }) + + # trying to set a console line flow control option + result = runner.invoke(config.config.commands["console"].commands["flow_control"], ["enable", "1"], obj=db) + print(result.exit_code) + print(sys.stderr, result.output) + assert "Invalid ConfigDB. Error" in result.output + class TestConsutilLib(object): @classmethod def setup_class(cls): diff --git a/tests/kube_test.py b/tests/kube_test.py index e49a2a55f8..5b51049e7b 100644 --- a/tests/kube_test.py +++ b/tests/kube_test.py @@ -1,5 +1,8 @@ +import mock + from click.testing import CliRunner from utilities_common.db import Db +from mock import patch show_no_server_output="""\ Kubernetes server is not configured @@ -110,8 +113,30 @@ def test_no_kube_server(self, get_cmd_module): result = runner.invoke(show.cli.commands["kubernetes"].commands["server"].commands["config"], [], obj=db) self.__check_res(result, "config command default value", show_server_output_5) + + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_no_kube_server_yang_validation(self, get_cmd_module): + (config, show) = get_cmd_module + runner = CliRunner() + db = Db() + db.cfgdb.delete_table("KUBERNETES_MASTER") + # Check server not configured + result = runner.invoke(show.cli.commands["kubernetes"].commands["server"].commands["config"], [], obj=db) + self.__check_res(result, "null server config test", show_no_server_output) + + # Add IP when not configured + result = runner.invoke(config.config.commands["kubernetes"].commands["server"], ["ip", "10.10.10.11"], obj=db) + assert "Invalid ConfigDB. 
Error" in result.output + + db.cfgdb.mod_entry("KUBERNETES_MASTER", "SERVER", {"ip": "10.10.10.11"}) + # Add IP when already configured + result = runner.invoke(config.config.commands["kubernetes"].commands["server"], ["ip", "10.10.10.12"], obj=db) + assert "Invalid ConfigDB. Error" in result.output + + def test_only_kube_server(self, get_cmd_module): (config, show) = get_cmd_module runner = CliRunner() diff --git a/tests/mclag_test.py b/tests/mclag_test.py index a653174000..2401978e97 100644 --- a/tests/mclag_test.py +++ b/tests/mclag_test.py @@ -1,14 +1,19 @@ import os import traceback +import mock +import jsonpatch from click.testing import CliRunner import config.main as config +import config.mclag as mclag import show.main as show from utilities_common.db import Db - +from mock import patch +from jsonpatch import JsonPatchConflict MCLAG_DOMAIN_ID = "123" +MCLAG_NONEXISTENT_DOMAIN_ID = "234" MCLAG_INVALID_DOMAIN_ID1 = "-1" MCLAG_INVALID_DOMAIN_ID2 = "5000" MCLAG_DOMAIN_ID2 = "500" @@ -87,6 +92,7 @@ def verify_mclag_interface(self, db, domain_id, intf_str): return False def test_add_mclag_with_invalid_src_ip(self): + mclag.ADHOC_VALIDATION = True runner = CliRunner() db = Db() obj = {'db':db.cfgdb} @@ -223,9 +229,33 @@ def test_add_invalid_mclag_domain(self): result = runner.invoke(config.config.commands["mclag"].commands["add"], [5000, MCLAG_SRC_IP, MCLAG_PEER_IP, MCLAG_PEER_LINK], obj=obj) assert result.exit_code != 0, "mclag invalid domain test case with code {}:{} Output:{}".format(type(result.exit_code), result.exit_code, result.output) + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError)) + def test_add_mclag_domain_invalid_yang_validation(self): + mclag.ADHOC_VALIDATION = False + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + # add invalid mclag domain + result = runner.invoke(config.config.commands["mclag"].commands["add"], [MCLAG_DOMAIN_ID, MCLAG_SRC_IP, MCLAG_PEER_IP, MCLAG_INVALID_PEER_LINK4], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + @patch("config.main.ConfigDBConnector.get_table", mock.Mock(return_value={"123": "xyz"})) + def test_add_mclag_domain_invalid_yang_validation_override(self): + mclag.ADHOC_VALIDATION = False + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + # add invalid mclag domain + result = runner.invoke(config.config.commands["mclag"].commands["add"], [MCLAG_DOMAIN_ID, MCLAG_SRC_IP, MCLAG_PEER_IP, MCLAG_INVALID_PEER_LINK4], obj=obj) + assert "Invalid ConfigDB. 
Error" in result.output + def test_add_mclag_domain(self): + mclag.ADHOC_VALIDATION = True runner = CliRunner() db = Db() obj = {'db':db.cfgdb} @@ -378,10 +408,29 @@ def test_mclag_add_invalid_member(self): result = runner.invoke(config.config.commands["mclag"].commands["member"].commands["add"], [MCLAG_DOMAIN_ID, MCLAG_INVALID_PORTCHANNEL4], obj=obj) assert result.exit_code != 0, "mclag invalid member add case failed with code {}:{} Output:{}".format(type(result.exit_code), result.exit_code, result.output) + + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError)) + def test_mclag_add_invalid_member_yang_validation(self): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + mclag.ADHOC_VALIDATION = False + + # add valid mclag domain + db.cfgdb.set_entry("MCLAG_DOMAIN", MCLAG_DOMAIN_ID, {"source_ip": MCLAG_SRC_IP, "peer_ip": MCLAG_PEER_IP, "peer_link": MCLAG_PEER_LINK}) + + with mock.patch('validated_config_db_connector.device_info.is_yang_config_validation_enabled', mock.Mock(return_value=True)): + result = runner.invoke(config.config.commands["mclag"].commands["member"].commands["add"], [MCLAG_DOMAIN_ID, MCLAG_INVALID_MCLAG_MEMBER], obj=obj) + print(result.exit_code) + print(result.output) + assert "Invalid ConfigDB. Error" in result.output + + def test_mclag_add_member(self): runner = CliRunner() db = Db() obj = {'db':db.cfgdb} + mclag.ADHOC_VALIDATION = True # add valid mclag domain @@ -447,6 +496,29 @@ def test_mclag_add_member(self): assert result.exit_code != 0, "mclag invalid member del case failed with code {}:{} Output:{}".format(type(result.exit_code), result.exit_code, result.output) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError)) + def test_mclag_add__unique_ip_yang_validation(self): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + db.cfgdb.set_entry("MCLAG_DOMAIN", MCLAG_DOMAIN_ID, {"source_ip": MCLAG_SRC_IP}) + + with mock.patch('validated_config_db_connector.device_info.is_yang_config_validation_enabled', return_value=True): + result = runner.invoke(config.config.commands["mclag"].commands["unique-ip"].commands["add"], [MCLAG_UNIQUE_IP_VLAN], obj=obj) + assert "Invalid ConfigDB. 
Error" in result.output + + + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=JsonPatchConflict)) + def test_mclag_del_unique_ip_yang_validation(self): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + db.cfgdb.set_entry("MCLAG_DOMAIN", MCLAG_DOMAIN_ID, {"source_ip": MCLAG_SRC_IP}) + + with mock.patch('validated_config_db_connector.device_info.is_yang_config_validation_enabled', return_value=True): + result = runner.invoke(config.config.commands["mclag"].commands["unique-ip"].commands["del"], [MCLAG_UNIQUE_IP_VLAN], obj=obj) + assert "Failed to delete mclag unique IP" in result.output + def test_mclag_add_unique_ip(self, mock_restart_dhcp_relay_service): runner = CliRunner() @@ -544,12 +616,18 @@ def test_add_mclag_with_invalid_domain_id(self): result = runner.invoke(config.config.commands["mclag"].commands["add"], [MCLAG_INVALID_DOMAIN_ID2, MCLAG_SRC_IP, MCLAG_PEER_IP, MCLAG_PEER_LINK], obj=obj) assert result.exit_code != 0, "mclag invalid src ip test caase with code {}:{} Output:{}".format(type(result.exit_code), result.exit_code, result.output) - + def test_del_mclag_with_invalid_domain_id(self): + mclag.ADHOC_VALIDATION = True runner = CliRunner() db = Db() obj = {'db':db.cfgdb} + with mock.patch('config.main.ConfigDBConnector.get_entry', return_value=None): + # del mclag nonexistent domain_id + result = runner.invoke(config.config.commands["mclag"].commands["del"], [MCLAG_NONEXISTENT_DOMAIN_ID], obj=obj) + assert result.exit_code != 0, "mclag invalid domain id test case with code {}:{} Output:{}".format(type(result.exit_code), result.exit_code, result.output) + # del mclag with invalid domain_id result = runner.invoke(config.config.commands["mclag"].commands["del"], [MCLAG_INVALID_DOMAIN_ID1], obj=obj) assert result.exit_code != 0, "mclag invalid domain id test case with code {}:{} Output:{}".format(type(result.exit_code), result.exit_code, result.output) @@ -557,10 +635,10 @@ def test_del_mclag_with_invalid_domain_id(self): result = runner.invoke(config.config.commands["mclag"].commands["del"], [MCLAG_INVALID_DOMAIN_ID2], obj=obj) assert result.exit_code != 0, "mclag invalid domain id test case with code {}:{} Output:{}".format(type(result.exit_code), result.exit_code, result.output) result = runner.invoke(config.config.commands["mclag"].commands["del"], [MCLAG_DOMAIN_ID3], obj=obj) + print(result.output) assert result.exit_code == 0, "mclag invalid domain id test case with code {}:{} Output:{}".format(type(result.exit_code), result.exit_code, result.output) - def test_modify_mclag_domain(self): runner = CliRunner() db = Db() @@ -568,15 +646,14 @@ def test_modify_mclag_domain(self): # add mclag domain entry in db db.cfgdb.set_entry("MCLAG_DOMAIN", MCLAG_DOMAIN_ID, {"source_ip": MCLAG_SRC_IP}) - result = runner.invoke(config.config.commands["mclag"].commands["add"], [MCLAG_DOMAIN_ID, MCLAG_SRC_IP, MCLAG_PEER_IP, MCLAG_PEER_LINK], obj=obj) - assert result.exit_code != 0, "mclag add domain peer ip test caase with code {}:{} Output:{}".format(type(result.exit_code), result.exit_code, result.output) + assert result.exit_code == 0, "mclag add domain peer ip test caase with code {}:{} Output:{}".format(type(result.exit_code), result.exit_code, result.output) assert self.verify_mclag_domain_cfg(db, MCLAG_DOMAIN_ID, MCLAG_SRC_IP, MCLAG_PEER_IP, MCLAG_PEER_LINK) == True, "mclag config not found" - + print(result.output) # modify mclag config - result = runner.invoke(config.config.commands["mclag"].commands["add"], 
[MCLAG_DOMAIN_ID, MCLAG_SRC_IP, MCLAG_PEER_IP, MCLAG_PEER_LINK], obj=obj) - assert result.exit_code != 0, "test_mclag_domain_add_again with code {}:{} Output:{}".format(type(result.exit_code), result.exit_code, result.output) + result = runner.invoke(config.config.commands["mclag"].commands["add"], [MCLAG_DOMAIN_ID, MCLAG_SRC_IP, MCLAG_PEER_IP, MCLAG_PEER_LINK2], obj=obj) + assert result.exit_code == 0, "test_mclag_domain_add_again with code {}:{} Output:{}".format(type(result.exit_code), result.exit_code, result.output) assert self.verify_mclag_domain_cfg(db, MCLAG_DOMAIN_ID, MCLAG_SRC_IP, MCLAG_PEER_IP, MCLAG_PEER_LINK2) == True, "mclag config not modified" @@ -590,6 +667,7 @@ def test_add_mclag_domain_no_peer_link(self): assert result.exit_code != 0, "mclag add domain peer ip test caase with code {}:{} Output:{}".format(type(result.exit_code), result.exit_code, result.output) assert self.verify_mclag_domain_cfg(db, MCLAG_DOMAIN_ID, MCLAG_SRC_IP, MCLAG_PEER_IP) == False, "mclag config not found" + def test_del_mclag_domain_with_members(self): runner = CliRunner() db = Db() @@ -617,11 +695,45 @@ def test_del_mclag_domain_with_members(self): assert self.verify_mclag_interface(db, MCLAG_DOMAIN_ID, MCLAG_MEMBER_PO) == False, "mclag member not deleted" assert self.verify_mclag_domain_cfg(db, MCLAG_DOMAIN_ID) == False, "mclag domain not present" + + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=JsonPatchConflict)) + def test_del_mclag_domain_with_members_invalid_yang_validation(self): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + mclag.ADHOC_VALIDATION = False + + db.cfgdb.set_entry("MCLAG_DOMAIN", MCLAG_DOMAIN_ID, {"source_ip": MCLAG_SRC_IP, "peer_ip": MCLAG_PEER_IP, "peer_link": MCLAG_PEER_LINK}) + db.cfgdb.set_entry('MCLAG_INTERFACE', (MCLAG_DOMAIN_ID, MCLAG_MEMBER_PO), {'if_type':"PortChannel"} ) + db.cfgdb.set_entry('MCLAG_INTERFACE', (MCLAG_DOMAIN_ID, MCLAG_MEMBER_PO2), {'if_type':"PortChannel"} ) + + with mock.patch('validated_config_db_connector.device_info.is_yang_config_validation_enabled', return_value=True): + result = runner.invoke(config.config.commands["mclag"].commands["member"].commands["del"], [MCLAG_DOMAIN_ID, MCLAG_MEMBER_PO2], obj=obj) + assert "Failed to delete mclag member" in result.output + + with mock.patch('validated_config_db_connector.device_info.is_yang_config_validation_enabled', return_value=True): + result = runner.invoke(config.config.commands["mclag"].commands["del"], [MCLAG_DOMAIN_ID], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=JsonPatchConflict)) + def test_del_mclag_domain_invalid_yang_validation(self): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + mclag.ADHOC_VALIDATION = False + + db.cfgdb.set_entry("MCLAG_DOMAIN", MCLAG_DOMAIN_ID, {"source_ip": MCLAG_SRC_IP, "peer_ip": MCLAG_PEER_IP, "peer_link": MCLAG_PEER_LINK}) + with mock.patch('validated_config_db_connector.device_info.is_yang_config_validation_enabled', return_value=True): + result = runner.invoke(config.config.commands["mclag"].commands["del"], [MCLAG_DOMAIN_ID], obj=obj) + assert "Invalid ConfigDB. 
Error" in result.output + def test_mclag_keepalive_for_non_existent_domain(self): runner = CliRunner() db = Db() obj = {'db':db.cfgdb} + mclag.ADHOC_VALIDATION = True # configure keepalive timer for non-existing domain result = runner.invoke(config.config.commands["mclag"].commands["keepalive-interval"], [MCLAG_DOMAIN_ID, MCLAG_INVALID_KEEPALIVE_TIMER], obj=obj) diff --git a/tests/nat_test.py b/tests/nat_test.py new file mode 100644 index 0000000000..e37f13bc71 --- /dev/null +++ b/tests/nat_test.py @@ -0,0 +1,267 @@ +import mock + +from click.testing import CliRunner +from utilities_common.db import Db +from mock import patch +from jsonpatch import JsonPatchConflict +import config.main as config +import config.nat as nat +import config.validated_config_db_connector as validated_config_db_connector + +class TestNat(object): + @classmethod + def setup_class(cls): + print("SETUP") + + def test_add_basic_invalid(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["basic", "65.66.45.1", "12.12.12.14x", "-nat_type", "dnat"], obj=obj) + assert "Please enter a valid local ip address" in result.output + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["basic", "65.66.45.1x", "12.12.12.14", "-nat_type", "dnat"], obj=obj) + assert "Please enter a valid global ip address" in result.output + + @patch("config.nat.SonicV2Connector.get_all", mock.Mock(return_value={"MAX_NAT_ENTRIES": "9999"})) + @patch("config.nat.SonicV2Connector.exists", mock.Mock(return_value="True")) + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError)) + def test_add_basic_yang_validation(self): + nat.ADHOC_VALIDATION = False + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["basic", "65.66.45.1", "12.12.12.14", "-nat_type", "dnat", "-twice_nat_id", "3"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["basic", "65.66.45.1", "12.12.12.14", "-nat_type", "dnat"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["basic", "65.66.45.1", "12.12.12.14", "-twice_nat_id", "3"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["basic", "65.66.45.1", "12.12.12.14"], obj=obj) + assert "Invalid ConfigDB. 
Error" in result.output + + def test_add_tcp_invalid(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["tcp", "65.66.45.1", "100", "12.12.12.14x", "200", "-nat_type", "dnat"], obj=obj) + assert "Please enter a valid local ip address" in result.output + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["tcp", "65.66.45.1x", "100", "12.12.12.14", "200", "-nat_type", "dnat"], obj=obj) + assert "Please enter a valid global ip address" in result.output + + @patch("config.nat.SonicV2Connector.get_all", mock.Mock(return_value={"MAX_NAT_ENTRIES": "9999"})) + @patch("config.nat.SonicV2Connector.exists", mock.Mock(return_value="True")) + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError)) + def test_add_tcp_yang_validation(self): + nat.ADHOC_VALIDATION = False + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["tcp", "65.66.45.1", "100", "12.12.12.14", "200", "-nat_type", "dnat", "-twice_nat_id", "3"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["tcp", "65.66.45.1", "100", "12.12.12.14", "200", "-nat_type", "dnat"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["tcp", "65.66.45.1", "100", "12.12.12.14", "200", "-twice_nat_id", "3"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["tcp", "65.66.45.1", "100", "12.12.12.14", "200"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + def test_add_udp_invalid(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["udp", "65.66.45.1", "100", "12.12.12.14x", "200", "-nat_type", "dnat"], obj=obj) + assert "Please enter a valid local ip address" in result.output + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["udp", "65.66.45.1x", "100", "12.12.12.14", "200", "-nat_type", "dnat"], obj=obj) + assert "Please enter a valid global ip address" in result.output + + @patch("config.nat.SonicV2Connector.get_all", mock.Mock(return_value={"MAX_NAT_ENTRIES": "9999"})) + @patch("config.nat.SonicV2Connector.exists", mock.Mock(return_value="True")) + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError)) + def test_add_udp_yang_validation(self): + nat.ADHOC_VALIDATION = False + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["udp", "65.66.45.1", "100", "12.12.12.14", "200", "-nat_type", "dnat", "-twice_nat_id", "3"], obj=obj) + assert "Invalid ConfigDB. 
Error" in result.output + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["udp", "65.66.45.1", "100", "12.12.12.14", "200", "-nat_type", "dnat"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["udp", "65.66.45.1", "100", "12.12.12.14", "200", "-twice_nat_id", "3"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["udp", "65.66.45.1", "100", "12.12.12.14", "200"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + def test_remove_basic(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["remove"].commands["static"].commands["basic"], ["65.66.45.1", "12.12.12.14x"], obj=obj) + assert "Please enter a valid local ip address" in result.output + + result = runner.invoke(config.config.commands["nat"].commands["remove"].commands["static"].commands["basic"], ["65.66.45.1x", "12.12.12.14"], obj=obj) + assert "Please enter a valid global ip address" in result.output + + @patch("config.nat.ConfigDBConnector.get_entry", mock.Mock(return_value={"local_ip": "12.12.12.14"})) + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=JsonPatchConflict)) + def test_remove_basic_yang_validation(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["remove"].commands["static"].commands["basic"], ["65.66.45.1", "12.12.12.14"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + def test_remove_udp(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["remove"].commands["static"].commands["udp"], ["65.66.45.1", "100", "12.12.12.14x", "200"], obj=obj) + assert "Please enter a valid local ip address" in result.output + + result = runner.invoke(config.config.commands["nat"].commands["remove"].commands["static"].commands["udp"], ["65.66.45.1x", "100", "12.12.12.14", "200"], obj=obj) + assert "Please enter a valid global ip address" in result.output + + @patch("config.nat.ConfigDBConnector.get_entry", mock.Mock(return_value={"local_ip": "12.12.12.14", "local_port": "200"})) + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=JsonPatchConflict)) + def test_remove_udp_yang_validation(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["remove"].commands["static"].commands["udp"], ["65.66.45.1", "100", "12.12.12.14", "200"], obj=obj) + assert "Invalid ConfigDB. 
Error" in result.output + + @patch("config.nat.ConfigDBConnector.get_table", mock.Mock(return_value={"sample_table_key": "sample_table_value"})) + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=JsonPatchConflict)) + def test_remove_static_all_yang_validation(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["remove"].commands["static"].commands["all"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_enable_yang_validation(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["feature"].commands["enable"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_disable_yang_validation(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["feature"].commands["disable"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_timeout_yang_validation(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["set"].commands["timeout"], ["301"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_tcp_timeout_yang_validation(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["set"].commands["tcp-timeout"], ["301"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_udp_timeout_yang_validation(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["set"].commands["udp-timeout"], ["301"], obj=obj) + assert "Invalid ConfigDB. 
Error" in result.output + + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_reset_timeout_yang_validation(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["reset"].commands["timeout"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_reset_tcp_timeout_yang_validation(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["reset"].commands["tcp-timeout"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_reset_udp_timeout_yang_validation(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["reset"].commands["udp-timeout"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output diff --git a/tests/sflow_test.py b/tests/sflow_test.py index 226e52ae5e..da03ff396e 100644 --- a/tests/sflow_test.py +++ b/tests/sflow_test.py @@ -3,6 +3,7 @@ import pytest from unittest import mock +from jsonpatch import JsonPatchConflict from click.testing import CliRunner from utilities_common.db import Db from mock import patch @@ -193,6 +194,25 @@ def test_config_sflow_collector(self): assert result.output == show_sflow_output return + + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=JsonPatchConflict)) + def test_config_sflow_collector_invalid_yang_validation(self): + db = Db() + runner = CliRunner() + obj = {'db':db.cfgdb} + + config.ADHOC_VALIDTION = False + result = runner.invoke(config.config.commands["sflow"]. + commands["collector"].commands["del"], ["prod"], obj=obj) + print(result.exit_code, result.output) + assert "Invalid ConfigDB. Error" in result.output + + result = runner.invoke(config.config.commands["sflow"]. + commands["collector"].commands["add"], + ["prod", "fe80::6e82:6aff:fe1e:cd8e", "--vrf", "mgmt"], obj=obj) + assert "Invalid ConfigDB. 
Error" in result.output @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) From d1cb91572952008dc15249428711324f0cfa3b25 Mon Sep 17 00:00:00 2001 From: Saikrishna Arcot Date: Thu, 6 Apr 2023 15:35:12 -0700 Subject: [PATCH 57/66] Use sonic-swss-common artifacts from the matching source branch (#2783) Signed-off-by: Saikrishna Arcot --- azure-pipelines.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 4a00e67440..eecf1c9e53 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -71,7 +71,7 @@ stages: pipeline: 9 artifact: sonic-swss-common runVersion: 'latestFromBranch' - runBranch: 'refs/heads/master' + runBranch: 'refs/heads/$(sourceBranch)' displayName: "Download sonic swss common deb packages" - script: | From adeac253a84b5ee857131a7a54e68685cdf78836 Mon Sep 17 00:00:00 2001 From: dbarashinvd <105214075+dbarashinvd@users.noreply.github.com> Date: Sat, 8 Apr 2023 00:12:02 +0300 Subject: [PATCH 58/66] fix radius error on unicode name error due to python3 (#2751) fixes https://github.com/sonic-net/sonic-buildimage/issues/14356 #### What I did fix radius command python config file for commands that fails with tracebacks due to NameError: name 'unicode' is not define #### How I did it delete any unicode from python config file #### How to verify it run the two radius commands: ``` config radius nasip 1.1.1.1 config radius sourceip 2000::1 ``` and check show radius output ``` show radius ``` #### Previous command output (if the output of a command-line utility has changed) ``` root@qa-eth-vt03-1-4600ca1:/home/admin# config radius nasip 1.1.1.1 Traceback (most recent call last): File "/usr/local/bin/config", line 8, in sys.exit(config()) File "/usr/local/lib/python3.9/dist-packages/click/core.py", line 764, in call return self.main(*args, **kwargs) File "/usr/local/lib/python3.9/dist-packages/click/core.py", line 717, in main rv = self.invoke(ctx) File "/usr/local/lib/python3.9/dist-packages/click/core.py", line 1137, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/usr/local/lib/python3.9/dist-packages/click/core.py", line 1137, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/usr/local/lib/python3.9/dist-packages/click/core.py", line 956, in invoke return ctx.invoke(self.callback, **ctx.params) File "/usr/local/lib/python3.9/dist-packages/click/core.py", line 555, in invoke return callback(*args, **kwargs) File "/usr/local/lib/python3.9/dist-packages/click/decorators.py", line 17, in new_func return f(get_current_context(), *args, **kwargs) File "/usr/local/lib/python3.9/dist-packages/config/aaa.py", line 448, in nasip v6_invalid_list = [ipaddress.IPv6Address(unicode('0::0')), ipaddress.IPv6Address(unicode('0::1'))] NameError: name 'unicode' is not defined root@qa-eth-vt03-1-4600ca1:/home/admin# config radius sourceip 2000::1 ... 
File "/usr/local/lib/python3.9/dist-packages/click/decorators.py", line 17, in new_func return f(get_current_context(), *args, **kwargs) File "/usr/local/lib/python3.9/dist-packages/config/aaa.py", line 407, in sourceip v6_invalid_list = [ipaddress.IPv6Address(unicode('0::0')), ipaddress.IPv6Address(unicode('0::1'))] NameError: name 'unicode' is not defined root@qa-eth-vt03-1-4600ca1:/home/admin# ``` #### New command output (if the output of a command-line utility has changed) ``` root@r-panther-13:/home/admin# config radius nasip 1.1.1.1 root@r-panther-13:/home/admin# config radius sourceip 1.1.1.1 root@r-panther-13:/home/admin# show radius RADIUS global auth_type pap (default) RADIUS global retransmit 3 (default) RADIUS global timeout 5 (default) RADIUS global passkey (default) RADIUS global nas_ip 1.1.1.1 RADIUS global src_ip 1.1.1.1 root@r-panther-13:/home/admin# ``` --- config/aaa.py | 8 +++---- tests/radius_test.py | 50 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+), 4 deletions(-) diff --git a/config/aaa.py b/config/aaa.py index 6f4a42b340..3c76187126 100644 --- a/config/aaa.py +++ b/config/aaa.py @@ -405,8 +405,8 @@ def sourceip(ctx, src_ip): click.echo('Invalid ip address') return - v6_invalid_list = [ipaddress.IPv6Address(unicode('0::0')), ipaddress.IPv6Address(unicode('0::1'))] - net = ipaddress.ip_network(unicode(src_ip), strict=False) + v6_invalid_list = [ipaddress.IPv6Address('0::0'), ipaddress.IPv6Address('0::1')] + net = ipaddress.ip_network(src_ip, strict=False) if (net.version == 4): if src_ip == "0.0.0.0": click.echo('enter non-zero ip address') @@ -446,8 +446,8 @@ def nasip(ctx, nas_ip): click.echo('Invalid ip address') return - v6_invalid_list = [ipaddress.IPv6Address(unicode('0::0')), ipaddress.IPv6Address(unicode('0::1'))] - net = ipaddress.ip_network(unicode(nas_ip), strict=False) + v6_invalid_list = [ipaddress.IPv6Address('0::0'), ipaddress.IPv6Address('0::1')] + net = ipaddress.ip_network(nas_ip, strict=False) if (net.version == 4): if nas_ip == "0.0.0.0": click.echo('enter non-zero ip address') diff --git a/tests/radius_test.py b/tests/radius_test.py index 49a1ac3ec4..928e629616 100644 --- a/tests/radius_test.py +++ b/tests/radius_test.py @@ -52,6 +52,16 @@ """ +show_radius_global_nasip_source_ip_output="""\ +RADIUS global auth_type pap (default) +RADIUS global retransmit 3 (default) +RADIUS global timeout 5 (default) +RADIUS global passkey (default) +RADIUS global nas_ip 1.1.1.1 +RADIUS global src_ip 2000::1 + +""" + config_radius_empty_output="""\ """ @@ -217,3 +227,43 @@ def test_config_radius_server_invalid_delete_yang_validation(self): ["delete", "10.10.10.x"]) print(result.output) assert "Invalid ConfigDB. 
Error" in result.output + + def test_config_radius_nasip_sourceip(self, get_cmd_module): + (config, show) = get_cmd_module + runner = CliRunner() + db = Db() + db.cfgdb.delete_table("RADIUS") + db.cfgdb.delete_table("RADIUS_SERVER") + + result = runner.invoke(config.config.commands["radius"],\ + ["nasip", "1.1.1.1"]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + result = runner.invoke(config.config.commands["radius"],\ + ["sourceip", "2000::1"]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + result = runner.invoke(show.cli.commands["radius"], []) + print(result.exit_code) + print(result.output) + assert result.output == show_radius_default_output + + db.cfgdb.mod_entry("RADIUS", "global", \ + {'auth_type' : 'pap (default)', \ + 'retransmit': '3 (default)', \ + 'timeout' : '5 (default)', \ + 'passkey' : ' (default)', \ + 'nas_ip' : '1.1.1.1', \ + 'src_ip' : '2000::1', \ + } \ + ) + + result = runner.invoke(show.cli.commands["radius"], [], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == show_radius_global_nasip_source_ip_output From 882da011e8e50642309f979d6a7344bd8dbd6f44 Mon Sep 17 00:00:00 2001 From: Dev Ojha <47282568+developfast@users.noreply.github.com> Date: Fri, 7 Apr 2023 16:30:51 -0700 Subject: [PATCH 59/66] [DBMigrator] Update db_migrator to support EdgeZoneAggregator Buffer Config for T0s (#2747) * Added db_migrator functionality to support EdgeZoneAggregator buffer upgrade for T0s, and unit tests Signed-off-by: dojha * Fixed unit tests and added edge case for non-relevant version upgrades Signed-off-by: dojha * changed cable logic + added 1 more unit test * changed versioning on master to 5_0_1 * fixed version typo * Added proper comments for change * removed port status change * changed version on unit tests * Removed extra whitelines * Made minor changes on comments * Added versions from other branches and resolved comments Signed-off-by: dojha * moved db_migrator change to common migration path * Update sample-t0-edgezoneagg-config-same-cable-output.json --------- Signed-off-by: dojha --- scripts/db_migrator.py | 61 +++++++-- .../sample-t0-edgezoneagg-config-input.json | 123 ++++++++++++++++++ .../sample-t0-edgezoneagg-config-output.json | 123 ++++++++++++++++++ ...0-edgezoneagg-config-same-cable-input.json | 123 ++++++++++++++++++ ...-edgezoneagg-config-same-cable-output.json | 123 ++++++++++++++++++ tests/db_migrator_test.py | 44 ++++++- 6 files changed, 585 insertions(+), 12 deletions(-) create mode 100644 tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-input.json create mode 100644 tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-output.json create mode 100644 tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-same-cable-input.json create mode 100644 tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-same-cable-output.json diff --git a/scripts/db_migrator.py b/scripts/db_migrator.py index 64fddea290..5ed4133bc8 100755 --- a/scripts/db_migrator.py +++ b/scripts/db_migrator.py @@ -167,7 +167,7 @@ def migrate_mgmt_ports_on_s6100(self): self.appDB.set(self.appDB.APPL_DB, 'PORT_TABLE:PortConfigDone', 'count', str(total_count)) log.log_notice("Port count updated from {} to : {}".format(portCount, self.appDB.get(self.appDB.APPL_DB, 'PORT_TABLE:PortConfigDone', 'count'))) return True - + def migrate_intf_table(self): ''' Migrate all data from existing INTF table in APP DB during warmboot 
with IP Prefix @@ -265,7 +265,6 @@ def migrate_config_db_buffer_tables_for_dynamic_calculation(self, speed_list, ca @append_item_method - a function which is called to append an item to the list of pending commit items any update to buffer configuration will be pended and won't be applied until all configuration is checked and aligns with the default one - 1. Buffer profiles for lossless PGs in BUFFER_PROFILE table will be removed if their names have the convention of pg_lossless___profile where the speed and cable_length belongs speed_list and cable_len_list respectively @@ -349,7 +348,6 @@ def prepare_dynamic_buffer_for_warm_reboot(self, buffer_pools=None, buffer_profi ''' This is the very first warm reboot of buffermgrd (dynamic) if the system reboot from old image by warm-reboot In this case steps need to be taken to get buffermgrd prepared (for warm reboot) - During warm reboot, buffer tables should be installed in the first place. However, it isn't able to achieve that when system is warm-rebooted from an old image without dynamic buffer supported, because the buffer info wasn't in the APPL_DB in the old image. @@ -357,7 +355,6 @@ def prepare_dynamic_buffer_for_warm_reboot(self, buffer_pools=None, buffer_profi During warm-reboot, db_migrator adjusts buffer info in CONFIG_DB by removing some fields according to requirement from dynamic buffer calculation. The buffer info before that adjustment needs to be copied to APPL_DB. - 1. set WARM_RESTART_TABLE|buffermgrd as {restore_count: 0} 2. Copy the following tables from CONFIG_DB into APPL_DB in case of warm reboot The separator in fields that reference objects in other table needs to be updated from '|' to ':' @@ -367,7 +364,6 @@ def prepare_dynamic_buffer_for_warm_reboot(self, buffer_pools=None, buffer_profi - BUFFER_QUEUE, separator updated for field 'profile - BUFFER_PORT_INGRESS_PROFILE_LIST, separator updated for field 'profile_list' - BUFFER_PORT_EGRESS_PROFILE_LIST, separator updated for field 'profile_list' - ''' warmreboot_state = self.stateDB.get(self.stateDB.STATE_DB, 'WARM_RESTART_ENABLE_TABLE|system', 'enable') mmu_size = self.stateDB.get(self.stateDB.STATE_DB, 'BUFFER_MAX_PARAM_TABLE|global', 'mmu_size') @@ -572,7 +568,7 @@ def migrate_port_qos_map_global(self): dscp_to_tc_map_table_names = self.configDB.get_keys('DSCP_TO_TC_MAP') if len(dscp_to_tc_map_table_names) == 0: return - + qos_maps = self.configDB.get_table('PORT_QOS_MAP') if 'global' not in qos_maps.keys(): # We are unlikely to have more than 1 DSCP_TO_TC_MAP in previous versions @@ -596,6 +592,50 @@ def migrate_route_table(self): route_key = "ROUTE_TABLE:{}".format(route_prefix) self.appDB.set(self.appDB.APPL_DB, route_key, 'weight','') + def update_edgezone_aggregator_config(self): + """ + Update cable length configuration in ConfigDB for T0 neighbor interfaces + connected to EdgeZone Aggregator devices, while resetting the port values to trigger a buffer change + 1. Find a list of all interfaces connected to an EdgeZone Aggregator device. + 2. If all the cable lengths are the same, do nothing and return. + 3. If there are different cable lengths, update CABLE_LENGTH values for these interfaces with a constant value of 40m. 
+ """ + device_neighbor_metadata = self.configDB.get_table("DEVICE_NEIGHBOR_METADATA") + device_neighbors = self.configDB.get_table("DEVICE_NEIGHBOR") + cable_length = self.configDB.get_table("CABLE_LENGTH") + port_table = self.configDB.get_table("PORT") + edgezone_aggregator_devs = [] + edgezone_aggregator_intfs = [] + EDGEZONE_AGG_CABLE_LENGTH = "40m" + for k, v in device_neighbor_metadata.items(): + if v.get("type") == "EdgeZoneAggregator": + edgezone_aggregator_devs.append(k) + + if len(edgezone_aggregator_devs) == 0: + return + + for intf, intf_info in device_neighbors.items(): + if intf_info.get("name") in edgezone_aggregator_devs: + edgezone_aggregator_intfs.append(intf) + + cable_length_table = self.configDB.get_entry("CABLE_LENGTH", "AZURE") + first_cable_intf = next(iter(cable_length_table)) + first_cable_length = cable_length_table[first_cable_intf] + index = 0 + + for intf, length in cable_length_table.items(): + index += 1 + if first_cable_length != length: + break + elif index == len(cable_length_table): + # All cable lengths are the same, nothing to modify + return + + for intf, length in cable_length_table.items(): + if intf in edgezone_aggregator_intfs: + # Set new cable length values + self.configDB.set(self.configDB.CONFIG_DB, "CABLE_LENGTH|AZURE", intf, EDGEZONE_AGG_CABLE_LENGTH) + def version_unknown(self): """ version_unknown tracks all SONiC versions that doesn't have a version @@ -757,7 +797,7 @@ def version_2_0_1(self): def version_2_0_2(self): """ Version 2_0_2 - This is the latest version for 202012 branch + This is the latest version for 202012 branch """ log.log_info('Handling version_2_0_2') self.set_version('version_3_0_0') @@ -882,7 +922,7 @@ def version_4_0_0(self): self.stateDB.set(self.stateDB.STATE_DB, 'FAST_RESTART_ENABLE_TABLE|system', 'enable', enable_state) self.set_version('version_4_0_1') return 'version_4_0_1' - + def version_4_0_1(self): """ Version 4_0_1. @@ -930,13 +970,16 @@ def common_migration_ops(self): # removed together with calling to migrate_copp_table function. 
if self.asic_type != "mellanox": self.migrate_copp_table() - if self.asic_type == "broadcom" and 'Force10-S6100' in self.hwsku: + if self.asic_type == "broadcom" and 'Force10-S6100' in self.hwsku: self.migrate_mgmt_ports_on_s6100() else: log.log_notice("Asic Type: {}, Hwsku: {}".format(self.asic_type, self.hwsku)) self.migrate_route_table() + # Updating edgezone aggregator cable length config for T0 devices + self.update_edgezone_aggregator_config() + def migrate(self): version = self.get_version() log.log_info('Upgrading from version ' + version) diff --git a/tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-input.json b/tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-input.json new file mode 100644 index 0000000000..2b24076d8f --- /dev/null +++ b/tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-input.json @@ -0,0 +1,123 @@ +{ + "DEVICE_NEIGHBOR_METADATA|ARISTA01T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR_METADATA|ARISTA02T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR_METADATA|ARISTA03T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR_METADATA|ARISTA04T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR|Ethernet0": { + "name": "ARISTA01T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet4": { + "name": "ARISTA02T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet8": { + "name": "ARISTA03T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet12": { + "name": "ARISTA04T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet16": { + "name": "Servers1", + "port": "eth0" + }, + "DEVICE_NEIGHBOR|Ethernet20": { + "name": "Servers2", + "port": "eth0" + }, + "PORT|Ethernet0": { + "admin_status": "up", + "alias": "Ethernet1/1", + "description": "", + "index": "1", + "lanes": "77,78", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet4": { + "admin_status": "up", + "alias": "Ethernet2/1", + "description": "", + "index": "2", + "lanes": "79,80", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet8": { + "admin_status": "up", + "alias": "Ethernet3/1", + "description": "", + "index": "3", + "lanes": "81,82", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet12": { + "admin_status": "up", + "alias": "Ethernet4/1", + "description": "", + "index": "4", + "lanes": "83,84", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet16": { + "admin_status": "up", + "alias": "Ethernet5/1", + "description": "", + "index": "5", + "lanes": "85,86", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet20": { + "admin_status": "up", + "alias": "Ethernet6/1", + "description": "", + "index": "6", + "lanes": "87,88", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "CABLE_LENGTH|AZURE": { + "Ethernet0": "300m", + "Ethernet4": "300m", + "Ethernet8": "300m", + "Ethernet12": "300m", + "Ethernet16": "5m", + "Ethernet20": "5m" + }, + "VERSIONS|DATABASE": { + "VERSION": "version_4_0_1" + } +} diff --git a/tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-output.json 
b/tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-output.json new file mode 100644 index 0000000000..16646fc08b --- /dev/null +++ b/tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-output.json @@ -0,0 +1,123 @@ +{ + "DEVICE_NEIGHBOR_METADATA|ARISTA01T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR_METADATA|ARISTA02T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR_METADATA|ARISTA03T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR_METADATA|ARISTA04T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR|Ethernet0": { + "name": "ARISTA01T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet4": { + "name": "ARISTA02T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet8": { + "name": "ARISTA03T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet12": { + "name": "ARISTA04T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet16": { + "name": "Servers1", + "port": "eth0" + }, + "DEVICE_NEIGHBOR|Ethernet20": { + "name": "Servers2", + "port": "eth0" + }, + "PORT|Ethernet0": { + "admin_status": "up", + "alias": "Ethernet1/1", + "description": "", + "index": "1", + "lanes": "77,78", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet4": { + "admin_status": "up", + "alias": "Ethernet2/1", + "description": "", + "index": "2", + "lanes": "79,80", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet8": { + "admin_status": "up", + "alias": "Ethernet3/1", + "description": "", + "index": "3", + "lanes": "81,82", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet12": { + "admin_status": "up", + "alias": "Ethernet4/1", + "description": "", + "index": "4", + "lanes": "83,84", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet16": { + "admin_status": "up", + "alias": "Ethernet5/1", + "description": "", + "index": "5", + "lanes": "85,86", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet20": { + "admin_status": "up", + "alias": "Ethernet6/1", + "description": "", + "index": "6", + "lanes": "87,88", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "CABLE_LENGTH|AZURE": { + "Ethernet0": "40m", + "Ethernet4": "40m", + "Ethernet8": "40m", + "Ethernet12": "40m", + "Ethernet16": "5m", + "Ethernet20": "5m" + }, + "VERSIONS|DATABASE": { + "VERSION": "version_4_0_1" + } +} diff --git a/tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-same-cable-input.json b/tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-same-cable-input.json new file mode 100644 index 0000000000..f36bc7c739 --- /dev/null +++ b/tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-same-cable-input.json @@ -0,0 +1,123 @@ +{ + "DEVICE_NEIGHBOR_METADATA|ARISTA01T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR_METADATA|ARISTA02T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR_METADATA|ARISTA03T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + 
"DEVICE_NEIGHBOR_METADATA|ARISTA04T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR|Ethernet0": { + "name": "ARISTA01T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet4": { + "name": "ARISTA02T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet8": { + "name": "ARISTA03T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet12": { + "name": "ARISTA04T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet16": { + "name": "Servers1", + "port": "eth0" + }, + "DEVICE_NEIGHBOR|Ethernet20": { + "name": "Servers2", + "port": "eth0" + }, + "PORT|Ethernet0": { + "admin_status": "up", + "alias": "Ethernet1/1", + "description": "", + "index": "1", + "lanes": "77,78", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet4": { + "admin_status": "up", + "alias": "Ethernet2/1", + "description": "", + "index": "2", + "lanes": "79,80", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet8": { + "admin_status": "up", + "alias": "Ethernet3/1", + "description": "", + "index": "3", + "lanes": "81,82", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet12": { + "admin_status": "up", + "alias": "Ethernet4/1", + "description": "", + "index": "4", + "lanes": "83,84", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet16": { + "admin_status": "up", + "alias": "Ethernet5/1", + "description": "", + "index": "5", + "lanes": "85,86", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet20": { + "admin_status": "up", + "alias": "Ethernet6/1", + "description": "", + "index": "6", + "lanes": "87,88", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "CABLE_LENGTH|AZURE": { + "Ethernet0": "300m", + "Ethernet4": "300m", + "Ethernet8": "300m", + "Ethernet12": "300m", + "Ethernet16": "300m", + "Ethernet20": "300m" + }, + "VERSIONS|DATABASE": { + "VERSION": "version_4_0_1" + } +} diff --git a/tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-same-cable-output.json b/tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-same-cable-output.json new file mode 100644 index 0000000000..f36bc7c739 --- /dev/null +++ b/tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-same-cable-output.json @@ -0,0 +1,123 @@ +{ + "DEVICE_NEIGHBOR_METADATA|ARISTA01T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR_METADATA|ARISTA02T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR_METADATA|ARISTA03T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR_METADATA|ARISTA04T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR|Ethernet0": { + "name": "ARISTA01T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet4": { + "name": "ARISTA02T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet8": { + "name": "ARISTA03T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet12": { + "name": "ARISTA04T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet16": { + "name": "Servers1", + "port": "eth0" + }, + "DEVICE_NEIGHBOR|Ethernet20": { + "name": "Servers2", + "port": "eth0" + }, + "PORT|Ethernet0": { + 
"admin_status": "up", + "alias": "Ethernet1/1", + "description": "", + "index": "1", + "lanes": "77,78", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet4": { + "admin_status": "up", + "alias": "Ethernet2/1", + "description": "", + "index": "2", + "lanes": "79,80", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet8": { + "admin_status": "up", + "alias": "Ethernet3/1", + "description": "", + "index": "3", + "lanes": "81,82", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet12": { + "admin_status": "up", + "alias": "Ethernet4/1", + "description": "", + "index": "4", + "lanes": "83,84", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet16": { + "admin_status": "up", + "alias": "Ethernet5/1", + "description": "", + "index": "5", + "lanes": "85,86", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet20": { + "admin_status": "up", + "alias": "Ethernet6/1", + "description": "", + "index": "6", + "lanes": "87,88", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "CABLE_LENGTH|AZURE": { + "Ethernet0": "300m", + "Ethernet4": "300m", + "Ethernet8": "300m", + "Ethernet12": "300m", + "Ethernet16": "300m", + "Ethernet20": "300m" + }, + "VERSIONS|DATABASE": { + "VERSION": "version_4_0_1" + } +} diff --git a/tests/db_migrator_test.py b/tests/db_migrator_test.py index e9c184d160..c06bb11d11 100644 --- a/tests/db_migrator_test.py +++ b/tests/db_migrator_test.py @@ -409,7 +409,7 @@ def test_global_dscp_to_tc_map_migrator(self): dbmgtr_mlnx.migrate() resulting_table = dbmgtr_mlnx.configDB.get_table('PORT_QOS_MAP') assert resulting_table == {} - + class TestMoveLoggerTablesInWarmUpgrade(object): @classmethod def setup_class(cls): @@ -468,11 +468,11 @@ def test_rename_fast_reboot_table_check_enable(self): device_info.get_sonic_version_info = get_sonic_version_info_mlnx dbconnector.dedicated_dbs['STATE_DB'] = os.path.join(mock_db_path, 'state_db', 'fast_reboot_input') dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'empty-config-input') - + import db_migrator dbmgtr = db_migrator.DBMigrator(None) dbmgtr.migrate() - + dbconnector.dedicated_dbs['STATE_DB'] = os.path.join(mock_db_path, 'state_db', 'fast_reboot_expected') expected_db = SonicV2Connector(host='127.0.0.1') expected_db.connect(expected_db.STATE_DB) @@ -585,3 +585,41 @@ def test_migrate_weights_for_nexthops(self): expected_keys = expected_appl_db.get_all(expected_appl_db.APPL_DB, key) diff = DeepDiff(resulting_keys, expected_keys, ignore_order=True) assert not diff + +class TestWarmUpgrade_T0_EdgeZoneAggregator(object): + @classmethod + def setup_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "2" + + @classmethod + def teardown_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "0" + dbconnector.dedicated_dbs['CONFIG_DB'] = None + + def test_warm_upgrade_t0_edgezone_aggregator_diff_cable_length(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'sample-t0-edgezoneagg-config-input') + import db_migrator + dbmgtr = db_migrator.DBMigrator(None) + dbmgtr.migrate() + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'sample-t0-edgezoneagg-config-output') + expected_db = Db() + + resulting_table = dbmgtr.configDB.get_table('CABLE_LENGTH') + expected_table = 
expected_db.cfgdb.get_table('CABLE_LENGTH') + + diff = DeepDiff(resulting_table, expected_table, ignore_order=True) + assert not diff + + def test_warm_upgrade_t0_edgezone_aggregator_same_cable_length(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'sample-t0-edgezoneagg-config-same-cable-input') + import db_migrator + dbmgtr = db_migrator.DBMigrator(None) + dbmgtr.migrate() + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'sample-t0-edgezoneagg-config-same-cable-output') + expected_db = Db() + + resulting_table = dbmgtr.configDB.get_table('CABLE_LENGTH') + expected_table = expected_db.cfgdb.get_table('CABLE_LENGTH') + + diff = DeepDiff(resulting_table, expected_table, ignore_order=True) + assert not diff From ff032fe2102129069cfe4a263c8ba07f515dec18 Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Sun, 9 Apr 2023 21:19:46 -0400 Subject: [PATCH 60/66] [debug/undebug] replace shell=True (#2662) Signed-off-by: Mai Bui #### What I did `subprocess()` - when using with `shell=True` is dangerous. Using subprocess function without a static string can lead to command injection. #### How I did it `subprocess()` - use `shell=False` instead, use list of strings Ref: [https://semgrep.dev/docs/cheat-sheets/python-command-injection/#mitigation](https://semgrep.dev/docs/cheat-sheets/python-command-injection/#mitigation) #### How to verify it Manual test, execute all debug/undebug bgp/zebra commands Add UT --- debug/main.py | 108 ++++---- tests/debug_test.py | 599 ++++++++++++++++++++++++++++++++++++++++++++ undebug/main.py | 108 ++++---- 3 files changed, 711 insertions(+), 104 deletions(-) create mode 100644 tests/debug_test.py diff --git a/debug/main.py b/debug/main.py index 8c502c96ad..069159fc75 100755 --- a/debug/main.py +++ b/debug/main.py @@ -1,9 +1,13 @@ +import re +import sys import click import subprocess +from shlex import join def run_command(command, pager=False): - click.echo(click.style("Command: ", fg='cyan') + click.style(command, fg='green')) - p = subprocess.Popen(command, shell=True, text=True, stdout=subprocess.PIPE) + command_str = join(command) + click.echo(click.style("Command: ", fg='cyan') + click.style(command_str, fg='green')) + p = subprocess.Popen(command, text=True, stdout=subprocess.PIPE) output = p.stdout.read() if pager: click.echo_via_pager(output) @@ -21,8 +25,8 @@ def cli(): """SONiC command line - 'debug' command""" pass - -p = subprocess.check_output(["sudo vtysh -c 'show version'"], shell=True, text=True) +prefix_pattern = '^[A-Za-z0-9.:/]*$' +p = subprocess.check_output(['sudo', 'vtysh', '-c', 'show version'], text=True) if 'FRRouting' in p: # # 'bgp' group for FRR ### @@ -35,66 +39,64 @@ def bgp(): @bgp.command('allow-martians') def allow_martians(): """BGP allow martian next hops""" - command = 'sudo vtysh -c "debug bgp allow-martians"' + command = ['sudo', 'vtysh', '-c', "debug bgp allow-martians"] run_command(command) @bgp.command() @click.argument('additional', type=click.Choice(['segment']), required=False) def as4(additional): """BGP AS4 actions""" - command = 'sudo vtysh -c "debug bgp as4' + command = ['sudo', 'vtysh', '-c', "debug bgp as4"] if additional is not None: - command += " segment" - command += '"' + command[-1] += " segment" run_command(command) @bgp.command() @click.argument('prefix', required=True) def bestpath(prefix): """BGP bestpath""" - command = 'sudo vtysh -c "debug bgp bestpath %s"' % prefix + if not re.match(prefix_pattern, prefix): + sys.exit('Prefix contains only 
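The run_command rewrite is safe because list-form argv with shell=False (the subprocess default) passes user input through as a single argument instead of interpreting it with a shell; `shlex.join` is only used to echo the command. A short demonstration with a hostile value, using `echo` as a stand-in for vtysh:

```
import subprocess
from shlex import join

prefix = "1.2.3.4; rm -rf /tmp/x"                     # hostile input
safe_cmd = ["echo", "debug bgp bestpath %s" % prefix]

print(join(safe_cmd))                # the quoted string run_command() echoes
subprocess.run(safe_cmd, check=True) # "; rm ..." is printed, never executed
```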
number, alphabet, period, colon, and forward slash') + command = ['sudo', 'vtysh', '-c', "debug bgp bestpath %s" % prefix] run_command(command) @bgp.command() @click.argument('prefix_or_iface', required=False) def keepalives(prefix_or_iface): """BGP Neighbor Keepalives""" - command = 'sudo vtysh -c "debug bgp keepalives' + command = ['sudo', 'vtysh', '-c', "debug bgp keepalives"] if prefix_or_iface is not None: - command += " " + prefix_or_iface - command += '"' + command[-1] += ' ' + prefix_or_iface run_command(command) @bgp.command('neighbor-events') @click.argument('prefix_or_iface', required=False) def neighbor_events(prefix_or_iface): """BGP Neighbor Events""" - command = 'sudo vtysh -c "debug bgp neighbor-events' + command = ['sudo', 'vtysh', '-c', "debug bgp neighbor-events"] if prefix_or_iface is not None: - command += " " + prefix_or_iface - command += '"' + command[-1] += ' ' + prefix_or_iface run_command(command) @bgp.command() def nht(): """BGP nexthop tracking events""" - command = 'sudo vtysh -c "debug bgp nht"' + command = ['sudo', 'vtysh', '-c', "debug bgp nht"] run_command(command) @bgp.command() @click.argument('additional', type=click.Choice(['error']), required=False) def pbr(additional): """BGP policy based routing""" - command = 'sudo vtysh -c "debug bgp pbr' + command = ['sudo', 'vtysh', '-c', "debug bgp pbr"] if additional is not None: - command += " error" - command += '"' + command[-1] += " error" run_command(command) @bgp.command('update-groups') def update_groups(): """BGP update-groups""" - command = 'sudo vtysh -c "debug bgp update-groups"' + command = ['sudo', 'vtysh', '-c', "debug bgp update-groups"] run_command(command) @bgp.command() @@ -102,22 +104,25 @@ def update_groups(): @click.argument('prefix', required=False) def updates(direction, prefix): """BGP updates""" - command = 'sudo vtysh -c "debug bgp updates' + bgp_cmd = "debug bgp updates" if direction is not None: - command += " " + direction + bgp_cmd += ' ' + direction if prefix is not None: - command += " " + prefix - command += '"' + if not re.match(prefix_pattern, prefix): + sys.exit('Prefix contains only number, alphabet, period, colon, and forward slash') + bgp_cmd += ' ' + prefix + command = ['sudo', 'vtysh', '-c', bgp_cmd] run_command(command) @bgp.command() @click.argument('prefix', required=False) def zebra(prefix): """BGP Zebra messages""" - command = 'sudo vtysh -c "debug bgp zebra' + command = ['sudo', 'vtysh', '-c', "debug bgp zebra"] if prefix is not None: - command += " prefix " + prefix - command += '"' + if not re.match(prefix_pattern, prefix): + sys.exit('Prefix contains only number, alphabet, period, colon, and forward slash') + command[-1] += " prefix " + prefix run_command(command) # @@ -132,56 +137,54 @@ def zebra(): @click.argument('detailed', type=click.Choice(['detailed']), required=False) def dplane(detailed): """Debug zebra dataplane events""" - command = 'sudo vtysh -c "debug zebra dplane' + command = ['sudo', 'vtysh', '-c', "debug zebra dplane"] if detailed is not None: - command += " detailed" - command += '"' + command[-1] += " detailed" run_command(command) @zebra.command() def events(): """Debug option set for zebra events""" - command = 'sudo vtysh -c "debug zebra events"' + command = ['sudo', 'vtysh', '-c', "debug zebra events"] run_command(command) @zebra.command() def fpm(): """Debug zebra FPM events""" - command = 'sudo vtysh -c "debug zebra fpm"' + command = ['sudo', 'vtysh', '-c', "debug zebra fpm"] run_command(command) @zebra.command() def kernel(): """Debug 
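Throughout the conversion, multi-word vtysh commands stay a single "-c" argument: extra tokens are appended to the last list element rather than pushed as new argv entries. A quick check of the idiom:

```
command = ['sudo', 'vtysh', '-c', "debug bgp as4"]
command[-1] += " segment"   # extend the "-c" payload, not the argv list
assert command == ['sudo', 'vtysh', '-c', "debug bgp as4 segment"]
```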
option set for zebra between kernel interface""" - command = 'sudo vtysh -c "debug zebra kernel"' + command = ['sudo', 'vtysh', '-c', "debug zebra kernel"] run_command(command) @zebra.command() def nht(): """Debug option set for zebra next hop tracking""" - command = 'sudo vtysh -c "debug zebra nht"' + command = ['sudo', 'vtysh', '-c', "debug zebra nht"] run_command(command) @zebra.command() def packet(): """Debug option set for zebra packet""" - command = 'sudo vtysh -c "debug zebra packet"' + command = ['sudo', 'vtysh', '-c', "debug zebra packet"] run_command(command) @zebra.command() @click.argument('detailed', type=click.Choice(['detailed']), required=False) def rib(detailed): """Debug RIB events""" - command = 'sudo vtysh -c "debug zebra rib' + command = ['sudo', 'vtysh', '-c', "debug zebra rib"] if detailed is not None: - command += " detailed" - command += '"' + command[-1] += " detailed" run_command(command) @zebra.command() def vxlan(): """Debug option set for zebra VxLAN (EVPN)""" - command = 'sudo vtysh -c "debug zebra vxlan"' + command = ['sudo', 'vtysh', '-c', "debug zebra vxlan"] run_command(command) else: @@ -193,49 +196,49 @@ def vxlan(): def bgp(ctx): """debug bgp on""" if ctx.invoked_subcommand is None: - command = 'sudo vtysh -c "debug bgp"' + command = ['sudo', 'vtysh', '-c', "debug bgp"] run_command(command) @bgp.command() def events(): """debug bgp events on""" - command = 'sudo vtysh -c "debug bgp events"' + command = ['sudo', 'vtysh', '-c', "debug bgp events"] run_command(command) @bgp.command() def updates(): """debug bgp updates on""" - command = 'sudo vtysh -c "debug bgp updates"' + command = ['sudo', 'vtysh', '-c', "debug bgp updates"] run_command(command) @bgp.command() def as4(): """debug bgp as4 actions on""" - command = 'sudo vtysh -c "debug bgp as4"' + command = ['sudo', 'vtysh', '-c', "debug bgp as4"] run_command(command) @bgp.command() def filters(): """debug bgp filters on""" - command = 'sudo vtysh -c "debug bgp filters"' + command = ['sudo', 'vtysh', '-c', "debug bgp filters"] run_command(command) @bgp.command() def fsm(): """debug bgp finite state machine on""" - command = 'sudo vtysh -c "debug bgp fsm"' + command = ['sudo', 'vtysh', '-c', "debug bgp fsm"] run_command(command) @bgp.command() def keepalives(): """debug bgp keepalives on""" - command = 'sudo vtysh -c "debug bgp keepalives"' + command = ['sudo', 'vtysh', '-c', "debug bgp keepalives"] run_command(command) @bgp.command() def zebra(): """debug bgp zebra messages on""" - command = 'sudo vtysh -c "debug bgp zebra"' + command = ['sudo', 'vtysh', '-c', "debug bgp zebra"] run_command(command) # @@ -248,32 +251,31 @@ def zebra(): @zebra.command() def events(): - """debug option set for zebra events""" - command = 'sudo vtysh -c "debug zebra events"' + command = ['sudo', 'vtysh', '-c', "debug zebra events"] run_command(command) @zebra.command() def fpm(): """debug zebra FPM events""" - command = 'sudo vtysh -c "debug zebra fpm"' + command = ['sudo', 'vtysh', '-c', "debug zebra fpm"] run_command(command) @zebra.command() def kernel(): """debug option set for zebra between kernel interface""" - command = 'sudo vtysh -c "debug zebra kernel"' + command = ['sudo', 'vtysh', '-c', "debug zebra kernel"] run_command(command) @zebra.command() def packet(): """debug option set for zebra packet""" - command = 'sudo vtysh -c "debug zebra packet"' + command = ['sudo', 'vtysh', '-c', "debug zebra packet"] run_command(command) @zebra.command() def rib(): """debug RIB events""" - command = 'sudo vtysh -c "debug 
zebra rib"' + command = ['sudo', 'vtysh', '-c', "debug zebra rib"] run_command(command) diff --git a/tests/debug_test.py b/tests/debug_test.py new file mode 100644 index 0000000000..7ac182f434 --- /dev/null +++ b/tests/debug_test.py @@ -0,0 +1,599 @@ +import click +import pytest +import importlib +from unittest.mock import patch, MagicMock +from click.testing import CliRunner + +class TestDebugFrr(object): + @patch('subprocess.check_output', MagicMock(return_value='FRRouting')) + def setup(self): + print('SETUP') + import debug.main as debug + import undebug.main as undebug + importlib.reload(debug) + importlib.reload(undebug) + + # debug + @patch('debug.main.run_command') + def test_debug_bgp_allow_martians(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp'].commands['allow-martians']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp allow-martians']) + + @patch('debug.main.run_command') + def test_debug_bgp_as4(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp'].commands['as4']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp as4']) + + result = runner.invoke(debug.cli.commands['bgp'].commands['as4'], ['segment']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp as4 segment']) + + @patch('debug.main.run_command') + def test_debug_bgp_bestpath(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp'].commands['bestpath'], ['dummyprefix']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp bestpath dummyprefix']) + + @patch('debug.main.run_command') + def test_debug_bgp_keepalives(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp'].commands['keepalives']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp keepalives']) + + result = runner.invoke(debug.cli.commands['bgp'].commands['keepalives'], ['dummyprefix']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp keepalives dummyprefix']) + + @patch('debug.main.run_command') + def test_debug_bgp_neighbor_events(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp'].commands['neighbor-events']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp neighbor-events']) + + result = runner.invoke(debug.cli.commands['bgp'].commands['neighbor-events'], ['dummyprefix']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp neighbor-events dummyprefix']) + + @patch('debug.main.run_command') + def test_debug_bgp_nht(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp'].commands['nht']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp nht']) + + @patch('debug.main.run_command') + def test_debug_bgp_pbr(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp'].commands['pbr']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 
'vtysh', '-c', 'debug bgp pbr']) + + result = runner.invoke(debug.cli.commands['bgp'].commands['pbr'], ['error']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp pbr error']) + + @patch('debug.main.run_command') + def test_debug_bgp_update_groups(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp'].commands['update-groups']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp update-groups']) + + @patch('debug.main.run_command') + def test_debug_bgp_updates(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp'].commands['updates']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp updates']) + + result = runner.invoke(debug.cli.commands['bgp'].commands['updates'], ['prefix']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp updates prefix']) + + result = runner.invoke(debug.cli.commands['bgp'].commands['updates'], ['prefix', 'dummyprefix']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp updates prefix dummyprefix']) + + + @patch('debug.main.run_command') + def test_debug_bgp_zebra(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp'].commands['zebra']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp zebra']) + + result = runner.invoke(debug.cli.commands['bgp'].commands['zebra'], ['dummyprefix']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp zebra prefix dummyprefix']) + + @patch('debug.main.run_command') + def test_debug_zebra_dplane(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['zebra'].commands['dplane']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra dplane']) + + result = runner.invoke(debug.cli.commands['zebra'].commands['dplane'], ['detailed']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra dplane detailed']) + + @patch('debug.main.run_command') + def test_debug_zebra_events(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['zebra'].commands['events']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra events']) + + @patch('debug.main.run_command') + def test_debug_zebra_fpm(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['zebra'].commands['fpm']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra fpm']) + + @patch('debug.main.run_command') + def test_debug_zebra_kernel(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['zebra'].commands['kernel']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra kernel']) + + @patch('debug.main.run_command') + def test_debug_zebra_nht(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['zebra'].commands['nht']) + 
assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra nht']) + + @patch('debug.main.run_command') + def test_debug_zebra_packet(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['zebra'].commands['packet']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra packet']) + + @patch('debug.main.run_command') + def test_debug_zebra_rib(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['zebra'].commands['rib']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra rib']) + + result = runner.invoke(debug.cli.commands['zebra'].commands['rib'], ['detailed']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra rib detailed']) + + @patch('debug.main.run_command') + def test_debug_zebra_vxlan(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['zebra'].commands['vxlan']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra vxlan']) + + # undebug + @patch('undebug.main.run_command') + def test_undebug_bgp_allow_martians(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp'].commands['allow-martians']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp allow-martians']) + + @patch('undebug.main.run_command') + def test_undebug_bgp_as4(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp'].commands['as4']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp as4']) + + result = runner.invoke(undebug.cli.commands['bgp'].commands['as4'], ['segment']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp as4 segment']) + + @patch('undebug.main.run_command') + def test_undebug_bgp_bestpath(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp'].commands['bestpath'], ['dummyprefix']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp bestpath dummyprefix']) + + @patch('undebug.main.run_command') + def test_undebug_bgp_keepalives(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp'].commands['keepalives']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp keepalives']) + + result = runner.invoke(undebug.cli.commands['bgp'].commands['keepalives'], ['dummyprefix']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp keepalives dummyprefix']) + + @patch('undebug.main.run_command') + def test_undebug_bgp_neighbor_events(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp'].commands['neighbor-events']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp neighbor-events']) + + result = runner.invoke(undebug.cli.commands['bgp'].commands['neighbor-events'], ['dummyprefix']) + assert 
result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp neighbor-events dummyprefix']) + + @patch('undebug.main.run_command') + def test_undebug_bgp_nht(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp'].commands['nht']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp nht']) + + @patch('undebug.main.run_command') + def test_undebug_bgp_pbr(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp'].commands['pbr']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp pbr']) + + result = runner.invoke(undebug.cli.commands['bgp'].commands['pbr'], ['error']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp pbr error']) + + @patch('undebug.main.run_command') + def test_undebug_bgp_update_groups(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp'].commands['update-groups']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp update-groups']) + + @patch('undebug.main.run_command') + def test_undebug_bgp_updates(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp'].commands['updates']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp updates']) + + result = runner.invoke(undebug.cli.commands['bgp'].commands['updates'], ['prefix']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp updates prefix']) + + result = runner.invoke(undebug.cli.commands['bgp'].commands['updates'], ['prefix', 'dummyprefix']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp updates prefix dummyprefix']) + + + @patch('undebug.main.run_command') + def test_undebug_bgp_zebra(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp'].commands['zebra']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp zebra']) + + result = runner.invoke(undebug.cli.commands['bgp'].commands['zebra'], ['dummyprefix']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp zebra prefix dummyprefix']) + + @patch('undebug.main.run_command') + def test_undebug_zebra_dplane(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['zebra'].commands['dplane']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra dplane']) + + result = runner.invoke(undebug.cli.commands['zebra'].commands['dplane'], ['detailed']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra dplane detailed']) + + @patch('undebug.main.run_command') + def test_undebug_zebra_events(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['zebra'].commands['events']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra events']) + + 
@patch('undebug.main.run_command') + def test_undebug_zebra_fpm(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['zebra'].commands['fpm']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra fpm']) + + @patch('undebug.main.run_command') + def test_undebug_zebra_kernel(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['zebra'].commands['kernel']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra kernel']) + + @patch('undebug.main.run_command') + def test_undebug_zebra_nht(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['zebra'].commands['nht']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra nht']) + + @patch('undebug.main.run_command') + def test_undebug_zebra_packet(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['zebra'].commands['packet']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra packet']) + + @patch('undebug.main.run_command') + def test_undebug_zebra_rib(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['zebra'].commands['rib']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra rib']) + + result = runner.invoke(undebug.cli.commands['zebra'].commands['rib'], ['detailed']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra rib detailed']) + + @patch('undebug.main.run_command') + def test_undebug_zebra_vxlan(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['zebra'].commands['vxlan']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra vxlan']) + +class TestDebugQuagga(object): + @patch('subprocess.check_output', MagicMock(return_value='quagga')) + def setup(self): + print('SETUP') + import debug.main as debug + import undebug.main as undebug + importlib.reload(debug) + importlib.reload(undebug) + + # debug + @patch('debug.main.run_command') + def test_debug_bgp(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp']) + + @patch('debug.main.run_command') + def test_debug_bgp_events(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp'].commands['events']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp events']) + + @patch('debug.main.run_command') + def test_debug_bgp_updates(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp'].commands['updates']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp updates']) + + @patch('debug.main.run_command') + def test_debug_bgp_as4(self, run_command): + import debug.main as debug + runner = CliRunner() + result = 
runner.invoke(debug.cli.commands['bgp'].commands['as4']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp as4']) + + @patch('debug.main.run_command') + def test_debug_bgp_filters(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp'].commands['filters']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp filters']) + + @patch('debug.main.run_command') + def test_debug_bgp_fsm(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp'].commands['fsm']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp fsm']) + + @patch('debug.main.run_command') + def test_debug_bgp_keepalives(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp'].commands['keepalives']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp keepalives']) + + @patch('debug.main.run_command') + def test_debug_bgp_zebra(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp'].commands['zebra']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp zebra']) + + @patch('debug.main.run_command') + def test_debug_zebra_events(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['zebra'].commands['events']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra events']) + + @patch('debug.main.run_command') + def test_debug_zebra_fpm(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['zebra'].commands['fpm']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra fpm']) + + @patch('debug.main.run_command') + def test_debug_zebra_kernel(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['zebra'].commands['kernel']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra kernel']) + + @patch('debug.main.run_command') + def test_debug_zebra_packet(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['zebra'].commands['packet']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra packet']) + + @patch('debug.main.run_command') + def test_debug_zebra_rib(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['zebra'].commands['rib']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra rib']) + + # undebug + @patch('undebug.main.run_command') + def test_undebug_bgp(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp']) + + @patch('undebug.main.run_command') + def test_undebug_bgp_events(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = 
runner.invoke(undebug.cli.commands['bgp'].commands['events']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp events']) + + @patch('undebug.main.run_command') + def test_undebug_bgp_updates(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp'].commands['updates']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp updates']) + + @patch('undebug.main.run_command') + def test_undebug_bgp_as4(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp'].commands['as4']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp as4']) + + @patch('undebug.main.run_command') + def test_undebug_bgp_filters(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp'].commands['filters']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp filters']) + + @patch('undebug.main.run_command') + def test_undebug_bgp_fsm(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp'].commands['fsm']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp fsm']) + + @patch('undebug.main.run_command') + def test_undebug_bgp_keepalives(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp'].commands['keepalives']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp keepalives']) + + @patch('undebug.main.run_command') + def test_undebug_bgp_zebra(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp'].commands['zebra']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp zebra']) + + @patch('undebug.main.run_command') + def test_undebug_zebra_events(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['zebra'].commands['events']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra events']) + + @patch('undebug.main.run_command') + def test_undebug_zebra_fpm(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['zebra'].commands['fpm']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra fpm']) + + @patch('undebug.main.run_command') + def test_undebug_zebra_kernel(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['zebra'].commands['kernel']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra kernel']) + + @patch('undebug.main.run_command') + def test_undebug_zebra_packet(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['zebra'].commands['packet']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra packet']) + + @patch('undebug.main.run_command') + def 
test_undebug_zebra_rib(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['zebra'].commands['rib']) + assert result.exit_code == 0 + + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra rib']) + diff --git a/undebug/main.py b/undebug/main.py index 3810add68b..17767973cc 100644 --- a/undebug/main.py +++ b/undebug/main.py @@ -1,9 +1,13 @@ +import re +import sys import click import subprocess +from shlex import join def run_command(command, pager=False): - click.echo(click.style("Command: ", fg='cyan') + click.style(command, fg='green')) - p = subprocess.Popen(command, shell=True, text=True, stdout=subprocess.PIPE) + command_str = join(command) + click.echo(click.style("Command: ", fg='cyan') + click.style(command_str, fg='green')) + p = subprocess.Popen(command, text=True, stdout=subprocess.PIPE) output = p.stdout.read() if pager: click.echo_via_pager(output) @@ -22,7 +26,8 @@ def cli(): pass -p = subprocess.check_output(["sudo vtysh -c 'show version'"], shell=True, text=True) +prefix_pattern = '^[A-Za-z0-9.:/]*$' +p = subprocess.check_output(["sudo", "vtysh", "-c", 'show version'], text=True) if 'FRRouting' in p: # # 'bgp' group for FRR ### @@ -35,66 +40,64 @@ def bgp(): @bgp.command('allow-martians') def allow_martians(): """BGP allow martian next hops""" - command = 'sudo vtysh -c "no debug bgp allow-martians"' + command = ["sudo", "vtysh", "-c", "no debug bgp allow-martians"] run_command(command) @bgp.command() @click.argument('additional', type=click.Choice(['segment']), required=False) def as4(additional): """BGP AS4 actions""" - command = 'sudo vtysh -c "no debug bgp as4' + command = ["sudo", "vtysh", "-c", "no debug bgp as4"] if additional is not None: - command += " segment" - command += '"' + command[-1] += " segment" run_command(command) @bgp.command() @click.argument('prefix', required=True) def bestpath(prefix): """BGP bestpath""" - command = 'sudo vtysh -c "no debug bgp bestpath %s"' % prefix + if not re.match(prefix_pattern, prefix): + sys.exit('Prefix contains only number, alphabet, period, colon, and forward slash') + command = ["sudo", "vtysh", "-c", "no debug bgp bestpath %s" % prefix] run_command(command) @bgp.command() @click.argument('prefix_or_iface', required=False) def keepalives(prefix_or_iface): """BGP Neighbor Keepalives""" - command = 'sudo vtysh -c "no debug bgp keepalives' + command = ["sudo", "vtysh", "-c", "no debug bgp keepalives"] if prefix_or_iface is not None: - command += " " + prefix_or_iface - command += '"' + command[-1] += ' ' + prefix_or_iface run_command(command) @bgp.command('neighbor-events') @click.argument('prefix_or_iface', required=False) def neighbor_events(prefix_or_iface): """BGP Neighbor Events""" - command = 'sudo vtysh -c "no debug bgp neighbor-events' + command = ["sudo", "vtysh", "-c", "no debug bgp neighbor-events"] if prefix_or_iface is not None: - command += " " + prefix_or_iface - command += '"' + command[-1] += ' ' + prefix_or_iface run_command(command) @bgp.command() def nht(): """BGP nexthop tracking events""" - command = 'sudo vtysh -c "no debug bgp nht"' + command = ["sudo", "vtysh", "-c", "no debug bgp nht"] run_command(command) @bgp.command() @click.argument('additional', type=click.Choice(['error']), required=False) def pbr(additional): """BGP policy based routing""" - command = 'sudo vtysh -c "no debug bgp pbr' + command = ["sudo", "vtysh", "-c", "no debug bgp pbr"] if additional is not None: - command += " error" - command += '"' + 
command[-1] += " error" run_command(command) @bgp.command('update-groups') def update_groups(): """BGP update-groups""" - command = 'sudo vtysh -c "no debug bgp update-groups"' + command = ["sudo", "vtysh", "-c", "no debug bgp update-groups"] run_command(command) @bgp.command() @@ -102,22 +105,26 @@ def update_groups(): @click.argument('prefix', required=False) def updates(direction, prefix): """BGP updates""" - command = 'sudo vtysh -c "no debug bgp updates' + bgp_cmd = "no debug bgp updates" if direction is not None: - command += " " + direction + bgp_cmd += ' ' + direction if prefix is not None: - command += " " + prefix - command += '"' + if not re.match(prefix_pattern, prefix): + sys.exit('Prefix contains only number, alphabet, period, colon, and forward slash') + bgp_cmd += ' ' + prefix + command = ["sudo", "vtysh", "-c", bgp_cmd] run_command(command) @bgp.command() @click.argument('prefix', required=False) def zebra(prefix): """BGP Zebra messages""" - command = 'sudo vtysh -c "no debug bgp zebra' + bgp_cmd = "no debug bgp zebra" if prefix is not None: - command += " prefix " + prefix - command += '"' + if not re.match(prefix_pattern, prefix): + sys.exit('Prefix contains only number, alphabet, period, colon, and forward slash') + bgp_cmd += ' prefix ' + prefix + command = ["sudo", "vtysh", "-c", bgp_cmd] run_command(command) # @@ -132,56 +139,55 @@ def zebra(): @click.argument('detailed', type=click.Choice(['detailed']), required=False) def dplane(detailed): """Debug zebra dataplane events""" - command = 'sudo vtysh -c "no debug zebra dplane' + zb_cmd = "no debug zebra dplane" if detailed is not None: - command += " detailed" - command += '"' + zb_cmd += " detailed" + command = ["sudo", "vtysh", "-c", zb_cmd] run_command(command) @zebra.command() def events(): """Debug option set for zebra events""" - command = 'sudo vtysh -c "no debug zebra events"' + command = ["sudo", "vtysh", "-c", "no debug zebra events"] run_command(command) @zebra.command() def fpm(): """Debug zebra FPM events""" - command = 'sudo vtysh -c "no debug zebra fpm"' + command = ["sudo", "vtysh", "-c", "no debug zebra fpm"] run_command(command) @zebra.command() def kernel(): """Debug option set for zebra between kernel interface""" - command = 'sudo vtysh -c "no debug zebra kernel"' + command = ["sudo", "vtysh", "-c", "no debug zebra kernel"] run_command(command) @zebra.command() def nht(): """Debug option set for zebra next hop tracking""" - command = 'sudo vtysh -c "no debug zebra nht"' + command = ["sudo", "vtysh", "-c", "no debug zebra nht"] run_command(command) @zebra.command() def packet(): """Debug option set for zebra packet""" - command = 'sudo vtysh -c "no debug zebra packet"' + command = ["sudo", "vtysh", "-c", "no debug zebra packet"] run_command(command) @zebra.command() @click.argument('detailed', type=click.Choice(['detailed']), required=False) def rib(detailed): """Debug RIB events""" - command = 'sudo vtysh -c "no debug zebra rib' + command = ["sudo", "vtysh", "-c", "no debug zebra rib"] if detailed is not None: - command += " detailed" - command += '"' + command[-1] += " detailed" run_command(command) @zebra.command() def vxlan(): """Debug option set for zebra VxLAN (EVPN)""" - command = 'sudo vtysh -c "no debug zebra vxlan"' + command = ["sudo", "vtysh", "-c", "no debug zebra vxlan"] run_command(command) else: @@ -193,49 +199,49 @@ def vxlan(): def bgp(ctx): """debug bgp off""" if ctx.invoked_subcommand is None: - command = 'sudo vtysh -c "no debug bgp"' + command = ["sudo", "vtysh", "-c", "no 
debug bgp"] run_command(command) @bgp.command() def events(): """debug bgp events off""" - command = 'sudo vtysh -c "no debug bgp events"' + command = ["sudo", "vtysh", "-c", "no debug bgp events"] run_command(command) @bgp.command() def updates(): """debug bgp updates off""" - command = 'sudo vtysh -c "no debug bgp updates"' + command = ["sudo", "vtysh", "-c", "no debug bgp updates"] run_command(command) @bgp.command() def as4(): """debug bgp as4 actions off""" - command = 'sudo vtysh -c "no debug bgp as4"' + command = ["sudo", "vtysh", "-c", "no debug bgp as4"] run_command(command) @bgp.command() def filters(): """debug bgp filters off""" - command = 'sudo vtysh -c "no debug bgp filters"' + command = ["sudo", "vtysh", "-c", "no debug bgp filters"] run_command(command) @bgp.command() def fsm(): """debug bgp finite state machine off""" - command = 'sudo vtysh -c "no debug bgp fsm"' + command = ["sudo", "vtysh", "-c", "no debug bgp fsm"] run_command(command) @bgp.command() def keepalives(): """debug bgp keepalives off""" - command = 'sudo vtysh -c "no debug bgp keepalives"' + command = ["sudo", "vtysh", "-c", "no debug bgp keepalives"] run_command(command) @bgp.command() def zebra(): """debug bgp zebra messages off""" - command = 'sudo vtysh -c "no debug bgp zebra"' + command = ["sudo", "vtysh", "-c", "no debug bgp zebra"] run_command(command) # @@ -249,31 +255,31 @@ def zebra(): @zebra.command() def events(): """debug option set for zebra events""" - command = 'sudo vtysh -c "no debug zebra events"' + command = ["sudo", "vtysh", "-c", "no debug zebra events"] run_command(command) @zebra.command() def fpm(): """debug zebra FPM events""" - command = 'sudo vtysh -c "no debug zebra fpm"' + command = ["sudo", "vtysh", "-c", "no debug zebra fpm"] run_command(command) @zebra.command() def kernel(): """debug option set for zebra between kernel interface""" - command = 'sudo vtysh -c "no debug zebra kernel"' + command = ["sudo", "vtysh", "-c", "no debug zebra kernel"] run_command(command) @zebra.command() def packet(): """debug option set for zebra packet""" - command = 'sudo vtysh -c "no debug zebra packet"' + command = ["sudo", "vtysh", "-c", "no debug zebra packet"] run_command(command) @zebra.command() def rib(): """debug RIB events""" - command = 'sudo vtysh -c "no debug zebra rib"' + command = ["sudo", "vtysh", "-c", "no debug zebra rib"] run_command(command) From 04d0b34a5a2997f72dde0bedded8f252e45e74c3 Mon Sep 17 00:00:00 2001 From: saksarav-nokia Date: Mon, 10 Apr 2023 16:10:23 -0400 Subject: [PATCH 61/66] [voq][chassis][generate_dump] [BCM] Dump only the relevant BCM commands for fabric cards (#2606) Signed-off-by: Sakthivadivu Saravanaraj sakthivadivu.saravanaraj@nokia.com What I did When we run generate_dump script in SFM cards in a Broadcom chassis, the errors were printed in syslog for all the Broadcom commands which are not supported in fabric/Ramon cards. Added a check to dump all l2, l3, fp and tm commands only if the switch_type is non fabric card since these commands are not valid for fabric cards. How I did it Get the switch_type from DEVICE_METADATA and check the switch_type before dumping these BCM commands. How to verify it Ran generate_dump script in both voq and fabric cards, (1)verified that the fabric cards don't log the errors for the BCM commands (2) verified that the commands are dumped correctly in non-fabric dnx cards. 
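A minimal sketch of the same gating logic, written in Python rather than the bash of generate_dump, assuming the platform_env.conf layout implied by the diff (plain key=value lines); run_bcm_dumps() is a hypothetical stand-in for the l2/l3/fp/tm save_bcmcmd_all_ns calls:

    import os

    def is_supervisor(platform):
        # Parse /usr/share/sonic/device/<platform>/platform_env.conf; the
        # script defaults supervisor=0, and only an explicit "1" marks a
        # supervisor/fabric card.
        conf = "/usr/share/sonic/device/{}/platform_env.conf".format(platform)
        flags = {}
        if os.path.isfile(conf):
            with open(conf) as f:
                for line in f:
                    line = line.strip()
                    if line and not line.startswith("#") and "=" in line:
                        key, _, value = line.partition("=")
                        flags[key.strip()] = value.strip()
        return flags.get("supervisor") == "1"

    def run_bcm_dumps():
        pass  # hypothetical stand-in for the l2/l3/fp/tm save_bcmcmd_all_ns calls

    if not is_supervisor("x86_64-example_platform-r0"):  # platform name is illustrative
        run_bcm_dumps()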
Signed-off-by: Sakthivadivu Saravanaraj Signed-off-by: saksarav --- scripts/generate_dump | 66 ++++++++++++++++++++++++------------------- 1 file changed, 37 insertions(+), 29 deletions(-) diff --git a/scripts/generate_dump b/scripts/generate_dump index 79f6ae1b21..74ceede065 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -1226,39 +1226,47 @@ collect_broadcom() { fi if [ "$bcm_family" == "broadcom-dnx" ]; then - save_bcmcmd_all_ns "\"l2 show\"" "l2.summary" - save_bcmcmd_all_ns "\"field group list\"" "fpgroup.list.summary" - total_fp_groups=34 - for (( fp_grp=0; fp_grp<$total_fp_groups; fp_grp++ )) - do - save_bcmcmd_all_ns "\"field group info group=$fp_grp\"" "fpgroup$fp_grp.info.summary" - done - save_bcmcmd_all_ns "\"dbal table dump table=IPV4_UNICAST_PRIVATE_LPM_FORWARD\"" "l3.ipv4.lpm.summary" - save_bcmcmd_all_ns "\"dbal table dump table=IPV6_UNICAST_PRIVATE_LPM_FORWARD\"" "l3.ipv6.lpm.summary" - save_bcmcmd_all_ns "\"dbal table dump table=IPV4_UNICAST_PRIVATE_HOST\"" "l3.ipv4.host.summary" - save_bcmcmd_all_ns "\"dbal table dump table=IPV6_UNICAST_PRIVATE_HOST\"" "l3.ipv6.host.summary" - save_bcmcmd_all_ns "\"dbal table dump table=SUPER_FEC_1ST_HIERARCHY\"" "l3.egress.fec.summary" - save_bcmcmd_all_ns "\"dbal table dump table=ECMP_TABLE\"" "ecmp.table.summary" - save_bcmcmd_all_ns "\"dbal table dump table=ECMP_GROUP_PROFILE_TABLE\"" "ecmp.group.summary" - save_bcmcmd_all_ns "\"dbal table dump table=ING_VSI_INFO_DB\"" "ing.vsi.summary" - save_bcmcmd_all_ns "\"dbal table dump table=L3_MY_MAC_DA_PREFIXES\"" "l3.mymac.summary" - save_bcmcmd_all_ns "\"dbal table dump table=INGRESS_VLAN_MEMBERSHIP\"" "ing.vlan.summary" - save_bcmcmd_all_ns "\"dbal table dump table=LOCAL_SBC_IN_LIF_MATCH_INFO_SW\"" "sbc.inlif.summary" - save_bcmcmd_all_ns "\"dbal table dump table=SNIF_COMMAND_TABLE\"" "snif.command.summary" - save_bcmcmd_all_ns "\"port mgmt dump full\"" "port.mgmt.summary" - save_bcmcmd_all_ns "\"tm lag\"" "tm.lag.summary" - save_bcmcmd_all_ns "\"pp info fec\"" "pp.fec.summary" - save_bcmcmd_all_ns "\"nif sts\"" "nif.sts.summary" + supervisor=0 + PLATFORM_ENV_CONF=/usr/share/sonic/device/${platform}/platform_env.conf + if [ -f "$PLATFORM_ENV_CONF" ]; then + source $PLATFORM_ENV_CONF + fi + if [[ x"$supervisor" != x"1" ]]; then + + save_bcmcmd_all_ns "\"l2 show\"" "l2.summary" + save_bcmcmd_all_ns "\"field group list\"" "fpgroup.list.summary" + total_fp_groups=34 + for (( fp_grp=0; fp_grp<$total_fp_groups; fp_grp++ )) + do + save_bcmcmd_all_ns "\"field group info group=$fp_grp\"" "fpgroup$fp_grp.info.summary" + done + save_bcmcmd_all_ns "\"dbal table dump table=IPV4_UNICAST_PRIVATE_LPM_FORWARD\"" "l3.ipv4.lpm.summary" + save_bcmcmd_all_ns "\"dbal table dump table=IPV6_UNICAST_PRIVATE_LPM_FORWARD\"" "l3.ipv6.lpm.summary" + save_bcmcmd_all_ns "\"dbal table dump table=IPV4_UNICAST_PRIVATE_HOST\"" "l3.ipv4.host.summary" + save_bcmcmd_all_ns "\"dbal table dump table=IPV6_UNICAST_PRIVATE_HOST\"" "l3.ipv6.host.summary" + save_bcmcmd_all_ns "\"dbal table dump table=SUPER_FEC_1ST_HIERARCHY\"" "l3.egress.fec.summary" + save_bcmcmd_all_ns "\"dbal table dump table=ECMP_TABLE\"" "ecmp.table.summary" + save_bcmcmd_all_ns "\"dbal table dump table=ECMP_GROUP_PROFILE_TABLE\"" "ecmp.group.summary" + save_bcmcmd_all_ns "\"dbal table dump table=ING_VSI_INFO_DB\"" "ing.vsi.summary" + save_bcmcmd_all_ns "\"dbal table dump table=L3_MY_MAC_DA_PREFIXES\"" "l3.mymac.summary" + save_bcmcmd_all_ns "\"dbal table dump table=INGRESS_VLAN_MEMBERSHIP\"" "ing.vlan.summary" + save_bcmcmd_all_ns "\"dbal 
table dump table=LOCAL_SBC_IN_LIF_MATCH_INFO_SW\"" "sbc.inlif.summary" + save_bcmcmd_all_ns "\"dbal table dump table=SNIF_COMMAND_TABLE\"" "snif.command.summary" + save_bcmcmd_all_ns "\"port mgmt dump full\"" "port.mgmt.summary" + save_bcmcmd_all_ns "\"tm lag\"" "tm.lag.summary" + save_bcmcmd_all_ns "\"pp info fec\"" "pp.fec.summary" + save_bcmcmd_all_ns "\"nif sts\"" "nif.sts.summary" + save_bcmcmd_all_ns "\"tm ing q map\"" "tm.ingress.qmap.summary" + save_bcmcmd_all_ns "\"tm ing vsq resources\"" "tm.ing.vsq.res.summary" + for group in {a..f} + do + save_bcmcmd_all_ns "\"tm ing vsq non g=$group\"" "tm.ing.vsq.non.group-$group.summary" + done + fi save_bcmcmd_all_ns "\"port pm info\"" "port.pm.summary" save_bcmcmd_all_ns "\"conf show\"" "conf.show.summary" save_bcmcmd_all_ns "\"show counters\"" "show.counters.summary" save_bcmcmd_all_ns "\"diag counter g\"" "diag.counter.summary" - save_bcmcmd_all_ns "\"tm ing q map\"" "tm.ingress.qmap.summary" - save_bcmcmd_all_ns "\"tm ing vsq resources\"" "tm.ing.vsq.res.summary" - for group in {a..f} - do - save_bcmcmd_all_ns "\"tm ing vsq non g=$group\"" "tm.ing.vsq.non.group-$group.summary" - done save_bcmcmd_all_ns "\"fabric connectivity\"" "fabric.connect.summary" save_bcmcmd_all_ns "\"port status\"" "port.status.summary" else From a1057b279233f484be550ee36bd4d4677794a407 Mon Sep 17 00:00:00 2001 From: Sudharsan Dhamal Gopalarathnam Date: Mon, 10 Apr 2023 18:38:29 -0700 Subject: [PATCH 62/66] [config reload]Config Reload Enhancement (#2693) #### What I did Code changes for HLD: https://github.com/sonic-net/SONiC/pull/1203 Removed the timer based checks for config reload Added db_migrator to migrate from "has_timer" to "delayed" Modified package-manager to migrate from "has_timer" to "delayed" #### How I did it Modified relevant files #### How to verify it Added UT to verify --- config/main.py | 27 +----- scripts/db_migrator.py | 23 ++++- .../service_creator/feature.py | 6 +- tests/config_test.py | 85 +------------------ tests/counterpoll_input/config_db.json | 30 +++---- .../config_db/feature-expected.json | 6 +- .../config_db/feature-input.json | 3 +- tests/db_migrator_input/init_cfg.json | 6 +- tests/mock_tables/t1/config_db.json | 30 +++---- .../test_service_creator.py | 10 +-- 10 files changed, 69 insertions(+), 157 deletions(-) diff --git a/config/main.py b/config/main.py index 9e77211c1e..990dfc689f 100644 --- a/config/main.py +++ b/config/main.py @@ -870,23 +870,8 @@ def _get_sonic_services(): return (unit.strip() for unit in out.splitlines()) -def _get_delayed_sonic_units(get_timers=False): - rc1, _ = clicommon.run_command("systemctl list-dependencies --plain sonic-delayed.target | sed '1d'", return_cmd=True) - rc2, _ = clicommon.run_command("systemctl is-enabled {}".format(rc1.replace("\n", " ")), return_cmd=True) - timer = [line.strip() for line in rc1.splitlines()] - state = [line.strip() for line in rc2.splitlines()] - services = [] - for unit in timer: - if state[timer.index(unit)] == "enabled": - if not get_timers: - services.append(re.sub('\.timer$', '', unit, 1)) - else: - services.append(unit) - return services - - def _reset_failed_services(): - for service in itertools.chain(_get_sonic_services(), _get_delayed_sonic_units()): + for service in _get_sonic_services(): clicommon.run_command("systemctl reset-failed {}".format(service)) @@ -905,12 +890,6 @@ def _restart_services(): click.echo("Reloading Monit configuration ...") clicommon.run_command("sudo monit reload") -def _delay_timers_elapsed(): - for timer in 
_get_delayed_sonic_units(get_timers=True): - out, _ = clicommon.run_command("systemctl show {} --property=LastTriggerUSecMonotonic --value".format(timer), return_cmd=True) - if out.strip() == "0": - return False - return True def _per_namespace_swss_ready(service_name): out, _ = clicommon.run_command("systemctl show {} --property ActiveState --value".format(service_name), return_cmd=True) @@ -1492,10 +1471,6 @@ def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_form click.echo("System is not up. Retry later or use -f to avoid system checks") sys.exit(CONFIG_RELOAD_NOT_READY) - if not _delay_timers_elapsed(): - click.echo("Relevant services are not up. Retry later or use -f to avoid system checks") - sys.exit(CONFIG_RELOAD_NOT_READY) - if not _swss_ready(): click.echo("SwSS container is not ready. Retry later or use -f to avoid system checks") sys.exit(CONFIG_RELOAD_NOT_READY) diff --git a/scripts/db_migrator.py b/scripts/db_migrator.py index 5ed4133bc8..f1bc404d47 100755 --- a/scripts/db_migrator.py +++ b/scripts/db_migrator.py @@ -45,7 +45,7 @@ def __init__(self, namespace, socket=None): none-zero values. build: sequentially increase within a minor version domain. """ - self.CURRENT_VERSION = 'version_4_0_1' + self.CURRENT_VERSION = 'version_4_0_2' self.TABLE_NAME = 'VERSIONS' self.TABLE_KEY = 'DATABASE' @@ -575,6 +575,17 @@ def migrate_port_qos_map_global(self): self.configDB.set_entry('PORT_QOS_MAP', 'global', {"dscp_to_tc_map": dscp_to_tc_map_table_names[0]}) log.log_info("Created entry for global DSCP_TO_TC_MAP {}".format(dscp_to_tc_map_table_names[0])) + def migrate_feature_timer(self): + ''' + Migrate feature 'has_timer' field to 'delayed' + ''' + feature_table = self.configDB.get_table('FEATURE') + for feature, config in feature_table.items(): + state = config.get('has_timer') + if state is not None: + config['delayed'] = state + config.pop('has_timer') + self.configDB.set_entry('FEATURE', feature, config) def migrate_route_table(self): """ Handle route table migration. Migrations handled: @@ -926,9 +937,17 @@ def version_4_0_0(self): def version_4_0_1(self): """ Version 4_0_1. + """ + self.migrate_feature_timer() + self.set_version('version_4_0_2') + return 'version_4_0_2' + + def version_4_0_2(self): + """ + Version 4_0_2. This is the latest version for master branch """ - log.log_info('Handling version_4_0_1') + log.log_info('Handling version_4_0_2') return None def get_version(self): diff --git a/sonic_package_manager/service_creator/feature.py b/sonic_package_manager/service_creator/feature.py index 90378d378f..43b6c309fe 100644 --- a/sonic_package_manager/service_creator/feature.py +++ b/sonic_package_manager/service_creator/feature.py @@ -105,7 +105,7 @@ def update(self, old_manifest: Manifest, new_manifest: Manifest): """ Migrate feature configuration. It can be that non-configurable - feature entries have to be updated. e.g: "has_timer" for example if + feature entries have to be updated. e.g: "delayed" for example if the new feature introduces a service timer or name of the service has changed, but user configurable entries are not changed). @@ -227,12 +227,12 @@ def get_default_feature_entries(state=None, owner=None) -> Dict[str, str]: @staticmethod def get_non_configurable_feature_entries(manifest) -> Dict[str, str]: - """ Get non-configurable feature table entries: e.g. 'has_timer' """ + """ Get non-configurable feature table entries: e.g. 
'delayed' """ return { 'has_per_asic_scope': str(manifest['service']['asic-service']), 'has_global_scope': str(manifest['service']['host-service']), - 'has_timer': str(manifest['service']['delayed']), + 'delayed': str(manifest['service']['delayed']), 'check_up_status': str(manifest['service']['check_up_status']), 'support_syslog_rate_limit': str(manifest['service']['syslog']['support-rate-limit']), } diff --git a/tests/config_test.py b/tests/config_test.py index c1bb86fe40..e1e3037fe9 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -115,10 +115,6 @@ Reloading Monit configuration ... """ -reload_config_with_untriggered_timer_output="""\ -Relevant services are not up. Retry later or use -f to avoid system checks -""" - def mock_run_command_side_effect(*args, **kwargs): command = args[0] @@ -155,41 +151,6 @@ def mock_run_command_side_effect_disabled_timer(*args, **kwargs): else: return '', 0 -def mock_run_command_side_effect_untriggered_timer(*args, **kwargs): - command = args[0] - - if kwargs.get('display_cmd'): - click.echo(click.style("Running command: ", fg='cyan') + click.style(command, fg='green')) - - if kwargs.get('return_cmd'): - if command == "systemctl list-dependencies --plain sonic-delayed.target | sed '1d'": - return 'snmp.timer', 0 - elif command == "systemctl list-dependencies --plain sonic.target | sed '1d'": - return 'swss', 0 - elif command == "systemctl is-enabled snmp.timer": - return 'enabled', 0 - elif command == "systemctl show snmp.timer --property=LastTriggerUSecMonotonic --value": - return '0', 0 - else: - return '', 0 - -def mock_run_command_side_effect_gnmi(*args, **kwargs): - command = args[0] - - if kwargs.get('display_cmd'): - click.echo(click.style("Running command: ", fg='cyan') + click.style(command, fg='green')) - - if kwargs.get('return_cmd'): - if command == "systemctl list-dependencies --plain sonic-delayed.target | sed '1d'": - return 'gnmi.timer', 0 - elif command == "systemctl list-dependencies --plain sonic.target | sed '1d'": - return 'swss', 0 - elif command == "systemctl is-enabled gnmi.timer": - return 'enabled', 0 - else: - return '', 0 - - # Load sonic-cfggen from source since /usr/local/bin/sonic-cfggen does not have .py extension. 
sonic_cfggen = load_module_from_source('sonic_cfggen', '/usr/local/bin/sonic-cfggen') @@ -235,32 +196,6 @@ def test_config_reload(self, get_cmd_module, setup_single_broadcom_asic): assert "\n".join([l.rstrip() for l in result.output.split('\n')][:1]) == reload_config_with_sys_info_command_output - def test_config_reload_untriggered_timer(self, get_cmd_module, setup_single_broadcom_asic): - with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect_untriggered_timer)) as mock_run_command: - (config, show) = get_cmd_module - - jsonfile_config = os.path.join(mock_db_path, "config_db.json") - jsonfile_init_cfg = os.path.join(mock_db_path, "init_cfg.json") - - # create object - config.INIT_CFG_FILE = jsonfile_init_cfg - config.DEFAULT_CONFIG_DB_FILE = jsonfile_config - - db = Db() - runner = CliRunner() - obj = {'config_db': db.cfgdb} - - # simulate 'config reload' to provoke load_sys_info option - result = runner.invoke(config.config.commands["reload"], ["-l", "-y"], obj=obj) - - print(result.exit_code) - print(result.output) - traceback.print_tb(result.exc_info[2]) - - assert result.exit_code == 1 - - assert "\n".join([l.rstrip() for l in result.output.split('\n')][:2]) == reload_config_with_untriggered_timer_output - @classmethod def teardown_class(cls): print("TEARDOWN") @@ -293,25 +228,7 @@ def test_load_minigraph(self, get_cmd_module, setup_single_broadcom_asic): assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == load_minigraph_command_output # Verify "systemctl reset-failed" is called for services under sonic.target mock_run_command.assert_any_call('systemctl reset-failed swss') - # Verify "systemctl reset-failed" is called for services under sonic-delayed.target - mock_run_command.assert_any_call('systemctl reset-failed snmp') - assert mock_run_command.call_count == 11 - - def test_load_minigraph_with_gnmi_timer(self, get_cmd_module, setup_single_broadcom_asic): - with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect_gnmi)) as mock_run_command: - (config, show) = get_cmd_module - runner = CliRunner() - result = runner.invoke(config.config.commands["load_minigraph"], ["-y"]) - print(result.exit_code) - print(result.output) - traceback.print_tb(result.exc_info[2]) - assert result.exit_code == 0 - assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == load_minigraph_command_output - # Verify "systemctl reset-failed" is called for services under sonic.target - mock_run_command.assert_any_call('systemctl reset-failed swss') - # Verify "systemctl reset-failed" is called for services under sonic-delayed.target - mock_run_command.assert_any_call('systemctl reset-failed gnmi') - assert mock_run_command.call_count == 11 + assert mock_run_command.call_count == 8 def test_load_minigraph_with_port_config_bad_format(self, get_cmd_module, setup_single_broadcom_asic): with mock.patch( diff --git a/tests/counterpoll_input/config_db.json b/tests/counterpoll_input/config_db.json index 40ff750db6..38cde7c15e 100644 --- a/tests/counterpoll_input/config_db.json +++ b/tests/counterpoll_input/config_db.json @@ -2235,7 +2235,7 @@ "auto_restart": "enabled", "state": "enabled", "has_global_scope": "True", - "has_timer": "False" + "delayed": "False" }, "pmon": { "has_per_asic_scope": "False", @@ -2243,7 +2243,7 @@ "auto_restart": "enabled", "state": "enabled", "has_global_scope": "True", - "has_timer": "False" + "delayed": "False" }, "sflow": { "has_per_asic_scope": "False", @@ 
-2251,7 +2251,7 @@ "auto_restart": "enabled", "state": "disabled", "has_global_scope": "True", - "has_timer": "False" + "delayed": "False" }, "database": { "has_per_asic_scope": "True", @@ -2259,7 +2259,7 @@ "auto_restart": "disabled", "state": "enabled", "has_global_scope": "True", - "has_timer": "False" + "delayed": "False" }, "telemetry": { "has_per_asic_scope": "False", @@ -2268,7 +2268,7 @@ "auto_restart": "enabled", "state": "enabled", "has_global_scope": "True", - "has_timer": "True" + "delayed": "True" }, "snmp": { "has_per_asic_scope": "False", @@ -2276,7 +2276,7 @@ "auto_restart": "enabled", "state": "enabled", "has_global_scope": "True", - "has_timer": "True" + "delayed": "True" }, "bgp": { "has_per_asic_scope": "True", @@ -2284,7 +2284,7 @@ "auto_restart": "enabled", "state": "enabled", "has_global_scope": "False", - "has_timer": "False" + "delayed": "False" }, "radv": { "has_per_asic_scope": "False", @@ -2292,7 +2292,7 @@ "auto_restart": "enabled", "state": "enabled", "has_global_scope": "True", - "has_timer": "False" + "delayed": "False" }, "mgmt-framework": { "has_per_asic_scope": "False", @@ -2300,7 +2300,7 @@ "auto_restart": "enabled", "state": "enabled", "has_global_scope": "True", - "has_timer": "True" + "delayed": "True" }, "nat": { "has_per_asic_scope": "False", @@ -2308,7 +2308,7 @@ "auto_restart": "enabled", "state": "disabled", "has_global_scope": "True", - "has_timer": "False" + "delayed": "False" }, "teamd": { "has_per_asic_scope": "True", @@ -2316,7 +2316,7 @@ "auto_restart": "enabled", "state": "enabled", "has_global_scope": "False", - "has_timer": "False" + "delayed": "False" }, "dhcp_relay": { "has_per_asic_scope": "False", @@ -2324,7 +2324,7 @@ "auto_restart": "enabled", "state": "enabled", "has_global_scope": "True", - "has_timer": "False" + "delayed": "False" }, "swss": { "has_per_asic_scope": "True", @@ -2332,7 +2332,7 @@ "auto_restart": "enabled", "state": "enabled", "has_global_scope": "False", - "has_timer": "False" + "delayed": "False" }, "syncd": { "has_per_asic_scope": "True", @@ -2340,7 +2340,7 @@ "auto_restart": "enabled", "state": "enabled", "has_global_scope": "False", - "has_timer": "False" + "delayed": "False" } }, "DSCP_TO_TC_MAP": { @@ -2669,4 +2669,4 @@ "size": "56368" } } -} \ No newline at end of file +} diff --git a/tests/db_migrator_input/config_db/feature-expected.json b/tests/db_migrator_input/config_db/feature-expected.json index 92653771fc..baf051a8bd 100644 --- a/tests/db_migrator_input/config_db/feature-expected.json +++ b/tests/db_migrator_input/config_db/feature-expected.json @@ -3,7 +3,7 @@ "auto_restart": "disabled", "has_global_scope": "False", "has_per_asic_scope": "True", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "enabled" }, @@ -11,7 +11,7 @@ "auto_restart": "enabled", "has_global_scope": "False", "has_per_asic_scope": "True", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "enabled" }, @@ -19,7 +19,7 @@ "auto_restart": "enabled", "has_global_scope": "False", "has_per_asic_scope": "True", - "has_timer": "False", + "delayed": "True", "high_mem_alert": "disabled", "state": "enabled" } diff --git a/tests/db_migrator_input/config_db/feature-input.json b/tests/db_migrator_input/config_db/feature-input.json index c6d512dad1..46a6cae613 100644 --- a/tests/db_migrator_input/config_db/feature-input.json +++ b/tests/db_migrator_input/config_db/feature-input.json @@ -8,7 +8,8 @@ "high_mem_alert": "disabled" }, "FEATURE|telemetry": { - "status": "enabled" + 
"status": "enabled", + "has_timer": "True" }, "FEATURE|syncd": { "state": "enabled" diff --git a/tests/db_migrator_input/init_cfg.json b/tests/db_migrator_input/init_cfg.json index 634477a4f9..a714b8cdfe 100644 --- a/tests/db_migrator_input/init_cfg.json +++ b/tests/db_migrator_input/init_cfg.json @@ -4,7 +4,7 @@ "auto_restart": "enabled", "has_global_scope": "False", "has_per_asic_scope": "True", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "enabled" }, @@ -12,7 +12,7 @@ "auto_restart": "enabled", "has_global_scope": "False", "has_per_asic_scope": "True", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "enabled" }, @@ -20,7 +20,7 @@ "auto_restart": "disabled", "has_global_scope": "False", "has_per_asic_scope": "True", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "disabled" } diff --git a/tests/mock_tables/t1/config_db.json b/tests/mock_tables/t1/config_db.json index f1f835182f..42a0e2da6c 100644 --- a/tests/mock_tables/t1/config_db.json +++ b/tests/mock_tables/t1/config_db.json @@ -1798,7 +1798,7 @@ "auto_restart": "enabled", "has_global_scope": "False", "has_per_asic_scope": "True", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "enabled" } @@ -1811,7 +1811,7 @@ "auto_restart": "disabled", "has_global_scope": "True", "has_per_asic_scope": "True", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "enabled" } @@ -1824,7 +1824,7 @@ "auto_restart": "enabled", "has_global_scope": "True", "has_per_asic_scope": "False", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "enabled" } @@ -1837,7 +1837,7 @@ "auto_restart": "enabled", "has_global_scope": "True", "has_per_asic_scope": "True", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "enabled" } @@ -1850,7 +1850,7 @@ "auto_restart": "enabled", "has_global_scope": "True", "has_per_asic_scope": "False", - "has_timer": "True", + "delayed": "True", "high_mem_alert": "disabled", "state": "enabled" } @@ -1863,7 +1863,7 @@ "auto_restart": "enabled", "has_global_scope": "True", "has_per_asic_scope": "False", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "disabled" } @@ -1876,7 +1876,7 @@ "auto_restart": "enabled", "has_global_scope": "True", "has_per_asic_scope": "False", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "enabled" } @@ -1889,7 +1889,7 @@ "auto_restart": "enabled", "has_global_scope": "True", "has_per_asic_scope": "False", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "enabled" } @@ -1902,7 +1902,7 @@ "auto_restart": "enabled", "has_global_scope": "True", "has_per_asic_scope": "False", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "disabled" } @@ -1915,7 +1915,7 @@ "auto_restart": "enabled", "has_global_scope": "True", "has_per_asic_scope": "False", - "has_timer": "True", + "delayed": "True", "high_mem_alert": "disabled", "state": "enabled" } @@ -1928,7 +1928,7 @@ "auto_restart": "enabled", "has_global_scope": "False", "has_per_asic_scope": "True", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "enabled" } @@ -1941,7 +1941,7 @@ "auto_restart": "enabled", "has_global_scope": "False", "has_per_asic_scope": "True", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", 
"state": "enabled" } @@ -1954,7 +1954,7 @@ "auto_restart": "enabled", "has_global_scope": "False", "has_per_asic_scope": "True", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "enabled" } @@ -1967,7 +1967,7 @@ "auto_restart": "enabled", "has_global_scope": "True", "has_per_asic_scope": "False", - "has_timer": "True", + "delayed": "True", "high_mem_alert": "disabled", "state": "enabled", "status": "enabled" @@ -3510,4 +3510,4 @@ "VERSION": "version_1_0_4" } } -} \ No newline at end of file +} diff --git a/tests/sonic_package_manager/test_service_creator.py b/tests/sonic_package_manager/test_service_creator.py index c97d362626..689a635411 100644 --- a/tests/sonic_package_manager/test_service_creator.py +++ b/tests/sonic_package_manager/test_service_creator.py @@ -218,7 +218,7 @@ def test_feature_registration(mock_sonic_db, manifest): 'set_owner': 'local', 'has_per_asic_scope': 'False', 'has_global_scope': 'True', - 'has_timer': 'False', + 'delayed': 'False', 'check_up_status': 'False', 'support_syslog_rate_limit': 'False', }) @@ -232,7 +232,7 @@ def test_feature_update(mock_sonic_db, manifest): 'set_owner': 'local', 'has_per_asic_scope': 'False', 'has_global_scope': 'True', - 'has_timer': 'False', + 'delayed': 'False', 'check_up_status': 'False', 'support_syslog_rate_limit': 'False', } @@ -256,7 +256,7 @@ def test_feature_update(mock_sonic_db, manifest): 'set_owner': 'local', 'has_per_asic_scope': 'False', 'has_global_scope': 'True', - 'has_timer': 'True', + 'delayed': 'True', 'check_up_status': 'False', 'support_syslog_rate_limit': 'False', }), @@ -278,7 +278,7 @@ def test_feature_registration_with_timer(mock_sonic_db, manifest): 'set_owner': 'local', 'has_per_asic_scope': 'False', 'has_global_scope': 'True', - 'has_timer': 'True', + 'delayed': 'True', 'check_up_status': 'False', 'support_syslog_rate_limit': 'False', }) @@ -298,7 +298,7 @@ def test_feature_registration_with_non_default_owner(mock_sonic_db, manifest): 'set_owner': 'kube', 'has_per_asic_scope': 'False', 'has_global_scope': 'True', - 'has_timer': 'False', + 'delayed': 'False', 'check_up_status': 'False', 'support_syslog_rate_limit': 'False', }) From 1468f4a039611c6e8bd7ad8778ed5643f154524d Mon Sep 17 00:00:00 2001 From: xumia <59720581+xumia@users.noreply.github.com> Date: Wed, 12 Apr 2023 09:20:21 +0800 Subject: [PATCH 63/66] Support to display the SONiC OS Version in the command show version (#2787) What I did Support to display the SONiC OS Version in the command show version. It will be used to display the version info in the SONiC command "show version". The version is used to do the FIPS certification. We do not do the FIPS certification on a specific release, but on the SONiC OS Version. SONiC Software Version: SONiC.master-13812.218661-7d94c0c28 SONiC OS Version: 11 Distribution: Debian 11.6 Kernel: 5.10.0-18-2-amd64 How I did it The device info is in sonic-net/sonic-buildimage, see PR: sonic-net/sonic-buildimage#14601 The submodule change can be merged to sonic-buildimage, after the PR 14601 merged. 
From ba28df305d75d828307fe8b780a2eb14afcc2891 Mon Sep 17 00:00:00 2001
From: xumia <59720581+xumia@users.noreply.github.com>
Date: Thu, 13 Apr 2023 19:21:56 +0800
Subject: [PATCH 64/66] Remove the unneeded new line in show version (#2792)

What I did
Remove the extra new line printed by the command show version; the leading new-line character serves no purpose.

SONiC Software Version: SONiC.master-14619.252155-d7c9d3b7d

SONiC OS Version: 11
Distribution: Debian 11.6

Change to:

SONiC Software Version: SONiC.master-14619.252155-d7c9d3b7d
SONiC OS Version: 11
Distribution: Debian 11.6

How I did it
How to verify it
---
 show/main.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/show/main.py b/show/main.py
index f2b71c9ccf..7f79cd4779 100755
--- a/show/main.py
+++ b/show/main.py
@@ -1297,7 +1297,7 @@ def version(verbose):
     sys_date = datetime.now()

     click.echo("\nSONiC Software Version: SONiC.{}".format(version_info['build_version']))
-    click.echo("\nSONiC OS Version: {}".format(version_info['sonic_os_version']))
+    click.echo("SONiC OS Version: {}".format(version_info['sonic_os_version']))
     click.echo("Distribution: Debian {}".format(version_info['debian_version']))
     click.echo("Kernel: {}".format(version_info['kernel_version']))
     click.echo("Build commit: {}".format(version_info['commit_id']))

From d17d124ebd1fd2a631be7f8ba24aef82906c9e3d Mon Sep 17 00:00:00 2001
From: Vadym Hlushko <62022266+vadymhlushko-mlnx@users.noreply.github.com>
Date: Thu, 13 Apr 2023 14:37:22 +0300
Subject: [PATCH 65/66] [aclshow][user-manual] Add the aclshow utility
 description to the User Manual document (#2779)

Add the aclshow utility description to the User Manual document

Signed-off-by: vadymhlushko-mlnx
---
 doc/Command-Reference.md | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)

diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md
index 494773b83c..86902cd7e7 100644
--- a/doc/Command-Reference.md
+++ b/doc/Command-Reference.md
@@ -1538,6 +1538,36 @@ This command is used to create new ACL tables.

 Go Back To [Beginning of the document](#) or [Beginning of this section](#acl)

+**aclshow**
+
+This command displays ACL rules and tables together with their priorities and their packet and byte counters.
+
+- Usage:
+  ```
+  aclshow [-h] [-a] [-c] [-r RULES] [-t TABLES] [-v] [-vv]
+  ```
+
+- Parameters:
+  - -a, --all: Show all ACL counters
+  - -c, --clear: Clear ACL counters statistics
+  - -r RULES, --rules RULES: Show only specified ACL rules and their counters
+  - -t TABLES, --tables TABLES: Show only specified ACL tables and their counters
+  - -vv, --verbose: Verbose output
+
+- Examples:
+  ```
+  admin@sonic:~$ sudo aclshow -a
+  RULE NAME    TABLE NAME      PRIO    PACKETS COUNT    BYTES COUNT
+  -----------  ------------  ------  ---------------  -------------
+  RULE_1       DATAACL         9999                0              0
+  RULE_2       DATAACL         9998                0              0
+  RULE_1       SNMP_ACL        9999              N/A            N/A
+  ```
+
+  If the `PACKETS COUNT` and `BYTES COUNT` fields show `N/A`, the rule is either invalid or a `control plane` ACL whose counters are maintained by Linux rather than by the SONiC `COUNTERS_DB`; use the [iptables](https://linux.die.net/man/8/iptables) utility to view those counters.
+
+  If the `PACKETS COUNT` and `BYTES COUNT` fields show numeric values, the rule is a SONiC data-plane ACL whose counters are created in the SONiC `COUNTERS_DB`.
+
 ## ARP & NDP
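The packet and byte counters the manual refers to can also be read straight from `COUNTERS_DB`. The sketch below assumes an `ACL_COUNTER_RULE_MAP` hash mapping rules to counter OIDs and `SAI_ACL_COUNTER_ATTR_PACKETS` / `SAI_ACL_COUNTER_ATTR_BYTES` fields under `COUNTERS:<oid>`; treat those key names as assumptions to verify on your image, not as the documented aclshow internals:

```python
# Hedged sketch: dump ACL rule counters from COUNTERS_DB.
# Key and field names are assumed (see above); verify before relying on them.
from swsscommon.swsscommon import SonicV2Connector

db = SonicV2Connector(host="127.0.0.1")
db.connect(db.COUNTERS_DB)

# Assumed map of "TABLE:RULE" -> counter object id.
rule_to_oid = db.get_all(db.COUNTERS_DB, "ACL_COUNTER_RULE_MAP") or {}

for rule, oid in rule_to_oid.items():
    stats = db.get_all(db.COUNTERS_DB, "COUNTERS:{}".format(oid)) or {}
    packets = stats.get("SAI_ACL_COUNTER_ATTR_PACKETS", "N/A")
    octets = stats.get("SAI_ACL_COUNTER_ATTR_BYTES", "N/A")
    print("{:30} packets={} bytes={}".format(rule, packets, octets))
```

Control-plane rules will simply be absent from such a map, which is consistent with the `N/A` rows aclshow prints for them.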
From d433b2f954e446db7a655e882a7274cd5bce3a50 Mon Sep 17 00:00:00 2001
From: Aryeh Feigin <101218333+arfeigin@users.noreply.github.com>
Date: Tue, 18 Apr 2023 09:14:02 +0300
Subject: [PATCH 66/66] [fast-reboot] set teamd timer to minimum, preserve
 connected routes (#2760)

Part of sonic-net/sonic-buildimage#14583
Similar to #2744

- What I did
Added a script to filter routes: preserve default routes (this was already done by the fast-reboot script) as well as connected routes.
Set the teamd timer to the minimal allowed value (1 second) for fast-reboot.
Both changes shorten the dataplane downtime.

- How I did it
fast-reboot-filter-routes.py was added to preserve connected and default routes; it is called from the fast-reboot script.
The teamd timer is set while setting up fast-reboot.
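The effect of the filter is easy to inspect: after it runs, APPL_DB should hold only default and connected prefixes. A minimal sketch using the same `ConfigDBConnector` calls as the new fast-reboot-filter-routes.py below (the output formatting is illustrative):

```python
# Sketch: list the ROUTE_TABLE prefixes still present in APPL_DB,
# mirroring the key handling in fast-reboot-filter-routes.py.
from swsscommon.swsscommon import ConfigDBConnector

ROUTE_IDX = 1  # prefix part of 'ROUTE_TABLE:<prefix>'

db = ConfigDBConnector()
db.db_connect(db.APPL_DB)

for key in db.keys(db.APPL_DB, 'ROUTE_TABLE:*') or []:
    print(key.split(':', 1)[ROUTE_IDX])
```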
---
 scripts/fast-reboot                  | 18 +++---
 scripts/fast-reboot-filter-routes.py | 91 ++++++++++++++++++++++++++++
 setup.py                             |  1 +
 3 files changed, 102 insertions(+), 8 deletions(-)
 create mode 100755 scripts/fast-reboot-filter-routes.py

diff --git a/scripts/fast-reboot b/scripts/fast-reboot
index fb162ae180..eea97e792b 100755
--- a/scripts/fast-reboot
+++ b/scripts/fast-reboot
@@ -550,6 +550,7 @@ case "$REBOOT_TYPE" in
         BOOT_TYPE_ARG=$REBOOT_TYPE
         trap clear_boot EXIT HUP INT QUIT TERM KILL ABRT ALRM
         sonic-db-cli STATE_DB HSET "FAST_RESTART_ENABLE_TABLE|system" "enable" "true" &>/dev/null
+        config warm_restart teamsyncd_timer 1
         config warm_restart enable system
         ;;
     "warm-reboot")
@@ -667,14 +668,15 @@ fi

 set +e
 if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then
-    # Clear all routes except of default routes for faster reconciliation time.
-    sonic-db-cli APPL_DB eval "
-        for _, k in ipairs(redis.call('keys', '*')) do
-            if string.match(k, 'ROUTE_TABLE:') and not string.match(k, 'ROUTE_TABLE:0.0.0.0/0') and not string.match(k, 'ROUTE_TABLE:::/0') then \
-                redis.call('del', k)
-            end
-        end
-    " 0 > /dev/null
+    # Clear all routes except of default and connected routes for faster reconciliation time.
+    debug "Clearing routes..."
+    FILTER_ROUTES=0
+    python /usr/local/bin/fast-reboot-filter-routes.py || FILTER_ROUTES=$?
+    if [[ FILTER_ROUTES -ne 0 ]]; then
+        error "Preserving connected and default routes failed."
+    else
+        debug "Routes deleted from APP-DB, default and connected routes preserved."
+ fi fi # disable trap-handlers which were set before diff --git a/scripts/fast-reboot-filter-routes.py b/scripts/fast-reboot-filter-routes.py new file mode 100755 index 0000000000..9328b79ed2 --- /dev/null +++ b/scripts/fast-reboot-filter-routes.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python3 + +import json +import sys +import os +import utilities_common.cli as clicommon +import syslog +import traceback +import click +from swsscommon.swsscommon import ConfigDBConnector + +ROUTE_IDX = 1 + +def get_connected_routes(): + cmd = 'sudo vtysh -c "show ip route connected json"' + connected_routes = [] + try: + output, ret = clicommon.run_command(cmd, return_cmd=True) + if ret != 0: + click.echo(output.rstrip('\n')) + sys.exit(ret) + if output is not None: + route_info = json.loads(output) + for route in route_info.keys(): + connected_routes.append(route) + except Exception: + ctx = click.get_current_context() + ctx.fail("Unable to get connected routes from bgp") + + return connected_routes + +def get_route(db, route): + key = 'ROUTE_TABLE:%s' % route + val = db.keys(db.APPL_DB, key) + if val: + return val[0].split(":", 1)[ROUTE_IDX] + else: + return None + +def generate_default_route_entries(): + db = ConfigDBConnector() + db.db_connect(db.APPL_DB) + + default_routes = [] + + ipv4_default = get_route(db, '0.0.0.0/0') + if ipv4_default is not None: + default_routes.append(ipv4_default) + + ipv6_default = get_route(db, '::/0') + if ipv6_default is not None: + default_routes.append(ipv6_default) + + return default_routes + +def filter_routes(preserved_routes): + db = ConfigDBConnector() + db.db_connect(db.APPL_DB) + + key = 'ROUTE_TABLE:*' + routes = db.keys(db.APPL_DB, key) + + for route in routes: + stripped_route = route.split(":", 1)[ROUTE_IDX] + if stripped_route not in preserved_routes: + db.delete(db.APPL_DB, route) + +def main(): + default_routes = generate_default_route_entries() + connected_routes = get_connected_routes() + preserved_routes = set(default_routes + connected_routes) + filter_routes(preserved_routes) + return 0 + +if __name__ == '__main__': + res = 0 + try: + syslog.openlog('fast-reboot-filter-routes') + res = main() + except KeyboardInterrupt: + syslog.syslog(syslog.LOG_NOTICE, "SIGINT received. Quitting") + res = 1 + except Exception as e: + syslog.syslog(syslog.LOG_ERR, "Got an exception %s: Traceback: %s" % (str(e), traceback.format_exc())) + res = 2 + finally: + syslog.closelog() + try: + sys.exit(res) + except SystemExit: + os._exit(res) diff --git a/setup.py b/setup.py index f071797280..a2c851998f 100644 --- a/setup.py +++ b/setup.py @@ -128,6 +128,7 @@ 'scripts/fanshow', 'scripts/fast-reboot', 'scripts/fast-reboot-dump.py', + 'scripts/fast-reboot-filter-routes.py', 'scripts/fdbclear', 'scripts/fdbshow', 'scripts/fibshow',
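After installing this change, the new knob can be sanity-checked: `config warm_restart teamsyncd_timer 1` should surface as a CONFIG_DB entry. A hedged sketch; the `WARM_RESTART` table and `teamd` key below follow the usual warm-restart config conventions and are assumptions, not taken from this patch:

```python
# Hedged sketch: confirm the teamsyncd timer written for fast-reboot.
# Table and key names are assumed (see above); verify on a live switch.
from swsscommon.swsscommon import ConfigDBConnector

db = ConfigDBConnector()
db.connect()

entry = db.get_entry('WARM_RESTART', 'teamd')
print(entry.get('teamsyncd_timer', 'not set'))
```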