diff --git a/acl_loader/main.py b/acl_loader/main.py index c50efec032..2eab089c21 100644 --- a/acl_loader/main.py +++ b/acl_loader/main.py @@ -72,6 +72,10 @@ class AclLoader(object): ACL_TABLE = "ACL_TABLE" ACL_RULE = "ACL_RULE" + CFG_ACL_TABLE = "ACL_TABLE" + STATE_ACL_TABLE = "ACL_TABLE_TABLE" + CFG_ACL_RULE = "ACL_RULE" + STATE_ACL_RULE = "ACL_RULE_TABLE" ACL_TABLE_TYPE_MIRROR = "MIRROR" ACL_TABLE_TYPE_CTRLPLANE = "CTRLPLANE" CFG_MIRROR_SESSION_TABLE = "MIRROR_SESSION" @@ -117,11 +121,16 @@ def __init__(self): self.tables_db_info = {} self.rules_db_info = {} self.rules_info = {} + self.tables_state_info = None + self.rules_state_info = None # Load database config files load_db_config() self.sessions_db_info = {} + self.acl_table_status = {} + self.acl_rule_status = {} + self.configdb = ConfigDBConnector() self.configdb.connect() self.statedb = SonicV2Connector(host="127.0.0.1") @@ -156,6 +165,8 @@ def __init__(self): self.read_rules_info() self.read_sessions_info() self.read_policers_info() + self.acl_table_status = self.read_acl_object_status_info(self.CFG_ACL_TABLE, self.STATE_ACL_TABLE) + self.acl_rule_status = self.read_acl_object_status_info(self.CFG_ACL_RULE, self.STATE_ACL_RULE) def read_tables_info(self): """ @@ -210,7 +221,7 @@ def read_sessions_info(self): for key in self.sessions_db_info: if self.per_npu_statedb: # For multi-npu platforms we will read from all front asic name space - # statedb as the monitor port will be differnt for each asic + # statedb as the monitor port will be different for each asic # and it's status also might be different (ideally should not happen) # We will store them as dict of 'asic' : value self.sessions_db_info[key]["status"] = {} @@ -224,6 +235,35 @@ def read_sessions_info(self): self.sessions_db_info[key]["status"] = state_db_info.get("status", "inactive") if state_db_info else "error" self.sessions_db_info[key]["monitor_port"] = state_db_info.get("monitor_port", "") if state_db_info else "" + def read_acl_object_status_info(self, cfg_db_table_name, state_db_table_name): + """ + Read ACL_TABLE status or ACL_RULE status from STATE_DB + """ + if self.per_npu_configdb: + namespace_configdb = list(self.per_npu_configdb.values())[0] + keys = namespace_configdb.get_table(cfg_db_table_name).keys() + else: + keys = self.configdb.get_table(cfg_db_table_name).keys() + + status = {} + for key in keys: + # For ACL_RULE, the key is (acl_table_name, acl_rule_name) + if isinstance(key, tuple): + state_db_key = key[0] + "|" + key[1] + else: + state_db_key = key + status[key] = {} + if self.per_npu_statedb: + status[key]['status'] = {} + for namespace_key, namespace_statedb in self.per_npu_statedb.items(): + state_db_info = namespace_statedb.get_all(self.statedb.STATE_DB, "{}|{}".format(state_db_table_name, state_db_key)) + status[key]['status'][namespace_key] = state_db_info.get("status", "N/A") if state_db_info else "N/A" + else: + state_db_info = self.statedb.get_all(self.statedb.STATE_DB, "{}|{}".format(state_db_table_name, state_db_key)) + status[key]['status'] = state_db_info.get("status", "N/A") if state_db_info else "N/A" + + return status + def get_sessions_db_info(self): return self.sessions_db_info @@ -786,32 +826,36 @@ def show_table(self, table_name): :param table_name: Optional. ACL table name. Filter tables by specified name. 
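Note on the read_acl_object_status_info() hunk above: the new helper resolves each CONFIG_DB ACL object to its STATE_DB status entry, flattening tuple-style ACL_RULE keys with "|". Below is a minimal, self-contained sketch of that lookup; the table and rule names ("DATAACL", "RULE_1") are hypothetical placeholders, not part of this change.

```python
# Sketch of the STATE_DB lookup read_acl_object_status_info() performs.
# Assumes the same SonicV2Connector setup AclLoader.__init__ already uses.
from swsscommon.swsscommon import SonicV2Connector

def lookup_status(statedb, state_table, cfg_key):
    # ACL_RULE keys are (table_name, rule_name) tuples in CONFIG_DB; STATE_DB
    # flattens them into "ACL_RULE_TABLE|<table>|<rule>".
    state_key = "|".join(cfg_key) if isinstance(cfg_key, tuple) else cfg_key
    info = statedb.get_all(statedb.STATE_DB, "{}|{}".format(state_table, state_key))
    return info.get("status", "N/A") if info else "N/A"

statedb = SonicV2Connector(host="127.0.0.1")
statedb.connect(statedb.STATE_DB)
print(lookup_status(statedb, "ACL_TABLE_TABLE", "DATAACL"))             # hypothetical table
print(lookup_status(statedb, "ACL_RULE_TABLE", ("DATAACL", "RULE_1")))  # hypothetical rule
```

This mirrors why show_table and show_rule below can fall back to "N/A": an object with no STATE_DB entry simply has no status yet.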
:return: """ - header = ("Name", "Type", "Binding", "Description", "Stage") + header = ("Name", "Type", "Binding", "Description", "Stage", "Status") data = [] for key, val in self.get_tables_db_info().items(): if table_name and key != table_name: continue - + stage = val.get("stage", Stage.INGRESS).lower() - + # Get ACL table status from STATE_DB + if key in self.acl_table_status: + status = self.acl_table_status[key]['status'] + else: + status = 'N/A' if val["type"] == AclLoader.ACL_TABLE_TYPE_CTRLPLANE: services = natsorted(val["services"]) - data.append([key, val["type"], services[0], val["policy_desc"], stage]) + data.append([key, val["type"], services[0], val["policy_desc"], stage, status]) if len(services) > 1: for service in services[1:]: - data.append(["", "", service, "", ""]) + data.append(["", "", service, "", "", ""]) else: if not val["ports"]: - data.append([key, val["type"], "", val["policy_desc"], stage]) + data.append([key, val["type"], "", val["policy_desc"], stage, status]) else: ports = natsorted(val["ports"]) - data.append([key, val["type"], ports[0], val["policy_desc"], stage]) + data.append([key, val["type"], ports[0], val["policy_desc"], stage, status]) if len(ports) > 1: for port in ports[1:]: - data.append(["", "", port, "", ""]) + data.append(["", "", port, "", "", ""]) print(tabulate.tabulate(data, headers=header, tablefmt="simple", missingval="")) @@ -873,7 +917,7 @@ def show_rule(self, table_name, rule_id): :param rule_id: Optional. ACL rule name. Filter rule by specified rule name. :return: """ - header = ("Table", "Rule", "Priority", "Action", "Match") + header = ("Table", "Rule", "Priority", "Action", "Match", "Status") def pop_priority(val): priority = "N/A" @@ -919,11 +963,16 @@ def pop_matches(val): priority = pop_priority(val) action = pop_action(val) matches = pop_matches(val) - - rule_data = [[tname, rid, priority, action, matches[0]]] + # Get ACL rule status from STATE_DB + status_key = (tname, rid) + if status_key in self.acl_rule_status: + status = self.acl_rule_status[status_key]['status'] + else: + status = "N/A" + rule_data = [[tname, rid, priority, action, matches[0], status]] if len(matches) > 1: for m in matches[1:]: - rule_data.append(["", "", "", "", m]) + rule_data.append(["", "", "", "", m, ""]) raw_data.append([priority, rule_data]) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 1856c7a0f5..eecf1c9e53 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -45,11 +45,14 @@ stages: artifact: sonic-buildimage.vs runVersion: 'latestFromBranch' runBranch: 'refs/heads/$(sourceBranch)' + patterns: | + **/*.deb + **/*.whl displayName: "Download artifacts from latest sonic-buildimage build" - script: | set -xe - sudo apt-get -y purge libhiredis-dev libnl-3-dev libnl-route-3-dev + sudo apt-get -y purge libhiredis-dev libnl-3-dev libnl-route-3-dev || true sudo dpkg -i libnl-3-200_*.deb sudo dpkg -i libnl-genl-3-200_*.deb sudo dpkg -i libnl-route-3-200_*.deb @@ -66,9 +69,9 @@ stages: source: specific project: build pipeline: 9 - artifact: sonic-swss-common.bullseye.amd64 + artifact: sonic-swss-common runVersion: 'latestFromBranch' - runBranch: 'refs/heads/master' + runBranch: 'refs/heads/$(sourceBranch)' displayName: "Download sonic swss common deb packages" - script: | diff --git a/config/aaa.py b/config/aaa.py index 6f4a42b340..3c76187126 100644 --- a/config/aaa.py +++ b/config/aaa.py @@ -405,8 +405,8 @@ def sourceip(ctx, src_ip): click.echo('Invalid ip address') return - v6_invalid_list = 
[ipaddress.IPv6Address(unicode('0::0')), ipaddress.IPv6Address(unicode('0::1'))] - net = ipaddress.ip_network(unicode(src_ip), strict=False) + v6_invalid_list = [ipaddress.IPv6Address('0::0'), ipaddress.IPv6Address('0::1')] + net = ipaddress.ip_network(src_ip, strict=False) if (net.version == 4): if src_ip == "0.0.0.0": click.echo('enter non-zero ip address') @@ -446,8 +446,8 @@ def nasip(ctx, nas_ip): click.echo('Invalid ip address') return - v6_invalid_list = [ipaddress.IPv6Address(unicode('0::0')), ipaddress.IPv6Address(unicode('0::1'))] - net = ipaddress.ip_network(unicode(nas_ip), strict=False) + v6_invalid_list = [ipaddress.IPv6Address('0::0'), ipaddress.IPv6Address('0::1')] + net = ipaddress.ip_network(nas_ip, strict=False) if (net.version == 4): if nas_ip == "0.0.0.0": click.echo('enter non-zero ip address') diff --git a/config/console.py b/config/console.py index b28aeda672..1ecf80c381 100644 --- a/config/console.py +++ b/config/console.py @@ -1,6 +1,7 @@ import click import utilities_common.cli as clicommon - +from .validated_config_db_connector import ValidatedConfigDBConnector +from jsonpatch import JsonPatchConflict # # 'console' group ('config console ...') # @@ -16,14 +17,18 @@ def console(): @clicommon.pass_db def enable_console_switch(db): """Enable console switch""" - config_db = db.cfgdb + config_db = ValidatedConfigDBConnector(db.cfgdb) table = "CONSOLE_SWITCH" dataKey1 = 'console_mgmt' dataKey2 = 'enabled' data = { dataKey2 : "yes" } - config_db.mod_entry(table, dataKey1, data) + try: + config_db.mod_entry(table, dataKey1, data) + except ValueError as e: + ctx = click.get_current_context() + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'console disable' group ('config console disable') @@ -32,14 +37,18 @@ def enable_console_switch(db): @clicommon.pass_db def disable_console_switch(db): """Disable console switch""" - config_db = db.cfgdb + config_db = ValidatedConfigDBConnector(db.cfgdb) table = "CONSOLE_SWITCH" dataKey1 = 'console_mgmt' dataKey2 = 'enabled' data = { dataKey2 : "no" } - config_db.mod_entry(table, dataKey1, data) + try: + config_db.mod_entry(table, dataKey1, data) + except ValueError as e: + ctx = click.get_current_context() + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'console add' group ('config console add ...') @@ -52,7 +61,7 @@ def disable_console_switch(db): @click.option('--devicename', '-d', metavar='', required=False) def add_console_setting(db, linenum, baud, flowcontrol, devicename): """Add Console-realted configuration tasks""" - config_db = db.cfgdb + config_db = ValidatedConfigDBConnector(db.cfgdb) table = "CONSOLE_PORT" dataKey1 = 'baud_rate' @@ -72,7 +81,10 @@ def add_console_setting(db, linenum, baud, flowcontrol, devicename): ctx.fail("Given device name {} has been used. Please enter a valid device name or remove the existing one !!".format(devicename)) console_entry[dataKey3] = devicename - config_db.set_entry(table, linenum, console_entry) + try: + config_db.set_entry(table, linenum, console_entry) + except ValueError as e: + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) # @@ -83,15 +95,18 @@ def add_console_setting(db, linenum, baud, flowcontrol, devicename): @click.argument('linenum', metavar='', required=True, type=click.IntRange(0, 65535)) def remove_console_setting(db, linenum): """Remove Console-related configuration tasks""" - config_db = db.cfgdb + config_db = ValidatedConfigDBConnector(db.cfgdb) + ctx = click.get_current_context() table = "CONSOLE_PORT" data = config_db.get_entry(table, linenum) if data: - config_db.mod_entry(table, linenum, None) + try: + config_db.set_entry(table, linenum, None) + except JsonPatchConflict as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) else: - ctx = click.get_current_context() ctx.fail("Trying to delete console port setting, which is not present.") # @@ -103,7 +118,7 @@ def remove_console_setting(db, linenum): @click.argument('devicename', metavar='', required=False) def upate_console_remote_device_name(db, linenum, devicename): """Update remote device name for a console line""" - config_db = db.cfgdb + config_db = ValidatedConfigDBConnector(db.cfgdb) ctx = click.get_current_context() table = "CONSOLE_PORT" @@ -117,12 +132,18 @@ def upate_console_remote_device_name(db, linenum, devicename): elif not devicename: # remove configuration key from console setting if user not give a remote device name data.pop(dataKey, None) - config_db.mod_entry(table, linenum, data) + try: + config_db.mod_entry(table, linenum, data) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) elif isExistingSameDevice(config_db, devicename, table): ctx.fail("Given device name {} has been used. Please enter a valid device name or remove the existing one !!".format(devicename)) else: data[dataKey] = devicename - config_db.mod_entry(table, linenum, data) + try: + config_db.mod_entry(table, linenum, data) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) else: ctx.fail("Trying to update console port setting, which is not present.") @@ -135,7 +156,7 @@ def upate_console_remote_device_name(db, linenum, devicename): @click.argument('baud', metavar='', required=True, type=click.INT) def update_console_baud(db, linenum, baud): """Update baud for a console line""" - config_db = db.cfgdb + config_db = ValidatedConfigDBConnector(db.cfgdb) ctx = click.get_current_context() table = "CONSOLE_PORT" @@ -149,7 +170,10 @@ def update_console_baud(db, linenum, baud): return else: data[dataKey] = baud - config_db.mod_entry(table, linenum, data) + try: + config_db.mod_entry(table, linenum, data) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) else: ctx.fail("Trying to update console port setting, which is not present.") @@ -162,7 +186,7 @@ def update_console_baud(db, linenum, baud): @click.argument('linenum', metavar='', required=True, type=click.IntRange(0, 65535)) def update_console_flow_control(db, mode, linenum): """Update flow control setting for a console line""" - config_db = db.cfgdb + config_db = ValidatedConfigDBConnector(db.cfgdb) ctx = click.get_current_context() table = "CONSOLE_PORT" @@ -177,7 +201,10 @@ def update_console_flow_control(db, mode, linenum): return else: data[dataKey] = innerMode - config_db.mod_entry(table, linenum, data) + try: + config_db.mod_entry(table, linenum, data) + except ValueError as e: + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) else: ctx.fail("Trying to update console port setting, which is not present.") diff --git a/config/kube.py b/config/kube.py index 706a5ab260..526a4dd028 100644 --- a/config/kube.py +++ b/config/kube.py @@ -1,6 +1,7 @@ import click from utilities_common.cli import AbbreviationGroup, pass_db +from .validated_config_db_connector import ValidatedConfigDBConnector from .utils import log @@ -21,22 +22,30 @@ KUBE_LABEL_SET_KEY = "SET" def _update_kube_server(db, field, val): - db_data = db.cfgdb.get_entry(KUBE_SERVER_TABLE_NAME, KUBE_SERVER_TABLE_KEY) + config_db = ValidatedConfigDBConnector(db.cfgdb) + db_data = config_db.get_entry(KUBE_SERVER_TABLE_NAME, KUBE_SERVER_TABLE_KEY) def_data = { KUBE_SERVER_IP: "", KUBE_SERVER_PORT: "6443", KUBE_SERVER_INSECURE: "True", KUBE_SERVER_DISABLE: "False" } + ctx = click.get_current_context() for f in def_data: if db_data and f in db_data: if f == field and db_data[f] != val: - db.cfgdb.mod_entry(KUBE_SERVER_TABLE_NAME, KUBE_SERVER_TABLE_KEY, {field: val}) + try: + config_db.mod_entry(KUBE_SERVER_TABLE_NAME, KUBE_SERVER_TABLE_KEY, {field: val}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) log.log_info("modify kubernetes server entry {}={}".format(field,val)) else: # Missing field. Set to default or given value v = val if f == field else def_data[f] - db.cfgdb.mod_entry(KUBE_SERVER_TABLE_NAME, KUBE_SERVER_TABLE_KEY, {f: v}) + try: + config_db.mod_entry(KUBE_SERVER_TABLE_NAME, KUBE_SERVER_TABLE_KEY, {f: v}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) log.log_info("set kubernetes server entry {}={}".format(f,v)) diff --git a/config/main.py b/config/main.py index 2bc9055967..c202b2cd04 100644 --- a/config/main.py +++ b/config/main.py @@ -15,6 +15,7 @@ import copy from jsonpatch import JsonPatchConflict +from jsonpointer import JsonPointerException from collections import OrderedDict from generic_config_updater.generic_updater import GenericUpdater, ConfigFormat from minigraph import parse_device_desc_xml, minigraph_encoder @@ -743,24 +744,28 @@ def storm_control_delete_entry(port_name, storm_type): return True -def _wait_until_clear(table, interval=0.5, timeout=30): +def _wait_until_clear(tables, interval=0.5, timeout=30, verbose=False): start = time.time() empty = False app_db = SonicV2Connector(host='127.0.0.1') app_db.connect(app_db.APPL_DB) while not empty and time.time() - start < timeout: - current_profiles = app_db.keys(app_db.APPL_DB, table) - if not current_profiles: - empty = True - else: - time.sleep(interval) + non_empty_table_count = 0 + for table in tables: + keys = app_db.keys(app_db.APPL_DB, table) + if keys: + non_empty_table_count += 1 + if verbose: + click.echo("Some entries matching {} still exist: {}".format(table, keys[0])) + time.sleep(interval) + empty = (non_empty_table_count == 0) if not empty: click.echo("Operation not completed successfully, please save and reload configuration.") return empty -def _clear_qos(delay = False): +def _clear_qos(delay=False, verbose=False): QOS_TABLE_NAMES = [ 'PORT_QOS_MAP', 'QUEUE', @@ -797,7 +802,10 @@ def _clear_qos(delay = False): for qos_table in QOS_TABLE_NAMES: config_db.delete_table(qos_table) if delay: - _wait_until_clear("BUFFER_POOL_TABLE:*",interval=0.5, timeout=30) + device_metadata = config_db.get_entry('DEVICE_METADATA', 'localhost') + # Traditional buffer manager do not remove buffer tables in any case, no need to wait. 
+ timeout = 120 if device_metadata and device_metadata.get('buffer_model') == 'dynamic' else 0 + _wait_until_clear(["BUFFER_*_TABLE:*", "BUFFER_*_SET"], interval=0.5, timeout=timeout, verbose=verbose) def _get_sonic_generated_services(num_asic): if not os.path.isfile(SONIC_GENERATED_SERVICE_PATH): @@ -862,23 +870,8 @@ def _get_sonic_services(): return (unit.strip() for unit in out.splitlines()) -def _get_delayed_sonic_units(get_timers=False): - rc1, _ = clicommon.run_command("systemctl list-dependencies --plain sonic-delayed.target | sed '1d'", return_cmd=True) - rc2, _ = clicommon.run_command("systemctl is-enabled {}".format(rc1.replace("\n", " ")), return_cmd=True) - timer = [line.strip() for line in rc1.splitlines()] - state = [line.strip() for line in rc2.splitlines()] - services = [] - for unit in timer: - if state[timer.index(unit)] == "enabled": - if not get_timers: - services.append(re.sub('\.timer$', '', unit, 1)) - else: - services.append(unit) - return services - - def _reset_failed_services(): - for service in itertools.chain(_get_sonic_services(), _get_delayed_sonic_units()): + for service in _get_sonic_services(): clicommon.run_command("systemctl reset-failed {}".format(service)) @@ -897,12 +890,6 @@ def _restart_services(): click.echo("Reloading Monit configuration ...") clicommon.run_command("sudo monit reload") -def _delay_timers_elapsed(): - for timer in _get_delayed_sonic_units(get_timers=True): - out, _ = clicommon.run_command("systemctl show {} --property=LastTriggerUSecMonotonic --value".format(timer), return_cmd=True) - if out.strip() == "0": - return False - return True def _per_namespace_swss_ready(service_name): out, _ = clicommon.run_command("systemctl show {} --property ActiveState --value".format(service_name), return_cmd=True) @@ -1155,41 +1142,6 @@ def validate_gre_type(ctx, _, value): except ValueError: raise click.UsageError("{} is not a valid GRE type".format(value)) -def _is_storage_device(cfg_db): - """ - Check if the device is a storage device or not - """ - device_metadata = cfg_db.get_entry("DEVICE_METADATA", "localhost") - return device_metadata.get("storage_device", "Unknown") == "true" - -def _is_acl_table_present(cfg_db, acl_table_name): - """ - Check if acl table exists - """ - return acl_table_name in cfg_db.get_keys("ACL_TABLE") - -def load_backend_acl(cfg_db, device_type): - """ - Load acl on backend storage device - """ - - BACKEND_ACL_TEMPLATE_FILE = os.path.join('/', "usr", "share", "sonic", "templates", "backend_acl.j2") - BACKEND_ACL_FILE = os.path.join('/', "etc", "sonic", "backend_acl.json") - - if device_type and device_type == "BackEndToRRouter" and _is_storage_device(cfg_db) and _is_acl_table_present(cfg_db, "DATAACL"): - if os.path.isfile(BACKEND_ACL_TEMPLATE_FILE): - clicommon.run_command( - "{} -d -t {},{}".format( - SONIC_CFGGEN_PATH, - BACKEND_ACL_TEMPLATE_FILE, - BACKEND_ACL_FILE - ), - display_cmd=True - ) - if os.path.isfile(BACKEND_ACL_FILE): - clicommon.run_command("acl-loader update incremental {}".format(BACKEND_ACL_FILE), display_cmd=True) - - # This is our main entrypoint - the main 'config' command @click.group(cls=clicommon.AbbreviationGroup, context_settings=CONTEXT_SETTINGS) @click.pass_context @@ -1390,20 +1342,6 @@ def apply_patch(ctx, patch_file_path, format, dry_run, ignore_non_yang_tables, i patch_as_json = json.loads(text) patch = jsonpatch.JsonPatch(patch_as_json) - # convert IPv6 addresses to lowercase - for patch_line in patch: - if 'remove' == patch_line['op']: - match = 
re.search(r"(?P/INTERFACE/\w+\|)(?P([a-fA-F0-9]{0,4}[:~]|::){1,7}[a-fA-F0-9]{0,4})" - "(?P.*)", str.format(patch_line['path'])) - if match: - prefix = match.group('prefix') - ipv6_address_str = match.group('ipv6_address') - suffix = match.group('suffix') - ipv6_address_str = ipv6_address_str.lower() - click.secho("converted ipv6 address to lowercase {} with prefix {} in value: {}" - .format(ipv6_address_str, prefix, patch_line['path'])) - patch_line['path'] = prefix + ipv6_address_str + suffix - config_format = ConfigFormat[format.upper()] GenericUpdater().apply_patch(patch, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_path) @@ -1533,10 +1471,6 @@ def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_form click.echo("System is not up. Retry later or use -f to avoid system checks") sys.exit(CONFIG_RELOAD_NOT_READY) - if not _delay_timers_elapsed(): - click.echo("Relevant services are not up. Retry later or use -f to avoid system checks") - sys.exit(CONFIG_RELOAD_NOT_READY) - if not _swss_ready(): click.echo("SwSS container is not ready. Retry later or use -f to avoid system checks") sys.exit(CONFIG_RELOAD_NOT_READY) @@ -1549,7 +1483,8 @@ def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_form if not yes: click.confirm(message, abort=True) - log.log_info("'reload' executing...") + argv_str = ' '.join(['config', *sys.argv[1:]]) + log.log_notice(f"'reload' executing with command: {argv_str}") num_asic = multi_asic.get_num_asics() cfg_files = [] @@ -1569,7 +1504,7 @@ def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_form #Stop services before config push if not no_service_restart: - log.log_info("'reload' stopping services...") + log.log_notice("'reload' stopping services...") _stop_services() # In Single ASIC platforms we have single DB service. 
In multi-ASIC platforms we have a global DB @@ -1678,7 +1613,7 @@ def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_form # status from all services before we attempt to restart them if not no_service_restart: _reset_failed_services() - log.log_info("'reload' restarting services...") + log.log_notice("'reload' restarting services...") _restart_services() @config.command("load_mgmt_config") @@ -1725,11 +1660,12 @@ def load_mgmt_config(filename): @clicommon.pass_db def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, golden_config_path): """Reconfigure based on minigraph.""" - log.log_info("'load_minigraph' executing...") + argv_str = ' '.join(['config', *sys.argv[1:]]) + log.log_notice(f"'load_minigraph' executing with command: {argv_str}") #Stop services before config push if not no_service_restart: - log.log_info("'load_minigraph' stopping services...") + log.log_notice("'load_minigraph' stopping services...") _stop_services() # For Single Asic platform the namespace list has the empty string @@ -1765,12 +1701,6 @@ def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, if os.path.isfile('/etc/sonic/acl.json'): clicommon.run_command("acl-loader update full /etc/sonic/acl.json", display_cmd=True) - # get the device type - device_type = _get_device_type() - - # Load backend acl - load_backend_acl(db.cfgdb, device_type) - # Load port_config.json try: load_port_config(db.cfgdb, '/etc/sonic/port_config.json') @@ -1780,6 +1710,8 @@ def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, # generate QoS and Buffer configs clicommon.run_command("config qos reload --no-dynamic-buffer --no-delay", display_cmd=True) + # get the device type + device_type = _get_device_type() if device_type != 'MgmtToRRouter' and device_type != 'MgmtTsToR' and device_type != 'BmcMgmtToRRouter' and device_type != 'EPMS': clicommon.run_command("pfcwd start_default", display_cmd=True) @@ -1793,7 +1725,7 @@ def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, cfggen_namespace_option = " -n {}".format(namespace) clicommon.run_command(db_migrator + ' -o set_version' + cfggen_namespace_option) - # Keep device isolated with TSA + # Keep device isolated with TSA if traffic_shift_away: clicommon.run_command("TSA", display_cmd=True) if override_config: @@ -1815,7 +1747,7 @@ def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, if not no_service_restart: _reset_failed_services() #FIXME: After config DB daemon is implemented, we'll no longer need to restart every service. - log.log_info("'load_minigraph' restarting services...") + log.log_notice("'load_minigraph' restarting services...") _restart_services() click.echo("Please note setting loaded from minigraph will be lost after system reboot. To preserve setting, run `config save`.") @@ -2006,9 +1938,21 @@ def synchronous_mode(sync_mode): config reload -y \n Option 2. systemctl restart swss""" % sync_mode) +# +# 'suppress-fib-pending' command ('config suppress-fib-pending ...') +# +@config.command('suppress-fib-pending') +@click.argument('state', metavar='', required=True, type=click.Choice(['enabled', 'disabled'])) +@clicommon.pass_db +def suppress_pending_fib(db, state): + ''' Enable or disable pending FIB suppression. 
Once enabled, BGP will not advertise routes that are not yet installed in the hardware ''' + + config_db = db.cfgdb + config_db.mod_entry('DEVICE_METADATA' , 'localhost', {"suppress-fib-pending" : state}) + # # 'yang_config_validation' command ('config yang_config_validation ...') -# +# @config.command('yang_config_validation') @click.argument('yang_config_validation', metavar='', required=True) def yang_config_validation(yang_config_validation): @@ -2397,25 +2341,35 @@ def add_erspan(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer session_info['gre_type'] = gre_type session_info = gather_session_info(session_info, policer, queue, src_port, direction) + ctx = click.get_current_context() """ For multi-npu platforms we need to program all front asic namespaces """ namespaces = multi_asic.get_all_namespaces() if not namespaces['front_ns']: - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() - if validate_mirror_session_config(config_db, session_name, None, src_port, direction) is False: - return - config_db.set_entry("MIRROR_SESSION", session_name, session_info) + if ADHOC_VALIDATION: + if validate_mirror_session_config(config_db, session_name, None, src_port, direction) is False: + return + try: + config_db.set_entry("MIRROR_SESSION", session_name, session_info) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) + else: per_npu_configdb = {} for front_asic_namespaces in namespaces['front_ns']: - per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces) + per_npu_configdb[front_asic_namespaces] = ValidatedConfigDBConnector(ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces)) per_npu_configdb[front_asic_namespaces].connect() - if validate_mirror_session_config(per_npu_configdb[front_asic_namespaces], session_name, None, src_port, direction) is False: - return - per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, session_info) + if ADHOC_VALIDATION: + if validate_mirror_session_config(per_npu_configdb[front_asic_namespaces], session_name, None, src_port, direction) is False: + return + try: + per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, session_info) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) @mirror_session.group(cls=clicommon.AbbreviationGroup, name='span') @click.pass_context @@ -2447,25 +2401,34 @@ def add_span(session_name, dst_port, src_port, direction, queue, policer): } session_info = gather_session_info(session_info, policer, queue, src_port, direction) + ctx = click.get_current_context() """ For multi-npu platforms we need to program all front asic namespaces """ namespaces = multi_asic.get_all_namespaces() if not namespaces['front_ns']: - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() - if validate_mirror_session_config(config_db, session_name, dst_port, src_port, direction) is False: - return - config_db.set_entry("MIRROR_SESSION", session_name, session_info) + if ADHOC_VALIDATION: + if validate_mirror_session_config(config_db, session_name, dst_port, src_port, direction) is False: + return + try: + config_db.set_entry("MIRROR_SESSION", session_name, session_info) + except ValueError as e: + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) else: per_npu_configdb = {} for front_asic_namespaces in namespaces['front_ns']: - per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces) + per_npu_configdb[front_asic_namespaces] = ValidatedConfigDBConnector(ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces)) per_npu_configdb[front_asic_namespaces].connect() - if validate_mirror_session_config(per_npu_configdb[front_asic_namespaces], session_name, dst_port, src_port, direction) is False: - return - per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, session_info) + if ADHOC_VALIDATION: + if validate_mirror_session_config(per_npu_configdb[front_asic_namespaces], session_name, dst_port, src_port, direction) is False: + return + try: + per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, session_info) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) @mirror_session.command() @@ -2477,16 +2440,23 @@ def remove(session_name): For multi-npu platforms we need to program all front asic namespaces """ namespaces = multi_asic.get_all_namespaces() + ctx = click.get_current_context() if not namespaces['front_ns']: - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() - config_db.set_entry("MIRROR_SESSION", session_name, None) + try: + config_db.set_entry("MIRROR_SESSION", session_name, None) + except JsonPatchConflict as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) else: per_npu_configdb = {} for front_asic_namespaces in namespaces['front_ns']: - per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces) + per_npu_configdb[front_asic_namespaces] = ValidatedConfigDBConnector(ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces)) per_npu_configdb[front_asic_namespaces].connect() - per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, None) + try: + per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, None) + except JsonPatchConflict as e: + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) # # 'pfcwd' group ('config pfcwd ...') @@ -2661,10 +2631,11 @@ def qos(ctx): pass @qos.command('clear') -def clear(): +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def clear(verbose): """Clear QoS configuration""" log.log_info("'qos clear' executing...") - _clear_qos() + _clear_qos(verbose=verbose) def _update_buffer_calculation_model(config_db, model): """Update the buffer calculation model into CONFIG_DB""" @@ -2681,6 +2652,7 @@ def _update_buffer_calculation_model(config_db, model): @click.option('--ports', is_flag=False, required=False, help="List of ports that needs to be updated") @click.option('--no-dynamic-buffer', is_flag=True, help="Disable dynamic buffer calculation") @click.option('--no-delay', is_flag=True, hidden=True) +@click.option('--verbose', is_flag=True, help="Enable verbose output") @click.option( '--json-data', type=click.STRING, help="json string with additional data, valid with --dry-run option" @@ -2689,7 +2661,7 @@ def _update_buffer_calculation_model(config_db, model): '--dry_run', type=click.STRING, help="Dry run, writes config to the given file" ) -def reload(ctx, no_dynamic_buffer, no_delay, dry_run, json_data, ports): +def reload(ctx, no_dynamic_buffer, no_delay, dry_run, json_data, ports, verbose): """Reload QoS configuration""" if ports: log.log_info("'qos reload --ports {}' executing...".format(ports)) @@ -2698,7 +2670,7 @@ def reload(ctx, no_dynamic_buffer, no_delay, dry_run, json_data, ports): log.log_info("'qos reload' executing...") if not dry_run: - _clear_qos(delay = not no_delay) + _clear_qos(delay = not no_delay, verbose=verbose) _, hwsku_path = device_info.get_paths_to_platform_and_hwsku_dirs() sonic_version_file = device_info.get_sonic_version_file() @@ -4210,7 +4182,7 @@ def breakout(ctx, interface_name, mode, verbose, force_remove_dependencies, load raise click.Abort() # Get the config_db connector - config_db = ctx.obj['config_db'] + config_db = ValidatedConfigDBConnector(ctx.obj['config_db']) target_brkout_mode = mode @@ -4289,7 +4261,10 @@ def breakout(ctx, interface_name, mode, verbose, force_remove_dependencies, load if interface_name not in brkout_cfg_keys: click.secho("[ERROR] {} is not present in 'BREAKOUT_CFG' Table!".format(interface_name), fg='red') raise click.Abort() - config_db.set_entry("BREAKOUT_CFG", interface_name, {'brkout_mode': target_brkout_mode}) + try: + config_db.set_entry("BREAKOUT_CFG", interface_name, {'brkout_mode': target_brkout_mode}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) click.secho("Breakout process got successfully completed." .format(interface_name), fg="cyan", underline=True) click.echo("Please note loaded setting will be lost after system reboot. To preserve setting, run `config save`.") @@ -6436,15 +6411,19 @@ def ntp(ctx): @click.pass_context def add_ntp_server(ctx, ntp_ip_address): """ Add NTP server IP """ - if not clicommon.is_ipaddress(ntp_ip_address): - ctx.fail('Invalid ip address') - db = ctx.obj['db'] + if ADHOC_VALIDATION: + if not clicommon.is_ipaddress(ntp_ip_address): + ctx.fail('Invalid IP address') + db = ValidatedConfigDBConnector(ctx.obj['db']) ntp_servers = db.get_table("NTP_SERVER") if ntp_ip_address in ntp_servers: click.echo("NTP server {} is already configured".format(ntp_ip_address)) return else: - db.set_entry('NTP_SERVER', ntp_ip_address, {'NULL': 'NULL'}) + try: + db.set_entry('NTP_SERVER', ntp_ip_address, {'NULL': 'NULL'}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) click.echo("NTP server {} added to configuration".format(ntp_ip_address)) try: click.echo("Restarting ntp-config service...") @@ -6457,12 +6436,16 @@ def add_ntp_server(ctx, ntp_ip_address): @click.pass_context def del_ntp_server(ctx, ntp_ip_address): """ Delete NTP server IP """ - if not clicommon.is_ipaddress(ntp_ip_address): - ctx.fail('Invalid IP address') - db = ctx.obj['db'] + if ADHOC_VALIDATION: + if not clicommon.is_ipaddress(ntp_ip_address): + ctx.fail('Invalid IP address') + db = ValidatedConfigDBConnector(ctx.obj['db']) ntp_servers = db.get_table("NTP_SERVER") if ntp_ip_address in ntp_servers: - db.set_entry('NTP_SERVER', '{}'.format(ntp_ip_address), None) + try: + db.set_entry('NTP_SERVER', '{}'.format(ntp_ip_address), None) + except JsonPatchConflict as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) click.echo("NTP server {} removed from configuration".format(ntp_ip_address)) else: ctx.fail("NTP server {} is not configured.".format(ntp_ip_address)) @@ -6715,16 +6698,19 @@ def add(ctx, name, ipaddr, port, vrf): if not is_valid_collector_info(name, ipaddr, port, vrf): return - config_db = ctx.obj['db'] + config_db = ValidatedConfigDBConnector(ctx.obj['db']) collector_tbl = config_db.get_table('SFLOW_COLLECTOR') if (collector_tbl and name not in collector_tbl and len(collector_tbl) == 2): click.echo("Only 2 collectors can be configured, please delete one") return - - config_db.mod_entry('SFLOW_COLLECTOR', name, - {"collector_ip": ipaddr, "collector_port": port, - "collector_vrf": vrf}) + + try: + config_db.mod_entry('SFLOW_COLLECTOR', name, + {"collector_ip": ipaddr, "collector_port": port, + "collector_vrf": vrf}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) return # @@ -6735,14 +6721,18 @@ def add(ctx, name, ipaddr, port, vrf): @click.pass_context def del_collector(ctx, name): """Delete a sFlow collector""" - config_db = ctx.obj['db'] - collector_tbl = config_db.get_table('SFLOW_COLLECTOR') + config_db = ValidatedConfigDBConnector(ctx.obj['db']) + if ADHOC_VALIDATION: + collector_tbl = config_db.get_table('SFLOW_COLLECTOR') - if name not in collector_tbl: - click.echo("Collector: {} not configured".format(name)) - return + if name not in collector_tbl: + click.echo("Collector: {} not configured".format(name)) + return - config_db.mod_entry('SFLOW_COLLECTOR', name, None) + try: + config_db.set_entry('SFLOW_COLLECTOR', name, None) + except (JsonPatchConflict, JsonPointerException) as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'sflow agent-id' group diff --git a/config/mclag.py b/config/mclag.py index 589bb61a20..abc6ee051a 100644 --- a/config/mclag.py +++ b/config/mclag.py @@ -1,8 +1,12 @@ import click from swsscommon.swsscommon import ConfigDBConnector +from .validated_config_db_connector import ValidatedConfigDBConnector import ipaddress +from jsonpatch import JsonPatchConflict +from jsonpointer import JsonPointerException +ADHOC_VALIDATION = False CFG_PORTCHANNEL_PREFIX = "PortChannel" CFG_PORTCHANNEL_PREFIX_LEN = 11 CFG_PORTCHANNEL_MAX_VAL = 9999 @@ -86,8 +90,7 @@ def is_ipv4_addr_valid(addr): def check_if_interface_is_valid(db, interface_name): from .main import interface_name_is_valid - if interface_name_is_valid(db,interface_name) is False: - ctx.fail("Interface name is invalid. 
Please enter a valid interface name!!") + return interface_name_is_valid(db,interface_name) def get_intf_vrf_bind_unique_ip(db, interface_name, interface_type): intfvrf = db.get_table(interface_type) @@ -121,34 +124,42 @@ def mclag(ctx): @click.pass_context def add_mclag_domain(ctx, domain_id, source_ip_addr, peer_ip_addr, peer_ifname): """Add MCLAG Domain""" - - if not mclag_domain_id_valid(domain_id): - ctx.fail("{} invalid domain ID, valid range is 1 to 4095".format(domain_id)) - if not is_ipv4_addr_valid(source_ip_addr): - ctx.fail("{} invalid local ip address".format(source_ip_addr)) - if not is_ipv4_addr_valid(peer_ip_addr): - ctx.fail("{} invalid peer ip address".format(peer_ip_addr)) - - db = ctx.obj['db'] + if ADHOC_VALIDATION: + if not mclag_domain_id_valid(domain_id): + ctx.fail("{} invalid domain ID, valid range is 1 to 4095".format(domain_id)) + if not is_ipv4_addr_valid(source_ip_addr): + ctx.fail("{} invalid local ip address".format(source_ip_addr)) + if not is_ipv4_addr_valid(peer_ip_addr): + ctx.fail("{} invalid peer ip address".format(peer_ip_addr)) + + db = ValidatedConfigDBConnector(ctx.obj['db']) fvs = {} fvs['source_ip'] = str(source_ip_addr) fvs['peer_ip'] = str(peer_ip_addr) - if peer_ifname is not None: - if (peer_ifname.startswith("Ethernet") is False) and (peer_ifname.startswith("PortChannel") is False): - ctx.fail("peer interface is invalid, should be Ethernet interface or portChannel !!") - if (peer_ifname.startswith("Ethernet") is True) and (check_if_interface_is_valid(db, peer_ifname) is False): - ctx.fail("peer Ethernet interface name is invalid. it is not present in port table of configDb!!") - if (peer_ifname.startswith("PortChannel")) and (is_portchannel_name_valid(peer_ifname) is False): - ctx.fail("peer PortChannel interface name is invalid !!") - fvs['peer_link'] = str(peer_ifname) + if ADHOC_VALIDATION: + if peer_ifname is not None: + if (peer_ifname.startswith("Ethernet") is False) and (peer_ifname.startswith("PortChannel") is False): + ctx.fail("peer interface is invalid, should be Ethernet interface or portChannel !!") + if (peer_ifname.startswith("Ethernet") is True) and (check_if_interface_is_valid(db, peer_ifname) is False): + ctx.fail("peer Ethernet interface name is invalid. it is not present in port table of configDb!!") + if (peer_ifname.startswith("PortChannel")) and (is_portchannel_name_valid(peer_ifname) is False): + ctx.fail("peer PortChannel interface name is invalid !!") + fvs['peer_link'] = str(peer_ifname) mclag_domain_keys = db.get_table('MCLAG_DOMAIN').keys() if len(mclag_domain_keys) == 0: - db.set_entry('MCLAG_DOMAIN', domain_id, fvs) + try: + db.set_entry('MCLAG_DOMAIN', domain_id, fvs) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) else: + domain_id = str(domain_id) if domain_id in mclag_domain_keys: - db.mod_entry('MCLAG_DOMAIN', domain_id, fvs) - else: - ctx.fail("only one mclag Domain can be configured. Already one domain {} configured ".format(mclag_domain_keys[0])) + try: + db.mod_entry('MCLAG_DOMAIN', domain_id, fvs) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) + else: + ctx.fail("only one mclag Domain can be configured. 
Already one domain {} configured ".format(list(mclag_domain_keys)[0])) #mclag domain delete @@ -158,15 +169,16 @@ def add_mclag_domain(ctx, domain_id, source_ip_addr, peer_ip_addr, peer_ifname): @click.pass_context def del_mclag_domain(ctx, domain_id): """Delete MCLAG Domain""" - - if not mclag_domain_id_valid(domain_id): - ctx.fail("{} invalid domain ID, valid range is 1 to 4095".format(domain_id)) - - db = ctx.obj['db'] - entry = db.get_entry('MCLAG_DOMAIN', domain_id) - if entry is None: - ctx.fail("MCLAG Domain {} not configured ".format(domain_id)) - return + + db = ValidatedConfigDBConnector(ctx.obj['db']) + + if ADHOC_VALIDATION: + if not mclag_domain_id_valid(domain_id): + ctx.fail("{} invalid domain ID, valid range is 1 to 4095".format(domain_id)) + + entry = db.get_entry('MCLAG_DOMAIN', domain_id) + if entry is None: + ctx.fail("MCLAG Domain {} not configured ".format(domain_id)) click.echo("MCLAG Domain delete takes care of deleting all associated MCLAG Interfaces") @@ -175,11 +187,17 @@ def del_mclag_domain(ctx, domain_id): #delete associated mclag interfaces for iface_domain_id, iface_name in interface_table_keys: - if (int(iface_domain_id) == domain_id): - db.set_entry('MCLAG_INTERFACE', (iface_domain_id, iface_name), None ) + if (int(iface_domain_id) == domain_id): + try: + db.set_entry('MCLAG_INTERFACE', (iface_domain_id, iface_name), None ) + except (JsonPointerException, JsonPatchConflict) as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) #delete mclag domain - db.set_entry('MCLAG_DOMAIN', domain_id, None) + try: + db.set_entry('MCLAG_DOMAIN', domain_id, None) + except (JsonPointerException, JsonPatchConflict) as e: + ctx.fail("Invalid ConfigDB. Error: MCLAG_DOMAIN {} failed to be deleted".format(domain_id)) #keepalive timeout config @@ -260,16 +278,21 @@ def mclag_member(ctx): @click.pass_context def add_mclag_member(ctx, domain_id, portchannel_names): """Add member MCLAG interfaces from MCLAG Domain""" - db = ctx.obj['db'] - entry = db.get_entry('MCLAG_DOMAIN', domain_id) - if len(entry) == 0: - ctx.fail("MCLAG Domain " + domain_id + " not configured, configure mclag domain first") + db = ValidatedConfigDBConnector(ctx.obj['db']) + if ADHOC_VALIDATION: + entry = db.get_entry('MCLAG_DOMAIN', domain_id) + if len(entry) == 0: + ctx.fail("MCLAG Domain " + domain_id + " not configured, configure mclag domain first") portchannel_list = portchannel_names.split(",") for portchannel_name in portchannel_list: - if is_portchannel_name_valid(portchannel_name) != True: - ctx.fail("{} is invalid!, name should have prefix '{}' and suffix '{}'" .format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO)) - db.set_entry('MCLAG_INTERFACE', (domain_id, portchannel_name), {'if_type':"PortChannel"} ) + if ADHOC_VALIDATION: + if is_portchannel_name_valid(portchannel_name) != True: + ctx.fail("{} is invalid!, name should have prefix '{}' and suffix '{}'" .format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO)) + try: + db.set_entry('MCLAG_INTERFACE', (domain_id, portchannel_name), {'if_type':"PortChannel"} ) + except ValueError as e: + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) @mclag_member.command('del') @click.argument('domain_id', metavar='', required=True) @@ -277,13 +300,17 @@ def add_mclag_member(ctx, domain_id, portchannel_names): @click.pass_context def del_mclag_member(ctx, domain_id, portchannel_names): """Delete member MCLAG interfaces from MCLAG Domain""" - db = ctx.obj['db'] + db = ValidatedConfigDBConnector(ctx.obj['db']) #split comma seperated portchannel names portchannel_list = portchannel_names.split(",") for portchannel_name in portchannel_list: - if is_portchannel_name_valid(portchannel_name) != True: - ctx.fail("{} is invalid!, name should have prefix '{}' and suffix '{}'" .format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO)) - db.set_entry('MCLAG_INTERFACE', (domain_id, portchannel_name), None ) + if ADHOC_VALIDATION: + if is_portchannel_name_valid(portchannel_name) != True: + ctx.fail("{} is invalid!, name should have prefix '{}' and suffix '{}'" .format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO)) + try: + db.set_entry('MCLAG_INTERFACE', (domain_id, portchannel_name), None ) + except (JsonPatchConflict, JsonPointerException) as e: + ctx.fail("Failed to delete mclag member {} from mclag domain {}".format(portchannel_name, domain_id)) #mclag unique ip config @mclag.group('unique-ip') @@ -297,7 +324,7 @@ def mclag_unique_ip(ctx): @click.pass_context def add_mclag_unique_ip(ctx, interface_names): """Add Unique IP on MCLAG Vlan interface""" - db = ctx.obj['db'] + db = ValidatedConfigDBConnector(ctx.obj['db']) mclag_domain_keys = db.get_table('MCLAG_DOMAIN').keys() if len(mclag_domain_keys) == 0: ctx.fail("MCLAG not configured. MCLAG should be configured.") @@ -318,14 +345,17 @@ def add_mclag_unique_ip(ctx, interface_names): (intf_name, ip) = k if intf_name == interface_name and ip != 0: ctx.fail("%s is configured with IP %s, remove the IP configuration and reconfigure after enabling unique IP configuration."%(str(intf_name), str(ip))) - db.set_entry('MCLAG_UNIQUE_IP', (interface_name), {'unique_ip':"enable"} ) + try: + db.set_entry('MCLAG_UNIQUE_IP', (interface_name), {'unique_ip':"enable"} ) + except ValueError as e: + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) @mclag_unique_ip.command('del') @click.argument('interface_names', metavar='', required=True) @click.pass_context def del_mclag_unique_ip(ctx, interface_names): """Delete Unique IP from MCLAG Vlan interface""" - db = ctx.obj['db'] + db = ValidatedConfigDBConnector(ctx.obj['db']) #split comma seperated interface names interface_list = interface_names.split(",") for interface_name in interface_list: @@ -341,7 +371,10 @@ def del_mclag_unique_ip(ctx, interface_names): (intf_name, ip) = k if intf_name == interface_name and ip != 0: ctx.fail("%s is configured with IP %s, remove the IP configuration and reconfigure after disabling unique IP configuration."%(str(intf_name), str(ip))) - db.set_entry('MCLAG_UNIQUE_IP', (interface_name), None ) + try: + db.set_entry('MCLAG_UNIQUE_IP', (interface_name), None ) + except (JsonPatchConflict, JsonPointerException) as e: + ctx.fail("Failed to delete mclag unique IP from Vlan interface {}".format(interface_name)) ####### diff --git a/config/muxcable.py b/config/muxcable.py index f53eae22e3..ba80cb02af 100644 --- a/config/muxcable.py +++ b/config/muxcable.py @@ -246,7 +246,7 @@ def lookup_statedb_and_update_configdb(db, per_npu_statedb, config_db, port, sta ipv6_value = get_value_for_key_in_config_tbl(config_db, port, "server_ipv6", "MUX_CABLE") soc_ipv4_value = get_optional_value_for_key_in_config_tbl(config_db, port, "soc_ipv4", "MUX_CABLE") cable_type = get_optional_value_for_key_in_config_tbl(config_db, port, "cable_type", "MUX_CABLE") - + ctx = click.get_current_context() state = get_value_for_key_in_dict(muxcable_statedb_dict, port, "state", "MUX_CABLE_TABLE") port_name = platform_sfputil_helper.get_interface_alias(port, db) @@ -255,15 +255,21 @@ def lookup_statedb_and_update_configdb(db, per_npu_statedb, config_db, port, sta port_status_dict[port_name] = 'OK' else: if cable_type is not None or soc_ipv4_value is not None: - config_db.set_entry("MUX_CABLE", port, {"state": state_cfg_val, - "server_ipv4": ipv4_value, - "server_ipv6": ipv6_value, - "soc_ipv4":soc_ipv4_value, - "cable_type": cable_type}) + try: + config_db.set_entry("MUX_CABLE", port, {"state": state_cfg_val, + "server_ipv4": ipv4_value, + "server_ipv6": ipv6_value, + "soc_ipv4":soc_ipv4_value, + "cable_type": cable_type}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) else: - config_db.set_entry("MUX_CABLE", port, {"state": state_cfg_val, - "server_ipv4": ipv4_value, - "server_ipv6": ipv6_value}) + try: + config_db.set_entry("MUX_CABLE", port, {"state": state_cfg_val, + "server_ipv4": ipv4_value, + "server_ipv6": ipv6_value}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) if (str(state_cfg_val) == 'active' and str(state) != 'active') or (str(state_cfg_val) == 'standby' and str(state) != 'standby'): port_status_dict[port_name] = 'INPROGRESS' else: @@ -274,9 +280,13 @@ def update_configdb_pck_loss_data(config_db, port, val): ipv4_value = get_value_for_key_in_config_tbl(config_db, port, "server_ipv4", "MUX_CABLE") ipv6_value = get_value_for_key_in_config_tbl(config_db, port, "server_ipv6", "MUX_CABLE") - config_db.set_entry("MUX_CABLE", port, {"state": configdb_state, + try: + config_db.set_entry("MUX_CABLE", port, {"state": configdb_state, "server_ipv4": ipv4_value, "server_ipv6": ipv6_value, "pck_loss_data_reset": val}) + except ValueError as e: + ctx = click.get_current_context() + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) # 'muxcable' command ("config muxcable mode active|auto") @muxcable.command() diff --git a/config/nat.py b/config/nat.py index 99e21b2750..8d2ad32c22 100644 --- a/config/nat.py +++ b/config/nat.py @@ -1,8 +1,12 @@ import ipaddress import click +from jsonpatch import JsonPatchConflict +from jsonpointer import JsonPointerException from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector +from .validated_config_db_connector import ValidatedConfigDBConnector +ADHOC_VALIDATION = True def is_valid_ipv4_address(address): """Check if the given ipv4 address is valid""" @@ -243,15 +247,15 @@ def static(): @click.option('-twice_nat_id', metavar='', required=False, type=click.IntRange(1, 9999), help="Set the twice nat id") def add_basic(ctx, global_ip, local_ip, nat_type, twice_nat_id): """Add Static NAT-related configutation""" + if ADHOC_VALIDATION: + # Verify the ip address format + if is_valid_ipv4_address(local_ip) is False: + ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) - # Verify the ip address format - if is_valid_ipv4_address(local_ip) is False: - ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) - - if is_valid_ipv4_address(global_ip) is False: - ctx.fail("Given global ip address {} is invalid. Please enter a valid global ip address !!".format(global_ip)) + if is_valid_ipv4_address(global_ip) is False: + ctx.fail("Given global ip address {} is invalid. Please enter a valid global ip address !!".format(global_ip)) - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() entryFound = False @@ -304,13 +308,25 @@ def add_basic(ctx, global_ip, local_ip, nat_type, twice_nat_id): ctx.fail("Same Twice nat id is not allowed for more than 2 entries!!") if nat_type is not None and twice_nat_id is not None: - config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: nat_type, dataKey3: twice_nat_id}) + try: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: nat_type, dataKey3: twice_nat_id}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) elif nat_type is not None: - config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: nat_type}) + try: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: nat_type}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) elif twice_nat_id is not None: - config_db.set_entry(table, key, {dataKey1: local_ip, dataKey3: twice_nat_id}) + try: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey3: twice_nat_id}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) else: - config_db.set_entry(table, key, {dataKey1: local_ip}) + try: + config_db.set_entry(table, key, {dataKey1: local_ip}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat add static tcp' command ('config nat add static tcp ') @@ -325,15 +341,15 @@ def add_basic(ctx, global_ip, local_ip, nat_type, twice_nat_id): @click.option('-twice_nat_id', metavar='', required=False, type=click.IntRange(1, 9999), help="Set the twice nat id") def add_tcp(ctx, global_ip, global_port, local_ip, local_port, nat_type, twice_nat_id): """Add Static TCP Protocol NAPT-related configutation""" + if ADHOC_VALIDATION: + # Verify the ip address format + if is_valid_ipv4_address(local_ip) is False: + ctx.fail("Given local ip address {} is invalid. 
Please enter a valid local ip address !!".format(local_ip)) - # Verify the ip address format - if is_valid_ipv4_address(local_ip) is False: - ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) - - if is_valid_ipv4_address(global_ip) is False: - ctx.fail("Given global ip address {} is invalid. Please enter a valid global ip address !!".format(global_ip)) + if is_valid_ipv4_address(global_ip) is False: + ctx.fail("Given global ip address {} is invalid. Please enter a valid global ip address !!".format(global_ip)) - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() entryFound = False @@ -384,13 +400,25 @@ def add_tcp(ctx, global_ip, global_port, local_ip, local_port, nat_type, twice_n ctx.fail("Same Twice nat id is not allowed for more than 2 entries!!") if nat_type is not None and twice_nat_id is not None: - config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey3: nat_type, dataKey4: twice_nat_id}) + try: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey3: nat_type, dataKey4: twice_nat_id}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) elif nat_type is not None: - config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey3: nat_type}) + try: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey3: nat_type}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) elif twice_nat_id is not None: - config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey4: twice_nat_id}) + try: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey4: twice_nat_id}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) else: - config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port}) + try: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat add static udp' command ('config nat add static udp ') @@ -405,15 +433,16 @@ def add_tcp(ctx, global_ip, global_port, local_ip, local_port, nat_type, twice_n @click.option('-twice_nat_id', metavar='', required=False, type=click.IntRange(1, 9999), help="Set the twice nat id") def add_udp(ctx, global_ip, global_port, local_ip, local_port, nat_type, twice_nat_id): """Add Static UDP Protocol NAPT-related configutation""" + + if ADHOC_VALIDATION: + # Verify the ip address format + if is_valid_ipv4_address(local_ip) is False: + ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) - # Verify the ip address format - if is_valid_ipv4_address(local_ip) is False: - ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) - - if is_valid_ipv4_address(global_ip) is False: - ctx.fail("Given global ip address {} is invalid. Please enter a valid global ip address !!".format(global_ip)) + if is_valid_ipv4_address(global_ip) is False: + ctx.fail("Given global ip address {} is invalid. 
Please enter a valid global ip address !!".format(global_ip)) - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() entryFound = False @@ -464,13 +493,25 @@ def add_udp(ctx, global_ip, global_port, local_ip, local_port, nat_type, twice_n ctx.fail("Same Twice nat id is not allowed for more than 2 entries!!") if nat_type is not None and twice_nat_id is not None: - config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey3: nat_type, dataKey4: twice_nat_id}) + try: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey3: nat_type, dataKey4: twice_nat_id}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) elif nat_type is not None: - config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey3: nat_type}) + try: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey3: nat_type}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) elif twice_nat_id is not None: - config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey4: twice_nat_id}) + try: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey4: twice_nat_id}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) else: - config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port}) + try: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat remove static' group ('config nat remove static ...') @@ -489,15 +530,16 @@ def static(): @click.argument('local_ip', metavar='', required=True) def remove_basic(ctx, global_ip, local_ip): """Remove Static NAT-related configutation""" + + if ADHOC_VALIDATION: + # Verify the ip address format + if is_valid_ipv4_address(local_ip) is False: + ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) - # Verify the ip address format - if is_valid_ipv4_address(local_ip) is False: - ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) - - if is_valid_ipv4_address(global_ip) is False: - ctx.fail("Given global ip address {} is invalid. Please enter a valid global ip address !!".format(global_ip)) + if is_valid_ipv4_address(global_ip) is False: + ctx.fail("Given global ip address {} is invalid. Please enter a valid global ip address !!".format(global_ip)) - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() entryFound = False @@ -508,8 +550,11 @@ def remove_basic(ctx, global_ip, local_ip): data = config_db.get_entry(table, key) if data: if data[dataKey] == local_ip: - config_db.set_entry(table, key, None) - entryFound = True + try: + config_db.set_entry(table, key, None) + entryFound = True + except (JsonPatchConflict, JsonPointerException) as e: + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) if entryFound is False: click.echo("Trying to delete static nat entry, which is not present.") @@ -526,15 +571,16 @@ def remove_basic(ctx, global_ip, local_ip): @click.argument('local_port', metavar='', type=click.IntRange(1, 65535), required=True) def remove_tcp(ctx, global_ip, global_port, local_ip, local_port): """Remove Static TCP Protocol NAPT-related configutation""" + + if ADHOC_VALIDATION: + # Verify the ip address format + if is_valid_ipv4_address(local_ip) is False: + ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) - # Verify the ip address format - if is_valid_ipv4_address(local_ip) is False: - ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) - - if is_valid_ipv4_address(global_ip) is False: - ctx.fail("Given global ip address {} is invalid. Please enter a valid global ip address !!".format(global_ip)) + if is_valid_ipv4_address(global_ip) is False: + ctx.fail("Given global ip address {} is invalid. Please enter a valid global ip address !!".format(global_ip)) - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() entryFound = False @@ -544,8 +590,11 @@ def remove_tcp(ctx, global_ip, global_port, local_ip, local_port): data = config_db.get_entry(table, key) if data: if data['local_ip'] == local_ip and data['local_port'] == str(local_port): - config_db.set_entry(table, key, None) - entryFound = True + try: + config_db.set_entry(table, key, None) + entryFound = True + except (JsonPatchConflict, JsonPointerException) as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) if entryFound is False: click.echo("Trying to delete static napt entry, which is not present.") @@ -561,15 +610,16 @@ def remove_tcp(ctx, global_ip, global_port, local_ip, local_port): @click.argument('local_port', metavar='', type=click.IntRange(1, 65535), required=True) def remove_udp(ctx, global_ip, global_port, local_ip, local_port): """Remove Static UDP Protocol NAPT-related configutation""" + + if ADHOC_VALIDATION: + # Verify the ip address format + if is_valid_ipv4_address(local_ip) is False: + ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) - # Verify the ip address format - if is_valid_ipv4_address(local_ip) is False: - ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) - - if is_valid_ipv4_address(global_ip) is False: - ctx.fail("Given global ip address {} is invalid. Please enter a valid global ip address !!".format(global_ip)) + if is_valid_ipv4_address(global_ip) is False: + ctx.fail("Given global ip address {} is invalid. Please enter a valid global ip address !!".format(global_ip)) - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() entryFound = False @@ -581,8 +631,11 @@ def remove_udp(ctx, global_ip, global_port, local_ip, local_port): data = config_db.get_entry(table, key) if data: if data[dataKey1] == local_ip and data[dataKey2] == str(local_port): - config_db.set_entry(table, key, None) - entryFound = True + try: + config_db.set_entry(table, key, None) + entryFound = True + except (JsonPatchConflict, JsonPointerException) as e: + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) if entryFound is False: click.echo("Trying to delete static napt entry, which is not present.") @@ -595,7 +648,7 @@ def remove_udp(ctx, global_ip, global_port, local_ip, local_port): def remove_static_all(ctx): """Remove all Static related configutation""" - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() tables = ['STATIC_NAT', 'STATIC_NAPT'] @@ -604,7 +657,10 @@ def remove_static_all(ctx): table_dict = config_db.get_table(table_name) if table_dict: for table_key_name in table_dict: - config_db.set_entry(table_name, table_key_name, None) + try: + config_db.set_entry(table_name, table_key_name, None) + except (JsonPatchConflict, JsonPointerException) as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat add pool' command ('config nat add pool ') @@ -664,7 +720,7 @@ def add_pool(ctx, pool_name, global_ip_range, global_port_range): else: global_port_range = "NULL" - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() entryFound = False @@ -711,7 +767,10 @@ def add_pool(ctx, pool_name, global_ip_range, global_port_range): ctx.fail("Given Ip address entry is overlapping with existing Static NAT entry !!") if entryFound == False: - config_db.set_entry(table, key, {dataKey1: global_ip_range, dataKey2 : global_port_range}) + try: + config_db.set_entry(table, key, {dataKey1: global_ip_range, dataKey2 : global_port_range}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat add binding' command ('config nat add binding ') @@ -740,7 +799,7 @@ def add_binding(ctx, binding_name, pool_name, acl_name, nat_type, twice_nat_id): if len(binding_name) > 32: ctx.fail("Invalid binding name. Maximum allowed binding name is 32 characters !!") - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() data = config_db.get_entry(table, key) @@ -773,7 +832,10 @@ def add_binding(ctx, binding_name, pool_name, acl_name, nat_type, twice_nat_id): if count > 1: ctx.fail("Same Twice nat id is not allowed for more than 2 entries!!") - config_db.set_entry(table, key, {dataKey1: acl_name, dataKey2: pool_name, dataKey3: nat_type, dataKey4: twice_nat_id}) + try: + config_db.set_entry(table, key, {dataKey1: acl_name, dataKey2: pool_name, dataKey3: nat_type, dataKey4: twice_nat_id}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat remove pool' command ('config nat remove pool ') @@ -791,7 +853,7 @@ def remove_pool(ctx, pool_name): if len(pool_name) > 32: ctx.fail("Invalid pool name. Maximum allowed pool name is 32 characters !!") - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() data = config_db.get_entry(table, key) @@ -808,7 +870,10 @@ def remove_pool(ctx, pool_name): break if entryFound == False: - config_db.set_entry(table, key, None) + try: + config_db.set_entry(table, key, None) + except (JsonPatchConflict, JsonPointerException) as e: + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) # # 'nat remove pools' command ('config nat remove pools') @@ -818,7 +883,7 @@ def remove_pool(ctx, pool_name): def remove_pools(ctx): """Remove all Pools for Dynamic configutation""" - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() entryFound = False @@ -835,8 +900,11 @@ def remove_pools(ctx): entryFound = True break - if entryFound == False: - config_db.set_entry(pool_table_name, pool_key_name, None) + if entryFound == False: + try: + config_db.set_entry(pool_table_name, pool_key_name, None) + except (JsonPatchConflict, JsonPointerException) as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat remove binding' command ('config nat remove binding ') @@ -854,7 +922,7 @@ def remove_binding(ctx, binding_name): if len(binding_name) > 32: ctx.fail("Invalid binding name. Maximum allowed binding name is 32 characters !!") - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() data = config_db.get_entry(table, key) @@ -863,7 +931,10 @@ def remove_binding(ctx, binding_name): entryFound = True if entryFound == False: - config_db.set_entry(table, key, None) + try: + config_db.set_entry(table, key, None) + except (JsonPatchConflict, JsonPointerException) as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat remove bindings' command ('config nat remove bindings') @@ -873,14 +944,17 @@ def remove_binding(ctx, binding_name): def remove_bindings(ctx): """Remove all Bindings for Dynamic configutation""" - config_db = ConfigDBConnector() + config_db = ValidatedConfigBConnector(ConfigDBConnector()) config_db.connect() binding_table_name = 'NAT_BINDINGS' binding_dict = config_db.get_table(binding_table_name) if binding_dict: for binding_key_name in binding_dict: - config_db.set_entry(binding_table_name, binding_key_name, None) + try: + config_db.set_entry(binding_table_name, binding_key_name, None) + except (JsonPatchConflict, JsonPointerException) as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat add interface' command ('config nat add interface -nat_zone ') @@ -892,7 +966,7 @@ def remove_bindings(ctx): def add_interface(ctx, interface_name, nat_zone): """Add interface related nat configuration""" - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() if nat_interface_name_is_valid(interface_name) is False: @@ -912,7 +986,10 @@ def add_interface(ctx, interface_name, nat_zone): if not interface_table_dict or interface_name not in interface_table_dict: ctx.fail("Interface table is not present. Please configure ip-address on {} and apply the nat zone !!".format(interface_name)) - config_db.mod_entry(interface_table_type, interface_name, {"nat_zone": nat_zone}) + try: + config_db.mod_entry(interface_table_type, interface_name, {"nat_zone": nat_zone}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) # # 'nat remove interface' command ('config nat remove interface ') @@ -922,7 +999,7 @@ def add_interface(ctx, interface_name, nat_zone): @click.argument('interface_name', metavar='', required=True) def remove_interface(ctx, interface_name): """Remove interface related NAT configuration""" - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() if nat_interface_name_is_valid(interface_name) is False: @@ -942,7 +1019,10 @@ def remove_interface(ctx, interface_name): if not interface_table_dict or interface_name not in interface_table_dict: ctx.fail("Interface table is not present. Ignoring the nat zone configuration") - config_db.mod_entry(interface_table_type, interface_name, {"nat_zone": "0"}) + try: + config_db.mod_entry(interface_table_type, interface_name, {"nat_zone": "0"}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat remove interfaces' command ('config nat remove interfaces') @@ -951,7 +1031,7 @@ def remove_interface(ctx, interface_name): @click.pass_context def remove_interfaces(ctx): """Remove all interface related NAT configuration""" - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() tables = ['INTERFACE', 'PORTCHANNEL_INTERFACE', 'VLAN_INTERFACE', 'LOOPBACK_INTERFACE'] @@ -964,7 +1044,10 @@ def remove_interfaces(ctx): if isinstance(table_key_name, str) is False: continue - config_db.set_entry(table_name, table_key_name, nat_config) + try: + config_db.set_entry(table_name, table_key_name, nat_config) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat feature' group ('config nat feature ') @@ -982,9 +1065,12 @@ def feature(): def enable(ctx): """Enbale the NAT feature """ - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() - config_db.mod_entry("NAT_GLOBAL", "Values", {"admin_mode": "enabled"}) + try: + config_db.mod_entry("NAT_GLOBAL", "Values", {"admin_mode": "enabled"}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat feature disable' command ('config nat feature disable>') @@ -993,9 +1079,12 @@ def enable(ctx): @click.pass_context def disable(ctx): """Disable the NAT feature """ - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() - config_db.mod_entry("NAT_GLOBAL", "Values", {"admin_mode": "disabled"}) + try: + config_db.mod_entry("NAT_GLOBAL", "Values", {"admin_mode": "disabled"}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat set timeout' command ('config nat set timeout ') @@ -1005,10 +1094,13 @@ def disable(ctx): @click.argument('seconds', metavar='', type=click.IntRange(300, 432000), required=True) def timeout(ctx, seconds): """Set NAT timeout configuration""" - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() - - config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_timeout": seconds}) + + try: + config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_timeout": seconds}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) # # 'nat set tcp-timeout' command ('config nat set tcp-timeout ') @@ -1018,10 +1110,13 @@ def timeout(ctx, seconds): @click.argument('seconds', metavar='', type=click.IntRange(300, 432000), required=True) def tcp_timeout(ctx, seconds): """Set NAT TCP timeout configuration""" - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() - - config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_tcp_timeout": seconds}) + + try: + config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_tcp_timeout": seconds}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat set udp-timeout' command ('config nat set udp-timeout ') @@ -1031,10 +1126,13 @@ def tcp_timeout(ctx, seconds): @click.argument('seconds', metavar='', type=click.IntRange(120, 600), required=True) def udp_timeout(ctx, seconds): """Set NAT UDP timeout configuration""" - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() - config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_udp_timeout": seconds}) + try: + config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_udp_timeout": seconds}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat reset timeout' command ('config nat reset timeout') @@ -1043,11 +1141,14 @@ def udp_timeout(ctx, seconds): @click.pass_context def timeout(ctx): """Reset NAT timeout configuration to default value (600 seconds)""" - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() seconds = 600 - - config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_timeout": seconds}) + + try: + config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_timeout": seconds}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat reset tcp-timeout' command ('config nat reset tcp-timeout') @@ -1056,11 +1157,14 @@ def timeout(ctx): @click.pass_context def tcp_timeout(ctx): """Reset NAT TCP timeout configuration to default value (86400 seconds)""" - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() seconds = 86400 - - config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_tcp_timeout": seconds}) + + try: + config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_tcp_timeout": seconds}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) # # 'nat reset udp-timeout' command ('config nat reset udp-timeout') @@ -1069,8 +1173,11 @@ def tcp_timeout(ctx): @click.pass_context def udp_timeout(ctx): """Reset NAT UDP timeout configuration to default value (300 seconds)""" - config_db = ConfigDBConnector() + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() seconds = 300 - - config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_udp_timeout": seconds}) + + try: + config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_udp_timeout": seconds}) + except ValueError as e: + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) diff --git a/config/validated_config_db_connector.py b/config/validated_config_db_connector.py index 7f6e230ace..25d8ef5f71 100644 --- a/config/validated_config_db_connector.py +++ b/config/validated_config_db_connector.py @@ -94,7 +94,8 @@ def apply_patch(self, gcu_patch, table): config_format = ConfigFormat[format.upper()] try: - GenericUpdater().apply_patch(patch=gcu_patch, config_format=config_format, verbose=False, dry_run=False, ignore_non_yang_tables=False, ignore_paths=None) + # Because all writes to ConfigDB through ValidatedConfigDBConnector are simple and don't require sorting, we set sort=False to skip sorting and improve performance + GenericUpdater().apply_patch(patch=gcu_patch, config_format=config_format, verbose=False, dry_run=False, ignore_non_yang_tables=False, ignore_paths=None, sort=False) except EmptyTableError: self.validated_delete_table(table) @@ -103,7 +104,7 @@ def validated_delete_table(self, table): format = ConfigFormat.CONFIGDB.name config_format = ConfigFormat[format.upper()] try: - GenericUpdater().apply_patch(patch=gcu_patch, config_format=config_format, verbose=False, dry_run=False, ignore_non_yang_tables=False, ignore_paths=None) + GenericUpdater().apply_patch(patch=gcu_patch, config_format=config_format, verbose=False, dry_run=False, ignore_non_yang_tables=False, ignore_paths=None, sort=False) except ValueError as e: logger = genericUpdaterLogging.get_logger(title="Patch Applier", print_all_to_console=True) logger.log_notice("Unable to remove entry, as doing so will result in invalid config. Error: {}".format(e)) diff --git a/config/vlan.py b/config/vlan.py index 7587e024a4..33c6145770 100644 --- a/config/vlan.py +++ b/config/vlan.py @@ -1,5 +1,6 @@ import click import utilities_common.cli as clicommon +import utilities_common.dhcp_relay_util as dhcp_relay_util from jsonpatch import JsonPatchConflict from time import sleep @@ -7,6 +8,8 @@ from .validated_config_db_connector import ValidatedConfigDBConnector ADHOC_VALIDATION = True +DHCP_RELAY_TABLE = "DHCP_RELAY" +DHCPV6_SERVERS = "dhcpv6_servers" # # 'vlan' group ('config vlan ...') @@ -16,6 +19,16 @@ def vlan(): """VLAN-related configuration tasks""" pass + +def set_dhcp_relay_table(table, config_db, vlan_name, value): + config_db.set_entry(table, vlan_name, value) + + +def is_dhcp_relay_running(): + out, _ = clicommon.run_command("systemctl show dhcp_relay.service --property ActiveState --value", return_cmd=True) + return out.strip() == "active" + + @vlan.command('add') @click.argument('vid', metavar='', required=True, type=int) @clicommon.pass_db @@ -24,7 +37,7 @@ def add_vlan(db, vid): ctx = click.get_current_context() vlan = 'Vlan{}'.format(vid) - + config_db = ValidatedConfigDBConnector(db.cfgdb) if ADHOC_VALIDATION: if not clicommon.is_vlanid_in_range(vid): @@ -32,25 +45,42 @@ def add_vlan(db, vid): if vid == 1: ctx.fail("{} is default VLAN".format(vlan)) # TODO: MISSING CONSTRAINT IN YANG MODEL - + if clicommon.check_if_vlanid_exist(db.cfgdb, vlan): # TODO: MISSING CONSTRAINT IN YANG MODEL ctx.fail("{} already exists".format(vlan)) - - try: - config_db.set_entry('VLAN', vlan, {'vlanid': str(vid)}) - except ValueError: - ctx.fail("Invalid VLAN ID {} (1-4094)".format(vid)) + if clicommon.check_if_vlanid_exist(db.cfgdb, vlan, "DHCP_RELAY"): + ctx.fail("DHCPv6 relay config for {} already exists".format(vlan)) + # set dhcpv4_relay table + set_dhcp_relay_table('VLAN', config_db, vlan, {'vlanid': str(vid)}) + + +def is_dhcpv6_relay_config_exist(db, vlan_name): + keys = 
db.cfgdb.get_keys(DHCP_RELAY_TABLE) + if len(keys) == 0 or vlan_name not in keys: + return False + + table = db.cfgdb.get_entry("DHCP_RELAY", vlan_name) + dhcpv6_servers = table.get(DHCPV6_SERVERS, []) + if len(dhcpv6_servers) > 0: + return True + return False + @vlan.command('del') @click.argument('vid', metavar='', required=True, type=int) +@click.option('--no_restart_dhcp_relay', is_flag=True, type=click.BOOL, required=False, default=False, + help="If no_restart_dhcp_relay is True, do not restart dhcp_relay while deleting the vlan and \ + require that the dhcpv6 relay config of this vlan is empty") @clicommon.pass_db -def del_vlan(db, vid): +def del_vlan(db, vid, no_restart_dhcp_relay): """Delete VLAN""" log.log_info("'vlan del {}' executing...".format(vid)) ctx = click.get_current_context() vlan = 'Vlan{}'.format(vid) + if no_restart_dhcp_relay: + if is_dhcpv6_relay_config_exist(db, vlan): + ctx.fail("Can't delete {} because related DHCPv6 Relay config exists".format(vlan)) config_db = ValidatedConfigDBConnector(db.cfgdb) if ADHOC_VALIDATION: @@ -67,19 +97,25 @@ def del_vlan(db, vid): ctx.fail("{} can not be removed. First remove IP addresses assigned to this VLAN".format(vlan)) keys = [ (k, v) for k, v in db.cfgdb.get_table('VLAN_MEMBER') if k == 'Vlan{}'.format(vid) ] - + if keys: # TODO: MISSING CONSTRAINT IN YANG MODEL ctx.fail("VLAN ID {} can not be removed. First remove all members assigned to this VLAN.".format(vid)) - + vxlan_table = db.cfgdb.get_table('VXLAN_TUNNEL_MAP') for vxmap_key, vxmap_data in vxlan_table.items(): if vxmap_data['vlan'] == 'Vlan{}'.format(vid): ctx.fail("vlan: {} can not be removed. First remove vxlan mapping '{}' assigned to VLAN".format(vid, '|'.join(vxmap_key)) ) - - try: - config_db.set_entry('VLAN', 'Vlan{}'.format(vid), None) - except JsonPatchConflict: - ctx.fail("{} does not exist".format(vlan)) + + # set dhcpv4_relay table + set_dhcp_relay_table('VLAN', config_db, vlan, None) + + if not no_restart_dhcp_relay and is_dhcpv6_relay_config_exist(db, vlan): + # set dhcpv6_relay table + set_dhcp_relay_table('DHCP_RELAY', config_db, vlan, None) + # We need to restart dhcp_relay service after dhcpv6_relay config change + if is_dhcp_relay_running(): + dhcp_relay_util.handle_restart_dhcp_relay_service() + def restart_ndppd(): verify_swss_running_cmd = "docker container inspect -f '{{.State.Status}}' swss" diff --git a/debug/main.py b/debug/main.py index 8c502c96ad..069159fc75 100755 --- a/debug/main.py +++ b/debug/main.py @@ -1,9 +1,13 @@ +import re +import sys import click import subprocess +from shlex import join def run_command(command, pager=False): - click.echo(click.style("Command: ", fg='cyan') + click.style(command, fg='green')) - p = subprocess.Popen(command, shell=True, text=True, stdout=subprocess.PIPE) + command_str = join(command) + click.echo(click.style("Command: ", fg='cyan') + click.style(command_str, fg='green')) + p = subprocess.Popen(command, text=True, stdout=subprocess.PIPE) output = p.stdout.read() if pager: click.echo_via_pager(output) @@ -21,8 +25,8 @@ def cli(): """SONiC command line - 'debug' command""" pass - -p = subprocess.check_output(["sudo vtysh -c 'show version'"], shell=True, text=True) +prefix_pattern = '^[A-Za-z0-9.:/]*$' +p = subprocess.check_output(['sudo', 'vtysh', '-c', 'show version'], text=True) if 'FRRouting' in p: # # 'bgp' group for FRR ### # @cli.group() def bgp(): """Enable debugging for bgp module""" pass 
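The debug/main.py rewrite that follows applies one pattern throughout: build each command as an argv list instead of a shell string (dropping `shell=True`), validate user-supplied prefixes against `prefix_pattern` before splicing them in, and use `shlex.join` only when rendering the command for display. A minimal standalone sketch of the same idea (`run_vtysh_debug` is a hypothetical name, not part of this change):

```python
import re
import subprocess
from shlex import join  # Python 3.8+

prefix_pattern = '^[A-Za-z0-9.:/]*$'

def run_vtysh_debug(debug_cmd, user_arg=None):
    # Reject anything outside the whitelist before it reaches vtysh.
    if user_arg is not None:
        if not re.match(prefix_pattern, user_arg):
            raise SystemExit('Prefix can contain only numbers, letters, periods, colons, and forward slashes')
        debug_cmd += ' ' + user_arg
    command = ['sudo', 'vtysh', '-c', debug_cmd]  # argv list: no shell interpolation
    print('Command:', join(command))              # join() is for display only
    subprocess.run(command, check=False)
```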
bgp allow-martians"] run_command(command) @bgp.command() @click.argument('additional', type=click.Choice(['segment']), required=False) def as4(additional): """BGP AS4 actions""" - command = 'sudo vtysh -c "debug bgp as4' + command = ['sudo', 'vtysh', '-c', "debug bgp as4"] if additional is not None: - command += " segment" - command += '"' + command[-1] += " segment" run_command(command) @bgp.command() @click.argument('prefix', required=True) def bestpath(prefix): """BGP bestpath""" - command = 'sudo vtysh -c "debug bgp bestpath %s"' % prefix + if not re.match(prefix_pattern, prefix): + sys.exit('Prefix contains only number, alphabet, period, colon, and forward slash') + command = ['sudo', 'vtysh', '-c', "debug bgp bestpath %s" % prefix] run_command(command) @bgp.command() @click.argument('prefix_or_iface', required=False) def keepalives(prefix_or_iface): """BGP Neighbor Keepalives""" - command = 'sudo vtysh -c "debug bgp keepalives' + command = ['sudo', 'vtysh', '-c', "debug bgp keepalives"] if prefix_or_iface is not None: - command += " " + prefix_or_iface - command += '"' + command[-1] += ' ' + prefix_or_iface run_command(command) @bgp.command('neighbor-events') @click.argument('prefix_or_iface', required=False) def neighbor_events(prefix_or_iface): """BGP Neighbor Events""" - command = 'sudo vtysh -c "debug bgp neighbor-events' + command = ['sudo', 'vtysh', '-c', "debug bgp neighbor-events"] if prefix_or_iface is not None: - command += " " + prefix_or_iface - command += '"' + command[-1] += ' ' + prefix_or_iface run_command(command) @bgp.command() def nht(): """BGP nexthop tracking events""" - command = 'sudo vtysh -c "debug bgp nht"' + command = ['sudo', 'vtysh', '-c', "debug bgp nht"] run_command(command) @bgp.command() @click.argument('additional', type=click.Choice(['error']), required=False) def pbr(additional): """BGP policy based routing""" - command = 'sudo vtysh -c "debug bgp pbr' + command = ['sudo', 'vtysh', '-c', "debug bgp pbr"] if additional is not None: - command += " error" - command += '"' + command[-1] += " error" run_command(command) @bgp.command('update-groups') def update_groups(): """BGP update-groups""" - command = 'sudo vtysh -c "debug bgp update-groups"' + command = ['sudo', 'vtysh', '-c', "debug bgp update-groups"] run_command(command) @bgp.command() @@ -102,22 +104,25 @@ def update_groups(): @click.argument('prefix', required=False) def updates(direction, prefix): """BGP updates""" - command = 'sudo vtysh -c "debug bgp updates' + bgp_cmd = "debug bgp updates" if direction is not None: - command += " " + direction + bgp_cmd += ' ' + direction if prefix is not None: - command += " " + prefix - command += '"' + if not re.match(prefix_pattern, prefix): + sys.exit('Prefix contains only number, alphabet, period, colon, and forward slash') + bgp_cmd += ' ' + prefix + command = ['sudo', 'vtysh', '-c', bgp_cmd] run_command(command) @bgp.command() @click.argument('prefix', required=False) def zebra(prefix): """BGP Zebra messages""" - command = 'sudo vtysh -c "debug bgp zebra' + command = ['sudo', 'vtysh', '-c', "debug bgp zebra"] if prefix is not None: - command += " prefix " + prefix - command += '"' + if not re.match(prefix_pattern, prefix): + sys.exit('Prefix contains only number, alphabet, period, colon, and forward slash') + command[-1] += " prefix " + prefix run_command(command) # @@ -132,56 +137,54 @@ def zebra(): @click.argument('detailed', type=click.Choice(['detailed']), required=False) def dplane(detailed): """Debug zebra dataplane events""" - command = 
'sudo vtysh -c "debug zebra dplane' + command = ['sudo', 'vtysh', '-c', "debug zebra dplane"] if detailed is not None: - command += " detailed" - command += '"' + command[-1] += " detailed" run_command(command) @zebra.command() def events(): """Debug option set for zebra events""" - command = 'sudo vtysh -c "debug zebra events"' + command = ['sudo', 'vtysh', '-c', "debug zebra events"] run_command(command) @zebra.command() def fpm(): """Debug zebra FPM events""" - command = 'sudo vtysh -c "debug zebra fpm"' + command = ['sudo', 'vtysh', '-c', "debug zebra fpm"] run_command(command) @zebra.command() def kernel(): """Debug option set for zebra between kernel interface""" - command = 'sudo vtysh -c "debug zebra kernel"' + command = ['sudo', 'vtysh', '-c', "debug zebra kernel"] run_command(command) @zebra.command() def nht(): """Debug option set for zebra next hop tracking""" - command = 'sudo vtysh -c "debug zebra nht"' + command = ['sudo', 'vtysh', '-c', "debug zebra nht"] run_command(command) @zebra.command() def packet(): """Debug option set for zebra packet""" - command = 'sudo vtysh -c "debug zebra packet"' + command = ['sudo', 'vtysh', '-c', "debug zebra packet"] run_command(command) @zebra.command() @click.argument('detailed', type=click.Choice(['detailed']), required=False) def rib(detailed): """Debug RIB events""" - command = 'sudo vtysh -c "debug zebra rib' + command = ['sudo', 'vtysh', '-c', "debug zebra rib"] if detailed is not None: - command += " detailed" - command += '"' + command[-1] += " detailed" run_command(command) @zebra.command() def vxlan(): """Debug option set for zebra VxLAN (EVPN)""" - command = 'sudo vtysh -c "debug zebra vxlan"' + command = ['sudo', 'vtysh', '-c', "debug zebra vxlan"] run_command(command) else: @@ -193,49 +196,49 @@ def vxlan(): def bgp(ctx): """debug bgp on""" if ctx.invoked_subcommand is None: - command = 'sudo vtysh -c "debug bgp"' + command = ['sudo', 'vtysh', '-c', "debug bgp"] run_command(command) @bgp.command() def events(): """debug bgp events on""" - command = 'sudo vtysh -c "debug bgp events"' + command = ['sudo', 'vtysh', '-c', "debug bgp events"] run_command(command) @bgp.command() def updates(): """debug bgp updates on""" - command = 'sudo vtysh -c "debug bgp updates"' + command = ['sudo', 'vtysh', '-c', "debug bgp updates"] run_command(command) @bgp.command() def as4(): """debug bgp as4 actions on""" - command = 'sudo vtysh -c "debug bgp as4"' + command = ['sudo', 'vtysh', '-c', "debug bgp as4"] run_command(command) @bgp.command() def filters(): """debug bgp filters on""" - command = 'sudo vtysh -c "debug bgp filters"' + command = ['sudo', 'vtysh', '-c', "debug bgp filters"] run_command(command) @bgp.command() def fsm(): """debug bgp finite state machine on""" - command = 'sudo vtysh -c "debug bgp fsm"' + command = ['sudo', 'vtysh', '-c', "debug bgp fsm"] run_command(command) @bgp.command() def keepalives(): """debug bgp keepalives on""" - command = 'sudo vtysh -c "debug bgp keepalives"' + command = ['sudo', 'vtysh', '-c', "debug bgp keepalives"] run_command(command) @bgp.command() def zebra(): """debug bgp zebra messages on""" - command = 'sudo vtysh -c "debug bgp zebra"' + command = ['sudo', 'vtysh', '-c', "debug bgp zebra"] run_command(command) # @@ -248,32 +251,31 @@ def zebra(): @zebra.command() def events(): - """debug option set for zebra events""" - command = 'sudo vtysh -c "debug zebra events"' + command = ['sudo', 'vtysh', '-c', "debug zebra events"] run_command(command) @zebra.command() def fpm(): """debug zebra FPM 
events""" - command = 'sudo vtysh -c "debug zebra fpm"' + command = ['sudo', 'vtysh', '-c', "debug zebra fpm"] run_command(command) @zebra.command() def kernel(): """debug option set for zebra between kernel interface""" - command = 'sudo vtysh -c "debug zebra kernel"' + command = ['sudo', 'vtysh', '-c', "debug zebra kernel"] run_command(command) @zebra.command() def packet(): """debug option set for zebra packet""" - command = 'sudo vtysh -c "debug zebra packet"' + command = ['sudo', 'vtysh', '-c', "debug zebra packet"] run_command(command) @zebra.command() def rib(): """debug RIB events""" - command = 'sudo vtysh -c "debug zebra rib"' + command = ['sudo', 'vtysh', '-c', "debug zebra rib"] run_command(command) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 063db2cc4e..86902cd7e7 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -40,6 +40,8 @@ * [Console connect commands](#console-connect-commands) * [Console clear commands](#console-clear-commands) * [DHCP Relay](#dhcp-relay) + * [DHCP Relay show commands](#dhcp-relay-show-commands) + * [DHCP Relay clear commands](#dhcp-relay-clear-commands) * [DHCP Relay config commands](#dhcp-relay-config-commands) * [Drop Counters](#drop-counters) * [Drop Counter show commands](#drop-counters-show-commands) @@ -927,7 +929,7 @@ This command displays information for all the interfaces for the transceiver req - Usage: ``` - show interfaces transceiver (eeprom [-d|--dom] | lpmode | presence | error-status [-hw|--fetch-from-hardware]) [] + show interfaces transceiver (eeprom [-d|--dom] | info | lpmode | presence | error-status [-hw|--fetch-from-hardware] | pm) [] ``` - Example (Decode and display information stored on the EEPROM of SFP transceiver connected to Ethernet0): @@ -965,6 +967,48 @@ This command displays information for all the interfaces for the transceiver req Vcc : 0.0000Volts ``` +- Example (Decode and display information stored on the EEPROM of SFP transceiver connected to Ethernet16): + ``` + admin@sonic:~$ show interfaces transceiver info Ethernet16 + Ethernet16: SFP EEPROM detected + Active Firmware: 61.20 + Active application selected code assigned to host lane 1: 1 + Active application selected code assigned to host lane 2: 1 + Active application selected code assigned to host lane 3: 1 + Active application selected code assigned to host lane 4: 1 + Active application selected code assigned to host lane 5: 1 + Active application selected code assigned to host lane 6: 1 + Active application selected code assigned to host lane 7: 1 + Active application selected code assigned to host lane 8: 1 + Application Advertisement: 400GAUI-8 C2M (Annex 120E) - Host Assign (0x1) - 400ZR, DWDM, amplified - Media Assign (0x1) + 400GAUI-8 C2M (Annex 120E) - Host Assign (0x1) - 400ZR, Single Wavelength, Unamplified - Media Assign (0x1) + 100GAUI-2 C2M (Annex 135G) - Host Assign (0x55) - 400ZR, DWDM, amplified - Media Assign (0x1) + CMIS Rev: 4.1 + Connector: LC + Encoding: N/A + Extended Identifier: Power Class 8 (20.0W Max) + Extended RateSelect Compliance: N/A + Host Lane Count: 8 + Identifier: QSFP-DD Double Density 8X Pluggable Transceiver + Inactive Firmware: 61.20 + Length Cable Assembly(m): 0.0 + Media Interface Technology: 1550 nm DFB + Media Lane Count: 1 + Module Hardware Rev: 49.49 + Nominal Bit Rate(100Mbs): 0 + Specification Compliance: sm_media_interface + Supported Max Laser Frequency: 196100 + Supported Max TX Power: 4.0 + Supported Min Laser Frequency: 191300 + Supported Min TX Power: -22.9 + 
Vendor Date Code(YYYY-MM-DD Lot): 2020-21-02 17 + Vendor Name: Acacia Comm Inc. + Vendor OUI: 7c-b2-5c + Vendor PN: DP04QSDD-E20-00E + Vendor Rev: 01 + Vendor SN: 210753986 + ``` + - Example (Display status of low-power mode of SFP transceiver connected to Ethernet100): ``` admin@sonic:~$ show interfaces transceiver lpmode Ethernet100 Port Low-power Mode ----------- ---------------- Ethernet100 On ``` @@ -990,6 +1034,30 @@ This command displays information for all the interfaces for the transceiver req Ethernet100 OK ``` +- Example (Display performance monitoring info of SFP transceiver connected to Ethernet100): + ``` + admin@sonic:~$ show interfaces transceiver pm Ethernet100 + Ethernet100: + Parameter Unit Min Avg Max Threshold Threshold Threshold Threshold Threshold Threshold + High High Crossing Low Low Crossing + Alarm Warning Alert-High Alarm Warning Alert-Low + --------------- ------ -------- -------- -------- ----------- ----------- ------------ ----------- ----------- ----------- + Tx Power dBm -8.22 -8.23 -8.24 -5.0 -6.0 False -16.99 -16.003 False + Rx Total Power dBm -10.61 -10.62 -10.62 2.0 0.0 False -21.0 -18.0 False + Rx Signal Power dBm -40.0 0.0 40.0 13.0 10.0 True -18.0 -15.0 True + CD-short link ps/nm 0.0 0.0 0.0 1000.0 500.0 False -1000.0 -500.0 False + PDL dB 0.5 0.6 0.6 4.0 4.0 False 0.0 0.0 False + OSNR dB 36.5 36.5 36.5 99.0 99.0 False 0.0 0.0 False + eSNR dB 30.5 30.5 30.5 99.0 99.0 False 0.0 0.0 False + CFO MHz 54.0 70.0 121.0 3800.0 3800.0 False -3800.0 -3800.0 False + DGD ps 5.37 5.56 5.81 7.0 7.0 False 0.0 0.0 False + SOPMD ps^2 0.0 0.0 0.0 655.35 655.35 False 0.0 0.0 False + SOP ROC krad/s 1.0 1.0 2.0 N/A N/A N/A N/A N/A N/A + Pre-FEC BER N/A 4.58E-04 4.66E-04 5.76E-04 1.25E-02 1.10E-02 0.0 0.0 0.0 0.0 + Post-FEC BER N/A 0.0 0.0 0.0 1000.0 1.0 False 0.0 0.0 False + EVM % 100.0 100.0 100.0 N/A N/A N/A N/A N/A N/A + ``` + Go Back To [Beginning of the document](#) or [Beginning of this section](#basic-show-commands) ## AAA & TACACS+ @@ -1470,6 +1538,36 @@ This command is used to create new ACL tables. Go Back To [Beginning of the document](#) or [Beginning of this section](#acl) +**aclshow** + +This command is used to display: ACL rules, tables and their priority, ACL packet counters, and byte counters + +- Usage: + ``` + aclshow [-h] [-a] [-c] [-r RULES] [-t TABLES] [-v] [-vv] + ``` + +- Parameters: + - -a, --all: Show all ACL counters + - -c, --clear: Clear ACL counters statistics + - -r RULES, --rules RULES: Show only specified ACL rules and their counters + - -t TABLES, --tables TABLES: Show only specified ACL tables and their counters + - -vv, --verbose: Verbose output + +- Examples: + ``` + admin@sonic:~$ sudo aclshow -a + RULE NAME TABLE NAME PRIO PACKETS COUNT BYTES COUNT + ----------- ------------ ------ --------------- ------------- + RULE_1 DATAACL 9999 0 0 + RULE_2 DATAACL 9998 0 0 + RULE_1 SNMP_ACL 9999 N/A N/A + ``` + + If the `PACKETS COUNT` and `BYTES COUNT` fields show the `N/A` value, the ACL rule is either invalid or a `control plane` ACL; its counters are maintained in Linux rather than in the SONiC `COUNTERS_DB`, so the [iptables](https://linux.die.net/man/8/iptables) utility should be used to view them. + + If the `PACKETS COUNT` and `BYTES COUNT` fields show numeric values, it is a SONiC ACL and its counters are maintained in the SONiC `COUNTERS_DB`. 
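For scripting, the same counters can be read straight from `COUNTERS_DB`. A minimal sketch, assuming the conventional aclshow layout (an `ACL_COUNTER_RULE_MAP` hash mapping `table:rule` to a counter OID, and `COUNTERS:<oid>` entries holding the SAI packet/byte fields — the key names here are assumptions, not something this change defines):

```python
from swsscommon.swsscommon import SonicV2Connector

db = SonicV2Connector(host="127.0.0.1")
db.connect(db.COUNTERS_DB)

# Map the (table, rule) pair to its counter OID, then fetch the counters.
oid = db.get(db.COUNTERS_DB, "ACL_COUNTER_RULE_MAP", "DATAACL:RULE_1")
if oid is None:
    print("N/A")  # invalid rule, or a control plane ACL counted by iptables
else:
    counters = db.get_all(db.COUNTERS_DB, "COUNTERS:{}".format(oid))
    print(counters.get("SAI_ACL_COUNTER_ATTR_PACKETS", "N/A"),
          counters.get("SAI_ACL_COUNTER_ATTR_BYTES", "N/A"))
```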
+ ## ARP & NDP @@ -1989,6 +2087,26 @@ This command displays the routing policy that takes precedence over the other ro Exit routemap ``` +**show suppress-fib-pending** + +This command is used to show the status of the suppress-pending-FIB feature. +When enabled, BGP will not advertise routes which aren't yet offloaded. + +- Usage: + ``` + show suppress-fib-pending + ``` + +- Examples: + ``` + admin@sonic:~$ show suppress-fib-pending + Enabled + ``` + ``` + admin@sonic:~$ show suppress-fib-pending + Disabled + ``` + Go Back To [Beginning of the document](#) or [Beginning of this section](#bgp) ### BGP config commands @@ -2081,6 +2199,24 @@ This command is used to remove particular IPv4 or IPv6 BGP neighbor configuratio admin@sonic:~$ sudo config bgp remove neighbor SONIC02SPINE ``` +**config suppress-fib-pending** + +This command is used to enable or disable announcements of routes not yet installed in the HW. +Once enabled, BGP will not advertise routes which aren't yet offloaded. + +- Usage: + ``` + config suppress-fib-pending + ``` + +- Examples: + ``` + admin@sonic:~$ sudo config suppress-fib-pending enabled + ``` + ``` + admin@sonic:~$ sudo config suppress-fib-pending disabled + ``` + Go Back To [Beginning of the document](#) or [Beginning of this section](#bgp) ## Console @@ -2303,6 +2439,97 @@ Go Back To [Beginning of the document](#) or [Beginning of this section](#consol ## DHCP Relay +### DHCP Relay show commands + +This sub-section of commands is used to show the DHCP Relay IP address(es) in a VLAN interface and show the dhcpv6_relay counters of a VLAN. + +**show dhcp_relay ipv4 helper** + +This command is used to show ipv4 dhcp_relay helper. + +- Usage: + ``` + show dhcp_relay ipv4 helper + ``` + +- Example: + ``` + admin@sonic:~$ show dhcp_relay ipv4 helper + -------- --------- + Vlan1000 192.0.0.1 + 192.0.0.2 + -------- --------- + ``` + +**show dhcp_relay ipv6 destination** + +This command is used to show ipv6 dhcp_relay destination. + +- Usage: + ``` + show dhcp_relay ipv6 destination + ``` + +- Example: + ``` + admin@sonic:~$ show dhcp_relay ipv6 destination + --------  ------------ + Vlan1000  fc02:2000::1 +           fc02:2000::2 +           fc02:2000::3 +           fc02:2000::4 + --------  ------------ + ``` + +**show dhcp_relay ipv6 counters** + +This command is used to show ipv6 dhcp_relay counters. + +- Usage: + ``` + show dhcp_relay ipv6 counters + ``` + +- Example: + ``` + admin@sonic:~$ show dhcp_relay ipv6 counters +      Message Type    Vlan1000 + -------------------  ---------- +             Unknown           0 +             Solicit           0 +           Advertise           0 +             Request           5 +             Confirm           0 +               Renew           0 +              Rebind           0 +               Reply           0 +             Release           0 +             Decline           0 +         Reconfigure           0 + Information-Request           0 +       Relay-Forward           0 +         Relay-Reply           0 +           Malformed           0 + ``` + +### DHCP Relay clear commands + +This sub-section of commands is used to clear the DHCP Relay counters. + +**sonic-clear dhcp_relay ipv6 counter** + +This command is used to clear ipv6 dhcp_relay counters. 
+ +- Usage: + ``` + sonic-clear dhcp_relay ipv6 counter [-i ] + ``` + +- Example: + ``` + admin@sonic:~$ sudo sonic-clear dhcp_relay ipv6 counters + ``` + ### DHCP Relay config commands This sub-section of commands is used to add or remove the DHCP Relay Destination IP address(es) for a VLAN interface. @@ -2349,6 +2576,74 @@ This command is used to delete a configured DHCP Relay Destination IP address or Restarting DHCP relay service... ``` +**config dhcp_relay ipv4 helper add/del** + +This command is used to add or delete IPv4 DHCP Relay helper addresses to a VLAN. Note that more than one DHCP Relay helper address can be configured on a VLAN interface. + +- Usage: + ``` + config dhcp_relay ipv4 helper (add | del) + ``` + +- Example: + ``` + admin@sonic:~$ sudo config dhcp_relay ipv4 helper add 1000 7.7.7.7 + Added DHCP relay address [7.7.7.7] to Vlan1000 + Restarting DHCP relay service... + ``` + + ``` + admin@sonic:~$ sudo config dhcp_relay ipv4 helper add 1000 7.7.7.7 1.1.1.1 + Added DHCP relay address [7.7.7.7, 1.1.1.1] to Vlan1000 + Restarting DHCP relay service... + ``` + + ``` + admin@sonic:~$ sudo config dhcp_relay ipv4 helper del 1000 7.7.7.7 + Removed DHCP relay address [7.7.7.7] from Vlan1000 + Restarting DHCP relay service... + ``` + + ``` + admin@sonic:~$ sudo config dhcp_relay ipv4 helper del 1000 7.7.7.7 1.1.1.1 + Removed DHCP relay address [7.7.7.7, 1.1.1.1] from Vlan1000 + Restarting DHCP relay service... + ``` + +**config dhcp_relay ipv6 destination add/del** + +This command is used to add or delete IPv6 DHCP Relay destination addresses to a VLAN. Note that more than one DHCP Relay Destination address can be configured on a VLAN interface. + +- Usage: + ``` + config dhcp_relay ipv6 destination (add | del) + ``` + +- Example: + ``` + admin@sonic:~$ sudo config dhcp_relay ipv6 destination add 1000 fc02:2000::1 + Added DHCP relay address [fc02:2000::1] to Vlan1000 + Restarting DHCP relay service... + ``` + + ``` + admin@sonic:~$ sudo config dhcp_relay ipv6 destination add 1000 fc02:2000::1 fc02:2000::2 + Added DHCP relay address [fc02:2000::1, fc02:2000::2] to Vlan1000 + Restarting DHCP relay service... + ``` + + ``` + admin@sonic:~$ sudo config dhcp_relay ipv6 destination del 1000 fc02:2000::1 + Removed DHCP relay address [fc02:2000::1] from Vlan1000 + Restarting DHCP relay service... + ``` + + ``` + admin@sonic:~$ sudo config dhcp_relay ipv6 destination del 1000 fc02:2000::1 fc02:2000::2 + Removed DHCP relay address [fc02:2000::1, fc02:2000::2] from Vlan1000 + Restarting DHCP relay service... + ``` + Go Back To [Beginning of the document](#) or [Beginning of this section](#dhcp-relay) @@ -5861,6 +6156,154 @@ This command displays the eye info in mv(milli volts) of the port user provides 632 622 ``` + +**show muxcable health ** + +This command displays the hardware health of the Y-cable connected to the muxcable. The resultant table or json output will show the current hardware health of the cable as Ok, Not Ok, Unknown. + +- Usage: + ``` + show muxcable health [OPTIONS] [PORT] + ``` + +While displaying the muxcable health, users need to provide the following fields + +- PORT required - Port name should be a valid port +- --json optional - -- option to display the result in json format. By default output will be in tabular format. 
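For scripting against these muxcable commands, the `--json` form is easier to consume than the table; a short sketch (the CLI invocation is the documented one, the parsing is illustrative):

```python
import json
import subprocess

# Run the documented '--json' form and pick out the single "health"
# field, which is "Ok", "Not Ok", or "Unknown".
proc = subprocess.run(["show", "muxcable", "health", "Ethernet4", "--json"],
                      capture_output=True, text=True, check=True)
print(json.loads(proc.stdout).get("health", "Unknown"))
```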
+- Ok means the cable is healthy + +In order to determine whether the health of the cable is Ok, +the following are checked: +- the vendor name can be read correctly +- the FW is correctly loaded for SerDes by reading the appropriate register value +- the Counters for UART are displaying healthy status + i.e. error counters and retry counters for UART or internal xfer protocols are below a threshold + + +- Example: + ``` + admin@sonic:~$ show muxcable health Ethernet4 + PORT ATTR HEALTH + --------- ------ -------- + Ethernet4 health Ok + ``` + ``` + admin@sonic:~$ show muxcable health Ethernet4 --json + ``` + ```json + { + "health": "Ok" + } + + ``` + + +**show muxcable queueinfo ** + +This command displays the queue info of the Y-cable connected to the muxcable. The resultant table or json output will show the queue info in terms of transactions for the UART stats, in particular those currently relevant to the MCU of the cable. + +- Usage: + ``` + show muxcable queueinfo [OPTIONS] [PORT] + ``` + +While displaying the muxcable queueinfo, users need to provide the following fields + +- PORT required - Port name should be a valid port +- --json optional - -- option to display the result in json format. By default output will be in tabular format. + +The result will be displayed like this; each item in the dictionary shows the health of the corresponding attribute in the queue: +``` +"{'VSC': {'r_ptr': 0, 'w_ptr': 0, 'total_count': 0, 'free_count': 0, 'buff_addr': 0, 'node_size': 0}, 'UART1': {'r_ptr': 0, 'w_ptr': 0, 'total_count': 0, 'free_count': 0, 'buff_addr': 209870, 'node_size': 1682183}, 'UART2': {'r_ptr': 13262, 'w_ptr': 3, 'total_count': 0, 'free_count': 0, 'buff_addr': 12, 'node_size': 0} +``` + +- Example: + ``` + admin@sonic:~$ show muxcable queueinfo Ethernet0 + PORT ATTR VALUE + --------- ---------- ------- + Ethernet0 uart_stat1 2 + Ethernet0 uart_stat2 1 + ``` + ``` + admin@sonic:~$ show muxcable queueinfo Ethernet4 --json + ``` + ```json + { + "uart_stat1": "2", + "uart_stat2": "1", + + } + ``` + +**show muxcable operationtime ** + +This command displays the operation time of the Y-cable connected to the muxcable. The resultant table or json output will show the current operation time of the cable in `hh:mm:ss` format. Operation time is the time elapsed since the cable was last reseated or reset, displayed in the format specified + +- Usage: + ``` + show muxcable operationtime [OPTIONS] [PORT] + ``` + +While displaying the muxcable operationtime, users need to provide the following fields + +- PORT required - Port name should be a valid port +- --json optional - -- option to display the result in json format. By default output will be in tabular format. + + +- Example: + ``` + admin@sonic:~$ show muxcable operationtime Ethernet4 + PORT ATTR OPERATION_TIME + --------- -------------- ---------------- + Ethernet4 operation_time 00:22:22 + ``` + ``` + admin@sonic:~$ show muxcable operationtime Ethernet4 --json + ``` + ```json + { + "operation_time": "00:22:22" + } + ``` + +**show muxcable resetcause ** + +This command displays the resetcause of the Y-cable connected to the muxcable. The resultant table or json output will show the most recent reset cause of the cable as a string. + +- Usage: + ``` + show muxcable resetcause [OPTIONS] [PORT] + ``` + +While displaying the muxcable resetcause, users need to provide the following fields + +- PORT required - Port name should be a valid port +- --json optional - -- option to display the result in json format. 
By default output will be in tabular format. + +The reset cause only records the NIC MCU reset status. The NIC MCU automatically broadcasts the reset cause status to each ToR; the corresponding values returned +display cold reset if the last reset was a cold reset (e.g. HW/SW reset, power-cycling the cable, or rebooting the NIC server) +display warm reset if the last reset was a warm reset (e.g. sudo config mux firmware activate....) +The value is persistent, with no clear on read. + +- Example: + ``` + admin@sonic:~$ show muxcable resetcause Ethernet4 + PORT ATTR RESETCAUSE + --------- ----------- ------------ + Ethernet4 reset_cause warm reset + ``` + ``` + admin@sonic:~$ show muxcable resetcause Ethernet4 --json + ``` + ```json + { + "reset_cause": "warm reset" + } + ``` + + ### Muxcable Config commands @@ -9380,7 +9823,7 @@ Go Back To [Beginning of the document](#) or [Beginning of this section](#System **show vlan brief** -This command displays brief information about all the vlans configured in the device. It displays the vlan ID, IP address (if configured for the vlan), list of vlan member ports, whether the port is tagged or in untagged mode, the DHCP Helper Address, and the proxy ARP status +This command displays brief information about all the vlans configured in the device. It displays the vlan ID, IP address (if configured for the vlan), list of vlan member ports, whether the port is tagged or in untagged mode, the DHCPv4 Helper Address, and the proxy ARP status - Usage: ``` diff --git a/generic_config_updater/change_applier.py b/generic_config_updater/change_applier.py index f5a365d59f..d0818172f8 100644 --- a/generic_config_updater/change_applier.py +++ b/generic_config_updater/change_applier.py @@ -9,7 +9,7 @@ from .gu_common import genericUpdaterLogging SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) -UPDATER_CONF_FILE = f"{SCRIPT_DIR}/generic_config_updater.conf.json" +UPDATER_CONF_FILE = f"{SCRIPT_DIR}/gcu_services_validator.conf.json" logger = genericUpdaterLogging.get_logger(title="Change Applier") print_to_console = False diff --git a/generic_config_updater/field_operation_validators.py b/generic_config_updater/field_operation_validators.py new file mode 100644 index 0000000000..befd4b8749 --- /dev/null +++ b/generic_config_updater/field_operation_validators.py @@ -0,0 +1,26 @@ +from sonic_py_common import device_info +import re + +def rdma_config_update_validator(): + version_info = device_info.get_sonic_version_info() + build_version = version_info.get('build_version') + asic_type = version_info.get('asic_type') + + if (asic_type != 'mellanox' and asic_type != 'broadcom' and asic_type != 'cisco-8000'): + return False + + version_substrings = build_version.split('.') + branch_version = None + + for substring in version_substrings: + if substring.isdigit() and re.match(r'^\d{8}$', substring): + branch_version = substring + break + + if branch_version is None: + return False + + if asic_type == 'cisco-8000': + return branch_version >= "20201200" + else: + return branch_version >= "20181100" diff --git a/generic_config_updater/gcu_field_operation_validators.conf.json b/generic_config_updater/gcu_field_operation_validators.conf.json new file mode 100644 index 0000000000..f12a14d8eb --- /dev/null +++ b/generic_config_updater/gcu_field_operation_validators.conf.json @@ -0,0 +1,20 @@ +{ + "README": [ + "field_operation_validators provides module & method name as ", + " <module name>.<method name>.", + "NOTE: module name could have '.'", + " ", + "The last element separated by '.' 
is considered as ", + "method name", + "", + "e.g. 'show.acl.test_acl'", + "", + "field_operation_validators for a given table defines a list of validators that all must pass for modification to the specified field and table to be allowed", + "" + ], + "tables": { + "PFC_WD": { + "field_operation_validators": [ "generic_config_updater.field_operation_validators.rdma_config_update_validator" ] + } + } +} diff --git a/generic_config_updater/generic_config_updater.conf.json b/generic_config_updater/gcu_services_validator.conf.json similarity index 91% rename from generic_config_updater/generic_config_updater.conf.json rename to generic_config_updater/gcu_services_validator.conf.json index 907b5a6863..852b587286 100644 --- a/generic_config_updater/generic_config_updater.conf.json +++ b/generic_config_updater/gcu_services_validator.conf.json @@ -48,6 +48,9 @@ }, "NTP_SERVER": { "services_to_validate": [ "ntp-service" ] + }, + "VLAN_INTERFACE": { + "services_to_validate": [ "vlanintf-service" ] } }, "services": { @@ -71,6 +74,9 @@ }, "ntp-service": { "validate_commands": [ "generic_config_updater.services_validator.ntp_validator" ] + }, + "vlanintf-service": { + "validate_commands": [ "generic_config_updater.services_validator.vlanintf_validator" ] } } } diff --git a/generic_config_updater/generic_updater.py b/generic_config_updater/generic_updater.py index aa41853204..f9aab82336 100644 --- a/generic_config_updater/generic_updater.py +++ b/generic_config_updater/generic_updater.py @@ -36,7 +36,7 @@ def __init__(self, self.patchsorter = patchsorter if patchsorter is not None else StrictPatchSorter(self.config_wrapper, self.patch_wrapper) self.changeapplier = changeapplier if changeapplier is not None else ChangeApplier() - def apply(self, patch): + def apply(self, patch, sort=True): self.logger.log_notice("Patch application starting.") self.logger.log_notice(f"Patch: {patch}") @@ -63,11 +63,17 @@ def apply(self, patch): f"Table{'s' if len(empty_tables) != 1 else ''}: {empty_tables_txt}") # Generate list of changes to apply - self.logger.log_notice("Sorting patch updates.") - changes = self.patchsorter.sort(patch) + if sort: + self.logger.log_notice("Sorting patch updates.") + changes = self.patchsorter.sort(patch) + else: + self.logger.log_notice("Converting patch to JsonChange.") + changes = [JsonChange(jsonpatch.JsonPatch([element])) for element in patch] + changes_len = len(changes) - self.logger.log_notice(f"The patch was sorted into {changes_len} " \ - f"change{'s' if changes_len != 1 else ''}{':' if changes_len > 0 else '.'}") + self.logger.log_notice(f"The patch was converted into {changes_len} " \ + f"change{'s' if changes_len != 1 else ''}{':' if changes_len > 0 else '.'}") + for change in changes: self.logger.log_notice(f" * {change}") @@ -284,7 +290,7 @@ def __init__(self, self.config_lock = config_lock - def apply(self, patch): + def apply(self, patch, sort=True): self.execute_write_action(Decorator.apply, self, patch) def replace(self, target_config): @@ -407,9 +413,9 @@ def __init__(self, generic_update_factory=None): self.generic_update_factory = \ generic_update_factory if generic_update_factory is not None else GenericUpdateFactory() - def apply_patch(self, patch, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths): + def apply_patch(self, patch, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths, sort=True): patch_applier = self.generic_update_factory.create_patch_applier(config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths) 
- patch_applier.apply(patch) + patch_applier.apply(patch, sort) def replace(self, target_config, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths): config_replacer = self.generic_update_factory.create_config_replacer(config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths) diff --git a/generic_config_updater/gu_common.py b/generic_config_updater/gu_common.py index 743253ccaf..e8c66fcbbe 100644 --- a/generic_config_updater/gu_common.py +++ b/generic_config_updater/gu_common.py @@ -1,5 +1,6 @@ import json import jsonpatch +import importlib from jsonpointer import JsonPointer import sonic_yang import sonic_yang_ext @@ -7,11 +8,14 @@ import yang as ly import copy import re +import os from sonic_py_common import logger from enum import Enum YANG_DIR = "/usr/local/yang-models" SYSLOG_IDENTIFIER = "GenericConfigUpdater" +SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) +GCU_FIELD_OP_CONF_FILE = f"{SCRIPT_DIR}/gcu_field_operation_validators.conf.json" class GenericConfigUpdaterError(Exception): pass @@ -149,14 +153,51 @@ def validate_field_operation(self, old_config, target_config): patch = jsonpatch.JsonPatch.from_diff(old_config, target_config) # illegal_operations_to_fields_map['remove'] yields a list of fields for which `remove` is an illegal operation - illegal_operations_to_fields_map = {'add':[], - 'replace': [], - 'remove': ['/PFC_WD/GLOBAL/POLL_INTERVAL', '/PFC_WD/GLOBAL']} + illegal_operations_to_fields_map = { + 'add':[], + 'replace': [], + 'remove': [ + '/PFC_WD/GLOBAL/POLL_INTERVAL', + '/PFC_WD/GLOBAL', + '/LOOPBACK_INTERFACE/Loopback0'] + } for operation, field_list in illegal_operations_to_fields_map.items(): for field in field_list: if any(op['op'] == operation and field == op['path'] for op in patch): raise IllegalPatchOperationError("Given patch operation is invalid. Operation: {} is illegal on field: {}".format(operation, field)) + def _invoke_validating_function(cmd): + # cmd is in the format <module name>.<method name> + method_name = cmd.split(".")[-1] + module_name = ".".join(cmd.split(".")[0:-1]) + if module_name != "generic_config_updater.field_operation_validators" or "validator" not in method_name: + raise GenericConfigUpdaterError("Attempting to call invalid method {} in module {}. Module must be generic_config_updater.field_operation_validators, and method must be a defined validator".format(method_name, module_name)) + module = importlib.import_module(module_name, package=None) + method_to_call = getattr(module, method_name) + return method_to_call() + + if os.path.exists(GCU_FIELD_OP_CONF_FILE): + with open(GCU_FIELD_OP_CONF_FILE, "r") as s: + gcu_field_operation_conf = json.load(s) + else: + raise GenericConfigUpdaterError("GCU field operation validators config file not found") + + for element in patch: + path = element["path"] + match = re.search(r'\/([^\/]+)(\/|$)', path) # This matches the table name in the path, e.g. if path is /PFC_WD/GLOBAL, the match would be PFC_WD + if match is not None: + table = match.group(1) + else: + raise GenericConfigUpdaterError("Invalid jsonpatch path: {}".format(path)) + validating_functions = set() + tables = gcu_field_operation_conf["tables"] + validating_functions.update(tables.get(table, {}).get("field_operation_validators", [])) + + for function in validating_functions: + if not _invoke_validating_function(function): + raise IllegalPatchOperationError("Modification of {} table is illegal - validating function {} returned False".format(table, function)) + + def validate_lanes(self, config_db): if "PORT" not in config_db: return True, None 
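Putting the new pieces together: validate_field_operation() extracts the table name from each patch path, looks up that table's validator list in gcu_field_operation_validators.conf.json, and imports and calls each validator by its dotted name. A hedged standalone walk-through of that lookup (`validators_for_patch_path` is a hypothetical helper, not part of this change):

```python
import importlib
import json
import re

def validators_for_patch_path(conf_path, patch_path):
    # Same table-name extraction as above: /PFC_WD/GLOBAL -> PFC_WD
    with open(conf_path) as f:
        conf = json.load(f)
    table = re.search(r'/([^/]+)(/|$)', patch_path).group(1)
    return conf["tables"].get(table, {}).get("field_operation_validators", [])

for cmd in validators_for_patch_path(
        "generic_config_updater/gcu_field_operation_validators.conf.json",
        "/PFC_WD/GLOBAL/POLL_INTERVAL"):
    module_name, method_name = cmd.rsplit(".", 1)
    ok = getattr(importlib.import_module(module_name), method_name)()
    print(cmd, "->", ok)  # a False result blocks the patch with IllegalPatchOperationError
```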
diff --git a/generic_config_updater/services_validator.py b/generic_config_updater/services_validator.py index 44a9e095eb..497cb4ee74 100644 --- a/generic_config_updater/services_validator.py +++ b/generic_config_updater/services_validator.py @@ -101,3 +101,24 @@ def caclmgrd_validator(old_config, upd_config, keys): def ntp_validator(old_config, upd_config, keys): return _service_restart("ntp-config") + +def vlanintf_validator(old_config, upd_config, keys): + old_vlan_intf = old_config.get("VLAN_INTERFACE", {}) + upd_vlan_intf = upd_config.get("VLAN_INTERFACE", {}) + + # Get the tuple with format (iface, iface_ip) then check deleted tuple + # Example: + # old_keys = [("Vlan1000", "192.168.0.1")] + # upd_keys = [("Vlan1000", "192.168.0.2")] + old_keys = [ tuple(key.split("|")) + for key in old_vlan_intf if len(key.split("|")) == 2 ] + upd_keys = [ tuple(key.split("|")) + for key in upd_vlan_intf if len(key.split("|")) == 2 ] + + deleted_keys = list(set(old_keys) - set(upd_keys)) + for key in deleted_keys: + iface, iface_ip = key + rc = os.system(f"ip neigh flush dev {iface} {iface_ip}") + if rc: + return False + return True diff --git a/scripts/db_migrator.py b/scripts/db_migrator.py index 6c8ef21b6f..f1bc404d47 100755 --- a/scripts/db_migrator.py +++ b/scripts/db_migrator.py @@ -45,7 +45,7 @@ def __init__(self, namespace, socket=None): none-zero values. build: sequentially increase within a minor version domain. 
""" - self.CURRENT_VERSION = 'version_4_0_0' + self.CURRENT_VERSION = 'version_4_0_2' self.TABLE_NAME = 'VERSIONS' self.TABLE_KEY = 'DATABASE' @@ -167,7 +167,7 @@ def migrate_mgmt_ports_on_s6100(self): self.appDB.set(self.appDB.APPL_DB, 'PORT_TABLE:PortConfigDone', 'count', str(total_count)) log.log_notice("Port count updated from {} to : {}".format(portCount, self.appDB.get(self.appDB.APPL_DB, 'PORT_TABLE:PortConfigDone', 'count'))) return True - + def migrate_intf_table(self): ''' Migrate all data from existing INTF table in APP DB during warmboot with IP Prefix @@ -265,7 +265,6 @@ def migrate_config_db_buffer_tables_for_dynamic_calculation(self, speed_list, ca @append_item_method - a function which is called to append an item to the list of pending commit items any update to buffer configuration will be pended and won't be applied until all configuration is checked and aligns with the default one - 1. Buffer profiles for lossless PGs in BUFFER_PROFILE table will be removed if their names have the convention of pg_lossless___profile where the speed and cable_length belongs speed_list and cable_len_list respectively @@ -349,7 +348,6 @@ def prepare_dynamic_buffer_for_warm_reboot(self, buffer_pools=None, buffer_profi ''' This is the very first warm reboot of buffermgrd (dynamic) if the system reboot from old image by warm-reboot In this case steps need to be taken to get buffermgrd prepared (for warm reboot) - During warm reboot, buffer tables should be installed in the first place. However, it isn't able to achieve that when system is warm-rebooted from an old image without dynamic buffer supported, because the buffer info wasn't in the APPL_DB in the old image. @@ -357,7 +355,6 @@ def prepare_dynamic_buffer_for_warm_reboot(self, buffer_pools=None, buffer_profi During warm-reboot, db_migrator adjusts buffer info in CONFIG_DB by removing some fields according to requirement from dynamic buffer calculation. The buffer info before that adjustment needs to be copied to APPL_DB. - 1. set WARM_RESTART_TABLE|buffermgrd as {restore_count: 0} 2. 
         2. Copy the following tables from CONFIG_DB into APPL_DB in case of warm reboot
            The separator in fields that reference objects in other table needs to be updated from '|' to ':'
@@ -367,7 +364,6 @@ def prepare_dynamic_buffer_for_warm_reboot(self, buffer_pools=None, buffer_profi
            - BUFFER_QUEUE, separator updated for field 'profile
            - BUFFER_PORT_INGRESS_PROFILE_LIST, separator updated for field 'profile_list'
            - BUFFER_PORT_EGRESS_PROFILE_LIST, separator updated for field 'profile_list'
-
         '''
         warmreboot_state = self.stateDB.get(self.stateDB.STATE_DB, 'WARM_RESTART_ENABLE_TABLE|system', 'enable')
         mmu_size = self.stateDB.get(self.stateDB.STATE_DB, 'BUFFER_MAX_PARAM_TABLE|global', 'mmu_size')
@@ -572,13 +568,85 @@ def migrate_port_qos_map_global(self):
         dscp_to_tc_map_table_names = self.configDB.get_keys('DSCP_TO_TC_MAP')
         if len(dscp_to_tc_map_table_names) == 0:
             return
-
+
         qos_maps = self.configDB.get_table('PORT_QOS_MAP')
         if 'global' not in qos_maps.keys():
             # We are unlikely to have more than 1 DSCP_TO_TC_MAP in previous versions
             self.configDB.set_entry('PORT_QOS_MAP', 'global', {"dscp_to_tc_map": dscp_to_tc_map_table_names[0]})
             log.log_info("Created entry for global DSCP_TO_TC_MAP {}".format(dscp_to_tc_map_table_names[0]))

+    def migrate_feature_timer(self):
+        '''
+        Migrate feature 'has_timer' field to 'delayed'
+        '''
+        feature_table = self.configDB.get_table('FEATURE')
+        for feature, config in feature_table.items():
+            state = config.get('has_timer')
+            if state is not None:
+                config['delayed'] = state
+                config.pop('has_timer')
+                self.configDB.set_entry('FEATURE', feature, config)
+
+    def migrate_route_table(self):
+        """
+        Handle route table migration. Migrations handled:
+        1. The 'weight' attr in the ROUTE object was introduced 202205 onwards.
+           Upgrading from an older branch to 202205 requires this 'weight' attr to be added explicitly.
+        """
+        route_table = self.appDB.get_table("ROUTE_TABLE")
+        for route_prefix, route_attr in route_table.items():
+            if 'weight' not in route_attr:
+                if isinstance(route_prefix, tuple):
+                    # IPv6 route_prefix is returned from db as tuple
+                    route_key = "ROUTE_TABLE:" + ":".join(route_prefix)
+                else:
+                    # IPv4 route_prefix is returned from db as str
+                    route_key = "ROUTE_TABLE:{}".format(route_prefix)
+                self.appDB.set(self.appDB.APPL_DB, route_key, 'weight', '')
+
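A worked example of the key construction above. APPL_DB keys use ':' as the separator, so an IPv6 prefix (which itself contains ':') comes back from get_table() split into a tuple, while an IPv4 prefix stays a plain string; the prefix values below are illustrative:

```python
def route_key(route_prefix):
    # Rejoining the tuple with ':' reconstructs the original IPv6 prefix.
    if isinstance(route_prefix, tuple):
        return "ROUTE_TABLE:" + ":".join(route_prefix)
    return "ROUTE_TABLE:{}".format(route_prefix)

print(route_key("10.1.0.0/24"))          # ROUTE_TABLE:10.1.0.0/24
print(route_key(("fc00", "", "/64")))    # ROUTE_TABLE:fc00::/64
```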
+ """ + device_neighbor_metadata = self.configDB.get_table("DEVICE_NEIGHBOR_METADATA") + device_neighbors = self.configDB.get_table("DEVICE_NEIGHBOR") + cable_length = self.configDB.get_table("CABLE_LENGTH") + port_table = self.configDB.get_table("PORT") + edgezone_aggregator_devs = [] + edgezone_aggregator_intfs = [] + EDGEZONE_AGG_CABLE_LENGTH = "40m" + for k, v in device_neighbor_metadata.items(): + if v.get("type") == "EdgeZoneAggregator": + edgezone_aggregator_devs.append(k) + + if len(edgezone_aggregator_devs) == 0: + return + + for intf, intf_info in device_neighbors.items(): + if intf_info.get("name") in edgezone_aggregator_devs: + edgezone_aggregator_intfs.append(intf) + + cable_length_table = self.configDB.get_entry("CABLE_LENGTH", "AZURE") + first_cable_intf = next(iter(cable_length_table)) + first_cable_length = cable_length_table[first_cable_intf] + index = 0 + + for intf, length in cable_length_table.items(): + index += 1 + if first_cable_length != length: + break + elif index == len(cable_length_table): + # All cable lengths are the same, nothing to modify + return + + for intf, length in cable_length_table.items(): + if intf in edgezone_aggregator_intfs: + # Set new cable length values + self.configDB.set(self.configDB.CONFIG_DB, "CABLE_LENGTH|AZURE", intf, EDGEZONE_AGG_CABLE_LENGTH) + def version_unknown(self): """ version_unknown tracks all SONiC versions that doesn't have a version @@ -740,7 +808,7 @@ def version_2_0_1(self): def version_2_0_2(self): """ Version 2_0_2 - This is the latest version for 202012 branch + This is the latest version for 202012 branch """ log.log_info('Handling version_2_0_2') self.set_version('version_3_0_0') @@ -822,14 +890,18 @@ def version_3_0_5(self): keys = self.loglevelDB.keys(self.loglevelDB.LOGLEVEL_DB, "*") if keys is not None: for key in keys: - if key != "JINJA2_CACHE": - fvs = self.loglevelDB.get_all(self.loglevelDB.LOGLEVEL_DB, key) - component = key.split(":")[1] - loglevel = fvs[loglevel_field] - logoutput = fvs[logoutput_field] - self.configDB.set(self.configDB.CONFIG_DB, '{}|{}'.format(table_name, component), loglevel_field, loglevel) - self.configDB.set(self.configDB.CONFIG_DB, '{}|{}'.format(table_name, component), logoutput_field, logoutput) - self.loglevelDB.delete(self.loglevelDB.LOGLEVEL_DB, key) + try: + if key != "JINJA2_CACHE": + fvs = self.loglevelDB.get_all(self.loglevelDB.LOGLEVEL_DB, key) + component = key.split(":")[1] + loglevel = fvs[loglevel_field] + logoutput = fvs[logoutput_field] + self.configDB.set(self.configDB.CONFIG_DB, '{}|{}'.format(table_name, component), loglevel_field, loglevel) + self.configDB.set(self.configDB.CONFIG_DB, '{}|{}'.format(table_name, component), logoutput_field, logoutput) + except Exception as err: + log.log_warning('Error occured during LOGLEVEL_DB migration for {}. Ignoring key {}'.format(err, key)) + finally: + self.loglevelDB.delete(self.loglevelDB.LOGLEVEL_DB, key) self.set_version('version_3_0_6') return 'version_3_0_6' @@ -846,9 +918,36 @@ def version_3_0_6(self): def version_4_0_0(self): """ Version 4_0_0. - This is the latest version for master branch """ log.log_info('Handling version_4_0_0') + # Update state-db fast-reboot entry to enable if set to enable fast-reboot finalizer when using upgrade with fast-reboot + # since upgrading from previous version FAST_REBOOT table will be deleted when the timer will expire. + # reading FAST_REBOOT table can't be done with stateDB.get as it uses hget behind the scenes and the table structure is + # not using hash and won't work. 
+ # FAST_REBOOT table exists only if fast-reboot was triggered. + keys = self.stateDB.keys(self.stateDB.STATE_DB, "FAST_REBOOT|system") + if keys: + enable_state = 'true' + else: + enable_state = 'false' + self.stateDB.set(self.stateDB.STATE_DB, 'FAST_RESTART_ENABLE_TABLE|system', 'enable', enable_state) + self.set_version('version_4_0_1') + return 'version_4_0_1' + + def version_4_0_1(self): + """ + Version 4_0_1. + """ + self.migrate_feature_timer() + self.set_version('version_4_0_2') + return 'version_4_0_2' + + def version_4_0_2(self): + """ + Version 4_0_2. + This is the latest version for master branch + """ + log.log_info('Handling version_4_0_2') return None def get_version(self): @@ -890,11 +989,16 @@ def common_migration_ops(self): # removed together with calling to migrate_copp_table function. if self.asic_type != "mellanox": self.migrate_copp_table() - if self.asic_type == "broadcom" and 'Force10-S6100' in self.hwsku: + if self.asic_type == "broadcom" and 'Force10-S6100' in self.hwsku: self.migrate_mgmt_ports_on_s6100() else: log.log_notice("Asic Type: {}, Hwsku: {}".format(self.asic_type, self.hwsku)) + self.migrate_route_table() + + # Updating edgezone aggregator cable length config for T0 devices + self.update_edgezone_aggregator_config() + def migrate(self): version = self.get_version() log.log_info('Upgrading from version ' + version) diff --git a/scripts/fabricstat b/scripts/fabricstat index e5c7d09f3b..fcc0983ade 100755 --- a/scripts/fabricstat +++ b/scripts/fabricstat @@ -14,7 +14,7 @@ import utilities_common.multi_asic as multi_asic_util # mock the redis for unit test purposes # try: - if os.environ["UTILITIES_UNIT_TESTING"] == "2": + if os.environ["UTILITIES_UNIT_TESTING"] == "1" or os.environ["UTILITIES_UNIT_TESTING"] == "2": modules_path = os.path.join(os.path.dirname(__file__), "..") tests_path = os.path.join(modules_path, "tests") sys.path.insert(0, modules_path) @@ -122,17 +122,21 @@ class FabricPortStat(FabricStat): table = [] header = None - asic = multi_asic.get_asic_id_from_name(self.namespace) + # Default ASIC name is 0 for single-ASIC systems. For multi-ASIC systems, + # derive name from namespace. + asic_name = '0' + if self.namespace: + asic_name = multi_asic.get_asic_id_from_name(self.namespace) for key, data in cnstat_dict.items(): port_id = key[len(PORT_NAME_PREFIX):] if errors_only: header = portstat_header_errors_only - table.append((asic, port_id, self.get_port_state(key), + table.append((asic_name, port_id, self.get_port_state(key), data.crc, data.fec_correctable, data.fec_uncorrectable, data.symbol_err)) else: header = portstat_header_all - table.append((asic, port_id, self.get_port_state(key), + table.append((asic_name, port_id, self.get_port_state(key), data.in_cell, data.in_octet, data.out_cell, data.out_octet, data.crc, data.fec_correctable, data.fec_uncorrectable, data.symbol_err)) @@ -168,11 +172,15 @@ class FabricQueueStat(FabricStat): return table = [] - asic = multi_asic.get_asic_id_from_name(self.namespace) + # Default ASIC name is 0 for single-ASIC systems. For multi-ASIC systems, + # derive name from namespace. 
+        asic_name = '0'
+        if self.namespace:
+            asic_name = multi_asic.get_asic_id_from_name(self.namespace)
         for key, data in cnstat_dict.items():
             port_name, queue_id = key.split(':')
             port_id = port_name[len(PORT_NAME_PREFIX):]
-            table.append((asic, port_id, self.get_port_state(port_name), queue_id,
+            table.append((asic_name, port_id, self.get_port_state(port_name), queue_id,
                         data.curbyte, data.curlevel, data.watermarklevel))

         print(tabulate(table, queuestat_header, tablefmt='simple', stralign='right'))
diff --git a/scripts/fast-reboot b/scripts/fast-reboot
index bfdc191b78..eea97e792b 100755
--- a/scripts/fast-reboot
+++ b/scripts/fast-reboot
@@ -23,6 +23,7 @@ PLATFORM=$(sonic-cfggen -H -v DEVICE_METADATA.localhost.platform)
 PLATFORM_PLUGIN="${REBOOT_TYPE}_plugin"
 LOG_SSD_HEALTH="/usr/local/bin/log_ssd_health"
 PLATFORM_FWUTIL_AU_REBOOT_HANDLE="platform_fw_au_reboot_handle"
+PLATFORM_REBOOT_PRE_CHECK="platform_reboot_pre_check"
 SSD_FW_UPDATE="ssd-fw-upgrade"
 SSD_FW_UPDATE_BOOT_OPTION=no
 TAG_LATEST=yes
@@ -148,7 +149,7 @@ function clear_boot()
 #clear_fast_boot
     if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then
-        sonic-db-cli STATE_DB DEL "FAST_REBOOT|system" &>/dev/null || /bin/true
+        sonic-db-cli STATE_DB HSET "FAST_RESTART_ENABLE_TABLE|system" "enable" "false" &>/dev/null || /bin/true
     fi
 }
@@ -179,6 +180,10 @@ function initialize_pre_shutdown()

 function request_pre_shutdown()
 {
+    if [ -x ${DEVPATH}/${PLATFORM}/${PLATFORM_REBOOT_PRE_CHECK} ]; then
+        debug "Requesting platform reboot pre-check ..."
+        ${DEVPATH}/${PLATFORM}/${PLATFORM_REBOOT_PRE_CHECK} ${REBOOT_TYPE}
+    fi
     debug "Requesting pre-shutdown ..."
     STATE=$(timeout 5s docker exec syncd /usr/bin/syncd_request_shutdown --pre &> /dev/null; if [[ $? == 124 ]]; then echo "timed out"; fi)
     if [[ x"${STATE}" == x"timed out" ]]; then
@@ -265,7 +270,7 @@ function backup_database()
             and not string.match(k, 'WARM_RESTART_ENABLE_TABLE|') \
             and not string.match(k, 'VXLAN_TUNNEL_TABLE|') \
             and not string.match(k, 'BUFFER_MAX_PARAM_TABLE|') \
-            and not string.match(k, 'FAST_REBOOT|') then
+            and not string.match(k, 'FAST_RESTART_ENABLE_TABLE|') then
                 redis.call('del', k)
             end
         end
@@ -442,9 +447,20 @@ function load_aboot_secureboot_kernel() {
     swipath=$next_image kexec=true loadonly=true ENV_EXTRA_CMDLINE="$BOOT_OPTIONS" bash -
 }

+function invoke_kexec() {
+    /sbin/kexec -l "$KERNEL_IMAGE" --initrd="$INITRD" --append="$BOOT_OPTIONS" $@
+}
+
 function load_kernel() {
     # Load kernel into the memory
-    /sbin/kexec -a -l "$KERNEL_IMAGE" --initrd="$INITRD" --append="$BOOT_OPTIONS"
+    invoke_kexec -a
+}
+
+function load_kernel_secure() {
+    # Load kernel into the memory securely.
+    # The -s flag enforces that the newly loaded kernel (vmlinuz) is signed and verified.
+    # The -a flag is not used, because it can fall back to an old kexec load path that
+    # does not support Secure Boot verification.
+    invoke_kexec -s
 }
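For illustration only: the script chooses between the two loaders further below by counting `Secure Boot: enabled` in `bootctl status` output. A hedged Python sketch of the same probe (assuming that output format; bootctl may be absent on non-UEFI systems, which is treated as "not enabled"):

```python
import subprocess

def secure_boot_enabled() -> bool:
    # Equivalent of: bootctl status 2>/dev/null | grep -c "Secure Boot: enabled"
    try:
        out = subprocess.run(["bootctl", "status"],
                             capture_output=True, text=True).stdout
    except FileNotFoundError:
        return False  # no bootctl -> treat as Secure Boot disabled
    return "Secure Boot: enabled" in out

# -s enforces kexec signature verification; -a is the legacy fallback path.
mode = "-s" if secure_boot_enabled() else "-a"
print(f"would invoke kexec with {mode}")
```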

 function unload_kernel()
@@ -511,6 +527,7 @@
 then
     exit "${EXIT_FAILURE}"
 fi

+debug "Starting $REBOOT_TYPE"

 # re-run the script in background mode with detaching from the terminal session
 if [[ x"${DETACH}" == x"yes" && x"${ALREADY_DETACHED}" == x"" ]]; then
@@ -532,7 +549,8 @@ case "$REBOOT_TYPE" in
         check_warm_restart_in_progress
         BOOT_TYPE_ARG=$REBOOT_TYPE
         trap clear_boot EXIT HUP INT QUIT TERM KILL ABRT ALRM
-        sonic-db-cli STATE_DB SET "FAST_REBOOT|system" "1" "EX" "210" &>/dev/null
+        sonic-db-cli STATE_DB HSET "FAST_RESTART_ENABLE_TABLE|system" "enable" "true" &>/dev/null
+        config warm_restart teamsyncd_timer 1
         config warm_restart enable system
         ;;
     "warm-reboot")
@@ -601,7 +619,16 @@ fi
 if is_secureboot && grep -q aboot_machine= /host/machine.conf; then
     load_aboot_secureboot_kernel
 else
-    load_kernel
+    # check if secure boot is enabled in UEFI
+    CHECK_SECURE_UPGRADE_ENABLED=0
+    SECURE_UPGRADE_ENABLED=$(bootctl status 2>/dev/null | grep -c "Secure Boot: enabled") || CHECK_SECURE_UPGRADE_ENABLED=$?
+    if [[ CHECK_SECURE_UPGRADE_ENABLED -ne 0 ]]; then
+        debug "Loading kernel without secure boot"
+        load_kernel
+    else
+        debug "Loading kernel with secure boot"
+        load_kernel_secure
+    fi
 fi

 init_warm_reboot_states
@@ -641,14 +668,15 @@ fi
 set +e
 if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then
-    # Clear all routes except of default routes for faster reconciliation time.
-    sonic-db-cli APPL_DB eval "
-        for _, k in ipairs(redis.call('keys', '*')) do
-            if string.match(k, 'ROUTE_TABLE:') and not string.match(k, 'ROUTE_TABLE:0.0.0.0/0') and not string.match(k, 'ROUTE_TABLE:::/0') then \
-                redis.call('del', k)
-            end
-        end
-    " 0 > /dev/null
+    # Clear all routes except default and connected routes for faster reconciliation time.
+    debug "Clearing routes..."
+    FILTER_ROUTES=0
+    python /usr/local/bin/fast-reboot-filter-routes.py || FILTER_ROUTES=$?
+    if [[ FILTER_ROUTES -ne 0 ]]; then
+        error "Preserving connected and default routes failed."
+    else
+        debug "Routes deleted from APP-DB, default and connected routes preserved."
+    fi
 fi

 # disable trap-handlers which were set before
@@ -788,6 +816,17 @@ fi

 # Reboot: explicitly call Linux native reboot under sbin
 debug "Rebooting with ${REBOOT_METHOD} to ${NEXT_SONIC_IMAGE} ..."
+
+LOGS_ON_TMPFS=0
+df --output=fstype /var/log* | grep -c 'tmpfs' || LOGS_ON_TMPFS=$?
+if [[ LOGS_ON_TMPFS -eq 0 ]]; then
+    debug "Backup shutdown logs to /host/logs_before_reboot"
+    mkdir -p /host/logs_before_reboot || /bin/true
+    # maxdepth 2: find files within 2 nested directories (e.g.
/var/log/ and /var/log/swss/) + # mmin 30: find files written in past 30 minutes + find /var/log -maxdepth 2 -mmin -30 -type f | xargs -I {} cp {} /host/logs_before_reboot/ || /bin/true +fi + exec ${REBOOT_METHOD} # Should never reach here diff --git a/scripts/fast-reboot-filter-routes.py b/scripts/fast-reboot-filter-routes.py new file mode 100755 index 0000000000..9328b79ed2 --- /dev/null +++ b/scripts/fast-reboot-filter-routes.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python3 + +import json +import sys +import os +import utilities_common.cli as clicommon +import syslog +import traceback +import click +from swsscommon.swsscommon import ConfigDBConnector + +ROUTE_IDX = 1 + +def get_connected_routes(): + cmd = 'sudo vtysh -c "show ip route connected json"' + connected_routes = [] + try: + output, ret = clicommon.run_command(cmd, return_cmd=True) + if ret != 0: + click.echo(output.rstrip('\n')) + sys.exit(ret) + if output is not None: + route_info = json.loads(output) + for route in route_info.keys(): + connected_routes.append(route) + except Exception: + ctx = click.get_current_context() + ctx.fail("Unable to get connected routes from bgp") + + return connected_routes + +def get_route(db, route): + key = 'ROUTE_TABLE:%s' % route + val = db.keys(db.APPL_DB, key) + if val: + return val[0].split(":", 1)[ROUTE_IDX] + else: + return None + +def generate_default_route_entries(): + db = ConfigDBConnector() + db.db_connect(db.APPL_DB) + + default_routes = [] + + ipv4_default = get_route(db, '0.0.0.0/0') + if ipv4_default is not None: + default_routes.append(ipv4_default) + + ipv6_default = get_route(db, '::/0') + if ipv6_default is not None: + default_routes.append(ipv6_default) + + return default_routes + +def filter_routes(preserved_routes): + db = ConfigDBConnector() + db.db_connect(db.APPL_DB) + + key = 'ROUTE_TABLE:*' + routes = db.keys(db.APPL_DB, key) + + for route in routes: + stripped_route = route.split(":", 1)[ROUTE_IDX] + if stripped_route not in preserved_routes: + db.delete(db.APPL_DB, route) + +def main(): + default_routes = generate_default_route_entries() + connected_routes = get_connected_routes() + preserved_routes = set(default_routes + connected_routes) + filter_routes(preserved_routes) + return 0 + +if __name__ == '__main__': + res = 0 + try: + syslog.openlog('fast-reboot-filter-routes') + res = main() + except KeyboardInterrupt: + syslog.syslog(syslog.LOG_NOTICE, "SIGINT received. Quitting") + res = 1 + except Exception as e: + syslog.syslog(syslog.LOG_ERR, "Got an exception %s: Traceback: %s" % (str(e), traceback.format_exc())) + res = 2 + finally: + syslog.closelog() + try: + sys.exit(res) + except SystemExit: + os._exit(res) diff --git a/scripts/generate_dump b/scripts/generate_dump index 7587e9fa3a..74ceede065 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -106,7 +106,6 @@ save_bcmcmd() { local filename=$2 local filepath="${LOGDIR}/$filename" local do_gzip=${3:-false} - local tarpath="${BASE}/dump/$filename" local timeout_cmd="timeout --foreground ${TIMEOUT_MIN}m" local cmd=$(escape_quotes "$cmd") if [ ! -d $LOGDIR ]; then @@ -141,12 +140,9 @@ save_bcmcmd() { fi if $do_gzip; then gzip ${filepath} 2>/dev/null - tarpath="${tarpath}.gz" filepath="${filepath}.gz" fi - ($TAR $V -rhf $TARFILE -C $DUMPDIR "$tarpath" \ - || abort "${EXT_TAR_FAILED}" "tar append operation failed. 
Aborting to prevent data loss.") \ - && $RM $V -rf "$filepath" + end_t=$(date +%s%3N) echo "[ save_bcmcmd:$cmd ] : $(($end_t-$start_t)) msec" >> $TECHSUPPORT_TIME_INFO } @@ -180,7 +176,7 @@ save_bcmcmd_all_ns() { } ############################################################################### -# Runs a comamnd and saves its output to the incrementally built tar. +# Runs a comamnd and saves its output to the file. # Command gets timedout if it runs for more than TIMEOUT_MIN minutes. # Globals: # LOGDIR @@ -208,7 +204,6 @@ save_cmd() { local filename=$2 local filepath="${LOGDIR}/$filename" local do_gzip=${3:-false} - local tarpath="${BASE}/dump/$filename" local timeout_cmd="timeout --foreground ${TIMEOUT_MIN}m" local cleanup_method=${4:-dummy_cleanup_method} local redirect='&>' @@ -230,7 +225,6 @@ save_cmd() { # as one argument, e.g. vtysh -c "COMMAND HERE" needs to have # "COMMAND HERE" bunched together as 1 arg to vtysh -c if $do_gzip; then - tarpath="${tarpath}.gz" filepath="${filepath}.gz" # cleanup_method will run in a sub-shell, need declare it first local cmds="$cleanup_method_declration; $cmd $redirect_eval | $cleanup_method | gzip -c > '${filepath}'" @@ -260,13 +254,34 @@ save_cmd() { fi fi - ($TAR $V -rhf $TARFILE -C $DUMPDIR "$tarpath" \ - || abort "${EXT_TAR_FAILED}" "tar append operation failed. Aborting to prevent data loss.") \ - && $RM $V -rf "$filepath" end_t=$(date +%s%3N) echo "[ save_cmd:$cmd ] : $(($end_t-$start_t)) msec" >> $TECHSUPPORT_TIME_INFO } +############################################################################### +# Save all collected data to tar archive. +# Globals: +# DUMPDIR +# TAR +# TARFILE +# V +# BASE +# Arguments: +# None +# Returns: +# None +############################################################################### +save_to_tar() { + trap 'handle_error $? $LINENO' ERR + local start_t=$(date +%s%3N) + local end_t=0 + + $TAR $V -rhf $TARFILE -C $DUMPDIR "$BASE" + + end_t=$(date +%s%3N) + echo "[ save_to_tar ] : $(($end_t-$start_t)) msec" >> $TECHSUPPORT_TIME_INFO +} + ############################################################################### # Dummy cleanup method. # Globals: @@ -407,7 +422,7 @@ get_vtysh_namespace() { ############################################################################### # Runs a vtysh command in all namesapces for a multi ASIC platform, and in # default (host) namespace in single ASIC platforms. Saves its output to the -# incrementally built tar. +# file. # Globals: # None # Arguments: @@ -437,7 +452,7 @@ save_vtysh() { } ############################################################################### -# Runs an ip command and saves its output to the incrementally built tar. +# Runs an ip command and saves its output to the file. # Globals: # None # Arguments: @@ -456,7 +471,7 @@ save_ip() { } ############################################################################### -# Runs a bridge command and saves its output to the incrementally built tar. +# Runs a bridge command and saves its output to the file. 
# Globals: # None # Arguments: @@ -506,12 +521,12 @@ save_bgp_neighbor() { local asic_id=${1:-""} local ns=$(get_vtysh_namespace $asic_id) - neighbor_list_v4=$(${timeout_cmd} bash -c "vtysh $ns -c 'show ip bgp neighbors' | grep 'BGP neighbor is' | awk -F '[, ]' '{print \$4}'") + neighbor_list_v4=$(${timeout_cmd} bash -c "vtysh $ns -c 'show ip bgp neighbors' | grep 'BGP neighbor is' | awk -F '[, ]' '{print \$4}' | awk /\\\./") for word in $neighbor_list_v4; do save_cmd "vtysh $ns -c \"show ip bgp neighbors $word advertised-routes\"" "ip.bgp.neighbor.$word.adv$asic_id" save_cmd "vtysh $ns -c \"show ip bgp neighbors $word routes\"" "ip.bgp.neighbor.$word.rcv$asic_id" done - neighbor_list_v6=$(${timeout_cmd} bash -c "vtysh $ns -c 'show bgp ipv6 neighbors' | grep 'BGP neighbor is' | awk -F '[, ]' '{print \$4}' | fgrep ':'") + neighbor_list_v6=$(${timeout_cmd} bash -c "vtysh $ns -c 'show bgp ipv6 neighbors' | grep 'BGP neighbor is' | awk -F '[, ]' '{print \$4}' | awk /:/") for word in $neighbor_list_v6; do save_cmd "vtysh $ns -c \"show bgp ipv6 neighbors $word advertised-routes\"" "ipv6.bgp.neighbor.$word.adv$asic_id" save_cmd "vtysh $ns -c \"show bgp ipv6 neighbors $word routes\"" "ipv6.bgp.neighbor.$word.rcv$asic_id" @@ -771,8 +786,8 @@ save_proc() { ( [ -e $f ] && $CP $V -r $f $TARDIR/proc ) || echo "$f not found" > $TARDIR/$f fi done - $TAR $V -rhf $TARFILE -C $DUMPDIR --mode=+rw $BASE/proc - $RM $V -rf $TARDIR/proc + + chmod ugo+rw -R $DUMPDIR/$BASE/proc } ############################################################################### @@ -823,9 +838,7 @@ save_proc_stats() { ( $CP $V -r $stats_file $TARDIR/proc_stats ) || echo "$stats_file error" > $TARDIR/$stats_file fi - $TAR $V -rhf $TARFILE -C $DUMPDIR --mode=+rw $BASE/proc_stats - $RM $V -rf $TARDIR/proc_stats - $RM -rf $stats_file + chmod ugo+rw -R $DUMPDIR/$BASE/proc_stats } ############################################################################### @@ -907,6 +920,7 @@ save_platform_info() { # filename: the full path of the file to save # base_dir: the directory in $TARDIR/ to stage the file # do_gzip: (OPTIONAL) true or false. Should the output be gzipped +# do_tar_append: (OPTIONAL) true or false. Should the output be added to final tar archive # Returns: # None ############################################################################### @@ -919,7 +933,7 @@ save_file() { local gz_path="$TARDIR/$supp_dir/$(basename $orig_path)" local tar_path="${BASE}/$supp_dir/$(basename $orig_path)" local do_gzip=${3:-true} - local do_tar_append=${4:-true} + local do_tar_append=${4:-false} if [ ! -d "$TARDIR/$supp_dir" ]; then $MKDIR $V -p "$TARDIR/$supp_dir" fi @@ -945,6 +959,7 @@ save_file() { || abort "${EXT_PROCFS_SAVE_FAILED}" "tar append operation failed. 
Aborting to prevent data loss.") \ && $RM $V -f "$gz_path" fi + end_t=$(date +%s%3N) echo "[ save_file:$orig_path] : $(($end_t-$start_t)) msec" >> $TECHSUPPORT_TIME_INFO } @@ -1053,21 +1068,26 @@ collect_mellanox() { local sai_dump_folder="/tmp/saisdkdump" local sai_dump_filename="${sai_dump_folder}/sai_sdk_dump_$(date +"%m_%d_%Y_%I_%M_%p")" - ${CMD_PREFIX}docker exec syncd mkdir -p $sai_dump_folder - ${CMD_PREFIX}docker exec syncd saisdkdump -f $sai_dump_filename + if [[ "$( docker container inspect -f '{{.State.Running}}' syncd )" == "true" ]]; then + if [[ x"$(sonic-db-cli APPL_DB EXISTS PORT_TABLE:PortInitDone)" == x"1" ]]; then + # Run saisdkdump only after the create_switch is known to be successful + ${CMD_PREFIX}docker exec syncd mkdir -p $sai_dump_folder + ${CMD_PREFIX}docker exec syncd saisdkdump -f $sai_dump_filename - if [ $? != 0 ]; then - echo "Failed to collect saisdkdump." - fi + if [ $? != 0 ]; then + echo "Failed to collect saisdkdump." + fi - copy_from_docker syncd $sai_dump_folder $sai_dump_folder - echo "$sai_dump_folder" - for file in `ls $sai_dump_folder`; do - save_file ${sai_dump_folder}/${file} sai_sdk_dump true - done + copy_from_docker syncd $sai_dump_folder $sai_dump_folder + echo "$sai_dump_folder" + for file in `ls $sai_dump_folder`; do + save_file ${sai_dump_folder}/${file} sai_sdk_dump true + done - ${CMD_PREFIX}rm -rf $sai_dump_folder - ${CMD_PREFIX}docker exec syncd rm -rf $sai_dump_folder + ${CMD_PREFIX}rm -rf $sai_dump_folder + ${CMD_PREFIX}docker exec syncd rm -rf $sai_dump_folder + fi + fi # run 'hw-management-generate-dump.sh' script and save the result file HW_DUMP_FILE=/usr/bin/hw-management-generate-dump.sh @@ -1129,9 +1149,9 @@ collect_mellanox_dfw_dumps() { ${CMD_PREFIX}save_symlink ${file} sai_sdk_dump log else if [ ! 
-z "${file##*.gz}" ]; then - ${CMD_PREFIX}save_file ${file} sai_sdk_dump true + ${CMD_PREFIX}save_file ${file} sai_sdk_dump true true else - ${CMD_PREFIX}save_file ${file} sai_sdk_dump false + ${CMD_PREFIX}save_file ${file} sai_sdk_dump false true fi fi done @@ -1206,39 +1226,47 @@ collect_broadcom() { fi if [ "$bcm_family" == "broadcom-dnx" ]; then - save_bcmcmd_all_ns "\"l2 show\"" "l2.summary" - save_bcmcmd_all_ns "\"field group list\"" "fpgroup.list.summary" - total_fp_groups=34 - for (( fp_grp=0; fp_grp<$total_fp_groups; fp_grp++ )) - do - save_bcmcmd_all_ns "\"field group info group=$fp_grp\"" "fpgroup$fp_grp.info.summary" - done - save_bcmcmd_all_ns "\"dbal table dump table=IPV4_UNICAST_PRIVATE_LPM_FORWARD\"" "l3.ipv4.lpm.summary" - save_bcmcmd_all_ns "\"dbal table dump table=IPV6_UNICAST_PRIVATE_LPM_FORWARD\"" "l3.ipv6.lpm.summary" - save_bcmcmd_all_ns "\"dbal table dump table=IPV4_UNICAST_PRIVATE_HOST\"" "l3.ipv4.host.summary" - save_bcmcmd_all_ns "\"dbal table dump table=IPV6_UNICAST_PRIVATE_HOST\"" "l3.ipv6.host.summary" - save_bcmcmd_all_ns "\"dbal table dump table=SUPER_FEC_1ST_HIERARCHY\"" "l3.egress.fec.summary" - save_bcmcmd_all_ns "\"dbal table dump table=ECMP_TABLE\"" "ecmp.table.summary" - save_bcmcmd_all_ns "\"dbal table dump table=ECMP_GROUP_PROFILE_TABLE\"" "ecmp.group.summary" - save_bcmcmd_all_ns "\"dbal table dump table=ING_VSI_INFO_DB\"" "ing.vsi.summary" - save_bcmcmd_all_ns "\"dbal table dump table=L3_MY_MAC_DA_PREFIXES\"" "l3.mymac.summary" - save_bcmcmd_all_ns "\"dbal table dump table=INGRESS_VLAN_MEMBERSHIP\"" "ing.vlan.summary" - save_bcmcmd_all_ns "\"dbal table dump table=LOCAL_SBC_IN_LIF_MATCH_INFO_SW\"" "sbc.inlif.summary" - save_bcmcmd_all_ns "\"dbal table dump table=SNIF_COMMAND_TABLE\"" "snif.command.summary" - save_bcmcmd_all_ns "\"port mgmt dump full\"" "port.mgmt.summary" - save_bcmcmd_all_ns "\"tm lag\"" "tm.lag.summary" - save_bcmcmd_all_ns "\"pp info fec\"" "pp.fec.summary" - save_bcmcmd_all_ns "\"nif sts\"" "nif.sts.summary" + supervisor=0 + PLATFORM_ENV_CONF=/usr/share/sonic/device/${platform}/platform_env.conf + if [ -f "$PLATFORM_ENV_CONF" ]; then + source $PLATFORM_ENV_CONF + fi + if [[ x"$supervisor" != x"1" ]]; then + + save_bcmcmd_all_ns "\"l2 show\"" "l2.summary" + save_bcmcmd_all_ns "\"field group list\"" "fpgroup.list.summary" + total_fp_groups=34 + for (( fp_grp=0; fp_grp<$total_fp_groups; fp_grp++ )) + do + save_bcmcmd_all_ns "\"field group info group=$fp_grp\"" "fpgroup$fp_grp.info.summary" + done + save_bcmcmd_all_ns "\"dbal table dump table=IPV4_UNICAST_PRIVATE_LPM_FORWARD\"" "l3.ipv4.lpm.summary" + save_bcmcmd_all_ns "\"dbal table dump table=IPV6_UNICAST_PRIVATE_LPM_FORWARD\"" "l3.ipv6.lpm.summary" + save_bcmcmd_all_ns "\"dbal table dump table=IPV4_UNICAST_PRIVATE_HOST\"" "l3.ipv4.host.summary" + save_bcmcmd_all_ns "\"dbal table dump table=IPV6_UNICAST_PRIVATE_HOST\"" "l3.ipv6.host.summary" + save_bcmcmd_all_ns "\"dbal table dump table=SUPER_FEC_1ST_HIERARCHY\"" "l3.egress.fec.summary" + save_bcmcmd_all_ns "\"dbal table dump table=ECMP_TABLE\"" "ecmp.table.summary" + save_bcmcmd_all_ns "\"dbal table dump table=ECMP_GROUP_PROFILE_TABLE\"" "ecmp.group.summary" + save_bcmcmd_all_ns "\"dbal table dump table=ING_VSI_INFO_DB\"" "ing.vsi.summary" + save_bcmcmd_all_ns "\"dbal table dump table=L3_MY_MAC_DA_PREFIXES\"" "l3.mymac.summary" + save_bcmcmd_all_ns "\"dbal table dump table=INGRESS_VLAN_MEMBERSHIP\"" "ing.vlan.summary" + save_bcmcmd_all_ns "\"dbal table dump table=LOCAL_SBC_IN_LIF_MATCH_INFO_SW\"" "sbc.inlif.summary" + 
save_bcmcmd_all_ns "\"dbal table dump table=SNIF_COMMAND_TABLE\"" "snif.command.summary" + save_bcmcmd_all_ns "\"port mgmt dump full\"" "port.mgmt.summary" + save_bcmcmd_all_ns "\"tm lag\"" "tm.lag.summary" + save_bcmcmd_all_ns "\"pp info fec\"" "pp.fec.summary" + save_bcmcmd_all_ns "\"nif sts\"" "nif.sts.summary" + save_bcmcmd_all_ns "\"tm ing q map\"" "tm.ingress.qmap.summary" + save_bcmcmd_all_ns "\"tm ing vsq resources\"" "tm.ing.vsq.res.summary" + for group in {a..f} + do + save_bcmcmd_all_ns "\"tm ing vsq non g=$group\"" "tm.ing.vsq.non.group-$group.summary" + done + fi save_bcmcmd_all_ns "\"port pm info\"" "port.pm.summary" save_bcmcmd_all_ns "\"conf show\"" "conf.show.summary" save_bcmcmd_all_ns "\"show counters\"" "show.counters.summary" save_bcmcmd_all_ns "\"diag counter g\"" "diag.counter.summary" - save_bcmcmd_all_ns "\"tm ing q map\"" "tm.ingress.qmap.summary" - save_bcmcmd_all_ns "\"tm ing vsq resources\"" "tm.ing.vsq.res.summary" - for group in {a..f} - do - save_bcmcmd_all_ns "\"tm ing vsq non g=$group\"" "tm.ing.vsq.non.group-$group.summary" - done save_bcmcmd_all_ns "\"fabric connectivity\"" "fabric.connect.summary" save_bcmcmd_all_ns "\"port status\"" "port.status.summary" else @@ -1291,7 +1319,7 @@ collect_barefoot() { done for file in $(find /tmp/bf_logs -type f); do - save_file "${file}" log true true + save_file "${file}" log true done } @@ -1347,16 +1375,12 @@ save_log_files() { # don't gzip already-gzipped log files :) # do not append the individual files to the main tarball if [ -z "${file##*.gz}" ]; then - save_file $file log false false + save_file $file log false else - save_file $file log true false + save_file $file log true fi done - # Append the log folder to the main tarball - ($TAR $V -rhf $TARFILE -C $DUMPDIR ${BASE}/log \ - || abort "${EXT_TAR_FAILED}" "tar append operation failed. Aborting for safety") \ - && $RM $V -rf $TARDIR/log end_t=$(date +%s%3N) echo "[ TAR /var/log Files ] : $(($end_t-$start_t)) msec" >> $TECHSUPPORT_TIME_INFO @@ -1381,11 +1405,7 @@ save_warmboot_files() { else mkdir -p $TARDIR $CP $V -rf /host/warmboot $TARDIR - - ($TAR $V --warning=no-file-removed -rhf $TARFILE -C $DUMPDIR --mode=+rw \ - $BASE/warmboot \ - || abort "${EXT_TAR_FAILED}" "Tar append operation failed. Aborting for safety.") \ - && $RM $V -rf $TARDIR + chmod ugo+rw -R $DUMPDIR/$BASE/warmboot fi end_t=$(date +%s%3N) echo "[ Warm-boot Files ] : $(($end_t-$start_t)) msec" >> $TECHSUPPORT_TIME_INFO @@ -1429,6 +1449,38 @@ save_crash_files() { fi } +############################################################################### +# Collect SAI failure dump files under /var/log/sai_failure_dump/. These files are +# created because of the orchagent abort triggered by SAI programming failure +# Globals: +# None +# Arguments: +# None +# Returns: +# None +############################################################################### +save_sai_failure_dump(){ + for file in $(find_files "/var/log/sai_failure_dump/"); do + if $TAR -tf $TARFILE | grep $BASE/log/$(basename $file); then + # if the files are already collected under the log/ dir + # just add a symbolic link + if [ ! -z "${file##*.gz}" ]; then + # files saved under log/ are zipped with gz + file=$file.gz + fi + ${CMD_PREFIX}save_symlink ${file} sai_failure_dump log + else + if [ ! 
-z "${file##*.gz}" ]; then + ${CMD_PREFIX}save_file ${file} sai_failure_dump true true + else + ${CMD_PREFIX}save_file ${file} sai_failure_dump false true + fi + fi + #Clean up the file once its part of tech support + rm -f $file + done +} + ############################################################################### # Get number of ASICs in the platform # Globals: @@ -1547,103 +1599,124 @@ main() { /proc/pagetypeinfo /proc/partitions /proc/sched_debug /proc/slabinfo \ /proc/softirqs /proc/stat /proc/swaps /proc/sysvipc /proc/timer_list \ /proc/uptime /proc/version /proc/vmallocinfo /proc/vmstat \ - /proc/zoneinfo \ - || abort "${EXT_PROCFS_SAVE_FAILED}" "Proc saving operation failed. Aborting for safety." - save_proc_stats + /proc/zoneinfo & + save_proc_stats & end_t=$(date +%s%3N) echo "[ Capture Proc State ] : $(($end_t-$start_t)) msec" >> $TECHSUPPORT_TIME_INFO + wait # Save all the processes within each docker - save_cmd "show services" services.summary + save_cmd "show services" services.summary & # Save reboot cause information - save_cmd "show reboot-cause" reboot.cause + save_cmd "show reboot-cause" reboot.cause & + wait local asic="$(/usr/local/bin/sonic-cfggen -y /etc/sonic/sonic_version.yml -v asic_type)" + local device_type=`sonic-db-cli CONFIG_DB hget 'DEVICE_METADATA|localhost' type` # 1st counter snapshot early. Need 2 snapshots to make sense of counters trend. save_counter_snapshot $asic 1 - save_cmd "systemd-analyze blame" "systemd.analyze.blame" - save_cmd "systemd-analyze dump" "systemd.analyze.dump" - save_cmd "systemd-analyze plot" "systemd.analyze.plot.svg" - - save_platform_info - - save_cmd "show vlan brief" "vlan.summary" - save_cmd "show version" "version" - save_cmd "show platform summary" "platform.summary" - save_cmd "cat /host/machine.conf" "machine.conf" - save_cmd "cat /boot/config-$(uname -r)" "boot.conf" - save_cmd "docker stats --no-stream" "docker.stats" - - save_cmd "sensors" "sensors" - save_cmd "lspci -vvv -xx" "lspci" - save_cmd "lsusb -v" "lsusb" - save_cmd "sysctl -a" "sysctl" - - save_ip_info - save_bridge_info - - save_frr_info - save_bgp_info - save_evpn_info - - save_cmd "show interface status -d all" "interface.status" - save_cmd "show interface transceiver presence" "interface.xcvrs.presence" - save_cmd "show interface transceiver eeprom --dom" "interface.xcvrs.eeprom" - save_cmd "show ip interface -d all" "ip.interface" - - save_cmd "lldpctl" "lldpctl" + save_cmd "systemd-analyze blame" "systemd.analyze.blame" & + save_cmd "systemd-analyze dump" "systemd.analyze.dump" & + save_cmd "systemd-analyze plot" "systemd.analyze.plot.svg" & + wait + + save_platform_info & + save_cmd "show vlan brief" "vlan.summary" & + save_cmd "show version" "version" & + save_cmd "show platform summary" "platform.summary" & + wait + + save_cmd "cat /host/machine.conf" "machine.conf" & + save_cmd "cat /boot/config-$(uname -r)" "boot.conf" & + save_cmd "docker stats --no-stream" "docker.stats" & + wait + + save_cmd "sensors" "sensors" & + save_cmd "lspci -vvv -xx" "lspci" & + save_cmd "lsusb -v" "lsusb" & + save_cmd "sysctl -a" "sysctl" & + wait + + save_ip_info & + save_bridge_info & + wait + + save_frr_info & + + save_bgp_info & + save_evpn_info & + wait + + save_cmd "show interface status -d all" "interface.status" & + save_cmd "show interface transceiver presence" "interface.xcvrs.presence" & + save_cmd "show interface transceiver eeprom --dom" "interface.xcvrs.eeprom" & + save_cmd "show ip interface -d all" "ip.interface" & + wait + + save_cmd "lldpctl" 
"lldpctl" & if [[ ( "$NUM_ASICS" > 1 ) ]]; then for (( i=0; i<$NUM_ASICS; i++ )) do - save_cmd "docker exec lldp$i lldpcli show statistics" "lldp$i.statistics" - save_cmd "docker logs bgp$i" "docker.bgp$i.log" - save_cmd "docker logs swss$i" "docker.swss$i.log" + save_cmd "docker exec lldp$i lldpcli show statistics" "lldp$i.statistics" & + save_cmd "docker logs bgp$i" "docker.bgp$i.log" & + save_cmd "docker logs swss$i" "docker.swss$i.log" & done else - save_cmd "docker exec lldp lldpcli show statistics" "lldp.statistics" - save_cmd "docker logs bgp" "docker.bgp.log" - save_cmd "docker logs swss" "docker.swss.log" + save_cmd "docker exec lldp lldpcli show statistics" "lldp.statistics" & + save_cmd "docker logs bgp" "docker.bgp.log" & + save_cmd "docker logs swss" "docker.swss.log" & fi - - save_cmd "ps aux" "ps.aux" - save_cmd "top -b -n 1" "top" - save_cmd "free" "free" - save_cmd "vmstat 1 5" "vmstat" - save_cmd "vmstat -m" "vmstat.m" - save_cmd "vmstat -s" "vmstat.s" - save_cmd "mount" "mount" - save_cmd "df" "df" - save_cmd "dmesg" "dmesg" - - save_nat_info - save_bfd_info - save_redis_info + wait + + save_cmd "ps aux" "ps.aux" & + save_cmd "top -b -n 1" "top" & + save_cmd "free" "free" & + wait + save_cmd "vmstat 1 5" "vmstat" & + save_cmd "vmstat -m" "vmstat.m" & + save_cmd "vmstat -s" "vmstat.s" & + wait + save_cmd "mount" "mount" & + save_cmd "df" "df" & + save_cmd "dmesg" "dmesg" & + wait + + save_nat_info & + save_bfd_info & + wait + save_redis_info & if $DEBUG_DUMP then - save_dump_state_all_ns + save_dump_state_all_ns & fi + wait - save_cmd "docker ps -a" "docker.ps" - save_cmd "docker top pmon" "docker.pmon" + save_cmd "docker ps -a" "docker.ps" & + save_cmd "docker top pmon" "docker.pmon" & if [[ -d ${PLUGINS_DIR} ]]; then local -r dump_plugins="$(find ${PLUGINS_DIR} -type f -executable)" for plugin in $dump_plugins; do # save stdout output of plugin and gzip it - save_cmd "$plugin" "$(basename $plugin)" true + save_cmd "$plugin" "$(basename $plugin)" true & done fi - - save_cmd "dpkg -l" "dpkg" - save_cmd "who -a" "who" - save_cmd "swapon -s" "swapon" - save_cmd "hdparm -i /dev/sda" "hdparm" - save_cmd "ps -AwwL -o user,pid,lwp,ppid,nlwp,pcpu,pri,nice,vsize,rss,tty,stat,wchan:12,start,bsdtime,command" "ps.extended" - - save_saidump + wait + + save_cmd "dpkg -l" "dpkg" & + save_cmd "who -a" "who" & + save_cmd "swapon -s" "swapon" & + wait + save_cmd "hdparm -i /dev/sda" "hdparm" & + save_cmd "ps -AwwL -o user,pid,lwp,ppid,nlwp,pcpu,pri,nice,vsize,rss,tty,stat,wchan:12,start,bsdtime,command" "ps.extended" & + wait + + if [[ "$device_type" != "SpineRouter" ]]; then + save_saidump + fi if [ "$asic" = "barefoot" ]; then collect_barefoot @@ -1664,9 +1737,6 @@ main() { # 2nd counter snapshot late. Need 2 snapshots to make sense of counters trend. 
save_counter_snapshot $asic 2 - $RM $V -rf $TARDIR - $MKDIR $V -p $TARDIR - $MKDIR $V -p $LOGDIR # Copying the /etc files to a directory and then tar it $CP -r /etc $TARDIR/etc rm_list=$(find -L $TARDIR/etc -maxdepth 5 -type l) @@ -1678,34 +1748,26 @@ main() { # Remove secret from /etc files before tar remove_secret_from_etc_files $TARDIR - start_t=$(date +%s%3N) - ($TAR $V --warning=no-file-removed -rhf $TARFILE -C $DUMPDIR --mode=+rw \ - --exclude="etc/alternatives" \ - --exclude="*/etc/passwd*" \ - --exclude="*/etc/shadow*" \ - --exclude="*/etc/group*" \ - --exclude="*/etc/gshadow*" \ - --exclude="*/etc/ssh*" \ - --exclude="*get_creds*" \ - --exclude="*snmpd.conf*" \ - --exclude="*/etc/mlnx" \ - --exclude="*/etc/mft" \ - --exclude="*/etc/sonic/*.cer" \ - --exclude="*/etc/sonic/*.crt" \ - --exclude="*/etc/sonic/*.pem" \ - --exclude="*/etc/sonic/*.key" \ - --exclude="*/etc/ssl/*.pem" \ - --exclude="*/etc/ssl/certs/*" \ - --exclude="*/etc/ssl/private/*" \ - $BASE/etc \ - || abort "${EXT_TAR_FAILED}" "Tar append operation failed. Aborting for safety.") \ - && $RM $V -rf $TARDIR - end_t=$(date +%s%3N) - echo "[ TAR /etc Files ] : $(($end_t-$start_t)) msec" >> $TECHSUPPORT_TIME_INFO + # Remove unecessary files + $RM $V -rf $TARDIR/etc/alternatives $TARDIR/etc/passwd* \ + $TARDIR/etc/shadow* $TARDIR/etc/group* $TARDIR/etc/gshadow* \ + $TARDIR/etc/ssh* $TARDIR/etc/mlnx $TARDIR/etc/mft \ + $TARDIR/etc/ssl/certs/* $TARDIR/etc/ssl/private/* + rm_list=$(find -L $TARDIR -type f \( -iname \*.cer -o -iname \*.crt -o \ + -iname \*.pem -o -iname \*.key -o -iname \*snmpd.conf\* -o -iname \*get_creds\* \)) + if [ ! -z "$rm_list" ] + then + rm $rm_list + fi + + save_log_files & + save_crash_files & + save_warmboot_files & + wait + + save_to_tar - save_log_files - save_crash_files - save_warmboot_files + save_sai_failure_dump if [[ "$asic" = "mellanox" ]]; then collect_mellanox_dfw_dumps @@ -1719,7 +1781,7 @@ main() { ############################################################################### finalize() { # Save techsupport timing profile info - save_file $TECHSUPPORT_TIME_INFO log false + save_file $TECHSUPPORT_TIME_INFO log false true if $DO_COMPRESS; then RC=0 diff --git a/scripts/portstat b/scripts/portstat index 0e3b9c438c..399733f69c 100755 --- a/scripts/portstat +++ b/scripts/portstat @@ -17,6 +17,7 @@ from collections import OrderedDict, namedtuple from natsort import natsorted from tabulate import tabulate from sonic_py_common import multi_asic +from sonic_py_common import device_info # mock the redis for unit test purposes # try: @@ -329,15 +330,16 @@ class Portstat(object): format_number_with_comma(data.tx_ok), format_brate(rates.tx_bps), format_util(rates.tx_bps, port_speed), + format_number_with_comma(data.tx_err), format_number_with_comma(data.tx_drop), format_number_with_comma(data.tx_ovr))) - - if use_json: - print(table_as_json(table, header)) - else: - print(tabulate(table, header, tablefmt='simple', stralign='right')) - if multi_asic.is_multi_asic(): + if table: + if use_json: + print(table_as_json(table, header)) + else: + print(tabulate(table, header, tablefmt='simple', stralign='right')) + if multi_asic.is_multi_asic() or device_info.is_chassis() and not use_json: print("\nReminder: Please execute 'show interface counters -d all' to include internal links\n") def cnstat_intf_diff_print(self, cnstat_new_dict, cnstat_old_dict, intf_list): @@ -550,12 +552,12 @@ class Portstat(object): format_number_with_comma(cntr.tx_err), format_number_with_comma(cntr.tx_drop), 
format_number_with_comma(cntr.tx_ovr))) - - if use_json: - print(table_as_json(table, header)) - else: - print(tabulate(table, header, tablefmt='simple', stralign='right')) - if multi_asic.is_multi_asic(): + if table: + if use_json: + print(table_as_json(table, header)) + else: + print(tabulate(table, header, tablefmt='simple', stralign='right')) + if multi_asic.is_multi_asic() or device_info.is_chassis() and not use_json: print("\nReminder: Please execute 'show interface counters -d all' to include internal links\n") def main(): diff --git a/scripts/sfpshow b/scripts/sfpshow index 0787688903..ac0adf5c6e 100755 --- a/scripts/sfpshow +++ b/scripts/sfpshow @@ -17,7 +17,7 @@ from natsort import natsorted from sonic_py_common.interface import front_panel_prefix, backplane_prefix, inband_prefix, recirc_prefix from sonic_py_common import multi_asic from utilities_common.sfp_helper import covert_application_advertisement_to_output_string -from utilities_common.sfp_helper import QSFP_DATA_MAP +from utilities_common.sfp_helper import QSFP_DATA_MAP, CMIS_DATA_MAP from tabulate import tabulate # Mock the redis DB for unit test purposes @@ -202,6 +202,36 @@ QSFP_DD_DOM_VALUE_UNIT_MAP = { 'voltage': 'Volts' } +ZR_PM_HEADER = ['Parameter', 'Unit', 'Min', 'Avg', 'Max', + 'Threshold\nHigh\nAlarm', 'Threshold\nHigh\nWarning', + 'Threshold\nCrossing\nAlert-High', + 'Threshold\nLow\nAlarm', 'Threshold\nLow\nWarning', + 'Threshold\nCrossing\nAlert-Low'] + +ZR_PM_VALUE_KEY_SUFFIXS = ['min', 'avg', 'max'] + +ZR_PM_THRESHOLD_KEY_SUFFIXS = ['highalarm', + 'highwarning', 'lowalarm', 'lowwarning'] + +# mapping from parameter_name to [unit, parameter_key_prefix] +ZR_PM_INFO_MAP = { + 'Tx Power': ['dBm', 'tx_power'], + 'Rx Total Power': ['dBm', 'rx_tot_power'], + 'Rx Signal Power': ['dBm', 'rx_sig_power'], + 'CD-short link': ['ps/nm', 'cd'], + 'PDL': ['dB', 'pdl'], + 'OSNR': ['dB', 'osnr'], + 'eSNR': ['dB', 'esnr'], + 'CFO': ['MHz', 'cfo'], + 'DGD': ['ps', 'dgd'], + 'SOPMD': ['ps^2', 'sopmd'], + 'SOP ROC': ['krad/s', 'soproc'], + 'Pre-FEC BER': ['N/A', 'prefec_ber'], + 'Post-FEC BER': ['N/A', 'uncorr_frames'], + 'EVM': ['%', 'evm'] +} + +ZR_PM_NOT_APPLICABLE_STR = 'Transceiver performance monitoring not applicable' def display_invalid_intf_eeprom(intf_name): output = intf_name + ': SFP EEPROM Not detected\n' @@ -215,6 +245,10 @@ def display_invalid_intf_presence(intf_name): click.echo(tabulate(port_table, header)) +def display_invalid_intf_pm(intf_name): + output = intf_name + ': %s\n' % ZR_PM_NOT_APPLICABLE_STR + click.echo(output) + class SFPShow(object): def __init__(self, intf_name, namespace_option, dump_dom=False): super(SFPShow, self).__init__() @@ -223,6 +257,7 @@ class SFPShow(object): self.dump_dom = dump_dom self.table = [] self.intf_eeprom: Dict[str, str] = {} + self.intf_pm: Dict[str, str] = {} self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace_option) # Convert dict values to cli output string @@ -249,14 +284,16 @@ class SFPShow(object): def convert_sfp_info_to_output_string(self, sfp_info_dict): indent = ' ' * 8 output = '' + is_sfp_cmis = 'cmis_rev' in sfp_info_dict - sorted_qsfp_data_map_keys = sorted(QSFP_DATA_MAP, key=QSFP_DATA_MAP.get) - for key in sorted_qsfp_data_map_keys: + data_map = CMIS_DATA_MAP if is_sfp_cmis else QSFP_DATA_MAP + sorted_data_map_keys = sorted(data_map, key=data_map.get) + for key in sorted_data_map_keys: if key == 'cable_type': output += '{}{}: {}\n'.format(indent, sfp_info_dict['cable_type'], sfp_info_dict['cable_length']) elif key == 'cable_length': 
pass - elif key == 'specification_compliance': + elif key == 'specification_compliance' and not(is_sfp_cmis): if sfp_info_dict['type'] == "QSFP-DD Double Density 8X Pluggable Transceiver": output += '{}{}: {}\n'.format(indent, QSFP_DATA_MAP[key], sfp_info_dict[key]) else: @@ -273,7 +310,7 @@ class SFPShow(object): elif key == 'application_advertisement': output += covert_application_advertisement_to_output_string(indent, sfp_info_dict) else: - output += '{}{}: {}\n'.format(indent, QSFP_DATA_MAP[key], sfp_info_dict[key]) + output += '{}{}: {}\n'.format(indent, data_map[key], sfp_info_dict[key]) return output @@ -402,6 +439,66 @@ class SFPShow(object): return output + def convert_pm_prefix_to_threshold_prefix(self, pm_prefix): + if pm_prefix == 'uncorr_frames': + return 'postfecber' + elif pm_prefix == 'cd': + return 'cdshort' + else: + return pm_prefix.replace('_', '') + + def beautify_pm_field(self, prefix, field): + if field is None: + return 'N/A' + elif prefix in {'prefec_ber'}: + return "{:.2E}".format(field) if field != 0 else '0.0' + else: + return str(field) + + def convert_interface_sfp_pm_to_cli_output_string(self, state_db, interface_name): + sfp_pm_dict = state_db.get_all( + self.db.STATE_DB, 'TRANSCEIVER_PM|{}'.format(interface_name)) + sfp_threshold_dict = state_db.get_all( + state_db.STATE_DB, 'TRANSCEIVER_DOM_THRESHOLD|{}'.format(interface_name)) + table = [] + indent_num = 4 + indent = ' ' * indent_num + if sfp_pm_dict: + output = '\n' + indent + for param_name, (unit, prefix) in ZR_PM_INFO_MAP.items(): + row = [param_name, unit] + values = [] + for suffix in ZR_PM_VALUE_KEY_SUFFIXS: + key = prefix + '_' + suffix + values.append( + float(sfp_pm_dict[key]) if key in sfp_pm_dict else None) + + thresholds = [] + for suffix in ZR_PM_THRESHOLD_KEY_SUFFIXS: + key = self.convert_pm_prefix_to_threshold_prefix( + prefix) + suffix + thresholds.append( + float(sfp_threshold_dict[key]) if key in sfp_threshold_dict else None) + + tca_high, tca_low = None, None + if values[2] is not None and thresholds[0] is not None: + # TCA-High: max > high_alarm + tca_high = values[2] > thresholds[0] + if values[0] is not None and thresholds[2] is not None: + # TCA-low: min < low_alarm + tca_low = values[0] < thresholds[2] + + for field in values + thresholds[:2] + [tca_high] + thresholds[2:] + [tca_low]: + row.append(self.beautify_pm_field(prefix, field)) + table.append(row) + + output += tabulate(table, + ZR_PM_HEADER, disable_numparse=True).replace('\n', '\n' + indent) + output += '\n' + else: + output = ZR_PM_NOT_APPLICABLE_STR + '\n' + return output + @multi_asic_util.run_on_multi_asic def get_eeprom(self): if self.intf_name is not None: @@ -441,6 +538,19 @@ class SFPShow(object): self.table += port_table + @multi_asic_util.run_on_multi_asic + def get_pm(self): + if self.intf_name is not None: + self.intf_pm[self.intf_name] = self.convert_interface_sfp_pm_to_cli_output_string( + self.db, self.intf_name) + else: + port_table_keys = self.db.keys(self.db.APPL_DB, "PORT_TABLE:*") + for i in port_table_keys: + interface = re.split(':', i, maxsplit=1)[-1].strip() + if interface and interface.startswith(front_panel_prefix()) and not interface.startswith((backplane_prefix(), inband_prefix(), recirc_prefix())): + self.intf_pm[interface] = self.convert_interface_sfp_pm_to_cli_output_string( + self.db, interface) + def display_eeprom(self): click.echo("\n".join([f"{k}: {v}" for k, v in natsorted(self.intf_eeprom.items())])) @@ -449,6 +559,9 @@ class SFPShow(object): sorted_port_table = natsorted(self.table) 
        click.echo(tabulate(sorted_port_table, header))

+    def display_pm(self):
+        click.echo(
+            "\n".join([f"{k}: {v}" for k, v in natsorted(self.intf_pm.items())]))

 # This is our main entrypoint - the main 'sfpshow' command
@@ -476,6 +589,23 @@ def eeprom(port, dump_dom, namespace):
     sfp.get_eeprom()
     sfp.display_eeprom()

+# 'info' subcommand
+
+@cli.command()
+@click.option('-p', '--port', metavar='<port_name>', help="Display SFP EEPROM data for port <port_name> only")
+@click.option('-n', '--namespace', default=None, help="Display interfaces for specific namespace")
+def info(port, namespace):
+    if port and multi_asic.is_multi_asic() and namespace is None:
+        try:
+            namespace = multi_asic.get_namespace_for_port(port)
+        except Exception:
+            display_invalid_intf_eeprom(port)
+            sys.exit(1)
+
+    sfp = SFPShow(port, namespace)
+    sfp.get_eeprom()
+    sfp.display_eeprom()
+
 # 'presence' subcommand
@@ -494,6 +624,24 @@ def presence(port, namespace):
     sfp.get_presence()
     sfp.display_presence()

+# 'pm' subcommand
+
+
+@cli.command()
+@click.option('-p', '--port', metavar='<port_name>', help="Display SFP PM for port <port_name> only")
+@click.option('-n', '--namespace', default=None, help="Display interfaces for specific namespace")
+def pm(port, namespace):
+    if port and multi_asic.is_multi_asic() and namespace is None:
+        try:
+            namespace = multi_asic.get_namespace_for_port(port)
+        except Exception:
+            display_invalid_intf_pm(port)
+            sys.exit(1)
+
+    sfp = SFPShow(port, namespace)
+    sfp.get_pm()
+    sfp.display_pm()
+
 if __name__ == "__main__":
     cli()
diff --git a/scripts/verify_image_sign.sh b/scripts/verify_image_sign.sh
deleted file mode 100644
index d66148d597..0000000000
--- a/scripts/verify_image_sign.sh
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/bin/sh
-image_file="${1}"
-cms_sig_file="sig.cms"
-lines_for_lookup=50
-SECURE_UPGRADE_ENABLED=0
-DIR="$(dirname "$0")"
-if [ -d "/sys/firmware/efi/efivars" ]; then
-    if ! [ -n "$(ls -A /sys/firmware/efi/efivars 2>/dev/null)" ]; then
-        mount -t efivarfs none /sys/firmware/efi/efivars 2>/dev/null
-    fi
-    SECURE_UPGRADE_ENABLED=$(bootctl status 2>/dev/null | grep -c "Secure Boot: enabled")
-else
-    echo "efi not supported - exiting without verification"
-    exit 0
-fi
-
-. /usr/local/bin/verify_image_sign_common.sh
-
-if [ ${SECURE_UPGRADE_ENABLED} -eq 0 ]; then
-    echo "secure boot not enabled - exiting without image verification"
-    exit 0
-fi
-
-clean_up ()
-{
-    if [ -d ${EFI_CERTS_DIR} ]; then rm -rf ${EFI_CERTS_DIR}; fi
-    if [ -d "${TMP_DIR}" ]; then rm -rf ${TMP_DIR}; fi
-    exit $1
-}
-
-TMP_DIR=$(mktemp -d)
-DATA_FILE="${TMP_DIR}/data.bin"
-CMS_SIG_FILE="${TMP_DIR}/${cms_sig_file}"
-TAR_SIZE=$(head -n $lines_for_lookup $image_file | grep "payload_image_size=" | cut -d"=" -f2- )
-SHARCH_SIZE=$(sed '/^exit_marker$/q' $image_file | wc -c)
-SIG_PAYLOAD_SIZE=$(($TAR_SIZE + $SHARCH_SIZE ))
-# Extract cms signature from signed file
-# Add extra byte for payload
-sed -e '1,/^exit_marker$/d' $image_file | tail -c +$(( $TAR_SIZE + 1 )) > $CMS_SIG_FILE
-# Extract image from signed file
-head -c $SIG_PAYLOAD_SIZE $image_file > $DATA_FILE
-# verify signature with certificate fetched with efi tools
-EFI_CERTS_DIR=/tmp/efi_certs
-[ -d $EFI_CERTS_DIR ] && rm -rf $EFI_CERTS_DIR
-mkdir $EFI_CERTS_DIR
-efi-readvar -v db -o $EFI_CERTS_DIR/db_efi >/dev/null ||
-{
-    echo "Error: unable to read certs from efi db: $?"
-    clean_up 1
-}
-# Convert one file to der certificates
-sig-list-to-certs $EFI_CERTS_DIR/db_efi $EFI_CERTS_DIR/db >/dev/null||
-{
-    echo "Error: convert sig list to certs: $?"
-    clean_up 1
-}
-for file in $(ls $EFI_CERTS_DIR | grep "db-"); do
-    LOG=$(openssl x509 -in $EFI_CERTS_DIR/$file -inform der -out $EFI_CERTS_DIR/cert.pem 2>&1)
-    if [ $? -ne 0 ]; then
-        logger "cms_validation: $LOG"
-    fi
-    # Verify detached signature
-    LOG=$(verify_image_sign_common $image_file $DATA_FILE $CMS_SIG_FILE)
-    VALIDATION_RES=$?
-    if [ $VALIDATION_RES -eq 0 ]; then
-        RESULT="CMS Verified OK using efi keys"
-        echo "verification ok:$RESULT"
-        # No need to continue.
-        # Exit without error if any success signature verification.
-        clean_up 0
-    fi
-done
-echo "Failure: CMS signature Verification Failed: $LOG"
-
-clean_up 1
\ No newline at end of file
diff --git a/scripts/verify_image_sign_common.sh b/scripts/verify_image_sign_common.sh
deleted file mode 100755
index ec6511bc6d..0000000000
--- a/scripts/verify_image_sign_common.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-verify_image_sign_common() {
-    image_file="${1}"
-    cms_sig_file="sig.cms"
-    TMP_DIR=$(mktemp -d)
-    DATA_FILE="${2}"
-    CMS_SIG_FILE="${3}"
-
-    openssl version | awk '$2 ~ /(^0\.)|(^1\.(0\.|1\.0))/ { exit 1 }'
-    if [ $? -eq 0 ]; then
-        # for version 1.1.1 and later
-        no_check_time="-no_check_time"
-    else
-        # for version older than 1.1.1 use noattr
-        no_check_time="-noattr"
-    fi
-
-    # making sure image verification is supported
-    EFI_CERTS_DIR=/tmp/efi_certs
-    RESULT="CMS Verification Failure"
-    LOG=$(openssl cms -verify $no_check_time -noout -CAfile $EFI_CERTS_DIR/cert.pem -binary -in ${CMS_SIG_FILE} -content ${DATA_FILE} -inform pem 2>&1 > /dev/null )
-    VALIDATION_RES=$?
-    if [ $VALIDATION_RES -eq 0 ]; then
-        RESULT="CMS Verified OK"
-        if [ -d "${TMP_DIR}" ]; then rm -rf ${TMP_DIR}; fi
-        echo "verification ok:$RESULT"
-        # No need to continue.
-        # Exit without error if any success signature verification.
-        return 0
-    fi
-
-    if [ -d "${TMP_DIR}" ]; then rm -rf ${TMP_DIR}; fi
-    return 1
-}
diff --git a/setup.py b/setup.py
index 8e810ba006..6d7c5b7889 100644
--- a/setup.py
+++ b/setup.py
@@ -5,9 +5,34 @@
 # under scripts/. Consider stop using scripts and use console_scripts instead
 #
 # https://stackoverflow.com/questions/18787036/difference-between-entry-points-console-scripts-and-scripts-in-setup-py
+from __future__ import print_function
+import sys
 import fastentrypoints

 from setuptools import setup
+import pkg_resources
+from packaging import version
+
+# sonic_dependencies, version requirement only supports '>='
+sonic_dependencies = [
+    'sonic-config-engine',
+    'sonic-platform-common',
+    'sonic-py-common',
+    'sonic-yang-mgmt',
+]
+
+for package in sonic_dependencies:
+    try:
+        package_dist = pkg_resources.get_distribution(package.split(">=")[0])
+    except pkg_resources.DistributionNotFound:
+        print(package + " is not found!", file=sys.stderr)
+        print("Please build and install SONiC python wheels dependencies from sonic-buildimage", file=sys.stderr)
+        exit(1)
+    if ">=" in package:
+        if version.parse(package_dist.version) >= version.parse(package.split(">=")[1]):
+            continue
+        print(package + " version does not match!", file=sys.stderr)
+        exit(1)

 setup(
     name='sonic-utilities',
@@ -64,7 +89,7 @@
         'sonic_cli_gen',
     ],
     package_data={
-        'generic_config_updater': ['generic_config_updater.conf.json'],
+        'generic_config_updater': ['gcu_services_validator.conf.json', 'gcu_field_operation_validators.conf.json'],
         'show': ['aliases.ini'],
         'sonic_installer': ['aliases.ini'],
         'tests': ['acl_input/*',
@@ -103,6 +128,7 @@
         'scripts/fanshow',
         'scripts/fast-reboot',
         'scripts/fast-reboot-dump.py',
+        'scripts/fast-reboot-filter-routes.py',
         'scripts/fdbclear',
         'scripts/fdbshow',
         'scripts/fibshow',
@@ -155,8 +181,6 @@
         'scripts/memory_threshold_check_handler.py',
         'scripts/techsupport_cleanup.py',
         'scripts/storm_control.py',
-        'scripts/verify_image_sign.sh',
-        'scripts/verify_image_sign_common.sh',
         'scripts/check_db_integrity.py',
         'scripts/sysreadyshow'
     ],
@@ -214,16 +238,12 @@
         'prettyprinter>=0.18.0',
         'pyroute2>=0.5.14, <0.6.1',
         'requests>=2.25.0',
-        'sonic-config-engine',
-        'sonic-platform-common',
-        'sonic-py-common',
-        'sonic-yang-mgmt',
         'tabulate==0.8.2',
         'toposort==1.6',
         'www-authenticate==0.9.2',
         'xmltodict==0.12.0',
         'lazy-object-proxy',
-    ],
+    ] + sonic_dependencies,
     setup_requires= [
         'pytest-runner',
         'wheel'
diff --git a/sfputil/main.py b/sfputil/main.py
index 8992e9238a..726ed2feba 100644
--- a/sfputil/main.py
+++ b/sfputil/main.py
@@ -906,11 +906,11 @@ def fetch_error_status_from_platform_api(port):
              "        errors=['{}:{}'.format(sfp.index, 'OK (Not implemented)') for sfp in sfp_list]\n" \
              "print(errors)\n"

-    get_error_status_command = "docker exec pmon python3 -c \"{}{}{}\"".format(
-        init_chassis_code, generate_sfp_list_code, get_error_status_code)
+    get_error_status_command = ["docker", "exec", "pmon", "python3", "-c", "{}{}{}".format(
+        init_chassis_code, generate_sfp_list_code, get_error_status_code)]
     # Fetch error status from pmon docker
     try:
-        output = subprocess.check_output(get_error_status_command, shell=True, universal_newlines=True)
+        output = subprocess.check_output(get_error_status_command, universal_newlines=True)
     except subprocess.CalledProcessError as e:
         click.Abort("Error! Unable to fetch error status for SFP modules. Error code = {}, error messages: {}".format(e.returncode, e.output))
         return None
diff --git a/show/feature.py b/show/feature.py
index 547d8d1729..60ff80321a 100644
--- a/show/feature.py
+++ b/show/feature.py
@@ -156,11 +156,11 @@ def feature_autorestart(db, feature_name):
     feature_table = db.cfgdb.get_table('FEATURE')
     if feature_name:
         if feature_table and feature_name in feature_table:
-            body.append([feature_name, feature_table[feature_name]['auto_restart']])
+            body.append([feature_name, feature_table[feature_name].get('auto_restart', 'unknown')])
         else:
             click.echo("Can not find feature {}".format(feature_name))
             sys.exit(1)
     else:
         for name in natsorted(list(feature_table.keys())):
-            body.append([name, feature_table[name]['auto_restart']])
+            body.append([name, feature_table[name].get('auto_restart', 'unknown')])
     click.echo(tabulate(body, header))
diff --git a/show/interfaces/__init__.py b/show/interfaces/__init__.py
index 3e82a68e66..a7a562446b 100644
--- a/show/interfaces/__init__.py
+++ b/show/interfaces/__init__.py
@@ -446,6 +446,51 @@ def eeprom(interfacename, dump_dom, namespace, verbose):

     clicommon.run_command(cmd, display_cmd=verbose)

+@transceiver.command()
+@click.argument('interfacename', required=False)
+@click.option('--namespace', '-n', 'namespace', default=None, show_default=True,
+              type=click.Choice(multi_asic_util.multi_asic_ns_choices()), help='Namespace name or all')
+@click.option('--verbose', is_flag=True, help="Enable verbose output")
+def pm(interfacename, namespace, verbose):
+    """Show interface transceiver performance monitoring information"""
+
+    ctx = click.get_current_context()
+
+    cmd = "sfpshow pm"
+
+    if interfacename is not None:
+        interfacename = try_convert_interfacename_from_alias(
+            ctx, interfacename)
+
+        cmd += " -p {}".format(interfacename)
+
+    if namespace is not None:
+        cmd += " -n {}".format(namespace)
+
+    clicommon.run_command(cmd, display_cmd=verbose)
+
+@transceiver.command()
+@click.argument('interfacename', required=False)
+@click.option('--namespace', '-n', 'namespace', default=None, show_default=True,
+              type=click.Choice(multi_asic_util.multi_asic_ns_choices()), help='Namespace name or all')
+@click.option('--verbose', is_flag=True, help="Enable verbose output")
+def info(interfacename, namespace, verbose):
+    """Show interface transceiver information"""
+
+    ctx = click.get_current_context()
+
+    cmd = "sfpshow info"
+
+    if interfacename is not None:
+        interfacename = try_convert_interfacename_from_alias(ctx, interfacename)
+
+        cmd += " -p {}".format(interfacename)
+
+    if namespace is not None:
+        cmd += " -n {}".format(namespace)
+
+    clicommon.run_command(cmd, display_cmd=verbose)
+
 @transceiver.command()
 @click.argument('interfacename', required=False)
 @click.option('--verbose', is_flag=True, help="Enable verbose output")
diff --git a/show/main.py b/show/main.py
index 0c9fd46703..7f79cd4779 100755
--- a/show/main.py
+++ b/show/main.py
@@ -1257,14 +1257,18 @@ def table(verbose):
 @click.option('--verbose', is_flag=True, help="Enable verbose output")
 def logging(process, lines, follow, verbose):
     """Show system log"""
+    if os.path.exists("/var/log.tmpfs"):
+        log_path = "/var/log.tmpfs"
+    else:
+        log_path = "/var/log"
     if follow:
-        cmd = "sudo tail -F /var/log/syslog"
+        cmd = "sudo tail -F {}/syslog".format(log_path)
         run_command(cmd, display_cmd=verbose)
     else:
-        if os.path.isfile("/var/log/syslog.1"):
-            cmd = "sudo cat /var/log/syslog.1 /var/log/syslog"
+        if os.path.isfile("{}/syslog.1".format(log_path)):
+            cmd = "sudo cat {}/syslog.1 {}/syslog".format(log_path, log_path)
         else:
-            cmd = "sudo cat /var/log/syslog"
+            cmd = "sudo cat {}/syslog".format(log_path)

     if process is not None:
         cmd += " | grep '{}'".format(process)
@@ -1293,6 +1297,7 @@ def version(verbose):
     sys_date = datetime.now()

     click.echo("\nSONiC Software Version: SONiC.{}".format(version_info['build_version']))
+    click.echo("SONiC OS Version: {}".format(version_info['sonic_os_version']))
     click.echo("Distribution: Debian {}".format(version_info['debian_version']))
     click.echo("Kernel: {}".format(version_info['kernel_version']))
     click.echo("Build commit: {}".format(version_info['commit_id']))
@@ -1439,10 +1444,40 @@ def ports(portname, verbose):
 # 'bgp' subcommand ("show runningconfiguration bgp")
 @runningconfiguration.command()
 @click.option('--verbose', is_flag=True, help="Enable verbose output")
-def bgp(verbose):
-    """Show BGP running configuration"""
-    cmd = 'sudo {} -c "show running-config"'.format(constants.RVTYSH_COMMAND)
-    run_command(cmd, display_cmd=verbose)
+@click.option('--namespace', '-n', 'namespace', required=False, default=None, type=str, show_default=False,
+              help='Option needed for multi-asic only: provide namespace name',
+              callback=multi_asic_util.multi_asic_namespace_validation_callback)
+def bgp(namespace, verbose):
+    """
+    Show BGP running configuration
+    Note:
+        multi-asic can run 'show run bgp' to show all asics, or 'show run bgp -n <namespace>' for one namespace;
+        single-asic only runs 'show run bgp', '-n' is not available
+    """
+
+    if multi_asic.is_multi_asic():
+        if namespace and namespace not in multi_asic.get_namespace_list():
+            ctx = click.get_current_context()
+            ctx.fail("invalid value for -n/--namespace option. provide namespace from list {}".format(multi_asic.get_namespace_list()))
+    if not multi_asic.is_multi_asic() and namespace:
+        ctx = click.get_current_context()
+        ctx.fail("-n/--namespace is not available for single asic")
+
+    output = ""
+    cmd = "show running-config bgp"
+    import utilities_common.bgp_util as bgp_util
+    if multi_asic.is_multi_asic():
+        if not namespace:
+            ns_list = multi_asic.get_namespace_list()
+            for ns in ns_list:
+                output += "\n------------Showing running config bgp on {}------------\n".format(ns)
+                output += bgp_util.run_bgp_show_command(cmd, ns)
+        else:
+            output += "\n------------Showing running config bgp on {}------------\n".format(namespace)
+            output += bgp_util.run_bgp_show_command(cmd, namespace)
+    else:
+        output += bgp_util.run_bgp_show_command(cmd)
+    print(output)

 # 'interfaces' subcommand ("show runningconfiguration interfaces")
@@ -2059,6 +2094,17 @@ def peer(db, peer_ip):
     click.echo(tabulate(bfd_body, bfd_headers))

+# 'suppress-fib-pending' subcommand ("show suppress-fib-pending")
+@cli.command('suppress-fib-pending')
+@clicommon.pass_db
+def suppress_pending_fib(db):
+    """ Show the status of suppress pending FIB feature """
+
+    field_values = db.cfgdb.get_entry('DEVICE_METADATA', 'localhost')
+    state = field_values.get('suppress-fib-pending', 'disabled').title()
+    click.echo(state)
+
+
 # Load plugins and register them
 helper = util_base.UtilHelper()
 helper.load_and_register_plugins(plugins, cli)
diff --git a/show/muxcable.py b/show/muxcable.py
index d9f0a94f15..5df4bd8c2a 100644
--- a/show/muxcable.py
+++ b/show/muxcable.py
@@ -20,7 +20,7 @@

 REDIS_TIMEOUT_MSECS = 0
 SELECT_TIMEOUT = 1000
-HWMODE_MUXDIRECTION_TIMEOUT = 0.1
+HWMODE_MUXDIRECTION_TIMEOUT = 0.5
 # The empty namespace refers to linux host namespace.
 EMPTY_NAMESPACE = ''
@@ -35,6 +35,26 @@
 VENDOR_NAME = "Credo"
 VENDOR_MODEL_REGEX = re.compile(r"CAC\w{3}321P2P\w{2}MS")

+# define table names that interact with Cli
+XCVRD_GET_BER_CMD_TABLE = "XCVRD_GET_BER_CMD"
+XCVRD_GET_BER_RSP_TABLE = "XCVRD_GET_BER_RSP"
+XCVRD_GET_BER_RES_TABLE = "XCVRD_GET_BER_RES"
+XCVRD_GET_BER_CMD_ARG_TABLE = "XCVRD_GET_BER_CMD_ARG"
+
+def get_asic_index_for_port(port):
+    asic_index = None
+    if platform_sfputil is not None:
+        asic_index = platform_sfputil_helper.get_asic_id_for_logical_port(port)
+        if asic_index is None:
+            # TODO this import is only for unit test purposes, and should be removed once sonic_platform_base
+            # is fully mocked
+            import sonic_platform_base.sonic_sfp.sfputilhelper
+            asic_index = sonic_platform_base.sonic_sfp.sfputilhelper.SfpUtilHelper().get_asic_id_for_logical_port(port)
+            if asic_index is None:
+                port_name = platform_sfputil_helper.get_interface_alias(port, db)
+                click.echo("Got invalid asic index for port {}, can't retrieve mux status".format(port_name))
+                return 0
+    return asic_index

 def db_connect(db_name, namespace=EMPTY_NAMESPACE):
     return swsscommon.DBConnector(db_name, REDIS_TIMEOUT_MSECS, True, namespace)
@@ -261,9 +281,11 @@ def get_result(port, res_dict, cmd ,result, table_name):
             (status, fvp) = xcvrd_show_fw_res_tbl[asic_index].get(port)
             res_dir = dict(fvp)

+    delete_all_keys_in_db_table("STATE_DB", table_name)
+
     return res_dir

-def update_and_get_response_for_xcvr_cmd(cmd_name, rsp_name, exp_rsp, cmd_table_name, cmd_arg_table_name, rsp_table_name ,port, cmd_timeout_secs, param_dict= None, arg=None):
+def update_and_get_response_for_xcvr_cmd(cmd_name, rsp_name, exp_rsp, cmd_table_name, cmd_arg_table_name, rsp_table_name, res_table_name, port, cmd_timeout_secs, param_dict=None, arg=None):

     res_dict = {}
     state_db, appl_db = {}, {}
@@ -276,6 +298,8 @@ def update_and_get_response_for_xcvr_cmd(cmd_name, rsp_name, exp_rsp, cmd_table_

     time_start = time.time()

+    delete_all_keys_in_db_tables_helper(cmd_table_name, rsp_table_name, cmd_arg_table_name, res_table_name)
+
     sel = swsscommon.Select()
     namespaces = multi_asic.get_front_end_namespaces()
     for namespace in namespaces:
@@ -390,11 +414,26 @@ def update_and_get_response_for_xcvr_cmd(cmd_name, rsp_name, exp_rsp, cmd_table_
                     firmware_rsp_tbl[asic_index]._del(port)
                     break

-    delete_all_keys_in_db_table("STATE_DB", rsp_table_name)
+
+    delete_all_keys_in_db_tables_helper(cmd_table_name, rsp_table_name, cmd_arg_table_name, None)

     return res_dict

+def delete_all_keys_in_db_tables_helper(cmd_table_name, rsp_table_name, cmd_arg_table_name=None, res_table_name=None):
+
+    delete_all_keys_in_db_table("APPL_DB", cmd_table_name)
+    delete_all_keys_in_db_table("STATE_DB", rsp_table_name)
+    if cmd_arg_table_name is not None:
+        delete_all_keys_in_db_table("APPL_DB", cmd_arg_table_name)
+
+    if res_table_name is not None:
+        delete_all_keys_in_db_table("STATE_DB", res_table_name)
+
+    return 0
+
+
+
 # 'muxcable' command ("show muxcable")
 #
@@ -911,7 +950,7 @@ def berinfo(db, port, target, json_output):
         res_dict[1] = "unknown"

     res_dict = update_and_get_response_for_xcvr_cmd(
-        "get_ber", "status", "True", "XCVRD_GET_BER_CMD", "XCVRD_GET_BER_CMD_ARG", "XCVRD_GET_BER_RSP", port, 10, param_dict, "ber")
+        "get_ber", "status", "True", "XCVRD_GET_BER_CMD", "XCVRD_GET_BER_CMD_ARG", "XCVRD_GET_BER_RSP", None, port, 10, param_dict, "ber")

     if res_dict[1] == "True":
         result = get_result(port, res_dict, "fec" , result, "XCVRD_GET_BER_RES")
@@ -963,7 +1002,7 @@ def eyeinfo(db, port, target, json_output):
         res_dict[1] = "unknown"

     res_dict = update_and_get_response_for_xcvr_cmd(
-        "get_ber", "status", "True", "XCVRD_GET_BER_CMD", "XCVRD_GET_BER_CMD_ARG", "XCVRD_GET_BER_RSP", port, 10, param_dict, "eye")
+        "get_ber", "status", "True", "XCVRD_GET_BER_CMD", "XCVRD_GET_BER_CMD_ARG", "XCVRD_GET_BER_RSP", None, port, 10, param_dict, "eye")

     if res_dict[1] == "True":
         result = get_result(port, res_dict, "fec" , result, "XCVRD_GET_BER_RES")
@@ -1014,7 +1053,7 @@ def fecstatistics(db, port, target, json_output):
         res_dict[1] = "unknown"

     res_dict = update_and_get_response_for_xcvr_cmd(
-        "get_ber", "status", "True", "XCVRD_GET_BER_CMD", "XCVRD_GET_BER_CMD_ARG", "XCVRD_GET_BER_RSP", port, 10, param_dict, "fec_stats")
+        "get_ber", "status", "True", "XCVRD_GET_BER_CMD", "XCVRD_GET_BER_CMD_ARG", "XCVRD_GET_BER_RSP", None, port, 10, param_dict, "fec_stats")

     if res_dict[1] == "True":
         result = get_result(port, res_dict, "fec" , result, "XCVRD_GET_BER_RES")
@@ -1065,7 +1104,7 @@ def pcsstatistics(db, port, target, json_output):
         res_dict[1] = "unknown"

     res_dict = update_and_get_response_for_xcvr_cmd(
-        "get_ber", "status", "True", "XCVRD_GET_BER_CMD", "XCVRD_GET_BER_CMD_ARG", "XCVRD_GET_BER_RSP", port, 10, param_dict, "pcs_stats")
+        "get_ber", "status", "True", "XCVRD_GET_BER_CMD", "XCVRD_GET_BER_CMD_ARG", "XCVRD_GET_BER_RSP", None, port, 10, param_dict, "pcs_stats")

     if res_dict[1] == "True":
         result = get_result(port, res_dict, "fec" , result, "XCVRD_GET_BER_RES")
@@ -1114,7 +1153,7 @@ def debugdumpregisters(db, port, option, json_output):
         res_dict[1] = "unknown"

     res_dict = update_and_get_response_for_xcvr_cmd(
-        "get_ber", "status", "True", "XCVRD_GET_BER_CMD", "XCVRD_GET_BER_CMD_ARG", "XCVRD_GET_BER_RSP", port, 100, param_dict, "debug_dump")
+        "get_ber", "status", "True", "XCVRD_GET_BER_CMD", "XCVRD_GET_BER_CMD_ARG", "XCVRD_GET_BER_RSP", None, port, 100, param_dict, "debug_dump")

     if res_dict[1] == "True":
         result = get_result(port, res_dict, "fec" , result, "XCVRD_GET_BER_RES")
@@ -1158,7 +1197,7 @@ def alivecablestatus(db, port, json_output):
         res_dict[1] = "unknown"

     res_dict = update_and_get_response_for_xcvr_cmd(
-        "get_ber", "status", "True", "XCVRD_GET_BER_CMD", None, "XCVRD_GET_BER_RSP", port, 10, None, "cable_alive")
+        "get_ber", "status", "True", "XCVRD_GET_BER_CMD", None, "XCVRD_GET_BER_RSP", None, port, 10, None, "cable_alive")

     if res_dict[1] == "True":
         result = get_result(port, res_dict, "fec" , result, "XCVRD_GET_BER_RES")
@@ -1230,7 +1269,7 @@ def get_hwmode_mux_direction_port(db, port):

     if port is not None:
         res_dict = update_and_get_response_for_xcvr_cmd(
-            "state", "state", "True", "XCVRD_SHOW_HWMODE_DIR_CMD", "XCVRD_SHOW_HWMODE_DIR_RES", "XCVRD_SHOW_HWMODE_DIR_RSP", port, HWMODE_MUXDIRECTION_TIMEOUT, None, "probe")
+            "state", "state", "True", "XCVRD_SHOW_HWMODE_DIR_CMD", "XCVRD_SHOW_HWMODE_DIR_RES", "XCVRD_SHOW_HWMODE_DIR_RSP", None, port, HWMODE_MUXDIRECTION_TIMEOUT, None, "probe")

         result = get_result(port, res_dict, "muxdirection" , result, "XCVRD_SHOW_HWMODE_DIR_RES")

@@ -1239,6 +1278,67 @@ def get_hwmode_mux_direction_port(db, port):

     return res_dict

+def create_active_active_mux_direction_json_result(result, port, db):
+
+    port = platform_sfputil_helper.get_interface_alias(port, db)
+    result["HWMODE"][port] = {}
+    res_dict = get_grpc_cached_version_mux_direction_per_port(db, port)
+    result["HWMODE"][port]["Direction"] = res_dict["self_mux_direction"]
+    result["HWMODE"][port]["Presence"] = res_dict["presence"]
+    result["HWMODE"][port]["PeerDirection"] = res_dict["peer_mux_direction"]
+    result["HWMODE"][port]["ConnectivityState"] = res_dict["grpc_connection_status"]
+
+    rc = res_dict["rc"]
+
+    return rc
+
+def create_active_standby_mux_direction_json_result(result, port, db):
+
+    res_dict = get_hwmode_mux_direction_port(db, port)
+    port = platform_sfputil_helper.get_interface_alias(port, db)
+    result["HWMODE"][port] = {}
+    result["HWMODE"][port]["Direction"] = res_dict[1]
+    result["HWMODE"][port]["Presence"] = res_dict[2]
+
+    rc = res_dict[0]
+
+    return rc
+
+def create_active_active_mux_direction_result(body, port, db):
+
+    res_dict = get_grpc_cached_version_mux_direction_per_port(db, port)
+    temp_list = []
+    port = platform_sfputil_helper.get_interface_alias(port, db)
+    temp_list.append(port)
+    temp_list.append(res_dict["self_mux_direction"])
+    temp_list.append(res_dict["presence"])
+    temp_list.append(res_dict["peer_mux_direction"])
+    temp_list.append(res_dict["grpc_connection_status"])
+    body.append(temp_list)
+
+    rc = res_dict["rc"]
+
+    return rc
+
+def create_active_standby_mux_direction_result(body, port, db):
+
+    res_dict = get_hwmode_mux_direction_port(db, port)
+
+    temp_list = []
+    port = platform_sfputil_helper.get_interface_alias(port, db)
+    temp_list.append(port)
+    temp_list.append(res_dict[1])
+    temp_list.append(res_dict[2])
+    body.append(temp_list)
+
+    rc = res_dict[0]
+
+    delete_all_keys_in_db_table("APPL_DB", "XCVRD_SHOW_HWMODE_DIR_CMD")
+    delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_DIR_RSP")
+    delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_DIR_RES")
+
+    return rc
+
 @muxcable.group(cls=clicommon.AbbreviationGroup)
 def hwmode():
     """Shows the muxcable hardware information directly"""
@@ -1247,8 +1347,9 @@ def hwmode():

 @hwmode.command()
 @click.argument('port', metavar='<port_name>', required=False, default=None)
+@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, help="display the output in json format")
 @clicommon.pass_db
-def muxdirection(db, port):
+def muxdirection(db, port, json_output):
     """Shows the current direction of the muxcable {active/standby}"""

     port = platform_sfputil_helper.get_interface_name(port, db)
@@ -1256,30 +1357,42 @@ def muxdirection(db, port):
     delete_all_keys_in_db_table("APPL_DB", "XCVRD_SHOW_HWMODE_DIR_CMD")
     delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_DIR_RSP")
     delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_DIR_RES")
+    per_npu_configdb = {}

-    if port is not None:
+    namespaces = multi_asic.get_front_end_namespaces()
+    for namespace in namespaces:
+        asic_id = multi_asic.get_asic_index_from_namespace(namespace)
+
+        per_npu_configdb[asic_id] = ConfigDBConnector(use_unix_socket_path=False, namespace=namespace)
+        per_npu_configdb[asic_id].connect()

+    if port is not None:
+
+        asic_index = get_asic_index_for_port(port)
+        cable_type = get_optional_value_for_key_in_config_tbl(per_npu_configdb[asic_index], port, "cable_type", "MUX_CABLE")
         if check_port_in_mux_cable_table(port) == False:
             click.echo("Not Y-cable port")
             return CONFIG_FAIL

-        res_dict = get_hwmode_mux_direction_port(db, port)
-
-        body = []
-        temp_list = []
-        headers = ['Port', 'Direction', 'Presence']
-        port = platform_sfputil_helper.get_interface_alias(port, db)
-        temp_list.append(port)
-        temp_list.append(res_dict[1])
-        temp_list.append(res_dict[2])
-        body.append(temp_list)
-
-        rc = res_dict[0]
-        click.echo(tabulate(body, headers=headers))
+        if json_output:
+            result = {}
+            result["HWMODE"] = {}
+            if cable_type == "active-active":
+                rc = create_active_active_mux_direction_json_result(result, port, db)
+            else:
+                rc = create_active_standby_mux_direction_json_result(result, port, db)
+            click.echo("{}".format(json.dumps(result, indent=4)))

-        delete_all_keys_in_db_table("APPL_DB", "XCVRD_SHOW_HWMODE_DIR_CMD")
-        delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_DIR_RSP")
-        delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_DIR_RES")
+        else:
+            body = []
+            if cable_type == "active-active":
+                headers = ['Port', 'Direction', 'Presence', 'PeerDirection', 'ConnectivityState']
+                rc = create_active_active_mux_direction_result(body, port, db)
+            else:
+                rc = create_active_standby_mux_direction_result(body, port, db)
+                headers = ['Port', 'Direction', 'Presence']
+            click.echo(tabulate(body, headers=headers))

         return rc

@@ -1289,8 +1402,12 @@ def muxdirection(db, port):
         rc_exit = True
         body = []
+        active_active = False
+        if json_output:
+            result = {}
+            result["HWMODE"] = {}

-        for port in logical_port_list:
+        for port in natsorted(logical_port_list):

             if platform_sfputil is not None:
                 physical_port_list = platform_sfputil_helper.logical_port_name_to_physical_port_list(port)
@@ -1316,26 +1433,37 @@ def muxdirection(db, port):
             if port != logical_port_list_per_port[0]:
                 continue

-            temp_list = []
+
+            asic_index = get_asic_index_for_port(port)
+            cable_type = get_optional_value_for_key_in_config_tbl(per_npu_configdb[asic_index], port, "cable_type", "MUX_CABLE")
+            if json_output:
+                if cable_type == "active-active":
+                    rc = create_active_active_mux_direction_json_result(result, port, db)
+                    active_active = True
+                else:
+                    rc = create_active_standby_mux_direction_json_result(result, port, db)

-            res_dict = get_hwmode_mux_direction_port(db, port)
+            else:
+                if cable_type == 'active-active':
+                    rc = create_active_active_mux_direction_result(body, port, db)
+                    active_active = True
+                else:
+                    rc = create_active_standby_mux_direction_result(body, port, db)
+            if rc != 0:
+                rc_exit = False

-            port = platform_sfputil_helper.get_interface_alias(port, db)
-            temp_list.append(port)
-            temp_list.append(res_dict[1])
-            temp_list.append(res_dict[2])
-            body.append(temp_list)
-            rc = res_dict[0]
-            if rc != 0:
-                rc_exit = False
-        headers = ['Port', 'Direction', 'Presence']
-        click.echo(tabulate(body, headers=headers))
+        if json_output:
+            click.echo("{}".format(json.dumps(result, indent=4)))
+        else:
+            if active_active:
+                headers = ['Port', 'Direction', 'Presence', 'PeerDirection', 'ConnectivityState']
+            else:
+                headers = ['Port', 'Direction', 'Presence']
+            click.echo(tabulate(body, headers=headers))

-        delete_all_keys_in_db_table("APPL_DB", "XCVRD_SHOW_HWMODE_DIR_CMD")
-        delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_DIR_RSP")
-        delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_DIR_RES")

         if rc_exit == False:
             sys.exit(EXIT_FAIL)
@@ -1360,7 +1488,7 @@ def switchmode(db, port):
             res_dict[0] = CONFIG_FAIL
             res_dict[1] = "unknown"
             res_dict = update_and_get_response_for_xcvr_cmd(
-                "state", "state", "True", "XCVRD_SHOW_HWMODE_SWMODE_CMD", None, "XCVRD_SHOW_HWMODE_SWMODE_RSP", port, 1, None, "probe")
+                "state", "state", "True", "XCVRD_SHOW_HWMODE_SWMODE_CMD", None, "XCVRD_SHOW_HWMODE_SWMODE_RSP", None, port, 1, None, "probe")

             body = []
             temp_list = []
@@ -1416,7 +1544,7 @@ def switchmode(db, port):
             res_dict[0] = CONFIG_FAIL
             res_dict[1] = "unknown"
             res_dict = update_and_get_response_for_xcvr_cmd(
-                "state", "state", "True", "XCVRD_SHOW_HWMODE_SWMODE_CMD", None, "XCVRD_SHOW_HWMODE_SWMODE_RSP", port, 1, None, "probe")
+                "state", "state", "True", "XCVRD_SHOW_HWMODE_SWMODE_CMD", None, "XCVRD_SHOW_HWMODE_SWMODE_RSP", None, port, 1, None, "probe")
             port = platform_sfputil_helper.get_interface_alias(port, db)
             temp_list.append(port)
             temp_list.append(res_dict[1])
@@ -1601,7 +1729,7 @@ def version(db, port, active):
     mux_info_dict["version_self_next"] = "N/A"

     res_dict = update_and_get_response_for_xcvr_cmd(
-        "firmware_version", "status", "True", "XCVRD_SHOW_FW_CMD", None, "XCVRD_SHOW_FW_RSP", port, 20, None, "probe")
+        "firmware_version", "status", "True", "XCVRD_SHOW_FW_CMD", None, "XCVRD_SHOW_FW_RSP", None, port, 20, None, "probe")

     if res_dict[1] == "True":
         mux_info_dict = get_response_for_version(port, mux_info_dict)
@@ -1770,7 +1898,7 @@ def event_log(db, port, json_output):
         res_dict[1] = "unknown"

     res_dict = update_and_get_response_for_xcvr_cmd(
-        "show_event", "status", "True", "XCVRD_EVENT_LOG_CMD", None, "XCVRD_EVENT_LOG_RSP", port, 1000, None, "probe")
+        "show_event", "status", "True", "XCVRD_EVENT_LOG_CMD", None, "XCVRD_EVENT_LOG_RSP", None, port, 1000, None, "probe")

     if res_dict[1] == "True":
         result = get_event_logs(port, res_dict, mux_info_dict)
@@ -1812,7 +1940,7 @@ def get_fec_anlt_speed(db, port, json_output):
         res_dict[1] = "unknown"

     res_dict = update_and_get_response_for_xcvr_cmd(
-        "get_fec", "status", "True", "XCVRD_GET_FEC_CMD", None, "XCVRD_GET_FEC_RSP", port, 10, None, "probe")
+        "get_fec", "status", "True", "XCVRD_GET_FEC_CMD", None, "XCVRD_GET_FEC_RSP", None, port, 10, None, "probe")

     if res_dict[1] == "True":
         result = get_result(port, res_dict, "fec" , result, "XCVRD_GET_FEC_RES")
@@ -2003,3 +2131,345 @@ def tunnel_route(db, port, json_output):
         click.echo(tabulate(print_data, headers=headers))

     sys.exit(STATUS_SUCCESSFUL)
+
+
+def get_grpc_cached_version_mux_direction_per_port(db, port):
+
+
+    state_db = {}
+    mux_info_dict = {}
+    mux_info_full_dict = {}
+    trans_info_full_dict = {}
+    mux_info_dict["rc"] = False
+
+    # Getting all front asic namespace and corresponding config and state DB connector
+
+    namespaces = multi_asic.get_front_end_namespaces()
+    for namespace in namespaces:
+        asic_id = multi_asic.get_asic_index_from_namespace(namespace)
+        state_db[asic_id] = swsscommon.SonicV2Connector(use_unix_socket_path=False, namespace=namespace)
+        state_db[asic_id].connect(state_db[asic_id].STATE_DB)
+
+    if platform_sfputil is not None:
+        asic_index = platform_sfputil_helper.get_asic_id_for_logical_port(port)
+
+    if asic_index is None:
+        # TODO this import is only for unit test purposes, and should be removed once sonic_platform_base
+        # is fully mocked
+        import sonic_platform_base.sonic_sfp.sfputilhelper
+        asic_index = sonic_platform_base.sonic_sfp.sfputilhelper.SfpUtilHelper().get_asic_id_for_logical_port(port)
+        if asic_index is None:
+            click.echo("Got invalid asic index for port {}, can't retrieve mux cable table entries".format(port))
+            return mux_info_dict
+
+
+    mux_info_full_dict[asic_index] = state_db[asic_index].get_all(
+        state_db[asic_index].STATE_DB, 'MUX_CABLE_INFO|{}'.format(port))
+    trans_info_full_dict[asic_index] = state_db[asic_index].get_all(
+        state_db[asic_index].STATE_DB, 'TRANSCEIVER_STATUS|{}'.format(port))
+
+    res_dir = {}
+    res_dir = mux_info_full_dict[asic_index]
+    mux_info_dict["self_mux_direction"] = res_dir.get("self_mux_direction", None)
+    mux_info_dict["peer_mux_direction"] = res_dir.get("peer_mux_direction", None)
+    mux_info_dict["grpc_connection_status"] = res_dir.get("grpc_connection_status", None)
+
+    trans_dir = {}
+    trans_dir = trans_info_full_dict[asic_index]
+
+    status = trans_dir.get("status", "0")
+    presence = "True" if status == "1" else "False"
+
+    mux_info_dict["presence"] = presence
+
+    mux_info_dict["rc"] = True
+
+    return mux_info_dict
+
status == "1" else "False" + + mux_info_dict["presence"] = presence + + mux_info_dict["rc"] = True + + return mux_info_dict + + +@muxcable.group(cls=clicommon.AbbreviationGroup) +def grpc(): + """Shows the muxcable hardware information directly""" + pass + + +@grpc.command() +@click.argument('port', metavar='', required=False, default=None) +@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, help="display the output in json format") +@clicommon.pass_db +def muxdirection(db, port, json_output): + """Shows the current direction of the FPGA facing port on Tx Side {active/standy}""" + + port = platform_sfputil_helper.get_interface_name(port, db) + + + if port is not None: + + if check_port_in_mux_cable_table(port) == False: + click.echo("Not Y-cable port") + return CONFIG_FAIL + + if json_output: + result = {} + result ["HWMODE"] = {} + rc = create_active_active_mux_direction_json_result(result, port, db) + click.echo("{}".format(json.dumps(result, indent=4))) + + else: + body = [] + + headers = ['Port', 'Direction', 'Presence', 'PeerDirection', 'ConnectivityState'] + rc = create_active_active_mux_direction_result(body, port, db) + click.echo(tabulate(body, headers=headers)) + + return rc + + else: + + + logical_port_list = platform_sfputil_helper.get_logical_list() + + rc_exit = True + body = [] + if json_output: + result = {} + result ["HWMODE"] = {} + + for port in natsorted(logical_port_list): + + if platform_sfputil is not None: + physical_port_list = platform_sfputil_helper.logical_port_name_to_physical_port_list(port) + + if not isinstance(physical_port_list, list): + continue + if len(physical_port_list) != 1: + continue + + if not check_port_in_mux_cable_table(port): + continue + + physical_port = physical_port_list[0] + logical_port_list_for_physical_port = platform_sfputil_helper.get_physical_to_logical() + + logical_port_list_per_port = logical_port_list_for_physical_port.get(physical_port, None) + + """ This check is required for checking whether or not this logical port is the one which is + actually mapped to physical port and by convention it is always the first port. 
+@muxcable.command()
+@click.argument('port', metavar='<port_name>', required=True, default=None)
+@click.argument('option', required=False, default=None)
+@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, help="display the output in json format")
+@clicommon.pass_db
+def queueinfo(db, port, option, json_output):
+    """Show muxcable queue info information, pre-agreed by vendors"""
+
+    port = platform_sfputil_helper.get_interface_name(port, db)
+
+    if port is not None:
+
+        res_dict = {}
+        result = {}
+        param_dict = {}
+        param_dict["option"] = option
+
+
+        res_dict[0] = CONFIG_FAIL
+        res_dict[1] = "unknown"
+
+        res_dict = update_and_get_response_for_xcvr_cmd(
+            "get_ber", "status", "True", XCVRD_GET_BER_CMD_TABLE, XCVRD_GET_BER_CMD_ARG_TABLE, XCVRD_GET_BER_RSP_TABLE, XCVRD_GET_BER_RES_TABLE, port, 100, param_dict, "queue_info")
+
+        if res_dict[1] == "True":
+            result = get_result(port, res_dict, "fec", result, XCVRD_GET_BER_RES_TABLE)
+
+
+        port = platform_sfputil_helper.get_interface_alias(port, db)
+
+        if json_output:
+            click.echo("{}".format(json.dumps(result, indent=4)))
+        else:
+            headers = ['PORT', 'ATTR', 'VALUE']
+            res = [[port] + [key] + [val] for key, val in result.items()]
+            click.echo(tabulate(res, headers=headers))
+    else:
+        click.echo("Did not get a valid Port {} for queue info information".format(port))
+        sys.exit(CONFIG_FAIL)
+
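queueinfo above rides the generic xcvrd handshake: the CLI writes the command (plus optional arguments) into APPL_DB, waits on the STATE_DB response table, then reads the result table. A rough sketch of the write side follows; the helper name and field names are assumptions for illustration, and the authoritative flow is update_and_get_response_for_xcvr_cmd:

from swsscommon import swsscommon

def send_get_ber_cmd(appl_db, port, subcmd, arg=None):
    # Illustrative only: enqueue a BER-family request for xcvrd.
    # xcvrd acknowledges via XCVRD_GET_BER_RSP and publishes the
    # payload via XCVRD_GET_BER_RES in STATE_DB.
    if arg is not None:
        arg_tbl = swsscommon.Table(appl_db, XCVRD_GET_BER_CMD_ARG_TABLE)
        arg_tbl.set(port, swsscommon.FieldValuePairs([("arg", arg)]))
    cmd_tbl = swsscommon.Table(appl_db, XCVRD_GET_BER_CMD_TABLE)
    cmd_tbl.set(port, swsscommon.FieldValuePairs([("get_ber", subcmd)]))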
click.echo("{}".format(json.dumps(result, indent=4))) + else: + headers = ['PORT', 'ATTR', 'HEALTH'] + res = [[port]+[key] + [val] for key, val in result.items()] + click.echo(tabulate(res, headers=headers)) + else: + click.echo("Did not get a valid Port for cable health status".format(port)) + sys.exit(CONFIG_FAIL) + +@muxcable.command() +@click.argument('port', metavar='', required=True, default=None) +@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, help="display the output in json format") +@clicommon.pass_db +def resetcause(db, port, json_output): + """Show muxcable resetcause information """ + + port = platform_sfputil_helper.get_interface_name(port, db) + + """ + the reset cause only records NIC MCU reset status. The NIC MCU will automatically broadcast the reset cause status to each TORs, corresponding values returned + return 0 if the last reset is cold reset (ex. HW/SW reset, power reset the cable, or reboot the NIC server) + return 1 if the last reset is warm reset (ex. sudo config mux firmware activate....) + the value is persistent, no clear on read + """ + if port is not None: + + res_dict = {} + result = {} + + + res_dict[0] = CONFIG_FAIL + res_dict[1] = "unknown" + + res_dict = update_and_get_response_for_xcvr_cmd( + "get_ber", "status", "True", XCVRD_GET_BER_CMD_TABLE, None, XCVRD_GET_BER_RSP_TABLE, XCVRD_GET_BER_RES_TABLE, port, 10, None, "reset_cause") + + if res_dict[1] == "True": + result = get_result(port, res_dict, "fec" , result, XCVRD_GET_BER_RES_TABLE) + + + + port = platform_sfputil_helper.get_interface_alias(port, db) + + reset_cause = result.get("reset_cause", None) + + if reset_cause == "0": + result["reset_cause"] = "cold reset" + elif reset_cause == "1": + result["reset_cause"] = "warm reset" + else: + result["reset_cause"] = "Unknown" + + if json_output: + click.echo("{}".format(json.dumps(result, indent=4))) + else: + headers = ['PORT', 'ATTR', 'RESETCAUSE'] + res = [[port]+[key] + [val] for key, val in result.items()] + click.echo(tabulate(res, headers=headers)) + else: + click.echo("Did not get a valid Port for cable resetcause information".format(port)) + sys.exit(CONFIG_FAIL) + +@muxcable.command() +@click.argument('port', metavar='', required=True, default=None) +@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, help="display the output in json format") +@clicommon.pass_db +def operationtime(db, port, json_output): + """Show muxcable operation time hh:mm:ss forrmat""" + + port = platform_sfputil_helper.get_interface_name(port, db) + + if port is not None: + + res_dict = {} + result = {} + + + res_dict[0] = CONFIG_FAIL + res_dict[1] = "unknown" + + res_dict = update_and_get_response_for_xcvr_cmd( + "get_ber", "status", "True", XCVRD_GET_BER_CMD_TABLE, None, XCVRD_GET_BER_RSP_TABLE, XCVRD_GET_BER_RES_TABLE, port, 10, None, "operation_time") + + if res_dict[1] == "True": + result = get_result(port, res_dict, "fec" , result, XCVRD_GET_BER_RES_TABLE) + + + + port = platform_sfputil_helper.get_interface_alias(port, db) + + actual_time = result.get("operation_time", 0) + if actual_time is not None: + time = '{0:02.0f}:{1:02.0f}'.format(*divmod(int(actual_time) * 60, 60)) + result['operation_time'] = time + + if json_output: + click.echo("{}".format(json.dumps(result, indent=4))) + else: + headers = ['PORT', 'ATTR', 'OPERATION_TIME'] + res = [[port]+[key] + [val] for key, val in result.items()] + click.echo(tabulate(res, headers=headers)) + else: + click.echo("Did not get a valid Port 
for operation time".format(port)) + sys.exit(CONFIG_FAIL) diff --git a/show/vnet.py b/show/vnet.py index ba6f81ce8d..239e6d2206 100644 --- a/show/vnet.py +++ b/show/vnet.py @@ -333,6 +333,29 @@ def routes(): """Show vnet routes related information""" pass +def pretty_print(table, r, epval, mac_addr, vni, state): + endpoints = epval.split(',') + row_width = 3 + max_len = 0 + for ep in endpoints: + max_len = len(ep) if len(ep) > max_len else max_len + if max_len > 15: + row_width = 2 + iter = 0 + while iter < len(endpoints): + if iter +row_width > len(endpoints): + r.append(",".join(endpoints[iter:])) + else: + r.append(",".join(endpoints[iter:iter + row_width])) + if iter == 0: + r.append(mac_addr) + r.append(vni) + r.append(state) + else: + r.extend(["", "", ""]) + iter += row_width + table.append(r) + r = ["",""] @routes.command() def all(): @@ -373,12 +396,17 @@ def all(): state_db_key = '|'.join(k.split(":",2)) val = appl_db.get_all(appl_db.APPL_DB, k) val_state = state_db.get_all(state_db.STATE_DB, state_db_key) - r.append(val.get('endpoint')) - r.append(val.get('mac_address')) - r.append(val.get('vni')) - if val_state: - r.append(val_state.get('state')) - table.append(r) + epval = val.get('endpoint') + if len(epval) < 40: + r.append(epval) + r.append(val.get('mac_address')) + r.append(val.get('vni')) + if val_state: + r.append(val_state.get('state')) + table.append(r) + continue + state = val_state.get('state') if val_state else "" + pretty_print(table, r, epval, val.get('mac_address'), val.get('vni'), state ) click.echo(tabulate(table, header)) diff --git a/sonic-utilities-data/templates/service_mgmt.sh.j2 b/sonic-utilities-data/templates/service_mgmt.sh.j2 index d206049015..5c8f4e4974 100644 --- a/sonic-utilities-data/templates/service_mgmt.sh.j2 +++ b/sonic-utilities-data/templates/service_mgmt.sh.j2 @@ -51,7 +51,8 @@ function check_warm_boot() function check_fast_boot() { - if [[ $($SONIC_DB_CLI STATE_DB GET "FAST_REBOOT|system") == "1" ]]; then + SYSTEM_FAST_REBOOT=`$SONIC_DB_CLI STATE_DB hget "FAST_RESTART_ENABLE_TABLE|system" enable` + if [[ x"${SYSTEM_FAST_REBOOT}" == x"true" ]]; then FAST_BOOT="true" else FAST_BOOT="false" diff --git a/sonic_installer/bootloader/grub.py b/sonic_installer/bootloader/grub.py index dcafc3f840..7ab5c6c0bc 100644 --- a/sonic_installer/bootloader/grub.py +++ b/sonic_installer/bootloader/grub.py @@ -153,17 +153,6 @@ def verify_image_platform(self, image_path): # Check if platform is inside image's target platforms return self.platform_in_platforms_asic(platform, image_path) - def verify_image_sign(self, image_path): - click.echo('Verifying image signature') - verification_script_name = 'verify_image_sign.sh' - script_path = os.path.join('/usr', 'local', 'bin', verification_script_name) - if not os.path.exists(script_path): - click.echo("Unable to find verification script in path " + script_path) - return False - verification_result = subprocess.run([script_path, image_path], capture_output=True) - click.echo(str(verification_result.stdout) + " " + str(verification_result.stderr)) - return verification_result.returncode == 0 - @classmethod def detect(cls): return os.path.isfile(os.path.join(HOST_PATH, 'grub/grub.cfg')) diff --git a/sonic_installer/main.py b/sonic_installer/main.py index d78259317e..ce1c15866d 100644 --- a/sonic_installer/main.py +++ b/sonic_installer/main.py @@ -511,8 +511,7 @@ def sonic_installer(): @click.option('-y', '--yes', is_flag=True, callback=abort_if_false, expose_value=False, prompt='New image will be installed, 
continue?') @click.option('-f', '--force', '--skip-secure-check', is_flag=True, - help="Force installation of an image of a non-secure type than secure running " + - " image, this flag does not affect secure upgrade image verification") + help="Force installation of an image of a non-secure type than secure running image") @click.option('--skip-platform-check', is_flag=True, help="Force installation of an image of a type which is not of the same platform") @click.option('--skip_migration', is_flag=True, @@ -577,14 +576,6 @@ def install(url, force, skip_platform_check=False, skip_migration=False, skip_pa "Aborting...", LOG_ERR) raise click.Abort() - # Calling verification script by default - signature will be checked if enabled in bios - echo_and_log("Verifing image {} signature...".format(binary_image_version)) - if not bootloader.verify_image_sign(image_path): - echo_and_log('Error: Failed verify image signature', LOG_ERR) - raise click.Abort() - else: - echo_and_log('Verification successful') - echo_and_log("Installing image {} and setting it as default...".format(binary_image_version)) with SWAPAllocator(not skip_setup_swap, swap_mem_size, total_mem_threshold, available_mem_threshold): bootloader.install_image(image_path) @@ -967,6 +958,5 @@ def verify_next_image(): sys.exit(1) click.echo('Image successfully verified') - if __name__ == '__main__': sonic_installer() diff --git a/sonic_package_manager/service_creator/feature.py b/sonic_package_manager/service_creator/feature.py index 90378d378f..43b6c309fe 100644 --- a/sonic_package_manager/service_creator/feature.py +++ b/sonic_package_manager/service_creator/feature.py @@ -105,7 +105,7 @@ def update(self, old_manifest: Manifest, new_manifest: Manifest): """ Migrate feature configuration. It can be that non-configurable - feature entries have to be updated. e.g: "has_timer" for example if + feature entries have to be updated. e.g: "delayed" for example if the new feature introduces a service timer or name of the service has changed, but user configurable entries are not changed). @@ -227,12 +227,12 @@ def get_default_feature_entries(state=None, owner=None) -> Dict[str, str]: @staticmethod def get_non_configurable_feature_entries(manifest) -> Dict[str, str]: - """ Get non-configurable feature table entries: e.g. 'has_timer' """ + """ Get non-configurable feature table entries: e.g. 'delayed' """ return { 'has_per_asic_scope': str(manifest['service']['asic-service']), 'has_global_scope': str(manifest['service']['host-service']), - 'has_timer': str(manifest['service']['delayed']), + 'delayed': str(manifest['service']['delayed']), 'check_up_status': str(manifest['service']['check_up_status']), 'support_syslog_rate_limit': str(manifest['service']['syslog']['support-rate-limit']), } diff --git a/tests/aclshow_test.py b/tests/aclshow_test.py index 90fe46f683..0abe509aad 100644 --- a/tests/aclshow_test.py +++ b/tests/aclshow_test.py @@ -46,6 +46,7 @@ RULE_9 DATAACL 9991 901 900 RULE_10 DATAACL 9989 1001 1000 DEFAULT_RULE DATAACL 1 2 1 +RULE_1 DATAACL_5 9999 N/A N/A RULE_NO_COUNTER DATAACL_NO_COUNTER 9995 N/A N/A RULE_6 EVERFLOW 9994 601 600 RULE_08 EVERFLOW 9992 0 0 @@ -89,8 +90,8 @@ # Expected output for aclshow -r RULE_4,RULE_6 -vv rule4_rule6_verbose_output = '' + \ """Reading ACL info... 
-Total number of ACL Tables: 11 -Total number of ACL Rules: 20 +Total number of ACL Tables: 12 +Total number of ACL Rules: 21 RULE NAME TABLE NAME PRIO PACKETS COUNT BYTES COUNT ----------- ------------ ------ --------------- ------------- @@ -136,6 +137,7 @@ RULE_9 DATAACL 9991 0 0 RULE_10 DATAACL 9989 0 0 DEFAULT_RULE DATAACL 1 0 0 +RULE_1 DATAACL_5 9999 N/A N/A RULE_NO_COUNTER DATAACL_NO_COUNTER 9995 N/A N/A RULE_6 EVERFLOW 9994 0 0 RULE_08 EVERFLOW 9992 0 0 @@ -161,6 +163,7 @@ RULE_9 DATAACL 9991 0 0 RULE_10 DATAACL 9989 0 0 DEFAULT_RULE DATAACL 1 0 0 +RULE_1 DATAACL_5 9999 N/A N/A RULE_NO_COUNTER DATAACL_NO_COUNTER 9995 100 100 RULE_6 EVERFLOW 9994 0 0 RULE_08 EVERFLOW 9992 0 0 diff --git a/tests/config_mirror_session_test.py b/tests/config_mirror_session_test.py index 5585cab87a..ccbc196b50 100644 --- a/tests/config_mirror_session_test.py +++ b/tests/config_mirror_session_test.py @@ -1,7 +1,11 @@ import pytest import config.main as config +import jsonpatch from unittest import mock from click.testing import CliRunner +from mock import patch +from jsonpatch import JsonPatchConflict +from sonic_py_common import multi_asic ERR_MSG_IP_FAILURE = "does not appear to be an IPv4 or IPv6 network" ERR_MSG_IP_VERSION_FAILURE = "not a valid IPv4 address" @@ -172,7 +176,34 @@ def test_mirror_session_erspan_add(): mocked.assert_called_with("test_session", "100.1.1.1", "2.2.2.2", 8, 63, 0, 0, None, None, None) +@patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) +@patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError)) +def test_mirror_session_erspan_add_invalid_yang_validation(): + config.ADHOC_VALIDATION = False + runner = CliRunner() + result = runner.invoke( + config.config.commands["mirror_session"].commands["erspan"].commands["add"], + ["test_session", "100.1.1.1", "2.2.2.2", "8", "63", "10", "100"]) + print(result.output) + assert "Invalid ConfigDB. Error" in result.output + + +@patch("config.main.ConfigDBConnector", spec=True, connect=mock.Mock()) +@patch("config.main.multi_asic.get_all_namespaces", mock.Mock(return_value={'front_ns': 'sample_ns'})) +@patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) +@patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError)) +def test_mirror_session_erspan_add_multi_asic_invalid_yang_validation(mock_db_connector): + config.ADHOC_VALIDATION = False + runner = CliRunner() + result = runner.invoke( + config.config.commands["mirror_session"].commands["erspan"].commands["add"], + ["test_session", "100.1.1.1", "2.2.2.2", "8", "63", "10", "100"]) + print(result.output) + assert "Invalid ConfigDB. 
Error" in result.output + + def test_mirror_session_span_add(): + config.ADHOC_VALIDATION = True runner = CliRunner() # Verify invalid queue @@ -273,3 +304,54 @@ def test_mirror_session_span_add(): mocked.assert_called_with("test_session", "Ethernet0", "Ethernet4", "rx", 0, None) + +@patch("config.main.ConfigDBConnector", spec=True, connect=mock.Mock()) +@patch("config.main.multi_asic.get_all_namespaces", mock.Mock(return_value={'front_ns': 'sample_ns'})) +@patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) +@patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError)) +def test_mirror_session_span_add_multi_asic_invalid_yang_validation(mock_db_connector): + config.ADHOC_VALIDATION = False + runner = CliRunner() + result = runner.invoke( + config.config.commands["mirror_session"].commands["span"].commands["add"], + ["test_session", "Ethernet0", "Ethernet4", "rx", "0"]) + print(result.output) + assert "Invalid ConfigDB. Error" in result.output + + +@patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) +@patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError)) +def test_mirror_session_span_add_invalid_yang_validation(): + config.ADHOC_VALIDATION = False + runner = CliRunner() + result = runner.invoke( + config.config.commands["mirror_session"].commands["span"].commands["add"], + ["test_session", "Ethernet0", "Ethernet4", "rx", "0"]) + print(result.output) + assert "Invalid ConfigDB. Error" in result.output + + +@patch("config.main.multi_asic.get_all_namespaces", mock.Mock(return_value={'front_ns': 'sample_ns'})) +@patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) +@patch("config.main.ConfigDBConnector", spec=True, connect=mock.Mock()) +@patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=JsonPatchConflict)) +def test_mirror_session_remove_multi_asic_invalid_yang_validation(mock_db_connector): + config.ADHOC_VALIDATION = False + runner = CliRunner() + result = runner.invoke( + config.config.commands["mirror_session"].commands["remove"], + ["mrr_sample"]) + print(result.output) + assert "Invalid ConfigDB. Error" in result.output + + +@patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) +@patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=JsonPatchConflict)) +def test_mirror_session_remove_invalid_yang_validation(): + config.ADHOC_VALIDATION = False + runner = CliRunner() + result = runner.invoke( + config.config.commands["mirror_session"].commands["remove"], + ["mrr_sample"]) + print(result.output) + assert "Invalid ConfigDB. 
Error" in result.output diff --git a/tests/config_snmp_test.py b/tests/config_snmp_test.py index 096f21cb80..76f5675690 100644 --- a/tests/config_snmp_test.py +++ b/tests/config_snmp_test.py @@ -118,6 +118,7 @@ def setup_class(cls): # Add snmp community tests def test_config_snmp_community_add_new_community_ro(self): + config.ADHOC_VALIDATION = True db = Db() runner = CliRunner() with mock.patch('utilities_common.cli.run_command') as mock_run_command: diff --git a/tests/config_test.py b/tests/config_test.py index 5fa50abd00..e1e3037fe9 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -8,6 +8,7 @@ import unittest import ipaddress from unittest import mock +from jsonpatch import JsonPatchConflict import click from click.testing import CliRunner @@ -114,10 +115,6 @@ Reloading Monit configuration ... """ -reload_config_with_untriggered_timer_output="""\ -Relevant services are not up. Retry later or use -f to avoid system checks -""" - def mock_run_command_side_effect(*args, **kwargs): command = args[0] @@ -154,41 +151,6 @@ def mock_run_command_side_effect_disabled_timer(*args, **kwargs): else: return '', 0 -def mock_run_command_side_effect_untriggered_timer(*args, **kwargs): - command = args[0] - - if kwargs.get('display_cmd'): - click.echo(click.style("Running command: ", fg='cyan') + click.style(command, fg='green')) - - if kwargs.get('return_cmd'): - if command == "systemctl list-dependencies --plain sonic-delayed.target | sed '1d'": - return 'snmp.timer', 0 - elif command == "systemctl list-dependencies --plain sonic.target | sed '1d'": - return 'swss', 0 - elif command == "systemctl is-enabled snmp.timer": - return 'enabled', 0 - elif command == "systemctl show snmp.timer --property=LastTriggerUSecMonotonic --value": - return '0', 0 - else: - return '', 0 - -def mock_run_command_side_effect_gnmi(*args, **kwargs): - command = args[0] - - if kwargs.get('display_cmd'): - click.echo(click.style("Running command: ", fg='cyan') + click.style(command, fg='green')) - - if kwargs.get('return_cmd'): - if command == "systemctl list-dependencies --plain sonic-delayed.target | sed '1d'": - return 'gnmi.timer', 0 - elif command == "systemctl list-dependencies --plain sonic.target | sed '1d'": - return 'swss', 0 - elif command == "systemctl is-enabled gnmi.timer": - return 'enabled', 0 - else: - return '', 0 - - # Load sonic-cfggen from source since /usr/local/bin/sonic-cfggen does not have .py extension. 
sonic_cfggen = load_module_from_source('sonic_cfggen', '/usr/local/bin/sonic-cfggen') @@ -234,32 +196,6 @@ def test_config_reload(self, get_cmd_module, setup_single_broadcom_asic): assert "\n".join([l.rstrip() for l in result.output.split('\n')][:1]) == reload_config_with_sys_info_command_output - def test_config_reload_untriggered_timer(self, get_cmd_module, setup_single_broadcom_asic): - with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect_untriggered_timer)) as mock_run_command: - (config, show) = get_cmd_module - - jsonfile_config = os.path.join(mock_db_path, "config_db.json") - jsonfile_init_cfg = os.path.join(mock_db_path, "init_cfg.json") - - # create object - config.INIT_CFG_FILE = jsonfile_init_cfg - config.DEFAULT_CONFIG_DB_FILE = jsonfile_config - - db = Db() - runner = CliRunner() - obj = {'config_db': db.cfgdb} - - # simulate 'config reload' to provoke load_sys_info option - result = runner.invoke(config.config.commands["reload"], ["-l", "-y"], obj=obj) - - print(result.exit_code) - print(result.output) - traceback.print_tb(result.exc_info[2]) - - assert result.exit_code == 1 - - assert "\n".join([l.rstrip() for l in result.output.split('\n')][:2]) == reload_config_with_untriggered_timer_output - @classmethod def teardown_class(cls): print("TEARDOWN") @@ -292,25 +228,7 @@ def test_load_minigraph(self, get_cmd_module, setup_single_broadcom_asic): assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == load_minigraph_command_output # Verify "systemctl reset-failed" is called for services under sonic.target mock_run_command.assert_any_call('systemctl reset-failed swss') - # Verify "systemctl reset-failed" is called for services under sonic-delayed.target - mock_run_command.assert_any_call('systemctl reset-failed snmp') - assert mock_run_command.call_count == 11 - - def test_load_minigraph_with_gnmi_timer(self, get_cmd_module, setup_single_broadcom_asic): - with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect_gnmi)) as mock_run_command: - (config, show) = get_cmd_module - runner = CliRunner() - result = runner.invoke(config.config.commands["load_minigraph"], ["-y"]) - print(result.exit_code) - print(result.output) - traceback.print_tb(result.exc_info[2]) - assert result.exit_code == 0 - assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == load_minigraph_command_output - # Verify "systemctl reset-failed" is called for services under sonic.target - mock_run_command.assert_any_call('systemctl reset-failed swss') - # Verify "systemctl reset-failed" is called for services under sonic-delayed.target - mock_run_command.assert_any_call('systemctl reset-failed gnmi') - assert mock_run_command.call_count == 11 + assert mock_run_command.call_count == 8 def test_load_minigraph_with_port_config_bad_format(self, get_cmd_module, setup_single_broadcom_asic): with mock.patch( @@ -354,49 +272,6 @@ def test_load_minigraph_with_port_config(self, get_cmd_module, setup_single_broa port_config = [{"PORT": {"Ethernet0": {"admin_status": "up"}}}] self.check_port_config(db, config, port_config, "config interface startup Ethernet0") - def test_load_backend_acl(self, get_cmd_module, setup_single_broadcom_asic): - db = Db() - db.cfgdb.set_entry("DEVICE_METADATA", "localhost", {"storage_device": "true"}) - self.check_backend_acl(get_cmd_module, db, device_type='BackEndToRRouter', condition=True) - - def test_load_backend_acl_not_storage(self, get_cmd_module, 
setup_single_broadcom_asic): - db = Db() - self.check_backend_acl(get_cmd_module, db, device_type='BackEndToRRouter', condition=False) - - def test_load_backend_acl_storage_leaf(self, get_cmd_module, setup_single_broadcom_asic): - db = Db() - db.cfgdb.set_entry("DEVICE_METADATA", "localhost", {"storage_device": "true"}) - self.check_backend_acl(get_cmd_module, db, device_type='BackEndLeafRouter', condition=False) - - def test_load_backend_acl_storage_no_dataacl(self, get_cmd_module, setup_single_broadcom_asic): - db = Db() - db.cfgdb.set_entry("DEVICE_METADATA", "localhost", {"storage_device": "true"}) - db.cfgdb.set_entry("ACL_TABLE", "DATAACL", None) - self.check_backend_acl(get_cmd_module, db, device_type='BackEndToRRouter', condition=False) - - def check_backend_acl(self, get_cmd_module, db, device_type='BackEndToRRouter', condition=True): - def is_file_side_effect(filename): - return True if 'backend_acl' in filename else False - with mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)): - with mock.patch('config.main._get_device_type', mock.MagicMock(return_value=device_type)): - with mock.patch( - "utilities_common.cli.run_command", - mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: - (config, show) = get_cmd_module - runner = CliRunner() - result = runner.invoke(config.config.commands["load_minigraph"], ["-y"], obj=db) - print(result.exit_code) - expected_output = ['Running command: acl-loader update incremental /etc/sonic/backend_acl.json', - 'Running command: /usr/local/bin/sonic-cfggen -d -t /usr/share/sonic/templates/backend_acl.j2,/etc/sonic/backend_acl.json' - ] - print(result.output) - assert result.exit_code == 0 - output = result.output.split('\n') - if condition: - assert set(expected_output).issubset(set(output)) - else: - assert not(set(expected_output).issubset(set(output))) - def check_port_config(self, db, config, port_config, expected_output): def read_json_file_side_effect(filename): return port_config @@ -693,7 +568,7 @@ def test_qos_wait_until_clear_empty(self): with mock.patch('swsscommon.swsscommon.SonicV2Connector.keys', side_effect=TestConfigQos._keys): TestConfigQos._keys_counter = 1 - empty = _wait_until_clear("BUFFER_POOL_TABLE:*", 0.5,2) + empty = _wait_until_clear(["BUFFER_POOL_TABLE:*"], 0.5,2) assert empty def test_qos_wait_until_clear_not_empty(self): @@ -701,9 +576,15 @@ def test_qos_wait_until_clear_not_empty(self): with mock.patch('swsscommon.swsscommon.SonicV2Connector.keys', side_effect=TestConfigQos._keys): TestConfigQos._keys_counter = 10 - empty = _wait_until_clear("BUFFER_POOL_TABLE:*", 0.5,2) + empty = _wait_until_clear(["BUFFER_POOL_TABLE:*"], 0.5,2) assert not empty + @mock.patch('config.main._wait_until_clear') + def test_qos_clear_no_wait(self, _wait_until_clear): + from config.main import _clear_qos + _clear_qos(True, False) + _wait_until_clear.assert_called_with(['BUFFER_*_TABLE:*', 'BUFFER_*_SET'], interval=0.5, timeout=0, verbose=False) + def test_qos_reload_single( self, get_cmd_module, setup_qos_mock_apis, setup_single_broadcom_asic @@ -1910,3 +1791,64 @@ def test_add_loopback_adhoc_validation(self): @classmethod def teardown_class(cls): print("TEARDOWN") + + +class TestConfigNtp(object): + @classmethod + def setup_class(cls): + print("SETUP") + import config.main + importlib.reload(config.main) + + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError)) + 
@patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + def test_add_ntp_server_failed_yang_validation(self): + config.ADHOC_VALIDATION = False + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + result = runner.invoke(config.config.commands["ntp"], ["add", "10.10.10.x"], obj=obj) + print(result.exit_code) + print(result.output) + assert "Invalid ConfigDB. Error" in result.output + + def test_add_ntp_server_invalid_ip(self): + config.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + result = runner.invoke(config.config.commands["ntp"], ["add", "10.10.10.x"], obj=obj) + print(result.exit_code) + print(result.output) + assert "Invalid IP address" in result.output + + def test_del_ntp_server_invalid_ip(self): + config.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + result = runner.invoke(config.config.commands["ntp"], ["del", "10.10.10.x"], obj=obj) + print(result.exit_code) + print(result.output) + assert "Invalid IP address" in result.output + + @patch("config.main.ConfigDBConnector.get_table", mock.Mock(return_value="10.10.10.10")) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=JsonPatchConflict)) + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + def test_del_ntp_server_invalid_ip_yang_validation(self): + config.ADHOC_VALIDATION = False + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + result = runner.invoke(config.config.commands["ntp"], ["del", "10.10.10.10"], obj=obj) + print(result.exit_code) + print(result.output) + assert "Invalid ConfigDB. Error" in result.output + + @classmethod + def teardown_class(cls): + print("TEARDOWN") diff --git a/tests/conftest.py b/tests/conftest.py index 96b80df3e1..6e70f8c9aa 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -20,6 +20,7 @@ ) from . 
import config_int_ip_common import utilities_common.constants as constants +import config.main as config test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) @@ -169,6 +170,9 @@ def setup_single_bgp_instance(request): elif request.param == 'v6': bgp_mocked_json = os.path.join( test_path, 'mock_tables', 'ipv6_bgp_summary.json') + elif request.param == 'show_run_bgp': + bgp_mocked_json = os.path.join( + test_path, 'mock_tables', 'show_run_bgp.txt') elif request.param == 'ip_route': bgp_mocked_json = 'ip_route.json' elif request.param == 'ip_specific_route': @@ -193,6 +197,13 @@ def mock_show_bgp_summary(vtysh_cmd, bgp_namespace, vtysh_shell_cmd=constants.RV return mock_frr_data return "" + def mock_show_run_bgp(request): + if os.path.isfile(bgp_mocked_json): + with open(bgp_mocked_json) as json_data: + mock_frr_data = json_data.read() + return mock_frr_data + return "" + def mock_run_bgp_command_for_static(vtysh_cmd, bgp_namespace="", vtysh_shell_cmd=constants.RVTYSH_COMMAND): if vtysh_cmd == "show ip route vrf all static": return config_int_ip_common.show_ip_route_with_static_expected_output @@ -239,6 +250,9 @@ def mock_run_bgp_command(vtysh_cmd, bgp_namespace, vtysh_shell_cmd=constants.RVT elif request.param == "show_bgp_summary_no_neigh": bgp_util.run_bgp_command = mock.MagicMock( return_value=mock_show_bgp_summary_no_neigh("", "")) + elif request.param.startswith('show_run_bgp'): + bgp_util.run_bgp_command = mock.MagicMock( + return_value=mock_show_run_bgp(request)) else: bgp_util.run_bgp_command = mock.MagicMock( return_value=mock_show_bgp_summary("", "")) @@ -270,6 +284,10 @@ def setup_multi_asic_bgp_instance(request): m_asic_json_file = 'ip_special_recursive_route.json' elif request.param == 'ip_route_summary': m_asic_json_file = 'ip_route_summary.txt' + elif request.param == 'show_run_bgp': + m_asic_json_file = 'show_run_bgp.txt' + elif request.param == 'show_not_running_bgp': + m_asic_json_file = 'show_not_running_bgp.txt' elif request.param.startswith('bgp_v4_network') or \ request.param.startswith('bgp_v6_network') or \ request.param.startswith('bgp_v4_neighbor') or \ @@ -339,3 +357,17 @@ def setup_fib_commands(): import show.main as show return show + +@pytest.fixture(scope='function') +def mock_restart_dhcp_relay_service(): + print("We are mocking restart dhcp_relay") + origin_funcs = [] + origin_funcs.append(config.vlan.dhcp_relay_util.restart_dhcp_relay_service) + origin_funcs.append(config.vlan.is_dhcp_relay_running) + config.vlan.dhcp_relay_util.restart_dhcp_relay_service = mock.MagicMock(return_value=0) + config.vlan.is_dhcp_relay_running = mock.MagicMock(return_value=True) + + yield + + config.vlan.dhcp_relay_util.restart_dhcp_relay_service = origin_funcs[0] + config.vlan.is_dhcp_relay_running = origin_funcs[1] diff --git a/tests/console_test.py b/tests/console_test.py index 8161eda7dd..528f5f4ba8 100644 --- a/tests/console_test.py +++ b/tests/console_test.py @@ -1,8 +1,10 @@ import os import sys import subprocess +import jsonpatch import pexpect from unittest import mock +from mock import patch import pytest @@ -14,6 +16,7 @@ from utilities_common.db import Db from consutil.lib import * from sonic_py_common import device_info +from jsonpatch import JsonPatchConflict class TestConfigConsoleCommands(object): @classmethod @@ -28,6 +31,16 @@ def test_enable_console_switch(self): print(result.exit_code) print(sys.stderr, result.output) assert result.exit_code == 0 + + 
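# A condensed sketch of the pattern the YANG-validation cases below share: force the validated write path on, make the validated write raise, and assert the CLI reports the ConfigDB error. The helper name is illustrative only (not part of the change) and relies on this file's existing imports of mock, CliRunner and config. + def run_yang_validation_error_case(command, args=None, obj=None): + with mock.patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)), mock.patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)): + result = CliRunner().invoke(command, args or [], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + 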
@patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_enable_console_switch_yang_validation(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(config.config.commands["console"].commands["enable"]) + print(result.exit_code) + assert "Invalid ConfigDB. Error" in result.output def test_disable_console_switch(self): runner = CliRunner() @@ -38,6 +51,17 @@ def test_disable_console_switch(self): print(sys.stderr, result.output) assert result.exit_code == 0 + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_disable_console_switch_yang_validation(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(config.config.commands["console"].commands["disable"]) + print(result.exit_code) + print(sys.stderr, result.output) + assert "Invalid ConfigDB. Error" in result.output + def test_console_add_exists(self): runner = CliRunner() db = Db() @@ -95,6 +119,18 @@ def test_console_add_success(self): print(sys.stderr, result.output) assert result.exit_code == 0 + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError)) + def test_console_add_yang_validation(self): + runner = CliRunner() + db = Db() + + # add a console setting without flow control option + result = runner.invoke(config.config.commands["console"].commands["add"], ["0", '--baud', "9600"], obj=db) + print(result.exit_code) + print(sys.stderr, result.output) + assert "Invalid ConfigDB. Error" in result.output + def test_console_del_non_exists(self): runner = CliRunner() db = Db() @@ -117,6 +153,19 @@ def test_console_del_success(self): print(sys.stderr, result.output) assert result.exit_code == 0 + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=JsonPatchConflict)) + def test_console_del_yang_validation(self): + runner = CliRunner() + db = Db() + db.cfgdb.set_entry("CONSOLE_PORT", "1", { "baud_rate" : "9600" }) + + # add a console setting which the port exists + result = runner.invoke(config.config.commands["console"].commands["del"], ["1"], obj=db) + print(result.exit_code) + print(sys.stderr, result.output) + assert "Invalid ConfigDB. 
Error" in result.output + def test_update_console_remote_device_name_non_exists(self): runner = CliRunner() db = Db() @@ -163,6 +212,19 @@ def test_update_console_remote_device_name_reset(self): print(sys.stderr, result.output) assert result.exit_code == 0 + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_update_console_remote_device_name_reset_yang_validation(self): + runner = CliRunner() + db = Db() + db.cfgdb.set_entry("CONSOLE_PORT", 2, { "remote_device" : "switch1" }) + + # trying to reset a console line remote device configuration which is not exists + result = runner.invoke(config.config.commands["console"].commands["remote_device"], ["2"], obj=db) + print(result.exit_code) + print(sys.stderr, result.output) + assert "Invalid ConfigDB. Error" in result.output + def test_update_console_remote_device_name_success(self): runner = CliRunner() db = Db() @@ -174,6 +236,19 @@ def test_update_console_remote_device_name_success(self): print(sys.stderr, result.output) assert result.exit_code == 0 + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_update_console_remote_device_name_yang_validation(self): + runner = CliRunner() + db = Db() + db.cfgdb.set_entry("CONSOLE_PORT", "1", { "baud_rate" : "9600" }) + + # trying to set a console line remote device configuration + result = runner.invoke(config.config.commands["console"].commands["remote_device"], ["1", "switch1"], obj=db) + print(result.exit_code) + print(sys.stderr, result.output) + assert "Invalid ConfigDB. Error" in result.output + def test_update_console_baud_no_change(self): runner = CliRunner() db = Db() @@ -207,6 +282,19 @@ def test_update_console_baud_success(self): print(sys.stderr, result.output) assert result.exit_code == 0 + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_update_console_baud_yang_validation(self): + runner = CliRunner() + db = Db() + db.cfgdb.set_entry("CONSOLE_PORT", "1", { "baud_rate" : "9600" }) + + # trying to set a console line baud + result = runner.invoke(config.config.commands["console"].commands["baud"], ["1", "115200"], obj=db) + print(result.exit_code) + print(sys.stderr, result.output) + assert "Invalid ConfigDB. 
Error" in result.output + def test_update_console_flow_control_no_change(self): runner = CliRunner() db = Db() @@ -240,6 +328,19 @@ def test_update_console_flow_control_success(self): print(sys.stderr, result.output) assert result.exit_code == 0 + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_update_console_flow_control_yang_validation(self): + runner = CliRunner() + db = Db() + db.cfgdb.set_entry("CONSOLE_PORT", "1", { "baud_rate" : "9600", "flow_control" : "0" }) + + # trying to set a console line flow control option + result = runner.invoke(config.config.commands["console"].commands["flow_control"], ["enable", "1"], obj=db) + print(result.exit_code) + print(sys.stderr, result.output) + assert "Invalid ConfigDB. Error" in result.output + class TestConsutilLib(object): @classmethod def setup_class(cls): diff --git a/tests/counterpoll_input/config_db.json b/tests/counterpoll_input/config_db.json index 40ff750db6..38cde7c15e 100644 --- a/tests/counterpoll_input/config_db.json +++ b/tests/counterpoll_input/config_db.json @@ -2235,7 +2235,7 @@ "auto_restart": "enabled", "state": "enabled", "has_global_scope": "True", - "has_timer": "False" + "delayed": "False" }, "pmon": { "has_per_asic_scope": "False", @@ -2243,7 +2243,7 @@ "auto_restart": "enabled", "state": "enabled", "has_global_scope": "True", - "has_timer": "False" + "delayed": "False" }, "sflow": { "has_per_asic_scope": "False", @@ -2251,7 +2251,7 @@ "auto_restart": "enabled", "state": "disabled", "has_global_scope": "True", - "has_timer": "False" + "delayed": "False" }, "database": { "has_per_asic_scope": "True", @@ -2259,7 +2259,7 @@ "auto_restart": "disabled", "state": "enabled", "has_global_scope": "True", - "has_timer": "False" + "delayed": "False" }, "telemetry": { "has_per_asic_scope": "False", @@ -2268,7 +2268,7 @@ "auto_restart": "enabled", "state": "enabled", "has_global_scope": "True", - "has_timer": "True" + "delayed": "True" }, "snmp": { "has_per_asic_scope": "False", @@ -2276,7 +2276,7 @@ "auto_restart": "enabled", "state": "enabled", "has_global_scope": "True", - "has_timer": "True" + "delayed": "True" }, "bgp": { "has_per_asic_scope": "True", @@ -2284,7 +2284,7 @@ "auto_restart": "enabled", "state": "enabled", "has_global_scope": "False", - "has_timer": "False" + "delayed": "False" }, "radv": { "has_per_asic_scope": "False", @@ -2292,7 +2292,7 @@ "auto_restart": "enabled", "state": "enabled", "has_global_scope": "True", - "has_timer": "False" + "delayed": "False" }, "mgmt-framework": { "has_per_asic_scope": "False", @@ -2300,7 +2300,7 @@ "auto_restart": "enabled", "state": "enabled", "has_global_scope": "True", - "has_timer": "True" + "delayed": "True" }, "nat": { "has_per_asic_scope": "False", @@ -2308,7 +2308,7 @@ "auto_restart": "enabled", "state": "disabled", "has_global_scope": "True", - "has_timer": "False" + "delayed": "False" }, "teamd": { "has_per_asic_scope": "True", @@ -2316,7 +2316,7 @@ "auto_restart": "enabled", "state": "enabled", "has_global_scope": "False", - "has_timer": "False" + "delayed": "False" }, "dhcp_relay": { "has_per_asic_scope": "False", @@ -2324,7 +2324,7 @@ "auto_restart": "enabled", "state": "enabled", "has_global_scope": "True", - "has_timer": "False" + "delayed": "False" }, "swss": { "has_per_asic_scope": "True", @@ -2332,7 +2332,7 @@ "auto_restart": "enabled", "state": 
"enabled", "has_global_scope": "False", - "has_timer": "False" + "delayed": "False" }, "syncd": { "has_per_asic_scope": "True", @@ -2340,7 +2340,7 @@ "auto_restart": "enabled", "state": "enabled", "has_global_scope": "False", - "has_timer": "False" + "delayed": "False" } }, "DSCP_TO_TC_MAP": { @@ -2669,4 +2669,4 @@ "size": "56368" } } -} \ No newline at end of file +} diff --git a/tests/db_migrator_input/appl_db/routes_migrate_expected.json b/tests/db_migrator_input/appl_db/routes_migrate_expected.json new file mode 100644 index 0000000000..5cad371c31 --- /dev/null +++ b/tests/db_migrator_input/appl_db/routes_migrate_expected.json @@ -0,0 +1,12 @@ +{ + "ROUTE_TABLE:192.168.104.0/25": { + "nexthop": "10.0.0.57,10.0.0.59,10.0.0.61,10.0.0.63", + "ifname" : "PortChannel101,PortChannel102,PortChannel103,PortChannel104", + "weight": "" + }, + "ROUTE_TABLE:20c0:fe28:0:80::/64": { + "nexthop": "fc00::72,fc00::76,fc00::7a,fc00::7e", + "ifname" : "PortChannel101,PortChannel102,PortChannel103,PortChannel104", + "weight": "" + } +} diff --git a/tests/db_migrator_input/appl_db/routes_migrate_input.json b/tests/db_migrator_input/appl_db/routes_migrate_input.json new file mode 100644 index 0000000000..7249488cd6 --- /dev/null +++ b/tests/db_migrator_input/appl_db/routes_migrate_input.json @@ -0,0 +1,10 @@ +{ + "ROUTE_TABLE:192.168.104.0/25": { + "nexthop": "10.0.0.57,10.0.0.59,10.0.0.61,10.0.0.63", + "ifname" : "PortChannel101,PortChannel102,PortChannel103,PortChannel104" + }, + "ROUTE_TABLE:20c0:fe28:0:80::/64": { + "nexthop": "fc00::72,fc00::76,fc00::7a,fc00::7e", + "ifname" : "PortChannel101,PortChannel102,PortChannel103,PortChannel104" + } +} diff --git a/tests/db_migrator_input/config_db/feature-expected.json b/tests/db_migrator_input/config_db/feature-expected.json index 92653771fc..baf051a8bd 100644 --- a/tests/db_migrator_input/config_db/feature-expected.json +++ b/tests/db_migrator_input/config_db/feature-expected.json @@ -3,7 +3,7 @@ "auto_restart": "disabled", "has_global_scope": "False", "has_per_asic_scope": "True", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "enabled" }, @@ -11,7 +11,7 @@ "auto_restart": "enabled", "has_global_scope": "False", "has_per_asic_scope": "True", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "enabled" }, @@ -19,7 +19,7 @@ "auto_restart": "enabled", "has_global_scope": "False", "has_per_asic_scope": "True", - "has_timer": "False", + "delayed": "True", "high_mem_alert": "disabled", "state": "enabled" } diff --git a/tests/db_migrator_input/config_db/feature-input.json b/tests/db_migrator_input/config_db/feature-input.json index c6d512dad1..46a6cae613 100644 --- a/tests/db_migrator_input/config_db/feature-input.json +++ b/tests/db_migrator_input/config_db/feature-input.json @@ -8,7 +8,8 @@ "high_mem_alert": "disabled" }, "FEATURE|telemetry": { - "status": "enabled" + "status": "enabled", + "has_timer": "True" }, "FEATURE|syncd": { "state": "enabled" diff --git a/tests/db_migrator_input/config_db/routes_migrate_input.json b/tests/db_migrator_input/config_db/routes_migrate_input.json new file mode 100644 index 0000000000..672268b286 --- /dev/null +++ b/tests/db_migrator_input/config_db/routes_migrate_input.json @@ -0,0 +1,3 @@ +{ + "VERSIONS|DATABASE": {"VERSION": "version_1_0_1"} +} diff --git a/tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-input.json b/tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-input.json new file mode 100644 index 
0000000000..2b24076d8f --- /dev/null +++ b/tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-input.json @@ -0,0 +1,123 @@ +{ + "DEVICE_NEIGHBOR_METADATA|ARISTA01T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR_METADATA|ARISTA02T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR_METADATA|ARISTA03T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR_METADATA|ARISTA04T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR|Ethernet0": { + "name": "ARISTA01T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet4": { + "name": "ARISTA02T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet8": { + "name": "ARISTA03T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet12": { + "name": "ARISTA04T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet16": { + "name": "Servers1", + "port": "eth0" + }, + "DEVICE_NEIGHBOR|Ethernet20": { + "name": "Servers2", + "port": "eth0" + }, + "PORT|Ethernet0": { + "admin_status": "up", + "alias": "Ethernet1/1", + "description": "", + "index": "1", + "lanes": "77,78", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet4": { + "admin_status": "up", + "alias": "Ethernet2/1", + "description": "", + "index": "2", + "lanes": "79,80", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet8": { + "admin_status": "up", + "alias": "Ethernet3/1", + "description": "", + "index": "3", + "lanes": "81,82", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet12": { + "admin_status": "up", + "alias": "Ethernet4/1", + "description": "", + "index": "4", + "lanes": "83,84", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet16": { + "admin_status": "up", + "alias": "Ethernet5/1", + "description": "", + "index": "5", + "lanes": "85,86", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet20": { + "admin_status": "up", + "alias": "Ethernet6/1", + "description": "", + "index": "6", + "lanes": "87,88", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "CABLE_LENGTH|AZURE": { + "Ethernet0": "300m", + "Ethernet4": "300m", + "Ethernet8": "300m", + "Ethernet12": "300m", + "Ethernet16": "5m", + "Ethernet20": "5m" + }, + "VERSIONS|DATABASE": { + "VERSION": "version_4_0_1" + } +} diff --git a/tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-output.json b/tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-output.json new file mode 100644 index 0000000000..16646fc08b --- /dev/null +++ b/tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-output.json @@ -0,0 +1,123 @@ +{ + "DEVICE_NEIGHBOR_METADATA|ARISTA01T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR_METADATA|ARISTA02T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR_METADATA|ARISTA03T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR_METADATA|ARISTA04T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + 
"DEVICE_NEIGHBOR|Ethernet0": { + "name": "ARISTA01T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet4": { + "name": "ARISTA02T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet8": { + "name": "ARISTA03T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet12": { + "name": "ARISTA04T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet16": { + "name": "Servers1", + "port": "eth0" + }, + "DEVICE_NEIGHBOR|Ethernet20": { + "name": "Servers2", + "port": "eth0" + }, + "PORT|Ethernet0": { + "admin_status": "up", + "alias": "Ethernet1/1", + "description": "", + "index": "1", + "lanes": "77,78", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet4": { + "admin_status": "up", + "alias": "Ethernet2/1", + "description": "", + "index": "2", + "lanes": "79,80", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet8": { + "admin_status": "up", + "alias": "Ethernet3/1", + "description": "", + "index": "3", + "lanes": "81,82", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet12": { + "admin_status": "up", + "alias": "Ethernet4/1", + "description": "", + "index": "4", + "lanes": "83,84", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet16": { + "admin_status": "up", + "alias": "Ethernet5/1", + "description": "", + "index": "5", + "lanes": "85,86", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet20": { + "admin_status": "up", + "alias": "Ethernet6/1", + "description": "", + "index": "6", + "lanes": "87,88", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "CABLE_LENGTH|AZURE": { + "Ethernet0": "40m", + "Ethernet4": "40m", + "Ethernet8": "40m", + "Ethernet12": "40m", + "Ethernet16": "5m", + "Ethernet20": "5m" + }, + "VERSIONS|DATABASE": { + "VERSION": "version_4_0_1" + } +} diff --git a/tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-same-cable-input.json b/tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-same-cable-input.json new file mode 100644 index 0000000000..f36bc7c739 --- /dev/null +++ b/tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-same-cable-input.json @@ -0,0 +1,123 @@ +{ + "DEVICE_NEIGHBOR_METADATA|ARISTA01T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR_METADATA|ARISTA02T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR_METADATA|ARISTA03T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR_METADATA|ARISTA04T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR|Ethernet0": { + "name": "ARISTA01T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet4": { + "name": "ARISTA02T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet8": { + "name": "ARISTA03T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet12": { + "name": "ARISTA04T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet16": { + "name": "Servers1", + "port": "eth0" + }, + "DEVICE_NEIGHBOR|Ethernet20": { + "name": "Servers2", + "port": "eth0" + }, + "PORT|Ethernet0": { + "admin_status": "up", + "alias": "Ethernet1/1", + "description": "", + "index": "1", + "lanes": "77,78", + "mtu": "9100", + "pfc_asym": "off", + 
"speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet4": { + "admin_status": "up", + "alias": "Ethernet2/1", + "description": "", + "index": "2", + "lanes": "79,80", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet8": { + "admin_status": "up", + "alias": "Ethernet3/1", + "description": "", + "index": "3", + "lanes": "81,82", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet12": { + "admin_status": "up", + "alias": "Ethernet4/1", + "description": "", + "index": "4", + "lanes": "83,84", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet16": { + "admin_status": "up", + "alias": "Ethernet5/1", + "description": "", + "index": "5", + "lanes": "85,86", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet20": { + "admin_status": "up", + "alias": "Ethernet6/1", + "description": "", + "index": "6", + "lanes": "87,88", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "CABLE_LENGTH|AZURE": { + "Ethernet0": "300m", + "Ethernet4": "300m", + "Ethernet8": "300m", + "Ethernet12": "300m", + "Ethernet16": "300m", + "Ethernet20": "300m" + }, + "VERSIONS|DATABASE": { + "VERSION": "version_4_0_1" + } +} diff --git a/tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-same-cable-output.json b/tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-same-cable-output.json new file mode 100644 index 0000000000..f36bc7c739 --- /dev/null +++ b/tests/db_migrator_input/config_db/sample-t0-edgezoneagg-config-same-cable-output.json @@ -0,0 +1,123 @@ +{ + "DEVICE_NEIGHBOR_METADATA|ARISTA01T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR_METADATA|ARISTA02T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR_METADATA|ARISTA03T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR_METADATA|ARISTA04T1": { + "hwsku": "Arista-VM", + "mgmt_addr": "10.64.247.200", + "type": "EdgeZoneAggregator" + }, + "DEVICE_NEIGHBOR|Ethernet0": { + "name": "ARISTA01T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet4": { + "name": "ARISTA02T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet8": { + "name": "ARISTA03T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet12": { + "name": "ARISTA04T1", + "port": "Ethernet1" + }, + "DEVICE_NEIGHBOR|Ethernet16": { + "name": "Servers1", + "port": "eth0" + }, + "DEVICE_NEIGHBOR|Ethernet20": { + "name": "Servers2", + "port": "eth0" + }, + "PORT|Ethernet0": { + "admin_status": "up", + "alias": "Ethernet1/1", + "description": "", + "index": "1", + "lanes": "77,78", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet4": { + "admin_status": "up", + "alias": "Ethernet2/1", + "description": "", + "index": "2", + "lanes": "79,80", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet8": { + "admin_status": "up", + "alias": "Ethernet3/1", + "description": "", + "index": "3", + "lanes": "81,82", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet12": { + "admin_status": "up", + "alias": "Ethernet4/1", + "description": "", + "index": "4", + "lanes": "83,84", + "mtu": "9100", + "pfc_asym": "off", + "speed": 
"100000", + "tpid": "0x8100" + }, + "PORT|Ethernet16": { + "admin_status": "up", + "alias": "Ethernet5/1", + "description": "", + "index": "5", + "lanes": "85,86", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "PORT|Ethernet20": { + "admin_status": "up", + "alias": "Ethernet6/1", + "description": "", + "index": "6", + "lanes": "87,88", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000", + "tpid": "0x8100" + }, + "CABLE_LENGTH|AZURE": { + "Ethernet0": "300m", + "Ethernet4": "300m", + "Ethernet8": "300m", + "Ethernet12": "300m", + "Ethernet16": "300m", + "Ethernet20": "300m" + }, + "VERSIONS|DATABASE": { + "VERSION": "version_4_0_1" + } +} diff --git a/tests/db_migrator_input/init_cfg.json b/tests/db_migrator_input/init_cfg.json index 634477a4f9..a714b8cdfe 100644 --- a/tests/db_migrator_input/init_cfg.json +++ b/tests/db_migrator_input/init_cfg.json @@ -4,7 +4,7 @@ "auto_restart": "enabled", "has_global_scope": "False", "has_per_asic_scope": "True", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "enabled" }, @@ -12,7 +12,7 @@ "auto_restart": "enabled", "has_global_scope": "False", "has_per_asic_scope": "True", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "enabled" }, @@ -20,7 +20,7 @@ "auto_restart": "disabled", "has_global_scope": "False", "has_per_asic_scope": "True", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "disabled" } diff --git a/tests/db_migrator_input/loglevel_db/logger_tables_input.json b/tests/db_migrator_input/loglevel_db/logger_tables_input.json index 02377ea0a4..ed1bc8057f 100644 --- a/tests/db_migrator_input/loglevel_db/logger_tables_input.json +++ b/tests/db_migrator_input/loglevel_db/logger_tables_input.json @@ -7,5 +7,8 @@ "LOGLEVEL": "SAI_LOG_LEVEL_NOTICE", "LOGOUTPUT": "SYSLOG" }, - "JINJA2_CACHE": {} -} \ No newline at end of file + "JINJA2_CACHE": {}, + "INVALID:INVALID": { + "invalid": "invalid" + } +} diff --git a/tests/db_migrator_input/state_db/fast_reboot_expected.json b/tests/db_migrator_input/state_db/fast_reboot_expected.json new file mode 100644 index 0000000000..e3a7a5fa14 --- /dev/null +++ b/tests/db_migrator_input/state_db/fast_reboot_expected.json @@ -0,0 +1,5 @@ +{ + "FAST_RESTART_ENABLE_TABLE|system": { + "enable": "false" + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/state_db/fast_reboot_input.json b/tests/db_migrator_input/state_db/fast_reboot_input.json new file mode 100644 index 0000000000..7a73a41bfd --- /dev/null +++ b/tests/db_migrator_input/state_db/fast_reboot_input.json @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/tests/db_migrator_test.py b/tests/db_migrator_test.py index 223f5d582e..c06bb11d11 100644 --- a/tests/db_migrator_test.py +++ b/tests/db_migrator_test.py @@ -409,7 +409,7 @@ def test_global_dscp_to_tc_map_migrator(self): dbmgtr_mlnx.migrate() resulting_table = dbmgtr_mlnx.configDB.get_table('PORT_QOS_MAP') assert resulting_table == {} - + class TestMoveLoggerTablesInWarmUpgrade(object): @classmethod def setup_class(cls): @@ -451,6 +451,38 @@ def test_move_logger_tables_in_warm_upgrade(self): diff = DeepDiff(resulting_table, expected_table, ignore_order=True) assert not diff +class TestFastRebootTableModification(object): + @classmethod + def setup_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "2" + + @classmethod + def teardown_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "0" + 
dbconnector.dedicated_dbs['STATE_DB'] = None + + def mock_dedicated_state_db(self): + dbconnector.dedicated_dbs['STATE_DB'] = os.path.join(mock_db_path, 'state_db') + + def test_rename_fast_reboot_table_check_enable(self): + device_info.get_sonic_version_info = get_sonic_version_info_mlnx + dbconnector.dedicated_dbs['STATE_DB'] = os.path.join(mock_db_path, 'state_db', 'fast_reboot_input') + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'empty-config-input') + + import db_migrator + dbmgtr = db_migrator.DBMigrator(None) + dbmgtr.migrate() + + dbconnector.dedicated_dbs['STATE_DB'] = os.path.join(mock_db_path, 'state_db', 'fast_reboot_expected') + expected_db = SonicV2Connector(host='127.0.0.1') + expected_db.connect(expected_db.STATE_DB) + + resulting_table = dbmgtr.stateDB.get_all(dbmgtr.stateDB.STATE_DB, 'FAST_RESTART_ENABLE_TABLE|system') + expected_table = expected_db.get_all(expected_db.STATE_DB, 'FAST_RESTART_ENABLE_TABLE|system') + + diff = DeepDiff(resulting_table, expected_table, ignore_order=True) + assert not diff + class TestWarmUpgrade_to_2_0_2(object): @classmethod def setup_class(cls): @@ -518,3 +550,76 @@ def test_migrate_loopback_int(self): expected_keys = expected_appl_db.get_all(expected_appl_db.APPL_DB, key) diff = DeepDiff(resulting_keys, expected_keys, ignore_order=True) assert not diff + +class TestWarmUpgrade_without_route_weights(object): + @classmethod + def setup_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "2" + + @classmethod + def teardown_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "0" + dbconnector.dedicated_dbs['CONFIG_DB'] = None + dbconnector.dedicated_dbs['APPL_DB'] = None + + def test_migrate_weights_for_nexthops(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'routes_migrate_input') + dbconnector.dedicated_dbs['APPL_DB'] = os.path.join(mock_db_path, 'appl_db', 'routes_migrate_input') + + import db_migrator + dbmgtr = db_migrator.DBMigrator(None) + dbmgtr.migrate() + dbconnector.dedicated_dbs['APPL_DB'] = os.path.join(mock_db_path, 'appl_db', 'routes_migrate_expected') + expected_db = Db() + + # verify migrated appDB + expected_appl_db = SonicV2Connector(host='127.0.0.1') + expected_appl_db.connect(expected_appl_db.APPL_DB) + expected_keys = expected_appl_db.keys(expected_appl_db.APPL_DB, "ROUTE_TABLE:*") + expected_keys.sort() + resulting_keys = dbmgtr.appDB.keys(dbmgtr.appDB.APPL_DB, "ROUTE_TABLE:*") + resulting_keys.sort() + assert expected_keys == resulting_keys + for key in expected_keys: + resulting_keys = dbmgtr.appDB.get_all(dbmgtr.appDB.APPL_DB, key) + expected_keys = expected_appl_db.get_all(expected_appl_db.APPL_DB, key) + diff = DeepDiff(resulting_keys, expected_keys, ignore_order=True) + assert not diff + +class TestWarmUpgrade_T0_EdgeZoneAggregator(object): + @classmethod + def setup_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "2" + + @classmethod + def teardown_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "0" + dbconnector.dedicated_dbs['CONFIG_DB'] = None + + def test_warm_upgrade_t0_edgezone_aggregator_diff_cable_length(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'sample-t0-edgezoneagg-config-input') + import db_migrator + dbmgtr = db_migrator.DBMigrator(None) + dbmgtr.migrate() + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'sample-t0-edgezoneagg-config-output') + expected_db = Db() + + resulting_table = dbmgtr.configDB.get_table('CABLE_LENGTH') + 
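# The migrator is expected to shrink the EdgeZoneAggregator-facing cable lengths (Ethernet0-12) from 300m to 40m while leaving the 5m server-facing entries untouched; compare against the -output fixture. + 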
expected_table = expected_db.cfgdb.get_table('CABLE_LENGTH') + + diff = DeepDiff(resulting_table, expected_table, ignore_order=True) + assert not diff + + def test_warm_upgrade_t0_edgezone_aggregator_same_cable_length(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'sample-t0-edgezoneagg-config-same-cable-input') + import db_migrator + dbmgtr = db_migrator.DBMigrator(None) + dbmgtr.migrate() + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'sample-t0-edgezoneagg-config-same-cable-output') + expected_db = Db() + + resulting_table = dbmgtr.configDB.get_table('CABLE_LENGTH') + expected_table = expected_db.cfgdb.get_table('CABLE_LENGTH') + + diff = DeepDiff(resulting_table, expected_table, ignore_order=True) + assert not diff diff --git a/tests/debug_test.py b/tests/debug_test.py new file mode 100644 index 0000000000..7ac182f434 --- /dev/null +++ b/tests/debug_test.py @@ -0,0 +1,599 @@ +import click +import pytest +import importlib +from unittest.mock import patch, MagicMock +from click.testing import CliRunner + +class TestDebugFrr(object): + @patch('subprocess.check_output', MagicMock(return_value='FRRouting')) + def setup(self): + print('SETUP') + import debug.main as debug + import undebug.main as undebug + importlib.reload(debug) + importlib.reload(undebug) + + # debug + @patch('debug.main.run_command') + def test_debug_bgp_allow_martians(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp'].commands['allow-martians']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp allow-martians']) + + @patch('debug.main.run_command') + def test_debug_bgp_as4(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp'].commands['as4']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp as4']) + + result = runner.invoke(debug.cli.commands['bgp'].commands['as4'], ['segment']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp as4 segment']) + + @patch('debug.main.run_command') + def test_debug_bgp_bestpath(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp'].commands['bestpath'], ['dummyprefix']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp bestpath dummyprefix']) + + @patch('debug.main.run_command') + def test_debug_bgp_keepalives(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp'].commands['keepalives']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp keepalives']) + + result = runner.invoke(debug.cli.commands['bgp'].commands['keepalives'], ['dummyprefix']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp keepalives dummyprefix']) + + @patch('debug.main.run_command') + def test_debug_bgp_neighbor_events(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp'].commands['neighbor-events']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp neighbor-events']) + + result = runner.invoke(debug.cli.commands['bgp'].commands['neighbor-events'], ['dummyprefix']) 
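+ # the optional neighbor argument should be appended verbatim to the vtysh command line 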
+ assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp neighbor-events dummyprefix']) + + @patch('debug.main.run_command') + def test_debug_bgp_nht(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp'].commands['nht']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp nht']) + + @patch('debug.main.run_command') + def test_debug_bgp_pbr(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp'].commands['pbr']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp pbr']) + + result = runner.invoke(debug.cli.commands['bgp'].commands['pbr'], ['error']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp pbr error']) + + @patch('debug.main.run_command') + def test_debug_bgp_update_groups(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp'].commands['update-groups']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp update-groups']) + + @patch('debug.main.run_command') + def test_debug_bgp_updates(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp'].commands['updates']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp updates']) + + result = runner.invoke(debug.cli.commands['bgp'].commands['updates'], ['prefix']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp updates prefix']) + + result = runner.invoke(debug.cli.commands['bgp'].commands['updates'], ['prefix', 'dummyprefix']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp updates prefix dummyprefix']) + + + @patch('debug.main.run_command') + def test_debug_bgp_zebra(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['bgp'].commands['zebra']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp zebra']) + + result = runner.invoke(debug.cli.commands['bgp'].commands['zebra'], ['dummyprefix']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp zebra prefix dummyprefix']) + + @patch('debug.main.run_command') + def test_debug_zebra_dplane(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['zebra'].commands['dplane']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra dplane']) + + result = runner.invoke(debug.cli.commands['zebra'].commands['dplane'], ['detailed']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra dplane detailed']) + + @patch('debug.main.run_command') + def test_debug_zebra_events(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['zebra'].commands['events']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra events']) + + @patch('debug.main.run_command') + def test_debug_zebra_fpm(self, run_command): + import debug.main as debug + runner = CliRunner() + 
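# 'debug zebra fpm' takes no extra arguments, so a bare vtysh invocation is expected + 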
result = runner.invoke(debug.cli.commands['zebra'].commands['fpm']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra fpm']) + + @patch('debug.main.run_command') + def test_debug_zebra_kernel(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['zebra'].commands['kernel']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra kernel']) + + @patch('debug.main.run_command') + def test_debug_zebra_nht(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['zebra'].commands['nht']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra nht']) + + @patch('debug.main.run_command') + def test_debug_zebra_packet(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['zebra'].commands['packet']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra packet']) + + @patch('debug.main.run_command') + def test_debug_zebra_rib(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['zebra'].commands['rib']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra rib']) + + result = runner.invoke(debug.cli.commands['zebra'].commands['rib'], ['detailed']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra rib detailed']) + + @patch('debug.main.run_command') + def test_debug_zebra_vxlan(self, run_command): + import debug.main as debug + runner = CliRunner() + result = runner.invoke(debug.cli.commands['zebra'].commands['vxlan']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra vxlan']) + + # undebug + @patch('undebug.main.run_command') + def test_undebug_bgp_allow_martians(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp'].commands['allow-martians']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp allow-martians']) + + @patch('undebug.main.run_command') + def test_undebug_bgp_as4(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp'].commands['as4']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp as4']) + + result = runner.invoke(undebug.cli.commands['bgp'].commands['as4'], ['segment']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp as4 segment']) + + @patch('undebug.main.run_command') + def test_undebug_bgp_bestpath(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp'].commands['bestpath'], ['dummyprefix']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp bestpath dummyprefix']) + + @patch('undebug.main.run_command') + def test_undebug_bgp_keepalives(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp'].commands['keepalives']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp 
keepalives']) + + result = runner.invoke(undebug.cli.commands['bgp'].commands['keepalives'], ['dummyprefix']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp keepalives dummyprefix']) + + @patch('undebug.main.run_command') + def test_undebug_bgp_neighbor_events(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp'].commands['neighbor-events']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp neighbor-events']) + + result = runner.invoke(undebug.cli.commands['bgp'].commands['neighbor-events'], ['dummyprefix']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp neighbor-events dummyprefix']) + + @patch('undebug.main.run_command') + def test_undebug_bgp_nht(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp'].commands['nht']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp nht']) + + @patch('undebug.main.run_command') + def test_undebug_bgp_pbr(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp'].commands['pbr']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp pbr']) + + result = runner.invoke(undebug.cli.commands['bgp'].commands['pbr'], ['error']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp pbr error']) + + @patch('undebug.main.run_command') + def test_undebug_bgp_update_groups(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp'].commands['update-groups']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp update-groups']) + + @patch('undebug.main.run_command') + def test_undebug_bgp_updates(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp'].commands['updates']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp updates']) + + result = runner.invoke(undebug.cli.commands['bgp'].commands['updates'], ['prefix']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp updates prefix']) + + result = runner.invoke(undebug.cli.commands['bgp'].commands['updates'], ['prefix', 'dummyprefix']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp updates prefix dummyprefix']) + + + @patch('undebug.main.run_command') + def test_undebug_bgp_zebra(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = runner.invoke(undebug.cli.commands['bgp'].commands['zebra']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp zebra']) + + result = runner.invoke(undebug.cli.commands['bgp'].commands['zebra'], ['dummyprefix']) + assert result.exit_code == 0 + run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp zebra prefix dummyprefix']) + + @patch('undebug.main.run_command') + def test_undebug_zebra_dplane(self, run_command): + import undebug.main as undebug + runner = CliRunner() + result = 
runner.invoke(undebug.cli.commands['zebra'].commands['dplane'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra dplane'])
+
+        result = runner.invoke(undebug.cli.commands['zebra'].commands['dplane'], ['detailed'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra dplane detailed'])
+
+    @patch('undebug.main.run_command')
+    def test_undebug_zebra_events(self, run_command):
+        import undebug.main as undebug
+        runner = CliRunner()
+        result = runner.invoke(undebug.cli.commands['zebra'].commands['events'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra events'])
+
+    @patch('undebug.main.run_command')
+    def test_undebug_zebra_fpm(self, run_command):
+        import undebug.main as undebug
+        runner = CliRunner()
+        result = runner.invoke(undebug.cli.commands['zebra'].commands['fpm'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra fpm'])
+
+    @patch('undebug.main.run_command')
+    def test_undebug_zebra_kernel(self, run_command):
+        import undebug.main as undebug
+        runner = CliRunner()
+        result = runner.invoke(undebug.cli.commands['zebra'].commands['kernel'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra kernel'])
+
+    @patch('undebug.main.run_command')
+    def test_undebug_zebra_nht(self, run_command):
+        import undebug.main as undebug
+        runner = CliRunner()
+        result = runner.invoke(undebug.cli.commands['zebra'].commands['nht'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra nht'])
+
+    @patch('undebug.main.run_command')
+    def test_undebug_zebra_packet(self, run_command):
+        import undebug.main as undebug
+        runner = CliRunner()
+        result = runner.invoke(undebug.cli.commands['zebra'].commands['packet'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra packet'])
+
+    @patch('undebug.main.run_command')
+    def test_undebug_zebra_rib(self, run_command):
+        import undebug.main as undebug
+        runner = CliRunner()
+        result = runner.invoke(undebug.cli.commands['zebra'].commands['rib'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra rib'])
+
+        result = runner.invoke(undebug.cli.commands['zebra'].commands['rib'], ['detailed'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra rib detailed'])
+
+    @patch('undebug.main.run_command')
+    def test_undebug_zebra_vxlan(self, run_command):
+        import undebug.main as undebug
+        runner = CliRunner()
+        result = runner.invoke(undebug.cli.commands['zebra'].commands['vxlan'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra vxlan'])
+
+class TestDebugQuagga(object):
+    @patch('subprocess.check_output', MagicMock(return_value='quagga'))
+    def setup(self):
+        print('SETUP')
+        import debug.main as debug
+        import undebug.main as undebug
+        importlib.reload(debug)
+        importlib.reload(undebug)
+
+    # debug
+    @patch('debug.main.run_command')
+    def test_debug_bgp(self, run_command):
+        import debug.main as debug
+        runner = CliRunner()
+        result = runner.invoke(debug.cli.commands['bgp'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp'])
+
+    @patch('debug.main.run_command')
+    def test_debug_bgp_events(self, run_command):
+        import debug.main as debug
+        runner = CliRunner()
+        result = runner.invoke(debug.cli.commands['bgp'].commands['events'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp events'])
+
+    @patch('debug.main.run_command')
+    def test_debug_bgp_updates(self, run_command):
+        import debug.main as debug
+        runner = CliRunner()
+        result = runner.invoke(debug.cli.commands['bgp'].commands['updates'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp updates'])
+
+    @patch('debug.main.run_command')
+    def test_debug_bgp_as4(self, run_command):
+        import debug.main as debug
+        runner = CliRunner()
+        result = runner.invoke(debug.cli.commands['bgp'].commands['as4'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp as4'])
+
+    @patch('debug.main.run_command')
+    def test_debug_bgp_filters(self, run_command):
+        import debug.main as debug
+        runner = CliRunner()
+        result = runner.invoke(debug.cli.commands['bgp'].commands['filters'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp filters'])
+
+    @patch('debug.main.run_command')
+    def test_debug_bgp_fsm(self, run_command):
+        import debug.main as debug
+        runner = CliRunner()
+        result = runner.invoke(debug.cli.commands['bgp'].commands['fsm'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp fsm'])
+
+    @patch('debug.main.run_command')
+    def test_debug_bgp_keepalives(self, run_command):
+        import debug.main as debug
+        runner = CliRunner()
+        result = runner.invoke(debug.cli.commands['bgp'].commands['keepalives'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp keepalives'])
+
+    @patch('debug.main.run_command')
+    def test_debug_bgp_zebra(self, run_command):
+        import debug.main as debug
+        runner = CliRunner()
+        result = runner.invoke(debug.cli.commands['bgp'].commands['zebra'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug bgp zebra'])
+
+    @patch('debug.main.run_command')
+    def test_debug_zebra_events(self, run_command):
+        import debug.main as debug
+        runner = CliRunner()
+        result = runner.invoke(debug.cli.commands['zebra'].commands['events'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra events'])
+
+    @patch('debug.main.run_command')
+    def test_debug_zebra_fpm(self, run_command):
+        import debug.main as debug
+        runner = CliRunner()
+        result = runner.invoke(debug.cli.commands['zebra'].commands['fpm'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra fpm'])
+
+    @patch('debug.main.run_command')
+    def test_debug_zebra_kernel(self, run_command):
+        import debug.main as debug
+        runner = CliRunner()
+        result = runner.invoke(debug.cli.commands['zebra'].commands['kernel'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra kernel'])
+
+    @patch('debug.main.run_command')
+    def test_debug_zebra_packet(self, run_command):
+        import debug.main as debug
+        runner = CliRunner()
+        result = runner.invoke(debug.cli.commands['zebra'].commands['packet'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra packet'])
+
+    @patch('debug.main.run_command')
+    def test_debug_zebra_rib(self, run_command):
+        import debug.main as debug
+        runner = CliRunner()
+        result = runner.invoke(debug.cli.commands['zebra'].commands['rib'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'debug zebra rib'])
+
+    # undebug
+    @patch('undebug.main.run_command')
+    def test_undebug_bgp(self, run_command):
+        import undebug.main as undebug
+        runner = CliRunner()
+        result = runner.invoke(undebug.cli.commands['bgp'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp'])
+
+    @patch('undebug.main.run_command')
+    def test_undebug_bgp_events(self, run_command):
+        import undebug.main as undebug
+        runner = CliRunner()
+        result = runner.invoke(undebug.cli.commands['bgp'].commands['events'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp events'])
+
+    @patch('undebug.main.run_command')
+    def test_undebug_bgp_updates(self, run_command):
+        import undebug.main as undebug
+        runner = CliRunner()
+        result = runner.invoke(undebug.cli.commands['bgp'].commands['updates'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp updates'])
+
+    @patch('undebug.main.run_command')
+    def test_undebug_bgp_as4(self, run_command):
+        import undebug.main as undebug
+        runner = CliRunner()
+        result = runner.invoke(undebug.cli.commands['bgp'].commands['as4'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp as4'])
+
+    @patch('undebug.main.run_command')
+    def test_undebug_bgp_filters(self, run_command):
+        import undebug.main as undebug
+        runner = CliRunner()
+        result = runner.invoke(undebug.cli.commands['bgp'].commands['filters'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp filters'])
+
+    @patch('undebug.main.run_command')
+    def test_undebug_bgp_fsm(self, run_command):
+        import undebug.main as undebug
+        runner = CliRunner()
+        result = runner.invoke(undebug.cli.commands['bgp'].commands['fsm'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp fsm'])
+
+    @patch('undebug.main.run_command')
+    def test_undebug_bgp_keepalives(self, run_command):
+        import undebug.main as undebug
+        runner = CliRunner()
+        result = runner.invoke(undebug.cli.commands['bgp'].commands['keepalives'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp keepalives'])
+
+    @patch('undebug.main.run_command')
+    def test_undebug_bgp_zebra(self, run_command):
+        import undebug.main as undebug
+        runner = CliRunner()
+        result = runner.invoke(undebug.cli.commands['bgp'].commands['zebra'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug bgp zebra'])
+
+    @patch('undebug.main.run_command')
+    def test_undebug_zebra_events(self, run_command):
+        import undebug.main as undebug
+        runner = CliRunner()
+        result = runner.invoke(undebug.cli.commands['zebra'].commands['events'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra events'])
+
+    @patch('undebug.main.run_command')
+    def test_undebug_zebra_fpm(self, run_command):
+        import undebug.main as undebug
+        runner = CliRunner()
+        result = runner.invoke(undebug.cli.commands['zebra'].commands['fpm'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra fpm'])
+
+    @patch('undebug.main.run_command')
+    def test_undebug_zebra_kernel(self, run_command):
+        import undebug.main as undebug
+        runner = CliRunner()
+        result = runner.invoke(undebug.cli.commands['zebra'].commands['kernel'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra kernel'])
+
+    @patch('undebug.main.run_command')
+    def test_undebug_zebra_packet(self, run_command):
+        import undebug.main as undebug
+        runner = CliRunner()
+        result = runner.invoke(undebug.cli.commands['zebra'].commands['packet'])
+        assert result.exit_code == 0
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra packet'])
+
+    @patch('undebug.main.run_command')
+    def test_undebug_zebra_rib(self, run_command):
+        import undebug.main as undebug
+        runner = CliRunner()
+        result = runner.invoke(undebug.cli.commands['zebra'].commands['rib'])
+        assert result.exit_code == 0
+
+        run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra rib'])
+
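Every test in the debug/undebug suites above follows the same three-step pattern: patch run_command, invoke the click command through CliRunner, and assert on the exact vtysh argument vector. A minimal self-contained sketch of that pattern follows; the dplane command defined here is illustrative only, not the module under test.

# Sketch of the test pattern used above: a click command that shells out
# through run_command, exercised with CliRunner while run_command is a mock.
import click
from click.testing import CliRunner
from unittest.mock import MagicMock

run_command = MagicMock()  # stands in for the patched undebug.main.run_command

@click.command()
def dplane():
    """Hypothetical command that disables zebra dplane debugging via vtysh."""
    run_command(['sudo', 'vtysh', '-c', 'no debug zebra dplane'])

def test_dplane():
    result = CliRunner().invoke(dplane)
    assert result.exit_code == 0
    run_command.assert_called_with(['sudo', 'vtysh', '-c', 'no debug zebra dplane'])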
diff --git a/tests/fabricstat_test.py b/tests/fabricstat_test.py
index fb76bb41d7..7c2174b761 100644
--- a/tests/fabricstat_test.py
+++ b/tests/fabricstat_test.py
@@ -120,6 +120,36 @@
 7   0  93 up
 """
 
+class TestFabricStat(object):
+    @classmethod
+    def setup_class(cls):
+        print("SETUP")
+        os.environ["PATH"] += os.pathsep + scripts_path
+        os.environ["UTILITIES_UNIT_TESTING"] = "1"
+
+    def test_single_show_fabric_counters(self):
+        from .mock_tables import mock_single_asic
+        import importlib
+        importlib.reload(mock_single_asic)
+        from .mock_tables import dbconnector
+        dbconnector.load_database_config()
+        dbconnector.load_namespace_config()
+
+        return_code, result = get_result_and_return_code('fabricstat')
+        print("return_code: {}".format(return_code))
+        print("result = {}".format(result))
+        assert return_code == 0
+        assert result == multi_asic_fabric_counters_asic0
+
+    @classmethod
+    def teardown_class(cls):
+        print("TEARDOWN")
+        os.environ["PATH"] = os.pathsep.join(
+            os.environ["PATH"].split(os.pathsep)[:-1])
+        os.environ["UTILITIES_UNIT_TESTING"] = "0"
+        os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = ""
+
+
 class TestMultiAsicFabricStat(object):
     @classmethod
     def setup_class(cls):
diff --git a/tests/feature_test.py b/tests/feature_test.py
index fa5c2870ea..8706e2a92f 100644
--- a/tests/feature_test.py
+++ b/tests/feature_test.py
@@ -130,6 +130,32 @@
 telemetry   enabled
 """
 
+show_feature_autorestart_missing_output="""\
+Feature     AutoRestart
+----------  --------------
+bar         unknown
+bgp         enabled
+database    always_enabled
+dhcp_relay  enabled
+lldp        enabled
+nat         enabled
+pmon        enabled
+radv        enabled
+restapi     enabled
+sflow       enabled
+snmp        enabled
+swss        enabled
+syncd       enabled
+teamd       enabled
+telemetry   enabled
+"""
+
+show_feature_autorestart_bar_missing_output="""\
+Feature    AutoRestart
+---------  -------------
+bar        unknown
+"""
+
 show_feature_bgp_autorestart_output="""\
 Feature    AutoRestart
 ---------  -------------
@@ -277,6 +303,25 @@ def test_show_unknown_autorestart_status(self, get_cmd_module):
         print(result.output)
         assert result.exit_code == 1
 
+    def test_show_feature_autorestart_missing(self, get_cmd_module):
+        (config, show) = get_cmd_module
+        db = Db()
+        dbconn = db.db
+        db.cfgdb.set_entry("FEATURE", "bar", { "state": "enabled" })
+        runner = CliRunner()
+
+        result = runner.invoke(show.cli.commands["feature"].commands["autorestart"], obj=db)
+        print(result.exit_code)
+        print(result.output)
+        assert result.exit_code == 0
+        assert result.output == show_feature_autorestart_missing_output
+
+        result = runner.invoke(show.cli.commands["feature"].commands["autorestart"], ["bar"], obj=db)
+        print(result.exit_code)
+        print(result.output)
+        assert result.exit_code == 0
+        assert result.output == show_feature_autorestart_bar_missing_output
+
     def test_config_bgp_feature_state(self, get_cmd_module):
         (config, show) = get_cmd_module
         db = Db()
diff --git a/tests/generic_config_updater/files/feature_patch_application_test_failure.json b/tests/generic_config_updater/files/feature_patch_application_test_failure.json
new file mode 100644
index 0000000000..80c523ddfc
--- /dev/null
+++ b/tests/generic_config_updater/files/feature_patch_application_test_failure.json
@@ -0,0 +1,35 @@
+{
+    "RDMA_SHARED_POOL_SIZE_CHANGE__FAILURE": {
+        "desc": "For RDMA shared pool size tuning- adjust both shared pool and headroom pool",
+        "current_config": {
+            "BUFFER_POOL": {
+                "ingress_lossless_pool": {
+                    "xoff": "4194112",
+                    "type": "ingress",
+                    "mode": "dynamic",
+                    "size": "10875072"
+                },
+                "egress_lossless_pool": {
+                    "type": "egress",
+                    "mode": "static",
+                    "size": "15982720"
+                },
+                "egress_lossy_pool": {
+                    "type": "egress",
+                    "mode": "dynamic",
+                    "size": "9243812"
+                }
+            }
+        },
+        "patch": [
+            {
+                "op": "replace",
+                "path": "/BUFFER_POOL/ingress_lossless_pool/xoff",
+                "value": "invalid_xoff"
+            }
+        ],
+        "expected_error_substrings": [
+            "Given patch will produce invalid config"
+        ]
+    }
+}
diff --git a/tests/generic_config_updater/files/feature_patch_application_test_success.json b/tests/generic_config_updater/files/feature_patch_application_test_success.json
new file mode 100644
index 0000000000..7ca6cab4bb
--- /dev/null
+++ b/tests/generic_config_updater/files/feature_patch_application_test_success.json
@@ -0,0 +1,62 @@
+{
+    "RDMA_SHARED_POOL_SIZE_CHANGE__SUCCESS": {
+        "desc": "For RDMA shared pool size tuning- adjust both shared pool and headroom pool",
+        "current_config": {
+            "BUFFER_POOL": {
+                "ingress_lossless_pool": {
+                    "xoff": "4194112",
+                    "type": "ingress",
+                    "mode": "dynamic",
+                    "size": "10875072"
+                },
+                "egress_lossless_pool": {
+                    "type": "egress",
+                    "mode": "static",
+                    "size": "15982720"
+                },
+                "egress_lossy_pool": {
+                    "type": "egress",
+                    "mode": "dynamic",
+                    "size": "9243812"
+                }
+            }
+        },
+        "patch": [
+            {
+                "op": "replace",
+                "path": "/BUFFER_POOL/ingress_lossless_pool/xoff",
+                "value": "2155712"
+            },
+            {
+                "op": "replace",
+                "path": "/BUFFER_POOL/ingress_lossless_pool/size",
+                "value": "12913472"
+            },
+            {
+                "op": "replace",
+                "path": "/BUFFER_POOL/egress_lossy_pool/size",
+                "value": "5200000"
+            }
+        ],
+        "expected_config": {
+            "BUFFER_POOL": {
+                "ingress_lossless_pool": {
+                    "xoff": "2155712",
+                    "type": "ingress",
+                    "mode": "dynamic",
+                    "size": "12913472"
+                },
+                "egress_lossless_pool": {
+                    "type": "egress",
+                    "mode": "static",
+                    "size": "15982720"
+                },
+                "egress_lossy_pool": {
+                    "type": "egress",
+                    "mode": "dynamic",
+                    "size": "5200000"
+                }
+            }
+        }
+    }
+}
diff --git a/tests/generic_config_updater/gcu_feature_patch_application_test.py b/tests/generic_config_updater/gcu_feature_patch_application_test.py
new file mode 100644
index 0000000000..9a52a04732
--- /dev/null
+++ b/tests/generic_config_updater/gcu_feature_patch_application_test.py
@@ -0,0 +1,117 @@
+import jsonpatch
+import unittest
+import copy
+from unittest.mock import MagicMock, Mock
+from mock import patch
+
+import generic_config_updater.change_applier
+import generic_config_updater.patch_sorter as ps
+import generic_config_updater.generic_updater as gu
+from .gutest_helpers import Files
+from generic_config_updater.gu_common import ConfigWrapper, PatchWrapper
+
+running_config = {}
+
+def set_entry(config_db, tbl, key, data):
+    global running_config
+    if data != None:
+        if tbl not in running_config:
+            running_config[tbl] = {}
+        running_config[tbl][key] = data
+    else:
+        assert tbl in running_config
+        assert key in running_config[tbl]
+        running_config[tbl].pop(key)
+        if not running_config[tbl]:
+            running_config.pop(tbl)
+
+def get_running_config():
+    return running_config
+
+class TestFeaturePatchApplication(unittest.TestCase):
+    def setUp(self):
+        self.config_wrapper = ConfigWrapper()
+
+    def test_feature_patch_application_success(self):
+        # Format of the JSON file containing the test-cases:
+        #
+        # {
+        #     "<unique-test-case-name>": {
+        #         "desc": "<test case description>",
+        #         "current_config": <current config object>,
+        #         "patch": <JSON patch>,
+        #         "expected_config": <config object after applying the patch>
+        #     },
+        #     .
+        #     .
+        #     .
+        # }
+        data = Files.FEATURE_PATCH_APPLICATION_TEST_SUCCESS
+
+        for test_case_name in data:
+            with self.subTest(name=test_case_name):
+                self.run_single_success_case_applier(data[test_case_name])
+
+    def test_feature_patch_application_failure(self):
+        # Format of the JSON file containing the test-cases:
+        #
+        # {
+        #     "<unique-test-case-name>": {
+        #         "desc": "<test case description>",
+        #         "current_config": <current config object>,
+        #         "patch": <JSON patch>,
+        #         "expected_error_substrings": <list of expected error substrings>
+        #     },
+        #     .
+        #     .
+        #     .
+        # }
+        data = Files.FEATURE_PATCH_APPLICATION_TEST_FAILURE
+
+        for test_case_name in data:
+            with self.subTest(name=test_case_name):
+                self.run_single_failure_case_applier(data[test_case_name])
+
+    def create_patch_applier(self, config):
+        global running_config
+        running_config = copy.deepcopy(config)
+        config_wrapper = self.config_wrapper
+        config_wrapper.get_config_db_as_json = MagicMock(side_effect=get_running_config)
+        change_applier = generic_config_updater.change_applier.ChangeApplier()
+        change_applier._get_running_config = MagicMock(side_effect=get_running_config)
+        patch_wrapper = PatchWrapper(config_wrapper)
+        return gu.PatchApplier(config_wrapper=config_wrapper, patch_wrapper=patch_wrapper, changeapplier=change_applier)
+
+    @patch("generic_config_updater.change_applier.get_config_db")
+    @patch("generic_config_updater.change_applier.set_config")
+    def run_single_success_case_applier(self, data, mock_set, mock_db):
+        current_config = data["current_config"]
+        mock_set.side_effect = set_entry
+        expected_config = data["expected_config"]
+        patch = jsonpatch.JsonPatch(data["patch"])
+        patch_applier = self.create_patch_applier(current_config)
+        patch_applier.apply(patch)
+        result_config = patch_applier.config_wrapper.get_config_db_as_json()
+
+        self.assertEqual(expected_config, result_config)
+
+    @patch("generic_config_updater.change_applier.get_config_db")
+    def run_single_failure_case_applier(self, data, mock_db):
+        current_config = data["current_config"]
+        patch = jsonpatch.JsonPatch(data["patch"])
+        expected_error_substrings = data["expected_error_substrings"]
+
+        try:
+            patch_applier = self.create_patch_applier(current_config)
+            patch_applier.apply(patch)
+            self.fail("An exception was supposed to be thrown")
+        except Exception as ex:
+            notfound_substrings = []
+            error = str(ex)
+
+            for substring in expected_error_substrings:
+                if substring not in error:
+                    notfound_substrings.append(substring)
+
+            if notfound_substrings:
+                self.fail(f"Did not find the expected substrings {notfound_substrings} in the error: '{error}'")
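The applier test above drives generic_config_updater end to end, but the core mechanism it exercises is the jsonpatch library. A minimal standalone sketch of the behavior it relies on follows; the config values here are illustrative and mirror the fixtures above.

# Standalone jsonpatch usage, independent of generic_config_updater.
import jsonpatch

current = {"BUFFER_POOL": {"ingress_lossless_pool": {"xoff": "4194112"}}}
patch = jsonpatch.JsonPatch([
    {"op": "replace",
     "path": "/BUFFER_POOL/ingress_lossless_pool/xoff",
     "value": "2155712"},
])
updated = patch.apply(current)  # returns a patched copy; original is untouched
assert updated["BUFFER_POOL"]["ingress_lossless_pool"]["xoff"] == "2155712"
assert current["BUFFER_POOL"]["ingress_lossless_pool"]["xoff"] == "4194112"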
diff --git a/tests/generic_config_updater/generic_updater_test.py b/tests/generic_config_updater/generic_updater_test.py
index aab2eae275..96c25e3552 100644
--- a/tests/generic_config_updater/generic_updater_test.py
+++ b/tests/generic_config_updater/generic_updater_test.py
@@ -526,7 +526,7 @@ def setUp(self):
     def test_apply_patch__creates_applier_and_apply(self):
         # Arrange
         patch_applier = Mock()
-        patch_applier.apply.side_effect = create_side_effect_dict({(str(Files.SINGLE_OPERATION_SONIC_YANG_PATCH),): 0})
+        patch_applier.apply.side_effect = create_side_effect_dict({(str(Files.SINGLE_OPERATION_SONIC_YANG_PATCH), "True"): 0})
 
         factory = Mock()
         factory.create_patch_applier.side_effect = \
@@ -548,7 +548,7 @@ def test_apply_patch__creates_applier_and_apply(self):
                                   self.any_ignore_paths)
 
         # Assert
-        patch_applier.apply.assert_has_calls([call(Files.SINGLE_OPERATION_SONIC_YANG_PATCH)])
+        patch_applier.apply.assert_has_calls([call(Files.SINGLE_OPERATION_SONIC_YANG_PATCH, True)])
 
     def test_replace__creates_replacer_and_replace(self):
         # Arrange
diff --git a/tests/generic_config_updater/gu_common_test.py b/tests/generic_config_updater/gu_common_test.py
index dc18323661..a319a25ead 100644
--- a/tests/generic_config_updater/gu_common_test.py
+++ b/tests/generic_config_updater/gu_common_test.py
@@ -3,8 +3,10 @@ import jsonpatch
 import sonic_yang
 import unittest
-from unittest.mock import MagicMock, Mock, patch
+import mock
+from unittest.mock import MagicMock, Mock
 
+from mock import patch
 from .gutest_helpers import create_side_effect_dict, Files
 import generic_config_updater.gu_common as gu_common
@@ -69,17 +71,61 @@ def setUp(self):
         self.config_wrapper_mock = gu_common.ConfigWrapper()
         self.config_wrapper_mock.get_config_db_as_json=MagicMock(return_value=Files.CONFIG_DB_AS_JSON)
 
-    def test_validate_field_operation_legal(self):
+    @patch("sonic_py_common.device_info.get_sonic_version_info", mock.Mock(return_value={"asic_type": "mellanox", "build_version": "SONiC.20181131"}))
+    def test_validate_field_operation_legal__pfcwd(self):
         old_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "60"}}}
         target_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "40"}}}
         config_wrapper = gu_common.ConfigWrapper()
         config_wrapper.validate_field_operation(old_config, target_config)
-
-    def test_validate_field_operation_illegal(self):
-        old_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": 60}}}
+
+    def test_validate_field_operation_illegal__pfcwd(self):
+        old_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "60"}}}
         target_config = {"PFC_WD": {"GLOBAL": {}}}
         config_wrapper = gu_common.ConfigWrapper()
         self.assertRaises(gu_common.IllegalPatchOperationError, config_wrapper.validate_field_operation, old_config, target_config)
+
+    @patch("sonic_py_common.device_info.get_sonic_version_info", mock.Mock(return_value={"asic_type": "invalid-asic", "build_version": "SONiC.20181131"}))
+    def test_validate_field_modification_illegal__pfcwd(self):
+        old_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "60"}}}
+        target_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "80"}}}
+        config_wrapper = gu_common.ConfigWrapper()
+        self.assertRaises(gu_common.IllegalPatchOperationError, config_wrapper.validate_field_operation, old_config, target_config)
+
+    def test_validate_field_operation_legal__rm_loopback1(self):
+        old_config = {
+            "LOOPBACK_INTERFACE": {
+                "Loopback0": {},
+                "Loopback0|10.1.0.32/32": {},
+                "Loopback1": {},
+                "Loopback1|10.1.0.33/32": {}
+            }
+        }
+        target_config = {
+            "LOOPBACK_INTERFACE": {
+                "Loopback0": {},
+                "Loopback0|10.1.0.32/32": {}
+            }
+        }
+        config_wrapper = gu_common.ConfigWrapper()
+        config_wrapper.validate_field_operation(old_config, target_config)
+
+    def test_validate_field_operation_illegal__rm_loopback0(self):
+        old_config = {
+            "LOOPBACK_INTERFACE": {
+                "Loopback0": {},
+                "Loopback0|10.1.0.32/32": {},
+                "Loopback1": {},
+                "Loopback1|10.1.0.33/32": {}
+            }
+        }
+        target_config = {
+            "LOOPBACK_INTERFACE": {
+                "Loopback1": {},
+                "Loopback1|10.1.0.33/32": {}
+            }
+        }
+        config_wrapper = gu_common.ConfigWrapper()
+        self.assertRaises(gu_common.IllegalPatchOperationError, config_wrapper.validate_field_operation, old_config, target_config)
 
     def test_ctor__default_values_set(self):
         config_wrapper = gu_common.ConfigWrapper()
diff --git a/tests/generic_config_updater/service_validator_test.py b/tests/generic_config_updater/service_validator_test.py
index 2f51771d33..f14a3ad7b0 100644
--- a/tests/generic_config_updater/service_validator_test.py
+++ b/tests/generic_config_updater/service_validator_test.py
@@ -6,7 +6,7 @@ import os
 from collections import defaultdict
 from unittest.mock import patch
 
-from generic_config_updater.services_validator import vlan_validator, rsyslog_validator, caclmgrd_validator
+from generic_config_updater.services_validator import vlan_validator, rsyslog_validator, caclmgrd_validator, vlanintf_validator
 import generic_config_updater.gu_common
@@ -152,6 +152,46 @@ def mock_time_sleep_call(sleep_time):
     { "cmd": "systemctl restart rsyslog", "rc": 1 },    # restart again; fails
 ]
 
+test_vlanintf_data = [
+        { "old": {}, "upd": {}, "cmd": "" },
+        {
+            "old": { "VLAN_INTERFACE": {
+                "Vlan1000": {},
+                "Vlan1000|192.168.0.1/21": {} } },
+            "upd": { "VLAN_INTERFACE": {
+                "Vlan1000": {},
+                "Vlan1000|192.168.0.1/21": {} } },
+            "cmd": ""
+        },
+        {
+            "old": { "VLAN_INTERFACE": {
+                "Vlan1000": {},
+                "Vlan1000|192.168.0.1/21": {} } },
+            "upd": { "VLAN_INTERFACE": {
+                "Vlan1000": {},
+                "Vlan1000|192.168.0.2/21": {} } },
+            "cmd": "ip neigh flush dev Vlan1000 192.168.0.1/21"
+        },
+        {
+            "old": { "VLAN_INTERFACE": {
+                "Vlan1000": {},
+                "Vlan1000|192.168.0.1/21": {} } },
+            "upd": { "VLAN_INTERFACE": {
+                "Vlan1000": {},
+                "Vlan1000|192.168.0.1/21": {},
+                "Vlan1000|192.168.0.2/21": {} } },
+            "cmd": ""
+        },
+        {
+            "old": { "VLAN_INTERFACE": {
+                "Vlan1000": {},
+                "Vlan1000|192.168.0.1/21": {} } },
+            "upd": {},
+            "cmd": "ip neigh flush dev Vlan1000 192.168.0.1/21"
+        }
+    ]
+
+
 class TestServiceValidator(unittest.TestCase):
 
     @patch("generic_config_updater.change_applier.os.system")
@@ -177,6 +217,15 @@ def test_change_apply_os_system(self, mock_os_sys):
         rc = rsyslog_validator("", "", "")
         assert not rc, "rsyslog_validator expected to fail"
 
+        os_system_calls = []
+        os_system_call_index = 0
+        for entry in test_vlanintf_data:
+            if entry["cmd"]:
+                os_system_calls.append({"cmd": entry["cmd"], "rc": 0 })
+            msg = "case failed: {}".format(str(entry))
+
+            vlanintf_validator(entry["old"], entry["upd"], None)
+
     @patch("generic_config_updater.services_validator.time.sleep")
     def test_change_apply_time_sleep(self, mock_time_sleep):
         global time_sleep_calls, time_sleep_call_index
diff --git a/tests/installer_bootloader_grub_test.py b/tests/installer_bootloader_grub_test.py
index 10c9dc5ba7..ff35e13b37 100644
--- a/tests/installer_bootloader_grub_test.py
+++ b/tests/installer_bootloader_grub_test.py
@@ -53,11 +53,3 @@ def test_set_fips_grub():
 
     # Cleanup the _tmp_host folder
     shutil.rmtree(tmp_host_path)
-
-def test_verify_image():
-
-    bootloader = grub.GrubBootloader()
-    image = f'{grub.IMAGE_PREFIX}expeliarmus-{grub.IMAGE_PREFIX}abcde'
-
-    # command should fail
-    assert not bootloader.verify_image_sign(image)
diff --git a/tests/ip_config_input/patch_ipv6.test b/tests/ip_config_input/patch_ipv6.test
deleted file mode 100644
index 00b43fda4c..0000000000
--- a/tests/ip_config_input/patch_ipv6.test
+++ /dev/null
@@ -1,6 +0,0 @@
-[
-    {
-        "path": "/INTERFACE/Ethernet12|FC00::1~1126",
-        "op": "remove"
-    }
-]
diff --git a/tests/ip_config_test.py b/tests/ip_config_test.py
index f315b11d82..2f262a4a09 100644
--- a/tests/ip_config_test.py
+++ b/tests/ip_config_test.py
@@ -1,5 +1,3 @@
-import json
-import jsonpatch
 import os
 import traceback
 from unittest import mock
@@ -14,9 +12,6 @@ from utilities_common.db import Db
 import utilities_common.bgp_util as bgp_util
 
-test_path = os.path.dirname(os.path.abspath(__file__))
-ip_config_input_path = os.path.join(test_path, "ip_config_input")
-
 ERROR_MSG = "Error: IP address is not valid"
 
 INVALID_VRF_MSG ="""\
@@ -243,21 +238,6 @@ def test_add_del_interface_shortened_ipv6_with_leading_zeros(self):
         assert result.exit_code != 0
         assert ('Ethernet68', '3000::1/64') not in db.cfgdb.get_table('INTERFACE')
 
-    def test_remove_interface_case_sensitive_mock_ipv6_w_apply_patch(self):
-        runner = CliRunner()
-        any_patch_as_json = [{"op": "remove", "path": "/INTERFACE/Ethernet12|FC00::1~1126"}]
-        any_patch = jsonpatch.JsonPatch(any_patch_as_json)
-        any_patch_as_text = json.dumps(any_patch_as_json)
-        ipv6_patch_file = os.path.join(ip_config_input_path, 'patch_ipv6.test')
-
-        # config apply-patch patch
-        mock_generic_updater = mock.Mock()
-        with mock.patch('config.main.GenericUpdater', return_value=mock_generic_updater):
-            with mock.patch('builtins.open', mock.mock_open(read_data=any_patch_as_text)):
-                result = runner.invoke(config.config.commands["apply-patch"], [ipv6_patch_file], catch_exceptions=False)
-        print(result.exit_code, result.output)
-        assert "converted ipv6 address to lowercase fc00::1~1126 with prefix /INTERFACE/Ethernet12| in value: /INTERFACE/Ethernet12|FC00::1~1126" in result.output
-
     def test_intf_vrf_bind_unbind(self):
         runner = CliRunner()
         db = Db()
diff --git a/tests/kube_test.py b/tests/kube_test.py
index e49a2a55f8..5b51049e7b 100644
--- a/tests/kube_test.py
+++ b/tests/kube_test.py
@@ -1,5 +1,8 @@
+import mock
+
 from click.testing import CliRunner
 from utilities_common.db import Db
+from mock import patch
 
 show_no_server_output="""\
 Kubernetes server is not configured
@@ -110,8 +113,30 @@ def test_no_kube_server(self, get_cmd_module):
         result = runner.invoke(show.cli.commands["kubernetes"].commands["server"].commands["config"], [], obj=db)
         self.__check_res(result, "config command default value", show_server_output_5)
 
+    @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True))
+    @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError))
+    def test_no_kube_server_yang_validation(self, get_cmd_module):
+        (config, show) = get_cmd_module
+        runner = CliRunner()
+        db = Db()
+        db.cfgdb.delete_table("KUBERNETES_MASTER")
+
+        # Check server not configured
+        result = runner.invoke(show.cli.commands["kubernetes"].commands["server"].commands["config"], [], obj=db)
+        self.__check_res(result, "null server config test", show_no_server_output)
+
+        # Add IP when not configured
+        result = runner.invoke(config.config.commands["kubernetes"].commands["server"], ["ip", "10.10.10.11"], obj=db)
+        assert "Invalid ConfigDB. Error" in result.output
+
+        db.cfgdb.mod_entry("KUBERNETES_MASTER", "SERVER", {"ip": "10.10.10.11"})
+
+        # Add IP when already configured
+        result = runner.invoke(config.config.commands["kubernetes"].commands["server"], ["ip", "10.10.10.12"], obj=db)
+        assert "Invalid ConfigDB. Error" in result.output
+
+
     def test_only_kube_server(self, get_cmd_module):
         (config, show) = get_cmd_module
         runner = CliRunner()
Error" in result.output + def test_add_mclag_domain(self): + mclag.ADHOC_VALIDATION = True runner = CliRunner() db = Db() obj = {'db':db.cfgdb} @@ -378,10 +408,29 @@ def test_mclag_add_invalid_member(self): result = runner.invoke(config.config.commands["mclag"].commands["member"].commands["add"], [MCLAG_DOMAIN_ID, MCLAG_INVALID_PORTCHANNEL4], obj=obj) assert result.exit_code != 0, "mclag invalid member add case failed with code {}:{} Output:{}".format(type(result.exit_code), result.exit_code, result.output) + + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError)) + def test_mclag_add_invalid_member_yang_validation(self): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + mclag.ADHOC_VALIDATION = False + + # add valid mclag domain + db.cfgdb.set_entry("MCLAG_DOMAIN", MCLAG_DOMAIN_ID, {"source_ip": MCLAG_SRC_IP, "peer_ip": MCLAG_PEER_IP, "peer_link": MCLAG_PEER_LINK}) + + with mock.patch('validated_config_db_connector.device_info.is_yang_config_validation_enabled', mock.Mock(return_value=True)): + result = runner.invoke(config.config.commands["mclag"].commands["member"].commands["add"], [MCLAG_DOMAIN_ID, MCLAG_INVALID_MCLAG_MEMBER], obj=obj) + print(result.exit_code) + print(result.output) + assert "Invalid ConfigDB. Error" in result.output + + def test_mclag_add_member(self): runner = CliRunner() db = Db() obj = {'db':db.cfgdb} + mclag.ADHOC_VALIDATION = True # add valid mclag domain @@ -447,8 +496,31 @@ def test_mclag_add_member(self): assert result.exit_code != 0, "mclag invalid member del case failed with code {}:{} Output:{}".format(type(result.exit_code), result.exit_code, result.output) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError)) + def test_mclag_add__unique_ip_yang_validation(self): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + db.cfgdb.set_entry("MCLAG_DOMAIN", MCLAG_DOMAIN_ID, {"source_ip": MCLAG_SRC_IP}) + + with mock.patch('validated_config_db_connector.device_info.is_yang_config_validation_enabled', return_value=True): + result = runner.invoke(config.config.commands["mclag"].commands["unique-ip"].commands["add"], [MCLAG_UNIQUE_IP_VLAN], obj=obj) + assert "Invalid ConfigDB. 
Error" in result.output - def test_mclag_add_unique_ip(self): + + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=JsonPatchConflict)) + def test_mclag_del_unique_ip_yang_validation(self): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + db.cfgdb.set_entry("MCLAG_DOMAIN", MCLAG_DOMAIN_ID, {"source_ip": MCLAG_SRC_IP}) + + with mock.patch('validated_config_db_connector.device_info.is_yang_config_validation_enabled', return_value=True): + result = runner.invoke(config.config.commands["mclag"].commands["unique-ip"].commands["del"], [MCLAG_UNIQUE_IP_VLAN], obj=obj) + assert "Failed to delete mclag unique IP" in result.output + + + def test_mclag_add_unique_ip(self, mock_restart_dhcp_relay_service): runner = CliRunner() db = Db() obj = {'db':db.cfgdb} @@ -483,7 +555,7 @@ def test_mclag_add_unique_ip(self): keys = db.cfgdb.get_keys('MCLAG_UNIQUE_IP') assert MCLAG_UNIQUE_IP_VLAN not in keys, "unique ip not conifgured" - def test_mclag_add_unique_ip_non_default_vrf(self): + def test_mclag_add_unique_ip_non_default_vrf(self, mock_restart_dhcp_relay_service): runner = CliRunner() db = Db() obj = {'db':db.cfgdb} @@ -544,12 +616,18 @@ def test_add_mclag_with_invalid_domain_id(self): result = runner.invoke(config.config.commands["mclag"].commands["add"], [MCLAG_INVALID_DOMAIN_ID2, MCLAG_SRC_IP, MCLAG_PEER_IP, MCLAG_PEER_LINK], obj=obj) assert result.exit_code != 0, "mclag invalid src ip test caase with code {}:{} Output:{}".format(type(result.exit_code), result.exit_code, result.output) - + def test_del_mclag_with_invalid_domain_id(self): + mclag.ADHOC_VALIDATION = True runner = CliRunner() db = Db() obj = {'db':db.cfgdb} + with mock.patch('config.main.ConfigDBConnector.get_entry', return_value=None): + # del mclag nonexistent domain_id + result = runner.invoke(config.config.commands["mclag"].commands["del"], [MCLAG_NONEXISTENT_DOMAIN_ID], obj=obj) + assert result.exit_code != 0, "mclag invalid domain id test case with code {}:{} Output:{}".format(type(result.exit_code), result.exit_code, result.output) + # del mclag with invalid domain_id result = runner.invoke(config.config.commands["mclag"].commands["del"], [MCLAG_INVALID_DOMAIN_ID1], obj=obj) assert result.exit_code != 0, "mclag invalid domain id test case with code {}:{} Output:{}".format(type(result.exit_code), result.exit_code, result.output) @@ -557,10 +635,10 @@ def test_del_mclag_with_invalid_domain_id(self): result = runner.invoke(config.config.commands["mclag"].commands["del"], [MCLAG_INVALID_DOMAIN_ID2], obj=obj) assert result.exit_code != 0, "mclag invalid domain id test case with code {}:{} Output:{}".format(type(result.exit_code), result.exit_code, result.output) result = runner.invoke(config.config.commands["mclag"].commands["del"], [MCLAG_DOMAIN_ID3], obj=obj) + print(result.output) assert result.exit_code == 0, "mclag invalid domain id test case with code {}:{} Output:{}".format(type(result.exit_code), result.exit_code, result.output) - def test_modify_mclag_domain(self): runner = CliRunner() db = Db() @@ -568,15 +646,14 @@ def test_modify_mclag_domain(self): # add mclag domain entry in db db.cfgdb.set_entry("MCLAG_DOMAIN", MCLAG_DOMAIN_ID, {"source_ip": MCLAG_SRC_IP}) - result = runner.invoke(config.config.commands["mclag"].commands["add"], [MCLAG_DOMAIN_ID, MCLAG_SRC_IP, MCLAG_PEER_IP, MCLAG_PEER_LINK], obj=obj) - assert result.exit_code != 0, "mclag add domain peer ip test caase with code {}:{} Output:{}".format(type(result.exit_code), result.exit_code, 
result.output) + assert result.exit_code == 0, "mclag add domain peer ip test caase with code {}:{} Output:{}".format(type(result.exit_code), result.exit_code, result.output) assert self.verify_mclag_domain_cfg(db, MCLAG_DOMAIN_ID, MCLAG_SRC_IP, MCLAG_PEER_IP, MCLAG_PEER_LINK) == True, "mclag config not found" - + print(result.output) # modify mclag config - result = runner.invoke(config.config.commands["mclag"].commands["add"], [MCLAG_DOMAIN_ID, MCLAG_SRC_IP, MCLAG_PEER_IP, MCLAG_PEER_LINK], obj=obj) - assert result.exit_code != 0, "test_mclag_domain_add_again with code {}:{} Output:{}".format(type(result.exit_code), result.exit_code, result.output) + result = runner.invoke(config.config.commands["mclag"].commands["add"], [MCLAG_DOMAIN_ID, MCLAG_SRC_IP, MCLAG_PEER_IP, MCLAG_PEER_LINK2], obj=obj) + assert result.exit_code == 0, "test_mclag_domain_add_again with code {}:{} Output:{}".format(type(result.exit_code), result.exit_code, result.output) assert self.verify_mclag_domain_cfg(db, MCLAG_DOMAIN_ID, MCLAG_SRC_IP, MCLAG_PEER_IP, MCLAG_PEER_LINK2) == True, "mclag config not modified" @@ -590,6 +667,7 @@ def test_add_mclag_domain_no_peer_link(self): assert result.exit_code != 0, "mclag add domain peer ip test caase with code {}:{} Output:{}".format(type(result.exit_code), result.exit_code, result.output) assert self.verify_mclag_domain_cfg(db, MCLAG_DOMAIN_ID, MCLAG_SRC_IP, MCLAG_PEER_IP) == False, "mclag config not found" + def test_del_mclag_domain_with_members(self): runner = CliRunner() db = Db() @@ -617,11 +695,45 @@ def test_del_mclag_domain_with_members(self): assert self.verify_mclag_interface(db, MCLAG_DOMAIN_ID, MCLAG_MEMBER_PO) == False, "mclag member not deleted" assert self.verify_mclag_domain_cfg(db, MCLAG_DOMAIN_ID) == False, "mclag domain not present" + + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=JsonPatchConflict)) + def test_del_mclag_domain_with_members_invalid_yang_validation(self): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + mclag.ADHOC_VALIDATION = False + + db.cfgdb.set_entry("MCLAG_DOMAIN", MCLAG_DOMAIN_ID, {"source_ip": MCLAG_SRC_IP, "peer_ip": MCLAG_PEER_IP, "peer_link": MCLAG_PEER_LINK}) + db.cfgdb.set_entry('MCLAG_INTERFACE', (MCLAG_DOMAIN_ID, MCLAG_MEMBER_PO), {'if_type':"PortChannel"} ) + db.cfgdb.set_entry('MCLAG_INTERFACE', (MCLAG_DOMAIN_ID, MCLAG_MEMBER_PO2), {'if_type':"PortChannel"} ) + + with mock.patch('validated_config_db_connector.device_info.is_yang_config_validation_enabled', return_value=True): + result = runner.invoke(config.config.commands["mclag"].commands["member"].commands["del"], [MCLAG_DOMAIN_ID, MCLAG_MEMBER_PO2], obj=obj) + assert "Failed to delete mclag member" in result.output + + with mock.patch('validated_config_db_connector.device_info.is_yang_config_validation_enabled', return_value=True): + result = runner.invoke(config.config.commands["mclag"].commands["del"], [MCLAG_DOMAIN_ID], obj=obj) + assert "Invalid ConfigDB. 
Error" in result.output + + + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=JsonPatchConflict)) + def test_del_mclag_domain_invalid_yang_validation(self): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + mclag.ADHOC_VALIDATION = False + + db.cfgdb.set_entry("MCLAG_DOMAIN", MCLAG_DOMAIN_ID, {"source_ip": MCLAG_SRC_IP, "peer_ip": MCLAG_PEER_IP, "peer_link": MCLAG_PEER_LINK}) + with mock.patch('validated_config_db_connector.device_info.is_yang_config_validation_enabled', return_value=True): + result = runner.invoke(config.config.commands["mclag"].commands["del"], [MCLAG_DOMAIN_ID], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + def test_mclag_keepalive_for_non_existent_domain(self): runner = CliRunner() db = Db() obj = {'db':db.cfgdb} + mclag.ADHOC_VALIDATION = True # configure keepalive timer for non-existing domain result = runner.invoke(config.config.commands["mclag"].commands["keepalive-interval"], [MCLAG_DOMAIN_ID, MCLAG_INVALID_KEEPALIVE_TIMER], obj=obj) diff --git a/tests/mock_tables/appl_db.json b/tests/mock_tables/appl_db.json index 8554a07eaf..e330bdaddc 100644 --- a/tests/mock_tables/appl_db.json +++ b/tests/mock_tables/appl_db.json @@ -329,8 +329,8 @@ "endpoint_monitor":"100.251.7.1" }, "VNET_ROUTE_TUNNEL_TABLE:Vnet_v6_in_v6-0:fddd:a156:a251::a6:1/128": { - "endpoint": "fddd:a100:a251::a10:1,fddd:a101:a251::a10:1", - "endpoint_monitor":"fddd:a100:a251::a10:1,fddd:a101:a251::a10:1" + "endpoint": "fddd:a100:a251::a10:1,fddd:a101:a251::a10:1,fddd:a102:a251::a10:1,fddd:a103:a251::a10:1", + "endpoint_monitor":"fddd:a100:a251::a10:1,fddd:a101:a251::a10:1,fddd:a102:a251::a10:1,fddd:a103:a251::a10:1" }, "VNET_ROUTE_TUNNEL_TABLE:test_v4_in_v4-0:160.162.191.1/32": { "endpoint":"100.251.7.1", diff --git a/tests/mock_tables/asic0/config_db.json b/tests/mock_tables/asic0/config_db.json index 66b51f4ccb..de20194a64 100644 --- a/tests/mock_tables/asic0/config_db.json +++ b/tests/mock_tables/asic0/config_db.json @@ -246,5 +246,16 @@ "holdtime": "10", "asn": "65200", "keepalive": "3" + }, + "ACL_RULE|DATAACL_5|RULE_1": { + "IP_PROTOCOL": "126", + "PACKET_ACTION": "FORWARD", + "PRIORITY": "9999" + }, + "ACL_TABLE|DATAACL_5": { + "policy_desc": "DATAACL_5", + "ports@": "Ethernet124", + "type": "L3", + "stage": "ingress" } } diff --git a/tests/mock_tables/asic0/show_not_running_bgp.txt b/tests/mock_tables/asic0/show_not_running_bgp.txt new file mode 100644 index 0000000000..b156e857f1 --- /dev/null +++ b/tests/mock_tables/asic0/show_not_running_bgp.txt @@ -0,0 +1 @@ +Error response from daemon: Container 70e3d3bafd1ab5faf796892acff3e2ccbea3dcd5dcfefcc34f25f7cc916b67bb is not running diff --git a/tests/mock_tables/asic0/show_run_bgp.txt b/tests/mock_tables/asic0/show_run_bgp.txt new file mode 100644 index 0000000000..e5c9a9982c --- /dev/null +++ b/tests/mock_tables/asic0/show_run_bgp.txt @@ -0,0 +1,12 @@ +neighbor 10.0.0.1 remote-as 65200 +neighbor 10.0.0.1 peer-group TIER2_V4 +neighbor 10.0.0.1 description ARISTA01T2 +neighbor 10.0.0.5 remote-as 65200 +neighbor 10.0.0.5 peer-group TIER2_V4 +neighbor 10.0.0.5 description ARISTA03T2 +neighbor fc00::2 remote-as 65200 +neighbor fc00::2 peer-group TIER2_V6 +neighbor fc00::2 description ARISTA01T2 +neighbor fc00::6 remote-as 65200 +neighbor fc00::6 peer-group TIER2_V6 +neighbor fc00::6 description ARISTA03T2 diff --git a/tests/mock_tables/asic0/state_db.json b/tests/mock_tables/asic0/state_db.json index 21b4fa0eab..559af04826 100644 --- 
diff --git a/tests/mock_tables/appl_db.json b/tests/mock_tables/appl_db.json
index 8554a07eaf..e330bdaddc 100644
--- a/tests/mock_tables/appl_db.json
+++ b/tests/mock_tables/appl_db.json
@@ -329,8 +329,8 @@
         "endpoint_monitor":"100.251.7.1"
     },
     "VNET_ROUTE_TUNNEL_TABLE:Vnet_v6_in_v6-0:fddd:a156:a251::a6:1/128": {
-        "endpoint": "fddd:a100:a251::a10:1,fddd:a101:a251::a10:1",
-        "endpoint_monitor":"fddd:a100:a251::a10:1,fddd:a101:a251::a10:1"
+        "endpoint": "fddd:a100:a251::a10:1,fddd:a101:a251::a10:1,fddd:a102:a251::a10:1,fddd:a103:a251::a10:1",
+        "endpoint_monitor":"fddd:a100:a251::a10:1,fddd:a101:a251::a10:1,fddd:a102:a251::a10:1,fddd:a103:a251::a10:1"
     },
     "VNET_ROUTE_TUNNEL_TABLE:test_v4_in_v4-0:160.162.191.1/32": {
         "endpoint":"100.251.7.1",
diff --git a/tests/mock_tables/asic0/config_db.json b/tests/mock_tables/asic0/config_db.json
index 66b51f4ccb..de20194a64 100644
--- a/tests/mock_tables/asic0/config_db.json
+++ b/tests/mock_tables/asic0/config_db.json
@@ -246,5 +246,16 @@
         "holdtime": "10",
         "asn": "65200",
         "keepalive": "3"
+    },
+    "ACL_RULE|DATAACL_5|RULE_1": {
+        "IP_PROTOCOL": "126",
+        "PACKET_ACTION": "FORWARD",
+        "PRIORITY": "9999"
+    },
+    "ACL_TABLE|DATAACL_5": {
+        "policy_desc": "DATAACL_5",
+        "ports@": "Ethernet124",
+        "type": "L3",
+        "stage": "ingress"
     }
 }
diff --git a/tests/mock_tables/asic0/show_not_running_bgp.txt b/tests/mock_tables/asic0/show_not_running_bgp.txt
new file mode 100644
index 0000000000..b156e857f1
--- /dev/null
+++ b/tests/mock_tables/asic0/show_not_running_bgp.txt
@@ -0,0 +1 @@
+Error response from daemon: Container 70e3d3bafd1ab5faf796892acff3e2ccbea3dcd5dcfefcc34f25f7cc916b67bb is not running
diff --git a/tests/mock_tables/asic0/show_run_bgp.txt b/tests/mock_tables/asic0/show_run_bgp.txt
new file mode 100644
index 0000000000..e5c9a9982c
--- /dev/null
+++ b/tests/mock_tables/asic0/show_run_bgp.txt
@@ -0,0 +1,12 @@
+neighbor 10.0.0.1 remote-as 65200
+neighbor 10.0.0.1 peer-group TIER2_V4
+neighbor 10.0.0.1 description ARISTA01T2
+neighbor 10.0.0.5 remote-as 65200
+neighbor 10.0.0.5 peer-group TIER2_V4
+neighbor 10.0.0.5 description ARISTA03T2
+neighbor fc00::2 remote-as 65200
+neighbor fc00::2 peer-group TIER2_V6
+neighbor fc00::2 description ARISTA01T2
+neighbor fc00::6 remote-as 65200
+neighbor fc00::6 peer-group TIER2_V6
+neighbor fc00::6 description ARISTA03T2
diff --git a/tests/mock_tables/asic0/state_db.json b/tests/mock_tables/asic0/state_db.json
index 21b4fa0eab..559af04826 100644
--- a/tests/mock_tables/asic0/state_db.json
+++ b/tests/mock_tables/asic0/state_db.json
@@ -51,6 +51,43 @@
         "vcclowalarm": "2.9700",
         "vcclowwarning": "3.1349"
     },
+    "TRANSCEIVER_INFO|Ethernet48": {
+        "type" : "QSFP-DD Double Density 8X Pluggable Transceiver",
+        "hardware_rev" : "1.1",
+        "serial" : "214455197",
+        "manufacturer" : "Acacia Comm Inc.",
+        "model" : "DP04QSDD-E20-001",
+        "connector" : "LC",
+        "encoding" : "N/A",
+        "ext_identifier" : "Power Class 8 (20.0W Max)",
+        "ext_rateselect_compliance" : "N/A",
+        "cable_type" : "Length Cable Assembly(m)",
+        "cable_length" : "0.0",
+        "nominal_bit_rate" : "0",
+        "specification_compliance" : "sm_media_interface",
+        "vendor_date" : "2021-11-19",
+        "vendor_oui" : "7c-b2-5c",
+        "application_advertisement" : "{1: {'host_electrical_interface_id': '400GAUI-8 C2M (Annex 120E)', 'module_media_interface_id': '400ZR, DWDM, amplified', 'media_lane_count': 1, 'host_lane_count': 8, 'host_lane_assignment_options': 1, 'media_lane_assignment_options': 1}, 2: {'host_electrical_interface_id': '400GAUI-8 C2M (Annex 120E)', 'module_media_interface_id': '400ZR, Single Wavelength, Unamplified', 'media_lane_count': 1, 'host_lane_count': 8, 'host_lane_assignment_options': 1, 'media_lane_assignment_options': 1}, 3: {'host_electrical_interface_id': '100GAUI-2 C2M (Annex 135G)', 'module_media_interface_id': '400ZR, DWDM, amplified', 'media_lane_count': 1, 'host_lane_count': 2, 'host_lane_assignment_options': 85, 'media_lane_assignment_options': 1}}",
+        "host_lane_count" : "8",
+        "media_lane_count" : "1",
+        "active_apsel_hostlane1" : "1",
+        "active_apsel_hostlane2" : "1",
+        "active_apsel_hostlane3" : "1",
+        "active_apsel_hostlane4" : "1",
+        "active_apsel_hostlane5" : "1",
+        "active_apsel_hostlane6" : "1",
+        "active_apsel_hostlane7" : "1",
+        "active_apsel_hostlane8" : "1",
+        "media_interface_technology" : "1550 nm DFB",
+        "vendor_rev" : "A",
+        "cmis_rev" : "4.1",
+        "active_firmware" : "61.20",
+        "inactive_firmware" : "161.10",
+        "supported_max_tx_power" : "4.0",
+        "supported_min_tx_power" : "-22.9",
+        "supported_max_laser_freq" : "196100",
+        "supported_min_laser_freq" : "191300"
+    },
     "CHASSIS_INFO|chassis 1": {
         "psu_num": "2"
     },
@@ -249,5 +286,11 @@
         "STATUS": "up",
         "REMOTE_MOD": "0",
         "REMOTE_PORT": "93"
+    },
+    "ACL_TABLE_TABLE|DATAACL_5" : {
+        "status": "Active"
+    },
+    "ACL_RULE_TABLE|DATAACL_5|RULE_1" : {
+        "status": "Active"
     }
 }
diff --git a/tests/mock_tables/asic1/show_run_bgp.txt b/tests/mock_tables/asic1/show_run_bgp.txt
new file mode 100644
index 0000000000..de81748cc6
--- /dev/null
+++ b/tests/mock_tables/asic1/show_run_bgp.txt
@@ -0,0 +1,12 @@
+neighbor 10.0.0.9 remote-as 65200
+neighbor 10.0.0.9 peer-group TIER2_V4
+neighbor 10.0.0.9 description ARISTA05T2
+neighbor 10.0.0.13 remote-as 65200
+neighbor 10.0.0.13 peer-group TIER2_V4
+neighbor 10.0.0.13 description ARISTA07T2
+neighbor fc00::a remote-as 65200
+neighbor fc00::a peer-group TIER2_V6
+neighbor fc00::a description ARISTA05T2
+neighbor fc00::e remote-as 65200
+neighbor fc00::e peer-group TIER2_V6
+neighbor fc00::e description ARISTA07T2
diff --git a/tests/mock_tables/asic1/state_db.json b/tests/mock_tables/asic1/state_db.json
index dd775b9b50..7397d25b8f 100644
--- a/tests/mock_tables/asic1/state_db.json
+++ b/tests/mock_tables/asic1/state_db.json
@@ -1,21 +1,40 @@
 {
     "TRANSCEIVER_INFO|Ethernet64": {
-        "type": "QSFP28 or later",
-        "vendor_rev": "AC",
-        "serial": "MT1706FT02064",
-        "manufacturer": "Mellanox",
-        "model": "MFA1A00-C003",
-        "vendor_oui": "00-02-c9",
-        "vendor_date": "2017-01-13 ",
-        "connector": "No separable connector",
-        "encoding": "64B66B",
-        "ext_identifier": "Power Class 3(2.5W max), CDR present in Rx Tx",
-        "ext_rateselect_compliance": "QSFP+ Rate Select Version 1",
-        "cable_type": "Length Cable Assembly(m)",
-        "cable_length": "3",
-        "specification_compliance": "{'10/40G Ethernet Compliance Code': '40G Active Cable (XLPPI)'}",
-        "nominal_bit_rate": "255",
-        "application_advertisement": "N/A"
+        "type" : "QSFP-DD Double Density 8X Pluggable Transceiver",
+        "hardware_rev" : "X.X",
+        "serial" : "0123456789",
+        "manufacturer" : "XXXX",
+        "model" : "XXX",
+        "connector" : "LC",
+        "encoding" : "N/A",
+        "ext_identifier" : "Power Class 8 (20.0W Max)",
+        "ext_rateselect_compliance" : "N/A",
+        "cable_type" : "Length Cable Assembly(m)",
+        "cable_length" : "0.0",
+        "nominal_bit_rate" : "0",
+        "specification_compliance" : "sm_media_interface",
+        "vendor_date" : "2021-11-19",
+        "vendor_oui" : "XX-XX-XX",
+        "application_advertisement" : "{1: {'host_electrical_interface_id': '400GAUI-8 C2M (Annex 120E)', 'module_media_interface_id': '400ZR, DWDM, amplified', 'media_lane_count': 1, 'host_lane_count': 8, 'host_lane_assignment_options': 1, 'media_lane_assignment_options': 1}, 2: {'host_electrical_interface_id': '400GAUI-8 C2M (Annex 120E)', 'module_media_interface_id': '400ZR, Single Wavelength, Unamplified', 'media_lane_count': 1, 'host_lane_count': 8, 'host_lane_assignment_options': 1, 'media_lane_assignment_options': 1}, 3: {'host_electrical_interface_id': '100GAUI-2 C2M (Annex 135G)', 'module_media_interface_id': '400ZR, DWDM, amplified', 'media_lane_count': 1, 'host_lane_count': 2, 'host_lane_assignment_options': 85, 'media_lane_assignment_options': 1}}",
+        "host_lane_count" : "8",
+        "media_lane_count" : "1",
+        "active_apsel_hostlane1" : "1",
+        "active_apsel_hostlane2" : "1",
+        "active_apsel_hostlane3" : "1",
+        "active_apsel_hostlane4" : "1",
+        "active_apsel_hostlane5" : "1",
+        "active_apsel_hostlane6" : "1",
+        "active_apsel_hostlane7" : "1",
+        "active_apsel_hostlane8" : "1",
+        "media_interface_technology" : "1550 nm DFB",
+        "vendor_rev" : "XX",
+        "cmis_rev" : "4.1",
+        "active_firmware" : "X.X",
+        "inactive_firmware" : "X.X",
+        "supported_max_tx_power" : "4.0",
+        "supported_min_tx_power" : "-22.9",
+        "supported_max_laser_freq" : "196100",
+        "supported_min_laser_freq" : "191300"
     },
     "TRANSCEIVER_DOM_SENSOR|Ethernet64": {
         "temperature": "30.9258",
diff --git a/tests/mock_tables/asic2/config_db.json b/tests/mock_tables/asic2/config_db.json
index 532d85bcbb..bfda10a0d5 100644
--- a/tests/mock_tables/asic2/config_db.json
+++ b/tests/mock_tables/asic2/config_db.json
@@ -124,5 +124,16 @@
         "state": "disabled",
         "auto_restart": "disabled",
         "high_mem_alert": "disabled"
+    },
+    "ACL_RULE|DATAACL_5|RULE_1": {
+        "IP_PROTOCOL": "126",
+        "PACKET_ACTION": "FORWARD",
+        "PRIORITY": "9999"
+    },
+    "ACL_TABLE|DATAACL_5": {
+        "policy_desc": "DATAACL_5",
+        "ports@": "Ethernet124",
+        "type": "L3",
+        "stage": "ingress"
     }
 }
diff --git a/tests/mock_tables/asic2/state_db.json b/tests/mock_tables/asic2/state_db.json
index f6e3eee4cf..c6c8c88898 100644
--- a/tests/mock_tables/asic2/state_db.json
+++ b/tests/mock_tables/asic2/state_db.json
@@ -207,5 +207,11 @@
         "speed_target": "50",
         "led_status": "green",
         "timestamp": "20200813 01:32:30"
+    },
+    "ACL_TABLE_TABLE|DATAACL_5" : {
+        "status": "Active"
+    },
+    "ACL_RULE_TABLE|DATAACL_5|RULE_1" : {
+        "status": "Active"
     }
 }
diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json
index 899dada260..22744365f1 100644
--- a/tests/mock_tables/config_db.json
+++ b/tests/mock_tables/config_db.json
@@ -496,6 +496,11 @@
         "PACKET_ACTION": "FORWARD",
         "PRIORITY": "9995"
     },
+    "ACL_RULE|DATAACL_5|RULE_1": {
+        "IP_PROTOCOL": "126",
+        "PACKET_ACTION": "FORWARD",
+        "PRIORITY": "9999"
+    },
     "ACL_TABLE|NULL_ROUTE_V4": {
         "policy_desc": "DATAACL",
         "ports@": "PortChannel0002,PortChannel0005,PortChannel0008,PortChannel0011,PortChannel0014,PortChannel0017,PortChannel0020,PortChannel0023",
@@ -533,6 +538,12 @@
         "type": "L3V6",
         "stage": "egress"
     },
+    "ACL_TABLE|DATAACL_5": {
+        "policy_desc": "DATAACL_5",
+        "ports@": "Ethernet124",
+        "type": "L3",
+        "stage": "ingress"
+    },
     "ACL_TABLE|EVERFLOW": {
         "policy_desc": "EVERFLOW",
         "ports@": "PortChannel0002,PortChannel0005,PortChannel0008,PortChannel0011,PortChannel0014,PortChannel0017,PortChannel0020,PortChannel0023,Ethernet100,Ethernet104,Ethernet92,Ethernet96,Ethernet84,Ethernet88,Ethernet76,Ethernet80,Ethernet108,Ethernet112,Ethernet64,Ethernet120,Ethernet116,Ethernet124,Ethernet72,Ethernet68",
diff --git a/tests/mock_tables/counters_db.json b/tests/mock_tables/counters_db.json
index 03b29cdded..f2caba2449 100644
--- a/tests/mock_tables/counters_db.json
+++ b/tests/mock_tables/counters_db.json
@@ -1626,6 +1626,146 @@
         "oid:0x1500000000067d": "SAI_QUEUE_TYPE_UNICAST_VOQ",
         "oid:0x1500000000067e": "SAI_QUEUE_TYPE_UNICAST_VOQ"
     },
+    "COUNTERS_FABRIC_PORT_NAME_MAP" : {
+        "PORT0": "oid:0x1000000000143",
+        "PORT1": "oid:0x1000000000144",
+        "PORT2": "oid:0x1000000000145",
+        "PORT3": "oid:0x1000000000146",
+        "PORT4": "oid:0x1000000000147",
+        "PORT5": "oid:0x1000000000148",
+        "PORT6": "oid:0x1000000000149",
+        "PORT7": "oid:0x100000000014a"
+    },
+    "COUNTERS:oid:0x1000000000143": {
+        "SAI_PORT_STAT_IF_OUT_FABRIC_DATA_UNITS": "0",
+        "SAI_PORT_STAT_IF_OUT_OCTETS": "0",
+        "SAI_PORT_STAT_IF_IN_OCTETS": "1113",
+        "SAI_PORT_STAT_IF_IN_ERRORS": "0",
+        "SAI_PORT_STAT_IF_IN_FABRIC_DATA_UNITS": "6",
+        "SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES": "5",
+        "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES": "1759692040",
+        "SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS": "5"
+    },
+    "COUNTERS:oid:0x1000000000144": {
+        "SAI_PORT_STAT_IF_OUT_FABRIC_DATA_UNITS": "0",
+        "SAI_PORT_STAT_IF_OUT_OCTETS": "0",
+        "SAI_PORT_STAT_IF_IN_OCTETS": "0",
+        "SAI_PORT_STAT_IF_IN_ERRORS": "0",
+        "SAI_PORT_STAT_IF_IN_FABRIC_DATA_UNITS": "0",
+        "SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES": "0",
+        "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES": "58977677898",
+        "SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS": "0"
+    },
+    "COUNTERS:oid:0x1000000000145": {
+        "SAI_PORT_STAT_IF_OUT_FABRIC_DATA_UNITS": "0",
+        "SAI_PORT_STAT_IF_OUT_OCTETS": "0",
+        "SAI_PORT_STAT_IF_IN_OCTETS": "371",
+        "SAI_PORT_STAT_IF_IN_ERRORS": "0",
+        "SAI_PORT_STAT_IF_IN_FABRIC_DATA_UNITS": "2",
+        "SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES": "0",
+        "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES": "1769448760",
+        "SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS": "0"
+    },
+    "COUNTERS:oid:0x1000000000146": {
+        "SAI_PORT_STAT_IF_OUT_FABRIC_DATA_UNITS": "0",
+        "SAI_PORT_STAT_IF_OUT_OCTETS": "0",
+        "SAI_PORT_STAT_IF_IN_OCTETS": "0",
+        "SAI_PORT_STAT_IF_IN_ERRORS": "0",
+        "SAI_PORT_STAT_IF_IN_FABRIC_DATA_UNITS": "0",
+        "SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES": "0",
+        "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES": "58976477608",
+        "SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS": "0"
+    },
+    "COUNTERS:oid:0x1000000000147": {
+        "SAI_PORT_STAT_IF_OUT_FABRIC_DATA_UNITS": "0",
+        "SAI_PORT_STAT_IF_OUT_OCTETS": "0",
+        "SAI_PORT_STAT_IF_IN_OCTETS": "1855",
+        "SAI_PORT_STAT_IF_IN_ERRORS": "0",
+        "SAI_PORT_STAT_IF_IN_FABRIC_DATA_UNITS": "10",
+        "SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES": "73",
+        "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES": "1763293100",
+        "SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS": "73"
+    },
+    "COUNTERS:oid:0x1000000000148": {
+        "SAI_PORT_STAT_IF_OUT_FABRIC_DATA_UNITS": "0",
+        "SAI_PORT_STAT_IF_OUT_OCTETS": "0",
+        "SAI_PORT_STAT_IF_IN_OCTETS": "0",
+        "SAI_PORT_STAT_IF_IN_ERRORS": "0",
+        "SAI_PORT_STAT_IF_IN_FABRIC_DATA_UNITS": "0",
+        "SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES": "44196",
+        "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES": "58975150569",
+        "SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS": "0"
+    },
+    "COUNTERS:oid:0x1000000000149": {
+        "SAI_PORT_STAT_IF_OUT_FABRIC_DATA_UNITS": "0",
+        "SAI_PORT_STAT_IF_OUT_OCTETS": "0",
+        "SAI_PORT_STAT_IF_IN_OCTETS": "742",
+        "SAI_PORT_STAT_IF_IN_ERRORS": "0",
+        "SAI_PORT_STAT_IF_IN_FABRIC_DATA_UNITS": "4",
+        "SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES": "10",
+        "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES": "1763174090",
+        "SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS": "0"
+    },
+    "COUNTERS:oid:0x100000000014a": {
+        "SAI_PORT_STAT_IF_OUT_FABRIC_DATA_UNITS": "0",
+        "SAI_PORT_STAT_IF_OUT_OCTETS": "0",
+        "SAI_PORT_STAT_IF_IN_OCTETS": "1855",
+        "SAI_PORT_STAT_IF_IN_ERRORS": "0",
+        "SAI_PORT_STAT_IF_IN_FABRIC_DATA_UNITS": "10",
+        "SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES": "187",
+        "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES": "1768439529",
+        "SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS": "1331"
+    },
+    "COUNTERS_FABRIC_QUEUE_NAME_MAP" : {
+        "PORT0:0": "oid:0x15000000000186",
+        "PORT1:0": "oid:0x15000000000187",
+        "PORT2:0": "oid:0x15000000000188",
+        "PORT3:0": "oid:0x15000000000189",
+        "PORT4:0": "oid:0x1500000000018a",
+        "PORT5:0": "oid:0x1500000000018b",
+        "PORT6:0": "oid:0x1500000000018c",
+        "PORT7:0": "oid:0x1500000000018d"
+    },
+    "COUNTERS:oid:0x15000000000186": {
+        "SAI_QUEUE_STAT_WATERMARK_LEVEL": "20",
+        "SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES": "763",
+        "SAI_QUEUE_STAT_CURR_OCCUPANCY_LEVEL": "12"
+    },
+    "COUNTERS:oid:0x15000000000187": {
+        "SAI_QUEUE_STAT_WATERMARK_LEVEL": "0",
+        "SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES": "0",
+        "SAI_QUEUE_STAT_CURR_OCCUPANCY_LEVEL": "0"
+    },
+    "COUNTERS:oid:0x15000000000188": {
+        "SAI_QUEUE_STAT_WATERMARK_LEVEL": "8",
+        "SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES": "104",
+        "SAI_QUEUE_STAT_CURR_OCCUPANCY_LEVEL": "8"
+    },
+    "COUNTERS:oid:0x15000000000189": {
+        "SAI_QUEUE_STAT_WATERMARK_LEVEL": "0",
+        "SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES": "0",
+        "SAI_QUEUE_STAT_CURR_OCCUPANCY_LEVEL": "0"
+    },
+    "COUNTERS:oid:0x1500000000018a": {
+        "SAI_QUEUE_STAT_WATERMARK_LEVEL": "22",
+        "SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES": "1147",
+        "SAI_QUEUE_STAT_CURR_OCCUPANCY_LEVEL": "14"
+    },
+    "COUNTERS:oid:0x1500000000018b": {
+        "SAI_QUEUE_STAT_WATERMARK_LEVEL": "0",
+        "SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES": "0",
+        "SAI_QUEUE_STAT_CURR_OCCUPANCY_LEVEL": "0"
+    },
+    "COUNTERS:oid:0x1500000000018c": {
+        "SAI_QUEUE_STAT_WATERMARK_LEVEL": "10",
+        "SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES": "527",
+        "SAI_QUEUE_STAT_CURR_OCCUPANCY_LEVEL": "8"
+    },
+    "COUNTERS:oid:0x1500000000018d": {
+        "SAI_QUEUE_STAT_WATERMARK_LEVEL": "17",
+        "SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES": "1147",
+        "SAI_QUEUE_STAT_CURR_OCCUPANCY_LEVEL": "14"
+    },
     "COUNTERS_DEBUG_NAME_PORT_STAT_MAP": {
         "DEBUG_0": "SAI_PORT_STAT_IN_DROP_REASON_RANGE_BASE",
         "DEBUG_2": "SAI_PORT_STAT_OUT_CONFIGURED_DROP_REASONS_1_DROPPED_PKTS"
diff --git a/tests/mock_tables/show_run_bgp.txt b/tests/mock_tables/show_run_bgp.txt
new file mode 100644
index 0000000000..9a3ae8b13e
--- /dev/null
+++ b/tests/mock_tables/show_run_bgp.txt
@@ -0,0 +1,64 @@
+router bgp 65100
+bgp router-id 10.1.0.32
+bgp log-neighbor-changes
+no bgp ebgp-requires-policy
+no bgp default ipv4-unicast
+bgp graceful-restart restart-time 240
+bgp graceful-restart select-defer-time 45
+bgp graceful-restart
+bgp graceful-restart preserve-fw-state
+bgp bestpath as-path multipath-relax
+neighbor BGPSLBPassive peer-group
+neighbor BGPSLBPassive remote-as 65432
+neighbor BGPSLBPassive passive
+neighbor BGPSLBPassive ebgp-multihop 255
+neighbor BGPSLBPassive update-source 10.1.0.32
+neighbor BGPVac peer-group
+neighbor BGPVac remote-as 65432
+neighbor BGPVac passive
+neighbor BGPVac ebgp-multihop 255
+neighbor BGPVac update-source 10.1.0.32
+neighbor PEER_V4 peer-group
+neighbor PEER_V6 peer-group
+neighbor 10.0.0.57 remote-as 64600
+neighbor 10.0.0.57 peer-group PEER_V4
+neighbor 10.0.0.57 description ARISTA01T1
+neighbor 10.0.0.57 timers 3 10
+neighbor 10.0.0.57 timers connect 10
+neighbor 10.0.0.59 remote-as 64600
+neighbor 10.0.0.59 peer-group PEER_V4
+neighbor 10.0.0.59 description ARISTA02T1
+neighbor 10.0.0.59 timers 3 10
+neighbor 10.0.0.59 timers connect 10
+neighbor 10.0.0.61 remote-as 64600
+neighbor 10.0.0.61 peer-group PEER_V4
+neighbor 10.0.0.61 description ARISTA03T1
+neighbor 10.0.0.61 timers 3 10
+neighbor 10.0.0.61 timers connect 10
+neighbor 10.0.0.63 remote-as 64600
+neighbor 10.0.0.63 peer-group PEER_V4
+neighbor 10.0.0.63 description ARISTA04T1
+neighbor 10.0.0.63 timers 3 10
+neighbor 10.0.0.63 timers connect 10
+neighbor fc00::72 remote-as 64600
+neighbor fc00::72 peer-group PEER_V6
+neighbor fc00::72 description ARISTA01T1
+neighbor fc00::72 timers 3 10
+neighbor fc00::72 timers connect 10
+neighbor fc00::76 remote-as 64600
+neighbor fc00::76 peer-group PEER_V6
+neighbor fc00::76 description ARISTA02T1
+neighbor fc00::76 timers 3 10
+neighbor fc00::76 timers connect 10
+neighbor fc00::7a remote-as 64600
+neighbor fc00::7a peer-group PEER_V6
+neighbor fc00::7a description ARISTA03T1
+neighbor fc00::7a timers 3 10
+neighbor fc00::7a timers connect 10
+neighbor fc00::7e remote-as 64600
+neighbor fc00::7e peer-group PEER_V6
+neighbor fc00::7e description ARISTA04T1
+neighbor fc00::7e timers 3 10
+neighbor fc00::7e timers connect 10
+bgp listen range 10.255.0.0/25 peer-group BGPSLBPassive
+bgp listen range 192.168.0.0/21 peer-group BGPVac
diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json
index 12552997b9..cd1a194ba8 100644
--- a/tests/mock_tables/state_db.json
+++ b/tests/mock_tables/state_db.json
@@ -227,6 +227,189 @@
         "nominal_bit_rate": "Not supported for CMIS cables",
         "application_advertisement": "{1: {'host_electrical_interface_id': '400G CR8', 'module_media_interface_id': 'Copper cable', 'media_lane_count': 8, 'host_lane_count': 8, 'host_lane_assignment_options': 1, 'media_lane_assignment_options': 2}, 2: {'host_electrical_interface_id': '200GBASE-CR4 (Clause 136)'}}"
     },
+    "TRANSCEIVER_DOM_THRESHOLD|Ethernet44":{
+        "temphighalarm": "80.0",
+        "templowalarm": "-5.0",
+        "temphighwarning": "75.0",
+        "templowwarning": "0.0",
+        "vcchighalarm": "3.45",
+        "vcclowalarm": "3.1",
+        "vcchighwarning": "3.4",
+        "vcclowwarning": "3.15",
+        "rxpowerhighalarm": "2.0",
+        "rxpowerlowalarm": "-21.024",
+        "rxpowerhighwarning": "0.0",
+        "rxpowerlowwarning": "-18.013",
+        "txpowerhighalarm": "-5.0",
+        "txpowerlowalarm": "-16.99",
+        "txpowerhighwarning": "-6.0",
+        "txpowerlowwarning": "-16.003",
+        "txbiashighalarm": "450.0",
+        "txbiaslowalarm": "100.0",
+        "txbiashighwarning": "420.0",
+        "txbiaslowwarning": "110.0",
+        "lasertemphighalarm": "80.0",
+        "lasertemplowalarm": "-5.0",
+        "lasertemphighwarning": "75.0",
+        "lasertemplowwarning": "0.0",
+        "prefecberhighalarm": "0.0125",
+        "prefecberlowalarm": "0.0",
+        "prefecberhighwarning": "0.011000000000000001",
+        "prefecberlowwarning": "0.0",
+        "postfecberhighalarm": "1000",
+        "postfecberlowalarm": "0.0",
+        "postfecberhighwarning": "1.0",
+        "postfecberlowwarning": "0.0",
+        "biasxihighalarm": "99.00053406576639",
+        "biasxilowalarm": "0.9994659342336156",
+        "biasxihighwarning": "94.99961852445259",
+        "biasxilowwarning": "5.000381475547417",
+        "biasxqhighalarm": "99.00053406576639",
+        "biasxqlowalarm": "0.9994659342336156",
+        "biasxqhighwarning": "94.99961852445259",
+        "biasxqlowwarning": "5.000381475547417",
+        "biasxphighalarm": "99.00053406576639",
+        "biasxplowalarm": "0.9994659342336156",
+        "biasxphighwarning": "94.99961852445259",
+        "biasxplowwarning": "5.000381475547417",
+        "biasyihighalarm": "99.00053406576639",
+        "biasyilowalarm": "0.9994659342336156",
+        "biasyihighwarning": "94.99961852445259",
+        "biasyilowwarning": "5.000381475547417",
+        "biasyqhighalarm": "99.00053406576639",
+        "biasyqlowalarm": "0.9994659342336156",
+        "biasyqhighwarning": "94.99961852445259",
+        "biasyqlowwarning": "5.000381475547417",
+        "biasyphighalarm": "99.00053406576639",
+        "biasyplowalarm": "0.9994659342336156",
+        "biasyphighwarning": "94.99961852445259",
+        "biasyplowwarning": "5.000381475547417",
+        "cdshorthighalarm": "1000",
+        "cdshortlowalarm": "-1000",
+        "cdshorthighwarning": "500",
+        "cdshortlowwarning": "-500",
+        "cdlonghighalarm": "400000",
+        "cdlonglowalarm": "-400000",
+        "cdlonghighwarning": "200000",
+        "cdlonglowwarning": "-200000",
+        "dgdhighalarm": "7.0",
+        "dgdlowalarm": "0.0",
+        "dgdhighwarning": "7.0",
+        "dgdlowwarning": "0.0",
+        "sopmdhighalarm": "655.35",
+        "sopmdlowalarm": "0.0",
+        "sopmdhighwarning": "655.35",
+        "sopmdlowwarning": "0.0",
+        "pdlhighalarm": "4.0",
+        "pdllowalarm": "0.0",
+        "pdlhighwarning": "4.0",
+        "pdllowwarning": "0.0",
+        "osnrhighalarm": "99.0",
+        "osnrlowalarm": "0.0",
+        "osnrhighwarning": "99.0",
+        "osnrlowwarning": "0.0",
+        "esnrhighalarm": "99.0",
+        "esnrlowalarm": "0.0",
+        "esnrhighwarning": "99.0",
+        "esnrlowwarning": "0.0",
+        "cfohighalarm": "3800",
+        "cfolowalarm": "-3800",
+        "cfohighwarning": "3800",
+        "cfolowwarning": "-3800",
+        "txcurrpowerhighalarm": "-5.0",
+        "txcurrpowerlowalarm": "-17.0",
+        "txcurrpowerhighwarning": "-6.0",
+        "txcurrpowerlowwarning": "-16.0",
+        "rxtotpowerhighalarm": "2.0",
+        "rxtotpowerlowalarm": "-21.0",
+        "rxtotpowerhighwarning": "0.0",
+        "rxtotpowerlowwarning": "-18.0",
+        "rxsigpowerhighalarm": "13.0",
+        "rxsigpowerlowalarm": "-18.0",
+        "rxsigpowerhighwarning": "10.0",
+        "rxsigpowerlowwarning": "-15.0"
+    },
+    "TRANSCEIVER_PM|Ethernet44":{
+        "prefec_ber_avg": "0.00046578129838019075",
+        "prefec_ber_min": "0.00045750117895600233",
+        "prefec_ber_max": "0.000575639239547097",
+        "uncorr_frames_avg": "0.0",
+        "uncorr_frames_min": "0.0",
+        "uncorr_frames_max": "0.0",
+        "cd_avg": "0",
+        "cd_min": "0",
+        "cd_max": "0",
+        "dgd_avg": "5.56",
+        "dgd_min": "5.37",
+        "dgd_max": "5.81",
+        "sopmd_avg": "0.0",
+        "sopmd_min": "0.0",
+        "sopmd_max": "0.0",
+        "pdl_avg": "0.6",
+        "pdl_min": "0.5",
+        "pdl_max": "0.6",
+        "osnr_avg": "36.5",
+        "osnr_min": "36.5",
+        "osnr_max": "36.5",
+        "esnr_avg": "30.5",
+        "esnr_min": "30.5",
+        "esnr_max": "30.5",
+        "cfo_avg": "70",
+        "cfo_min": "54",
+        "cfo_max": "121",
+        "evm_avg": "100.0",
+        "evm_min": "100.0",
+        "evm_max": "100.0",
+        "soproc_avg": "1",
+        "soproc_min": "1",
+        "soproc_max": "2",
+        "tx_power_avg": "-8.23",
+        "tx_power_min": "-8.22",
+        "tx_power_max": "-8.24",
+        "rx_tot_power_avg": "-10.62",
+        "rx_tot_power_min": "-10.61",
+        "rx_tot_power_max": "-10.62",
+        "rx_sig_power_avg": "0",
+        "rx_sig_power_min": "-40",
+        "rx_sig_power_max": "40"
+    },
+    "TRANSCEIVER_INFO|Ethernet64": {
+        "type" : "QSFP-DD Double Density 8X Pluggable Transceiver",
+        "hardware_rev" : "X.X",
+        "serial" : "0123456789",
+        "manufacturer" : "XXXX",
+        "model" : "XXX",
+        "connector" : "LC",
+        "encoding" : "N/A",
+        "ext_identifier" : "Power Class 8 (20.0W Max)",
+        "ext_rateselect_compliance" : "N/A",
+        "cable_type" : "Length Cable Assembly(m)",
+        "cable_length" : "0.0",
+        "nominal_bit_rate" : "0",
+        "specification_compliance" : "sm_media_interface",
+        "vendor_date" : "2021-11-19",
+        "vendor_oui" : "XX-XX-XX",
+        "application_advertisement" : "{1: {'host_electrical_interface_id': '400GAUI-8 C2M (Annex 120E)', 'module_media_interface_id': '400ZR, DWDM, amplified', 'media_lane_count': 1, 'host_lane_count': 8, 'host_lane_assignment_options': 1, 'media_lane_assignment_options': 1}, 2: {'host_electrical_interface_id': '400GAUI-8 C2M (Annex 120E)', 'module_media_interface_id': '400ZR, Single Wavelength, Unamplified', 'media_lane_count': 1, 'host_lane_count': 8, 'host_lane_assignment_options': 1, 'media_lane_assignment_options': 1}, 3: {'host_electrical_interface_id': '100GAUI-2 C2M (Annex 135G)', 'module_media_interface_id': '400ZR, DWDM, amplified', 'media_lane_count': 1, 'host_lane_count': 2, 'host_lane_assignment_options': 85, 'media_lane_assignment_options': 1}}",
+        "host_lane_count" : "8",
+        "media_lane_count" : "1",
+        "active_apsel_hostlane1" : "1",
+        "active_apsel_hostlane2" : "1",
+        "active_apsel_hostlane3" : "1",
+        "active_apsel_hostlane4" : "1",
+        "active_apsel_hostlane5" : "1",
+        "active_apsel_hostlane6" : "1",
+        "active_apsel_hostlane7" : "1",
+        "active_apsel_hostlane8" : "1",
+        "media_interface_technology" : "1550 nm DFB",
+        "vendor_rev" : "XX",
+        "cmis_rev" : "4.1",
+        "active_firmware" : "X.X",
+        "inactive_firmware" : "X.X",
+        "supported_max_tx_power" : "4.0",
+        "supported_min_tx_power" : "-22.9",
+        "supported_max_laser_freq" : "196100",
+        "supported_min_laser_freq" : "191300"
+    },
     "TRANSCEIVER_STATUS|Ethernet0": {
         "status": "67",
         "error": "Blocking Error|High temperature"
@@ -403,6 +586,24 @@
         "version_self_next": "0.2MS",
         "version_self_active": "0.2MS",
         "version_self_inactive": "0.2MS",
+        "peer_mux_direction": "active",
+        "self_mux_direction": "active",
+        "grpc_connection_status": "READY",
+        "Value": "AABB"
+    },
+    "MUX_CABLE_INFO|Ethernet4": {
+        "version_peer_next": "0.2MS",
+        "version_peer_active": "0.2MS",
+        "version_peer_inactive": "0.2MS",
+        "version_nic_next": "0.2MS",
+        "version_nic_active": "0.2MS",
+        "version_nic_inactive": "0.2MS",
+        "version_self_next": "0.2MS",
+        "version_self_active": "0.2MS",
+        "version_self_inactive": "0.2MS",
+        "peer_mux_direction": "active",
+        "self_mux_direction": "standby",
+        "grpc_connection_status": "READY",
         "Value": "AABB"
     },
     "MUX_CABLE_INFO|Ethernet12": {
@@ -940,7 +1141,7 @@
         "state":"active"
     },
     "VNET_ROUTE_TUNNEL_TABLE|Vnet_v6_in_v6-0|fddd:a156:a251::a6:1/128": {
-        "active_endpoints":"fddd:a100:a251::a10:1,fddd:a101:a251::a10:1",
+        "active_endpoints":"fddd:a100:a251::a10:1,fddd:a101:a251::a10:1,fddd:a102:a251::a10:1,fddd:a103:a251::a10:1",
         "state":"active"
     },
     "BFD_SESSION_TABLE|default|default|100.251.7.1": {
@@ -975,5 +1176,45 @@
     },
     "ADVERTISE_NETWORK_TABLE|fccc:a250:a251::a6:1/128": {
         "profile": ""
+    },
+    "FABRIC_PORT_TABLE|PORT0" : {
+        "STATUS": "up",
+        "REMOTE_MOD": "0",
+ "REMOTE_PORT": "79" + }, + "FABRIC_PORT_TABLE|PORT1" : { + "STATUS": "down" + }, + "FABRIC_PORT_TABLE|PORT2" : { + "STATUS": "up", + "REMOTE_MOD": "0", + "REMOTE_PORT": "94" + }, + "FABRIC_PORT_TABLE|PORT3" : { + "STATUS": "down" + }, + "FABRIC_PORT_TABLE|PORT4" : { + "STATUS": "up", + "REMOTE_MOD": "0", + "REMOTE_PORT": "85" + }, + "FABRIC_PORT_TABLE|PORT5" : { + "STATUS": "down" + }, + "FABRIC_PORT_TABLE|PORT6" : { + "STATUS": "up", + "REMOTE_MOD": "0", + "REMOTE_PORT": "84" + }, + "FABRIC_PORT_TABLE|PORT7" : { + "STATUS": "up", + "REMOTE_MOD": "0", + "REMOTE_PORT": "93" + }, + "ACL_TABLE_TABLE|DATAACL_5" : { + "status": "Active" + }, + "ACL_RULE_TABLE|DATAACL_5|RULE_1" : { + "status": "Active" } } diff --git a/tests/mock_tables/t1/config_db.json b/tests/mock_tables/t1/config_db.json index f1f835182f..42a0e2da6c 100644 --- a/tests/mock_tables/t1/config_db.json +++ b/tests/mock_tables/t1/config_db.json @@ -1798,7 +1798,7 @@ "auto_restart": "enabled", "has_global_scope": "False", "has_per_asic_scope": "True", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "enabled" } @@ -1811,7 +1811,7 @@ "auto_restart": "disabled", "has_global_scope": "True", "has_per_asic_scope": "True", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "enabled" } @@ -1824,7 +1824,7 @@ "auto_restart": "enabled", "has_global_scope": "True", "has_per_asic_scope": "False", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "enabled" } @@ -1837,7 +1837,7 @@ "auto_restart": "enabled", "has_global_scope": "True", "has_per_asic_scope": "True", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "enabled" } @@ -1850,7 +1850,7 @@ "auto_restart": "enabled", "has_global_scope": "True", "has_per_asic_scope": "False", - "has_timer": "True", + "delayed": "True", "high_mem_alert": "disabled", "state": "enabled" } @@ -1863,7 +1863,7 @@ "auto_restart": "enabled", "has_global_scope": "True", "has_per_asic_scope": "False", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "disabled" } @@ -1876,7 +1876,7 @@ "auto_restart": "enabled", "has_global_scope": "True", "has_per_asic_scope": "False", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "enabled" } @@ -1889,7 +1889,7 @@ "auto_restart": "enabled", "has_global_scope": "True", "has_per_asic_scope": "False", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "enabled" } @@ -1902,7 +1902,7 @@ "auto_restart": "enabled", "has_global_scope": "True", "has_per_asic_scope": "False", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "disabled" } @@ -1915,7 +1915,7 @@ "auto_restart": "enabled", "has_global_scope": "True", "has_per_asic_scope": "False", - "has_timer": "True", + "delayed": "True", "high_mem_alert": "disabled", "state": "enabled" } @@ -1928,7 +1928,7 @@ "auto_restart": "enabled", "has_global_scope": "False", "has_per_asic_scope": "True", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "enabled" } @@ -1941,7 +1941,7 @@ "auto_restart": "enabled", "has_global_scope": "False", "has_per_asic_scope": "True", - "has_timer": "False", + "delayed": "False", "high_mem_alert": "disabled", "state": "enabled" } @@ -1954,7 +1954,7 @@ "auto_restart": "enabled", "has_global_scope": "False", "has_per_asic_scope": "True", - "has_timer": "False", + "delayed": "False", "high_mem_alert": 
"disabled", "state": "enabled" } @@ -1967,7 +1967,7 @@ "auto_restart": "enabled", "has_global_scope": "True", "has_per_asic_scope": "False", - "has_timer": "True", + "delayed": "True", "high_mem_alert": "disabled", "state": "enabled", "status": "enabled" @@ -3510,4 +3510,4 @@ "VERSION": "version_1_0_4" } } -} \ No newline at end of file +} diff --git a/tests/multi_asic_intfutil_test.py b/tests/multi_asic_intfutil_test.py index 56e11fa0d3..37e5b5b0f0 100644 --- a/tests/multi_asic_intfutil_test.py +++ b/tests/multi_asic_intfutil_test.py @@ -10,18 +10,18 @@ scripts_path = os.path.join(modules_path, "scripts") intf_status_all = """\ - Interface Lanes Speed MTU FEC Alias Vlan Oper Admin Type Asym PFC ---------------- ------------ ------- ----- ----- -------------- --------------- ------ ------- --------------- ---------- - Ethernet0 33,34,35,36 40G 9100 N/A Ethernet1/1 PortChannel1002 up up QSFP28 or later off - Ethernet4 29,30,31,32 40G 9100 N/A Ethernet1/2 PortChannel1002 up up N/A off - Ethernet64 29,30,31,32 40G 9100 N/A Ethernet1/17 routed up up QSFP28 or later off - Ethernet-BP0 93,94,95,96 40G 9100 N/A Ethernet-BP0 PortChannel4001 up up N/A off - Ethernet-BP4 97,98,99,100 40G 9100 N/A Ethernet-BP4 PortChannel4001 up up N/A off - Ethernet-BP256 61,62,63,64 40G 9100 N/A Ethernet-BP256 PortChannel4009 up up N/A off - Ethernet-BP260 57,58,59,60 40G 9100 N/A Ethernet-BP260 PortChannel4009 up up N/A off -PortChannel1002 N/A 80G 9100 N/A N/A trunk up up N/A N/A -PortChannel4001 N/A 80G 9100 N/A N/A routed up up N/A N/A -PortChannel4009 N/A 80G 9100 N/A N/A routed up up N/A N/A + Interface Lanes Speed MTU FEC Alias Vlan Oper Admin Type Asym PFC +--------------- ------------ ------- ----- ----- -------------- --------------- ------ ------- ----------------------------------------------- ---------- + Ethernet0 33,34,35,36 40G 9100 N/A Ethernet1/1 PortChannel1002 up up QSFP28 or later off + Ethernet4 29,30,31,32 40G 9100 N/A Ethernet1/2 PortChannel1002 up up N/A off + Ethernet64 29,30,31,32 40G 9100 N/A Ethernet1/17 routed up up QSFP-DD Double Density 8X Pluggable Transceiver off + Ethernet-BP0 93,94,95,96 40G 9100 N/A Ethernet-BP0 PortChannel4001 up up N/A off + Ethernet-BP4 97,98,99,100 40G 9100 N/A Ethernet-BP4 PortChannel4001 up up N/A off + Ethernet-BP256 61,62,63,64 40G 9100 N/A Ethernet-BP256 PortChannel4009 up up N/A off + Ethernet-BP260 57,58,59,60 40G 9100 N/A Ethernet-BP260 PortChannel4009 up up N/A off +PortChannel1002 N/A 80G 9100 N/A N/A trunk up up N/A N/A +PortChannel4001 N/A 80G 9100 N/A N/A routed up up N/A N/A +PortChannel4009 N/A 80G 9100 N/A N/A routed up up N/A N/A """ intf_status = """\ Interface Lanes Speed MTU FEC Alias Vlan Oper Admin Type Asym PFC diff --git a/tests/muxcable_test.py b/tests/muxcable_test.py index b8eb3dce62..0405b27d87 100644 --- a/tests/muxcable_test.py +++ b/tests/muxcable_test.py @@ -378,6 +378,50 @@ } """ +show_muxcable_grpc_muxdirection_active_expected_output = """\ +Port Direction Presence PeerDirection ConnectivityState +---------- ----------- ---------- --------------- ------------------- +Ethernet12 active True active READY +""" + +show_muxcable_grpc_muxdirection_standby_expected_output = """\ +Port Direction Presence PeerDirection ConnectivityState +--------- ----------- ---------- --------------- ------------------- +Ethernet4 standby True active READY +""" + +show_muxcable_grpc_muxdirection_active_expected_all_output = """\ +Port Direction Presence PeerDirection ConnectivityState +--------- ----------- ---------- --------------- 
------------------- +Ethernet0 active False active READY +""" + +show_muxcable_grpc_muxdirection_active_expected_all_output_json = """\ +{ + "HWMODE": { + "Ethernet0": { + "Direction": "active", + "Presence": "False", + "PeerDirection": "active", + "ConnectivityState": "READY" + } + } +} +""" + +show_muxcable_grpc_muxdirection_standby_expected_output_json = """\ +{ + "HWMODE": { + "Ethernet4": { + "Direction": "standby", + "Presence": "True", + "PeerDirection": "active", + "ConnectivityState": "READY" + } + } +} +""" + expected_muxcable_cableinfo_output = """\ Vendor Model -------- --------------- @@ -395,6 +439,17 @@ Ethernet12 active True """ +show_muxcable_hwmode_muxdirection_active_expected_output_json = """\ +{ + "HWMODE": { + "Ethernet12": { + "Direction": "active", + "Presence": "True" + } + } +} +""" + show_muxcable_hwmode_muxdirection_active_expected_output_alias = """\ Port Direction Presence ------ ----------- ---------- @@ -558,6 +613,48 @@ Ethernet0 server_ipv4 10.2.1.1 added added """ + +show_muxcable_operationtime_expected_port_output="""\ +PORT ATTR OPERATION_TIME +--------- -------------- ---------------- +Ethernet0 operation_time 200:00 +""" + +show_muxcable_health_expected_port_output="""\ +PORT ATTR HEALTH +--------- ------------ -------- +Ethernet0 health_check Ok +""" + + +show_muxcable_queueinfo_expected_port_output="""\ +PORT ATTR VALUE +--------- ---------- ------- +Ethernet0 uart_stat1 2 +Ethernet0 uart_stat2 1 +""" + +show_muxcable_resetcause_expected_port_output="""\ +PORT ATTR RESETCAUSE +--------- ----------- ------------ +Ethernet0 reset_cause warm reset +""" + + +show_muxcable_health_expected_port_output_json="""\ +{ + "health_check": "Ok" +} +""" + + + +show_muxcable_resetcause_expected_port_output_json="""\ +{ + "reset_cause": "warm reset" +} +""" + class TestMuxcable(object): @classmethod def setup_class(cls): @@ -2367,6 +2464,220 @@ def test_config_muxcable_telemetry_enable(self): "enable"], obj=db) assert result.exit_code == 0 + @mock.patch('show.muxcable.delete_all_keys_in_db_table', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.update_and_get_response_for_xcvr_cmd', mock.MagicMock(return_value={0: 0, + 1: "standby"})) + @mock.patch('show.muxcable.get_grpc_cached_version_mux_direction_per_port', mock.MagicMock(return_value={"self_mux_direction": "active", + "peer_mux_direction": "active", + "presence": "True", + "rc": 0, + "grpc_connection_status": "READY"})) + @mock.patch('show.muxcable.check_port_in_mux_cable_table', mock.MagicMock(return_value=True)) + @mock.patch('utilities_common.platform_sfputil_helper.get_logical_list', mock.MagicMock(return_value=["Ethernet0", "Ethernet12"])) + @mock.patch('utilities_common.platform_sfputil_helper.get_asic_id_for_logical_port', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.platform_sfputil', mock.MagicMock(return_value={0: ["Ethernet12", "Ethernet0"]})) + @mock.patch('utilities_common.platform_sfputil_helper.get_physical_to_logical', mock.MagicMock(return_value={0: ["Ethernet12", "Ethernet0"]})) + @mock.patch('utilities_common.platform_sfputil_helper.logical_port_name_to_physical_port_list', mock.MagicMock(return_value=[0])) + @mock.patch('sonic_y_cable.y_cable.check_read_side', mock.MagicMock(return_value=(1))) + @mock.patch('sonic_y_cable.y_cable.check_mux_direction', mock.MagicMock(return_value=(2))) + @mock.patch('re.match', mock.MagicMock(return_value=(True))) + def test_show_muxcable_grpc_muxdirection_port_standby_with_patch(self): + runner = CliRunner() + db = Db() + + 
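+        # the cached gRPC direction mock above reports active/active with a READY connection, so Ethernet12 is expected to show as active on both ends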
result = runner.invoke(show.cli.commands["muxcable"].commands["grpc"].commands["muxdirection"], + ["Ethernet12"], obj=db) + assert result.exit_code == 0 + assert result.output == show_muxcable_grpc_muxdirection_active_expected_output + + @mock.patch('show.muxcable.delete_all_keys_in_db_table', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.update_and_get_response_for_xcvr_cmd', mock.MagicMock(return_value={0: 0, + 1: "standby"})) + @mock.patch('show.muxcable.check_port_in_mux_cable_table', mock.MagicMock(return_value=True)) + @mock.patch('utilities_common.platform_sfputil_helper.get_logical_list', mock.MagicMock(return_value=["Ethernet0", "Ethernet4"])) + @mock.patch('utilities_common.platform_sfputil_helper.get_asic_id_for_logical_port', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.platform_sfputil', mock.MagicMock(return_value={0: ["Ethernet4", "Ethernet0"]})) + @mock.patch('utilities_common.platform_sfputil_helper.get_physical_to_logical', mock.MagicMock(return_value={0: ["Ethernet4", "Ethernet0"]})) + @mock.patch('utilities_common.platform_sfputil_helper.logical_port_name_to_physical_port_list', mock.MagicMock(return_value=[0])) + @mock.patch('sonic_y_cable.y_cable.check_read_side', mock.MagicMock(return_value=(1))) + @mock.patch('sonic_y_cable.y_cable.check_mux_direction', mock.MagicMock(return_value=(2))) + @mock.patch('re.match', mock.MagicMock(return_value=(True))) + def test_show_muxcable_grpc_muxdirection_port_standby(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(show.cli.commands["muxcable"].commands["grpc"].commands["muxdirection"], + ["Ethernet4"], obj=db) + assert result.exit_code == 0 + assert result.output == show_muxcable_grpc_muxdirection_standby_expected_output + + @mock.patch('show.muxcable.delete_all_keys_in_db_table', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.update_and_get_response_for_xcvr_cmd', mock.MagicMock(return_value={0: 0, + 1: "standby"})) + @mock.patch('show.muxcable.check_port_in_mux_cable_table', mock.MagicMock(return_value=True)) + @mock.patch('utilities_common.platform_sfputil_helper.get_logical_list', mock.MagicMock(return_value=["Ethernet0", "Ethernet4"])) + @mock.patch('utilities_common.platform_sfputil_helper.get_asic_id_for_logical_port', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.platform_sfputil', mock.MagicMock(return_value={0: ["Ethernet4", "Ethernet0"]})) + @mock.patch('utilities_common.platform_sfputil_helper.get_physical_to_logical', mock.MagicMock(return_value={0: ["Ethernet4", "Ethernet0"]})) + @mock.patch('utilities_common.platform_sfputil_helper.logical_port_name_to_physical_port_list', mock.MagicMock(return_value=[0])) + @mock.patch('sonic_y_cable.y_cable.check_read_side', mock.MagicMock(return_value=(1))) + @mock.patch('sonic_y_cable.y_cable.check_mux_direction', mock.MagicMock(return_value=(2))) + @mock.patch('re.match', mock.MagicMock(return_value=(True))) + def test_show_muxcable_grpc_muxdirection_port_standby_json(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(show.cli.commands["muxcable"].commands["grpc"].commands["muxdirection"], + ["Ethernet4", "--json"], obj=db) + assert result.exit_code == 0 + assert result.output == show_muxcable_grpc_muxdirection_standby_expected_output_json + + @mock.patch('show.muxcable.delete_all_keys_in_db_table', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.update_and_get_response_for_xcvr_cmd', mock.MagicMock(return_value={0: 0, + 1: "standby"})) + 
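+    # no cached-direction mock here; the expected standby/active/READY row for Ethernet4 matches the MUX_CABLE_INFO|Ethernet4 entry added to the state_db mock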
@mock.patch('show.muxcable.check_port_in_mux_cable_table', mock.MagicMock(return_value=True)) + @mock.patch('utilities_common.platform_sfputil_helper.get_logical_list', mock.MagicMock(return_value=["Ethernet0", "Ethernet4"])) + @mock.patch('utilities_common.platform_sfputil_helper.get_asic_id_for_logical_port', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.platform_sfputil', mock.MagicMock(return_value={0: ["Ethernet0", "Ethernet4"]})) + @mock.patch('utilities_common.platform_sfputil_helper.get_physical_to_logical', mock.MagicMock(return_value={0: ["Ethernet0", "Ethernet4"]})) + @mock.patch('utilities_common.platform_sfputil_helper.logical_port_name_to_physical_port_list', mock.MagicMock(return_value=[0])) + @mock.patch('sonic_y_cable.y_cable.check_read_side', mock.MagicMock(return_value=(1))) + @mock.patch('sonic_y_cable.y_cable.check_mux_direction', mock.MagicMock(return_value=(2))) + @mock.patch('re.match', mock.MagicMock(return_value=(True))) + def test_show_muxcable_grpc_muxdirection_port_all(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(show.cli.commands["muxcable"].commands["grpc"].commands["muxdirection"], obj=db) + assert result.exit_code == 0 + assert result.output == show_muxcable_grpc_muxdirection_active_expected_all_output + + @mock.patch('show.muxcable.delete_all_keys_in_db_table', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.update_and_get_response_for_xcvr_cmd', mock.MagicMock(return_value={0: 0, + 1: "standby"})) + @mock.patch('show.muxcable.check_port_in_mux_cable_table', mock.MagicMock(return_value=True)) + @mock.patch('utilities_common.platform_sfputil_helper.get_logical_list', mock.MagicMock(return_value=["Ethernet0", "Ethernet4"])) + @mock.patch('utilities_common.platform_sfputil_helper.get_asic_id_for_logical_port', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.platform_sfputil', mock.MagicMock(return_value={0: ["Ethernet0", "Ethernet4"]})) + @mock.patch('utilities_common.platform_sfputil_helper.get_physical_to_logical', mock.MagicMock(return_value={0: ["Ethernet0", "Ethernet4"]})) + @mock.patch('utilities_common.platform_sfputil_helper.logical_port_name_to_physical_port_list', mock.MagicMock(return_value=[0])) + @mock.patch('sonic_y_cable.y_cable.check_read_side', mock.MagicMock(return_value=(1))) + @mock.patch('sonic_y_cable.y_cable.check_mux_direction', mock.MagicMock(return_value=(2))) + @mock.patch('re.match', mock.MagicMock(return_value=(True))) + def test_show_muxcable_grpc_muxdirection_port_all_json(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(show.cli.commands["muxcable"].commands["grpc"].commands["muxdirection"], ["--json"], obj=db) + assert result.exit_code == 0 + assert result.output == show_muxcable_grpc_muxdirection_active_expected_all_output_json + + @mock.patch('show.muxcable.delete_all_keys_in_db_table', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.update_and_get_response_for_xcvr_cmd', mock.MagicMock(return_value={0: 0, + 1: "active"})) + @mock.patch('show.muxcable.get_hwmode_mux_direction_port', mock.MagicMock(return_value={0: 0, + 1: "active", + 2: "True"})) + @mock.patch('show.muxcable.check_port_in_mux_cable_table', mock.MagicMock(return_value=True)) + @mock.patch('utilities_common.platform_sfputil_helper.get_logical_list', mock.MagicMock(return_value=["Ethernet0", "Ethernet12"])) + @mock.patch('utilities_common.platform_sfputil_helper.get_asic_id_for_logical_port', mock.MagicMock(return_value=0)) + 
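+    # invoked without a port argument below, so the command renders the all-ports table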
@mock.patch('show.muxcable.platform_sfputil', mock.MagicMock(return_value={0: ["Ethernet12", "Ethernet0"]})) + @mock.patch('utilities_common.platform_sfputil_helper.get_physical_to_logical', mock.MagicMock(return_value={0: ["Ethernet12", "Ethernet0"]})) + @mock.patch('utilities_common.platform_sfputil_helper.logical_port_name_to_physical_port_list', mock.MagicMock(return_value=[0])) + @mock.patch('sonic_y_cable.y_cable.check_read_side', mock.MagicMock(return_value=(1))) + @mock.patch('sonic_y_cable.y_cable.check_mux_direction', mock.MagicMock(return_value=(1))) + @mock.patch('re.match', mock.MagicMock(return_value=(True))) + def test_show_muxcable_hwmode_muxdirection_port_active(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(show.cli.commands["muxcable"].commands["hwmode"].commands["muxdirection"], + ["Ethernet12", "--json"], obj=db) + assert result.exit_code == 0 + assert result.output == show_muxcable_hwmode_muxdirection_active_expected_output_json + + + @mock.patch('show.muxcable.delete_all_keys_in_db_table', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.update_and_get_response_for_xcvr_cmd', mock.MagicMock(return_value={0: 0, + 1: "True"})) + @mock.patch('show.muxcable.get_result', mock.MagicMock(return_value={"health_check": "True"})) + def test_show_mux_health(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(show.cli.commands["muxcable"].commands["health"], + ["Ethernet0"], obj=db) + assert result.output == show_muxcable_health_expected_port_output + + + @mock.patch('show.muxcable.delete_all_keys_in_db_table', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.update_and_get_response_for_xcvr_cmd', mock.MagicMock(return_value={0: 0, + 1: "True"})) + @mock.patch('show.muxcable.get_result', mock.MagicMock(return_value={"health_check": "True"})) + def test_show_mux_health_json(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(show.cli.commands["muxcable"].commands["health"], + ["Ethernet0", "--json"], obj=db) + assert result.output == show_muxcable_health_expected_port_output_json + + + + + @mock.patch('show.muxcable.delete_all_keys_in_db_table', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.update_and_get_response_for_xcvr_cmd', mock.MagicMock(return_value={0: 0, + 1: "True"})) + @mock.patch('show.muxcable.get_result', mock.MagicMock(return_value={"operation_time": "200"})) + def test_show_mux_operation_time(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(show.cli.commands["muxcable"].commands["operationtime"], + ["Ethernet0"], obj=db) + assert result.output == show_muxcable_operationtime_expected_port_output + + @mock.patch('show.muxcable.delete_all_keys_in_db_table', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.update_and_get_response_for_xcvr_cmd', mock.MagicMock(return_value={0: 0, + 1: "True"})) + @mock.patch('show.muxcable.get_result', mock.MagicMock(return_value={"uart_stat1": "2", + "uart_stat2": "1"})) + def test_show_mux_queue_info(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(show.cli.commands["muxcable"].commands["queueinfo"], + ["Ethernet0"], obj=db) + assert result.output == show_muxcable_queueinfo_expected_port_output + + + @mock.patch('show.muxcable.delete_all_keys_in_db_table', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.update_and_get_response_for_xcvr_cmd', mock.MagicMock(return_value={0: 0, + 1: "True"})) + @mock.patch('show.muxcable.get_result', mock.MagicMock(return_value={"reset_cause": 
"1"})) + def test_show_mux_resetcause(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(show.cli.commands["muxcable"].commands["resetcause"], + ["Ethernet0"], obj=db) + assert result.output == show_muxcable_resetcause_expected_port_output + + + + @mock.patch('show.muxcable.delete_all_keys_in_db_table', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.update_and_get_response_for_xcvr_cmd', mock.MagicMock(return_value={0: 0, + 1: "True"})) + @mock.patch('show.muxcable.get_result', mock.MagicMock(return_value={"reset_cause": "1"})) + def test_show_mux_resetcause_json(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(show.cli.commands["muxcable"].commands["resetcause"], + ["Ethernet0", "--json"], obj=db) + assert result.output == show_muxcable_resetcause_expected_port_output_json + + @classmethod def teardown_class(cls): os.environ['UTILITIES_UNIT_TESTING'] = "0" diff --git a/tests/nat_test.py b/tests/nat_test.py new file mode 100644 index 0000000000..e37f13bc71 --- /dev/null +++ b/tests/nat_test.py @@ -0,0 +1,267 @@ +import mock + +from click.testing import CliRunner +from utilities_common.db import Db +from mock import patch +from jsonpatch import JsonPatchConflict +import config.main as config +import config.nat as nat +import config.validated_config_db_connector as validated_config_db_connector + +class TestNat(object): + @classmethod + def setup_class(cls): + print("SETUP") + + def test_add_basic_invalid(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["basic", "65.66.45.1", "12.12.12.14x", "-nat_type", "dnat"], obj=obj) + assert "Please enter a valid local ip address" in result.output + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["basic", "65.66.45.1x", "12.12.12.14", "-nat_type", "dnat"], obj=obj) + assert "Please enter a valid global ip address" in result.output + + @patch("config.nat.SonicV2Connector.get_all", mock.Mock(return_value={"MAX_NAT_ENTRIES": "9999"})) + @patch("config.nat.SonicV2Connector.exists", mock.Mock(return_value="True")) + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError)) + def test_add_basic_yang_validation(self): + nat.ADHOC_VALIDATION = False + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["basic", "65.66.45.1", "12.12.12.14", "-nat_type", "dnat", "-twice_nat_id", "3"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["basic", "65.66.45.1", "12.12.12.14", "-nat_type", "dnat"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["basic", "65.66.45.1", "12.12.12.14", "-twice_nat_id", "3"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["basic", "65.66.45.1", "12.12.12.14"], obj=obj) + assert "Invalid ConfigDB. 
Error" in result.output + + def test_add_tcp_invalid(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["tcp", "65.66.45.1", "100", "12.12.12.14x", "200", "-nat_type", "dnat"], obj=obj) + assert "Please enter a valid local ip address" in result.output + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["tcp", "65.66.45.1x", "100", "12.12.12.14", "200", "-nat_type", "dnat"], obj=obj) + assert "Please enter a valid global ip address" in result.output + + @patch("config.nat.SonicV2Connector.get_all", mock.Mock(return_value={"MAX_NAT_ENTRIES": "9999"})) + @patch("config.nat.SonicV2Connector.exists", mock.Mock(return_value="True")) + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError)) + def test_add_tcp_yang_validation(self): + nat.ADHOC_VALIDATION = False + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["tcp", "65.66.45.1", "100", "12.12.12.14", "200", "-nat_type", "dnat", "-twice_nat_id", "3"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["tcp", "65.66.45.1", "100", "12.12.12.14", "200", "-nat_type", "dnat"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["tcp", "65.66.45.1", "100", "12.12.12.14", "200", "-twice_nat_id", "3"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["tcp", "65.66.45.1", "100", "12.12.12.14", "200"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + def test_add_udp_invalid(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["udp", "65.66.45.1", "100", "12.12.12.14x", "200", "-nat_type", "dnat"], obj=obj) + assert "Please enter a valid local ip address" in result.output + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["udp", "65.66.45.1x", "100", "12.12.12.14", "200", "-nat_type", "dnat"], obj=obj) + assert "Please enter a valid global ip address" in result.output + + @patch("config.nat.SonicV2Connector.get_all", mock.Mock(return_value={"MAX_NAT_ENTRIES": "9999"})) + @patch("config.nat.SonicV2Connector.exists", mock.Mock(return_value="True")) + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError)) + def test_add_udp_yang_validation(self): + nat.ADHOC_VALIDATION = False + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["udp", "65.66.45.1", "100", "12.12.12.14", "200", "-nat_type", "dnat", "-twice_nat_id", "3"], obj=obj) + assert "Invalid ConfigDB. 
Error" in result.output + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["udp", "65.66.45.1", "100", "12.12.12.14", "200", "-nat_type", "dnat"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["udp", "65.66.45.1", "100", "12.12.12.14", "200", "-twice_nat_id", "3"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + result = runner.invoke(config.config.commands["nat"].commands["add"].commands["static"], ["udp", "65.66.45.1", "100", "12.12.12.14", "200"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + def test_remove_basic(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["remove"].commands["static"].commands["basic"], ["65.66.45.1", "12.12.12.14x"], obj=obj) + assert "Please enter a valid local ip address" in result.output + + result = runner.invoke(config.config.commands["nat"].commands["remove"].commands["static"].commands["basic"], ["65.66.45.1x", "12.12.12.14"], obj=obj) + assert "Please enter a valid global ip address" in result.output + + @patch("config.nat.ConfigDBConnector.get_entry", mock.Mock(return_value={"local_ip": "12.12.12.14"})) + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=JsonPatchConflict)) + def test_remove_basic_yang_validation(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["remove"].commands["static"].commands["basic"], ["65.66.45.1", "12.12.12.14"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + def test_remove_udp(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["remove"].commands["static"].commands["udp"], ["65.66.45.1", "100", "12.12.12.14x", "200"], obj=obj) + assert "Please enter a valid local ip address" in result.output + + result = runner.invoke(config.config.commands["nat"].commands["remove"].commands["static"].commands["udp"], ["65.66.45.1x", "100", "12.12.12.14", "200"], obj=obj) + assert "Please enter a valid global ip address" in result.output + + @patch("config.nat.ConfigDBConnector.get_entry", mock.Mock(return_value={"local_ip": "12.12.12.14", "local_port": "200"})) + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=JsonPatchConflict)) + def test_remove_udp_yang_validation(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["remove"].commands["static"].commands["udp"], ["65.66.45.1", "100", "12.12.12.14", "200"], obj=obj) + assert "Invalid ConfigDB. 
Error" in result.output + + @patch("config.nat.ConfigDBConnector.get_table", mock.Mock(return_value={"sample_table_key": "sample_table_value"})) + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=JsonPatchConflict)) + def test_remove_static_all_yang_validation(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["remove"].commands["static"].commands["all"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_enable_yang_validation(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["feature"].commands["enable"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_disable_yang_validation(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["feature"].commands["disable"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_timeout_yang_validation(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["set"].commands["timeout"], ["301"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_tcp_timeout_yang_validation(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["set"].commands["tcp-timeout"], ["301"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_udp_timeout_yang_validation(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["set"].commands["udp-timeout"], ["301"], obj=obj) + assert "Invalid ConfigDB. 
Error" in result.output + + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_reset_timeout_yang_validation(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["reset"].commands["timeout"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_reset_tcp_timeout_yang_validation(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["reset"].commands["tcp-timeout"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output + + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + def test_reset_udp_timeout_yang_validation(self): + nat.ADHOC_VALIDATION = True + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + + result = runner.invoke(config.config.commands["nat"].commands["reset"].commands["udp-timeout"], obj=obj) + assert "Invalid ConfigDB. Error" in result.output diff --git a/tests/radius_test.py b/tests/radius_test.py index 49a1ac3ec4..928e629616 100644 --- a/tests/radius_test.py +++ b/tests/radius_test.py @@ -52,6 +52,16 @@ """ +show_radius_global_nasip_source_ip_output="""\ +RADIUS global auth_type pap (default) +RADIUS global retransmit 3 (default) +RADIUS global timeout 5 (default) +RADIUS global passkey (default) +RADIUS global nas_ip 1.1.1.1 +RADIUS global src_ip 2000::1 + +""" + config_radius_empty_output="""\ """ @@ -217,3 +227,43 @@ def test_config_radius_server_invalid_delete_yang_validation(self): ["delete", "10.10.10.x"]) print(result.output) assert "Invalid ConfigDB. 
Error" in result.output + + def test_config_radius_nasip_sourceip(self, get_cmd_module): + (config, show) = get_cmd_module + runner = CliRunner() + db = Db() + db.cfgdb.delete_table("RADIUS") + db.cfgdb.delete_table("RADIUS_SERVER") + + result = runner.invoke(config.config.commands["radius"],\ + ["nasip", "1.1.1.1"]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + result = runner.invoke(config.config.commands["radius"],\ + ["sourceip", "2000::1"]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + result = runner.invoke(show.cli.commands["radius"], []) + print(result.exit_code) + print(result.output) + assert result.output == show_radius_default_output + + db.cfgdb.mod_entry("RADIUS", "global", \ + {'auth_type' : 'pap (default)', \ + 'retransmit': '3 (default)', \ + 'timeout' : '5 (default)', \ + 'passkey' : ' (default)', \ + 'nas_ip' : '1.1.1.1', \ + 'src_ip' : '2000::1', \ + } \ + ) + + result = runner.invoke(show.cli.commands["radius"], [], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == show_radius_global_nasip_source_ip_output diff --git a/tests/scripts/create_mock_image.sh b/tests/scripts/create_mock_image.sh deleted file mode 100755 index f23032af0d..0000000000 --- a/tests/scripts/create_mock_image.sh +++ /dev/null @@ -1,40 +0,0 @@ -repo_dir=$1 -input_image=$2 -output_file=$3 -cert_file=$4 -key_file=$5 -tmp_dir= -clean_up() -{ - sudo rm -rf $tmp_dir - sudo rm -rf $output_file - exit $1 -} - -DIR="$(dirname "$0")" - -tmp_dir=$(mktemp -d) -sha1=$(cat $input_image | sha1sum | awk '{print $1}') -echo -n "." -cp $repo_dir/installer/sharch_body.sh $output_file || { - echo "Error: Problems copying sharch_body.sh" - clean_up 1 -} -# Replace variables in the sharch template -sed -i -e "s/%%IMAGE_SHA1%%/$sha1/" $output_file -echo -n "." -tar_size="$(wc -c < "${input_image}")" -cat $input_image >> $output_file -sed -i -e "s|%%PAYLOAD_IMAGE_SIZE%%|${tar_size}|" ${output_file} -CMS_SIG="${tmp_dir}/signature.sig" - -echo "$0 CMS signing ${input_image} with ${key_file}. Output file ${output_file}" -. $repo_dir/scripts/sign_image_dev.sh -sign_image_dev ${cert_file} ${key_file} $output_file ${CMS_SIG} || clean_up 1 - -cat ${CMS_SIG} >> ${output_file} -echo "Signature done." 
-# append signature to binary -sudo rm -rf ${CMS_SIG} -sudo rm -rf $tmp_dir -exit 0 diff --git a/tests/scripts/create_sign_and_verify_test_files.sh b/tests/scripts/create_sign_and_verify_test_files.sh deleted file mode 100755 index 0040c04a7a..0000000000 --- a/tests/scripts/create_sign_and_verify_test_files.sh +++ /dev/null @@ -1,91 +0,0 @@ -repo_dir=$1 -out_dir=$2 -mock_image="mock_img.bin" -output_file=$out_dir/output_file.bin -cert_file=$3 -other_cert_file=$4 -tmp_dir= -clean_up() -{ - sudo rm -rf $tmp_dir - sudo rm -rf $mock_image - exit $1 -} -DIR="$(dirname "$0")" -[ -d $out_dir ] || rm -rf $out_dir -mkdir $out_dir -tmp_dir=$(mktemp -d) -#generate self signed keys and certificate -key_file=$tmp_dir/private-key.pem -pub_key_file=$tmp_dir/public-key.pem -openssl ecparam -name secp256r1 -genkey -noout -out $key_file -openssl ec -in $key_file -pubout -out $pub_key_file -openssl req -new -x509 -key $key_file -out $cert_file -days 360 -subj "/C=US/ST=Test/L=Test/O=Test/CN=Test" -alt_key_file=$tmp_dir/alt-private-key.pem -alt_pub_key_file=$tmp_dir/alt-public-key.pem -openssl ecparam -name secp256r1 -genkey -noout -out $alt_key_file -openssl ec -in $alt_key_file -pubout -out $alt_pub_key_file -openssl req -new -x509 -key $alt_key_file -out $other_cert_file -days 360 -subj "/C=US/ST=Test/L=Test/O=Test/CN=Test" - -echo "this is a mock image\nThis is another line !2#4%6\n" > $mock_image -echo "Created a mock image with following text:" -cat $mock_image -# create signed mock image - -sh $DIR/create_mock_image.sh $repo_dir $mock_image $output_file $cert_file $key_file || { - echo "Error: unable to create mock image" - clean_up 1 -} - -[ -f "$output_file" ] || { - echo "signed mock image not created - exiting without testing" - clean_up 1 -} - -test_image_1=$out_dir/test_image_1.bin -cp -v $output_file $test_image_1 || { - echo "Error: Problems copying image" - clean_up 1 -} - -# test_image_1 = modified image size to something else - should fail on signature verification -image_size=$(sed -n 's/^payload_image_size=\(.*\)/\1/p' < $test_image_1) -sed -i "/payload_image_size=/c\payload_image_size=$(($image_size - 5))" $test_image_1 - -test_image_2=$out_dir/test_image_2.bin -cp -v $output_file $test_image_2 || { - echo "Error: Problems copying image" - clean_up 1 -} - -# test_image_2 = modified image sha1 to other sha1 value - should fail on signature verification -im_sha=$(sed -n 's/^payload_sha1=\(.*\)/\1/p' < $test_image_2) -sed -i "/payload_sha1=/c\payload_sha1=2f1bbd5a0d411253103e688e4e66c00c94bedd40" $test_image_2 - -tmp_image=$tmp_dir/"tmp_image.bin" -echo "this is a different image now" >> $mock_image -sh $DIR/create_mock_image.sh $repo_dir $mock_image $tmp_image $cert_file $key_file || { - echo "Error: unable to create mock image" - clean_up 1 -} -# test_image_3 = original mock image with wrong signature -# Extract cms signature from signed file -test_image_3=$out_dir/"test_image_3.bin" -tmp_sig="${tmp_dir}/tmp_sig.sig" -TMP_TAR_SIZE=$(head -n 50 $tmp_image | grep "payload_image_size=" | cut -d"=" -f2- ) -sed -e '1,/^exit_marker$/d' $tmp_image | tail -c +$(( $TMP_TAR_SIZE + 1 )) > $tmp_sig - -TAR_SIZE=$(head -n 50 $output_file | grep "payload_image_size=" | cut -d"=" -f2- ) -SHARCH_SIZE=$(sed '/^exit_marker$/q' $output_file | wc -c) -SIG_PAYLOAD_SIZE=$(($TAR_SIZE + $SHARCH_SIZE )) -head -c $SIG_PAYLOAD_SIZE $output_file > $test_image_3 -sudo rm -rf $tmp_image - -cat ${tmp_sig} >> ${test_image_3} - -# test_image_4 = modified image with original mock image signature 
-test_image_4=$out_dir/"test_image_4.bin" -head -c $SIG_PAYLOAD_SIZE $output_file > $test_image_4 -echo "this is additional line" >> $test_image_4 -cat ${tmp_sig} >> ${test_image_4} -clean_up 0 \ No newline at end of file diff --git a/tests/scripts/verify_image_sign_test.sh b/tests/scripts/verify_image_sign_test.sh deleted file mode 100755 index f4abd2584f..0000000000 --- a/tests/scripts/verify_image_sign_test.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -image_file="${1}" -cert_path="${2}" -cms_sig_file="sig.cms" -TMP_DIR=$(mktemp -d) -DATA_FILE="${TMP_DIR}/data.bin" -CMS_SIG_FILE="${TMP_DIR}/${cms_sig_file}" -lines_for_lookup=50 - -TAR_SIZE=$(head -n $lines_for_lookup $image_file | grep "payload_image_size=" | cut -d"=" -f2- ) -SHARCH_SIZE=$(sed '/^exit_marker$/q' $image_file | wc -c) -SIG_PAYLOAD_SIZE=$(($TAR_SIZE + $SHARCH_SIZE )) -# Extract cms signature from signed file - exit marker marks last sharch prefix + number of image lines + 1 for next linel -# Add extra byte for payload - extracting image signature from line after data file -sed -e '1,/^exit_marker$/d' $image_file | tail -c +$(( $TAR_SIZE + 1 )) > $CMS_SIG_FILE -# Extract image from signed file -head -c $SIG_PAYLOAD_SIZE $image_file > $DATA_FILE -EFI_CERTS_DIR=/tmp/efi_certs -[ -d $EFI_CERTS_DIR ] && rm -rf $EFI_CERTS_DIR -mkdir $EFI_CERTS_DIR -cp $cert_path $EFI_CERTS_DIR/cert.pem - -DIR="$(dirname "$0")" -. $DIR/verify_image_sign_common.sh -verify_image_sign_common $image_file $DATA_FILE $CMS_SIG_FILE -VERIFICATION_RES=$? -if [ -d "${TMP_DIR}" ]; then rm -rf ${TMP_DIR}; fi -[ -d $EFI_CERTS_DIR ] && rm -rf $EFI_CERTS_DIR -exit $VERIFICATION_RES \ No newline at end of file diff --git a/tests/sflow_test.py b/tests/sflow_test.py index 226e52ae5e..da03ff396e 100644 --- a/tests/sflow_test.py +++ b/tests/sflow_test.py @@ -3,6 +3,7 @@ import pytest from unittest import mock +from jsonpatch import JsonPatchConflict from click.testing import CliRunner from utilities_common.db import Db from mock import patch @@ -193,6 +194,25 @@ def test_config_sflow_collector(self): assert result.output == show_sflow_output return + + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=JsonPatchConflict)) + def test_config_sflow_collector_invalid_yang_validation(self): + db = Db() + runner = CliRunner() + obj = {'db':db.cfgdb} + + config.ADHOC_VALIDTION = False + result = runner.invoke(config.config.commands["sflow"]. + commands["collector"].commands["del"], ["prod"], obj=obj) + print(result.exit_code, result.output) + assert "Invalid ConfigDB. Error" in result.output + + result = runner.invoke(config.config.commands["sflow"]. + commands["collector"].commands["add"], + ["prod", "fe80::6e82:6aff:fe1e:cd8e", "--vrf", "mgmt"], obj=obj) + assert "Invalid ConfigDB. 
Error" in result.output @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) diff --git a/tests/sfp_test.py b/tests/sfp_test.py index 6d5d9fa7af..b6b94ebff6 100644 --- a/tests/sfp_test.py +++ b/tests/sfp_test.py @@ -193,6 +193,68 @@ Vendor SN: INKAO2900002A """ +test_qsfp_dd_pm_output = """\ +Ethernet44: + Parameter Unit Min Avg Max Threshold Threshold Threshold Threshold Threshold Threshold + High High Crossing Low Low Crossing + Alarm Warning Alert-High Alarm Warning Alert-Low + --------------- ------ -------- -------- -------- ----------- ----------- ------------ ----------- ----------- ----------- + Tx Power dBm -8.22 -8.23 -8.24 -5.0 -6.0 False -16.99 -16.003 False + Rx Total Power dBm -10.61 -10.62 -10.62 2.0 0.0 False -21.0 -18.0 False + Rx Signal Power dBm -40.0 0.0 40.0 13.0 10.0 True -18.0 -15.0 True + CD-short link ps/nm 0.0 0.0 0.0 1000.0 500.0 False -1000.0 -500.0 False + PDL dB 0.5 0.6 0.6 4.0 4.0 False 0.0 0.0 False + OSNR dB 36.5 36.5 36.5 99.0 99.0 False 0.0 0.0 False + eSNR dB 30.5 30.5 30.5 99.0 99.0 False 0.0 0.0 False + CFO MHz 54.0 70.0 121.0 3800.0 3800.0 False -3800.0 -3800.0 False + DGD ps 5.37 5.56 5.81 7.0 7.0 False 0.0 0.0 False + SOPMD ps^2 0.0 0.0 0.0 655.35 655.35 False 0.0 0.0 False + SOP ROC krad/s 1.0 1.0 2.0 N/A N/A N/A N/A N/A N/A + Pre-FEC BER N/A 4.58E-04 4.66E-04 5.76E-04 1.25E-02 1.10E-02 0.0 0.0 0.0 0.0 + Post-FEC BER N/A 0.0 0.0 0.0 1000.0 1.0 False 0.0 0.0 False + EVM % 100.0 100.0 100.0 N/A N/A N/A N/A N/A N/A +""" + +test_cmis_eeprom_output = """\ +Ethernet64: SFP EEPROM detected + Active Firmware: X.X + Active application selected code assigned to host lane 1: 1 + Active application selected code assigned to host lane 2: 1 + Active application selected code assigned to host lane 3: 1 + Active application selected code assigned to host lane 4: 1 + Active application selected code assigned to host lane 5: 1 + Active application selected code assigned to host lane 6: 1 + Active application selected code assigned to host lane 7: 1 + Active application selected code assigned to host lane 8: 1 + Application Advertisement: 400GAUI-8 C2M (Annex 120E) - Host Assign (0x1) - 400ZR, DWDM, amplified - Media Assign (0x1) + 400GAUI-8 C2M (Annex 120E) - Host Assign (0x1) - 400ZR, Single Wavelength, Unamplified - Media Assign (0x1) + 100GAUI-2 C2M (Annex 135G) - Host Assign (0x55) - 400ZR, DWDM, amplified - Media Assign (0x1) + CMIS Rev: 4.1 + Connector: LC + Encoding: N/A + Extended Identifier: Power Class 8 (20.0W Max) + Extended RateSelect Compliance: N/A + Host Lane Count: 8 + Identifier: QSFP-DD Double Density 8X Pluggable Transceiver + Inactive Firmware: X.X + Length Cable Assembly(m): 0.0 + Media Interface Technology: 1550 nm DFB + Media Lane Count: 1 + Module Hardware Rev: X.X + Nominal Bit Rate(100Mbs): 0 + Specification compliance: sm_media_interface + Supported Max Laser Frequency: 196100 + Supported Max TX Power: 4.0 + Supported Min Laser Frequency: 191300 + Supported Min TX Power: -22.9 + Vendor Date Code(YYYY-MM-DD Lot): 2021-11-19 + Vendor Name: XXXX + Vendor OUI: XX-XX-XX + Vendor PN: XXX + Vendor Rev: XX + Vendor SN: 0123456789 +""" + test_sfp_eeprom_dom_all_output = """\ Ethernet0: SFP EEPROM detected Application Advertisement: N/A @@ -245,22 +307,42 @@ Ethernet4: SFP EEPROM Not detected Ethernet64: SFP EEPROM detected - Application Advertisement: N/A - 
Connector: No separable connector - Encoding: 64B66B - Extended Identifier: Power Class 3(2.5W max), CDR present in Rx Tx - Extended RateSelect Compliance: QSFP+ Rate Select Version 1 - Identifier: QSFP28 or later - Length Cable Assembly(m): 3 - Nominal Bit Rate(100Mbs): 255 - Specification compliance: - 10/40G Ethernet Compliance Code: 40G Active Cable (XLPPI) - Vendor Date Code(YYYY-MM-DD Lot): 2017-01-13 - Vendor Name: Mellanox - Vendor OUI: 00-02-c9 - Vendor PN: MFA1A00-C003 - Vendor Rev: AC - Vendor SN: MT1706FT02064 + Active Firmware: X.X + Active application selected code assigned to host lane 1: 1 + Active application selected code assigned to host lane 2: 1 + Active application selected code assigned to host lane 3: 1 + Active application selected code assigned to host lane 4: 1 + Active application selected code assigned to host lane 5: 1 + Active application selected code assigned to host lane 6: 1 + Active application selected code assigned to host lane 7: 1 + Active application selected code assigned to host lane 8: 1 + Application Advertisement: 400GAUI-8 C2M (Annex 120E) - Host Assign (0x1) - 400ZR, DWDM, amplified - Media Assign (0x1) + 400GAUI-8 C2M (Annex 120E) - Host Assign (0x1) - 400ZR, Single Wavelength, Unamplified - Media Assign (0x1) + 100GAUI-2 C2M (Annex 135G) - Host Assign (0x55) - 400ZR, DWDM, amplified - Media Assign (0x1) + CMIS Rev: 4.1 + Connector: LC + Encoding: N/A + Extended Identifier: Power Class 8 (20.0W Max) + Extended RateSelect Compliance: N/A + Host Lane Count: 8 + Identifier: QSFP-DD Double Density 8X Pluggable Transceiver + Inactive Firmware: X.X + Length Cable Assembly(m): 0.0 + Media Interface Technology: 1550 nm DFB + Media Lane Count: 1 + Module Hardware Rev: X.X + Nominal Bit Rate(100Mbs): 0 + Specification compliance: sm_media_interface + Supported Max Laser Frequency: 196100 + Supported Max TX Power: 4.0 + Supported Min Laser Frequency: 191300 + Supported Min TX Power: -22.9 + Vendor Date Code(YYYY-MM-DD Lot): 2021-11-19 + Vendor Name: XXXX + Vendor OUI: XX-XX-XX + Vendor PN: XXX + Vendor Rev: XX + Vendor SN: 0123456789 ChannelMonitorValues: RX1Power: 0.3802dBm RX2Power: -0.4871dBm @@ -315,22 +397,42 @@ Ethernet4: SFP EEPROM Not detected Ethernet64: SFP EEPROM detected - Application Advertisement: N/A - Connector: No separable connector - Encoding: 64B66B - Extended Identifier: Power Class 3(2.5W max), CDR present in Rx Tx - Extended RateSelect Compliance: QSFP+ Rate Select Version 1 - Identifier: QSFP28 or later - Length Cable Assembly(m): 3 - Nominal Bit Rate(100Mbs): 255 - Specification compliance: - 10/40G Ethernet Compliance Code: 40G Active Cable (XLPPI) - Vendor Date Code(YYYY-MM-DD Lot): 2017-01-13 - Vendor Name: Mellanox - Vendor OUI: 00-02-c9 - Vendor PN: MFA1A00-C003 - Vendor Rev: AC - Vendor SN: MT1706FT02064 + Active Firmware: X.X + Active application selected code assigned to host lane 1: 1 + Active application selected code assigned to host lane 2: 1 + Active application selected code assigned to host lane 3: 1 + Active application selected code assigned to host lane 4: 1 + Active application selected code assigned to host lane 5: 1 + Active application selected code assigned to host lane 6: 1 + Active application selected code assigned to host lane 7: 1 + Active application selected code assigned to host lane 8: 1 + Application Advertisement: 400GAUI-8 C2M (Annex 120E) - Host Assign (0x1) - 400ZR, DWDM, amplified - Media Assign (0x1) + 400GAUI-8 C2M (Annex 120E) - Host Assign (0x1) - 400ZR, Single Wavelength, Unamplified - 
Media Assign (0x1) + 100GAUI-2 C2M (Annex 135G) - Host Assign (0x55) - 400ZR, DWDM, amplified - Media Assign (0x1) + CMIS Rev: 4.1 + Connector: LC + Encoding: N/A + Extended Identifier: Power Class 8 (20.0W Max) + Extended RateSelect Compliance: N/A + Host Lane Count: 8 + Identifier: QSFP-DD Double Density 8X Pluggable Transceiver + Inactive Firmware: X.X + Length Cable Assembly(m): 0.0 + Media Interface Technology: 1550 nm DFB + Media Lane Count: 1 + Module Hardware Rev: X.X + Nominal Bit Rate(100Mbs): 0 + Specification compliance: sm_media_interface + Supported Max Laser Frequency: 196100 + Supported Max TX Power: 4.0 + Supported Min Laser Frequency: 191300 + Supported Min TX Power: -22.9 + Vendor Date Code(YYYY-MM-DD Lot): 2021-11-19 + Vendor Name: XXXX + Vendor OUI: XX-XX-XX + Vendor PN: XXX + Vendor Rev: XX + Vendor SN: 0123456789 """ test_sfp_presence_all_output = """\ @@ -341,6 +443,14 @@ Ethernet64 Present """ +test_qsfp_dd_pm_all_output = """\ +Ethernet0: Transceiver performance monitoring not applicable + +Ethernet4: Transceiver performance monitoring not applicable + +Ethernet64: Transceiver performance monitoring not applicable +""" + class TestSFP(object): @classmethod def setup_class(cls): @@ -434,6 +544,12 @@ def test_qsfp_dd_eeprom_adv_app(self): print(result.output) assert result.output == test_qsfp_dd_eeprom_adv_app_output + def test_cmis_info(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["info"], ["Ethernet64"]) + assert result.exit_code == 0 + assert result.output == test_cmis_eeprom_output + def test_rj45_eeprom(self): runner = CliRunner() result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["eeprom"], ["Ethernet36"]) @@ -441,6 +557,17 @@ def test_rj45_eeprom(self): expected = "Ethernet36: SFP EEPROM is not applicable for RJ45 port" assert result_lines == expected + def test_qsfp_dd_pm(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["pm"], ["Ethernet44"]) + assert result.exit_code == 0 + assert "\n".join([ l.rstrip() for l in result.output.split('\n')]) == test_qsfp_dd_pm_output + + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["pm"], ["Ethernet200"]) + result_lines = result.output.strip('\n') + expected = "Ethernet200: Transceiver performance monitoring not applicable" + assert result_lines == expected + @classmethod def teardown_class(cls): print("TEARDOWN") @@ -497,23 +624,31 @@ def test_sfp_eeprom_with_ns(self): expected = "Ethernet200: SFP EEPROM Not detected" assert result_lines == expected - def test_sfp_eeprom_with_ns(self): + def test_qsfp_dd_pm_with_ns(self): runner = CliRunner() - result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["eeprom"], ["Ethernet0 -n asic0"]) - assert result.exit_code == 0 - assert "\n".join([ l.rstrip() for l in result.output.split('\n')]) == test_sfp_eeprom_output - - result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["eeprom"], ["Ethernet200 -n asic0"]) + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["pm"], ["Ethernet0 -n asic0"]) result_lines = result.output.strip('\n') - expected = "Ethernet200: SFP EEPROM Not detected" + expected = "Ethernet0: Transceiver performance monitoring not applicable" assert result_lines == expected + def test_cmis_sfp_info_with_ns(self): + runner = CliRunner() + result = 
runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["info"], ["Ethernet64 -n asic1"]) + assert result.exit_code == 0 + assert "\n".join([ l.rstrip() for l in result.output.split('\n')]) == test_cmis_eeprom_output + def test_sfp_eeprom_all(self): runner = CliRunner() result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["eeprom"]) assert result.exit_code == 0 assert "\n".join([ l.rstrip() for l in result.output.split('\n')]) == test_sfp_eeprom_all_output + def test_sfp_info_all(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["info"]) + assert result.exit_code == 0 + assert "\n".join([ l.rstrip() for l in result.output.split('\n')]) == test_sfp_eeprom_all_output + def test_sfp_eeprom_dom_all(self): runner = CliRunner() result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["eeprom"], ["-d"]) @@ -527,6 +662,12 @@ def test_is_rj45_port(self): sys.modules.pop('sonic_platform') assert platform_sfputil_helper.is_rj45_port("Ethernet0") == False + def test_qsfp_dd_pm_all(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["pm"]) + assert result.exit_code == 0 + assert "\n".join([ l.rstrip() for l in result.output.split('\n')]) == test_qsfp_dd_pm_all_output + @classmethod def teardown_class(cls): print("TEARDOWN") diff --git a/tests/show_acl_test.py b/tests/show_acl_test.py new file mode 100644 index 0000000000..1b2cdc60a9 --- /dev/null +++ b/tests/show_acl_test.py @@ -0,0 +1,95 @@ +import os +import pytest +from click.testing import CliRunner + +import acl_loader.main as acl_loader_show +from acl_loader import * +from acl_loader.main import * +from importlib import reload + +root_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(root_path) +scripts_path = os.path.join(modules_path, "scripts") + + +@pytest.fixture() +def setup_teardown_single_asic(): + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + yield + os.environ["UTILITIES_UNIT_TESTING"] = "0" + + +@pytest.fixture(scope="class") +def setup_teardown_multi_asic(): + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + from .mock_tables import mock_multi_asic_3_asics + reload(mock_multi_asic_3_asics) + from .mock_tables import dbconnector + dbconnector.load_namespace_config() + yield + os.environ["UTILITIES_UNIT_TESTING"] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + from .mock_tables import mock_single_asic + reload(mock_single_asic) + + +class TestShowACLSingleASIC(object): + def test_show_acl_table(self, setup_teardown_single_asic): + runner = CliRunner() + aclloader = AclLoader() + context = { + "acl_loader": aclloader + } + result = runner.invoke(acl_loader_show.cli.commands['show'].commands['table'], ['DATAACL_5'], obj=context) + assert result.exit_code == 0 + # We only care about the third line, which contains the 'Active' + result_top = result.output.split('\n')[2] + expected_output = "DATAACL_5 L3 Ethernet124 DATAACL_5 ingress Active" + assert result_top == expected_output + + def test_show_acl_rule(self, setup_teardown_single_asic): + runner = CliRunner() + aclloader = AclLoader() + context = { + "acl_loader": aclloader + } + result = 
runner.invoke(acl_loader_show.cli.commands['show'].commands['rule'], ['DATAACL_5'], obj=context) + assert result.exit_code == 0 + # We only care about the third line, which contains the 'Active' + result_top = result.output.split('\n')[2] + expected_output = "DATAACL_5 RULE_1 9999 FORWARD IP_PROTOCOL: 126 Active" + assert result_top == expected_output + + +class TestShowACLMultiASIC(object): + def test_show_acl_table(self, setup_teardown_multi_asic): + runner = CliRunner() + aclloader = AclLoader() + context = { + "acl_loader": aclloader + } + result = runner.invoke(acl_loader_show.cli.commands['show'].commands['table'], ['DATAACL_5'], obj=context) + assert result.exit_code == 0 + # We only care about the third line, which contains the 'Active' + result_top = result.output.split('\n')[2] + expected_output = "DATAACL_5 L3 Ethernet124 DATAACL_5 ingress {'asic0': 'Active', 'asic2': 'Active'}" + assert result_top == expected_output + + def test_show_acl_rule(self, setup_teardown_multi_asic): + runner = CliRunner() + aclloader = AclLoader() + context = { + "acl_loader": aclloader + } + result = runner.invoke(acl_loader_show.cli.commands['show'].commands['rule'], ['DATAACL_5'], obj=context) + assert result.exit_code == 0 + # We only care about the third line, which contains the 'Active' + result_top = result.output.split('\n')[2] + expected_output = "DATAACL_5 RULE_1 9999 FORWARD IP_PROTOCOL: 126 {'asic0': 'Active', 'asic2': 'Active'}" + assert result_top == expected_output + + diff --git a/tests/show_run_bgp_test.py b/tests/show_run_bgp_test.py new file mode 100644 index 0000000000..4d3ff843a0 --- /dev/null +++ b/tests/show_run_bgp_test.py @@ -0,0 +1,228 @@ +import os +import pytest +import importlib +from click.testing import CliRunner + +from utilities_common import multi_asic +from utilities_common import constants + +from unittest.mock import patch + +from sonic_py_common import device_info +import show.main as show + + +show_run_bgp_sasic = \ +"""router bgp 65100 +bgp router-id 10.1.0.32 +bgp log-neighbor-changes +no bgp ebgp-requires-policy +no bgp default ipv4-unicast +bgp graceful-restart restart-time 240 +bgp graceful-restart select-defer-time 45 +bgp graceful-restart +bgp graceful-restart preserve-fw-state +bgp bestpath as-path multipath-relax +neighbor BGPSLBPassive peer-group +neighbor BGPSLBPassive remote-as 65432 +neighbor BGPSLBPassive passive +neighbor BGPSLBPassive ebgp-multihop 255 +neighbor BGPSLBPassive update-source 10.1.0.32 +neighbor BGPVac peer-group +neighbor BGPVac remote-as 65432 +neighbor BGPVac passive +neighbor BGPVac ebgp-multihop 255 +neighbor BGPVac update-source 10.1.0.32 +neighbor PEER_V4 peer-group +neighbor PEER_V6 peer-group +neighbor 10.0.0.57 remote-as 64600 +neighbor 10.0.0.57 peer-group PEER_V4 +neighbor 10.0.0.57 description ARISTA01T1 +neighbor 10.0.0.57 timers 3 10 +neighbor 10.0.0.57 timers connect 10 +neighbor 10.0.0.59 remote-as 64600 +neighbor 10.0.0.59 peer-group PEER_V4 +neighbor 10.0.0.59 description ARISTA02T1 +neighbor 10.0.0.59 timers 3 10 +neighbor 10.0.0.59 timers connect 10 +neighbor 10.0.0.61 remote-as 64600 +neighbor 10.0.0.61 peer-group PEER_V4 +neighbor 10.0.0.61 description ARISTA03T1 +neighbor 10.0.0.61 timers 3 10 +neighbor 10.0.0.61 timers connect 10 +neighbor 10.0.0.63 remote-as 64600 +neighbor 10.0.0.63 peer-group PEER_V4 +neighbor 10.0.0.63 description ARISTA04T1 +neighbor 10.0.0.63 timers 3 10 +neighbor 10.0.0.63 timers connect 10 +neighbor fc00::72 remote-as 64600 +neighbor fc00::72 peer-group PEER_V6 +neighbor fc00::72 
description ARISTA01T1 +neighbor fc00::72 timers 3 10 +neighbor fc00::72 timers connect 10 +neighbor fc00::76 remote-as 64600 +neighbor fc00::76 peer-group PEER_V6 +neighbor fc00::76 description ARISTA02T1 +neighbor fc00::76 timers 3 10 +neighbor fc00::76 timers connect 10 +neighbor fc00::7a remote-as 64600 +neighbor fc00::7a peer-group PEER_V6 +neighbor fc00::7a description ARISTA03T1 +neighbor fc00::7a timers 3 10 +neighbor fc00::7a timers connect 10 +neighbor fc00::7e remote-as 64600 +neighbor fc00::7e peer-group PEER_V6 +neighbor fc00::7e description ARISTA04T1 +neighbor fc00::7e timers 3 10 +neighbor fc00::7e timers connect 10 +bgp listen range 10.255.0.0/25 peer-group BGPSLBPassive +bgp listen range 192.168.0.0/21 peer-group BGPVac + +""" + +show_run_bgp_masic = \ +""" +------------Showing running config bgp on asic0------------ +neighbor 10.0.0.1 remote-as 65200 +neighbor 10.0.0.1 peer-group TIER2_V4 +neighbor 10.0.0.1 description ARISTA01T2 +neighbor 10.0.0.5 remote-as 65200 +neighbor 10.0.0.5 peer-group TIER2_V4 +neighbor 10.0.0.5 description ARISTA03T2 +neighbor fc00::2 remote-as 65200 +neighbor fc00::2 peer-group TIER2_V6 +neighbor fc00::2 description ARISTA01T2 +neighbor fc00::6 remote-as 65200 +neighbor fc00::6 peer-group TIER2_V6 +neighbor fc00::6 description ARISTA03T2 + +------------Showing running config bgp on asic1------------ +neighbor 10.0.0.9 remote-as 65200 +neighbor 10.0.0.9 peer-group TIER2_V4 +neighbor 10.0.0.9 description ARISTA05T2 +neighbor 10.0.0.13 remote-as 65200 +neighbor 10.0.0.13 peer-group TIER2_V4 +neighbor 10.0.0.13 description ARISTA07T2 +neighbor fc00::a remote-as 65200 +neighbor fc00::a peer-group TIER2_V6 +neighbor fc00::a description ARISTA05T2 +neighbor fc00::e remote-as 65200 +neighbor fc00::e peer-group TIER2_V6 +neighbor fc00::e description ARISTA07T2 + +""" + +show_run_bgp_masic_asic0 = \ +""" +------------Showing running config bgp on asic0------------ +neighbor 10.0.0.1 remote-as 65200 +neighbor 10.0.0.1 peer-group TIER2_V4 +neighbor 10.0.0.1 description ARISTA01T2 +neighbor 10.0.0.5 remote-as 65200 +neighbor 10.0.0.5 peer-group TIER2_V4 +neighbor 10.0.0.5 description ARISTA03T2 +neighbor fc00::2 remote-as 65200 +neighbor fc00::2 peer-group TIER2_V6 +neighbor fc00::2 description ARISTA01T2 +neighbor fc00::6 remote-as 65200 +neighbor fc00::6 peer-group TIER2_V6 +neighbor fc00::6 description ARISTA03T2 + +""" + +show_run_bgp_not_running = \ +""" +------------Showing running config bgp on asic0------------ +Error response from daemon: Container 70e3d3bafd1ab5faf796892acff3e2ccbea3dcd5dcfefcc34f25f7cc916b67bb is not running + +""" + +class TestShowRunBgpSingleAsic(object): + @classmethod + def setup_class(cls): + print("SETUP") + from .mock_tables import mock_single_asic + importlib.reload(mock_single_asic) + from .mock_tables import dbconnector + dbconnector.load_namespace_config() + + @pytest.mark.parametrize('setup_single_bgp_instance', + [ + 'show_run_bgp', + ], + indirect=['setup_single_bgp_instance']) + + def test_show_run_bgp_single(self, + setup_single_bgp_instance): + runner = CliRunner() + result = runner.invoke(show.cli.commands["runningconfiguration"].commands["bgp"], []) + print("{}".format(result.output)) + assert result.exit_code == 0 + assert result.output == show_run_bgp_sasic + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ['UTILITIES_UNIT_TESTING'] = "0" + from .mock_tables import mock_single_asic + importlib.reload(mock_single_asic) + from .mock_tables import dbconnector + 
dbconnector.load_database_config()
+
+
+class TestShowRunBgpMultiAsic(object):
+    @classmethod
+    def setup_class(cls):
+        print("SETUP")
+        from .mock_tables import mock_multi_asic
+        importlib.reload(mock_multi_asic)
+        from .mock_tables import dbconnector
+        dbconnector.load_namespace_config()
+
+    @pytest.mark.parametrize('setup_multi_asic_bgp_instance',
+                             [
+                                 'show_run_bgp',
+                             ],
+                             indirect=['setup_multi_asic_bgp_instance'])
+    def test_show_run_bgp_all_asics(self,
+                                    setup_multi_asic_bgp_instance):
+        runner = CliRunner()
+        result = runner.invoke(show.cli.commands["runningconfiguration"].commands["bgp"], [])
+        print("{}".format(result.output))
+        assert result.exit_code == 0
+        assert result.output == show_run_bgp_masic
+
+
+    @pytest.mark.parametrize('setup_multi_asic_bgp_instance',
+                             [
+                                 'show_run_bgp',
+                             ],
+                             indirect=['setup_multi_asic_bgp_instance'])
+    def test_show_run_bgp_asic0(self,
+                                setup_multi_asic_bgp_instance):
+        runner = CliRunner()
+        result = runner.invoke(show.cli.commands["runningconfiguration"].commands["bgp"], ["-nasic0"])
+        print("{}".format(result.output))
+        assert result.exit_code == 0
+        assert result.output == show_run_bgp_masic_asic0
+
+    @pytest.mark.parametrize('setup_multi_asic_bgp_instance',
+                             [
+                                 'show_not_running_bgp',
+                             ],
+                             indirect=['setup_multi_asic_bgp_instance'])
+    def test_bgp0_not_running(self,
+                              setup_multi_asic_bgp_instance):
+        runner = CliRunner()
+        result = runner.invoke(show.cli.commands["runningconfiguration"].commands["bgp"], ["-nasic0"])
+        print("{}".format(result.output))
+        assert result.exit_code == 0
+        assert result.output == show_run_bgp_not_running
+
+    @classmethod
+    def teardown_class(cls):
+        print("TEARDOWN")
+        from .mock_tables import mock_single_asic
+        importlib.reload(mock_single_asic)
+        from .mock_tables import dbconnector
+        dbconnector.load_database_config()
diff --git a/tests/show_test.py b/tests/show_test.py
index 87c1b5a17e..ddb59078b2 100644
--- a/tests/show_test.py
+++ b/tests/show_test.py
@@ -1,9 +1,12 @@
 import os
 import sys
+import pytest
 import show.main as show
 from click.testing import CliRunner
 from unittest import mock
-from unittest.mock import call, MagicMock
+from unittest.mock import call, MagicMock, patch
+
+EXPECTED_BASE_COMMAND = 'sudo '
 
 test_path = os.path.dirname(os.path.abspath(__file__))
 modules_path = os.path.dirname(test_path)
@@ -49,3 +52,98 @@ def teardown_class(cls):
         print("TEARDOWN")
         os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1])
         os.environ["UTILITIES_UNIT_TESTING"] = "0"
+
+@patch('show.main.run_command')
+@pytest.mark.parametrize(
+    "cli_arguments,expected",
+    [
+        ([], 'cat /var/log/syslog'),
+        (['xcvrd'], "cat /var/log/syslog | grep 'xcvrd'"),
+        (['-l', '10'], 'cat /var/log/syslog | tail -10'),
+        (['-f'], 'tail -F /var/log/syslog'),
+    ]
+)
+def test_show_logging_default(run_command, cli_arguments, expected):
+    runner = CliRunner()
+    result = runner.invoke(show.cli.commands["logging"], cli_arguments)
+    run_command.assert_called_with(EXPECTED_BASE_COMMAND + expected, display_cmd=False)
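Reviewer note: the parametrized cases above, together with the three variants that follow, pin down how `show logging` chooses its log source. A compact sketch of the selection logic they imply (assumed shape; the real logic lives in show/main.py, and `build_logging_command` is a hypothetical helper):

```python
# Sketch of the log-source selection these tests exercise. The mocked
# os.path.exists/os.path.isfile calls below toggle the tmpfs and
# rotated-file branches respectively.
import os

def build_logging_command(process=None, lines=None, follow=False):
    log_dir = '/var/log.tmpfs' if os.path.exists('/var/log.tmpfs') else '/var/log'
    syslog = log_dir + '/syslog'
    if follow:
        return 'sudo tail -F {}'.format(syslog)
    files = syslog
    if os.path.isfile(syslog + '.1'):
        # Include the most recent rotated file first.
        files = '{}.1 {}'.format(syslog, syslog)
    command = 'sudo cat {}'.format(files)
    if process:
        command += " | grep '{}'".format(process)
    if lines:
        command += ' | tail -{}'.format(lines)
    return command
```

+
+@patch('show.main.run_command')
+@patch('os.path.isfile', MagicMock(return_value=True))
+@pytest.mark.parametrize(
+    "cli_arguments,expected",
+    [
+        ([], 'cat /var/log/syslog.1 /var/log/syslog'),
+        (['xcvrd'], "cat /var/log/syslog.1 /var/log/syslog | grep 'xcvrd'"),
+        (['-l', '10'], 'cat /var/log/syslog.1 /var/log/syslog | tail -10'),
+        (['-f'], 'tail -F /var/log/syslog'),
+    ]
+)
+def test_show_logging_syslog_1(run_command, cli_arguments, expected):
+    runner = CliRunner()
+    result = runner.invoke(show.cli.commands["logging"], cli_arguments)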
+    run_command.assert_called_with(EXPECTED_BASE_COMMAND + expected, display_cmd=False)
+
+@patch('show.main.run_command')
+@patch('os.path.exists', MagicMock(return_value=True))
+@pytest.mark.parametrize(
+    "cli_arguments,expected",
+    [
+        ([], 'cat /var/log.tmpfs/syslog'),
+        (['xcvrd'], "cat /var/log.tmpfs/syslog | grep 'xcvrd'"),
+        (['-l', '10'], 'cat /var/log.tmpfs/syslog | tail -10'),
+        (['-f'], 'tail -F /var/log.tmpfs/syslog'),
+    ]
+)
+def test_show_logging_tmpfs(run_command, cli_arguments, expected):
+    runner = CliRunner()
+    result = runner.invoke(show.cli.commands["logging"], cli_arguments)
+    run_command.assert_called_with(EXPECTED_BASE_COMMAND + expected, display_cmd=False)
+
+@patch('show.main.run_command')
+@patch('os.path.isfile', MagicMock(return_value=True))
+@patch('os.path.exists', MagicMock(return_value=True))
+@pytest.mark.parametrize(
+    "cli_arguments,expected",
+    [
+        ([], 'cat /var/log.tmpfs/syslog.1 /var/log.tmpfs/syslog'),
+        (['xcvrd'], "cat /var/log.tmpfs/syslog.1 /var/log.tmpfs/syslog | grep 'xcvrd'"),
+        (['-l', '10'], 'cat /var/log.tmpfs/syslog.1 /var/log.tmpfs/syslog | tail -10'),
+        (['-f'], 'tail -F /var/log.tmpfs/syslog'),
+    ]
+)
+def test_show_logging_tmpfs_syslog_1(run_command, cli_arguments, expected):
+    runner = CliRunner()
+    result = runner.invoke(show.cli.commands["logging"], cli_arguments)
+    run_command.assert_called_with(EXPECTED_BASE_COMMAND + expected, display_cmd=False)
+
+def side_effect_subprocess_popen(*args, **kwargs):
+    mock = MagicMock()
+    if args[0] == "uptime":
+        mock.stdout.read.return_value = "05:58:07 up 25 days"
+    elif args[0].startswith("sudo docker images"):
+        mock.stdout.read.return_value = "REPOSITORY TAG"
+    return mock
+
+@patch('sonic_py_common.device_info.get_sonic_version_info', MagicMock(return_value={
+    "build_version": "release-1.1-7d94c0c28",
+    "sonic_os_version": "11",
+    "debian_version": "11.6",
+    "kernel_version": "5.10",
+    "commit_id": "7d94c0c28",
+    "build_date": "Wed Feb 15 06:17:08 UTC 2023",
+    "built_by": "AzDevOps"}))
+@patch('sonic_py_common.device_info.get_platform_info', MagicMock(return_value={
+    "platform": "x86_64-kvm_x86_64-r0",
+    "hwsku": "Force10-S6000",
+    "asic_type": "vs",
+    "asic_count": 1}))
+@patch('sonic_py_common.device_info.get_chassis_info', MagicMock(return_value={
+    "serial": "N/A",
+    "model": "N/A",
+    "revision": "N/A"}))
+@patch('subprocess.Popen', MagicMock(side_effect=side_effect_subprocess_popen))
+def test_show_version():
+    runner = CliRunner()
+    result = runner.invoke(show.cli.commands["version"])
+    assert "SONiC OS Version: 11" in result.output
diff --git a/tests/show_vnet_test.py b/tests/show_vnet_test.py
index 5317b9b3ff..eff75a583f 100644
--- a/tests/show_vnet_test.py
+++ b/tests/show_vnet_test.py
@@ -2,6 +2,7 @@
 from click.testing import CliRunner
 from utilities_common.db import Db
 import show.main as show
+import show.vnet as vnet
 
 class TestShowVnetRoutesAll(object):
     @classmethod
@@ -9,6 +10,49 @@ def setup_class(cls):
         print("SETUP")
         os.environ["UTILITIES_UNIT_TESTING"] = "1"
 
+    def test_pretty_print(self):
+        table =[]
+        row = ["Vnet_v6_in_v6-0", "fddd:a156:a251::a6:1/128"]
+        mac_addr = ""
+        vni = ""
+        state = "active"
+        epval = "fddd:a100:a251::a10:1,fddd:a101:a251::a10:1"
+
+        vnet.pretty_print(table, row, epval, mac_addr, vni, state)
+        expected_output = [['Vnet_v6_in_v6-0', 'fddd:a156:a251::a6:1/128', 'fddd:a100:a251::a10:1,fddd:a101:a251::a10:1', '', '', 'active']]
+        assert table == expected_output
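Reviewer note: the expected tables in this test imply that `pretty_print` wraps the comma-separated endpoint list by cell width rather than by a fixed endpoint count (two IPv6 endpoints per row, three IPv4 endpoints per row). A sketch of wrapping that reproduces every expected output in this test; the 45-character cap is an inference from those tables, not a constant quoted from show/vnet.py:

```python
# Inferred wrapping rule (sketch): pack endpoints into a cell until a width
# cap would be exceeded; each overflow becomes a continuation row with the
# other columns blanked.
def wrap_endpoints(epval, width=45):
    chunks, current = [], ''
    for ep in epval.split(','):
        candidate = ep if not current else current + ',' + ep
        if len(candidate) <= width:
            current = candidate
        else:
            chunks.append(current)
            current = ep
    if current:
        chunks.append(current)
    return chunks

# wrap_endpoints("192.168.1.1,...,192.168.1.15") yields five rows of three
# IPv4 endpoints, matching the expected table asserted below.
```

+
+        table =[]
+        row = 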
["Vnet_v6_in_v6-0", "fddd:a156:a251::a6:1/128"] + epval = "fddd:a100:a251::a10:1,fddd:a101:a251::a10:1,fddd:a100:a251::a11:1,fddd:a100:a251::a12:1,fddd:a100:a251::a13:1" + vnet.pretty_print(table, row, epval, mac_addr, vni, state) + expected_output = [ + ['Vnet_v6_in_v6-0', 'fddd:a156:a251::a6:1/128', 'fddd:a100:a251::a10:1,fddd:a101:a251::a10:1', '', '', 'active'], + ['', '', 'fddd:a100:a251::a11:1,fddd:a100:a251::a12:1', '', '', ''], + ['', '', 'fddd:a100:a251::a13:1', '', '', ''] + ] + assert table == expected_output + + table =[] + row = ["Vnet_v6_in_v6-0", "fddd:a156:a251::a6:1/128"] + epval = "192.168.1.1,192.168.1.2,192.168.1.3,192.168.1.4,192.168.1.5,192.168.1.6,192.168.1.7,192.168.1.8,192.168.1.9,192.168.1.10,192.168.1.11,192.168.1.12,192.168.1.13,192.168.1.14,192.168.1.15" + vnet.pretty_print(table, row, epval, mac_addr, vni, state) + expected_output =[ + ['Vnet_v6_in_v6-0', 'fddd:a156:a251::a6:1/128', '192.168.1.1,192.168.1.2,192.168.1.3', '', '', 'active'], + ['', '', '192.168.1.4,192.168.1.5,192.168.1.6', '', '', ''], + ['', '', '192.168.1.7,192.168.1.8,192.168.1.9', '', '', ''], + ['', '', '192.168.1.10,192.168.1.11,192.168.1.12', '', '', ''], + ['', '', '192.168.1.13,192.168.1.14,192.168.1.15', '', '', '']] + assert table == expected_output + + table =[] + row = ["Vnet_v6_in_v6-0", "fddd:a156:a251::a6:1/128"] + epval = "192.168.1.1" + vnet.pretty_print(table, row, epval, mac_addr, vni, state) + expected_output =[ + ['Vnet_v6_in_v6-0', 'fddd:a156:a251::a6:1/128', '192.168.1.1', '', '', 'active']] + assert table == expected_output + def test_show_vnet_routes_all_basic(self): runner = CliRunner() db = Db() @@ -22,6 +66,7 @@ def test_show_vnet_routes_all_basic(self): vnet name prefix endpoint mac address vni status --------------- ------------------------ ------------------------------------------- ------------- ----- -------- Vnet_v6_in_v6-0 fddd:a156:a251::a6:1/128 fddd:a100:a251::a10:1,fddd:a101:a251::a10:1 active + fddd:a102:a251::a10:1,fddd:a103:a251::a10:1 test_v4_in_v4-0 160.162.191.1/32 100.251.7.1 active test_v4_in_v4-0 160.163.191.1/32 100.251.7.1 active test_v4_in_v4-0 160.164.191.1/32 100.251.7.1 diff --git a/tests/show_vnet_vxlan_cli_test.py b/tests/show_vnet_vxlan_cli_test.py index f0cee3b257..c9aa5b6223 100644 --- a/tests/show_vnet_vxlan_cli_test.py +++ b/tests/show_vnet_vxlan_cli_test.py @@ -9,32 +9,12 @@ #test_path = os.path.dirname(os.path.abspath(__file__)) - - class TestShowVnet(object): @classmethod def setup_class(cls): print("SETUP") os.environ["UTILITIES_UNIT_TESTING"] = "1" - def test_show_vnet_routes_all_basic(self): - runner = CliRunner() - db = Db() - result = runner.invoke(show.cli.commands['vnet'].commands['routes'].commands['all'], [], obj=db) - assert result.exit_code == 0 - expected_output = """\ -vnet name prefix nexthop interface ------------ -------- --------- ----------- - -vnet name prefix endpoint mac address vni status ---------------- ------------------------ ------------------------------------------- ------------- ----- -------- -Vnet_v6_in_v6-0 fddd:a156:a251::a6:1/128 fddd:a100:a251::a10:1,fddd:a101:a251::a10:1 active -test_v4_in_v4-0 160.162.191.1/32 100.251.7.1 active -test_v4_in_v4-0 160.163.191.1/32 100.251.7.1 active -test_v4_in_v4-0 160.164.191.1/32 100.251.7.1 -""" - assert result.output == expected_output - def test_show_vnet_endpoint(self): runner = CliRunner() db = Db() @@ -45,6 +25,8 @@ def test_show_vnet_endpoint(self): --------------------- --------------------- -------------- -------- fddd:a100:a251::a10:1 
fddd:a100:a251::a10:1 1 Unknown fddd:a101:a251::a10:1 fddd:a101:a251::a10:1 1 Down +fddd:a102:a251::a10:1 fddd:a102:a251::a10:1 1 Unknown +fddd:a103:a251::a10:1 fddd:a103:a251::a10:1 1 Unknown 100.251.7.1 100.251.7.1 3 Up """ assert result.output == expected_output diff --git a/tests/sign_and_verify_test.py b/tests/sign_and_verify_test.py deleted file mode 100644 index 77d58a4ac9..0000000000 --- a/tests/sign_and_verify_test.py +++ /dev/null @@ -1,70 +0,0 @@ - -import subprocess -import os -import sys -import shutil - - -class TestSignVerify(object): - def _run_verification_script_and_check(self, image, cert_file_path, success_str, expected_value=0): - res = subprocess.run(['sh', self._verification_script, image, cert_file_path]) - assert res.returncode == expected_value - print(success_str) - - def test_basic_signature_verification(self): - self._run_verification_script_and_check(os.path.join(self._out_dir_path, 'output_file.bin'), - self._cert_file_path, "test case 1 - basic verify signature - SUCCESS") - - # change image size to something else - should fail on signature verification - def test_modified_image_size(self): - self._run_verification_script_and_check(os.path.join(self._out_dir_path, 'test_image_1.bin'), - self._cert_file_path, "test case 2 - modified image size - SUCCESS", 1) - - def test_modified_image_sha1(self): - self._run_verification_script_and_check(os.path.join(self._out_dir_path, 'test_image_2.bin'), - self._cert_file_path, "test case 3 - modified image sha1 - SUCCESS", 1) - - def test_modified_image_data(self): - self._run_verification_script_and_check(os.path.join(self._out_dir_path, 'test_image_3.bin'), - self._cert_file_path, "test case 4 - modified image data - SUCCESS", 1) - - def test_modified_image_signature(self): - self._run_verification_script_and_check(os.path.join(self._out_dir_path, 'test_image_4.bin'), - self._cert_file_path, "test case 5 - modified image data - SUCCESS", 1) - - def test_verify_image_with_wrong_certificate(self): - self._run_verification_script_and_check(os.path.join(self._out_dir_path, 'output_file.bin'), - self._alt_cert_path, "test case 6 - verify with wrong signature - SUCCESS", 1) - - def __init__(self): - self._test_path = os.path.dirname(os.path.abspath(__file__)) - self._modules_path = os.path.dirname(self._test_path) - self._repo_path = os.path.join(self._modules_path, '../..') - self._test_scripts_path = os.path.join(self._test_path, "scripts") - sys.path.insert(0, self._test_path) - sys.path.insert(0, self._modules_path) - sys.path.insert(0, self._test_scripts_path) - script_path = os.path.join(self._test_scripts_path, 'create_sign_and_verify_test_files.sh') - self._verification_script = os.path.join(self._test_scripts_path, 'verify_image_sign_test.sh') - self._out_dir_path = '/tmp/sign_verify_test' - self._cert_file_path = os.path.join(self._out_dir_path, 'self_certificate.pem') - self._alt_cert_path = os.path.join(self._out_dir_path, 'alt_self_certificate.pem') - create_files_result = subprocess.run(['sh', script_path, self._repo_path, self._out_dir_path, - self._cert_file_path, - self._alt_cert_path]) - print(create_files_result) - assert create_files_result.returncode == 0 - - def __del__(self): - shutil.rmtree(self._out_dir_path) - - -if __name__ == '__main__': - t = TestSignVerify() - t.test_basic_signature_verification() - subprocess.run(['ls', '/tmp/sign_verify_test']) - t.test_modified_image_data() - t.test_modified_image_sha1() - t.test_modified_image_signature() - t.test_modified_image_size() - 
t.test_verify_image_with_wrong_certificate()
diff --git a/tests/sonic_package_manager/test_service_creator.py b/tests/sonic_package_manager/test_service_creator.py
index c97d362626..689a635411 100644
--- a/tests/sonic_package_manager/test_service_creator.py
+++ b/tests/sonic_package_manager/test_service_creator.py
@@ -218,7 +218,7 @@ def test_feature_registration(mock_sonic_db, manifest):
         'set_owner': 'local',
         'has_per_asic_scope': 'False',
         'has_global_scope': 'True',
-        'has_timer': 'False',
+        'delayed': 'False',
         'check_up_status': 'False',
         'support_syslog_rate_limit': 'False',
     })
@@ -232,7 +232,7 @@ def test_feature_update(mock_sonic_db, manifest):
         'set_owner': 'local',
         'has_per_asic_scope': 'False',
         'has_global_scope': 'True',
-        'has_timer': 'False',
+        'delayed': 'False',
         'check_up_status': 'False',
         'support_syslog_rate_limit': 'False',
     }
@@ -256,7 +256,7 @@ def test_feature_update(mock_sonic_db, manifest):
         'set_owner': 'local',
         'has_per_asic_scope': 'False',
         'has_global_scope': 'True',
-        'has_timer': 'True',
+        'delayed': 'True',
         'check_up_status': 'False',
         'support_syslog_rate_limit': 'False',
     }),
@@ -278,7 +278,7 @@ def test_feature_registration_with_timer(mock_sonic_db, manifest):
         'set_owner': 'local',
         'has_per_asic_scope': 'False',
         'has_global_scope': 'True',
-        'has_timer': 'True',
+        'delayed': 'True',
         'check_up_status': 'False',
         'support_syslog_rate_limit': 'False',
     })
@@ -298,7 +298,7 @@ def test_feature_registration_with_non_default_owner(mock_sonic_db, manifest):
         'set_owner': 'kube',
         'has_per_asic_scope': 'False',
         'has_global_scope': 'True',
-        'has_timer': 'False',
+        'delayed': 'False',
         'check_up_status': 'False',
         'support_syslog_rate_limit': 'False',
     })
diff --git a/tests/suppress_pending_fib_test.py b/tests/suppress_pending_fib_test.py
new file mode 100644
index 0000000000..04064d306e
--- /dev/null
+++ b/tests/suppress_pending_fib_test.py
@@ -0,0 +1,34 @@
+from click.testing import CliRunner
+
+import config.main as config
+import show.main as show
+from utilities_common.db import Db
+
+
+class TestSuppressFibPending:
+    def test_synchronous_mode(self):
+        runner = CliRunner()
+
+        db = Db()
+
+        result = runner.invoke(config.config.commands['suppress-fib-pending'], ['enabled'], obj=db)
+        print(result.output)
+        assert result.exit_code == 0
+        assert db.cfgdb.get_entry('DEVICE_METADATA', 'localhost')['suppress-fib-pending'] == 'enabled'
+
+        result = runner.invoke(show.cli.commands['suppress-fib-pending'], obj=db)
+        assert result.exit_code == 0
+        assert result.output == 'Enabled\n'
+
+        result = runner.invoke(config.config.commands['suppress-fib-pending'], ['disabled'], obj=db)
+        print(result.output)
+        assert result.exit_code == 0
+        assert db.cfgdb.get_entry('DEVICE_METADATA', 'localhost')['suppress-fib-pending'] == 'disabled'
+
+        result = runner.invoke(show.cli.commands['suppress-fib-pending'], obj=db)
+        assert result.exit_code == 0
+        assert result.output == 'Disabled\n'
+
+        result = runner.invoke(config.config.commands['suppress-fib-pending'], ['invalid-input'], obj=db)
+        print(result.output)
+        assert result.exit_code != 0
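Reviewer note: for context on the new test above, the CLI pair it drives persists the setting in CONFIG_DB. A minimal sketch of the config-side command consistent with these asserts (assumed shape, not the actual config/main.py implementation):

```python
# Assumed shape of the command under test (illustrative only).
import click

@click.command('suppress-fib-pending')
@click.argument('state', type=click.Choice(['enabled', 'disabled']))
@click.pass_obj
def suppress_fib_pending(db, state):
    """Enable or disable pending-FIB suppression in DEVICE_METADATA|localhost."""
    # click.Choice rejects anything else, which is why the 'invalid-input'
    # invocation in the test exits non-zero before touching the DB.
    db.cfgdb.mod_entry('DEVICE_METADATA', 'localhost', {'suppress-fib-pending': state})
```

diff --git a/tests/test_sonic_installer.py b/tests/test_sonic_installer.py
index 0f8fcdb8ca..c445dfb6e3 100644
--- a/tests/test_sonic_installer.py
+++ b/tests/test_sonic_installer.py
@@ -3,7 +3,6 @@
 from sonic_installer.main import sonic_installer
 from click.testing import CliRunner
 from unittest.mock import patch, Mock, call
-from sonic_installer.bootloader import GrubBootloader
 
 
 @patch("sonic_installer.main.SWAPAllocator")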
@patch("sonic_installer.main.get_bootloader") @@ -32,7 +31,7 @@ def test_install(run_command, run_command_or_raise, get_bootloader, swap, fs): mock_bootloader.get_binary_image_version = Mock(return_value=new_image_version) mock_bootloader.get_installed_images = Mock(return_value=[current_image_version]) mock_bootloader.get_image_path = Mock(return_value=new_image_folder) - mock_bootloader.verify_image_sign = Mock(return_value=True) + @contextmanager def rootfs_path_mock(path): yield mounted_image_folder @@ -46,13 +45,7 @@ def rootfs_path_mock(path): print(result.output) assert result.exit_code == 0 - mock_bootloader_verify_image_sign_fail = mock_bootloader - mock_bootloader_verify_image_sign_fail.verify_image_sign = Mock(return_value=False) - get_bootloader.return_value=mock_bootloader_verify_image_sign_fail - result = runner.invoke(sonic_installer.commands["install"], [sonic_image_filename, "-y"]) - print(result.output) - assert result.exit_code != 0 # Assert bootloader install API was called mock_bootloader.install_image.assert_called_with(f"./{sonic_image_filename}") # Assert all below commands were called, so we ensure that diff --git a/tests/verify_image_sign_test.sh b/tests/verify_image_sign_test.sh deleted file mode 100755 index f4abd2584f..0000000000 --- a/tests/verify_image_sign_test.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -image_file="${1}" -cert_path="${2}" -cms_sig_file="sig.cms" -TMP_DIR=$(mktemp -d) -DATA_FILE="${TMP_DIR}/data.bin" -CMS_SIG_FILE="${TMP_DIR}/${cms_sig_file}" -lines_for_lookup=50 - -TAR_SIZE=$(head -n $lines_for_lookup $image_file | grep "payload_image_size=" | cut -d"=" -f2- ) -SHARCH_SIZE=$(sed '/^exit_marker$/q' $image_file | wc -c) -SIG_PAYLOAD_SIZE=$(($TAR_SIZE + $SHARCH_SIZE )) -# Extract cms signature from signed file - exit marker marks last sharch prefix + number of image lines + 1 for next linel -# Add extra byte for payload - extracting image signature from line after data file -sed -e '1,/^exit_marker$/d' $image_file | tail -c +$(( $TAR_SIZE + 1 )) > $CMS_SIG_FILE -# Extract image from signed file -head -c $SIG_PAYLOAD_SIZE $image_file > $DATA_FILE -EFI_CERTS_DIR=/tmp/efi_certs -[ -d $EFI_CERTS_DIR ] && rm -rf $EFI_CERTS_DIR -mkdir $EFI_CERTS_DIR -cp $cert_path $EFI_CERTS_DIR/cert.pem - -DIR="$(dirname "$0")" -. $DIR/verify_image_sign_common.sh -verify_image_sign_common $image_file $DATA_FILE $CMS_SIG_FILE -VERIFICATION_RES=$? 
-if [ -d "${TMP_DIR}" ]; then rm -rf ${TMP_DIR}; fi -[ -d $EFI_CERTS_DIR ] && rm -rf $EFI_CERTS_DIR -exit $VERIFICATION_RES \ No newline at end of file diff --git a/tests/vlan_test.py b/tests/vlan_test.py index 66ec3606cf..19622777a0 100644 --- a/tests/vlan_test.py +++ b/tests/vlan_test.py @@ -1,5 +1,6 @@ import os import traceback +import pytest from unittest import mock from click.testing import CliRunner @@ -10,6 +11,15 @@ from importlib import reload import utilities_common.bgp_util as bgp_util +IP_VERSION_PARAMS_MAP = { + "ipv4": { + "table": "VLAN" + }, + "ipv6": { + "table": "DHCP_RELAY" + } +} + show_vlan_brief_output="""\ +-----------+-----------------+-----------------+----------------+-------------+ | VLAN ID | IP Address | Ports | Port Tagging | Proxy ARP | @@ -143,6 +153,8 @@ | 4000 | | PortChannel1001 | tagged | disabled | +-----------+-----------------+-----------------+----------------+-------------+ """ + + class TestVlan(object): _old_run_bgp_command = None @classmethod @@ -319,7 +331,7 @@ def test_config_vlan_add_rif_portchannel_member(self): assert result.exit_code != 0 assert "Error: PortChannel0001 is a router interface!" in result.output - def test_config_vlan_with_vxlanmap_del_vlan(self): + def test_config_vlan_with_vxlanmap_del_vlan(self, mock_restart_dhcp_relay_service): runner = CliRunner() db = Db() obj = {'config_db': db.cfgdb} @@ -343,7 +355,7 @@ def test_config_vlan_with_vxlanmap_del_vlan(self): assert result.exit_code != 0 assert "Error: vlan: 1027 can not be removed. First remove vxlan mapping" in result.output - def test_config_vlan_del_vlan(self): + def test_config_vlan_del_vlan(self, mock_restart_dhcp_relay_service): runner = CliRunner() db = Db() obj = {'config_db':db.cfgdb} @@ -401,7 +413,7 @@ def test_config_vlan_del_nonexist_vlan_member(self): assert result.exit_code != 0 assert "Error: Ethernet0 is not a member of Vlan1000" in result.output - def test_config_add_del_vlan_and_vlan_member(self): + def test_config_add_del_vlan_and_vlan_member(self, mock_restart_dhcp_relay_service): runner = CliRunner() db = Db() @@ -444,7 +456,7 @@ def test_config_add_del_vlan_and_vlan_member(self): assert result.exit_code == 0 assert result.output == show_vlan_brief_output - def test_config_add_del_vlan_and_vlan_member_in_alias_mode(self): + def test_config_add_del_vlan_and_vlan_member_in_alias_mode(self, mock_restart_dhcp_relay_service): runner = CliRunner() db = Db() @@ -521,7 +533,7 @@ def test_config_vlan_proxy_arp_with_nonexist_vlan_intf(self): assert result.exit_code != 0 assert "Interface Vlan1001 does not exist" in result.output - def test_config_vlan_proxy_arp_enable(self): + def test_config_vlan_proxy_arp_enable(self, mock_restart_dhcp_relay_service): runner = CliRunner() db = Db() @@ -533,7 +545,7 @@ def test_config_vlan_proxy_arp_enable(self): assert result.exit_code == 0 assert db.cfgdb.get_entry("VLAN_INTERFACE", "Vlan1000") == {"proxy_arp": "enabled"} - def test_config_vlan_proxy_arp_disable(self): + def test_config_vlan_proxy_arp_disable(self, mock_restart_dhcp_relay_service): runner = CliRunner() db = Db() @@ -584,6 +596,132 @@ def test_config_vlan_add_member_of_portchannel(self): assert result.exit_code != 0 assert "Error: Ethernet32 is part of portchannel!" 
in result.output + @pytest.mark.parametrize("ip_version", ["ipv4", "ipv6"]) + def test_config_add_del_vlan_dhcp_relay_with_empty_entry(self, ip_version, mock_restart_dhcp_relay_service): + runner = CliRunner() + db = Db() + + # add vlan 1001 + result = runner.invoke(config.config.commands["vlan"].commands["add"], ["1001"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + exp_output = {"vlanid": "1001"} if ip_version == "ipv4" else {} + assert db.cfgdb.get_entry(IP_VERSION_PARAMS_MAP[ip_version]["table"], "Vlan1001") == exp_output + + # del vlan 1001 + with mock.patch("utilities_common.dhcp_relay_util.handle_restart_dhcp_relay_service") as mock_handle_restart: + result = runner.invoke(config.config.commands["vlan"].commands["del"], ["1001"], obj=db) + print(result.exit_code) + print(result.output) + + assert result.exit_code == 0 + assert "Vlan1001" not in db.cfgdb.get_keys(IP_VERSION_PARAMS_MAP[ip_version]["table"]) + assert "Restart service dhcp_relay failed with error" not in result.output + + @pytest.mark.parametrize("ip_version", ["ipv4", "ipv6"]) + def test_config_add_del_vlan_dhcp_relay_with_non_empty_entry(self, ip_version, mock_restart_dhcp_relay_service): + runner = CliRunner() + db = Db() + + # add vlan 1001 + result = runner.invoke(config.config.commands["vlan"].commands["add"], ["1001"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + exp_output = {"vlanid": "1001"} if ip_version == "ipv4" else {} + assert db.cfgdb.get_entry(IP_VERSION_PARAMS_MAP[ip_version]["table"], "Vlan1001") == exp_output + db.cfgdb.set_entry("DHCP_RELAY", "Vlan1001", {"dhcpv6_servers": ["fc02:2000::5"]}) + + # del vlan 1001 + with mock.patch("utilities_common.dhcp_relay_util.handle_restart_dhcp_relay_service") as mock_handle_restart: + result = runner.invoke(config.config.commands["vlan"].commands["del"], ["1001"], obj=db) + print(result.exit_code) + print(result.output) + + assert result.exit_code == 0 + assert "Vlan1001" not in db.cfgdb.get_keys(IP_VERSION_PARAMS_MAP[ip_version]["table"]) + mock_handle_restart.assert_called_once() + assert "Restart service dhcp_relay failed with error" not in result.output + + @pytest.mark.parametrize("ip_version", ["ipv4", "ipv6"]) + def test_config_add_del_vlan_with_dhcp_relay_not_running(self, ip_version): + runner = CliRunner() + db = Db() + + # add vlan 1001 + result = runner.invoke(config.config.commands["vlan"].commands["add"], ["1001"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + exp_output = {"vlanid": "1001"} if ip_version == "ipv4" else {} + assert db.cfgdb.get_entry(IP_VERSION_PARAMS_MAP[ip_version]["table"], "Vlan1001") == exp_output + + # del vlan 1001 + with mock.patch("utilities_common.dhcp_relay_util.handle_restart_dhcp_relay_service") \ + as mock_restart_dhcp_relay_service: + result = runner.invoke(config.config.commands["vlan"].commands["del"], ["1001"], obj=db) + print(result.exit_code) + print(result.output) + + assert result.exit_code == 0 + assert "Vlan1001" not in db.cfgdb.get_keys(IP_VERSION_PARAMS_MAP[ip_version]["table"]) + assert mock_restart_dhcp_relay_service.call_count == 0 + assert "Restarting DHCP relay service..." 
not in result.output + assert "Restart service dhcp_relay failed with error" not in result.output + + def test_config_add_del_vlan_with_not_restart_dhcp_relay_ipv6(self): + runner = CliRunner() + db = Db() + + # add vlan 1001 + result = runner.invoke(config.config.commands["vlan"].commands["add"], ["1001"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + db.cfgdb.set_entry("DHCP_RELAY", "Vlan1001", {"dhcpv6_servers": ["fc02:2000::5"]}) + + # del vlan 1001 + with mock.patch("utilities_common.dhcp_relay_util.handle_restart_dhcp_relay_service") \ + as mock_restart_dhcp_relay_service: + result = runner.invoke(config.config.commands["vlan"].commands["del"], ["1001", "--no_restart_dhcp_relay"], + obj=db) + print(result.exit_code) + print(result.output) + + assert result.exit_code != 0 + assert mock_restart_dhcp_relay_service.call_count == 0 + assert "Can't delete Vlan1001 because related DHCPv6 Relay config is exist" in result.output + + db.cfgdb.set_entry("DHCP_RELAY", "Vlan1001", None) + # del vlan 1001 + with mock.patch("utilities_common.dhcp_relay_util.handle_restart_dhcp_relay_service") \ + as mock_restart_dhcp_relay_service: + result = runner.invoke(config.config.commands["vlan"].commands["del"], ["1001", "--no_restart_dhcp_relay"], + obj=db) + print(result.exit_code) + print(result.output) + + assert result.exit_code == 0 + assert mock_restart_dhcp_relay_service.call_count == 0 + + @pytest.mark.parametrize("ip_version", ["ipv6"]) + def test_config_add_exist_vlan_dhcp_relay(self, ip_version): + runner = CliRunner() + db = Db() + + db.cfgdb.set_entry("DHCP_RELAY", "Vlan1001", {"vlanid": "1001"}) + # add vlan 1001 + result = runner.invoke(config.config.commands["vlan"].commands["add"], ["1001"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "DHCPv6 relay config for Vlan1001 already exists" in result.output + @classmethod def teardown_class(cls): os.environ['UTILITIES_UNIT_TESTING'] = "0" diff --git a/undebug/main.py b/undebug/main.py index 3810add68b..17767973cc 100644 --- a/undebug/main.py +++ b/undebug/main.py @@ -1,9 +1,13 @@ +import re +import sys import click import subprocess +from shlex import join def run_command(command, pager=False): - click.echo(click.style("Command: ", fg='cyan') + click.style(command, fg='green')) - p = subprocess.Popen(command, shell=True, text=True, stdout=subprocess.PIPE) + command_str = join(command) + click.echo(click.style("Command: ", fg='cyan') + click.style(command_str, fg='green')) + p = subprocess.Popen(command, text=True, stdout=subprocess.PIPE) output = p.stdout.read() if pager: click.echo_via_pager(output) @@ -22,7 +26,8 @@ def cli(): pass -p = subprocess.check_output(["sudo vtysh -c 'show version'"], shell=True, text=True) +prefix_pattern = '^[A-Za-z0-9.:/]*$' +p = subprocess.check_output(["sudo", "vtysh", "-c", 'show version'], text=True) if 'FRRouting' in p: # # 'bgp' group for FRR ### @@ -35,66 +40,64 @@ def bgp(): @bgp.command('allow-martians') def allow_martians(): """BGP allow martian next hops""" - command = 'sudo vtysh -c "no debug bgp allow-martians"' + command = ["sudo", "vtysh", "-c", "no debug bgp allow-martians"] run_command(command) @bgp.command() @click.argument('additional', type=click.Choice(['segment']), required=False) def as4(additional): """BGP AS4 actions""" - command = 'sudo vtysh -c "no debug bgp as4' + command = ["sudo", "vtysh", "-c", "no debug bgp as4"] if additional is not None: - command += " segment" - command += '"' + 
command[-1] += " segment" run_command(command) @bgp.command() @click.argument('prefix', required=True) def bestpath(prefix): """BGP bestpath""" - command = 'sudo vtysh -c "no debug bgp bestpath %s"' % prefix + if not re.match(prefix_pattern, prefix): + sys.exit('Prefix contains only number, alphabet, period, colon, and forward slash') + command = ["sudo", "vtysh", "-c", "no debug bgp bestpath %s" % prefix] run_command(command) @bgp.command() @click.argument('prefix_or_iface', required=False) def keepalives(prefix_or_iface): """BGP Neighbor Keepalives""" - command = 'sudo vtysh -c "no debug bgp keepalives' + command = ["sudo", "vtysh", "-c", "no debug bgp keepalives"] if prefix_or_iface is not None: - command += " " + prefix_or_iface - command += '"' + command[-1] += ' ' + prefix_or_iface run_command(command) @bgp.command('neighbor-events') @click.argument('prefix_or_iface', required=False) def neighbor_events(prefix_or_iface): """BGP Neighbor Events""" - command = 'sudo vtysh -c "no debug bgp neighbor-events' + command = ["sudo", "vtysh", "-c", "no debug bgp neighbor-events"] if prefix_or_iface is not None: - command += " " + prefix_or_iface - command += '"' + command[-1] += ' ' + prefix_or_iface run_command(command) @bgp.command() def nht(): """BGP nexthop tracking events""" - command = 'sudo vtysh -c "no debug bgp nht"' + command = ["sudo", "vtysh", "-c", "no debug bgp nht"] run_command(command) @bgp.command() @click.argument('additional', type=click.Choice(['error']), required=False) def pbr(additional): """BGP policy based routing""" - command = 'sudo vtysh -c "no debug bgp pbr' + command = ["sudo", "vtysh", "-c", "no debug bgp pbr"] if additional is not None: - command += " error" - command += '"' + command[-1] += " error" run_command(command) @bgp.command('update-groups') def update_groups(): """BGP update-groups""" - command = 'sudo vtysh -c "no debug bgp update-groups"' + command = ["sudo", "vtysh", "-c", "no debug bgp update-groups"] run_command(command) @bgp.command() @@ -102,22 +105,26 @@ def update_groups(): @click.argument('prefix', required=False) def updates(direction, prefix): """BGP updates""" - command = 'sudo vtysh -c "no debug bgp updates' + bgp_cmd = "no debug bgp updates" if direction is not None: - command += " " + direction + bgp_cmd += ' ' + direction if prefix is not None: - command += " " + prefix - command += '"' + if not re.match(prefix_pattern, prefix): + sys.exit('Prefix contains only number, alphabet, period, colon, and forward slash') + bgp_cmd += ' ' + prefix + command = ["sudo", "vtysh", "-c", bgp_cmd] run_command(command) @bgp.command() @click.argument('prefix', required=False) def zebra(prefix): """BGP Zebra messages""" - command = 'sudo vtysh -c "no debug bgp zebra' + bgp_cmd = "no debug bgp zebra" if prefix is not None: - command += " prefix " + prefix - command += '"' + if not re.match(prefix_pattern, prefix): + sys.exit('Prefix contains only number, alphabet, period, colon, and forward slash') + bgp_cmd += ' prefix ' + prefix + command = ["sudo", "vtysh", "-c", bgp_cmd] run_command(command) # @@ -132,56 +139,55 @@ def zebra(): @click.argument('detailed', type=click.Choice(['detailed']), required=False) def dplane(detailed): """Debug zebra dataplane events""" - command = 'sudo vtysh -c "no debug zebra dplane' + zb_cmd = "no debug zebra dplane" if detailed is not None: - command += " detailed" - command += '"' + zb_cmd += " detailed" + command = ["sudo", "vtysh", "-c", zb_cmd] run_command(command) @zebra.command() def events(): """Debug option set for 
zebra events""" - command = 'sudo vtysh -c "no debug zebra events"' + command = ["sudo", "vtysh", "-c", "no debug zebra events"] run_command(command) @zebra.command() def fpm(): """Debug zebra FPM events""" - command = 'sudo vtysh -c "no debug zebra fpm"' + command = ["sudo", "vtysh", "-c", "no debug zebra fpm"] run_command(command) @zebra.command() def kernel(): """Debug option set for zebra between kernel interface""" - command = 'sudo vtysh -c "no debug zebra kernel"' + command = ["sudo", "vtysh", "-c", "no debug zebra kernel"] run_command(command) @zebra.command() def nht(): """Debug option set for zebra next hop tracking""" - command = 'sudo vtysh -c "no debug zebra nht"' + command = ["sudo", "vtysh", "-c", "no debug zebra nht"] run_command(command) @zebra.command() def packet(): """Debug option set for zebra packet""" - command = 'sudo vtysh -c "no debug zebra packet"' + command = ["sudo", "vtysh", "-c", "no debug zebra packet"] run_command(command) @zebra.command() @click.argument('detailed', type=click.Choice(['detailed']), required=False) def rib(detailed): """Debug RIB events""" - command = 'sudo vtysh -c "no debug zebra rib' + command = ["sudo", "vtysh", "-c", "no debug zebra rib"] if detailed is not None: - command += " detailed" - command += '"' + command[-1] += " detailed" run_command(command) @zebra.command() def vxlan(): """Debug option set for zebra VxLAN (EVPN)""" - command = 'sudo vtysh -c "no debug zebra vxlan"' + command = ["sudo", "vtysh", "-c", "no debug zebra vxlan"] run_command(command) else: @@ -193,49 +199,49 @@ def vxlan(): def bgp(ctx): """debug bgp off""" if ctx.invoked_subcommand is None: - command = 'sudo vtysh -c "no debug bgp"' + command = ["sudo", "vtysh", "-c", "no debug bgp"] run_command(command) @bgp.command() def events(): """debug bgp events off""" - command = 'sudo vtysh -c "no debug bgp events"' + command = ["sudo", "vtysh", "-c", "no debug bgp events"] run_command(command) @bgp.command() def updates(): """debug bgp updates off""" - command = 'sudo vtysh -c "no debug bgp updates"' + command = ["sudo", "vtysh", "-c", "no debug bgp updates"] run_command(command) @bgp.command() def as4(): """debug bgp as4 actions off""" - command = 'sudo vtysh -c "no debug bgp as4"' + command = ["sudo", "vtysh", "-c", "no debug bgp as4"] run_command(command) @bgp.command() def filters(): """debug bgp filters off""" - command = 'sudo vtysh -c "no debug bgp filters"' + command = ["sudo", "vtysh", "-c", "no debug bgp filters"] run_command(command) @bgp.command() def fsm(): """debug bgp finite state machine off""" - command = 'sudo vtysh -c "no debug bgp fsm"' + command = ["sudo", "vtysh", "-c", "no debug bgp fsm"] run_command(command) @bgp.command() def keepalives(): """debug bgp keepalives off""" - command = 'sudo vtysh -c "no debug bgp keepalives"' + command = ["sudo", "vtysh", "-c", "no debug bgp keepalives"] run_command(command) @bgp.command() def zebra(): """debug bgp zebra messages off""" - command = 'sudo vtysh -c "no debug bgp zebra"' + command = ["sudo", "vtysh", "-c", "no debug bgp zebra"] run_command(command) # @@ -249,31 +255,31 @@ def zebra(): @zebra.command() def events(): """debug option set for zebra events""" - command = 'sudo vtysh -c "no debug zebra events"' + command = ["sudo", "vtysh", "-c", "no debug zebra events"] run_command(command) @zebra.command() def fpm(): """debug zebra FPM events""" - command = 'sudo vtysh -c "no debug zebra fpm"' + command = ["sudo", "vtysh", "-c", "no debug zebra fpm"] run_command(command) @zebra.command() def kernel(): 
"""debug option set for zebra between kernel interface""" - command = 'sudo vtysh -c "no debug zebra kernel"' + command = ["sudo", "vtysh", "-c", "no debug zebra kernel"] run_command(command) @zebra.command() def packet(): """debug option set for zebra packet""" - command = 'sudo vtysh -c "no debug zebra packet"' + command = ["sudo", "vtysh", "-c", "no debug zebra packet"] run_command(command) @zebra.command() def rib(): """debug RIB events""" - command = 'sudo vtysh -c "no debug zebra rib"' + command = ["sudo", "vtysh", "-c", "no debug zebra rib"] run_command(command) diff --git a/utilities_common/cli.py b/utilities_common/cli.py index ca9e061078..45b2cc5f3f 100644 --- a/utilities_common/cli.py +++ b/utilities_common/cli.py @@ -251,10 +251,10 @@ def is_vlanid_in_range(vid): return False -def check_if_vlanid_exist(config_db, vlan): +def check_if_vlanid_exist(config_db, vlan, table_name='VLAN'): """Check if vlan id exits in the config db or ot""" - if len(config_db.get_entry('VLAN', vlan)) != 0: + if len(config_db.get_entry(table_name, vlan)) != 0: return True return False diff --git a/utilities_common/dhcp_relay_util.py b/utilities_common/dhcp_relay_util.py new file mode 100644 index 0000000000..b9c0b4e20f --- /dev/null +++ b/utilities_common/dhcp_relay_util.py @@ -0,0 +1,20 @@ +import click +import utilities_common.cli as clicommon + + +def restart_dhcp_relay_service(): + """ + Restart dhcp_relay service + """ + click.echo("Restarting DHCP relay service...") + clicommon.run_command("systemctl stop dhcp_relay", display_cmd=False) + clicommon.run_command("systemctl reset-failed dhcp_relay", display_cmd=False) + clicommon.run_command("systemctl start dhcp_relay", display_cmd=False) + + +def handle_restart_dhcp_relay_service(): + try: + restart_dhcp_relay_service() + except SystemExit as e: + ctx = click.get_current_context() + ctx.fail("Restart service dhcp_relay failed with error {}".format(e)) diff --git a/utilities_common/multi_asic.py b/utilities_common/multi_asic.py index 9e213f67f1..b1f24e12e8 100644 --- a/utilities_common/multi_asic.py +++ b/utilities_common/multi_asic.py @@ -63,12 +63,13 @@ def get_ns_list_based_on_options(self): namespaces = multi_asic.get_all_namespaces() if self.namespace_option is None: if self.get_display_option() == constants.DISPLAY_ALL: - ns_list = namespaces['front_ns'] + namespaces['back_ns'] + ns_list = namespaces['front_ns'] + namespaces['back_ns'] + namespaces['fabric_ns'] else: ns_list = namespaces['front_ns'] else: if self.namespace_option not in namespaces['front_ns'] and \ - self.namespace_option not in namespaces['back_ns']: + self.namespace_option not in namespaces['back_ns'] and \ + self.namespace_option not in namespaces['fabric_ns']: raise ValueError( 'Unknown Namespace {}'.format(self.namespace_option)) ns_list = [self.namespace_option] diff --git a/utilities_common/sfp_helper.py b/utilities_common/sfp_helper.py index 6ae9b85a1c..a5bf7839a9 100644 --- a/utilities_common/sfp_helper.py +++ b/utilities_common/sfp_helper.py @@ -19,6 +19,29 @@ 'application_advertisement': 'Application Advertisement' } +QSFP_CMIS_DELTA_DATA_MAP = { + 'host_lane_count': 'Host Lane Count', + 'media_lane_count': 'Media Lane Count', + 'active_apsel_hostlane1': 'Active application selected code assigned to host lane 1', + 'active_apsel_hostlane2': 'Active application selected code assigned to host lane 2', + 'active_apsel_hostlane3': 'Active application selected code assigned to host lane 3', + 'active_apsel_hostlane4': 'Active application selected code assigned to host 
lane 4', + 'active_apsel_hostlane5': 'Active application selected code assigned to host lane 5', + 'active_apsel_hostlane6': 'Active application selected code assigned to host lane 6', + 'active_apsel_hostlane7': 'Active application selected code assigned to host lane 7', + 'active_apsel_hostlane8': 'Active application selected code assigned to host lane 8', + 'media_interface_technology': 'Media Interface Technology', + 'hardware_rev': 'Module Hardware Rev', + 'cmis_rev': 'CMIS Rev', + 'active_firmware': 'Active Firmware', + 'inactive_firmware': 'Inactive Firmware', + 'supported_max_tx_power': 'Supported Max TX Power', + 'supported_min_tx_power': 'Supported Min TX Power', + 'supported_max_laser_freq': 'Supported Max Laser Frequency', + 'supported_min_laser_freq': 'Supported Min Laser Frequency' +} + +CMIS_DATA_MAP = {**QSFP_DATA_MAP, **QSFP_CMIS_DELTA_DATA_MAP} def covert_application_advertisement_to_output_string(indent, sfp_info_dict): key = 'application_advertisement'
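Reviewer note: `CMIS_DATA_MAP` merges the base QSFP map with the CMIS-only fields added above. A sketch of how such a map can drive the alphabetized field listing seen in the expected `transceiver info` output earlier in this diff (the formatter below is illustrative, not the actual show-side code):

```python
# Illustrative formatter driven by the merged map; CMIS_DATA_MAP is defined
# in the diff above, everything else here is an assumption.
from utilities_common.sfp_helper import CMIS_DATA_MAP

def format_cmis_fields(sfp_info_dict, indent=' ' * 8):
    lines = []
    # Sorting by the human-readable label reproduces the ordering in the
    # expected output (e.g. 'Active Firmware' before 'Application Advertisement').
    for key, label in sorted(CMIS_DATA_MAP.items(), key=lambda kv: kv[1]):
        if key in sfp_info_dict:
            lines.append('{}{}: {}'.format(indent, label, sfp_info_dict[key]))
    return '\n'.join(lines)
```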