Skip to content

Commit

Permalink
Merge remote-tracking branch 'upstream/master'
Browse files Browse the repository at this point in the history
  • Loading branch information
abdosi committed Sep 13, 2023
2 parents 23508e7 + 82a4d71 commit b72f4be
Show file tree
Hide file tree
Showing 17 changed files with 364 additions and 66 deletions.
3 changes: 1 addition & 2 deletions azure-pipelines.yml
Original file line number Diff line number Diff line change
Expand Up @@ -52,12 +52,11 @@ stages:

- script: |
set -xe
sudo apt-get -y purge libhiredis-dev libnl-3-dev libnl-route-3-dev || true
sudo apt-get -y purge libnl-3-dev libnl-route-3-dev || true
sudo dpkg -i libnl-3-200_*.deb
sudo dpkg -i libnl-genl-3-200_*.deb
sudo dpkg -i libnl-route-3-200_*.deb
sudo dpkg -i libnl-nf-3-200_*.deb
sudo dpkg -i libhiredis0.14_*.deb
sudo dpkg -i libyang_1.0.73_amd64.deb
sudo dpkg -i libyang-cpp_1.0.73_amd64.deb
sudo dpkg -i python3-yang_1.0.73_amd64.deb
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
"mellanox_asics": {
"spc1": [ "ACS-MSN2700", "ACS-MSN2740", "ACS-MSN2100", "ACS-MSN2410", "ACS-MSN2010", "Mellanox-SN2700", "Mellanox-SN2700-D48C8" ],
"spc2": [ "ACS-MSN3800", "Mellanox-SN3800-D112C8" ],
"spc3": [ "ACS-MSN4700", "ACS-MSN4600", "ACS-MSN4600C", "ACS-MSN4410", "Mellanox-SN4600C-D112C8", "Mellanox-SN4600C-C64" ]
"spc3": [ "ACS-MSN4700", "ACS-MSN4600", "ACS-MSN4600C", "ACS-MSN4410", "Mellanox-SN4600C-D112C8", "Mellanox-SN4600C-C64", "Mellanox-SN4700-O8C48" ]
},
"broadcom_asics": {
"th": [ "Force10-S6100", "Arista-7060CX-32S-C32", "Arista-7060CX-32S-C32-T1", "Arista-7060CX-32S-D48C8", "Celestica-DX010-C32", "Seastone-DX010" ],
Expand Down
1 change: 1 addition & 0 deletions generic_config_updater/patch_sorter.py
Original file line number Diff line number Diff line change
Expand Up @@ -562,6 +562,7 @@ def __init__(self, path_addressing):
["BGP_NEIGHBOR", "*", "nhopself"],
["BGP_NEIGHBOR", "*", "rrclient"],
["BGP_PEER_RANGE", "*", "*"],
["BGP_SENTINELS", "*", "*"],
["BGP_MONITORS", "*", "holdtime"],
["BGP_MONITORS", "*", "keepalive"],
["BGP_MONITORS", "*", "name"],
Expand Down
20 changes: 11 additions & 9 deletions scripts/db_migrator.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,8 @@ def __init__(self, namespace, socket=None):
self.asic_type = version_info.get('asic_type')
if not self.asic_type:
log.log_error("ASIC type information not obtained. DB migration will not be reliable")
self.hwsku = device_info.get_hwsku()

self.hwsku = device_info.get_localhost_info('hwsku', self.configDB)
if not self.hwsku:
log.log_error("HWSKU information not obtained. DB migration will not be reliable")

Expand Down Expand Up @@ -691,7 +692,7 @@ def migrate_routing_config_mode(self):
# overwrite the routing-config-mode as per minigraph parser
# Criteria for update:
# if config mode is missing in base OS or if base and target modes are not same
# Eg. in 201811 mode is "unified", and in newer branches mode is "separated"
# Eg. in 201811 mode is "unified", and in newer branches mode is "separated"
if ('docker_routing_config_mode' not in device_metadata_old and 'docker_routing_config_mode' in device_metadata_new) or \
(device_metadata_old.get('docker_routing_config_mode') != device_metadata_new.get('docker_routing_config_mode')):
device_metadata_old['docker_routing_config_mode'] = device_metadata_new.get('docker_routing_config_mode')
Expand Down Expand Up @@ -1033,12 +1034,14 @@ def version_4_0_0(self):
# reading FAST_REBOOT table can't be done with stateDB.get as it uses hget behind the scenes and the table structure is
# not using hash and won't work.
# FAST_REBOOT table exists only if fast-reboot was triggered.
keys = self.stateDB.keys(self.stateDB.STATE_DB, "FAST_REBOOT|system")
if keys:
enable_state = 'true'
else:
enable_state = 'false'
self.stateDB.set(self.stateDB.STATE_DB, 'FAST_RESTART_ENABLE_TABLE|system', 'enable', enable_state)
keys = self.stateDB.keys(self.stateDB.STATE_DB, "FAST_RESTART_ENABLE_TABLE|system")
if not keys:
keys = self.stateDB.keys(self.stateDB.STATE_DB, "FAST_REBOOT|system")
if keys:
enable_state = 'true'
else:
enable_state = 'false'
self.stateDB.set(self.stateDB.STATE_DB, 'FAST_RESTART_ENABLE_TABLE|system', 'enable', enable_state)
self.set_version('version_4_0_1')
return 'version_4_0_1'

Expand All @@ -1057,7 +1060,6 @@ def version_4_0_2(self):
Version 4_0_2.
"""
log.log_info('Handling version_4_0_2')

if self.stateDB.keys(self.stateDB.STATE_DB, "FAST_REBOOT|system"):
self.migrate_config_db_flex_counter_delay_status()

Expand Down
88 changes: 88 additions & 0 deletions scripts/sensorshow
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
#!/usr/bin/python3

'''
Script to show Voltage and Current Sensor status.
'''
from tabulate import tabulate
from natsort import natsorted
import argparse
import os
import sys

# mock the redis for unit test purposes #
try:
if os.environ["UTILITIES_UNIT_TESTING"] == "1":
modules_path = os.path.join(os.path.dirname(__file__), "..")
test_path = os.path.join(modules_path, "tests")
sys.path.insert(0, modules_path)
sys.path.insert(0, test_path)
import mock_tables.dbconnector
except KeyError:
pass

from swsscommon.swsscommon import SonicV2Connector

header = ['Sensor', '', 'High TH', 'Low TH', 'Crit High TH', 'Crit Low TH', 'Warning', 'Timestamp']

TIMESTAMP_FIELD_NAME = 'timestamp'
UNIT_FIELD_NAME = 'unit'
HIGH_THRESH_FIELD_NAME = 'high_threshold'
LOW_THRESH_FIELD_NAME = 'low_threshold'
CRIT_HIGH_THRESH_FIELD_NAME = 'critical_high_threshold'
CRIT_LOW_THRESH_FIELD_NAME = 'critical_low_threshold'
WARNING_STATUS_FIELD_NAME = 'warning_status'
VOLTAGE_INFO_TABLE_NAME = 'VOLTAGE_INFO'
CURRENT_INFO_TABLE_NAME = 'CURRENT_INFO'


class SensorShow(object):
    """Render voltage or current sensor readings from STATE_DB as a table.

    type: either "voltage" or "current"; selects both the STATE_DB table
    (VOLTAGE_INFO / CURRENT_INFO) and the field that holds the reading.
    """

    def __init__(self, type):
        self.db = SonicV2Connector(use_unix_socket_path=True)
        self.db.connect(self.db.STATE_DB)
        self.field_name = type
        # Work on a copy of the module-level header so constructing a
        # SensorShow does not mutate shared global state.
        self.header = list(header)
        self.header[1] = type.capitalize()

        if type == "voltage":
            self.table_name = VOLTAGE_INFO_TABLE_NAME
        else:
            self.table_name = CURRENT_INFO_TABLE_NAME

    def show(self):
        """Print a table of all sensors found in the selected STATE_DB table."""
        keys = self.db.keys(self.db.STATE_DB, self.table_name + '*')
        if not keys:
            print('Sensor not detected')
            return

        table = []
        for key in natsorted(keys):
            key_list = key.split('|')
            if len(key_list) != 2:  # error data in DB, log it and ignore
                print('Warn: Invalid key in table {}: {}'.format(self.table_name, key))
                continue

            name = key_list[1]
            data_dict = self.db.get_all(self.db.STATE_DB, key)
            # Use .get() with a fallback so a single sensor entry missing an
            # optional field (e.g. a threshold) does not abort the whole
            # listing with a KeyError.
            table.append((name,
                          "{} {}".format(data_dict.get(self.field_name, 'N/A'),
                                         data_dict.get(UNIT_FIELD_NAME, '')),
                          data_dict.get(HIGH_THRESH_FIELD_NAME, 'N/A'),
                          data_dict.get(LOW_THRESH_FIELD_NAME, 'N/A'),
                          data_dict.get(CRIT_HIGH_THRESH_FIELD_NAME, 'N/A'),
                          data_dict.get(CRIT_LOW_THRESH_FIELD_NAME, 'N/A'),
                          data_dict.get(WARNING_STATUS_FIELD_NAME, 'N/A'),
                          data_dict.get(TIMESTAMP_FIELD_NAME, 'N/A')
                          ))

        if table:
            print(tabulate(table, self.header, tablefmt='simple', stralign='right'))
        else:
            print('No sensor data available')


if __name__ == "__main__":
    # Entry point: the only option is the mandatory sensor type, which
    # doubles as the STATE_DB table/field selector inside SensorShow.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("-t", "--type", help="sensor type", required=True,
                            choices=['voltage', 'current'])
    options = arg_parser.parse_args()

    SensorShow(options.type).show()
1 change: 1 addition & 0 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -170,6 +170,7 @@
'scripts/tempershow',
'scripts/tunnelstat',
'scripts/update_json.py',
'scripts/sensorshow',
'scripts/voqutil',
'scripts/warm-reboot',
'scripts/watermarkstat',
Expand Down
56 changes: 8 additions & 48 deletions sfputil/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -882,62 +882,22 @@ def fetch_error_status_from_platform_api(port):
"""
if port is None:
logical_port_list = natsort.natsorted(platform_sfputil.logical)
# Create a list containing the logical port names of all ports we're interested in
generate_sfp_list_code = \
"sfp_list = chassis.get_all_sfps()\n"
else:
physical_port_list = logical_port_name_to_physical_port_list(port)
logical_port_list = [port]
# Create a list containing the logical port names of all ports we're interested in
generate_sfp_list_code = \
"sfp_list = [chassis.get_sfp(x) for x in {}]\n".format(physical_port_list)

# Code to initialize chassis object
init_chassis_code = \
"import sonic_platform.platform\n" \
"platform = sonic_platform.platform.Platform()\n" \
"chassis = platform.get_chassis()\n"

# Code to fetch the error status
get_error_status_code = \
"try:\n"\
" errors=['{}:{}'.format(sfp.index, sfp.get_error_description()) for sfp in sfp_list]\n" \
"except NotImplementedError as e:\n"\
" errors=['{}:{}'.format(sfp.index, 'OK (Not implemented)') for sfp in sfp_list]\n" \
"print(errors)\n"

get_error_status_command = ["docker", "exec", "pmon", "python3", "-c", "{}{}{}".format(
init_chassis_code, generate_sfp_list_code, get_error_status_code)]
# Fetch error status from pmon docker
try:
output = subprocess.check_output(get_error_status_command, universal_newlines=True)
except subprocess.CalledProcessError as e:
click.Abort("Error! Unable to fetch error status for SPF modules. Error code = {}, error messages: {}".format(e.returncode, e.output))
return None

output_list = output.split('\n')
for output_str in output_list:
# The output of all SFP error status are a list consisting of element with convention of '<sfp no>:<error status>'
# Besides, there can be some logs captured during the platform API executing
# So, first of all, we need to skip all the logs until find the output list of SFP error status
if output_str[0] == '[' and output_str[-1] == ']':
output_list = ast.literal_eval(output_str)
break

output_dict = {}
for output in output_list:
sfp_index, error_status = output.split(':')
output_dict[int(sfp_index)] = error_status

output = []
for logical_port_name in logical_port_list:
physical_port_list = logical_port_name_to_physical_port_list(logical_port_name)
port_name = get_physical_port_name(logical_port_name, 1, False)
physical_port = logical_port_to_physical_port_index(logical_port_name)

if is_port_type_rj45(logical_port_name):
output.append([port_name, "N/A"])
output.append([logical_port_name, "N/A"])
else:
output.append([port_name, output_dict.get(physical_port_list[0])])
try:
error_description = platform_chassis.get_sfp(physical_port).get_error_description()
output.append([logical_port_name, error_description])
except NotImplementedError:
click.echo("get_error_description NOT implemented for port {}".format(logical_port_name))
sys.exit(ERROR_NOT_IMPLEMENTED)

return output

Expand Down
17 changes: 17 additions & 0 deletions show/platform.py
Original file line number Diff line number Diff line change
Expand Up @@ -137,6 +137,23 @@ def temperature():
cmd = ['tempershow']
clicommon.run_command(cmd)


# 'voltage' subcommand ("show platform voltage")
@platform.command()
def voltage():
    """Show device voltage information"""
    # Delegates to the sensorshow script, which reads VOLTAGE_INFO from STATE_DB.
    clicommon.run_command(["sensorshow", "-t", "voltage"])


# 'current' subcommand ("show platform current")
@platform.command()
def current():
    """Show device current information"""
    # Delegates to the sensorshow script, which reads CURRENT_INFO from STATE_DB.
    clicommon.run_command(["sensorshow", "-t", "current"])


# 'firmware' subcommand ("show platform firmware")
@platform.command(
context_settings=dict(
Expand Down
7 changes: 6 additions & 1 deletion sonic_package_manager/manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -659,6 +659,10 @@ def reset(self, name: str, force: bool = False, skip_host_plugins: bool = False)
allow_downgrade=True,
skip_host_plugins=skip_host_plugins)

@under_lock
def get_docker_client(self, dockerd_sock:str):
    """Return a DockerClient connected to the daemon at *dockerd_sock*.

    Uses a 120-second API timeout on the client.
    """
    base_url = f'unix://{dockerd_sock}'
    return docker.DockerClient(base_url=base_url, timeout=120)

@under_lock
def migrate_packages(self,
old_package_database: PackageDatabase,
Expand Down Expand Up @@ -701,7 +705,8 @@ def migrate_package(old_package_entry,
# dockerd_sock is defined, so use docked_sock to connect to
# dockerd and fetch package image from it.
log.info(f'installing {name} from old docker library')
docker_api = DockerApi(docker.DockerClient(base_url=f'unix://{dockerd_sock}'))
docker_client = self.get_docker_client(dockerd_sock)
docker_api = DockerApi(docker_client)

image = docker_api.get_image(old_package_entry.image_id)

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
{
"FAST_RESTART_ENABLE_TABLE|system": {
"enable": "true"
}
}
21 changes: 20 additions & 1 deletion tests/db_migrator_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -538,6 +538,25 @@ def test_rename_fast_reboot_table_check_enable(self):
diff = DeepDiff(resulting_table, expected_table, ignore_order=True)
assert not diff

def test_ignore_rename_fast_reboot_table(self):
    """Migration must leave an already-populated FAST_RESTART_ENABLE_TABLE untouched."""
    device_info.get_sonic_version_info = get_sonic_version_info_mlnx
    dbconnector.dedicated_dbs['STATE_DB'] = os.path.join(mock_db_path, 'state_db', 'fast_reboot_upgrade_from_202205')
    dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'empty-config-input')

    import db_migrator
    migrator = db_migrator.DBMigrator(None)
    migrator.migrate()

    # Re-point STATE_DB at the same fixture: the expected content is the input itself.
    dbconnector.dedicated_dbs['STATE_DB'] = os.path.join(mock_db_path, 'state_db', 'fast_reboot_upgrade_from_202205')
    reference_db = SonicV2Connector(host='127.0.0.1')
    reference_db.connect(reference_db.STATE_DB)

    migrated = migrator.stateDB.get_all(migrator.stateDB.STATE_DB, 'FAST_RESTART_ENABLE_TABLE|system')
    expected = reference_db.get_all(reference_db.STATE_DB, 'FAST_RESTART_ENABLE_TABLE|system')

    assert not DeepDiff(migrated, expected, ignore_order=True)

class TestWarmUpgrade_to_2_0_2(object):
@classmethod
def setup_class(cls):
Expand Down Expand Up @@ -744,4 +763,4 @@ def test_fast_reboot_upgrade_to_4_0_3(self):
expected_db = self.mock_dedicated_config_db(db_after_migrate)
advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_4_0_3')
assert not self.check_config_db(dbmgtr.configDB, expected_db.cfgdb)
assert dbmgtr.CURRENT_VERSION == expected_db.cfgdb.get_entry('VERSIONS', 'DATABASE')['VERSION']
assert dbmgtr.CURRENT_VERSION == expected_db.cfgdb.get_entry('VERSIONS', 'DATABASE')['VERSION']
30 changes: 29 additions & 1 deletion tests/generic_config_updater/files/change_applier_test.data.json
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,23 @@
"src_address": "10.1.0.32"
}
},
"BGP_SENTINELS": {
"BGPSentinelV6": {
"ip_range": [
"2603:10a0:321:82f9::/64",
"2603:10a1:30a:8000::/59"
],
"name": "BGPSentinelV6",
"src_address": "fc00:1::32"
},
"BGPSentinel": {
"ip_range": [
"10.1.0.0/24"
],
"name": "BGPSentinel",
"src_address": "10.1.0.32"
}
},
"BUFFER_PG": {
"Ethernet0|3-4": {
"profile": "[BUFFER_PROFILE|pg_lossless_40000_300m_profile]"
Expand Down Expand Up @@ -253,7 +270,8 @@
},
"remove": {
"BGP_NEIGHBOR": { "10.0.0.57": {} },
"BGP_PEER_RANGE": { "BGPSLBPassive": {} }
"BGP_PEER_RANGE": { "BGPSLBPassive": {} },
"BGP_SENTINELS": { "BGPSentinelV6": {} }
},
"services_validated": [ "vlan_validate", "acl_validate" ]
},
Expand Down Expand Up @@ -297,6 +315,16 @@
"name": "BGPSLBPassive",
"src_address": "10.1.0.32"
}
},
"BGP_SENTINELS": {
"BGPSentinelV6": {
"ip_range": [
"2603:10a0:321:82f9::/64",
"2603:10a1:30a:8000::/59"
],
"name": "BGPSentinelV6",
"src_address": "fc00:1::32"
}
}
},
"remove": {
Expand Down
Loading

0 comments on commit b72f4be

Please sign in to comment.