From ca75f3538b3715ce4313f10d20cb279762f8761b Mon Sep 17 00:00:00 2001 From: bingwang Date: Wed, 27 Apr 2022 05:19:51 -0700 Subject: [PATCH 1/7] Update orchagent to support new field pfcwd_sw_enable Signed-off-by: bingwang --- cfgmgr/buffermgr.cpp | 181 ++++++++++++++++++------- cfgmgr/buffermgr.h | 9 +- cfgmgr/buffermgrd.cpp | 3 +- orchagent/pfcwdorch.cpp | 10 +- orchagent/port.h | 3 +- orchagent/portsorch.cpp | 37 ++++++ orchagent/portsorch.h | 3 + orchagent/qosorch.cpp | 18 ++- orchagent/qosorch.h | 1 + tests/test_buffer_traditional.py | 107 ++++++++++++++- tests/test_pfcwd.py | 219 ++++++++++++++++++++++++++++++- 11 files changed, 521 insertions(+), 70 deletions(-) diff --git a/cfgmgr/buffermgr.cpp b/cfgmgr/buffermgr.cpp index f79c5112e8..8d084b0eca 100644 --- a/cfgmgr/buffermgr.cpp +++ b/cfgmgr/buffermgr.cpp @@ -133,11 +133,11 @@ Create/update two tables: profile (in m_cfgBufferProfileTable) and port buffer ( } } */ -task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up) +task_process_status BufferMgr::doSpeedUpdateTask(string port) { - vector fvVectorPg, fvVectorProfile; string cable; string speed; + string pfc_enable; if (m_cableLenLookup.count(port) == 0) { @@ -152,9 +152,25 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up) return task_process_status::task_success; } - speed = m_speedLookup[port]; + if (m_portStatusLookup.count(port) == 0) + { + // admin_statue is not available yet. This can happen when notification of `PORT_QOS_MAP` table + // comes first. + SWSS_LOG_INFO("pfc_enable status is not available for port %s", port.c_str()); + return task_process_status::task_need_retry; + } + + if (m_portPfcStatus.count(port) == 0) + { + // PORT_QOS_MAP is not ready yet. 
The notification is cleared, and buffer pg + // will be handled when `pfc_enable` in `PORT_QOS_MAP` table is available + SWSS_LOG_INFO("pfc_enable status is not available for port %s", port.c_str()); + return task_process_status::task_success; + } + pfc_enable = m_portPfcStatus[port]; - string buffer_pg_key = port + m_cfgBufferPgTable.getTableNameSeparator() + LOSSLESS_PGS; + speed = m_speedLookup[port]; + // key format is pg_lossless___profile string buffer_profile_key = "pg_lossless_" + speed + "_" + cable + "_profile"; string profile_ref = string("[") + @@ -163,25 +179,33 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up) buffer_profile_key + "]"; - m_cfgBufferPgTable.get(buffer_pg_key, fvVectorPg); + + vector lossless_pgs = tokenize(pfc_enable, ','); - if (!admin_up && m_platform == "mellanox") + if (m_portStatusLookup[port] == "down" && m_platform == "mellanox") { - // Remove the entry in BUFFER_PG table if any - if (!fvVectorPg.empty()) + for (auto lossless_pg : lossless_pgs) { - for (auto &prop : fvVectorPg) + // Remove the entry in BUFFER_PG table if any + vector fvVectorPg; + string buffer_pg_key = port + m_cfgBufferPgTable.getTableNameSeparator() + lossless_pg; + + m_cfgBufferPgTable.get(buffer_pg_key, fvVectorPg); + if (!fvVectorPg.empty()) { - if (fvField(prop) == "profile") + for (auto &prop : fvVectorPg) { - if (fvValue(prop) == profile_ref) + if (fvField(prop) == "profile") { - SWSS_LOG_NOTICE("Removing PG %s from port %s which is administrative down", buffer_pg_key.c_str(), port.c_str()); - m_cfgBufferPgTable.del(buffer_pg_key); - } - else - { - SWSS_LOG_NOTICE("Not default profile %s is configured on PG %s, won't reclaim buffer", fvValue(prop).c_str(), buffer_pg_key.c_str()); + if (fvValue(prop) == profile_ref) + { + SWSS_LOG_NOTICE("Removing PG %s from port %s which is administrative down", buffer_pg_key.c_str(), port.c_str()); + m_cfgBufferPgTable.del(buffer_pg_key); + } + else + { + SWSS_LOG_NOTICE("Not default 
profile %s is configured on PG %s, won't reclaim buffer", fvValue(prop).c_str(), buffer_pg_key.c_str()); + } } } } @@ -189,14 +213,15 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up) return task_process_status::task_success; } - + if (m_pgProfileLookup.count(speed) == 0 || m_pgProfileLookup[speed].count(cable) == 0) { - SWSS_LOG_ERROR("Unable to create/update PG profile for port %s. No PG profile configured for speed %s and cable length %s", - port.c_str(), speed.c_str(), cable.c_str()); - return task_process_status::task_invalid_entry; + SWSS_LOG_ERROR("Unable to create/update PG profile for port %s. No PG profile configured for speed %s and cable length %s", + port.c_str(), speed.c_str(), cable.c_str()); + return task_process_status::task_invalid_entry; } + vector fvVectorProfile; // check if profile already exists - if yes - skip creation m_cfgBufferProfileTable.get(buffer_profile_key, fvVectorProfile); // Create record in BUFFER_PROFILE table @@ -233,22 +258,29 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up) { SWSS_LOG_NOTICE("Reusing existing profile '%s'", buffer_profile_key.c_str()); } - - /* Check if PG Mapping is already then log message and return. */ - for (auto& prop : fvVectorPg) + + for (auto lossless_pg : lossless_pgs) { - if ((fvField(prop) == "profile") && (profile_ref == fvValue(prop))) + vector fvVectorPg; + string buffer_pg_key = port + m_cfgBufferPgTable.getTableNameSeparator() + lossless_pg; + + m_cfgBufferPgTable.get(buffer_pg_key, fvVectorPg); + + /* Check if PG Mapping is already then log message and return. 
*/ + for (auto& prop : fvVectorPg) { - SWSS_LOG_NOTICE("PG to Buffer Profile Mapping %s already present", buffer_pg_key.c_str()); - return task_process_status::task_success; + if ((fvField(prop) == "profile") && (profile_ref == fvValue(prop))) + { + SWSS_LOG_NOTICE("PG to Buffer Profile Mapping %s already present", buffer_pg_key.c_str()); + continue; + } } - } - fvVectorPg.clear(); + fvVectorPg.clear(); - fvVectorPg.push_back(make_pair("profile", profile_ref)); - SWSS_LOG_INFO("Setting buffer profile to PG %s", buffer_pg_key.c_str()); - m_cfgBufferPgTable.set(buffer_pg_key, fvVectorPg); + fvVectorPg.push_back(make_pair("profile", profile_ref)); + m_cfgBufferPgTable.set(buffer_pg_key, fvVectorPg); + } return task_process_status::task_success; } @@ -388,6 +420,47 @@ void BufferMgr::doBufferMetaTask(Consumer &consumer) } } +/* +Parse PORT_QOS_MAP to retrieve on which queue PFC is enable, and +cached in a map +*/ +void BufferMgr::doPortQosTableTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple tuple = it->second; + string port_name = kfvKey(tuple); + string op = kfvOp(tuple); + if (op == SET_COMMAND) + { + bool update_pfc_enable = false; + for (auto itp : kfvFieldsValues(tuple)) + { + if (fvField(itp) == "pfc_enable") + { + if (m_portPfcStatus.count(port_name) == 0 || m_portPfcStatus[port_name] != fvValue(itp)) + { + m_portPfcStatus[port_name] = fvValue(itp); + update_pfc_enable = true; + } + SWSS_LOG_INFO("Got pfc enable status for port %s status %s", port_name.c_str(), fvValue(itp).c_str()); + break; + } + } + if (update_pfc_enable) + { + // The return status is ignored + doSpeedUpdateTask(port_name); + } + } + it = consumer.m_toSync.erase(it); + } + +} + void BufferMgr::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); @@ -441,6 +514,12 @@ void BufferMgr::doTask(Consumer &consumer) return; } + if (table_name == CFG_PORT_QOS_MAP_TABLE_NAME) + { + 
doPortQosTableTask(consumer); + return; + } + auto it = consumer.m_toSync.begin(); while (it != consumer.m_toSync.end()) { @@ -473,7 +552,7 @@ void BufferMgr::doTask(Consumer &consumer) } if (fvField(i) == "admin_status") { - admin_up = ("up" == fvValue(i)); + m_portStatusLookup[port] = fvValue(i); } } @@ -481,29 +560,29 @@ void BufferMgr::doTask(Consumer &consumer) { // create/update profile for port SWSS_LOG_DEBUG("Port %s Speed %s admin status %d", port.c_str(), m_speedLookup[port].c_str(), admin_up); - task_status = doSpeedUpdateTask(port, admin_up); + task_status = doSpeedUpdateTask(port); SWSS_LOG_DEBUG("Return code for doSpeedUpdateTask %d", task_status); } } - } - switch (task_status) - { - case task_process_status::task_failed: - SWSS_LOG_ERROR("Failed to process table update"); - return; - case task_process_status::task_need_retry: - SWSS_LOG_INFO("Unable to process table update. Will retry..."); - ++it; - break; - case task_process_status::task_invalid_entry: - SWSS_LOG_ERROR("Failed to process invalid entry, drop it"); - it = consumer.m_toSync.erase(it); - break; - default: - it = consumer.m_toSync.erase(it); - break; + switch (task_status) + { + case task_process_status::task_failed: + SWSS_LOG_ERROR("Failed to process table update"); + return; + case task_process_status::task_need_retry: + SWSS_LOG_INFO("Unable to process table update. 
Will retry..."); + ++it; + break; + case task_process_status::task_invalid_entry: + SWSS_LOG_ERROR("Failed to process invalid entry, drop it"); + it = consumer.m_toSync.erase(it); + break; + default: + it = consumer.m_toSync.erase(it); + break; + } } } } diff --git a/cfgmgr/buffermgr.h b/cfgmgr/buffermgr.h index d8da025616..54e1acb1e8 100644 --- a/cfgmgr/buffermgr.h +++ b/cfgmgr/buffermgr.h @@ -11,7 +11,6 @@ namespace swss { #define INGRESS_LOSSLESS_PG_POOL_NAME "ingress_lossless_pool" -#define LOSSLESS_PGS "3-4" #define BUFFERMGR_TIMER_PERIOD 10 @@ -28,6 +27,8 @@ typedef std::map pg_profile_lookup_t; typedef std::map port_cable_length_t; typedef std::map port_speed_t; +typedef std::map port_pfc_status_t; +typedef std::map port_admin_status_t; class BufferMgr : public Orch { @@ -56,11 +57,12 @@ class BufferMgr : public Orch pg_profile_lookup_t m_pgProfileLookup; port_cable_length_t m_cableLenLookup; + port_admin_status_t m_portStatusLookup; port_speed_t m_speedLookup; std::string getPgPoolMode(); void readPgProfileLookupFile(std::string); task_process_status doCableTask(std::string port, std::string cable_length); - task_process_status doSpeedUpdateTask(std::string port, bool admin_up); + task_process_status doSpeedUpdateTask(std::string port); void doBufferTableTask(Consumer &consumer, ProducerStateTable &applTable); void transformSeperator(std::string &name); @@ -68,6 +70,9 @@ class BufferMgr : public Orch void doTask(Consumer &consumer); void doBufferMetaTask(Consumer &consumer); + + port_pfc_status_t m_portPfcStatus; + void doPortQosTableTask(Consumer &consumer); }; } diff --git a/cfgmgr/buffermgrd.cpp b/cfgmgr/buffermgrd.cpp index 71cff8d6c2..9926596d9e 100644 --- a/cfgmgr/buffermgrd.cpp +++ b/cfgmgr/buffermgrd.cpp @@ -210,7 +210,8 @@ int main(int argc, char **argv) CFG_BUFFER_QUEUE_TABLE_NAME, CFG_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, CFG_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME, - CFG_DEVICE_METADATA_TABLE_NAME + CFG_DEVICE_METADATA_TABLE_NAME, + 
CFG_PORT_QOS_MAP_TABLE_NAME }; cfgOrchList.emplace_back(new BufferMgr(&cfgDb, &applDb, pg_lookup_file, cfg_buffer_tables)); } diff --git a/orchagent/pfcwdorch.cpp b/orchagent/pfcwdorch.cpp index be4c1e51c4..62765ab0a1 100644 --- a/orchagent/pfcwdorch.cpp +++ b/orchagent/pfcwdorch.cpp @@ -399,9 +399,9 @@ void PfcWdSwOrch::enableBigRedSwitchMode() continue; } - if (!gPortsOrch->getPortPfc(port.m_port_id, &pfcMask)) + if (!gPortsOrch->getPortPfcWatchdogStatus(port.m_port_id, &pfcMask)) { - SWSS_LOG_ERROR("Failed to get PFC mask on port %s", port.m_alias.c_str()); + SWSS_LOG_ERROR("Failed to get PFC watchdog mask on port %s", port.m_alias.c_str()); return; } @@ -443,9 +443,9 @@ void PfcWdSwOrch::enableBigRedSwitchMode() continue; } - if (!gPortsOrch->getPortPfc(port.m_port_id, &pfcMask)) + if (!gPortsOrch->getPortPfcWatchdogStatus(port.m_port_id, &pfcMask)) { - SWSS_LOG_ERROR("Failed to get PFC mask on port %s", port.m_alias.c_str()); + SWSS_LOG_ERROR("Failed to get PFC watchdog mask on port %s", port.m_alias.c_str()); return; } @@ -489,7 +489,7 @@ bool PfcWdSwOrch::registerInWdDb(const Port& port, uint8_t pfcMask = 0; - if (!gPortsOrch->getPortPfc(port.m_port_id, &pfcMask)) + if (!gPortsOrch->getPortPfcWatchdogStatus(port.m_port_id, &pfcMask)) { SWSS_LOG_ERROR("Failed to get PFC mask on port %s", port.m_alias.c_str()); return false; diff --git a/orchagent/port.h b/orchagent/port.h index ad77f0a5d2..a2a1ce30b2 100644 --- a/orchagent/port.h +++ b/orchagent/port.h @@ -106,7 +106,8 @@ class Port std::vector m_queue_ids; std::vector m_priority_group_ids; sai_port_priority_flow_control_mode_t m_pfc_asym = SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_COMBINED; - uint8_t m_pfc_bitmask = 0; + uint8_t m_pfc_bitmask = 0; // PFC enable bit mask + uint8_t m_pfcwd_sw_bitmask = 0; // PFC software watchdog enable uint32_t m_nat_zone_id = 0; uint32_t m_vnid = VNID_NONE; uint32_t m_fdb_count = 0; diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index a0cd7c2d55..ed74acd0c0 100755 
--- a/orchagent/portsorch.cpp
+++ b/orchagent/portsorch.cpp
@@ -1040,6 +1040,43 @@ bool PortsOrch::setPortPfc(sai_object_id_t portId, uint8_t pfc_bitmask)
     return true;
 }
 
+bool PortsOrch::setPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t pfcwd_bitmask)
+{
+    SWSS_LOG_ENTER();
+
+    Port p;
+
+    if (!getPort(portId, p))
+    {
+        SWSS_LOG_ERROR("Failed to get port object for port id 0x%" PRIx64, portId);
+        return false;
+    }
+
+    p.m_pfcwd_sw_bitmask = pfcwd_bitmask;
+
+    m_portList[p.m_alias] = p;
+
+    SWSS_LOG_INFO("Set PFC watchdog port id=0x%" PRIx64 ", bitmask=0x%x", portId, pfcwd_bitmask);
+    return true;
+}
+
+bool PortsOrch::getPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t *pfcwd_bitmask)
+{
+    SWSS_LOG_ENTER();
+
+    Port p;
+
+    if (!pfcwd_bitmask || !getPort(portId, p))
+    {
+        SWSS_LOG_ERROR("Failed to get port object for port id 0x%" PRIx64, portId);
+        return false;
+    }
+
+    *pfcwd_bitmask = p.m_pfcwd_sw_bitmask;
+
+    return true;
+}
+
 bool PortsOrch::setPortPfcAsym(Port &port, string pfc_asym)
 {
     SWSS_LOG_ENTER();
diff --git a/orchagent/portsorch.h b/orchagent/portsorch.h
index 564e66d34a..152a762893 100755
--- a/orchagent/portsorch.h
+++ b/orchagent/portsorch.h
@@ -122,6 +122,9 @@ class PortsOrch : public Orch, public Subject
     bool getPortPfc(sai_object_id_t portId, uint8_t *pfc_bitmask);
     bool setPortPfc(sai_object_id_t portId, uint8_t pfc_bitmask);
 
+    bool setPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t pfc_bitmask);
+    bool getPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t *pfc_bitmask);
+
     void generateQueueMap();
     void generatePriorityGroupMap();
     void generatePortCounterMap();
diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp
index 7b2e43e0dd..81f6c00cfe 100644
--- a/orchagent/qosorch.cpp
+++ b/orchagent/qosorch.cpp
@@ -1356,6 +1356,7 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer)
         string op = kfvOp(tuple);
 
         sai_uint8_t pfc_enable = 0;
+        sai_uint8_t pfcwd_sw_enable = 0;
         map<sai_port_attr_t, pair<string, sai_object_id_t>> update_list;
         for (auto it = 
kfvFieldsValues(tuple).begin(); it != kfvFieldsValues(tuple).end(); it++) { @@ -1376,14 +1377,24 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer) update_list[qos_to_attr_map[map_type_name]] = make_pair(map_name, id); } - if (fvField(*it) == pfc_enable_name) + else if (fvField(*it) == pfc_enable_name || fvField(*it) == pfcwd_sw_enable_name) { + sai_uint8_t bitmask = 0; vector queue_indexes; queue_indexes = tokenize(fvValue(*it), list_item_delimiter); for(string q_ind : queue_indexes) { sai_uint8_t q_val = (uint8_t)stoi(q_ind); - pfc_enable |= (uint8_t)(1 << q_val); + bitmask |= (uint8_t)(1 << q_val); + } + + if (fvField(*it) == pfc_enable_name) + { + pfc_enable = bitmask; + } + else + { + pfcwd_sw_enable = bitmask; } } } @@ -1436,6 +1447,9 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer) SWSS_LOG_INFO("Applied PFC bits 0x%x to port %s", pfc_enable, port_name.c_str()); } + + // Save pfd_wd bitmask unconditionally + gPortsOrch->setPortPfcWatchdogStatus(port.m_port_id, pfcwd_sw_enable); } SWSS_LOG_NOTICE("Applied QoS maps to ports"); diff --git a/orchagent/qosorch.h b/orchagent/qosorch.h index 37002be566..b5da05a68a 100644 --- a/orchagent/qosorch.h +++ b/orchagent/qosorch.h @@ -13,6 +13,7 @@ const string dot1p_to_tc_field_name = "dot1p_to_tc_map"; const string pfc_to_pg_map_name = "pfc_to_pg_map"; const string pfc_to_queue_map_name = "pfc_to_queue_map"; const string pfc_enable_name = "pfc_enable"; +const string pfcwd_sw_enable_name = "pfcwd_sw_enable"; const string tc_to_pg_map_field_name = "tc_to_pg_map"; const string tc_to_queue_field_name = "tc_to_queue_map"; const string scheduler_field_name = "scheduler"; diff --git a/tests/test_buffer_traditional.py b/tests/test_buffer_traditional.py index 44bea70620..ae8f0b9e2e 100644 --- a/tests/test_buffer_traditional.py +++ b/tests/test_buffer_traditional.py @@ -3,7 +3,7 @@ class TestBuffer(object): - LOSSLESS_PGS = [3, 4] + lossless_pgs = [] INTF = "Ethernet0" def 
setup_db(self, dvs): @@ -15,6 +15,10 @@ def setup_db(self, dvs): # enable PG watermark self.set_pg_wm_status('enable') + def get_pfc_enable_queues(self): + qos_map = self.config_db.get_entry("PORT_QOS_MAP", self.INTF) + return qos_map['pfc_enable'].split(',') + def get_pg_oid(self, pg): fvs = dict() fvs = self.counter_db.get_entry("COUNTERS_PG_NAME_MAP", "") @@ -51,19 +55,32 @@ def get_asic_buf_pg_profiles(self): buf_pg_entries = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP", self.pg_name_map[pg]) self.buf_pg_profile[pg] = buf_pg_entries["SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE"] - def change_cable_len(self, cable_len): + def change_cable_len(self, cable_len, extra_port=None): fvs = dict() fvs[self.INTF] = cable_len + if extra_port: + fvs[extra_port] = cable_len self.config_db.update_entry("CABLE_LENGTH", "AZURE", fvs) + def set_port_qos_table(self, port, pfc_enable_flag): + fvs=dict() + fvs['pfc_enable'] = pfc_enable_flag + self.config_db.update_entry("PORT_QOS_MAP", port, fvs) + self.lossless_pgs = pfc_enable_flag.split(',') + + def get_pg_name_map(self): + pg_name_map = dict() + for pg in self.lossless_pgs: + pg_name = "{}:{}".format(self.INTF, pg) + pg_name_map[pg_name] = self.get_pg_oid(pg_name) + return pg_name_map + @pytest.fixture def setup_teardown_test(self, dvs): try: self.setup_db(dvs) - pg_name_map = dict() - for pg in self.LOSSLESS_PGS: - pg_name = "{}:{}".format(self.INTF, pg) - pg_name_map[pg_name] = self.get_pg_oid(pg_name) + self.set_port_qos_table(self.INTF, '2,3,4,6') + pg_name_map = self.get_pg_name_map() yield pg_name_map finally: self.teardown() @@ -119,7 +136,8 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): self.app_db.wait_for_deleted_entry("BUFFER_PROFILE_TABLE", test_lossless_profile) # buffer pgs should still point to the original buffer profile - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":3-4", {"profile": 
"[BUFFER_PROFILE_TABLE:{}]".format(orig_lossless_profile)}) + for pg in self.lossless_pgs: + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":" + pg, {"profile": orig_lossless_profile}) fvs = dict() for pg in self.pg_name_map: fvs["SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE"] = self.buf_pg_profile[pg] @@ -152,3 +170,78 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): if orig_speed: dvs.runcmd("config interface speed {} {}".format(self.INTF, orig_speed)) dvs.runcmd("config interface shutdown {}".format(self.INTF)) + + # To verify the BUFFER_PG is not hardcoded to 3,4 + # buffermgrd will read 'pfc_enable' entry and apply lossless profile to that queue + def test_buffer_pg_update(self, dvs, setup_teardown_test): + self.pg_name_map = setup_teardown_test + orig_cable_len = None + orig_speed = None + test_speed = None + extra_port = "Ethernet4" + try: + # Retrieve cable len + fvs_cable_len = self.config_db.get_entry("CABLE_LENGTH", "AZURE") + orig_cable_len = fvs_cable_len[self.INTF] + if orig_cable_len == "0m": + cable_len_for_test = "300m" + fvs_cable_len[self.INTF] = cable_len_for_test + fvs_cable_len[extra_port] = cable_len_for_test + + self.config_db.update_entry("CABLE_LENGTH", "AZURE", fvs_cable_len) + else: + cable_len_for_test = orig_cable_len + # Ethernet4 is set to up, while no 'pfc_enable' available. 
`Ethernet0` is not supposed to be impacted + dvs.port_admin_set(extra_port, "up") + + dvs.port_admin_set(self.INTF, "up") + + # Retrieve port speed + fvs_port = self.config_db.get_entry("PORT", self.INTF) + orig_speed = fvs_port["speed"] + + # Make sure the buffer PG has been created + orig_lossless_profile = "pg_lossless_{}_{}_profile".format(orig_speed, cable_len_for_test) + self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", orig_lossless_profile) + self.orig_profiles = self.get_asic_buf_profile() + + # get the orig buf profiles attached to the pgs + self.get_asic_buf_pg_profiles() + + # Update port speed + if orig_speed == "100000": + test_speed = "40000" + elif orig_speed == "40000": + test_speed = "100000" + # change intf speed to 'test_speed' + dvs.port_field_set(self.INTF, "speed", test_speed) + dvs.port_field_set(extra_port, "speed", test_speed) + # Verify new profile is generated + new_lossless_profile = "pg_lossless_{}_{}_profile".format(test_speed, cable_len_for_test) + self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", new_lossless_profile) + + # Verify BUFFER_PG is updated + for pg in self.lossless_pgs: + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":" + pg, {"profile": new_lossless_profile}) + + fvs_negative = {} + for pg in self.pg_name_map: + # verify that buffer pgs do not point to the old profile since we cannot deduce the new profile oid + fvs_negative["SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE"] = self.buf_pg_profile[pg] + self.asic_db.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP", self.pg_name_map[pg], fvs_negative) + + # Add pfc_enable field for extra port + self.set_port_qos_table(extra_port, '2,3,4,6') + time.sleep(1) + # Verify BUFFER_PG is updated when pfc_enable is available + for pg in self.lossless_pgs: + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", extra_port + ":" + pg, {"profile": new_lossless_profile}) + finally: + if orig_cable_len: + 
self.change_cable_len(orig_cable_len, extra_port) + if orig_speed: + dvs.port_field_set(self.INTF, "speed", orig_speed) + dvs.port_field_set(extra_port, "speed", orig_speed) + dvs.port_admin_set(self.INTF, "down") + dvs.port_admin_set(extra_port, "down") + diff --git a/tests/test_pfcwd.py b/tests/test_pfcwd.py index c569bc8a43..249609aee2 100644 --- a/tests/test_pfcwd.py +++ b/tests/test_pfcwd.py @@ -77,7 +77,224 @@ def test_PfcWdAclCreationDeletion(self, dvs, dvs_acl, testlog): finally: dvs_acl.remove_acl_table(PFCWD_TABLE_NAME) -# + + +class TestPfcwdFunc(object): + @pytest.fixture + def setup_teardown_test(self, dvs): + self.get_db_handle(dvs) + + self.test_ports = ["Ethernet0"] + + self.setup_test(dvs) + self.get_port_oids() + self.get_queue_oids() + + yield + + self.teardown_test(dvs) + + def setup_test(self, dvs): + # get original cable len for test ports + fvs = self.config_db.get_entry("CABLE_LENGTH", "AZURE") + self.orig_cable_len = dict() + for port in self.test_ports: + self.orig_cable_len[port] = fvs[port] + # set cable len to non zero value. 
if port is down, default cable len is 0
+            self.set_cable_len(port, "5m")
+            # startup port
+            dvs.port_admin_set(port, "up")
+
+        # enable pfcwd
+        self.set_flex_counter_status("PFCWD", "enable")
+        # enable queue so that queue oids are generated
+        self.set_flex_counter_status("QUEUE", "enable")
+
+    def teardown_test(self, dvs):
+        # disable pfcwd
+        self.set_flex_counter_status("PFCWD", "disable")
+        # disable queue
+        self.set_flex_counter_status("QUEUE", "disable")
+
+        for port in self.test_ports:
+            if self.orig_cable_len:
+                self.set_cable_len(port, self.orig_cable_len[port])
+            # shutdown port
+            dvs.port_admin_set(port, "down")
+
+    def get_db_handle(self, dvs):
+        self.app_db = dvs.get_app_db()
+        self.asic_db = dvs.get_asic_db()
+        self.config_db = dvs.get_config_db()
+        self.counters_db = dvs.get_counters_db()
+
+    def set_flex_counter_status(self, key, state):
+        fvs = {'FLEX_COUNTER_STATUS': state}
+        self.config_db.update_entry("FLEX_COUNTER_TABLE", key, fvs)
+        time.sleep(1)
+
+    def get_queue_oids(self):
+        self.queue_oids = self.counters_db.get_entry("COUNTERS_QUEUE_NAME_MAP", "")
+
+    def get_port_oids(self):
+        self.port_oids = self.counters_db.get_entry("COUNTERS_PORT_NAME_MAP", "")
+
+    def _get_bitmask(self, queues):
+        mask = 0
+        if queues is not None:
+            for queue in queues:
+                mask = mask | 1 << queue
+
+        return str(mask)
+
+    def set_ports_pfc(self, status='enable', pfc_queues=[3,4]):
+        keyname = 'pfcwd_sw_enable'
+        for port in self.test_ports:
+            if 'enable' in status:
+                queues = ",".join([str(q) for q in pfc_queues])
+                fvs = {keyname: queues, 'pfc_enable': queues}
+                self.config_db.create_entry("PORT_QOS_MAP", port, fvs)
+            else:
+                self.config_db.delete_entry("PORT_QOS_MAP", port)
+
+    def set_cable_len(self, port_name, cable_len):
+        fvs = {port_name: cable_len}
+        self.config_db.update_entry("CABLE_LENGTH", "AZURE", fvs)
+
+    def start_pfcwd_on_ports(self, poll_interval="200", detection_time="200", restoration_time="200", action="drop"):
+        pfcwd_info = {"POLL_INTERVAL": 
poll_interval} + self.config_db.update_entry("PFC_WD", "GLOBAL", pfcwd_info) + + pfcwd_info = {"action": action, + "detection_time" : detection_time, + "restoration_time": restoration_time + } + for port in self.test_ports: + self.config_db.update_entry("PFC_WD", port, pfcwd_info) + + def stop_pfcwd_on_ports(self): + for port in self.test_ports: + self.config_db.delete_entry("PFC_WD", port) + + def verify_ports_pfc(self, queues=None): + mask = self._get_bitmask(queues) + fvs = {"SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL" : mask} + for port in self.test_ports: + self.asic_db.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", self.port_oids[port], fvs) + + def verify_pfcwd_state(self, queues, state="stormed"): + fvs = {"PFC_WD_STATUS": state} + for port in self.test_ports: + for queue in queues: + queue_name = port + ":" + str(queue) + self.counters_db.wait_for_field_match("COUNTERS", self.queue_oids[queue_name], fvs) + + def verify_pfcwd_counters(self, queues, restore="0"): + fvs = {"PFC_WD_QUEUE_STATS_DEADLOCK_DETECTED" : "1", + "PFC_WD_QUEUE_STATS_DEADLOCK_RESTORED" : restore + } + for port in self.test_ports: + for queue in queues: + queue_name = port + ":" + str(queue) + self.counters_db.wait_for_field_match("COUNTERS", self.queue_oids[queue_name], fvs) + + def reset_pfcwd_counters(self, queues): + fvs = {"PFC_WD_QUEUE_STATS_DEADLOCK_DETECTED" : "0", + "PFC_WD_QUEUE_STATS_DEADLOCK_RESTORED" : "0" + } + for port in self.test_ports: + for queue in queues: + queue_name = port + ":" + str(queue) + self.counters_db.update_entry("COUNTERS", self.queue_oids[queue_name], fvs) + + def set_storm_state(self, queues, state="enabled"): + fvs = {"DEBUG_STORM": state} + for port in self.test_ports: + for queue in queues: + queue_name = port + ":" + str(queue) + self.counters_db.update_entry("COUNTERS", self.queue_oids[queue_name], fvs) + + def test_pfcwd_software_single_queue(self, dvs, setup_teardown_test): + try: + # enable PFC on queues + test_queues = [3, 4] + 
self.set_ports_pfc(pfc_queues=test_queues) + + # verify in asic db + self.verify_ports_pfc(test_queues) + + # start pfcwd + self.start_pfcwd_on_ports() + + # start pfc storm + storm_queue = [3] + self.set_storm_state(storm_queue) + + # verify pfcwd is triggered + self.verify_pfcwd_state(storm_queue) + + # verify pfcwd counters + self.verify_pfcwd_counters(storm_queue) + + # verify if queue is disabled + self.verify_ports_pfc(queues=[4]) + + # stop storm + self.set_storm_state(storm_queue, state="disabled") + + # verify pfcwd state is restored + self.verify_pfcwd_state(storm_queue, state="operational") + + # verify pfcwd counters + self.verify_pfcwd_counters(storm_queue, restore="1") + + # verify if queue is enabled + self.verify_ports_pfc(test_queues) + + finally: + self.reset_pfcwd_counters(storm_queue) + self.stop_pfcwd_on_ports() + + def test_pfcwd_software_multi_queue(self, dvs, setup_teardown_test): + try: + # enable PFC on queues + test_queues = [3, 4] + self.set_ports_pfc(pfc_queues=test_queues) + + # verify in asic db + self.verify_ports_pfc(test_queues) + + # start pfcwd + self.start_pfcwd_on_ports() + + # start pfc storm + self.set_storm_state(test_queues) + + # verify pfcwd is triggered + self.verify_pfcwd_state(test_queues) + + # verify pfcwd counters + self.verify_pfcwd_counters(test_queues) + + # verify if queue is disabled. 
Expected mask is 0 + self.verify_ports_pfc() + + # stop storm + self.set_storm_state(test_queues, state="disabled") + + # verify pfcwd state is restored + self.verify_pfcwd_state(test_queues, state="operational") + + # verify pfcwd counters + self.verify_pfcwd_counters(test_queues, restore="1") + + # verify if queue is enabled + self.verify_ports_pfc(test_queues) + + finally: + self.reset_pfcwd_counters(test_queues) + self.stop_pfcwd_on_ports() + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying def test_nonflaky_dummy(): From 97f30278d6065dc90f38b7d5733ebe38c8fb8f61 Mon Sep 17 00:00:00 2001 From: bingwang Date: Thu, 28 Apr 2022 08:33:31 +0000 Subject: [PATCH 2/7] Fix vstest Signed-off-by: bingwang --- tests/conftest.py | 10 ++++++++++ tests/test_buffer_traditional.py | 6 +++--- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 9b565cfa06..dafbc04309 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1101,6 +1101,16 @@ def getVlanOid(self, vlanId): break return vlan_oid + def port_field_set(self, port, field, value): + cdb = swsscommon.DBConnector(4, self.redis_sock, 0) + tbl = swsscommon.Table(cdb, "PORT") + fvs = swsscommon.FieldValuePairs([(field, value)]) + tbl.set(port, fvs) + time.sleep(1) + + def port_admin_set(self, port, status): + self.port_field_set(port, "admin_status", status) + # deps: acl_portchannel, fdb def getCrmCounterValue(self, key, counter): counters_db = swsscommon.DBConnector(swsscommon.COUNTERS_DB, self.redis_sock, 0) diff --git a/tests/test_buffer_traditional.py b/tests/test_buffer_traditional.py index ae8f0b9e2e..c90bb80919 100644 --- a/tests/test_buffer_traditional.py +++ b/tests/test_buffer_traditional.py @@ -137,7 +137,7 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): # buffer pgs should still point to the original buffer profile for pg in self.lossless_pgs: - 
self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":" + pg, {"profile": orig_lossless_profile}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":" + pg, {"profile": "[BUFFER_PROFILE_TABLE:{}]".format(orig_lossless_profile)}) fvs = dict() for pg in self.pg_name_map: fvs["SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE"] = self.buf_pg_profile[pg] @@ -222,7 +222,7 @@ def test_buffer_pg_update(self, dvs, setup_teardown_test): # Verify BUFFER_PG is updated for pg in self.lossless_pgs: - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":" + pg, {"profile": new_lossless_profile}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":" + pg, {"profile": "[BUFFER_PROFILE_TABLE:{}]".format(new_lossless_profile)}) fvs_negative = {} for pg in self.pg_name_map: @@ -235,7 +235,7 @@ def test_buffer_pg_update(self, dvs, setup_teardown_test): time.sleep(1) # Verify BUFFER_PG is updated when pfc_enable is available for pg in self.lossless_pgs: - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", extra_port + ":" + pg, {"profile": new_lossless_profile}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", extra_port + ":" + pg, {"profile": "[BUFFER_PROFILE_TABLE:{}]".format(new_lossless_profile)}) finally: if orig_cable_len: self.change_cable_len(orig_cable_len, extra_port) From 9a36c4ec7c8dc8f8ad3e5b984b9f2a67ffb19077 Mon Sep 17 00:00:00 2001 From: bingwang Date: Thu, 19 May 2022 07:21:05 +0000 Subject: [PATCH 3/7] Combine BUFFER_QUEUE 3 and 4 Signed-off-by: bingwang --- cfgmgr/buffermgr.cpp | 7 ++++++- tests/test_buffer_traditional.py | 8 +++++--- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/cfgmgr/buffermgr.cpp b/cfgmgr/buffermgr.cpp index 8d084b0eca..8668ed901e 100644 --- a/cfgmgr/buffermgr.cpp +++ b/cfgmgr/buffermgr.cpp @@ -168,7 +168,12 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port) return task_process_status::task_success; } pfc_enable = m_portPfcStatus[port]; - + // Replace 
2,3,4,6 to 2,3-4,6 to be back compatible + auto pos = pfc_enable.find("3,4"); + if (pos != string::npos) + { + pfc_enable.replace(pos, 3, "3-4"); + } speed = m_speedLookup[port]; // key format is pg_lossless___profile diff --git a/tests/test_buffer_traditional.py b/tests/test_buffer_traditional.py index c90bb80919..f330578564 100644 --- a/tests/test_buffer_traditional.py +++ b/tests/test_buffer_traditional.py @@ -67,6 +67,8 @@ def set_port_qos_table(self, port, pfc_enable_flag): fvs['pfc_enable'] = pfc_enable_flag self.config_db.update_entry("PORT_QOS_MAP", port, fvs) self.lossless_pgs = pfc_enable_flag.split(',') + # Replace 3,4 with the combination 3-4 to be back compatible + self.lossless_pg_combinations = pfc_enable_flag.replace('3,4', '3-4').split(',') def get_pg_name_map(self): pg_name_map = dict() @@ -136,7 +138,7 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): self.app_db.wait_for_deleted_entry("BUFFER_PROFILE_TABLE", test_lossless_profile) # buffer pgs should still point to the original buffer profile - for pg in self.lossless_pgs: + for pg in self.lossless_pg_combinations: self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":" + pg, {"profile": "[BUFFER_PROFILE_TABLE:{}]".format(orig_lossless_profile)}) fvs = dict() for pg in self.pg_name_map: @@ -221,7 +223,7 @@ def test_buffer_pg_update(self, dvs, setup_teardown_test): self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", new_lossless_profile) # Verify BUFFER_PG is updated - for pg in self.lossless_pgs: + for pg in self.lossless_pg_combinations: self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":" + pg, {"profile": "[BUFFER_PROFILE_TABLE:{}]".format(new_lossless_profile)}) fvs_negative = {} @@ -234,7 +236,7 @@ def test_buffer_pg_update(self, dvs, setup_teardown_test): self.set_port_qos_table(extra_port, '2,3,4,6') time.sleep(1) # Verify BUFFER_PG is updated when pfc_enable is available - for pg in self.lossless_pgs: + for pg in 
self.lossless_pg_combinations: self.app_db.wait_for_field_match("BUFFER_PG_TABLE", extra_port + ":" + pg, {"profile": "[BUFFER_PROFILE_TABLE:{}]".format(new_lossless_profile)}) finally: if orig_cable_len: From 587ab96bf35412cc65a738f20969da93799f0e16 Mon Sep 17 00:00:00 2001 From: bingwang Date: Mon, 30 May 2022 10:02:27 +0000 Subject: [PATCH 4/7] Sync code Signed-off-by: bingwang --- cfgmgr/buffermgr.cpp | 38 +++++++++++++++++++++++--------- tests/test_buffer_traditional.py | 4 +++- 2 files changed, 30 insertions(+), 12 deletions(-) diff --git a/cfgmgr/buffermgr.cpp b/cfgmgr/buffermgr.cpp index 8668ed901e..9c200f455c 100644 --- a/cfgmgr/buffermgr.cpp +++ b/cfgmgr/buffermgr.cpp @@ -11,6 +11,7 @@ #include "exec.h" #include "shellcmd.h" #include "warm_restart.h" +#include "converter.h" using namespace std; using namespace swss; @@ -168,12 +169,6 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port) return task_process_status::task_success; } pfc_enable = m_portPfcStatus[port]; - // Replace 2,3,4,6 to 2,3-4,6 to be back compatible - auto pos = pfc_enable.find("3,4"); - if (pos != string::npos) - { - pfc_enable.replace(pos, 3, "3-4"); - } speed = m_speedLookup[port]; // key format is pg_lossless___profile @@ -186,10 +181,27 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port) vector lossless_pgs = tokenize(pfc_enable, ','); + // Convert to bitmap + unsigned long lossless_pg_id = 0; + for (auto pg : lossless_pgs) + { + try + { + uint8_t cur_pg = to_uint(pg); + lossless_pg_id |= (1< lossless_pg_combinations = generateIdListFromMap(lossless_pg_id, sizeof(lossless_pg_id)); if (m_portStatusLookup[port] == "down" && m_platform == "mellanox") { - for (auto lossless_pg : lossless_pgs) + for (auto lossless_pg : lossless_pg_combinations) { // Remove the entry in BUFFER_PG table if any vector fvVectorPg; @@ -264,23 +276,27 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port) SWSS_LOG_NOTICE("Reusing existing profile '%s'", 
buffer_profile_key.c_str()); } - for (auto lossless_pg : lossless_pgs) + for (auto lossless_pg : lossless_pg_combinations) { vector fvVectorPg; string buffer_pg_key = port + m_cfgBufferPgTable.getTableNameSeparator() + lossless_pg; m_cfgBufferPgTable.get(buffer_pg_key, fvVectorPg); - + bool profile_existing = false; /* Check if PG Mapping is already then log message and return. */ for (auto& prop : fvVectorPg) { if ((fvField(prop) == "profile") && (profile_ref == fvValue(prop))) { SWSS_LOG_NOTICE("PG to Buffer Profile Mapping %s already present", buffer_pg_key.c_str()); - continue; + profile_existing = true; + break; } } - + if (profile_existing) + { + continue; + } fvVectorPg.clear(); fvVectorPg.push_back(make_pair("profile", profile_ref)); diff --git a/tests/test_buffer_traditional.py b/tests/test_buffer_traditional.py index f330578564..ce3de0b577 100644 --- a/tests/test_buffer_traditional.py +++ b/tests/test_buffer_traditional.py @@ -81,7 +81,8 @@ def get_pg_name_map(self): def setup_teardown_test(self, dvs): try: self.setup_db(dvs) - self.set_port_qos_table(self.INTF, '2,3,4,6') + self.set_port_qos_table(self.INTF, '3,4') + self.lossless_pg_combinations = ['3-4'] pg_name_map = self.get_pg_name_map() yield pg_name_map finally: @@ -234,6 +235,7 @@ def test_buffer_pg_update(self, dvs, setup_teardown_test): # Add pfc_enable field for extra port self.set_port_qos_table(extra_port, '2,3,4,6') + self.lossless_pg_combinations = ['2-4', '6'] time.sleep(1) # Verify BUFFER_PG is updated when pfc_enable is available for pg in self.lossless_pg_combinations: From b12855291bbc4f27deb40dacefa5eebd75a98a83 Mon Sep 17 00:00:00 2001 From: bingwang-ms <66248323+bingwang-ms@users.noreply.github.com> Date: Sat, 4 Jun 2022 18:04:26 +0800 Subject: [PATCH 5/7] Update buffermgr.cpp --- cfgmgr/buffermgr.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cfgmgr/buffermgr.cpp b/cfgmgr/buffermgr.cpp index 9c200f455c..477bc01f58 100644 --- a/cfgmgr/buffermgr.cpp +++ 
b/cfgmgr/buffermgr.cpp @@ -155,7 +155,7 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port) if (m_portStatusLookup.count(port) == 0) { - // admin_statue is not available yet. This can happen when notification of `PORT_QOS_MAP` table + // admin_status is not available yet. This can happen when notification of `PORT_QOS_MAP` table // comes first. SWSS_LOG_INFO("pfc_enable status is not available for port %s", port.c_str()); return task_process_status::task_need_retry; @@ -301,6 +301,7 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port) fvVectorPg.push_back(make_pair("profile", profile_ref)); m_cfgBufferPgTable.set(buffer_pg_key, fvVectorPg); + SWSS_LOG_INFO("Setting buffer profile to PG %s", buffer_pg_key.c_str()); } return task_process_status::task_success; } @@ -564,7 +565,6 @@ void BufferMgr::doTask(Consumer &consumer) } else if (m_pgfile_processed && table_name == CFG_PORT_TABLE_NAME) { - bool admin_up = false; for (auto i : kfvFieldsValues(t)) { if (fvField(i) == "speed") @@ -580,7 +580,7 @@ void BufferMgr::doTask(Consumer &consumer) if (m_speedLookup.count(port) != 0) { // create/update profile for port - SWSS_LOG_DEBUG("Port %s Speed %s admin status %d", port.c_str(), m_speedLookup[port].c_str(), admin_up); + SWSS_LOG_DEBUG("Port %s Speed %s admin status %d", port.c_str(), m_speedLookup[port].c_str(), m_portStatusLookup[port].c_str()); task_status = doSpeedUpdateTask(port); SWSS_LOG_DEBUG("Return code for doSpeedUpdateTask %d", task_status); } From f329e63a828e2a67ec4fe908961ea7c663bcfdc1 Mon Sep 17 00:00:00 2001 From: bingwang-ms <66248323+bingwang-ms@users.noreply.github.com> Date: Sat, 4 Jun 2022 18:05:16 +0800 Subject: [PATCH 6/7] Update portsorch.cpp --- orchagent/portsorch.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index ed74acd0c0..d8294c8d8f 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -1056,7 +1056,7 @@ bool 
PortsOrch::setPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t pfcwd_b m_portList[p.m_alias] = p; - SWSS_LOG_INFO("Set PFC watchdog port id=0x%" PRIx64 ", bitmast=0x%x", portId, pfcwd_bitmask); + SWSS_LOG_INFO("Set PFC watchdog port id=0x%" PRIx64 ", bitmask=0x%x", portId, pfcwd_bitmask); return true; } From 3a2a912478a89c55c7f913baba6c5dd309fed3c5 Mon Sep 17 00:00:00 2001 From: bingwang-ms <66248323+bingwang-ms@users.noreply.github.com> Date: Sat, 4 Jun 2022 19:49:12 +0800 Subject: [PATCH 7/7] Update buffermgr.cpp --- cfgmgr/buffermgr.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cfgmgr/buffermgr.cpp b/cfgmgr/buffermgr.cpp index 477bc01f58..ee22cd37c9 100644 --- a/cfgmgr/buffermgr.cpp +++ b/cfgmgr/buffermgr.cpp @@ -580,7 +580,7 @@ void BufferMgr::doTask(Consumer &consumer) if (m_speedLookup.count(port) != 0) { // create/update profile for port - SWSS_LOG_DEBUG("Port %s Speed %s admin status %d", port.c_str(), m_speedLookup[port].c_str(), m_portStatusLookup[port].c_str()); + SWSS_LOG_DEBUG("Port %s Speed %s admin status %s", port.c_str(), m_speedLookup[port].c_str(), m_portStatusLookup[port].c_str()); task_status = doSpeedUpdateTask(port); SWSS_LOG_DEBUG("Return code for doSpeedUpdateTask %d", task_status); }