diff --git a/.azure-pipelines/build-template.yml b/.azure-pipelines/build-template.yml index d351d5703a8..632bdb3107c 100644 --- a/.azure-pipelines/build-template.yml +++ b/.azure-pipelines/build-template.yml @@ -96,6 +96,8 @@ jobs: path: '$(Build.SourcesDirectory)/${{ parameters.sairedis_artifact_name }}' displayName: "Download sonic sairedis deb packages" - task: DownloadPipelineArtifact@2 + ${{ if eq(parameters.buildimage_pipeline, 141) }}: + continueOnError: True inputs: source: specific project: build @@ -105,6 +107,24 @@ jobs: runBranch: 'refs/heads/master' path: '$(Build.SourcesDirectory)/${{ parameters.buildimage_artifact_name }}' displayName: "Download sonic buildimage deb packages" + - script: | + buildimage_artifact_downloaded=n + [ -d "$(Build.SourcesDirectory)/${{ parameters.buildimage_artifact_name }}/target" ] && buildimage_artifact_downloaded=y + echo "buildimage_artifact_downloaded=$buildimage_artifact_downloaded" + echo "##vso[task.setvariable variable=buildimage_artifact_downloaded]$buildimage_artifact_downloaded" + condition: eq(${{ parameters.buildimage_pipeline }}, 141) + displayName: "Check if sonic buildimage deb packages downloaded" + - task: DownloadPipelineArtifact@2 + condition: and(eq(variables.buildimage_artifact_downloaded, 'n'), eq(${{ parameters.buildimage_pipeline }}, 141)) + inputs: + source: specific + project: build + pipeline: ${{ parameters.buildimage_pipeline }} + artifact: 'sonic-buildimage.marvell-armhf1' + runVersion: specific + runId: 80637 + path: '$(Build.SourcesDirectory)/${{ parameters.buildimage_artifact_name }}' + displayName: "Download sonic buildimage deb packages from 80637" - script: | cd $(Build.SourcesDirectory)/${{ parameters.buildimage_artifact_name }} sudo dpkg -i target/debs/buster/libnl-3-200_*.deb diff --git a/.azure-pipelines/gcov.yml b/.azure-pipelines/gcov.yml index e58ee2b0a55..37e4371e4a6 100644 --- a/.azure-pipelines/gcov.yml +++ b/.azure-pipelines/gcov.yml @@ -46,11 +46,23 @@ jobs: ${{ if eq(parameters.pool, 'default') }}: vmImage: 'ubuntu-20.04' + variables: + DIFF_COVER_CHECK_THRESHOLD: 0 + DIFF_COVER_ENABLE: 'true' + container: image: sonicdev-microsoft.azurecr.io:443/${{ parameters.sonic_slave }}:latest steps: + - script: | + set -ex + # Install .NET CORE + curl -sSL https://packages.microsoft.com/keys/microsoft.asc | sudo apt-key add - + sudo apt-add-repository https://packages.microsoft.com/debian/10/prod + sudo apt-get update + sudo apt-get install -y dotnet-sdk-5.0 + displayName: "Install .NET CORE" - script: | sudo apt-get install -y lcov displayName: "Install dependencies" @@ -93,6 +105,7 @@ jobs: sudo ./gcov_support.sh generate sudo ./gcov_support.sh merge_container_info $(Build.ArtifactStagingDirectory) sudo cp -rf gcov_output $(Build.ArtifactStagingDirectory) + sudo cp -rf $(Build.ArtifactStagingDirectory)/gcov_output/AllMergeReport/coverage.xml $(System.DefaultWorkingDirectory)/ ls -lh $(Build.ArtifactStagingDirectory) popd workingDirectory: $(Pipeline.Workspace) diff --git a/.azure-pipelines/test-docker-sonic-vs-template.yml b/.azure-pipelines/test-docker-sonic-vs-template.yml index 237778af4a1..002b7749e10 100644 --- a/.azure-pipelines/test-docker-sonic-vs-template.yml +++ b/.azure-pipelines/test-docker-sonic-vs-template.yml @@ -1,7 +1,7 @@ parameters: - name: timeout type: number - default: 240 + default: 360 - name: log_artifact_name type: string @@ -21,10 +21,13 @@ jobs: displayName: vstest timeoutInMinutes: ${{ parameters.timeout }} - pool: - vmImage: 'ubuntu-20.04' + pool: sonic-common steps: + - script: | + ls -A1 | xargs -I{} sudo rm -rf {} + displayName: "Clean workspace" + - checkout: self - task: DownloadPipelineArtifact@2 inputs: artifact: docker-sonic-vs diff --git a/.gitignore b/.gitignore index bfba2723056..c2522ba7117 100644 --- a/.gitignore +++ b/.gitignore @@ -85,3 +85,5 @@ tests/mock_tests/tests.trs tests/test-suite.log tests/tests.log tests/tests.trs +orchagent/p4orch/tests/**/*gcda +orchagent/p4orch/tests/**/*gcno diff --git a/cfgmgr/buffermgrdyn.cpp b/cfgmgr/buffermgrdyn.cpp index 0888e9e6c64..b4578c2370b 100644 --- a/cfgmgr/buffermgrdyn.cpp +++ b/cfgmgr/buffermgrdyn.cpp @@ -22,7 +22,6 @@ * In internal maps: table name removed from the index * 2. Maintain maps for pools, profiles and PGs in CONFIG_DB and APPL_DB * 3. Keys of maps in this file don't contain the TABLE_NAME - * 3. */ using namespace std; using namespace swss; @@ -37,6 +36,7 @@ BufferMgrDynamic::BufferMgrDynamic(DBConnector *cfgDb, DBConnector *stateDb, DBC m_zeroProfilesLoaded(false), m_supportRemoving(true), m_cfgDefaultLosslessBufferParam(cfgDb, CFG_DEFAULT_LOSSLESS_BUFFER_PARAMETER), + m_cfgDeviceMetaDataTable(cfgDb, CFG_DEVICE_METADATA_TABLE_NAME), m_applBufferPoolTable(applDb, APP_BUFFER_POOL_TABLE_NAME), m_applBufferProfileTable(applDb, APP_BUFFER_PROFILE_TABLE_NAME), m_applBufferObjectTables({ProducerStateTable(applDb, APP_BUFFER_PG_TABLE_NAME), ProducerStateTable(applDb, APP_BUFFER_QUEUE_TABLE_NAME)}), @@ -73,6 +73,30 @@ BufferMgrDynamic::BufferMgrDynamic(DBConnector *cfgDb, DBConnector *stateDb, DBC string checkHeadroomPluginName = "buffer_check_headroom_" + platform + ".lua"; m_platform = platform; + m_specific_platform = platform; // default for non-Mellanox + m_model_number = 0; + + // Retrieve the type of mellanox platform + if (m_platform == "mellanox") + { + m_cfgDeviceMetaDataTable.hget("localhost", "platform", m_specific_platform); + if (!m_specific_platform.empty()) + { + // Mellanox model number follows "sn" in the platform name and is 4 digits long + std::size_t sn_pos = m_specific_platform.find("sn"); + if (sn_pos != std::string::npos) + { + std::string model_number = m_specific_platform.substr (sn_pos + 2, 4); + if (!model_number.empty()) + { + m_model_number = atoi(model_number.c_str()); + } + } + } + if (!m_model_number) { + SWSS_LOG_ERROR("Failed to retrieve Mellanox model number"); + } + } try { @@ -471,7 +495,9 @@ string BufferMgrDynamic::getDynamicProfileName(const string &speed, const string if (m_platform == "mellanox") { - if ((speed != "400000") && (lane_count == 8)) + if ((lane_count == 8) && + (((m_model_number / 1000 == 4) && (speed != "400000")) || + ((m_model_number / 1000 == 5) && (speed != "800000")))) { // On Mellanox platform, ports with 8 lanes have different(double) xon value then other ports // For ports at speed other than 400G can have @@ -482,7 +508,8 @@ string BufferMgrDynamic::getDynamicProfileName(const string &speed, const string // Eg. // - A 100G port with 8 lanes will use buffer profile "pg_profile_100000_5m_8lane_profile" // - A 100G port with 4 lanes will use buffer profile "pg_profile_100000_5m_profile" - // Currently, 400G ports can only have 8 lanes. So we don't add this to the profile + // Currently, for 4xxx models, 400G ports can only have 8 lanes, + // and for 5xxx models, 800G ports can only have 8 lanes. So we don't add this to the profile. buffer_profile_key = buffer_profile_key + "_8lane"; } } diff --git a/cfgmgr/buffermgrdyn.h b/cfgmgr/buffermgrdyn.h index d316aee73cb..ef1e4f567f5 100644 --- a/cfgmgr/buffermgrdyn.h +++ b/cfgmgr/buffermgrdyn.h @@ -150,7 +150,10 @@ class BufferMgrDynamic : public Orch using Orch::doTask; private: - std::string m_platform; + std::string m_platform; // vendor, e.g. "mellanox" + std::string m_specific_platform; // name of platform, e.g. "x86_64-mlnx_msn3420-r0" + unsigned int m_model_number; // model number extracted from specific platform, e.g. 3420 + std::vector m_bufferDirections; const std::string m_bufferObjectNames[BUFFER_DIR_MAX]; const std::string m_bufferDirectionNames[BUFFER_DIR_MAX]; @@ -234,7 +237,7 @@ class BufferMgrDynamic : public Orch // Other tables Table m_cfgDefaultLosslessBufferParam; - + Table m_cfgDeviceMetaDataTable; Table m_stateBufferMaximumTable; Table m_applPortTable; diff --git a/cfgmgr/coppmgr.cpp b/cfgmgr/coppmgr.cpp index 834b2c5ff0b..1721cc8593f 100644 --- a/cfgmgr/coppmgr.cpp +++ b/cfgmgr/coppmgr.cpp @@ -78,31 +78,42 @@ bool CoppMgr::checkTrapGroupPending(string trap_group_name) /* Feature name and CoPP Trap table name must match */ void CoppMgr::setFeatureTrapIdsStatus(string feature, bool enable) { - bool disabled_trap = (m_coppDisabledTraps.find(feature) != m_coppDisabledTraps.end()); - - if ((enable && !disabled_trap) || (!enable && disabled_trap)) + bool disabled_trap {true}; + string always_enabled; + if (m_coppTrapConfMap.find(feature) != m_coppTrapConfMap.end()) { - return; + always_enabled = m_coppTrapConfMap[feature].is_always_enabled; + } + if (always_enabled == "true" || isFeatureEnabled(feature)) + { + disabled_trap = false; } - if (m_coppTrapConfMap.find(feature) == m_coppTrapConfMap.end()) + if ((enable && !disabled_trap) || (!enable && disabled_trap)) { - if (!enable) - { - m_coppDisabledTraps.insert(feature); - } return; } + string trap_group = m_coppTrapConfMap[feature].trap_group; bool prev_group_state = checkTrapGroupPending(trap_group); - if (!enable) + // update features cache + auto state = "disabled"; + if (enable) { - m_coppDisabledTraps.insert(feature); + state = "enabled"; } - else + if (m_featuresCfgTable.find(feature) != m_featuresCfgTable.end()) { - m_coppDisabledTraps.erase(feature); + auto vect = m_featuresCfgTable[feature]; + for (long unsigned int i=0; i < vect.size(); i++) + { + if (vect[i].first == "state") + { + vect[i].second = state; + } + } + m_featuresCfgTable.at(feature) = vect; } /* Trap group moved to pending state when feature is disabled. Remove trap group @@ -140,25 +151,46 @@ void CoppMgr::setFeatureTrapIdsStatus(string feature, bool enable) } } -bool CoppMgr::isTrapIdDisabled(string trap_id) +bool CoppMgr::isFeatureEnabled(std::string feature) { - for (auto &m: m_coppDisabledTraps) + if (m_featuresCfgTable.find(feature) != m_featuresCfgTable.end()) { - if (m_coppTrapConfMap.find(m) == m_coppTrapConfMap.end()) + std::vector feature_fvs = m_featuresCfgTable[feature]; + for (auto i: feature_fvs) { - continue; + if (fvField(i) == "state" && (fvValue(i) == "enabled" || fvValue(i) == "always_enabled")) + { + return true; + } } - vector trap_id_list; + } + return false; +} - trap_id_list = tokenize(m_coppTrapConfMap[m].trap_ids, list_item_delimiter); - if(std::find(trap_id_list.begin(), trap_id_list.end(), trap_id) != trap_id_list.end()) +bool CoppMgr::isTrapIdDisabled(string trap_id) +{ + // check if trap is always_enabled + string trap_name; + for (auto &t: m_coppTrapConfMap) + { + if (m_coppTrapConfMap[t.first].trap_ids.find(trap_id) != string::npos) { - return true; + trap_name = t.first; + if (m_coppTrapConfMap[t.first].is_always_enabled == "true") + { + return false; + } + break; } + } + if (isFeatureEnabled(trap_name)) + { + return false; } - return false; + return true; } + void CoppMgr::mergeConfig(CoppCfg &init_cfg, CoppCfg &m_cfg, std::vector &cfg_keys, Table &cfgTable) { /* Read the init configuration first. If the same key is present in @@ -254,14 +286,7 @@ CoppMgr::CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c { std::vector feature_fvs; m_cfgFeatureTable.get(i, feature_fvs); - - for (auto j: feature_fvs) - { - if (fvField(j) == "state" && fvValue(j) == "disabled") - { - m_coppDisabledTraps.insert(i); - } - } + m_featuresCfgTable.emplace(i, feature_fvs); } mergeConfig(m_coppTrapInitCfg, trap_cfg, trap_cfg_keys, m_cfgCoppTrapTable); @@ -270,6 +295,7 @@ CoppMgr::CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c { string trap_group; string trap_ids; + string is_always_enabled = "false"; std::vector trap_fvs = i.second; for (auto j: trap_fvs) @@ -282,13 +308,22 @@ CoppMgr::CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c { trap_group = fvValue(j); } + else if (fvField(j) == COPP_ALWAYS_ENABLED_FIELD) + { + is_always_enabled = fvValue(j); + } } + if (!trap_group.empty() && !trap_ids.empty()) { addTrapIdsToTrapGroup(trap_group, trap_ids); m_coppTrapConfMap[i.first].trap_group = trap_group; m_coppTrapConfMap[i.first].trap_ids = trap_ids; - setCoppTrapStateOk(i.first); + m_coppTrapConfMap[i.first].is_always_enabled = is_always_enabled; + if (is_always_enabled == "true" || isFeatureEnabled(i.first)) + { + setCoppTrapStateOk(i.first); + } } } @@ -384,7 +419,6 @@ void CoppMgr::removeTrapIdsFromTrapGroup(string trap_group, string trap_ids) void CoppMgr::getTrapGroupTrapIds(string trap_group, string &trap_ids) { - trap_ids.clear(); for (auto it: m_coppTrapIdTrapGroupMap) { @@ -406,6 +440,36 @@ void CoppMgr::getTrapGroupTrapIds(string trap_group, string &trap_ids) } } +void CoppMgr::removeTrap(string key) +{ + string trap_ids; + std::vector fvs; + removeTrapIdsFromTrapGroup(m_coppTrapConfMap[key].trap_group, m_coppTrapConfMap[key].trap_ids); + getTrapGroupTrapIds(m_coppTrapConfMap[key].trap_group, trap_ids); + FieldValueTuple fv(COPP_TRAP_ID_LIST_FIELD, trap_ids); + fvs.push_back(fv); + if (!checkTrapGroupPending(m_coppTrapConfMap[key].trap_group)) + { + m_appCoppTable.set(m_coppTrapConfMap[key].trap_group, fvs); + setCoppGroupStateOk(m_coppTrapConfMap[key].trap_group); + } +} + +void CoppMgr::addTrap(string trap_ids, string trap_group) +{ + string trap_group_trap_ids; + std::vector fvs; + addTrapIdsToTrapGroup(trap_group, trap_ids); + getTrapGroupTrapIds(trap_group, trap_group_trap_ids); + FieldValueTuple fv1(COPP_TRAP_ID_LIST_FIELD, trap_group_trap_ids); + fvs.push_back(fv1); + if (!checkTrapGroupPending(trap_group)) + { + m_appCoppTable.set(trap_group, fvs); + setCoppGroupStateOk(trap_group); + } +} + void CoppMgr::doCoppTrapTask(Consumer &consumer) { auto it = consumer.m_toSync.begin(); @@ -418,12 +482,21 @@ void CoppMgr::doCoppTrapTask(Consumer &consumer) vector fvs; string trap_ids = ""; string trap_group = ""; + string is_always_enabled = ""; bool conf_present = false; if (m_coppTrapConfMap.find(key) != m_coppTrapConfMap.end()) { trap_ids = m_coppTrapConfMap[key].trap_ids; trap_group = m_coppTrapConfMap[key].trap_group; + if (m_coppTrapConfMap[key].is_always_enabled.empty()) + { + is_always_enabled = "false"; + } + else + { + is_always_enabled = m_coppTrapConfMap[key].is_always_enabled; + } conf_present = true; } @@ -441,6 +514,10 @@ void CoppMgr::doCoppTrapTask(Consumer &consumer) { trap_ids = fvValue(i); } + else if (fvField(i) == COPP_ALWAYS_ENABLED_FIELD) + { + is_always_enabled = fvValue(i); + } else if (fvField(i) == "NULL") { null_cfg = true; @@ -450,20 +527,9 @@ void CoppMgr::doCoppTrapTask(Consumer &consumer) { if (conf_present) { - SWSS_LOG_DEBUG("Deleting trap key %s", key.c_str()); - removeTrapIdsFromTrapGroup(m_coppTrapConfMap[key].trap_group, - m_coppTrapConfMap[key].trap_ids); - trap_ids.clear(); + removeTrap(key); setCoppTrapStateOk(key); - getTrapGroupTrapIds(m_coppTrapConfMap[key].trap_group, trap_ids); - fvs.clear(); - FieldValueTuple fv(COPP_TRAP_ID_LIST_FIELD, trap_ids); - fvs.push_back(fv); - if (!checkTrapGroupPending(m_coppTrapConfMap[key].trap_group)) - { - m_appCoppTable.set(m_coppTrapConfMap[key].trap_group, fvs); - setCoppGroupStateOk(m_coppTrapConfMap[key].trap_group); - } + m_coppTrapConfMap.erase(key); } it = consumer.m_toSync.erase(it); @@ -472,38 +538,126 @@ void CoppMgr::doCoppTrapTask(Consumer &consumer) /*Duplicate check*/ if (conf_present && (trap_group == m_coppTrapConfMap[key].trap_group) && - (trap_ids == m_coppTrapConfMap[key].trap_ids)) + (trap_ids == m_coppTrapConfMap[key].trap_ids) && + (is_always_enabled == m_coppTrapConfMap[key].is_always_enabled)) { it = consumer.m_toSync.erase(it); continue; } + /* Incomplete configuration. Do not process until both trap group * and trap_ids are available */ if (trap_group.empty() || trap_ids.empty()) { + if (is_always_enabled.empty()) + { + it = consumer.m_toSync.erase(it); + continue; + } + + if (is_always_enabled != m_coppTrapConfMap[key].is_always_enabled) + { + m_coppTrapConfMap[key].is_always_enabled = is_always_enabled; + if (is_always_enabled == "true") + { + if (m_coppTrapConfMap.find(key) != m_coppTrapConfMap.end()) + { + addTrap(m_coppTrapConfMap[key].trap_ids, m_coppTrapConfMap[key].trap_group); + } + // else if it has info in the init cfg map + else if (m_coppTrapInitCfg.find(key) != m_coppTrapInitCfg.end()) + { + auto fvs = m_coppTrapInitCfg[key]; + string init_trap_ids = ""; + string init_trap_group = ""; + for (auto i: fvs) + { + if (fvField(i) == COPP_TRAP_GROUP_FIELD) + { + init_trap_group = fvValue(i); + } + else if (fvField(i) == COPP_TRAP_ID_LIST_FIELD) + { + init_trap_ids = fvValue(i); + } + } + addTrap(init_trap_ids, init_trap_group); + } + } + else + { + /* if the value was changed from true to false, + check if there is a feature enabled. + if no, remove the trap. is yes, do nothing. */ + + m_coppTrapConfMap[key].is_always_enabled = is_always_enabled; + if (isFeatureEnabled(key)) + { + it = consumer.m_toSync.erase(it); + continue; + } + + removeTrap(key); + delCoppTrapStateOk(key); + } + it = consumer.m_toSync.erase(it); + continue; + } + } + /* if always_enabled field has been changed */ + if (conf_present && + (trap_group == m_coppTrapConfMap[key].trap_group) && + (trap_ids == m_coppTrapConfMap[key].trap_ids)) + { + m_coppTrapConfMap[key].is_always_enabled = is_always_enabled; + if (is_always_enabled == "true") + { + /* if the value was changed from false to true, + if the trap is not installed, install it. + otherwise, do nothing. */ + + // if the feature was not enabled, install the trap + if (!isFeatureEnabled(key)) + { + addTrap(trap_ids, trap_group); + } + + it = consumer.m_toSync.erase(it); + continue; + } + else + { + /* if the value was changed from true to false, + check if there is a feature enabled. + if no, remove the trap. is yes, do nothing. */ + + if (isFeatureEnabled(key)) + { + it = consumer.m_toSync.erase(it); + continue; + } + + removeTrap(key); + delCoppTrapStateOk(key); + } it = consumer.m_toSync.erase(it); continue; } + /* Remove the current trap IDs and add the new trap IDS to recompute the - * trap IDs for the trap group + * trap IDs for the trap group */ if (conf_present) { removeTrapIdsFromTrapGroup(m_coppTrapConfMap[key].trap_group, m_coppTrapConfMap[key].trap_ids); } - fvs.clear(); - string trap_group_trap_ids; - addTrapIdsToTrapGroup(trap_group, trap_ids); - getTrapGroupTrapIds(trap_group, trap_group_trap_ids); - FieldValueTuple fv1(COPP_TRAP_ID_LIST_FIELD, trap_group_trap_ids); - fvs.push_back(fv1); - if (!checkTrapGroupPending(trap_group)) - { - m_appCoppTable.set(trap_group, fvs); - setCoppGroupStateOk(trap_group); - } + + m_coppTrapConfMap[key].trap_group = trap_group; + m_coppTrapConfMap[key].trap_ids = trap_ids; + m_coppTrapConfMap[key].is_always_enabled = is_always_enabled; + addTrap(trap_ids, trap_group); /* When the trap table's trap group is changed, the old trap group * should also be reprogrammed as some of its associated traps got @@ -511,7 +665,7 @@ void CoppMgr::doCoppTrapTask(Consumer &consumer) */ if (conf_present && (trap_group != m_coppTrapConfMap[key].trap_group)) { - trap_group_trap_ids.clear(); + string trap_group_trap_ids; fvs.clear(); getTrapGroupTrapIds(m_coppTrapConfMap[key].trap_group, trap_group_trap_ids); FieldValueTuple fv2(COPP_TRAP_ID_LIST_FIELD, trap_group_trap_ids); @@ -524,6 +678,7 @@ void CoppMgr::doCoppTrapTask(Consumer &consumer) } m_coppTrapConfMap[key].trap_group = trap_group; m_coppTrapConfMap[key].trap_ids = trap_ids; + m_coppTrapConfMap[key].is_always_enabled = is_always_enabled; setCoppTrapStateOk(key); } else if (op == DEL_COMMAND) @@ -546,8 +701,9 @@ void CoppMgr::doCoppTrapTask(Consumer &consumer) setCoppGroupStateOk(m_coppTrapConfMap[key].trap_group); } } - if (conf_present) + if (conf_present && !m_coppTrapConfMap[key].trap_group.empty() && !m_coppTrapConfMap[key].trap_ids.empty()) { + m_coppTrapConfMap.erase(key); } delCoppTrapStateOk(key); @@ -559,6 +715,7 @@ void CoppMgr::doCoppTrapTask(Consumer &consumer) if (m_coppTrapInitCfg.find(key) != m_coppTrapInitCfg.end()) { auto fvs = m_coppTrapInitCfg[key]; + is_always_enabled.clear(); for (auto i: fvs) { if (fvField(i) == COPP_TRAP_GROUP_FIELD) @@ -569,21 +726,24 @@ void CoppMgr::doCoppTrapTask(Consumer &consumer) { trap_ids = fvValue(i); } + else if (fvField(i) == COPP_ALWAYS_ENABLED_FIELD) + { + is_always_enabled = fvValue(i); + } } - vector g_fvs; - string trap_group_trap_ids; - addTrapIdsToTrapGroup(trap_group, trap_ids); - getTrapGroupTrapIds(trap_group, trap_group_trap_ids); - FieldValueTuple fv1(COPP_TRAP_ID_LIST_FIELD, trap_group_trap_ids); - g_fvs.push_back(fv1); - if (!checkTrapGroupPending(trap_group)) + if (is_always_enabled.empty()) { - m_appCoppTable.set(trap_group, g_fvs); - setCoppGroupStateOk(trap_group); + is_always_enabled = "false"; } + m_coppTrapConfMap[key].trap_group = trap_group; m_coppTrapConfMap[key].trap_ids = trap_ids; - setCoppTrapStateOk(key); + m_coppTrapConfMap[key].is_always_enabled = is_always_enabled; + if (is_always_enabled == "true" || isFeatureEnabled(key)) + { + addTrap(trap_ids, trap_group); + setCoppTrapStateOk(key); + } } } it = consumer.m_toSync.erase(it); @@ -706,6 +866,7 @@ void CoppMgr::doCoppGroupTask(Consumer &consumer) } } + void CoppMgr::doFeatureTask(Consumer &consumer) { auto it = consumer.m_toSync.begin(); @@ -715,17 +876,20 @@ void CoppMgr::doFeatureTask(Consumer &consumer) string key = kfvKey(t); string op = kfvOp(t); - vector fvs; string trap_ids; if (op == SET_COMMAND) { + if (m_featuresCfgTable.find(key) == m_featuresCfgTable.end()) + { + m_featuresCfgTable.emplace(key, kfvFieldsValues(t)); + } for (auto i : kfvFieldsValues(t)) { if (fvField(i) == "state") { bool status = false; - if (fvValue(i) == "enabled") + if (fvValue(i) == "enabled" || fvValue(i) == "always_enabled") { status = true; } diff --git a/cfgmgr/coppmgr.h b/cfgmgr/coppmgr.h index b010489f2ee..1d53756fceb 100644 --- a/cfgmgr/coppmgr.h +++ b/cfgmgr/coppmgr.h @@ -14,6 +14,7 @@ namespace swss { /* COPP Trap Table Fields */ #define COPP_TRAP_ID_LIST_FIELD "trap_ids" #define COPP_TRAP_GROUP_FIELD "trap_group" +#define COPP_ALWAYS_ENABLED_FIELD "always_enabled" /* COPP Group Table Fields */ #define COPP_GROUP_QUEUE_FIELD "queue" @@ -42,6 +43,7 @@ struct CoppTrapConf { std::string trap_ids; std::string trap_group; + std::string is_always_enabled; }; /* TrapName to TrapConf map */ @@ -70,10 +72,10 @@ class CoppMgr : public Orch CoppTrapConfMap m_coppTrapConfMap; CoppTrapIdTrapGroupMap m_coppTrapIdTrapGroupMap; CoppGroupFvs m_coppGroupFvs; - std::set m_coppDisabledTraps; CoppCfg m_coppGroupInitCfg; CoppCfg m_coppTrapInitCfg; - + CoppCfg m_featuresCfgTable; + void doTask(Consumer &consumer); void doCoppGroupTask(Consumer &consumer); @@ -96,8 +98,13 @@ class CoppMgr : public Orch std::vector &modified_fvs); void parseInitFile(void); bool isTrapGroupInstalled(std::string key); + bool isFeatureEnabled(std::string feature); void mergeConfig(CoppCfg &init_cfg, CoppCfg &m_cfg, std::vector &cfg_keys, Table &cfgTable); + void removeTrap(std::string key); + void addTrap(std::string trap_ids, std::string trap_group); + + }; } diff --git a/cfgmgr/intfmgr.cpp b/cfgmgr/intfmgr.cpp index 8489d09bb37..93281dbcd96 100644 --- a/cfgmgr/intfmgr.cpp +++ b/cfgmgr/intfmgr.cpp @@ -67,6 +67,13 @@ IntfMgr::IntfMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c setWarmReplayDoneState(); } } + + string swtype; + Table cfgDeviceMetaDataTable(cfgDb, CFG_DEVICE_METADATA_TABLE_NAME); + if(cfgDeviceMetaDataTable.hget("localhost", "switch_type", swtype)) + { + mySwitchType = swtype; + } } void IntfMgr::setIntfIp(const string &alias, const string &opCmd, @@ -86,9 +93,23 @@ void IntfMgr::setIntfIp(const string &alias, const string &opCmd, } else { + string metric = ""; + // Kernel adds connected route with default metric of 256. But the metric is not + // communicated to frr unless the ip address is added with explicit metric + // In voq system, We need the static route to the remote neighbor and connected + // route to have the same metric to enable BGP to choose paths from routes learned + // via eBGP and iBGP over the internal inband port be part of same ecmp group. + // For v4 both the metrics (connected and static) are default 0 so we do not need + // to set the metric explicitly. + if(mySwitchType == "voq") + { + metric = " metric 256"; + } + (prefixLen < 127) ? - (cmd << IP_CMD << " -6 address " << shellquote(opCmd) << " " << shellquote(ipPrefixStr) << " broadcast " << shellquote(broadcastIpStr) << " dev " << shellquote(alias)) : - (cmd << IP_CMD << " -6 address " << shellquote(opCmd) << " " << shellquote(ipPrefixStr) << " dev " << shellquote(alias)); + (cmd << IP_CMD << " -6 address " << shellquote(opCmd) << " " << shellquote(ipPrefixStr) << " broadcast " << shellquote(broadcastIpStr) << + " dev " << shellquote(alias) << metric) : + (cmd << IP_CMD << " -6 address " << shellquote(opCmd) << " " << shellquote(ipPrefixStr) << " dev " << shellquote(alias) << metric); } int ret = swss::exec(cmd.str(), res); diff --git a/cfgmgr/intfmgr.h b/cfgmgr/intfmgr.h index 84c0020eb05..683e208c0e9 100644 --- a/cfgmgr/intfmgr.h +++ b/cfgmgr/intfmgr.h @@ -37,6 +37,7 @@ class IntfMgr : public Orch std::set m_loopbackIntfList; std::set m_pendingReplayIntfList; std::set m_ipv6LinkLocalModeList; + std::string mySwitchType; void setIntfIp(const std::string &alias, const std::string &opCmd, const IpPrefix &ipPrefix); void setIntfVrf(const std::string &alias, const std::string &vrfName); diff --git a/cfgmgr/nbrmgr.cpp b/cfgmgr/nbrmgr.cpp index 39d8edf9b08..d6d5f410e10 100644 --- a/cfgmgr/nbrmgr.cpp +++ b/cfgmgr/nbrmgr.cpp @@ -509,7 +509,12 @@ bool NbrMgr::addKernelRoute(string odev, IpAddress ip_addr) } else { - cmd = string("") + IP_CMD + " -6 route add " + ip_str + "/128 dev " + odev; + // In voq system, We need the static route to the remote neighbor and connected + // route to have the same metric to enable BGP to choose paths from routes learned + // via eBGP and iBGP over the internal inband port be part of same ecmp group. + // For v4 both the metrics (connected and static) are default 0 so we do not need + // to set the metric explicitly. + cmd = string("") + IP_CMD + " -6 route add " + ip_str + "/128 dev " + odev + " metric 256"; SWSS_LOG_NOTICE("IPv6 Route Add cmd: %s",cmd.c_str()); } diff --git a/cfgmgr/tunnelmgr.cpp b/cfgmgr/tunnelmgr.cpp index 7f4dc4dd3d4..a81438470fe 100644 --- a/cfgmgr/tunnelmgr.cpp +++ b/cfgmgr/tunnelmgr.cpp @@ -9,6 +9,7 @@ #include "tokenize.h" #include "shellcmd.h" #include "exec.h" +#include "warm_restart.h" using namespace std; using namespace swss; @@ -107,7 +108,8 @@ static int cmdIpTunnelRouteDel(const std::string& pfx, std::string & res) TunnelMgr::TunnelMgr(DBConnector *cfgDb, DBConnector *appDb, const std::vector &tableNames) : Orch(cfgDb, tableNames), m_appIpInIpTunnelTable(appDb, APP_TUNNEL_DECAP_TABLE_NAME), - m_cfgPeerTable(cfgDb, CFG_PEER_SWITCH_TABLE_NAME) + m_cfgPeerTable(cfgDb, CFG_PEER_SWITCH_TABLE_NAME), + m_cfgTunnelTable(cfgDb, CFG_TUNNEL_TABLE_NAME) { std::vector peer_keys; m_cfgPeerTable.getKeys(peer_keys); @@ -126,6 +128,23 @@ TunnelMgr::TunnelMgr(DBConnector *cfgDb, DBConnector *appDb, const std::vector tunnel_keys; + m_cfgTunnelTable.getKeys(tunnel_keys); + + for (auto tunnel: tunnel_keys) + { + m_tunnelReplay.insert(tunnel); + } + if (m_tunnelReplay.empty()) + { + finalizeWarmReboot(); + } + + } + auto consumerStateTable = new swss::ConsumerStateTable(appDb, APP_TUNNEL_ROUTE_TABLE_NAME, TableConsumable::DEFAULT_POP_BATCH_SIZE, default_orch_pri); @@ -191,6 +210,11 @@ void TunnelMgr::doTask(Consumer &consumer) ++it; } } + + if (!replayDone && m_tunnelReplay.empty() && WarmStart::isWarmStart()) + { + finalizeWarmReboot(); + } } bool TunnelMgr::doTunnelTask(const KeyOpFieldsValuesTuple & t) @@ -230,8 +254,16 @@ bool TunnelMgr::doTunnelTask(const KeyOpFieldsValuesTuple & t) SWSS_LOG_NOTICE("Peer/Remote IP not configured"); } - m_appIpInIpTunnelTable.set(tunnelName, kfvFieldsValues(t)); + /* If the tunnel is already in hardware (i.e. present in the replay), + * don't try to create it again since it will cause an OA crash + * (warmboot case) + */ + if (m_tunnelReplay.find(tunnelName) == m_tunnelReplay.end()) + { + m_appIpInIpTunnelTable.set(tunnelName, kfvFieldsValues(t)); + } } + m_tunnelReplay.erase(tunnelName); m_tunnelCache[tunnelName] = tunInfo; } else @@ -356,3 +388,13 @@ bool TunnelMgr::configIpTunnel(const TunnelInfo& tunInfo) return true; } + + +void TunnelMgr::finalizeWarmReboot() +{ + replayDone = true; + WarmStart::setWarmStartState("tunnelmgrd", WarmStart::REPLAYED); + SWSS_LOG_NOTICE("tunnelmgrd warmstart state set to REPLAYED"); + WarmStart::setWarmStartState("tunnelmgrd", WarmStart::RECONCILED); + SWSS_LOG_NOTICE("tunnelmgrd warmstart state set to RECONCILED"); +} diff --git a/cfgmgr/tunnelmgr.h b/cfgmgr/tunnelmgr.h index e2b601abe92..53d2f272786 100644 --- a/cfgmgr/tunnelmgr.h +++ b/cfgmgr/tunnelmgr.h @@ -4,6 +4,8 @@ #include "producerstatetable.h" #include "orch.h" +#include + namespace swss { struct TunnelInfo @@ -28,12 +30,18 @@ class TunnelMgr : public Orch bool configIpTunnel(const TunnelInfo& info); + void finalizeWarmReboot(); + ProducerStateTable m_appIpInIpTunnelTable; Table m_cfgPeerTable; + Table m_cfgTunnelTable; std::map m_tunnelCache; std::map m_intfCache; std::string m_peerIp; + + std::set m_tunnelReplay; + bool replayDone = false; }; } diff --git a/cfgmgr/tunnelmgrd.cpp b/cfgmgr/tunnelmgrd.cpp index 0165eb94b5f..0a6a84eaeb5 100644 --- a/cfgmgr/tunnelmgrd.cpp +++ b/cfgmgr/tunnelmgrd.cpp @@ -11,6 +11,7 @@ #include "exec.h" #include "schema.h" #include "tunnelmgr.h" +#include "warm_restart.h" using namespace std; using namespace swss; @@ -54,6 +55,9 @@ int main(int argc, char **argv) DBConnector cfgDb("CONFIG_DB", 0); DBConnector appDb("APPL_DB", 0); + WarmStart::initialize("tunnelmgrd", "swss"); + WarmStart::checkWarmStart("tunnelmgrd", "swss"); + TunnelMgr tunnelmgr(&cfgDb, &appDb, cfgTunTables); std::vector cfgOrchList = {&tunnelmgr}; diff --git a/configure.ac b/configure.ac index 81ee0a50176..5e5ce44171a 100644 --- a/configure.ac +++ b/configure.ac @@ -138,6 +138,7 @@ AC_CONFIG_FILES([ swssconfig/Makefile cfgmgr/Makefile tests/Makefile + orchagent/p4orch/tests/Makefile ]) # If no SAI library is installed, compile with SAIVS and run unit tests diff --git a/debian/rules b/debian/rules index a8a8b835fba..a594bb54d40 100755 --- a/debian/rules +++ b/debian/rules @@ -29,7 +29,7 @@ include /usr/share/dpkg/default.mk ifeq ($(ENABLE_GCOV), y) override_dh_auto_configure: - dh_auto_configure -- --enable-gcov + dh_auto_configure -- --enable-gcov CFLAGS="-g -O0" CXXFLAGS="-g -O0" endif override_dh_auto_install: diff --git a/doc/Configuration.md b/doc/Configuration.md deleted file mode 100644 index 40865366f62..00000000000 --- a/doc/Configuration.md +++ /dev/null @@ -1,1433 +0,0 @@ -# SONiC Configuration Database Manual - -Table of Contents -================= - - * [Introduction](#introduction) - * [Configuration](#configuration) - * [Config Load and Save](#config-load-and-save) - * [Incremental Configuration](#incremental-configuration) - * [Redis and Json Schema](#redis-and-json-schema) - * [ACL and Mirroring](#acl-and-mirroring) - * [BGP Sessions](#bgp-sessions) - * [BUFFER_PG](#buffer_pg) - * [Buffer pool](#buffer-pool) - * [Buffer profile](#buffer-profile) - * [Buffer queue](#buffer-queue) - * [Cable length](#cable-length) - * [COPP_TABLE](#copp_table) - * [CRM](#crm) - * [Data Plane L3 Interfaces](#data-plane-l3-interfaces) - * [DEFAULT_LOSSLESS_BUFFER_PARAMETER](#DEFAULT_LOSSLESS_BUFFER_PARAMETER) - * [Device Metadata](#device-metadata) - * [Device neighbor metada](#device-neighbor-metada) - * [DSCP_TO_TC_MAP](#dscp_to_tc_map) - * [FLEX_COUNTER_TABLE](#flex_counter_table) - * [L2 Neighbors](#l2-neighbors) - * [Loopback Interface](#loopback-interface) - * [LOSSLESS_TRAFFIC_PATTERN](#LOSSLESS_TRAFFIC_PATTERN) - * [Management Interface](#management-interface) - * [Management port](#management-port) - * [Management VRF](#management-vrf) - * [MAP_PFC_PRIORITY_TO_QUEUE](#map_pfc_priority_to_queue) - * [NTP Global Configuration](#ntp-global-configuration) - * [NTP and SYSLOG servers](#ntp-and-syslog-servers) - * [Port](#port) - * [Port Channel](#port-channel) - * [Portchannel member](#portchannel-member) - * [Scheduler](#scheduler) - * [Port QoS Map](#port-qos-map) - * [Queue](#queue) - * [Tacplus Server](#tacplus-server) - * [TC to Priority group map](#tc-to-priority-group-map) - * [TC to Queue map](#tc-to-queue-map) - * [Versions](#versions) - * [VLAN](#vlan) - * [VLAN_MEMBER](#vlan_member) - * [Virtual router](#virtual-router) - * [WRED_PROFILE](#wred_profile) - * [For Developers](#for-developers) - * [Generating Application Config by Jinja2 Template](#generating-application-config-by-jinja2-template) - * [Incremental Configuration by Subscribing to ConfigDB](#incremental-configuration-by-subscribing-to-configdb) - - - -# Introduction -This document lists the configuration commands schema applied in the SONiC eco system. All these commands find relevance in collecting system information, analysis and even for trouble shooting. All the commands are categorized under relevant topics with corresponding examples. - -# Configuration - -SONiC is managing configuration in a single source of truth - a redisDB -instance that we refer as ConfigDB. Applications subscribe to ConfigDB -and generate their running configuration correspondingly. - -(Before Sep 2017, we were using an XML file named minigraph.xml to -configure SONiC devices. For historical documentation, please refer to -[Configuration with -Minigraph](https://github.com/Azure/SONiC/wiki/Configuration-with-Minigraph-(~Sep-2017))) - -# **Config Load and Save** - -In current version of SONiC, ConfigDB is implemented as database 4 of -local redis. When system boots, configurations will be loaded from -/etc/sonic/config_db.json file into redis. Please note that ConfigDB -content won't be written back into /etc/sonic/config_db.json file -automatically. In order to do that, a config save command need to be -manually executed from CLI. Similarly, config load will trigger a force -load of json file into DB. Generally, content in -/etc/sonic/config_db.json can be considered as starting config, and -content in redisDB running config. - -We keep a way to load configuration from minigraph and write into -ConfigDB for backward compatibility. To do that, run `config -load_minigraph`. - -### Incremental Configuration - -The design of ConfigDB supports incremental configuration - application -could subscribe to changes in ConfigDB and response correspondingly. -However, this feature is not implemented by all applications yet. By Sep -2017 now, the only application that supports incremental configuration -is BGP (docker-fpm-quagga). For other applications, a manual restart is -required after configuration changes in ConfigDB. - -# **Redis and Json Schema** - -ConfigDB uses a table-object schema that is similar with -[AppDB](https://github.com/Azure/sonic-swss/blob/4c56d23b9ff4940bdf576cf7c9e5aa77adcbbdcc/doc/swss-schema.md), -and `config_db.json` is a straight-forward serialization of DB. As an -example, the following fragments could be BGP-related configuration in -redis and json, correspondingly: - - -***Redis format*** -``` -127.0.0.1:6379[4]> keys BGP_NEIGHBOR:* - -1) "BGP_NEIGHBOR:10.0.0.31" -2) "BGP_NEIGHBOR:10.0.0.39" -3) "BGP_NEIGHBOR:10.0.0.11" -4) "BGP_NEIGHBOR:10.0.0.7" - -... - -127.0.0.1:6379[4]> hgetall BGP_NEIGHBOR:10.0.0.3 - -1) "admin_status" -2) "up" -3) "peer_addr" -4) "10.0.0.2" -5) "asn" -6) "65200" -7) "name" -8) "ARISTA07T2" -``` - -***Json format*** -``` -"BGP_NEIGHBOR": { - "10.0.0.57": { - "rrclient": "0", - "name": "ARISTA01T1", - "local_addr": "10.0.0.56", - "nhopself": "0", - "holdtime": "10", - "asn": "64600", - "keepalive": "3" - }, - "10.0.0.59": { - "rrclient": "0", - "name": "ARISTA02T1", - "local_addr": "10.0.0.58", - "nhopself": "0", - "holdtime": "10", - "asn": "64600", - "keepalive": "3" - }, -} -``` - -Full sample config_db.json files are availables at -[here](https://github.com/Azure/SONiC/blob/gh-pages/doc/config_db.json) -and -[here](https://github.com/Azure/SONiC/blob/gh-pages/doc/config_db_t0.json). - - -### ACL and Mirroring - -ACL and mirroring related configuration are defined in -**MIRROR_SESSION**, **ACL_TABLE** and **ACL_RULE** tables. Those -tables are in progress of migrating from APPDB. Please refer to their -schema in APPDB -[here](https://github.com/Azure/sonic-swss/blob/4c56d23b9ff4940bdf576cf7c9e5aa77adcbbdcc/doc/swss-schema.md) -and migration plan -[here](https://github.com/Azure/SONiC/wiki/ACL-Configuration-Requirement-Description). - -``` -{ -"MIRROR_SESSION": { - "everflow0": { - "src_ip": "10.1.0.32", - "dst_ip": "2.2.2.2" - } - }, - -"ACL_TABLE": { - "DATAACL": { - "policy_desc" : "data_acl", - "type": "l3", - "ports": [ - "Ethernet0", - "Ethernet4", - "Ethernet8", - "Ethernet12" - ] - } - } -} -``` - -***Below ACL table added as per the mail*** -``` -{ -"ACL_TABLE": { - "aaa": { - "type": "L3", - "ports": "Ethernet0" - } - }, -"ACL_RULE": { - "aaa|rule_0": { - "PRIORITY": "55", - "PACKET_ACTION": "DROP", - "L4_SRC_PORT": "0" - }, - "aaa|rule_1": { - "PRIORITY": "55", - "PACKET_ACTION": "DROP", - "L4_SRC_PORT": "1" - } - } -} -``` - -***Below ACL table added by comparig minigraph.xml & config_db.json*** - -``` -{ -"ACL_TABLE": { - "EVERFLOW": { - "type": "MIRROR", - "policy_desc": "EVERFLOW", - "ports": [ - "PortChannel0001", - "PortChannel0002", - "PortChannel0003", - "PortChannel0004" - ] - }, - "EVERFLOWV6": { - "type": "MIRRORV6", - "policy_desc": "EVERFLOWV6", - "ports": [ - "PortChannel0001", - "PortChannel0002", - "PortChannel0003", - "PortChannel0004" - ] - }, - "SNMP_ACL": { - "services": [ - "SNMP" - ], - "type": "CTRLPLANE", - "policy_desc": "SNMP_ACL" - }, - "SSH_ONLY": { - "services": [ - "SSH" - ], - "type": "CTRLPLANE", - "policy_desc": "SSH_ONLY" - } - }, - -"ACL_RULE": { - "SNMP_ACL|DEFAULT_RULE": { - "PRIORITY": "1", - "PACKET_ACTION": "DROP", - "ETHER_TYPE": "2048" - }, - "SNMP_ACL|RULE_1": { - "PRIORITY": "9999", - "PACKET_ACTION": "ACCEPT", - "SRC_IP": "1.1.1.1/32", - "IP_PROTOCOL": "17" - }, - "SNMP_ACL|RULE_2": { - "PRIORITY": "9998", - "PACKET_ACTION": "ACCEPT", - "SRC_IP": "2.2.2.2/32", - "IP_PROTOCOL": "17" - }, - "SSH_ONLY|DEFAULT_RULE": { - "PRIORITY": "1", - "PACKET_ACTION": "DROP", - "ETHER_TYPE": "2048" - }, - "SSH_ONLY|RULE_1": { - "PRIORITY": "9999", - "PACKET_ACTION": "ACCEPT", - "SRC_IP": "4.4.4.4/8", - "IP_PROTOCOL": "6" - } - } -} - -``` - -***ACL table type configuration example*** -``` -{ - "ACL_TABLE_TYPE": { - "CUSTOM_L3": { - "MATCHES": [ - "IN_PORTS", - "OUT_PORTS", - "SRC_IP" - ], - "ACTIONS": [ - "PACKET_ACTION", - "MIRROR_INGRESS_ACTION" - ], - "BIND_POINTS": [ - "PORT", - "LAG" - ] - } - }, - "ACL_TABLE": { - "DATAACL": { - "STAGE": "INGRESS", - "TYPE": "CUSTOM_L3", - "PORTS": [ - "Ethernet0", - "PortChannel1" - ] - } - }, - "ACL_RULE": { - "DATAACL|RULE0": { - "PRIORITY": "999", - "PACKET_ACTION": "DROP", - "SRC_IP": "1.1.1.1/32", - } - } -} -``` - -### BGP Sessions - -BGP session configuration is defined in **BGP_NEIGHBOR** table. BGP -neighbor address is used as key of bgp neighbor objects. Object -attributes include remote AS number, neighbor router name, and local -peering address. Dynamic neighbor is also supported by defining peer -group name and IP ranges in **BGP_PEER_RANGE** table. - -``` -{ -"BGP_NEIGHBOR": { - "10.0.0.61": { - "local_addr": "10.0.0.60", - "asn": 64015, - "name": "ARISTA15T0" - }, - "10.0.0.49": { - "local_addr": "10.0.0.48", - "asn": 64009, - "name": "ARISTA09T0" - }, - - "10.0.0.63": { - "rrclient": "0", - "name": "ARISTA04T1", - "local_addr": "10.0.0.62", - "nhopself": "0", - "holdtime": "10", - "asn": "64600", - "keepalive": "3" - } - -"BGP_PEER_RANGE": { - "BGPSLBPassive": { - "name": "BGPSLBPassive", - "ip_range": [ - "10.250.0.0/27" - ] - }, - "BGPVac": { - "name": "BGPVac", - "ip_range": [ - "10.2.0.0/16" - ] - } - } -} -``` - -### BUFFER_PG - -When the system is running in traditional buffer model, profiles needs to explicitly configured: - -``` -{ -"BUFFER_PG": { - "Ethernet0|3-4": { - "profile": "pg_lossless_40000_5m_profile" - }, - "Ethernet1|3-4": { - "profile": "pg_lossless_40000_5m_profile" - }, - "Ethernet2|3-4": { - "profile": "pg_lossless_40000_5m_profile" - } - } -} - -``` - -When the system is running in dynamic buffer model, profiles can be: - - - either calculated dynamically according to ports' configuration and just configured as "NULL"; - - or configured explicitly. - -``` -{ -"BUFFER_PG": { - "Ethernet0|3-4": { - "profile": "NULL" - }, - "Ethernet1|3-4": { - "profile": "NULL" - }, - "Ethernet2|3-4": { - "profile": "static_profile" - } - } -} - -``` - -### Buffer pool - -When the system is running in traditional buffer model, the size of all of the buffer pools and xoff of ingress_lossless_pool need to be configured explicitly. - -``` -{ -"BUFFER_POOL": { - "egress_lossless_pool": { - "type": "egress", - "mode": "static", - "size": "15982720" - }, - "egress_lossy_pool": { - "type": "egress", - "mode": "dynamic", - "size": "9243812" - }, - "ingress_lossless_pool": { - "xoff": "4194112", - "type": "ingress", - "mode": "dynamic", - "size": "10875072" - } - } -} - -``` - -When the system is running in dynamic buffer model, the size of some of the buffer pools can be omitted and will be dynamically calculated. - -``` -{ -"BUFFER_POOL": { - "egress_lossless_pool": { - "type": "egress", - "mode": "static", - "size": "15982720" - }, - "egress_lossy_pool": { - "type": "egress", - "mode": "dynamic", - }, - "ingress_lossless_pool": { - "type": "ingress", - "mode": "dynamic", - } - } -} - -``` - - -### Buffer profile - -``` -{ -"BUFFER_PROFILE": { - "egress_lossless_profile": { - "static_th": "3995680", - "pool": "egress_lossless_pool", - "size": "1518" - }, - "egress_lossy_profile": { - "dynamic_th": "3", - "pool": "egress_lossy_pool", - "size": "1518" - }, - "ingress_lossy_profile": { - "dynamic_th": "3", - "pool": "ingress_lossless_pool", - "size": "0" - }, - "pg_lossless_40000_5m_profile": { - "xon_offset": "2288", - "dynamic_th": "-3", - "xon": "2288", - "xoff": "66560", - "pool": "ingress_lossless_pool", - "size": "1248" - }, - "pg_lossless_40000_40m_profile": { - "xon_offset": "2288", - "dynamic_th": "-3", - "xon": "2288", - "xoff": "71552", - "pool": "ingress_lossless_pool", - "size": "1248" - } - } -} - -``` - -When the system is running in dynamic buffer model and the headroom_type is dynamic, only dynamic_th needs to be configured and rest of fields can be omitted. -This kind of profiles will be handled by buffer manager and won't be applied to SAI. - -``` -{ - { - "non_default_dynamic_th_profile": { - "dynamic_th": 1, - "headroom_type": "dynamic" - } - } -} -``` - -### Buffer queue - -``` -{ -"BUFFER_QUEUE": { - "Ethernet50,Ethernet52,Ethernet54,Ethernet56|0-2": { - "profile": "egress_lossy_profile" - }, - "Ethernet50,Ethernet52,Ethernet54,Ethernet56|3-4": { - "profile": "egress_lossless_profile" - }, - "Ethernet50,Ethernet52,Ethernet54,Ethernet56|5-6": { - "profile": "egress_lossy_profile" - } - } -} - -``` - - -### Cable length - -``` -{ -"CABLE_LENGTH": { - "AZURE": { - "Ethernet8": "5m", - "Ethernet9": "5m", - "Ethernet2": "5m", - "Ethernet58": "5m", - "Ethernet59": "5m", - "Ethernet50": "40m", - "Ethernet51": "5m", - "Ethernet52": "40m", - "Ethernet53": "5m", - "Ethernet54": "40m", - "Ethernet55": "5m", - "Ethernet56": "40m" - } - } -} - -``` - -### COPP_TABLE - -``` -{ -"COPP_TABLE": { - "default": { - "cbs": "600", - "cir": "600", - "meter_type": "packets", - "mode": "sr_tcm", - "queue": "0", - "red_action": "drop" - }, - - "trap.group.arp": { - "cbs": "600", - "cir": "600", - "meter_type": "packets", - "mode": "sr_tcm", - "queue": "4", - "red_action": "drop", - "trap_action": "trap", - "trap_ids": "arp_req,arp_resp,neigh_discovery", - "trap_priority": "4" - }, - - "trap.group.lldp.dhcp.udld": { - "queue": "4", - "trap_action": "trap", - "trap_ids": "lldp,dhcp,udld", - "trap_priority": "4" - }, - - "trap.group.bgp.lacp": { - "queue": "4", - "trap_action": "trap", - "trap_ids": "bgp,bgpv6,lacp", - "trap_priority": "4" - }, - - "trap.group.ip2me": { - "cbs": "600", - "cir": "600", - "meter_type": "packets", - "mode": "sr_tcm", - "queue": "1", - "red_action": "drop", - "trap_action": "trap", - "trap_ids": "ip2me", - "trap_priority": "1" - } - } -} -``` - -### CRM - -``` -{ -"CRM": { - "Config": { - "acl_table_threshold_type": "percentage", - "nexthop_group_threshold_type": "percentage", - "fdb_entry_high_threshold": "85", - "acl_entry_threshold_type": "percentage", - "ipv6_neighbor_low_threshold": "70", - "nexthop_group_member_low_threshold": "70", - "acl_group_high_threshold": "85", - "ipv4_route_high_threshold": "85", - "acl_counter_high_threshold": "85", - "ipv4_route_low_threshold": "70", - "ipv4_route_threshold_type": "percentage", - "ipv4_neighbor_low_threshold": "70", - "acl_group_threshold_type": "percentage", - "ipv4_nexthop_high_threshold": "85", - "ipv6_route_threshold_type": "percentage", - "snat_entry_threshold_type": "percentage", - "snat_entry_high_threshold": "85", - "snat_entry_low_threshold": "70", - "dnat_entry_threshold_type": "percentage", - "dnat_entry_high_threshold": "85", - "dnat_entry_low_threshold": "70", - "ipmc_entry_threshold_type": "percentage", - "ipmc_entry_high_threshold": "85", - "ipmc_entry_low_threshold": "70" - } - } -} - -``` - -### Data Plane L3 Interfaces - -IP configuration for data plane are defined in **INTERFACE**, -**PORTCHANNEL_INTERFACE**, and **VLAN_INTERFACE** table. The objects -in all three tables have the interface (could be physical port, port -channel, or vlan) that IP address is attached to as first-level key, and -IP prefix as second-level key. IP interface objects don't have any -attributes. - -``` -{ -"INTERFACE": { - "Ethernet0|10.0.0.0/31": {}, - "Ethernet4|10.0.0.2/31": {}, - "Ethernet8|10.0.0.4/31": {} - ... - }, - -"PORTCHANNEL_INTERFACE": { - "PortChannel01|10.0.0.56/31": {}, - "PortChannel01|FC00::71/126": {}, - "PortChannel02|10.0.0.58/31": {}, - "PortChannel02|FC00::75/126": {} - ... - }, -"VLAN_INTERFACE": { - "Vlan1000|192.168.0.1/27": {} - } -} - -``` - - -### DEFAULT_LOSSLESS_BUFFER_PARAMETER - -This table stores the default lossless buffer parameters for dynamic buffer calculation. - -``` -{ - "DEFAULT_LOSSLESS_BUFFER_PARAMETER": { - "AZURE": { - "default_dynamic_th": "0", - "over_subscribe_ratio": "2" - } - } -} -``` - -### Device Metadata - -The **DEVICE_METADATA** table contains only one object named -*localhost*. In this table the device metadata such as hostname, hwsku, -deployment envionment id and deployment type are specified. BGP local AS -number is also specified in this table as current only single BGP -instance is supported in SONiC. - -``` -{ -"DEVICE_METADATA": { - "localhost": { - "hwsku": "Force10-S6100", - "default_bgp_status": "up", - "docker_routing_config_mode": "unified", - "hostname": "sonic-s6100-01", - "platform": "x86_64-dell_s6100_c2538-r0", - "mac": "4c:76:25:f4:70:82", - "default_pfcwd_status": "disable", - "bgp_asn": "65100", - "deployment_id": "1", - "type": "ToRRouter", - "buffer_model": "traditional" - } - } -} - -``` - - -### Device neighbor metada - -``` -{ -"DEVICE_NEIGHBOR_METADATA": { - "ARISTA01T1": { - "lo_addr": "None", - "mgmt_addr": "10.11.150.45", - "hwsku": "Arista-VM", - "type": "LeafRouter" - }, - "ARISTA02T1": { - "lo_addr": "None", - "mgmt_addr": "10.11.150.46", - "hwsku": "Arista-VM", - "type": "LeafRouter" - } - } -} - -``` - - -### DSCP_TO_TC_MAP -``` -{ -"DSCP_TO_TC_MAP": { - "AZURE": { - "1": "1", - "0": "1", - "3": "3", - "2": "1", - "5": "2", - "4": "4", - "7": "1", - "6": "1", - "9": "1", - "8": "0" - } - } -} - -``` - - -### MPLS_TC_TO_TC_MAP -``` -{ -"MPLS_TC_TO_TC_MAP": { - "AZURE": { - "0": "0", - "1": "1", - "2": "1", - "3": "2", - "4": "2", - "5": "3", - "6": "3", - "7": "4" - } - } -} - -``` - -### FLEX_COUNTER_TABLE - -``` -{ -"FLEX_COUNTER_TABLE": { - "PFCWD": { - "FLEX_COUNTER_STATUS": "enable" - }, - "PORT": { - "FLEX_COUNTER_STATUS": "enable" - }, - "QUEUE": { - "FLEX_COUNTER_STATUS": "enable" - } - } -} - -``` - - -### L2 Neighbors - -The L2 neighbor and connection information can be configured in -**DEVICE_NEIGHBOR** table. Those information are used mainly for LLDP. -While mandatory fields include neighbor name acting as object key and -remote port / local port information in attributes, optional information -about neighbor device such as device type, hwsku, management address and -loopback address can also be defined. - -``` -{ -"DEVICE_NEIGHBOR": { - "ARISTA04T1": { - "mgmt_addr": "10.20.0.163", - "hwsku": "Arista", - "lo_addr": null, - "local_port": "Ethernet124", - "type": "LeafRouter", - "port": "Ethernet1" - }, - "ARISTA03T1": { - "mgmt_addr": "10.20.0.162", - "hwsku": "Arista", - "lo_addr": null, - "local_port": "Ethernet120", - "type": "LeafRouter", - "port": "Ethernet1" - }, - "ARISTA02T1": { - "mgmt_addr": "10.20.0.161", - "hwsku": "Arista", - "lo_addr": null, - "local_port": "Ethernet116", - "type": "LeafRouter", - "port": "Ethernet1" - }, - "ARISTA01T1": { - "mgmt_addr": "10.20.0.160", - "hwsku": "Arista", - "lo_addr": null, - "local_port": "Ethernet112", - "type": "LeafRouter", - "port": "Ethernet1" - } - } -} -``` - -### Loopback Interface - -Loopback interface configuration lies in **LOOPBACK_INTERFACE** table -and has similar schema with data plane interfaces. The loopback device -name and loopback IP prefix act as multi-level key for loopback -interface objects. - -``` -{ -"LOOPBACK_INTERFACE": { - "Loopback0|10.1.0.32/32": {}, - "Loopback0|FC00:1::32/128": {} - } -} - -``` - -### LOSSLESS_TRAFFIC_PATTERN - -The LOSSLESS_TRAFFIC_PATTERN table stores parameters related to -lossless traffic for dynamic buffer calculation - -``` -{ - "LOSSLESS_TRAFFIC_PATTERN": { - "AZURE": { - "mtu": "1024", - "small_packet_percentage": "100" - } - } -} -``` - -### Management Interface - -Management interfaces are defined in **MGMT_INTERFACE** table. Object -key is composed of management interface name and IP prefix. Attribute -***gwaddr*** specify the gateway address of the prefix. -***forced_mgmt_routes*** attribute can be used to specify addresses / -prefixes traffic to which are forced to go through management network -instead of data network. - -``` -{ -"MGMT_INTERFACE": { - "eth0|10.11.150.11/16": { - "gwaddr": "10.11.0.1" - }, - "eth0|FC00:2::32/64": { - "forced_mgmt_routes": [ - "10.0.0.100/31", - "10.250.0.8", - "10.255.0.0/28" - ], - "gwaddr": "fc00:2::1" - } - } -} - -``` - -### Management port - -``` -{ -"MGMT_PORT": { - "eth0": { - "alias": "eth0", - "admin_status": "up" - } - } -} - -``` - - -### Management VRF - -``` -{ -"MGMT_VRF_CONFIG": { - "vrf_global": { - "mgmtVrfEnabled": "true" - } - } -} -``` - -### MAP_PFC_PRIORITY_TO_QUEUE - -``` -{ -"MAP_PFC_PRIORITY_TO_QUEUE": { - "AZURE": { - "1": "1", - "0": "0", - "3": "3", - "2": "2", - "5": "5", - "4": "4", - "7": "7", - "6": "6" - } - } -} -``` -### NTP Global Configuration - -These configuration options are used to modify the way that -ntp binds to the ports on the switch and which port it uses to -make ntp update requests from. - -***NTP VRF*** - -If this option is set to `default` then ntp will run within the default vrf -**when the management vrf is enabled**. If the mgmt vrf is enabled and this value is -not set to default then ntp will run within the mgmt vrf. - -This option **has no effect** if the mgmt vrf is not enabled. - -``` -{ -"NTP": { - "global": { - "vrf": "default" - } - } -} -``` - - -***NTP Source Port*** - -This option sets the port which ntp will choose to send time update requests from by. - -NOTE: If a Loopback interface is defined on the switch ntp will choose this by default, so this setting -is **required** if the switch has a Loopback interface and the ntp peer does not have defined routes -for that address. - -``` -{ -"NTP": { - "global": { - "src_intf": "Ethernet1" - } - } -} -``` - -### NTP and SYSLOG servers - -These information are configured in individual tables. Domain name or IP -address of the server is used as object key. Currently there are no -attributes in those objects. - -***NTP server*** -``` -{ -"NTP_SERVER": { - "2.debian.pool.ntp.org": {}, - "1.debian.pool.ntp.org": {}, - "3.debian.pool.ntp.org": {}, - "0.debian.pool.ntp.org": {} - }, - -"NTP_SERVER": { - "23.92.29.245": {}, - "204.2.134.164": {} - } -} -``` - -***Syslogserver*** -``` -{ -"SYSLOG_SERVER": { - "10.0.0.5": {}, - "10.0.0.6": {}, - "10.11.150.5": {} - } -} -``` - -### Port - -In this table the physical port configurations are defined. Each object -will have port name as its key, and port name alias and port speed as -optional attributes. - -``` -{ -"PORT": { - "Ethernet0": { - "index": "0", - "lanes": "101,102", - "description": "fortyGigE1/1/1", - "mtu": "9100", - "alias": "fortyGigE1/1/1", - "speed": "40000" - }, - "Ethernet1": { - "index": "1", - "lanes": "103,104", - "description": "fortyGigE1/1/2", - "mtu": "9100", - "alias": "fortyGigE1/1/2", - "admin_status": "up", - "speed": "40000" - }, - "Ethernet63": { - "index": "63", - "lanes": "87,88", - "description": "fortyGigE1/4/16", - "mtu": "9100", - "alias": "fortyGigE1/4/16", - "speed": "40000" - } - } -} - -``` - -### Port Channel - -Port channels are defined in **PORTCHANNEL** table with port channel -name as object key and member list as attribute. - -``` -{ -"PORTCHANNEL": { - "PortChannel0003": { - "admin_status": "up", - "min_links": "1", - "members": [ - "Ethernet54" - ], - "mtu": "9100" - }, - "PortChannel0004": { - "admin_status": "up", - "min_links": "1", - "members": [ - "Ethernet56" - ], - "mtu": "9100" - } - } -} -``` - - -### Portchannel member - -``` -{ -"PORTCHANNEL_MEMBER": { - "PortChannel0001|Ethernet50": {}, - "PortChannel0002|Ethernet52": {}, - "PortChannel0003|Ethernet54": {}, - "PortChannel0004|Ethernet56": {} - } -} - -``` -### Scheduler - -``` -{ -"SCHEDULER": { - "scheduler.0": { - "type": "STRICT" - }, - "scheduler.1": { - "type": "WRR" - "weight": "1", - "meter_type": "bytes", - "pir": "1250000000", - "pbs": "8192" - }, - "scheduler.port": { - "meter_type": "bytes", - "pir": "1000000000", - "pbs": "8192" - } - } -} -``` - -### Port QoS Map - -``` -{ -"PORT_QOS_MAP": { - "Ethernet50,Ethernet52,Ethernet54,Ethernet56": { - "tc_to_pg_map": "AZURE", - "tc_to_queue_map": "AZURE", - "pfc_enable": "3,4", - "pfc_to_queue_map": "AZURE", - "dscp_to_tc_map": "AZURE", - "dscp_to_fc_map": "AZURE", - "exp_to_fc_map": "AZURE", - "scheduler": "scheduler.port" - } - } -} -``` - -### Queue -``` -{ -"QUEUE": { - "Ethernet56|4": { - "wred_profile": "AZURE_LOSSLESS", - "scheduler": "scheduler.1" - }, - "Ethernet56|5": { - "scheduler": "scheduler.0" - }, - "Ethernet56|6": { - "scheduler": "scheduler.0" - } - } -} -``` - - -### Tacplus Server - -``` -{ -"TACPLUS_SERVER": { - "10.0.0.8": { - "priority": "1", - "tcp_port": "49" - }, - "10.0.0.9": { - "priority": "1", - "tcp_port": "49" - } - } -} -``` - - -### TC to Priority group map - -``` -{ -"TC_TO_PRIORITY_GROUP_MAP": { - "AZURE": { - "1": "1", - "0": "0", - "3": "3", - "2": "2", - "5": "5", - "4": "4", - "7": "7", - "6": "6" - } - } -} -``` - -### TC to Queue map - -``` -{ -"TC_TO_QUEUE_MAP": { - "AZURE": { - "1": "1", - "0": "0", - "3": "3", - "2": "2", - "5": "5", - "4": "4", - "7": "7", - "6": "6" - } - } -} -``` - -### Versions - -This table is where the curret version of the software is recorded. -``` -{ - "VERSIONS": { - "DATABASE": { - "VERSION": "version_1_0_1" - } - } -} -``` - -### VLAN - -This table is where VLANs are defined. VLAN name is used as object key, -and member list as well as an integer id are defined as attributes. If a -DHCP relay is required for this VLAN, a dhcp_servers attribute must be -specified for that VLAN, the value of which is a list that must contain -the domain name or IP address of one or more DHCP servers. - -``` -{ -"VLAN": { - "Vlan1000": { - "dhcp_servers": [ - "192.0.0.1", - "192.0.0.2", - "192.0.0.3", - "192.0.0.4" - ], - "members": [ - "Ethernet0", - "Ethernet4", - "Ethernet8", - "Ethernet12" - ], - "vlanid": "1000" - } - } -} -``` - -### VLAN_MEMBER - -VLAN member table has Vlan name together with physical port or port -channel name as object key, and tagging mode as attributes. - -``` -{ -"VLAN_MEMBER": { - "Vlan1000|PortChannel47": { - "tagging_mode": "untagged" - }, - "Vlan1000|Ethernet8": { - "tagging_mode": "untagged" - }, - "Vlan2000|PortChannel47": { - "tagging_mode": "tagged" - } - } -} -``` - -### Virtual router - -The virtual router table allows to insert or update a new virtual router -instance. The key of the instance is its name. The attributes in the -table allow to change properties of a virtual router. Attributes: - -- 'v4' contains boolean value 'true' or 'false'. Enable or - disable IPv4 in the virtual router -- 'v6' contains boolean value 'true' or 'false'. Enable or - disable IPv6 in the virtual router -- 'src_mac' contains MAC address. What source MAC address will be - used for packets egressing from the virtual router -- 'ttl_action' contains packet action. Defines the action for - packets with TTL == 0 or TTL == 1 -- 'ip_opt_action' contains packet action. Defines the action for - packets with IP options -- 'l3_mc_action' contains packet action. Defines the action for - unknown L3 multicast packets - -The packet action could be: - -- 'drop' -- 'forward' -- 'copy' -- 'copy_cancel' -- 'trap' -- 'log' -- 'deny' -- 'transit' - - -***TBD*** -``` -'VRF:rid1': { - 'v4': 'true', - 'v6': 'false', - 'src_mac': '02:04:05:06:07:08', - 'ttl_action': 'copy', - 'ip_opt_action': 'deny', - 'l3_mc_action': 'drop' -} -``` - - -### WRED_PROFILE - -``` -{ -"WRED_PROFILE": { - "AZURE_LOSSLESS": { - "red_max_threshold": "2097152", - "wred_green_enable": "true", - "ecn": "ecn_all", - "green_min_threshold": "1048576", - "red_min_threshold": "1048576", - "wred_yellow_enable": "true", - "yellow_min_threshold": "1048576", - "green_max_threshold": "2097152", - "green_drop_probability": "5", - "yellow_max_threshold": "2097152", - "wred_red_enable": "true", - "yellow_drop_probability": "5", - "red_drop_probability": "5" - } - } -} -``` - -### BREAKOUT_CFG - -This table is introduced as part of Dynamic Port Breakout(DPB) feature. -It shows the current breakout mode of all ports(root ports). -The list of root ports, all possible breakout modes, and default breakout modes - are obtained/derived from platform.json and hwsku.json files. - -``` -"BREAKOUT_CFG": { - "Ethernet0": { - "brkout_mode": "4x25G[10G]" - }, - "Ethernet4": { - "brkout_mode": "4x25G[10G]" - }, - "Ethernet8": { - "brkout_mode": "4x25G[10G]" - }, - - ...... - - "Ethernet116": { - "brkout_mode": "2x50G" - }, - "Ethernet120": { - "brkout_mode": "2x50G" - }, - "Ethernet124": { - "brkout_mode": "2x50G" - } -} -``` - -For Developers -============== - -Generating Application Config by Jinja2 Template ------------------------------------------------- - -To be added. - -Incremental Configuration by Subscribing to ConfigDB ----------------------------------------------------- - -Detail instruction to be added. A sample could be found in this -[PR](https://github.com/Azure/sonic-buildimage/pull/861) that -implemented dynamic configuration for BGP. diff --git a/gearsyncd/gearboxparser.cpp b/gearsyncd/gearboxparser.cpp index 1ae8118266c..dfd68be2ec3 100644 --- a/gearsyncd/gearboxparser.cpp +++ b/gearsyncd/gearboxparser.cpp @@ -151,6 +151,12 @@ bool GearboxParser::parse() val = phy["context_id"]; attr = std::make_pair("context_id", std::to_string(val.get())); attrs.push_back(attr); + if (phy.find("macsec_ipg") != phy.end()) + { + val = phy["macsec_ipg"]; + attr = std::make_pair("macsec_ipg", std::to_string(val.get())); + attrs.push_back(attr); + } if (phy.find("hwinfo") == phy.end()) { SWSS_LOG_ERROR("missing 'hwinfo' field in 'phys' item %d in gearbox configuration", iter); diff --git a/mclagsyncd/mclaglink.cpp b/mclagsyncd/mclaglink.cpp index 68b700fdb99..b09660ee56f 100644 --- a/mclagsyncd/mclaglink.cpp +++ b/mclagsyncd/mclaglink.cpp @@ -31,6 +31,7 @@ #include "mclagsyncd/mclaglink.h" #include "mclagsyncd/mclag.h" #include +#include #include #include "macaddress.h" #include @@ -188,8 +189,13 @@ void MclagLink::mclagsyncdFetchMclagInterfaceConfigFromConfigdb() void MclagLink::setPortIsolate(char *msg) { - char *platform = getenv("platform"); - if ((NULL != platform) && (strstr(platform, BRCM_PLATFORM_SUBSTRING))) + static const unordered_set supported { + BRCM_PLATFORM_SUBSTRING, + BFN_PLATFORM_SUBSTRING + }; + + const char *platform = getenv("platform"); + if (platform != nullptr && supported.find(string(platform)) != supported.end()) { mclag_sub_option_hdr_t *op_hdr = NULL; string isolate_src_port; diff --git a/mclagsyncd/mclaglink.h b/mclagsyncd/mclaglink.h index 9c23c97686a..a811f8cb2eb 100644 --- a/mclagsyncd/mclaglink.h +++ b/mclagsyncd/mclaglink.h @@ -50,7 +50,9 @@ #endif /* INET_ADDRSTRLEN */ #define MAX_L_PORT_NAME 20 + #define BRCM_PLATFORM_SUBSTRING "broadcom" +#define BFN_PLATFORM_SUBSTRING "barefoot" using namespace std; @@ -187,9 +189,10 @@ namespace swss { typedef std::tuple vlan_mbr; class MclagLink : public Selectable { - + private: - Select *m_select; + const int MSG_BATCH_SIZE; + unsigned int m_bufSize; char *m_messageBuffer; char *m_messageBuffer_send; @@ -200,11 +203,12 @@ namespace swss { int m_server_socket; int m_connection_socket; + Select *m_select; + bool is_iccp_up = false; std::string m_system_mac; std::set m_vlan_mbrship; //set of vlan,mbr tuples - const int MSG_BATCH_SIZE; std::map *p_learn; unique_ptr p_state_db; diff --git a/orchagent/Makefile.am b/orchagent/Makefile.am index 7225917e4da..f6026d613f5 100644 --- a/orchagent/Makefile.am +++ b/orchagent/Makefile.am @@ -18,6 +18,7 @@ dist_swss_DATA = \ pfc_detect_barefoot.lua \ pfc_detect_nephos.lua \ pfc_detect_cisco-8000.lua \ + pfc_detect_vs.lua \ pfc_restore.lua \ pfc_restore_cisco-8000.lua \ port_rates.lua \ @@ -91,10 +92,23 @@ orchagent_SOURCES = \ lagid.cpp \ bfdorch.cpp \ srv6orch.cpp \ - response_publisher.cpp + response_publisher.cpp \ + nvgreorch.cpp orchagent_SOURCES += flex_counter/flex_counter_manager.cpp flex_counter/flex_counter_stat_manager.cpp flex_counter/flow_counter_handler.cpp orchagent_SOURCES += debug_counter/debug_counter.cpp debug_counter/drop_counter.cpp +orchagent_SOURCES += p4orch/p4orch.cpp \ + p4orch/p4orch_util.cpp \ + p4orch/p4oidmapper.cpp \ + p4orch/router_interface_manager.cpp \ + p4orch/neighbor_manager.cpp \ + p4orch/next_hop_manager.cpp \ + p4orch/route_manager.cpp \ + p4orch/acl_util.cpp \ + p4orch/acl_table_manager.cpp \ + p4orch/acl_rule_manager.cpp \ + p4orch/wcmp_manager.cpp \ + p4orch/mirror_session_manager.cpp orchagent_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) orchagent_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) diff --git a/orchagent/aclorch.cpp b/orchagent/aclorch.cpp index 900299b3d5c..c3a9f23ec78 100644 --- a/orchagent/aclorch.cpp +++ b/orchagent/aclorch.cpp @@ -2646,6 +2646,7 @@ void AclOrch::init(vector& connectors, PortsOrch *portOrch, Mirr // purposes. string platform = getenv("platform") ? getenv("platform") : ""; if (platform == BRCM_PLATFORM_SUBSTRING || + platform == CISCO_8000_PLATFORM_SUBSTRING || platform == MLNX_PLATFORM_SUBSTRING || platform == BFN_PLATFORM_SUBSTRING || platform == MRVL_PLATFORM_SUBSTRING || @@ -2676,6 +2677,7 @@ void AclOrch::init(vector& connectors, PortsOrch *portOrch, Mirr // In Mellanox platform, V4 and V6 rules are stored in different tables if (platform == MLNX_PLATFORM_SUBSTRING || + platform == CISCO_8000_PLATFORM_SUBSTRING || platform == MRVL_PLATFORM_SUBSTRING) { m_isCombinedMirrorV6Table = false; diff --git a/orchagent/acltable.h b/orchagent/acltable.h index 081170984f9..3ec7f1a757e 100644 --- a/orchagent/acltable.h +++ b/orchagent/acltable.h @@ -19,8 +19,9 @@ extern "C" { #define ACL_TABLE_TYPE_BPOINT_TYPES "BIND_POINTS" #define ACL_TABLE_TYPE_ACTIONS "ACTIONS" -#define STAGE_INGRESS "INGRESS" -#define STAGE_EGRESS "EGRESS" +#define STAGE_INGRESS "INGRESS" +#define STAGE_EGRESS "EGRESS" +#define STAGE_PRE_INGRESS "PRE_INGRESS" #define TABLE_TYPE_L3 "L3" #define TABLE_TYPE_L3V6 "L3V6" @@ -39,7 +40,31 @@ typedef enum { ACL_STAGE_UNKNOWN, ACL_STAGE_INGRESS, - ACL_STAGE_EGRESS + ACL_STAGE_EGRESS, + ACL_STAGE_PRE_INGRESS } acl_stage_type_t; typedef std::unordered_map acl_stage_type_lookup_t; +typedef std::map acl_stage_lookup_t; +typedef std::map acl_stage_to_switch_attr_lookup_t; + +struct AclTableGroupMember +{ + sai_object_id_t m_group_oid; + sai_object_id_t m_group_member_oid; + uint32_t m_priority; + AclTableGroupMember() : m_group_oid(SAI_NULL_OBJECT_ID), m_group_member_oid(SAI_NULL_OBJECT_ID), m_priority(0) + {} +}; + +static const acl_stage_lookup_t aclStageLookup = { + {STAGE_INGRESS, SAI_ACL_STAGE_INGRESS}, + {STAGE_EGRESS, SAI_ACL_STAGE_EGRESS}, + {STAGE_PRE_INGRESS, SAI_ACL_STAGE_PRE_INGRESS}, +}; + +static const acl_stage_to_switch_attr_lookup_t aclStageToSwitchAttrLookup = { + {SAI_ACL_STAGE_INGRESS, SAI_SWITCH_ATTR_INGRESS_ACL}, + {SAI_ACL_STAGE_EGRESS, SAI_SWITCH_ATTR_EGRESS_ACL}, + {SAI_ACL_STAGE_PRE_INGRESS, SAI_SWITCH_ATTR_PRE_INGRESS_ACL}, +}; diff --git a/orchagent/bufferorch.cpp b/orchagent/bufferorch.cpp index e7204344d5a..41101480d59 100644 --- a/orchagent/bufferorch.cpp +++ b/orchagent/bufferorch.cpp @@ -48,7 +48,11 @@ BufferOrch::BufferOrch(DBConnector *applDb, DBConnector *confDb, DBConnector *st m_flexCounterTable(new ProducerTable(m_flexCounterDb.get(), FLEX_COUNTER_TABLE)), m_flexCounterGroupTable(new ProducerTable(m_flexCounterDb.get(), FLEX_COUNTER_GROUP_TABLE)), m_countersDb(new DBConnector("COUNTERS_DB", 0)), - m_stateBufferMaximumValueTable(stateDb, STATE_BUFFER_MAXIMUM_VALUE_TABLE) + m_stateBufferMaximumValueTable(stateDb, STATE_BUFFER_MAXIMUM_VALUE_TABLE), + m_ingressZeroBufferPool(SAI_NULL_OBJECT_ID), + m_egressZeroBufferPool(SAI_NULL_OBJECT_ID), + m_ingressZeroPoolRefCount(0), + m_egressZeroPoolRefCount(0) { SWSS_LOG_ENTER(); initTableHandlers(); @@ -310,6 +314,65 @@ const object_reference_map &BufferOrch::getBufferPoolNameOidMap(void) return *m_buffer_type_maps[APP_BUFFER_POOL_TABLE_NAME]; } +void BufferOrch::lockZeroBufferPool(bool ingress) +{ + if (ingress) + m_ingressZeroPoolRefCount++; + else + m_egressZeroPoolRefCount++; +} + +void BufferOrch::unlockZeroBufferPool(bool ingress) +{ + sai_object_id_t pool = SAI_NULL_OBJECT_ID; + if (ingress) + { + if (--m_ingressZeroPoolRefCount <= 0) + { + pool = m_ingressZeroBufferPool; + m_ingressZeroBufferPool = SAI_NULL_OBJECT_ID; + } + } + else + { + if (--m_egressZeroPoolRefCount <= 0) + { + pool = m_egressZeroBufferPool; + m_egressZeroBufferPool = SAI_NULL_OBJECT_ID; + } + } + + if (pool != SAI_NULL_OBJECT_ID) + { + auto sai_status = sai_buffer_api->remove_buffer_pool(pool); + if (SAI_STATUS_SUCCESS != sai_status) + { + SWSS_LOG_ERROR("Failed to remove buffer pool, rv:%d", sai_status); + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_BUFFER, sai_status); + if (handle_status != task_process_status::task_success) + { + return; + } + } + else + { + SWSS_LOG_NOTICE("Zero buffer pool has been successfully removed"); + } + } +} + +void BufferOrch::setZeroBufferPool(bool ingress, sai_object_id_t pool) +{ + if (ingress) + { + m_ingressZeroBufferPool = pool; + } + else + { + m_egressZeroBufferPool = pool; + } +} + task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) { SWSS_LOG_ENTER(); @@ -318,6 +381,8 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) string map_type_name = APP_BUFFER_POOL_TABLE_NAME; string object_name = kfvKey(tuple); string op = kfvOp(tuple); + sai_buffer_pool_type_t pool_direction = SAI_BUFFER_POOL_TYPE_INGRESS; + bool creating_zero_pool = false; SWSS_LOG_DEBUG("object name:%s", object_name.c_str()); if (m_buffer_type_maps[map_type_name]->find(object_name) != m_buffer_type_maps[map_type_name]->end()) @@ -326,6 +391,17 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) SWSS_LOG_DEBUG("found existing object:%s of type:%s", object_name.c_str(), map_type_name.c_str()); } SWSS_LOG_DEBUG("processing command:%s", op.c_str()); + if (object_name == "ingress_zero_pool") + { + creating_zero_pool = true; + pool_direction = SAI_BUFFER_POOL_TYPE_INGRESS; + } + else if (object_name == "egress_zero_pool") + { + creating_zero_pool = true; + pool_direction = SAI_BUFFER_POOL_TYPE_EGRESS; + } + if (op == SET_COMMAND) { vector attribs; @@ -372,6 +448,11 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) return task_process_status::task_invalid_entry; } attr.id = SAI_BUFFER_POOL_ATTR_TYPE; + if (creating_zero_pool && pool_direction != static_cast(attr.value.u32)) + { + SWSS_LOG_ERROR("Wrong pool direction for pool %s", object_name.c_str()); + return task_process_status::task_invalid_entry; + } attribs.push_back(attr); } else if (field == buffer_pool_mode_field_name) @@ -437,18 +518,53 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) } else { - sai_status = sai_buffer_api->create_buffer_pool(&sai_object, gSwitchId, (uint32_t)attribs.size(), attribs.data()); - if (SAI_STATUS_SUCCESS != sai_status) + if (creating_zero_pool) { - SWSS_LOG_ERROR("Failed to create buffer pool %s with type %s, rv:%d", object_name.c_str(), map_type_name.c_str(), sai_status); - task_process_status handle_status = handleSaiCreateStatus(SAI_API_BUFFER, sai_status); - if (handle_status != task_process_status::task_success) + if (pool_direction == SAI_BUFFER_POOL_TYPE_INGRESS) { - return handle_status; + sai_object = m_ingressZeroBufferPool; + } + else if (pool_direction == SAI_BUFFER_POOL_TYPE_EGRESS) + { + sai_object = m_egressZeroBufferPool; + } + } + + if (SAI_NULL_OBJECT_ID == sai_object) + { + sai_status = sai_buffer_api->create_buffer_pool(&sai_object, gSwitchId, (uint32_t)attribs.size(), attribs.data()); + if (SAI_STATUS_SUCCESS != sai_status) + { + SWSS_LOG_ERROR("Failed to create buffer pool %s with type %s, rv:%d", object_name.c_str(), map_type_name.c_str(), sai_status); + task_process_status handle_status = handleSaiCreateStatus(SAI_API_BUFFER, sai_status); + if (handle_status != task_process_status::task_success) + { + return handle_status; + } + } + + SWSS_LOG_NOTICE("Created buffer pool %s with type %s", object_name.c_str(), map_type_name.c_str()); + } + else + { + SWSS_LOG_NOTICE("No need to create buffer pool %s since it has been created", object_name.c_str()); + } + + if (creating_zero_pool) + { + if (pool_direction == SAI_BUFFER_POOL_TYPE_INGRESS) + { + m_ingressZeroPoolRefCount++; + m_ingressZeroBufferPool = sai_object; + } + else if (pool_direction == SAI_BUFFER_POOL_TYPE_EGRESS) + { + m_egressZeroPoolRefCount++; + m_egressZeroBufferPool = sai_object; } } + (*(m_buffer_type_maps[map_type_name]))[object_name].m_saiObjectId = sai_object; - SWSS_LOG_NOTICE("Created buffer pool %s with type %s", object_name.c_str(), map_type_name.c_str()); // Here we take the PFC watchdog approach to update the COUNTERS_DB metadata (e.g., PFC_WD_DETECTION_TIME per queue) // at initialization (creation and registration phase) // Specifically, we push the buffer pool name to oid mapping upon the creation of the oid @@ -470,18 +586,40 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) if (SAI_NULL_OBJECT_ID != sai_object) { clearBufferPoolWatermarkCounterIdList(sai_object); - sai_status = sai_buffer_api->remove_buffer_pool(sai_object); - if (SAI_STATUS_SUCCESS != sai_status) + bool remove = true; + if (sai_object == m_ingressZeroBufferPool) { - SWSS_LOG_ERROR("Failed to remove buffer pool %s with type %s, rv:%d", object_name.c_str(), map_type_name.c_str(), sai_status); - task_process_status handle_status = handleSaiRemoveStatus(SAI_API_BUFFER, sai_status); - if (handle_status != task_process_status::task_success) + if (--m_ingressZeroPoolRefCount > 0) + remove = false; + else + m_ingressZeroBufferPool = SAI_NULL_OBJECT_ID; + } + else if (sai_object == m_egressZeroBufferPool) + { + if (--m_egressZeroPoolRefCount > 0) + remove = false; + else + m_egressZeroBufferPool = SAI_NULL_OBJECT_ID; + } + if (remove) + { + sai_status = sai_buffer_api->remove_buffer_pool(sai_object); + if (SAI_STATUS_SUCCESS != sai_status) { - return handle_status; + SWSS_LOG_ERROR("Failed to remove buffer pool %s with type %s, rv:%d", object_name.c_str(), map_type_name.c_str(), sai_status); + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_BUFFER, sai_status); + if (handle_status != task_process_status::task_success) + { + return handle_status; + } } + SWSS_LOG_NOTICE("Removed buffer pool %s with type %s", object_name.c_str(), map_type_name.c_str()); + } + else + { + SWSS_LOG_NOTICE("Will not remove buffer pool %s since it is still referenced", object_name.c_str()); } } - SWSS_LOG_NOTICE("Removed buffer pool %s with type %s", object_name.c_str(), map_type_name.c_str()); auto it_to_delete = (m_buffer_type_maps[map_type_name])->find(object_name); (m_buffer_type_maps[map_type_name])->erase(it_to_delete); m_countersDb->hdel(COUNTERS_BUFFER_POOL_NAME_MAP, object_name); diff --git a/orchagent/bufferorch.h b/orchagent/bufferorch.h index 05fdd7917fd..24af140b4a2 100644 --- a/orchagent/bufferorch.h +++ b/orchagent/bufferorch.h @@ -37,6 +37,14 @@ class BufferOrch : public Orch static type_map m_buffer_type_maps; void generateBufferPoolWatermarkCounterIdList(void); const object_reference_map &getBufferPoolNameOidMap(void); + sai_object_id_t getZeroBufferPool(bool ingress) + { + return ingress ? m_ingressZeroBufferPool : m_egressZeroBufferPool; + } + + void lockZeroBufferPool(bool ingress); + void unlockZeroBufferPool(bool ingress); + void setZeroBufferPool(bool direction, sai_object_id_t pool); private: typedef task_process_status (BufferOrch::*buffer_table_handler)(KeyOpFieldsValuesTuple &tuple); @@ -71,6 +79,11 @@ class BufferOrch : public Orch unique_ptr m_countersDb; bool m_isBufferPoolWatermarkCounterIdListGenerated = false; + + sai_object_id_t m_ingressZeroBufferPool; + sai_object_id_t m_egressZeroBufferPool; + int m_ingressZeroPoolRefCount; + int m_egressZeroPoolRefCount; }; #endif /* SWSS_BUFFORCH_H */ diff --git a/orchagent/bulker.h b/orchagent/bulker.h index a4a49b105da..2ff86644acb 100644 --- a/orchagent/bulker.h +++ b/orchagent/bulker.h @@ -414,6 +414,11 @@ class EntityBulker return creating_entries.count(entry); } + bool bulk_entry_pending_removal(const Te& entry) const + { + return removing_entries.find(entry) != removing_entries.end(); + } + private: std::unordered_map< // A map of Te, // entry -> diff --git a/orchagent/cbf/cbfnhgorch.cpp b/orchagent/cbf/cbfnhgorch.cpp index 403945c7a94..76435ad12d2 100644 --- a/orchagent/cbf/cbfnhgorch.cpp +++ b/orchagent/cbf/cbfnhgorch.cpp @@ -308,7 +308,7 @@ bool CbfNhg::sync() nhg_attr.value.u32 = static_cast(m_members.size()); nhg_attrs.push_back(move(nhg_attr)); - if (nhg_attr.value.u32 > gNhgMapOrch->getMaxFcVal()) + if (nhg_attr.value.u32 > gNhgMapOrch->getMaxNumFcs()) { /* If there are more members than FCs then this may be an error, as some members won't be used. */ SWSS_LOG_WARN("More CBF NHG members configured than supported Forwarding Classes"); @@ -632,6 +632,11 @@ bool CbfNhg::syncMembers(const set &members) nhgm.to_string().c_str(), to_string().c_str()); throw std::logic_error("Syncing already synced NHG member"); } + else if (nhgm.getNhgId() == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_WARN("CBF NHG member %s is not yet synced", nhgm.to_string().c_str()); + return false; + } /* * Check if the group exists in NhgOrch. @@ -710,10 +715,9 @@ vector CbfNhg::createNhgmAttrs(const CbfNhgMember &nhgm) const { SWSS_LOG_ENTER(); - if (!isSynced() || (nhgm.getNhgId() == SAI_NULL_OBJECT_ID)) + if (!isSynced()) { - SWSS_LOG_ERROR("CBF next hop group %s or next hop group %s are not synced", - to_string().c_str(), nhgm.to_string().c_str()); + SWSS_LOG_ERROR("CBF next hop group %s is not synced", to_string().c_str()); throw logic_error("CBF next hop group member attributes data is insufficient"); } diff --git a/orchagent/cbf/nhgmaporch.cpp b/orchagent/cbf/nhgmaporch.cpp index d765e3e90ec..fd83fe4b12f 100644 --- a/orchagent/cbf/nhgmaporch.cpp +++ b/orchagent/cbf/nhgmaporch.cpp @@ -294,34 +294,34 @@ void NhgMapOrch::decRefCount(const string &index) } /* - * Get the max FC value supported by the switch. + * Get the maximum number of FC classes supported by the switch. */ -sai_uint8_t NhgMapOrch::getMaxFcVal() +sai_uint8_t NhgMapOrch::getMaxNumFcs() { SWSS_LOG_ENTER(); - static int max_fc_val = -1; + static int max_num_fcs = -1; /* - * Get the maximum value allowed for FC if it wasn't already initialized. + * Get the maximum number of FC classes if it wasn't already initialized. */ - if (max_fc_val == -1) + if (max_num_fcs == -1) { sai_attribute_t attr; attr.id = SAI_SWITCH_ATTR_MAX_NUMBER_OF_FORWARDING_CLASSES; if (sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr) == SAI_STATUS_SUCCESS) { - max_fc_val = attr.value.u8; + max_num_fcs = attr.value.u8; } else { SWSS_LOG_WARN("Switch does not support FCs"); - max_fc_val = 0; + max_num_fcs = 0; } } - return static_cast(max_fc_val); + return static_cast(max_num_fcs); } /* @@ -343,7 +343,7 @@ pair> NhgMapOrch::getMap(const ve } unordered_map fc_map; - sai_uint8_t max_fc_val = getMaxFcVal(); + sai_uint8_t max_num_fcs = getMaxNumFcs(); /* * Create the map while validating that the values are positive @@ -353,13 +353,13 @@ pair> NhgMapOrch::getMap(const ve try { /* - * Check the FC value is valid. + * Check the FC value is valid. FC value must be in range [0, max_num_fcs). */ auto fc = stoi(fvField(*it)); - if ((fc < 0) || (fc > max_fc_val)) + if ((fc < 0) || (fc >= max_num_fcs)) { - SWSS_LOG_ERROR("FC value %d is either negative or greater than max value %d", fc, max_fc_val); + SWSS_LOG_ERROR("FC value %d is either negative or greater than max value %d", fc, max_num_fcs - 1); success = false; break; } diff --git a/orchagent/cbf/nhgmaporch.h b/orchagent/cbf/nhgmaporch.h index c345e7d5668..7d7317a1d6b 100644 --- a/orchagent/cbf/nhgmaporch.h +++ b/orchagent/cbf/nhgmaporch.h @@ -43,9 +43,9 @@ class NhgMapOrch : public Orch void decRefCount(const string &key); /* - * Get the max FC value supported by the switch. + * Get the maximum number of FC classes supported by the switch. */ - static sai_uint8_t getMaxFcVal(); + static sai_uint8_t getMaxNumFcs(); private: /* diff --git a/orchagent/copporch.cpp b/orchagent/copporch.cpp index d193e215c34..8a58ae73a08 100644 --- a/orchagent/copporch.cpp +++ b/orchagent/copporch.cpp @@ -1079,16 +1079,20 @@ bool CoppOrch::getAttribsFromTrapGroup (vector &fv_tuple, genetlink_attribs.push_back(attr); attr.id = SAI_HOSTIF_ATTR_NAME; + auto size = sizeof(attr.value.chardata); strncpy(attr.value.chardata, fvValue(*i).c_str(), - sizeof(attr.value.chardata)); + size - 1); + attr.value.chardata[size - 1] = '\0'; genetlink_attribs.push_back(attr); } else if (fvField(*i) == copp_genetlink_mcgrp_name) { attr.id = SAI_HOSTIF_ATTR_GENETLINK_MCGRP_NAME; + auto size = sizeof(attr.value.chardata); strncpy(attr.value.chardata, fvValue(*i).c_str(), - sizeof(attr.value.chardata)); + size - 1); + attr.value.chardata[size - 1] = '\0'; genetlink_attribs.push_back(attr); } else diff --git a/orchagent/copporch.h b/orchagent/copporch.h index 096979ebb8e..d774db64bab 100644 --- a/orchagent/copporch.h +++ b/orchagent/copporch.h @@ -65,6 +65,16 @@ class CoppOrch : public Orch CoppOrch(swss::DBConnector* db, std::string tableName); void generateHostIfTrapCounterIdList(); void clearHostIfTrapCounterIdList(); + + inline object_map getTrapGroupMap() + { + return m_trap_group_map; + } + + inline TrapGroupHostIfMap getTrapGroupHostIfMap() + { + return m_trap_group_hostif_map; + } protected: object_map m_trap_group_map; diff --git a/orchagent/crmorch.cpp b/orchagent/crmorch.cpp index fbc9d43691a..a7e897f822f 100644 --- a/orchagent/crmorch.cpp +++ b/orchagent/crmorch.cpp @@ -66,13 +66,49 @@ const map crmResSaiAvailAttrMap = { CrmResourceType::CRM_IPMC_ENTRY, SAI_SWITCH_ATTR_AVAILABLE_IPMC_ENTRY}, { CrmResourceType::CRM_SNAT_ENTRY, SAI_SWITCH_ATTR_AVAILABLE_SNAT_ENTRY }, { CrmResourceType::CRM_DNAT_ENTRY, SAI_SWITCH_ATTR_AVAILABLE_DNAT_ENTRY }, +}; + +const map crmResSaiObjAttrMap = +{ + { CrmResourceType::CRM_IPV4_ROUTE, SAI_OBJECT_TYPE_ROUTE_ENTRY }, + { CrmResourceType::CRM_IPV6_ROUTE, SAI_OBJECT_TYPE_ROUTE_ENTRY }, + { CrmResourceType::CRM_IPV4_NEXTHOP, SAI_OBJECT_TYPE_NULL }, + { CrmResourceType::CRM_IPV6_NEXTHOP, SAI_OBJECT_TYPE_NULL }, + { CrmResourceType::CRM_IPV4_NEIGHBOR, SAI_OBJECT_TYPE_NEIGHBOR_ENTRY }, + { CrmResourceType::CRM_IPV6_NEIGHBOR, SAI_OBJECT_TYPE_NEIGHBOR_ENTRY }, + { CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER, SAI_OBJECT_TYPE_NULL }, + { CrmResourceType::CRM_NEXTHOP_GROUP, SAI_OBJECT_TYPE_NEXT_HOP_GROUP }, + { CrmResourceType::CRM_ACL_TABLE, SAI_OBJECT_TYPE_NULL }, + { CrmResourceType::CRM_ACL_GROUP, SAI_OBJECT_TYPE_NULL }, + { CrmResourceType::CRM_ACL_ENTRY, SAI_OBJECT_TYPE_NULL }, + { CrmResourceType::CRM_ACL_COUNTER, SAI_OBJECT_TYPE_NULL }, + { CrmResourceType::CRM_FDB_ENTRY, SAI_OBJECT_TYPE_FDB_ENTRY }, + { CrmResourceType::CRM_IPMC_ENTRY, SAI_OBJECT_TYPE_NULL}, + { CrmResourceType::CRM_SNAT_ENTRY, SAI_OBJECT_TYPE_NULL }, + { CrmResourceType::CRM_DNAT_ENTRY, SAI_OBJECT_TYPE_NULL }, { CrmResourceType::CRM_MPLS_INSEG, SAI_OBJECT_TYPE_INSEG_ENTRY }, - { CrmResourceType::CRM_MPLS_NEXTHOP, SAI_SWITCH_ATTR_AVAILABLE_IPV4_NEXTHOP_ENTRY }, + { CrmResourceType::CRM_MPLS_NEXTHOP, SAI_OBJECT_TYPE_NEXT_HOP }, { CrmResourceType::CRM_SRV6_MY_SID_ENTRY, SAI_OBJECT_TYPE_MY_SID_ENTRY }, - { CrmResourceType::CRM_SRV6_NEXTHOP, SAI_SWITCH_ATTR_AVAILABLE_IPV6_NEXTHOP_ENTRY }, + { CrmResourceType::CRM_SRV6_NEXTHOP, SAI_OBJECT_TYPE_NEXT_HOP }, { CrmResourceType::CRM_NEXTHOP_GROUP_MAP, SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MAP }, }; +const map crmResAddrFamilyAttrMap = +{ + { CrmResourceType::CRM_IPV4_ROUTE, SAI_ROUTE_ENTRY_ATTR_IP_ADDR_FAMILY }, + { CrmResourceType::CRM_IPV6_ROUTE, SAI_ROUTE_ENTRY_ATTR_IP_ADDR_FAMILY }, + { CrmResourceType::CRM_IPV4_NEIGHBOR, SAI_NEIGHBOR_ENTRY_ATTR_IP_ADDR_FAMILY }, + { CrmResourceType::CRM_IPV6_NEIGHBOR, SAI_NEIGHBOR_ENTRY_ATTR_IP_ADDR_FAMILY }, +}; + +const map crmResAddrFamilyValMap = +{ + { CrmResourceType::CRM_IPV4_ROUTE, SAI_IP_ADDR_FAMILY_IPV4 }, + { CrmResourceType::CRM_IPV6_ROUTE, SAI_IP_ADDR_FAMILY_IPV6 }, + { CrmResourceType::CRM_IPV4_NEIGHBOR, SAI_IP_ADDR_FAMILY_IPV4 }, + { CrmResourceType::CRM_IPV6_NEIGHBOR, SAI_IP_ADDR_FAMILY_IPV6 }, +}; + const map crmThreshTypeResMap = { { "ipv4_route_threshold_type", CrmResourceType::CRM_IPV4_ROUTE }, @@ -325,7 +361,6 @@ void CrmOrch::handleSetCommand(const string& key, const vector& else { SWSS_LOG_ERROR("Failed to parse CRM %s configuration. Unknown attribute %s.\n", key.c_str(), field.c_str()); - return; } } catch (const exception& e) @@ -465,6 +500,74 @@ void CrmOrch::doTask(SelectableTimer &timer) checkCrmThresholds(); } +bool CrmOrch::getResAvailability(CrmResourceType type, CrmResourceEntry &res) +{ + sai_attribute_t attr; + uint64_t availCount = 0; + sai_status_t status = SAI_STATUS_SUCCESS; + + sai_object_type_t objType = crmResSaiObjAttrMap.at(type); + + if (objType != SAI_OBJECT_TYPE_NULL) + { + uint32_t attrCount = 0; + + if ((type == CrmResourceType::CRM_IPV4_ROUTE) || (type == CrmResourceType::CRM_IPV6_ROUTE) || + (type == CrmResourceType::CRM_IPV4_NEIGHBOR) || (type == CrmResourceType::CRM_IPV6_NEIGHBOR)) + { + attr.id = crmResAddrFamilyAttrMap.at(type); + attr.value.s32 = crmResAddrFamilyValMap.at(type); + attrCount = 1; + } + else if (type == CrmResourceType::CRM_MPLS_NEXTHOP) + { + attr.id = SAI_NEXT_HOP_ATTR_TYPE; + attr.value.s32 = SAI_NEXT_HOP_TYPE_MPLS; + attrCount = 1; + } + else if (type == CrmResourceType::CRM_SRV6_NEXTHOP) + { + attr.id = SAI_NEXT_HOP_ATTR_TYPE; + attr.value.s32 = SAI_NEXT_HOP_TYPE_SRV6_SIDLIST; + attrCount = 1; + } + + status = sai_object_type_get_availability(gSwitchId, objType, attrCount, &attr, &availCount); + } + + if ((status != SAI_STATUS_SUCCESS) || (objType == SAI_OBJECT_TYPE_NULL)) + { + if (crmResSaiAvailAttrMap.find(type) != crmResSaiAvailAttrMap.end()) + { + attr.id = crmResSaiAvailAttrMap.at(type); + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + } + + if ((status == SAI_STATUS_NOT_SUPPORTED) || + (status == SAI_STATUS_NOT_IMPLEMENTED) || + SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || + SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status)) + { + // mark unsupported resources + res.resStatus = CrmResourceStatus::CRM_RES_NOT_SUPPORTED; + SWSS_LOG_NOTICE("CRM resource %s not supported", crmResTypeNameMap.at(type).c_str()); + return false; + } + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get availability counter for %s CRM resourse", crmResTypeNameMap.at(type).c_str()); + return false; + } + + availCount = attr.value.u32; + } + + res.countersMap[CRM_COUNTERS_TABLE_KEY].availableCounter = static_cast(availCount); + + return true; +} + void CrmOrch::getResAvailableCounters() { SWSS_LOG_ENTER(); @@ -491,33 +594,13 @@ void CrmOrch::getResAvailableCounters() case CrmResourceType::CRM_IPMC_ENTRY: case CrmResourceType::CRM_SNAT_ENTRY: case CrmResourceType::CRM_DNAT_ENTRY: + case CrmResourceType::CRM_MPLS_INSEG: + case CrmResourceType::CRM_NEXTHOP_GROUP_MAP: + case CrmResourceType::CRM_SRV6_MY_SID_ENTRY: + case CrmResourceType::CRM_MPLS_NEXTHOP: + case CrmResourceType::CRM_SRV6_NEXTHOP: { - sai_attribute_t attr; - attr.id = crmResSaiAvailAttrMap.at(res.first); - - sai_status_t status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); - if (status != SAI_STATUS_SUCCESS) - { - if ((status == SAI_STATUS_NOT_SUPPORTED) || - (status == SAI_STATUS_NOT_IMPLEMENTED) || - SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || - SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status)) - { - // mark unsupported resources - res.second.resStatus = CrmResourceStatus::CRM_RES_NOT_SUPPORTED; - SWSS_LOG_NOTICE("Switch attribute %u not supported", attr.id); - break; - } - SWSS_LOG_ERROR("Failed to get switch attribute %u , rv:%d", attr.id, status); - task_process_status handle_status = handleSaiGetStatus(SAI_API_SWITCH, status); - if (handle_status != task_process_status::task_success) - { - break; - } - } - - res.second.countersMap[CRM_COUNTERS_TABLE_KEY].availableCounter = attr.value.u32; - + getResAvailability(res.first, res.second); break; } @@ -579,119 +662,6 @@ void CrmOrch::getResAvailableCounters() break; } - case CrmResourceType::CRM_MPLS_INSEG: - case CrmResourceType::CRM_NEXTHOP_GROUP_MAP: - { - sai_object_type_t objType = static_cast(crmResSaiAvailAttrMap.at(res.first)); - uint64_t availCount = 0; - sai_status_t status = sai_object_type_get_availability(gSwitchId, objType, 0, nullptr, &availCount); - if (status != SAI_STATUS_SUCCESS) - { - if ((status == SAI_STATUS_NOT_SUPPORTED) || - (status == SAI_STATUS_NOT_IMPLEMENTED) || - SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || - SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status)) - { - // mark unsupported resources - res.second.resStatus = CrmResourceStatus::CRM_RES_NOT_SUPPORTED; - SWSS_LOG_NOTICE("CRM Resource %s not supported", crmResTypeNameMap.at(res.first).c_str()); - break; - } - SWSS_LOG_ERROR("Failed to get availability for object_type %u , rv:%d", objType, status); - break; - } - - res.second.countersMap[CRM_COUNTERS_TABLE_KEY].availableCounter = static_cast(availCount); - - break; - } - - case CrmResourceType::CRM_MPLS_NEXTHOP: - { - sai_object_type_t objType = static_cast(crmResSaiAvailAttrMap.at(res.first)); - sai_attribute_t attr; - uint64_t availCount = 0; - - attr.id = SAI_NEXT_HOP_ATTR_TYPE; - attr.value.s32 = SAI_NEXT_HOP_TYPE_MPLS; - sai_status_t status = sai_object_type_get_availability(gSwitchId, objType, 1, &attr, &availCount); - if (status != SAI_STATUS_SUCCESS) - { - if ((status == SAI_STATUS_NOT_SUPPORTED) || - (status == SAI_STATUS_NOT_IMPLEMENTED) || - SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || - SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status)) - { - // mark unsupported resources - res.second.resStatus = CrmResourceStatus::CRM_RES_NOT_SUPPORTED; - SWSS_LOG_NOTICE("CRM Resource %s not supported", crmResTypeNameMap.at(res.first).c_str()); - break; - } - SWSS_LOG_ERROR("Failed to get availability for object_type %u , rv:%d", objType, status); - break; - } - - res.second.countersMap[CRM_COUNTERS_TABLE_KEY].availableCounter = static_cast(availCount); - - break; - } - - case CrmResourceType::CRM_SRV6_MY_SID_ENTRY: - { - sai_object_type_t objType = static_cast(crmResSaiAvailAttrMap.at(res.first)); - uint64_t availCount = 0; - sai_status_t status = sai_object_type_get_availability(gSwitchId, objType, 0, nullptr, &availCount); - if (status != SAI_STATUS_SUCCESS) - { - if ((status == SAI_STATUS_NOT_SUPPORTED) || - (status == SAI_STATUS_NOT_IMPLEMENTED) || - SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || - SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status)) - { - // mark unsupported resources - res.second.resStatus = CrmResourceStatus::CRM_RES_NOT_SUPPORTED; - SWSS_LOG_NOTICE("CRM Resource %s not supported", crmResTypeNameMap.at(res.first).c_str()); - break; - } - SWSS_LOG_ERROR("Failed to get availability for object_type %u , rv:%d", objType, status); - break; - } - - res.second.countersMap[CRM_COUNTERS_TABLE_KEY].availableCounter = static_cast(availCount); - - break; - } - - case CrmResourceType::CRM_SRV6_NEXTHOP: - { - sai_object_type_t objType = static_cast(crmResSaiAvailAttrMap.at(res.first)); - sai_attribute_t attr; - uint64_t availCount = 0; - - attr.id = SAI_NEXT_HOP_ATTR_TYPE; - attr.value.s32 = SAI_NEXT_HOP_TYPE_SRV6_SIDLIST; - sai_status_t status = sai_object_type_get_availability(gSwitchId, objType, 1, &attr, &availCount); - if (status != SAI_STATUS_SUCCESS) - { - if ((status == SAI_STATUS_NOT_SUPPORTED) || - (status == SAI_STATUS_NOT_IMPLEMENTED) || - SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || - SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status)) - { - // mark unsupported resources - res.second.resStatus = CrmResourceStatus::CRM_RES_NOT_SUPPORTED; - SWSS_LOG_NOTICE("CRM Resource %s not supported", crmResTypeNameMap.at(res.first).c_str()); - break; - } - SWSS_LOG_ERROR("Failed to get availability for object_type %u , rv:%d", objType, status); - break; - } - - res.second.countersMap[CRM_COUNTERS_TABLE_KEY].availableCounter = static_cast(availCount); - - break; - } - default: SWSS_LOG_ERROR("Failed to get CRM resource type %u. Unknown resource type.\n", static_cast(res.first)); return; diff --git a/orchagent/crmorch.h b/orchagent/crmorch.h index 345caa2cf6b..f63e2a31c26 100644 --- a/orchagent/crmorch.h +++ b/orchagent/crmorch.h @@ -98,6 +98,7 @@ class CrmOrch : public Orch void doTask(Consumer &consumer); void handleSetCommand(const std::string& key, const std::vector& data); void doTask(swss::SelectableTimer &timer); + bool getResAvailability(CrmResourceType type, CrmResourceEntry &res); void getResAvailableCounters(); void updateCrmCountersTable(); void checkCrmThresholds(); diff --git a/orchagent/macsecorch.cpp b/orchagent/macsecorch.cpp index 36fbb44f896..cb11fb35e53 100644 --- a/orchagent/macsecorch.cpp +++ b/orchagent/macsecorch.cpp @@ -1183,6 +1183,18 @@ bool MACsecOrch::updateMACsecPort(MACsecPort &macsec_port, const TaskArgs &port_ if (get_value(port_attr, "enable_encrypt", alpha_boolean)) { macsec_port.m_enable_encrypt = alpha_boolean.operator bool(); + if (!updateMACsecSCs( + macsec_port, + [&macsec_port, this](MACsecOrch::MACsecSC &macsec_sc) + { + sai_attribute_t attr; + attr.id = SAI_MACSEC_SC_ATTR_ENCRYPTION_ENABLE; + attr.value.booldata = macsec_port.m_enable_encrypt; + return this->updateMACsecAttr(SAI_OBJECT_TYPE_MACSEC_SC, macsec_sc.m_sc_id, attr); + })) + { + return false; + } } if (get_value(port_attr, "send_sci", alpha_boolean)) { @@ -1212,42 +1224,76 @@ bool MACsecOrch::updateMACsecPort(MACsecPort &macsec_port, const TaskArgs &port_ SWSS_LOG_WARN("Unknown Cipher Suite %s", cipher_suite.c_str()); return false; } + if (!updateMACsecSCs( + macsec_port, + [&macsec_port, this](MACsecOrch::MACsecSC &macsec_sc) + { + sai_attribute_t attr; + attr.id = SAI_MACSEC_SC_ATTR_MACSEC_CIPHER_SUITE; + attr.value.s32 = macsec_port.m_cipher_suite; + return this->updateMACsecAttr(SAI_OBJECT_TYPE_MACSEC_SC, macsec_sc.m_sc_id, attr); + })) + { + return false; + } } swss::AlphaBoolean enable = false; if (get_value(port_attr, "enable", enable) && enable.operator bool() != macsec_port.m_enable) { - std::vector macsec_scs; macsec_port.m_enable = enable.operator bool(); - for (auto &sc : macsec_port.m_egress_scs) + if (!updateMACsecSCs( + macsec_port, + [&macsec_port, &recover, this](MACsecOrch::MACsecSC &macsec_sc) + { + // Change the ACL entry action from packet action to MACsec flow + if (macsec_port.m_enable) + { + if (!this->setMACsecFlowActive(macsec_sc.m_entry_id, macsec_sc.m_flow_id, true)) + { + SWSS_LOG_WARN("Cannot change the ACL entry action from packet action to MACsec flow"); + return false; + } + auto entry_id = macsec_sc.m_entry_id; + auto flow_id = macsec_sc.m_flow_id; + recover.add_action([this, entry_id, flow_id]() + { this->setMACsecFlowActive(entry_id, flow_id, false); }); + } + else + { + this->setMACsecFlowActive(macsec_sc.m_entry_id, macsec_sc.m_flow_id, false); + } + return true; + })) { - macsec_scs.push_back(&sc.second); + return false; } - for (auto &sc : macsec_port.m_ingress_scs) + } + + recover.clear(); + return true; +} + +bool MACsecOrch::updateMACsecSCs(MACsecPort &macsec_port, std::function action) +{ + SWSS_LOG_ENTER(); + + auto sc = macsec_port.m_egress_scs.begin(); + while (sc != macsec_port.m_egress_scs.end()) + { + if (!action((sc++)->second)) { - macsec_scs.push_back(&sc.second); + return false; } - for (auto &macsec_sc : macsec_scs) + } + sc = macsec_port.m_ingress_scs.begin(); + while (sc != macsec_port.m_ingress_scs.end()) + { + if (!action((sc++)->second)) { - // Change the ACL entry action from packet action to MACsec flow - if (macsec_port.m_enable) - { - if (!setMACsecFlowActive(macsec_sc->m_entry_id, macsec_sc->m_flow_id, true)) - { - SWSS_LOG_WARN("Cannot change the ACL entry action from packet action to MACsec flow"); - return false; - } - auto entry_id = macsec_sc->m_entry_id; - auto flow_id = macsec_sc->m_flow_id; - recover.add_action([this, entry_id, flow_id]() { this->setMACsecFlowActive(entry_id, flow_id, false); }); - } - else - { - setMACsecFlowActive(macsec_sc->m_entry_id, macsec_sc->m_flow_id, false); - } + return false; } } - recover.clear(); return true; } @@ -1263,17 +1309,21 @@ bool MACsecOrch::deleteMACsecPort( bool result = true; - for (auto &sc : macsec_port.m_egress_scs) + auto sc = macsec_port.m_egress_scs.begin(); + while (sc != macsec_port.m_egress_scs.end()) { - const std::string port_sci = swss::join(':', port_name, sc.first); + const std::string port_sci = swss::join(':', port_name, sc->first); + sc ++; if (deleteMACsecSC(port_sci, SAI_MACSEC_DIRECTION_EGRESS) != task_success) { result &= false; } } - for (auto &sc : macsec_port.m_ingress_scs) + sc = macsec_port.m_ingress_scs.begin(); + while (sc != macsec_port.m_ingress_scs.end()) { - const std::string port_sci = swss::join(':', port_name, sc.first); + const std::string port_sci = swss::join(':', port_name, sc->first); + sc ++; if (deleteMACsecSC(port_sci, SAI_MACSEC_DIRECTION_INGRESS) != task_success) { result &= false; @@ -1661,9 +1711,11 @@ task_process_status MACsecOrch::deleteMACsecSC( auto result = task_success; - for (auto &sa : ctx.get_macsec_sc()->m_sa_ids) + auto sa = ctx.get_macsec_sc()->m_sa_ids.begin(); + while (sa != ctx.get_macsec_sc()->m_sa_ids.end()) { - const std::string port_sci_an = swss::join(':', port_sci, sa.first); + const std::string port_sci_an = swss::join(':', port_sci, sa->first); + sa ++; deleteMACsecSA(port_sci_an, direction); } @@ -1721,6 +1773,42 @@ bool MACsecOrch::deleteMACsecSC(sai_object_id_t sc_id) return true; } +bool MACsecOrch::updateMACsecAttr(sai_object_type_t object_type, sai_object_id_t object_id, const sai_attribute_t &attr) +{ + SWSS_LOG_ENTER(); + + sai_status_t status = SAI_STATUS_SUCCESS; + + if (object_type == SAI_OBJECT_TYPE_MACSEC_PORT) + { + status = sai_macsec_api->set_macsec_port_attribute(object_id, &attr); + } + else if (object_type == SAI_OBJECT_TYPE_MACSEC_SC) + { + status = sai_macsec_api->set_macsec_sc_attribute(object_id, &attr); + } + else if (object_type == SAI_OBJECT_TYPE_MACSEC_SA) + { + status = sai_macsec_api->set_macsec_sa_attribute(object_id, &attr); + } + else + { + SWSS_LOG_ERROR("Wrong type %s", sai_serialize_object_type(object_type).c_str()); + return false; + } + + if (status != SAI_STATUS_SUCCESS) + { + task_process_status handle_status = handleSaiSetStatus(SAI_API_MACSEC, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + return true; +} + task_process_status MACsecOrch::createMACsecSA( const std::string &port_sci_an, const TaskArgs &sa_attr, diff --git a/orchagent/macsecorch.h b/orchagent/macsecorch.h index 8856347118a..6702c75cf61 100644 --- a/orchagent/macsecorch.h +++ b/orchagent/macsecorch.h @@ -132,6 +132,7 @@ class MACsecOrch : public Orch sai_object_id_t switch_id, sai_macsec_direction_t direction); bool updateMACsecPort(MACsecPort &macsec_port, const TaskArgs & port_attr); + bool updateMACsecSCs(MACsecPort &macsec_port, std::function action); bool deleteMACsecPort( const MACsecPort &macsec_port, const std::string &port_name, @@ -179,6 +180,8 @@ class MACsecOrch : public Orch sai_macsec_direction_t direction); bool deleteMACsecSC(sai_object_id_t sc_id); + bool updateMACsecAttr(sai_object_type_t object_type, sai_object_id_t object_id, const sai_attribute_t &attr); + /* MACsec SA */ task_process_status createMACsecSA( const std::string &port_sci_an, diff --git a/orchagent/main.cpp b/orchagent/main.cpp index de96234a2d4..6ab699bcc54 100644 --- a/orchagent/main.cpp +++ b/orchagent/main.cpp @@ -574,6 +574,36 @@ int main(int argc, char **argv) attr.value.u64 = gSwitchId; attrs.push_back(attr); + if (gMySwitchType == "voq" || gMySwitchType == "fabric") + { + /* We set this long timeout in order for orchagent to wait enough time for + * response from syncd. It is needed since switch create takes more time + * than default time to create switch if there are lots of front panel ports + * and systems ports to initialize + */ + + if (gMySwitchType == "voq") + { + attr.value.u64 = (5 * SAI_REDIS_DEFAULT_SYNC_OPERATION_RESPONSE_TIMEOUT); + } + else if (gMySwitchType == "fabric") + { + attr.value.u64 = (10 * SAI_REDIS_DEFAULT_SYNC_OPERATION_RESPONSE_TIMEOUT); + } + + attr.id = SAI_REDIS_SWITCH_ATTR_SYNC_OPERATION_RESPONSE_TIMEOUT; + status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_WARN("Failed to set SAI REDIS response timeout"); + } + else + { + SWSS_LOG_NOTICE("SAI REDIS response timeout set successfully to %" PRIu64 " ", attr.value.u64); + } + } + status = sai_switch_api->create_switch(&gSwitchId, (uint32_t)attrs.size(), attrs.data()); if (status != SAI_STATUS_SUCCESS) { @@ -582,6 +612,22 @@ int main(int argc, char **argv) } SWSS_LOG_NOTICE("Create a switch, id:%" PRIu64, gSwitchId); + if (gMySwitchType == "voq" || gMySwitchType == "fabric") + { + /* Set syncd response timeout back to the default value */ + attr.id = SAI_REDIS_SWITCH_ATTR_SYNC_OPERATION_RESPONSE_TIMEOUT; + attr.value.u64 = SAI_REDIS_DEFAULT_SYNC_OPERATION_RESPONSE_TIMEOUT; + status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_WARN("Failed to set SAI REDIS response timeout to default"); + } + else + { + SWSS_LOG_NOTICE("SAI REDIS response timeout set successfully to default: %" PRIu64 " ", attr.value.u64); + } + } if (gMySwitchType != "fabric") { diff --git a/orchagent/mplsrouteorch.cpp b/orchagent/mplsrouteorch.cpp index 122bb6e8e16..ef40987a19d 100644 --- a/orchagent/mplsrouteorch.cpp +++ b/orchagent/mplsrouteorch.cpp @@ -598,8 +598,12 @@ bool RouteOrch::addLabelRoute(LabelRouteBulkContext& ctx, const NextHopGroupKey * in m_syncdLabelRoutes, then we need to update the route with a new next hop * (group) id. The old next hop (group) is then not used and the reference * count will decrease by 1. + * + * In case the entry is already pending removal in the bulk, it would be removed + * from m_syncdLabelRoutes during the bulk call. Therefore, such entries need to be + * re-created rather than set attribute. */ - if (it_route == m_syncdLabelRoutes.at(vrf_id).end()) + if (it_route == m_syncdLabelRoutes.at(vrf_id).end() || gLabelRouteBulker.bulk_entry_pending_removal(inseg_entry)) { vector inseg_attrs; if (blackhole) diff --git a/orchagent/neighorch.cpp b/orchagent/neighorch.cpp index c1eba4c0e34..42bf0643679 100644 --- a/orchagent/neighorch.cpp +++ b/orchagent/neighorch.cpp @@ -685,19 +685,6 @@ void NeighOrch::doTask(Consumer &consumer) IpAddress ip_address(key.substr(found+1)); - /* Verify Ipv4 LinkLocal and skip neighbor entry added for RFC5549 */ - if ((ip_address.getAddrScope() == IpAddress::LINK_SCOPE) && (ip_address.isV4())) - { - /* Check if this prefix is not a configured ip, if so allow */ - IpPrefix ipll_prefix(ip_address.getV4Addr(), 16); - if (!m_intfsOrch->isPrefixSubnet (ipll_prefix, alias)) - { - SWSS_LOG_NOTICE("Skip IPv4LL neighbor %s, Intf:%s op: %s ", ip_address.to_string().c_str(), alias.c_str(), op.c_str()); - it = consumer.m_toSync.erase(it); - continue; - } - } - NeighborEntry neighbor_entry = { ip_address, alias }; if (op == SET_COMMAND) @@ -807,6 +794,18 @@ bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress memcpy(neighbor_attr.value.mac, macAddress.getMac(), 6); neighbor_attrs.push_back(neighbor_attr); + if ((ip_address.getAddrScope() == IpAddress::LINK_SCOPE) && (ip_address.isV4())) + { + /* Check if this prefix is a configured ip, if not allow */ + IpPrefix ipll_prefix(ip_address.getV4Addr(), 16); + if (!m_intfsOrch->isPrefixSubnet (ipll_prefix, alias)) + { + neighbor_attr.id = SAI_NEIGHBOR_ENTRY_ATTR_NO_HOST_ROUTE; + neighbor_attr.value.booldata = 1; + neighbor_attrs.push_back(neighbor_attr); + } + } + MuxOrch* mux_orch = gDirectory.get(); bool hw_config = isHwConfigured(neighborEntry); @@ -1541,10 +1540,6 @@ bool NeighOrch::addVoqEncapIndex(string &alias, IpAddress &ip, vector gDirectory; +extern PortsOrch* gPortsOrch; +extern sai_object_id_t gSwitchId; +extern sai_object_id_t gUnderlayIfId; +extern sai_object_id_t gVirtualRouterId; +extern sai_tunnel_api_t *sai_tunnel_api; + +static const std::vector nvgreMapTypes = { + MAP_T_VLAN, + MAP_T_BRIDGE +}; + +static const std::map nvgreEncapTunnelMap = { + { MAP_T_VLAN, SAI_TUNNEL_MAP_TYPE_VLAN_ID_TO_VSID }, + { MAP_T_BRIDGE, SAI_TUNNEL_MAP_TYPE_BRIDGE_IF_TO_VSID } +}; + +static inline sai_tunnel_map_type_t get_encap_nvgre_mapper(map_type_t map) +{ + return nvgreEncapTunnelMap.at(map); +} + +static const std::map nvgreDecapTunnelMap = { + { MAP_T_VLAN, SAI_TUNNEL_MAP_TYPE_VSID_TO_VLAN_ID }, + { MAP_T_BRIDGE, SAI_TUNNEL_MAP_TYPE_VSID_TO_BRIDGE_IF } +}; + +static inline sai_tunnel_map_type_t get_decap_nvgre_mapper(map_type_t map) +{ + return nvgreDecapTunnelMap.at(map); +} + +static const map> nvgreEncapTunnelMapKeyVal = +{ + { MAP_T_VLAN, + { SAI_TUNNEL_MAP_ENTRY_ATTR_VLAN_ID_KEY, SAI_TUNNEL_MAP_ENTRY_ATTR_VSID_ID_VALUE } + }, + { MAP_T_BRIDGE, + { SAI_TUNNEL_MAP_ENTRY_ATTR_BRIDGE_ID_KEY, SAI_TUNNEL_MAP_ENTRY_ATTR_VSID_ID_VALUE } + } +}; + +static inline sai_tunnel_map_entry_attr_t get_encap_nvgre_map_key(map_type_t map) +{ + return nvgreEncapTunnelMapKeyVal.at(map).first; +} + +static inline sai_tunnel_map_entry_attr_t get_encap_nvgre_map_val(map_type_t map) +{ + return nvgreEncapTunnelMapKeyVal.at(map).second; +} + +static const map> nvgreDecapTunnelMapKeyVal = +{ + { MAP_T_VLAN, + { SAI_TUNNEL_MAP_ENTRY_ATTR_VSID_ID_KEY, SAI_TUNNEL_MAP_ENTRY_ATTR_VLAN_ID_VALUE } + }, + { MAP_T_BRIDGE, + { SAI_TUNNEL_MAP_ENTRY_ATTR_VSID_ID_KEY, SAI_TUNNEL_MAP_ENTRY_ATTR_BRIDGE_ID_VALUE } + } +}; + +static inline sai_tunnel_map_entry_attr_t get_decap_nvgre_map_key(map_type_t map) +{ + return nvgreDecapTunnelMapKeyVal.at(map).first; +} + +static inline sai_tunnel_map_entry_attr_t get_decap_nvgre_map_val(map_type_t map) +{ + return nvgreDecapTunnelMapKeyVal.at(map).second; +} + +/** @brief Creates tunnel mapper in SAI. + * + * @param sai_tunnel_map_type SAI tunnel map type e.g. VSID_TO_VLAN + * + * @return Tunnel map SAI identifier. + */ +sai_object_id_t NvgreTunnel::sai_create_tunnel_map(sai_tunnel_map_type_t sai_tunnel_map_type) +{ + sai_attribute_t attr; + std::vector tunnel_map_attrs; + + attr.id = SAI_TUNNEL_MAP_ATTR_TYPE; + attr.value.u32 = sai_tunnel_map_type; + + tunnel_map_attrs.push_back(attr); + + sai_object_id_t tunnel_map_id; + sai_status_t status = sai_tunnel_api->create_tunnel_map( + &tunnel_map_id, + gSwitchId, + static_cast(tunnel_map_attrs.size()), + tunnel_map_attrs.data() + ); + if (status != SAI_STATUS_SUCCESS) + { + throw std::runtime_error("Can't create the NVGRE tunnel map object"); + } + + return tunnel_map_id; +} + +/** @brief Removes tunnel mapper in SAI. + * + * @param sai_tunnel_map_type SAI tunnel map identifier. + * + * @return void. + */ +void NvgreTunnel::sai_remove_tunnel_map(sai_object_id_t tunnel_map_id) +{ + sai_status_t status = sai_tunnel_api->remove_tunnel_map(tunnel_map_id); + + if (status != SAI_STATUS_SUCCESS) + { + throw std::runtime_error("Can't remove the NVGRE tunnel map object"); + } +} + + +/** @brief Creates tunnel in SAI. + * + * @param ids Pointer to structure where stored tunnel and tunnel mappers identifiers. + * @param src_ip Pointer to source IP address. + * + * @return SAI tunnel identifier. + */ +sai_object_id_t NvgreTunnel::sai_create_tunnel(struct tunnel_sai_ids_t &ids, const sai_ip_address_t &src_ip, sai_object_id_t underlay_rif) +{ + sai_attribute_t attr; + std::vector tunnel_attrs; + + attr.id = SAI_TUNNEL_ATTR_TYPE; + attr.value.s32 = SAI_TUNNEL_TYPE_NVGRE; + tunnel_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE; + attr.value.oid = underlay_rif; + tunnel_attrs.push_back(attr); + + sai_object_id_t decap_map_list[MAP_T_MAX]; + uint8_t num_decap_map = 0; + + for (auto map_type : nvgreMapTypes) + { + decap_map_list[num_decap_map] = ids.tunnel_decap_id.at(map_type); + num_decap_map++; + } + + attr.id = SAI_TUNNEL_ATTR_DECAP_MAPPERS; + attr.value.objlist.count = num_decap_map; + attr.value.objlist.list = decap_map_list; + tunnel_attrs.push_back(attr); + + sai_object_id_t encap_map_list[MAP_T_MAX]; + uint8_t num_encap_map = 0; + + for (auto map_type : nvgreMapTypes) + { + encap_map_list[num_encap_map] = ids.tunnel_encap_id.at(map_type); + num_encap_map++; + } + + attr.id = SAI_TUNNEL_ATTR_ENCAP_MAPPERS; + attr.value.objlist.count = num_encap_map; + attr.value.objlist.list = encap_map_list; + tunnel_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_ATTR_ENCAP_SRC_IP; + attr.value.ipaddr = src_ip; + tunnel_attrs.push_back(attr); + + sai_object_id_t tunnel_id; + sai_status_t status = sai_tunnel_api->create_tunnel( + &tunnel_id, + gSwitchId, + static_cast(tunnel_attrs.size()), + tunnel_attrs.data() + ); + if (status != SAI_STATUS_SUCCESS) + { + throw std::runtime_error("Can't create the NVGRE tunnel object"); + } + + return tunnel_id; +} + +/** @brief Removes tunnel in SAI. + * + * @param tunnel_id Pointer to tunnel identifier. + * + * @return void. + */ +void NvgreTunnel::sai_remove_tunnel(sai_object_id_t tunnel_id) +{ + sai_status_t status = sai_tunnel_api->remove_tunnel(tunnel_id); + if (status != SAI_STATUS_SUCCESS) + { + throw std::runtime_error("Can't remove the NVGRE tunnel object"); + } +} + +/** @brief Creates tunnel termination in SAI. + * + * @param tunnel_id Tunnel identifier. + * @param src_ip Pointer to source IP address. + * @param default_vrid Virtual router identifier. + * + * @return SAI tunnel termination identifier. + */ +sai_object_id_t NvgreTunnel::sai_create_tunnel_termination(sai_object_id_t tunnel_id, const sai_ip_address_t &src_ip, sai_object_id_t default_vrid) +{ + sai_attribute_t attr; + std::vector tunnel_attrs; + + attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE; + attr.value.s32 = SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP; + tunnel_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_VR_ID; + attr.value.oid = default_vrid; + tunnel_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP; + attr.value.ipaddr = src_ip; + tunnel_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE; + attr.value.s32 = SAI_TUNNEL_TYPE_NVGRE; + tunnel_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_ACTION_TUNNEL_ID; + attr.value.oid = tunnel_id; + tunnel_attrs.push_back(attr); + + sai_object_id_t term_table_id; + sai_status_t status = sai_tunnel_api->create_tunnel_term_table_entry( + &term_table_id, + gSwitchId, + static_cast(tunnel_attrs.size()), + tunnel_attrs.data() + ); + if (status != SAI_STATUS_SUCCESS) + { + throw std::runtime_error("Can't create a tunnel term table object"); + } + + return term_table_id; +} + +/** @brief Removes tunnel termination in SAI. + * + * @param tunnel_id Pointer to tunnel termination identifier. + * + * @return void. + */ +void NvgreTunnel::sai_remove_tunnel_termination(sai_object_id_t tunnel_term_id) +{ + sai_status_t status = sai_tunnel_api->remove_tunnel_term_table_entry(tunnel_term_id); + if (status != SAI_STATUS_SUCCESS) + { + throw std::runtime_error("Can't remove a tunnel term object"); + } +} + +void NvgreTunnel::createNvgreMappers() +{ + for (auto map_type : nvgreMapTypes) + { + tunnel_ids_.tunnel_encap_id.insert( + make_pair(map_type, sai_create_tunnel_map(get_encap_nvgre_mapper(map_type))) + ); + } + + for (auto map_type : nvgreMapTypes) + { + tunnel_ids_.tunnel_decap_id.insert( + make_pair(map_type, sai_create_tunnel_map(get_decap_nvgre_mapper(map_type))) + ); + } +} + +void NvgreTunnel::removeNvgreMappers() +{ + for (auto map_type : nvgreMapTypes) + { + sai_remove_tunnel_map(getEncapMapId(map_type)); + } + + for (auto map_type : nvgreMapTypes) + { + sai_remove_tunnel_map(getDecapMapId(map_type)); + } + + tunnel_ids_.tunnel_encap_id.clear(); + tunnel_ids_.tunnel_decap_id.clear(); +} + +void NvgreTunnel::createNvgreTunnel() +{ + sai_ip_address_t ip_addr; + swss::copy(ip_addr, src_ip_); + + tunnel_ids_.tunnel_id = sai_create_tunnel(tunnel_ids_, ip_addr, gUnderlayIfId); + tunnel_ids_.tunnel_term_id = sai_create_tunnel_termination(tunnel_ids_.tunnel_id, ip_addr, gVirtualRouterId); + + SWSS_LOG_INFO("NVGRE tunnel '%s' was created", tunnel_name_.c_str()); +} + +void NvgreTunnel::removeNvgreTunnel() +{ + try + { + sai_remove_tunnel_termination(tunnel_ids_.tunnel_term_id); + sai_remove_tunnel(tunnel_ids_.tunnel_id); + } + catch(const std::runtime_error& error) + { + SWSS_LOG_ERROR("Error while removing tunnel entry. Tunnel: %s. Error: %s", tunnel_name_.c_str(), error.what()); + } + + SWSS_LOG_INFO("NVGRE tunnel '%s' was removed", tunnel_name_.c_str()); + + tunnel_ids_.tunnel_id = SAI_NULL_OBJECT_ID; + tunnel_ids_.tunnel_term_id = SAI_NULL_OBJECT_ID; +} + +NvgreTunnel::NvgreTunnel(std::string tunnelName, IpAddress srcIp) : + tunnel_name_(tunnelName), + src_ip_(srcIp) +{ + createNvgreMappers(); + createNvgreTunnel(); +} + +NvgreTunnel::~NvgreTunnel() +{ + removeNvgreTunnel(); + removeNvgreMappers(); +} + +bool NvgreTunnelOrch::addOperation(const Request& request) +{ + SWSS_LOG_ENTER(); + + auto src_ip = request.getAttrIP("src_ip"); + const auto& tunnel_name = request.getKeyString(0); + + if (isTunnelExists(tunnel_name)) + { + SWSS_LOG_WARN("NVGRE tunnel '%s' already exists", tunnel_name.c_str()); + return true; + } + + nvgre_tunnel_table_[tunnel_name] = std::unique_ptr(new NvgreTunnel(tunnel_name, src_ip)); + + return true; +} + +bool NvgreTunnelOrch::delOperation(const Request& request) +{ + SWSS_LOG_ENTER(); + + const auto& tunnel_name = request.getKeyString(0); + + if (!isTunnelExists(tunnel_name)) + { + SWSS_LOG_ERROR("NVGRE tunnel '%s' doesn't exist", tunnel_name.c_str()); + return true; + } + + nvgre_tunnel_table_.erase(tunnel_name); + + SWSS_LOG_INFO("NVGRE tunnel '%s' was removed", tunnel_name.c_str()); + + return true; +} + +/** @brief Creates tunnel map entry in SAI. + * + * @param map_type map type - VLAN or BRIDGE. + * @param vsid Virtual Subnet ID value. + * @param vlan_id VLAN ID value. + * @param bridge_obj_id SAI bridge object. + * @param encap encapsulation flag. + * + * @return SAI tunnel map entry ID. + */ +sai_object_id_t NvgreTunnel::sai_create_tunnel_map_entry( + map_type_t map_type, + sai_uint32_t vsid, + sai_vlan_id_t vlan_id, + sai_object_id_t bridge_obj_id, + bool encap) +{ + sai_attribute_t attr; + sai_object_id_t tunnel_map_entry_id; + std::vector tunnel_map_entry_attrs; + + attr.id = SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE; + attr.value.u32 = (encap) ? get_encap_nvgre_mapper(map_type) : get_decap_nvgre_mapper(map_type); + tunnel_map_entry_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP; + attr.value.oid = (encap) ? getEncapMapId(map_type) : getDecapMapId(map_type); + tunnel_map_entry_attrs.push_back(attr); + + attr.id = (encap) ? get_encap_nvgre_map_key(map_type) : get_decap_nvgre_map_val(map_type); + if (bridge_obj_id != SAI_NULL_OBJECT_ID) + { + attr.value.oid = bridge_obj_id; + } + else + { + attr.value.u16 = vlan_id; + } + + tunnel_map_entry_attrs.push_back(attr); + + attr.id = (encap) ? get_encap_nvgre_map_val(map_type) : get_decap_nvgre_map_key(map_type); + attr.value.u32 = vsid; + tunnel_map_entry_attrs.push_back(attr); + + sai_status_t status = sai_tunnel_api->create_tunnel_map_entry(&tunnel_map_entry_id, gSwitchId, + static_cast (tunnel_map_entry_attrs.size()), + tunnel_map_entry_attrs.data()); + + if (status != SAI_STATUS_SUCCESS) + { + throw std::runtime_error("Can't create the NVGRE tunnel map entry object"); + } + + return tunnel_map_entry_id; +} + + +bool NvgreTunnel::addDecapMapperEntry( + map_type_t map_type, + uint32_t vsid, + sai_vlan_id_t vlan_id, + std::string tunnel_map_entry_name, + sai_object_id_t bridge_obj) +{ + auto tunnel_map_entry_id = sai_create_tunnel_map_entry(map_type, vsid, vlan_id, bridge_obj); + + nvgre_tunnel_map_table_[tunnel_map_entry_name].map_entry_id = tunnel_map_entry_id; + nvgre_tunnel_map_table_[tunnel_map_entry_name].vlan_id = vlan_id; + nvgre_tunnel_map_table_[tunnel_map_entry_name].vsid = vsid; + + SWSS_LOG_INFO("NVGRE decap tunnel map entry '%s' for tunnel '%s' was created", + tunnel_map_entry_name.c_str(), tunnel_name_.c_str()); + + return true; +} + +bool NvgreTunnelMapOrch::addOperation(const Request& request) +{ + SWSS_LOG_ENTER(); + + auto tunnel_name = request.getKeyString(0); + NvgreTunnelOrch* tunnel_orch = gDirectory.get(); + + if (!tunnel_orch->isTunnelExists(tunnel_name)) + { + SWSS_LOG_WARN("NVGRE tunnel '%s' doesn't exist", tunnel_name.c_str()); + return true; + } + + auto tunnel_obj = tunnel_orch->getNvgreTunnel(tunnel_name); + const auto full_tunnel_map_entry_name = request.getFullKey(); + + if (tunnel_obj->isTunnelMapExists(full_tunnel_map_entry_name)) + { + SWSS_LOG_WARN("NVGRE tunnel map '%s' already exist", full_tunnel_map_entry_name.c_str()); + return true; + } + + sai_vlan_id_t vlan_id = (sai_vlan_id_t) request.getAttrVlan("vlan_id"); + Port port; + + if (!gPortsOrch->getVlanByVlanId(vlan_id, port)) + { + SWSS_LOG_WARN("VLAN ID doesn't exist: %d", vlan_id); + return true; + } + + auto vsid = static_cast(request.getAttrUint("vsid")); + if (vsid > NVGRE_VSID_MAX_VALUE) + { + SWSS_LOG_WARN("VSID is invalid: %d", vsid); + return true; + } + + if (!tunnel_obj->addDecapMapperEntry(MAP_T_VLAN, vsid, vlan_id, full_tunnel_map_entry_name)) + { + return true; + } + + return true; +} + +/** @brief Removes tunnel map entry in SAI. + * + * @param obj_id SAI tunnel map identifier. + * + * @return void. + */ +void NvgreTunnel::sai_remove_tunnel_map_entry(sai_object_id_t obj_id) +{ + sai_status_t status = SAI_STATUS_SUCCESS; + + if (obj_id != SAI_NULL_OBJECT_ID) + { + status = sai_tunnel_api->remove_tunnel_map_entry(obj_id); + } + + if (status != SAI_STATUS_SUCCESS) + { + throw std::runtime_error("Can't delete the NVGRE tunnel map entry object"); + } +} + +bool NvgreTunnel::delMapperEntry(std::string tunnel_map_entry_name) +{ + auto tunnel_map_entry_id = getMapEntryId(tunnel_map_entry_name); + + try + { + sai_remove_tunnel_map_entry(tunnel_map_entry_id); + } + catch (const std::runtime_error& error) + { + SWSS_LOG_ERROR("Error while removing decap tunnel map %s: %s", + tunnel_map_entry_name.c_str(), error.what()); + return false; + } + + nvgre_tunnel_map_table_.erase(tunnel_map_entry_name); + + SWSS_LOG_INFO("NVGRE tunnel map entry '%s' for tunnel '%s' was removed", + tunnel_map_entry_name.c_str(), tunnel_name_.c_str()); + + return true; +} + +bool NvgreTunnelMapOrch::delOperation(const Request& request) +{ + SWSS_LOG_ENTER(); + + const auto& tunnel_name = request.getKeyString(0); + NvgreTunnelOrch* tunnel_orch = gDirectory.get(); + auto tunnel_obj = tunnel_orch->getNvgreTunnel(tunnel_name); + const auto& full_tunnel_map_entry_name = request.getFullKey(); + + if (!tunnel_orch->isTunnelExists(tunnel_name)) + { + SWSS_LOG_WARN("NVGRE tunnel '%s' does not exist", tunnel_name.c_str()); + return true; + } + + if (!tunnel_obj->isTunnelMapExists(full_tunnel_map_entry_name)) + { + SWSS_LOG_WARN("NVGRE tunnel map '%s' does not exist", + full_tunnel_map_entry_name.c_str()); + return true; + } + + if (!tunnel_obj->delMapperEntry(full_tunnel_map_entry_name)) + { + return true; + } + + return true; +} diff --git a/orchagent/nvgreorch.h b/orchagent/nvgreorch.h new file mode 100644 index 00000000000..82092565ac7 --- /dev/null +++ b/orchagent/nvgreorch.h @@ -0,0 +1,167 @@ +#pragma once + +#include + +#include "sai.h" +#include "orch.h" +#include "request_parser.h" +#include "portsorch.h" + +typedef enum { + MAP_T_VLAN = 0, + MAP_T_BRIDGE = 1, + MAP_T_MAX = 2 +} map_type_t; + +struct tunnel_sai_ids_t +{ + std::map tunnel_encap_id; + std::map tunnel_decap_id; + sai_object_id_t tunnel_id; + sai_object_id_t tunnel_term_id; +}; + +typedef struct nvgre_tunnel_map_entry_s +{ + sai_object_id_t map_entry_id; + sai_vlan_id_t vlan_id; + uint32_t vsid; +} nvgre_tunnel_map_entry_t; + +const request_description_t nvgre_tunnel_request_description = { + { REQ_T_STRING }, + { + { "src_ip", REQ_T_IP }, + }, + { "src_ip" } +}; + +typedef std::map NvgreTunnelMapTable; + +class NvgreTunnel +{ +public: + NvgreTunnel(std::string tunnelName, IpAddress srcIp); + ~NvgreTunnel(); + + bool isTunnelMapExists(const std::string& name) const + { + return nvgre_tunnel_map_table_.find(name) != std::end(nvgre_tunnel_map_table_); + } + + sai_object_id_t getDecapMapId(map_type_t type) const + { + return tunnel_ids_.tunnel_decap_id.at(type); + } + + sai_object_id_t getEncapMapId(map_type_t type) const + { + return tunnel_ids_.tunnel_encap_id.at(type); + } + + sai_object_id_t getMapEntryId(std::string tunnel_map_entry_name) + { + return nvgre_tunnel_map_table_.at(tunnel_map_entry_name).map_entry_id; + } + + sai_object_id_t getMapEntryVlanId(std::string tunnel_map_entry_name) + { + return nvgre_tunnel_map_table_.at(tunnel_map_entry_name).vlan_id; + } + + sai_object_id_t getMapEntryVsid(std::string tunnel_map_entry_name) + { + return nvgre_tunnel_map_table_.at(tunnel_map_entry_name).vsid; + } + + bool addDecapMapperEntry(map_type_t map_type, uint32_t vsid, sai_vlan_id_t vlan_id, std::string tunnel_map_entry_name, sai_object_id_t bridge_obj=SAI_NULL_OBJECT_ID); + + bool delMapperEntry(std::string tunnel_map_entry_name); + +private: + void createNvgreMappers(); + void removeNvgreMappers(); + + void createNvgreTunnel(); + void removeNvgreTunnel(); + + sai_object_id_t sai_create_tunnel_map(sai_tunnel_map_type_t sai_tunnel_map_type); + void sai_remove_tunnel_map(sai_object_id_t tunnel_map_id); + + sai_object_id_t sai_create_tunnel(struct tunnel_sai_ids_t &ids, const sai_ip_address_t &src_ip, sai_object_id_t underlay_rif); + void sai_remove_tunnel(sai_object_id_t tunnel_id); + + sai_object_id_t sai_create_tunnel_termination(sai_object_id_t tunnel_id, const sai_ip_address_t &src_ip, sai_object_id_t default_vrid); + void sai_remove_tunnel_termination(sai_object_id_t tunnel_term_id); + + sai_object_id_t sai_create_tunnel_map_entry(map_type_t map_type, sai_uint32_t vsid, sai_vlan_id_t vlan_id, sai_object_id_t bridge_obj_id, bool encap=false); + void sai_remove_tunnel_map_entry(sai_object_id_t obj_id); + + std::string tunnel_name_; + IpAddress src_ip_; + tunnel_sai_ids_t tunnel_ids_; + + NvgreTunnelMapTable nvgre_tunnel_map_table_; +}; + +typedef std::map> NvgreTunnelTable; + +class NvgreTunnelRequest : public Request +{ +public: + NvgreTunnelRequest() : Request(nvgre_tunnel_request_description, '|') { } +}; + +class NvgreTunnelOrch : public Orch2 +{ +public: + NvgreTunnelOrch(DBConnector *db, const std::string& tableName) : + Orch2(db, tableName, request_) + { } + + bool isTunnelExists(const std::string& tunnelName) const + { + return nvgre_tunnel_table_.find(tunnelName) != std::end(nvgre_tunnel_table_); + } + + NvgreTunnel* getNvgreTunnel(const std::string& tunnelName) + { + return nvgre_tunnel_table_.at(tunnelName).get(); + } + +private: + virtual bool addOperation(const Request& request); + virtual bool delOperation(const Request& request); + + NvgreTunnelRequest request_; + NvgreTunnelTable nvgre_tunnel_table_; +}; + +const request_description_t nvgre_tunnel_map_request_description = { + { REQ_T_STRING, REQ_T_STRING }, + { + { "vsid", REQ_T_UINT }, + { "vlan_id", REQ_T_VLAN }, + }, + { "vsid", "vlan_id" } +}; + +class NvgreTunnelMapRequest : public Request +{ +public: + NvgreTunnelMapRequest() : Request(nvgre_tunnel_map_request_description, '|') { } +}; + +class NvgreTunnelMapOrch : public Orch2 +{ +public: + NvgreTunnelMapOrch(DBConnector *db, const std::string& tableName) : + Orch2(db, tableName, request_) + {} + +private: + virtual bool addOperation(const Request& request); + virtual bool delOperation(const Request& request); + + NvgreTunnelMapRequest request_; +}; \ No newline at end of file diff --git a/orchagent/orch.cpp b/orchagent/orch.cpp index 0992e329a43..a9c5c9afcb9 100644 --- a/orchagent/orch.cpp +++ b/orchagent/orch.cpp @@ -410,7 +410,8 @@ void Orch::removeMeFromObjsReferencedByMe( const string &table, const string &obj_name, const string &field, - const string &old_referenced_obj_name) + const string &old_referenced_obj_name, + bool remove_field) { vector objects = tokenize(old_referenced_obj_name, list_item_delimiter); for (auto &obj : objects) @@ -426,6 +427,12 @@ void Orch::removeMeFromObjsReferencedByMe( referenced_table.c_str(), ref_obj_name.c_str(), to_string(old_referenced_obj.m_objsDependingOnMe.size()).c_str()); } + + if (remove_field) + { + auto &referencing_object = (*type_maps[table])[obj_name]; + referencing_object.m_objsReferencingByMe.erase(field); + } } void Orch::setObjectReference( @@ -439,7 +446,7 @@ void Orch::setObjectReference( auto field_ref = obj.m_objsReferencingByMe.find(field); if (field_ref != obj.m_objsReferencingByMe.end()) - removeMeFromObjsReferencedByMe(type_maps, table, obj_name, field, field_ref->second); + removeMeFromObjsReferencedByMe(type_maps, table, obj_name, field, field_ref->second, false); obj.m_objsReferencingByMe[field] = referenced_obj; @@ -459,16 +466,44 @@ void Orch::setObjectReference( } } +bool Orch::doesObjectExist( + type_map &type_maps, + const string &table, + const string &obj_name, + const string &field, + string &referenced_obj) +{ + auto &&searchRef = (*type_maps[table]).find(obj_name); + if (searchRef != (*type_maps[table]).end()) + { + auto &obj = searchRef->second; + auto &&searchReferencingObjectRef = obj.m_objsReferencingByMe.find(field); + if (searchReferencingObjectRef != obj.m_objsReferencingByMe.end()) + { + referenced_obj = searchReferencingObjectRef->second; + return true; + } + } + + return false; +} + void Orch::removeObject( type_map &type_maps, const string &table, const string &obj_name) { - auto &obj = (*type_maps[table])[obj_name]; + auto &&searchRef = (*type_maps[table]).find(obj_name); + if (searchRef == (*type_maps[table]).end()) + { + return; + } + + auto &obj = searchRef->second; for (auto field_ref : obj.m_objsReferencingByMe) { - removeMeFromObjsReferencedByMe(type_maps, table, obj_name, field_ref.first, field_ref.second); + removeMeFromObjsReferencedByMe(type_maps, table, obj_name, field_ref.first, field_ref.second, false); } // Update the field store @@ -847,7 +882,7 @@ task_process_status Orch::handleSaiCreateStatus(sai_api_t api, sai_status_t stat default: SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - exit(EXIT_FAILURE); + abort(); } break; case SAI_API_HOSTIF: @@ -865,7 +900,7 @@ task_process_status Orch::handleSaiCreateStatus(sai_api_t api, sai_status_t stat default: SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - exit(EXIT_FAILURE); + abort(); } default: switch (status) @@ -876,7 +911,7 @@ task_process_status Orch::handleSaiCreateStatus(sai_api_t api, sai_status_t stat default: SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - exit(EXIT_FAILURE); + abort(); } } return task_need_retry; @@ -917,12 +952,12 @@ task_process_status Orch::handleSaiSetStatus(sai_api_t api, sai_status_t status, default: SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - exit(EXIT_FAILURE); + abort(); } default: SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - exit(EXIT_FAILURE); + abort(); } return task_need_retry; @@ -950,7 +985,7 @@ task_process_status Orch::handleSaiRemoveStatus(sai_api_t api, sai_status_t stat default: SWSS_LOG_ERROR("Encountered failure in remove operation, exiting orchagent, SAI API: %s, status: %s", sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - exit(EXIT_FAILURE); + abort(); } return task_need_retry; } diff --git a/orchagent/orch.h b/orchagent/orch.h index 46a5d446ce9..ea5b4827847 100644 --- a/orchagent/orch.h +++ b/orchagent/orch.h @@ -233,9 +233,11 @@ class Orch bool parseReference(type_map &type_maps, std::string &ref, const std::string &table_name, std::string &object_name); ref_resolve_status resolveFieldRefArray(type_map&, const std::string&, const std::string&, swss::KeyOpFieldsValuesTuple&, std::vector&, std::string&); void setObjectReference(type_map&, const std::string&, const std::string&, const std::string&, const std::string&); + bool doesObjectExist(type_map&, const std::string&, const std::string&, const std::string&, std::string&); void removeObject(type_map&, const std::string&, const std::string&); bool isObjectBeingReferenced(type_map&, const std::string&, const std::string&); std::string objectReferenceInfo(type_map&, const std::string&, const std::string&); + void removeMeFromObjsReferencedByMe(type_map &type_maps, const std::string &table, const std::string &obj_name, const std::string &field, const std::string &old_referenced_obj_name, bool remove_field=true); /* Note: consumer will be owned by this class */ void addExecutor(Executor* executor); @@ -250,7 +252,6 @@ class Orch ResponsePublisher m_publisher; private: - void removeMeFromObjsReferencedByMe(type_map &type_maps, const std::string &table, const std::string &obj_name, const std::string &field, const std::string &old_referenced_obj_name); void addConsumer(swss::DBConnector *db, std::string tableName, int pri = default_orch_pri); }; diff --git a/orchagent/orchdaemon.cpp b/orchagent/orchdaemon.cpp index 8329a68f8c2..f59c5c39022 100644 --- a/orchagent/orchdaemon.cpp +++ b/orchagent/orchdaemon.cpp @@ -41,6 +41,7 @@ PbhOrch *gPbhOrch; MirrorOrch *gMirrorOrch; CrmOrch *gCrmOrch; BufferOrch *gBufferOrch; +QosOrch *gQosOrch; SwitchOrch *gSwitchOrch; Directory gDirectory; NatOrch *gNatOrch; @@ -49,6 +50,7 @@ IsoGrpOrch *gIsoGrpOrch; MACsecOrch *gMacsecOrch; DebugCounterOrch *gDebugCounterOrch; CoppOrch *gCoppOrch; +P4Orch *gP4Orch; BfdOrch *gBfdOrch; Srv6Orch *gSrv6Orch; @@ -92,6 +94,9 @@ bool OrchDaemon::init() SWSS_LOG_ENTER(); string platform = getenv("platform") ? getenv("platform") : ""; + + gCrmOrch = new CrmOrch(m_configDb, CFG_CRM_TABLE_NAME); + TableConnector stateDbSwitchTable(m_stateDb, "SWITCH_CAPABILITY"); TableConnector app_switch_table(m_applDb, APP_SWITCH_TABLE_NAME); TableConnector conf_asic_sensors(m_configDb, CFG_ASIC_SENSORS_TABLE_NAME); @@ -119,7 +124,6 @@ bool OrchDaemon::init() { APP_MCLAG_FDB_TABLE_NAME, FdbOrch::fdborch_pri} }; - gCrmOrch = new CrmOrch(m_configDb, CFG_CRM_TABLE_NAME); gPortsOrch = new PortsOrch(m_applDb, m_stateDb, ports_tables, m_chassisAppDb); TableConnector stateDbFdb(m_stateDb, STATE_FDB_TABLE_NAME); TableConnector stateMclagDbFdb(m_stateDb, STATE_MCLAG_REMOTE_FDB_TABLE_NAME); @@ -198,6 +202,10 @@ bool OrchDaemon::init() EvpnNvoOrch* evpn_nvo_orch = new EvpnNvoOrch(m_applDb, APP_VXLAN_EVPN_NVO_TABLE_NAME); gDirectory.set(evpn_nvo_orch); + NvgreTunnelOrch *nvgre_tunnel_orch = new NvgreTunnelOrch(m_configDb, CFG_NVGRE_TUNNEL_TABLE_NAME); + gDirectory.set(nvgre_tunnel_orch); + NvgreTunnelMapOrch *nvgre_tunnel_map_orch = new NvgreTunnelMapOrch(m_configDb, CFG_NVGRE_TUNNEL_MAP_TABLE_NAME); + gDirectory.set(nvgre_tunnel_map_orch); vector qos_tables = { CFG_TC_TO_QUEUE_MAP_TABLE_NAME, @@ -214,7 +222,7 @@ bool OrchDaemon::init() CFG_DSCP_TO_FC_MAP_TABLE_NAME, CFG_EXP_TO_FC_MAP_TABLE_NAME }; - QosOrch *qos_orch = new QosOrch(m_configDb, qos_tables); + gQosOrch = new QosOrch(m_configDb, qos_tables); vector buffer_tables = { APP_BUFFER_POOL_TABLE_NAME, @@ -324,7 +332,7 @@ bool OrchDaemon::init() * For cases when Orch has to process tables in specific order, like PortsOrch during warm start, it has to override Orch::doTask() */ - m_orchList = { gSwitchOrch, gCrmOrch, gPortsOrch, gBufferOrch, mux_orch, mux_cb_orch, gIntfsOrch, gNeighOrch, gNhgMapOrch, gNhgOrch, gCbfNhgOrch, gRouteOrch, gCoppOrch, qos_orch, wm_orch, policer_orch, tunnel_decap_orch, sflow_orch, gDebugCounterOrch, gMacsecOrch, gBfdOrch, gSrv6Orch}; + m_orchList = { gSwitchOrch, gCrmOrch, gPortsOrch, gBufferOrch, mux_orch, mux_cb_orch, gIntfsOrch, gNeighOrch, gNhgMapOrch, gNhgOrch, gCbfNhgOrch, gRouteOrch, gCoppOrch, gQosOrch, wm_orch, policer_orch, tunnel_decap_orch, sflow_orch, gDebugCounterOrch, gMacsecOrch, gBfdOrch, gSrv6Orch}; bool initialize_dtel = false; if (platform == BFN_PLATFORM_SUBSTRING || platform == VS_PLATFORM_SUBSTRING) @@ -423,6 +431,8 @@ bool OrchDaemon::init() m_orchList.push_back(gIsoGrpOrch); m_orchList.push_back(gFgNhgOrch); m_orchList.push_back(mux_st_orch); + m_orchList.push_back(nvgre_tunnel_orch); + m_orchList.push_back(nvgre_tunnel_map_orch); if (m_fabricEnabled) { @@ -447,7 +457,7 @@ bool OrchDaemon::init() CFG_PFC_WD_TABLE_NAME }; - if (platform == MLNX_PLATFORM_SUBSTRING) + if ((platform == MLNX_PLATFORM_SUBSTRING) || (platform == VS_PLATFORM_SUBSTRING)) { static const vector portStatIds = @@ -605,6 +615,10 @@ bool OrchDaemon::init() m_orchList.push_back(&CounterCheckOrch::getInstance(m_configDb)); + vector p4rt_tables = {APP_P4RT_TABLE_NAME}; + gP4Orch = new P4Orch(m_applDb, p4rt_tables, vrf_orch, gCoppOrch); + m_orchList.push_back(gP4Orch); + if (WarmStart::isWarmStart()) { bool suc = warmRestoreAndSyncUp(); @@ -628,7 +642,7 @@ void OrchDaemon::flush() if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to flush redis pipeline %d", status); - exit(EXIT_FAILURE); + abort(); } // check if logroate is requested diff --git a/orchagent/orchdaemon.h b/orchagent/orchdaemon.h index aca8c286970..35e531aa15c 100644 --- a/orchagent/orchdaemon.h +++ b/orchagent/orchdaemon.h @@ -40,8 +40,10 @@ #include "mlagorch.h" #include "muxorch.h" #include "macsecorch.h" +#include "p4orch/p4orch.h" #include "bfdorch.h" #include "srv6orch.h" +#include "nvgreorch.h" using namespace swss; diff --git a/orchagent/p4orch/p4orch.cpp b/orchagent/p4orch/p4orch.cpp index ada1fa2c778..57d50aa5ce2 100644 --- a/orchagent/p4orch/p4orch.cpp +++ b/orchagent/p4orch/p4orch.cpp @@ -171,9 +171,9 @@ void P4Orch::handlePortStatusChangeNotification(const std::string &op, const std { m_wcmpManager->pruneNextHops(port.m_alias); } - - sai_deserialize_free_port_oper_status_ntf(count, port_oper_status); } + + sai_deserialize_free_port_oper_status_ntf(count, port_oper_status); } } diff --git a/orchagent/pfc_detect_barefoot.lua b/orchagent/pfc_detect_barefoot.lua index b270549a29b..c413c5999cd 100644 --- a/orchagent/pfc_detect_barefoot.lua +++ b/orchagent/pfc_detect_barefoot.lua @@ -36,63 +36,68 @@ for i = n, 1, -1 do local queue_index = redis.call('HGET', 'COUNTERS_QUEUE_INDEX_MAP', KEYS[i]) local port_id = redis.call('HGET', 'COUNTERS_QUEUE_PORT_MAP', KEYS[i]) - local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' - local pfc_duration_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PAUSE_DURATION' + -- If there is no entry in COUNTERS_QUEUE_INDEX_MAP or COUNTERS_QUEUE_PORT_MAP then + -- it means KEYS[i] queue is inserted into FLEX COUNTER DB but the corresponding + -- maps haven't been updated yet. + if queue_index and port_id then + local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' + local pfc_duration_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PAUSE_DURATION' - -- Get all counters - local occupancy_bytes = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES') - local packets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS') - local pfc_rx_packets = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key) - local pfc_duration = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key) + -- Get all counters + local occupancy_bytes = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES') + local packets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS') + local pfc_rx_packets = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key) + local pfc_duration = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key) - if occupancy_bytes and packets and pfc_rx_packets and pfc_duration then - occupancy_bytes = tonumber(occupancy_bytes) - packets = tonumber(packets) - pfc_rx_packets = tonumber(pfc_rx_packets) - pfc_duration = tonumber(pfc_duration) + if occupancy_bytes and packets and pfc_rx_packets and pfc_duration then + occupancy_bytes = tonumber(occupancy_bytes) + packets = tonumber(packets) + pfc_rx_packets = tonumber(pfc_rx_packets) + pfc_duration = tonumber(pfc_duration) - local packets_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last') - local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') - local pfc_duration_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') - -- DEBUG CODE START. Uncomment to enable - local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') - -- DEBUG CODE END. + local packets_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last') + local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') + local pfc_duration_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') + -- DEBUG CODE START. Uncomment to enable + local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') + -- DEBUG CODE END. - -- If this is not a first run, then we have last values available - if packets_last and pfc_rx_packets_last and pfc_duration_last then - packets_last = tonumber(packets_last) - pfc_rx_packets_last = tonumber(pfc_rx_packets_last) - pfc_duration_last = tonumber(pfc_duration_last) + -- If this is not a first run, then we have last values available + if packets_last and pfc_rx_packets_last and pfc_duration_last then + packets_last = tonumber(packets_last) + pfc_rx_packets_last = tonumber(pfc_rx_packets_last) + pfc_duration_last = tonumber(pfc_duration_last) - -- Check actual condition of queue being in PFC storm - if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) or - -- DEBUG CODE START. Uncomment to enable - (debug_storm == "enabled") or - -- DEBUG CODE END. - (occupancy_bytes == 0 and packets - packets_last == 0 and (pfc_duration - pfc_duration_last) > poll_time * 0.8) then - if time_left <= poll_time then - redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') - redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') - redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm"]') - is_deadlock = true - time_left = detection_time + -- Check actual condition of queue being in PFC storm + if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) or + -- DEBUG CODE START. Uncomment to enable + (debug_storm == "enabled") or + -- DEBUG CODE END. + (occupancy_bytes == 0 and packets - packets_last == 0 and (pfc_duration - pfc_duration_last) > poll_time * 0.8) then + if time_left <= poll_time then + redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') + redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm"]') + is_deadlock = true + time_left = detection_time + else + time_left = time_left - poll_time + end else - time_left = time_left - poll_time - end - else - if pfc_wd_action == 'alert' and pfc_wd_status ~= 'operational' then - redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') + if pfc_wd_action == 'alert' and pfc_wd_status ~= 'operational' then + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') + end + time_left = detection_time end - time_left = detection_time end - end - -- Save values for next run - redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last', packets) - redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT', time_left) - if is_deadlock == false then - redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) - redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last', pfc_duration) + -- Save values for next run + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last', packets) + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT', time_left) + if is_deadlock == false then + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last', pfc_duration) + end end end end diff --git a/orchagent/pfc_detect_broadcom.lua b/orchagent/pfc_detect_broadcom.lua index 4f82b933176..29ed2d16339 100644 --- a/orchagent/pfc_detect_broadcom.lua +++ b/orchagent/pfc_detect_broadcom.lua @@ -35,61 +35,66 @@ for i = n, 1, -1 do local queue_index = redis.call('HGET', 'COUNTERS_QUEUE_INDEX_MAP', KEYS[i]) local port_id = redis.call('HGET', 'COUNTERS_QUEUE_PORT_MAP', KEYS[i]) - local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' - local pfc_on2off_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_ON2OFF_RX_PKTS' + -- If there is no entry in COUNTERS_QUEUE_INDEX_MAP or COUNTERS_QUEUE_PORT_MAP then + -- it means KEYS[i] queue is inserted into FLEX COUNTER DB but the corresponding + -- maps haven't been updated yet. + if queue_index and port_id then + local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' + local pfc_on2off_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_ON2OFF_RX_PKTS' - -- Get all counters - local occupancy_bytes = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES') - local packets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS') - local pfc_rx_packets = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key) - local pfc_on2off = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_on2off_key) - local queue_pause_status = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_ATTR_PAUSE_STATUS') + -- Get all counters + local occupancy_bytes = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES') + local packets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS') + local pfc_rx_packets = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key) + local pfc_on2off = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_on2off_key) + local queue_pause_status = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_ATTR_PAUSE_STATUS') - if occupancy_bytes and packets and pfc_rx_packets and pfc_on2off and queue_pause_status then - occupancy_bytes = tonumber(occupancy_bytes) - packets = tonumber(packets) - pfc_rx_packets = tonumber(pfc_rx_packets) - pfc_on2off = tonumber(pfc_on2off) + if occupancy_bytes and packets and pfc_rx_packets and pfc_on2off and queue_pause_status then + occupancy_bytes = tonumber(occupancy_bytes) + packets = tonumber(packets) + pfc_rx_packets = tonumber(pfc_rx_packets) + pfc_on2off = tonumber(pfc_on2off) - local packets_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last') - local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') - local pfc_on2off_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_on2off_key .. '_last') - local queue_pause_status_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_ATTR_PAUSE_STATUS_last') + local packets_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last') + local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') + local pfc_on2off_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_on2off_key .. '_last') + local queue_pause_status_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_ATTR_PAUSE_STATUS_last') - -- DEBUG CODE START. Uncomment to enable - local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') - -- DEBUG CODE END. + -- DEBUG CODE START. Uncomment to enable + local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') + -- DEBUG CODE END. - -- If this is not a first run, then we have last values available - if packets_last and pfc_rx_packets_last and pfc_on2off_last and queue_pause_status_last then - packets_last = tonumber(packets_last) - pfc_rx_packets_last = tonumber(pfc_rx_packets_last) - pfc_on2off_last = tonumber(pfc_on2off_last) + -- If this is not a first run, then we have last values available + if packets_last and pfc_rx_packets_last and pfc_on2off_last and queue_pause_status_last then + packets_last = tonumber(packets_last) + pfc_rx_packets_last = tonumber(pfc_rx_packets_last) + pfc_on2off_last = tonumber(pfc_on2off_last) - -- Check actual condition of queue being in PFC storm - if (pfc_rx_packets - pfc_rx_packets_last > 0 and pfc_on2off - pfc_on2off_last == 0 and queue_pause_status_last == 'true' and queue_pause_status == 'true') or - (debug_storm == "enabled") then - if time_left <= poll_time then - redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm"]') - is_deadlock = true - time_left = detection_time + -- Check actual condition of queue being in PFC storm + if (pfc_rx_packets - pfc_rx_packets_last > 0 and pfc_on2off - pfc_on2off_last == 0 and queue_pause_status_last == 'true' and queue_pause_status == 'true') or + (debug_storm == "enabled") then + if time_left <= poll_time then + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm"]') + is_deadlock = true + time_left = detection_time + else + time_left = time_left - poll_time + end else - time_left = time_left - poll_time - end - else - if pfc_wd_action == 'alert' and pfc_wd_status ~= 'operational' then - redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') + if pfc_wd_action == 'alert' and pfc_wd_status ~= 'operational' then + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') + end + time_left = detection_time end - time_left = detection_time end - end - -- Save values for next run - redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_ATTR_PAUSE_STATUS_last', queue_pause_status) - redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last', packets) - redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT', time_left) - redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) - redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_on2off_key .. '_last', pfc_on2off) + -- Save values for next run + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_ATTR_PAUSE_STATUS_last', queue_pause_status) + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last', packets) + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT', time_left) + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_on2off_key .. '_last', pfc_on2off) + end end end end diff --git a/orchagent/pfc_detect_innovium.lua b/orchagent/pfc_detect_innovium.lua index cedd51baa32..8deedeaa4f4 100644 --- a/orchagent/pfc_detect_innovium.lua +++ b/orchagent/pfc_detect_innovium.lua @@ -36,72 +36,77 @@ for i = n, 1, -1 do local queue_index = redis.call('HGET', 'COUNTERS_QUEUE_INDEX_MAP', KEYS[i]) local port_id = redis.call('HGET', 'COUNTERS_QUEUE_PORT_MAP', KEYS[i]) - local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' - local pfc_duration_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PAUSE_DURATION' - - -- Get all counters - local occupancy_bytes = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES') - local packets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS') - local pfc_rx_packets = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key) - local pfc_duration = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key) - - if occupancy_bytes and packets and pfc_rx_packets and pfc_duration then - occupancy_bytes = tonumber(occupancy_bytes) - packets = tonumber(packets) - pfc_rx_packets = tonumber(pfc_rx_packets) - pfc_duration = tonumber(pfc_duration) - - local packets_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last') - local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') - local pfc_duration_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') - -- DEBUG CODE START. Uncomment to enable - local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') - -- DEBUG CODE END. - - -- If this is not a first run, then we have last values available - if packets_last and pfc_rx_packets_last and pfc_duration_last then - packets_last = tonumber(packets_last) - pfc_rx_packets_last = tonumber(pfc_rx_packets_last) - pfc_duration_last = tonumber(pfc_duration_last) - - -- Check actual condition of queue being in PFC storm - -- if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) then - -- redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'K7_debug_1', 'YES') - - -- if (debug_storm == "enabled") then - -- redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'K7_debug_2', 'YES') - - -- if (occupancy_bytes == 0 and packets - packets_last == 0 and (pfc_duration - pfc_duration_last) > poll_time * 0.8) then - -- redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'K7_debug_3', 'YES') - - - if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) or - -- DEBUG CODE START. Uncomment to enable - (debug_storm == "enabled") or - -- DEBUG CODE END. - (occupancy_bytes == 0 and pfc_rx_packets - pfc_rx_packets_last > 0 and (pfc_duration - pfc_duration_last) > poll_time * 0.8) then - if time_left <= poll_time then - redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm"]') - is_deadlock = true - time_left = detection_time + -- If there is no entry in COUNTERS_QUEUE_INDEX_MAP or COUNTERS_QUEUE_PORT_MAP then + -- it means KEYS[i] queue is inserted into FLEX COUNTER DB but the corresponding + -- maps haven't been updated yet. + if queue_index and port_id then + local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' + local pfc_duration_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PAUSE_DURATION' + + -- Get all counters + local occupancy_bytes = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES') + local packets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS') + local pfc_rx_packets = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key) + local pfc_duration = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key) + + if occupancy_bytes and packets and pfc_rx_packets and pfc_duration then + occupancy_bytes = tonumber(occupancy_bytes) + packets = tonumber(packets) + pfc_rx_packets = tonumber(pfc_rx_packets) + pfc_duration = tonumber(pfc_duration) + + local packets_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last') + local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') + local pfc_duration_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') + -- DEBUG CODE START. Uncomment to enable + local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') + -- DEBUG CODE END. + + -- If this is not a first run, then we have last values available + if packets_last and pfc_rx_packets_last and pfc_duration_last then + packets_last = tonumber(packets_last) + pfc_rx_packets_last = tonumber(pfc_rx_packets_last) + pfc_duration_last = tonumber(pfc_duration_last) + + -- Check actual condition of queue being in PFC storm + -- if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) then + -- redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'K7_debug_1', 'YES') + + -- if (debug_storm == "enabled") then + -- redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'K7_debug_2', 'YES') + + -- if (occupancy_bytes == 0 and packets - packets_last == 0 and (pfc_duration - pfc_duration_last) > poll_time * 0.8) then + -- redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'K7_debug_3', 'YES') + + + if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) or + -- DEBUG CODE START. Uncomment to enable + (debug_storm == "enabled") or + -- DEBUG CODE END. + (occupancy_bytes == 0 and pfc_rx_packets - pfc_rx_packets_last > 0 and (pfc_duration - pfc_duration_last) > poll_time * 0.8) then + if time_left <= poll_time then + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm"]') + is_deadlock = true + time_left = detection_time + else + time_left = time_left - poll_time + end else - time_left = time_left - poll_time - end - else - if pfc_wd_action == 'alert' and pfc_wd_status ~= 'operational' then - redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') + if pfc_wd_action == 'alert' and pfc_wd_status ~= 'operational' then + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') + end + time_left = detection_time end - time_left = detection_time end - end - -- Save values for next run - redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last', packets) - redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT', time_left) - if is_deadlock == false then - redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) - redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') - redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last', pfc_duration) + -- Save values for next run + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last', packets) + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT', time_left) + if is_deadlock == false then + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) + redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last', pfc_duration) + end end end end diff --git a/orchagent/pfc_detect_mellanox.lua b/orchagent/pfc_detect_mellanox.lua index 6df16241e91..e805ad9cff1 100644 --- a/orchagent/pfc_detect_mellanox.lua +++ b/orchagent/pfc_detect_mellanox.lua @@ -36,64 +36,69 @@ for i = n, 1, -1 do local queue_index = redis.call('HGET', 'COUNTERS_QUEUE_INDEX_MAP', KEYS[i]) local port_id = redis.call('HGET', 'COUNTERS_QUEUE_PORT_MAP', KEYS[i]) - local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' - local pfc_duration_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PAUSE_DURATION_US' + -- If there is no entry in COUNTERS_QUEUE_INDEX_MAP or COUNTERS_QUEUE_PORT_MAP then + -- it means KEYS[i] queue is inserted into FLEX COUNTER DB but the corresponding + -- maps haven't been updated yet. + if queue_index and port_id then + local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' + local pfc_duration_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PAUSE_DURATION_US' - -- Get all counters - local occupancy_bytes = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES') - local packets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS') - local pfc_rx_packets = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key) - local pfc_duration = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key) + -- Get all counters + local occupancy_bytes = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES') + local packets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS') + local pfc_rx_packets = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key) + local pfc_duration = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key) - if occupancy_bytes and packets and pfc_rx_packets and pfc_duration then - occupancy_bytes = tonumber(occupancy_bytes) - packets = tonumber(packets) - pfc_rx_packets = tonumber(pfc_rx_packets) - pfc_duration = tonumber(pfc_duration) + if occupancy_bytes and packets and pfc_rx_packets and pfc_duration then + occupancy_bytes = tonumber(occupancy_bytes) + packets = tonumber(packets) + pfc_rx_packets = tonumber(pfc_rx_packets) + pfc_duration = tonumber(pfc_duration) - local packets_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last') - local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') - local pfc_duration_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') - -- DEBUG CODE START. Uncomment to enable - local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') - -- DEBUG CODE END. + local packets_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last') + local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') + local pfc_duration_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') + -- DEBUG CODE START. Uncomment to enable + local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') + -- DEBUG CODE END. - -- If this is not a first run, then we have last values available - if packets_last and pfc_rx_packets_last and pfc_duration_last then - packets_last = tonumber(packets_last) - pfc_rx_packets_last = tonumber(pfc_rx_packets_last) - pfc_duration_last = tonumber(pfc_duration_last) - local storm_condition = (pfc_duration - pfc_duration_last) > (poll_time * 0.8) + -- If this is not a first run, then we have last values available + if packets_last and pfc_rx_packets_last and pfc_duration_last then + packets_last = tonumber(packets_last) + pfc_rx_packets_last = tonumber(pfc_rx_packets_last) + pfc_duration_last = tonumber(pfc_duration_last) + local storm_condition = (pfc_duration - pfc_duration_last) > (poll_time * 0.8) - -- Check actual condition of queue being in PFC storm - if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) or - -- DEBUG CODE START. Uncomment to enable - (debug_storm == "enabled") or - -- DEBUG CODE END. - (occupancy_bytes == 0 and packets - packets_last == 0 and storm_condition) then - if time_left <= poll_time then - redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') - redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') - redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm"]') - is_deadlock = true - time_left = detection_time + -- Check actual condition of queue being in PFC storm + if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) or + -- DEBUG CODE START. Uncomment to enable + (debug_storm == "enabled") or + -- DEBUG CODE END. + (occupancy_bytes == 0 and packets - packets_last == 0 and storm_condition) then + if time_left <= poll_time then + redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') + redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm"]') + is_deadlock = true + time_left = detection_time + else + time_left = time_left - poll_time + end else - time_left = time_left - poll_time - end - else - if pfc_wd_action == 'alert' and pfc_wd_status ~= 'operational' then - redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') + if pfc_wd_action == 'alert' and pfc_wd_status ~= 'operational' then + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') + end + time_left = detection_time end - time_left = detection_time end - end - -- Save values for next run - redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last', packets) - redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT', time_left) - if is_deadlock == false then - redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) - redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last', pfc_duration) + -- Save values for next run + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last', packets) + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT', time_left) + if is_deadlock == false then + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last', pfc_duration) + end end end end diff --git a/orchagent/pfc_detect_nephos.lua b/orchagent/pfc_detect_nephos.lua index d152fc5f8c7..648904e17a5 100644 --- a/orchagent/pfc_detect_nephos.lua +++ b/orchagent/pfc_detect_nephos.lua @@ -35,65 +35,70 @@ for i = n, 1, -1 do local queue_index = redis.call('HGET', 'COUNTERS_QUEUE_INDEX_MAP', KEYS[i]) local port_id = redis.call('HGET', 'COUNTERS_QUEUE_PORT_MAP', KEYS[i]) - local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' - local pfc_duration_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PAUSE_DURATION' + -- If there is no entry in COUNTERS_QUEUE_INDEX_MAP or COUNTERS_QUEUE_PORT_MAP then + -- it means KEYS[i] queue is inserted into FLEX COUNTER DB but the corresponding + -- maps haven't been updated yet. + if queue_index and port_id then + local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' + local pfc_duration_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PAUSE_DURATION' - -- Get all counters - local occupancy_bytes = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES') - local packets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS') - local pfc_rx_packets = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key) - local pfc_duration = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key) + -- Get all counters + local occupancy_bytes = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES') + local packets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS') + local pfc_rx_packets = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key) + local pfc_duration = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key) - if occupancy_bytes and packets and pfc_rx_packets and pfc_duration then - occupancy_bytes = tonumber(occupancy_bytes) - packets = tonumber(packets) - pfc_rx_packets = tonumber(pfc_rx_packets) - pfc_duration = tonumber(pfc_duration) + if occupancy_bytes and packets and pfc_rx_packets and pfc_duration then + occupancy_bytes = tonumber(occupancy_bytes) + packets = tonumber(packets) + pfc_rx_packets = tonumber(pfc_rx_packets) + pfc_duration = tonumber(pfc_duration) - local packets_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last') - local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') - local pfc_duration_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') - -- DEBUG CODE START. Uncomment to enable - local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') - -- DEBUG CODE END. + local packets_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last') + local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') + local pfc_duration_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') + -- DEBUG CODE START. Uncomment to enable + local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') + -- DEBUG CODE END. - -- If this is not a first run, then we have last values available - if packets_last and pfc_rx_packets_last and pfc_duration_last then - packets_last = tonumber(packets_last) - pfc_rx_packets_last = tonumber(pfc_rx_packets_last) - pfc_duration_last = tonumber(pfc_duration_last) + -- If this is not a first run, then we have last values available + if packets_last and pfc_rx_packets_last and pfc_duration_last then + packets_last = tonumber(packets_last) + pfc_rx_packets_last = tonumber(pfc_rx_packets_last) + pfc_duration_last = tonumber(pfc_duration_last) - -- Check actual condition of queue being in PFC storm - if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) or - -- DEBUG CODE START. Uncomment to enable - (debug_storm == "enabled") or - -- DEBUG CODE END. - (occupancy_bytes == 0 and packets - packets_last == 0 and (pfc_duration - pfc_duration_last) > poll_time * 0.8) then - if time_left <= poll_time then - redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm"]') - is_deadlock = true - time_left = detection_time + -- Check actual condition of queue being in PFC storm + if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) or + -- DEBUG CODE START. Uncomment to enable + (debug_storm == "enabled") or + -- DEBUG CODE END. + (occupancy_bytes == 0 and packets - packets_last == 0 and (pfc_duration - pfc_duration_last) > poll_time * 0.8) then + if time_left <= poll_time then + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm"]') + is_deadlock = true + time_left = detection_time + else + time_left = time_left - poll_time + end else - time_left = time_left - poll_time - end - else - if pfc_wd_action == 'alert' and pfc_wd_status ~= 'operational' then - redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') + if pfc_wd_action == 'alert' and pfc_wd_status ~= 'operational' then + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') + end + time_left = detection_time end - time_left = detection_time end - end - -- Save values for next run - redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last', packets) - redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT', time_left) - redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) - redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') - redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last', pfc_duration) + -- Save values for next run + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last', packets) + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT', time_left) + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) + redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last', pfc_duration) + end end end end end return rets - + diff --git a/orchagent/pfc_detect_vs.lua b/orchagent/pfc_detect_vs.lua new file mode 100644 index 00000000000..e805ad9cff1 --- /dev/null +++ b/orchagent/pfc_detect_vs.lua @@ -0,0 +1,108 @@ +-- KEYS - queue IDs +-- ARGV[1] - counters db index +-- ARGV[2] - counters table name +-- ARGV[3] - poll time interval (milliseconds) +-- return queue Ids that satisfy criteria + +local counters_db = ARGV[1] +local counters_table_name = ARGV[2] +local poll_time = tonumber(ARGV[3]) * 1000 + +local rets = {} + +redis.call('SELECT', counters_db) + +-- Iterate through each queue +local n = table.getn(KEYS) +for i = n, 1, -1 do + local counter_keys = redis.call('HKEYS', counters_table_name .. ':' .. KEYS[i]) + local counter_num = 0 + local old_counter_num = 0 + local is_deadlock = false + local pfc_wd_status = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_STATUS') + local pfc_wd_action = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_ACTION') + + local big_red_switch_mode = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'BIG_RED_SWITCH_MODE') + if not big_red_switch_mode and (pfc_wd_status == 'operational' or pfc_wd_action == 'alert') then + local detection_time = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME') + if detection_time then + detection_time = tonumber(detection_time) + local time_left = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT') + if not time_left then + time_left = detection_time + else + time_left = tonumber(time_left) + end + + local queue_index = redis.call('HGET', 'COUNTERS_QUEUE_INDEX_MAP', KEYS[i]) + local port_id = redis.call('HGET', 'COUNTERS_QUEUE_PORT_MAP', KEYS[i]) + -- If there is no entry in COUNTERS_QUEUE_INDEX_MAP or COUNTERS_QUEUE_PORT_MAP then + -- it means KEYS[i] queue is inserted into FLEX COUNTER DB but the corresponding + -- maps haven't been updated yet. + if queue_index and port_id then + local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' + local pfc_duration_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PAUSE_DURATION_US' + + -- Get all counters + local occupancy_bytes = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES') + local packets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS') + local pfc_rx_packets = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key) + local pfc_duration = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key) + + if occupancy_bytes and packets and pfc_rx_packets and pfc_duration then + occupancy_bytes = tonumber(occupancy_bytes) + packets = tonumber(packets) + pfc_rx_packets = tonumber(pfc_rx_packets) + pfc_duration = tonumber(pfc_duration) + + local packets_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last') + local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') + local pfc_duration_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') + -- DEBUG CODE START. Uncomment to enable + local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') + -- DEBUG CODE END. + + -- If this is not a first run, then we have last values available + if packets_last and pfc_rx_packets_last and pfc_duration_last then + packets_last = tonumber(packets_last) + pfc_rx_packets_last = tonumber(pfc_rx_packets_last) + pfc_duration_last = tonumber(pfc_duration_last) + local storm_condition = (pfc_duration - pfc_duration_last) > (poll_time * 0.8) + + -- Check actual condition of queue being in PFC storm + if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) or + -- DEBUG CODE START. Uncomment to enable + (debug_storm == "enabled") or + -- DEBUG CODE END. + (occupancy_bytes == 0 and packets - packets_last == 0 and storm_condition) then + if time_left <= poll_time then + redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') + redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm"]') + is_deadlock = true + time_left = detection_time + else + time_left = time_left - poll_time + end + else + if pfc_wd_action == 'alert' and pfc_wd_status ~= 'operational' then + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') + end + time_left = detection_time + end + end + + -- Save values for next run + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last', packets) + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT', time_left) + if is_deadlock == false then + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last', pfc_duration) + end + end + end + end + end +end + +return rets diff --git a/orchagent/pfc_restore.lua b/orchagent/pfc_restore.lua index 7b137a40d34..4c278526876 100644 --- a/orchagent/pfc_restore.lua +++ b/orchagent/pfc_restore.lua @@ -32,36 +32,41 @@ for i = n, 1, -1 do local queue_index = redis.call('HGET', 'COUNTERS_QUEUE_INDEX_MAP', KEYS[i]) local port_id = redis.call('HGET', 'COUNTERS_QUEUE_PORT_MAP', KEYS[i]) - local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' + -- If there is no entry in COUNTERS_QUEUE_INDEX_MAP or COUNTERS_QUEUE_PORT_MAP then + -- it means KEYS[i] queue is inserted into FLEX COUNTER DB but the corresponding + -- maps haven't been updated yet. + if queue_index and port_id then + local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' - local pfc_rx_packets = tonumber(redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key)) - local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') - -- DEBUG CODE START. Uncomment to enable - local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') - -- DEBUG CODE END. - if pfc_rx_packets_last then - pfc_rx_packets_last = tonumber(pfc_rx_packets_last) + local pfc_rx_packets = tonumber(redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key)) + local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') + -- DEBUG CODE START. Uncomment to enable + local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') + -- DEBUG CODE END. + if pfc_rx_packets_last then + pfc_rx_packets_last = tonumber(pfc_rx_packets_last) - -- Check actual condition of queue being restored from PFC storm - if (pfc_rx_packets - pfc_rx_packets_last == 0) - -- DEBUG CODE START. Uncomment to enable - and (debug_storm ~= "enabled") - -- DEBUG CODE END. - then - if time_left <= poll_time then - redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') - time_left = restoration_time + -- Check actual condition of queue being restored from PFC storm + if (pfc_rx_packets - pfc_rx_packets_last == 0) + -- DEBUG CODE START. Uncomment to enable + and (debug_storm ~= "enabled") + -- DEBUG CODE END. + then + if time_left <= poll_time then + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') + time_left = restoration_time + else + time_left = time_left - poll_time + end else - time_left = time_left - poll_time + time_left = restoration_time end - else - time_left = restoration_time end - end - -- Save values for next run - redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_RESTORATION_TIME_LEFT', time_left) - redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) + -- Save values for next run + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_RESTORATION_TIME_LEFT', time_left) + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) + end end end diff --git a/orchagent/pfc_restore_cisco-8000.lua b/orchagent/pfc_restore_cisco-8000.lua index 686de0464be..172e67b9609 100644 --- a/orchagent/pfc_restore_cisco-8000.lua +++ b/orchagent/pfc_restore_cisco-8000.lua @@ -44,7 +44,7 @@ for i = n, 1, -1 do and (debug_storm ~= "enabled") -- DEBUG CODE END. then - if time_left <= 0 then + if time_left <= poll_time then redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') time_left = restoration_time else diff --git a/orchagent/pfcactionhandler.cpp b/orchagent/pfcactionhandler.cpp index e44521f849f..6fb497812df 100644 --- a/orchagent/pfcactionhandler.cpp +++ b/orchagent/pfcactionhandler.cpp @@ -3,6 +3,7 @@ #include "logger.h" #include "sai_serialize.h" #include "portsorch.h" +#include "bufferorch.h" #include #include @@ -26,6 +27,7 @@ extern sai_object_id_t gSwitchId; extern PortsOrch *gPortsOrch; extern AclOrch * gAclOrch; +extern BufferOrch *gBufferOrch; extern sai_port_api_t *sai_port_api; extern sai_queue_api_t *sai_queue_api; extern sai_buffer_api_t *sai_buffer_api; @@ -221,7 +223,7 @@ void PfcWdActionHandler::updateWdCounters(const string& queueIdStr, const PfcWdQ PfcWdSaiDlrInitHandler::PfcWdSaiDlrInitHandler(sai_object_id_t port, sai_object_id_t queue, uint8_t queueId, shared_ptr countersTable): - PfcWdActionHandler(port, queue, queueId, countersTable) + PfcWdZeroBufferHandler(port, queue, queueId, countersTable) { SWSS_LOG_ENTER(); @@ -262,39 +264,6 @@ PfcWdSaiDlrInitHandler::~PfcWdSaiDlrInitHandler(void) } } -bool PfcWdSaiDlrInitHandler::getHwCounters(PfcWdHwStats& counters) -{ - SWSS_LOG_ENTER(); - - static const vector queueStatIds = - { - SAI_QUEUE_STAT_PACKETS, - SAI_QUEUE_STAT_DROPPED_PACKETS, - }; - - vector queueStats; - queueStats.resize(queueStatIds.size()); - - sai_status_t status = sai_queue_api->get_queue_stats( - getQueue(), - static_cast(queueStatIds.size()), - queueStatIds.data(), - queueStats.data()); - - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to fetch queue 0x%" PRIx64 " stats: %d", getQueue(), status); - return false; - } - - counters.txPkt = queueStats[0]; - counters.txDropPkt = queueStats[1]; - counters.rxPkt = 0; - counters.rxDropPkt = 0; - - return true; -} - PfcWdAclHandler::PfcWdAclHandler(sai_object_id_t port, sai_object_id_t queue, uint8_t queueId, shared_ptr
countersTable): PfcWdLossyHandler(port, queue, queueId, countersTable) @@ -472,6 +441,14 @@ PfcWdLossyHandler::PfcWdLossyHandler(sai_object_id_t port, sai_object_id_t queue { SWSS_LOG_ENTER(); + string platform = getenv("platform") ? getenv("platform") : ""; + if (platform == CISCO_8000_PLATFORM_SUBSTRING) + { + SWSS_LOG_DEBUG("Skipping in constructor PfcWdLossyHandler for platform %s on port 0x%" PRIx64, + platform.c_str(), port); + return; + } + uint8_t pfcMask = 0; if (!gPortsOrch->getPortPfc(port, &pfcMask)) @@ -491,6 +468,14 @@ PfcWdLossyHandler::~PfcWdLossyHandler(void) { SWSS_LOG_ENTER(); + string platform = getenv("platform") ? getenv("platform") : ""; + if (platform == CISCO_8000_PLATFORM_SUBSTRING) + { + SWSS_LOG_DEBUG("Skipping in destructor PfcWdLossyHandler for platform %s on port 0x%" PRIx64, + platform.c_str(), getPort()); + return; + } + uint8_t pfcMask = 0; if (!gPortsOrch->getPortPfc(getPort(), &pfcMask)) @@ -732,6 +717,25 @@ PfcWdZeroBufferHandler::ZeroBufferProfile &PfcWdZeroBufferHandler::ZeroBufferPro return instance; } +sai_object_id_t& PfcWdZeroBufferHandler::ZeroBufferProfile::getPool(bool ingress) +{ + // If there is a cached zero buffer pool, just use it + // else fetch zero buffer pool from buffer orch + // If there is one, use it and increase the reference number. + // otherwise, just return NULL OID + // PfcWdZeroBufferHandler will create it later and notify buffer orch later + auto &poolId = ingress ? m_zeroIngressBufferPool : m_zeroEgressBufferPool; + if (poolId == SAI_NULL_OBJECT_ID) + { + poolId = gBufferOrch->getZeroBufferPool(ingress); + if (poolId != SAI_NULL_OBJECT_ID) + { + gBufferOrch->lockZeroBufferPool(ingress); + } + } + return poolId; +} + sai_object_id_t PfcWdZeroBufferHandler::ZeroBufferProfile::getZeroBufferProfile(bool ingress) { SWSS_LOG_ENTER(); @@ -750,29 +754,39 @@ void PfcWdZeroBufferHandler::ZeroBufferProfile::createZeroBufferProfile(bool ing sai_attribute_t attr; vector attribs; + sai_status_t status; - // Create zero pool - attr.id = SAI_BUFFER_POOL_ATTR_SIZE; - attr.value.u64 = 0; - attribs.push_back(attr); + auto &poolId = getPool(ingress); - attr.id = SAI_BUFFER_POOL_ATTR_TYPE; - attr.value.u32 = ingress ? SAI_BUFFER_POOL_TYPE_INGRESS : SAI_BUFFER_POOL_TYPE_EGRESS; - attribs.push_back(attr); + if (SAI_NULL_OBJECT_ID == poolId) + { + // Create zero pool + attr.id = SAI_BUFFER_POOL_ATTR_SIZE; + attr.value.u64 = 0; + attribs.push_back(attr); - attr.id = SAI_BUFFER_POOL_ATTR_THRESHOLD_MODE; - attr.value.u32 = SAI_BUFFER_POOL_THRESHOLD_MODE_DYNAMIC; - attribs.push_back(attr); + attr.id = SAI_BUFFER_POOL_ATTR_TYPE; + attr.value.u32 = ingress ? SAI_BUFFER_POOL_TYPE_INGRESS : SAI_BUFFER_POOL_TYPE_EGRESS; + attribs.push_back(attr); - sai_status_t status = sai_buffer_api->create_buffer_pool( - &getPool(ingress), + attr.id = SAI_BUFFER_POOL_ATTR_THRESHOLD_MODE; + attr.value.u32 = SAI_BUFFER_POOL_THRESHOLD_MODE_STATIC; + attribs.push_back(attr); + + status = sai_buffer_api->create_buffer_pool( + &poolId, gSwitchId, static_cast(attribs.size()), attribs.data()); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to create dynamic zero buffer pool for PFC WD: %d", status); - return; + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create dynamic zero buffer pool for PFC WD: %d", status); + return; + } + + // Pass the ownership to BufferOrch + gBufferOrch->setZeroBufferPool(ingress, poolId); + gBufferOrch->lockZeroBufferPool(ingress); } // Create zero profile @@ -783,15 +797,15 @@ void PfcWdZeroBufferHandler::ZeroBufferProfile::createZeroBufferProfile(bool ing attribs.push_back(attr); attr.id = SAI_BUFFER_PROFILE_ATTR_THRESHOLD_MODE; - attr.value.u32 = SAI_BUFFER_PROFILE_THRESHOLD_MODE_DYNAMIC; + attr.value.u32 = SAI_BUFFER_PROFILE_THRESHOLD_MODE_STATIC; attribs.push_back(attr); attr.id = SAI_BUFFER_PROFILE_ATTR_BUFFER_SIZE; attr.value.u64 = 0; attribs.push_back(attr); - attr.id = SAI_BUFFER_PROFILE_ATTR_SHARED_DYNAMIC_TH; - attr.value.s8 = -8; // ALPHA_0 + attr.id = SAI_BUFFER_PROFILE_ATTR_SHARED_STATIC_TH; + attr.value.s8 = 0; attribs.push_back(attr); status = sai_buffer_api->create_buffer_profile( @@ -810,16 +824,19 @@ void PfcWdZeroBufferHandler::ZeroBufferProfile::destroyZeroBufferProfile(bool in { SWSS_LOG_ENTER(); - sai_status_t status = sai_buffer_api->remove_buffer_profile(getProfile(ingress)); - if (status != SAI_STATUS_SUCCESS) + if (getProfile(ingress) != SAI_NULL_OBJECT_ID) { - SWSS_LOG_ERROR("Failed to remove static zero buffer profile for PFC WD: %d", status); - return; + sai_status_t status = sai_buffer_api->remove_buffer_profile(getProfile(ingress)); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove static zero buffer profile for PFC WD: %d", status); + return; + } } - status = sai_buffer_api->remove_buffer_pool(getPool(ingress)); - if (status != SAI_STATUS_SUCCESS) + auto &pool = ingress ? m_zeroIngressBufferPool : m_zeroEgressBufferPool; + if (pool != SAI_NULL_OBJECT_ID) { - SWSS_LOG_ERROR("Failed to remove static zero buffer pool for PFC WD: %d", status); + gBufferOrch->unlockZeroBufferPool(ingress); } } diff --git a/orchagent/pfcactionhandler.h b/orchagent/pfcactionhandler.h index 23cabaee101..22908fbe085 100644 --- a/orchagent/pfcactionhandler.h +++ b/orchagent/pfcactionhandler.h @@ -148,10 +148,7 @@ class PfcWdZeroBufferHandler: public PfcWdLossyHandler return ingress ? m_zeroIngressBufferProfile : m_zeroEgressBufferProfile; } - sai_object_id_t& getPool(bool ingress) - { - return ingress ? m_zeroIngressBufferPool : m_zeroEgressBufferPool; - } + sai_object_id_t& getPool(bool ingress); sai_object_id_t m_zeroIngressBufferPool = SAI_NULL_OBJECT_ID; sai_object_id_t m_zeroEgressBufferPool = SAI_NULL_OBJECT_ID; @@ -165,13 +162,12 @@ class PfcWdZeroBufferHandler: public PfcWdLossyHandler // PFC queue that implements drop action by draining queue via SAI // attribute SAI_QUEUE_ATTR_PFC_DLR_INIT. -class PfcWdSaiDlrInitHandler: public PfcWdActionHandler +class PfcWdSaiDlrInitHandler: public PfcWdZeroBufferHandler { public: PfcWdSaiDlrInitHandler(sai_object_id_t port, sai_object_id_t queue, uint8_t queueId, shared_ptr
countersTable); virtual ~PfcWdSaiDlrInitHandler(void); - virtual bool getHwCounters(PfcWdHwStats& counters); }; #endif diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index 595b03acdee..e55990b2995 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -2172,7 +2172,12 @@ bool PortsOrch::createVlanHostIntf(Port& vl, string hostif_name) attrs.push_back(attr); attr.id = SAI_HOSTIF_ATTR_NAME; - strncpy(attr.value.chardata, hostif_name.c_str(), sizeof(attr.value.chardata)); + if (hostif_name.length() >= SAI_HOSTIF_NAME_SIZE) + { + SWSS_LOG_WARN("Host interface name %s is too long and will be truncated to %d bytes", hostif_name.c_str(), SAI_HOSTIF_NAME_SIZE - 1); + } + strncpy(attr.value.chardata, hostif_name.c_str(), SAI_HOSTIF_NAME_SIZE); + attr.value.chardata[SAI_HOSTIF_NAME_SIZE - 1] = '\0'; attrs.push_back(attr); sai_status_t status = sai_hostif_api->create_hostif(&vl.m_vlan_info.host_intf_id, gSwitchId, (uint32_t)attrs.size(), attrs.data()); @@ -2284,6 +2289,13 @@ sai_status_t PortsOrch::removePort(sai_object_id_t port_id) } /* else : port is in default state or not yet created */ + /* + * Remove port serdes (if exists) before removing port since this + * reference is dependency. + */ + + removePortSerdesAttribute(port_id); + sai_status_t status = sai_port_api->remove_port(port_id); if (status != SAI_STATUS_SUCCESS) { @@ -4209,6 +4221,11 @@ bool PortsOrch::addHostIntfs(Port &port, string alias, sai_object_id_t &host_int attr.id = SAI_HOSTIF_ATTR_NAME; strncpy((char *)&attr.value.chardata, alias.c_str(), SAI_HOSTIF_NAME_SIZE); + if (alias.length() >= SAI_HOSTIF_NAME_SIZE) + { + SWSS_LOG_WARN("Host interface name %s is too long and will be truncated to %d bytes", alias.c_str(), SAI_HOSTIF_NAME_SIZE - 1); + } + attr.value.chardata[SAI_HOSTIF_NAME_SIZE - 1] = '\0'; attrs.push_back(attr); sai_status_t status = sai_hostif_api->create_hostif(&host_intfs_id, gSwitchId, (uint32_t)attrs.size(), attrs.data()); @@ -5698,8 +5715,12 @@ void PortsOrch::doTask(NotificationConsumer &consumer) SWSS_LOG_NOTICE("%s oper speed is %d", port.m_alias.c_str(), speed); updateDbPortOperSpeed(port, speed); } + else + { + updateDbPortOperSpeed(port, 0); + } } - + /* update m_portList */ m_portList[port.m_alias] = port; } @@ -5759,9 +5780,9 @@ void PortsOrch::updateDbPortOperSpeed(Port &port, sai_uint32_t speed) SWSS_LOG_ENTER(); vector tuples; - FieldValueTuple tuple("speed", to_string(speed)); - tuples.push_back(tuple); - m_portTable->set(port.m_alias, tuples); + string speedStr = speed != 0 ? to_string(speed) : "N/A"; + tuples.emplace_back(std::make_pair("speed", speedStr)); + m_portStateTable.set(port.m_alias, tuples); // We don't set port.m_speed = speed here, because CONFIG_DB still hold the old // value. If we set it here, next time configure any attributes related port will @@ -5808,6 +5829,10 @@ void PortsOrch::refreshPortStatus() SWSS_LOG_INFO("%s oper speed is %d", port.m_alias.c_str(), speed); updateDbPortOperSpeed(port, speed); } + else + { + updateDbPortOperSpeed(port, 0); + } } } } diff --git a/orchagent/portsorch.h b/orchagent/portsorch.h index 93ff93bb9ed..bdfcf47ad04 100755 --- a/orchagent/portsorch.h +++ b/orchagent/portsorch.h @@ -163,6 +163,8 @@ class PortsOrch : public Orch, public Subject bool getPortIPG(sai_object_id_t port_id, uint32_t &ipg); bool setPortIPG(sai_object_id_t port_id, uint32_t ipg); + bool getPortOperStatus(const Port& port, sai_port_oper_status_t& status) const; + private: unique_ptr
m_counterTable; unique_ptr
m_counterLagTable; @@ -324,7 +326,6 @@ class PortsOrch : public Orch, public Subject task_process_status setPortInterfaceType(sai_object_id_t id, sai_port_interface_type_t interface_type); task_process_status setPortAdvInterfaceTypes(sai_object_id_t id, std::vector &interface_types); - bool getPortOperStatus(const Port& port, sai_port_oper_status_t& status) const; void updatePortOperStatus(Port &port, sai_port_oper_status_t status); bool getPortOperSpeed(const Port& port, sai_uint32_t& speed) const; @@ -365,4 +366,3 @@ class PortsOrch : public Orch, public Subject }; #endif /* SWSS_PORTSORCH_H */ - diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index edd5db34432..d1a24cb5c9a 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -25,6 +25,7 @@ extern sai_acl_api_t* sai_acl_api; extern SwitchOrch *gSwitchOrch; extern PortsOrch *gPortsOrch; +extern QosOrch *gQosOrch; extern sai_object_id_t gSwitchId; extern CrmOrch *gCrmOrch; @@ -150,6 +151,12 @@ task_process_status QosMapHandler::processWorkItem(Consumer& consumer) SWSS_LOG_ERROR("Object with name:%s not found.", qos_object_name.c_str()); return task_process_status::task_invalid_entry; } + if (gQosOrch->isObjectBeingReferenced(QosOrch::getTypeMap(), qos_map_type_name, qos_object_name)) + { + auto hint = gQosOrch->objectReferenceInfo(QosOrch::getTypeMap(), qos_map_type_name, qos_object_name); + SWSS_LOG_NOTICE("Can't remove object %s due to being referenced (%s)", qos_object_name.c_str(), hint.c_str()); + return task_process_status::task_need_retry; + } if (!removeQosItem(sai_object)) { SWSS_LOG_ERROR("Failed to remove dscp_to_tc map. db name:%s sai object:%" PRIx64, qos_object_name.c_str(), sai_object); @@ -243,6 +250,9 @@ void DscpToTcMapHandler::applyDscpToTcMapToSwitch(sai_attr_id_t attr_id, sai_obj return; } + if (map_id != gQosOrch->m_globalDscpToTcMap) + gQosOrch->m_globalDscpToTcMap = map_id; + SWSS_LOG_NOTICE("Applied DSCP_TO_TC QoS map to switch successfully"); } @@ -276,6 +286,41 @@ sai_object_id_t DscpToTcMapHandler::addQosItem(const vector &at return sai_object; } +bool DscpToTcMapHandler::removeQosItem(sai_object_id_t sai_object) +{ + SWSS_LOG_ENTER(); + + if (sai_object == gQosOrch->m_globalDscpToTcMap) + { + // The current global dscp to tc map is about to be removed. + // Find another one to set to the switch or NULL in case this is the last one + const auto &dscpToTcObjects = (*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME]); + bool found = false; + for (const auto &ref : dscpToTcObjects) + { + if (ref.second.m_saiObjectId == sai_object) + continue; + SWSS_LOG_NOTICE("Current global dscp_to_tc map is about to be removed, set it to %s %" PRIx64, ref.first.c_str(), ref.second.m_saiObjectId); + applyDscpToTcMapToSwitch(SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP, ref.second.m_saiObjectId); + found = true; + break; + } + if (!found) + { + applyDscpToTcMapToSwitch(SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP, SAI_NULL_OBJECT_ID); + } + } + + SWSS_LOG_DEBUG("Removing DscpToTcMap object:%" PRIx64, sai_object); + sai_status_t sai_status = sai_qos_map_api->remove_qos_map(sai_object); + if (SAI_STATUS_SUCCESS != sai_status) + { + SWSS_LOG_ERROR("Failed to remove DSCP_TO_TC map, status:%d", sai_status); + return false; + } + return true; +} + task_process_status QosOrch::handleDscpToTcTable(Consumer& consumer) { SWSS_LOG_ENTER(); @@ -840,7 +885,7 @@ bool DscpToFcMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple & { SWSS_LOG_ENTER(); - sai_uint8_t max_fc_val = NhgMapOrch::getMaxFcVal(); + sai_uint8_t max_num_fcs = NhgMapOrch::getMaxNumFcs(); sai_attribute_t list_attr; list_attr.id = SAI_QOS_MAP_ATTR_MAP_TO_VALUE_LIST; @@ -867,10 +912,11 @@ bool DscpToFcMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple & } list_attr.value.qosmap.list[ind].key.dscp = static_cast(value); + // FC value must be in range [0, max_num_fcs) value = stoi(fvValue(*i)); - if ((value < 0) || (value > max_fc_val)) + if ((value < 0) || (value >= max_num_fcs)) { - SWSS_LOG_ERROR("FC value %d is either negative, or bigger than max value %d", value, max_fc_val); + SWSS_LOG_ERROR("FC value %d is either negative, or bigger than max value %d", value, max_num_fcs - 1); delete[] list_attr.value.qosmap.list; return false; } @@ -933,7 +979,7 @@ bool ExpToFcMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &t { SWSS_LOG_ENTER(); - sai_uint8_t max_fc_val = NhgMapOrch::getMaxFcVal(); + sai_uint8_t max_num_fcs = NhgMapOrch::getMaxNumFcs(); sai_attribute_t list_attr; list_attr.id = SAI_QOS_MAP_ATTR_MAP_TO_VALUE_LIST; @@ -960,10 +1006,11 @@ bool ExpToFcMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &t } list_attr.value.qosmap.list[ind].key.mpls_exp = static_cast(value); + // FC value must be in range [0, max_num_fcs) value = stoi(fvValue(*i)); - if ((value < 0) || (value > max_fc_val)) + if ((value < 0) || (value >= max_num_fcs)) { - SWSS_LOG_ERROR("FC value %d is either negative, or bigger than max value %hu", value, max_fc_val); + SWSS_LOG_ERROR("FC value %d is either negative, or bigger than max value %hu", value, max_num_fcs - 1); delete[] list_attr.value.qosmap.list; return false; } @@ -1193,6 +1240,12 @@ task_process_status QosOrch::handleSchedulerTable(Consumer& consumer) SWSS_LOG_ERROR("Object with name:%s not found.", qos_object_name.c_str()); return task_process_status::task_invalid_entry; } + if (gQosOrch->isObjectBeingReferenced(QosOrch::getTypeMap(), qos_map_type_name, qos_object_name)) + { + auto hint = gQosOrch->objectReferenceInfo(QosOrch::getTypeMap(), qos_map_type_name, qos_object_name); + SWSS_LOG_NOTICE("Can't remove object %s due to being referenced (%s)", qos_object_name.c_str(), hint.c_str()); + return task_process_status::task_need_retry; + } sai_status = sai_scheduler_api->remove_scheduler(sai_object); if (SAI_STATUS_SUCCESS != sai_status) { @@ -1433,6 +1486,94 @@ task_process_status QosOrch::handleQueueTable(Consumer& consumer) SWSS_LOG_ERROR("Failed to parse range:%s", tokens[1].c_str()); return task_process_status::task_invalid_entry; } + + bool donotChangeScheduler = false; + bool donotChangeWredProfile = false; + sai_object_id_t sai_scheduler_profile; + sai_object_id_t sai_wred_profile; + + if (op == SET_COMMAND) + { + string scheduler_profile_name; + resolve_result = resolveFieldRefValue(m_qos_maps, scheduler_field_name, + qos_to_ref_table_map.at(scheduler_field_name), tuple, + sai_scheduler_profile, scheduler_profile_name); + if (ref_resolve_status::success != resolve_result) + { + if (resolve_result != ref_resolve_status::field_not_found) + { + if(ref_resolve_status::not_resolved == resolve_result) + { + SWSS_LOG_INFO("Missing or invalid scheduler reference"); + return task_process_status::task_need_retry; + } + SWSS_LOG_ERROR("Resolving scheduler reference failed"); + return task_process_status::task_failed; + } + + if (doesObjectExist(m_qos_maps, CFG_QUEUE_TABLE_NAME, key, scheduler_field_name, scheduler_profile_name)) + { + SWSS_LOG_NOTICE("QUEUE|%s %s was configured but is not any more. Remove it", key.c_str(), scheduler_field_name.c_str()); + removeMeFromObjsReferencedByMe(m_qos_maps, CFG_QUEUE_TABLE_NAME, key, scheduler_field_name, scheduler_profile_name); + sai_scheduler_profile = SAI_NULL_OBJECT_ID; + } + else + { + // Did not exist and do not exist. No action + donotChangeScheduler = true; + } + } + else + { + setObjectReference(m_qos_maps, CFG_QUEUE_TABLE_NAME, key, scheduler_field_name, scheduler_profile_name); + SWSS_LOG_INFO("QUEUE %s Field %s %s has been resolved to %" PRIx64 , key.c_str(), scheduler_field_name.c_str(), scheduler_profile_name.c_str(), sai_scheduler_profile); + } + + string wred_profile_name; + resolve_result = resolveFieldRefValue(m_qos_maps, wred_profile_field_name, + qos_to_ref_table_map.at(wred_profile_field_name), tuple, + sai_wred_profile, wred_profile_name); + if (ref_resolve_status::success != resolve_result) + { + if (resolve_result != ref_resolve_status::field_not_found) + { + if(ref_resolve_status::not_resolved == resolve_result) + { + SWSS_LOG_INFO("Missing or invalid wred profile reference"); + return task_process_status::task_need_retry; + } + SWSS_LOG_ERROR("Resolving wred profile reference failed"); + return task_process_status::task_failed; + } + + if (doesObjectExist(m_qos_maps, CFG_QUEUE_TABLE_NAME, key, wred_profile_field_name, wred_profile_name)) + { + SWSS_LOG_NOTICE("QUEUE|%s %s was configured but is not any more. Remove it", key.c_str(), wred_profile_field_name.c_str()); + removeMeFromObjsReferencedByMe(m_qos_maps, CFG_QUEUE_TABLE_NAME, key, wred_profile_field_name, wred_profile_name); + sai_wred_profile = SAI_NULL_OBJECT_ID; + } + else + { + donotChangeWredProfile = true; + } + } + else + { + setObjectReference(m_qos_maps, CFG_QUEUE_TABLE_NAME, key, wred_profile_field_name, wred_profile_name); + } + } + else if (op == DEL_COMMAND) + { + removeObject(QosOrch::getTypeMap(), CFG_QUEUE_TABLE_NAME, key); + sai_scheduler_profile = SAI_NULL_OBJECT_ID; + sai_wred_profile = SAI_NULL_OBJECT_ID; + } + else + { + SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); + return task_process_status::task_invalid_entry; + } + for (string port_name : port_names) { Port port; @@ -1447,27 +1588,11 @@ task_process_status QosOrch::handleQueueTable(Consumer& consumer) { queue_ind = ind; SWSS_LOG_DEBUG("processing queue:%zd", queue_ind); - sai_object_id_t sai_scheduler_profile; - string scheduler_profile_name; - resolve_result = resolveFieldRefValue(m_qos_maps, scheduler_field_name, - qos_to_ref_table_map.at(scheduler_field_name), tuple, - sai_scheduler_profile, scheduler_profile_name); - if (ref_resolve_status::success == resolve_result) + + if (!donotChangeScheduler) { - if (op == SET_COMMAND) - { - result = applySchedulerToQueueSchedulerGroup(port, queue_ind, sai_scheduler_profile); - } - else if (op == DEL_COMMAND) - { - // NOTE: The map is un-bound from the port. But the map itself still exists. - result = applySchedulerToQueueSchedulerGroup(port, queue_ind, SAI_NULL_OBJECT_ID); - } - else - { - SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); - return task_process_status::task_invalid_entry; - } + result = applySchedulerToQueueSchedulerGroup(port, queue_ind, sai_scheduler_profile); + if (!result) { SWSS_LOG_ERROR("Failed setting field:%s to port:%s, queue:%zd, line:%d", scheduler_field_name.c_str(), port.m_alias.c_str(), queue_ind, __LINE__); @@ -1475,38 +1600,11 @@ task_process_status QosOrch::handleQueueTable(Consumer& consumer) } SWSS_LOG_DEBUG("Applied scheduler to port:%s", port_name.c_str()); } - else if (resolve_result != ref_resolve_status::field_not_found) - { - if(ref_resolve_status::not_resolved == resolve_result) - { - SWSS_LOG_INFO("Missing or invalid scheduler reference"); - return task_process_status::task_need_retry; - } - SWSS_LOG_ERROR("Resolving scheduler reference failed"); - return task_process_status::task_failed; - } - sai_object_id_t sai_wred_profile; - string wred_profile_name; - resolve_result = resolveFieldRefValue(m_qos_maps, wred_profile_field_name, - qos_to_ref_table_map.at(wred_profile_field_name), tuple, - sai_wred_profile, wred_profile_name); - if (ref_resolve_status::success == resolve_result) + if (!donotChangeWredProfile) { - if (op == SET_COMMAND) - { - result = applyWredProfileToQueue(port, queue_ind, sai_wred_profile); - } - else if (op == DEL_COMMAND) - { - // NOTE: The map is un-bound from the port. But the map itself still exists. - result = applyWredProfileToQueue(port, queue_ind, SAI_NULL_OBJECT_ID); - } - else - { - SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); - return task_process_status::task_invalid_entry; - } + result = applyWredProfileToQueue(port, queue_ind, sai_wred_profile); + if (!result) { SWSS_LOG_ERROR("Failed setting field:%s to port:%s, queue:%zd, line:%d", wred_profile_field_name.c_str(), port.m_alias.c_str(), queue_ind, __LINE__); @@ -1514,31 +1612,6 @@ task_process_status QosOrch::handleQueueTable(Consumer& consumer) } SWSS_LOG_DEBUG("Applied wred profile to port:%s", port_name.c_str()); } - else if (resolve_result != ref_resolve_status::field_not_found) - { - if (ref_resolve_status::empty == resolve_result) - { - SWSS_LOG_INFO("Missing wred reference. Unbind wred profile from queue"); - // NOTE: The wred profile is un-bound from the port. But the wred profile itself still exists - // and stays untouched. - result = applyWredProfileToQueue(port, queue_ind, SAI_NULL_OBJECT_ID); - if (!result) - { - SWSS_LOG_ERROR("Failed unbinding field:%s from port:%s, queue:%zd, line:%d", wred_profile_field_name.c_str(), port.m_alias.c_str(), queue_ind, __LINE__); - return task_process_status::task_failed; - } - } - else if (ref_resolve_status::not_resolved == resolve_result) - { - SWSS_LOG_INFO("Invalid wred reference"); - return task_process_status::task_need_retry; - } - else - { - SWSS_LOG_ERROR("Resolving wred reference failed"); - return task_process_status::task_failed; - } - } } } SWSS_LOG_DEBUG("finished"); @@ -1624,6 +1697,60 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer) KeyOpFieldsValuesTuple tuple = consumer.m_toSync.begin()->second; string key = kfvKey(tuple); string op = kfvOp(tuple); + vector port_names = tokenize(key, list_item_delimiter); + + if (op == DEL_COMMAND) + { + /* Handle DEL command. Just set all the maps to oid:0x0 */ + for (string port_name : port_names) + { + Port port; + + /* Skip port which is not found */ + if (!gPortsOrch->getPort(port_name, port)) + { + SWSS_LOG_ERROR("Failed to apply QoS maps to port %s. Port is not found.", port_name.c_str()); + continue; + } + + for (auto &mapRef : qos_to_attr_map) + { + string referenced_obj; + if (!doesObjectExist(m_qos_maps, CFG_PORT_QOS_MAP_TABLE_NAME, key, mapRef.first, referenced_obj)) + { + continue; + } + + sai_attribute_t attr; + attr.id = mapRef.second; + attr.value.oid = SAI_NULL_OBJECT_ID; + + sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove %s on port %s, rv:%d", + mapRef.first.c_str(), port_name.c_str(), status); + task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); + if (handle_status != task_process_status::task_success) + { + return task_process_status::task_invalid_entry; + } + } + SWSS_LOG_INFO("Removed %s on port %s", mapRef.first.c_str(), port_name.c_str()); + } + + if (!gPortsOrch->setPortPfc(port.m_port_id, 0)) + { + SWSS_LOG_ERROR("Failed to disable PFC on port %s", port_name.c_str()); + } + + SWSS_LOG_INFO("Disabled PFC on port %s", port_name.c_str()); + } + + removeObject(m_qos_maps, CFG_PORT_QOS_MAP_TABLE_NAME, key); + + return task_process_status::task_success; + } sai_uint8_t pfc_enable = 0; map> update_list; @@ -1634,7 +1761,7 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer) { sai_object_id_t id; string object_name; - string map_type_name = fvField(*it), map_name = fvValue(*it); + string &map_type_name = fvField(*it), &map_name = fvValue(*it); ref_resolve_status status = resolveFieldRefValue(m_qos_maps, map_type_name, qos_to_ref_table_map.at(map_type_name), tuple, id, object_name); if (status != ref_resolve_status::success) @@ -1644,6 +1771,7 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer) } update_list[qos_to_attr_map[map_type_name]] = make_pair(map_name, id); + setObjectReference(m_qos_maps, CFG_PORT_QOS_MAP_TABLE_NAME, key, map_type_name, object_name); } if (fvField(*it) == pfc_enable_name) @@ -1658,7 +1786,23 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer) } } - vector port_names = tokenize(key, list_item_delimiter); + /* Remove any map that was configured but isn't there any longer. */ + for (auto &mapRef : qos_to_attr_map) + { + auto &sai_attribute = mapRef.second; + if (update_list.find(sai_attribute) == update_list.end()) + { + string referenced_obj; + if (!doesObjectExist(m_qos_maps, CFG_PORT_QOS_MAP_TABLE_NAME, key, mapRef.first, referenced_obj)) + { + continue; + } + SWSS_LOG_NOTICE("PORT_QOS_MAP|%s %s was configured but is not any more. Remove it", key.c_str(), mapRef.first.c_str()); + removeMeFromObjsReferencedByMe(m_qos_maps, CFG_PORT_QOS_MAP_TABLE_NAME, key, mapRef.first, referenced_obj); + update_list[mapRef.second] = make_pair("NULL", SAI_NULL_OBJECT_ID); + } + } + for (string port_name : port_names) { Port port; @@ -1692,7 +1836,7 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer) } sai_uint8_t old_pfc_enable = 0; - if (!gPortsOrch->getPortPfc(port.m_port_id, &old_pfc_enable)) + if (!gPortsOrch->getPortPfc(port.m_port_id, &old_pfc_enable)) { SWSS_LOG_ERROR("Failed to retrieve PFC bits on port %s", port_name.c_str()); } diff --git a/orchagent/qosorch.h b/orchagent/qosorch.h index cd265d59ece..613bc7437ed 100644 --- a/orchagent/qosorch.h +++ b/orchagent/qosorch.h @@ -72,6 +72,7 @@ class DscpToTcMapHandler : public QosMapHandler public: bool convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attributes) override; sai_object_id_t addQosItem(const vector &attributes) override; + bool removeQosItem(sai_object_id_t sai_object); protected: void applyDscpToTcMapToSwitch(sai_attr_id_t attr_id, sai_object_id_t sai_dscp_to_tc_map); }; @@ -196,5 +197,11 @@ class QosOrch : public Orch }; std::unordered_map m_scheduler_group_port_info; + + // SAI OID of the global dscp to tc map + sai_object_id_t m_globalDscpToTcMap; + + friend QosMapHandler; + friend DscpToTcMapHandler; }; #endif /* SWSS_QOSORCH_H */ diff --git a/orchagent/routeorch.cpp b/orchagent/routeorch.cpp index 8337e6cba15..e3c27b98182 100644 --- a/orchagent/routeorch.cpp +++ b/orchagent/routeorch.cpp @@ -80,7 +80,11 @@ RouteOrch::RouteOrch(DBConnector *db, vector &tableNames, SWSS_LOG_NOTICE("Maximum number of ECMP groups supported is %d", m_maxNextHopGroupCount); + m_stateDb = shared_ptr(new DBConnector("STATE_DB", 0)); + m_stateDefaultRouteTb = unique_ptr(new Table(m_stateDb.get(), STATE_ROUTE_TABLE_NAME)); + IpPrefix default_ip_prefix("0.0.0.0/0"); + updateDefRouteState("0.0.0.0/0"); sai_route_entry_t unicast_route_entry; unicast_route_entry.vr_id = gVirtualRouterId; @@ -106,6 +110,7 @@ RouteOrch::RouteOrch(DBConnector *db, vector &tableNames, SWSS_LOG_NOTICE("Create IPv4 default route with packet action drop"); IpPrefix v6_default_ip_prefix("::/0"); + updateDefRouteState("::/0"); copy(unicast_route_entry.destination, v6_default_ip_prefix); subnet(unicast_route_entry.destination, unicast_route_entry.destination); @@ -231,6 +236,16 @@ void RouteOrch::delLinkLocalRouteToMe(sai_object_id_t vrf_id, IpPrefix linklocal SWSS_LOG_NOTICE("Deleted link local ipv6 route %s to cpu", linklocal_prefix.to_string().c_str()); } +void RouteOrch::updateDefRouteState(string ip, bool add) +{ + vector tuples; + string state = add?"ok":"na"; + FieldValueTuple tuple("state", state); + tuples.push_back(tuple); + + m_stateDefaultRouteTb->set(ip, tuples); +} + bool RouteOrch::hasNextHopGroup(const NextHopGroupKey& nexthops) const { return m_syncdNextHopGroups.find(nexthops) != m_syncdNextHopGroups.end(); @@ -361,6 +376,13 @@ bool RouteOrch::validnexthopinNextHopGroup(const NextHopKey &nexthop, uint32_t& nhgm_attrs.push_back(nhgm_attr); } + if (m_switchOrch->checkOrderedEcmpEnable()) + { + nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID; + nhgm_attr.value.u32 = nhopgroup->second.nhopgroup_members[nexthop].seq_id; + nhgm_attrs.push_back(nhgm_attr); + } + status = sai_next_hop_group_api->create_next_hop_group_member(&nexthop_id, gSwitchId, (uint32_t)nhgm_attrs.size(), nhgm_attrs.data()); @@ -378,7 +400,7 @@ bool RouteOrch::validnexthopinNextHopGroup(const NextHopKey &nexthop, uint32_t& ++count; gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER); - nhopgroup->second.nhopgroup_members[nexthop] = nexthop_id; + nhopgroup->second.nhopgroup_members[nexthop].next_hop_id = nexthop_id; } if (!m_fgNhgOrch->validNextHopInNextHopGroup(nexthop)) @@ -406,7 +428,7 @@ bool RouteOrch::invalidnexthopinNextHopGroup(const NextHopKey &nexthop, uint32_t continue; } - nexthop_id = nhopgroup->second.nhopgroup_members[nexthop]; + nexthop_id = nhopgroup->second.nhopgroup_members[nexthop].next_hop_id; status = sai_next_hop_group_api->remove_next_hop_group_member(nexthop_id); if (status != SAI_STATUS_SUCCESS) @@ -775,6 +797,11 @@ void RouteOrch::doTask(Consumer& consumer) } } + sai_route_entry_t route_entry; + route_entry.vr_id = vrf_id; + route_entry.switch_id = gSwitchId; + copy(route_entry.destination, ip_prefix); + if (nhg.getSize() == 1 && nhg.hasIntfNextHop()) { if (alsv[0] == "unknown") @@ -818,6 +845,7 @@ void RouteOrch::doTask(Consumer& consumer) else if (m_syncdRoutes.find(vrf_id) == m_syncdRoutes.end() || m_syncdRoutes.at(vrf_id).find(ip_prefix) == m_syncdRoutes.at(vrf_id).end() || m_syncdRoutes.at(vrf_id).at(ip_prefix) != RouteNhg(nhg, ctx.nhg_index) || + gRouteBulker.bulk_entry_pending_removal(route_entry) || ctx.using_temp_nhg) { if (addRoute(ctx, nhg)) @@ -1220,7 +1248,7 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops) vector nhg_attrs; nhg_attr.id = SAI_NEXT_HOP_GROUP_ATTR_TYPE; - nhg_attr.value.s32 = SAI_NEXT_HOP_GROUP_TYPE_ECMP; + nhg_attr.value.s32 = m_switchOrch->checkOrderedEcmpEnable() ? SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_ORDERED_ECMP : SAI_NEXT_HOP_GROUP_TYPE_ECMP; nhg_attrs.push_back(nhg_attr); sai_object_id_t next_hop_group_id; @@ -1274,6 +1302,13 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops) nhgm_attrs.push_back(nhgm_attr); } + if (m_switchOrch->checkOrderedEcmpEnable()) + { + nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID; + nhgm_attr.value.u32 = ((uint32_t)i) + 1; // To make non-zero sequence id + nhgm_attrs.push_back(nhgm_attr); + } + gNextHopGroupMemberBulker.create_entry(&nhgm_ids[i], (uint32_t)nhgm_attrs.size(), nhgm_attrs.data()); @@ -1298,7 +1333,8 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops) if (nhopgroup_shared_set.find(nhid) != nhopgroup_shared_set.end()) { auto it = nhopgroup_shared_set[nhid].begin(); - next_hop_group_entry.nhopgroup_members[*it] = nhgm_id; + next_hop_group_entry.nhopgroup_members[*it].next_hop_id = nhgm_id; + next_hop_group_entry.nhopgroup_members[*it].seq_id = (uint32_t)i + 1; nhopgroup_shared_set[nhid].erase(it); if (nhopgroup_shared_set[nhid].empty()) { @@ -1307,7 +1343,8 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops) } else { - next_hop_group_entry.nhopgroup_members[nhopgroup_members_set.find(nhid)->second] = nhgm_id; + next_hop_group_entry.nhopgroup_members[nhopgroup_members_set.find(nhid)->second].next_hop_id = nhgm_id; + next_hop_group_entry.nhopgroup_members[nhopgroup_members_set.find(nhid)->second].seq_id = ((uint32_t)i) + 1; } } @@ -1352,12 +1389,12 @@ bool RouteOrch::removeNextHopGroup(const NextHopGroupKey &nexthops) if (m_neighOrch->isNextHopFlagSet(nhop->first, NHFLAGS_IFDOWN)) { SWSS_LOG_WARN("NHFLAGS_IFDOWN set for next hop group member %s with next_hop_id %" PRIx64, - nhop->first.to_string().c_str(), nhop->second); + nhop->first.to_string().c_str(), nhop->second.next_hop_id); nhop = nhgm.erase(nhop); continue; } - next_hop_ids.push_back(nhop->second); + next_hop_ids.push_back(nhop->second.next_hop_id); nhop = nhgm.erase(nhop); } @@ -1811,8 +1848,12 @@ bool RouteOrch::addRoute(RouteBulkContext& ctx, const NextHopGroupKey &nextHops) * in m_syncdRoutes, then we need to update the route with a new next hop * (group) id. The old next hop (group) is then not used and the reference * count will decrease by 1. + * + * In case the entry is already pending removal in the bulk, it would be removed + * from m_syncdRoutes during the bulk call. Therefore, such entries need to be + * re-created rather than set attribute. */ - if (it_route == m_syncdRoutes.at(vrf_id).end()) + if (it_route == m_syncdRoutes.at(vrf_id).end() || gRouteBulker.bulk_entry_pending_removal(route_entry)) { if (blackhole) { @@ -1855,6 +1896,25 @@ bool RouteOrch::addRoute(RouteBulkContext& ctx, const NextHopGroupKey &nextHops) } else { + if (!blackhole && vrf_id == gVirtualRouterId && ipPrefix.isDefaultRoute()) + { + // Always set packet action for default route to avoid conflict settings + // in case a SET follows a DEL on the default route in the same bulk. + // - On DEL default route, the packet action will be set to DROP + // - On SET default route, as the default route has NOT been removed from m_syncdRoute + // it calls SAI set_route_attributes instead of crate_route + // However, packet action is called only when a route entry is created + // This leads to conflict settings: + // - packet action: DROP + // - next hop: a valid next hop id + // To avoid this, we always set packet action for default route. + route_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + route_attr.value.s32 = SAI_PACKET_ACTION_FORWARD; + + object_statuses.emplace_back(); + gRouteBulker.set_entry_attribute(&object_statuses.back(), &route_entry, &route_attr); + } + route_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; route_attr.value.oid = next_hop_id; @@ -2147,6 +2207,11 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey } } + if (ipPrefix.isDefaultRoute()) + { + updateDefRouteState(ipPrefix.to_string(), true); + } + m_syncdRoutes[vrf_id][ipPrefix] = RouteNhg(nextHops, ctx.nhg_index); notifyNextHopChangeObservers(vrf_id, ipPrefix, nextHops, true); @@ -2262,6 +2327,8 @@ bool RouteOrch::removeRoutePost(const RouteBulkContext& ctx) } } + updateDefRouteState(ipPrefix.to_string()); + SWSS_LOG_INFO("Set route %s next hop ID to NULL", ipPrefix.to_string().c_str()); } else diff --git a/orchagent/routeorch.h b/orchagent/routeorch.h index 74cd4c4442e..2c8826ecf7a 100644 --- a/orchagent/routeorch.h +++ b/orchagent/routeorch.h @@ -24,7 +24,13 @@ #define LOOPBACK_PREFIX "Loopback" -typedef std::map NextHopGroupMembers; +struct NextHopGroupMemberEntry +{ + sai_object_id_t next_hop_id; // next hop sai oid + uint32_t seq_id; // Sequence Id of nexthop in the group +}; + +typedef std::map NextHopGroupMembers; struct NhgBase; @@ -225,6 +231,9 @@ class RouteOrch : public Orch, public Subject unsigned int m_maxNextHopGroupCount; bool m_resync; + shared_ptr m_stateDb; + unique_ptr m_stateDefaultRouteTb; + RouteTables m_syncdRoutes; LabelRouteTables m_syncdLabelRoutes; NextHopGroupTable m_syncdNextHopGroups; @@ -251,6 +260,8 @@ class RouteOrch : public Orch, public Subject bool addLabelRoutePost(const LabelRouteBulkContext& ctx, const NextHopGroupKey &nextHops); bool removeLabelRoutePost(const LabelRouteBulkContext& ctx); + void updateDefRouteState(string ip, bool add=false); + void doTask(Consumer& consumer); void doLabelTask(Consumer& consumer); diff --git a/orchagent/saihelper.cpp b/orchagent/saihelper.cpp index 8db9676f394..3b409f7217f 100644 --- a/orchagent/saihelper.cpp +++ b/orchagent/saihelper.cpp @@ -57,6 +57,7 @@ sai_qos_map_api_t* sai_qos_map_api; sai_buffer_api_t* sai_buffer_api; sai_acl_api_t* sai_acl_api; sai_hash_api_t* sai_hash_api; +sai_udf_api_t* sai_udf_api; sai_mirror_api_t* sai_mirror_api; sai_fdb_api_t* sai_fdb_api; sai_dtel_api_t* sai_dtel_api; @@ -186,6 +187,7 @@ void initSaiApi() sai_api_query(SAI_API_SCHEDULER_GROUP, (void **)&sai_scheduler_group_api); sai_api_query(SAI_API_ACL, (void **)&sai_acl_api); sai_api_query(SAI_API_HASH, (void **)&sai_hash_api); + sai_api_query(SAI_API_UDF, (void **)&sai_udf_api); sai_api_query(SAI_API_DTEL, (void **)&sai_dtel_api); sai_api_query(SAI_API_SAMPLEPACKET, (void **)&sai_samplepacket_api); sai_api_query(SAI_API_DEBUG_COUNTER, (void **)&sai_debug_counter_api); @@ -223,6 +225,7 @@ void initSaiApi() sai_log_set(SAI_API_SCHEDULER_GROUP, SAI_LOG_LEVEL_NOTICE); sai_log_set(SAI_API_ACL, SAI_LOG_LEVEL_NOTICE); sai_log_set(SAI_API_HASH, SAI_LOG_LEVEL_NOTICE); + sai_log_set(SAI_API_UDF, SAI_LOG_LEVEL_NOTICE); sai_log_set(SAI_API_DTEL, SAI_LOG_LEVEL_NOTICE); sai_log_set(SAI_API_SAMPLEPACKET, SAI_LOG_LEVEL_NOTICE); sai_log_set(SAI_API_DEBUG_COUNTER, SAI_LOG_LEVEL_NOTICE); diff --git a/orchagent/switchorch.cpp b/orchagent/switchorch.cpp index daeace8b08a..48ecd1fd350 100644 --- a/orchagent/switchorch.cpp +++ b/orchagent/switchorch.cpp @@ -1,18 +1,23 @@ #include +#include #include #include "switchorch.h" +#include "crmorch.h" #include "converter.h" #include "notifier.h" #include "notificationproducer.h" #include "macaddress.h" +#include "return_code.h" using namespace std; using namespace swss; extern sai_object_id_t gSwitchId; extern sai_switch_api_t *sai_switch_api; +extern sai_acl_api_t *sai_acl_api; extern MacAddress gVxlanMacAddress; +extern CrmOrch *gCrmOrch; const map switch_attribute_map = { @@ -22,6 +27,7 @@ const map switch_attribute_map = {"ecmp_hash_seed", SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_SEED}, {"lag_hash_seed", SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_SEED}, {"fdb_aging_time", SAI_SWITCH_ATTR_FDB_AGING_TIME}, + {"debug_shell_enable", SAI_SWITCH_ATTR_SWITCH_SHELL_ENABLE}, {"vxlan_port", SAI_SWITCH_ATTR_VXLAN_DEFAULT_PORT}, {"vxlan_router_mac", SAI_SWITCH_ATTR_VXLAN_DEFAULT_ROUTER_MAC} }; @@ -39,6 +45,9 @@ const map packet_action_map = {"trap", SAI_PACKET_ACTION_TRAP} }; + +const std::set switch_non_sai_attribute_set = {"ordered_ecmp"}; + SwitchOrch::SwitchOrch(DBConnector *db, vector& connectors, TableConnector switchTable): Orch(connectors), m_switchTable(switchTable.first, switchTable.second), @@ -57,6 +66,92 @@ SwitchOrch::SwitchOrch(DBConnector *db, vector& connectors, Tabl Orch::addExecutor(executorT); } +void SwitchOrch::initAclGroupsBindToSwitch() +{ + // Create an ACL group per stage, INGRESS, EGRESS and PRE_INGRESS + for (auto stage_it : aclStageLookup) + { + sai_object_id_t group_oid; + auto status = createAclGroup(fvValue(stage_it), &group_oid); + if (!status.ok()) + { + status.prepend("Failed to create ACL group for stage " + fvField(stage_it) + ": "); + SWSS_LOG_THROW("%s", status.message().c_str()); + } + SWSS_LOG_NOTICE("Created ACL group for stage %s", fvField(stage_it).c_str()); + m_aclGroups[fvValue(stage_it)] = group_oid; + status = bindAclGroupToSwitch(fvValue(stage_it), group_oid); + if (!status.ok()) + { + status.prepend("Failed to bind ACL group to stage " + fvField(stage_it) + ": "); + SWSS_LOG_THROW("%s", status.message().c_str()); + } + } +} + +const std::map &SwitchOrch::getAclGroupOidsBindingToSwitch() +{ + return m_aclGroups; +} + +ReturnCode SwitchOrch::createAclGroup(const sai_acl_stage_t &group_stage, sai_object_id_t *acl_grp_oid) +{ + SWSS_LOG_ENTER(); + + std::vector acl_grp_attrs; + sai_attribute_t acl_grp_attr; + acl_grp_attr.id = SAI_ACL_TABLE_GROUP_ATTR_ACL_STAGE; + acl_grp_attr.value.s32 = group_stage; + acl_grp_attrs.push_back(acl_grp_attr); + + acl_grp_attr.id = SAI_ACL_TABLE_GROUP_ATTR_TYPE; + acl_grp_attr.value.s32 = SAI_ACL_TABLE_GROUP_TYPE_PARALLEL; + acl_grp_attrs.push_back(acl_grp_attr); + + acl_grp_attr.id = SAI_ACL_TABLE_ATTR_ACL_BIND_POINT_TYPE_LIST; + std::vector bpoint_list; + bpoint_list.push_back(SAI_ACL_BIND_POINT_TYPE_SWITCH); + acl_grp_attr.value.s32list.count = (uint32_t)bpoint_list.size(); + acl_grp_attr.value.s32list.list = bpoint_list.data(); + acl_grp_attrs.push_back(acl_grp_attr); + + CHECK_ERROR_AND_LOG_AND_RETURN(sai_acl_api->create_acl_table_group( + acl_grp_oid, gSwitchId, (uint32_t)acl_grp_attrs.size(), acl_grp_attrs.data()), + "Failed to create ACL group for stage " << group_stage); + if (group_stage == SAI_ACL_STAGE_INGRESS || group_stage == SAI_ACL_STAGE_PRE_INGRESS || + group_stage == SAI_ACL_STAGE_EGRESS) + { + gCrmOrch->incCrmAclUsedCounter(CrmResourceType::CRM_ACL_GROUP, (sai_acl_stage_t)group_stage, + SAI_ACL_BIND_POINT_TYPE_SWITCH); + } + SWSS_LOG_INFO("Suceeded to create ACL group %s in stage %d ", sai_serialize_object_id(*acl_grp_oid).c_str(), + group_stage); + return ReturnCode(); +} + +ReturnCode SwitchOrch::bindAclGroupToSwitch(const sai_acl_stage_t &group_stage, const sai_object_id_t &acl_grp_oid) +{ + SWSS_LOG_ENTER(); + + auto switch_attr_it = aclStageToSwitchAttrLookup.find(group_stage); + if (switch_attr_it == aclStageToSwitchAttrLookup.end()) + { + LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Failed to set ACL group(" << acl_grp_oid << ") to the SWITCH bind point at stage " + << group_stage); + } + sai_attribute_t attr; + attr.id = switch_attr_it->second; + attr.value.oid = acl_grp_oid; + auto sai_status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + if (sai_status != SAI_STATUS_SUCCESS) + { + LOG_ERROR_AND_RETURN(ReturnCode(sai_status) << "[SAI] Failed to set_switch_attribute with attribute.id=" + << attr.id << " and acl group oid=" << acl_grp_oid); + } + return ReturnCode(); +} + void SwitchOrch::doCfgSensorsTableTask(Consumer &consumer) { SWSS_LOG_ENTER(); @@ -133,7 +228,51 @@ void SwitchOrch::doCfgSensorsTableTask(Consumer &consumer) } } +void SwitchOrch::setSwitchNonSaiAttributes(swss::FieldValueTuple &val) +{ + auto attribute = fvField(val); + auto value = fvValue(val); + if (attribute == "ordered_ecmp") + { + vector fvVector; + if (value == "true") + { + const auto* meta = sai_metadata_get_attr_metadata(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, SAI_NEXT_HOP_GROUP_ATTR_TYPE); + if (meta && meta->isenum) + { + vector values_list(meta->enummetadata->valuescount); + sai_s32_list_t values; + values.count = static_cast(values_list.size()); + values.list = values_list.data(); + + auto status = sai_query_attribute_enum_values_capability(gSwitchId, + SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + SAI_NEXT_HOP_GROUP_ATTR_TYPE, + &values); + if (status == SAI_STATUS_SUCCESS) + { + for (size_t i = 0; i < values.count; i++) + { + if (values.list[i] == SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_ORDERED_ECMP) + { + m_orderedEcmpEnable = true; + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_ORDERED_ECMP_CAPABLE, "true"); + set_switch_capability(fvVector); + SWSS_LOG_NOTICE("Ordered ECMP/Nexthop-Group is configured"); + return; + } + } + } + } + } + m_orderedEcmpEnable = false; + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_ORDERED_ECMP_CAPABLE, "false"); + set_switch_capability(fvVector); + SWSS_LOG_NOTICE("Ordered ECMP/Nexthop-Group is not configured"); + return; + } +} sai_status_t SwitchOrch::setSwitchTunnelVxlanParams(swss::FieldValueTuple &val) { auto attribute = fvField(val); @@ -205,7 +344,12 @@ void SwitchOrch::doAppSwitchTableTask(Consumer &consumer) { auto attribute = fvField(i); - if (switch_attribute_map.find(attribute) == switch_attribute_map.end()) + if (switch_non_sai_attribute_set.find(attribute) != switch_non_sai_attribute_set.end()) + { + setSwitchNonSaiAttributes(i); + continue; + } + else if (switch_attribute_map.find(attribute) == switch_attribute_map.end()) { // Check additionally 'switch_tunnel_attribute_map' for Switch Tunnel if (switch_tunnel_attribute_map.find(attribute) == switch_tunnel_attribute_map.end()) @@ -254,6 +398,10 @@ void SwitchOrch::doAppSwitchTableTask(Consumer &consumer) attr.value.u32 = to_uint(value); break; + case SAI_SWITCH_ATTR_SWITCH_SHELL_ENABLE: + attr.value.booldata = to_uint(value); + break; + case SAI_SWITCH_ATTR_VXLAN_DEFAULT_PORT: attr.value.u16 = to_uint(value); break; diff --git a/orchagent/switchorch.h b/orchagent/switchorch.h index 46d165bd48a..5b09a676408 100644 --- a/orchagent/switchorch.h +++ b/orchagent/switchorch.h @@ -1,5 +1,6 @@ #pragma once +#include "acltable.h" #include "orch.h" #include "timer.h" @@ -9,6 +10,7 @@ #define SWITCH_CAPABILITY_TABLE_PORT_TPID_CAPABLE "PORT_TPID_CAPABLE" #define SWITCH_CAPABILITY_TABLE_LAG_TPID_CAPABLE "LAG_TPID_CAPABLE" +#define SWITCH_CAPABILITY_TABLE_ORDERED_ECMP_CAPABLE "ORDERED_ECMP_CAPABLE" struct WarmRestartCheck { @@ -29,6 +31,15 @@ class SwitchOrch : public Orch bool setAgingFDB(uint32_t sec); void set_switch_capability(const std::vector& values); bool querySwitchDscpToTcCapability(sai_object_type_t sai_object, sai_attr_id_t attr_id); + + // Return reference to ACL group created for each stage and the bind point is + // the switch + const std::map &getAclGroupOidsBindingToSwitch(); + // Initialize the ACL groups bind to Switch + void initAclGroupsBindToSwitch(); + + bool checkOrderedEcmpEnable() { return m_orderedEcmpEnable; } + private: void doTask(Consumer &consumer); void doTask(swss::SelectableTimer &timer); @@ -37,11 +48,23 @@ class SwitchOrch : public Orch void initSensorsTable(); void querySwitchTpidCapability(); sai_status_t setSwitchTunnelVxlanParams(swss::FieldValueTuple &val); + void setSwitchNonSaiAttributes(swss::FieldValueTuple &val); + + + // Create the default ACL group for the given stage, bind point is + // SAI_ACL_BIND_POINT_TYPE_SWITCH and group type is + // SAI_ACL_TABLE_GROUP_TYPE_PARALLEL. + ReturnCode createAclGroup(const sai_acl_stage_t &group_stage, sai_object_id_t *acl_grp_oid); + + // Bind the ACL group to switch for the given stage. + // Set the SAI_SWITCH_ATTR_{STAGE}_ACL with the group oid. + ReturnCode bindAclGroupToSwitch(const sai_acl_stage_t &group_stage, const sai_object_id_t &acl_grp_oid); swss::NotificationConsumer* m_restartCheckNotificationConsumer; void doTask(swss::NotificationConsumer& consumer); swss::DBConnector *m_db; swss::Table m_switchTable; + std::map m_aclGroups; sai_object_id_t m_switchTunnelId; // ASIC temperature sensors @@ -56,6 +79,7 @@ class SwitchOrch : public Orch bool m_sensorsMaxTempSupported = true; bool m_sensorsAvgTempSupported = true; bool m_vxlanSportUserModeEnabled = false; + bool m_orderedEcmpEnable = false; // Information contained in the request from // external program for orchagent pre-shutdown state check diff --git a/orchagent/vnetorch.cpp b/orchagent/vnetorch.cpp index dc5838d8a51..9640e0ee3ab 100644 --- a/orchagent/vnetorch.cpp +++ b/orchagent/vnetorch.cpp @@ -40,7 +40,7 @@ extern CrmOrch *gCrmOrch; extern RouteOrch *gRouteOrch; extern MacAddress gVxlanMacAddress; extern BfdOrch *gBfdOrch; - +extern SwitchOrch *gSwitchOrch; /* * VRF Modeling and VNetVrf class definitions */ @@ -396,7 +396,7 @@ bool VNetOrch::addOperation(const Request& request) sai_attribute_t attr; vector attrs; set peer_list = {}; - bool peer = false, create = false; + bool peer = false, create = false, advertise_prefix = false; uint32_t vni=0; string tunnel; string scope; @@ -427,6 +427,10 @@ bool VNetOrch::addOperation(const Request& request) { scope = request.getAttrString("scope"); } + else if (name == "advertise_prefix") + { + advertise_prefix = request.getAttrBool("advertise_prefix"); + } else { SWSS_LOG_INFO("Unknown attribute: %s", name.c_str()); @@ -453,7 +457,7 @@ bool VNetOrch::addOperation(const Request& request) if (it == std::end(vnet_table_)) { - VNetInfo vnet_info = { tunnel, vni, peer_list, scope }; + VNetInfo vnet_info = { tunnel, vni, peer_list, scope, advertise_prefix }; obj = createObject(vnet_name, vnet_info, attrs); create = true; @@ -645,6 +649,7 @@ VNetRouteOrch::VNetRouteOrch(DBConnector *db, vector &tableNames, VNetOr state_db_ = shared_ptr(new DBConnector("STATE_DB", 0)); state_vnet_rt_tunnel_table_ = unique_ptr
(new Table(state_db_.get(), STATE_VNET_RT_TUNNEL_TABLE_NAME)); + state_vnet_rt_adv_table_ = unique_ptr
(new Table(state_db_.get(), STATE_ADVERTISE_NETWORK_TABLE_NAME)); gBfdOrch->attach(this); } @@ -675,9 +680,12 @@ bool VNetRouteOrch::addNextHopGroup(const string& vnet, const NextHopGroupKey &n vector next_hop_ids; set next_hop_set = nexthops.getNextHops(); std::map nhopgroup_members_set; + std::map nh_seq_id_in_nhgrp; + uint32_t seq_id = 0; for (auto it : next_hop_set) { + nh_seq_id_in_nhgrp[it] = ++seq_id; if (nexthop_info_[vnet].find(it.ip_address) != nexthop_info_[vnet].end() && nexthop_info_[vnet][it.ip_address].bfd_state != SAI_BFD_SESSION_STATE_UP) { continue; @@ -691,7 +699,7 @@ bool VNetRouteOrch::addNextHopGroup(const string& vnet, const NextHopGroupKey &n vector nhg_attrs; nhg_attr.id = SAI_NEXT_HOP_GROUP_ATTR_TYPE; - nhg_attr.value.s32 = SAI_NEXT_HOP_GROUP_TYPE_ECMP; + nhg_attr.value.s32 = gSwitchOrch->checkOrderedEcmpEnable() ? SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_ORDERED_ECMP : SAI_NEXT_HOP_GROUP_TYPE_ECMP; nhg_attrs.push_back(nhg_attr); sai_object_id_t next_hop_group_id; @@ -728,6 +736,13 @@ bool VNetRouteOrch::addNextHopGroup(const string& vnet, const NextHopGroupKey &n nhgm_attr.value.oid = nhid; nhgm_attrs.push_back(nhgm_attr); + if (gSwitchOrch->checkOrderedEcmpEnable()) + { + nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID; + nhgm_attr.value.u32 = nh_seq_id_in_nhgrp[nhopgroup_members_set.find(nhid)->second]; + nhgm_attrs.push_back(nhgm_attr); + } + sai_object_id_t next_hop_group_member_id; status = sai_next_hop_group_api->create_next_hop_group_member(&next_hop_group_member_id, gSwitchId, @@ -860,7 +875,10 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP NextHopGroupInfo next_hop_group_entry; next_hop_group_entry.next_hop_group_id = vrf_obj->getTunnelNextHop(nexthop); next_hop_group_entry.ref_count = 0; - next_hop_group_entry.active_members[nexthop] = SAI_NULL_OBJECT_ID; + if (nexthop_info_[vnet].find(nexthop.ip_address) == nexthop_info_[vnet].end() || nexthop_info_[vnet][nexthop.ip_address].bfd_state == SAI_BFD_SESSION_STATE_UP) + { + next_hop_group_entry.active_members[nexthop] = SAI_NULL_OBJECT_ID; + } syncd_nexthop_groups_[vnet][nexthops] = next_hop_group_entry; } else @@ -1563,12 +1581,39 @@ void VNetRouteOrch::postRouteState(const string& vnet, IpPrefix& ipPrefix, NextH fvVector.emplace_back("state", route_state); state_vnet_rt_tunnel_table_->set(state_db_key, fvVector); + + if (vnet_orch_->getAdvertisePrefix(vnet)) + { + if (route_state == "active") + { + addRouteAdvertisement(ipPrefix); + } + else + { + removeRouteAdvertisement(ipPrefix); + } + } } void VNetRouteOrch::removeRouteState(const string& vnet, IpPrefix& ipPrefix) { const string state_db_key = vnet + state_db_key_delimiter + ipPrefix.to_string(); state_vnet_rt_tunnel_table_->del(state_db_key); + removeRouteAdvertisement(ipPrefix); +} + +void VNetRouteOrch::addRouteAdvertisement(IpPrefix& ipPrefix) +{ + const string key = ipPrefix.to_string(); + vector fvs; + fvs.push_back(FieldValueTuple("", "")); + state_vnet_rt_adv_table_->set(key, fvs); +} + +void VNetRouteOrch::removeRouteAdvertisement(IpPrefix& ipPrefix) +{ + const string key = ipPrefix.to_string(); + state_vnet_rt_adv_table_->del(key); } void VNetRouteOrch::update(SubjectType type, void *cntx) @@ -1648,7 +1693,20 @@ void VNetRouteOrch::updateVnetTunnel(const BfdUpdate& update) NextHopGroupKey nexthops = nhg_info_pair.first; NextHopGroupInfo& nhg_info = nhg_info_pair.second; - if (!(nexthops.contains(endpoint))) + std::set next_hop_set = nexthops.getNextHops(); + uint32_t seq_id = 0; + uint32_t nh_seq_id = 0; + for (auto nh: next_hop_set) + { + seq_id++; + if (nh == endpoint) + { + nh_seq_id = seq_id; + break; + } + } + + if (!nh_seq_id) { continue; } @@ -1670,6 +1728,13 @@ void VNetRouteOrch::updateVnetTunnel(const BfdUpdate& update) nhgm_attr.value.oid = vrf_obj->getTunnelNextHop(endpoint); nhgm_attrs.push_back(nhgm_attr); + if (gSwitchOrch->checkOrderedEcmpEnable()) + { + nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID; + nhgm_attr.value.u32 = nh_seq_id; + nhgm_attrs.push_back(nhgm_attr); + } + sai_status_t status = sai_next_hop_group_api->create_next_hop_group_member(&next_hop_group_member_id, gSwitchId, (uint32_t)nhgm_attrs.size(), diff --git a/orchagent/vnetorch.h b/orchagent/vnetorch.h index 7e493c5f30e..77c27853716 100644 --- a/orchagent/vnetorch.h +++ b/orchagent/vnetorch.h @@ -27,12 +27,13 @@ extern sai_object_id_t gVirtualRouterId; const request_description_t vnet_request_description = { { REQ_T_STRING }, { - { "src_mac", REQ_T_MAC_ADDRESS }, - { "vxlan_tunnel", REQ_T_STRING }, - { "vni", REQ_T_UINT }, - { "peer_list", REQ_T_SET }, - { "guid", REQ_T_STRING }, - { "scope", REQ_T_STRING }, + { "src_mac", REQ_T_MAC_ADDRESS }, + { "vxlan_tunnel", REQ_T_STRING }, + { "vni", REQ_T_UINT }, + { "peer_list", REQ_T_SET }, + { "guid", REQ_T_STRING }, + { "scope", REQ_T_STRING }, + { "advertise_prefix", REQ_T_BOOL}, }, { "vxlan_tunnel", "vni" } // mandatory attributes }; @@ -57,6 +58,7 @@ struct VNetInfo uint32_t vni; set peers; string scope; + bool advertise_prefix; }; typedef map vrid_list_t; @@ -83,7 +85,8 @@ class VNetObject tunnel_(vnetInfo.tunnel), peer_list_(vnetInfo.peers), vni_(vnetInfo.vni), - scope_(vnetInfo.scope) + scope_(vnetInfo.scope), + advertise_prefix_(vnetInfo.advertise_prefix) { } virtual bool updateObj(vector&) = 0; @@ -113,6 +116,11 @@ class VNetObject return scope_; } + bool getAdvertisePrefix() const + { + return advertise_prefix_; + } + virtual ~VNetObject() noexcept(false) {}; private: @@ -120,6 +128,7 @@ class VNetObject string tunnel_; uint32_t vni_; string scope_; + bool advertise_prefix_; }; struct nextHop @@ -223,6 +232,11 @@ class VNetOrch : public Orch2 return vnet_table_.at(name)->getTunnelName(); } + bool getAdvertisePrefix(const std::string& name) const + { + return vnet_table_.at(name)->getAdvertisePrefix(); + } + bool isVnetExecVrf() const { return (vnet_exec_ == VNET_EXEC::VNET_EXEC_VRF); @@ -338,6 +352,8 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer void delEndpointMonitor(const string& vnet, NextHopGroupKey& nexthops); void postRouteState(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops); void removeRouteState(const string& vnet, IpPrefix& ipPrefix); + void addRouteAdvertisement(IpPrefix& ipPrefix); + void removeRouteAdvertisement(IpPrefix& ipPrefix); void updateVnetTunnel(const BfdUpdate&); bool updateTunnelRoute(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op); @@ -362,6 +378,7 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer ProducerStateTable bfd_session_producer_; shared_ptr state_db_; unique_ptr
state_vnet_rt_tunnel_table_; + unique_ptr
state_vnet_rt_adv_table_; }; class VNetCfgRouteOrch : public Orch diff --git a/portsyncd/portsyncd.cpp b/portsyncd/portsyncd.cpp index c55c1685af1..37e0c4232f1 100644 --- a/portsyncd/portsyncd.cpp +++ b/portsyncd/portsyncd.cpp @@ -228,11 +228,6 @@ void handlePortConfigFromConfigDB(ProducerStateTable &p, DBConnector &cfgDb, boo void handlePortConfig(ProducerStateTable &p, map &port_cfg_map) { - string autoneg; - vector attrs; - vector autoneg_attrs; - vector force_attrs; - auto it = port_cfg_map.begin(); while (it != port_cfg_map.end()) { @@ -247,54 +242,7 @@ void handlePortConfig(ProducerStateTable &p, map /* No support for port delete yet */ if (op == SET_COMMAND) { - - for (auto i : values) - { - auto field = fvField(i); - if (field == "adv_speeds") - { - autoneg_attrs.push_back(i); - } - else if (field == "adv_interface_types") - { - autoneg_attrs.push_back(i); - } - else if (field == "speed") - { - force_attrs.push_back(i); - } - else if (field == "interface_type") - { - force_attrs.push_back(i); - } - else if (field == "autoneg") - { - autoneg = fvValue(i); - attrs.push_back(i); - } - else - { - attrs.push_back(i); - } - } - if (autoneg == "on") // autoneg is on, only put adv_speeds and adv_interface_types to APPL_DB - { - attrs.insert(attrs.end(), autoneg_attrs.begin(), autoneg_attrs.end()); - } - else if (autoneg == "off") // autoneg is off, only put speed and interface_type to APPL_DB - { - attrs.insert(attrs.end(), force_attrs.begin(), force_attrs.end()); - } - else // autoneg is not configured, put all attributes to APPL_DB - { - attrs.insert(attrs.end(), autoneg_attrs.begin(), autoneg_attrs.end()); - attrs.insert(attrs.end(), force_attrs.begin(), force_attrs.end()); - } - p.set(key, attrs); - attrs.clear(); - autoneg_attrs.clear(); - force_attrs.clear(); - autoneg.clear(); + p.set(key, values); } it = port_cfg_map.erase(it); diff --git a/tests/conftest.py b/tests/conftest.py index 31420005e0b..8339d9487f7 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -34,10 +34,8 @@ # a dynamic number of ports. GitHub Issue: Azure/sonic-swss#1384. NUM_PORTS = 32 -# FIXME: Voq asics will have 16 fabric ports created (defined in Azure/sonic-buildimage#6185). -# Right now, we set FABRIC_NUM_PORTS to 0, and change to 16 when PR#6185 merges. PR#6185 can't -# be merged before this PR. Otherwise it will cause swss voq test failures. -FABRIC_NUM_PORTS = 0 +# Voq asics will have 16 fabric ports created (defined in Azure/sonic-buildimage#7629). +FABRIC_NUM_PORTS = 16 def ensure_system(cmd): rc, output = subprocess.getstatusoutput(cmd) @@ -527,22 +525,12 @@ def _polling_function(): # Verify that all ports have been created asic_db = self.get_asic_db() - - # Verify that we have "at least" NUM_PORTS + FABRIC_NUM_PORTS, rather exact number. - # Right now, FABRIC_NUM_PORTS = 0. So it essentially waits for at least NUM_PORTS. - # This will allow us to merge Azure/sonic-buildimage#6185 that creates 16 fabric ports. - # When PR#6185 merges, FABRIC_NUM_PORTS should be 16, and so this verification (at least - # NUM_PORTS) still holds. - # Will update FABRIC_NUM_PORTS to 16, and revert back to wait exact NUM_PORTS + FABRIC_NUM_PORTS - # when PR#6185 merges. - wait_at_least_n_keys = True - - asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", num_ports + 1, wait_at_least_n_keys) # +1 CPU Port + asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", num_ports + 1) # +1 CPU Port # Verify that fabric ports are monitored in STATE_DB if metadata.get('switch_type', 'npu') in ['voq', 'fabric']: self.get_state_db() - self.state_db.wait_for_n_keys("FABRIC_PORT_TABLE", FABRIC_NUM_PORTS, wait_at_least_n_keys) + self.state_db.wait_for_n_keys("FABRIC_PORT_TABLE", FABRIC_NUM_PORTS) def net_cleanup(self) -> None: """Clean up network, remove extra links.""" diff --git a/tests/gcov_support.sh b/tests/gcov_support.sh index 4200e20813b..d96ee1c2505 100755 --- a/tests/gcov_support.sh +++ b/tests/gcov_support.sh @@ -146,7 +146,8 @@ lcov_merge_all() cp $1/lcov_cobertura.py $1/common_work/gcov/ python $1/common_work/gcov/lcov_cobertura.py total.info -o coverage.xml - sed -i "s#common_work/#$1/common_work/#" coverage.xml + sed -i "s#common_work/gcov/##" coverage.xml + sed -i "s#common_work.gcov.##" coverage.xml cd gcov_output/ if [ ! -d ${ALLMERGE_DIR} ]; then diff --git a/tests/mock_tests/Makefile.am b/tests/mock_tests/Makefile.am index 51df17f7299..2489bef6d28 100644 --- a/tests/mock_tests/Makefile.am +++ b/tests/mock_tests/Makefile.am @@ -1,5 +1,6 @@ FLEX_CTR_DIR = $(top_srcdir)/orchagent/flex_counter DEBUG_CTR_DIR = $(top_srcdir)/orchagent/debug_counter +P4_ORCH_DIR = $(top_srcdir)/orchagent/p4orch INCLUDES = -I $(FLEX_CTR_DIR) -I $(DEBUG_CTR_DIR) -I $(top_srcdir)/lib @@ -22,6 +23,8 @@ LDADD_GTEST = -L/usr/src/gtest tests_SOURCES = aclorch_ut.cpp \ portsorch_ut.cpp \ + routeorch_ut.cpp \ + qosorch_ut.cpp \ saispy_ut.cpp \ consumer_ut.cpp \ ut_saihelper.cpp \ @@ -85,10 +88,23 @@ tests_SOURCES = aclorch_ut.cpp \ $(top_srcdir)/orchagent/macsecorch.cpp \ $(top_srcdir)/orchagent/lagid.cpp \ $(top_srcdir)/orchagent/bfdorch.cpp \ - $(top_srcdir)/orchagent/srv6orch.cpp + $(top_srcdir)/orchagent/srv6orch.cpp \ + $(top_srcdir)/orchagent/nvgreorch.cpp tests_SOURCES += $(FLEX_CTR_DIR)/flex_counter_manager.cpp $(FLEX_CTR_DIR)/flex_counter_stat_manager.cpp $(FLEX_CTR_DIR)/flow_counter_handler.cpp tests_SOURCES += $(DEBUG_CTR_DIR)/debug_counter.cpp $(DEBUG_CTR_DIR)/drop_counter.cpp +tests_SOURCES += $(P4_ORCH_DIR)/p4orch.cpp \ + $(P4_ORCH_DIR)/p4orch_util.cpp \ + $(P4_ORCH_DIR)/p4oidmapper.cpp \ + $(P4_ORCH_DIR)/router_interface_manager.cpp \ + $(P4_ORCH_DIR)/neighbor_manager.cpp \ + $(P4_ORCH_DIR)/next_hop_manager.cpp \ + $(P4_ORCH_DIR)/route_manager.cpp \ + $(P4_ORCH_DIR)/acl_util.cpp \ + $(P4_ORCH_DIR)/acl_table_manager.cpp \ + $(P4_ORCH_DIR)/acl_rule_manager.cpp \ + $(P4_ORCH_DIR)/wcmp_manager.cpp \ + $(P4_ORCH_DIR)/mirror_session_manager.cpp tests_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) tests_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) -I$(top_srcdir)/orchagent diff --git a/tests/mock_tests/aclorch_ut.cpp b/tests/mock_tests/aclorch_ut.cpp index c0d7399570c..295fed20baa 100644 --- a/tests/mock_tests/aclorch_ut.cpp +++ b/tests/mock_tests/aclorch_ut.cpp @@ -342,6 +342,9 @@ namespace aclorch_test gVirtualRouterId = attr.value.oid; + ASSERT_EQ(gCrmOrch, nullptr); + gCrmOrch = new CrmOrch(m_config_db.get(), CFG_CRM_TABLE_NAME); + TableConnector stateDbSwitchTable(m_state_db.get(), "SWITCH_CAPABILITY"); TableConnector conf_asic_sensors(m_config_db.get(), CFG_ASIC_SENSORS_TABLE_NAME); TableConnector app_switch_table(m_app_db.get(), APP_SWITCH_TABLE_NAME); @@ -369,9 +372,6 @@ namespace aclorch_test ASSERT_EQ(gPortsOrch, nullptr); gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); - ASSERT_EQ(gCrmOrch, nullptr); - gCrmOrch = new CrmOrch(m_config_db.get(), CFG_CRM_TABLE_NAME); - ASSERT_EQ(gVrfOrch, nullptr); gVrfOrch = new VRFOrch(m_app_db.get(), APP_VRF_TABLE_NAME, m_state_db.get(), STATE_VRF_OBJECT_TABLE_NAME); diff --git a/tests/mock_tests/bulker_ut.cpp b/tests/mock_tests/bulker_ut.cpp index a2cdaa07a30..6210cc0969d 100644 --- a/tests/mock_tests/bulker_ut.cpp +++ b/tests/mock_tests/bulker_ut.cpp @@ -106,4 +106,40 @@ namespace bulker_test ASSERT_EQ(ia->first.id, SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION); ASSERT_EQ(ia->first.value.s32, SAI_PACKET_ACTION_FORWARD); } + + TEST_F(BulkerTest, BulkerPendindRemoval) + { + // Create bulker + EntityBulker gRouteBulker(sai_route_api, 1000); + deque object_statuses; + + // Check max bulk size + ASSERT_EQ(gRouteBulker.max_bulk_size, 1000); + + // Create a dummy route entry + sai_route_entry_t route_entry_remove; + route_entry_remove.destination.addr_family = SAI_IP_ADDR_FAMILY_IPV4; + route_entry_remove.destination.addr.ip4 = htonl(0x0a00000f); + route_entry_remove.destination.mask.ip4 = htonl(0xffffff00); + route_entry_remove.vr_id = 0x0; + route_entry_remove.switch_id = 0x0; + + // Put route entry into remove + object_statuses.emplace_back(); + gRouteBulker.remove_entry(&object_statuses.back(), &route_entry_remove); + + // Confirm route entry is pending removal + ASSERT_TRUE(gRouteBulker.bulk_entry_pending_removal(route_entry_remove)); + + // Create another dummy route entry that will not be removed + sai_route_entry_t route_entry_non_remove; + route_entry_non_remove.destination.addr_family = SAI_IP_ADDR_FAMILY_IPV4; + route_entry_non_remove.destination.addr.ip4 = htonl(0x0a00010f); + route_entry_non_remove.destination.mask.ip4 = htonl(0xffffff00); + route_entry_non_remove.vr_id = 0x0; + route_entry_non_remove.switch_id = 0x0; + + // Confirm route entry is not pending removal + ASSERT_FALSE(gRouteBulker.bulk_entry_pending_removal(route_entry_non_remove)); + } } diff --git a/tests/mock_tests/mock_orchagent_main.h b/tests/mock_tests/mock_orchagent_main.h index 181ebac8897..3166f3d9624 100644 --- a/tests/mock_tests/mock_orchagent_main.h +++ b/tests/mock_tests/mock_orchagent_main.h @@ -9,13 +9,19 @@ #include "neighorch.h" #include "fdborch.h" #include "mirrororch.h" +#define private public #include "bufferorch.h" +#undef private +#include "qosorch.h" #include "vrforch.h" #include "vnetorch.h" #include "vxlanorch.h" #include "policerorch.h" #include "fgnhgorch.h" #include "flexcounterorch.h" +#include "tunneldecaporch.h" +#include "muxorch.h" +#include "nhgorch.h" #include "directory.h" extern int gBatchSize; @@ -43,7 +49,10 @@ extern NeighOrch *gNeighOrch; extern FdbOrch *gFdbOrch; extern MirrorOrch *gMirrorOrch; extern BufferOrch *gBufferOrch; +extern QosOrch *gQosOrch; extern VRFOrch *gVrfOrch; +extern NhgOrch *gNhgOrch; +extern Srv6Orch *gSrv6Orch; extern Directory gDirectory; extern sai_acl_api_t *sai_acl_api; @@ -60,4 +69,10 @@ extern sai_tunnel_api_t *sai_tunnel_api; extern sai_next_hop_api_t *sai_next_hop_api; extern sai_hostif_api_t *sai_hostif_api; extern sai_buffer_api_t *sai_buffer_api; +extern sai_qos_map_api_t *sai_qos_map_api; +extern sai_scheduler_api_t *sai_scheduler_api; +extern sai_scheduler_group_api_t *sai_scheduler_group_api; +extern sai_wred_api_t *sai_wred_api; extern sai_queue_api_t *sai_queue_api; +extern sai_udf_api_t* sai_udf_api; +extern sai_mpls_api_t* sai_mpls_api; diff --git a/tests/mock_tests/portsorch_ut.cpp b/tests/mock_tests/portsorch_ut.cpp index 853fdbfb698..28df6610fdf 100644 --- a/tests/mock_tests/portsorch_ut.cpp +++ b/tests/mock_tests/portsorch_ut.cpp @@ -7,7 +7,9 @@ #include "mock_orchagent_main.h" #include "mock_table.h" #include "notifier.h" +#define private public #include "pfcactionhandler.h" +#undef private #include @@ -18,6 +20,105 @@ namespace portsorch_test using namespace std; + sai_queue_api_t ut_sai_queue_api; + sai_queue_api_t *pold_sai_queue_api; + sai_buffer_api_t ut_sai_buffer_api; + sai_buffer_api_t *pold_sai_buffer_api; + + string _ut_stub_queue_key; + sai_status_t _ut_stub_sai_get_queue_attribute( + _In_ sai_object_id_t queue_id, + _In_ uint32_t attr_count, + _Inout_ sai_attribute_t *attr_list) + { + if (attr_count == 1 && attr_list[0].id == SAI_QUEUE_ATTR_BUFFER_PROFILE_ID) + { + auto &typemapQueue = (*gBufferOrch->m_buffer_type_maps[APP_BUFFER_QUEUE_TABLE_NAME]); + auto &profileName = typemapQueue["Ethernet0:3-4"].m_objsReferencingByMe["profile"]; + auto profileNameVec = tokenize(profileName, ':'); + auto &typemapProfile = (*gBufferOrch->m_buffer_type_maps[APP_BUFFER_PROFILE_TABLE_NAME]); + attr_list[0].value.oid = typemapProfile[profileNameVec[1]].m_saiObjectId; + return SAI_STATUS_SUCCESS; + } + else + { + return pold_sai_queue_api->get_queue_attribute(queue_id, attr_count, attr_list); + } + } + + sai_status_t _ut_stub_sai_get_ingress_priority_group_attribute( + _In_ sai_object_id_t ingress_priority_group_id, + _In_ uint32_t attr_count, + _Inout_ sai_attribute_t *attr_list) + { + if (attr_count == 1 && attr_list[0].id == SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE) + { + auto &typemapPg = (*gBufferOrch->m_buffer_type_maps[APP_BUFFER_PG_TABLE_NAME]); + auto &profileName = typemapPg["Ethernet0:3-4"].m_objsReferencingByMe["profile"]; + auto profileNameVec = tokenize(profileName, ':'); + auto &typemapProfile = (*gBufferOrch->m_buffer_type_maps[APP_BUFFER_PROFILE_TABLE_NAME]); + attr_list[0].value.oid = typemapProfile[profileNameVec[1]].m_saiObjectId; + return SAI_STATUS_SUCCESS; + } + else + { + return pold_sai_buffer_api->get_ingress_priority_group_attribute(ingress_priority_group_id, attr_count, attr_list); + } + } + + int _sai_create_buffer_pool_count = 0; + sai_status_t _ut_stub_sai_create_buffer_pool( + _Out_ sai_object_id_t *buffer_pool_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list) + { + auto status = pold_sai_buffer_api->create_buffer_pool(buffer_pool_id, switch_id, attr_count, attr_list); + if (SAI_STATUS_SUCCESS == status) + _sai_create_buffer_pool_count++; + return status; + } + + int _sai_remove_buffer_pool_count = 0; + sai_status_t _ut_stub_sai_remove_buffer_pool( + _In_ sai_object_id_t buffer_pool_id) + { + auto status = pold_sai_buffer_api->remove_buffer_pool(buffer_pool_id); + if (SAI_STATUS_SUCCESS == status) + _sai_remove_buffer_pool_count++; + return status; + } + + void _hook_sai_buffer_and_queue_api() + { + ut_sai_buffer_api = *sai_buffer_api; + pold_sai_buffer_api = sai_buffer_api; + ut_sai_buffer_api.create_buffer_pool = _ut_stub_sai_create_buffer_pool; + ut_sai_buffer_api.remove_buffer_pool = _ut_stub_sai_remove_buffer_pool; + ut_sai_buffer_api.get_ingress_priority_group_attribute = _ut_stub_sai_get_ingress_priority_group_attribute; + sai_buffer_api = &ut_sai_buffer_api; + + ut_sai_queue_api = *sai_queue_api; + pold_sai_queue_api = sai_queue_api; + ut_sai_queue_api.get_queue_attribute = _ut_stub_sai_get_queue_attribute; + sai_queue_api = &ut_sai_queue_api; + } + + void _unhook_sai_buffer_and_queue_api() + { + sai_buffer_api = pold_sai_buffer_api; + sai_queue_api = pold_sai_queue_api; + } + + void clear_pfcwd_zero_buffer_handler() + { + auto &zeroProfile = PfcWdZeroBufferHandler::ZeroBufferProfile::getInstance(); + zeroProfile.m_zeroIngressBufferPool = SAI_NULL_OBJECT_ID; + zeroProfile.m_zeroEgressBufferPool = SAI_NULL_OBJECT_ID; + zeroProfile.m_zeroIngressBufferProfile = SAI_NULL_OBJECT_ID; + zeroProfile.m_zeroEgressBufferProfile = SAI_NULL_OBJECT_ID; + } + struct PortsOrchTest : public ::testing::Test { shared_ptr m_app_db; @@ -103,6 +204,12 @@ namespace portsorch_test { ::testing_db::reset(); + auto buffer_maps = BufferOrch::m_buffer_type_maps; + for (auto &i : buffer_maps) + { + i.second->clear(); + } + delete gNeighOrch; gNeighOrch = nullptr; delete gFdbOrch; @@ -355,10 +462,12 @@ namespace portsorch_test TEST_F(PortsOrchTest, PfcZeroBufferHandlerLocksPortPgAndQueue) { + _hook_sai_buffer_and_queue_api(); Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); Table pgTable = Table(m_app_db.get(), APP_BUFFER_PG_TABLE_NAME); Table profileTable = Table(m_app_db.get(), APP_BUFFER_PROFILE_TABLE_NAME); Table poolTable = Table(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME); + Table queueTable = Table(m_app_db.get(), APP_BUFFER_QUEUE_TABLE_NAME); // Get SAI default ports to populate DB auto ports = ut_helper::getInitialSaiPorts(); @@ -397,39 +506,71 @@ namespace portsorch_test Port port; gPortsOrch->getPort("Ethernet0", port); - auto countersTable = make_shared
(m_counters_db.get(), COUNTERS_TABLE); - auto dropHandler = make_unique(port.m_port_id, port.m_queue_ids[3], 3, countersTable); - // Create test buffer pool poolTable.set( - "test_pool", + "ingress_pool", { { "type", "ingress" }, { "mode", "dynamic" }, { "size", "4200000" }, }); + poolTable.set( + "egress_pool", + { + { "type", "egress" }, + { "mode", "dynamic" }, + { "size", "4200000" }, + }); // Create test buffer profile - profileTable.set("test_profile", { { "pool", "test_pool" }, + profileTable.set("test_profile", { { "pool", "ingress_pool" }, { "xon", "14832" }, { "xoff", "14832" }, { "size", "35000" }, { "dynamic_th", "0" } }); + profileTable.set("ingress_profile", { { "pool", "ingress_pool" }, + { "xon", "14832" }, + { "xoff", "14832" }, + { "size", "35000" }, + { "dynamic_th", "0" } }); + profileTable.set("egress_profile", { { "pool", "egress_pool" }, + { "size", "0" }, + { "dynamic_th", "0" } }); // Apply profile on PGs 3-4 all ports for (const auto &it : ports) { std::ostringstream oss; oss << it.first << ":3-4"; - pgTable.set(oss.str(), { { "profile", "test_profile" } }); + pgTable.set(oss.str(), { { "profile", "ingress_profile" } }); + queueTable.set(oss.str(), { {"profile", "egress_profile" } }); } gBufferOrch->addExistingData(&pgTable); gBufferOrch->addExistingData(&poolTable); gBufferOrch->addExistingData(&profileTable); + gBufferOrch->addExistingData(&queueTable); // process pool, profile and PGs static_cast(gBufferOrch)->doTask(); + auto countersTable = make_shared
(m_counters_db.get(), COUNTERS_TABLE); + auto current_create_buffer_pool_count = _sai_create_buffer_pool_count; + auto dropHandler = make_unique(port.m_port_id, port.m_queue_ids[3], 3, countersTable); + + current_create_buffer_pool_count += 2; + ASSERT_TRUE(current_create_buffer_pool_count == _sai_create_buffer_pool_count); + ASSERT_TRUE(PfcWdZeroBufferHandler::ZeroBufferProfile::getInstance().getPool(true) == gBufferOrch->m_ingressZeroBufferPool); + ASSERT_TRUE(PfcWdZeroBufferHandler::ZeroBufferProfile::getInstance().getPool(false) == gBufferOrch->m_egressZeroBufferPool); + ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 1); + ASSERT_TRUE(gBufferOrch->m_egressZeroPoolRefCount == 1); + + std::deque entries; + entries.push_back({"Ethernet0:3-4", "SET", {{ "profile", "test_profile"}}}); + auto pgConsumer = static_cast(gBufferOrch->getExecutor(APP_BUFFER_PG_TABLE_NAME)); + pgConsumer->addToSync(entries); + entries.clear(); + static_cast(gBufferOrch)->doTask(); + // Port should have been updated by BufferOrch->doTask gPortsOrch->getPort("Ethernet0", port); auto profile_id = (*BufferOrch::m_buffer_type_maps["BUFFER_PROFILE_TABLE"])[string("test_profile")].m_saiObjectId; @@ -437,11 +578,32 @@ namespace portsorch_test ASSERT_TRUE(port.m_priority_group_pending_profile[3] == profile_id); ASSERT_TRUE(port.m_priority_group_pending_profile[4] == SAI_NULL_OBJECT_ID); - auto pgConsumer = static_cast(gBufferOrch->getExecutor(APP_BUFFER_PG_TABLE_NAME)); + pgConsumer = static_cast(gBufferOrch->getExecutor(APP_BUFFER_PG_TABLE_NAME)); pgConsumer->dumpPendingTasks(ts); ASSERT_TRUE(ts.empty()); // PG is stored in m_priority_group_pending_profile ts.clear(); + // Create a zero buffer pool after PFC storm + entries.push_back({"ingress_zero_pool", "SET", {{ "type", "ingress" }, + { "mode", "static" }, + { "size", "0" }}}); + auto poolConsumer = static_cast(gBufferOrch->getExecutor(APP_BUFFER_POOL_TABLE_NAME)); + poolConsumer->addToSync(entries); + entries.clear(); + static_cast(gBufferOrch)->doTask(); + // Reference increased + ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 2); + // Didn't create buffer pool again + ASSERT_TRUE(_sai_create_buffer_pool_count == current_create_buffer_pool_count); + + entries.push_back({"ingress_zero_pool", "DEL", {}}); + poolConsumer->addToSync(entries); + entries.clear(); + auto current_remove_buffer_pool_count = _sai_remove_buffer_pool_count; + static_cast(gBufferOrch)->doTask(); + ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 1); + ASSERT_TRUE(_sai_remove_buffer_pool_count == current_remove_buffer_pool_count); + // release zero buffer drop handler dropHandler.reset(); @@ -459,6 +621,139 @@ namespace portsorch_test pgConsumer->dumpPendingTasks(ts); ASSERT_TRUE(ts.empty()); // PG should be processed now ts.clear(); + clear_pfcwd_zero_buffer_handler(); + _unhook_sai_buffer_and_queue_api(); + } + + TEST_F(PortsOrchTest, PfcZeroBufferHandlerLocksPortWithZeroPoolCreated) + { + _hook_sai_buffer_and_queue_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + Table pgTable = Table(m_app_db.get(), APP_BUFFER_PG_TABLE_NAME); + Table profileTable = Table(m_app_db.get(), APP_BUFFER_PROFILE_TABLE_NAME); + Table poolTable = Table(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME); + Table queueTable = Table(m_app_db.get(), APP_BUFFER_QUEUE_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate port table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone, PortInitDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + portTable.set("PortInitDone", { { "lanes", "0" } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + + static_cast(gPortsOrch)->doTask(); + + // Apply configuration + // ports + static_cast(gPortsOrch)->doTask(); + + ASSERT_TRUE(gPortsOrch->allPortsReady()); + + // No more tasks + vector ts; + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + ts.clear(); + + // Simulate storm drop handler started on Ethernet0 TC 3 + Port port; + gPortsOrch->getPort("Ethernet0", port); + + // Create test buffer pool + poolTable.set("ingress_pool", + { + { "type", "ingress" }, + { "mode", "dynamic" }, + { "size", "4200000" }, + }); + poolTable.set("egress_pool", + { + { "type", "egress" }, + { "mode", "dynamic" }, + { "size", "4200000" }, + }); + poolTable.set("ingress_zero_pool", + { + { "type", "ingress" }, + { "mode", "static" }, + { "size", "0" } + }); + auto poolConsumer = static_cast(gBufferOrch->getExecutor(APP_BUFFER_POOL_TABLE_NAME)); + + // Create test buffer profile + profileTable.set("ingress_profile", { { "pool", "ingress_pool" }, + { "xon", "14832" }, + { "xoff", "14832" }, + { "size", "35000" }, + { "dynamic_th", "0" } }); + profileTable.set("egress_profile", { { "pool", "egress_pool" }, + { "size", "0" }, + { "dynamic_th", "0" } }); + + // Apply profile on PGs 3-4 all ports + for (const auto &it : ports) + { + std::ostringstream oss; + oss << it.first << ":3-4"; + pgTable.set(oss.str(), { { "profile", "ingress_profile" } }); + queueTable.set(oss.str(), { {"profile", "egress_profile" } }); + } + + gBufferOrch->addExistingData(&poolTable); + gBufferOrch->addExistingData(&profileTable); + gBufferOrch->addExistingData(&pgTable); + gBufferOrch->addExistingData(&queueTable); + + auto current_create_buffer_pool_count = _sai_create_buffer_pool_count + 3; // call SAI API create_buffer_pool for each pool + ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 0); + ASSERT_TRUE(gBufferOrch->m_egressZeroPoolRefCount == 0); + ASSERT_TRUE(gBufferOrch->m_ingressZeroBufferPool == SAI_NULL_OBJECT_ID); + ASSERT_TRUE(gBufferOrch->m_egressZeroBufferPool == SAI_NULL_OBJECT_ID); + + // process pool, profile and PGs + static_cast(gBufferOrch)->doTask(); + + ASSERT_TRUE(current_create_buffer_pool_count == _sai_create_buffer_pool_count); + ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 1); + ASSERT_TRUE(gBufferOrch->m_egressZeroPoolRefCount == 0); + ASSERT_TRUE(gBufferOrch->m_ingressZeroBufferPool != SAI_NULL_OBJECT_ID); + ASSERT_TRUE(gBufferOrch->m_egressZeroBufferPool == SAI_NULL_OBJECT_ID); + + auto countersTable = make_shared
(m_counters_db.get(), COUNTERS_TABLE); + auto dropHandler = make_unique(port.m_port_id, port.m_queue_ids[3], 3, countersTable); + + current_create_buffer_pool_count++; // Increased for egress zero pool + ASSERT_TRUE(current_create_buffer_pool_count == _sai_create_buffer_pool_count); + ASSERT_TRUE(PfcWdZeroBufferHandler::ZeroBufferProfile::getInstance().getPool(true) == gBufferOrch->m_ingressZeroBufferPool); + ASSERT_TRUE(PfcWdZeroBufferHandler::ZeroBufferProfile::getInstance().getPool(false) == gBufferOrch->m_egressZeroBufferPool); + ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 2); + ASSERT_TRUE(gBufferOrch->m_egressZeroPoolRefCount == 1); + + std::deque entries; + entries.push_back({"ingress_zero_pool", "DEL", {}}); + poolConsumer->addToSync(entries); + entries.clear(); + auto current_remove_buffer_pool_count = _sai_remove_buffer_pool_count; + static_cast(gBufferOrch)->doTask(); + ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 1); + ASSERT_TRUE(_sai_remove_buffer_pool_count == current_remove_buffer_pool_count); + + // release zero buffer drop handler + dropHandler.reset(); + clear_pfcwd_zero_buffer_handler(); + _unhook_sai_buffer_and_queue_api(); } /* This test checks that a LAG member validation happens on orchagent level diff --git a/tests/mock_tests/qosorch_ut.cpp b/tests/mock_tests/qosorch_ut.cpp new file mode 100644 index 00000000000..a77d19b38bf --- /dev/null +++ b/tests/mock_tests/qosorch_ut.cpp @@ -0,0 +1,789 @@ +#define private public // make Directory::m_values available to clean it. +#include "directory.h" +#undef private +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_table.h" + +extern string gMySwitchType; + + +namespace qosorch_test +{ + using namespace std; + + shared_ptr m_app_db; + shared_ptr m_config_db; + shared_ptr m_state_db; + shared_ptr m_chassis_app_db; + + int sai_remove_qos_map_count; + int sai_remove_wred_profile_count; + int sai_remove_scheduler_count; + sai_object_id_t switch_dscp_to_tc_map_id; + + sai_remove_scheduler_fn old_remove_scheduler; + sai_scheduler_api_t ut_sai_scheduler_api, *pold_sai_scheduler_api; + sai_remove_wred_fn old_remove_wred; + sai_wred_api_t ut_sai_wred_api, *pold_sai_wred_api; + sai_remove_qos_map_fn old_remove_qos_map; + sai_qos_map_api_t ut_sai_qos_map_api, *pold_sai_qos_map_api; + sai_set_switch_attribute_fn old_set_switch_attribute_fn; + sai_switch_api_t ut_sai_switch_api, *pold_sai_switch_api; + + sai_status_t _ut_stub_sai_set_switch_attribute(sai_object_id_t switch_id, const sai_attribute_t *attr) + { + auto rc = old_set_switch_attribute_fn(switch_id, attr); + if (rc == SAI_STATUS_SUCCESS && attr->id == SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP) + switch_dscp_to_tc_map_id = attr->value.oid; + return rc; + } + + sai_status_t _ut_stub_sai_remove_qos_map(sai_object_id_t qos_map_id) + { + auto rc = old_remove_qos_map(qos_map_id); + if (rc == SAI_STATUS_SUCCESS) + sai_remove_qos_map_count++; + return rc; + } + + sai_status_t _ut_stub_sai_remove_wred(sai_object_id_t wred_id) + { + auto rc = old_remove_wred(wred_id); + if (rc == SAI_STATUS_SUCCESS) + sai_remove_wred_profile_count++; + return rc; + } + + sai_status_t _ut_stub_sai_remove_scheduler(sai_object_id_t scheduler_id) + { + auto rc = old_remove_scheduler(scheduler_id); + if (rc == SAI_STATUS_SUCCESS) + sai_remove_scheduler_count++; + return rc; + } + + struct QosOrchTest : public ::testing::Test + { + QosOrchTest() + { + } + + void CheckDependency(const string &referencingTableName, const string &referencingObjectName, const string &field, const string &dependentTableName, const string &dependentObjectName="") + { + auto &qosTypeMaps = QosOrch::getTypeMap(); + auto &referencingTable = (*qosTypeMaps[referencingTableName]); + auto &dependentTable = (*qosTypeMaps[dependentTableName]); + + if (dependentObjectName.empty()) + { + ASSERT_TRUE(referencingTable[referencingObjectName].m_objsReferencingByMe[field].empty()); + ASSERT_EQ(dependentTable[dependentObjectName].m_objsDependingOnMe.count(referencingObjectName), 0); + } + else + { + ASSERT_EQ(referencingTable[referencingObjectName].m_objsReferencingByMe[field], dependentTableName + ":" + dependentObjectName); + ASSERT_EQ(dependentTable[dependentObjectName].m_objsDependingOnMe.count(referencingObjectName), 1); + } + } + + void RemoveItem(const string &table, const string &key) + { + std::deque entries; + entries.push_back({key, "DEL", {}}); + auto consumer = dynamic_cast(gQosOrch->getExecutor(table)); + consumer->addToSync(entries); + } + + template void ReplaceSaiRemoveApi(sai_api_t* &sai_api, + sai_api_t &ut_sai_api, + sai_api_t* &pold_sai_api, + sai_remove_func ut_remove, + sai_remove_func &sai_remove, + sai_remove_func &old_remove, + sai_remove_func &put_remove) + { + old_remove = sai_remove; + pold_sai_api = sai_api; + ut_sai_api = *pold_sai_api; + sai_api = &ut_sai_api; + put_remove = ut_remove; + } + + void SetUp() override + { + ASSERT_EQ(sai_route_api, nullptr); + map profile = { + { "SAI_VS_SWITCH_TYPE", "SAI_VS_SWITCH_TYPE_BCM56850" }, + { "KV_DEVICE_MAC_ADDRESS", "20:03:04:05:06:00" } + }; + + ut_helper::initSaiApi(profile); + + // Hack SAI APIs + ReplaceSaiRemoveApi(sai_qos_map_api, ut_sai_qos_map_api, pold_sai_qos_map_api, + _ut_stub_sai_remove_qos_map, sai_qos_map_api->remove_qos_map, + old_remove_qos_map, ut_sai_qos_map_api.remove_qos_map); + ReplaceSaiRemoveApi(sai_scheduler_api, ut_sai_scheduler_api, pold_sai_scheduler_api, + _ut_stub_sai_remove_scheduler, sai_scheduler_api->remove_scheduler, + old_remove_scheduler, ut_sai_scheduler_api.remove_scheduler); + ReplaceSaiRemoveApi(sai_wred_api, ut_sai_wred_api, pold_sai_wred_api, + _ut_stub_sai_remove_wred, sai_wred_api->remove_wred, + old_remove_wred, ut_sai_wred_api.remove_wred); + pold_sai_switch_api = sai_switch_api; + ut_sai_switch_api = *pold_sai_switch_api; + old_set_switch_attribute_fn = pold_sai_switch_api->set_switch_attribute; + sai_switch_api = &ut_sai_switch_api; + ut_sai_switch_api.set_switch_attribute = _ut_stub_sai_set_switch_attribute; + + // Init switch and create dependencies + m_app_db = make_shared("APPL_DB", 0); + m_config_db = make_shared("CONFIG_DB", 0); + m_state_db = make_shared("STATE_DB", 0); + if(gMySwitchType == "voq") + m_chassis_app_db = make_shared("CHASSIS_APP_DB", 0); + + sai_attribute_t attr; + + attr.id = SAI_SWITCH_ATTR_INIT_SWITCH; + attr.value.booldata = true; + + auto status = sai_switch_api->create_switch(&gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + // Get switch source MAC address + attr.id = SAI_SWITCH_ATTR_SRC_MAC_ADDRESS; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gMacAddress = attr.value.mac; + + // Get the default virtual router ID + attr.id = SAI_SWITCH_ATTR_DEFAULT_VIRTUAL_ROUTER_ID; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gVirtualRouterId = attr.value.oid; + + ASSERT_EQ(gCrmOrch, nullptr); + gCrmOrch = new CrmOrch(m_config_db.get(), CFG_CRM_TABLE_NAME); + + TableConnector stateDbSwitchTable(m_state_db.get(), "SWITCH_CAPABILITY"); + TableConnector conf_asic_sensors(m_config_db.get(), CFG_ASIC_SENSORS_TABLE_NAME); + TableConnector app_switch_table(m_app_db.get(), APP_SWITCH_TABLE_NAME); + + vector switch_tables = { + conf_asic_sensors, + app_switch_table + }; + + ASSERT_EQ(gSwitchOrch, nullptr); + gSwitchOrch = new SwitchOrch(m_app_db.get(), switch_tables, stateDbSwitchTable); + + // Create dependencies ... + + const int portsorch_base_pri = 40; + + vector ports_tables = { + { APP_PORT_TABLE_NAME, portsorch_base_pri + 5 }, + { APP_VLAN_TABLE_NAME, portsorch_base_pri + 2 }, + { APP_VLAN_MEMBER_TABLE_NAME, portsorch_base_pri }, + { APP_LAG_TABLE_NAME, portsorch_base_pri + 4 }, + { APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri } + }; + + vector flex_counter_tables = { + CFG_FLEX_COUNTER_TABLE_NAME + }; + auto* flexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); + gDirectory.set(flexCounterOrch); + + ASSERT_EQ(gPortsOrch, nullptr); + gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + + ASSERT_EQ(gVrfOrch, nullptr); + gVrfOrch = new VRFOrch(m_app_db.get(), APP_VRF_TABLE_NAME, m_state_db.get(), STATE_VRF_OBJECT_TABLE_NAME); + + ASSERT_EQ(gIntfsOrch, nullptr); + gIntfsOrch = new IntfsOrch(m_app_db.get(), APP_INTF_TABLE_NAME, gVrfOrch, m_chassis_app_db.get()); + + const int fdborch_pri = 20; + + vector app_fdb_tables = { + { APP_FDB_TABLE_NAME, FdbOrch::fdborch_pri}, + { APP_VXLAN_FDB_TABLE_NAME, FdbOrch::fdborch_pri}, + { APP_MCLAG_FDB_TABLE_NAME, fdborch_pri} + }; + + TableConnector stateDbFdb(m_state_db.get(), STATE_FDB_TABLE_NAME); + TableConnector stateMclagDbFdb(m_state_db.get(), STATE_MCLAG_REMOTE_FDB_TABLE_NAME); + ASSERT_EQ(gFdbOrch, nullptr); + gFdbOrch = new FdbOrch(m_app_db.get(), app_fdb_tables, stateDbFdb, stateMclagDbFdb, gPortsOrch); + + ASSERT_EQ(gNeighOrch, nullptr); + gNeighOrch = new NeighOrch(m_app_db.get(), APP_NEIGH_TABLE_NAME, gIntfsOrch, gFdbOrch, gPortsOrch, m_chassis_app_db.get()); + + vector qos_tables = { + CFG_TC_TO_QUEUE_MAP_TABLE_NAME, + CFG_SCHEDULER_TABLE_NAME, + CFG_DSCP_TO_TC_MAP_TABLE_NAME, + CFG_MPLS_TC_TO_TC_MAP_TABLE_NAME, + CFG_DOT1P_TO_TC_MAP_TABLE_NAME, + CFG_QUEUE_TABLE_NAME, + CFG_PORT_QOS_MAP_TABLE_NAME, + CFG_WRED_PROFILE_TABLE_NAME, + CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, + CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, + CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, + CFG_DSCP_TO_FC_MAP_TABLE_NAME, + CFG_EXP_TO_FC_MAP_TABLE_NAME + }; + gQosOrch = new QosOrch(m_config_db.get(), qos_tables); + + // Recreate buffer orch to read populated data + vector buffer_tables = { APP_BUFFER_POOL_TABLE_NAME, + APP_BUFFER_PROFILE_TABLE_NAME, + APP_BUFFER_QUEUE_TABLE_NAME, + APP_BUFFER_PG_TABLE_NAME, + APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, + APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME }; + + gBufferOrch = new BufferOrch(m_app_db.get(), m_config_db.get(), m_state_db.get(), buffer_tables); + + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate pot table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + + portTable.set("PortInitDone", { { "lanes", "0" } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + + Table tcToQueueMapTable = Table(m_config_db.get(), CFG_TC_TO_QUEUE_MAP_TABLE_NAME); + Table scheduleTable = Table(m_config_db.get(), CFG_SCHEDULER_TABLE_NAME); + Table dscpToTcMapTable = Table(m_config_db.get(), CFG_DSCP_TO_TC_MAP_TABLE_NAME); + Table dot1pToTcMapTable = Table(m_config_db.get(), CFG_DOT1P_TO_TC_MAP_TABLE_NAME); + Table queueTable = Table(m_config_db.get(), CFG_QUEUE_TABLE_NAME); + Table portQosMapTable = Table(m_config_db.get(), CFG_PORT_QOS_MAP_TABLE_NAME); + Table wredProfileTable = Table(m_config_db.get(), CFG_WRED_PROFILE_TABLE_NAME); + Table tcToPgMapTable = Table(m_config_db.get(), CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME); + Table pfcPriorityToPgMapTable = Table(m_config_db.get(), CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME); + Table pfcPriorityToQueueMapTable = Table(m_config_db.get(), CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME); + Table dscpToFcMapTable = Table(m_config_db.get(), CFG_DSCP_TO_FC_MAP_TABLE_NAME); + Table expToFcMapTable = Table(m_config_db.get(), CFG_EXP_TO_FC_MAP_TABLE_NAME); + + scheduleTable.set("scheduler.1", + { + {"type", "DWRR"}, + {"weight", "15"} + }); + + scheduleTable.set("scheduler.0", + { + {"type", "DWRR"}, + {"weight", "14"} + }); + + wredProfileTable.set("AZURE_LOSSLESS", + { + {"ecn", "ecn_all"}, + {"green_drop_probability", "5"}, + {"green_max_threshold", "2097152"}, + {"green_min_threshold", "1048576"}, + {"wred_green_enable", "true"}, + {"yellow_drop_probability", "5"}, + {"yellow_max_threshold", "2097152"}, + {"yellow_min_threshold", "1048576"}, + {"wred_yellow_enable", "true"}, + {"red_drop_probability", "5"}, + {"red_max_threshold", "2097152"}, + {"red_min_threshold", "1048576"}, + {"wred_red_enable", "true"} + }); + + tcToQueueMapTable.set("AZURE", + { + {"0", "0"}, + {"1", "1"} + }); + + dscpToTcMapTable.set("AZURE", + { + {"0", "0"}, + {"1", "1"} + }); + + tcToPgMapTable.set("AZURE", + { + {"0", "0"}, + {"1", "1"} + }); + + dot1pToTcMapTable.set("AZURE", + { + {"0", "0"}, + {"1", "1"} + }); + + pfcPriorityToPgMapTable.set("AZURE", + { + {"0", "0"}, + {"1", "1"} + }); + + pfcPriorityToQueueMapTable.set("AZURE", + { + {"0", "0"}, + {"1", "1"} + }); + + dot1pToTcMapTable.set("AZURE", + { + {"0", "0"}, + {"1", "1"} + }); + + gQosOrch->addExistingData(&tcToQueueMapTable); + gQosOrch->addExistingData(&dscpToTcMapTable); + gQosOrch->addExistingData(&tcToPgMapTable); + gQosOrch->addExistingData(&pfcPriorityToPgMapTable); + gQosOrch->addExistingData(&pfcPriorityToQueueMapTable); + gQosOrch->addExistingData(&scheduleTable); + gQosOrch->addExistingData(&wredProfileTable); + + static_cast(gQosOrch)->doTask(); + } + + void TearDown() override + { + auto qos_maps = QosOrch::getTypeMap(); + for (auto &i : qos_maps) + { + i.second->clear(); + } + + gDirectory.m_values.clear(); + + delete gCrmOrch; + gCrmOrch = nullptr; + + delete gSwitchOrch; + gSwitchOrch = nullptr; + + delete gVrfOrch; + gVrfOrch = nullptr; + + delete gIntfsOrch; + gIntfsOrch = nullptr; + + delete gNeighOrch; + gNeighOrch = nullptr; + + delete gFdbOrch; + gFdbOrch = nullptr; + + delete gPortsOrch; + gPortsOrch = nullptr; + + delete gQosOrch; + gQosOrch = nullptr; + + sai_qos_map_api = pold_sai_qos_map_api; + sai_scheduler_api = pold_sai_scheduler_api; + sai_wred_api = pold_sai_wred_api; + sai_switch_api = pold_sai_switch_api; + ut_helper::uninitSaiApi(); + } + }; + + TEST_F(QosOrchTest, QosOrchTestPortQosMapRemoveOneField) + { + Table portQosMapTable = Table(m_config_db.get(), CFG_PORT_QOS_MAP_TABLE_NAME); + + portQosMapTable.set("Ethernet0", + { + {"dscp_to_tc_map", "AZURE"}, + {"pfc_to_pg_map", "AZURE"}, + {"pfc_to_queue_map", "AZURE"}, + {"tc_to_pg_map", "AZURE"}, + {"tc_to_queue_map", "AZURE"}, + {"pfc_enable", "3,4"} + }); + gQosOrch->addExistingData(&portQosMapTable); + static_cast(gQosOrch)->doTask(); + + // Check whether the dependencies have been recorded + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "pfc_to_pg_map", CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "pfc_to_queue_map", CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "tc_to_pg_map", CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "tc_to_queue_map", CFG_TC_TO_QUEUE_MAP_TABLE_NAME, "AZURE"); + + // Try removing AZURE from DSCP_TO_TC_MAP while it is still referenced + RemoveItem(CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); + auto current_sai_remove_qos_map_count = sai_remove_qos_map_count; + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(current_sai_remove_qos_map_count, sai_remove_qos_map_count); + // Dependency is not cleared + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); + + // Remove dscp_to_tc_map from Ethernet0 via resetting the entry with field dscp_to_tc_map removed + std::deque entries; + entries.push_back({"Ethernet0", "SET", + { + {"pfc_to_pg_map", "AZURE"}, + {"pfc_to_queue_map", "AZURE"}, + {"tc_to_pg_map", "AZURE"}, + {"tc_to_queue_map", "AZURE"}, + {"pfc_enable", "3,4"} + }}); + auto consumer = dynamic_cast(gQosOrch->getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Drain PORT_QOS_MAP table + static_cast(gQosOrch)->doTask(); + // Drain DSCP_TO_TC_MAP table + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(current_sai_remove_qos_map_count + 1, sai_remove_qos_map_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME]).count("AZURE"), 0); + // Dependency of dscp_to_tc_map should be cleared + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME); + // Dependencies of other items are not touched + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "pfc_to_pg_map", CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "pfc_to_queue_map", CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "tc_to_pg_map", CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "tc_to_queue_map", CFG_TC_TO_QUEUE_MAP_TABLE_NAME, "AZURE"); + } + + TEST_F(QosOrchTest, QosOrchTestQueueRemoveWredProfile) + { + std::deque entries; + Table queueTable = Table(m_config_db.get(), CFG_QUEUE_TABLE_NAME); + + queueTable.set("Ethernet0|3", + { + {"scheduler", "scheduler.1"}, + {"wred_profile", "AZURE_LOSSLESS"} + }); + gQosOrch->addExistingData(&queueTable); + static_cast(gQosOrch)->doTask(); + + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.1"); + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS"); + + // Try removing scheduler from WRED_PROFILE table while it is still referenced + RemoveItem(CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS"); + auto current_sai_remove_wred_profile_count = sai_remove_wred_profile_count; + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(current_sai_remove_wred_profile_count, sai_remove_wred_profile_count); + // Make sure the dependency is untouched + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS"); + + // Remove wred_profile from Ethernet0 queue 3 + entries.push_back({"Ethernet0|3", "SET", + { + {"scheduler", "scheduler.1"} + }}); + auto consumer = dynamic_cast(gQosOrch->getExecutor(CFG_QUEUE_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Drain QUEUE table + static_cast(gQosOrch)->doTask(); + // Drain WRED_PROFILE table + static_cast(gQosOrch)->doTask(); + // Make sure the dependency is cleared + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME); + // And the sai remove API has been called + ASSERT_EQ(current_sai_remove_wred_profile_count + 1, sai_remove_wred_profile_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_WRED_PROFILE_TABLE_NAME]).count("AZURE_LOSSLESS"), 0); + // Other field should be untouched + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.1"); + } + + TEST_F(QosOrchTest, QosOrchTestQueueRemoveScheduler) + { + std::deque entries; + Table queueTable = Table(m_config_db.get(), CFG_QUEUE_TABLE_NAME); + + queueTable.set("Ethernet0|3", + { + {"scheduler", "scheduler.1"}, + {"wred_profile", "AZURE_LOSSLESS"} + }); + gQosOrch->addExistingData(&queueTable); + static_cast(gQosOrch)->doTask(); + + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.1"); + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS"); + + // Try removing scheduler from QUEUE table while it is still referenced + RemoveItem(CFG_SCHEDULER_TABLE_NAME, "scheduler.1"); + auto current_sai_remove_scheduler_count = sai_remove_scheduler_count; + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(current_sai_remove_scheduler_count, sai_remove_scheduler_count); + // Make sure the dependency is untouched + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.1"); + + // Remove scheduler from Ethernet0 queue 3 + entries.push_back({"Ethernet0|3", "SET", + { + {"wred_profile", "AZURE_LOSSLESS"} + }}); + auto consumer = dynamic_cast(gQosOrch->getExecutor(CFG_QUEUE_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Drain QUEUE table + static_cast(gQosOrch)->doTask(); + // Drain SCHEDULER table + static_cast(gQosOrch)->doTask(); + // Make sure the dependency is cleared + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME); + // And the sai remove API has been called + ASSERT_EQ(current_sai_remove_scheduler_count + 1, sai_remove_scheduler_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_SCHEDULER_TABLE_NAME]).count("scheduler.1"), 0); + // Other field should be untouched + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS"); + } + + TEST_F(QosOrchTest, QosOrchTestQueueReplaceFieldAndRemoveObject) + { + std::deque entries; + Table queueTable = Table(m_config_db.get(), CFG_QUEUE_TABLE_NAME); + auto queueConsumer = dynamic_cast(gQosOrch->getExecutor(CFG_QUEUE_TABLE_NAME)); + auto wredProfileConsumer = dynamic_cast(gQosOrch->getExecutor(CFG_WRED_PROFILE_TABLE_NAME)); + auto schedulerConsumer = dynamic_cast(gQosOrch->getExecutor(CFG_SCHEDULER_TABLE_NAME)); + + queueTable.set("Ethernet0|3", + { + {"scheduler", "scheduler.1"}, + {"wred_profile", "AZURE_LOSSLESS"} + }); + gQosOrch->addExistingData(&queueTable); + static_cast(gQosOrch)->doTask(); + + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.1"); + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS"); + + // Try replacing scheduler in QUEUE table: scheduler.1 => scheduler.0 + entries.push_back({"Ethernet0|3", "SET", + { + {"scheduler", "scheduler.0"}, + {"wred_profile", "AZURE_LOSSLESS"} + }}); + queueConsumer->addToSync(entries); + entries.clear(); + // Drain QUEUE table + static_cast(gQosOrch)->doTask(); + // Make sure the dependency is updated + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.0"); + // And the other field is not touched + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS"); + + RemoveItem(CFG_SCHEDULER_TABLE_NAME, "scheduler.1"); + auto current_sai_remove_scheduler_count = sai_remove_scheduler_count; + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(++current_sai_remove_scheduler_count, sai_remove_scheduler_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_SCHEDULER_TABLE_NAME]).count("scheduler.1"), 0); + + entries.push_back({"AZURE_LOSSLESS_1", "SET", + { + {"ecn", "ecn_all"}, + {"green_drop_probability", "5"}, + {"green_max_threshold", "2097152"}, + {"green_min_threshold", "1048576"}, + {"wred_green_enable", "true"}, + {"yellow_drop_probability", "5"}, + {"yellow_max_threshold", "2097152"}, + {"yellow_min_threshold", "1048576"}, + {"wred_yellow_enable", "true"}, + {"red_drop_probability", "5"}, + {"red_max_threshold", "2097152"}, + {"red_min_threshold", "1048576"}, + {"wred_red_enable", "true"} + }}); + wredProfileConsumer->addToSync(entries); + entries.clear(); + // Drain WRED_PROFILE table + static_cast(gQosOrch)->doTask(); + + // Replace wred_profile from Ethernet0 queue 3 + entries.push_back({"Ethernet0|3", "SET", + { + {"scheduler", "scheduler.0"}, + {"wred_profile", "AZURE_LOSSLESS_1"} + }}); + queueConsumer->addToSync(entries); + entries.clear(); + // Drain QUEUE table + static_cast(gQosOrch)->doTask(); + // Make sure the dependency is updated + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS_1"); + // And the other field is not touched + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.0"); + + RemoveItem(CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS"); + // Drain WRED_PROFILE table + auto current_sai_remove_wred_profile_count = sai_remove_wred_profile_count; + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(++current_sai_remove_wred_profile_count, sai_remove_wred_profile_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_WRED_PROFILE_TABLE_NAME]).count("AZURE_LOSSLESS"), 0); + + // Remove object + entries.push_back({"Ethernet0|3", "DEL", {}}); + queueConsumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + + // Make sure the dependency is updated + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME); + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME); + + // Remove scheduler object + entries.push_back({"scheduler.0", "DEL", {}}); + schedulerConsumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(++current_sai_remove_scheduler_count, sai_remove_scheduler_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_SCHEDULER_TABLE_NAME]).count("scheduler.0"), 0); + + // Remove wred profile object + entries.push_back({"AZURE_LOSSLESS_1", "DEL", {}}); + wredProfileConsumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(++current_sai_remove_wred_profile_count, sai_remove_wred_profile_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_WRED_PROFILE_TABLE_NAME]).count("AZURE_LOSSLESS_1"), 0); + } + + TEST_F(QosOrchTest, QosOrchTestPortQosMapReplaceOneFieldAndRemoveObject) + { + std::deque entries; + Table portQosMapTable = Table(m_config_db.get(), CFG_PORT_QOS_MAP_TABLE_NAME); + + portQosMapTable.set("Ethernet0", + { + {"dscp_to_tc_map", "AZURE"}, + {"pfc_to_pg_map", "AZURE"}, + {"pfc_to_queue_map", "AZURE"}, + {"tc_to_pg_map", "AZURE"}, + {"tc_to_queue_map", "AZURE"}, + {"pfc_enable", "3,4"} + }); + + static_cast(gQosOrch)->doTask(); + + entries.push_back({"AZURE_1", "SET", + { + {"1", "0"}, + {"0", "1"} + }}); + + auto consumer = dynamic_cast(gQosOrch->getExecutor(CFG_DSCP_TO_TC_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Drain DSCP_TO_TC_MAP table + static_cast(gQosOrch)->doTask(); + + entries.push_back({"Ethernet0", "SET", + { + {"dscp_to_tc_map", "AZURE_1"}, + {"pfc_to_pg_map", "AZURE"}, + {"pfc_to_queue_map", "AZURE"}, + {"tc_to_pg_map", "AZURE"}, + {"tc_to_queue_map", "AZURE"}, + {"pfc_enable", "3,4"} + }}); + consumer = dynamic_cast(gQosOrch->getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Drain PORT_QOS_MAP table + static_cast(gQosOrch)->doTask(); + // Dependency is updated + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE_1"); + + // Try removing AZURE from DSCP_TO_TC_MAP + RemoveItem(CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); + auto current_sai_remove_qos_map_count = sai_remove_qos_map_count; + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(++current_sai_remove_qos_map_count, sai_remove_qos_map_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME]).count("AZURE"), 0); + // Global dscp to tc map should not be cleared + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE_1"].m_saiObjectId, switch_dscp_to_tc_map_id); + + // Make sure other dependencies are not touched + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "pfc_to_pg_map", CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "pfc_to_queue_map", CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "tc_to_pg_map", CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "tc_to_queue_map", CFG_TC_TO_QUEUE_MAP_TABLE_NAME, "AZURE"); + + // Remove port from PORT_QOS_MAP table + entries.push_back({"Ethernet0", "DEL", {}}); + consumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_PORT_QOS_MAP_TABLE_NAME]).count("Ethernet0"), 0); + + // Make sure the maps can be removed now. Checking anyone should suffice since all the maps are handled in the same way. + entries.push_back({"AZURE", "DEL", {}}); + consumer = dynamic_cast(gQosOrch->getExecutor(CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(++current_sai_remove_qos_map_count, sai_remove_qos_map_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME]).count("AZURE"), 0); + + entries.push_back({"AZURE_1", "DEL", {}}); + consumer = dynamic_cast(gQosOrch->getExecutor(CFG_DSCP_TO_TC_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(++current_sai_remove_qos_map_count, sai_remove_qos_map_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME]).count("AZURE_1"), 0); + // Global dscp to tc map should be cleared + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE_1"].m_saiObjectId, SAI_NULL_OBJECT_ID); + } + + TEST_F(QosOrchTest, QosOrchTestGlobalDscpToTcMap) + { + // Make sure dscp to tc map is correct + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE"].m_saiObjectId, switch_dscp_to_tc_map_id); + + // Create a new dscp to tc map + std::deque entries; + entries.push_back({"AZURE_1", "SET", + { + {"1", "0"}, + {"0", "1"} + }}); + + auto consumer = dynamic_cast(gQosOrch->getExecutor(CFG_DSCP_TO_TC_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Drain DSCP_TO_TC_MAP table + static_cast(gQosOrch)->doTask(); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE_1"].m_saiObjectId, switch_dscp_to_tc_map_id); + + entries.push_back({"AZURE_1", "DEL", {}}); + consumer->addToSync(entries); + entries.clear(); + // Drain DSCP_TO_TC_MAP table + static_cast(gQosOrch)->doTask(); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE"].m_saiObjectId, switch_dscp_to_tc_map_id); + } +} diff --git a/tests/mock_tests/routeorch_ut.cpp b/tests/mock_tests/routeorch_ut.cpp new file mode 100644 index 00000000000..84f92a088c4 --- /dev/null +++ b/tests/mock_tests/routeorch_ut.cpp @@ -0,0 +1,419 @@ +#define private public // make Directory::m_values available to clean it. +#include "directory.h" +#undef private +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_table.h" +#include "bulker.h" + +extern string gMySwitchType; + + +namespace routeorch_test +{ + using namespace std; + + shared_ptr m_app_db; + shared_ptr m_config_db; + shared_ptr m_state_db; + shared_ptr m_chassis_app_db; + + int create_route_count; + int set_route_count; + int remove_route_count; + int sai_fail_count; + + sai_route_api_t ut_sai_route_api; + sai_route_api_t *pold_sai_route_api; + + sai_bulk_create_route_entry_fn old_create_route_entries; + sai_bulk_remove_route_entry_fn old_remove_route_entries; + sai_bulk_set_route_entry_attribute_fn old_set_route_entries_attribute; + + sai_status_t _ut_stub_sai_bulk_create_route_entry( + _In_ uint32_t object_count, + _In_ const sai_route_entry_t *route_entry, + _In_ const uint32_t *attr_count, + _In_ const sai_attribute_t **attr_list, + _In_ sai_bulk_op_error_mode_t mode, + _Out_ sai_status_t *object_statuses) + { + create_route_count++; + return old_create_route_entries(object_count, route_entry, attr_count, attr_list, mode, object_statuses); + } + + sai_status_t _ut_stub_sai_bulk_remove_route_entry( + _In_ uint32_t object_count, + _In_ const sai_route_entry_t *route_entry, + _In_ sai_bulk_op_error_mode_t mode, + _Out_ sai_status_t *object_statuses) + { + remove_route_count++; + return old_remove_route_entries(object_count, route_entry, mode, object_statuses); + } + + sai_status_t _ut_stub_sai_bulk_set_route_entry_attribute( + _In_ uint32_t object_count, + _In_ const sai_route_entry_t *route_entry, + _In_ const sai_attribute_t *attr_list, + _In_ sai_bulk_op_error_mode_t mode, + _Out_ sai_status_t *object_statuses) + { + set_route_count++; + + // Make sure there is not conflict settings + bool drop = false; + bool valid_nexthop = false; + for (uint32_t i = 0; i < object_count; i++) + { + if (route_entry[i].destination.mask.ip4 == 0) + { + if (attr_list[i].id == SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION) + { + drop = (attr_list[i].value.s32 == SAI_PACKET_ACTION_DROP); + } + else if (attr_list[i].id == SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID) + { + valid_nexthop = (attr_list[i].value.oid != SAI_NULL_OBJECT_ID); + } + } + } + + // Drop and a valid nexthop can not be provided for the same prefix + if (drop && valid_nexthop) + sai_fail_count++; + + return old_set_route_entries_attribute(object_count, route_entry, attr_list, mode, object_statuses); + } + + struct RouteOrchTest : public ::testing::Test + { + RouteOrchTest() + { + } + + void SetUp() override + { + ASSERT_EQ(sai_route_api, nullptr); + map profile = { + { "SAI_VS_SWITCH_TYPE", "SAI_VS_SWITCH_TYPE_BCM56850" }, + { "KV_DEVICE_MAC_ADDRESS", "20:03:04:05:06:00" } + }; + + ut_helper::initSaiApi(profile); + + // Hack the route create function + old_create_route_entries = sai_route_api->create_route_entries; + old_remove_route_entries = sai_route_api->remove_route_entries; + old_set_route_entries_attribute = sai_route_api->set_route_entries_attribute; + + pold_sai_route_api = sai_route_api; + ut_sai_route_api = *sai_route_api; + sai_route_api = &ut_sai_route_api; + + sai_route_api->create_route_entries = _ut_stub_sai_bulk_create_route_entry; + sai_route_api->remove_route_entries = _ut_stub_sai_bulk_remove_route_entry; + sai_route_api->set_route_entries_attribute = _ut_stub_sai_bulk_set_route_entry_attribute; + + // Init switch and create dependencies + m_app_db = make_shared("APPL_DB", 0); + m_config_db = make_shared("CONFIG_DB", 0); + m_state_db = make_shared("STATE_DB", 0); + if(gMySwitchType == "voq") + m_chassis_app_db = make_shared("CHASSIS_APP_DB", 0); + + sai_attribute_t attr; + + attr.id = SAI_SWITCH_ATTR_INIT_SWITCH; + attr.value.booldata = true; + + auto status = sai_switch_api->create_switch(&gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + // Get switch source MAC address + attr.id = SAI_SWITCH_ATTR_SRC_MAC_ADDRESS; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gMacAddress = attr.value.mac; + + // Get the default virtual router ID + attr.id = SAI_SWITCH_ATTR_DEFAULT_VIRTUAL_ROUTER_ID; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gVirtualRouterId = attr.value.oid; + + ASSERT_EQ(gCrmOrch, nullptr); + gCrmOrch = new CrmOrch(m_config_db.get(), CFG_CRM_TABLE_NAME); + + TableConnector stateDbSwitchTable(m_state_db.get(), "SWITCH_CAPABILITY"); + TableConnector conf_asic_sensors(m_config_db.get(), CFG_ASIC_SENSORS_TABLE_NAME); + TableConnector app_switch_table(m_app_db.get(), APP_SWITCH_TABLE_NAME); + + vector switch_tables = { + conf_asic_sensors, + app_switch_table + }; + + ASSERT_EQ(gSwitchOrch, nullptr); + gSwitchOrch = new SwitchOrch(m_app_db.get(), switch_tables, stateDbSwitchTable); + + // Create dependencies ... + + const int portsorch_base_pri = 40; + + vector ports_tables = { + { APP_PORT_TABLE_NAME, portsorch_base_pri + 5 }, + { APP_VLAN_TABLE_NAME, portsorch_base_pri + 2 }, + { APP_VLAN_MEMBER_TABLE_NAME, portsorch_base_pri }, + { APP_LAG_TABLE_NAME, portsorch_base_pri + 4 }, + { APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri } + }; + + vector flex_counter_tables = { + CFG_FLEX_COUNTER_TABLE_NAME + }; + auto* flexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); + gDirectory.set(flexCounterOrch); + + ASSERT_EQ(gPortsOrch, nullptr); + gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + + ASSERT_EQ(gVrfOrch, nullptr); + gVrfOrch = new VRFOrch(m_app_db.get(), APP_VRF_TABLE_NAME, m_state_db.get(), STATE_VRF_OBJECT_TABLE_NAME); + + ASSERT_EQ(gIntfsOrch, nullptr); + gIntfsOrch = new IntfsOrch(m_app_db.get(), APP_INTF_TABLE_NAME, gVrfOrch, m_chassis_app_db.get()); + + const int fdborch_pri = 20; + + vector app_fdb_tables = { + { APP_FDB_TABLE_NAME, FdbOrch::fdborch_pri}, + { APP_VXLAN_FDB_TABLE_NAME, FdbOrch::fdborch_pri}, + { APP_MCLAG_FDB_TABLE_NAME, fdborch_pri} + }; + + TableConnector stateDbFdb(m_state_db.get(), STATE_FDB_TABLE_NAME); + TableConnector stateMclagDbFdb(m_state_db.get(), STATE_MCLAG_REMOTE_FDB_TABLE_NAME); + ASSERT_EQ(gFdbOrch, nullptr); + gFdbOrch = new FdbOrch(m_app_db.get(), app_fdb_tables, stateDbFdb, stateMclagDbFdb, gPortsOrch); + + ASSERT_EQ(gNeighOrch, nullptr); + gNeighOrch = new NeighOrch(m_app_db.get(), APP_NEIGH_TABLE_NAME, gIntfsOrch, gFdbOrch, gPortsOrch, m_chassis_app_db.get()); + + TunnelDecapOrch *tunnel_decap_orch = new TunnelDecapOrch(m_app_db.get(), APP_TUNNEL_DECAP_TABLE_NAME); + vector mux_tables = { + CFG_MUX_CABLE_TABLE_NAME, + CFG_PEER_SWITCH_TABLE_NAME + }; + MuxOrch *mux_orch = new MuxOrch(m_config_db.get(), mux_tables, tunnel_decap_orch, gNeighOrch, gFdbOrch); + gDirectory.set(mux_orch); + + ASSERT_EQ(gFgNhgOrch, nullptr); + const int fgnhgorch_pri = 15; + + vector fgnhg_tables = { + { CFG_FG_NHG, fgnhgorch_pri }, + { CFG_FG_NHG_PREFIX, fgnhgorch_pri }, + { CFG_FG_NHG_MEMBER, fgnhgorch_pri } + }; + gFgNhgOrch = new FgNhgOrch(m_config_db.get(), m_app_db.get(), m_state_db.get(), fgnhg_tables, gNeighOrch, gIntfsOrch, gVrfOrch); + + ASSERT_EQ(gSrv6Orch, nullptr); + vector srv6_tables = { + APP_SRV6_SID_LIST_TABLE_NAME, + APP_SRV6_MY_SID_TABLE_NAME + }; + gSrv6Orch = new Srv6Orch(m_app_db.get(), srv6_tables, gSwitchOrch, gVrfOrch, gNeighOrch); + + ASSERT_EQ(gRouteOrch, nullptr); + const int routeorch_pri = 5; + vector route_tables = { + { APP_ROUTE_TABLE_NAME, routeorch_pri }, + { APP_LABEL_ROUTE_TABLE_NAME, routeorch_pri } + }; + gRouteOrch = new RouteOrch(m_app_db.get(), route_tables, gSwitchOrch, gNeighOrch, gIntfsOrch, gVrfOrch, gFgNhgOrch, gSrv6Orch); + gNhgOrch = new NhgOrch(m_app_db.get(), APP_NEXTHOP_GROUP_TABLE_NAME); + + // Recreate buffer orch to read populated data + vector buffer_tables = { APP_BUFFER_POOL_TABLE_NAME, + APP_BUFFER_PROFILE_TABLE_NAME, + APP_BUFFER_QUEUE_TABLE_NAME, + APP_BUFFER_PG_TABLE_NAME, + APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, + APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME }; + + gBufferOrch = new BufferOrch(m_app_db.get(), m_config_db.get(), m_state_db.get(), buffer_tables); + + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate pot table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + + portTable.set("PortInitDone", { { "lanes", "0" } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + + Table intfTable = Table(m_app_db.get(), APP_INTF_TABLE_NAME); + intfTable.set("Ethernet0", { {"NULL", "NULL" }, + {"mac_addr", "00:00:00:00:00:00" }}); + intfTable.set("Ethernet0:10.0.0.1/24", { { "scope", "global" }, + { "family", "IPv4" }}); + gIntfsOrch->addExistingData(&intfTable); + static_cast(gIntfsOrch)->doTask(); + + Table neighborTable = Table(m_app_db.get(), APP_NEIGH_TABLE_NAME); + + map neighborIp2Mac = {{"10.0.0.2", "00:00:0a:00:00:02" }, + {"10.0.0.3", "00:00:0a:00:00:03" } }; + neighborTable.set("Ethernet0:10.0.0.2", { {"neigh", neighborIp2Mac["10.0.0.2"]}, + {"family", "IPv4" }}); + neighborTable.set("Ethernet0:10.0.0.3", { {"neigh", neighborIp2Mac["10.0.0.3"]}, + {"family", "IPv4" }}); + gNeighOrch->addExistingData(&neighborTable); + static_cast(gNeighOrch)->doTask(); + + Table routeTable = Table(m_app_db.get(), APP_ROUTE_TABLE_NAME); + routeTable.set("1.1.1.0/24", { {"ifname", "Ethernet0" }, + {"nexthop", "10.0.0.2" }}); + routeTable.set("0.0.0.0/0", { {"ifname", "Ethernet0" }, + {"nexthop", "10.0.0.2" }}); + gRouteOrch->addExistingData(&routeTable); + static_cast(gRouteOrch)->doTask(); + } + + void TearDown() override + { + gDirectory.m_values.clear(); + + delete gCrmOrch; + gCrmOrch = nullptr; + + delete gSwitchOrch; + gSwitchOrch = nullptr; + + delete gVrfOrch; + gVrfOrch = nullptr; + + delete gIntfsOrch; + gIntfsOrch = nullptr; + + delete gNeighOrch; + gNeighOrch = nullptr; + + delete gFdbOrch; + gFdbOrch = nullptr; + + delete gFgNhgOrch; + gFgNhgOrch = nullptr; + + delete gSrv6Orch; + gSrv6Orch = nullptr; + + delete gRouteOrch; + gRouteOrch = nullptr; + + delete gPortsOrch; + gPortsOrch = nullptr; + + sai_route_api = pold_sai_route_api; + ut_helper::uninitSaiApi(); + } + }; + + TEST_F(RouteOrchTest, RouteOrchTestDelSetSameNexthop) + { + std::deque entries; + + // Setting route with same next hop but after a DEL in the same bulk + entries.push_back({"1.1.1.0/24", "DEL", { {} }}); + entries.push_back({"1.1.1.0/24", "SET", { {"ifname", "Ethernet0"}, + {"nexthop", "10.0.0.2"}}}); + auto consumer = dynamic_cast(gRouteOrch->getExecutor(APP_ROUTE_TABLE_NAME)); + consumer->addToSync(entries); + auto current_create_count = create_route_count; + auto current_remove_count = remove_route_count; + auto current_set_count = set_route_count; + + static_cast(gRouteOrch)->doTask(); + // Make sure both create and set has been called + ASSERT_EQ(current_create_count + 1, create_route_count); + ASSERT_EQ(current_remove_count + 1, remove_route_count); + ASSERT_EQ(current_set_count, set_route_count); + + entries.clear(); + + // Make sure SAI API won't be called if setting it for second time with the same next hop + entries.push_back({"1.1.1.0/24", "SET", { {"ifname", "Ethernet0"}, + {"nexthop", "10.0.0.2"}}}); + consumer = dynamic_cast(gRouteOrch->getExecutor(APP_ROUTE_TABLE_NAME)); + consumer->addToSync(entries); + current_create_count = create_route_count; + current_remove_count = remove_route_count; + current_set_count = set_route_count; + + static_cast(gRouteOrch)->doTask(); + // Make sure both create and set has been called + ASSERT_EQ(current_create_count, create_route_count); + ASSERT_EQ(current_remove_count, remove_route_count); + ASSERT_EQ(current_set_count, set_route_count); + } + + TEST_F(RouteOrchTest, RouteOrchTestDelSetDiffNexthop) + { + std::deque entries; + entries.push_back({"1.1.1.0/24", "DEL", { {} }}); + entries.push_back({"1.1.1.0/24", "SET", { {"ifname", "Ethernet0"}, + {"nexthop", "10.0.0.3"}}}); + + auto consumer = dynamic_cast(gRouteOrch->getExecutor(APP_ROUTE_TABLE_NAME)); + consumer->addToSync(entries); + auto current_create_count = create_route_count; + auto current_remove_count = remove_route_count; + auto current_set_count = set_route_count; + + static_cast(gRouteOrch)->doTask(); + // Make sure both create and remove has been called + ASSERT_EQ(current_create_count + 1, create_route_count); + ASSERT_EQ(current_remove_count + 1, remove_route_count); + ASSERT_EQ(current_set_count, set_route_count); + } + + TEST_F(RouteOrchTest, RouteOrchTestDelSetDefaultRoute) + { + std::deque entries; + entries.push_back({"0.0.0.0/0", "DEL", { {} }}); + entries.push_back({"0.0.0.0/0", "SET", { {"ifname", "Ethernet0"}, + {"nexthop", "10.0.0.3"}}}); + + auto consumer = dynamic_cast(gRouteOrch->getExecutor(APP_ROUTE_TABLE_NAME)); + consumer->addToSync(entries); + auto current_create_count = create_route_count; + auto current_remove_count = remove_route_count; + auto current_set_count = set_route_count; + + static_cast(gRouteOrch)->doTask(); + // Make sure both create and set has been called + ASSERT_EQ(current_create_count, create_route_count); + ASSERT_EQ(current_remove_count, remove_route_count); + ASSERT_EQ(current_set_count + 1, set_route_count); + ASSERT_EQ(sai_fail_count, 0); + } +} diff --git a/tests/mock_tests/ut_saihelper.cpp b/tests/mock_tests/ut_saihelper.cpp index 34b76e7e5a7..70eb96c99f8 100644 --- a/tests/mock_tests/ut_saihelper.cpp +++ b/tests/mock_tests/ut_saihelper.cpp @@ -77,7 +77,12 @@ namespace ut_helper sai_api_query(SAI_API_ACL, (void **)&sai_acl_api); sai_api_query(SAI_API_HOSTIF, (void **)&sai_hostif_api); sai_api_query(SAI_API_BUFFER, (void **)&sai_buffer_api); + sai_api_query(SAI_API_QOS_MAP, (void **)&sai_qos_map_api); + sai_api_query(SAI_API_SCHEDULER_GROUP, (void **)&sai_scheduler_group_api); + sai_api_query(SAI_API_SCHEDULER, (void **)&sai_scheduler_api); + sai_api_query(SAI_API_WRED, (void **)&sai_wred_api); sai_api_query(SAI_API_QUEUE, (void **)&sai_queue_api); + sai_api_query(SAI_API_MPLS, (void**)&sai_mpls_api); return SAI_STATUS_SUCCESS; } diff --git a/tests/p4rt/acl.py b/tests/p4rt/acl.py new file mode 100644 index 00000000000..283ba95ce66 --- /dev/null +++ b/tests/p4rt/acl.py @@ -0,0 +1,206 @@ +# Lint as: python3 +from swsscommon import swsscommon + +import util + +INGRESS_STAGE = "SAI_ACL_STAGE_INGRESS" +EGRESS_STAGE = "SAI_ACL_STAGE_EGRESS" +PRE_INGRESS_STAGE = "SAI_ACL_STAGE_PRE_INGRESS" + +class P4RtAclTableDefinitionWrapper(util.DBInterface): + """Interface to interact with APP DB and ASIC DB tables for P4RT ACL table definition object.""" + + # table name in ASIC_DB and SAI constants + ASIC_DB_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE" + SAI_ATTR_MATCH_ETHER_TYPE = "SAI_ACL_TABLE_ATTR_FIELD_ETHER_TYPE" + SAI_ATTR_MATCH_IP_TYPE = "SAI_ACL_TABLE_ATTR_FIELD_ACL_IP_TYPE" + SAI_ATTR_MATCH_DST_MAC = "SAI_ACL_TABLE_ATTR_FIELD_DST_MAC" + SAI_ATTR_MATCH_SRC_IPV6_WORD3 = "SAI_ACL_TABLE_ATTR_FIELD_SRC_IPV6_WORD3" + SAI_ATTR_MATCH_SRC_IPV6_WORD2 = "SAI_ACL_TABLE_ATTR_FIELD_SRC_IPV6_WORD2" + SAI_ATTR_MATCH_UDF_GROUP_MIN = "SAI_ACL_TABLE_ATTR_USER_DEFINED_FIELD_GROUP_MIN" + SAI_ATTR_MATCH_UDF_GROUP_1 = "SAI_ACL_TABLE_ATTR_USER_DEFINED_FIELD_GROUP_1" + SAI_ATTR_ACTION_TYPE_LIST = "SAI_ACL_TABLE_ATTR_ACL_ACTION_TYPE_LIST" + SAI_ACL_TABLE_ATTR_ACL_STAGE = "SAI_ACL_TABLE_ATTR_ACL_STAGE" + SAI_ACL_TABLE_ATTR_SIZE = "SAI_ACL_TABLE_ATTR_SIZE" + + # table name in APP_DB and attribute fields + APP_DB_TBL_NAME = swsscommon.APP_P4RT_TABLE_NAME + TBL_NAME = swsscommon.APP_P4RT_ACL_TABLE_DEFINITION_NAME + STAGE_FIELD = "stage" + PRIORITY_FIELD = "priority" + SIZE_FIELD = "size" + MATCH_FIELD_ETHER_TYPE = "match/ether_type" + MATCH_FIELD_ETHER_DST = "match/ether_dst" + MATCH_FIELD_IS_IP = "match/is_ip" + MATCH_FIELD_IS_IPV4 = "match/is_ipv4" + MATCH_FIELD_IS_IPV6 = "match/is_ipv6" + MATCH_FIELD_IS_ARP = "match/is_arp" + MATCH_FIELD_SRC_IPV6_64BIT = "match/src_ipv6_64bit" + MATCH_FIELD_ARP_TPA = "match/arp_tpa" + ACTION_COPY_AND_SET_TC = "action/copy_and_set_tc" + ACTION_PUNT_AND_SET_TC = "action/punt_and_set_tc" + ACTION_SET_QOS_QUEUE = "action/qos_queue" + METER_UNIT = "meter/unit" + COUNTER_UNIT = "counter/unit" + + +class P4RtAclRuleWrapper(util.DBInterface): + """Interface to interact with APP DB and ASIC DB tables for P4RT ACL entry object.""" + + # table name in ASIC_DB and SAI constants + ASIC_DB_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY" + SAI_ATTR_TABLE_ID = "SAI_ACL_ENTRY_ATTR_TABLE_ID" + SAI_ATTR_PRIORITY = "SAI_ACL_ENTRY_ATTR_PRIORITY" + SAI_ATTR_ADMIN_STATE = "SAI_ACL_ENTRY_ATTR_ADMIN_STATE" + SAI_ATTR_SET_POLICER = "SAI_ACL_ENTRY_ATTR_ACTION_SET_POLICER" + SAI_ATTR_COUNTER = "SAI_ACL_ENTRY_ATTR_ACTION_COUNTER" + SAI_ATTR_MATCH_ETHER_TYPE = "SAI_ACL_ENTRY_ATTR_FIELD_ETHER_TYPE" + SAI_ATTR_MATCH_IP_TYPE = "SAI_ACL_ENTRY_ATTR_FIELD_ACL_IP_TYPE" + SAI_ATTR_MATCH_DST_MAC = "SAI_ACL_ENTRY_ATTR_FIELD_DST_MAC" + SAI_ATTR_MATCH_SRC_IPV6_WORD3 = "SAI_ACL_ENTRY_ATTR_FIELD_SRC_IPV6_WORD3" + SAI_ATTR_MATCH_SRC_IPV6_WORD2 = "SAI_ACL_ENTRY_ATTR_FIELD_SRC_IPV6_WORD2" + SAI_ATTR_MATCH_UDF_GROUP_MIN = "SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN" + SAI_ATTR_MATCH_UDF_GROUP_1 = "SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_1" + SAI_ATTR_ACTION_PACKET_ACTION = "SAI_ACL_ENTRY_ATTR_ACTION_PACKET_ACTION" + SAI_ATTR_ACTION_SET_TC = "SAI_ACL_ENTRY_ATTR_ACTION_SET_TC" + SAI_ATTR_ACTION_SET_USER_TRAP_ID = "SAI_ACL_ENTRY_ATTR_ACTION_SET_USER_TRAP_ID" + + # table name in APP_DB and attribute fields + APP_DB_TBL_NAME = swsscommon.APP_P4RT_TABLE_NAME + ACTION = "action" + METER_CIR = "meter/cir" + METER_CBURST = "meter/cburst" + METER_PIR = "meter/pir" + METER_PBURST = "meter/pburst" + + +class P4RtAclCounterWrapper(util.DBInterface): + """Interface to interact with APP DB and ASIC DB tables for P4RT ACL counter object.""" + + # table name in ASIC_DB and SAI constants + ASIC_DB_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_ACL_COUNTER" + SAI_ATTR_TABLE_ID = "SAI_ACL_COUNTER_ATTR_TABLE_ID" + SAI_ATTR_ENABLE_BYTE_COUNT = "SAI_ACL_COUNTER_ATTR_ENABLE_BYTE_COUNT" + SAI_ATTR_ENABLE_PACKET_COUNT = "SAI_ACL_COUNTER_ATTR_ENABLE_PACKET_COUNT" + + +class P4RtAclMeterWrapper(util.DBInterface): + """Interface in ASIC DB tables for P4RT ACL policer object.""" + + # table name in ASIC_DB and SAI constants + ASIC_DB_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_POLICER" + SAI_ATTR_METER_TYPE = "SAI_POLICER_ATTR_METER_TYPE" + SAI_ATTR_METER_MODE = "SAI_POLICER_ATTR_MODE" + SAI_ATTR_METER_CBS = "SAI_POLICER_ATTR_CBS" + SAI_ATTR_METER_CIR = "SAI_POLICER_ATTR_CIR" + SAI_ATTR_METER_PBS = "SAI_POLICER_ATTR_PBS" + SAI_ATTR_METER_PIR = "SAI_POLICER_ATTR_PIR" + SAI_ATTR_GREEN_PACKET_ACTION = "SAI_POLICER_ATTR_GREEN_PACKET_ACTION" + SAI_ATTR_RED_PACKET_ACTION = "SAI_POLICER_ATTR_RED_PACKET_ACTION" + SAI_ATTR_YELLOW_PACKET_ACTION = "SAI_POLICER_ATTR_YELLOW_PACKET_ACTION" + + +class P4RtAclGroupWrapper(util.DBInterface): + """Interface in ASIC DB tables for P4RT ACL group object.""" + + # table name in ASIC_DB and SAI constants + ASIC_DB_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE_GROUP" + SAI_ACL_TABLE_GROUP_ATTR_ACL_STAGE = "SAI_ACL_TABLE_GROUP_ATTR_ACL_STAGE" + SAI_ACL_TABLE_GROUP_ATTR_TYPE = "SAI_ACL_TABLE_GROUP_ATTR_TYPE" + SAI_ACL_TABLE_ATTR_ACL_BIND_POINT_TYPE_LIST = "SAI_ACL_TABLE_ATTR_ACL_BIND_POINT_TYPE_LIST" + + def get_group_oids_by_stage(self, stage): + tbl = swsscommon.Table(self.asic_db, self.ASIC_DB_TBL_NAME) + keys = tbl.getKeys() + group_oids = [] + for key in keys: + (status, fvs) = tbl.get(key) + assert status == True + for name, val in fvs: + if name == self.SAI_ACL_TABLE_GROUP_ATTR_ACL_STAGE and val == stage: + group_oids.append(key) + break + return group_oids + + +class P4RtAclGroupMemberWrapper(util.DBInterface): + """Interface in ASIC DB tables for P4RT ACL group member object.""" + + # table name in ASIC_DB and SAI constants + ASIC_DB_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE_GROUP_MEMBER" + SAI_ACL_TABLE_GROUP_MEMBER_ATTR_ACL_TABLE_GROUP_ID = "SAI_ACL_TABLE_GROUP_MEMBER_ATTR_ACL_TABLE_GROUP_ID" + SAI_ACL_TABLE_GROUP_MEMBER_ATTR_ACL_TABLE_ID = "SAI_ACL_TABLE_GROUP_MEMBER_ATTR_ACL_TABLE_ID" + SAI_ACL_TABLE_GROUP_MEMBER_ATTR_PRIORITY = "SAI_ACL_TABLE_GROUP_MEMBER_ATTR_PRIORITY" + + +class P4RtUserDefinedTrapWrapper(util.DBInterface): + """Interface in ASIC DB tables for SAI user defined trap object.""" + + # table name in ASIC_DB and SAI constants + ASIC_DB_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF_USER_DEFINED_TRAP" + SAI_HOSTIF_USER_DEFINED_TRAP_ATTR_TRAP_GROUP = "SAI_HOSTIF_USER_DEFINED_TRAP_ATTR_TRAP_GROUP" + SAI_HOSTIF_USER_DEFINED_TRAP_ATTR_TYPE = "SAI_HOSTIF_USER_DEFINED_TRAP_ATTR_TYPE" + + +class P4RtTrapGroupWrapper(util.DBInterface): + """Interface in APPL and ASIC DB tables for SAI trap group object.""" + + # table name in APPL_DB and attribute fields + APP_DB_TBL_NAME = "COPP_TABLE" + TBL_NAME_PREFIX = "trap.group.cpu.queue." + QUEUE = "queue" + HOSTIF_NAME = "genetlink_name" + HOSTIF_GENETLINK_MCGRP_NAME = "genetlink_mcgrp_name" + + # table name in ASIC_DB and SAI constants + ASIC_DB_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF_TRAP_GROUP" + SAI_HOSTIF_TRAP_GROUP_ATTR_QUEUE = "SAI_HOSTIF_TRAP_GROUP_ATTR_QUEUE" + + +class P4RtHostifWrapper(util.DBInterface): + """Interface in ASIC DB tables for SAI hostif object.""" + + # table name in ASIC_DB and SAI constants + ASIC_DB_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF" + SAI_HOSTIF_ATTR_TYPE = "SAI_HOSTIF_ATTR_TYPE" + SAI_HOSTIF_ATTR_NAME = "SAI_HOSTIF_ATTR_NAME" + SAI_HOSTIF_ATTR_GENETLINK_MCGRP_NAME = "SAI_HOSTIF_ATTR_GENETLINK_MCGRP_NAME" + + +class P4RtHostifTableEntryWrapper(util.DBInterface): + """Interface in ASIC DB tables for SAI hostif table entry object.""" + + # table name in ASIC_DB and SAI constants + ASIC_DB_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF_TABLE_ENTRY" + SAI_HOSTIF_TABLE_ENTRY_ATTR_TYPE = "SAI_HOSTIF_TABLE_ENTRY_ATTR_TYPE" + SAI_HOSTIF_TABLE_ENTRY_ATTR_TRAP_ID = "SAI_HOSTIF_TABLE_ENTRY_ATTR_TRAP_ID" + SAI_HOSTIF_TABLE_ENTRY_ATTR_CHANNEL_TYPE = "SAI_HOSTIF_TABLE_ENTRY_ATTR_CHANNEL_TYPE" + SAI_HOSTIF_TABLE_ENTRY_ATTR_HOST_IF = "SAI_HOSTIF_TABLE_ENTRY_ATTR_HOST_IF" + +class P4RtUdfGroupWrapper(util.DBInterface): + """Interface in ASIC DB tables for SAI UDF Group object.""" + + # table name in ASIC_DB and SAI constants + ASIC_DB_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_UDF_GROUP" + SAI_UDF_GROUP_ATTR_TYPE = "SAI_UDF_GROUP_ATTR_TYPE" + SAI_UDF_GROUP_ATTR_LENGTH = "SAI_UDF_GROUP_ATTR_LENGTH" + + SAI_UDF_GROUP_TYPE_GENERIC = "SAI_UDF_GROUP_TYPE_GENERIC" + + +class P4RtUdfMatchWrapper(util.DBInterface): + """Interface in ASIC DB tables for SAI UDF Match object.""" + + # table name in ASIC_DB and SAI constants + ASIC_DB_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_UDF_MATCH" + + +class P4RtUdfWrapper(util.DBInterface): + """Interface in ASIC DB tables for SAI UDF object.""" + + # table name in ASIC_DB and SAI constants + ASIC_DB_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_UDF" + SAI_UDF_ATTR_MATCH_ID = "SAI_UDF_ATTR_MATCH_ID" + SAI_UDF_ATTR_GROUP_ID = "SAI_UDF_ATTR_GROUP_ID" + SAI_UDF_ATTR_BASE = "SAI_UDF_ATTR_BASE" + SAI_UDF_ATTR_OFFSET = "SAI_UDF_ATTR_OFFSET" diff --git a/tests/p4rt/l3.py b/tests/p4rt/l3.py new file mode 100644 index 00000000000..c5f656aa2ef --- /dev/null +++ b/tests/p4rt/l3.py @@ -0,0 +1,348 @@ +from swsscommon import swsscommon + +import util +import json + + +class P4RtRouterInterfaceWrapper(util.DBInterface): + """Interface to interact with APP DB and ASIC DB tables for P4RT router interface object.""" + + # database and SAI constants + APP_DB_TBL_NAME = swsscommon.APP_P4RT_TABLE_NAME + TBL_NAME = swsscommon.APP_P4RT_ROUTER_INTERFACE_TABLE_NAME + ASIC_DB_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE" + SAI_ATTR_SRC_MAC = "SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS" + SAI_ATTR_TYPE = "SAI_ROUTER_INTERFACE_ATTR_TYPE" + SAI_ATTR_TYPE_PORT = "SAI_ROUTER_INTERFACE_TYPE_PORT" + SAI_ATTR_MTU = "SAI_ROUTER_INTERFACE_ATTR_MTU" + SAI_ATTR_PORT_ID = "SAI_ROUTER_INTERFACE_ATTR_PORT_ID" + SAI_ATTR_DEFAULT_MTU = "9100" + + # attribute fields for router interface object + PORT_FIELD = "port" + SRC_MAC_FIELD = "src_mac" + + # default router interface attribute values + DEFAULT_ROUTER_INTERFACE_ID = "16" + DEFAULT_PORT_ID = "Ethernet8" + DEFAULT_SRC_MAC = "00:11:22:33:44:55" + DEFAULT_ACTION = "set_port_and_src_mac" + + def generate_app_db_key(self, router_interface_id): + d = {} + d[util.prepend_match_field("router_interface_id") + ] = router_interface_id + key = json.dumps(d, separators=(",", ":")) + return self.TBL_NAME + ":" + key + + # create default router interface + def create_router_interface(self, + router_interace_id=None, port_id=None, + src_mac=None, action=None): + router_interface_id = router_interace_id or self.DEFAULT_ROUTER_INTERFACE_ID + port_id = port_id or self.DEFAULT_PORT_ID + src_mac = src_mac or self.DEFAULT_SRC_MAC + action = action or self.DEFAULT_ACTION + attr_list = [(util.prepend_param_field(self.PORT_FIELD), port_id), + (util.prepend_param_field(self.SRC_MAC_FIELD), src_mac), + (self.ACTION_FIELD, action)] + router_intf_key = self.generate_app_db_key(router_interface_id) + self.set_app_db_entry(router_intf_key, attr_list) + return router_interface_id, router_intf_key, attr_list + + +class P4RtNeighborWrapper(util.DBInterface): + """Interface to interact with APP DB and ASIC DB tables for P4RT neighbor object.""" + + # database and SAI constants + APP_DB_TBL_NAME = swsscommon.APP_P4RT_TABLE_NAME + TBL_NAME = swsscommon.APP_P4RT_NEIGHBOR_TABLE_NAME + ASIC_DB_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY" + SAI_ATTR_DST_MAC = "SAI_NEIGHBOR_ENTRY_ATTR_DST_MAC_ADDRESS" + + # attribute fields for neighbor object + DST_MAC_FIELD = "dst_mac" + + # default neighbor attribute values + DEFAULT_ROUTER_INTERFACE_ID = "16" + DEFAULT_IPV4_NEIGHBOR_ID = "12.0.0.1" + DEFAULT_IPV6_NEIGHBOR_ID = "fe80::21a:11ff:fe17:5f80" + DEFAULT_DST_MAC = "00:02:03:04:05:06" + DEFAULT_ACTION = "set_dst_mac" + + def generate_app_db_key(self, router_interface_id, neighbor_id): + d = {} + d[util.prepend_match_field("router_interface_id") + ] = router_interface_id + d[util.prepend_match_field("neighbor_id")] = neighbor_id + key = json.dumps(d, separators=(",", ":")) + return self.TBL_NAME + ":" + key + + # create default neighbor + def create_neighbor(self, router_interface_id=None, neighbor_id=None, + dst_mac=None, action=None, ipv4=True): + router_interface_id = router_interface_id or self.DEFAULT_ROUTER_INTERFACE_ID + neighbor_id = neighbor_id or (self.DEFAULT_IPV4_NEIGHBOR_ID if ipv4 + else self.DEFAULT_IPV6_NEIGHBOR_ID) + dst_mac = dst_mac or self.DEFAULT_DST_MAC + action = action or self.DEFAULT_ACTION + attr_list = [(util.prepend_param_field(self.DST_MAC_FIELD), dst_mac), + (self.ACTION_FIELD, action)] + neighbor_key = self.generate_app_db_key( + router_interface_id, neighbor_id) + self.set_app_db_entry(neighbor_key, attr_list) + return neighbor_id, neighbor_key, attr_list + + +class P4RtNextHopWrapper(util.DBInterface): + """Interface to interact with APP DB and ASIC DB tables for P4RT nexthop object.""" + + # database and SAI constants + APP_DB_TBL_NAME = swsscommon.APP_P4RT_TABLE_NAME + TBL_NAME = swsscommon.APP_P4RT_NEXTHOP_TABLE_NAME + ASIC_DB_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP" + SAI_ATTR_TYPE = "SAI_NEXT_HOP_ATTR_TYPE" + SAI_ATTR_IP = "SAI_NEXT_HOP_ATTR_IP" + SAI_ATTR_ROUTER_INTF_OID = "SAI_NEXT_HOP_ATTR_ROUTER_INTERFACE_ID" + + # attribute fields for nexthop object + RIF_FIELD = "router_interface_id" + NEIGHBOR_ID_FIELD = "neighbor_id" + + # default next hop attribute values + DEFAULT_ACTION = "set_nexthop" + DEFAULT_NEXTHOP_ID = "8" + DEFAULT_ROUTER_INTERFACE_ID = "16" + DEFAULT_IPV4_NEIGHBOR_ID = "12.0.0.1" + DEFAULT_IPV6_NEIGHBOR_ID = "fe80::21a:11ff:fe17:5f80" + + def generate_app_db_key(self, nexthop_id): + d = {} + d[util.prepend_match_field("nexthop_id")] = nexthop_id + key = json.dumps(d, separators=(",", ":")) + return self.TBL_NAME + ":" + key + + # create default next hop + def create_next_hop(self, router_interface_id=None, neighbor_id=None, + action=None, nexthop_id=None, ipv4=True): + action = action or self.DEFAULT_ACTION + router_interface_id = router_interface_id or self.DEFAULT_ROUTER_INTERFACE_ID + if ipv4 is True: + neighbor_id = neighbor_id or self.DEFAULT_IPV4_NEIGHBOR_ID + else: + neighbor_id = neighbor_id or self.DEFAULT_IPV6_NEIGHBOR_ID + nexthop_id = nexthop_id or self.DEFAULT_NEXTHOP_ID + attr_list = [(util.prepend_param_field(self.RIF_FIELD), router_interface_id), + (util.prepend_param_field(self.NEIGHBOR_ID_FIELD), neighbor_id), + (self.ACTION_FIELD, action)] + nexthop_key = self.generate_app_db_key(nexthop_id) + self.set_app_db_entry(nexthop_key, attr_list) + return nexthop_id, nexthop_key, attr_list + + # Fetch oid of the first newly created nexthop from created nexthops in ASIC + # db. This API should only be used when only one oid is expected to be + # created after the original entries. + # Original nexthop entries in asic db must be fetched using + # 'get_original_redis_entries' before fetching oid of newly created nexthop. + def get_newly_created_nexthop_oid(self): + nexthop_oid = None + nexthop_entries = util.get_keys(self.asic_db, self.ASIC_DB_TBL_NAME) + for key in nexthop_entries: + if key not in self._original_entries["{}:{}".format(self.asic_db, + self.ASIC_DB_TBL_NAME)]: + nexthop_oid = key + break + return nexthop_oid + + +class P4RtWcmpGroupWrapper(util.DBInterface): + """Interface to interact with APP DB and ASIC DB tables for P4RT wcmp group object.""" + + # database and SAI constants + APP_DB_TBL_NAME = swsscommon.APP_P4RT_TABLE_NAME + TBL_NAME = swsscommon.APP_P4RT_WCMP_GROUP_TABLE_NAME + ASIC_DB_GROUP_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP" + SAI_ATTR_GROUP_TYPE = "SAI_NEXT_HOP_GROUP_ATTR_TYPE" + SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP = "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP" + ASIC_DB_GROUP_MEMBER_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER" + SAI_ATTR_GROUP_MEMBER_NEXTHOP_GROUP_ID = "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID" + SAI_ATTR_GROUP_MEMBER_NEXTHOP_ID = "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID" + SAI_ATTR_GROUP_MEMBER_WEIGHT = "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_WEIGHT" + + # attribute fields for wcmp group object + NEXTHOP_ID_FIELD = "nexthop_id" + WEIGHT_FIELD = "weight" + WATCH_PORT_FIELD = "watch_port" + ACTION_FIELD = "action" + ACTIONS_FIELD = "actions" + + # default wcmp group attributes + DEFAULT_WCMP_GROUP_ID = "group-a" + DEFAULT_WEIGHT = 2 + DEFAULT_ACTION = "set_nexthop_id" + DEFAULT_NEXTHOP_ID = "8" + DEFAULT_WATCH_PORT = "" + + # Fetch the oid of the first newly created wcmp group from created wcmp groups + # in AISC db. This API should only be used when only one oid is expected to be + # created after the original entries. + # Original wcmp group entries in asic db must be fetched using + # 'get_original_redis_entries' before fetching oid of newly created wcmp group. + def get_newly_created_wcmp_group_oid(self): + wcmp_group_oid = None + wcmp_group_entries = util.get_keys( + self.asic_db, self.ASIC_DB_GROUP_TBL_NAME) + for key in wcmp_group_entries: + if key not in self._original_entries["{}:{}".format( + self.asic_db, self.ASIC_DB_GROUP_TBL_NAME)]: + wcmp_group_oid = key + break + return wcmp_group_oid + + # Fetch key for the first newly created wcmp group member from created group + # members in ASIC db. This API should only be used when only one key is + # expected to be created after the original entries. + # Original wcmp group member entries in asic db must be fetched using + # 'get_original_redis_entries' before fetching asic db key of newly created + # wcmp group member. + def get_newly_created_wcmp_group_member_asic_db_key(self): + asic_db_wcmp_group_member_key = None + wcmp_group_member_entries = util.get_keys(self.asic_db, + self.ASIC_DB_GROUP_MEMBER_TBL_NAME) + for key in wcmp_group_member_entries: + if key not in self._original_entries["{}:{}".format( + self.asic_db, self.ASIC_DB_GROUP_MEMBER_TBL_NAME)]: + asic_db_wcmp_group_member_key = key + break + return asic_db_wcmp_group_member_key + + def generate_app_db_key(self, group_id): + d = {} + d[util.prepend_match_field("wcmp_group_id")] = group_id + key = json.dumps(d, separators=(",", ":")) + return self.TBL_NAME + ":" + key + + # create default wcmp group + def create_wcmp_group(self, nexthop_id=None, wcmp_group_id=None, + action=None, weight=None, watch_port=None): + wcmp_group_id = wcmp_group_id or self.DEFAULT_WCMP_GROUP_ID + weight = weight or self.DEFAULT_WEIGHT + action = action or self.DEFAULT_ACTION + nexthop_id = nexthop_id or self.DEFAULT_NEXTHOP_ID + watch_port = watch_port or self.DEFAULT_WATCH_PORT + action1 = {util.prepend_param_field(self.NEXTHOP_ID_FIELD): nexthop_id, + self.WEIGHT_FIELD: weight, self.ACTION_FIELD: action, + self.WATCH_PORT_FIELD: watch_port} + actions = [action1] + attr_list = [(self.ACTIONS_FIELD, json.dumps(actions))] + wcmp_group_key = self.generate_app_db_key(wcmp_group_id) + self.set_app_db_entry(wcmp_group_key, attr_list) + return wcmp_group_id, wcmp_group_key, attr_list + + def get_original_appl_db_entries_count(self): + return len(self._original_entries["%s:%s" % (self.appl_db, + (self.APP_DB_TBL_NAME + ":" + + self.TBL_NAME))]) + + def get_original_appl_state_db_entries_count(self): + return len(self._original_entries["%s:%s" % (self.appl_state_db, + (self.APP_DB_TBL_NAME + ":" + + self.TBL_NAME))]) + + def get_original_asic_db_group_entries_count(self): + return len(self._original_entries["%s:%s" % (self.asic_db, + self.ASIC_DB_GROUP_TBL_NAME)]) + + def get_original_asic_db_member_entries_count(self): + return len(self._original_entries["%s:%s" % (self.asic_db, + self.ASIC_DB_GROUP_MEMBER_TBL_NAME)]) + + +class P4RtRouteWrapper(util.DBInterface): + """Interface to interact with APP DB and ASIC DB tables for P4RT route object.""" + + # database and SAI constants + APP_DB_TBL_NAME = swsscommon.APP_P4RT_TABLE_NAME + ASIC_DB_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY" + SAI_ATTR_PACKET_ACTION = "SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION" + SAI_ATTR_PACKET_ACTION_FORWARD = "SAI_PACKET_ACTION_FORWARD" + SAI_ATTR_PACKET_ACTION_DROP = "SAI_PACKET_ACTION_DROP" + SAI_ATTR_NEXTHOP_ID = "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID" + + # attribute fields for route object + NEXTHOP_ID_FIELD = "nexthop_id" + WCMP_GROUP_ID_FIELD = "wcmp_group_id" + + # default route attribute values + DEFAULT_ACTION = "set_nexthop_id" + DEFAULT_NEXTHOP_ID = "8" + DEFAULT_WCMP_GROUP_ID = "group-a" + DEFAULT_VRF_ID = "b4-traffic" + DEFAULT_DST = "10.11.12.0/24" + + def generate_app_db_key(self, vrf_id, dst): + assert self.ip_type is not None + d = {} + d[util.prepend_match_field("vrf_id")] = vrf_id + if self.ip_type == "IPV4": + d[util.prepend_match_field("ipv4_dst")] = dst + else: + d[util.prepend_match_field("ipv6_dst")] = dst + key = json.dumps(d, separators=(",", ":")) + return self.TBL_NAME + ":" + key + + def set_ip_type(self, ip_type): + assert ip_type in ("IPV4", "IPV6") + self.ip_type = ip_type + self.TBL_NAME = "FIXED_" + ip_type + "_TABLE" + + # Create default route. + def create_route(self, nexthop_id=None, wcmp_group_id=None, action=None, + vrf_id=None, dst=None): + action = action or self.DEFAULT_ACTION + vrf_id = vrf_id or self.DEFAULT_VRF_ID + dst = dst or self.DEFAULT_DST + if action == "set_wcmp_group_id": + wcmp_group_id = wcmp_group_id or self.DEFAULT_WCMP_GROUP_ID + attr_list = [(self.ACTION_FIELD, action), + (util.prepend_param_field( + self.WCMP_GROUP_ID_FIELD), wcmp_group_id)] + elif action == "set_nexthop_id": + nexthop_id = nexthop_id or self.DEFAULT_NEXTHOP_ID + attr_list = [(self.ACTION_FIELD, action), + (util.prepend_param_field(self.NEXTHOP_ID_FIELD), + nexthop_id)] + else: + attr_list = [(self.ACTION_FIELD, action)] + route_key = self.generate_app_db_key(vrf_id, dst) + self.set_app_db_entry(route_key, attr_list) + return route_key, attr_list + + # Fetch the asic_db_key for the first newly created route entry from created + # routes in ASIC db. This API should only be used when only one key is + # expected to be created after original entries. + # Original route entries in asic db must be fetched using + # 'get_original_redis_entries' before fetching asic db key of newly created + # route. + def get_newly_created_asic_db_key(self): + route_entries = util.get_keys(self.asic_db, self.ASIC_DB_TBL_NAME) + for key in route_entries: + if key not in self._original_entries["%s:%s" % (self.asic_db, + self.ASIC_DB_TBL_NAME)]: + asic_db_key = key + break + return asic_db_key + + def get_original_appl_db_entries_count(self): + return len(self._original_entries["%s:%s" % (self.appl_db, + (self.APP_DB_TBL_NAME + ":" + + self.TBL_NAME))]) + + def get_original_appl_state_db_entries_count(self): + return len(self._original_entries["%s:%s" % (self.appl_state_db, + (self.APP_DB_TBL_NAME + ":" + + self.TBL_NAME))]) + + def get_original_asic_db_entries_count(self): + return len(self._original_entries["%s:%s" % (self.asic_db, + self.ASIC_DB_TBL_NAME)]) diff --git a/tests/p4rt/test_l3.py b/tests/p4rt/test_l3.py new file mode 100644 index 00000000000..42f32facbdf --- /dev/null +++ b/tests/p4rt/test_l3.py @@ -0,0 +1,1845 @@ +from swsscommon import swsscommon + +import pytest +import json +import util +import l3 +import test_vrf + + +class TestP4RTL3(object): + + def _set_up(self, dvs): + self._p4rt_router_intf_obj = l3.P4RtRouterInterfaceWrapper() + self._p4rt_neighbor_obj = l3.P4RtNeighborWrapper() + self._p4rt_nexthop_obj = l3.P4RtNextHopWrapper() + self._p4rt_route_obj = l3.P4RtRouteWrapper() + self._p4rt_wcmp_group_obj = l3.P4RtWcmpGroupWrapper() + self._vrf_obj = test_vrf.TestVrf() + + self._p4rt_router_intf_obj.set_up_databases(dvs) + self._p4rt_neighbor_obj.set_up_databases(dvs) + self._p4rt_nexthop_obj.set_up_databases(dvs) + self._p4rt_route_obj.set_up_databases(dvs) + self._p4rt_wcmp_group_obj.set_up_databases(dvs) + self.response_consumer = swsscommon.NotificationConsumer( + self._p4rt_route_obj.appl_state_db, "APPL_DB_P4RT_TABLE_RESPONSE_CHANNEL") + + def _set_vrf(self, dvs): + # Create VRF. + self._vrf_obj.setup_db(dvs) + self.vrf_id = "b4-traffic" + self.vrf_state = self._vrf_obj.vrf_create(dvs, self.vrf_id, [], {}) + + def _clean_vrf(self, dvs): + # Remove VRF. + self._vrf_obj.vrf_remove(dvs, self.vrf_id, self.vrf_state) + + def test_IPv4RouteWithNexthopAddUpdateDeletePass(self, dvs, testlog): + # Initialize L3 objects and database connectors. + self._set_up(dvs) + self._set_vrf(dvs) + + # Set IP type for route object. + self._p4rt_route_obj.set_ip_type("IPV4") + + # Maintain list of original Application and ASIC DB entries before + # adding new route. + db_list = ((self._p4rt_nexthop_obj.asic_db, + self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME),) + self._p4rt_nexthop_obj.get_original_redis_entries(db_list) + db_list = ((self._p4rt_route_obj.appl_db, + "%s:%s" % (self._p4rt_route_obj.APP_DB_TBL_NAME, + self._p4rt_route_obj.TBL_NAME)), + (self._p4rt_route_obj.appl_state_db, + "%s:%s" % (self._p4rt_route_obj.APP_DB_TBL_NAME, + self._p4rt_route_obj.TBL_NAME)), + (self._p4rt_route_obj.asic_db, + self._p4rt_route_obj.ASIC_DB_TBL_NAME)) + self._p4rt_route_obj.get_original_redis_entries(db_list) + + # Fetch the original key to oid information from Redis DB. + key_to_oid_helper = util.KeyToOidDBHelper(dvs) + _, original_key_oid_info = key_to_oid_helper.get_db_info() + + # Create router interface. + router_interface_id, router_intf_key, attr_list = ( + self._p4rt_router_intf_obj.create_router_interface() + ) + util.verify_response(self.response_consumer, router_intf_key, + attr_list, "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count = 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create neighbor. + neighbor_id, neighbor_key, attr_list = ( + self._p4rt_neighbor_obj.create_neighbor() + ) + util.verify_response(self.response_consumer, neighbor_key, attr_list, + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create nexthop. + nexthop_id, nexthop_key, attr_list = ( + self._p4rt_nexthop_obj.create_next_hop() + ) + util.verify_response(self.response_consumer, nexthop_key, attr_list, + "SWSS_RC_SUCCESS") + # get nexthop_oid of newly created nexthop + nexthop_oid = self._p4rt_nexthop_obj.get_newly_created_nexthop_oid() + assert nexthop_oid is not None + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create route entry. + route_key, attr_list = self._p4rt_route_obj.create_route(nexthop_id) + util.verify_response(self.response_consumer, route_key, attr_list, + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Query application database for route entries. + route_entries = util.get_keys( + self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + assert len(route_entries) == ( + self._p4rt_route_obj.get_original_appl_db_entries_count() + 1 + ) + + # Query application database for newly created route key. + (status, fvs) = util.get_key(self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query application state database for route entries. + state_route_entries = util.get_keys( + self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + assert len(state_route_entries) == ( + self._p4rt_route_obj.get_original_appl_state_db_entries_count() + 1 + ) + + # Query application state database for newly created route key. + (status, fvs) = util.get_key(self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query ASIC database for route entries. + route_entries = util.get_keys(self._p4rt_route_obj.asic_db, + self._p4rt_route_obj.ASIC_DB_TBL_NAME) + assert len(route_entries) == ( + self._p4rt_route_obj.get_original_asic_db_entries_count() + 1 + ) + + # Query ASIC database for newly created route key. + asic_db_key = self._p4rt_route_obj.get_newly_created_asic_db_key() + assert asic_db_key is not None + (status, fvs) = util.get_key(self._p4rt_route_obj.asic_db, + self._p4rt_route_obj.ASIC_DB_TBL_NAME, + asic_db_key) + assert status == True + attr_list = [(self._p4rt_route_obj.SAI_ATTR_NEXTHOP_ID, nexthop_oid)] + util.verify_attr(fvs, attr_list) + + # Update route entry. + route_key, attr_list = self._p4rt_route_obj.create_route(action="drop") + util.verify_response(self.response_consumer, route_key, attr_list, + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count did not change in Redis DB. + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Query application database for route entries. + route_entries = util.get_keys( + self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + assert len(route_entries) == ( + self._p4rt_route_obj.get_original_appl_db_entries_count() + 1 + ) + + # Query application database for the updated route key. + (status, fvs) = util.get_key(self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key) + assert status == True + attr_list_appl_db = [(self._p4rt_route_obj.ACTION_FIELD, "drop"), + (util.prepend_param_field(self._p4rt_route_obj.NEXTHOP_ID_FIELD), nexthop_id)] + util.verify_attr(fvs, attr_list_appl_db) + + # Query application state database for route entries. + state_route_entries = util.get_keys( + self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + assert len(state_route_entries) == ( + self._p4rt_route_obj.get_original_appl_state_db_entries_count() + 1 + ) + + # Query application state database for the updated route key. + (status, fvs) = util.get_key(self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query ASIC database for route entries. + route_entries = util.get_keys(self._p4rt_route_obj.asic_db, + self._p4rt_route_obj.ASIC_DB_TBL_NAME) + assert len(route_entries) == ( + self._p4rt_route_obj.get_original_asic_db_entries_count() + 1 + ) + + # Query ASIC database for the updated route key. + asic_db_key = self._p4rt_route_obj.get_newly_created_asic_db_key() + assert asic_db_key is not None + (status, fvs) = util.get_key(self._p4rt_route_obj.asic_db, + self._p4rt_route_obj.ASIC_DB_TBL_NAME, + asic_db_key) + assert status == True + attr_list = [(self._p4rt_route_obj.SAI_ATTR_NEXTHOP_ID, "oid:0x0"), + (self._p4rt_route_obj.SAI_ATTR_PACKET_ACTION, self._p4rt_route_obj.SAI_ATTR_PACKET_ACTION_DROP)] + util.verify_attr(fvs, attr_list) + + # Remove route entry. + self._p4rt_route_obj.remove_app_db_entry(route_key) + util.verify_response( + self.response_consumer, route_key, [], "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count decremented by 1 in Redis DB. + count -= 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Remove nexthop. + self._p4rt_nexthop_obj.remove_app_db_entry(nexthop_key) + util.verify_response(self.response_consumer, nexthop_key, [], + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count decremented by 1 in Redis DB. + count -= 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Remove neighbor. + self._p4rt_neighbor_obj.remove_app_db_entry(neighbor_key) + util.verify_response(self.response_consumer, neighbor_key, [], + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count decremented by 1 in Redis DB. + count -= 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Remove router interface. + self._p4rt_router_intf_obj.remove_app_db_entry(router_intf_key) + util.verify_response( + self.response_consumer, router_intf_key, [], "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count is same as the original count. + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + + # Query application database for route entries. + route_entries = util.get_keys( + self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + assert len(route_entries) == ( + self._p4rt_route_obj.get_original_appl_db_entries_count() + ) + + # Verify that the route_key no longer exists in application database. + (status, fsv) = util.get_key(self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key) + assert status == False + + # Query application state database for route entries. + state_route_entries = util.get_keys( + self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + assert len(state_route_entries) == ( + self._p4rt_route_obj.get_original_appl_state_db_entries_count() + ) + + # Verify that the route_key no longer exists in application state + # database. + (status, fsv) = util.get_key(self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key) + assert status == False + + # Query ASIC database for route entries. + route_entries = util.get_keys(self._p4rt_route_obj.asic_db, + self._p4rt_route_obj.ASIC_DB_TBL_NAME) + assert len(route_entries) == ( + self._p4rt_route_obj.get_original_asic_db_entries_count() + ) + + # Verify that removed route no longer exists in ASIC database. + (status, fvs) = util.get_key(self._p4rt_route_obj.asic_db, + self._p4rt_route_obj.ASIC_DB_TBL_NAME, + asic_db_key) + assert status == False + self._clean_vrf(dvs) + + def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): + # Initialize L3 objects and database connectors. + self._set_up(dvs) + self._set_vrf(dvs) + + # Set IP type for route object. + self._p4rt_route_obj.set_ip_type("IPV6") + + # Maintain list of original Application and ASIC DB entries before + # adding new route. + db_list = ((self._p4rt_route_obj.appl_db, + "%s:%s" % (self._p4rt_route_obj.APP_DB_TBL_NAME, + self._p4rt_route_obj.TBL_NAME)), + (self._p4rt_route_obj.appl_state_db, + "%s:%s" % (self._p4rt_route_obj.APP_DB_TBL_NAME, + self._p4rt_route_obj.TBL_NAME)), + (self._p4rt_route_obj.asic_db, + self._p4rt_route_obj.ASIC_DB_TBL_NAME)) + self._p4rt_route_obj.get_original_redis_entries(db_list) + db_list = ((self._p4rt_nexthop_obj.asic_db, + self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME),) + self._p4rt_nexthop_obj.get_original_redis_entries(db_list) + db_list = ((self._p4rt_wcmp_group_obj.appl_db, + "%s:%s" % (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + self._p4rt_wcmp_group_obj.TBL_NAME)), + (self._p4rt_wcmp_group_obj.appl_state_db, + "%s:%s" % (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + self._p4rt_wcmp_group_obj.TBL_NAME)), + (self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME), + (self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME)) + self._p4rt_wcmp_group_obj.get_original_redis_entries(db_list) + + # Fetch the original key to oid information from Redis DB. + key_to_oid_helper = util.KeyToOidDBHelper(dvs) + _, original_key_oid_info = key_to_oid_helper.get_db_info() + + # Create router interface. + router_interface_id, router_intf_key, attr_list = ( + self._p4rt_router_intf_obj.create_router_interface() + ) + util.verify_response(self.response_consumer, router_intf_key, attr_list, + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count = 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create neighbor. + neighbor_id, neighbor_key, attr_list = ( + self._p4rt_neighbor_obj.create_neighbor(ipv4=False) + ) + util.verify_response(self.response_consumer, neighbor_key, attr_list, + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create nexthop. + nexthop_id, nexthop_key, attr_list = ( + self._p4rt_nexthop_obj.create_next_hop(ipv4=False) + ) + util.verify_response(self.response_consumer, nexthop_key, attr_list, + "SWSS_RC_SUCCESS") + # Get the oid of the newly created nexthop. + nexthop_oid = self._p4rt_nexthop_obj.get_newly_created_nexthop_oid() + assert nexthop_oid is not None + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create wcmp group. + wcmp_group_id, wcmp_group_key, attr_list = ( + self._p4rt_wcmp_group_obj.create_wcmp_group() + ) + util.verify_response(self.response_consumer, wcmp_group_key, attr_list, + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 2 in Redis DB + # (1 each for WCMP group and member). + count += 2 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Query application database for wcmp group entries. + wcmp_group_entries = util.get_keys( + self._p4rt_wcmp_group_obj.appl_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + self._p4rt_wcmp_group_obj.TBL_NAME) + assert len(wcmp_group_entries) == ( + self._p4rt_wcmp_group_obj.get_original_appl_db_entries_count() + 1 + ) + + # Query application database for newly created wcmp group key. + (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.appl_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + wcmp_group_key) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query application state database for wcmp group entries. + state_wcmp_group_entries = util.get_keys( + self._p4rt_wcmp_group_obj.appl_state_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + self._p4rt_wcmp_group_obj.TBL_NAME) + assert len(state_wcmp_group_entries) == ( + self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() + + 1 + ) + + # Query application state database for newly created wcmp group key. + (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.appl_state_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + wcmp_group_key) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query ASIC database for wcmp group entries. + wcmp_group_entries = util.get_keys(self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME) + assert len(wcmp_group_entries) == ( + self._p4rt_wcmp_group_obj.get_original_asic_db_group_entries_count() + + 1 + ) + + # Query ASIC database for newly created wcmp group oid. + wcmp_group_oid = self._p4rt_wcmp_group_obj.get_newly_created_wcmp_group_oid() + assert wcmp_group_oid is not None + attr_list = [(self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_TYPE, + self._p4rt_wcmp_group_obj.SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP)] + (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, + wcmp_group_oid) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query ASIC database for wcmp group member entries. + wcmp_group_member_entries = util.get_keys( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME) + assert len(wcmp_group_member_entries) == ( + self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() + + 1 + ) + + # Query ASIC database for newly crated wcmp group member key. + asic_db_group_member_key = ( + self._p4rt_wcmp_group_obj.get_newly_created_wcmp_group_member_asic_db_key() + ) + assert asic_db_group_member_key is not None + attr_list = [(self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_NEXTHOP_GROUP_ID, + wcmp_group_oid), + (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_NEXTHOP_ID, + nexthop_oid), + (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_WEIGHT, + str(self._p4rt_wcmp_group_obj.DEFAULT_WEIGHT))] + (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME, + asic_db_group_member_key) + assert status == True + util.verify_attr(fvs, attr_list) + + # Create route entry. + route_key, attr_list = self._p4rt_route_obj.create_route( + wcmp_group_id=wcmp_group_id, action="set_wcmp_group_id", dst="2001:db8::/32") + util.verify_response(self.response_consumer, route_key, attr_list, + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Query application database for route entries. + route_entries = util.get_keys( + self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + assert len(route_entries) == ( + self._p4rt_route_obj.get_original_appl_db_entries_count() + 1 + ) + + # Query application database for newly created route key. + (status, fvs) = util.get_key(self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query application state database for route entries. + state_route_entries = util.get_keys( + self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + assert len(state_route_entries) == ( + self._p4rt_route_obj.get_original_appl_state_db_entries_count() + 1 + ) + + # Query application state database for newly created route key. + (status, fvs) = util.get_key(self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query ASIC database for route entries. + route_entries = util.get_keys(self._p4rt_route_obj.asic_db, + self._p4rt_route_obj.ASIC_DB_TBL_NAME) + assert len(route_entries) == ( + self._p4rt_route_obj.get_original_asic_db_entries_count() + 1 + ) + + # Query ASIC database for newly created route key. + asic_db_key = self._p4rt_route_obj.get_newly_created_asic_db_key() + assert asic_db_key is not None + (status, fvs) = util.get_key(self._p4rt_route_obj.asic_db, + self._p4rt_route_obj.ASIC_DB_TBL_NAME, + asic_db_key) + assert status == True + attr_list = [ + (self._p4rt_route_obj.SAI_ATTR_NEXTHOP_ID, wcmp_group_oid)] + util.verify_attr(fvs, attr_list) + + # Update route entry. + route_key, attr_list = self._p4rt_route_obj.create_route( + action="drop", dst="2001:db8::/32") + util.verify_response(self.response_consumer, route_key, attr_list, + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count did not change in Redis DB. + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Query application database for route entries. + route_entries = util.get_keys( + self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + assert len(route_entries) == ( + self._p4rt_route_obj.get_original_appl_db_entries_count() + 1 + ) + + # Query application database for the updated route key. + (status, fvs) = util.get_key(self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key) + assert status == True + attr_list_appl_db = [(self._p4rt_route_obj.ACTION_FIELD, "drop"), + (util.prepend_param_field(self._p4rt_route_obj.WCMP_GROUP_ID_FIELD), wcmp_group_id)] + util.verify_attr(fvs, attr_list_appl_db) + + # Query application state database for route entries. + state_route_entries = util.get_keys( + self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + assert len(state_route_entries) == ( + self._p4rt_route_obj.get_original_appl_state_db_entries_count() + 1 + ) + + # Query application state database for the updated route key. + (status, fvs) = util.get_key(self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query ASIC database for route entries. + route_entries = util.get_keys(self._p4rt_route_obj.asic_db, + self._p4rt_route_obj.ASIC_DB_TBL_NAME) + assert len(route_entries) == ( + self._p4rt_route_obj.get_original_asic_db_entries_count() + 1 + ) + + # Query ASIC database for the updated route key. + asic_db_key = self._p4rt_route_obj.get_newly_created_asic_db_key() + assert asic_db_key is not None + (status, fvs) = util.get_key(self._p4rt_route_obj.asic_db, + self._p4rt_route_obj.ASIC_DB_TBL_NAME, + asic_db_key) + assert status == True + attr_list = [(self._p4rt_route_obj.SAI_ATTR_NEXTHOP_ID, "oid:0x0"), + (self._p4rt_route_obj.SAI_ATTR_PACKET_ACTION, self._p4rt_route_obj.SAI_ATTR_PACKET_ACTION_DROP)] + util.verify_attr(fvs, attr_list) + + # Remove route entry. + self._p4rt_route_obj.remove_app_db_entry(route_key) + util.verify_response( + self.response_consumer, route_key, [], "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count decremented by 1 in Redis DB. + count -= 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Remove wcmp group entry. + self._p4rt_wcmp_group_obj.remove_app_db_entry(wcmp_group_key) + util.verify_response(self.response_consumer, wcmp_group_key, [], + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count decremented by 2 in Redis DB + # (1 each for WCMP group and member). + count -= 2 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Remove nexthop. + self._p4rt_nexthop_obj.remove_app_db_entry(nexthop_key) + util.verify_response(self.response_consumer, nexthop_key, [], + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count decremented by 1 in Redis DB. + count -= 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Remove neighbor. + self._p4rt_neighbor_obj.remove_app_db_entry(neighbor_key) + util.verify_response(self.response_consumer, neighbor_key, [], + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count decremented by 1 in Redis DB. + count -= 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Remove router interface. + self._p4rt_router_intf_obj.remove_app_db_entry(router_intf_key) + util.verify_response( + self.response_consumer, router_intf_key, [], "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count is same as original count. + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + + # Query application database for route entries. + route_entries = util.get_keys( + self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + assert len(route_entries) == ( + self._p4rt_route_obj.get_original_appl_db_entries_count() + ) + + # Verify that the route_key no longer exists in application database. + (status, fsv) = util.get_key(self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key) + assert status == False + + # Query application state database for route entries. + state_route_entries = util.get_keys( + self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + assert len(state_route_entries) == ( + self._p4rt_route_obj.get_original_appl_state_db_entries_count() + ) + + # Verify that the route_key no longer exists in application state + # database. + (status, fsv) = util.get_key(self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key) + assert status == False + + # Query ASIC database for route entries. + route_entries = util.get_keys(self._p4rt_route_obj.asic_db, + self._p4rt_route_obj.ASIC_DB_TBL_NAME) + assert len(route_entries) == ( + self._p4rt_route_obj.get_original_asic_db_entries_count() + ) + + # Verify that removed route no longer exists in ASIC database. + (status, fvs) = util.get_key(self._p4rt_route_obj.asic_db, + self._p4rt_route_obj.ASIC_DB_TBL_NAME, + asic_db_key) + assert status == False + + # Query application database for wcmp group entries. + wcmp_group_entries = util.get_keys( + self._p4rt_wcmp_group_obj.appl_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + self._p4rt_wcmp_group_obj.TBL_NAME) + assert len(wcmp_group_entries) == ( + self._p4rt_wcmp_group_obj.get_original_appl_db_entries_count() + ) + + # Verify that the route_key no longer exists in application database. + (status, fsv) = util.get_key(self._p4rt_wcmp_group_obj.appl_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + wcmp_group_key) + assert status == False + + # Query application state database for wcmp group entries. + state_wcmp_group_entries = util.get_keys( + self._p4rt_wcmp_group_obj.appl_state_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + self._p4rt_wcmp_group_obj.TBL_NAME) + assert len(state_wcmp_group_entries) == ( + self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() + ) + + # Verify that the wcmp_group_key no longer exists in application state + # database. + (status, fsv) = util.get_key(self._p4rt_wcmp_group_obj.appl_state_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + wcmp_group_key) + assert status == False + + # Query ASIC database for wcmp group entries. + wcmp_group_entries = util.get_keys(self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME) + assert len(wcmp_group_entries) == ( + self._p4rt_wcmp_group_obj.get_original_asic_db_group_entries_count() + ) + + # Verify that removed wcmp group no longer exists in ASIC database. + (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, + wcmp_group_oid) + assert status == False + + # Query ASIC database for wcmp group member entries. + wcmp_group_member_entries = util.get_keys(self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME) + assert len(wcmp_group_member_entries) == ( + self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() + ) + + # Verify that removed wcmp group member no longer exists in ASIC + # database. + (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME, + asic_db_group_member_key) + assert status == False + + self._clean_vrf(dvs) + + def test_IPv4RouteAddWithInvalidNexthopFail(self, dvs, testlog): + marker = dvs.add_log_marker() + + # Initialize L3 objects and database connectors. + self._set_up(dvs) + self._set_vrf(dvs) + + # Set IP type for route object. + self._p4rt_route_obj.set_ip_type("IPV4") + + # Maintain list of original Application and ASIC DB entries before + # adding new route. + db_list = ((self._p4rt_route_obj.appl_db, + "%s:%s" % (self._p4rt_route_obj.APP_DB_TBL_NAME, + self._p4rt_route_obj.TBL_NAME)), + (self._p4rt_route_obj.appl_state_db, + "%s:%s" % (self._p4rt_route_obj.APP_DB_TBL_NAME, + self._p4rt_route_obj.TBL_NAME)), + (self._p4rt_route_obj.asic_db, + self._p4rt_route_obj.ASIC_DB_TBL_NAME)) + self._p4rt_route_obj.get_original_redis_entries(db_list) + + # Create route entry using invalid nexthop (expect failure). + route_key, attr_list = self._p4rt_route_obj.create_route() + err_log = "[OrchAgent] Nexthop ID '8' does not exist" + util.verify_response(self.response_consumer, route_key, attr_list, + "SWSS_RC_NOT_FOUND", err_log) + + # Query application database for route entries. + route_entries = util.get_keys( + self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + assert len(route_entries) == ( + self._p4rt_route_obj.get_original_appl_db_entries_count() + 1 + ) + + # Query application database for newly created route key. + (status, fvs) = util.get_key(self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query application database for route entries (no new route entry + # expected). + state_route_entries = util.get_keys( + self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + assert len(state_route_entries) == ( + self._p4rt_route_obj.get_original_appl_state_db_entries_count() + ) + + # Verify that the newly added route key does not exist in application + # state db. + (status, fvs) = util.get_key(self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key) + assert status == False + + # Query ASIC database for route entries (no new ASIC DB entry should be + # created for route entry). + route_entries = util.get_keys(self._p4rt_route_obj.asic_db, + self._p4rt_route_obj.ASIC_DB_TBL_NAME) + assert len(route_entries) == ( + self._p4rt_route_obj.get_original_asic_db_entries_count() + ) + + # Remove route entry (expect failure). + self._p4rt_route_obj.remove_app_db_entry(route_key) + err_log = "[OrchAgent] Route entry does not exist" + util.verify_response( + self.response_consumer, route_key, [], "SWSS_RC_NOT_FOUND", err_log) + self._clean_vrf(dvs) + + def test_IPv6RouteAddWithInvalidWcmpFail(self, dvs, testlog): + marker = dvs.add_log_marker() + + # Initialize L3 objects and database connectors. + self._set_up(dvs) + self._set_vrf(dvs) + + # Set IP type for route object. + self._p4rt_route_obj.set_ip_type("IPV6") + + # Maintain list of original Application and ASIC DB entries before + # adding new route. + db_list = ((self._p4rt_route_obj.appl_db, + "%s:%s" % (self._p4rt_route_obj.APP_DB_TBL_NAME, + self._p4rt_route_obj.TBL_NAME)), + (self._p4rt_route_obj.appl_state_db, + "%s:%s" % (self._p4rt_route_obj.APP_DB_TBL_NAME, + self._p4rt_route_obj.TBL_NAME)), + (self._p4rt_route_obj.asic_db, + self._p4rt_route_obj.ASIC_DB_TBL_NAME)) + self._p4rt_route_obj.get_original_redis_entries(db_list) + + # Create route entry using invalid wcmp group (expect failure). + route_key, attr_list = self._p4rt_route_obj.create_route( + action="set_wcmp_group_id", wcmp_group_id="8") + err_log = "[OrchAgent] WCMP group '8' does not exist" + util.verify_response(self.response_consumer, route_key, attr_list, + "SWSS_RC_NOT_FOUND", err_log) + + # Query application database for route entries + route_entries = util.get_keys(self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + assert len(route_entries) == ( + self._p4rt_route_obj.get_original_appl_db_entries_count() + 1 + ) + + # Query application database for newly created route key. + (status, fvs) = util.get_key(self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query application state database for route entries (no new APPL STATE DB + # entry should be created for route entry). + state_route_entries = util.get_keys(self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + assert len(state_route_entries) == ( + self._p4rt_route_obj.get_original_appl_state_db_entries_count() + ) + + # Verify that newly created route key does not exist in application + # state db. + (status, fvs) = util.get_key(self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key) + assert status == False + + # Query ASIC database for route entries (no new ASIC DB entry should be + # created for route entry). + route_entries = util.get_keys(self._p4rt_route_obj.asic_db, + self._p4rt_route_obj.ASIC_DB_TBL_NAME) + assert len(route_entries) == ( + self._p4rt_route_obj.get_original_asic_db_entries_count() + ) + + # Remove route entry (expect failure). + self._p4rt_route_obj.remove_app_db_entry(route_key) + err_log = "[OrchAgent] Route entry does not exist" + util.verify_response( + self.response_consumer, route_key, [], "SWSS_RC_NOT_FOUND", err_log) + self._clean_vrf(dvs) + + def test_PruneAndRestoreNextHop(self, dvs, testlog): + # Initialize L3 objects and database connectors. + self._set_up(dvs) + cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) + + # Maintain original WCMP group entries for ASIC DB. + db_list = ((self._p4rt_wcmp_group_obj.appl_db, + "%s:%s" % (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + self._p4rt_wcmp_group_obj.TBL_NAME)), + (self._p4rt_wcmp_group_obj.appl_state_db, + "%s:%s" % (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + self._p4rt_wcmp_group_obj.TBL_NAME)), + (self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME), + (self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME)) + self._p4rt_wcmp_group_obj.get_original_redis_entries(db_list) + db_list = ((self._p4rt_nexthop_obj.asic_db, + self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME),) + self._p4rt_nexthop_obj.get_original_redis_entries(db_list) + + # Fetch the original key to oid information from Redis DB. + key_to_oid_helper = util.KeyToOidDBHelper(dvs) + _, original_key_oid_info = key_to_oid_helper.get_db_info() + + # Bring up port under test. + port_name = "Ethernet0" + if_name = "eth0" + util.initialize_interface(dvs, port_name, "10.0.0.0/31") + util.set_interface_status(dvs, if_name, "up") + + # Create router interface. + router_interface_id, router_intf_key, attr_list = ( + self._p4rt_router_intf_obj.create_router_interface() + ) + util.verify_response( + self.response_consumer, router_intf_key, attr_list, + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count = 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create neighbor. + neighbor_id, neighbor_key, attr_list = ( + self._p4rt_neighbor_obj.create_neighbor() + ) + util.verify_response( + self.response_consumer, neighbor_key, attr_list, "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create nexthop. + nexthop_id, nexthop_key, attr_list = ( + self._p4rt_nexthop_obj.create_next_hop() + ) + util.verify_response( + self.response_consumer, nexthop_key, attr_list, "SWSS_RC_SUCCESS") + # Get nexthop_oid of newly created nexthop. + nexthop_oid = self._p4rt_nexthop_obj.get_newly_created_nexthop_oid() + assert nexthop_oid is not None + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create wcmp group with one member. + wcmp_group_id, wcmp_group_key, attr_list = ( + self._p4rt_wcmp_group_obj.create_wcmp_group(watch_port=port_name) + ) + util.verify_response( + self.response_consumer, wcmp_group_key, attr_list, + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 2 in Redis DB + # (1 each for WCMP group and member). + count += 2 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Query application database for wcmp group entries. + wcmp_group_entries = util.get_keys( + self._p4rt_wcmp_group_obj.appl_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + self._p4rt_wcmp_group_obj.TBL_NAME) + assert len(wcmp_group_entries) == ( + self._p4rt_wcmp_group_obj.get_original_appl_db_entries_count() + 1 + ) + + # Query application database for newly created wcmp group key. + (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.appl_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + wcmp_group_key) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query application state database for wcmp group entries. + state_wcmp_group_entries = util.get_keys( + self._p4rt_wcmp_group_obj.appl_state_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + self._p4rt_wcmp_group_obj.TBL_NAME) + assert len(state_wcmp_group_entries) == ( + self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() + + 1 + ) + + # Query application state database for newly created wcmp group key. + (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.appl_state_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + wcmp_group_key) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query ASIC database for wcmp group entries. + wcmp_group_entries = util.get_keys(self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME) + assert len(wcmp_group_entries) == ( + self._p4rt_wcmp_group_obj.get_original_asic_db_group_entries_count() + + 1 + ) + + # Query ASIC database for newly created wcmp group oid. + wcmp_group_oid = self._p4rt_wcmp_group_obj.get_newly_created_wcmp_group_oid() + assert wcmp_group_oid is not None + (status, fvs) = util.get_key( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, + wcmp_group_oid + ) + assert status == True + asic_attr_list = [ + (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_TYPE, + (self._p4rt_wcmp_group_obj. + SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP)) + ] + util.verify_attr(fvs, asic_attr_list) + + # Query ASIC database for newly created wcmp group member key. + asic_db_group_member_key = self._p4rt_wcmp_group_obj.get_newly_created_wcmp_group_member_asic_db_key() + assert asic_db_group_member_key is not None + (status, fvs) = util.get_key( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME, + asic_db_group_member_key + ) + assert status == True + asic_attr_list = [ + (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_NEXTHOP_GROUP_ID, + wcmp_group_oid), + (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_NEXTHOP_ID, + nexthop_oid), + (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_WEIGHT, + str(self._p4rt_wcmp_group_obj.DEFAULT_WEIGHT)) + ] + util.verify_attr(fvs, asic_attr_list) + + # Force oper-down for the associated port. + util.set_interface_status(dvs, if_name) + + # Check ASIC DB to verify that associated member for watch_port is + # pruned. + wcmp_group_member_entries = util.get_keys( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME + ) + assert len(wcmp_group_member_entries) == ( + self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() + ) + + # Check APPL STATE DB to verify no change. + (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.appl_state_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + wcmp_group_key) + assert status == True + util.verify_attr(fvs, attr_list) + + # Force oper-up for associated port. + util.set_interface_status(dvs, if_name, "up") + + # Check pruned next hop member is restored in ASIC DB. + wcmp_group_member_entries = util.get_keys( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME + ) + assert len(wcmp_group_member_entries) == ( + self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() + + 1 + ) + asic_db_group_member_key = self._p4rt_wcmp_group_obj.get_newly_created_wcmp_group_member_asic_db_key() + assert asic_db_group_member_key is not None + (status, fvs) = util.get_key( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME, + asic_db_group_member_key + ) + assert status == True + util.verify_attr(fvs, asic_attr_list) + + # Delete WCMP group member. + self._p4rt_wcmp_group_obj.remove_app_db_entry(wcmp_group_key) + + # Verify that P4RT key to OID count decremented by 2 in Redis DB + # (1 each for WCMP group and member). + count -= 2 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Verify that APPL STATE DB is now updated. + state_wcmp_group_entries = util.get_keys( + self._p4rt_wcmp_group_obj.appl_state_db, + (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + + self._p4rt_wcmp_group_obj.TBL_NAME)) + assert len(state_wcmp_group_entries) == ( + self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() + ) + + # Delete next hop. + self._p4rt_nexthop_obj.remove_app_db_entry(nexthop_key) + + # Verify that P4RT key to OID count decremented by 1 in Redis DB. + count -= 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Delete neighbor. + self._p4rt_neighbor_obj.remove_app_db_entry(neighbor_key) + + # Verify that P4RT key to OID count decremented by 1 in Redis DB. + count -= 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Delete router interface. + self._p4rt_router_intf_obj.remove_app_db_entry(router_intf_key) + + # Verify that P4RT key to OID count is same as the original count. + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + + def test_PruneNextHopOnWarmBoot(self, dvs, testlog): + # Initialize L3 objects and database connectors. + self._set_up(dvs) + cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) + + # Maintain original WCMP group entries for ASIC DB. + db_list = ((self._p4rt_wcmp_group_obj.appl_db, + "%s:%s" % (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + self._p4rt_wcmp_group_obj.TBL_NAME)), + (self._p4rt_wcmp_group_obj.appl_state_db, + "%s:%s" % (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + self._p4rt_wcmp_group_obj.TBL_NAME)), + (self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME), + (self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME)) + self._p4rt_wcmp_group_obj.get_original_redis_entries(db_list) + db_list = ((self._p4rt_nexthop_obj.asic_db, + self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME),) + self._p4rt_nexthop_obj.get_original_redis_entries(db_list) + + # Fetch the original key to oid information from Redis DB. + key_to_oid_helper = util.KeyToOidDBHelper(dvs) + _, original_key_oid_info = key_to_oid_helper.get_db_info() + + # Bring up port under test. + port_name = "Ethernet0" + if_name = "eth0" + util.initialize_interface(dvs, port_name, "10.0.0.0/31") + util.set_interface_status(dvs, if_name, "up") + + # Create router interface. + router_interface_id, router_intf_key, attr_list = ( + self._p4rt_router_intf_obj.create_router_interface() + ) + util.verify_response( + self.response_consumer, router_intf_key, attr_list, + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count = 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create neighbor. + neighbor_id, neighbor_key, attr_list = ( + self._p4rt_neighbor_obj.create_neighbor() + ) + util.verify_response( + self.response_consumer, neighbor_key, attr_list, "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create nexthop. + nexthop_id, nexthop_key, attr_list = ( + self._p4rt_nexthop_obj.create_next_hop() + ) + util.verify_response( + self.response_consumer, nexthop_key, attr_list, "SWSS_RC_SUCCESS") + # Get nexthop_oid of newly created nexthop. + nexthop_oid = self._p4rt_nexthop_obj.get_newly_created_nexthop_oid() + assert nexthop_oid is not None + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create wcmp group with one member. + wcmp_group_id, wcmp_group_key, attr_list = ( + self._p4rt_wcmp_group_obj.create_wcmp_group(watch_port=port_name) + ) + util.verify_response( + self.response_consumer, wcmp_group_key, attr_list, + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 2 in Redis DB + # (1 each for WCMP group and member). + count += 2 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Query application database for wcmp group entries. + wcmp_group_entries = util.get_keys( + self._p4rt_wcmp_group_obj.appl_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + self._p4rt_wcmp_group_obj.TBL_NAME) + assert len(wcmp_group_entries) == ( + self._p4rt_wcmp_group_obj.get_original_appl_db_entries_count() + 1 + ) + + # Query application database for newly created wcmp group key. + (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.appl_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + wcmp_group_key) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query application state database for wcmp group entries. + state_wcmp_group_entries = util.get_keys( + self._p4rt_wcmp_group_obj.appl_state_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + self._p4rt_wcmp_group_obj.TBL_NAME) + assert len(state_wcmp_group_entries) == ( + self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() + + 1 + ) + + # Query application state database for newly created wcmp group key. + (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.appl_state_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + wcmp_group_key) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query ASIC database for wcmp group entries. + wcmp_group_entries = util.get_keys(self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME) + assert len(wcmp_group_entries) == ( + self._p4rt_wcmp_group_obj.get_original_asic_db_group_entries_count() + + 1 + ) + + # Query ASIC database for newly created wcmp group oid. + wcmp_group_oid = self._p4rt_wcmp_group_obj.get_newly_created_wcmp_group_oid() + assert wcmp_group_oid is not None + (status, fvs) = util.get_key( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, + wcmp_group_oid + ) + assert status == True + asic_attr_list = [ + (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_TYPE, + (self._p4rt_wcmp_group_obj. + SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP)) + ] + util.verify_attr(fvs, asic_attr_list) + + # Query ASIC database for wcmp group member entries. + wcmp_group_member_entries = util.get_keys( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME + ) + assert len(wcmp_group_member_entries) == ( + self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() + + 1 + ) + + # Query ASIC database for newly created wcmp group member key. + asic_db_group_member_key = self._p4rt_wcmp_group_obj.get_newly_created_wcmp_group_member_asic_db_key() + assert asic_db_group_member_key is not None + (status, fvs) = util.get_key( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME, + asic_db_group_member_key + ) + assert status == True + asic_attr_list = [ + (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_NEXTHOP_GROUP_ID, + wcmp_group_oid), + (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_NEXTHOP_ID, + nexthop_oid), + (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_WEIGHT, + str(self._p4rt_wcmp_group_obj.DEFAULT_WEIGHT)) + ] + util.verify_attr(fvs, asic_attr_list) + + # Bring down the port. + util.set_interface_status(dvs, if_name) + + # Execute the warm reboot. + dvs.runcmd("config warm_restart enable swss") + dvs.stop_swss() + dvs.start_swss() + + # Make sure the system is stable. + dvs.check_swss_ready() + + # Verify that the associated next hop is pruned in ASIC DB. + wcmp_group_member_entries = util.get_keys( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME + ) + assert len(wcmp_group_member_entries) == ( + self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() + ) + + # Delete WCMP group member. + self._p4rt_wcmp_group_obj.remove_app_db_entry(wcmp_group_key) + + # Verify that P4RT key to OID count decremented by 2 in Redis DB + # (1 each for WCMP group and member). + count -= 2 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Verify that APPL STATE DB is updated. + state_wcmp_group_entries = util.get_keys( + self._p4rt_wcmp_group_obj.appl_state_db, + (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + + self._p4rt_wcmp_group_obj.TBL_NAME)) + assert len(state_wcmp_group_entries) == ( + self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() + ) + + # Delete next hop. + self._p4rt_nexthop_obj.remove_app_db_entry(nexthop_key) + + # Verify that P4RT key to OID count decremented by 1 in Redis DB. + count -= 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Delete neighbor. + self._p4rt_neighbor_obj.remove_app_db_entry(neighbor_key) + + # Verify that P4RT key to OID count decremented by 1 in Redis DB. + count -= 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Delete router interface. + self._p4rt_router_intf_obj.remove_app_db_entry(router_intf_key) + + # Verify that P4RT key to OID count is same as the original count. + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + + def test_CreateWcmpMemberForOperUpWatchportOnly(self, dvs, testlog): + # Initialize L3 objects and database connectors. + self._set_up(dvs) + cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) + + # Maintain original WCMP group entries for ASIC DB. + db_list = ((self._p4rt_wcmp_group_obj.appl_db, + "%s:%s" % (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + self._p4rt_wcmp_group_obj.TBL_NAME)), + (self._p4rt_wcmp_group_obj.appl_state_db, + "%s:%s" % (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + self._p4rt_wcmp_group_obj.TBL_NAME)), + (self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME), + (self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME)) + self._p4rt_wcmp_group_obj.get_original_redis_entries(db_list) + db_list = ((self._p4rt_nexthop_obj.asic_db, + self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME),) + self._p4rt_nexthop_obj.get_original_redis_entries(db_list) + + # Fetch the original key to oid information from Redis DB. + key_to_oid_helper = util.KeyToOidDBHelper(dvs) + _, original_key_oid_info = key_to_oid_helper.get_db_info() + + # Force oper-down on port under test. + port_name = "Ethernet0" + if_name = "eth0" + util.initialize_interface(dvs, port_name, "10.0.0.0/31") + util.set_interface_status(dvs, if_name) + + # Create router interface. + router_interface_id, router_intf_key, attr_list = ( + self._p4rt_router_intf_obj.create_router_interface() + ) + util.verify_response( + self.response_consumer, router_intf_key, attr_list, + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count = 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create neighbor. + neighbor_id, neighbor_key, attr_list = ( + self._p4rt_neighbor_obj.create_neighbor() + ) + util.verify_response( + self.response_consumer, neighbor_key, attr_list, "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create nexthop. + nexthop_id, nexthop_key, attr_list = ( + self._p4rt_nexthop_obj.create_next_hop() + ) + util.verify_response( + self.response_consumer, nexthop_key, attr_list, "SWSS_RC_SUCCESS") + # Get nexthop_oid of newly created nexthop. + nexthop_oid = self._p4rt_nexthop_obj.get_newly_created_nexthop_oid() + assert nexthop_oid is not None + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create wcmp group with one member. + wcmp_group_id, wcmp_group_key, attr_list = ( + self._p4rt_wcmp_group_obj.create_wcmp_group(watch_port=port_name) + ) + util.verify_response( + self.response_consumer, wcmp_group_key, attr_list, + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB + # (WCMP group member is not created for operationally down watchport). + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Query application database for wcmp group entries. + wcmp_group_entries = util.get_keys( + self._p4rt_wcmp_group_obj.appl_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + self._p4rt_wcmp_group_obj.TBL_NAME) + assert len(wcmp_group_entries) == ( + self._p4rt_wcmp_group_obj.get_original_appl_db_entries_count() + 1 + ) + + # Query application database for newly created wcmp group key. + (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.appl_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + wcmp_group_key) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query application state database for wcmp group entries. + state_wcmp_group_entries = util.get_keys( + self._p4rt_wcmp_group_obj.appl_state_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + self._p4rt_wcmp_group_obj.TBL_NAME) + assert len(state_wcmp_group_entries) == ( + self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() + 1 + ) + + # Query application state database for newly created wcmp group key. + (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.appl_state_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + wcmp_group_key) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query ASIC database for wcmp group entries. + wcmp_group_entries = util.get_keys(self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME) + assert len(wcmp_group_entries) == ( + self._p4rt_wcmp_group_obj.get_original_asic_db_group_entries_count() + + 1 + ) + + # Query ASIC database for newly created wcmp group oid. + wcmp_group_oid = self._p4rt_wcmp_group_obj.get_newly_created_wcmp_group_oid() + assert wcmp_group_oid is not None + (status, fvs) = util.get_key( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, + wcmp_group_oid + ) + assert status == True + asic_attr_list = [ + (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_TYPE, + (self._p4rt_wcmp_group_obj. + SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP)) + ] + util.verify_attr(fvs, asic_attr_list) + + # Query ASIC database for wcmp group member entries (expect no entry). + wcmp_group_member_entries = util.get_keys( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME + ) + assert len(wcmp_group_member_entries) == ( + self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() + ) + + # Bring up the port. + util.set_interface_status(dvs, if_name, "up") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB + # (WCMP group member is now expected to be created in SAI due to + # watchport now being operationally up) + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Verify that next hop member is now created in SAI. + wcmp_group_member_entries = util.get_keys( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME + ) + assert len(wcmp_group_member_entries) == ( + self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() + + 1 + ) + asic_db_group_member_key = self._p4rt_wcmp_group_obj.get_newly_created_wcmp_group_member_asic_db_key() + assert asic_db_group_member_key is not None + (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.asic_db, + (self._p4rt_wcmp_group_obj. + ASIC_DB_GROUP_MEMBER_TBL_NAME), + asic_db_group_member_key) + assert status == True + asic_attr_list = [ + (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_NEXTHOP_GROUP_ID, + wcmp_group_oid), + (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_NEXTHOP_ID, + nexthop_oid), + (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_WEIGHT, + str(self._p4rt_wcmp_group_obj.DEFAULT_WEIGHT)) + ] + util.verify_attr(fvs, asic_attr_list) + + # Delete WCMP group member. + self._p4rt_wcmp_group_obj.remove_app_db_entry(wcmp_group_key) + + # Verify that P4RT key to OID count decremented by 2 in Redis DB + # (1 each for WCMP group and member). + count -= 2 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Verify that APPL STATE DB is updated. + state_wcmp_group_entries = util.get_keys( + self._p4rt_wcmp_group_obj.appl_state_db, + (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + + self._p4rt_wcmp_group_obj.TBL_NAME)) + assert len(state_wcmp_group_entries) == ( + self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() + ) + + # Delete next hop. + self._p4rt_nexthop_obj.remove_app_db_entry(nexthop_key) + + # Verify that P4RT key to OID count decremented by 1 in Redis DB. + count -= 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Delete neighbor. + self._p4rt_neighbor_obj.remove_app_db_entry(neighbor_key) + + # Verify that P4RT key to OID count decremented by 1 in Redis DB. + count -= 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Delete router interface. + self._p4rt_router_intf_obj.remove_app_db_entry(router_intf_key) + + # Verify that P4RT key to OID count is same as the original count. + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + + def test_RemovePrunedWcmpGroupMember(self, dvs, testlog): + # Initialize L3 objects and database connectors. + self._set_up(dvs) + cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) + + # Maintain original WCMP group entries for ASIC DB. + db_list = ((self._p4rt_wcmp_group_obj.appl_db, + "%s:%s" % (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + self._p4rt_wcmp_group_obj.TBL_NAME)), + (self._p4rt_wcmp_group_obj.appl_state_db, + "%s:%s" % (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + self._p4rt_wcmp_group_obj.TBL_NAME)), + (self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME), + (self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME)) + self._p4rt_wcmp_group_obj.get_original_redis_entries(db_list) + db_list = ((self._p4rt_nexthop_obj.asic_db, + self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME),) + self._p4rt_nexthop_obj.get_original_redis_entries(db_list) + + # Fetch the original key to oid information from Redis DB. + key_to_oid_helper = util.KeyToOidDBHelper(dvs) + _, original_key_oid_info = key_to_oid_helper.get_db_info() + + # Force oper-down on port under test. + port_name = "Ethernet0" + if_name = "eth0" + util.initialize_interface(dvs, port_name, "10.0.0.0/31") + util.set_interface_status(dvs, if_name) + + # Create router interface. + router_interface_id, router_intf_key, attr_list = ( + self._p4rt_router_intf_obj.create_router_interface() + ) + util.verify_response( + self.response_consumer, router_intf_key, attr_list, + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count = 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create neighbor. + neighbor_id, neighbor_key, attr_list = ( + self._p4rt_neighbor_obj.create_neighbor() + ) + util.verify_response( + self.response_consumer, neighbor_key, attr_list, "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create nexthop. + nexthop_id, nexthop_key, attr_list = ( + self._p4rt_nexthop_obj.create_next_hop() + ) + util.verify_response( + self.response_consumer, nexthop_key, attr_list, "SWSS_RC_SUCCESS") + # Get nexthop_oid of newly created nexthop. + nexthop_oid = self._p4rt_nexthop_obj.get_newly_created_nexthop_oid() + assert nexthop_oid is not None + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create wcmp group with one member. + wcmp_group_id, wcmp_group_key, attr_list = ( + self._p4rt_wcmp_group_obj.create_wcmp_group(watch_port=port_name) + ) + util.verify_response( + self.response_consumer, wcmp_group_key, attr_list, + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB + # (WCMP group member is not created for operationally down watchport). + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Query application database for wcmp group entries. + wcmp_group_entries = util.get_keys( + self._p4rt_wcmp_group_obj.appl_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + self._p4rt_wcmp_group_obj.TBL_NAME) + assert len(wcmp_group_entries) == ( + self._p4rt_wcmp_group_obj.get_original_appl_db_entries_count() + 1 + ) + + # Query application database for newly created wcmp group key. + (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.appl_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + wcmp_group_key) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query application state database for wcmp group entries. + state_wcmp_group_entries = util.get_keys( + self._p4rt_wcmp_group_obj.appl_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + self._p4rt_wcmp_group_obj.TBL_NAME) + assert len(state_wcmp_group_entries) == ( + self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() + 1 + ) + + # Query application state database for newly created wcmp group key. + (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.appl_state_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + wcmp_group_key) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query ASIC database for wcmp group entries. + wcmp_group_entries = util.get_keys(self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME) + assert len(wcmp_group_entries) == ( + self._p4rt_wcmp_group_obj.get_original_asic_db_group_entries_count() + + 1 + ) + + # Query ASIC database for newly created wcmp group oid. + wcmp_group_oid = self._p4rt_wcmp_group_obj.get_newly_created_wcmp_group_oid() + assert wcmp_group_oid is not None + (status, fvs) = util.get_key( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, + wcmp_group_oid + ) + assert status == True + asic_attr_list = [ + (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_TYPE, + (self._p4rt_wcmp_group_obj. + SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP)) + ] + util.verify_attr(fvs, asic_attr_list) + + # Query ASIC database for wcmp group member entries. + wcmp_group_member_entries = util.get_keys( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME + ) + assert len(wcmp_group_member_entries) == ( + self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() + + 1 + ) + + # Query ASIC database for wcmp group member entries (expect no entry). + wcmp_group_member_entries = util.get_keys( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME + ) + assert len( + wcmp_group_member_entries) == self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() + + # Delete the pruned wcmp group member. + self._p4rt_wcmp_group_obj.remove_app_db_entry(wcmp_group_key) + + # Verify that P4RT key to OID count decremented by 1 in Redis DB. + count -= 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Verify that APPL STATE DB is updated. + state_wcmp_group_entries = util.get_keys( + self._p4rt_wcmp_group_obj.appl_state_db, + (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + + self._p4rt_wcmp_group_obj.TBL_NAME)) + assert len(state_wcmp_group_entries) == ( + self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() + ) + + # Verify that ASIC DB is updated. + wcmp_group_entries = util.get_keys( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME + ) + assert len(wcmp_group_entries) == ( + self._p4rt_wcmp_group_obj.get_original_asic_db_group_entries_count() + ) + wcmp_group_member_entries = util.get_keys( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME + ) + assert len(wcmp_group_member_entries) == ( + self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() + ) + + # Delete next hop. + self._p4rt_nexthop_obj.remove_app_db_entry(nexthop_key) + + # Verify that P4RT key to OID count decremented by 1 in Redis DB. + count -= 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Delete neighbor. + self._p4rt_neighbor_obj.remove_app_db_entry(neighbor_key) + + # Verify that P4RT key to OID count decremented by 1 in Redis DB. + count -= 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Delete router interface. + self._p4rt_router_intf_obj.remove_app_db_entry(router_intf_key) + + # Verify that P4RT key to OID count is same as the original count. + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) diff --git a/tests/p4rt/test_p4rt_acl.py b/tests/p4rt/test_p4rt_acl.py new file mode 100644 index 00000000000..89015fc9d5e --- /dev/null +++ b/tests/p4rt/test_p4rt_acl.py @@ -0,0 +1,1253 @@ +# Lint as: python3 +from swsscommon import swsscommon + +import pytest +import util +import acl + + +def get_exist_entry(dvs, table): + db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(db, table) + entries = list(tbl.getKeys()) + return entries[0] + + +def verify_selected_attr_vals(db, table, key, expected_attrs): + tbl = swsscommon.Table(db, table) + keys = tbl.getKeys() + assert key in keys, "The desired key is not presented" + + status, fvs = tbl.get(key) + assert status, "Got an error when get a key" + + fv_dict = dict(fvs) + + for attr_name, expected_val in expected_attrs: + assert attr_name in fv_dict, "Attribute %s not found in %s" % (attr_name, key) + assert fv_dict[attr_name] == expected_val, "Wrong value %s for the attribute %s = %s" % ( + fv_dict[attr_name], + attr_name, + expected_val, + ) + + +class TestP4RTAcl(object): + def _set_up(self, dvs): + self._p4rt_acl_table_definition_obj = acl.P4RtAclTableDefinitionWrapper() + self._p4rt_acl_group_obj = acl.P4RtAclGroupWrapper() + self._p4rt_acl_group_member_obj = acl.P4RtAclGroupMemberWrapper() + self._p4rt_acl_rule_obj = acl.P4RtAclRuleWrapper() + self._p4rt_acl_counter_obj = acl.P4RtAclCounterWrapper() + self._p4rt_acl_meter_obj = acl.P4RtAclMeterWrapper() + self._p4rt_trap_group_obj = acl.P4RtTrapGroupWrapper() + self._p4rt_user_trap_obj = acl.P4RtUserDefinedTrapWrapper() + self._p4rt_hostif_obj = acl.P4RtHostifWrapper() + self._p4rt_hostif_table_entry_obj = acl.P4RtHostifTableEntryWrapper() + self._p4rt_udf_group_obj = acl.P4RtUdfGroupWrapper() + self._p4rt_udf_match_obj = acl.P4RtUdfMatchWrapper() + self._p4rt_udf_obj = acl.P4RtUdfWrapper() + + self._p4rt_acl_group_member_obj.set_up_databases(dvs) + self._p4rt_acl_group_obj.set_up_databases(dvs) + self._p4rt_acl_table_definition_obj.set_up_databases(dvs) + self._p4rt_acl_rule_obj.set_up_databases(dvs) + self._p4rt_acl_counter_obj.set_up_databases(dvs) + self._p4rt_acl_meter_obj.set_up_databases(dvs) + self._p4rt_trap_group_obj.set_up_databases(dvs) + self._p4rt_user_trap_obj.set_up_databases(dvs) + self._p4rt_hostif_obj.set_up_databases(dvs) + self._p4rt_hostif_table_entry_obj.set_up_databases(dvs) + self._p4rt_udf_group_obj.set_up_databases(dvs) + self._p4rt_udf_match_obj.set_up_databases(dvs) + self._p4rt_udf_obj.set_up_databases(dvs) + + self.response_consumer = swsscommon.NotificationConsumer( + self._p4rt_acl_table_definition_obj.appl_state_db, "APPL_DB_P4RT_TABLE_RESPONSE_CHANNEL" + ) + + @pytest.mark.skip(reason="p4orch is not enabled") + def test_AclRulesAddUpdateDelPass(self, dvs, testlog): + # initialize ACL table objects and database connectors + self._set_up(dvs) + + # maintain list of original Application and ASIC DB entries before adding + # new ACL table + original_appl_acl_tables = util.get_keys( + self._p4rt_acl_table_definition_obj.appl_db, + self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME + + ":" + + self._p4rt_acl_table_definition_obj.TBL_NAME, + ) + original_appl_state_acl_tables = util.get_keys( + self._p4rt_acl_table_definition_obj.appl_state_db, + self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME + + ":" + + self._p4rt_acl_table_definition_obj.TBL_NAME, + ) + original_asic_acl_tables = util.get_keys( + self._p4rt_acl_table_definition_obj.asic_db, + self._p4rt_acl_table_definition_obj.ASIC_DB_TBL_NAME, + ) + original_asic_udf_groups = util.get_keys( + self._p4rt_udf_group_obj.asic_db, self._p4rt_udf_group_obj.ASIC_DB_TBL_NAME + ) + original_asic_udf_matches = util.get_keys( + self._p4rt_udf_match_obj.asic_db, self._p4rt_udf_match_obj.ASIC_DB_TBL_NAME + ) + original_asic_udfs = util.get_keys( + self._p4rt_udf_obj.asic_db, self._p4rt_udf_obj.ASIC_DB_TBL_NAME + ) + + # query ASIC database for ACL groups + acl_groups_asic_keys = util.get_keys( + self._p4rt_acl_group_obj.asic_db, self._p4rt_acl_group_obj.ASIC_DB_TBL_NAME + ) + assert ( + len(acl_groups_asic_keys) == 3 + ) # INGRESS, EGRESS and PRE_INGRESS bind to SWITCH + switch_oid = get_exist_entry(dvs, "ASIC_STATE:SAI_OBJECT_TYPE_SWITCH") + # Ingress + ingress_group_oids = self._p4rt_acl_group_obj.get_group_oids_by_stage( + acl.INGRESS_STAGE + ) + assert len(ingress_group_oids) == 1 + # Egress + egress_group_oids = self._p4rt_acl_group_obj.get_group_oids_by_stage( + acl.EGRESS_STAGE + ) + assert len(egress_group_oids) == 1 + # Pre_ingress + pre_ingress_group_oids = self._p4rt_acl_group_obj.get_group_oids_by_stage( + acl.PRE_INGRESS_STAGE + ) + assert len(pre_ingress_group_oids) == 1 + verify_selected_attr_vals( + self._p4rt_acl_group_obj.asic_db, + "ASIC_STATE:SAI_OBJECT_TYPE_SWITCH", + switch_oid, + [("SAI_SWITCH_ATTR_PRE_INGRESS_ACL", pre_ingress_group_oids[0]), + ("SAI_SWITCH_ATTR_INGRESS_ACL",ingress_group_oids[0]), + ("SAI_SWITCH_ATTR_EGRESS_ACL", egress_group_oids[0])], + ) + + # Verify APP DB trap groups for QOS_QUEUE + genetlink_name = "genl_packet" + genetlink_mcgrp_name = "packets" + + for queue_num in range(1, 9): + attr_list = [ + (self._p4rt_trap_group_obj.QUEUE, str(queue_num)), + (self._p4rt_trap_group_obj.HOSTIF_NAME, genetlink_name), + ( + self._p4rt_trap_group_obj.HOSTIF_GENETLINK_MCGRP_NAME, + genetlink_mcgrp_name, + ), + ] + + # query application database for trap group + (status, fvs) = util.get_key( + self._p4rt_trap_group_obj.appl_db, + self._p4rt_trap_group_obj.APP_DB_TBL_NAME, + self._p4rt_trap_group_obj.TBL_NAME_PREFIX + str(queue_num), + ) + assert status == True + util.verify_attr(fvs, attr_list) + + # create ACL table + table_name = "ACL_PUNT_TABLE_RULE_TEST" + stage = "INGRESS" + priority = "234" + size = "123" + ether_type = '{"kind":"sai_field","sai_field":"SAI_ACL_TABLE_ATTR_FIELD_ETHER_TYPE","format":"HEX_STRING","bitwidth":8}' + ether_dst = '{"kind":"sai_field","sai_field":"SAI_ACL_TABLE_ATTR_FIELD_DST_MAC","format":"MAC","bitwidth":48}' + is_ip = '{"kind":"sai_field","sai_field":"SAI_ACL_TABLE_ATTR_FIELD_ACL_IP_TYPE/IP","format":"HEX_STRING","bitwidth":1}' + is_ipv4 = '{"kind":"sai_field","sai_field":"SAI_ACL_TABLE_ATTR_FIELD_ACL_IP_TYPE/IPV4ANY","format":"HEX_STRING","bitwidth":1}' + is_ipv6 = '{"kind":"sai_field","sai_field":"SAI_ACL_TABLE_ATTR_FIELD_ACL_IP_TYPE/IPV6ANY","format":"HEX_STRING","bitwidth":1}' + is_arp = '{"kind":"sai_field","sai_field":"SAI_ACL_TABLE_ATTR_FIELD_ACL_IP_TYPE/ARP","format":"HEX_STRING","bitwidth":1}' + arp_tpa = """{\"kind\":\"composite\",\"format\":\"HEX_STRING\",\"bitwidth\":32, + \"elements\":[{\"kind\":\"udf\",\"base\":\"SAI_UDF_BASE_L3\",\"bitwidth\":16,\"offset\":24}, + {\"kind\":\"udf\",\"base\":\"SAI_UDF_BASE_L3\",\"bitwidth\":16,\"offset\":26}]} + """ + src_ipv6_64bit = """{\"kind\":\"composite\",\"format\":\"IPV6\",\"bitwidth\":64, + \"elements\":[{\"kind\":\"sai_field\",\"sai_field\":\"SAI_ACL_TABLE_ATTR_FIELD_SRC_IPV6_WORD3\",\"bitwidth\":32}, + {\"kind\":\"sai_field\",\"sai_field\":\"SAI_ACL_TABLE_ATTR_FIELD_SRC_IPV6_WORD2\",\"bitwidth\":32}]} + """ + meter_unit = "PACKETS" + counter_unit = "BOTH" + copy_and_set_tc = '[{"action":"SAI_PACKET_ACTION_COPY"},{"action":"SAI_ACL_ENTRY_ATTR_ACTION_SET_TC","param":"traffic_class"}]' + punt_and_set_tc = '[{"action":"SAI_PACKET_ACTION_TRAP","packet_color":"SAI_PACKET_COLOR_RED"},{"action":"SAI_ACL_ENTRY_ATTR_ACTION_SET_TC","param":"traffic_class"}]' + qos_queue = '[{"action":"SAI_PACKET_ACTION_TRAP"},{"action":"QOS_QUEUE","param":"cpu_queue"}]' + + attr_list = [ + (self._p4rt_acl_table_definition_obj.STAGE_FIELD, stage), + (self._p4rt_acl_table_definition_obj.PRIORITY_FIELD, priority), + (self._p4rt_acl_table_definition_obj.SIZE_FIELD, size), + (self._p4rt_acl_table_definition_obj.MATCH_FIELD_ETHER_DST, ether_dst), + (self._p4rt_acl_table_definition_obj.MATCH_FIELD_ETHER_TYPE, ether_type), + (self._p4rt_acl_table_definition_obj.MATCH_FIELD_IS_IP, is_ip), + (self._p4rt_acl_table_definition_obj.MATCH_FIELD_IS_IPV4, is_ipv4), + (self._p4rt_acl_table_definition_obj.MATCH_FIELD_IS_IPV6, is_ipv6), + (self._p4rt_acl_table_definition_obj.MATCH_FIELD_IS_ARP, is_arp), + ( + self._p4rt_acl_table_definition_obj.MATCH_FIELD_SRC_IPV6_64BIT, + src_ipv6_64bit, + ), + (self._p4rt_acl_table_definition_obj.MATCH_FIELD_ARP_TPA, arp_tpa), + ( + self._p4rt_acl_table_definition_obj.ACTION_COPY_AND_SET_TC, + copy_and_set_tc, + ), + ( + self._p4rt_acl_table_definition_obj.ACTION_PUNT_AND_SET_TC, + punt_and_set_tc, + ), + (self._p4rt_acl_table_definition_obj.ACTION_SET_QOS_QUEUE, qos_queue), + (self._p4rt_acl_table_definition_obj.METER_UNIT, meter_unit), + (self._p4rt_acl_table_definition_obj.COUNTER_UNIT, counter_unit), + ] + + self._p4rt_acl_table_definition_obj.set_app_db_entry( + self._p4rt_acl_table_definition_obj.TBL_NAME + ":" + table_name, attr_list + ) + util.verify_response( + self.response_consumer, + self._p4rt_acl_table_definition_obj.TBL_NAME + ":" + table_name, + attr_list, + "SWSS_RC_SUCCESS", + ) + + # query application database for ACL tables + acl_tables = util.get_keys( + self._p4rt_acl_table_definition_obj.appl_db, + self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME + + ":" + + self._p4rt_acl_table_definition_obj.TBL_NAME, + ) + assert len(acl_tables) == len(original_appl_acl_tables) + 1 + + # query application database for newly created ACL table + (status, fvs) = util.get_key( + self._p4rt_acl_table_definition_obj.appl_db, + self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME + + ":" + + self._p4rt_acl_table_definition_obj.TBL_NAME, + table_name, + ) + assert status == True + util.verify_attr(fvs, attr_list) + + # query application state database for ACL tables + state_acl_tables = util.get_keys( + self._p4rt_acl_table_definition_obj.appl_state_db, + self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME + + ":" + + self._p4rt_acl_table_definition_obj.TBL_NAME, + ) + assert len(state_acl_tables) == len(original_appl_state_acl_tables) + 1 + + # query application state database for newly created ACL table + (status, fvs) = util.get_key( + self._p4rt_acl_table_definition_obj.appl_state_db, + self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME + + ":" + + self._p4rt_acl_table_definition_obj.TBL_NAME, + table_name, + ) + assert status == True + util.verify_attr(fvs, attr_list) + + # query ASIC database for default UDF wildcard match + udf_match_asic_db_key = original_asic_udf_matches[0] + + (status, fvs) = util.get_key( + self._p4rt_udf_match_obj.asic_db, + self._p4rt_udf_match_obj.ASIC_DB_TBL_NAME, + udf_match_asic_db_key, + ) + assert status == True + attr_list = [("NULL", "NULL")] + util.verify_attr(fvs, attr_list) + + # query ASIC database for UDF groups + udf_groups_asic = util.get_keys( + self._p4rt_udf_group_obj.asic_db, self._p4rt_udf_group_obj.ASIC_DB_TBL_NAME + ) + assert len(udf_groups_asic) == len(original_asic_udf_groups) + 2 + + # query ASIC database for newly created UDF groups + udf_groups_asic_db_keys = [ + key for key in udf_groups_asic if key not in original_asic_udf_groups + ] + assert len(udf_groups_asic_db_keys) == 2 + udf_groups_asic_db_keys.sort() + udf_group_min_asic_db_key = udf_groups_asic_db_keys[0] + udf_group_1_asic_db_key = udf_groups_asic_db_keys[1] + + (status, fvs) = util.get_key( + self._p4rt_udf_group_obj.asic_db, + self._p4rt_udf_group_obj.ASIC_DB_TBL_NAME, + udf_group_min_asic_db_key, + ) + assert status == True + attr_list = [ + ( + self._p4rt_udf_group_obj.SAI_UDF_GROUP_ATTR_TYPE, + self._p4rt_udf_group_obj.SAI_UDF_GROUP_TYPE_GENERIC, + ), + (self._p4rt_udf_group_obj.SAI_UDF_GROUP_ATTR_LENGTH, "2"), + ] + util.verify_attr(fvs, attr_list) + + (status, fvs) = util.get_key( + self._p4rt_udf_group_obj.asic_db, + self._p4rt_udf_group_obj.ASIC_DB_TBL_NAME, + udf_group_1_asic_db_key, + ) + assert status == True + attr_list = [ + ( + self._p4rt_udf_group_obj.SAI_UDF_GROUP_ATTR_TYPE, + self._p4rt_udf_group_obj.SAI_UDF_GROUP_TYPE_GENERIC, + ), + (self._p4rt_udf_group_obj.SAI_UDF_GROUP_ATTR_LENGTH, "2"), + ] + util.verify_attr(fvs, attr_list) + + # query ASIC database for UDFs + udfs_asic = util.get_keys( + self._p4rt_udf_obj.asic_db, self._p4rt_udf_obj.ASIC_DB_TBL_NAME + ) + assert len(udfs_asic) == len(original_asic_udfs) + 2 + + # query ASIC database for newly created UDFs + udfs_asic_db_keys = [key for key in udfs_asic if key not in original_asic_udfs] + assert len(udfs_asic_db_keys) == 2 + udfs_asic_db_keys.sort() + udf_0_asic_db_key = udfs_asic_db_keys[0] + udf_1_asic_db_key = udfs_asic_db_keys[1] + + (status, fvs) = util.get_key( + self._p4rt_udf_obj.asic_db, + self._p4rt_udf_obj.ASIC_DB_TBL_NAME, + udf_0_asic_db_key, + ) + assert status == True + attr_list = [ + (self._p4rt_udf_obj.SAI_UDF_ATTR_MATCH_ID, udf_match_asic_db_key), + (self._p4rt_udf_obj.SAI_UDF_ATTR_GROUP_ID, udf_group_min_asic_db_key), + (self._p4rt_udf_obj.SAI_UDF_ATTR_OFFSET, "24"), + (self._p4rt_udf_obj.SAI_UDF_ATTR_BASE, "SAI_UDF_BASE_L3"), + ] + util.verify_attr(fvs, attr_list) + + (status, fvs) = util.get_key( + self._p4rt_udf_obj.asic_db, + self._p4rt_udf_obj.ASIC_DB_TBL_NAME, + udf_1_asic_db_key, + ) + assert status == True + attr_list = [ + (self._p4rt_udf_obj.SAI_UDF_ATTR_MATCH_ID, udf_match_asic_db_key), + (self._p4rt_udf_obj.SAI_UDF_ATTR_GROUP_ID, udf_group_1_asic_db_key), + (self._p4rt_udf_obj.SAI_UDF_ATTR_OFFSET, "26"), + (self._p4rt_udf_obj.SAI_UDF_ATTR_BASE, "SAI_UDF_BASE_L3"), + ] + util.verify_attr(fvs, attr_list) + + # query ASIC database for ACL tables + acl_asic_tables = util.get_keys( + self._p4rt_acl_table_definition_obj.asic_db, + self._p4rt_acl_table_definition_obj.ASIC_DB_TBL_NAME, + ) + assert len(acl_asic_tables) == len(original_asic_acl_tables) + 1 + + # query ASIC database for newly created ACL table + table_asic_db_keys = [ + key for key in acl_asic_tables if key not in original_asic_acl_tables + ] + assert len(table_asic_db_keys) == 1 + table_asic_db_key = table_asic_db_keys[0] + + (status, fvs) = util.get_key( + self._p4rt_acl_table_definition_obj.asic_db, + self._p4rt_acl_table_definition_obj.ASIC_DB_TBL_NAME, + table_asic_db_key, + ) + assert status == True + attr_list = [ + ( + self._p4rt_acl_table_definition_obj.SAI_ACL_TABLE_ATTR_ACL_STAGE, + "SAI_ACL_STAGE_INGRESS", + ), + (self._p4rt_acl_table_definition_obj.SAI_ACL_TABLE_ATTR_SIZE, size), + (self._p4rt_acl_table_definition_obj.SAI_ATTR_MATCH_ETHER_TYPE, "true"), + (self._p4rt_acl_table_definition_obj.SAI_ATTR_MATCH_IP_TYPE, "true"), + (self._p4rt_acl_table_definition_obj.SAI_ATTR_MATCH_DST_MAC, "true"), + (self._p4rt_acl_table_definition_obj.SAI_ATTR_MATCH_SRC_IPV6_WORD3, "true"), + (self._p4rt_acl_table_definition_obj.SAI_ATTR_MATCH_SRC_IPV6_WORD2, "true"), + ( + self._p4rt_acl_table_definition_obj.SAI_ATTR_MATCH_UDF_GROUP_MIN, + udf_group_min_asic_db_key, + ), + ( + self._p4rt_acl_table_definition_obj.SAI_ATTR_MATCH_UDF_GROUP_1, + udf_group_1_asic_db_key, + ), + ( + self._p4rt_acl_table_definition_obj.SAI_ATTR_ACTION_TYPE_LIST, + "1:SAI_ACL_ACTION_TYPE_COUNTER", + ), + ] + util.verify_attr(fvs, attr_list) + + # maintain list of original Application and ASIC DB ACL entries before adding + # new ACL rule + original_appl_acl_rules = util.get_keys( + self._p4rt_acl_rule_obj.appl_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, + ) + original_appl_state_acl_rules = util.get_keys( + self._p4rt_acl_rule_obj.appl_state_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, + ) + original_asic_acl_rules = util.get_keys( + self._p4rt_acl_rule_obj.asic_db, self._p4rt_acl_rule_obj.ASIC_DB_TBL_NAME + ) + original_asic_acl_counters = util.get_keys( + self._p4rt_acl_counter_obj.asic_db, + self._p4rt_acl_counter_obj.ASIC_DB_TBL_NAME, + ) + original_asic_acl_meters = util.get_keys( + self._p4rt_acl_meter_obj.asic_db, self._p4rt_acl_meter_obj.ASIC_DB_TBL_NAME + ) + + # create ACL rule 1 + rule_json_key1 = '{"match/ether_type":"0x0800","match/ether_dst":"00:1a:11:17:5f:80","match/src_ipv6_64bit":"fdf8:f53b:82e4::","match/arp_tpa":"0xff665543","priority":100}' + action = "copy_and_set_tc" + meter_cir = "80" + meter_cbs = "80" + meter_pir = "200" + meter_pbs = "200" + table_name_with_rule_key1 = table_name + ":" + rule_json_key1 + + attr_list = [ + (self._p4rt_acl_rule_obj.ACTION, action), + ("param/traffic_class", "1"), + (self._p4rt_acl_rule_obj.METER_CIR, meter_cir), + (self._p4rt_acl_rule_obj.METER_CBURST, meter_cbs), + (self._p4rt_acl_rule_obj.METER_PIR, meter_pir), + (self._p4rt_acl_rule_obj.METER_PBURST, meter_pbs), + ] + + self._p4rt_acl_rule_obj.set_app_db_entry(table_name_with_rule_key1, attr_list) + util.verify_response( + self.response_consumer, + table_name_with_rule_key1, + attr_list, + "SWSS_RC_SUCCESS", + ) + + # query application database for ACL rules + acl_rules = util.get_keys( + self._p4rt_acl_rule_obj.appl_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, + ) + assert len(acl_rules) == len(original_appl_acl_rules) + 1 + + # query application database for newly created ACL rule + (status, fvs) = util.get_key( + self._p4rt_acl_rule_obj.appl_db, + self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME, + table_name_with_rule_key1, + ) + assert status == True + util.verify_attr(fvs, attr_list) + + # query application state database for ACL rules + state_acl_rules = util.get_keys( + self._p4rt_acl_rule_obj.appl_state_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, + ) + assert len(state_acl_rules) == len(original_appl_state_acl_rules) + 1 + + # query application state database for newly created ACL rule + (status, fvs) = util.get_key( + self._p4rt_acl_rule_obj.appl_state_db, + self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME, + table_name_with_rule_key1, + ) + assert status == True + util.verify_attr(fvs, attr_list) + + # query ASIC database for ACL counters + acl_asic_counters = util.get_keys( + self._p4rt_acl_counter_obj.asic_db, + self._p4rt_acl_counter_obj.ASIC_DB_TBL_NAME, + ) + assert len(acl_asic_counters) == len(original_asic_acl_counters) + 1 + + # query ASIC database for newly created ACL counter + counter_asic_db_keys = [ + key for key in acl_asic_counters if key not in original_asic_acl_counters + ] + assert len(counter_asic_db_keys) == 1 + counter_asic_db_key1 = counter_asic_db_keys[0] + + (status, fvs) = util.get_key( + self._p4rt_acl_counter_obj.asic_db, + self._p4rt_acl_counter_obj.ASIC_DB_TBL_NAME, + counter_asic_db_key1, + ) + assert status == True + attr_list = [ + (self._p4rt_acl_counter_obj.SAI_ATTR_ENABLE_PACKET_COUNT, "true"), + (self._p4rt_acl_counter_obj.SAI_ATTR_ENABLE_BYTE_COUNT, "true"), + (self._p4rt_acl_counter_obj.SAI_ATTR_TABLE_ID, table_asic_db_key), + ] + util.verify_attr(fvs, attr_list) + + # query ASIC database for ACL meters + acl_asic_meters = util.get_keys( + self._p4rt_acl_meter_obj.asic_db, self._p4rt_acl_meter_obj.ASIC_DB_TBL_NAME + ) + assert len(acl_asic_meters) == len(original_asic_acl_meters) + 1 + + # query ASIC database for newly created ACL meter + meter_asic_db_keys = [ + key for key in acl_asic_meters if key not in original_asic_acl_meters + ] + assert len(meter_asic_db_keys) == 1 + meter_asic_db_key1 = meter_asic_db_keys[0] + + (status, fvs) = util.get_key( + self._p4rt_acl_meter_obj.asic_db, + self._p4rt_acl_meter_obj.ASIC_DB_TBL_NAME, + meter_asic_db_key1, + ) + assert status == True + attr_list = [ + (self._p4rt_acl_meter_obj.SAI_ATTR_METER_TYPE, "SAI_METER_TYPE_PACKETS"), + (self._p4rt_acl_meter_obj.SAI_ATTR_METER_MODE, "SAI_POLICER_MODE_TR_TCM"), + (self._p4rt_acl_meter_obj.SAI_ATTR_METER_CIR, meter_cir), + (self._p4rt_acl_meter_obj.SAI_ATTR_METER_CBS, meter_cbs), + (self._p4rt_acl_meter_obj.SAI_ATTR_METER_PIR, meter_pir), + (self._p4rt_acl_meter_obj.SAI_ATTR_METER_PBS, meter_pbs), + ] + util.verify_attr(fvs, attr_list) + + # query ASIC database for ACL rules + acl_asic_rules = util.get_keys( + self._p4rt_acl_rule_obj.asic_db, self._p4rt_acl_rule_obj.ASIC_DB_TBL_NAME + ) + assert len(acl_asic_rules) == len(original_asic_acl_rules) + 1 + + # query ASIC database for newly created ACL rule + rule_asic_db_keys = [ + key for key in acl_asic_rules if key not in original_asic_acl_rules + ] + assert len(rule_asic_db_keys) == 1 + rule_asic_db_key1 = rule_asic_db_keys[0] + + (status, fvs) = util.get_key( + self._p4rt_acl_rule_obj.asic_db, + self._p4rt_acl_rule_obj.ASIC_DB_TBL_NAME, + rule_asic_db_key1, + ) + assert status == True + attr_list = [ + (self._p4rt_acl_rule_obj.SAI_ATTR_ACTION_SET_TC, "1"), + ( + self._p4rt_acl_rule_obj.SAI_ATTR_ACTION_PACKET_ACTION, + "SAI_PACKET_ACTION_COPY", + ), + (self._p4rt_acl_rule_obj.SAI_ATTR_MATCH_ETHER_TYPE, "2048&mask:0xffff"), + ( + self._p4rt_acl_rule_obj.SAI_ATTR_MATCH_IP_TYPE, + "SAI_ACL_IP_TYPE_ANY&mask:0xffffffffffffffff", + ), + ( + self._p4rt_acl_rule_obj.SAI_ATTR_MATCH_DST_MAC, + "00:1A:11:17:5F:80&mask:FF:FF:FF:FF:FF:FF", + ), + ( + self._p4rt_acl_rule_obj.SAI_ATTR_MATCH_SRC_IPV6_WORD3, + "fdf8:f53b::&mask:ffff:ffff::", + ), + ( + self._p4rt_acl_rule_obj.SAI_ATTR_MATCH_SRC_IPV6_WORD2, + "0:0:82e4::&mask:0:0:ffff:ffff::", + ), + ( + self._p4rt_acl_rule_obj.SAI_ATTR_MATCH_UDF_GROUP_MIN, + "2:255,102&mask:2:0xff,0xff", + ), + ( + self._p4rt_acl_rule_obj.SAI_ATTR_MATCH_UDF_GROUP_1, + "2:85,67&mask:2:0xff,0xff", + ), + (self._p4rt_acl_rule_obj.SAI_ATTR_TABLE_ID, table_asic_db_key), + (self._p4rt_acl_rule_obj.SAI_ATTR_SET_POLICER, meter_asic_db_key1), + (self._p4rt_acl_rule_obj.SAI_ATTR_COUNTER, counter_asic_db_key1), + (self._p4rt_acl_rule_obj.SAI_ATTR_ADMIN_STATE, "true"), + (self._p4rt_acl_rule_obj.SAI_ATTR_PRIORITY, "100"), + ] + util.verify_attr(fvs, attr_list) + + # Update action and meter of rule 1 to punt_and_set_tc + rule_json_key1 = '{"match/ether_type":"0x0800","match/ether_dst":"00:1a:11:17:5f:80","match/src_ipv6_64bit":"fdf8:f53b:82e4::","match/arp_tpa":"0xff665543","priority":100}' + action = "punt_and_set_tc" + meter_cir = "100" + meter_cbs = "100" + meter_pir = "400" + meter_pbs = "400" + table_name_with_rule_key1 = table_name + ":" + rule_json_key1 + + attr_list = [ + (self._p4rt_acl_rule_obj.ACTION, action), + ("param/traffic_class", "2"), + (self._p4rt_acl_rule_obj.METER_CIR, meter_cir), + (self._p4rt_acl_rule_obj.METER_CBURST, meter_cbs), + (self._p4rt_acl_rule_obj.METER_PIR, meter_pir), + (self._p4rt_acl_rule_obj.METER_PBURST, meter_pbs), + ] + + self._p4rt_acl_rule_obj.set_app_db_entry(table_name_with_rule_key1, attr_list) + util.verify_response( + self.response_consumer, + table_name_with_rule_key1, + attr_list, + "SWSS_RC_SUCCESS", + ) + + # query application database for ACL rules + acl_rules = util.get_keys( + self._p4rt_acl_rule_obj.appl_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, + ) + assert len(acl_rules) == len(original_appl_acl_rules) + 1 + + # query application database for updated ACL rule + (status, fvs) = util.get_key( + self._p4rt_acl_rule_obj.appl_db, + self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME, + table_name_with_rule_key1, + ) + assert status == True + util.verify_attr(fvs, attr_list) + + # query application state database for ACL rules + state_acl_rules = util.get_keys( + self._p4rt_acl_rule_obj.appl_state_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, + ) + assert len(state_acl_rules) == len(original_appl_state_acl_rules) + 1 + + # query application state database for updated ACL rule + (status, fvs) = util.get_key( + self._p4rt_acl_rule_obj.appl_state_db, + self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME, + table_name_with_rule_key1, + ) + assert status == True + util.verify_attr(fvs, attr_list) + + # query ASIC database for ACL counters + acl_asic_counters = util.get_keys( + self._p4rt_acl_counter_obj.asic_db, + self._p4rt_acl_counter_obj.ASIC_DB_TBL_NAME, + ) + assert len(acl_asic_counters) == len(original_asic_acl_counters) + 1 + + # query ASIC database for the ACL counter + counter_asic_db_keys = [ + key for key in acl_asic_counters if key not in original_asic_acl_counters + ] + assert len(counter_asic_db_keys) == 1 + counter_asic_db_key1 = counter_asic_db_keys[0] + + (status, fvs) = util.get_key( + self._p4rt_acl_counter_obj.asic_db, + self._p4rt_acl_counter_obj.ASIC_DB_TBL_NAME, + counter_asic_db_key1, + ) + assert status == True + attr_list = [ + (self._p4rt_acl_counter_obj.SAI_ATTR_ENABLE_PACKET_COUNT, "true"), + (self._p4rt_acl_counter_obj.SAI_ATTR_ENABLE_BYTE_COUNT, "true"), + (self._p4rt_acl_counter_obj.SAI_ATTR_TABLE_ID, table_asic_db_key), + ] + util.verify_attr(fvs, attr_list) + + # query ASIC database for ACL meters + acl_asic_meters = util.get_keys( + self._p4rt_acl_meter_obj.asic_db, self._p4rt_acl_meter_obj.ASIC_DB_TBL_NAME + ) + assert len(acl_asic_meters) == len(original_asic_acl_meters) + 1 + + # query ASIC database for updated ACL meter + meter_asic_db_keys = [ + key for key in acl_asic_meters if key not in original_asic_acl_meters + ] + assert len(meter_asic_db_keys) == 1 + meter_asic_db_key1 = meter_asic_db_keys[0] + + (status, fvs) = util.get_key( + self._p4rt_acl_meter_obj.asic_db, + self._p4rt_acl_meter_obj.ASIC_DB_TBL_NAME, + meter_asic_db_key1, + ) + assert status == True + attr_list = [ + (self._p4rt_acl_meter_obj.SAI_ATTR_METER_TYPE, "SAI_METER_TYPE_PACKETS"), + (self._p4rt_acl_meter_obj.SAI_ATTR_METER_MODE, "SAI_POLICER_MODE_TR_TCM"), + (self._p4rt_acl_meter_obj.SAI_ATTR_METER_CIR, meter_cir), + (self._p4rt_acl_meter_obj.SAI_ATTR_METER_CBS, meter_cbs), + (self._p4rt_acl_meter_obj.SAI_ATTR_METER_PIR, meter_pir), + (self._p4rt_acl_meter_obj.SAI_ATTR_METER_PBS, meter_pbs), + ( + self._p4rt_acl_meter_obj.SAI_ATTR_RED_PACKET_ACTION, + "SAI_PACKET_ACTION_TRAP", + ), + ] + util.verify_attr(fvs, attr_list) + + # query ASIC database for ACL rules + acl_asic_rules = util.get_keys( + self._p4rt_acl_rule_obj.asic_db, self._p4rt_acl_rule_obj.ASIC_DB_TBL_NAME + ) + assert len(acl_asic_rules) == len(original_asic_acl_rules) + 1 + + # query ASIC database for updated ACL rule + rule_asic_db_keys = [ + key for key in acl_asic_rules if key not in original_asic_acl_rules + ] + assert len(rule_asic_db_keys) == 1 + rule_asic_db_key1 = rule_asic_db_keys[0] + + (status, fvs) = util.get_key( + self._p4rt_acl_rule_obj.asic_db, + self._p4rt_acl_rule_obj.ASIC_DB_TBL_NAME, + rule_asic_db_key1, + ) + assert status == True + attr_list = [ + (self._p4rt_acl_rule_obj.SAI_ATTR_ACTION_SET_TC, "2"), + (self._p4rt_acl_rule_obj.SAI_ATTR_ACTION_PACKET_ACTION, "disabled"), + (self._p4rt_acl_rule_obj.SAI_ATTR_MATCH_ETHER_TYPE, "2048&mask:0xffff"), + ( + self._p4rt_acl_rule_obj.SAI_ATTR_MATCH_IP_TYPE, + "SAI_ACL_IP_TYPE_ANY&mask:0xffffffffffffffff", + ), + ( + self._p4rt_acl_rule_obj.SAI_ATTR_MATCH_DST_MAC, + "00:1A:11:17:5F:80&mask:FF:FF:FF:FF:FF:FF", + ), + ( + self._p4rt_acl_rule_obj.SAI_ATTR_MATCH_SRC_IPV6_WORD3, + "fdf8:f53b::&mask:ffff:ffff::", + ), + ( + self._p4rt_acl_rule_obj.SAI_ATTR_MATCH_SRC_IPV6_WORD2, + "0:0:82e4::&mask:0:0:ffff:ffff::", + ), + ( + self._p4rt_acl_rule_obj.SAI_ATTR_MATCH_UDF_GROUP_MIN, + "2:255,102&mask:2:0xff,0xff", + ), + ( + self._p4rt_acl_rule_obj.SAI_ATTR_MATCH_UDF_GROUP_1, + "2:85,67&mask:2:0xff,0xff", + ), + (self._p4rt_acl_rule_obj.SAI_ATTR_TABLE_ID, table_asic_db_key), + (self._p4rt_acl_rule_obj.SAI_ATTR_SET_POLICER, meter_asic_db_key1), + (self._p4rt_acl_rule_obj.SAI_ATTR_COUNTER, counter_asic_db_key1), + (self._p4rt_acl_rule_obj.SAI_ATTR_ADMIN_STATE, "true"), + (self._p4rt_acl_rule_obj.SAI_ATTR_PRIORITY, "100"), + ] + util.verify_attr(fvs, attr_list) + + # create ACL rule 2 with QOS_QUEUE action + rule_json_key2 = '{"match/is_ip":"0x1","match/ether_type":"0x0800 & 0xFFFF","match/ether_dst":"AA:BB:CC:DD:EE:FF & FF:FF:FF:FF:FF:FF","priority":100}' + action = "qos_queue" + meter_cir = "80" + meter_cbs = "80" + meter_pir = "200" + meter_pbs = "200" + table_name_with_rule_key2 = table_name + ":" + rule_json_key2 + + attr_list = [ + (self._p4rt_acl_rule_obj.ACTION, action), + ("param/cpu_queue", "5"), + (self._p4rt_acl_rule_obj.METER_CIR, meter_cir), + (self._p4rt_acl_rule_obj.METER_CBURST, meter_cbs), + (self._p4rt_acl_rule_obj.METER_PIR, meter_pir), + (self._p4rt_acl_rule_obj.METER_PBURST, meter_pbs), + ] + + self._p4rt_acl_rule_obj.set_app_db_entry(table_name_with_rule_key2, attr_list) + util.verify_response( + self.response_consumer, + table_name_with_rule_key2, + attr_list, + "SWSS_RC_SUCCESS", + ) + + # query application database for ACL rules + acl_rules = util.get_keys( + self._p4rt_acl_rule_obj.appl_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, + ) + assert len(acl_rules) == len(original_appl_acl_rules) + 2 + + # query application database for newly created ACL rule + (status, fvs) = util.get_key( + self._p4rt_acl_rule_obj.appl_db, + self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME, + table_name_with_rule_key2, + ) + assert status == True + util.verify_attr(fvs, attr_list) + + # query application state database for ACL rules + state_acl_rules = util.get_keys( + self._p4rt_acl_rule_obj.appl_state_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, + ) + assert len(state_acl_rules) == len(original_appl_state_acl_rules) + 2 + + # query application state database for newly created ACL rule + (status, fvs) = util.get_key( + self._p4rt_acl_rule_obj.appl_state_db, + self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME, + table_name_with_rule_key2, + ) + assert status == True + util.verify_attr(fvs, attr_list) + + # query ASIC database for ACL counters + acl_asic_counters = util.get_keys( + self._p4rt_acl_counter_obj.asic_db, + self._p4rt_acl_counter_obj.ASIC_DB_TBL_NAME, + ) + assert len(acl_asic_counters) == len(original_asic_acl_counters) + 2 + + # query ASIC database for newly created ACL counter + counter_asic_db_keys = [ + key + for key in acl_asic_counters + if key not in original_asic_acl_counters and key != counter_asic_db_key1 + ] + assert len(counter_asic_db_keys) == 1 + counter_asic_db_key2 = counter_asic_db_keys[0] + + (status, fvs) = util.get_key( + self._p4rt_acl_counter_obj.asic_db, + self._p4rt_acl_counter_obj.ASIC_DB_TBL_NAME, + counter_asic_db_key2, + ) + assert status == True + attr_list = [ + (self._p4rt_acl_counter_obj.SAI_ATTR_ENABLE_PACKET_COUNT, "true"), + (self._p4rt_acl_counter_obj.SAI_ATTR_ENABLE_BYTE_COUNT, "true"), + (self._p4rt_acl_counter_obj.SAI_ATTR_TABLE_ID, table_asic_db_key), + ] + util.verify_attr(fvs, attr_list) + + # query ASIC database for ACL meters + acl_asic_meters = util.get_keys( + self._p4rt_acl_meter_obj.asic_db, self._p4rt_acl_meter_obj.ASIC_DB_TBL_NAME + ) + assert len(acl_asic_meters) == len(original_asic_acl_meters) + 2 + + # query ASIC database for newly created ACL meter + meter_asic_db_keys = [ + key + for key in acl_asic_meters + if key not in original_asic_acl_meters and key != meter_asic_db_key1 + ] + assert len(meter_asic_db_keys) == 1 + meter_asic_db_key2 = meter_asic_db_keys[0] + + (status, fvs) = util.get_key( + self._p4rt_acl_meter_obj.asic_db, + self._p4rt_acl_meter_obj.ASIC_DB_TBL_NAME, + meter_asic_db_key2, + ) + assert status == True + attr_list = [ + (self._p4rt_acl_meter_obj.SAI_ATTR_METER_TYPE, "SAI_METER_TYPE_PACKETS"), + (self._p4rt_acl_meter_obj.SAI_ATTR_METER_MODE, "SAI_POLICER_MODE_TR_TCM"), + (self._p4rt_acl_meter_obj.SAI_ATTR_METER_CIR, meter_cir), + (self._p4rt_acl_meter_obj.SAI_ATTR_METER_CBS, meter_cbs), + (self._p4rt_acl_meter_obj.SAI_ATTR_METER_PIR, meter_pir), + (self._p4rt_acl_meter_obj.SAI_ATTR_METER_PBS, meter_pbs), + ] + util.verify_attr(fvs, attr_list) + + # query ASIC database for trap groups + trap_group_keys = util.get_keys( + self._p4rt_trap_group_obj.asic_db, + self._p4rt_trap_group_obj.ASIC_DB_TBL_NAME, + ) + # default trap groups in and one trap groups per cpu queue + # are defined in files/image_config/copp/copp_cfg.j2 + # get trap group with cpu queue num 5 + for key in trap_group_keys: + (status, fvs) = util.get_key( + self._p4rt_trap_group_obj.asic_db, + self._p4rt_trap_group_obj.ASIC_DB_TBL_NAME, + key, + ) + assert status == True + if fvs[0][1] == "5": + trap_group_asic_db_key = key + break + + # query ASIC database for user defined traps + user_trap_keys = util.get_keys( + self._p4rt_user_trap_obj.asic_db, self._p4rt_user_trap_obj.ASIC_DB_TBL_NAME + ) + assert len(user_trap_keys) == 8 + + # get user trap with trap group oid + for key in user_trap_keys: + (status, fvs) = util.get_key( + self._p4rt_user_trap_obj.asic_db, + self._p4rt_user_trap_obj.ASIC_DB_TBL_NAME, + key, + ) + assert status == True + if ( + fvs[0][1] == trap_group_asic_db_key + or fvs[1][1] == trap_group_asic_db_key + ): + user_trap_asic_db_key = key + break + + # query ASIC database for ACL rules + acl_asic_rules = util.get_keys( + self._p4rt_acl_rule_obj.asic_db, self._p4rt_acl_rule_obj.ASIC_DB_TBL_NAME + ) + assert len(acl_asic_rules) == len(original_asic_acl_rules) + 2 + + # query ASIC database for newly created ACL rule + rule_asic_db_keys = [ + key + for key in acl_asic_rules + if key not in original_asic_acl_rules and key != rule_asic_db_key1 + ] + assert len(rule_asic_db_keys) == 1 + rule_asic_db_key2 = rule_asic_db_keys[0] + + (status, fvs) = util.get_key( + self._p4rt_acl_rule_obj.asic_db, + self._p4rt_acl_rule_obj.ASIC_DB_TBL_NAME, + rule_asic_db_key2, + ) + assert status == True + attr_list = [ + ( + self._p4rt_acl_rule_obj.SAI_ATTR_ACTION_SET_USER_TRAP_ID, + user_trap_asic_db_key, + ), + ( + self._p4rt_acl_rule_obj.SAI_ATTR_ACTION_PACKET_ACTION, + "SAI_PACKET_ACTION_TRAP", + ), + (self._p4rt_acl_rule_obj.SAI_ATTR_MATCH_ETHER_TYPE, "2048&mask:0xffff"), + ( + self._p4rt_acl_rule_obj.SAI_ATTR_MATCH_IP_TYPE, + "SAI_ACL_IP_TYPE_IP&mask:0xffffffffffffffff", + ), + ( + self._p4rt_acl_rule_obj.SAI_ATTR_MATCH_DST_MAC, + "AA:BB:CC:DD:EE:FF&mask:FF:FF:FF:FF:FF:FF", + ), + (self._p4rt_acl_rule_obj.SAI_ATTR_TABLE_ID, table_asic_db_key), + (self._p4rt_acl_rule_obj.SAI_ATTR_SET_POLICER, meter_asic_db_key2), + (self._p4rt_acl_rule_obj.SAI_ATTR_COUNTER, counter_asic_db_key2), + (self._p4rt_acl_rule_obj.SAI_ATTR_ADMIN_STATE, "true"), + (self._p4rt_acl_rule_obj.SAI_ATTR_PRIORITY, "100"), + ] + util.verify_attr(fvs, attr_list) + + # remove ACL rule 1 + self._p4rt_acl_rule_obj.remove_app_db_entry(table_name_with_rule_key1) + util.verify_response( + self.response_consumer, table_name_with_rule_key1, [], "SWSS_RC_SUCCESS" + ) + + # query application database for ACL rules + acl_rules = util.get_keys( + self._p4rt_acl_rule_obj.appl_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, + ) + assert len(acl_rules) == len(original_appl_acl_rules) + 1 + + # verify that the ACL rule no longer exists in application database + (status, fvs) = util.get_key( + self._p4rt_acl_rule_obj.appl_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME, + table_name_with_rule_key1, + ) + assert status == False + + # query application state database for ACL rules + state_acl_rules = util.get_keys( + self._p4rt_acl_rule_obj.appl_state_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, + ) + assert len(state_acl_rules) == len(original_appl_state_acl_rules) + 1 + + # verify that the ACL rule no longer exists in application state database + (status, fvs) = util.get_key( + self._p4rt_acl_rule_obj.appl_state_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME, + table_name_with_rule_key1, + ) + assert status == False + + # query ASIC database for ACL rules + acl_rules = util.get_keys( + self._p4rt_acl_rule_obj.asic_db, self._p4rt_acl_rule_obj.ASIC_DB_TBL_NAME + ) + assert len(acl_rules) == len(original_asic_acl_rules) + 1 + + # verify that removed ACL rule no longer exists in ASIC database + (status, fvs) = util.get_key( + self._p4rt_acl_rule_obj.asic_db, + self._p4rt_acl_rule_obj.ASIC_DB_TBL_NAME, + rule_asic_db_key1, + ) + assert status == False + + # verify that removed ACL counter no longer exists in ASIC database + (status, fvs) = util.get_key( + self._p4rt_acl_counter_obj.asic_db, + self._p4rt_acl_counter_obj.ASIC_DB_TBL_NAME, + counter_asic_db_key1, + ) + assert status == False + + # verify that removed ACL meter no longer exists in ASIC database + (status, fvs) = util.get_key( + self._p4rt_acl_meter_obj.asic_db, + self._p4rt_acl_meter_obj.ASIC_DB_TBL_NAME, + meter_asic_db_key1, + ) + assert status == False + + # remove ACL rule 2 + self._p4rt_acl_rule_obj.remove_app_db_entry(table_name_with_rule_key2) + util.verify_response( + self.response_consumer, table_name_with_rule_key2, [], "SWSS_RC_SUCCESS" + ) + + # query application database for ACL rules + acl_rules = util.get_keys( + self._p4rt_acl_rule_obj.appl_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, + ) + assert len(acl_rules) == len(original_appl_acl_rules) + + # verify that the ACL rule no longer exists in application database + (status, fvs) = util.get_key( + self._p4rt_acl_rule_obj.appl_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME, + table_name_with_rule_key2, + ) + assert status == False + + # query application state database for ACL rules + state_acl_rules = util.get_keys( + self._p4rt_acl_rule_obj.appl_state_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, + ) + assert len(state_acl_rules) == len(original_appl_state_acl_rules) + + # verify that the ACL rule no longer exists in application state database + (status, fvs) = util.get_key( + self._p4rt_acl_rule_obj.appl_state_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME, + table_name_with_rule_key2, + ) + assert status == False + + # query ASIC database for ACL rules + acl_rules = util.get_keys( + self._p4rt_acl_rule_obj.asic_db, self._p4rt_acl_rule_obj.ASIC_DB_TBL_NAME + ) + assert len(acl_rules) == len(original_asic_acl_rules) + + # verify that removed ACL rule no longer exists in ASIC database + (status, fvs) = util.get_key( + self._p4rt_acl_rule_obj.asic_db, + self._p4rt_acl_rule_obj.ASIC_DB_TBL_NAME, + rule_asic_db_key2, + ) + assert status == False + + # verify that removed ACL counter no longer exists in ASIC database + (status, fvs) = util.get_key( + self._p4rt_acl_counter_obj.asic_db, + self._p4rt_acl_counter_obj.ASIC_DB_TBL_NAME, + counter_asic_db_key2, + ) + assert status == False + + # verify that removed ACL meter no longer exists in ASIC database + (status, fvs) = util.get_key( + self._p4rt_acl_meter_obj.asic_db, + self._p4rt_acl_meter_obj.ASIC_DB_TBL_NAME, + meter_asic_db_key2, + ) + assert status == False + + # remove ACL table + self._p4rt_acl_table_definition_obj.remove_app_db_entry( + self._p4rt_acl_table_definition_obj.TBL_NAME + ":" + table_name + ) + util.verify_response( + self.response_consumer, + self._p4rt_acl_table_definition_obj.TBL_NAME + ":" + table_name, + [], + "SWSS_RC_SUCCESS", + ) + + # query application database for ACL tables + acl_tables = util.get_keys( + self._p4rt_acl_table_definition_obj.appl_db, + self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME + + ":" + + self._p4rt_acl_table_definition_obj.TBL_NAME, + ) + assert len(acl_tables) == len(original_appl_acl_tables) + + # verify that the ACL table no longer exists in application database + (status, fvs) = util.get_key( + self._p4rt_acl_table_definition_obj.appl_db, + self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME, + self._p4rt_acl_table_definition_obj.TBL_NAME + ":" + table_name, + ) + assert status == False + + # query application state database for ACL tables + state_acl_tables = util.get_keys( + self._p4rt_acl_table_definition_obj.appl_state_db, + self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME + + ":" + + self._p4rt_acl_table_definition_obj.TBL_NAME, + ) + assert len(state_acl_tables) == len(original_appl_state_acl_tables) + + # verify that the ACL table no longer exists in application state database + (status, fvs) = util.get_key( + self._p4rt_acl_table_definition_obj.appl_state_db, + self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME, + self._p4rt_acl_table_definition_obj.TBL_NAME + ":" + table_name, + ) + assert status == False + + # query ASIC database for ACL tables + acl_tables = util.get_keys( + self._p4rt_acl_table_definition_obj.asic_db, + self._p4rt_acl_table_definition_obj.ASIC_DB_TBL_NAME, + ) + assert len(acl_tables) == len(original_asic_acl_tables) + + # verify that removed ACL table no longer exists in ASIC database + (status, fvs) = util.get_key( + self._p4rt_acl_table_definition_obj.asic_db, + self._p4rt_acl_table_definition_obj.ASIC_DB_TBL_NAME, + table_asic_db_key, + ) + assert status == False + + def test_AclRuleAddWithoutTableDefinitionFails(self, dvs, testlog): + # initialize ACL table objects and database connectors + self._set_up(dvs) + + # maintain list of original Application and ASIC DB ACL entries before adding + # new ACL rule + table_name = "ACL_PUNT_TABLE_RULE_TEST" + original_appl_acl_rules = util.get_keys( + self._p4rt_acl_rule_obj.appl_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, + ) + original_appl_state_acl_rules = util.get_keys( + self._p4rt_acl_rule_obj.appl_state_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, + ) + original_asic_acl_rules = util.get_keys( + self._p4rt_acl_rule_obj.asic_db, self._p4rt_acl_rule_obj.ASIC_DB_TBL_NAME + ) + + # create ACL rule + rule_json_key = '{"match/ether_type":"0x0800","match/ether_dst":"00:1a:11:17:5f:80","match/src_ipv6_64bit":"fdf8:f53b:82e4::","match/arp_tpa":"0xff665543","priority":100}' + action = "copy_and_set_tc" + meter_cir = "80" + meter_cbs = "80" + meter_pir = "200" + meter_pbs = "200" + table_name_with_rule_key = table_name + ":" + rule_json_key + + attr_list = [ + (self._p4rt_acl_rule_obj.ACTION, action), + ("param/traffic_class", "1"), + (self._p4rt_acl_rule_obj.METER_CIR, meter_cir), + (self._p4rt_acl_rule_obj.METER_CBURST, meter_cbs), + (self._p4rt_acl_rule_obj.METER_PIR, meter_pir), + (self._p4rt_acl_rule_obj.METER_PBURST, meter_pbs), + ] + + self._p4rt_acl_rule_obj.set_app_db_entry(table_name_with_rule_key, attr_list) + util.verify_response( + self.response_consumer, + table_name_with_rule_key, + attr_list, + "SWSS_RC_INVALID_PARAM", + "[OrchAgent] Failed to find P4Orch Manager for ACL_PUNT_TABLE_RULE_TEST P4RT DB table", + ) + + # query application database for ACL rules + acl_rules = util.get_keys( + self._p4rt_acl_rule_obj.appl_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, + ) + assert len(acl_rules) == len(original_appl_acl_rules) + 1 + + # query application database for newly created ACL rule + (status, fvs) = util.get_key( + self._p4rt_acl_rule_obj.appl_db, + self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME, + table_name_with_rule_key, + ) + assert status == True + util.verify_attr(fvs, attr_list) + + # query application state database for ACL rules + state_acl_rules = util.get_keys( + self._p4rt_acl_rule_obj.appl_state_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, + ) + assert len(state_acl_rules) == len(original_appl_state_acl_rules) + + # query ASIC database for ACL rules + acl_asic_rules = util.get_keys( + self._p4rt_acl_rule_obj.asic_db, self._p4rt_acl_rule_obj.ASIC_DB_TBL_NAME + ) + assert len(acl_asic_rules) == len(original_asic_acl_rules) + + # query ASIC database for newly created ACL rule + rule_asic_db_keys = [ + key for key in acl_asic_rules if key not in original_asic_acl_rules + ] + assert len(rule_asic_db_keys) == 0 + + # cleanup application database + tbl = swsscommon.Table( + self._p4rt_acl_table_definition_obj.appl_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME, + ) + tbl._del(table_name_with_rule_key) diff --git a/tests/p4rt/test_p4rt_mirror.py b/tests/p4rt/test_p4rt_mirror.py new file mode 100644 index 00000000000..bc218df1474 --- /dev/null +++ b/tests/p4rt/test_p4rt_mirror.py @@ -0,0 +1,220 @@ +from swsscommon import swsscommon + +import util +import json + +class P4RtMirrorSessionWrapper(util.DBInterface): + """Interface to interact with APP DB and ASIC DB tables for P4RT mirror session object.""" + + # database and SAI constants + APP_DB_TBL_NAME = swsscommon.APP_P4RT_TABLE_NAME + TBL_NAME = swsscommon.APP_P4RT_MIRROR_SESSION_TABLE_NAME + ACTION = "action" + PORT = "port" + SRC_IP = "src_ip" + DST_IP = "dst_ip" + SRC_MAC = "src_mac" + DST_MAC = "dst_mac" + TTL = "ttl" + TOS = "tos" + + ASIC_DB_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_MIRROR_SESSION" + SAI_MIRROR_SESSION_ATTR_MONITOR_PORT = "SAI_MIRROR_SESSION_ATTR_MONITOR_PORT" + SAI_MIRROR_SESSION_ATTR_TYPE = "SAI_MIRROR_SESSION_ATTR_TYPE" + SAI_MIRROR_SESSION_ATTR_ERSPAN_ENCAPSULATION_TYPE = "SAI_MIRROR_SESSION_ATTR_ERSPAN_ENCAPSULATION_TYPE" + SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION = "SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION" + SAI_MIRROR_SESSION_ATTR_TOS = "SAI_MIRROR_SESSION_ATTR_TOS" + SAI_MIRROR_SESSION_ATTR_TTL = "SAI_MIRROR_SESSION_ATTR_TTL" + SAI_MIRROR_SESSION_ATTR_SRC_IP_ADDRESS = "SAI_MIRROR_SESSION_ATTR_SRC_IP_ADDRESS" + SAI_MIRROR_SESSION_ATTR_DST_IP_ADDRESS = "SAI_MIRROR_SESSION_ATTR_DST_IP_ADDRESS" + SAI_MIRROR_SESSION_ATTR_SRC_MAC_ADDRESS = "SAI_MIRROR_SESSION_ATTR_SRC_MAC_ADDRESS" + SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS = "SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS" + SAI_MIRROR_SESSION_ATTR_GRE_PROTOCOL_TYPE = "SAI_MIRROR_SESSION_ATTR_GRE_PROTOCOL_TYPE" + + def generate_app_db_key(self, mirror_session_id): + d = {} + d[util.prepend_match_field("mirror_session_id")] = mirror_session_id + key = json.dumps(d, separators=(",", ":")) + return self.TBL_NAME + ":" + key + +class TestP4RTMirror(object): + def _set_up(self, dvs): + self._p4rt_mirror_session_wrapper = P4RtMirrorSessionWrapper() + self._p4rt_mirror_session_wrapper.set_up_databases(dvs) + self._response_consumer = swsscommon.NotificationConsumer( + self._p4rt_mirror_session_wrapper.appl_state_db, "APPL_DB_P4RT_TABLE_RESPONSE_CHANNEL") + + def test_MirrorSessionAddModifyAndDelete(self, dvs, testlog): + # Initialize database connectors + self._set_up(dvs) + + # Maintain list of original Application and ASIC DB entries before adding + # new mirror session + original_appl_mirror_entries = util.get_keys( + self._p4rt_mirror_session_wrapper.appl_db, + self._p4rt_mirror_session_wrapper.APP_DB_TBL_NAME + ":" + self._p4rt_mirror_session_wrapper.TBL_NAME) + original_appl_state_mirror_entries = util.get_keys( + self._p4rt_mirror_session_wrapper.appl_state_db, + self._p4rt_mirror_session_wrapper.APP_DB_TBL_NAME + ":" + self._p4rt_mirror_session_wrapper.TBL_NAME) + original_asic_mirror_entries = util.get_keys( + self._p4rt_mirror_session_wrapper.asic_db, self._p4rt_mirror_session_wrapper.ASIC_DB_TBL_NAME) + + # 1. Create mirror session + mirror_session_id = "mirror_session1" + action = "mirror_as_ipv4_erspan" + port = "Ethernet8" + src_ip = "10.206.196.31" + dst_ip = "172.20.0.203" + src_mac = "00:02:03:04:05:06" + dst_mac = "00:1A:11:17:5F:80" + ttl = "0x40" + tos = "0x00" + + attr_list_in_app_db = [(self._p4rt_mirror_session_wrapper.ACTION, action), + (util.prepend_param_field(self._p4rt_mirror_session_wrapper.PORT), port), + (util.prepend_param_field(self._p4rt_mirror_session_wrapper.SRC_IP), src_ip), + (util.prepend_param_field(self._p4rt_mirror_session_wrapper.DST_IP), dst_ip), + (util.prepend_param_field(self._p4rt_mirror_session_wrapper.SRC_MAC), src_mac), + (util.prepend_param_field(self._p4rt_mirror_session_wrapper.DST_MAC), dst_mac), + (util.prepend_param_field(self._p4rt_mirror_session_wrapper.TTL), ttl), + (util.prepend_param_field(self._p4rt_mirror_session_wrapper.TOS), tos)] + mirror_session_key = self._p4rt_mirror_session_wrapper.generate_app_db_key( + mirror_session_id) + self._p4rt_mirror_session_wrapper.set_app_db_entry( + mirror_session_key, attr_list_in_app_db) + util.verify_response( + self._response_consumer, mirror_session_key, attr_list_in_app_db, "SWSS_RC_SUCCESS") + + # Query application database for mirror entries + appl_mirror_entries = util.get_keys( + self._p4rt_mirror_session_wrapper.appl_db, + self._p4rt_mirror_session_wrapper.APP_DB_TBL_NAME + ":" + self._p4rt_mirror_session_wrapper.TBL_NAME) + assert len(appl_mirror_entries) == len(original_appl_mirror_entries) + 1 + + # Query application database for newly created mirror key + (status, fvs) = util.get_key(self._p4rt_mirror_session_wrapper.appl_db, + self._p4rt_mirror_session_wrapper.APP_DB_TBL_NAME, + mirror_session_key) + assert status == True + util.verify_attr(fvs, attr_list_in_app_db) + + # Query application state database for mirror entries + appl_state_mirror_entries = util.get_keys( + self._p4rt_mirror_session_wrapper.appl_state_db, + self._p4rt_mirror_session_wrapper.APP_DB_TBL_NAME + ":" + self._p4rt_mirror_session_wrapper.TBL_NAME) + assert len(appl_state_mirror_entries) == len(original_appl_state_mirror_entries) + 1 + + # Query application state database for newly created mirror key + (status, fvs) = util.get_key(self._p4rt_mirror_session_wrapper.appl_state_db, + self._p4rt_mirror_session_wrapper.APP_DB_TBL_NAME, + mirror_session_key) + assert status == True + util.verify_attr(fvs, attr_list_in_app_db) + + # Query ASIC database for mirror entries + asic_mirror_entries = util.get_keys(self._p4rt_mirror_session_wrapper.asic_db, + self._p4rt_mirror_session_wrapper.ASIC_DB_TBL_NAME) + assert len(asic_mirror_entries) == len(original_asic_mirror_entries) + 1 + + # Query ASIC database for newly created mirror key + asic_db_key = None + for key in asic_mirror_entries: + # Get newly created entry + if key not in original_asic_mirror_entries: + asic_db_key = key + break + assert asic_db_key is not None + (status, fvs) = util.get_key(self._p4rt_mirror_session_wrapper.asic_db, + self._p4rt_mirror_session_wrapper.ASIC_DB_TBL_NAME, + asic_db_key) + assert status == True + + # Get oid of Ethernet8 + port_oid = util.get_port_oid_by_name(dvs, port) + assert port_oid != None + + expected_attr_list_in_asic_db = [ + (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_MONITOR_PORT, port_oid), + (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_TYPE, "SAI_MIRROR_SESSION_TYPE_ENHANCED_REMOTE"), + (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_ERSPAN_ENCAPSULATION_TYPE, "SAI_ERSPAN_ENCAPSULATION_TYPE_MIRROR_L3_GRE_TUNNEL"), + (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION, "4"), # MIRROR_SESSION_DEFAULT_IP_HDR_VER + (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_TOS, "0"), + (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_TTL, "64"), + (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_SRC_IP_ADDRESS, src_ip), + (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_DST_IP_ADDRESS, dst_ip), + (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_SRC_MAC_ADDRESS, src_mac), + (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS, dst_mac), + (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_GRE_PROTOCOL_TYPE, "35006") # GRE_PROTOCOL_ERSPAN 0x88be + ] + util.verify_attr(fvs, expected_attr_list_in_asic_db) + + # 2. Modify the existing mirror session. + new_dst_mac = "00:1A:11:17:5F:FF" + attr_list_in_app_db[5] = (util.prepend_param_field(self._p4rt_mirror_session_wrapper.DST_MAC), new_dst_mac) + self._p4rt_mirror_session_wrapper.set_app_db_entry( + mirror_session_key, attr_list_in_app_db) + util.verify_response( + self._response_consumer, mirror_session_key, attr_list_in_app_db, "SWSS_RC_SUCCESS") + + # Query application database for the modified mirror key + (status, fvs) = util.get_key(self._p4rt_mirror_session_wrapper.appl_db, + self._p4rt_mirror_session_wrapper.APP_DB_TBL_NAME, + mirror_session_key) + assert status == True + util.verify_attr(fvs, attr_list_in_app_db) + + # Query application state database for the modified mirror key + (status, fvs) = util.get_key(self._p4rt_mirror_session_wrapper.appl_state_db, + self._p4rt_mirror_session_wrapper.APP_DB_TBL_NAME, + mirror_session_key) + assert status == True + util.verify_attr(fvs, attr_list_in_app_db) + + # Query ASIC DB about the modified mirror session. + expected_attr_list_in_asic_db[9] = (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS, new_dst_mac) + (status, fvs) = util.get_key(self._p4rt_mirror_session_wrapper.asic_db, + self._p4rt_mirror_session_wrapper.ASIC_DB_TBL_NAME, + asic_db_key) + assert status == True + util.verify_attr(fvs, expected_attr_list_in_asic_db) + + # 3. Delete the mirror session. + self._p4rt_mirror_session_wrapper.remove_app_db_entry( + mirror_session_key) + util.verify_response( + self._response_consumer, mirror_session_key, [], "SWSS_RC_SUCCESS") + + # Query application database for mirror entries + appl_mirror_entries = util.get_keys( + self._p4rt_mirror_session_wrapper.appl_db, + self._p4rt_mirror_session_wrapper.APP_DB_TBL_NAME + ":" + self._p4rt_mirror_session_wrapper.TBL_NAME) + assert len(appl_mirror_entries) == len(original_appl_mirror_entries) + + # Query application database for the deleted mirror key + (status, fvs) = util.get_key(self._p4rt_mirror_session_wrapper.appl_db, + self._p4rt_mirror_session_wrapper.APP_DB_TBL_NAME, + mirror_session_key) + assert status == False + + # Query application state database for mirror entries + appl_state_mirror_entries = util.get_keys( + self._p4rt_mirror_session_wrapper.appl_state_db, + self._p4rt_mirror_session_wrapper.APP_DB_TBL_NAME + ":" + self._p4rt_mirror_session_wrapper.TBL_NAME) + assert len(appl_state_mirror_entries) == len(original_appl_state_mirror_entries) + + # Query application state database for the deleted mirror key + (status, fvs) = util.get_key(self._p4rt_mirror_session_wrapper.appl_state_db, + self._p4rt_mirror_session_wrapper.APP_DB_TBL_NAME, + mirror_session_key) + assert status == False + + # Query ASIC database for mirror entries + asic_mirror_entries = util.get_keys(self._p4rt_mirror_session_wrapper.asic_db, + self._p4rt_mirror_session_wrapper.ASIC_DB_TBL_NAME) + assert len(asic_mirror_entries) == len(original_asic_mirror_entries) + + # Query ASIC state database for the deleted mirror key + (status, fvs) = util.get_key(self._p4rt_mirror_session_wrapper.asic_db, + self._p4rt_mirror_session_wrapper.ASIC_DB_TBL_NAME, + asic_db_key) + assert status == False diff --git a/tests/p4rt/util.py b/tests/p4rt/util.py new file mode 100644 index 00000000000..831c7a5cbec --- /dev/null +++ b/tests/p4rt/util.py @@ -0,0 +1,135 @@ +""" Defines common P4RT utility functions.""" +from swsscommon import swsscommon + +import time + + +def _set_up_appl_db(dvs): + """ Initializes application database connector.""" + return swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + + +def _set_up_asic_db(dvs): + """ Initializes ASIC database connector.""" + return swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + +def _set_up_appl_state_db(dvs): + """ Initializes APPL STATE database connector.""" + return swsscommon.DBConnector(swsscommon.APPL_STATE_DB, dvs.redis_sock, 0) + + +def get_keys(db, tbl_name): + """ Retrieves keys from given database and table.""" + tbl = swsscommon.Table(db, tbl_name) + return tbl.getKeys() + + +def get_key(db, tbl_name, key): + """ Retrieves entry corresponding to given key in given database and table.""" + tbl = swsscommon.Table(db, tbl_name) + return tbl.get(key) + + +def verify_attr(fvs, attr_list): + """ Verifies attribute list for given key in a database table.""" + assert len(fvs) == len(attr_list) + d = dict(attr_list) + for fv in fvs: + if fv[0] in d: + assert fv[1] == d[fv[0]] + else: + assert False + +def prepend_match_field(match_field): + return "match/" + match_field + +def prepend_param_field(param_field): + return "param/" + param_field + +def verify_response(consumer, key, attr_list, status, err_message = "SWSS_RC_SUCCESS"): + """ Verifies a response.""" + consumer.readData() + (op, data, values) = consumer.pop() + assert data == key + assert op == status + assert len(values) >= 1 + assert values[0][0] == "err_str" + assert values[0][1] == err_message + values = values[1:] + verify_attr(values, attr_list) + + +def check_syslog(dvs, marker, process, err_log, expected_cnt): + """ Checks syslog on dvs docker. + + Scans /var/log/syslog for expected count (expected_cnt) of the error + log(err_log). Filters Logs starting at timestamp marked by "marker" based on + the given process. + """ + (exitcode, num) = dvs.runcmd([ + "sh", "-c", + "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep %s | grep -E \'%s\' | wc -l" + % (marker, process, err_log) + ]) + assert num.strip() == str(expected_cnt) + +def get_port_oid_by_name(dvs, port_name): + counters_db = swsscommon.DBConnector(swsscommon.COUNTERS_DB, dvs.redis_sock, 0) + port_map_tbl = swsscommon.Table(counters_db, "COUNTERS_PORT_NAME_MAP") + port_oid = None + for k in port_map_tbl.get("")[1]: + if k[0] == port_name: + port_oid = k[1] + return port_oid + +def initialize_interface(dvs, port_name, ip): + dvs.runcmd("config interface startup {}".format(port_name)) + dvs.runcmd("config interface ip add {} {}".format(port_name, ip)) + +def set_interface_status(dvs, if_name, status = "down", server = 0): + dvs.servers[0].runcmd("ip link set {} dev {}".format(status, if_name)) == 0 + time.sleep(1) + +class DBInterface(object): + """ Interface to interact with different redis databases on dvs.""" + + # common attribute fields for L3 objects + ACTION_FIELD = "action" + + def set_up_databases(self, dvs): + self.appl_db = _set_up_appl_db(dvs) + self.asic_db = _set_up_asic_db(dvs) + self.appl_state_db = _set_up_appl_state_db(dvs) + + def set_app_db_entry(self, key, attr_list): + fvs = swsscommon.FieldValuePairs(attr_list) + tbl = swsscommon.ProducerStateTable(self.appl_db, self.APP_DB_TBL_NAME) + tbl.set(key, fvs) + time.sleep(1) + + def remove_app_db_entry(self, key): + tbl = swsscommon.ProducerStateTable(self.appl_db, self.APP_DB_TBL_NAME) + tbl._del(key) + time.sleep(1) + + # Get list of original entries in redis on init. + def get_original_redis_entries(self, db_list): + self._original_entries = {} + for i in db_list: + db = i[0] + table = i[1] + self._original_entries["{}:{}".format(db, table)]= get_keys(db, table) + +class KeyToOidDBHelper(object): + """Provides helper APIs for P4RT key to OID mapping in Redis DB.""" + + # Table name in Redis DB for the mapping. + TBL_NAME = "P4RT_KEY_TO_OID" + KEY = "" + + def __init__(self, dvs): + self.table = swsscommon.Table(_set_up_appl_state_db(dvs), self.TBL_NAME) + + def get_db_info(self): + return self.table.get(self.KEY) diff --git a/tests/test_buffer_dynamic.py b/tests/test_buffer_dynamic.py index e44f2824f55..76ba36ee112 100644 --- a/tests/test_buffer_dynamic.py +++ b/tests/test_buffer_dynamic.py @@ -214,6 +214,7 @@ def test_changeSpeed(self, dvs, testlog): self.cleanup_db(dvs) + @pytest.mark.skip(reason="Failing. Under investigation") def test_changeCableLen(self, dvs, testlog): self.setup_db(dvs) diff --git a/tests/test_copp.py b/tests/test_copp.py index 19faac954f7..5885a489b52 100644 --- a/tests/test_copp.py +++ b/tests/test_copp.py @@ -151,17 +151,18 @@ "trap_action": "trap", "trap_priority": "5" } + copp_trap = { - "bgp,bgpv6": copp_group_queue4_group1, - "lacp": copp_group_queue4_group1, - "arp_req,arp_resp,neigh_discovery":copp_group_queue4_group2, - "lldp":copp_group_queue4_group3, - "dhcp,dhcpv6":copp_group_queue4_group3, - "udld":copp_group_queue4_group3, - "ip2me":copp_group_queue1_group1, - "src_nat_miss,dest_nat_miss": copp_group_queue1_group2, - "sample_packet": copp_group_queue2_group1, - "ttl_error": copp_group_default + "bgp": ["bgp;bgpv6", copp_group_queue4_group1], + "lacp": ["lacp", copp_group_queue4_group1, "always_enabled"], + "arp": ["arp_req;arp_resp;neigh_discovery", copp_group_queue4_group2, "always_enabled"], + "lldp": ["lldp", copp_group_queue4_group3], + "dhcp": ["dhcp;dhcpv6", copp_group_queue4_group3], + "udld": ["udld", copp_group_queue4_group3, "always_enabled"], + "ip2me": ["ip2me", copp_group_queue1_group1, "always_enabled"], + "nat": ["src_nat_miss;dest_nat_miss", copp_group_queue1_group2], + "sflow": ["sample_packet", copp_group_queue2_group1], + "ttl": ["ttl_error", copp_group_default] } disabled_traps = ["sample_packet"] @@ -201,7 +202,7 @@ def setup_copp(self, dvs): self.trap_ctbl = swsscommon.Table(self.cdb, "COPP_TRAP") self.trap_group_ctbl = swsscommon.Table(self.cdb, "COPP_GROUP") self.feature_tbl = swsscommon.Table(self.cdb, "FEATURE") - fvs = swsscommon.FieldValuePairs([("state", "disbled")]) + fvs = swsscommon.FieldValuePairs([("state", "disabled")]) self.feature_tbl.set("sflow", fvs) time.sleep(2) @@ -306,8 +307,12 @@ def test_defaults(self, dvs, testlog): self.setup_copp(dvs) trap_keys = self.trap_atbl.getKeys() for traps in copp_trap: - trap_ids = traps.split(",") - trap_group = copp_trap[traps] + trap_info = copp_trap[traps] + trap_ids = trap_info[0].split(";") + trap_group = trap_info[1] + always_enabled = False + if len(trap_info) > 2: + always_enabled = True for trap_id in trap_ids: trap_type = traps_to_trap_type[trap_id] trap_found = False @@ -325,6 +330,7 @@ def test_defaults(self, dvs, testlog): if trap_id not in disabled_traps: assert trap_found == True + def test_restricted_trap_sflow(self, dvs, testlog): self.setup_copp(dvs) fvs = swsscommon.FieldValuePairs([("state", "enabled")]) @@ -334,10 +340,14 @@ def test_restricted_trap_sflow(self, dvs, testlog): trap_keys = self.trap_atbl.getKeys() for traps in copp_trap: - trap_ids = traps.split(",") + trap_info = copp_trap[traps] + trap_ids = trap_info[0].split(";") + trap_group = trap_info[1] + always_enabled = False + if len(trap_info) > 2: + always_enabled = True if "sample_packet" not in trap_ids: continue - trap_group = copp_trap[traps] trap_found = False trap_type = traps_to_trap_type["sample_packet"] for key in trap_keys: @@ -363,10 +373,14 @@ def test_policer_set(self, dvs, testlog): trap_keys = self.trap_atbl.getKeys() for traps in copp_trap: - if copp_trap[traps] != copp_group_queue4_group2: + trap_info = copp_trap[traps] + trap_ids = trap_info[0].split(";") + trap_group = trap_info[1] + always_enabled = False + if len(trap_info) > 2: + always_enabled = True + if trap_group != copp_group_queue4_group2: continue - trap_ids = traps.split(",") - trap_group = copp_trap[traps] for trap_id in trap_ids: trap_type = traps_to_trap_type[trap_id] trap_found = False @@ -390,12 +404,19 @@ def test_trap_group_set(self, dvs, testlog): traps = "bgp,bgpv6" fvs = swsscommon.FieldValuePairs([("trap_group", "queue1_group1")]) self.trap_ctbl.set("bgp", fvs) - copp_trap[traps] = copp_group_queue1_group1 + + for c_trap in copp_trap: + trap_info = copp_trap[c_trap] + ids = trap_info[0].replace(';', ',') + if traps == ids: + break + + trap_info[1] = copp_group_queue1_group1 time.sleep(2) trap_keys = self.trap_atbl.getKeys() trap_ids = traps.split(",") - trap_group = copp_trap[traps] + trap_group = trap_info[1] for trap_id in trap_ids: trap_type = traps_to_trap_type[trap_id] trap_found = False @@ -423,8 +444,14 @@ def test_trap_ids_set(self, dvs, testlog): old_traps = "bgp,bgpv6" trap_keys = self.trap_atbl.getKeys() + for c_trap in copp_trap: + trap_info = copp_trap[c_trap] + ids = trap_info[0].replace(';', ',') + if old_traps == ids: + break + trap_ids = old_traps.split(",") - trap_group = copp_trap[old_traps] + trap_group = trap_info[1] for trap_id in trap_ids: trap_type = traps_to_trap_type[trap_id] trap_found = False @@ -451,7 +478,7 @@ def test_trap_ids_set(self, dvs, testlog): trap_keys = self.trap_atbl.getKeys() trap_ids = traps.split(",") - trap_group = copp_trap[traps] + trap_group = trap_info[1] for trap_id in trap_ids: trap_type = traps_to_trap_type[trap_id] trap_found = False @@ -478,10 +505,11 @@ def test_trap_action_set(self, dvs, testlog): trap_keys = self.trap_atbl.getKeys() for traps in copp_trap: - if copp_trap[traps] != copp_group_queue4_group1: + trap_info = copp_trap[traps] + if trap_info[1] != copp_group_queue4_group1: continue - trap_ids = traps.split(",") - trap_group = copp_trap[traps] + trap_ids = trap_info[0].split(";") + trap_group = trap_info[1] for trap_id in trap_ids: trap_type = traps_to_trap_type[trap_id] trap_found = False @@ -499,18 +527,21 @@ def test_trap_action_set(self, dvs, testlog): if trap_id not in disabled_traps: assert trap_found == True + def test_new_trap_add(self, dvs, testlog): self.setup_copp(dvs) global copp_trap traps = "eapol,isis,bfd_micro,bfdv6_micro,ldp" - fvs = swsscommon.FieldValuePairs([("trap_group", "queue1_group2"),("trap_ids", traps)]) + fvs = swsscommon.FieldValuePairs([("trap_group", "queue1_group2"),("trap_ids", traps),("always_enabled", "true")]) self.trap_ctbl.set(traps, fvs) - copp_trap[traps] = copp_group_queue1_group2 + + + copp_trap["eapol"] = [traps, copp_group_queue1_group2, "always_enabled"] time.sleep(2) trap_keys = self.trap_atbl.getKeys() trap_ids = traps.split(",") - trap_group = copp_trap[traps] + trap_group = copp_group_queue1_group2 for trap_id in trap_ids: trap_type = traps_to_trap_type[trap_id] trap_found = False @@ -534,13 +565,19 @@ def test_new_trap_del(self, dvs, testlog): traps = "eapol,isis,bfd_micro,bfdv6_micro,ldp" fvs = swsscommon.FieldValuePairs([("trap_group", "queue1_group2"),("trap_ids", traps)]) self.trap_ctbl.set(traps, fvs) - copp_trap[traps] = copp_group_queue1_group2 + for c_trap in copp_trap: + trap_info = copp_trap[c_trap] + ids = trap_info[0].replace(';', ',') + if traps == ids: + break + + trap_info[1] = copp_group_queue1_group2 time.sleep(2) self.trap_ctbl._del(traps) time.sleep(2) trap_ids = traps.split(",") - trap_group = copp_trap[traps] + trap_group = trap_info[1] trap_keys = self.trap_atbl.getKeys() for trap_id in trap_ids: trap_type = traps_to_trap_type[trap_id] @@ -568,14 +605,19 @@ def test_new_trap_group_add(self, dvs, testlog): fvs = swsscommon.FieldValuePairs(list_val) self.trap_group_ctbl.set("queue5_group1", fvs) traps = "igmp_v1_report" - t_fvs = swsscommon.FieldValuePairs([("trap_group", "queue5_group1"),("trap_ids", "igmp_v1_report")]) + t_fvs = swsscommon.FieldValuePairs([("trap_group", "queue5_group1"),("trap_ids", "igmp_v1_report"),("always_enabled", "true")]) self.trap_ctbl.set(traps, t_fvs) - copp_trap[traps] = copp_group_queue5_group1 + for c_trap in copp_trap: + trap_info = copp_trap[c_trap] + ids = trap_info[0].replace(';', ',') + if traps == ids: + break + trap_info[1] = copp_group_queue5_group1 time.sleep(2) trap_keys = self.trap_atbl.getKeys() trap_ids = traps.split(",") - trap_group = copp_trap[traps] + trap_group = trap_info[1] for trap_id in trap_ids: trap_type = traps_to_trap_type[trap_id] trap_found = False @@ -602,16 +644,21 @@ def test_new_trap_group_del(self, dvs, testlog): fvs = swsscommon.FieldValuePairs(list_val) self.trap_group_ctbl.set("queue5_group1", fvs) traps = "igmp_v1_report" - t_fvs = swsscommon.FieldValuePairs([("trap_group", "queue5_group1"),("trap_ids", "igmp_v1_report")]) + t_fvs = swsscommon.FieldValuePairs([("trap_group", "queue5_group1"),("trap_ids", "igmp_v1_report"),("always_enabled", "true")]) self.trap_ctbl.set(traps, t_fvs) - copp_trap[traps] = copp_group_queue5_group1 + for c_trap in copp_trap: + trap_info = copp_trap[c_trap] + ids = trap_info[0].replace(';', ',') + if traps == ids: + break + trap_info[1] = copp_group_queue5_group1 self.trap_group_ctbl._del("queue5_group1") time.sleep(2) trap_keys = self.trap_atbl.getKeys() trap_ids = traps.split(",") - trap_group = copp_trap[traps] + trap_group = trap_info[1] for trap_id in trap_ids: trap_type = traps_to_trap_type[trap_id] trap_found = False @@ -643,10 +690,11 @@ def test_override_trap_grp_cfg_del (self, dvs, testlog): trap_keys = self.trap_atbl.getKeys() for traps in copp_trap: - if copp_trap[traps] != copp_group_queue1_group1: + trap_info = copp_trap[traps] + if trap_info[1] != copp_group_queue1_group1: continue - trap_ids = traps.split(",") - trap_group = copp_trap[traps] + trap_ids = trap_info[0].split(";") + trap_group = trap_info[1] for trap_id in trap_ids: trap_type = traps_to_trap_type[trap_id] trap_found = False @@ -675,7 +723,7 @@ def test_override_trap_cfg_del(self, dvs, testlog): self.trap_ctbl._del("ip2me") time.sleep(2) trap_ids = traps.split(",") - trap_group = copp_trap["ip2me"] + trap_group = copp_trap["ip2me"][1] trap_keys = self.trap_atbl.getKeys() for trap_id in trap_ids: trap_type = traps_to_trap_type[trap_id] @@ -705,7 +753,7 @@ def test_empty_trap_cfg(self, dvs, testlog): time.sleep(2) trap_id = "ip2me" - trap_group = copp_trap["ip2me"] + trap_group = copp_trap["ip2me"][1] trap_keys = self.trap_atbl.getKeys() trap_type = traps_to_trap_type[trap_id] trap_found = False @@ -740,3 +788,56 @@ def test_empty_trap_cfg(self, dvs, testlog): self.validate_trap_group(key,trap_group) break assert trap_found == True + + + def test_disabled_feature_always_enabled_trap(self, dvs, testlog): + self.setup_copp(dvs) + fvs = swsscommon.FieldValuePairs([("trap_ids", "lldp"), ("trap_group", "queue4_group3"), ("always_enabled", "true")]) + self.trap_ctbl.set("lldp", fvs) + fvs = swsscommon.FieldValuePairs([("state", "disabled")]) + self.feature_tbl.set("lldp", fvs) + + time.sleep(2) + global copp_trap + + trap_keys = self.trap_atbl.getKeys() + for traps in copp_trap: + trap_info = copp_trap[traps] + trap_ids = trap_info[0].split(";") + trap_group = trap_info[1] + + if "lldp" not in trap_ids: + continue + + trap_found = False + trap_type = traps_to_trap_type["lldp"] + for key in trap_keys: + (status, fvs) = self.trap_atbl.get(key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_HOSTIF_TRAP_ATTR_TRAP_TYPE": + if fv[1] == trap_type: + trap_found = True + if trap_found: + self.validate_trap_group(key,trap_group) + break + assert trap_found == True + + # change always_enabled to be false and check the trap is not installed: + fvs = swsscommon.FieldValuePairs([("trap_ids", "lldp"), ("trap_group", "queue4_group3"), ("always_enabled", "false")]) + self.trap_ctbl.set("lldp", fvs) + time.sleep(2) + + table_found = True + for key in trap_keys: + (status, fvs) = self.trap_atbl.get(key) + if status == False: + table_found = False + + # teardown + fvs = swsscommon.FieldValuePairs([("trap_ids", "lldp"), ("trap_group", "queue4_group3")]) + self.trap_ctbl.set("lldp", fvs) + fvs = swsscommon.FieldValuePairs([("state", "enabled")]) + self.feature_tbl.set("lldp", fvs) + + assert table_found == False diff --git a/tests/test_macsec.py b/tests/test_macsec.py index 0f945300e38..f74f31c0087 100644 --- a/tests/test_macsec.py +++ b/tests/test_macsec.py @@ -699,6 +699,54 @@ def test_macsec_term_orch(self, dvs: conftest.DockerVirtualSwitch, testlog): 1) assert(not inspector.get_macsec_port(macsec_port)) + def test_macsec_attribute_change(self, dvs: conftest.DockerVirtualSwitch, testlog): + port_name = "Ethernet0" + local_mac_address = "00-15-5D-78-FF-C1" + peer_mac_address = "00-15-5D-78-FF-C2" + macsec_port_identifier = 1 + macsec_port = "macsec_eth1" + sak = "0" * 32 + auth_key = "0" * 32 + packet_number = 1 + ssci = 1 + salt = "0" * 24 + + wpa = WPASupplicantMock(dvs) + inspector = MACsecInspector(dvs) + + self.init_macsec( + wpa, + port_name, + local_mac_address, + macsec_port_identifier) + wpa.set_macsec_control(port_name, True) + wpa.config_macsec_port(port_name, {"enable_encrypt": False}) + wpa.config_macsec_port(port_name, {"cipher_suite": "GCM-AES-256"}) + self.establish_macsec( + wpa, + port_name, + local_mac_address, + peer_mac_address, + macsec_port_identifier, + 0, + sak, + packet_number, + auth_key, + ssci, + salt) + macsec_info = inspector.get_macsec_port(macsec_port) + assert("encrypt off" in macsec_info) + assert("GCM-AES-256" in macsec_info) + self.deinit_macsec( + wpa, + inspector, + port_name, + macsec_port, + local_mac_address, + peer_mac_address, + macsec_port_identifier, + 0) + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down diff --git a/tests/test_nat.py b/tests/test_nat.py index 3c4a5ddce33..9e87b5f54c9 100644 --- a/tests/test_nat.py +++ b/tests/test_nat.py @@ -1,4 +1,5 @@ import time +import pytest from dvslib.dvs_common import wait_for_result @@ -175,6 +176,7 @@ def test_DelNaPtStaticEntry(self, dvs, testlog): #check the entry is not there in asic db self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NAT_ENTRY", 0) + @pytest.mark.skip(reason="Failing. Under investigation") def test_AddTwiceNatEntry(self, dvs, testlog): # initialize self.setup_db(dvs) diff --git a/tests/test_nhg.py b/tests/test_nhg.py index df071b4d173..2d004f6c1ba 100644 --- a/tests/test_nhg.py +++ b/tests/test_nhg.py @@ -154,11 +154,13 @@ def init_test(self, dvs, num_intfs): self.app_db = self.dvs.get_app_db() self.asic_db = self.dvs.get_asic_db() self.config_db = self.dvs.get_config_db() + self.state_db = self.dvs.get_state_db() self.nhg_ps = swsscommon.ProducerStateTable(self.app_db.db_connection, swsscommon.APP_NEXTHOP_GROUP_TABLE_NAME) self.rt_ps = swsscommon.ProducerStateTable(self.app_db.db_connection, swsscommon.APP_ROUTE_TABLE_NAME) self.lr_ps = swsscommon.ProducerStateTable(self.app_db.db_connection, swsscommon.APP_LABEL_ROUTE_TABLE_NAME) self.cbf_nhg_ps = swsscommon.ProducerStateTable(self.app_db.db_connection, swsscommon.APP_CLASS_BASED_NEXT_HOP_GROUP_TABLE_NAME) self.fc_to_nhg_ps = swsscommon.ProducerStateTable(self.app_db.db_connection, swsscommon.APP_FC_TO_NHG_INDEX_MAP_TABLE_NAME) + self.switch_ps = swsscommon.ProducerStateTable(self.app_db.db_connection, swsscommon.APP_SWITCH_TABLE_NAME) # Set switch FC capability to 63 self.dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_MAX_NUMBER_OF_FORWARDING_CLASSES', '63') @@ -182,6 +184,16 @@ def route_exists(self, rt_prefix): def nhg_map_exists(self, nhg_map_index): return self.get_nhg_map_id(nhg_map_index) is not None + def enable_ordered_ecmp(self): + switch_fvs = swsscommon.FieldValuePairs([('ordered_ecmp', 'true')]) + self.switch_ps.set('switch', switch_fvs) + self.state_db.wait_for_field_match("SWITCH_CAPABILITY", "switch", {"ORDERED_ECMP_CAPABLE": "true"}) + + def disble_ordered_ecmp(self): + switch_fvs = swsscommon.FieldValuePairs([('ordered_ecmp', 'false')]) + self.switch_ps.set('switch', switch_fvs) + self.state_db.wait_for_field_match("SWITCH_CAPABILITY", "switch", {"ORDERED_ECMP_CAPABLE": "false"}) + class TestNhgExhaustBase(TestNextHopGroupBase): MAX_ECMP_COUNT = 512 MAX_PORT_COUNT = 10 @@ -887,8 +899,13 @@ def test_cbf_nhg_exhaust(self, dvs, testlog): class TestNextHopGroup(TestNextHopGroupBase): - def test_route_nhg(self, dvs, dvs_route, testlog): + @pytest.mark.parametrize('ordered_ecmp', ['false', 'true']) + def test_route_nhg(self, ordered_ecmp, dvs, dvs_route, testlog): self.init_test(dvs, 3) + nhip_seqid_map = {"10.0.0.1" : "1", "10.0.0.3" : "2" , "10.0.0.5" : "3" } + + if ordered_ecmp == 'true': + self.enable_ordered_ecmp() rtprefix = "2.2.2.0/24" @@ -911,6 +928,11 @@ def test_route_nhg(self, dvs, dvs_route, testlog): assert bool(fvs) + if ordered_ecmp == 'true': + assert fvs["SAI_NEXT_HOP_GROUP_ATTR_TYPE"] == "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_ORDERED_ECMP" + else: + assert fvs["SAI_NEXT_HOP_GROUP_ATTR_TYPE"] == "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP" + keys = self.asic_db.get_keys(self.ASIC_NHGM_STR) assert len(keys) == 3 @@ -923,6 +945,13 @@ def test_route_nhg(self, dvs, dvs_route, testlog): # verify weight attributes not in asic db assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_WEIGHT") is None + if ordered_ecmp == "true": + nhid = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] + nh_fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nhid) + assert nhip_seqid_map[nh_fvs["SAI_NEXT_HOP_ATTR_IP"]] == fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID"] + else: + assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID") is None + # Remove route 2.2.2.0/24 self.rt_ps._del(rtprefix) @@ -930,8 +959,9 @@ def test_route_nhg(self, dvs, dvs_route, testlog): dvs_route.check_asicdb_deleted_route_entries([rtprefix]) # Negative test with nexthops with incomplete weight info - fvs = swsscommon.FieldValuePairs([("nexthop","10.0.0.1,10.0.0.3,10.0.0.5"), - ("ifname", "Ethernet0,Ethernet4,Ethernet8"), + # To validate Order ECMP change the nexthop order + fvs = swsscommon.FieldValuePairs([("nexthop","10.0.0.5,10.0.0.1,10.0.0.3"), + ("ifname", "Ethernet8,Ethernet0,Ethernet4"), ("weight", "10,30")]) self.rt_ps.set(rtprefix, fvs) @@ -939,25 +969,33 @@ def test_route_nhg(self, dvs, dvs_route, testlog): rtkeys = dvs_route.check_asicdb_route_entries([rtprefix]) # assert the route points to next hop group - fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY", rtkeys[0]) + fvs = self.asic_db.get_entry(self.ASIC_RT_STR, rtkeys[0]) nhgid = fvs["SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID"] - fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP", nhgid) + fvs = self.asic_db.get_entry(self.ASIC_NHG_STR, nhgid) assert bool(fvs) - keys = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER") + keys = self.asic_db.get_keys(self.ASIC_NHGM_STR) assert len(keys) == 3 for k in keys: - fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER", k) + fvs = self.asic_db.get_entry(self.ASIC_NHGM_STR, k) assert fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhgid # verify weight attributes not in asic db assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_WEIGHT") is None + + if ordered_ecmp == "true": + nhid = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] + nh_fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nhid) + assert nhip_seqid_map[nh_fvs["SAI_NEXT_HOP_ATTR_IP"]] == fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID"] + else: + assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID") is None + # Remove route 2.2.2.0/24 self.rt_ps._del(rtprefix) @@ -974,20 +1012,20 @@ def test_route_nhg(self, dvs, dvs_route, testlog): rtkeys = dvs_route.check_asicdb_route_entries([rtprefix]) # assert the route points to next hop group - fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY", rtkeys[0]) + fvs = self.asic_db.get_entry(self.ASIC_RT_STR, rtkeys[0]) nhgid = fvs["SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID"] - fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP", nhgid) + fvs = self.asic_db.get_entry(self.ASIC_NHG_STR, nhgid) assert bool(fvs) - keys = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER") + keys = self.asic_db.get_keys(self.ASIC_NHGM_STR) assert len(keys) == 3 for k in keys: - fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER", k) + fvs = self.asic_db.get_entry(self.ASIC_NHGM_STR, k) assert fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhgid @@ -995,6 +1033,13 @@ def test_route_nhg(self, dvs, dvs_route, testlog): nhid = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] weight = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_WEIGHT"] + if ordered_ecmp == "true": + nhid = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] + nh_fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nhid) + assert nhip_seqid_map[nh_fvs["SAI_NEXT_HOP_ATTR_IP"]] == fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID"] + else: + assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID") is None + fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nhid) nhip = fvs["SAI_NEXT_HOP_ATTR_IP"].split('.') expected_weight = int(nhip[3]) * 10 @@ -1011,11 +1056,11 @@ def test_route_nhg(self, dvs, dvs_route, testlog): # wait for route to be programmed time.sleep(1) - keys = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP") + keys = self.asic_db.get_keys(self.ASIC_NHG_STR) assert len(keys) == 2 - keys = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER") + keys = self.asic_db.get_keys(self.ASIC_NHGM_STR) assert len(keys) == 6 @@ -1035,7 +1080,8 @@ def test_route_nhg(self, dvs, dvs_route, testlog): assert len(keys) == 2 - i # bring links up one-by-one - for i in [0, 1, 2]: + # Bring link up in random order to verify sequence id is as per order + for i, val in enumerate([2,1,0]): self.flap_intf(i, 'up') keys = self.asic_db.get_keys(self.ASIC_NHGM_STR) @@ -1045,13 +1091,23 @@ def test_route_nhg(self, dvs, dvs_route, testlog): for k in keys: fvs = self.asic_db.get_entry(self.ASIC_NHGM_STR, k) assert fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhgid - + if ordered_ecmp == "true": + nhid = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] + nh_fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nhid) + assert nhip_seqid_map[nh_fvs["SAI_NEXT_HOP_ATTR_IP"]] == fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID"] + else: + assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID") is None + # Remove route 2.2.2.0/24 self.rt_ps._del(rtprefix) # Wait for route 2.2.2.0/24 to be removed dvs_route.check_asicdb_deleted_route_entries([rtprefix]) + # Cleanup by disabling to get default behaviour + if ordered_ecmp == 'true': + self.disble_ordered_ecmp() + def test_label_route_nhg(self, dvs, testlog): self.init_test(dvs, 3) @@ -1875,6 +1931,9 @@ def create_cbf_invalid_nhg_map_test(): time.sleep(1) assert(not self.nhg_exists('cbfgroup3')) + # Cleanup + self.cbf_nhg_ps._del('cbfgroup3') + self.init_test(dvs, 4) mainline_cbf_nhg_test() @@ -1980,7 +2039,7 @@ def data_validation_test(): # Test validation errors nhg_maps = [ ('-1', '0'), # negative FC - ('64', '0'), # greater than max FC value + ('63', '0'), # greater than max FC value ('a', '0'), # non-integer FC ('0', '-1'), # negative NH index ('0', 'a'), # non-integer NH index @@ -2151,6 +2210,42 @@ def create_cbf_nhg_inexistent_map_test(): self.fc_to_nhg_ps._del(nhg_maps.pop()) self.asic_db.wait_for_n_keys(self.ASIC_NHG_MAP_STR, self.asic_nhg_maps_count) + # Test scenario: + # - Create a CBF NHG that has a member which is not yet synced. It shouldn't be synced. + # - Add the missing member and assert the CBF NHG is now synced. + def test_cbf_sync_before_member(self, dvs, testlog): + self.init_test(dvs, 2) + + # Create an FC to NH index selection map + nhg_map = [(str(i), '0' if i < 4 else '1') for i in range(8)] + fvs = swsscommon.FieldValuePairs(nhg_map) + self.fc_to_nhg_ps.set('cbfnhgmap1', fvs) + self.asic_db.wait_for_n_keys(self.ASIC_NHG_MAP_STR, self.asic_nhg_maps_count + 1) + + # Create a non-CBF NHG + fvs = swsscommon.FieldValuePairs([('nexthop', '10.0.0.1,10.0.0.3'), + ('ifname', 'Ethernet0,Ethernet4')]) + self.nhg_ps.set('group1', fvs) + self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.asic_nhgs_count + 1) + + # Create a CBF NHG with a member that doesn't currently exist. Nothing should happen + fvs = swsscommon.FieldValuePairs([('members', 'group1,group2'), + ('selection_map', 'cbfnhgmap1')]) + self.cbf_nhg_ps.set('cbfgroup1', fvs) + time.sleep(1) + assert(len(self.asic_db.get_keys(self.ASIC_NHG_STR)) == self.asic_nhgs_count + 1) + + # Create the missing non-CBF NHG. This and the CBF NHG should be created. + fvs = swsscommon.FieldValuePairs([('nexthop', '10.0.0.1,10.0.0.3'), + ("ifname", "Ethernet0,Ethernet4")]) + self.nhg_ps.set("group2", fvs) + self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.asic_nhgs_count + 3) + + # Cleanup + self.nhg_ps._del('cbfgroup1') + self.nhg_ps._del('group1') + self.nhg_ps._del('group2') + self.nhg_ps._del('cbfnhgmap1') # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_nvgre_tunnel.py b/tests/test_nvgre_tunnel.py new file mode 100644 index 00000000000..90fe560141c --- /dev/null +++ b/tests/test_nvgre_tunnel.py @@ -0,0 +1,381 @@ +import time +import json +import random +import time +import pytest + + +from swsscommon import swsscommon +from pprint import pprint + + +NVGRE_TUNNEL = 'NVGRE_TUNNEL' +NVGRE_TUNNEL_MAP = 'NVGRE_TUNNEL_MAP' + + +SAI_OBJECT_TYPE_TUNNEL = 'ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL' +SAI_OBJECT_TYPE_TUNNEL_MAP = 'ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP' +SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY = 'ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY' + + +def create_entry(tbl, key, pairs): + fvs = swsscommon.FieldValuePairs(pairs) + tbl.set(key, fvs) + time.sleep(1) + + +def create_entry_tbl(db, table, separator, key, pairs): + tbl = swsscommon.Table(db, table) + create_entry(tbl, key, pairs) + + +def delete_entry_tbl(db, table, key): + tbl = swsscommon.Table(db, table) + tbl._del(key) + time.sleep(1) + + +def get_all_created_entries(db, table, existed_entries): + tbl = swsscommon.Table(db, table) + entries = set(tbl.getKeys()) + new_entries = list(entries - existed_entries) + assert len(new_entries) >= 0, "DB entries was't created" + new_entries.sort() + return new_entries + + +def get_created_entries(db, table, existed_entries, count): + new_entries = get_all_created_entries(db, table, existed_entries) + assert len(new_entries) == count, "Wrong number of created entries." + return new_entries + + +def get_exist_entries(dvs, table): + db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(db, table) + return set(tbl.getKeys()) + + +def get_created_entry(db, table, existed_entries): + tbl = swsscommon.Table(db, table) + entries = set(tbl.getKeys()) + new_entries = list(entries - existed_entries) + assert len(new_entries) == 1, "Wrong number of created entries." + return new_entries[0] + + +def how_many_entries_exist(db, table): + tbl = swsscommon.Table(db, table) + return len(tbl.getKeys()) + + +def get_lo(dvs): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + tbl = swsscommon.Table(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE') + + entries = tbl.getKeys() + lo_id = None + for entry in entries: + status, fvs = tbl.get(entry) + assert status, "Got an error when get a key" + for key, value in fvs: + if key == 'SAI_ROUTER_INTERFACE_ATTR_TYPE' and value == 'SAI_ROUTER_INTERFACE_TYPE_LOOPBACK': + lo_id = entry + break + else: + assert False, 'Don\'t found loopback id' + + return lo_id + + +def check_object(db, table, key, expected_attributes): + tbl = swsscommon.Table(db, table) + keys = tbl.getKeys() + assert key in keys, "The desired key is not presented" + + status, fvs = tbl.get(key) + assert status, "Got an error when get a key" + + assert len(fvs) == len(expected_attributes), "Unexpected number of attributes" + + attr_keys = {entry[0] for entry in fvs} + + for name, value in fvs: + assert expected_attributes[name] == value, "Wrong value %s for the attribute %s = %s" % \ + (value, name, expected_attributes[name]) + + +loopback_id = 0 + + +class NvgreTunnel(object): + tunnel_ids = set() + tunnel_map_ids = set() + tunnel_map_entry_ids = set() + tunnel_map_map = {} + tunnel = {} + + + def fetch_exist_entries(self, dvs): + self.tunnel_ids = get_exist_entries(dvs, SAI_OBJECT_TYPE_TUNNEL) + self.tunnel_map_ids = get_exist_entries(dvs, SAI_OBJECT_TYPE_TUNNEL_MAP) + self.tunnel_map_entry_ids = get_exist_entries(dvs, SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY) + + global loopback_id + if not loopback_id: + loopback_id = get_lo(dvs) + + + def create_nvgre_tunnel(self, dvs, tunnel_name, src_ip): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + create_entry_tbl(conf_db, NVGRE_TUNNEL, '|', tunnel_name, [ ('src_ip', src_ip) ]) + time.sleep(1) + + + def check_nvgre_tunnel(self, dvs, tunnel_name, src_ip): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + global loopback_id + + tunnel_id = get_created_entry(asic_db, SAI_OBJECT_TYPE_TUNNEL, self.tunnel_ids) + tunnel_map_ids = get_created_entries(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP, self.tunnel_map_ids, 4) + + assert how_many_entries_exist(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP) == (len(self.tunnel_map_ids) + 4), "The TUNNEL_MAP wasn't created" + assert how_many_entries_exist(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY) == len(self.tunnel_map_entry_ids), "The TUNNEL_MAP_ENTRY is created too early" + assert how_many_entries_exist(asic_db, SAI_OBJECT_TYPE_TUNNEL) == (len(self.tunnel_ids) + 1), "The TUNNEL wasn't created" + + check_object(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP, tunnel_map_ids[0], { 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VLAN_ID_TO_VSID' }) + check_object(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP, tunnel_map_ids[1], { 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_BRIDGE_IF_TO_VSID' }) + check_object(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP, tunnel_map_ids[2], { 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VSID_TO_VLAN_ID' }) + check_object(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP, tunnel_map_ids[3], { 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VSID_TO_BRIDGE_IF' }) + + check_object(asic_db, SAI_OBJECT_TYPE_TUNNEL, tunnel_id, + { + 'SAI_TUNNEL_ATTR_TYPE': 'SAI_TUNNEL_TYPE_NVGRE', + 'SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE': loopback_id, + 'SAI_TUNNEL_ATTR_DECAP_MAPPERS': f'2:{tunnel_map_ids[2]},{tunnel_map_ids[3]}', + 'SAI_TUNNEL_ATTR_ENCAP_MAPPERS': f'2:{tunnel_map_ids[0]},{tunnel_map_ids[1]}', + 'SAI_TUNNEL_ATTR_ENCAP_SRC_IP': src_ip + } + ) + + self.tunnel_map_ids.update(tunnel_map_ids) + self.tunnel_ids.add(tunnel_id) + self.tunnel_map_map[tunnel_name] = tunnel_map_ids + self.tunnel[tunnel_name] = tunnel_id + + + def check_invalid_nvgre_tunnel(self, dvs): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + assert how_many_entries_exist(asic_db, SAI_OBJECT_TYPE_TUNNEL) == len(self.tunnel_ids), "Invalid TUNNEL was created" + assert how_many_entries_exist(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP) == len(self.tunnel_map_ids), "Invalid TUNNEL_MAP was created" + + + def remove_nvgre_tunnel(self, dvs, tunnel_name): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + delete_entry_tbl(conf_db, NVGRE_TUNNEL, tunnel_name) + time.sleep(1) + + + def check_remove_nvgre_tunnel(self, dvs, tunnel_name): + self.fetch_exist_entries(dvs) + self.tunnel.pop(tunnel_name, None) + self.tunnel_map_map.pop(tunnel_name, None) + + + def create_nvgre_tunnel_map_entry(self, dvs, tunnel_name, tunnel_map_entry_name, vlan_id, vsid): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + create_entry_tbl( + conf_db, + NVGRE_TUNNEL_MAP, '|', f'{tunnel_name}|{tunnel_map_entry_name}', + [ + ('vsid', vsid), + ('vlan_id', f'Vlan{vlan_id}'), + ], + ) + time.sleep(1) + + + def check_nvgre_tunnel_map_entry(self, dvs, tunnel_name, vlan_id, vsid): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + if (self.tunnel_map_map.get(tunnel_name) is None): + tunnel_map_ids = get_created_entries(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP, self.tunnel_map_ids, 4) + else: + tunnel_map_ids = self.tunnel_map_map[tunnel_name] + + tunnel_map_entry_id = get_created_entries(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY, self.tunnel_map_entry_ids, 1) + + assert how_many_entries_exist(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY) == (len(self.tunnel_map_entry_ids) + 1), "The TUNNEL_MAP_ENTRY is created too early" + + check_object(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY, tunnel_map_entry_id[0], + { + 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE': 'SAI_TUNNEL_MAP_TYPE_VSID_TO_VLAN_ID', + 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP': tunnel_map_ids[2], + 'SAI_TUNNEL_MAP_ENTRY_ATTR_VSID_ID_KEY': vsid, + 'SAI_TUNNEL_MAP_ENTRY_ATTR_VLAN_ID_VALUE': vlan_id, + } + ) + + self.tunnel_map_entry_ids.update(tunnel_map_entry_id) + + + def check_invalid_nvgre_tunnel_map_entry(self, dvs): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + assert how_many_entries_exist(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY) == len(self.tunnel_map_entry_ids), "Invalid TUNNEL_MAP_ENTRY was created" + + + def remove_nvgre_tunnel_map_entry(self, dvs, tunnel_name, tunnel_map_entry_name): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + delete_entry_tbl(conf_db, NVGRE_TUNNEL_MAP, f'{tunnel_name}|{tunnel_map_entry_name}') + time.sleep(1) + + + def check_remove_nvgre_tunnel_map_entry(self, dvs): + self.fetch_exist_entries(dvs) + + +@pytest.mark.usefixtures('dvs_vlan_manager') +class TestNvgreTunnel(object): + + def get_nvgre_tunnel_obj(self): + return NvgreTunnel() + + + def test_nvgre_create_tunnel_map_entry(self, dvs, testlog): + try: + tunnel_name = 'tunnel_1' + tunnel_map_entry_name = 'entry_1' + src_ip = '10.0.0.1' + vlan_id = '500' + vsid = '850' + + nvgre_obj = self.get_nvgre_tunnel_obj() + nvgre_obj.fetch_exist_entries(dvs) + + self.dvs_vlan.create_vlan(vlan_id) + + nvgre_obj.create_nvgre_tunnel(dvs, tunnel_name, src_ip) + nvgre_obj.check_nvgre_tunnel(dvs, tunnel_name, src_ip) + + nvgre_obj.create_nvgre_tunnel_map_entry(dvs, tunnel_name, tunnel_map_entry_name, vlan_id, vsid) + nvgre_obj.check_nvgre_tunnel_map_entry(dvs, tunnel_name, vlan_id, vsid) + finally: + nvgre_obj.remove_nvgre_tunnel_map_entry(dvs, tunnel_name, tunnel_map_entry_name) + nvgre_obj.check_remove_nvgre_tunnel_map_entry(dvs) + + nvgre_obj.remove_nvgre_tunnel(dvs, tunnel_name) + nvgre_obj.check_remove_nvgre_tunnel(dvs, tunnel_name) + + self.dvs_vlan.remove_vlan(vlan_id) + + + def test_multiple_nvgre_tunnels_entries(self, dvs, testlog): + try: + tunnel_name_1 = 'tunnel_1' + tunnel_name_2 = 'tunnel_2' + tunnel_name_3 = 'tunnel_3' + entry_1 = 'entry_1' + entry_2 = 'entry_2' + entry_3 = 'entry_3' + entry_4 = 'entry_4' + + nvgre_obj = self.get_nvgre_tunnel_obj() + nvgre_obj.fetch_exist_entries(dvs) + + self.dvs_vlan.create_vlan('501') + self.dvs_vlan.create_vlan('502') + self.dvs_vlan.create_vlan('503') + self.dvs_vlan.create_vlan('504') + + nvgre_obj.create_nvgre_tunnel(dvs, tunnel_name_1, '10.0.0.1') + nvgre_obj.check_nvgre_tunnel(dvs, tunnel_name_1, '10.0.0.1') + + nvgre_obj.create_nvgre_tunnel_map_entry(dvs, tunnel_name_1, entry_1, '501', '801') + nvgre_obj.check_nvgre_tunnel_map_entry(dvs, tunnel_name_1, '501', '801') + + nvgre_obj.create_nvgre_tunnel(dvs, tunnel_name_2, '10.0.0.2') + nvgre_obj.check_nvgre_tunnel(dvs, tunnel_name_2, '10.0.0.2') + + nvgre_obj.create_nvgre_tunnel_map_entry(dvs, tunnel_name_2, entry_2, '502', '802') + nvgre_obj.check_nvgre_tunnel_map_entry(dvs, tunnel_name_2, '502', '802') + + nvgre_obj.create_nvgre_tunnel(dvs, tunnel_name_3, '10.0.0.3') + nvgre_obj.check_nvgre_tunnel(dvs, tunnel_name_3, '10.0.0.3') + + nvgre_obj.create_nvgre_tunnel_map_entry(dvs, tunnel_name_3, entry_3, '503', '803') + nvgre_obj.check_nvgre_tunnel_map_entry(dvs, tunnel_name_3, '503', '803') + + nvgre_obj.create_nvgre_tunnel_map_entry(dvs, tunnel_name_3, entry_4, '504', '804') + nvgre_obj.check_nvgre_tunnel_map_entry(dvs, tunnel_name_3, '504', '804') + finally: + nvgre_obj.remove_nvgre_tunnel_map_entry(dvs, tunnel_name_1, entry_1) + nvgre_obj.check_remove_nvgre_tunnel_map_entry(dvs) + + nvgre_obj.remove_nvgre_tunnel(dvs, tunnel_name_1) + nvgre_obj.check_remove_nvgre_tunnel(dvs, tunnel_name_1) + + nvgre_obj.remove_nvgre_tunnel_map_entry(dvs, tunnel_name_2, entry_2) + nvgre_obj.check_remove_nvgre_tunnel_map_entry(dvs) + + nvgre_obj.remove_nvgre_tunnel(dvs, tunnel_name_2) + nvgre_obj.check_remove_nvgre_tunnel(dvs, tunnel_name_2) + + nvgre_obj.remove_nvgre_tunnel_map_entry(dvs, tunnel_name_3, entry_3) + nvgre_obj.check_remove_nvgre_tunnel_map_entry(dvs) + + nvgre_obj.remove_nvgre_tunnel_map_entry(dvs, tunnel_name_3, entry_4) + nvgre_obj.check_remove_nvgre_tunnel_map_entry(dvs) + + nvgre_obj.remove_nvgre_tunnel(dvs, tunnel_name_3) + nvgre_obj.check_remove_nvgre_tunnel(dvs, tunnel_name_3) + + self.dvs_vlan.remove_vlan('501') + self.dvs_vlan.remove_vlan('502') + self.dvs_vlan.remove_vlan('503') + self.dvs_vlan.remove_vlan('504') + + + def test_invalid_nvgre_tunnel(self, dvs, testlog): + nvgre_obj = self.get_nvgre_tunnel_obj() + nvgre_obj.fetch_exist_entries(dvs) + + nvgre_obj.create_nvgre_tunnel(dvs, 'tunnel_1', '1111.1111.1111.1111') + nvgre_obj.check_invalid_nvgre_tunnel(dvs) + + + def test_invalid_nvgre_tunnel_map_entry(self, dvs, testlog): + try: + tunnel_name = 'tunnel_1' + tunnel_map_entry_name = 'entry_1' + src_ip = '10.0.0.1' + vlan_id = '500' + vsid = 'INVALID' + + nvgre_obj = self.get_nvgre_tunnel_obj() + nvgre_obj.fetch_exist_entries(dvs) + + self.dvs_vlan.create_vlan(vlan_id) + + nvgre_obj.create_nvgre_tunnel(dvs, tunnel_name, src_ip) + nvgre_obj.check_nvgre_tunnel(dvs, tunnel_name, src_ip) + + nvgre_obj.create_nvgre_tunnel_map_entry(dvs, tunnel_name, tunnel_map_entry_name, vlan_id, vsid) + nvgre_obj.check_invalid_nvgre_tunnel_map_entry(dvs) + finally: + nvgre_obj.remove_nvgre_tunnel(dvs, tunnel_name) + nvgre_obj.check_remove_nvgre_tunnel(dvs, tunnel_name) + + self.dvs_vlan.remove_vlan(vlan_id) + + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass diff --git a/tests/test_pfcwd.py b/tests/test_pfcwd.py index c569bc8a43b..78cd8515741 100644 --- a/tests/test_pfcwd.py +++ b/tests/test_pfcwd.py @@ -77,6 +77,222 @@ def test_PfcWdAclCreationDeletion(self, dvs, dvs_acl, testlog): finally: dvs_acl.remove_acl_table(PFCWD_TABLE_NAME) + + +class TestPfcwdFunc(object): + @pytest.fixture + def setup_teardown_test(self, dvs): + self.get_db_handle(dvs) + + self.test_ports = ["Ethernet0"] + + self.setup_test(dvs) + self.get_port_oids() + self.get_queue_oids() + + yield + + self.teardown_test(dvs) + + def setup_test(self, dvs): + # get original cable len for test ports + fvs = self.config_db.get_entry("CABLE_LENGTH", "AZURE") + self.orig_cable_len = dict() + for port in self.test_ports: + self.orig_cable_len[port] = fvs[port] + # set cable len to non zero value. if port is down, default cable len is 0 + self.set_cable_len(port, "5m") + # startup port + dvs.runcmd("config interface startup {}".format(port)) + + # enable pfcwd + self.set_flex_counter_status("PFCWD", "enable") + # enable queue so that queue oids are generated + self.set_flex_counter_status("QUEUE", "enable") + + def teardown_test(self, dvs): + # disable pfcwd + self.set_flex_counter_status("PFCWD", "disable") + # disable queue + self.set_flex_counter_status("QUEUE", "disable") + + for port in self.test_ports: + if self.orig_cable_len: + self.set_cable_len(port, self.orig_cable_len[port]) + # shutdown port + dvs.runcmd("config interface shutdown {}".format(port)) + + def get_db_handle(self, dvs): + self.app_db = dvs.get_app_db() + self.asic_db = dvs.get_asic_db() + self.config_db = dvs.get_config_db() + self.counters_db = dvs.get_counters_db() + + def set_flex_counter_status(self, key, state): + fvs = {'FLEX_COUNTER_STATUS': state} + self.config_db.update_entry("FLEX_COUNTER_TABLE", key, fvs) + time.sleep(1) + + def get_queue_oids(self): + self.queue_oids = self.counters_db.get_entry("COUNTERS_QUEUE_NAME_MAP", "") + + def get_port_oids(self): + self.port_oids = self.counters_db.get_entry("COUNTERS_PORT_NAME_MAP", "") + + def _get_bitmask(self, queues): + mask = 0 + if queues is not None: + for queue in queues: + mask = mask | 1 << queue + + return str(mask) + + def set_ports_pfc(self, status='enable', pfc_queues=[3,4]): + for port in self.test_ports: + if 'enable' in status: + fvs = {'pfc_enable': ",".join([str(q) for q in pfc_queues])} + self.config_db.create_entry("PORT_QOS_MAP", port, fvs) + else: + self.config_db.delete_entry("PORT_QOS_MAP", port) + + def set_cable_len(self, port_name, cable_len): + fvs = {port_name: cable_len} + self.config_db.update_entry("CABLE_LEN", "AZURE", fvs) + + def start_pfcwd_on_ports(self, poll_interval="200", detection_time="200", restoration_time="200", action="drop"): + pfcwd_info = {"POLL_INTERVAL": poll_interval} + self.config_db.update_entry("PFC_WD", "GLOBAL", pfcwd_info) + + pfcwd_info = {"action": action, + "detection_time" : detection_time, + "restoration_time": restoration_time + } + for port in self.test_ports: + self.config_db.update_entry("PFC_WD", port, pfcwd_info) + + def stop_pfcwd_on_ports(self): + for port in self.test_ports: + self.config_db.delete_entry("PFC_WD", port) + + def verify_ports_pfc(self, queues=None): + mask = self._get_bitmask(queues) + fvs = {"SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL" : mask} + for port in self.test_ports: + self.asic_db.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", self.port_oids[port], fvs) + + def verify_pfcwd_state(self, queues, state="stormed"): + fvs = {"PFC_WD_STATUS": state} + for port in self.test_ports: + for queue in queues: + queue_name = port + ":" + str(queue) + self.counters_db.wait_for_field_match("COUNTERS", self.queue_oids[queue_name], fvs) + + def verify_pfcwd_counters(self, queues, restore="0"): + fvs = {"PFC_WD_QUEUE_STATS_DEADLOCK_DETECTED" : "1", + "PFC_WD_QUEUE_STATS_DEADLOCK_RESTORED" : restore + } + for port in self.test_ports: + for queue in queues: + queue_name = port + ":" + str(queue) + self.counters_db.wait_for_field_match("COUNTERS", self.queue_oids[queue_name], fvs) + + def reset_pfcwd_counters(self, queues): + fvs = {"PFC_WD_QUEUE_STATS_DEADLOCK_DETECTED" : "0", + "PFC_WD_QUEUE_STATS_DEADLOCK_RESTORED" : "0" + } + for port in self.test_ports: + for queue in queues: + queue_name = port + ":" + str(queue) + self.counters_db.update_entry("COUNTERS", self.queue_oids[queue_name], fvs) + + def set_storm_state(self, queues, state="enabled"): + fvs = {"DEBUG_STORM": state} + for port in self.test_ports: + for queue in queues: + queue_name = port + ":" + str(queue) + self.counters_db.update_entry("COUNTERS", self.queue_oids[queue_name], fvs) + + def test_pfcwd_single_queue(self, dvs, setup_teardown_test): + try: + # enable PFC on queues + test_queues = [3, 4] + self.set_ports_pfc(pfc_queues=test_queues) + + # verify in asic db + self.verify_ports_pfc(test_queues) + + # start pfcwd + self.start_pfcwd_on_ports() + + # start pfc storm + storm_queue = [3] + self.set_storm_state(storm_queue) + + # verify pfcwd is triggered + self.verify_pfcwd_state(storm_queue) + + # verify pfcwd counters + self.verify_pfcwd_counters(storm_queue) + + # verify if queue is disabled + self.verify_ports_pfc(queues=[4]) + + # stop storm + self.set_storm_state(storm_queue, state="disabled") + + # verify pfcwd state is restored + self.verify_pfcwd_state(storm_queue, state="operational") + + # verify pfcwd counters + self.verify_pfcwd_counters(storm_queue, restore="1") + + # verify if queue is enabled + self.verify_ports_pfc(test_queues) + + finally: + self.reset_pfcwd_counters(storm_queue) + self.stop_pfcwd_on_ports() + + def test_pfcwd_multi_queue(self, dvs, setup_teardown_test): + try: + # enable PFC on queues + test_queues = [3, 4] + self.set_ports_pfc(pfc_queues=test_queues) + + # verify in asic db + self.verify_ports_pfc(test_queues) + + # start pfcwd + self.start_pfcwd_on_ports() + + # start pfc storm + self.set_storm_state(test_queues) + + # verify pfcwd is triggered + self.verify_pfcwd_state(test_queues) + + # verify pfcwd counters + self.verify_pfcwd_counters(test_queues) + + # verify if queue is disabled. Expected mask is 0 + self.verify_ports_pfc() + + # stop storm + self.set_storm_state(test_queues, state="disabled") + + # verify pfcwd state is restored + self.verify_pfcwd_state(test_queues, state="operational") + + # verify pfcwd counters + self.verify_pfcwd_counters(test_queues, restore="1") + + # verify if queue is enabled + self.verify_ports_pfc(test_queues) + + finally: + self.reset_pfcwd_counters(test_queues) + self.stop_pfcwd_on_ports() + # # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_qos_map.py b/tests/test_qos_map.py index 21a25742c95..301bd3c6d62 100644 --- a/tests/test_qos_map.py +++ b/tests/test_qos_map.py @@ -139,7 +139,7 @@ def test_dscp_to_fc(self, dvs): self.init_test(dvs) # Create a DSCP_TO_FC map - dscp_map = [(str(i), str(i)) for i in range(0, 64)] + dscp_map = [(str(i), str(i)) for i in range(0, 63)] self.dscp_ps.set("AZURE", swsscommon.FieldValuePairs(dscp_map)) self.asic_db.wait_for_n_keys(self.ASIC_QOS_MAP_STR, self.asic_qos_map_count + 1) @@ -153,7 +153,7 @@ def test_dscp_to_fc(self, dvs): assert(fvs.get("SAI_QOS_MAP_ATTR_TYPE") == "SAI_QOS_MAP_TYPE_DSCP_TO_FORWARDING_CLASS") # Modify the map - dscp_map = [(str(i), '0') for i in range(0, 64)] + dscp_map = [(str(i), '0') for i in range(0, 63)] self.dscp_ps.set("AZURE", swsscommon.FieldValuePairs(dscp_map)) time.sleep(1) @@ -174,7 +174,7 @@ def test_dscp_to_fc(self, dvs): ('-1', '0'), # negative DSCP ('64', '0'), # DSCP greater than max value ('0', '-1'), # negative FC - ('0', '64'), # FC greater than max value + ('0', '63'), # FC greater than max value ('a', '0'), # non-integer DSCP ('0', 'a'), # non-integet FC ] @@ -228,7 +228,7 @@ def test_exp_to_fc(self, dvs): ('-1', '0'), # negative EXP ('8', '0'), # EXP greater than max value ('0', '-1'), # negative FC - ('0', '64'), # FC greater than max value + ('0', '63'), # FC greater than max value ('a', '0'), # non-integer EXP ('0', 'a'), # non-integet FC ] @@ -258,7 +258,7 @@ def test_per_port_cbf_binding(self, dvs): self.init_test(dvs) # Create a DSCP_TO_FC map - dscp_map = [(str(i), str(i)) for i in range(0, 64)] + dscp_map = [(str(i), str(i)) for i in range(0, 63)] self.dscp_ps.set("AZURE", swsscommon.FieldValuePairs(dscp_map)) self.asic_db.wait_for_n_keys(self.ASIC_QOS_MAP_STR, self.asic_qos_map_count + 1) dscp_map_id = self.get_qos_id() diff --git a/tests/test_route.py b/tests/test_route.py index bae9865a657..9c56ef52a83 100644 --- a/tests/test_route.py +++ b/tests/test_route.py @@ -12,6 +12,7 @@ def setup_db(self, dvs): self.pdb = dvs.get_app_db() self.adb = dvs.get_asic_db() self.cdb = dvs.get_config_db() + self.sdb = dvs.get_state_db() def set_admin_status(self, interface, status): self.cdb.update_entry("PORT", interface, {"admin_status": status}) @@ -62,6 +63,23 @@ def _access_function(): wait_for_result(_access_function) + def check_route_state(self, prefix, value): + found = False + + route_entries = self.sdb.get_keys("ROUTE_TABLE") + for key in route_entries: + if key != prefix: + continue + found = True + fvs = self.sdb.get_entry("ROUTE_TABLE", key) + + assert fvs != {} + + for f,v in fvs.items(): + if f == "state": + assert v == value + assert found + def get_asic_db_key(self, destination): route_entries = self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") for route_entry in route_entries: @@ -123,6 +141,9 @@ def test_RouteAddRemoveIpv4Route(self, dvs, testlog): self.create_l3_intf("Ethernet0", "") self.create_l3_intf("Ethernet4", "") + # check STATE route database, initial state shall be "na" + self.check_route_state("0.0.0.0/0", "na") + # set ip address self.add_ip_address("Ethernet0", "10.0.0.0/31") self.add_ip_address("Ethernet4", "10.0.0.2/31") @@ -144,15 +165,25 @@ def test_RouteAddRemoveIpv4Route(self, dvs, testlog): # add route entry dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 2.2.2.0/24 10.0.0.1\"") + # add default route entry + fieldValues = {"nexthop": "10.0.0.1", "ifname": "Ethernet0"} + self.create_route_entry("0.0.0.0/0", fieldValues) + # check application database self.pdb.wait_for_entry("ROUTE_TABLE", "2.2.2.0/24") # check ASIC route database self.check_route_entries(["2.2.2.0/24"]) + # check STATE route database + self.check_route_state("0.0.0.0/0", "ok") + # remove route entry dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ip route 2.2.2.0/24 10.0.0.1\"") + # remove default route entry + self.remove_route_entry("0.0.0.0/0") + # check application database self.pdb.wait_for_deleted_entry("ROUTE_TABLE", "2.2.2.0/24") @@ -170,6 +201,9 @@ def test_RouteAddRemoveIpv4Route(self, dvs, testlog): self.set_admin_status("Ethernet0", "down") self.set_admin_status("Ethernet4", "down") + # check STATE route database, state set to "na" after deleting the default route + self.check_route_state("0.0.0.0/0", "na") + # remove ip address and default route dvs.servers[0].runcmd("ip route del default dev eth0") dvs.servers[0].runcmd("ip address del 10.0.0.1/31 dev eth0") @@ -184,6 +218,9 @@ def test_RouteAddRemoveIpv6Route(self, dvs, testlog): self.create_l3_intf("Ethernet0", "") self.create_l3_intf("Ethernet4", "") + # check STATE route database, initial state shall be "na" + self.check_route_state("::/0", "na") + # bring up interface self.set_admin_status("Ethernet0", "up") self.set_admin_status("Ethernet4", "up") @@ -207,15 +244,25 @@ def test_RouteAddRemoveIpv6Route(self, dvs, testlog): # add route entry dvs.runcmd("vtysh -c \"configure terminal\" -c \"ipv6 route 3000::0/64 2000::2\"") + # add default route entry + fieldValues = {"nexthop": "2000::2", "ifname": "Ethernet0"} + self.create_route_entry("::/0", fieldValues) + # check application database self.pdb.wait_for_entry("ROUTE_TABLE", "3000::/64") # check ASIC route database self.check_route_entries(["3000::/64"]) + # check STATE route database + self.check_route_state("::/0", "ok") + # remove route entry dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ipv6 route 3000::0/64 2000::2\"") + # remove default route entry + self.remove_route_entry("::/0") + # check application database self.pdb.wait_for_deleted_entry("ROUTE_TABLE", "3000::/64") @@ -233,6 +280,9 @@ def test_RouteAddRemoveIpv6Route(self, dvs, testlog): self.set_admin_status("Ethernet0", "down") self.set_admin_status("Ethernet4", "down") + # check STATE route database, state set to "na" after deleting the default route + self.check_route_state("::/0", "na") + # remove ip address and default route dvs.servers[0].runcmd("ip -6 route del default dev eth0") dvs.servers[0].runcmd("ip -6 address del 2000::2/64 dev eth0") diff --git a/tests/test_virtual_chassis.py b/tests/test_virtual_chassis.py index a963a55f236..9f4d6ddedb9 100644 --- a/tests/test_virtual_chassis.py +++ b/tests/test_virtual_chassis.py @@ -1,4 +1,3 @@ -import pytest from swsscommon import swsscommon from dvslib.dvs_database import DVSDatabase import ast @@ -136,7 +135,6 @@ def test_voq_switch(self, vct): spcfg = ast.literal_eval(value) assert spcfg['count'] == sp_count, "Number of systems ports configured is invalid" - @pytest.mark.skip(reason="This test is not stable enough") def test_chassis_app_db_sync(self, vct): """Test chassis app db syncing. @@ -213,7 +211,6 @@ def test_chassis_system_interface(self, vct): # Remote system ports's switch id should not match local switch id assert spcfginfo["attached_switch_id"] != lc_switch_id, "RIF system port with wrong switch_id" - @pytest.mark.skip(reason="This test is not stable enough") def test_chassis_system_neigh(self, vct): """Test neigh record create/delete and syncing to chassis app db. @@ -384,11 +381,6 @@ def test_chassis_system_neigh(self, vct): assert mac == test_neigh_mac, "Encap index of remote neigh mismatch with allocated encap index" # Check for other mandatory attributes - # For remote neighbor, encap index must be imposed. So impose_index must be "true" - impose_index = remote_neigh_entry.get("SAI_NEIGHBOR_ENTRY_ATTR_ENCAP_IMPOSE_INDEX") - assert impose_index != "", "Impose index attribute is not programmed for remote neigh in ASIC_DB" - assert impose_index == "true", "Impose index attribute is false for remote neigh" - # For remote neighbors, is_local must be "false" is_local = remote_neigh_entry.get("SAI_NEIGHBOR_ENTRY_ATTR_IS_LOCAL") assert is_local != "", "is_local attribute is not programmed for remote neigh in ASIC_DB" @@ -470,7 +462,6 @@ def test_chassis_system_neigh(self, vct): # Cleanup inband if configuration self.del_inbandif_port(vct, inband_port) - @pytest.mark.skip(reason="This test is not stable enough") def test_chassis_system_lag(self, vct): """Test PortChannel in VOQ based chassis systems. @@ -607,7 +598,6 @@ def test_chassis_system_lag(self, vct): break - @pytest.mark.skip(reason="This test is not stable enough") def test_chassis_system_lag_id_allocator_table_full(self, vct): """Test lag id allocator table full. @@ -685,7 +675,6 @@ def test_chassis_system_lag_id_allocator_table_full(self, vct): break - @pytest.mark.skip(reason="This test is not stable enough") def test_chassis_system_lag_id_allocator_del_id(self, vct): """Test lag id allocator's release id and re-use id processing. diff --git a/tests/test_vnet.py b/tests/test_vnet.py index a41f9ee39f7..41217de92e3 100644 --- a/tests/test_vnet.py +++ b/tests/test_vnet.py @@ -314,7 +314,7 @@ def delete_phy_interface(dvs, ifname, ipaddr): time.sleep(2) -def create_vnet_entry(dvs, name, tunnel, vni, peer_list, scope=""): +def create_vnet_entry(dvs, name, tunnel, vni, peer_list, scope="", advertise_prefix=False): conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) @@ -327,6 +327,9 @@ def create_vnet_entry(dvs, name, tunnel, vni, peer_list, scope=""): if scope: attrs.append(('scope', scope)) + if advertise_prefix: + attrs.append(('advertise_prefix', 'true')) + # create the VXLAN tunnel Term entry in Config DB create_entry_tbl( conf_db, @@ -483,6 +486,23 @@ def check_remove_state_db_routes(dvs, vnet, prefix): assert vnet + '|' + prefix not in keys + +def check_routes_advertisement(dvs, prefix): + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(state_db, "ADVERTISE_NETWORK_TABLE") + keys = tbl.getKeys() + + assert prefix in keys + + +def check_remove_routes_advertisement(dvs, prefix): + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(state_db, "ADVERTISE_NETWORK_TABLE") + keys = tbl.getKeys() + + assert prefix not in keys + + loopback_id = 0 def_vr_id = 0 switch_mac = None @@ -503,16 +523,17 @@ class VnetVxlanVrfTunnel(object): ASIC_NEXT_HOP_GROUP_MEMBER = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER" ASIC_BFD_SESSION = "ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION" - tunnel_map_ids = set() - tunnel_map_entry_ids = set() - tunnel_ids = set() - tunnel_term_ids = set() - tunnel_map_map = {} - tunnel = {} - vnet_vr_ids = set() - vr_map = {} - nh_ids = {} - nhg_ids = {} + def __init__(self): + self.tunnel_map_ids = set() + self.tunnel_map_entry_ids = set() + self.tunnel_ids = set() + self.tunnel_term_ids = set() + self.tunnel_map_map = {} + self.tunnel = {} + self.vnet_vr_ids = set() + self.vr_map = {} + self.nh_ids = {} + self.nhg_ids = {} def fetch_exist_entries(self, dvs): self.vnet_vr_ids = get_exist_entries(dvs, self.ASIC_VRF_TABLE) @@ -798,7 +819,7 @@ def serialize_endpoint_group(self, endpoints): endpoints.sort() return ",".join(endpoints) - def check_next_hop_group_member(self, dvs, nhg, expected_endpoint, expected_attrs): + def check_next_hop_group_member(self, dvs, nhg, ordered_ecmp, expected_endpoint, expected_attrs): expected_endpoint_str = self.serialize_endpoint_group(expected_endpoint) asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) tbl_nhgm = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP_GROUP_MEMBER) @@ -817,11 +838,17 @@ def check_next_hop_group_member(self, dvs, nhg, expected_endpoint, expected_attr endpoint = nh_fvs["SAI_NEXT_HOP_ATTR_IP"] endpoints.append(endpoint) assert endpoint in expected_attrs + if ordered_ecmp == "true": + assert fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID"] == expected_attrs[endpoint]['SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID'] + del expected_attrs[endpoint]['SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID'] + else: + assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID") is None + check_object(asic_db, self.ASIC_NEXT_HOP, nh_key, expected_attrs[endpoint]) assert self.serialize_endpoint_group(endpoints) == expected_endpoint_str - def check_vnet_ecmp_routes(self, dvs, name, endpoints, tunnel, mac=[], vni=[], route_ids=[], nhg=""): + def check_vnet_ecmp_routes(self, dvs, name, endpoints, tunnel, mac=[], vni=[], route_ids=[], nhg="", ordered_ecmp="false", nh_seq_id=None): asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) endpoint_str = name + "|" + self.serialize_endpoint_group(endpoints) @@ -839,6 +866,8 @@ def check_vnet_ecmp_routes(self, dvs, name, endpoints, tunnel, mac=[], vni=[], r expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_VNI': vni[idx]}) if mac and mac[idx]: expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_MAC': mac[idx]}) + if ordered_ecmp == "true" and nh_seq_id: + expected_attr.update({'SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID': nh_seq_id[idx]}) expected_attrs[endpoint] = expected_attr if nhg: @@ -853,12 +882,12 @@ def check_vnet_ecmp_routes(self, dvs, name, endpoints, tunnel, mac=[], vni=[], r # Check routes in ingress VRF expected_nhg_attr = { - "SAI_NEXT_HOP_GROUP_ATTR_TYPE": "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP", + "SAI_NEXT_HOP_GROUP_ATTR_TYPE": "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP" if ordered_ecmp == "false" else "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_ORDERED_ECMP", } check_object(asic_db, self.ASIC_NEXT_HOP_GROUP, new_nhg, expected_nhg_attr) # Check nexthop group member - self.check_next_hop_group_member(dvs, new_nhg, endpoints, expected_attrs) + self.check_next_hop_group_member(dvs, new_nhg, ordered_ecmp, endpoints, expected_attrs) if route_ids: new_route = route_ids @@ -901,6 +930,32 @@ class TestVnetOrch(object): def get_vnet_obj(self): return VnetVxlanVrfTunnel() + @pytest.fixture(params=["true", "false"]) + def ordered_ecmp(self, dvs, request): + + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + if request.param == "true": + create_entry_pst( + app_db, + "SWITCH_TABLE", ':', "switch", + [ + ('ordered_ecmp', 'true') + ], + ) + dvs.get_state_db().wait_for_field_match("SWITCH_CAPABILITY", "switch", {"ORDERED_ECMP_CAPABLE": "true"}) + + yield request.param + + if request.param == "true": + create_entry_pst( + app_db, + "SWITCH_TABLE", ':', "switch", + [ + ('ordered_ecmp', 'false') + ], + ) + dvs.get_state_db().wait_for_field_match("SWITCH_CAPABILITY", "switch", {"ORDERED_ECMP_CAPABLE": "false"}) + ''' Test 1 - Create Vlan Interface, Tunnel and Vnet ''' @@ -929,6 +984,8 @@ def test_vnet_orch_1(self, dvs, testlog): create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet_2000', '10.10.10.1') vnet_obj.check_vnet_routes(dvs, 'Vnet_2000', '10.10.10.1', tunnel_name) check_state_db_routes(dvs, 'Vnet_2000', "100.100.1.1/32", ['10.10.10.1']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.1.1/32") create_vnet_local_routes(dvs, "100.100.3.0/24", 'Vnet_2000', 'Vlan100') vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2000') @@ -950,6 +1007,8 @@ def test_vnet_orch_1(self, dvs, testlog): create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet_2001', '10.10.10.2', "00:12:34:56:78:9A") vnet_obj.check_vnet_routes(dvs, 'Vnet_2001', '10.10.10.2', tunnel_name, "00:12:34:56:78:9A") check_state_db_routes(dvs, 'Vnet_2001', "100.100.2.1/32", ['10.10.10.2']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.2.1/32") create_vnet_local_routes(dvs, "100.102.1.0/24", 'Vnet_2001', 'Ethernet4') vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2001') @@ -968,10 +1027,12 @@ def test_vnet_orch_1(self, dvs, testlog): delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet_2001') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2001') check_remove_state_db_routes(dvs, 'Vnet_2001', "100.100.2.1/32") + check_remove_routes_advertisement(dvs, "100.100.2.1/32") delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet_2000') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2000') check_remove_state_db_routes(dvs, 'Vnet_2000', "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") delete_phy_interface(dvs, "Ethernet4", "100.102.1.1/24") vnet_obj.check_del_router_interface(dvs, "Ethernet4") @@ -1013,21 +1074,28 @@ def test_vnet_orch_2(self, dvs, testlog): create_vnet_routes(dvs, "1.1.1.10/32", 'Vnet_1', '100.1.1.10') vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '100.1.1.10', tunnel_name) check_state_db_routes(dvs, 'Vnet_1', "1.1.1.10/32", ['100.1.1.10']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "1.1.1.10/32") vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "1.1.1.11/32", 'Vnet_1', '100.1.1.10') vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '100.1.1.10', tunnel_name) check_state_db_routes(dvs, 'Vnet_1', "1.1.1.11/32", ['100.1.1.10']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "1.1.1.11/32") vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "1.1.1.12/32", 'Vnet_1', '200.200.1.200') vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '200.200.1.200', tunnel_name) check_state_db_routes(dvs, 'Vnet_1', "1.1.1.12/32", ['200.200.1.200']) + check_remove_routes_advertisement(dvs, "1.1.1.12/32") vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "1.1.1.14/32", 'Vnet_1', '200.200.1.201') vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '200.200.1.201', tunnel_name) check_state_db_routes(dvs, 'Vnet_1', "1.1.1.14/32", ['200.200.1.201']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "1.1.1.14/32") create_vnet_local_routes(dvs, "1.1.10.0/24", 'Vnet_1', 'Vlan1001') vnet_obj.check_vnet_local_routes(dvs, 'Vnet_1') @@ -1044,11 +1112,15 @@ def test_vnet_orch_2(self, dvs, testlog): create_vnet_routes(dvs, "2.2.2.10/32", 'Vnet_2', '100.1.1.20') vnet_obj.check_vnet_routes(dvs, 'Vnet_2', '100.1.1.20', tunnel_name) check_state_db_routes(dvs, 'Vnet_2', "2.2.2.10/32", ['100.1.1.20']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "2.2.2.10/32") vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "2.2.2.11/32", 'Vnet_2', '100.1.1.20') vnet_obj.check_vnet_routes(dvs, 'Vnet_2', '100.1.1.20', tunnel_name) check_state_db_routes(dvs, 'Vnet_2', "2.2.2.11/32", ['100.1.1.20']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "2.2.2.11/32") create_vnet_local_routes(dvs, "2.2.10.0/24", 'Vnet_2', 'Vlan1002') vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2') @@ -1064,26 +1136,32 @@ def test_vnet_orch_2(self, dvs, testlog): delete_vnet_routes(dvs, "2.2.2.11/32", 'Vnet_2') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2') check_remove_state_db_routes(dvs, 'Vnet_2', "2.2.2.11/32") + check_remove_routes_advertisement(dvs, "2.2.2.11/32") delete_vnet_routes(dvs, "2.2.2.10/32", 'Vnet_2') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2') check_remove_state_db_routes(dvs, 'Vnet_2', "2.2.2.10/32") + check_remove_routes_advertisement(dvs, "2.2.2.10/32") delete_vnet_routes(dvs, "1.1.1.14/32", 'Vnet_1') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_1') check_remove_state_db_routes(dvs, 'Vnet_1', "1.1.1.14/32") + check_remove_routes_advertisement(dvs, "1.1.1.14/32") delete_vnet_routes(dvs, "1.1.1.12/32", 'Vnet_1') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_1') check_remove_state_db_routes(dvs, 'Vnet_1', "1.1.1.12/32") + check_remove_routes_advertisement(dvs, "1.1.1.12/32") delete_vnet_routes(dvs, "1.1.1.11/32", 'Vnet_1') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_1') check_remove_state_db_routes(dvs, 'Vnet_1', "1.1.1.11/32") + check_remove_routes_advertisement(dvs, "1.1.1.11/32") delete_vnet_routes(dvs, "1.1.1.10/32", 'Vnet_1') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_1') check_remove_state_db_routes(dvs, 'Vnet_1', "1.1.1.10/32") + check_remove_routes_advertisement(dvs, "1.1.1.10/32") delete_vlan_interface(dvs, "Vlan1002", "2.2.10.1/24") vnet_obj.check_del_router_interface(dvs, "Vlan1002") @@ -1131,11 +1209,15 @@ def test_vnet_orch_3(self, dvs, testlog): create_vnet_routes(dvs, "5.5.5.10/32", 'Vnet_10', '50.1.1.10') vnet_obj.check_vnet_routes(dvs, 'Vnet_10', '50.1.1.10', tunnel_name) check_state_db_routes(dvs, 'Vnet_10', "5.5.5.10/32", ['50.1.1.10']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "5.5.5.10/32") vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "8.8.8.10/32", 'Vnet_20', '80.1.1.20') vnet_obj.check_vnet_routes(dvs, 'Vnet_10', '80.1.1.20', tunnel_name) check_state_db_routes(dvs, 'Vnet_20', "8.8.8.10/32", ['80.1.1.20']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "8.8.8.10/32") create_vnet_local_routes(dvs, "5.5.10.0/24", 'Vnet_10', 'Vlan2001') vnet_obj.check_vnet_local_routes(dvs, 'Vnet_10') @@ -1154,10 +1236,12 @@ def test_vnet_orch_3(self, dvs, testlog): delete_vnet_routes(dvs, "5.5.5.10/32", 'Vnet_10') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_10') check_remove_state_db_routes(dvs, 'Vnet_10', "5.5.5.10/32") + check_remove_routes_advertisement(dvs, "5.5.5.10/32") delete_vnet_routes(dvs, "8.8.8.10/32", 'Vnet_20') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_20') check_remove_state_db_routes(dvs, 'Vnet_20', "8.8.8.10/32") + check_remove_routes_advertisement(dvs, "8.8.8.10/32") delete_vlan_interface(dvs, "Vlan2001", "5.5.10.1/24") vnet_obj.check_del_router_interface(dvs, "Vlan2001") @@ -1198,10 +1282,14 @@ def test_vnet_orch_4(self, dvs, testlog): create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet3001', '2000:1000:2000:3000:4000:5000:6000:7000') vnet_obj.check_vnet_routes(dvs, 'Vnet3001', '2000:1000:2000:3000:4000:5000:6000:7000', tunnel_name) check_state_db_routes(dvs, 'Vnet3001', "100.100.1.1/32", ['2000:1000:2000:3000:4000:5000:6000:7000']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.1.1/32") create_vnet_routes(dvs, "100.100.1.2/32", 'Vnet3001', '2000:1000:2000:3000:4000:5000:6000:7000') vnet_obj.check_vnet_routes(dvs, 'Vnet3001', '2000:1000:2000:3000:4000:5000:6000:7000', tunnel_name) check_state_db_routes(dvs, 'Vnet3001', "100.100.1.2/32", ['2000:1000:2000:3000:4000:5000:6000:7000']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.1.2/32") create_vnet_local_routes(dvs, "100.100.3.0/24", 'Vnet3001', 'Vlan300') vnet_obj.check_vnet_local_routes(dvs, 'Vnet3001') @@ -1222,6 +1310,8 @@ def test_vnet_orch_4(self, dvs, testlog): create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet3002', 'fd:2::34', "00:12:34:56:78:9A") vnet_obj.check_vnet_routes(dvs, 'Vnet3002', 'fd:2::34', tunnel_name, "00:12:34:56:78:9A") check_state_db_routes(dvs, 'Vnet3002', "100.100.2.1/32", ['fd:2::34']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.2.1/32") create_vnet_local_routes(dvs, "100.102.1.0/24", 'Vnet3002', 'Ethernet60') vnet_obj.check_vnet_local_routes(dvs, 'Vnet3002') @@ -1240,20 +1330,26 @@ def test_vnet_orch_4(self, dvs, testlog): create_vnet_routes(dvs, "5.5.5.10/32", 'Vnet3003', 'fd:2::35') vnet_obj.check_vnet_routes(dvs, 'Vnet3004', 'fd:2::35', tunnel_name) check_state_db_routes(dvs, 'Vnet3003', "5.5.5.10/32", ['fd:2::35']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "5.5.5.10/32") create_vnet_routes(dvs, "8.8.8.10/32", 'Vnet3004', 'fd:2::36') vnet_obj.check_vnet_routes(dvs, 'Vnet3003', 'fd:2::36', tunnel_name) check_state_db_routes(dvs, 'Vnet3004', "8.8.8.10/32", ['fd:2::36']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "8.8.8.10/32") # Clean-up and verify remove flows delete_vnet_routes(dvs, "5.5.5.10/32", 'Vnet3003') vnet_obj.check_del_vnet_routes(dvs, 'Vnet3003') check_remove_state_db_routes(dvs, 'Vnet3003', "5.5.5.10/32") + check_remove_routes_advertisement(dvs, "5.5.5.10/32") delete_vnet_routes(dvs, "8.8.8.10/32", 'Vnet3004') vnet_obj.check_del_vnet_routes(dvs, 'Vnet3004') check_remove_state_db_routes(dvs, 'Vnet3004', "8.8.8.10/32") + check_remove_routes_advertisement(dvs, "8.8.8.10/32") delete_vnet_entry(dvs, 'Vnet3003') vnet_obj.check_del_vnet_entry(dvs, 'Vnet3003') @@ -1264,6 +1360,7 @@ def test_vnet_orch_4(self, dvs, testlog): delete_vnet_routes(dvs, "100.100.2.1/24", 'Vnet3002') vnet_obj.check_del_vnet_routes(dvs, 'Vnet3002') check_remove_state_db_routes(dvs, 'Vnet3002', "100.100.2.1/24") + check_remove_routes_advertisement(dvs, "100.100.2.1/24") delete_vnet_local_routes(dvs, "100.102.1.0/24", 'Vnet3002') vnet_obj.check_del_vnet_local_routes(dvs, 'Vnet3002') @@ -1283,10 +1380,12 @@ def test_vnet_orch_4(self, dvs, testlog): delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet3001') vnet_obj.check_del_vnet_routes(dvs, 'Vnet3001') check_remove_state_db_routes(dvs, 'Vnet3001', "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") delete_vnet_routes(dvs, "100.100.1.2/32", 'Vnet3001') vnet_obj.check_del_vnet_routes(dvs, 'Vnet3001') check_remove_state_db_routes(dvs, 'Vnet3001', "100.100.1.2/32") + check_remove_routes_advertisement(dvs, "100.100.1.2/32") delete_vlan_interface(dvs, "Vlan300", "100.100.3.1/24") vnet_obj.check_del_router_interface(dvs, "Vlan300") @@ -1335,202 +1434,231 @@ def test_vnet_vxlan_multi_map(self, dvs, testlog): ''' Test 7 - Test for vnet tunnel routes with ECMP nexthop group ''' - def test_vnet_orch_7(self, dvs, testlog): + def test_vnet_orch_7(self, dvs, ordered_ecmp, testlog): vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_7' + tunnel_name = 'tunnel_7' + ordered_ecmp + vnet_name = 'Vnet7' + ordered_ecmp vnet_obj.fetch_exist_entries(dvs) create_vxlan_tunnel(dvs, tunnel_name, '7.7.7.7') - create_vnet_entry(dvs, 'Vnet7', tunnel_name, '10007', "") + create_vnet_entry(dvs, vnet_name, tunnel_name, '10007', "") - vnet_obj.check_vnet_entry(dvs, 'Vnet7') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet7', '10007') + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10007') vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '7.7.7.7') # Create an ECMP tunnel route vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet7', '7.0.0.1,7.0.0.2,7.0.0.3') - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet7', ['7.0.0.1', '7.0.0.2', '7.0.0.3'], tunnel_name) - check_state_db_routes(dvs, 'Vnet7', "100.100.1.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3']) + create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '7.0.0.3,7.0.0.2,7.0.0.1') + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['7.0.0.1', '7.0.0.2', '7.0.0.3'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Set the tunnel route to another nexthop group - set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet7', '7.0.0.1,7.0.0.2,7.0.0.3,7.0.0.4') - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet7', ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4'], tunnel_name, route_ids=route1) - check_state_db_routes(dvs, 'Vnet7', "100.100.1.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4']) + set_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '7.0.0.1,7.0.0.2,7.0.0.4,7.0.0.3') + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4'], tunnel_name, route_ids=route1, + ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Check the previous nexthop group is removed vnet_obj.fetch_exist_entries(dvs) assert nhg1_1 not in vnet_obj.nhgs # Create another tunnel route to the same set of endpoints - create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet7', '7.0.0.1,7.0.0.2,7.0.0.3,7.0.0.4') - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet7', ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4'], tunnel_name) - check_state_db_routes(dvs, 'Vnet7', "100.100.2.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4']) + create_vnet_routes(dvs, "100.100.2.1/32", vnet_name, '7.0.0.1,7.0.0.2,7.0.0.3,7.0.0.4') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4'], tunnel_name, + ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4']) + check_remove_routes_advertisement(dvs, "100.100.2.1/32") assert nhg2_1 == nhg1_2 # Remove one of the tunnel routes - delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet7') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet7', ["100.100.1.1/32"]) - check_remove_state_db_routes(dvs, 'Vnet7', "100.100.1.1/32") + delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Check the nexthop group still exists vnet_obj.fetch_exist_entries(dvs) assert nhg1_2 in vnet_obj.nhgs # Remove the other tunnel route - delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet7') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet7', ["100.100.2.1/32"]) - check_remove_state_db_routes(dvs, 'Vnet7', "100.100.2.1/32") + delete_vnet_routes(dvs, "100.100.2.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.2.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.2.1/32") + check_remove_routes_advertisement(dvs, "100.100.2.1/32") # Check the nexthop group is removed vnet_obj.fetch_exist_entries(dvs) assert nhg2_1 not in vnet_obj.nhgs - delete_vnet_entry(dvs, 'Vnet7') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet7') + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) ''' Test 8 - Test for ipv6 vnet tunnel routes with ECMP nexthop group ''' - def test_vnet_orch_8(self, dvs, testlog): + def test_vnet_orch_8(self, dvs, ordered_ecmp, testlog): + vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_8' + tunnel_name = 'tunnel_8' + ordered_ecmp + vnet_name = 'Vnet8' + ordered_ecmp + vnet_obj.fetch_exist_entries(dvs) create_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') - create_vnet_entry(dvs, 'Vnet8', tunnel_name, '10008', "") + create_vnet_entry(dvs, vnet_name, tunnel_name, '10008', "") - vnet_obj.check_vnet_entry(dvs, 'Vnet8') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet8', '10008') + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10008') vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') # Create an ECMP tunnel route vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet8', 'fd:8:1::1,fd:8:1::2,fd:8:1::3') - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet8', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3'], tunnel_name) - check_state_db_routes(dvs, 'Vnet8', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3']) + create_vnet_routes(dvs, "fd:8:10::32/128", vnet_name, 'fd:8:1::1,fd:8:1::3,fd:8:1::2') + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3']) + check_state_db_routes(dvs, vnet_name, "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3']) + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") # Set the tunnel route to another nexthop group - set_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet8', 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet8', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name, route_ids=route1) - check_state_db_routes(dvs, 'Vnet8', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) + set_vnet_routes(dvs, "fd:8:10::32/128", vnet_name, 'fd:8:1::2,fd:8:1::3,fd:8:1::1,fd:8:1::4') + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name, route_ids=route1, + ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) + check_state_db_routes(dvs, vnet_name, "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") # Check the previous nexthop group is removed vnet_obj.fetch_exist_entries(dvs) assert nhg1_1 not in vnet_obj.nhgs # Create another tunnel route to the same set of endpoints - create_vnet_routes(dvs, "fd:8:20::32/128", 'Vnet8', 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet8', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name) - check_state_db_routes(dvs, 'Vnet8', "fd:8:20::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) + create_vnet_routes(dvs, "fd:8:20::32/128", vnet_name, 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name, + ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) + check_state_db_routes(dvs, vnet_name, "fd:8:20::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) + check_remove_routes_advertisement(dvs, "fd:8:20::32/128") assert nhg2_1 == nhg1_2 # Create another tunnel route with ipv4 prefix to the same set of endpoints - create_vnet_routes(dvs, "8.0.0.0/24", 'Vnet8', 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') - route3, nhg3_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet8', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name) - check_state_db_routes(dvs, 'Vnet8', "8.0.0.0/24", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) + create_vnet_routes(dvs, "8.0.0.0/24", vnet_name, 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') + route3, nhg3_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name, + ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) + check_state_db_routes(dvs, vnet_name, "8.0.0.0/24", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) + check_remove_routes_advertisement(dvs, "8.0.0.0/24") assert nhg3_1 == nhg1_2 # Remove one of the tunnel routes - delete_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet8') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet8', ["fd:8:10::32/128"]) - check_remove_state_db_routes(dvs, 'Vnet8', "fd:8:10::32/128") + delete_vnet_routes(dvs, "fd:8:10::32/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:8:10::32/128"]) + check_remove_state_db_routes(dvs, vnet_name, "fd:8:10::32/128") + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") # Check the nexthop group still exists vnet_obj.fetch_exist_entries(dvs) assert nhg1_2 in vnet_obj.nhgs # Remove tunnel route 2 - delete_vnet_routes(dvs, "fd:8:20::32/128", 'Vnet8') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet8', ["fd:8:20::32/128"]) - check_remove_state_db_routes(dvs, 'Vnet8', "fd:8:20::32/128") + delete_vnet_routes(dvs, "fd:8:20::32/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:8:20::32/128"]) + check_remove_state_db_routes(dvs, vnet_name, "fd:8:20::32/128") + check_remove_routes_advertisement(dvs, "fd:8:20::32/128") # Remove tunnel route 3 - delete_vnet_routes(dvs, "8.0.0.0/24", 'Vnet8') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet8', ["8.0.0.0/24"]) - check_remove_state_db_routes(dvs, 'Vnet8', "8.0.0.0/24") + delete_vnet_routes(dvs, "8.0.0.0/24", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["8.0.0.0/24"]) + check_remove_state_db_routes(dvs, vnet_name, "8.0.0.0/24") + check_remove_routes_advertisement(dvs, "8.0.0.0/24") # Check the nexthop group is removed vnet_obj.fetch_exist_entries(dvs) assert nhg2_1 not in vnet_obj.nhgs - delete_vnet_entry(dvs, 'Vnet8') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet8') + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) ''' Test 9 - Test for vnet tunnel routes with ECMP nexthop group with endpoint health monitor ''' - def test_vnet_orch_9(self, dvs, testlog): + def test_vnet_orch_9(self, dvs, ordered_ecmp, testlog): vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_9' + tunnel_name = 'tunnel_9' + ordered_ecmp + vnet_name = 'Vnet9' + ordered_ecmp vnet_obj.fetch_exist_entries(dvs) create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') - create_vnet_entry(dvs, 'Vnet9', tunnel_name, '10009', "") + create_vnet_entry(dvs, vnet_name, tunnel_name, '10009', "") - vnet_obj.check_vnet_entry(dvs, 'Vnet9') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet9', '10009') + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10009') vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet9', '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') + create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') # default bfd status is down, route should not be programmed in this status - vnet_obj.check_del_vnet_routes(dvs, 'Vnet9', ["100.100.1.1/32"]) - check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", []) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Route should be properly configured when all bfd session states go up - update_bfd_session_state(dvs, '9.1.0.1', 'Up') update_bfd_session_state(dvs, '9.1.0.2', 'Up') update_bfd_session_state(dvs, '9.1.0.3', 'Up') + update_bfd_session_state(dvs, '9.1.0.1', 'Up') time.sleep(2) - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name) - check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Remove endpoint from group if it goes down update_bfd_session_state(dvs, '9.1.0.2', 'Down') time.sleep(2) - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) - check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", ['9.0.0.1', '9.0.0.3']) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '3']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.0.0.1', '9.0.0.3']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Create another tunnel route with endpoint group overlapped with route1 vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet9', '9.0.0.1,9.0.0.2,9.0.0.5', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.5') - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1'], tunnel_name) - check_state_db_routes(dvs, 'Vnet9', "100.100.2.1/32", ['9.0.0.1']) + create_vnet_routes(dvs, "100.100.2.1/32", vnet_name, '9.0.0.1,9.0.0.2,9.0.0.5', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.5') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1']) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['9.0.0.1']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Update BFD session state and verify route change update_bfd_session_state(dvs, '9.1.0.5', 'Up') time.sleep(2) - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1', '9.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1) - check_state_db_routes(dvs, 'Vnet9', "100.100.2.1/32", ['9.0.0.1', '9.0.0.5']) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '3']) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['9.0.0.1', '9.0.0.5']) + check_remove_routes_advertisement(dvs, "100.100.2.1/32") # Update BFD state and check route nexthop update_bfd_session_state(dvs, '9.1.0.3', 'Down') time.sleep(2) - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1'], tunnel_name, route_ids=route1, nhg=nhg1_1) - check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", ['9.0.0.1']) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1'], tunnel_name, route_ids=route1, nhg=nhg1_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.0.0.1']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Set the route1 to a new group - set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet9', '9.0.0.1,9.0.0.2,9.0.0.3,9.0.0.4', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4') + set_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '9.0.0.1,9.0.0.2,9.0.0.3,9.0.0.4', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4') update_bfd_session_state(dvs, '9.1.0.4', 'Up') time.sleep(2) - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1', '9.0.0.4'], tunnel_name, route_ids=route1) - check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", ['9.0.0.1', '9.0.0.4']) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.4'], tunnel_name, route_ids=route1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '4']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.0.0.1', '9.0.0.4']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Check the previous nexthop group is removed vnet_obj.fetch_exist_entries(dvs) @@ -1539,8 +1667,9 @@ def test_vnet_orch_9(self, dvs, testlog): # Set BFD session state for a down endpoint to up update_bfd_session_state(dvs, '9.1.0.2', 'Up') time.sleep(2) - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1', '9.0.0.2', '9.0.0.4'], tunnel_name, route_ids=route1, nhg=nhg1_2) - check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.4']) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.2', '9.0.0.4'], tunnel_name, route_ids=route1, nhg=nhg1_2, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '4']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.4']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Set all endpoint to down state update_bfd_session_state(dvs, '9.1.0.1', 'Down') @@ -1550,15 +1679,19 @@ def test_vnet_orch_9(self, dvs, testlog): time.sleep(2) # Confirm the tunnel route is updated in ASIC - vnet_obj.check_del_vnet_routes(dvs, 'Vnet9', ["100.100.1.1/32"]) - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1) - check_state_db_routes(dvs, 'Vnet9', "100.100.2.1/32", ['9.0.0.5']) - check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", []) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['3']) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['9.0.0.5']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.2.1/32") # Remove tunnel route2 - delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet9') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet9', ["100.100.2.1/32"]) - check_remove_state_db_routes(dvs, 'Vnet9', "100.100.2.1/32") + delete_vnet_routes(dvs, "100.100.2.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.2.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.2.1/32") + check_remove_routes_advertisement(dvs, "100.100.2.1/32") # Check the corresponding nexthop group is removed vnet_obj.fetch_exist_entries(dvs) @@ -1569,9 +1702,10 @@ def test_vnet_orch_9(self, dvs, testlog): check_bfd_session(dvs, ['9.1.0.1', '9.1.0.2', '9.1.0.3', '9.1.0.4']) # Remove tunnel route 1 - delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet9') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet9', ["100.100.1.1/32"]) - check_remove_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32") + delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Check the previous nexthop group is removed vnet_obj.fetch_exist_entries(dvs) @@ -1580,75 +1714,83 @@ def test_vnet_orch_9(self, dvs, testlog): # Confirm the BFD sessions are removed check_del_bfd_session(dvs, ['9.1.0.1', '9.1.0.2', '9.1.0.3', '9.1.0.4', '9.1.0.5']) - delete_vnet_entry(dvs, 'Vnet9') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet9') + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) ''' Test 10 - Test for ipv6 vnet tunnel routes with ECMP nexthop group with endpoint health monitor ''' - def test_vnet_orch_10(self, dvs, testlog): + def test_vnet_orch_10(self, dvs, ordered_ecmp, testlog): vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_10' + tunnel_name = 'tunnel_10' + ordered_ecmp + vnet_name = 'Vnet10' + ordered_ecmp vnet_obj.fetch_exist_entries(dvs) create_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32') - create_vnet_entry(dvs, 'Vnet10', tunnel_name, '10010', "") + create_vnet_entry(dvs, vnet_name, tunnel_name, '10010', "") - vnet_obj.check_vnet_entry(dvs, 'Vnet10') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet10', '10010') + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10010') vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32') vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "fd:10:10::1/128", 'Vnet10', 'fd:10:1::1,fd:10:1::2,fd:10:1::3', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3') + create_vnet_routes(dvs, "fd:10:10::1/128", vnet_name, 'fd:10:1::1,fd:10:1::2,fd:10:1::3', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3') # default bfd status is down, route should not be programmed in this status - vnet_obj.check_del_vnet_routes(dvs, 'Vnet10', ["fd:10:10::1/128"]) - check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", []) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:10::1/128"]) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", []) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") # Route should be properly configured when all bfd session states go up - update_bfd_session_state(dvs, 'fd:10:2::1', 'Up') update_bfd_session_state(dvs, 'fd:10:2::2', 'Up') update_bfd_session_state(dvs, 'fd:10:2::3', 'Up') + update_bfd_session_state(dvs, 'fd:10:2::1', 'Up') time.sleep(2) - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3'], tunnel_name) - check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3']) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3']) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3']) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") # Remove endpoint from group if it goes down update_bfd_session_state(dvs, 'fd:10:2::2', 'Down') time.sleep(2) - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::3'], tunnel_name, route_ids=route1, nhg=nhg1_1) - check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::3']) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::3'], tunnel_name, route_ids=route1, nhg=nhg1_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '3']) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::3']) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") # Create another tunnel route with endpoint group overlapped with route1 vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "fd:10:20::1/128", 'Vnet10', 'fd:10:1::1,fd:10:1::2,fd:10:1::5', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::5') - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1'], tunnel_name) - check_state_db_routes(dvs, 'Vnet10', "fd:10:20::1/128", ['fd:10:1::1']) + create_vnet_routes(dvs, "fd:10:20::1/128", vnet_name, 'fd:10:1::1,fd:10:1::2,fd:10:1::5', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::5') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1']) + check_state_db_routes(dvs, vnet_name, "fd:10:20::1/128", ['fd:10:1::1']) + check_remove_routes_advertisement(dvs, "fd:10:20::1/128") # Update BFD session state and verify route change update_bfd_session_state(dvs, 'fd:10:2::5', 'Up') time.sleep(2) - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::5'], tunnel_name, route_ids=route2, nhg=nhg2_1) - check_state_db_routes(dvs, 'Vnet10', "fd:10:20::1/128", ['fd:10:1::1', 'fd:10:1::5']) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::5'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '3']) + check_state_db_routes(dvs, vnet_name, "fd:10:20::1/128", ['fd:10:1::1', 'fd:10:1::5']) + check_remove_routes_advertisement(dvs, "fd:10:20::1/128") # Update BFD state and check route nexthop update_bfd_session_state(dvs, 'fd:10:2::3', 'Down') update_bfd_session_state(dvs, 'fd:10:2::2', 'Up') time.sleep(2) - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::2'], tunnel_name, route_ids=route1, nhg=nhg1_1) - check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2']) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::2'], tunnel_name, route_ids=route1, nhg=nhg1_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2']) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2']) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") # Set the route to a new group - set_vnet_routes(dvs, "fd:10:10::1/128", 'Vnet10', 'fd:10:1::1,fd:10:1::2,fd:10:1::3,fd:10:1::4', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3,fd:10:2::4') + set_vnet_routes(dvs, "fd:10:10::1/128", vnet_name, 'fd:10:1::1,fd:10:1::2,fd:10:1::3,fd:10:1::4', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3,fd:10:2::4') update_bfd_session_state(dvs, 'fd:10:2::4', 'Up') time.sleep(2) - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::4'], tunnel_name, route_ids=route1) - check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::4']) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::4'], tunnel_name, route_ids=route1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '4']) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::4']) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") # Check the previous nexthop group is removed vnet_obj.fetch_exist_entries(dvs) assert nhg1_1 not in vnet_obj.nhgs @@ -1656,8 +1798,10 @@ def test_vnet_orch_10(self, dvs, testlog): # Set BFD session state for a down endpoint to up update_bfd_session_state(dvs, 'fd:10:2::3', 'Up') time.sleep(2) - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3', 'fd:10:1::4'], tunnel_name, route_ids=route1, nhg=nhg1_2) - check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3', 'fd:10:1::4']) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3', 'fd:10:1::4'], tunnel_name, route_ids=route1, nhg=nhg1_2, + ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3', 'fd:10:1::4']) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") # Set all endpoint to down state update_bfd_session_state(dvs, 'fd:10:2::1', 'Down') @@ -1667,15 +1811,19 @@ def test_vnet_orch_10(self, dvs, testlog): time.sleep(2) # Confirm the tunnel route is updated in ASIC - vnet_obj.check_del_vnet_routes(dvs, 'Vnet10', ["fd:10:10::1/128"]) - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::5'], tunnel_name, route_ids=route2, nhg=nhg2_1) - check_state_db_routes(dvs, 'Vnet10', "fd:10:20::1/128", ['fd:10:1::5']) - check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", []) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:10::1/128"]) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::5'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['3']) + check_state_db_routes(dvs, vnet_name, "fd:10:20::1/128", ['fd:10:1::5']) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", []) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:10:20::1/128") # Remove tunnel route2 - delete_vnet_routes(dvs, "fd:10:20::1/128", 'Vnet10') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet10', ["fd:10:20::1/128"]) - check_remove_state_db_routes(dvs, 'Vnet10', "fd:10:20::1/128") + delete_vnet_routes(dvs, "fd:10:20::1/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:20::1/128"]) + check_remove_state_db_routes(dvs, vnet_name, "fd:10:20::1/128") + check_remove_routes_advertisement(dvs, "fd:10:20::1/128") # Check the corresponding nexthop group is removed vnet_obj.fetch_exist_entries(dvs) @@ -1690,9 +1838,10 @@ def test_vnet_orch_10(self, dvs, testlog): check_bfd_session(dvs, ['fd:10:2::1', 'fd:10:2::2', 'fd:10:2::3', 'fd:10:2::4']) # Remove tunnel route 1 - delete_vnet_routes(dvs, "fd:10:10::1/128", 'Vnet10') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet10', ["fd:10:10::1/128"]) - check_remove_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128") + delete_vnet_routes(dvs, "fd:10:10::1/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:10::1/128"]) + check_remove_state_db_routes(dvs, vnet_name, "fd:10:10::1/128") + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") # Confirm the BFD sessions are removed check_del_bfd_session(dvs, ['fd:10:2::1', 'fd:10:2::2', 'fd:10:2::3', 'fd:10:2::4', 'fd:10:2::5']) @@ -1701,76 +1850,90 @@ def test_vnet_orch_10(self, dvs, testlog): vnet_obj.fetch_exist_entries(dvs) assert nhg1_2 not in vnet_obj.nhgs - delete_vnet_entry(dvs, 'Vnet10') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet10') + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) ''' Test 11 - Test for vnet tunnel routes with both single endpoint and ECMP group with endpoint health monitor ''' - def test_vnet_orch_11(self, dvs, testlog): + def test_vnet_orch_11(self, dvs, ordered_ecmp, testlog): vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_11' + tunnel_name = 'tunnel_11' + ordered_ecmp + vnet_name = 'Vnet11' + ordered_ecmp vnet_obj.fetch_exist_entries(dvs) create_vxlan_tunnel(dvs, tunnel_name, '11.11.11.11') - create_vnet_entry(dvs, 'Vnet11', tunnel_name, '100011', "") + create_vnet_entry(dvs, vnet_name, tunnel_name, '100011', "") - vnet_obj.check_vnet_entry(dvs, 'Vnet11') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet11', '100011') + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '100011') vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '11.11.11.11') vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet11', '11.0.0.1', ep_monitor='11.1.0.1') + create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '11.0.0.1', ep_monitor='11.1.0.1') # default bfd status is down, route should not be programmed in this status - vnet_obj.check_del_vnet_routes(dvs, 'Vnet11', ["100.100.1.1/32"]) - check_state_db_routes(dvs, 'Vnet11', "100.100.1.1/32", []) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Route should be properly configured when bfd session state goes up update_bfd_session_state(dvs, '11.1.0.1', 'Up') time.sleep(2) - vnet_obj.check_vnet_routes(dvs, 'Vnet11', '11.0.0.1', tunnel_name) - check_state_db_routes(dvs, 'Vnet11', "100.100.1.1/32", ['11.0.0.1']) + vnet_obj.check_vnet_routes(dvs, vnet_name, '11.0.0.1', tunnel_name) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['11.0.0.1']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Create another tunnel route with endpoint group overlapped with route1 vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet11', '11.0.0.1,11.0.0.2', ep_monitor='11.1.0.1,11.1.0.2') - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet11', ['11.0.0.1'], tunnel_name) - check_state_db_routes(dvs, 'Vnet11', "100.100.2.1/32", ['11.0.0.1']) + create_vnet_routes(dvs, "100.100.2.1/32", vnet_name, '11.0.0.2,11.0.0.1', ep_monitor='11.1.0.2,11.1.0.1') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['11.0.0.1'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1']) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['11.0.0.1']) + check_remove_routes_advertisement(dvs, "100.100.2.1/32") # Create a third tunnel route with another endpoint vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.3.1/32", 'Vnet11', '11.0.0.2', ep_monitor='11.1.0.2') + create_vnet_routes(dvs, "100.100.3.1/32", vnet_name, '11.0.0.2', ep_monitor='11.1.0.2') # Update BFD session state and verify route change update_bfd_session_state(dvs, '11.1.0.2', 'Up') time.sleep(2) - vnet_obj.check_vnet_routes(dvs, 'Vnet11', '11.0.0.2', tunnel_name) - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet11', ['11.0.0.1', '11.0.0.2'], tunnel_name, route_ids=route2, nhg=nhg2_1) - check_state_db_routes(dvs, 'Vnet11', "100.100.3.1/32", ['11.0.0.2']) - check_state_db_routes(dvs, 'Vnet11', "100.100.2.1/32", ['11.0.0.1', '11.0.0.2']) + vnet_obj.check_vnet_routes(dvs, vnet_name, '11.0.0.2', tunnel_name) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['11.0.0.1', '11.0.0.2'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2']) + check_state_db_routes(dvs, vnet_name, "100.100.3.1/32", ['11.0.0.2']) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['11.0.0.1', '11.0.0.2']) + check_remove_routes_advertisement(dvs, "100.100.2.1/32") + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.3.1/32") + update_bfd_session_state(dvs, '11.1.0.1', 'Down') time.sleep(2) - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet11', ['11.0.0.2'], tunnel_name, route_ids=route2, nhg=nhg2_1) - vnet_obj.check_del_vnet_routes(dvs, 'Vnet11', ["100.100.1.1/32"]) - check_state_db_routes(dvs, 'Vnet11', "100.100.2.1/32", ['11.0.0.2']) - check_state_db_routes(dvs, 'Vnet11', "100.100.1.1/32", []) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['11.0.0.2'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['2']) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['11.0.0.2']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.2.1/32") + # Set the route1 to a new endpoint vnet_obj.fetch_exist_entries(dvs) - set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet11', '11.0.0.2', ep_monitor='11.1.0.2') - vnet_obj.check_vnet_routes(dvs, 'Vnet11', '11.0.0.2', tunnel_name) - check_state_db_routes(dvs, 'Vnet11', "100.100.3.1/32", ['11.0.0.2']) + set_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '11.0.0.2', ep_monitor='11.1.0.2') + vnet_obj.check_vnet_routes(dvs, vnet_name, '11.0.0.2', tunnel_name) + check_state_db_routes(dvs, vnet_name, "100.100.3.1/32", ['11.0.0.2']) + check_remove_routes_advertisement(dvs, "100.100.3.1/32") # Remove tunnel route2 - delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet11') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet11', ["100.100.2.1/32"]) - check_remove_state_db_routes(dvs, 'Vnet11', "100.100.2.1/32") + delete_vnet_routes(dvs, "100.100.2.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.2.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.2.1/32") + check_remove_routes_advertisement(dvs, "100.100.2.1/32") # Check the corresponding nexthop group is removed vnet_obj.fetch_exist_entries(dvs) @@ -1781,21 +1944,151 @@ def test_vnet_orch_11(self, dvs, testlog): check_bfd_session(dvs, ['11.1.0.2']) # Remove tunnel route 1 - delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet11') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet11', ["100.100.1.1/32"]) - check_remove_state_db_routes(dvs, 'Vnet11', "100.100.1.1/32") + delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Remove tunnel route 3 - delete_vnet_routes(dvs, "100.100.3.1/32", 'Vnet11') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet11', ["100.100.3.1/32"]) - check_remove_state_db_routes(dvs, 'Vnet11', "100.100.3.1/32") + delete_vnet_routes(dvs, "100.100.3.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.3.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.3.1/32") + check_remove_routes_advertisement(dvs, "100.100.3.1/32") # Confirm the BFD sessions are removed check_del_bfd_session(dvs, ['11.1.0.1', '11.1.0.2']) - delete_vnet_entry(dvs, 'Vnet11') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet11') + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) + + + ''' + Test 12 - Test for vnet tunnel routes with ECMP nexthop group with endpoint health monitor and route advertisement + ''' + def test_vnet_orch_12(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_12' + + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, '12.12.12.12') + create_vnet_entry(dvs, 'Vnet12', tunnel_name, '10012', "", advertise_prefix=True) + + vnet_obj.check_vnet_entry(dvs, 'Vnet12') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet12', '10012') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '12.12.12.12') + + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet12', '12.0.0.1,12.0.0.2,12.0.0.3', ep_monitor='12.1.0.1,12.1.0.2,12.1.0.3') + + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.1/32"]) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Route should be properly configured when all bfd session states go up + update_bfd_session_state(dvs, '12.1.0.1', 'Up') + update_bfd_session_state(dvs, '12.1.0.2', 'Up') + update_bfd_session_state(dvs, '12.1.0.3', 'Up') + time.sleep(2) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.2', '12.0.0.3'], tunnel_name) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.2', '12.0.0.3']) + check_routes_advertisement(dvs, "100.100.1.1/32") + + # Remove endpoint from group if it goes down + update_bfd_session_state(dvs, '12.1.0.2', 'Down') + time.sleep(2) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.3']) + check_routes_advertisement(dvs, "100.100.1.1/32") + + # Create another tunnel route with endpoint group overlapped with route1 + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet12', '12.0.0.1,12.0.0.2,12.0.0.5', ep_monitor='12.1.0.1,12.1.0.2,12.1.0.5') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1'], tunnel_name) + check_state_db_routes(dvs, 'Vnet12', "100.100.2.1/32", ['12.0.0.1']) + check_routes_advertisement(dvs, "100.100.1.1/32") + + # Update BFD session state and verify route change + update_bfd_session_state(dvs, '12.1.0.5', 'Up') + time.sleep(2) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1) + check_state_db_routes(dvs, 'Vnet12', "100.100.2.1/32", ['12.0.0.1', '12.0.0.5']) + check_routes_advertisement(dvs, "100.100.2.1/32") + + # Update BFD state and check route nexthop + update_bfd_session_state(dvs, '12.1.0.3', 'Down') + time.sleep(2) + + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1']) + check_routes_advertisement(dvs, "100.100.1.1/32") + + # Set the route1 to a new group + set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet12', '12.0.0.1,12.0.0.2,12.0.0.3,12.0.0.4', ep_monitor='12.1.0.1,12.1.0.2,12.1.0.3,12.1.0.4') + update_bfd_session_state(dvs, '12.1.0.4', 'Up') + time.sleep(2) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.4'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.4']) + check_routes_advertisement(dvs, "100.100.1.1/32") + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs + + # Set BFD session state for a down endpoint to up + update_bfd_session_state(dvs, '12.1.0.2', 'Up') + time.sleep(2) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.2', '12.0.0.4'], tunnel_name, route_ids=route1, nhg=nhg1_2) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.2', '12.0.0.4']) + check_routes_advertisement(dvs, "100.100.1.1/32") + + # Set all endpoint to down state + update_bfd_session_state(dvs, '12.1.0.1', 'Down') + update_bfd_session_state(dvs, '12.1.0.2', 'Down') + update_bfd_session_state(dvs, '12.1.0.3', 'Down') + update_bfd_session_state(dvs, '12.1.0.4', 'Down') + time.sleep(2) + + # Confirm the tunnel route is updated in ASIC + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.1/32"]) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1) + check_state_db_routes(dvs, 'Vnet12', "100.100.2.1/32", ['12.0.0.5']) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + check_routes_advertisement(dvs, "100.100.2.1/32") + + # Remove tunnel route2 + delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet12') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.2.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet12', "100.100.2.1/32") + check_remove_routes_advertisement(dvs, "100.100.2.1/32") + + # Check the corresponding nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg2_1 not in vnet_obj.nhgs + + # Check the BFD session specific to the endpoint group is removed while others exist + check_del_bfd_session(dvs, ['12.1.0.5']) + check_bfd_session(dvs, ['12.1.0.1', '12.1.0.2', '12.1.0.3', '12.1.0.4']) + + # Remove tunnel route 1 + delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet12') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_2 not in vnet_obj.nhgs + + # Confirm the BFD sessions are removed + check_del_bfd_session(dvs, ['12.1.0.1', '12.1.0.2', '12.1.0.3', '12.1.0.4', '12.1.0.5']) + delete_vnet_entry(dvs, 'Vnet12') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet12') # Add Dummy always-pass test at end as workaroud diff --git a/tests/test_warm_reboot.py b/tests/test_warm_reboot.py index 36028dfc698..1a10c9455a3 100644 --- a/tests/test_warm_reboot.py +++ b/tests/test_warm_reboot.py @@ -2365,6 +2365,35 @@ def test_EverflowWarmReboot(self, dvs, dvs_acl): dvs.start_swss() dvs.check_swss_ready() + def test_TunnelMgrdWarmRestart(self, dvs): + tunnel_name = "MuxTunnel0" + tunnel_table = "TUNNEL_DECAP_TABLE" + tunnel_params = { + "tunnel_type": "IPINIP", + "dst_ip": "10.1.0.32", + "dscp_mode": "uniform", + "ecn_mode": "standard", + "ttl_mode": "pipe" + } + + pubsub = dvs.SubscribeAppDbObject(tunnel_table) + + dvs.runcmd("config warm_restart enable swss") + config_db = dvs.get_config_db() + config_db.create_entry("TUNNEL", tunnel_name, tunnel_params) + + app_db = dvs.get_app_db() + app_db.wait_for_matching_keys(tunnel_table, [tunnel_name]) + + nadd, ndel = dvs.CountSubscribedObjects(pubsub) + assert nadd == len(tunnel_params) + assert ndel == 1 # Expect 1 deletion as part of table creation + + dvs.runcmd("supervisorctl restart tunnelmgrd") + dvs.check_services_ready() + nadd, ndel = dvs.CountSubscribedObjects(pubsub) + assert nadd == 0 + assert ndel == 0 # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying