diff --git a/.azure-pipelines/build-template.yml b/.azure-pipelines/build-template.yml index d351d5703a..632bdb3107 100644 --- a/.azure-pipelines/build-template.yml +++ b/.azure-pipelines/build-template.yml @@ -96,6 +96,8 @@ jobs: path: '$(Build.SourcesDirectory)/${{ parameters.sairedis_artifact_name }}' displayName: "Download sonic sairedis deb packages" - task: DownloadPipelineArtifact@2 + ${{ if eq(parameters.buildimage_pipeline, 141) }}: + continueOnError: True inputs: source: specific project: build @@ -105,6 +107,24 @@ jobs: runBranch: 'refs/heads/master' path: '$(Build.SourcesDirectory)/${{ parameters.buildimage_artifact_name }}' displayName: "Download sonic buildimage deb packages" + - script: | + buildimage_artifact_downloaded=n + [ -d "$(Build.SourcesDirectory)/${{ parameters.buildimage_artifact_name }}/target" ] && buildimage_artifact_downloaded=y + echo "buildimage_artifact_downloaded=$buildimage_artifact_downloaded" + echo "##vso[task.setvariable variable=buildimage_artifact_downloaded]$buildimage_artifact_downloaded" + condition: eq(${{ parameters.buildimage_pipeline }}, 141) + displayName: "Check if sonic buildimage deb packages downloaded" + - task: DownloadPipelineArtifact@2 + condition: and(eq(variables.buildimage_artifact_downloaded, 'n'), eq(${{ parameters.buildimage_pipeline }}, 141)) + inputs: + source: specific + project: build + pipeline: ${{ parameters.buildimage_pipeline }} + artifact: 'sonic-buildimage.marvell-armhf1' + runVersion: specific + runId: 80637 + path: '$(Build.SourcesDirectory)/${{ parameters.buildimage_artifact_name }}' + displayName: "Download sonic buildimage deb packages from 80637" - script: | cd $(Build.SourcesDirectory)/${{ parameters.buildimage_artifact_name }} sudo dpkg -i target/debs/buster/libnl-3-200_*.deb diff --git a/.azure-pipelines/gcov.yml b/.azure-pipelines/gcov.yml index e58ee2b0a5..83b13bb873 100644 --- a/.azure-pipelines/gcov.yml +++ b/.azure-pipelines/gcov.yml @@ -46,11 +46,23 @@ jobs: ${{ if eq(parameters.pool, 'default') }}: vmImage: 'ubuntu-20.04' + variables: + DIFF_COVER_CHECK_THRESHOLD: 80 + DIFF_COVER_ENABLE: 'true' + container: image: sonicdev-microsoft.azurecr.io:443/${{ parameters.sonic_slave }}:latest steps: + - script: | + set -ex + # Install .NET CORE + curl -sSL https://packages.microsoft.com/keys/microsoft.asc | sudo apt-key add - + sudo apt-add-repository https://packages.microsoft.com/debian/10/prod + sudo apt-get update + sudo apt-get install -y dotnet-sdk-5.0 + displayName: "Install .NET CORE" - script: | sudo apt-get install -y lcov displayName: "Install dependencies" @@ -93,6 +105,7 @@ jobs: sudo ./gcov_support.sh generate sudo ./gcov_support.sh merge_container_info $(Build.ArtifactStagingDirectory) sudo cp -rf gcov_output $(Build.ArtifactStagingDirectory) + sudo cp -rf $(Build.ArtifactStagingDirectory)/gcov_output/AllMergeReport/coverage.xml $(System.DefaultWorkingDirectory)/ ls -lh $(Build.ArtifactStagingDirectory) popd workingDirectory: $(Pipeline.Workspace) diff --git a/.azure-pipelines/test-docker-sonic-vs-template.yml b/.azure-pipelines/test-docker-sonic-vs-template.yml index 237778af4a..7b1b3c4163 100644 --- a/.azure-pipelines/test-docker-sonic-vs-template.yml +++ b/.azure-pipelines/test-docker-sonic-vs-template.yml @@ -1,7 +1,7 @@ parameters: - name: timeout type: number - default: 240 + default: 480 - name: log_artifact_name type: string @@ -21,10 +21,13 @@ jobs: displayName: vstest timeoutInMinutes: ${{ parameters.timeout }} - pool: - vmImage: 'ubuntu-20.04' + pool: sonic-common steps: + - script: | 
+ ls -A1 | xargs -I{} sudo rm -rf {} + displayName: "Clean workspace" + - checkout: self - task: DownloadPipelineArtifact@2 inputs: artifact: docker-sonic-vs diff --git a/cfgmgr/buffermgrdyn.cpp b/cfgmgr/buffermgrdyn.cpp index 0888e9e6c6..b4578c2370 100644 --- a/cfgmgr/buffermgrdyn.cpp +++ b/cfgmgr/buffermgrdyn.cpp @@ -22,7 +22,6 @@ * In internal maps: table name removed from the index * 2. Maintain maps for pools, profiles and PGs in CONFIG_DB and APPL_DB * 3. Keys of maps in this file don't contain the TABLE_NAME - * 3. */ using namespace std; using namespace swss; @@ -37,6 +36,7 @@ BufferMgrDynamic::BufferMgrDynamic(DBConnector *cfgDb, DBConnector *stateDb, DBC m_zeroProfilesLoaded(false), m_supportRemoving(true), m_cfgDefaultLosslessBufferParam(cfgDb, CFG_DEFAULT_LOSSLESS_BUFFER_PARAMETER), + m_cfgDeviceMetaDataTable(cfgDb, CFG_DEVICE_METADATA_TABLE_NAME), m_applBufferPoolTable(applDb, APP_BUFFER_POOL_TABLE_NAME), m_applBufferProfileTable(applDb, APP_BUFFER_PROFILE_TABLE_NAME), m_applBufferObjectTables({ProducerStateTable(applDb, APP_BUFFER_PG_TABLE_NAME), ProducerStateTable(applDb, APP_BUFFER_QUEUE_TABLE_NAME)}), @@ -73,6 +73,30 @@ BufferMgrDynamic::BufferMgrDynamic(DBConnector *cfgDb, DBConnector *stateDb, DBC string checkHeadroomPluginName = "buffer_check_headroom_" + platform + ".lua"; m_platform = platform; + m_specific_platform = platform; // default for non-Mellanox + m_model_number = 0; + + // Retrieve the type of mellanox platform + if (m_platform == "mellanox") + { + m_cfgDeviceMetaDataTable.hget("localhost", "platform", m_specific_platform); + if (!m_specific_platform.empty()) + { + // Mellanox model number follows "sn" in the platform name and is 4 digits long + std::size_t sn_pos = m_specific_platform.find("sn"); + if (sn_pos != std::string::npos) + { + std::string model_number = m_specific_platform.substr (sn_pos + 2, 4); + if (!model_number.empty()) + { + m_model_number = atoi(model_number.c_str()); + } + } + } + if (!m_model_number) { + SWSS_LOG_ERROR("Failed to retrieve Mellanox model number"); + } + } try { @@ -471,7 +495,9 @@ string BufferMgrDynamic::getDynamicProfileName(const string &speed, const string if (m_platform == "mellanox") { - if ((speed != "400000") && (lane_count == 8)) + if ((lane_count == 8) && + (((m_model_number / 1000 == 4) && (speed != "400000")) || + ((m_model_number / 1000 == 5) && (speed != "800000")))) { // On Mellanox platform, ports with 8 lanes have different(double) xon value then other ports // For ports at speed other than 400G can have @@ -482,7 +508,8 @@ string BufferMgrDynamic::getDynamicProfileName(const string &speed, const string // Eg. // - A 100G port with 8 lanes will use buffer profile "pg_profile_100000_5m_8lane_profile" // - A 100G port with 4 lanes will use buffer profile "pg_profile_100000_5m_profile" - // Currently, 400G ports can only have 8 lanes. So we don't add this to the profile + // Currently, for 4xxx models, 400G ports can only have 8 lanes, + // and for 5xxx models, 800G ports can only have 8 lanes. So we don't add this to the profile. buffer_profile_key = buffer_profile_key + "_8lane"; } } diff --git a/cfgmgr/buffermgrdyn.h b/cfgmgr/buffermgrdyn.h index d316aee73c..ef1e4f567f 100644 --- a/cfgmgr/buffermgrdyn.h +++ b/cfgmgr/buffermgrdyn.h @@ -150,7 +150,10 @@ class BufferMgrDynamic : public Orch using Orch::doTask; private: - std::string m_platform; + std::string m_platform; // vendor, e.g. "mellanox" + std::string m_specific_platform; // name of platform, e.g. 
"x86_64-mlnx_msn3420-r0" + unsigned int m_model_number; // model number extracted from specific platform, e.g. 3420 + std::vector m_bufferDirections; const std::string m_bufferObjectNames[BUFFER_DIR_MAX]; const std::string m_bufferDirectionNames[BUFFER_DIR_MAX]; @@ -234,7 +237,7 @@ class BufferMgrDynamic : public Orch // Other tables Table m_cfgDefaultLosslessBufferParam; - + Table m_cfgDeviceMetaDataTable; Table m_stateBufferMaximumTable; Table m_applPortTable; diff --git a/cfgmgr/coppmgr.cpp b/cfgmgr/coppmgr.cpp index 834b2c5ff0..1721cc8593 100644 --- a/cfgmgr/coppmgr.cpp +++ b/cfgmgr/coppmgr.cpp @@ -78,31 +78,42 @@ bool CoppMgr::checkTrapGroupPending(string trap_group_name) /* Feature name and CoPP Trap table name must match */ void CoppMgr::setFeatureTrapIdsStatus(string feature, bool enable) { - bool disabled_trap = (m_coppDisabledTraps.find(feature) != m_coppDisabledTraps.end()); - - if ((enable && !disabled_trap) || (!enable && disabled_trap)) + bool disabled_trap {true}; + string always_enabled; + if (m_coppTrapConfMap.find(feature) != m_coppTrapConfMap.end()) { - return; + always_enabled = m_coppTrapConfMap[feature].is_always_enabled; + } + if (always_enabled == "true" || isFeatureEnabled(feature)) + { + disabled_trap = false; } - if (m_coppTrapConfMap.find(feature) == m_coppTrapConfMap.end()) + if ((enable && !disabled_trap) || (!enable && disabled_trap)) { - if (!enable) - { - m_coppDisabledTraps.insert(feature); - } return; } + string trap_group = m_coppTrapConfMap[feature].trap_group; bool prev_group_state = checkTrapGroupPending(trap_group); - if (!enable) + // update features cache + auto state = "disabled"; + if (enable) { - m_coppDisabledTraps.insert(feature); + state = "enabled"; } - else + if (m_featuresCfgTable.find(feature) != m_featuresCfgTable.end()) { - m_coppDisabledTraps.erase(feature); + auto vect = m_featuresCfgTable[feature]; + for (long unsigned int i=0; i < vect.size(); i++) + { + if (vect[i].first == "state") + { + vect[i].second = state; + } + } + m_featuresCfgTable.at(feature) = vect; } /* Trap group moved to pending state when feature is disabled. Remove trap group @@ -140,25 +151,46 @@ void CoppMgr::setFeatureTrapIdsStatus(string feature, bool enable) } } -bool CoppMgr::isTrapIdDisabled(string trap_id) +bool CoppMgr::isFeatureEnabled(std::string feature) { - for (auto &m: m_coppDisabledTraps) + if (m_featuresCfgTable.find(feature) != m_featuresCfgTable.end()) { - if (m_coppTrapConfMap.find(m) == m_coppTrapConfMap.end()) + std::vector feature_fvs = m_featuresCfgTable[feature]; + for (auto i: feature_fvs) { - continue; + if (fvField(i) == "state" && (fvValue(i) == "enabled" || fvValue(i) == "always_enabled")) + { + return true; + } } - vector trap_id_list; + } + return false; +} - trap_id_list = tokenize(m_coppTrapConfMap[m].trap_ids, list_item_delimiter); - if(std::find(trap_id_list.begin(), trap_id_list.end(), trap_id) != trap_id_list.end()) +bool CoppMgr::isTrapIdDisabled(string trap_id) +{ + // check if trap is always_enabled + string trap_name; + for (auto &t: m_coppTrapConfMap) + { + if (m_coppTrapConfMap[t.first].trap_ids.find(trap_id) != string::npos) { - return true; + trap_name = t.first; + if (m_coppTrapConfMap[t.first].is_always_enabled == "true") + { + return false; + } + break; } + } + if (isFeatureEnabled(trap_name)) + { + return false; } - return false; + return true; } + void CoppMgr::mergeConfig(CoppCfg &init_cfg, CoppCfg &m_cfg, std::vector &cfg_keys, Table &cfgTable) { /* Read the init configuration first. 
If the same key is present in @@ -254,14 +286,7 @@ CoppMgr::CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c { std::vector feature_fvs; m_cfgFeatureTable.get(i, feature_fvs); - - for (auto j: feature_fvs) - { - if (fvField(j) == "state" && fvValue(j) == "disabled") - { - m_coppDisabledTraps.insert(i); - } - } + m_featuresCfgTable.emplace(i, feature_fvs); } mergeConfig(m_coppTrapInitCfg, trap_cfg, trap_cfg_keys, m_cfgCoppTrapTable); @@ -270,6 +295,7 @@ CoppMgr::CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c { string trap_group; string trap_ids; + string is_always_enabled = "false"; std::vector trap_fvs = i.second; for (auto j: trap_fvs) @@ -282,13 +308,22 @@ CoppMgr::CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c { trap_group = fvValue(j); } + else if (fvField(j) == COPP_ALWAYS_ENABLED_FIELD) + { + is_always_enabled = fvValue(j); + } } + if (!trap_group.empty() && !trap_ids.empty()) { addTrapIdsToTrapGroup(trap_group, trap_ids); m_coppTrapConfMap[i.first].trap_group = trap_group; m_coppTrapConfMap[i.first].trap_ids = trap_ids; - setCoppTrapStateOk(i.first); + m_coppTrapConfMap[i.first].is_always_enabled = is_always_enabled; + if (is_always_enabled == "true" || isFeatureEnabled(i.first)) + { + setCoppTrapStateOk(i.first); + } } } @@ -384,7 +419,6 @@ void CoppMgr::removeTrapIdsFromTrapGroup(string trap_group, string trap_ids) void CoppMgr::getTrapGroupTrapIds(string trap_group, string &trap_ids) { - trap_ids.clear(); for (auto it: m_coppTrapIdTrapGroupMap) { @@ -406,6 +440,36 @@ void CoppMgr::getTrapGroupTrapIds(string trap_group, string &trap_ids) } } +void CoppMgr::removeTrap(string key) +{ + string trap_ids; + std::vector fvs; + removeTrapIdsFromTrapGroup(m_coppTrapConfMap[key].trap_group, m_coppTrapConfMap[key].trap_ids); + getTrapGroupTrapIds(m_coppTrapConfMap[key].trap_group, trap_ids); + FieldValueTuple fv(COPP_TRAP_ID_LIST_FIELD, trap_ids); + fvs.push_back(fv); + if (!checkTrapGroupPending(m_coppTrapConfMap[key].trap_group)) + { + m_appCoppTable.set(m_coppTrapConfMap[key].trap_group, fvs); + setCoppGroupStateOk(m_coppTrapConfMap[key].trap_group); + } +} + +void CoppMgr::addTrap(string trap_ids, string trap_group) +{ + string trap_group_trap_ids; + std::vector fvs; + addTrapIdsToTrapGroup(trap_group, trap_ids); + getTrapGroupTrapIds(trap_group, trap_group_trap_ids); + FieldValueTuple fv1(COPP_TRAP_ID_LIST_FIELD, trap_group_trap_ids); + fvs.push_back(fv1); + if (!checkTrapGroupPending(trap_group)) + { + m_appCoppTable.set(trap_group, fvs); + setCoppGroupStateOk(trap_group); + } +} + void CoppMgr::doCoppTrapTask(Consumer &consumer) { auto it = consumer.m_toSync.begin(); @@ -418,12 +482,21 @@ void CoppMgr::doCoppTrapTask(Consumer &consumer) vector fvs; string trap_ids = ""; string trap_group = ""; + string is_always_enabled = ""; bool conf_present = false; if (m_coppTrapConfMap.find(key) != m_coppTrapConfMap.end()) { trap_ids = m_coppTrapConfMap[key].trap_ids; trap_group = m_coppTrapConfMap[key].trap_group; + if (m_coppTrapConfMap[key].is_always_enabled.empty()) + { + is_always_enabled = "false"; + } + else + { + is_always_enabled = m_coppTrapConfMap[key].is_always_enabled; + } conf_present = true; } @@ -441,6 +514,10 @@ void CoppMgr::doCoppTrapTask(Consumer &consumer) { trap_ids = fvValue(i); } + else if (fvField(i) == COPP_ALWAYS_ENABLED_FIELD) + { + is_always_enabled = fvValue(i); + } else if (fvField(i) == "NULL") { null_cfg = true; @@ -450,20 +527,9 @@ void CoppMgr::doCoppTrapTask(Consumer &consumer) { if 
(conf_present) { - SWSS_LOG_DEBUG("Deleting trap key %s", key.c_str()); - removeTrapIdsFromTrapGroup(m_coppTrapConfMap[key].trap_group, - m_coppTrapConfMap[key].trap_ids); - trap_ids.clear(); + removeTrap(key); setCoppTrapStateOk(key); - getTrapGroupTrapIds(m_coppTrapConfMap[key].trap_group, trap_ids); - fvs.clear(); - FieldValueTuple fv(COPP_TRAP_ID_LIST_FIELD, trap_ids); - fvs.push_back(fv); - if (!checkTrapGroupPending(m_coppTrapConfMap[key].trap_group)) - { - m_appCoppTable.set(m_coppTrapConfMap[key].trap_group, fvs); - setCoppGroupStateOk(m_coppTrapConfMap[key].trap_group); - } + m_coppTrapConfMap.erase(key); } it = consumer.m_toSync.erase(it); @@ -472,38 +538,126 @@ void CoppMgr::doCoppTrapTask(Consumer &consumer) /*Duplicate check*/ if (conf_present && (trap_group == m_coppTrapConfMap[key].trap_group) && - (trap_ids == m_coppTrapConfMap[key].trap_ids)) + (trap_ids == m_coppTrapConfMap[key].trap_ids) && + (is_always_enabled == m_coppTrapConfMap[key].is_always_enabled)) { it = consumer.m_toSync.erase(it); continue; } + /* Incomplete configuration. Do not process until both trap group * and trap_ids are available */ if (trap_group.empty() || trap_ids.empty()) { + if (is_always_enabled.empty()) + { + it = consumer.m_toSync.erase(it); + continue; + } + + if (is_always_enabled != m_coppTrapConfMap[key].is_always_enabled) + { + m_coppTrapConfMap[key].is_always_enabled = is_always_enabled; + if (is_always_enabled == "true") + { + if (m_coppTrapConfMap.find(key) != m_coppTrapConfMap.end()) + { + addTrap(m_coppTrapConfMap[key].trap_ids, m_coppTrapConfMap[key].trap_group); + } + // else if it has info in the init cfg map + else if (m_coppTrapInitCfg.find(key) != m_coppTrapInitCfg.end()) + { + auto fvs = m_coppTrapInitCfg[key]; + string init_trap_ids = ""; + string init_trap_group = ""; + for (auto i: fvs) + { + if (fvField(i) == COPP_TRAP_GROUP_FIELD) + { + init_trap_group = fvValue(i); + } + else if (fvField(i) == COPP_TRAP_ID_LIST_FIELD) + { + init_trap_ids = fvValue(i); + } + } + addTrap(init_trap_ids, init_trap_group); + } + } + else + { + /* if the value was changed from true to false, + check if there is a feature enabled. + if no, remove the trap. is yes, do nothing. */ + + m_coppTrapConfMap[key].is_always_enabled = is_always_enabled; + if (isFeatureEnabled(key)) + { + it = consumer.m_toSync.erase(it); + continue; + } + + removeTrap(key); + delCoppTrapStateOk(key); + } + it = consumer.m_toSync.erase(it); + continue; + } + } + /* if always_enabled field has been changed */ + if (conf_present && + (trap_group == m_coppTrapConfMap[key].trap_group) && + (trap_ids == m_coppTrapConfMap[key].trap_ids)) + { + m_coppTrapConfMap[key].is_always_enabled = is_always_enabled; + if (is_always_enabled == "true") + { + /* if the value was changed from false to true, + if the trap is not installed, install it. + otherwise, do nothing. */ + + // if the feature was not enabled, install the trap + if (!isFeatureEnabled(key)) + { + addTrap(trap_ids, trap_group); + } + + it = consumer.m_toSync.erase(it); + continue; + } + else + { + /* if the value was changed from true to false, + check if there is a feature enabled. + if no, remove the trap. is yes, do nothing. 
*/ + + if (isFeatureEnabled(key)) + { + it = consumer.m_toSync.erase(it); + continue; + } + + removeTrap(key); + delCoppTrapStateOk(key); + } it = consumer.m_toSync.erase(it); continue; } + /* Remove the current trap IDs and add the new trap IDS to recompute the - * trap IDs for the trap group + * trap IDs for the trap group */ if (conf_present) { removeTrapIdsFromTrapGroup(m_coppTrapConfMap[key].trap_group, m_coppTrapConfMap[key].trap_ids); } - fvs.clear(); - string trap_group_trap_ids; - addTrapIdsToTrapGroup(trap_group, trap_ids); - getTrapGroupTrapIds(trap_group, trap_group_trap_ids); - FieldValueTuple fv1(COPP_TRAP_ID_LIST_FIELD, trap_group_trap_ids); - fvs.push_back(fv1); - if (!checkTrapGroupPending(trap_group)) - { - m_appCoppTable.set(trap_group, fvs); - setCoppGroupStateOk(trap_group); - } + + m_coppTrapConfMap[key].trap_group = trap_group; + m_coppTrapConfMap[key].trap_ids = trap_ids; + m_coppTrapConfMap[key].is_always_enabled = is_always_enabled; + addTrap(trap_ids, trap_group); /* When the trap table's trap group is changed, the old trap group * should also be reprogrammed as some of its associated traps got @@ -511,7 +665,7 @@ void CoppMgr::doCoppTrapTask(Consumer &consumer) */ if (conf_present && (trap_group != m_coppTrapConfMap[key].trap_group)) { - trap_group_trap_ids.clear(); + string trap_group_trap_ids; fvs.clear(); getTrapGroupTrapIds(m_coppTrapConfMap[key].trap_group, trap_group_trap_ids); FieldValueTuple fv2(COPP_TRAP_ID_LIST_FIELD, trap_group_trap_ids); @@ -524,6 +678,7 @@ void CoppMgr::doCoppTrapTask(Consumer &consumer) } m_coppTrapConfMap[key].trap_group = trap_group; m_coppTrapConfMap[key].trap_ids = trap_ids; + m_coppTrapConfMap[key].is_always_enabled = is_always_enabled; setCoppTrapStateOk(key); } else if (op == DEL_COMMAND) @@ -546,8 +701,9 @@ void CoppMgr::doCoppTrapTask(Consumer &consumer) setCoppGroupStateOk(m_coppTrapConfMap[key].trap_group); } } - if (conf_present) + if (conf_present && !m_coppTrapConfMap[key].trap_group.empty() && !m_coppTrapConfMap[key].trap_ids.empty()) { + m_coppTrapConfMap.erase(key); } delCoppTrapStateOk(key); @@ -559,6 +715,7 @@ void CoppMgr::doCoppTrapTask(Consumer &consumer) if (m_coppTrapInitCfg.find(key) != m_coppTrapInitCfg.end()) { auto fvs = m_coppTrapInitCfg[key]; + is_always_enabled.clear(); for (auto i: fvs) { if (fvField(i) == COPP_TRAP_GROUP_FIELD) @@ -569,21 +726,24 @@ void CoppMgr::doCoppTrapTask(Consumer &consumer) { trap_ids = fvValue(i); } + else if (fvField(i) == COPP_ALWAYS_ENABLED_FIELD) + { + is_always_enabled = fvValue(i); + } } - vector g_fvs; - string trap_group_trap_ids; - addTrapIdsToTrapGroup(trap_group, trap_ids); - getTrapGroupTrapIds(trap_group, trap_group_trap_ids); - FieldValueTuple fv1(COPP_TRAP_ID_LIST_FIELD, trap_group_trap_ids); - g_fvs.push_back(fv1); - if (!checkTrapGroupPending(trap_group)) + if (is_always_enabled.empty()) { - m_appCoppTable.set(trap_group, g_fvs); - setCoppGroupStateOk(trap_group); + is_always_enabled = "false"; } + m_coppTrapConfMap[key].trap_group = trap_group; m_coppTrapConfMap[key].trap_ids = trap_ids; - setCoppTrapStateOk(key); + m_coppTrapConfMap[key].is_always_enabled = is_always_enabled; + if (is_always_enabled == "true" || isFeatureEnabled(key)) + { + addTrap(trap_ids, trap_group); + setCoppTrapStateOk(key); + } } } it = consumer.m_toSync.erase(it); @@ -706,6 +866,7 @@ void CoppMgr::doCoppGroupTask(Consumer &consumer) } } + void CoppMgr::doFeatureTask(Consumer &consumer) { auto it = consumer.m_toSync.begin(); @@ -715,17 +876,20 @@ void CoppMgr::doFeatureTask(Consumer 
&consumer) string key = kfvKey(t); string op = kfvOp(t); - vector fvs; string trap_ids; if (op == SET_COMMAND) { + if (m_featuresCfgTable.find(key) == m_featuresCfgTable.end()) + { + m_featuresCfgTable.emplace(key, kfvFieldsValues(t)); + } for (auto i : kfvFieldsValues(t)) { if (fvField(i) == "state") { bool status = false; - if (fvValue(i) == "enabled") + if (fvValue(i) == "enabled" || fvValue(i) == "always_enabled") { status = true; } diff --git a/cfgmgr/coppmgr.h b/cfgmgr/coppmgr.h index b010489f2e..1d53756fce 100644 --- a/cfgmgr/coppmgr.h +++ b/cfgmgr/coppmgr.h @@ -14,6 +14,7 @@ namespace swss { /* COPP Trap Table Fields */ #define COPP_TRAP_ID_LIST_FIELD "trap_ids" #define COPP_TRAP_GROUP_FIELD "trap_group" +#define COPP_ALWAYS_ENABLED_FIELD "always_enabled" /* COPP Group Table Fields */ #define COPP_GROUP_QUEUE_FIELD "queue" @@ -42,6 +43,7 @@ struct CoppTrapConf { std::string trap_ids; std::string trap_group; + std::string is_always_enabled; }; /* TrapName to TrapConf map */ @@ -70,10 +72,10 @@ class CoppMgr : public Orch CoppTrapConfMap m_coppTrapConfMap; CoppTrapIdTrapGroupMap m_coppTrapIdTrapGroupMap; CoppGroupFvs m_coppGroupFvs; - std::set m_coppDisabledTraps; CoppCfg m_coppGroupInitCfg; CoppCfg m_coppTrapInitCfg; - + CoppCfg m_featuresCfgTable; + void doTask(Consumer &consumer); void doCoppGroupTask(Consumer &consumer); @@ -96,8 +98,13 @@ class CoppMgr : public Orch std::vector &modified_fvs); void parseInitFile(void); bool isTrapGroupInstalled(std::string key); + bool isFeatureEnabled(std::string feature); void mergeConfig(CoppCfg &init_cfg, CoppCfg &m_cfg, std::vector &cfg_keys, Table &cfgTable); + void removeTrap(std::string key); + void addTrap(std::string trap_ids, std::string trap_group); + + }; } diff --git a/cfgmgr/intfmgr.cpp b/cfgmgr/intfmgr.cpp index fbea988203..1e00243b87 100644 --- a/cfgmgr/intfmgr.cpp +++ b/cfgmgr/intfmgr.cpp @@ -71,6 +71,13 @@ IntfMgr::IntfMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c setWarmReplayDoneState(); } } + + string swtype; + Table cfgDeviceMetaDataTable(cfgDb, CFG_DEVICE_METADATA_TABLE_NAME); + if(cfgDeviceMetaDataTable.hget("localhost", "switch_type", swtype)) + { + mySwitchType = swtype; + } } void IntfMgr::setIntfIp(const string &alias, const string &opCmd, @@ -90,9 +97,23 @@ void IntfMgr::setIntfIp(const string &alias, const string &opCmd, } else { + string metric = ""; + // Kernel adds connected route with default metric of 256. But the metric is not + // communicated to frr unless the ip address is added with explicit metric + // In voq system, We need the static route to the remote neighbor and connected + // route to have the same metric to enable BGP to choose paths from routes learned + // via eBGP and iBGP over the internal inband port be part of same ecmp group. + // For v4 both the metrics (connected and static) are default 0 so we do not need + // to set the metric explicitly. + if(mySwitchType == "voq") + { + metric = " metric 256"; + } + (prefixLen < 127) ? 
- (cmd << IP_CMD << " -6 address " << shellquote(opCmd) << " " << shellquote(ipPrefixStr) << " broadcast " << shellquote(broadcastIpStr) << " dev " << shellquote(alias)) : - (cmd << IP_CMD << " -6 address " << shellquote(opCmd) << " " << shellquote(ipPrefixStr) << " dev " << shellquote(alias)); + (cmd << IP_CMD << " -6 address " << shellquote(opCmd) << " " << shellquote(ipPrefixStr) << " broadcast " << shellquote(broadcastIpStr) << + " dev " << shellquote(alias) << metric) : + (cmd << IP_CMD << " -6 address " << shellquote(opCmd) << " " << shellquote(ipPrefixStr) << " dev " << shellquote(alias) << metric); } int ret = swss::exec(cmd.str(), res); diff --git a/cfgmgr/intfmgr.h b/cfgmgr/intfmgr.h index 307191454c..411b058cc2 100644 --- a/cfgmgr/intfmgr.h +++ b/cfgmgr/intfmgr.h @@ -37,6 +37,7 @@ class IntfMgr : public Orch std::set m_loopbackIntfList; std::set m_pendingReplayIntfList; std::set m_ipv6LinkLocalModeList; + std::string mySwitchType; void setIntfIp(const std::string &alias, const std::string &opCmd, const IpPrefix &ipPrefix); void setIntfVrf(const std::string &alias, const std::string &vrfName); diff --git a/cfgmgr/nbrmgr.cpp b/cfgmgr/nbrmgr.cpp index 39d8edf9b0..d6d5f410e1 100644 --- a/cfgmgr/nbrmgr.cpp +++ b/cfgmgr/nbrmgr.cpp @@ -509,7 +509,12 @@ bool NbrMgr::addKernelRoute(string odev, IpAddress ip_addr) } else { - cmd = string("") + IP_CMD + " -6 route add " + ip_str + "/128 dev " + odev; + // In voq system, We need the static route to the remote neighbor and connected + // route to have the same metric to enable BGP to choose paths from routes learned + // via eBGP and iBGP over the internal inband port be part of same ecmp group. + // For v4 both the metrics (connected and static) are default 0 so we do not need + // to set the metric explicitly. 
+ cmd = string("") + IP_CMD + " -6 route add " + ip_str + "/128 dev " + odev + " metric 256"; SWSS_LOG_NOTICE("IPv6 Route Add cmd: %s",cmd.c_str()); } diff --git a/cfgmgr/tunnelmgr.cpp b/cfgmgr/tunnelmgr.cpp index 7f4dc4dd3d..a81438470f 100644 --- a/cfgmgr/tunnelmgr.cpp +++ b/cfgmgr/tunnelmgr.cpp @@ -9,6 +9,7 @@ #include "tokenize.h" #include "shellcmd.h" #include "exec.h" +#include "warm_restart.h" using namespace std; using namespace swss; @@ -107,7 +108,8 @@ static int cmdIpTunnelRouteDel(const std::string& pfx, std::string & res) TunnelMgr::TunnelMgr(DBConnector *cfgDb, DBConnector *appDb, const std::vector &tableNames) : Orch(cfgDb, tableNames), m_appIpInIpTunnelTable(appDb, APP_TUNNEL_DECAP_TABLE_NAME), - m_cfgPeerTable(cfgDb, CFG_PEER_SWITCH_TABLE_NAME) + m_cfgPeerTable(cfgDb, CFG_PEER_SWITCH_TABLE_NAME), + m_cfgTunnelTable(cfgDb, CFG_TUNNEL_TABLE_NAME) { std::vector peer_keys; m_cfgPeerTable.getKeys(peer_keys); @@ -126,6 +128,23 @@ TunnelMgr::TunnelMgr(DBConnector *cfgDb, DBConnector *appDb, const std::vector tunnel_keys; + m_cfgTunnelTable.getKeys(tunnel_keys); + + for (auto tunnel: tunnel_keys) + { + m_tunnelReplay.insert(tunnel); + } + if (m_tunnelReplay.empty()) + { + finalizeWarmReboot(); + } + + } + auto consumerStateTable = new swss::ConsumerStateTable(appDb, APP_TUNNEL_ROUTE_TABLE_NAME, TableConsumable::DEFAULT_POP_BATCH_SIZE, default_orch_pri); @@ -191,6 +210,11 @@ void TunnelMgr::doTask(Consumer &consumer) ++it; } } + + if (!replayDone && m_tunnelReplay.empty() && WarmStart::isWarmStart()) + { + finalizeWarmReboot(); + } } bool TunnelMgr::doTunnelTask(const KeyOpFieldsValuesTuple & t) @@ -230,8 +254,16 @@ bool TunnelMgr::doTunnelTask(const KeyOpFieldsValuesTuple & t) SWSS_LOG_NOTICE("Peer/Remote IP not configured"); } - m_appIpInIpTunnelTable.set(tunnelName, kfvFieldsValues(t)); + /* If the tunnel is already in hardware (i.e. 
present in the replay), + * don't try to create it again since it will cause an OA crash + * (warmboot case) + */ + if (m_tunnelReplay.find(tunnelName) == m_tunnelReplay.end()) + { + m_appIpInIpTunnelTable.set(tunnelName, kfvFieldsValues(t)); + } } + m_tunnelReplay.erase(tunnelName); m_tunnelCache[tunnelName] = tunInfo; } else @@ -356,3 +388,13 @@ bool TunnelMgr::configIpTunnel(const TunnelInfo& tunInfo) return true; } + + +void TunnelMgr::finalizeWarmReboot() +{ + replayDone = true; + WarmStart::setWarmStartState("tunnelmgrd", WarmStart::REPLAYED); + SWSS_LOG_NOTICE("tunnelmgrd warmstart state set to REPLAYED"); + WarmStart::setWarmStartState("tunnelmgrd", WarmStart::RECONCILED); + SWSS_LOG_NOTICE("tunnelmgrd warmstart state set to RECONCILED"); +} diff --git a/cfgmgr/tunnelmgr.h b/cfgmgr/tunnelmgr.h index e2b601abe9..53d2f27278 100644 --- a/cfgmgr/tunnelmgr.h +++ b/cfgmgr/tunnelmgr.h @@ -4,6 +4,8 @@ #include "producerstatetable.h" #include "orch.h" +#include + namespace swss { struct TunnelInfo @@ -28,12 +30,18 @@ class TunnelMgr : public Orch bool configIpTunnel(const TunnelInfo& info); + void finalizeWarmReboot(); + ProducerStateTable m_appIpInIpTunnelTable; Table m_cfgPeerTable; + Table m_cfgTunnelTable; std::map m_tunnelCache; std::map m_intfCache; std::string m_peerIp; + + std::set m_tunnelReplay; + bool replayDone = false; }; } diff --git a/cfgmgr/tunnelmgrd.cpp b/cfgmgr/tunnelmgrd.cpp index 0165eb94b5..0a6a84eaeb 100644 --- a/cfgmgr/tunnelmgrd.cpp +++ b/cfgmgr/tunnelmgrd.cpp @@ -11,6 +11,7 @@ #include "exec.h" #include "schema.h" #include "tunnelmgr.h" +#include "warm_restart.h" using namespace std; using namespace swss; @@ -54,6 +55,9 @@ int main(int argc, char **argv) DBConnector cfgDb("CONFIG_DB", 0); DBConnector appDb("APPL_DB", 0); + WarmStart::initialize("tunnelmgrd", "swss"); + WarmStart::checkWarmStart("tunnelmgrd", "swss"); + TunnelMgr tunnelmgr(&cfgDb, &appDb, cfgTunTables); std::vector cfgOrchList = {&tunnelmgr}; diff --git a/cfgmgr/vxlanmgr.cpp b/cfgmgr/vxlanmgr.cpp index e45c593803..a054968483 100644 --- a/cfgmgr/vxlanmgr.cpp +++ b/cfgmgr/vxlanmgr.cpp @@ -1031,29 +1031,59 @@ int VxlanMgr::deleteVxlanNetdevice(std::string vxlan_dev_name) return swss::exec(cmd, res); } +std::vector VxlanMgr::parseNetDev(const string& stdout){ + std::vector netdevs; + std::regex device_name_pattern("^\\d+:\\s+([^:]+)"); + std::smatch match_result; + auto lines = tokenize(stdout, '\n'); + for (const std::string & line : lines) + { + SWSS_LOG_DEBUG("line : %s\n",line.c_str()); + if (!std::regex_search(line, match_result, device_name_pattern)) + { + continue; + } + std::string dev_name = match_result[1]; + netdevs.push_back(dev_name); + } + return netdevs; +} + void VxlanMgr::getAllVxlanNetDevices() { std::string stdout; - const std::string cmd = std::string("") + IP_CMD + " link show type vxlan"; + + // Get VxLan Netdev Interfaces + std::string cmd = std::string("") + IP_CMD + " link show type vxlan"; int ret = swss::exec(cmd, stdout); if (ret != 0) { - SWSS_LOG_ERROR("Cannot get devices by command : %s", cmd.c_str()); - return; + SWSS_LOG_ERROR("Cannot get vxlan devices by command : %s", cmd.c_str()); + stdout.clear(); } - std::regex device_name_pattern("^\\d+:\\s+([^:]+)"); - std::smatch match_result; - auto lines = tokenize(stdout, '\n'); - for (const std::string & line : lines) + std::vector netdevs = parseNetDev(stdout); + for (auto netdev : netdevs) { - SWSS_LOG_INFO("line : %s\n",line.c_str()); - if (!std::regex_search(line, match_result, device_name_pattern)) + 
m_vxlanNetDevices[netdev] = VXLAN; + } + + // Get VxLanIf Netdev Interfaces + cmd = std::string("") + IP_CMD + " link show type bridge"; + ret = swss::exec(cmd, stdout); + if (ret != 0) + { + SWSS_LOG_ERROR("Cannot get vxlanIf devices by command : %s", cmd.c_str()); + stdout.clear(); + } + netdevs = parseNetDev(stdout); + for (auto netdev : netdevs) + { + if (netdev.find(VXLAN_IF_NAME_PREFIX) == 0) { - continue; + m_vxlanNetDevices[netdev] = VXLAN_IF; } - std::string vxlan_dev_name = match_result[1]; - m_vxlanNetDevices[vxlan_dev_name] = vxlan_dev_name; } + return; } @@ -1150,8 +1180,21 @@ void VxlanMgr::clearAllVxlanDevices() { for (auto it = m_vxlanNetDevices.begin(); it != m_vxlanNetDevices.end();) { - SWSS_LOG_INFO("Deleting Stale NetDevice vxlandevname %s\n", (it->first).c_str()); - deleteVxlanNetdevice(it->first); + std::string netdev_name = it->first; + std::string netdev_type = it->second; + SWSS_LOG_INFO("Deleting Stale NetDevice %s, type: %s\n", netdev_name.c_str(), netdev_type.c_str()); + VxlanInfo info; + std::string res; + if (netdev_type.compare(VXLAN)) + { + info.m_vxlan = netdev_name; + cmdDeleteVxlan(info, res); + } + else if(netdev_type.compare(VXLAN_IF)) + { + info.m_vxlanIf = netdev_name; + cmdDeleteVxlanIf(info, res); + } it = m_vxlanNetDevices.erase(it); } } diff --git a/cfgmgr/vxlanmgr.h b/cfgmgr/vxlanmgr.h index 1988e253ae..b8bb61c498 100644 --- a/cfgmgr/vxlanmgr.h +++ b/cfgmgr/vxlanmgr.h @@ -6,6 +6,7 @@ #include "orch.h" #include +#include #include #include #include @@ -70,6 +71,7 @@ class VxlanMgr : public Orch int createVxlanNetdevice(std::string vxlanTunnelName, std::string vni_id, std::string src_ip, std::string dst_ip, std::string vlan_id); int deleteVxlanNetdevice(std::string vxlan_dev_name); + std::vector parseNetDev(const std::string& stdout); void getAllVxlanNetDevices(); /* diff --git a/debian/rules b/debian/rules index a8a8b835fb..a594bb54d4 100755 --- a/debian/rules +++ b/debian/rules @@ -29,7 +29,7 @@ include /usr/share/dpkg/default.mk ifeq ($(ENABLE_GCOV), y) override_dh_auto_configure: - dh_auto_configure -- --enable-gcov + dh_auto_configure -- --enable-gcov CFLAGS="-g -O0" CXXFLAGS="-g -O0" endif override_dh_auto_install: diff --git a/doc/Configuration.md b/doc/Configuration.md deleted file mode 100644 index 40865366f6..0000000000 --- a/doc/Configuration.md +++ /dev/null @@ -1,1433 +0,0 @@ -# SONiC Configuration Database Manual - -Table of Contents -================= - - * [Introduction](#introduction) - * [Configuration](#configuration) - * [Config Load and Save](#config-load-and-save) - * [Incremental Configuration](#incremental-configuration) - * [Redis and Json Schema](#redis-and-json-schema) - * [ACL and Mirroring](#acl-and-mirroring) - * [BGP Sessions](#bgp-sessions) - * [BUFFER_PG](#buffer_pg) - * [Buffer pool](#buffer-pool) - * [Buffer profile](#buffer-profile) - * [Buffer queue](#buffer-queue) - * [Cable length](#cable-length) - * [COPP_TABLE](#copp_table) - * [CRM](#crm) - * [Data Plane L3 Interfaces](#data-plane-l3-interfaces) - * [DEFAULT_LOSSLESS_BUFFER_PARAMETER](#DEFAULT_LOSSLESS_BUFFER_PARAMETER) - * [Device Metadata](#device-metadata) - * [Device neighbor metada](#device-neighbor-metada) - * [DSCP_TO_TC_MAP](#dscp_to_tc_map) - * [FLEX_COUNTER_TABLE](#flex_counter_table) - * [L2 Neighbors](#l2-neighbors) - * [Loopback Interface](#loopback-interface) - * [LOSSLESS_TRAFFIC_PATTERN](#LOSSLESS_TRAFFIC_PATTERN) - * [Management Interface](#management-interface) - * [Management port](#management-port) - * [Management VRF](#management-vrf) - 
* [MAP_PFC_PRIORITY_TO_QUEUE](#map_pfc_priority_to_queue) - * [NTP Global Configuration](#ntp-global-configuration) - * [NTP and SYSLOG servers](#ntp-and-syslog-servers) - * [Port](#port) - * [Port Channel](#port-channel) - * [Portchannel member](#portchannel-member) - * [Scheduler](#scheduler) - * [Port QoS Map](#port-qos-map) - * [Queue](#queue) - * [Tacplus Server](#tacplus-server) - * [TC to Priority group map](#tc-to-priority-group-map) - * [TC to Queue map](#tc-to-queue-map) - * [Versions](#versions) - * [VLAN](#vlan) - * [VLAN_MEMBER](#vlan_member) - * [Virtual router](#virtual-router) - * [WRED_PROFILE](#wred_profile) - * [For Developers](#for-developers) - * [Generating Application Config by Jinja2 Template](#generating-application-config-by-jinja2-template) - * [Incremental Configuration by Subscribing to ConfigDB](#incremental-configuration-by-subscribing-to-configdb) - - - -# Introduction -This document lists the configuration commands schema applied in the SONiC eco system. All these commands find relevance in collecting system information, analysis and even for trouble shooting. All the commands are categorized under relevant topics with corresponding examples. - -# Configuration - -SONiC is managing configuration in a single source of truth - a redisDB -instance that we refer as ConfigDB. Applications subscribe to ConfigDB -and generate their running configuration correspondingly. - -(Before Sep 2017, we were using an XML file named minigraph.xml to -configure SONiC devices. For historical documentation, please refer to -[Configuration with -Minigraph](https://github.com/Azure/SONiC/wiki/Configuration-with-Minigraph-(~Sep-2017))) - -# **Config Load and Save** - -In current version of SONiC, ConfigDB is implemented as database 4 of -local redis. When system boots, configurations will be loaded from -/etc/sonic/config_db.json file into redis. Please note that ConfigDB -content won't be written back into /etc/sonic/config_db.json file -automatically. In order to do that, a config save command need to be -manually executed from CLI. Similarly, config load will trigger a force -load of json file into DB. Generally, content in -/etc/sonic/config_db.json can be considered as starting config, and -content in redisDB running config. - -We keep a way to load configuration from minigraph and write into -ConfigDB for backward compatibility. To do that, run `config -load_minigraph`. - -### Incremental Configuration - -The design of ConfigDB supports incremental configuration - application -could subscribe to changes in ConfigDB and response correspondingly. -However, this feature is not implemented by all applications yet. By Sep -2017 now, the only application that supports incremental configuration -is BGP (docker-fpm-quagga). For other applications, a manual restart is -required after configuration changes in ConfigDB. - -# **Redis and Json Schema** - -ConfigDB uses a table-object schema that is similar with -[AppDB](https://github.com/Azure/sonic-swss/blob/4c56d23b9ff4940bdf576cf7c9e5aa77adcbbdcc/doc/swss-schema.md), -and `config_db.json` is a straight-forward serialization of DB. As an -example, the following fragments could be BGP-related configuration in -redis and json, correspondingly: - - -***Redis format*** -``` -127.0.0.1:6379[4]> keys BGP_NEIGHBOR:* - -1) "BGP_NEIGHBOR:10.0.0.31" -2) "BGP_NEIGHBOR:10.0.0.39" -3) "BGP_NEIGHBOR:10.0.0.11" -4) "BGP_NEIGHBOR:10.0.0.7" - -... 
- -127.0.0.1:6379[4]> hgetall BGP_NEIGHBOR:10.0.0.3 - -1) "admin_status" -2) "up" -3) "peer_addr" -4) "10.0.0.2" -5) "asn" -6) "65200" -7) "name" -8) "ARISTA07T2" -``` - -***Json format*** -``` -"BGP_NEIGHBOR": { - "10.0.0.57": { - "rrclient": "0", - "name": "ARISTA01T1", - "local_addr": "10.0.0.56", - "nhopself": "0", - "holdtime": "10", - "asn": "64600", - "keepalive": "3" - }, - "10.0.0.59": { - "rrclient": "0", - "name": "ARISTA02T1", - "local_addr": "10.0.0.58", - "nhopself": "0", - "holdtime": "10", - "asn": "64600", - "keepalive": "3" - }, -} -``` - -Full sample config_db.json files are availables at -[here](https://github.com/Azure/SONiC/blob/gh-pages/doc/config_db.json) -and -[here](https://github.com/Azure/SONiC/blob/gh-pages/doc/config_db_t0.json). - - -### ACL and Mirroring - -ACL and mirroring related configuration are defined in -**MIRROR_SESSION**, **ACL_TABLE** and **ACL_RULE** tables. Those -tables are in progress of migrating from APPDB. Please refer to their -schema in APPDB -[here](https://github.com/Azure/sonic-swss/blob/4c56d23b9ff4940bdf576cf7c9e5aa77adcbbdcc/doc/swss-schema.md) -and migration plan -[here](https://github.com/Azure/SONiC/wiki/ACL-Configuration-Requirement-Description). - -``` -{ -"MIRROR_SESSION": { - "everflow0": { - "src_ip": "10.1.0.32", - "dst_ip": "2.2.2.2" - } - }, - -"ACL_TABLE": { - "DATAACL": { - "policy_desc" : "data_acl", - "type": "l3", - "ports": [ - "Ethernet0", - "Ethernet4", - "Ethernet8", - "Ethernet12" - ] - } - } -} -``` - -***Below ACL table added as per the mail*** -``` -{ -"ACL_TABLE": { - "aaa": { - "type": "L3", - "ports": "Ethernet0" - } - }, -"ACL_RULE": { - "aaa|rule_0": { - "PRIORITY": "55", - "PACKET_ACTION": "DROP", - "L4_SRC_PORT": "0" - }, - "aaa|rule_1": { - "PRIORITY": "55", - "PACKET_ACTION": "DROP", - "L4_SRC_PORT": "1" - } - } -} -``` - -***Below ACL table added by comparig minigraph.xml & config_db.json*** - -``` -{ -"ACL_TABLE": { - "EVERFLOW": { - "type": "MIRROR", - "policy_desc": "EVERFLOW", - "ports": [ - "PortChannel0001", - "PortChannel0002", - "PortChannel0003", - "PortChannel0004" - ] - }, - "EVERFLOWV6": { - "type": "MIRRORV6", - "policy_desc": "EVERFLOWV6", - "ports": [ - "PortChannel0001", - "PortChannel0002", - "PortChannel0003", - "PortChannel0004" - ] - }, - "SNMP_ACL": { - "services": [ - "SNMP" - ], - "type": "CTRLPLANE", - "policy_desc": "SNMP_ACL" - }, - "SSH_ONLY": { - "services": [ - "SSH" - ], - "type": "CTRLPLANE", - "policy_desc": "SSH_ONLY" - } - }, - -"ACL_RULE": { - "SNMP_ACL|DEFAULT_RULE": { - "PRIORITY": "1", - "PACKET_ACTION": "DROP", - "ETHER_TYPE": "2048" - }, - "SNMP_ACL|RULE_1": { - "PRIORITY": "9999", - "PACKET_ACTION": "ACCEPT", - "SRC_IP": "1.1.1.1/32", - "IP_PROTOCOL": "17" - }, - "SNMP_ACL|RULE_2": { - "PRIORITY": "9998", - "PACKET_ACTION": "ACCEPT", - "SRC_IP": "2.2.2.2/32", - "IP_PROTOCOL": "17" - }, - "SSH_ONLY|DEFAULT_RULE": { - "PRIORITY": "1", - "PACKET_ACTION": "DROP", - "ETHER_TYPE": "2048" - }, - "SSH_ONLY|RULE_1": { - "PRIORITY": "9999", - "PACKET_ACTION": "ACCEPT", - "SRC_IP": "4.4.4.4/8", - "IP_PROTOCOL": "6" - } - } -} - -``` - -***ACL table type configuration example*** -``` -{ - "ACL_TABLE_TYPE": { - "CUSTOM_L3": { - "MATCHES": [ - "IN_PORTS", - "OUT_PORTS", - "SRC_IP" - ], - "ACTIONS": [ - "PACKET_ACTION", - "MIRROR_INGRESS_ACTION" - ], - "BIND_POINTS": [ - "PORT", - "LAG" - ] - } - }, - "ACL_TABLE": { - "DATAACL": { - "STAGE": "INGRESS", - "TYPE": "CUSTOM_L3", - "PORTS": [ - "Ethernet0", - "PortChannel1" - ] - } - }, - "ACL_RULE": { - "DATAACL|RULE0": { - 
"PRIORITY": "999", - "PACKET_ACTION": "DROP", - "SRC_IP": "1.1.1.1/32", - } - } -} -``` - -### BGP Sessions - -BGP session configuration is defined in **BGP_NEIGHBOR** table. BGP -neighbor address is used as key of bgp neighbor objects. Object -attributes include remote AS number, neighbor router name, and local -peering address. Dynamic neighbor is also supported by defining peer -group name and IP ranges in **BGP_PEER_RANGE** table. - -``` -{ -"BGP_NEIGHBOR": { - "10.0.0.61": { - "local_addr": "10.0.0.60", - "asn": 64015, - "name": "ARISTA15T0" - }, - "10.0.0.49": { - "local_addr": "10.0.0.48", - "asn": 64009, - "name": "ARISTA09T0" - }, - - "10.0.0.63": { - "rrclient": "0", - "name": "ARISTA04T1", - "local_addr": "10.0.0.62", - "nhopself": "0", - "holdtime": "10", - "asn": "64600", - "keepalive": "3" - } - -"BGP_PEER_RANGE": { - "BGPSLBPassive": { - "name": "BGPSLBPassive", - "ip_range": [ - "10.250.0.0/27" - ] - }, - "BGPVac": { - "name": "BGPVac", - "ip_range": [ - "10.2.0.0/16" - ] - } - } -} -``` - -### BUFFER_PG - -When the system is running in traditional buffer model, profiles needs to explicitly configured: - -``` -{ -"BUFFER_PG": { - "Ethernet0|3-4": { - "profile": "pg_lossless_40000_5m_profile" - }, - "Ethernet1|3-4": { - "profile": "pg_lossless_40000_5m_profile" - }, - "Ethernet2|3-4": { - "profile": "pg_lossless_40000_5m_profile" - } - } -} - -``` - -When the system is running in dynamic buffer model, profiles can be: - - - either calculated dynamically according to ports' configuration and just configured as "NULL"; - - or configured explicitly. - -``` -{ -"BUFFER_PG": { - "Ethernet0|3-4": { - "profile": "NULL" - }, - "Ethernet1|3-4": { - "profile": "NULL" - }, - "Ethernet2|3-4": { - "profile": "static_profile" - } - } -} - -``` - -### Buffer pool - -When the system is running in traditional buffer model, the size of all of the buffer pools and xoff of ingress_lossless_pool need to be configured explicitly. - -``` -{ -"BUFFER_POOL": { - "egress_lossless_pool": { - "type": "egress", - "mode": "static", - "size": "15982720" - }, - "egress_lossy_pool": { - "type": "egress", - "mode": "dynamic", - "size": "9243812" - }, - "ingress_lossless_pool": { - "xoff": "4194112", - "type": "ingress", - "mode": "dynamic", - "size": "10875072" - } - } -} - -``` - -When the system is running in dynamic buffer model, the size of some of the buffer pools can be omitted and will be dynamically calculated. 
- -``` -{ -"BUFFER_POOL": { - "egress_lossless_pool": { - "type": "egress", - "mode": "static", - "size": "15982720" - }, - "egress_lossy_pool": { - "type": "egress", - "mode": "dynamic", - }, - "ingress_lossless_pool": { - "type": "ingress", - "mode": "dynamic", - } - } -} - -``` - - -### Buffer profile - -``` -{ -"BUFFER_PROFILE": { - "egress_lossless_profile": { - "static_th": "3995680", - "pool": "egress_lossless_pool", - "size": "1518" - }, - "egress_lossy_profile": { - "dynamic_th": "3", - "pool": "egress_lossy_pool", - "size": "1518" - }, - "ingress_lossy_profile": { - "dynamic_th": "3", - "pool": "ingress_lossless_pool", - "size": "0" - }, - "pg_lossless_40000_5m_profile": { - "xon_offset": "2288", - "dynamic_th": "-3", - "xon": "2288", - "xoff": "66560", - "pool": "ingress_lossless_pool", - "size": "1248" - }, - "pg_lossless_40000_40m_profile": { - "xon_offset": "2288", - "dynamic_th": "-3", - "xon": "2288", - "xoff": "71552", - "pool": "ingress_lossless_pool", - "size": "1248" - } - } -} - -``` - -When the system is running in dynamic buffer model and the headroom_type is dynamic, only dynamic_th needs to be configured and rest of fields can be omitted. -This kind of profiles will be handled by buffer manager and won't be applied to SAI. - -``` -{ - { - "non_default_dynamic_th_profile": { - "dynamic_th": 1, - "headroom_type": "dynamic" - } - } -} -``` - -### Buffer queue - -``` -{ -"BUFFER_QUEUE": { - "Ethernet50,Ethernet52,Ethernet54,Ethernet56|0-2": { - "profile": "egress_lossy_profile" - }, - "Ethernet50,Ethernet52,Ethernet54,Ethernet56|3-4": { - "profile": "egress_lossless_profile" - }, - "Ethernet50,Ethernet52,Ethernet54,Ethernet56|5-6": { - "profile": "egress_lossy_profile" - } - } -} - -``` - - -### Cable length - -``` -{ -"CABLE_LENGTH": { - "AZURE": { - "Ethernet8": "5m", - "Ethernet9": "5m", - "Ethernet2": "5m", - "Ethernet58": "5m", - "Ethernet59": "5m", - "Ethernet50": "40m", - "Ethernet51": "5m", - "Ethernet52": "40m", - "Ethernet53": "5m", - "Ethernet54": "40m", - "Ethernet55": "5m", - "Ethernet56": "40m" - } - } -} - -``` - -### COPP_TABLE - -``` -{ -"COPP_TABLE": { - "default": { - "cbs": "600", - "cir": "600", - "meter_type": "packets", - "mode": "sr_tcm", - "queue": "0", - "red_action": "drop" - }, - - "trap.group.arp": { - "cbs": "600", - "cir": "600", - "meter_type": "packets", - "mode": "sr_tcm", - "queue": "4", - "red_action": "drop", - "trap_action": "trap", - "trap_ids": "arp_req,arp_resp,neigh_discovery", - "trap_priority": "4" - }, - - "trap.group.lldp.dhcp.udld": { - "queue": "4", - "trap_action": "trap", - "trap_ids": "lldp,dhcp,udld", - "trap_priority": "4" - }, - - "trap.group.bgp.lacp": { - "queue": "4", - "trap_action": "trap", - "trap_ids": "bgp,bgpv6,lacp", - "trap_priority": "4" - }, - - "trap.group.ip2me": { - "cbs": "600", - "cir": "600", - "meter_type": "packets", - "mode": "sr_tcm", - "queue": "1", - "red_action": "drop", - "trap_action": "trap", - "trap_ids": "ip2me", - "trap_priority": "1" - } - } -} -``` - -### CRM - -``` -{ -"CRM": { - "Config": { - "acl_table_threshold_type": "percentage", - "nexthop_group_threshold_type": "percentage", - "fdb_entry_high_threshold": "85", - "acl_entry_threshold_type": "percentage", - "ipv6_neighbor_low_threshold": "70", - "nexthop_group_member_low_threshold": "70", - "acl_group_high_threshold": "85", - "ipv4_route_high_threshold": "85", - "acl_counter_high_threshold": "85", - "ipv4_route_low_threshold": "70", - "ipv4_route_threshold_type": "percentage", - "ipv4_neighbor_low_threshold": "70", - 
"acl_group_threshold_type": "percentage", - "ipv4_nexthop_high_threshold": "85", - "ipv6_route_threshold_type": "percentage", - "snat_entry_threshold_type": "percentage", - "snat_entry_high_threshold": "85", - "snat_entry_low_threshold": "70", - "dnat_entry_threshold_type": "percentage", - "dnat_entry_high_threshold": "85", - "dnat_entry_low_threshold": "70", - "ipmc_entry_threshold_type": "percentage", - "ipmc_entry_high_threshold": "85", - "ipmc_entry_low_threshold": "70" - } - } -} - -``` - -### Data Plane L3 Interfaces - -IP configuration for data plane are defined in **INTERFACE**, -**PORTCHANNEL_INTERFACE**, and **VLAN_INTERFACE** table. The objects -in all three tables have the interface (could be physical port, port -channel, or vlan) that IP address is attached to as first-level key, and -IP prefix as second-level key. IP interface objects don't have any -attributes. - -``` -{ -"INTERFACE": { - "Ethernet0|10.0.0.0/31": {}, - "Ethernet4|10.0.0.2/31": {}, - "Ethernet8|10.0.0.4/31": {} - ... - }, - -"PORTCHANNEL_INTERFACE": { - "PortChannel01|10.0.0.56/31": {}, - "PortChannel01|FC00::71/126": {}, - "PortChannel02|10.0.0.58/31": {}, - "PortChannel02|FC00::75/126": {} - ... - }, -"VLAN_INTERFACE": { - "Vlan1000|192.168.0.1/27": {} - } -} - -``` - - -### DEFAULT_LOSSLESS_BUFFER_PARAMETER - -This table stores the default lossless buffer parameters for dynamic buffer calculation. - -``` -{ - "DEFAULT_LOSSLESS_BUFFER_PARAMETER": { - "AZURE": { - "default_dynamic_th": "0", - "over_subscribe_ratio": "2" - } - } -} -``` - -### Device Metadata - -The **DEVICE_METADATA** table contains only one object named -*localhost*. In this table the device metadata such as hostname, hwsku, -deployment envionment id and deployment type are specified. BGP local AS -number is also specified in this table as current only single BGP -instance is supported in SONiC. - -``` -{ -"DEVICE_METADATA": { - "localhost": { - "hwsku": "Force10-S6100", - "default_bgp_status": "up", - "docker_routing_config_mode": "unified", - "hostname": "sonic-s6100-01", - "platform": "x86_64-dell_s6100_c2538-r0", - "mac": "4c:76:25:f4:70:82", - "default_pfcwd_status": "disable", - "bgp_asn": "65100", - "deployment_id": "1", - "type": "ToRRouter", - "buffer_model": "traditional" - } - } -} - -``` - - -### Device neighbor metada - -``` -{ -"DEVICE_NEIGHBOR_METADATA": { - "ARISTA01T1": { - "lo_addr": "None", - "mgmt_addr": "10.11.150.45", - "hwsku": "Arista-VM", - "type": "LeafRouter" - }, - "ARISTA02T1": { - "lo_addr": "None", - "mgmt_addr": "10.11.150.46", - "hwsku": "Arista-VM", - "type": "LeafRouter" - } - } -} - -``` - - -### DSCP_TO_TC_MAP -``` -{ -"DSCP_TO_TC_MAP": { - "AZURE": { - "1": "1", - "0": "1", - "3": "3", - "2": "1", - "5": "2", - "4": "4", - "7": "1", - "6": "1", - "9": "1", - "8": "0" - } - } -} - -``` - - -### MPLS_TC_TO_TC_MAP -``` -{ -"MPLS_TC_TO_TC_MAP": { - "AZURE": { - "0": "0", - "1": "1", - "2": "1", - "3": "2", - "4": "2", - "5": "3", - "6": "3", - "7": "4" - } - } -} - -``` - -### FLEX_COUNTER_TABLE - -``` -{ -"FLEX_COUNTER_TABLE": { - "PFCWD": { - "FLEX_COUNTER_STATUS": "enable" - }, - "PORT": { - "FLEX_COUNTER_STATUS": "enable" - }, - "QUEUE": { - "FLEX_COUNTER_STATUS": "enable" - } - } -} - -``` - - -### L2 Neighbors - -The L2 neighbor and connection information can be configured in -**DEVICE_NEIGHBOR** table. Those information are used mainly for LLDP. 
-While mandatory fields include neighbor name acting as object key and -remote port / local port information in attributes, optional information -about neighbor device such as device type, hwsku, management address and -loopback address can also be defined. - -``` -{ -"DEVICE_NEIGHBOR": { - "ARISTA04T1": { - "mgmt_addr": "10.20.0.163", - "hwsku": "Arista", - "lo_addr": null, - "local_port": "Ethernet124", - "type": "LeafRouter", - "port": "Ethernet1" - }, - "ARISTA03T1": { - "mgmt_addr": "10.20.0.162", - "hwsku": "Arista", - "lo_addr": null, - "local_port": "Ethernet120", - "type": "LeafRouter", - "port": "Ethernet1" - }, - "ARISTA02T1": { - "mgmt_addr": "10.20.0.161", - "hwsku": "Arista", - "lo_addr": null, - "local_port": "Ethernet116", - "type": "LeafRouter", - "port": "Ethernet1" - }, - "ARISTA01T1": { - "mgmt_addr": "10.20.0.160", - "hwsku": "Arista", - "lo_addr": null, - "local_port": "Ethernet112", - "type": "LeafRouter", - "port": "Ethernet1" - } - } -} -``` - -### Loopback Interface - -Loopback interface configuration lies in **LOOPBACK_INTERFACE** table -and has similar schema with data plane interfaces. The loopback device -name and loopback IP prefix act as multi-level key for loopback -interface objects. - -``` -{ -"LOOPBACK_INTERFACE": { - "Loopback0|10.1.0.32/32": {}, - "Loopback0|FC00:1::32/128": {} - } -} - -``` - -### LOSSLESS_TRAFFIC_PATTERN - -The LOSSLESS_TRAFFIC_PATTERN table stores parameters related to -lossless traffic for dynamic buffer calculation - -``` -{ - "LOSSLESS_TRAFFIC_PATTERN": { - "AZURE": { - "mtu": "1024", - "small_packet_percentage": "100" - } - } -} -``` - -### Management Interface - -Management interfaces are defined in **MGMT_INTERFACE** table. Object -key is composed of management interface name and IP prefix. Attribute -***gwaddr*** specify the gateway address of the prefix. -***forced_mgmt_routes*** attribute can be used to specify addresses / -prefixes traffic to which are forced to go through management network -instead of data network. - -``` -{ -"MGMT_INTERFACE": { - "eth0|10.11.150.11/16": { - "gwaddr": "10.11.0.1" - }, - "eth0|FC00:2::32/64": { - "forced_mgmt_routes": [ - "10.0.0.100/31", - "10.250.0.8", - "10.255.0.0/28" - ], - "gwaddr": "fc00:2::1" - } - } -} - -``` - -### Management port - -``` -{ -"MGMT_PORT": { - "eth0": { - "alias": "eth0", - "admin_status": "up" - } - } -} - -``` - - -### Management VRF - -``` -{ -"MGMT_VRF_CONFIG": { - "vrf_global": { - "mgmtVrfEnabled": "true" - } - } -} -``` - -### MAP_PFC_PRIORITY_TO_QUEUE - -``` -{ -"MAP_PFC_PRIORITY_TO_QUEUE": { - "AZURE": { - "1": "1", - "0": "0", - "3": "3", - "2": "2", - "5": "5", - "4": "4", - "7": "7", - "6": "6" - } - } -} -``` -### NTP Global Configuration - -These configuration options are used to modify the way that -ntp binds to the ports on the switch and which port it uses to -make ntp update requests from. - -***NTP VRF*** - -If this option is set to `default` then ntp will run within the default vrf -**when the management vrf is enabled**. If the mgmt vrf is enabled and this value is -not set to default then ntp will run within the mgmt vrf. - -This option **has no effect** if the mgmt vrf is not enabled. - -``` -{ -"NTP": { - "global": { - "vrf": "default" - } - } -} -``` - - -***NTP Source Port*** - -This option sets the port which ntp will choose to send time update requests from by. 
- -NOTE: If a Loopback interface is defined on the switch ntp will choose this by default, so this setting -is **required** if the switch has a Loopback interface and the ntp peer does not have defined routes -for that address. - -``` -{ -"NTP": { - "global": { - "src_intf": "Ethernet1" - } - } -} -``` - -### NTP and SYSLOG servers - -These information are configured in individual tables. Domain name or IP -address of the server is used as object key. Currently there are no -attributes in those objects. - -***NTP server*** -``` -{ -"NTP_SERVER": { - "2.debian.pool.ntp.org": {}, - "1.debian.pool.ntp.org": {}, - "3.debian.pool.ntp.org": {}, - "0.debian.pool.ntp.org": {} - }, - -"NTP_SERVER": { - "23.92.29.245": {}, - "204.2.134.164": {} - } -} -``` - -***Syslogserver*** -``` -{ -"SYSLOG_SERVER": { - "10.0.0.5": {}, - "10.0.0.6": {}, - "10.11.150.5": {} - } -} -``` - -### Port - -In this table the physical port configurations are defined. Each object -will have port name as its key, and port name alias and port speed as -optional attributes. - -``` -{ -"PORT": { - "Ethernet0": { - "index": "0", - "lanes": "101,102", - "description": "fortyGigE1/1/1", - "mtu": "9100", - "alias": "fortyGigE1/1/1", - "speed": "40000" - }, - "Ethernet1": { - "index": "1", - "lanes": "103,104", - "description": "fortyGigE1/1/2", - "mtu": "9100", - "alias": "fortyGigE1/1/2", - "admin_status": "up", - "speed": "40000" - }, - "Ethernet63": { - "index": "63", - "lanes": "87,88", - "description": "fortyGigE1/4/16", - "mtu": "9100", - "alias": "fortyGigE1/4/16", - "speed": "40000" - } - } -} - -``` - -### Port Channel - -Port channels are defined in **PORTCHANNEL** table with port channel -name as object key and member list as attribute. - -``` -{ -"PORTCHANNEL": { - "PortChannel0003": { - "admin_status": "up", - "min_links": "1", - "members": [ - "Ethernet54" - ], - "mtu": "9100" - }, - "PortChannel0004": { - "admin_status": "up", - "min_links": "1", - "members": [ - "Ethernet56" - ], - "mtu": "9100" - } - } -} -``` - - -### Portchannel member - -``` -{ -"PORTCHANNEL_MEMBER": { - "PortChannel0001|Ethernet50": {}, - "PortChannel0002|Ethernet52": {}, - "PortChannel0003|Ethernet54": {}, - "PortChannel0004|Ethernet56": {} - } -} - -``` -### Scheduler - -``` -{ -"SCHEDULER": { - "scheduler.0": { - "type": "STRICT" - }, - "scheduler.1": { - "type": "WRR" - "weight": "1", - "meter_type": "bytes", - "pir": "1250000000", - "pbs": "8192" - }, - "scheduler.port": { - "meter_type": "bytes", - "pir": "1000000000", - "pbs": "8192" - } - } -} -``` - -### Port QoS Map - -``` -{ -"PORT_QOS_MAP": { - "Ethernet50,Ethernet52,Ethernet54,Ethernet56": { - "tc_to_pg_map": "AZURE", - "tc_to_queue_map": "AZURE", - "pfc_enable": "3,4", - "pfc_to_queue_map": "AZURE", - "dscp_to_tc_map": "AZURE", - "dscp_to_fc_map": "AZURE", - "exp_to_fc_map": "AZURE", - "scheduler": "scheduler.port" - } - } -} -``` - -### Queue -``` -{ -"QUEUE": { - "Ethernet56|4": { - "wred_profile": "AZURE_LOSSLESS", - "scheduler": "scheduler.1" - }, - "Ethernet56|5": { - "scheduler": "scheduler.0" - }, - "Ethernet56|6": { - "scheduler": "scheduler.0" - } - } -} -``` - - -### Tacplus Server - -``` -{ -"TACPLUS_SERVER": { - "10.0.0.8": { - "priority": "1", - "tcp_port": "49" - }, - "10.0.0.9": { - "priority": "1", - "tcp_port": "49" - } - } -} -``` - - -### TC to Priority group map - -``` -{ -"TC_TO_PRIORITY_GROUP_MAP": { - "AZURE": { - "1": "1", - "0": "0", - "3": "3", - "2": "2", - "5": "5", - "4": "4", - "7": "7", - "6": "6" - } - } -} -``` - -### TC to Queue map - -``` -{ 
-"TC_TO_QUEUE_MAP": { - "AZURE": { - "1": "1", - "0": "0", - "3": "3", - "2": "2", - "5": "5", - "4": "4", - "7": "7", - "6": "6" - } - } -} -``` - -### Versions - -This table is where the curret version of the software is recorded. -``` -{ - "VERSIONS": { - "DATABASE": { - "VERSION": "version_1_0_1" - } - } -} -``` - -### VLAN - -This table is where VLANs are defined. VLAN name is used as object key, -and member list as well as an integer id are defined as attributes. If a -DHCP relay is required for this VLAN, a dhcp_servers attribute must be -specified for that VLAN, the value of which is a list that must contain -the domain name or IP address of one or more DHCP servers. - -``` -{ -"VLAN": { - "Vlan1000": { - "dhcp_servers": [ - "192.0.0.1", - "192.0.0.2", - "192.0.0.3", - "192.0.0.4" - ], - "members": [ - "Ethernet0", - "Ethernet4", - "Ethernet8", - "Ethernet12" - ], - "vlanid": "1000" - } - } -} -``` - -### VLAN_MEMBER - -VLAN member table has Vlan name together with physical port or port -channel name as object key, and tagging mode as attributes. - -``` -{ -"VLAN_MEMBER": { - "Vlan1000|PortChannel47": { - "tagging_mode": "untagged" - }, - "Vlan1000|Ethernet8": { - "tagging_mode": "untagged" - }, - "Vlan2000|PortChannel47": { - "tagging_mode": "tagged" - } - } -} -``` - -### Virtual router - -The virtual router table allows to insert or update a new virtual router -instance. The key of the instance is its name. The attributes in the -table allow to change properties of a virtual router. Attributes: - -- 'v4' contains boolean value 'true' or 'false'. Enable or - disable IPv4 in the virtual router -- 'v6' contains boolean value 'true' or 'false'. Enable or - disable IPv6 in the virtual router -- 'src_mac' contains MAC address. What source MAC address will be - used for packets egressing from the virtual router -- 'ttl_action' contains packet action. Defines the action for - packets with TTL == 0 or TTL == 1 -- 'ip_opt_action' contains packet action. Defines the action for - packets with IP options -- 'l3_mc_action' contains packet action. Defines the action for - unknown L3 multicast packets - -The packet action could be: - -- 'drop' -- 'forward' -- 'copy' -- 'copy_cancel' -- 'trap' -- 'log' -- 'deny' -- 'transit' - - -***TBD*** -``` -'VRF:rid1': { - 'v4': 'true', - 'v6': 'false', - 'src_mac': '02:04:05:06:07:08', - 'ttl_action': 'copy', - 'ip_opt_action': 'deny', - 'l3_mc_action': 'drop' -} -``` - - -### WRED_PROFILE - -``` -{ -"WRED_PROFILE": { - "AZURE_LOSSLESS": { - "red_max_threshold": "2097152", - "wred_green_enable": "true", - "ecn": "ecn_all", - "green_min_threshold": "1048576", - "red_min_threshold": "1048576", - "wred_yellow_enable": "true", - "yellow_min_threshold": "1048576", - "green_max_threshold": "2097152", - "green_drop_probability": "5", - "yellow_max_threshold": "2097152", - "wred_red_enable": "true", - "yellow_drop_probability": "5", - "red_drop_probability": "5" - } - } -} -``` - -### BREAKOUT_CFG - -This table is introduced as part of Dynamic Port Breakout(DPB) feature. -It shows the current breakout mode of all ports(root ports). -The list of root ports, all possible breakout modes, and default breakout modes - are obtained/derived from platform.json and hwsku.json files. - -``` -"BREAKOUT_CFG": { - "Ethernet0": { - "brkout_mode": "4x25G[10G]" - }, - "Ethernet4": { - "brkout_mode": "4x25G[10G]" - }, - "Ethernet8": { - "brkout_mode": "4x25G[10G]" - }, - - ...... 
- - "Ethernet116": { - "brkout_mode": "2x50G" - }, - "Ethernet120": { - "brkout_mode": "2x50G" - }, - "Ethernet124": { - "brkout_mode": "2x50G" - } -} -``` - -For Developers -============== - -Generating Application Config by Jinja2 Template ------------------------------------------------- - -To be added. - -Incremental Configuration by Subscribing to ConfigDB ----------------------------------------------------- - -Detail instruction to be added. A sample could be found in this -[PR](https://github.com/Azure/sonic-buildimage/pull/861) that -implemented dynamic configuration for BGP. diff --git a/gearsyncd/gearboxparser.cpp b/gearsyncd/gearboxparser.cpp index 1ae8118266..dfd68be2ec 100644 --- a/gearsyncd/gearboxparser.cpp +++ b/gearsyncd/gearboxparser.cpp @@ -151,6 +151,12 @@ bool GearboxParser::parse() val = phy["context_id"]; attr = std::make_pair("context_id", std::to_string(val.get())); attrs.push_back(attr); + if (phy.find("macsec_ipg") != phy.end()) + { + val = phy["macsec_ipg"]; + attr = std::make_pair("macsec_ipg", std::to_string(val.get())); + attrs.push_back(attr); + } if (phy.find("hwinfo") == phy.end()) { SWSS_LOG_ERROR("missing 'hwinfo' field in 'phys' item %d in gearbox configuration", iter); diff --git a/mclagsyncd/mclaglink.cpp b/mclagsyncd/mclaglink.cpp index 68b700fdb9..b09660ee56 100644 --- a/mclagsyncd/mclaglink.cpp +++ b/mclagsyncd/mclaglink.cpp @@ -31,6 +31,7 @@ #include "mclagsyncd/mclaglink.h" #include "mclagsyncd/mclag.h" #include +#include #include #include "macaddress.h" #include @@ -188,8 +189,13 @@ void MclagLink::mclagsyncdFetchMclagInterfaceConfigFromConfigdb() void MclagLink::setPortIsolate(char *msg) { - char *platform = getenv("platform"); - if ((NULL != platform) && (strstr(platform, BRCM_PLATFORM_SUBSTRING))) + static const unordered_set supported { + BRCM_PLATFORM_SUBSTRING, + BFN_PLATFORM_SUBSTRING + }; + + const char *platform = getenv("platform"); + if (platform != nullptr && supported.find(string(platform)) != supported.end()) { mclag_sub_option_hdr_t *op_hdr = NULL; string isolate_src_port; diff --git a/mclagsyncd/mclaglink.h b/mclagsyncd/mclaglink.h index 9c23c97686..a811f8cb2e 100644 --- a/mclagsyncd/mclaglink.h +++ b/mclagsyncd/mclaglink.h @@ -50,7 +50,9 @@ #endif /* INET_ADDRSTRLEN */ #define MAX_L_PORT_NAME 20 + #define BRCM_PLATFORM_SUBSTRING "broadcom" +#define BFN_PLATFORM_SUBSTRING "barefoot" using namespace std; @@ -187,9 +189,10 @@ namespace swss { typedef std::tuple vlan_mbr; class MclagLink : public Selectable { - + private: - Select *m_select; + const int MSG_BATCH_SIZE; + unsigned int m_bufSize; char *m_messageBuffer; char *m_messageBuffer_send; @@ -200,11 +203,12 @@ namespace swss { int m_server_socket; int m_connection_socket; + Select *m_select; + bool is_iccp_up = false; std::string m_system_mac; std::set m_vlan_mbrship; //set of vlan,mbr tuples - const int MSG_BATCH_SIZE; std::map *p_learn; unique_ptr p_state_db; diff --git a/orchagent/Makefile.am b/orchagent/Makefile.am index 248c190f11..fedd7ef4ae 100644 --- a/orchagent/Makefile.am +++ b/orchagent/Makefile.am @@ -18,6 +18,7 @@ dist_swss_DATA = \ pfc_detect_barefoot.lua \ pfc_detect_nephos.lua \ pfc_detect_cisco-8000.lua \ + pfc_detect_vs.lua \ pfc_restore.lua \ pfc_restore_cisco-8000.lua \ port_rates.lua \ @@ -61,6 +62,7 @@ orchagent_SOURCES = \ mirrororch.cpp \ fdborch.cpp \ aclorch.cpp \ + pbh/pbhcap.cpp \ pbh/pbhcnt.cpp \ pbh/pbhmgr.cpp \ pbh/pbhrule.cpp \ @@ -91,7 +93,8 @@ orchagent_SOURCES = \ lagid.cpp \ bfdorch.cpp \ srv6orch.cpp \ - response_publisher.cpp + 
response_publisher.cpp \ + nvgreorch.cpp orchagent_SOURCES += flex_counter/flex_counter_manager.cpp flex_counter/flex_counter_stat_manager.cpp flex_counter/flow_counter_handler.cpp orchagent_SOURCES += debug_counter/debug_counter.cpp debug_counter/drop_counter.cpp diff --git a/orchagent/aclorch.cpp b/orchagent/aclorch.cpp index 900299b3d5..9e873a47db 100644 --- a/orchagent/aclorch.cpp +++ b/orchagent/aclorch.cpp @@ -2645,7 +2645,9 @@ void AclOrch::init(vector& connectors, PortsOrch *portOrch, Mirr // Broadcom and Mellanox. Virtual switch is also supported for testing // purposes. string platform = getenv("platform") ? getenv("platform") : ""; + string sub_platform = getenv("sub_platform") ? getenv("sub_platform") : ""; if (platform == BRCM_PLATFORM_SUBSTRING || + platform == CISCO_8000_PLATFORM_SUBSTRING || platform == MLNX_PLATFORM_SUBSTRING || platform == BFN_PLATFORM_SUBSTRING || platform == MRVL_PLATFORM_SUBSTRING || @@ -2675,8 +2677,11 @@ void AclOrch::init(vector& connectors, PortsOrch *portOrch, Mirr m_mirrorTableCapabilities[TABLE_TYPE_MIRRORV6] ? "yes" : "no"); // In Mellanox platform, V4 and V6 rules are stored in different tables + // In Broadcom DNX platform also, V4 and V6 rules are stored in different tables if (platform == MLNX_PLATFORM_SUBSTRING || - platform == MRVL_PLATFORM_SUBSTRING) + platform == CISCO_8000_PLATFORM_SUBSTRING || + platform == MRVL_PLATFORM_SUBSTRING || + (platform == BRCM_PLATFORM_SUBSTRING && sub_platform == BRCM_DNX_PLATFORM_SUBSTRING)) { m_isCombinedMirrorV6Table = false; } @@ -3615,7 +3620,9 @@ bool AclOrch::removeAclRule(string table_id, string rule_id) auto rule = getAclRule(table_id, rule_id); if (!rule) { - return false; + SWSS_LOG_NOTICE("ACL rule [%s] in table [%s] already deleted", + rule_id.c_str(), table_id.c_str()); + return true; } if (rule->hasCounter()) diff --git a/orchagent/bfdorch.cpp b/orchagent/bfdorch.cpp index 68295842b3..7bc5f4bfd3 100644 --- a/orchagent/bfdorch.cpp +++ b/orchagent/bfdorch.cpp @@ -6,6 +6,7 @@ #include "notifier.h" #include "sai_serialize.h" #include "directory.h" +#include "notifications.h" using namespace std; using namespace swss; @@ -21,6 +22,7 @@ extern sai_bfd_api_t* sai_bfd_api; extern sai_object_id_t gSwitchId; extern sai_object_id_t gVirtualRouterId; extern PortsOrch* gPortsOrch; +extern sai_switch_api_t* sai_switch_api; extern Directory gDirectory; const map session_type_map = @@ -57,6 +59,7 @@ BfdOrch::BfdOrch(DBConnector *db, string tableName, TableConnector stateDbBfdSes m_bfdStateNotificationConsumer = new swss::NotificationConsumer(notificationsDb, "NOTIFICATIONS"); auto bfdStateNotificatier = new Notifier(m_bfdStateNotificationConsumer, this, "BFD_STATE_NOTIFICATIONS"); Orch::addExecutor(bfdStateNotificatier); + register_state_change_notif = false; } BfdOrch::~BfdOrch(void) @@ -152,8 +155,52 @@ void BfdOrch::doTask(NotificationConsumer &consumer) } } +bool BfdOrch::register_bfd_state_change_notification(void) +{ + sai_attribute_t attr; + sai_status_t status; + sai_attr_capability_t capability; + + status = sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_SWITCH, + SAI_SWITCH_ATTR_BFD_SESSION_STATE_CHANGE_NOTIFY, + &capability); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Unable to query the BFD change notification capability"); + return false; + } + + if (!capability.set_implemented) + { + SWSS_LOG_ERROR("BFD register change notification not supported"); + return false; + } + + attr.id = SAI_SWITCH_ATTR_BFD_SESSION_STATE_CHANGE_NOTIFY; + attr.value.ptr = (void 
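/* Registration of the BFD state-change callback happens lazily on the first
 * create_bfd_session() call rather than unconditionally at switch creation,
 * and only after the capability query above has confirmed that
 * SAI_SWITCH_ATTR_BFD_SESSION_STATE_CHANGE_NOTIFY is settable on this
 * platform; otherwise the session add fails with an explicit error. */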
*)on_bfd_session_state_change; + + status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to register BFD notification handler"); + return false; + } + return true; +} + bool BfdOrch::create_bfd_session(const string& key, const vector& data) { + if (!register_state_change_notif) + { + if (!register_bfd_state_change_notification()) + { + SWSS_LOG_ERROR("BFD session for %s cannot be created", key.c_str()); + return false; + } + register_state_change_notif = true; + } if (bfd_session_map.find(key) != bfd_session_map.end()) { SWSS_LOG_ERROR("BFD session for %s already exists", key.c_str()); diff --git a/orchagent/bfdorch.h b/orchagent/bfdorch.h index 6be1f8deae..019af037b8 100644 --- a/orchagent/bfdorch.h +++ b/orchagent/bfdorch.h @@ -26,12 +26,15 @@ class BfdOrch: public Orch, public Subject uint32_t bfd_gen_id(void); uint32_t bfd_src_port(void); + bool register_bfd_state_change_notification(void); + std::map bfd_session_map; std::map bfd_session_lookup; swss::Table m_stateBfdSessionTable; swss::NotificationConsumer* m_bfdStateNotificationConsumer; + bool register_state_change_notif; }; #endif /* SWSS_BFDORCH_H */ diff --git a/orchagent/bufferorch.cpp b/orchagent/bufferorch.cpp index e7204344d5..56f02b2b52 100644 --- a/orchagent/bufferorch.cpp +++ b/orchagent/bufferorch.cpp @@ -48,7 +48,11 @@ BufferOrch::BufferOrch(DBConnector *applDb, DBConnector *confDb, DBConnector *st m_flexCounterTable(new ProducerTable(m_flexCounterDb.get(), FLEX_COUNTER_TABLE)), m_flexCounterGroupTable(new ProducerTable(m_flexCounterDb.get(), FLEX_COUNTER_GROUP_TABLE)), m_countersDb(new DBConnector("COUNTERS_DB", 0)), - m_stateBufferMaximumValueTable(stateDb, STATE_BUFFER_MAXIMUM_VALUE_TABLE) + m_stateBufferMaximumValueTable(stateDb, STATE_BUFFER_MAXIMUM_VALUE_TABLE), + m_ingressZeroBufferPool(SAI_NULL_OBJECT_ID), + m_egressZeroBufferPool(SAI_NULL_OBJECT_ID), + m_ingressZeroPoolRefCount(0), + m_egressZeroPoolRefCount(0) { SWSS_LOG_ENTER(); initTableHandlers(); @@ -310,6 +314,65 @@ const object_reference_map &BufferOrch::getBufferPoolNameOidMap(void) return *m_buffer_type_maps[APP_BUFFER_POOL_TABLE_NAME]; } +void BufferOrch::lockZeroBufferPool(bool ingress) +{ + if (ingress) + m_ingressZeroPoolRefCount++; + else + m_egressZeroPoolRefCount++; +} + +void BufferOrch::unlockZeroBufferPool(bool ingress) +{ + sai_object_id_t pool = SAI_NULL_OBJECT_ID; + if (ingress) + { + if (--m_ingressZeroPoolRefCount <= 0) + { + pool = m_ingressZeroBufferPool; + m_ingressZeroBufferPool = SAI_NULL_OBJECT_ID; + } + } + else + { + if (--m_egressZeroPoolRefCount <= 0) + { + pool = m_egressZeroBufferPool; + m_egressZeroBufferPool = SAI_NULL_OBJECT_ID; + } + } + + if (pool != SAI_NULL_OBJECT_ID) + { + auto sai_status = sai_buffer_api->remove_buffer_pool(pool); + if (SAI_STATUS_SUCCESS != sai_status) + { + SWSS_LOG_ERROR("Failed to remove buffer pool, rv:%d", sai_status); + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_BUFFER, sai_status); + if (handle_status != task_process_status::task_success) + { + return; + } + } + else + { + SWSS_LOG_NOTICE("Zero buffer pool has been successfully removed"); + } + } +} + +void BufferOrch::setZeroBufferPool(bool ingress, sai_object_id_t pool) +{ + if (ingress) + { + m_ingressZeroBufferPool = pool; + } + else + { + m_egressZeroBufferPool = pool; + } +} + task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) { SWSS_LOG_ENTER(); @@ -318,6 +381,8 @@ task_process_status 
BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) string map_type_name = APP_BUFFER_POOL_TABLE_NAME; string object_name = kfvKey(tuple); string op = kfvOp(tuple); + sai_buffer_pool_type_t pool_direction = SAI_BUFFER_POOL_TYPE_INGRESS; + bool creating_zero_pool = false; SWSS_LOG_DEBUG("object name:%s", object_name.c_str()); if (m_buffer_type_maps[map_type_name]->find(object_name) != m_buffer_type_maps[map_type_name]->end()) @@ -326,6 +391,17 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) SWSS_LOG_DEBUG("found existing object:%s of type:%s", object_name.c_str(), map_type_name.c_str()); } SWSS_LOG_DEBUG("processing command:%s", op.c_str()); + if (object_name == "ingress_zero_pool") + { + creating_zero_pool = true; + pool_direction = SAI_BUFFER_POOL_TYPE_INGRESS; + } + else if (object_name == "egress_zero_pool") + { + creating_zero_pool = true; + pool_direction = SAI_BUFFER_POOL_TYPE_EGRESS; + } + if (op == SET_COMMAND) { vector attribs; @@ -372,6 +448,11 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) return task_process_status::task_invalid_entry; } attr.id = SAI_BUFFER_POOL_ATTR_TYPE; + if (creating_zero_pool && pool_direction != static_cast(attr.value.u32)) + { + SWSS_LOG_ERROR("Wrong pool direction for pool %s", object_name.c_str()); + return task_process_status::task_invalid_entry; + } attribs.push_back(attr); } else if (field == buffer_pool_mode_field_name) @@ -437,18 +518,53 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) } else { - sai_status = sai_buffer_api->create_buffer_pool(&sai_object, gSwitchId, (uint32_t)attribs.size(), attribs.data()); - if (SAI_STATUS_SUCCESS != sai_status) + if (creating_zero_pool) { - SWSS_LOG_ERROR("Failed to create buffer pool %s with type %s, rv:%d", object_name.c_str(), map_type_name.c_str(), sai_status); - task_process_status handle_status = handleSaiCreateStatus(SAI_API_BUFFER, sai_status); - if (handle_status != task_process_status::task_success) + if (pool_direction == SAI_BUFFER_POOL_TYPE_INGRESS) { - return handle_status; + sai_object = m_ingressZeroBufferPool; + } + else if (pool_direction == SAI_BUFFER_POOL_TYPE_EGRESS) + { + sai_object = m_egressZeroBufferPool; + } + } + + if (SAI_NULL_OBJECT_ID == sai_object) + { + sai_status = sai_buffer_api->create_buffer_pool(&sai_object, gSwitchId, (uint32_t)attribs.size(), attribs.data()); + if (SAI_STATUS_SUCCESS != sai_status) + { + SWSS_LOG_ERROR("Failed to create buffer pool %s with type %s, rv:%d", object_name.c_str(), map_type_name.c_str(), sai_status); + task_process_status handle_status = handleSaiCreateStatus(SAI_API_BUFFER, sai_status); + if (handle_status != task_process_status::task_success) + { + return handle_status; + } + } + + SWSS_LOG_NOTICE("Created buffer pool %s with type %s", object_name.c_str(), map_type_name.c_str()); + } + else + { + SWSS_LOG_NOTICE("No need to create buffer pool %s since it has been created", object_name.c_str()); + } + + if (creating_zero_pool) + { + if (pool_direction == SAI_BUFFER_POOL_TYPE_INGRESS) + { + m_ingressZeroPoolRefCount++; + m_ingressZeroBufferPool = sai_object; + } + else if (pool_direction == SAI_BUFFER_POOL_TYPE_EGRESS) + { + m_egressZeroPoolRefCount++; + m_egressZeroBufferPool = sai_object; } } + (*(m_buffer_type_maps[map_type_name]))[object_name].m_saiObjectId = sai_object; - SWSS_LOG_NOTICE("Created buffer pool %s with type %s", object_name.c_str(), map_type_name.c_str()); // Here we take the PFC watchdog approach to update 
the COUNTERS_DB metadata (e.g., PFC_WD_DETECTION_TIME per queue) // at initialization (creation and registration phase) // Specifically, we push the buffer pool name to oid mapping upon the creation of the oid @@ -470,18 +586,40 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) if (SAI_NULL_OBJECT_ID != sai_object) { clearBufferPoolWatermarkCounterIdList(sai_object); - sai_status = sai_buffer_api->remove_buffer_pool(sai_object); - if (SAI_STATUS_SUCCESS != sai_status) + bool remove = true; + if (sai_object == m_ingressZeroBufferPool) { - SWSS_LOG_ERROR("Failed to remove buffer pool %s with type %s, rv:%d", object_name.c_str(), map_type_name.c_str(), sai_status); - task_process_status handle_status = handleSaiRemoveStatus(SAI_API_BUFFER, sai_status); - if (handle_status != task_process_status::task_success) + if (--m_ingressZeroPoolRefCount > 0) + remove = false; + else + m_ingressZeroBufferPool = SAI_NULL_OBJECT_ID; + } + else if (sai_object == m_egressZeroBufferPool) + { + if (--m_egressZeroPoolRefCount > 0) + remove = false; + else + m_egressZeroBufferPool = SAI_NULL_OBJECT_ID; + } + if (remove) + { + sai_status = sai_buffer_api->remove_buffer_pool(sai_object); + if (SAI_STATUS_SUCCESS != sai_status) { - return handle_status; + SWSS_LOG_ERROR("Failed to remove buffer pool %s with type %s, rv:%d", object_name.c_str(), map_type_name.c_str(), sai_status); + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_BUFFER, sai_status); + if (handle_status != task_process_status::task_success) + { + return handle_status; + } } + SWSS_LOG_NOTICE("Removed buffer pool %s with type %s", object_name.c_str(), map_type_name.c_str()); + } + else + { + SWSS_LOG_NOTICE("Will not remove buffer pool %s since it is still referenced", object_name.c_str()); } } - SWSS_LOG_NOTICE("Removed buffer pool %s with type %s", object_name.c_str(), map_type_name.c_str()); auto it_to_delete = (m_buffer_type_maps[map_type_name])->find(object_name); (m_buffer_type_maps[map_type_name])->erase(it_to_delete); m_countersDb->hdel(COUNTERS_BUFFER_POOL_NAME_MAP, object_name); @@ -523,13 +661,6 @@ task_process_status BufferOrch::processBufferProfile(KeyOpFieldsValuesTuple &tup sai_attribute_t attr; if (field == buffer_pool_field_name) { - if (SAI_NULL_OBJECT_ID != sai_object) - { - // We should skip the profile's pool name because it's create only when setting a profile's attribute. - SWSS_LOG_INFO("Skip setting buffer profile's pool %s for profile %s", value.c_str(), object_name.c_str()); - continue; - } - sai_object_id_t sai_pool; ref_resolve_status resolve_result = resolveFieldRefValue(m_buffer_type_maps, buffer_pool_field_name, buffer_to_ref_table_map.at(buffer_pool_field_name), diff --git a/orchagent/bufferorch.h b/orchagent/bufferorch.h index 05fdd7917f..24af140b4a 100644 --- a/orchagent/bufferorch.h +++ b/orchagent/bufferorch.h @@ -37,6 +37,14 @@ class BufferOrch : public Orch static type_map m_buffer_type_maps; void generateBufferPoolWatermarkCounterIdList(void); const object_reference_map &getBufferPoolNameOidMap(void); + sai_object_id_t getZeroBufferPool(bool ingress) + { + return ingress ? 
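/* The ingress and egress "zero" buffer pools are shared SAI objects: they are
 * created once (either by processBufferPool() handling the ingress_zero_pool /
 * egress_zero_pool keys, or by another component calling setZeroBufferPool())
 * and lockZeroBufferPool()/unlockZeroBufferPool() keep a reference count so the
 * pool is removed only when the last user releases it. A minimal sketch of the
 * intended calling pattern, assuming the usual global gBufferOrch pointer:
 *
 *   sai_object_id_t pool = gBufferOrch->getZeroBufferPool(true);   // ingress
 *   if (pool != SAI_NULL_OBJECT_ID)
 *   {
 *       gBufferOrch->lockZeroBufferPool(true);
 *       // ... reference the pool, e.g. from a zero buffer profile ...
 *       gBufferOrch->unlockZeroBufferPool(true);  // removes the pool at refcount 0
 *   }
 */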
m_ingressZeroBufferPool : m_egressZeroBufferPool; + } + + void lockZeroBufferPool(bool ingress); + void unlockZeroBufferPool(bool ingress); + void setZeroBufferPool(bool direction, sai_object_id_t pool); private: typedef task_process_status (BufferOrch::*buffer_table_handler)(KeyOpFieldsValuesTuple &tuple); @@ -71,6 +79,11 @@ class BufferOrch : public Orch unique_ptr m_countersDb; bool m_isBufferPoolWatermarkCounterIdListGenerated = false; + + sai_object_id_t m_ingressZeroBufferPool; + sai_object_id_t m_egressZeroBufferPool; + int m_ingressZeroPoolRefCount; + int m_egressZeroPoolRefCount; }; #endif /* SWSS_BUFFORCH_H */ diff --git a/orchagent/bulker.h b/orchagent/bulker.h index a4a49b105d..2ff86644ac 100644 --- a/orchagent/bulker.h +++ b/orchagent/bulker.h @@ -414,6 +414,11 @@ class EntityBulker return creating_entries.count(entry); } + bool bulk_entry_pending_removal(const Te& entry) const + { + return removing_entries.find(entry) != removing_entries.end(); + } + private: std::unordered_map< // A map of Te, // entry -> diff --git a/orchagent/cbf/cbfnhgorch.cpp b/orchagent/cbf/cbfnhgorch.cpp index 403945c7a9..76435ad12d 100644 --- a/orchagent/cbf/cbfnhgorch.cpp +++ b/orchagent/cbf/cbfnhgorch.cpp @@ -308,7 +308,7 @@ bool CbfNhg::sync() nhg_attr.value.u32 = static_cast(m_members.size()); nhg_attrs.push_back(move(nhg_attr)); - if (nhg_attr.value.u32 > gNhgMapOrch->getMaxFcVal()) + if (nhg_attr.value.u32 > gNhgMapOrch->getMaxNumFcs()) { /* If there are more members than FCs then this may be an error, as some members won't be used. */ SWSS_LOG_WARN("More CBF NHG members configured than supported Forwarding Classes"); @@ -632,6 +632,11 @@ bool CbfNhg::syncMembers(const set &members) nhgm.to_string().c_str(), to_string().c_str()); throw std::logic_error("Syncing already synced NHG member"); } + else if (nhgm.getNhgId() == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_WARN("CBF NHG member %s is not yet synced", nhgm.to_string().c_str()); + return false; + } /* * Check if the group exists in NhgOrch. @@ -710,10 +715,9 @@ vector CbfNhg::createNhgmAttrs(const CbfNhgMember &nhgm) const { SWSS_LOG_ENTER(); - if (!isSynced() || (nhgm.getNhgId() == SAI_NULL_OBJECT_ID)) + if (!isSynced()) { - SWSS_LOG_ERROR("CBF next hop group %s or next hop group %s are not synced", - to_string().c_str(), nhgm.to_string().c_str()); + SWSS_LOG_ERROR("CBF next hop group %s is not synced", to_string().c_str()); throw logic_error("CBF next hop group member attributes data is insufficient"); } diff --git a/orchagent/cbf/nhgmaporch.cpp b/orchagent/cbf/nhgmaporch.cpp index d765e3e90e..fd83fe4b12 100644 --- a/orchagent/cbf/nhgmaporch.cpp +++ b/orchagent/cbf/nhgmaporch.cpp @@ -294,34 +294,34 @@ void NhgMapOrch::decRefCount(const string &index) } /* - * Get the max FC value supported by the switch. + * Get the maximum number of FC classes supported by the switch. */ -sai_uint8_t NhgMapOrch::getMaxFcVal() +sai_uint8_t NhgMapOrch::getMaxNumFcs() { SWSS_LOG_ENTER(); - static int max_fc_val = -1; + static int max_num_fcs = -1; /* - * Get the maximum value allowed for FC if it wasn't already initialized. + * Get the maximum number of FC classes if it wasn't already initialized. 
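 * The value is queried once through SAI_SWITCH_ATTR_MAX_NUMBER_OF_FORWARDING_CLASSES
 * and cached in a function-local static, so repeated map validations do not
 * re-query SAI; a switch without FC support reports 0. Callers treat the
 * result as an exclusive upper bound, roughly:
 *
 *   uint8_t max_fcs = NhgMapOrch::getMaxNumFcs();
 *   if (fc < 0 || fc >= max_fcs)
 *   {
 *       // reject the FC map entry
 *   }
 *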
*/ - if (max_fc_val == -1) + if (max_num_fcs == -1) { sai_attribute_t attr; attr.id = SAI_SWITCH_ATTR_MAX_NUMBER_OF_FORWARDING_CLASSES; if (sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr) == SAI_STATUS_SUCCESS) { - max_fc_val = attr.value.u8; + max_num_fcs = attr.value.u8; } else { SWSS_LOG_WARN("Switch does not support FCs"); - max_fc_val = 0; + max_num_fcs = 0; } } - return static_cast(max_fc_val); + return static_cast(max_num_fcs); } /* @@ -343,7 +343,7 @@ pair> NhgMapOrch::getMap(const ve } unordered_map fc_map; - sai_uint8_t max_fc_val = getMaxFcVal(); + sai_uint8_t max_num_fcs = getMaxNumFcs(); /* * Create the map while validating that the values are positive @@ -353,13 +353,13 @@ pair> NhgMapOrch::getMap(const ve try { /* - * Check the FC value is valid. + * Check the FC value is valid. FC value must be in range [0, max_num_fcs). */ auto fc = stoi(fvField(*it)); - if ((fc < 0) || (fc > max_fc_val)) + if ((fc < 0) || (fc >= max_num_fcs)) { - SWSS_LOG_ERROR("FC value %d is either negative or greater than max value %d", fc, max_fc_val); + SWSS_LOG_ERROR("FC value %d is either negative or greater than max value %d", fc, max_num_fcs - 1); success = false; break; } diff --git a/orchagent/cbf/nhgmaporch.h b/orchagent/cbf/nhgmaporch.h index c345e7d566..7d7317a1d6 100644 --- a/orchagent/cbf/nhgmaporch.h +++ b/orchagent/cbf/nhgmaporch.h @@ -43,9 +43,9 @@ class NhgMapOrch : public Orch void decRefCount(const string &key); /* - * Get the max FC value supported by the switch. + * Get the maximum number of FC classes supported by the switch. */ - static sai_uint8_t getMaxFcVal(); + static sai_uint8_t getMaxNumFcs(); private: /* diff --git a/orchagent/crmorch.cpp b/orchagent/crmorch.cpp index fbc9d43691..a7e897f822 100644 --- a/orchagent/crmorch.cpp +++ b/orchagent/crmorch.cpp @@ -66,13 +66,49 @@ const map crmResSaiAvailAttrMap = { CrmResourceType::CRM_IPMC_ENTRY, SAI_SWITCH_ATTR_AVAILABLE_IPMC_ENTRY}, { CrmResourceType::CRM_SNAT_ENTRY, SAI_SWITCH_ATTR_AVAILABLE_SNAT_ENTRY }, { CrmResourceType::CRM_DNAT_ENTRY, SAI_SWITCH_ATTR_AVAILABLE_DNAT_ENTRY }, +}; + +const map crmResSaiObjAttrMap = +{ + { CrmResourceType::CRM_IPV4_ROUTE, SAI_OBJECT_TYPE_ROUTE_ENTRY }, + { CrmResourceType::CRM_IPV6_ROUTE, SAI_OBJECT_TYPE_ROUTE_ENTRY }, + { CrmResourceType::CRM_IPV4_NEXTHOP, SAI_OBJECT_TYPE_NULL }, + { CrmResourceType::CRM_IPV6_NEXTHOP, SAI_OBJECT_TYPE_NULL }, + { CrmResourceType::CRM_IPV4_NEIGHBOR, SAI_OBJECT_TYPE_NEIGHBOR_ENTRY }, + { CrmResourceType::CRM_IPV6_NEIGHBOR, SAI_OBJECT_TYPE_NEIGHBOR_ENTRY }, + { CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER, SAI_OBJECT_TYPE_NULL }, + { CrmResourceType::CRM_NEXTHOP_GROUP, SAI_OBJECT_TYPE_NEXT_HOP_GROUP }, + { CrmResourceType::CRM_ACL_TABLE, SAI_OBJECT_TYPE_NULL }, + { CrmResourceType::CRM_ACL_GROUP, SAI_OBJECT_TYPE_NULL }, + { CrmResourceType::CRM_ACL_ENTRY, SAI_OBJECT_TYPE_NULL }, + { CrmResourceType::CRM_ACL_COUNTER, SAI_OBJECT_TYPE_NULL }, + { CrmResourceType::CRM_FDB_ENTRY, SAI_OBJECT_TYPE_FDB_ENTRY }, + { CrmResourceType::CRM_IPMC_ENTRY, SAI_OBJECT_TYPE_NULL}, + { CrmResourceType::CRM_SNAT_ENTRY, SAI_OBJECT_TYPE_NULL }, + { CrmResourceType::CRM_DNAT_ENTRY, SAI_OBJECT_TYPE_NULL }, { CrmResourceType::CRM_MPLS_INSEG, SAI_OBJECT_TYPE_INSEG_ENTRY }, - { CrmResourceType::CRM_MPLS_NEXTHOP, SAI_SWITCH_ATTR_AVAILABLE_IPV4_NEXTHOP_ENTRY }, + { CrmResourceType::CRM_MPLS_NEXTHOP, SAI_OBJECT_TYPE_NEXT_HOP }, { CrmResourceType::CRM_SRV6_MY_SID_ENTRY, SAI_OBJECT_TYPE_MY_SID_ENTRY }, - { CrmResourceType::CRM_SRV6_NEXTHOP, SAI_SWITCH_ATTR_AVAILABLE_IPV6_NEXTHOP_ENTRY }, 
+ { CrmResourceType::CRM_SRV6_NEXTHOP, SAI_OBJECT_TYPE_NEXT_HOP }, { CrmResourceType::CRM_NEXTHOP_GROUP_MAP, SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MAP }, }; +const map crmResAddrFamilyAttrMap = +{ + { CrmResourceType::CRM_IPV4_ROUTE, SAI_ROUTE_ENTRY_ATTR_IP_ADDR_FAMILY }, + { CrmResourceType::CRM_IPV6_ROUTE, SAI_ROUTE_ENTRY_ATTR_IP_ADDR_FAMILY }, + { CrmResourceType::CRM_IPV4_NEIGHBOR, SAI_NEIGHBOR_ENTRY_ATTR_IP_ADDR_FAMILY }, + { CrmResourceType::CRM_IPV6_NEIGHBOR, SAI_NEIGHBOR_ENTRY_ATTR_IP_ADDR_FAMILY }, +}; + +const map crmResAddrFamilyValMap = +{ + { CrmResourceType::CRM_IPV4_ROUTE, SAI_IP_ADDR_FAMILY_IPV4 }, + { CrmResourceType::CRM_IPV6_ROUTE, SAI_IP_ADDR_FAMILY_IPV6 }, + { CrmResourceType::CRM_IPV4_NEIGHBOR, SAI_IP_ADDR_FAMILY_IPV4 }, + { CrmResourceType::CRM_IPV6_NEIGHBOR, SAI_IP_ADDR_FAMILY_IPV6 }, +}; + const map crmThreshTypeResMap = { { "ipv4_route_threshold_type", CrmResourceType::CRM_IPV4_ROUTE }, @@ -325,7 +361,6 @@ void CrmOrch::handleSetCommand(const string& key, const vector& else { SWSS_LOG_ERROR("Failed to parse CRM %s configuration. Unknown attribute %s.\n", key.c_str(), field.c_str()); - return; } } catch (const exception& e) @@ -465,6 +500,74 @@ void CrmOrch::doTask(SelectableTimer &timer) checkCrmThresholds(); } +bool CrmOrch::getResAvailability(CrmResourceType type, CrmResourceEntry &res) +{ + sai_attribute_t attr; + uint64_t availCount = 0; + sai_status_t status = SAI_STATUS_SUCCESS; + + sai_object_type_t objType = crmResSaiObjAttrMap.at(type); + + if (objType != SAI_OBJECT_TYPE_NULL) + { + uint32_t attrCount = 0; + + if ((type == CrmResourceType::CRM_IPV4_ROUTE) || (type == CrmResourceType::CRM_IPV6_ROUTE) || + (type == CrmResourceType::CRM_IPV4_NEIGHBOR) || (type == CrmResourceType::CRM_IPV6_NEIGHBOR)) + { + attr.id = crmResAddrFamilyAttrMap.at(type); + attr.value.s32 = crmResAddrFamilyValMap.at(type); + attrCount = 1; + } + else if (type == CrmResourceType::CRM_MPLS_NEXTHOP) + { + attr.id = SAI_NEXT_HOP_ATTR_TYPE; + attr.value.s32 = SAI_NEXT_HOP_TYPE_MPLS; + attrCount = 1; + } + else if (type == CrmResourceType::CRM_SRV6_NEXTHOP) + { + attr.id = SAI_NEXT_HOP_ATTR_TYPE; + attr.value.s32 = SAI_NEXT_HOP_TYPE_SRV6_SIDLIST; + attrCount = 1; + } + + status = sai_object_type_get_availability(gSwitchId, objType, attrCount, &attr, &availCount); + } + + if ((status != SAI_STATUS_SUCCESS) || (objType == SAI_OBJECT_TYPE_NULL)) + { + if (crmResSaiAvailAttrMap.find(type) != crmResSaiAvailAttrMap.end()) + { + attr.id = crmResSaiAvailAttrMap.at(type); + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + } + + if ((status == SAI_STATUS_NOT_SUPPORTED) || + (status == SAI_STATUS_NOT_IMPLEMENTED) || + SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || + SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status)) + { + // mark unsupported resources + res.resStatus = CrmResourceStatus::CRM_RES_NOT_SUPPORTED; + SWSS_LOG_NOTICE("CRM resource %s not supported", crmResTypeNameMap.at(type).c_str()); + return false; + } + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get availability counter for %s CRM resourse", crmResTypeNameMap.at(type).c_str()); + return false; + } + + availCount = attr.value.u32; + } + + res.countersMap[CRM_COUNTERS_TABLE_KEY].availableCounter = static_cast(availCount); + + return true; +} + void CrmOrch::getResAvailableCounters() { SWSS_LOG_ENTER(); @@ -491,33 +594,13 @@ void CrmOrch::getResAvailableCounters() case CrmResourceType::CRM_IPMC_ENTRY: case CrmResourceType::CRM_SNAT_ENTRY: case CrmResourceType::CRM_DNAT_ENTRY: + case 
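/* These resource types are all served by the getResAvailability() helper added
 * above: it first tries sai_object_type_get_availability() on the mapped SAI
 * object type, adding an address-family or next-hop-type filter attribute
 * where one applies, and falls back to the legacy SAI_SWITCH_ATTR_AVAILABLE_*
 * switch attribute when the object type is SAI_OBJECT_TYPE_NULL or the query
 * fails; resources supporting neither are marked CRM_RES_NOT_SUPPORTED. */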
CrmResourceType::CRM_MPLS_INSEG: + case CrmResourceType::CRM_NEXTHOP_GROUP_MAP: + case CrmResourceType::CRM_SRV6_MY_SID_ENTRY: + case CrmResourceType::CRM_MPLS_NEXTHOP: + case CrmResourceType::CRM_SRV6_NEXTHOP: { - sai_attribute_t attr; - attr.id = crmResSaiAvailAttrMap.at(res.first); - - sai_status_t status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); - if (status != SAI_STATUS_SUCCESS) - { - if ((status == SAI_STATUS_NOT_SUPPORTED) || - (status == SAI_STATUS_NOT_IMPLEMENTED) || - SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || - SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status)) - { - // mark unsupported resources - res.second.resStatus = CrmResourceStatus::CRM_RES_NOT_SUPPORTED; - SWSS_LOG_NOTICE("Switch attribute %u not supported", attr.id); - break; - } - SWSS_LOG_ERROR("Failed to get switch attribute %u , rv:%d", attr.id, status); - task_process_status handle_status = handleSaiGetStatus(SAI_API_SWITCH, status); - if (handle_status != task_process_status::task_success) - { - break; - } - } - - res.second.countersMap[CRM_COUNTERS_TABLE_KEY].availableCounter = attr.value.u32; - + getResAvailability(res.first, res.second); break; } @@ -579,119 +662,6 @@ void CrmOrch::getResAvailableCounters() break; } - case CrmResourceType::CRM_MPLS_INSEG: - case CrmResourceType::CRM_NEXTHOP_GROUP_MAP: - { - sai_object_type_t objType = static_cast(crmResSaiAvailAttrMap.at(res.first)); - uint64_t availCount = 0; - sai_status_t status = sai_object_type_get_availability(gSwitchId, objType, 0, nullptr, &availCount); - if (status != SAI_STATUS_SUCCESS) - { - if ((status == SAI_STATUS_NOT_SUPPORTED) || - (status == SAI_STATUS_NOT_IMPLEMENTED) || - SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || - SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status)) - { - // mark unsupported resources - res.second.resStatus = CrmResourceStatus::CRM_RES_NOT_SUPPORTED; - SWSS_LOG_NOTICE("CRM Resource %s not supported", crmResTypeNameMap.at(res.first).c_str()); - break; - } - SWSS_LOG_ERROR("Failed to get availability for object_type %u , rv:%d", objType, status); - break; - } - - res.second.countersMap[CRM_COUNTERS_TABLE_KEY].availableCounter = static_cast(availCount); - - break; - } - - case CrmResourceType::CRM_MPLS_NEXTHOP: - { - sai_object_type_t objType = static_cast(crmResSaiAvailAttrMap.at(res.first)); - sai_attribute_t attr; - uint64_t availCount = 0; - - attr.id = SAI_NEXT_HOP_ATTR_TYPE; - attr.value.s32 = SAI_NEXT_HOP_TYPE_MPLS; - sai_status_t status = sai_object_type_get_availability(gSwitchId, objType, 1, &attr, &availCount); - if (status != SAI_STATUS_SUCCESS) - { - if ((status == SAI_STATUS_NOT_SUPPORTED) || - (status == SAI_STATUS_NOT_IMPLEMENTED) || - SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || - SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status)) - { - // mark unsupported resources - res.second.resStatus = CrmResourceStatus::CRM_RES_NOT_SUPPORTED; - SWSS_LOG_NOTICE("CRM Resource %s not supported", crmResTypeNameMap.at(res.first).c_str()); - break; - } - SWSS_LOG_ERROR("Failed to get availability for object_type %u , rv:%d", objType, status); - break; - } - - res.second.countersMap[CRM_COUNTERS_TABLE_KEY].availableCounter = static_cast(availCount); - - break; - } - - case CrmResourceType::CRM_SRV6_MY_SID_ENTRY: - { - sai_object_type_t objType = static_cast(crmResSaiAvailAttrMap.at(res.first)); - uint64_t availCount = 0; - sai_status_t status = sai_object_type_get_availability(gSwitchId, objType, 0, nullptr, &availCount); - if (status != SAI_STATUS_SUCCESS) - { - if ((status == SAI_STATUS_NOT_SUPPORTED) || - (status == 
SAI_STATUS_NOT_IMPLEMENTED) || - SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || - SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status)) - { - // mark unsupported resources - res.second.resStatus = CrmResourceStatus::CRM_RES_NOT_SUPPORTED; - SWSS_LOG_NOTICE("CRM Resource %s not supported", crmResTypeNameMap.at(res.first).c_str()); - break; - } - SWSS_LOG_ERROR("Failed to get availability for object_type %u , rv:%d", objType, status); - break; - } - - res.second.countersMap[CRM_COUNTERS_TABLE_KEY].availableCounter = static_cast(availCount); - - break; - } - - case CrmResourceType::CRM_SRV6_NEXTHOP: - { - sai_object_type_t objType = static_cast(crmResSaiAvailAttrMap.at(res.first)); - sai_attribute_t attr; - uint64_t availCount = 0; - - attr.id = SAI_NEXT_HOP_ATTR_TYPE; - attr.value.s32 = SAI_NEXT_HOP_TYPE_SRV6_SIDLIST; - sai_status_t status = sai_object_type_get_availability(gSwitchId, objType, 1, &attr, &availCount); - if (status != SAI_STATUS_SUCCESS) - { - if ((status == SAI_STATUS_NOT_SUPPORTED) || - (status == SAI_STATUS_NOT_IMPLEMENTED) || - SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || - SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status)) - { - // mark unsupported resources - res.second.resStatus = CrmResourceStatus::CRM_RES_NOT_SUPPORTED; - SWSS_LOG_NOTICE("CRM Resource %s not supported", crmResTypeNameMap.at(res.first).c_str()); - break; - } - SWSS_LOG_ERROR("Failed to get availability for object_type %u , rv:%d", objType, status); - break; - } - - res.second.countersMap[CRM_COUNTERS_TABLE_KEY].availableCounter = static_cast(availCount); - - break; - } - default: SWSS_LOG_ERROR("Failed to get CRM resource type %u. Unknown resource type.\n", static_cast(res.first)); return; diff --git a/orchagent/crmorch.h b/orchagent/crmorch.h index 345caa2cf6..f63e2a31c2 100644 --- a/orchagent/crmorch.h +++ b/orchagent/crmorch.h @@ -98,6 +98,7 @@ class CrmOrch : public Orch void doTask(Consumer &consumer); void handleSetCommand(const std::string& key, const std::vector& data); void doTask(swss::SelectableTimer &timer); + bool getResAvailability(CrmResourceType type, CrmResourceEntry &res); void getResAvailableCounters(); void updateCrmCountersTable(); void checkCrmThresholds(); diff --git a/orchagent/fdborch.cpp b/orchagent/fdborch.cpp index daab3ad52e..a373a6ac21 100644 --- a/orchagent/fdborch.cpp +++ b/orchagent/fdborch.cpp @@ -475,6 +475,7 @@ void FdbOrch::update(sai_fdb_event_t type, } update.add = true; + update.entry.port_name = update.port.m_alias; if (!port_old.m_alias.empty()) { port_old.m_fdb_count--; diff --git a/orchagent/macsecorch.cpp b/orchagent/macsecorch.cpp index 36fbb44f89..cb11fb35e5 100644 --- a/orchagent/macsecorch.cpp +++ b/orchagent/macsecorch.cpp @@ -1183,6 +1183,18 @@ bool MACsecOrch::updateMACsecPort(MACsecPort &macsec_port, const TaskArgs &port_ if (get_value(port_attr, "enable_encrypt", alpha_boolean)) { macsec_port.m_enable_encrypt = alpha_boolean.operator bool(); + if (!updateMACsecSCs( + macsec_port, + [&macsec_port, this](MACsecOrch::MACsecSC &macsec_sc) + { + sai_attribute_t attr; + attr.id = SAI_MACSEC_SC_ATTR_ENCRYPTION_ENABLE; + attr.value.booldata = macsec_port.m_enable_encrypt; + return this->updateMACsecAttr(SAI_OBJECT_TYPE_MACSEC_SC, macsec_sc.m_sc_id, attr); + })) + { + return false; + } } if (get_value(port_attr, "send_sci", alpha_boolean)) { @@ -1212,42 +1224,76 @@ bool MACsecOrch::updateMACsecPort(MACsecPort &macsec_port, const TaskArgs &port_ SWSS_LOG_WARN("Unknown Cipher Suite %s", cipher_suite.c_str()); return false; } + if (!updateMACsecSCs( + macsec_port, + [&macsec_port, 
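/* updateMACsecSCs(), added below, applies a single action to every egress and
 * ingress secure channel of the port, so attribute changes made at port scope
 * (encryption enable earlier in this function and, here, the cipher suite)
 * are also pushed to SCs that already exist instead of only affecting SCs
 * created afterwards. */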
this](MACsecOrch::MACsecSC &macsec_sc) + { + sai_attribute_t attr; + attr.id = SAI_MACSEC_SC_ATTR_MACSEC_CIPHER_SUITE; + attr.value.s32 = macsec_port.m_cipher_suite; + return this->updateMACsecAttr(SAI_OBJECT_TYPE_MACSEC_SC, macsec_sc.m_sc_id, attr); + })) + { + return false; + } } swss::AlphaBoolean enable = false; if (get_value(port_attr, "enable", enable) && enable.operator bool() != macsec_port.m_enable) { - std::vector macsec_scs; macsec_port.m_enable = enable.operator bool(); - for (auto &sc : macsec_port.m_egress_scs) + if (!updateMACsecSCs( + macsec_port, + [&macsec_port, &recover, this](MACsecOrch::MACsecSC &macsec_sc) + { + // Change the ACL entry action from packet action to MACsec flow + if (macsec_port.m_enable) + { + if (!this->setMACsecFlowActive(macsec_sc.m_entry_id, macsec_sc.m_flow_id, true)) + { + SWSS_LOG_WARN("Cannot change the ACL entry action from packet action to MACsec flow"); + return false; + } + auto entry_id = macsec_sc.m_entry_id; + auto flow_id = macsec_sc.m_flow_id; + recover.add_action([this, entry_id, flow_id]() + { this->setMACsecFlowActive(entry_id, flow_id, false); }); + } + else + { + this->setMACsecFlowActive(macsec_sc.m_entry_id, macsec_sc.m_flow_id, false); + } + return true; + })) { - macsec_scs.push_back(&sc.second); + return false; } - for (auto &sc : macsec_port.m_ingress_scs) + } + + recover.clear(); + return true; +} + +bool MACsecOrch::updateMACsecSCs(MACsecPort &macsec_port, std::function action) +{ + SWSS_LOG_ENTER(); + + auto sc = macsec_port.m_egress_scs.begin(); + while (sc != macsec_port.m_egress_scs.end()) + { + if (!action((sc++)->second)) { - macsec_scs.push_back(&sc.second); + return false; } - for (auto &macsec_sc : macsec_scs) + } + sc = macsec_port.m_ingress_scs.begin(); + while (sc != macsec_port.m_ingress_scs.end()) + { + if (!action((sc++)->second)) { - // Change the ACL entry action from packet action to MACsec flow - if (macsec_port.m_enable) - { - if (!setMACsecFlowActive(macsec_sc->m_entry_id, macsec_sc->m_flow_id, true)) - { - SWSS_LOG_WARN("Cannot change the ACL entry action from packet action to MACsec flow"); - return false; - } - auto entry_id = macsec_sc->m_entry_id; - auto flow_id = macsec_sc->m_flow_id; - recover.add_action([this, entry_id, flow_id]() { this->setMACsecFlowActive(entry_id, flow_id, false); }); - } - else - { - setMACsecFlowActive(macsec_sc->m_entry_id, macsec_sc->m_flow_id, false); - } + return false; } } - recover.clear(); return true; } @@ -1263,17 +1309,21 @@ bool MACsecOrch::deleteMACsecPort( bool result = true; - for (auto &sc : macsec_port.m_egress_scs) + auto sc = macsec_port.m_egress_scs.begin(); + while (sc != macsec_port.m_egress_scs.end()) { - const std::string port_sci = swss::join(':', port_name, sc.first); + const std::string port_sci = swss::join(':', port_name, sc->first); + sc ++; if (deleteMACsecSC(port_sci, SAI_MACSEC_DIRECTION_EGRESS) != task_success) { result &= false; } } - for (auto &sc : macsec_port.m_ingress_scs) + sc = macsec_port.m_ingress_scs.begin(); + while (sc != macsec_port.m_ingress_scs.end()) { - const std::string port_sci = swss::join(':', port_name, sc.first); + const std::string port_sci = swss::join(':', port_name, sc->first); + sc ++; if (deleteMACsecSC(port_sci, SAI_MACSEC_DIRECTION_INGRESS) != task_success) { result &= false; @@ -1661,9 +1711,11 @@ task_process_status MACsecOrch::deleteMACsecSC( auto result = task_success; - for (auto &sa : ctx.get_macsec_sc()->m_sa_ids) + auto sa = ctx.get_macsec_sc()->m_sa_ids.begin(); + while (sa != 
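/* The iterator is advanced with a post-increment before the delete call is
 * made, because deleting an SA/SC can erase the element the iterator points
 * to; stepping past it first keeps the loop iterator valid. The same idiom in
 * a minimal generic form, where container and removeEntry() are placeholders:
 *
 *   auto it = container.begin();
 *   while (it != container.end())
 *   {
 *       auto key = (it++)->first;   // advance before the call below
 *       removeEntry(key);           // may erase the element just visited
 *   }
 */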
ctx.get_macsec_sc()->m_sa_ids.end()) { - const std::string port_sci_an = swss::join(':', port_sci, sa.first); + const std::string port_sci_an = swss::join(':', port_sci, sa->first); + sa ++; deleteMACsecSA(port_sci_an, direction); } @@ -1721,6 +1773,42 @@ bool MACsecOrch::deleteMACsecSC(sai_object_id_t sc_id) return true; } +bool MACsecOrch::updateMACsecAttr(sai_object_type_t object_type, sai_object_id_t object_id, const sai_attribute_t &attr) +{ + SWSS_LOG_ENTER(); + + sai_status_t status = SAI_STATUS_SUCCESS; + + if (object_type == SAI_OBJECT_TYPE_MACSEC_PORT) + { + status = sai_macsec_api->set_macsec_port_attribute(object_id, &attr); + } + else if (object_type == SAI_OBJECT_TYPE_MACSEC_SC) + { + status = sai_macsec_api->set_macsec_sc_attribute(object_id, &attr); + } + else if (object_type == SAI_OBJECT_TYPE_MACSEC_SA) + { + status = sai_macsec_api->set_macsec_sa_attribute(object_id, &attr); + } + else + { + SWSS_LOG_ERROR("Wrong type %s", sai_serialize_object_type(object_type).c_str()); + return false; + } + + if (status != SAI_STATUS_SUCCESS) + { + task_process_status handle_status = handleSaiSetStatus(SAI_API_MACSEC, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + return true; +} + task_process_status MACsecOrch::createMACsecSA( const std::string &port_sci_an, const TaskArgs &sa_attr, diff --git a/orchagent/macsecorch.h b/orchagent/macsecorch.h index 8856347118..6702c75cf6 100644 --- a/orchagent/macsecorch.h +++ b/orchagent/macsecorch.h @@ -132,6 +132,7 @@ class MACsecOrch : public Orch sai_object_id_t switch_id, sai_macsec_direction_t direction); bool updateMACsecPort(MACsecPort &macsec_port, const TaskArgs & port_attr); + bool updateMACsecSCs(MACsecPort &macsec_port, std::function action); bool deleteMACsecPort( const MACsecPort &macsec_port, const std::string &port_name, @@ -179,6 +180,8 @@ class MACsecOrch : public Orch sai_macsec_direction_t direction); bool deleteMACsecSC(sai_object_id_t sc_id); + bool updateMACsecAttr(sai_object_type_t object_type, sai_object_id_t object_id, const sai_attribute_t &attr); + /* MACsec SA */ task_process_status createMACsecSA( const std::string &port_sci_an, diff --git a/orchagent/main.cpp b/orchagent/main.cpp index de96234a2d..6f397cb42e 100644 --- a/orchagent/main.cpp +++ b/orchagent/main.cpp @@ -478,10 +478,6 @@ int main(int argc, char **argv) attr.value.ptr = (void *)on_port_state_change; attrs.push_back(attr); - attr.id = SAI_SWITCH_ATTR_BFD_SESSION_STATE_CHANGE_NOTIFY; - attr.value.ptr = (void *)on_bfd_session_state_change; - attrs.push_back(attr); - attr.id = SAI_SWITCH_ATTR_SHUTDOWN_REQUEST_NOTIFY; attr.value.ptr = (void *)on_switch_shutdown_request; attrs.push_back(attr); @@ -574,6 +570,36 @@ int main(int argc, char **argv) attr.value.u64 = gSwitchId; attrs.push_back(attr); + if (gMySwitchType == "voq" || gMySwitchType == "fabric") + { + /* We set this long timeout in order for orchagent to wait enough time for + * response from syncd. 
It is needed since switch create takes more time + * than default time to create switch if there are lots of front panel ports + * and systems ports to initialize + */ + + if (gMySwitchType == "voq") + { + attr.value.u64 = (5 * SAI_REDIS_DEFAULT_SYNC_OPERATION_RESPONSE_TIMEOUT); + } + else if (gMySwitchType == "fabric") + { + attr.value.u64 = (10 * SAI_REDIS_DEFAULT_SYNC_OPERATION_RESPONSE_TIMEOUT); + } + + attr.id = SAI_REDIS_SWITCH_ATTR_SYNC_OPERATION_RESPONSE_TIMEOUT; + status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_WARN("Failed to set SAI REDIS response timeout"); + } + else + { + SWSS_LOG_NOTICE("SAI REDIS response timeout set successfully to %" PRIu64 " ", attr.value.u64); + } + } + status = sai_switch_api->create_switch(&gSwitchId, (uint32_t)attrs.size(), attrs.data()); if (status != SAI_STATUS_SUCCESS) { @@ -582,6 +608,22 @@ int main(int argc, char **argv) } SWSS_LOG_NOTICE("Create a switch, id:%" PRIu64, gSwitchId); + if (gMySwitchType == "voq" || gMySwitchType == "fabric") + { + /* Set syncd response timeout back to the default value */ + attr.id = SAI_REDIS_SWITCH_ATTR_SYNC_OPERATION_RESPONSE_TIMEOUT; + attr.value.u64 = SAI_REDIS_DEFAULT_SYNC_OPERATION_RESPONSE_TIMEOUT; + status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_WARN("Failed to set SAI REDIS response timeout to default"); + } + else + { + SWSS_LOG_NOTICE("SAI REDIS response timeout set successfully to default: %" PRIu64 " ", attr.value.u64); + } + } if (gMySwitchType != "fabric") { diff --git a/orchagent/mirrororch.cpp b/orchagent/mirrororch.cpp index 0a73030f40..6568c63d37 100644 --- a/orchagent/mirrororch.cpp +++ b/orchagent/mirrororch.cpp @@ -327,6 +327,7 @@ bool MirrorOrch::validateSrcPortList(const string& srcPortList) if (port.m_type == Port::LAG) { vector portv; + int portCount = 0; m_portsOrch->getLagMember(port, portv); for (const auto p : portv) { @@ -336,6 +337,13 @@ bool MirrorOrch::validateSrcPortList(const string& srcPortList) p.m_alias.c_str(), port.m_alias.c_str(), srcPortList.c_str()); return false; } + portCount++; + } + if (!portCount) + { + SWSS_LOG_ERROR("Source LAG %s is empty. set mirror session to inactive", + port.m_alias.c_str());; + return false; } } } diff --git a/orchagent/mplsrouteorch.cpp b/orchagent/mplsrouteorch.cpp index 122bb6e8e1..ef40987a19 100644 --- a/orchagent/mplsrouteorch.cpp +++ b/orchagent/mplsrouteorch.cpp @@ -598,8 +598,12 @@ bool RouteOrch::addLabelRoute(LabelRouteBulkContext& ctx, const NextHopGroupKey * in m_syncdLabelRoutes, then we need to update the route with a new next hop * (group) id. The old next hop (group) is then not used and the reference * count will decrease by 1. + * + * In case the entry is already pending removal in the bulk, it would be removed + * from m_syncdLabelRoutes during the bulk call. Therefore, such entries need to be + * re-created rather than set attribute. 
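 * (EntityBulker::bulk_entry_pending_removal() returns true when the inseg
 * entry is already queued for removal in gLabelRouteBulker, meaning the
 * existing SAI object will be gone once the bulk is flushed, so issuing a SET
 * against it would target a stale object.)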
*/ - if (it_route == m_syncdLabelRoutes.at(vrf_id).end()) + if (it_route == m_syncdLabelRoutes.at(vrf_id).end() || gLabelRouteBulker.bulk_entry_pending_removal(inseg_entry)) { vector inseg_attrs; if (blackhole) diff --git a/orchagent/neighorch.cpp b/orchagent/neighorch.cpp index c1eba4c0e3..42bf064367 100644 --- a/orchagent/neighorch.cpp +++ b/orchagent/neighorch.cpp @@ -685,19 +685,6 @@ void NeighOrch::doTask(Consumer &consumer) IpAddress ip_address(key.substr(found+1)); - /* Verify Ipv4 LinkLocal and skip neighbor entry added for RFC5549 */ - if ((ip_address.getAddrScope() == IpAddress::LINK_SCOPE) && (ip_address.isV4())) - { - /* Check if this prefix is not a configured ip, if so allow */ - IpPrefix ipll_prefix(ip_address.getV4Addr(), 16); - if (!m_intfsOrch->isPrefixSubnet (ipll_prefix, alias)) - { - SWSS_LOG_NOTICE("Skip IPv4LL neighbor %s, Intf:%s op: %s ", ip_address.to_string().c_str(), alias.c_str(), op.c_str()); - it = consumer.m_toSync.erase(it); - continue; - } - } - NeighborEntry neighbor_entry = { ip_address, alias }; if (op == SET_COMMAND) @@ -807,6 +794,18 @@ bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress memcpy(neighbor_attr.value.mac, macAddress.getMac(), 6); neighbor_attrs.push_back(neighbor_attr); + if ((ip_address.getAddrScope() == IpAddress::LINK_SCOPE) && (ip_address.isV4())) + { + /* Check if this prefix is a configured ip, if not allow */ + IpPrefix ipll_prefix(ip_address.getV4Addr(), 16); + if (!m_intfsOrch->isPrefixSubnet (ipll_prefix, alias)) + { + neighbor_attr.id = SAI_NEIGHBOR_ENTRY_ATTR_NO_HOST_ROUTE; + neighbor_attr.value.booldata = 1; + neighbor_attrs.push_back(neighbor_attr); + } + } + MuxOrch* mux_orch = gDirectory.get(); bool hw_config = isHwConfigured(neighborEntry); @@ -1541,10 +1540,6 @@ bool NeighOrch::addVoqEncapIndex(string &alias, IpAddress &ip, vector gDirectory; +extern PortsOrch* gPortsOrch; +extern sai_object_id_t gSwitchId; +extern sai_object_id_t gUnderlayIfId; +extern sai_object_id_t gVirtualRouterId; +extern sai_tunnel_api_t *sai_tunnel_api; + +static const std::vector nvgreMapTypes = { + MAP_T_VLAN, + MAP_T_BRIDGE +}; + +static const std::map nvgreEncapTunnelMap = { + { MAP_T_VLAN, SAI_TUNNEL_MAP_TYPE_VLAN_ID_TO_VSID }, + { MAP_T_BRIDGE, SAI_TUNNEL_MAP_TYPE_BRIDGE_IF_TO_VSID } +}; + +static inline sai_tunnel_map_type_t get_encap_nvgre_mapper(map_type_t map) +{ + return nvgreEncapTunnelMap.at(map); +} + +static const std::map nvgreDecapTunnelMap = { + { MAP_T_VLAN, SAI_TUNNEL_MAP_TYPE_VSID_TO_VLAN_ID }, + { MAP_T_BRIDGE, SAI_TUNNEL_MAP_TYPE_VSID_TO_BRIDGE_IF } +}; + +static inline sai_tunnel_map_type_t get_decap_nvgre_mapper(map_type_t map) +{ + return nvgreDecapTunnelMap.at(map); +} + +static const map> nvgreEncapTunnelMapKeyVal = +{ + { MAP_T_VLAN, + { SAI_TUNNEL_MAP_ENTRY_ATTR_VLAN_ID_KEY, SAI_TUNNEL_MAP_ENTRY_ATTR_VSID_ID_VALUE } + }, + { MAP_T_BRIDGE, + { SAI_TUNNEL_MAP_ENTRY_ATTR_BRIDGE_ID_KEY, SAI_TUNNEL_MAP_ENTRY_ATTR_VSID_ID_VALUE } + } +}; + +static inline sai_tunnel_map_entry_attr_t get_encap_nvgre_map_key(map_type_t map) +{ + return nvgreEncapTunnelMapKeyVal.at(map).first; +} + +static inline sai_tunnel_map_entry_attr_t get_encap_nvgre_map_val(map_type_t map) +{ + return nvgreEncapTunnelMapKeyVal.at(map).second; +} + +static const map> nvgreDecapTunnelMapKeyVal = +{ + { MAP_T_VLAN, + { SAI_TUNNEL_MAP_ENTRY_ATTR_VSID_ID_KEY, SAI_TUNNEL_MAP_ENTRY_ATTR_VLAN_ID_VALUE } + }, + { MAP_T_BRIDGE, + { SAI_TUNNEL_MAP_ENTRY_ATTR_VSID_ID_KEY, SAI_TUNNEL_MAP_ENTRY_ATTR_BRIDGE_ID_VALUE } + } +}; + +static 
inline sai_tunnel_map_entry_attr_t get_decap_nvgre_map_key(map_type_t map) +{ + return nvgreDecapTunnelMapKeyVal.at(map).first; +} + +static inline sai_tunnel_map_entry_attr_t get_decap_nvgre_map_val(map_type_t map) +{ + return nvgreDecapTunnelMapKeyVal.at(map).second; +} + +/** @brief Creates tunnel mapper in SAI. + * + * @param sai_tunnel_map_type SAI tunnel map type e.g. VSID_TO_VLAN + * + * @return Tunnel map SAI identifier. + */ +sai_object_id_t NvgreTunnel::sai_create_tunnel_map(sai_tunnel_map_type_t sai_tunnel_map_type) +{ + sai_attribute_t attr; + std::vector tunnel_map_attrs; + + attr.id = SAI_TUNNEL_MAP_ATTR_TYPE; + attr.value.u32 = sai_tunnel_map_type; + + tunnel_map_attrs.push_back(attr); + + sai_object_id_t tunnel_map_id; + sai_status_t status = sai_tunnel_api->create_tunnel_map( + &tunnel_map_id, + gSwitchId, + static_cast(tunnel_map_attrs.size()), + tunnel_map_attrs.data() + ); + if (status != SAI_STATUS_SUCCESS) + { + throw std::runtime_error("Can't create the NVGRE tunnel map object"); + } + + return tunnel_map_id; +} + +/** @brief Removes tunnel mapper in SAI. + * + * @param sai_tunnel_map_type SAI tunnel map identifier. + * + * @return void. + */ +void NvgreTunnel::sai_remove_tunnel_map(sai_object_id_t tunnel_map_id) +{ + sai_status_t status = sai_tunnel_api->remove_tunnel_map(tunnel_map_id); + + if (status != SAI_STATUS_SUCCESS) + { + throw std::runtime_error("Can't remove the NVGRE tunnel map object"); + } +} + + +/** @brief Creates tunnel in SAI. + * + * @param ids Pointer to structure where stored tunnel and tunnel mappers identifiers. + * @param src_ip Pointer to source IP address. + * + * @return SAI tunnel identifier. + */ +sai_object_id_t NvgreTunnel::sai_create_tunnel(struct tunnel_sai_ids_t &ids, const sai_ip_address_t &src_ip, sai_object_id_t underlay_rif) +{ + sai_attribute_t attr; + std::vector tunnel_attrs; + + attr.id = SAI_TUNNEL_ATTR_TYPE; + attr.value.s32 = SAI_TUNNEL_TYPE_NVGRE; + tunnel_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE; + attr.value.oid = underlay_rif; + tunnel_attrs.push_back(attr); + + sai_object_id_t decap_map_list[MAP_T_MAX]; + uint8_t num_decap_map = 0; + + for (auto map_type : nvgreMapTypes) + { + decap_map_list[num_decap_map] = ids.tunnel_decap_id.at(map_type); + num_decap_map++; + } + + attr.id = SAI_TUNNEL_ATTR_DECAP_MAPPERS; + attr.value.objlist.count = num_decap_map; + attr.value.objlist.list = decap_map_list; + tunnel_attrs.push_back(attr); + + sai_object_id_t encap_map_list[MAP_T_MAX]; + uint8_t num_encap_map = 0; + + for (auto map_type : nvgreMapTypes) + { + encap_map_list[num_encap_map] = ids.tunnel_encap_id.at(map_type); + num_encap_map++; + } + + attr.id = SAI_TUNNEL_ATTR_ENCAP_MAPPERS; + attr.value.objlist.count = num_encap_map; + attr.value.objlist.list = encap_map_list; + tunnel_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_ATTR_ENCAP_SRC_IP; + attr.value.ipaddr = src_ip; + tunnel_attrs.push_back(attr); + + sai_object_id_t tunnel_id; + sai_status_t status = sai_tunnel_api->create_tunnel( + &tunnel_id, + gSwitchId, + static_cast(tunnel_attrs.size()), + tunnel_attrs.data() + ); + if (status != SAI_STATUS_SUCCESS) + { + throw std::runtime_error("Can't create the NVGRE tunnel object"); + } + + return tunnel_id; +} + +/** @brief Removes tunnel in SAI. + * + * @param tunnel_id Pointer to tunnel identifier. + * + * @return void. 
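 *
 * @throws std::runtime_error if the SAI call fails; removeNvgreTunnel()
 *         catches the exception and logs an error instead of propagating it.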
+ */ +void NvgreTunnel::sai_remove_tunnel(sai_object_id_t tunnel_id) +{ + sai_status_t status = sai_tunnel_api->remove_tunnel(tunnel_id); + if (status != SAI_STATUS_SUCCESS) + { + throw std::runtime_error("Can't remove the NVGRE tunnel object"); + } +} + +/** @brief Creates tunnel termination in SAI. + * + * @param tunnel_id Tunnel identifier. + * @param src_ip Pointer to source IP address. + * @param default_vrid Virtual router identifier. + * + * @return SAI tunnel termination identifier. + */ +sai_object_id_t NvgreTunnel::sai_create_tunnel_termination(sai_object_id_t tunnel_id, const sai_ip_address_t &src_ip, sai_object_id_t default_vrid) +{ + sai_attribute_t attr; + std::vector tunnel_attrs; + + attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE; + attr.value.s32 = SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP; + tunnel_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_VR_ID; + attr.value.oid = default_vrid; + tunnel_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP; + attr.value.ipaddr = src_ip; + tunnel_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE; + attr.value.s32 = SAI_TUNNEL_TYPE_NVGRE; + tunnel_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_ACTION_TUNNEL_ID; + attr.value.oid = tunnel_id; + tunnel_attrs.push_back(attr); + + sai_object_id_t term_table_id; + sai_status_t status = sai_tunnel_api->create_tunnel_term_table_entry( + &term_table_id, + gSwitchId, + static_cast(tunnel_attrs.size()), + tunnel_attrs.data() + ); + if (status != SAI_STATUS_SUCCESS) + { + throw std::runtime_error("Can't create a tunnel term table object"); + } + + return term_table_id; +} + +/** @brief Removes tunnel termination in SAI. + * + * @param tunnel_id Pointer to tunnel termination identifier. + * + * @return void. + */ +void NvgreTunnel::sai_remove_tunnel_termination(sai_object_id_t tunnel_term_id) +{ + sai_status_t status = sai_tunnel_api->remove_tunnel_term_table_entry(tunnel_term_id); + if (status != SAI_STATUS_SUCCESS) + { + throw std::runtime_error("Can't remove a tunnel term object"); + } +} + +void NvgreTunnel::createNvgreMappers() +{ + for (auto map_type : nvgreMapTypes) + { + tunnel_ids_.tunnel_encap_id.insert( + make_pair(map_type, sai_create_tunnel_map(get_encap_nvgre_mapper(map_type))) + ); + } + + for (auto map_type : nvgreMapTypes) + { + tunnel_ids_.tunnel_decap_id.insert( + make_pair(map_type, sai_create_tunnel_map(get_decap_nvgre_mapper(map_type))) + ); + } +} + +void NvgreTunnel::removeNvgreMappers() +{ + for (auto map_type : nvgreMapTypes) + { + sai_remove_tunnel_map(getEncapMapId(map_type)); + } + + for (auto map_type : nvgreMapTypes) + { + sai_remove_tunnel_map(getDecapMapId(map_type)); + } + + tunnel_ids_.tunnel_encap_id.clear(); + tunnel_ids_.tunnel_decap_id.clear(); +} + +void NvgreTunnel::createNvgreTunnel() +{ + sai_ip_address_t ip_addr; + swss::copy(ip_addr, src_ip_); + + tunnel_ids_.tunnel_id = sai_create_tunnel(tunnel_ids_, ip_addr, gUnderlayIfId); + tunnel_ids_.tunnel_term_id = sai_create_tunnel_termination(tunnel_ids_.tunnel_id, ip_addr, gVirtualRouterId); + + SWSS_LOG_INFO("NVGRE tunnel '%s' was created", tunnel_name_.c_str()); +} + +void NvgreTunnel::removeNvgreTunnel() +{ + try + { + sai_remove_tunnel_termination(tunnel_ids_.tunnel_term_id); + sai_remove_tunnel(tunnel_ids_.tunnel_id); + } + catch(const std::runtime_error& error) + { + SWSS_LOG_ERROR("Error while removing tunnel entry. Tunnel: %s. 
Error: %s", tunnel_name_.c_str(), error.what()); + } + + SWSS_LOG_INFO("NVGRE tunnel '%s' was removed", tunnel_name_.c_str()); + + tunnel_ids_.tunnel_id = SAI_NULL_OBJECT_ID; + tunnel_ids_.tunnel_term_id = SAI_NULL_OBJECT_ID; +} + +NvgreTunnel::NvgreTunnel(std::string tunnelName, IpAddress srcIp) : + tunnel_name_(tunnelName), + src_ip_(srcIp) +{ + createNvgreMappers(); + createNvgreTunnel(); +} + +NvgreTunnel::~NvgreTunnel() +{ + removeNvgreTunnel(); + removeNvgreMappers(); +} + +bool NvgreTunnelOrch::addOperation(const Request& request) +{ + SWSS_LOG_ENTER(); + + auto src_ip = request.getAttrIP("src_ip"); + const auto& tunnel_name = request.getKeyString(0); + + if (isTunnelExists(tunnel_name)) + { + SWSS_LOG_WARN("NVGRE tunnel '%s' already exists", tunnel_name.c_str()); + return true; + } + + nvgre_tunnel_table_[tunnel_name] = std::unique_ptr(new NvgreTunnel(tunnel_name, src_ip)); + + return true; +} + +bool NvgreTunnelOrch::delOperation(const Request& request) +{ + SWSS_LOG_ENTER(); + + const auto& tunnel_name = request.getKeyString(0); + + if (!isTunnelExists(tunnel_name)) + { + SWSS_LOG_ERROR("NVGRE tunnel '%s' doesn't exist", tunnel_name.c_str()); + return true; + } + + nvgre_tunnel_table_.erase(tunnel_name); + + SWSS_LOG_INFO("NVGRE tunnel '%s' was removed", tunnel_name.c_str()); + + return true; +} + +/** @brief Creates tunnel map entry in SAI. + * + * @param map_type map type - VLAN or BRIDGE. + * @param vsid Virtual Subnet ID value. + * @param vlan_id VLAN ID value. + * @param bridge_obj_id SAI bridge object. + * @param encap encapsulation flag. + * + * @return SAI tunnel map entry ID. + */ +sai_object_id_t NvgreTunnel::sai_create_tunnel_map_entry( + map_type_t map_type, + sai_uint32_t vsid, + sai_vlan_id_t vlan_id, + sai_object_id_t bridge_obj_id, + bool encap) +{ + sai_attribute_t attr; + sai_object_id_t tunnel_map_entry_id; + std::vector tunnel_map_entry_attrs; + + attr.id = SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE; + attr.value.u32 = (encap) ? get_encap_nvgre_mapper(map_type) : get_decap_nvgre_mapper(map_type); + tunnel_map_entry_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP; + attr.value.oid = (encap) ? getEncapMapId(map_type) : getDecapMapId(map_type); + tunnel_map_entry_attrs.push_back(attr); + + attr.id = (encap) ? get_encap_nvgre_map_key(map_type) : get_decap_nvgre_map_val(map_type); + if (bridge_obj_id != SAI_NULL_OBJECT_ID) + { + attr.value.oid = bridge_obj_id; + } + else + { + attr.value.u16 = vlan_id; + } + + tunnel_map_entry_attrs.push_back(attr); + + attr.id = (encap) ? 
get_encap_nvgre_map_val(map_type) : get_decap_nvgre_map_key(map_type); + attr.value.u32 = vsid; + tunnel_map_entry_attrs.push_back(attr); + + sai_status_t status = sai_tunnel_api->create_tunnel_map_entry(&tunnel_map_entry_id, gSwitchId, + static_cast (tunnel_map_entry_attrs.size()), + tunnel_map_entry_attrs.data()); + + if (status != SAI_STATUS_SUCCESS) + { + throw std::runtime_error("Can't create the NVGRE tunnel map entry object"); + } + + return tunnel_map_entry_id; +} + + +bool NvgreTunnel::addDecapMapperEntry( + map_type_t map_type, + uint32_t vsid, + sai_vlan_id_t vlan_id, + std::string tunnel_map_entry_name, + sai_object_id_t bridge_obj) +{ + auto tunnel_map_entry_id = sai_create_tunnel_map_entry(map_type, vsid, vlan_id, bridge_obj); + + nvgre_tunnel_map_table_[tunnel_map_entry_name].map_entry_id = tunnel_map_entry_id; + nvgre_tunnel_map_table_[tunnel_map_entry_name].vlan_id = vlan_id; + nvgre_tunnel_map_table_[tunnel_map_entry_name].vsid = vsid; + + SWSS_LOG_INFO("NVGRE decap tunnel map entry '%s' for tunnel '%s' was created", + tunnel_map_entry_name.c_str(), tunnel_name_.c_str()); + + return true; +} + +bool NvgreTunnelMapOrch::addOperation(const Request& request) +{ + SWSS_LOG_ENTER(); + + auto tunnel_name = request.getKeyString(0); + NvgreTunnelOrch* tunnel_orch = gDirectory.get(); + + if (!tunnel_orch->isTunnelExists(tunnel_name)) + { + SWSS_LOG_WARN("NVGRE tunnel '%s' doesn't exist", tunnel_name.c_str()); + return true; + } + + auto tunnel_obj = tunnel_orch->getNvgreTunnel(tunnel_name); + const auto full_tunnel_map_entry_name = request.getFullKey(); + + if (tunnel_obj->isTunnelMapExists(full_tunnel_map_entry_name)) + { + SWSS_LOG_WARN("NVGRE tunnel map '%s' already exist", full_tunnel_map_entry_name.c_str()); + return true; + } + + sai_vlan_id_t vlan_id = (sai_vlan_id_t) request.getAttrVlan("vlan_id"); + Port port; + + if (!gPortsOrch->getVlanByVlanId(vlan_id, port)) + { + SWSS_LOG_WARN("VLAN ID doesn't exist: %d", vlan_id); + return true; + } + + auto vsid = static_cast(request.getAttrUint("vsid")); + if (vsid > NVGRE_VSID_MAX_VALUE) + { + SWSS_LOG_WARN("VSID is invalid: %d", vsid); + return true; + } + + if (!tunnel_obj->addDecapMapperEntry(MAP_T_VLAN, vsid, vlan_id, full_tunnel_map_entry_name)) + { + return true; + } + + return true; +} + +/** @brief Removes tunnel map entry in SAI. + * + * @param obj_id SAI tunnel map identifier. + * + * @return void. 
+ */
+void NvgreTunnel::sai_remove_tunnel_map_entry(sai_object_id_t obj_id)
+{
+    sai_status_t status = SAI_STATUS_SUCCESS;
+
+    if (obj_id != SAI_NULL_OBJECT_ID)
+    {
+        status = sai_tunnel_api->remove_tunnel_map_entry(obj_id);
+    }
+
+    if (status != SAI_STATUS_SUCCESS)
+    {
+        throw std::runtime_error("Can't delete the NVGRE tunnel map entry object");
+    }
+}
+
+bool NvgreTunnel::delMapperEntry(std::string tunnel_map_entry_name)
+{
+    auto tunnel_map_entry_id = getMapEntryId(tunnel_map_entry_name);
+
+    try
+    {
+        sai_remove_tunnel_map_entry(tunnel_map_entry_id);
+    }
+    catch (const std::runtime_error& error)
+    {
+        SWSS_LOG_ERROR("Error while removing decap tunnel map %s: %s",
+            tunnel_map_entry_name.c_str(), error.what());
+        return false;
+    }
+
+    nvgre_tunnel_map_table_.erase(tunnel_map_entry_name);
+
+    SWSS_LOG_INFO("NVGRE tunnel map entry '%s' for tunnel '%s' was removed",
+        tunnel_map_entry_name.c_str(), tunnel_name_.c_str());
+
+    return true;
+}
+
+bool NvgreTunnelMapOrch::delOperation(const Request& request)
+{
+    SWSS_LOG_ENTER();
+
+    const auto& tunnel_name = request.getKeyString(0);
+    NvgreTunnelOrch* tunnel_orch = gDirectory.get<NvgreTunnelOrch*>();
+    const auto& full_tunnel_map_entry_name = request.getFullKey();
+
+    if (!tunnel_orch->isTunnelExists(tunnel_name))
+    {
+        SWSS_LOG_WARN("NVGRE tunnel '%s' does not exist", tunnel_name.c_str());
+        return true;
+    }
+
+    auto tunnel_obj = tunnel_orch->getNvgreTunnel(tunnel_name);
+
+    if (!tunnel_obj->isTunnelMapExists(full_tunnel_map_entry_name))
+    {
+        SWSS_LOG_WARN("NVGRE tunnel map '%s' does not exist",
+            full_tunnel_map_entry_name.c_str());
+        return true;
+    }
+
+    if (!tunnel_obj->delMapperEntry(full_tunnel_map_entry_name))
+    {
+        return true;
+    }
+
+    return true;
+}
diff --git a/orchagent/nvgreorch.h b/orchagent/nvgreorch.h
new file mode 100644
index 0000000000..82092565ac
--- /dev/null
+++ b/orchagent/nvgreorch.h
@@ -0,0 +1,167 @@
+#pragma once
+
+#include <map>
+
+#include "sai.h"
+#include "orch.h"
+#include "request_parser.h"
+#include "portsorch.h"
+
+typedef enum {
+    MAP_T_VLAN = 0,
+    MAP_T_BRIDGE = 1,
+    MAP_T_MAX = 2
+} map_type_t;
+
+struct tunnel_sai_ids_t
+{
+    std::map<map_type_t, sai_object_id_t> tunnel_encap_id;
+    std::map<map_type_t, sai_object_id_t> tunnel_decap_id;
+    sai_object_id_t tunnel_id;
+    sai_object_id_t tunnel_term_id;
+};
+
+typedef struct nvgre_tunnel_map_entry_s
+{
+    sai_object_id_t map_entry_id;
+    sai_vlan_id_t vlan_id;
+    uint32_t vsid;
+} nvgre_tunnel_map_entry_t;
+
+const request_description_t nvgre_tunnel_request_description = {
+    { REQ_T_STRING },
+    {
+        { "src_ip", REQ_T_IP },
+    },
+    { "src_ip" }
+};
+
+typedef std::map<std::string, nvgre_tunnel_map_entry_t> NvgreTunnelMapTable;
+
+class NvgreTunnel
+{
+public:
+    NvgreTunnel(std::string tunnelName, IpAddress srcIp);
+    ~NvgreTunnel();
+
+    bool isTunnelMapExists(const std::string& name) const
+    {
+        return nvgre_tunnel_map_table_.find(name) != std::end(nvgre_tunnel_map_table_);
+    }
+
+    sai_object_id_t getDecapMapId(map_type_t type) const
+    {
+        return tunnel_ids_.tunnel_decap_id.at(type);
+    }
+
+    sai_object_id_t getEncapMapId(map_type_t type) const
+    {
+        return tunnel_ids_.tunnel_encap_id.at(type);
+    }
+
+    sai_object_id_t getMapEntryId(std::string tunnel_map_entry_name)
+    {
+        return nvgre_tunnel_map_table_.at(tunnel_map_entry_name).map_entry_id;
+    }
+
+    sai_vlan_id_t getMapEntryVlanId(std::string tunnel_map_entry_name)
+    {
+        return nvgre_tunnel_map_table_.at(tunnel_map_entry_name).vlan_id;
+    }
+
+    uint32_t getMapEntryVsid(std::string tunnel_map_entry_name)
+    {
+        return nvgre_tunnel_map_table_.at(tunnel_map_entry_name).vsid;
+    }
+
+    bool addDecapMapperEntry(map_type_t map_type,
uint32_t vsid, sai_vlan_id_t vlan_id, std::string tunnel_map_entry_name, sai_object_id_t bridge_obj=SAI_NULL_OBJECT_ID); + + bool delMapperEntry(std::string tunnel_map_entry_name); + +private: + void createNvgreMappers(); + void removeNvgreMappers(); + + void createNvgreTunnel(); + void removeNvgreTunnel(); + + sai_object_id_t sai_create_tunnel_map(sai_tunnel_map_type_t sai_tunnel_map_type); + void sai_remove_tunnel_map(sai_object_id_t tunnel_map_id); + + sai_object_id_t sai_create_tunnel(struct tunnel_sai_ids_t &ids, const sai_ip_address_t &src_ip, sai_object_id_t underlay_rif); + void sai_remove_tunnel(sai_object_id_t tunnel_id); + + sai_object_id_t sai_create_tunnel_termination(sai_object_id_t tunnel_id, const sai_ip_address_t &src_ip, sai_object_id_t default_vrid); + void sai_remove_tunnel_termination(sai_object_id_t tunnel_term_id); + + sai_object_id_t sai_create_tunnel_map_entry(map_type_t map_type, sai_uint32_t vsid, sai_vlan_id_t vlan_id, sai_object_id_t bridge_obj_id, bool encap=false); + void sai_remove_tunnel_map_entry(sai_object_id_t obj_id); + + std::string tunnel_name_; + IpAddress src_ip_; + tunnel_sai_ids_t tunnel_ids_; + + NvgreTunnelMapTable nvgre_tunnel_map_table_; +}; + +typedef std::map> NvgreTunnelTable; + +class NvgreTunnelRequest : public Request +{ +public: + NvgreTunnelRequest() : Request(nvgre_tunnel_request_description, '|') { } +}; + +class NvgreTunnelOrch : public Orch2 +{ +public: + NvgreTunnelOrch(DBConnector *db, const std::string& tableName) : + Orch2(db, tableName, request_) + { } + + bool isTunnelExists(const std::string& tunnelName) const + { + return nvgre_tunnel_table_.find(tunnelName) != std::end(nvgre_tunnel_table_); + } + + NvgreTunnel* getNvgreTunnel(const std::string& tunnelName) + { + return nvgre_tunnel_table_.at(tunnelName).get(); + } + +private: + virtual bool addOperation(const Request& request); + virtual bool delOperation(const Request& request); + + NvgreTunnelRequest request_; + NvgreTunnelTable nvgre_tunnel_table_; +}; + +const request_description_t nvgre_tunnel_map_request_description = { + { REQ_T_STRING, REQ_T_STRING }, + { + { "vsid", REQ_T_UINT }, + { "vlan_id", REQ_T_VLAN }, + }, + { "vsid", "vlan_id" } +}; + +class NvgreTunnelMapRequest : public Request +{ +public: + NvgreTunnelMapRequest() : Request(nvgre_tunnel_map_request_description, '|') { } +}; + +class NvgreTunnelMapOrch : public Orch2 +{ +public: + NvgreTunnelMapOrch(DBConnector *db, const std::string& tableName) : + Orch2(db, tableName, request_) + {} + +private: + virtual bool addOperation(const Request& request); + virtual bool delOperation(const Request& request); + + NvgreTunnelMapRequest request_; +}; \ No newline at end of file diff --git a/orchagent/orch.cpp b/orchagent/orch.cpp index 0992e329a4..a9c5c9afcb 100644 --- a/orchagent/orch.cpp +++ b/orchagent/orch.cpp @@ -410,7 +410,8 @@ void Orch::removeMeFromObjsReferencedByMe( const string &table, const string &obj_name, const string &field, - const string &old_referenced_obj_name) + const string &old_referenced_obj_name, + bool remove_field) { vector objects = tokenize(old_referenced_obj_name, list_item_delimiter); for (auto &obj : objects) @@ -426,6 +427,12 @@ void Orch::removeMeFromObjsReferencedByMe( referenced_table.c_str(), ref_obj_name.c_str(), to_string(old_referenced_obj.m_objsDependingOnMe.size()).c_str()); } + + if (remove_field) + { + auto &referencing_object = (*type_maps[table])[obj_name]; + referencing_object.m_objsReferencingByMe.erase(field); + } } void Orch::setObjectReference( @@ -439,7 +446,7 @@ 
void Orch::setObjectReference( auto field_ref = obj.m_objsReferencingByMe.find(field); if (field_ref != obj.m_objsReferencingByMe.end()) - removeMeFromObjsReferencedByMe(type_maps, table, obj_name, field, field_ref->second); + removeMeFromObjsReferencedByMe(type_maps, table, obj_name, field, field_ref->second, false); obj.m_objsReferencingByMe[field] = referenced_obj; @@ -459,16 +466,44 @@ void Orch::setObjectReference( } } +bool Orch::doesObjectExist( + type_map &type_maps, + const string &table, + const string &obj_name, + const string &field, + string &referenced_obj) +{ + auto &&searchRef = (*type_maps[table]).find(obj_name); + if (searchRef != (*type_maps[table]).end()) + { + auto &obj = searchRef->second; + auto &&searchReferencingObjectRef = obj.m_objsReferencingByMe.find(field); + if (searchReferencingObjectRef != obj.m_objsReferencingByMe.end()) + { + referenced_obj = searchReferencingObjectRef->second; + return true; + } + } + + return false; +} + void Orch::removeObject( type_map &type_maps, const string &table, const string &obj_name) { - auto &obj = (*type_maps[table])[obj_name]; + auto &&searchRef = (*type_maps[table]).find(obj_name); + if (searchRef == (*type_maps[table]).end()) + { + return; + } + + auto &obj = searchRef->second; for (auto field_ref : obj.m_objsReferencingByMe) { - removeMeFromObjsReferencedByMe(type_maps, table, obj_name, field_ref.first, field_ref.second); + removeMeFromObjsReferencedByMe(type_maps, table, obj_name, field_ref.first, field_ref.second, false); } // Update the field store @@ -847,7 +882,7 @@ task_process_status Orch::handleSaiCreateStatus(sai_api_t api, sai_status_t stat default: SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - exit(EXIT_FAILURE); + abort(); } break; case SAI_API_HOSTIF: @@ -865,7 +900,7 @@ task_process_status Orch::handleSaiCreateStatus(sai_api_t api, sai_status_t stat default: SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - exit(EXIT_FAILURE); + abort(); } default: switch (status) @@ -876,7 +911,7 @@ task_process_status Orch::handleSaiCreateStatus(sai_api_t api, sai_status_t stat default: SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - exit(EXIT_FAILURE); + abort(); } } return task_need_retry; @@ -917,12 +952,12 @@ task_process_status Orch::handleSaiSetStatus(sai_api_t api, sai_status_t status, default: SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - exit(EXIT_FAILURE); + abort(); } default: SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - exit(EXIT_FAILURE); + abort(); } return task_need_retry; @@ -950,7 +985,7 @@ task_process_status Orch::handleSaiRemoveStatus(sai_api_t api, sai_status_t stat default: SWSS_LOG_ERROR("Encountered failure in remove operation, exiting orchagent, SAI API: %s, status: %s", sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - exit(EXIT_FAILURE); + abort(); } return task_need_retry; } diff --git a/orchagent/orch.h b/orchagent/orch.h index 
46a5d446ce..f6b13aa9c6 100644 --- a/orchagent/orch.h +++ b/orchagent/orch.h @@ -34,6 +34,7 @@ const char state_db_key_delimiter = '|'; #define INVM_PLATFORM_SUBSTRING "innovium" #define MLNX_PLATFORM_SUBSTRING "mellanox" #define BRCM_PLATFORM_SUBSTRING "broadcom" +#define BRCM_DNX_PLATFORM_SUBSTRING "broadcom-dnx" #define BFN_PLATFORM_SUBSTRING "barefoot" #define VS_PLATFORM_SUBSTRING "vs" #define NPS_PLATFORM_SUBSTRING "nephos" @@ -233,9 +234,11 @@ class Orch bool parseReference(type_map &type_maps, std::string &ref, const std::string &table_name, std::string &object_name); ref_resolve_status resolveFieldRefArray(type_map&, const std::string&, const std::string&, swss::KeyOpFieldsValuesTuple&, std::vector&, std::string&); void setObjectReference(type_map&, const std::string&, const std::string&, const std::string&, const std::string&); + bool doesObjectExist(type_map&, const std::string&, const std::string&, const std::string&, std::string&); void removeObject(type_map&, const std::string&, const std::string&); bool isObjectBeingReferenced(type_map&, const std::string&, const std::string&); std::string objectReferenceInfo(type_map&, const std::string&, const std::string&); + void removeMeFromObjsReferencedByMe(type_map &type_maps, const std::string &table, const std::string &obj_name, const std::string &field, const std::string &old_referenced_obj_name, bool remove_field=true); /* Note: consumer will be owned by this class */ void addExecutor(Executor* executor); @@ -250,7 +253,6 @@ class Orch ResponsePublisher m_publisher; private: - void removeMeFromObjsReferencedByMe(type_map &type_maps, const std::string &table, const std::string &obj_name, const std::string &field, const std::string &old_referenced_obj_name); void addConsumer(swss::DBConnector *db, std::string tableName, int pri = default_orch_pri); }; diff --git a/orchagent/orchdaemon.cpp b/orchagent/orchdaemon.cpp index d0f618b591..47c1f34bea 100644 --- a/orchagent/orchdaemon.cpp +++ b/orchagent/orchdaemon.cpp @@ -41,6 +41,7 @@ PbhOrch *gPbhOrch; MirrorOrch *gMirrorOrch; CrmOrch *gCrmOrch; BufferOrch *gBufferOrch; +QosOrch *gQosOrch; SwitchOrch *gSwitchOrch; Directory gDirectory; NatOrch *gNatOrch; @@ -205,6 +206,10 @@ bool OrchDaemon::init() EvpnNvoOrch* evpn_nvo_orch = new EvpnNvoOrch(m_applDb, APP_VXLAN_EVPN_NVO_TABLE_NAME); gDirectory.set(evpn_nvo_orch); + NvgreTunnelOrch *nvgre_tunnel_orch = new NvgreTunnelOrch(m_configDb, CFG_NVGRE_TUNNEL_TABLE_NAME); + gDirectory.set(nvgre_tunnel_orch); + NvgreTunnelMapOrch *nvgre_tunnel_map_orch = new NvgreTunnelMapOrch(m_configDb, CFG_NVGRE_TUNNEL_MAP_TABLE_NAME); + gDirectory.set(nvgre_tunnel_map_orch); vector qos_tables = { CFG_TC_TO_QUEUE_MAP_TABLE_NAME, @@ -221,7 +226,7 @@ bool OrchDaemon::init() CFG_DSCP_TO_FC_MAP_TABLE_NAME, CFG_EXP_TO_FC_MAP_TABLE_NAME }; - QosOrch *qos_orch = new QosOrch(m_configDb, qos_tables); + gQosOrch = new QosOrch(m_configDb, qos_tables); vector buffer_tables = { APP_BUFFER_POOL_TABLE_NAME, @@ -330,7 +335,7 @@ bool OrchDaemon::init() * when iterating ConsumerMap. This is ensured implicitly by the order of keys in ordered map. 
* For cases when Orch has to process tables in specific order, like PortsOrch during warm start, it has to override Orch::doTask() */ - m_orchList = { gSwitchOrch, gCrmOrch, gPortsOrch, gBufferOrch, mux_orch, mux_cb_orch, gIntfsOrch, gNeighOrch, gNhgMapOrch, gNhgOrch, gCbfNhgOrch, gRouteOrch, gCoppOrch, qos_orch, wm_orch, policer_orch, tunnel_decap_orch, sflow_orch, debug_counter_orch, gMacsecOrch, gBfdOrch, gSrv6Orch}; + m_orchList = { gSwitchOrch, gCrmOrch, gPortsOrch, gBufferOrch, mux_orch, mux_cb_orch, gIntfsOrch, gNeighOrch, gNhgMapOrch, gNhgOrch, gCbfNhgOrch, gRouteOrch, gCoppOrch, gQosOrch, wm_orch, policer_orch, tunnel_decap_orch, sflow_orch, debug_counter_orch, gMacsecOrch, gBfdOrch, gSrv6Orch}; bool initialize_dtel = false; if (platform == BFN_PLATFORM_SUBSTRING || platform == VS_PLATFORM_SUBSTRING) @@ -429,6 +434,8 @@ bool OrchDaemon::init() m_orchList.push_back(gIsoGrpOrch); m_orchList.push_back(gFgNhgOrch); m_orchList.push_back(mux_st_orch); + m_orchList.push_back(nvgre_tunnel_orch); + m_orchList.push_back(nvgre_tunnel_map_orch); if (m_fabricEnabled) { @@ -453,7 +460,7 @@ bool OrchDaemon::init() CFG_PFC_WD_TABLE_NAME }; - if (platform == MLNX_PLATFORM_SUBSTRING) + if ((platform == MLNX_PLATFORM_SUBSTRING) || (platform == VS_PLATFORM_SUBSTRING)) { static const vector portStatIds = @@ -638,7 +645,7 @@ void OrchDaemon::flush() if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to flush redis pipeline %d", status); - exit(EXIT_FAILURE); + abort(); } // check if logroate is requested diff --git a/orchagent/orchdaemon.h b/orchagent/orchdaemon.h index ea49affbfc..35e531aa15 100644 --- a/orchagent/orchdaemon.h +++ b/orchagent/orchdaemon.h @@ -43,6 +43,7 @@ #include "p4orch/p4orch.h" #include "bfdorch.h" #include "srv6orch.h" +#include "nvgreorch.h" using namespace swss; diff --git a/orchagent/p4orch/p4orch.cpp b/orchagent/p4orch/p4orch.cpp index ada1fa2c77..57d50aa5ce 100644 --- a/orchagent/p4orch/p4orch.cpp +++ b/orchagent/p4orch/p4orch.cpp @@ -171,9 +171,9 @@ void P4Orch::handlePortStatusChangeNotification(const std::string &op, const std { m_wcmpManager->pruneNextHops(port.m_alias); } - - sai_deserialize_free_port_oper_status_ntf(count, port_oper_status); } + + sai_deserialize_free_port_oper_status_ntf(count, port_oper_status); } } diff --git a/orchagent/pbh/pbhcap.cpp b/orchagent/pbh/pbhcap.cpp new file mode 100644 index 0000000000..46a3a49e19 --- /dev/null +++ b/orchagent/pbh/pbhcap.cpp @@ -0,0 +1,688 @@ +// includes ----------------------------------------------------------------------------------------------------------- + +#include +#include +#include +#include +#include +#include + +#include "pbhschema.h" +#include "schema.h" +#include "logger.h" + +#include "pbhcap.h" + +using namespace swss; + +// defines ------------------------------------------------------------------------------------------------------------ + +#define PBH_PLATFORM_ENV_VAR "ASIC_VENDOR" +#define PBH_PLATFORM_GENERIC "generic" +#define PBH_PLATFORM_MELLANOX "mellanox" +#define PBH_PLATFORM_UNKN "unknown" + +#define PBH_TABLE_CAPABILITIES_KEY "table" +#define PBH_RULE_CAPABILITIES_KEY "rule" +#define PBH_HASH_CAPABILITIES_KEY "hash" +#define PBH_HASH_FIELD_CAPABILITIES_KEY "hash-field" + +#define PBH_FIELD_CAPABILITY_ADD "ADD" +#define PBH_FIELD_CAPABILITY_UPDATE "UPDATE" +#define PBH_FIELD_CAPABILITY_REMOVE "REMOVE" +#define PBH_FIELD_CAPABILITY_UNKN "UNKNOWN" + +#define PBH_STATE_DB_NAME "STATE_DB" +#define PBH_STATE_DB_TIMEOUT 0 + +// constants 
---------------------------------------------------------------------------------------------------------- + +static const std::map pbhAsicVendorMap = +{ + { PbhAsicVendor::GENERIC, PBH_PLATFORM_GENERIC }, + { PbhAsicVendor::MELLANOX, PBH_PLATFORM_MELLANOX } +}; + +static const std::map pbhFieldCapabilityMap = +{ + { PbhFieldCapability::ADD, PBH_FIELD_CAPABILITY_ADD }, + { PbhFieldCapability::UPDATE, PBH_FIELD_CAPABILITY_UPDATE }, + { PbhFieldCapability::REMOVE, PBH_FIELD_CAPABILITY_REMOVE } +}; + +// functions ---------------------------------------------------------------------------------------------------------- + +static std::string toStr(PbhAsicVendor value) noexcept +{ + const auto &cit = pbhAsicVendorMap.find(value); + if (cit != pbhAsicVendorMap.cend()) + { + return cit->second; + } + + return PBH_PLATFORM_UNKN; +} + +static std::string toStr(PbhFieldCapability value) noexcept +{ + const auto &cit = pbhFieldCapabilityMap.find(value); + if (cit != pbhFieldCapabilityMap.cend()) + { + return cit->second; + } + + return PBH_FIELD_CAPABILITY_UNKN; +} + +static std::string toStr(const std::set &value) noexcept +{ + std::stringstream ss; + bool separator = false; + + for (const auto &cit : value) + { + if (!separator) + { + ss << toStr(cit); + separator = true; + } + else + { + ss << "," << toStr(cit); + } + } + + return ss.str(); +} + +// PBH field capabilities --------------------------------------------------------------------------------------------- + +void PbhVendorFieldCapabilities::setPbhDefaults(std::set &fieldCap) noexcept +{ + fieldCap.insert(PbhFieldCapability::ADD); + fieldCap.insert(PbhFieldCapability::UPDATE); + fieldCap.insert(PbhFieldCapability::REMOVE); +} + +PbhGenericFieldCapabilities::PbhGenericFieldCapabilities() noexcept +{ + this->table.interface_list.insert(PbhFieldCapability::UPDATE); + this->table.description.insert(PbhFieldCapability::UPDATE); + + this->rule.priority.insert(PbhFieldCapability::UPDATE); + this->setPbhDefaults(this->rule.gre_key); + this->setPbhDefaults(this->rule.ether_type); + this->setPbhDefaults(this->rule.ip_protocol); + this->setPbhDefaults(this->rule.ipv6_next_header); + this->setPbhDefaults(this->rule.l4_dst_port); + this->setPbhDefaults(this->rule.inner_ether_type); + this->rule.hash.insert(PbhFieldCapability::UPDATE); + this->setPbhDefaults(this->rule.packet_action); + this->setPbhDefaults(this->rule.flow_counter); + + this->hash.hash_field_list.insert(PbhFieldCapability::UPDATE); +} + +PbhMellanoxFieldCapabilities::PbhMellanoxFieldCapabilities() noexcept +{ + this->table.interface_list.insert(PbhFieldCapability::UPDATE); + this->table.description.insert(PbhFieldCapability::UPDATE); + + this->rule.priority.insert(PbhFieldCapability::UPDATE); + this->setPbhDefaults(this->rule.gre_key); + this->setPbhDefaults(this->rule.ether_type); + this->setPbhDefaults(this->rule.ip_protocol); + this->setPbhDefaults(this->rule.ipv6_next_header); + this->setPbhDefaults(this->rule.l4_dst_port); + this->setPbhDefaults(this->rule.inner_ether_type); + this->rule.hash.insert(PbhFieldCapability::UPDATE); + this->setPbhDefaults(this->rule.packet_action); + this->setPbhDefaults(this->rule.flow_counter); +} + +// PBH entity capabilities -------------------------------------------------------------------------------------------- + +PbhEntityCapabilities::PbhEntityCapabilities(const std::shared_ptr &fieldCap) noexcept : + fieldCap(fieldCap) +{ + +} + +bool PbhEntityCapabilities::validate(const std::set &fieldCap, PbhFieldCapability value) const +{ + const auto 
&cit = fieldCap.find(value); + if (cit == fieldCap.cend()) + { + return false; + } + + return true; +} + +PbhTableCapabilities::PbhTableCapabilities(const std::shared_ptr &fieldCap) noexcept : + PbhEntityCapabilities(fieldCap) +{ + +} + +bool PbhTableCapabilities::validatePbhInterfaceList(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().interface_list, value); +} + +bool PbhTableCapabilities::validatePbhDescription(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().description, value); +} + +auto PbhTableCapabilities::getPbhCap() const -> const decltype(PbhVendorFieldCapabilities::table) & +{ + return this->fieldCap->table; +} + +PbhRuleCapabilities::PbhRuleCapabilities(const std::shared_ptr &fieldCap) noexcept : + PbhEntityCapabilities(fieldCap) +{ + +} + +bool PbhRuleCapabilities::validatePbhPriority(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().priority, value); +} + +bool PbhRuleCapabilities::validatePbhGreKey(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().gre_key, value); +} + +bool PbhRuleCapabilities::validatePbhEtherType(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().ether_type, value); +} + +bool PbhRuleCapabilities::validatePbhIpProtocol(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().ip_protocol, value); +} + +bool PbhRuleCapabilities::validatePbhIpv6NextHeader(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().ipv6_next_header, value); +} + +bool PbhRuleCapabilities::validatePbhL4DstPort(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().l4_dst_port, value); +} + +bool PbhRuleCapabilities::validatePbhInnerEtherType(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().inner_ether_type, value); +} + +bool PbhRuleCapabilities::validatePbhHash(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().hash, value); +} + +bool PbhRuleCapabilities::validatePbhPacketAction(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().packet_action, value); +} + +bool PbhRuleCapabilities::validatePbhFlowCounter(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().flow_counter, value); +} + +auto PbhRuleCapabilities::getPbhCap() const -> const decltype(PbhVendorFieldCapabilities::rule) & +{ + return this->fieldCap->rule; +} + +PbhHashCapabilities::PbhHashCapabilities(const std::shared_ptr &fieldCap) noexcept : + PbhEntityCapabilities(fieldCap) +{ + +} + +bool PbhHashCapabilities::validatePbhHashFieldList(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().hash_field_list, value); +} + +auto PbhHashCapabilities::getPbhCap() const -> const decltype(PbhVendorFieldCapabilities::hash) & +{ + return this->fieldCap->hash; +} + +PbhHashFieldCapabilities::PbhHashFieldCapabilities(const std::shared_ptr &fieldCap) noexcept : + PbhEntityCapabilities(fieldCap) +{ + +} + +bool PbhHashFieldCapabilities::validatePbhHashField(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().hash_field, value); +} + +bool PbhHashFieldCapabilities::validatePbhIpMask(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().ip_mask, value); +} + +bool PbhHashFieldCapabilities::validatePbhSequenceId(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().sequence_id, value); +} + +auto PbhHashFieldCapabilities::getPbhCap() const -> 
const decltype(PbhVendorFieldCapabilities::hashField) & +{ + return this->fieldCap->hashField; +} + +// PBH capabilities --------------------------------------------------------------------------------------------------- + +DBConnector PbhCapabilities::stateDb(PBH_STATE_DB_NAME, PBH_STATE_DB_TIMEOUT); +Table PbhCapabilities::capTable(&stateDb, STATE_PBH_CAPABILITIES_TABLE_NAME); + +PbhCapabilities::PbhCapabilities() noexcept +{ + SWSS_LOG_ENTER(); + + if (!this->parsePbhAsicVendor()) + { + SWSS_LOG_WARN("Failed to parse ASIC vendor: fallback to %s platform", PBH_PLATFORM_GENERIC); + this->asicVendor = PbhAsicVendor::GENERIC; + } + + this->initPbhVendorCapabilities(); + this->writePbhVendorCapabilitiesToDb(); +} + +PbhAsicVendor PbhCapabilities::getAsicVendor() const noexcept +{ + return this->asicVendor; +} + +bool PbhCapabilities::parsePbhAsicVendor() +{ + SWSS_LOG_ENTER(); + + const auto *envVar = std::getenv(PBH_PLATFORM_ENV_VAR); + if (envVar == nullptr) + { + SWSS_LOG_WARN("Failed to detect ASIC vendor: environmental variable(%s) is not found", PBH_PLATFORM_ENV_VAR); + return false; + } + + std::string platform(envVar); + + if (platform == PBH_PLATFORM_MELLANOX) + { + this->asicVendor = PbhAsicVendor::MELLANOX; + } + else + { + this->asicVendor = PbhAsicVendor::GENERIC; + } + + SWSS_LOG_NOTICE("Parsed PBH ASIC vendor: %s", toStr(this->asicVendor).c_str()); + + return true; +} + +void PbhCapabilities::initPbhVendorCapabilities() +{ + std::shared_ptr fieldCap; + + switch (this->asicVendor) + { + case PbhAsicVendor::GENERIC: + fieldCap = std::make_shared(); + break; + + case PbhAsicVendor::MELLANOX: + fieldCap = std::make_shared(); + break; + + default: + SWSS_LOG_WARN("Unknown ASIC vendor: skipping ..."); + break; + } + + if (!fieldCap) + { + SWSS_LOG_ERROR("Failed to initialize PBH capabilities: unknown ASIC vendor"); + return; + } + + this->table = std::make_shared(fieldCap); + this->rule = std::make_shared(fieldCap); + this->hash = std::make_shared(fieldCap); + this->hashField = std::make_shared(fieldCap); + + SWSS_LOG_NOTICE("Initialized PBH capabilities: %s platform", toStr(this->asicVendor).c_str()); +} + +template<> +void PbhCapabilities::writePbhEntityCapabilitiesToDb(const std::shared_ptr &entityCap) +{ + SWSS_LOG_ENTER(); + + auto key = PbhCapabilities::capTable.getKeyName(PBH_TABLE_CAPABILITIES_KEY); + std::vector fvList; + + fvList.push_back(FieldValueTuple(PBH_TABLE_INTERFACE_LIST, toStr(entityCap->getPbhCap().interface_list))); + fvList.push_back(FieldValueTuple(PBH_TABLE_DESCRIPTION, toStr(entityCap->getPbhCap().description))); + + PbhCapabilities::capTable.set(PBH_TABLE_CAPABILITIES_KEY, fvList); + + SWSS_LOG_NOTICE("Wrote PBH table capabilities to State DB: %s key", key.c_str()); +} + +template<> +void PbhCapabilities::writePbhEntityCapabilitiesToDb(const std::shared_ptr &entityCap) +{ + SWSS_LOG_ENTER(); + + auto key = PbhCapabilities::capTable.getKeyName(PBH_RULE_CAPABILITIES_KEY); + std::vector fvList; + + fvList.push_back(FieldValueTuple(PBH_RULE_PRIORITY, toStr(entityCap->getPbhCap().priority))); + fvList.push_back(FieldValueTuple(PBH_RULE_GRE_KEY, toStr(entityCap->getPbhCap().gre_key))); + fvList.push_back(FieldValueTuple(PBH_RULE_ETHER_TYPE, toStr(entityCap->getPbhCap().ether_type))); + fvList.push_back(FieldValueTuple(PBH_RULE_IP_PROTOCOL, toStr(entityCap->getPbhCap().ip_protocol))); + fvList.push_back(FieldValueTuple(PBH_RULE_IPV6_NEXT_HEADER, toStr(entityCap->getPbhCap().ipv6_next_header))); + fvList.push_back(FieldValueTuple(PBH_RULE_L4_DST_PORT, 
toStr(entityCap->getPbhCap().l4_dst_port))); + fvList.push_back(FieldValueTuple(PBH_RULE_INNER_ETHER_TYPE, toStr(entityCap->getPbhCap().inner_ether_type))); + fvList.push_back(FieldValueTuple(PBH_RULE_HASH, toStr(entityCap->getPbhCap().hash))); + fvList.push_back(FieldValueTuple(PBH_RULE_PACKET_ACTION, toStr(entityCap->getPbhCap().packet_action))); + fvList.push_back(FieldValueTuple(PBH_RULE_FLOW_COUNTER, toStr(entityCap->getPbhCap().flow_counter))); + + PbhCapabilities::capTable.set(PBH_RULE_CAPABILITIES_KEY, fvList); + + SWSS_LOG_NOTICE("Wrote PBH rule capabilities to State DB: %s key", key.c_str()); +} + +template<> +void PbhCapabilities::writePbhEntityCapabilitiesToDb(const std::shared_ptr &entityCap) +{ + SWSS_LOG_ENTER(); + + auto key = PbhCapabilities::capTable.getKeyName(PBH_HASH_CAPABILITIES_KEY); + std::vector fvList; + + fvList.push_back(FieldValueTuple(PBH_HASH_HASH_FIELD_LIST, toStr(entityCap->getPbhCap().hash_field_list))); + + PbhCapabilities::capTable.set(PBH_HASH_CAPABILITIES_KEY, fvList); + + SWSS_LOG_NOTICE("Wrote PBH hash capabilities to State DB: %s key", key.c_str()); +} + +template<> +void PbhCapabilities::writePbhEntityCapabilitiesToDb(const std::shared_ptr &entityCap) +{ + SWSS_LOG_ENTER(); + + auto key = PbhCapabilities::capTable.getKeyName(PBH_HASH_FIELD_CAPABILITIES_KEY); + std::vector fvList; + + fvList.push_back(FieldValueTuple(PBH_HASH_FIELD_HASH_FIELD, toStr(entityCap->getPbhCap().hash_field))); + fvList.push_back(FieldValueTuple(PBH_HASH_FIELD_IP_MASK, toStr(entityCap->getPbhCap().ip_mask))); + fvList.push_back(FieldValueTuple(PBH_HASH_FIELD_SEQUENCE_ID, toStr(entityCap->getPbhCap().sequence_id))); + + PbhCapabilities::capTable.set(PBH_HASH_FIELD_CAPABILITIES_KEY, fvList); + + SWSS_LOG_NOTICE("Wrote PBH hash field capabilities to State DB: %s key", key.c_str()); +} + +void PbhCapabilities::writePbhVendorCapabilitiesToDb() +{ + SWSS_LOG_ENTER(); + + this->writePbhEntityCapabilitiesToDb(this->table); + this->writePbhEntityCapabilitiesToDb(this->rule); + this->writePbhEntityCapabilitiesToDb(this->hash); + this->writePbhEntityCapabilitiesToDb(this->hashField); + + SWSS_LOG_NOTICE("Wrote PBH capabilities to State DB: %s table", STATE_PBH_CAPABILITIES_TABLE_NAME); +} + +bool PbhCapabilities::validatePbhTableCap(const std::vector &fieldList, PbhFieldCapability value) const +{ + SWSS_LOG_ENTER(); + + for (const auto &cit : fieldList) + { + if (cit == PBH_TABLE_INTERFACE_LIST) + { + if (!this->table->validatePbhInterfaceList(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else if (cit == PBH_TABLE_DESCRIPTION) + { + if (!this->table->validatePbhDescription(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else + { + SWSS_LOG_WARN("Unknown field(%s): skipping ...", cit.c_str()); + } + } + + return true; +} + +bool PbhCapabilities::validatePbhRuleCap(const std::vector &fieldList, PbhFieldCapability value) const +{ + SWSS_LOG_ENTER(); + + for (const auto &cit : fieldList) + { + if (cit == PBH_RULE_PRIORITY) + { + if (!this->rule->validatePbhPriority(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else if (cit == PBH_RULE_GRE_KEY) + { + if (!this->rule->validatePbhGreKey(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): 
capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else if (cit == PBH_RULE_ETHER_TYPE) + { + if (!this->rule->validatePbhEtherType(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else if (cit == PBH_RULE_IP_PROTOCOL) + { + if (!this->rule->validatePbhIpProtocol(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else if (cit == PBH_RULE_IPV6_NEXT_HEADER) + { + if (!this->rule->validatePbhIpv6NextHeader(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else if (cit == PBH_RULE_L4_DST_PORT) + { + if (!this->rule->validatePbhL4DstPort(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else if (cit == PBH_RULE_INNER_ETHER_TYPE) + { + if (!this->rule->validatePbhInnerEtherType(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else if (cit == PBH_RULE_HASH) + { + if (!this->rule->validatePbhHash(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else if (cit == PBH_RULE_PACKET_ACTION) + { + if (!this->rule->validatePbhPacketAction(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else if (cit == PBH_RULE_FLOW_COUNTER) + { + if (!this->rule->validatePbhFlowCounter(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else + { + SWSS_LOG_WARN("Unknown field(%s): skipping ...", cit.c_str()); + } + } + + return true; +} + +bool PbhCapabilities::validatePbhHashCap(const std::vector &fieldList, PbhFieldCapability value) const +{ + SWSS_LOG_ENTER(); + + for (const auto &cit : fieldList) + { + if (cit == PBH_HASH_HASH_FIELD_LIST) + { + if (!this->hash->validatePbhHashFieldList(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else + { + SWSS_LOG_WARN("Unknown field(%s): skipping ...", cit.c_str()); + } + } + + return true; +} + +bool PbhCapabilities::validatePbhHashFieldCap(const std::vector &fieldList, PbhFieldCapability value) const +{ + SWSS_LOG_ENTER(); + + for (const auto &cit : fieldList) + { + if (cit == PBH_HASH_FIELD_HASH_FIELD) + { + if (!this->hashField->validatePbhHashField(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else if (cit == PBH_HASH_FIELD_IP_MASK) + { + if (!this->hashField->validatePbhIpMask(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else if (cit == PBH_HASH_FIELD_SEQUENCE_ID) + { + if (!this->hashField->validatePbhSequenceId(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is 
not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else + { + SWSS_LOG_WARN("Unknown field(%s): skipping ...", cit.c_str()); + } + } + + return true; +} diff --git a/orchagent/pbh/pbhcap.h b/orchagent/pbh/pbhcap.h new file mode 100644 index 0000000000..adc2a4c9e6 --- /dev/null +++ b/orchagent/pbh/pbhcap.h @@ -0,0 +1,186 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include "dbconnector.h" +#include "table.h" + +enum class PbhAsicVendor : std::int32_t +{ + GENERIC, + MELLANOX +}; + +enum class PbhFieldCapability : std::int32_t +{ + ADD, + UPDATE, + REMOVE +}; + +class PbhVendorFieldCapabilities +{ +public: + PbhVendorFieldCapabilities() = default; + virtual ~PbhVendorFieldCapabilities() = default; + +protected: + void setPbhDefaults(std::set &fieldCap) noexcept; + +public: + struct { + std::set interface_list; + std::set description; + } table; + + struct { + std::set priority; + std::set gre_key; + std::set ether_type; + std::set ip_protocol; + std::set ipv6_next_header; + std::set l4_dst_port; + std::set inner_ether_type; + std::set hash; + std::set packet_action; + std::set flow_counter; + } rule; + + struct { + std::set hash_field_list; + } hash; + + struct { + std::set hash_field; + std::set ip_mask; + std::set sequence_id; + } hashField; +}; + +class PbhGenericFieldCapabilities final : public PbhVendorFieldCapabilities +{ +public: + PbhGenericFieldCapabilities() noexcept; + ~PbhGenericFieldCapabilities() = default; +}; + +class PbhMellanoxFieldCapabilities final : public PbhVendorFieldCapabilities +{ +public: + PbhMellanoxFieldCapabilities() noexcept; + ~PbhMellanoxFieldCapabilities() = default; +}; + +class PbhEntityCapabilities +{ +public: + PbhEntityCapabilities() = delete; + virtual ~PbhEntityCapabilities() = default; + + PbhEntityCapabilities(const std::shared_ptr &fieldCap) noexcept; + +protected: + bool validate(const std::set &fieldCap, PbhFieldCapability value) const; + + std::shared_ptr fieldCap; +}; + +class PbhTableCapabilities final : public PbhEntityCapabilities +{ +public: + PbhTableCapabilities() = delete; + ~PbhTableCapabilities() = default; + + PbhTableCapabilities(const std::shared_ptr &fieldCap) noexcept; + + bool validatePbhInterfaceList(PbhFieldCapability value) const; + bool validatePbhDescription(PbhFieldCapability value) const; + + auto getPbhCap() const -> const decltype(PbhVendorFieldCapabilities::table) &; +}; + +class PbhRuleCapabilities final : public PbhEntityCapabilities +{ +public: + PbhRuleCapabilities() = delete; + ~PbhRuleCapabilities() = default; + + PbhRuleCapabilities(const std::shared_ptr &fieldCap) noexcept; + + bool validatePbhPriority(PbhFieldCapability value) const; + bool validatePbhGreKey(PbhFieldCapability value) const; + bool validatePbhEtherType(PbhFieldCapability value) const; + bool validatePbhIpProtocol(PbhFieldCapability value) const; + bool validatePbhIpv6NextHeader(PbhFieldCapability value) const; + bool validatePbhL4DstPort(PbhFieldCapability value) const; + bool validatePbhInnerEtherType(PbhFieldCapability value) const; + bool validatePbhHash(PbhFieldCapability value) const; + bool validatePbhPacketAction(PbhFieldCapability value) const; + bool validatePbhFlowCounter(PbhFieldCapability value) const; + + auto getPbhCap() const -> const decltype(PbhVendorFieldCapabilities::rule) &; +}; + +class PbhHashCapabilities final : public PbhEntityCapabilities +{ +public: + PbhHashCapabilities() = delete; + ~PbhHashCapabilities() = default; + + PbhHashCapabilities(const 
std::shared_ptr &fieldCap) noexcept; + + bool validatePbhHashFieldList(PbhFieldCapability value) const; + + auto getPbhCap() const -> const decltype(PbhVendorFieldCapabilities::hash) &; +}; + +class PbhHashFieldCapabilities final : public PbhEntityCapabilities +{ +public: + PbhHashFieldCapabilities() = delete; + ~PbhHashFieldCapabilities() = default; + + PbhHashFieldCapabilities(const std::shared_ptr &fieldCap) noexcept; + + bool validatePbhHashField(PbhFieldCapability value) const; + bool validatePbhIpMask(PbhFieldCapability value) const; + bool validatePbhSequenceId(PbhFieldCapability value) const; + + auto getPbhCap() const -> const decltype(PbhVendorFieldCapabilities::hashField) &; +}; + +class PbhCapabilities final +{ +public: + PbhCapabilities() noexcept; + ~PbhCapabilities() = default; + + bool validatePbhTableCap(const std::vector &fieldList, PbhFieldCapability value) const; + bool validatePbhRuleCap(const std::vector &fieldList, PbhFieldCapability value) const; + bool validatePbhHashCap(const std::vector &fieldList, PbhFieldCapability value) const; + bool validatePbhHashFieldCap(const std::vector &fieldList, PbhFieldCapability value) const; + + PbhAsicVendor getAsicVendor() const noexcept; + +private: + template + void writePbhEntityCapabilitiesToDb(const std::shared_ptr &entityCap); + + bool parsePbhAsicVendor(); + void initPbhVendorCapabilities(); + void writePbhVendorCapabilitiesToDb(); + + PbhAsicVendor asicVendor; + + std::shared_ptr table; + std::shared_ptr rule; + std::shared_ptr hash; + std::shared_ptr hashField; + + static swss::DBConnector stateDb; + static swss::Table capTable; +}; diff --git a/orchagent/pbh/pbhcnt.h b/orchagent/pbh/pbhcnt.h index 787d91b63c..90c40bb681 100644 --- a/orchagent/pbh/pbhcnt.h +++ b/orchagent/pbh/pbhcnt.h @@ -105,19 +105,22 @@ class PbhRule final : public PbhContainer } inner_ether_type; struct { + struct { + std::string name; + } meta; std::string value; bool is_set = false; } hash; struct { + struct { + std::string name; + } meta; sai_acl_entry_attr_t value; bool is_set = false; } packet_action; struct { - struct { - std::string name; - } meta; bool value; bool is_set = false; } flow_counter; diff --git a/orchagent/pbh/pbhmgr.cpp b/orchagent/pbh/pbhmgr.cpp index ed10ff756c..8dfb8e09f8 100644 --- a/orchagent/pbh/pbhmgr.cpp +++ b/orchagent/pbh/pbhmgr.cpp @@ -7,6 +7,7 @@ #include #include +#include "pbhschema.h" #include "ipaddress.h" #include "converter.h" #include "tokenize.h" @@ -16,42 +17,6 @@ using namespace swss; -// defines ------------------------------------------------------------------------------------------------------------ - -#define PBH_TABLE_INTERFACE_LIST "interface_list" -#define PBH_TABLE_DESCRIPTION "description" - -#define PBH_RULE_PACKET_ACTION_SET_ECMP_HASH "SET_ECMP_HASH" -#define PBH_RULE_PACKET_ACTION_SET_LAG_HASH "SET_LAG_HASH" - -#define PBH_RULE_FLOW_COUNTER_ENABLED "ENABLED" -#define PBH_RULE_FLOW_COUNTER_DISABLED "DISABLED" - -#define PBH_RULE_PRIORITY "priority" -#define PBH_RULE_GRE_KEY "gre_key" -#define PBH_RULE_ETHER_TYPE "ether_type" -#define PBH_RULE_IP_PROTOCOL "ip_protocol" -#define PBH_RULE_IPV6_NEXT_HEADER "ipv6_next_header" -#define PBH_RULE_L4_DST_PORT "l4_dst_port" -#define PBH_RULE_INNER_ETHER_TYPE "inner_ether_type" -#define PBH_RULE_HASH "hash" -#define PBH_RULE_PACKET_ACTION "packet_action" -#define PBH_RULE_FLOW_COUNTER "flow_counter" - -#define PBH_HASH_HASH_FIELD_LIST "hash_field_list" - -#define PBH_HASH_FIELD_HASH_FIELD_INNER_IP_PROTOCOL "INNER_IP_PROTOCOL" -#define 
PBH_HASH_FIELD_HASH_FIELD_INNER_L4_DST_PORT "INNER_L4_DST_PORT" -#define PBH_HASH_FIELD_HASH_FIELD_INNER_L4_SRC_PORT "INNER_L4_SRC_PORT" -#define PBH_HASH_FIELD_HASH_FIELD_INNER_DST_IPV4 "INNER_DST_IPV4" -#define PBH_HASH_FIELD_HASH_FIELD_INNER_SRC_IPV4 "INNER_SRC_IPV4" -#define PBH_HASH_FIELD_HASH_FIELD_INNER_DST_IPV6 "INNER_DST_IPV6" -#define PBH_HASH_FIELD_HASH_FIELD_INNER_SRC_IPV6 "INNER_SRC_IPV6" - -#define PBH_HASH_FIELD_HASH_FIELD "hash_field" -#define PBH_HASH_FIELD_IP_MASK "ip_mask" -#define PBH_HASH_FIELD_SEQUENCE_ID "sequence_id" - // constants ---------------------------------------------------------------------------------------------------------- static const std::unordered_map pbhRulePacketActionMap = @@ -712,6 +677,7 @@ bool PbhHelper::parsePbhRuleHash(PbhRule &rule, const std::string &field, const return false; } + rule.hash.meta.name = field; rule.hash.value = value; rule.hash.is_set = true; @@ -729,6 +695,7 @@ bool PbhHelper::parsePbhRulePacketAction(PbhRule &rule, const std::string &field return false; } + rule.packet_action.meta.name = field; rule.packet_action.value = cit->second; rule.packet_action.is_set = true; @@ -746,7 +713,6 @@ bool PbhHelper::parsePbhRuleFlowCounter(PbhRule &rule, const std::string &field, return false; } - rule.flow_counter.meta.name = field; rule.flow_counter.value = cit->second; rule.flow_counter.is_set = true; @@ -1036,6 +1002,7 @@ bool PbhHelper::validatePbhRule(PbhRule &rule) const PBH_RULE_PACKET_ACTION_SET_ECMP_HASH ); + rule.packet_action.meta.name = PBH_RULE_PACKET_ACTION; rule.packet_action.value = SAI_ACL_ENTRY_ATTR_ACTION_SET_ECMP_HASH_ID; rule.packet_action.is_set = true; @@ -1049,7 +1016,7 @@ bool PbhHelper::validatePbhRule(PbhRule &rule) const PBH_RULE_FLOW_COUNTER, PBH_RULE_FLOW_COUNTER_DISABLED ); - rule.flow_counter.meta.name = PBH_RULE_FLOW_COUNTER; + rule.flow_counter.value = false; rule.flow_counter.is_set = true; diff --git a/orchagent/pbh/pbhrule.cpp b/orchagent/pbh/pbhrule.cpp index 7d35f4bb8f..0b00e40e44 100644 --- a/orchagent/pbh/pbhrule.cpp +++ b/orchagent/pbh/pbhrule.cpp @@ -98,3 +98,50 @@ bool AclRulePbh::validateAddAction(string attr_name, string attr_value) { SWSS_LOG_THROW("This API should not be used on PbhRule"); } + +bool AclRulePbh::disableAction() +{ + const auto &cit1 = m_actions.find(SAI_ACL_ENTRY_ATTR_ACTION_SET_ECMP_HASH_ID); + if (cit1 != m_actions.cend()) + { + const auto &attr1 = cit1->second.getSaiAttr(); + if (attr1.value.aclaction.enable) + { + sai_attribute_t attr; + + attr.id = attr1.id; + attr.value.aclaction.enable = false; + attr.value.aclaction.parameter.oid = SAI_NULL_OBJECT_ID; + + if (!setAttribute(attr)) + { + return false; + } + + m_actions.erase(cit1); + } + } + + const auto &cit2 = m_actions.find(SAI_ACL_ENTRY_ATTR_ACTION_SET_LAG_HASH_ID); + if (cit2 != m_actions.cend()) + { + const auto &attr2 = cit2->second.getSaiAttr(); + if (attr2.value.aclaction.enable) + { + sai_attribute_t attr; + + attr.id = attr2.id; + attr.value.aclaction.enable = false; + attr.value.aclaction.parameter.oid = SAI_NULL_OBJECT_ID; + + if (!setAttribute(attr)) + { + return false; + } + + m_actions.erase(cit2); + } + } + + return true; +} diff --git a/orchagent/pbh/pbhrule.h b/orchagent/pbh/pbhrule.h index 9e661761c4..5fa5ddf1fc 100644 --- a/orchagent/pbh/pbhrule.h +++ b/orchagent/pbh/pbhrule.h @@ -13,4 +13,5 @@ class AclRulePbh: public AclRule bool validate() override; void onUpdate(SubjectType, void *) override; bool validateAddAction(string attr_name, string attr_value) override; + bool disableAction(); }; 
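
For context when reading the pbhorch.cpp hunks below: a minimal sketch (not part of this change) of how the new PbhCapabilities API introduced in pbhcap.h/pbhcap.cpp above is meant to be consumed. The helper function name and the example field list here are illustrative only; the PbhCapabilities type, the PbhFieldCapability enum, and validatePbhRuleCap() are taken from the new header, and the real callers are the PbhOrch update paths shown further down.

#include <string>
#include <vector>

#include "pbh/pbhcap.h"

// Hypothetical helper: decide whether a PBH rule update may be pushed to SAI.
// 'updatedFields' corresponds to the list produced by diffing the old and new
// field/value maps, as PbhOrch::getPbhUpdatedFields() does in the hunks below.
static bool canApplyRuleUpdate(const PbhCapabilities &cap,
                               const std::vector<std::string> &updatedFields)
{
    // Every field being modified must advertise UPDATE support for the
    // detected ASIC vendor; otherwise the task has to be rejected/retried.
    return cap.validatePbhRuleCap(updatedFields, PbhFieldCapability::UPDATE);
}

// Usage sketch:
//   PbhCapabilities cap;                          // parses ASIC_VENDOR, publishes STATE_DB capabilities
//   canApplyRuleUpdate(cap, {"priority", "hash"}); // field names follow pbhschema.h
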
diff --git a/orchagent/pbh/pbhschema.h b/orchagent/pbh/pbhschema.h new file mode 100644 index 0000000000..3ea280f769 --- /dev/null +++ b/orchagent/pbh/pbhschema.h @@ -0,0 +1,37 @@ +#pragma once + +// defines ------------------------------------------------------------------------------------------------------------ + +#define PBH_TABLE_INTERFACE_LIST "interface_list" +#define PBH_TABLE_DESCRIPTION "description" + +#define PBH_RULE_PACKET_ACTION_SET_ECMP_HASH "SET_ECMP_HASH" +#define PBH_RULE_PACKET_ACTION_SET_LAG_HASH "SET_LAG_HASH" + +#define PBH_RULE_FLOW_COUNTER_ENABLED "ENABLED" +#define PBH_RULE_FLOW_COUNTER_DISABLED "DISABLED" + +#define PBH_RULE_PRIORITY "priority" +#define PBH_RULE_GRE_KEY "gre_key" +#define PBH_RULE_ETHER_TYPE "ether_type" +#define PBH_RULE_IP_PROTOCOL "ip_protocol" +#define PBH_RULE_IPV6_NEXT_HEADER "ipv6_next_header" +#define PBH_RULE_L4_DST_PORT "l4_dst_port" +#define PBH_RULE_INNER_ETHER_TYPE "inner_ether_type" +#define PBH_RULE_HASH "hash" +#define PBH_RULE_PACKET_ACTION "packet_action" +#define PBH_RULE_FLOW_COUNTER "flow_counter" + +#define PBH_HASH_HASH_FIELD_LIST "hash_field_list" + +#define PBH_HASH_FIELD_HASH_FIELD_INNER_IP_PROTOCOL "INNER_IP_PROTOCOL" +#define PBH_HASH_FIELD_HASH_FIELD_INNER_L4_DST_PORT "INNER_L4_DST_PORT" +#define PBH_HASH_FIELD_HASH_FIELD_INNER_L4_SRC_PORT "INNER_L4_SRC_PORT" +#define PBH_HASH_FIELD_HASH_FIELD_INNER_DST_IPV4 "INNER_DST_IPV4" +#define PBH_HASH_FIELD_HASH_FIELD_INNER_SRC_IPV4 "INNER_SRC_IPV4" +#define PBH_HASH_FIELD_HASH_FIELD_INNER_DST_IPV6 "INNER_DST_IPV6" +#define PBH_HASH_FIELD_HASH_FIELD_INNER_SRC_IPV6 "INNER_SRC_IPV6" + +#define PBH_HASH_FIELD_HASH_FIELD "hash_field" +#define PBH_HASH_FIELD_IP_MASK "ip_mask" +#define PBH_HASH_FIELD_SEQUENCE_ID "sequence_id" diff --git a/orchagent/pbhorch.cpp b/orchagent/pbhorch.cpp index 83a1e80bd0..e2146cb362 100644 --- a/orchagent/pbhorch.cpp +++ b/orchagent/pbhorch.cpp @@ -53,7 +53,26 @@ static inline std::vector uMapDiffByKey(const umap_t &uMap1, const umap const auto &s1 = uMapToKeySet(uMap1); const auto &s2 = uMapToKeySet(uMap2); - std::set_symmetric_difference( + std::set_difference( + s1.cbegin(), + s1.cend(), + s2.cbegin(), + s2.cend(), + std::back_inserter(v) + ); + + return v; +} + +template +static inline std::vector uMapIntersectByKey(const umap_t &uMap1, const umap_t &uMap2) +{ + std::vector v; + + const auto &s1 = uMapToKeySet(uMap1); + const auto &s2 = uMapToKeySet(uMap2); + + std::set_intersection( s1.cbegin(), s1.cend(), s2.cbegin(), @@ -76,11 +95,52 @@ PbhOrch::PbhOrch( this->portsOrch = portsOrch; } -PbhOrch::~PbhOrch() +template +std::vector PbhOrch::getPbhAddedFields(const T &obj, const T &nObj) const +{ + return uMapDiffByKey(nObj.fieldValueMap, obj.fieldValueMap); +} + +template std::vector PbhOrch::getPbhAddedFields(const PbhTable &obj, const PbhTable &nObj) const; +template std::vector PbhOrch::getPbhAddedFields(const PbhRule &obj, const PbhRule &nObj) const; +template std::vector PbhOrch::getPbhAddedFields(const PbhHash &obj, const PbhHash &nObj) const; +template std::vector PbhOrch::getPbhAddedFields(const PbhHashField &obj, const PbhHashField &nObj) const; + +template +std::vector PbhOrch::getPbhUpdatedFields(const T &obj, const T &nObj) const { + std::vector v; + + const auto &iv = uMapIntersectByKey(obj.fieldValueMap, nObj.fieldValueMap); + std::copy_if( + iv.cbegin(), + iv.cend(), + std::back_inserter(v), + [&obj, &nObj](const auto &f) { + return obj.fieldValueMap.at(f) != nObj.fieldValueMap.at(f); + } + ); + + return v; } +template std::vector 
PbhOrch::getPbhUpdatedFields(const PbhTable &obj, const PbhTable &nObj) const; +template std::vector PbhOrch::getPbhUpdatedFields(const PbhRule &obj, const PbhRule &nObj) const; +template std::vector PbhOrch::getPbhUpdatedFields(const PbhHash &obj, const PbhHash &nObj) const; +template std::vector PbhOrch::getPbhUpdatedFields(const PbhHashField &obj, const PbhHashField &nObj) const; + +template +std::vector PbhOrch::getPbhRemovedFields(const T &obj, const T &nObj) const +{ + return uMapDiffByKey(obj.fieldValueMap, nObj.fieldValueMap); +} + +template std::vector PbhOrch::getPbhRemovedFields(const PbhTable &obj, const PbhTable &nObj) const; +template std::vector PbhOrch::getPbhRemovedFields(const PbhRule &obj, const PbhRule &nObj) const; +template std::vector PbhOrch::getPbhRemovedFields(const PbhHash &obj, const PbhHash &nObj) const; +template std::vector PbhOrch::getPbhRemovedFields(const PbhHashField &obj, const PbhHashField &nObj) const; + template<> auto PbhOrch::getPbhSetupTaskMap() const -> const std::unordered_map& { @@ -252,6 +312,34 @@ bool PbhOrch::updatePbhTable(const PbhTable &table) return false; } + const auto &aFields = this->getPbhAddedFields(tObj, table); + const auto &uFields = this->getPbhUpdatedFields(tObj, table); + const auto &rFields = this->getPbhRemovedFields(tObj, table); + + if (aFields.empty() && uFields.empty() && rFields.empty()) + { + SWSS_LOG_NOTICE("PBH table(%s) in SAI is up-to-date", table.key.c_str()); + return true; + } + + if (!this->pbhCap.validatePbhTableCap(aFields, PbhFieldCapability::ADD)) + { + SWSS_LOG_ERROR("Failed to validate PBH table(%s) added fields: unsupported capabilities", table.key.c_str()); + return false; + } + + if (!this->pbhCap.validatePbhTableCap(uFields, PbhFieldCapability::UPDATE)) + { + SWSS_LOG_ERROR("Failed to validate PBH table(%s) updated fields: unsupported capabilities", table.key.c_str()); + return false; + } + + if (!this->pbhCap.validatePbhTableCap(rFields, PbhFieldCapability::REMOVE)) + { + SWSS_LOG_ERROR("Failed to validate PBH table(%s) removed fields: unsupported capabilities", table.key.c_str()); + return false; + } + AclTable pbhTable(this->aclOrch, table.name); if (table.interface_list.is_set) @@ -577,57 +665,227 @@ bool PbhOrch::updatePbhRule(const PbhRule &rule) return false; } - if (!uMapDiffByKey(rObj.fieldValueMap, rule.fieldValueMap).empty()) + const auto &aFields = this->getPbhAddedFields(rObj, rule); + const auto &uFields = this->getPbhUpdatedFields(rObj, rule); + const auto &rFields = this->getPbhRemovedFields(rObj, rule); + + if (aFields.empty() && uFields.empty() && rFields.empty()) + { + SWSS_LOG_NOTICE("PBH rule(%s) in SAI is up-to-date", rule.key.c_str()); + return true; + } + + if (!this->pbhCap.validatePbhRuleCap(aFields, PbhFieldCapability::ADD)) { - SWSS_LOG_ERROR("Failed to update PBH rule(%s) in SAI: fields add/remove is prohibited", rule.key.c_str()); + SWSS_LOG_ERROR("Failed to validate PBH rule(%s) added fields: unsupported capabilities", rule.key.c_str()); return false; } - bool flowCounterUpdate = false; + if (!this->pbhCap.validatePbhRuleCap(uFields, PbhFieldCapability::UPDATE)) + { + SWSS_LOG_ERROR("Failed to validate PBH rule(%s) updated fields: unsupported capabilities", rule.key.c_str()); + return false; + } - for (const auto &oCit : rObj.fieldValueMap) + if (!this->pbhCap.validatePbhRuleCap(rFields, PbhFieldCapability::REMOVE)) { - const auto &field = oCit.first; + SWSS_LOG_ERROR("Failed to validate PBH rule(%s) removed fields: unsupported capabilities", rule.key.c_str()); + return 
false; + } - const auto &oValue = oCit.second; - const auto &nValue = rule.fieldValueMap.at(field); + std::shared_ptr pbhRule; + + if (rule.flow_counter.is_set) + { + pbhRule = std::make_shared(this->aclOrch, rule.name, rule.table, rule.flow_counter.value); + } + else + { + pbhRule = std::make_shared(this->aclOrch, rule.name, rule.table); + } - if (oValue == nValue) + if (rule.priority.is_set) + { + if (!pbhRule->validateAddPriority(rule.priority.value)) { - continue; + SWSS_LOG_ERROR("Failed to configure PBH rule(%s) priority", rule.key.c_str()); + return false; } + } + + if (rule.gre_key.is_set) + { + sai_attribute_t attr; - if (field != rule.flow_counter.meta.name) + attr.id = SAI_ACL_ENTRY_ATTR_FIELD_GRE_KEY; + attr.value.aclfield.enable = true; + attr.value.aclfield.data.u32 = rule.gre_key.value; + attr.value.aclfield.mask.u32 = rule.gre_key.mask; + + if (!pbhRule->validateAddMatch(attr)) { - SWSS_LOG_ERROR( - "Failed to update PBH rule(%s) in SAI: field(%s) update is prohibited", - rule.key.c_str(), - field.c_str() - ); + SWSS_LOG_ERROR("Failed to configure PBH rule(%s) match: GRE_KEY", rule.key.c_str()); return false; } + } - flowCounterUpdate = true; + if (rule.ether_type.is_set) + { + sai_attribute_t attr; + + attr.id = SAI_ACL_ENTRY_ATTR_FIELD_ETHER_TYPE; + attr.value.aclfield.enable = true; + attr.value.aclfield.data.u16 = rule.ether_type.value; + attr.value.aclfield.mask.u16 = rule.ether_type.mask; + + if (!pbhRule->validateAddMatch(attr)) + { + SWSS_LOG_ERROR("Failed to configure PBH rule(%s) match: ETHER_TYPE", rule.key.c_str()); + return false; + } } - if (!flowCounterUpdate) + if (rule.ip_protocol.is_set) { - SWSS_LOG_NOTICE("PBH rule(%s) in SAI is up-to-date", rule.key.c_str()); - return true; + sai_attribute_t attr; + + attr.id = SAI_ACL_ENTRY_ATTR_FIELD_IP_PROTOCOL; + attr.value.aclfield.enable = true; + attr.value.aclfield.data.u8 = rule.ip_protocol.value; + attr.value.aclfield.mask.u8 = rule.ip_protocol.mask; + + if (!pbhRule->validateAddMatch(attr)) + { + SWSS_LOG_ERROR("Failed to configure PBH rule(%s) match: IP_PROTOCOL", rule.key.c_str()); + return false; + } + } + + if (rule.ipv6_next_header.is_set) + { + sai_attribute_t attr; + + attr.id = SAI_ACL_ENTRY_ATTR_FIELD_IPV6_NEXT_HEADER; + attr.value.aclfield.enable = true; + attr.value.aclfield.data.u8 = rule.ipv6_next_header.value; + attr.value.aclfield.mask.u8 = rule.ipv6_next_header.mask; + + if (!pbhRule->validateAddMatch(attr)) + { + SWSS_LOG_ERROR("Failed to configure PBH rule(%s) match: IPV6_NEXT_HEADER", rule.key.c_str()); + return false; + } + } + + if (rule.l4_dst_port.is_set) + { + sai_attribute_t attr; + + attr.id = SAI_ACL_ENTRY_ATTR_FIELD_L4_DST_PORT; + attr.value.aclfield.enable = true; + attr.value.aclfield.data.u16 = rule.l4_dst_port.value; + attr.value.aclfield.mask.u16 = rule.l4_dst_port.mask; + + if (!pbhRule->validateAddMatch(attr)) + { + SWSS_LOG_ERROR("Failed to configure PBH rule(%s) match: L4_DST_PORT", rule.key.c_str()); + return false; + } + } + + if (rule.inner_ether_type.is_set) + { + sai_attribute_t attr; + + attr.id = SAI_ACL_ENTRY_ATTR_FIELD_INNER_ETHER_TYPE; + attr.value.aclfield.enable = true; + attr.value.aclfield.data.u16 = rule.inner_ether_type.value; + attr.value.aclfield.mask.u16 = rule.inner_ether_type.mask; + + if (!pbhRule->validateAddMatch(attr)) + { + SWSS_LOG_ERROR("Failed to configure PBH rule(%s) match: INNER_ETHER_TYPE", rule.key.c_str()); + return false; + } } - if (!this->aclOrch->updateAclRule(rule.table, rule.name, rule.flow_counter.value)) + if (rule.hash.is_set && 
rule.packet_action.is_set) + { + PbhHash hObj; + + if (this->pbhHlpr.getPbhHash(hObj, rule.hash.value)) + { + sai_attribute_t attr; + + attr.id = rule.packet_action.value; + attr.value.aclaction.enable = true; + attr.value.aclaction.parameter.oid = hObj.getOid(); + + if (!pbhRule->validateAddAction(attr)) + { + SWSS_LOG_ERROR("Failed to configure PBH rule(%s) action", rule.key.c_str()); + return false; + } + } + } + + if (!pbhRule->validate()) + { + SWSS_LOG_ERROR("Failed to validate PBH rule(%s)", rule.key.c_str()); + return false; + } + + // Mellanox W/A + if (this->pbhCap.getAsicVendor() == PbhAsicVendor::MELLANOX) + { + const auto &hMeta = rule.hash.meta; + const auto &paMeta = rule.packet_action.meta; + + auto cond1 = std::find(uFields.cbegin(), uFields.cend(), hMeta.name) != uFields.cend(); + auto cond2 = std::find(uFields.cbegin(), uFields.cend(), paMeta.name) != uFields.cend(); + + if (cond1 || cond2) + { + auto pbhRulePtr = dynamic_cast(this->aclOrch->getAclRule(rule.table, rule.name)); + + if (pbhRulePtr == nullptr) + { + SWSS_LOG_ERROR("Failed to update PBH rule(%s) in SAI: invalid object type", rule.key.c_str()); + return false; + } + + if (!pbhRulePtr->disableAction()) + { + SWSS_LOG_ERROR("Failed to disable PBH rule(%s) action", rule.key.c_str()); + return false; + } + } + } + + if (!this->aclOrch->updateAclRule(pbhRule)) { SWSS_LOG_ERROR("Failed to update PBH rule(%s) in SAI", rule.key.c_str()); return false; } + if (!this->pbhHlpr.decRefCount(rObj)) + { + SWSS_LOG_ERROR("Failed to remove PBH rule(%s) dependencies", rObj.key.c_str()); + return false; + } + if (!this->pbhHlpr.updatePbhRule(rule)) { SWSS_LOG_ERROR("Failed to update PBH rule(%s) in internal cache", rule.key.c_str()); return false; } + if (!this->pbhHlpr.incRefCount(rule)) + { + SWSS_LOG_ERROR("Failed to add PBH rule(%s) dependencies", rule.key.c_str()); + return false; + } + SWSS_LOG_NOTICE("Updated PBH rule(%s) in SAI", rule.key.c_str()); return true; @@ -832,31 +1090,98 @@ bool PbhOrch::updatePbhHash(const PbhHash &hash) return false; } - if (!uMapDiffByKey(hObj.fieldValueMap, hash.fieldValueMap).empty()) + const auto &aFields = this->getPbhAddedFields(hObj, hash); + const auto &uFields = this->getPbhUpdatedFields(hObj, hash); + const auto &rFields = this->getPbhRemovedFields(hObj, hash); + + if (aFields.empty() && uFields.empty() && rFields.empty()) { - SWSS_LOG_ERROR("Failed to update PBH hash(%s) in SAI: fields add/remove is prohibited", hash.key.c_str()); + SWSS_LOG_NOTICE("PBH hash(%s) in SAI is up-to-date", hash.key.c_str()); + return true; + } + + if (!this->pbhCap.validatePbhHashCap(aFields, PbhFieldCapability::ADD)) + { + SWSS_LOG_ERROR("Failed to validate PBH hash(%s) added fields: unsupported capabilities", hash.key.c_str()); + return false; + } + + if (!this->pbhCap.validatePbhHashCap(uFields, PbhFieldCapability::UPDATE)) + { + SWSS_LOG_ERROR("Failed to validate PBH hash(%s) updated fields: unsupported capabilities", hash.key.c_str()); return false; } - for (const auto &oCit : hObj.fieldValueMap) + if (!this->pbhCap.validatePbhHashCap(rFields, PbhFieldCapability::REMOVE)) { - const auto &field = oCit.first; + SWSS_LOG_ERROR("Failed to validate PBH hash(%s) removed fields: unsupported capabilities", hash.key.c_str()); + return false; + } - const auto &oValue = oCit.second; - const auto &nValue = hash.fieldValueMap.at(field); + std::vector hashFieldOidList; - if (oValue != nValue) + if (hash.hash_field_list.is_set) + { + for (const auto &cit : hash.hash_field_list.value) { - SWSS_LOG_ERROR( - 
"Failed to update PBH hash(%s) in SAI: field(%s) update is prohibited", - hash.key.c_str(), - field.c_str() - ); - return false; + PbhHashField hfObj; + + if (!this->pbhHlpr.getPbhHashField(hfObj, cit)) + { + SWSS_LOG_ERROR( + "Failed to update PBH hash(%s) in SAI: missing hash field(%s)", + hash.key.c_str(), + cit.c_str() + ); + return false; + } + + hashFieldOidList.push_back(hfObj.getOid()); } } - SWSS_LOG_NOTICE("PBH hash(%s) in SAI is up-to-date", hash.key.c_str()); + if (hashFieldOidList.empty()) + { + SWSS_LOG_ERROR("Failed to update PBH hash(%s) in SAI: missing hash fields", hash.key.c_str()); + return false; + } + + sai_attribute_t attr; + + attr.id = SAI_HASH_ATTR_FINE_GRAINED_HASH_FIELD_LIST; + attr.value.objlist.count = static_cast(hashFieldOidList.size()); + attr.value.objlist.list = hashFieldOidList.data(); + + sai_status_t status; + + status = sai_hash_api->set_hash_attribute(hObj.getOid(), &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to update PBH hash(%s) in SAI", hash.key.c_str()); + return false; + } + + if (!this->pbhHlpr.decRefCount(hObj)) + { + SWSS_LOG_ERROR("Failed to remove PBH hash(%s) dependencies", hObj.key.c_str()); + return false; + } + + hObj.hash_field_list = hash.hash_field_list; + + if (!this->pbhHlpr.updatePbhHash(hObj)) + { + SWSS_LOG_ERROR("Failed to update PBH hash(%s) in internal cache", hObj.key.c_str()); + return false; + } + + if (!this->pbhHlpr.incRefCount(hObj)) + { + SWSS_LOG_ERROR("Failed to add PBH hash(%s) dependencies", hObj.key.c_str()); + return false; + } + + SWSS_LOG_NOTICE("Updated PBH hash(%s) in SAI", hObj.key.c_str()); return true; } @@ -1072,33 +1397,37 @@ bool PbhOrch::updatePbhHashField(const PbhHashField &hashField) return false; } - if (!uMapDiffByKey(hfObj.fieldValueMap, hashField.fieldValueMap).empty()) + const auto &aFields = this->getPbhAddedFields(hfObj, hashField); + const auto &uFields = this->getPbhUpdatedFields(hfObj, hashField); + const auto &rFields = this->getPbhRemovedFields(hfObj, hashField); + + if (aFields.empty() && uFields.empty() && rFields.empty()) { - SWSS_LOG_ERROR("Failed to update PBH hash field(%s) in SAI: fields add/remove is prohibited", hashField.key.c_str()); - return false; + SWSS_LOG_NOTICE("PBH hash field(%s) in SAI is up-to-date", hashField.key.c_str()); + return true; } - for (const auto &oCit : hfObj.fieldValueMap) + if (!this->pbhCap.validatePbhHashFieldCap(aFields, PbhFieldCapability::ADD)) { - const auto &field = oCit.first; + SWSS_LOG_ERROR("Failed to validate PBH hash field(%s) added fields: unsupported capabilities", hashField.key.c_str()); + return false; + } - const auto &oValue = oCit.second; - const auto &nValue = hashField.fieldValueMap.at(field); + if (!this->pbhCap.validatePbhHashFieldCap(uFields, PbhFieldCapability::UPDATE)) + { + SWSS_LOG_ERROR("Failed to validate PBH hash field(%s) updated fields: unsupported capabilities", hashField.key.c_str()); + return false; + } - if (oValue != nValue) - { - SWSS_LOG_ERROR( - "Failed to update PBH hash field(%s) in SAI: field(%s) update is prohibited", - hashField.key.c_str(), - field.c_str() - ); - return false; - } + if (!this->pbhCap.validatePbhHashFieldCap(rFields, PbhFieldCapability::REMOVE)) + { + SWSS_LOG_ERROR("Failed to validate PBH hash field(%s) removed fields: unsupported capabilities", hashField.key.c_str()); + return false; } - SWSS_LOG_NOTICE("PBH hash field(%s) in SAI is up-to-date", hashField.key.c_str()); + SWSS_LOG_ERROR("Failed to update PBH hash field(%s) in SAI: update is prohibited", 
hfObj.key.c_str()); - return true; + return false; } bool PbhOrch::removePbhHashField(const PbhHashField &hashField) diff --git a/orchagent/pbhorch.h b/orchagent/pbhorch.h index 1aa49e1d26..250963f54a 100644 --- a/orchagent/pbhorch.h +++ b/orchagent/pbhorch.h @@ -8,20 +8,30 @@ #include "pbh/pbhrule.h" #include "pbh/pbhmgr.h" +#include "pbh/pbhcap.h" class PbhOrch final : public Orch { public: + PbhOrch() = delete; + ~PbhOrch() = default; + PbhOrch( std::vector &connectorList, AclOrch *aclOrch, PortsOrch *portsOrch ); - ~PbhOrch(); using Orch::doTask; // Allow access to the basic doTask private: + template + std::vector getPbhAddedFields(const T &obj, const T &nObj) const; + template + std::vector getPbhUpdatedFields(const T &obj, const T &nObj) const; + template + std::vector getPbhRemovedFields(const T &obj, const T &nObj) const; + template auto getPbhSetupTaskMap() const -> const std::unordered_map&; template @@ -75,4 +85,5 @@ class PbhOrch final : public Orch PortsOrch *portsOrch; PbhHelper pbhHlpr; + PbhCapabilities pbhCap; }; diff --git a/orchagent/pfc_detect_barefoot.lua b/orchagent/pfc_detect_barefoot.lua index b270549a29..c413c5999c 100644 --- a/orchagent/pfc_detect_barefoot.lua +++ b/orchagent/pfc_detect_barefoot.lua @@ -36,63 +36,68 @@ for i = n, 1, -1 do local queue_index = redis.call('HGET', 'COUNTERS_QUEUE_INDEX_MAP', KEYS[i]) local port_id = redis.call('HGET', 'COUNTERS_QUEUE_PORT_MAP', KEYS[i]) - local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' - local pfc_duration_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PAUSE_DURATION' + -- If there is no entry in COUNTERS_QUEUE_INDEX_MAP or COUNTERS_QUEUE_PORT_MAP then + -- it means KEYS[i] queue is inserted into FLEX COUNTER DB but the corresponding + -- maps haven't been updated yet. + if queue_index and port_id then + local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' + local pfc_duration_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PAUSE_DURATION' - -- Get all counters - local occupancy_bytes = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES') - local packets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS') - local pfc_rx_packets = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key) - local pfc_duration = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key) + -- Get all counters + local occupancy_bytes = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES') + local packets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS') + local pfc_rx_packets = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key) + local pfc_duration = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key) - if occupancy_bytes and packets and pfc_rx_packets and pfc_duration then - occupancy_bytes = tonumber(occupancy_bytes) - packets = tonumber(packets) - pfc_rx_packets = tonumber(pfc_rx_packets) - pfc_duration = tonumber(pfc_duration) + if occupancy_bytes and packets and pfc_rx_packets and pfc_duration then + occupancy_bytes = tonumber(occupancy_bytes) + packets = tonumber(packets) + pfc_rx_packets = tonumber(pfc_rx_packets) + pfc_duration = tonumber(pfc_duration) - local packets_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last') - local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. 
port_id, pfc_rx_pkt_key .. '_last') - local pfc_duration_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') - -- DEBUG CODE START. Uncomment to enable - local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') - -- DEBUG CODE END. + local packets_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last') + local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') + local pfc_duration_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') + -- DEBUG CODE START. Uncomment to enable + local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') + -- DEBUG CODE END. - -- If this is not a first run, then we have last values available - if packets_last and pfc_rx_packets_last and pfc_duration_last then - packets_last = tonumber(packets_last) - pfc_rx_packets_last = tonumber(pfc_rx_packets_last) - pfc_duration_last = tonumber(pfc_duration_last) + -- If this is not a first run, then we have last values available + if packets_last and pfc_rx_packets_last and pfc_duration_last then + packets_last = tonumber(packets_last) + pfc_rx_packets_last = tonumber(pfc_rx_packets_last) + pfc_duration_last = tonumber(pfc_duration_last) - -- Check actual condition of queue being in PFC storm - if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) or - -- DEBUG CODE START. Uncomment to enable - (debug_storm == "enabled") or - -- DEBUG CODE END. - (occupancy_bytes == 0 and packets - packets_last == 0 and (pfc_duration - pfc_duration_last) > poll_time * 0.8) then - if time_left <= poll_time then - redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') - redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') - redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm"]') - is_deadlock = true - time_left = detection_time + -- Check actual condition of queue being in PFC storm + if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) or + -- DEBUG CODE START. Uncomment to enable + (debug_storm == "enabled") or + -- DEBUG CODE END. + (occupancy_bytes == 0 and packets - packets_last == 0 and (pfc_duration - pfc_duration_last) > poll_time * 0.8) then + if time_left <= poll_time then + redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') + redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm"]') + is_deadlock = true + time_left = detection_time + else + time_left = time_left - poll_time + end else - time_left = time_left - poll_time - end - else - if pfc_wd_action == 'alert' and pfc_wd_status ~= 'operational' then - redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') + if pfc_wd_action == 'alert' and pfc_wd_status ~= 'operational' then + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') + end + time_left = detection_time end - time_left = detection_time end - end - -- Save values for next run - redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last', packets) - redis.call('HSET', counters_table_name .. ':' .. 
KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT', time_left) - if is_deadlock == false then - redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) - redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last', pfc_duration) + -- Save values for next run + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last', packets) + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT', time_left) + if is_deadlock == false then + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last', pfc_duration) + end end end end diff --git a/orchagent/pfc_detect_broadcom.lua b/orchagent/pfc_detect_broadcom.lua index 4f82b93317..29ed2d1633 100644 --- a/orchagent/pfc_detect_broadcom.lua +++ b/orchagent/pfc_detect_broadcom.lua @@ -35,61 +35,66 @@ for i = n, 1, -1 do local queue_index = redis.call('HGET', 'COUNTERS_QUEUE_INDEX_MAP', KEYS[i]) local port_id = redis.call('HGET', 'COUNTERS_QUEUE_PORT_MAP', KEYS[i]) - local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' - local pfc_on2off_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_ON2OFF_RX_PKTS' + -- If there is no entry in COUNTERS_QUEUE_INDEX_MAP or COUNTERS_QUEUE_PORT_MAP then + -- it means KEYS[i] queue is inserted into FLEX COUNTER DB but the corresponding + -- maps haven't been updated yet. + if queue_index and port_id then + local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' + local pfc_on2off_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_ON2OFF_RX_PKTS' - -- Get all counters - local occupancy_bytes = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES') - local packets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS') - local pfc_rx_packets = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key) - local pfc_on2off = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_on2off_key) - local queue_pause_status = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_ATTR_PAUSE_STATUS') + -- Get all counters + local occupancy_bytes = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES') + local packets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS') + local pfc_rx_packets = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key) + local pfc_on2off = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_on2off_key) + local queue_pause_status = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_ATTR_PAUSE_STATUS') - if occupancy_bytes and packets and pfc_rx_packets and pfc_on2off and queue_pause_status then - occupancy_bytes = tonumber(occupancy_bytes) - packets = tonumber(packets) - pfc_rx_packets = tonumber(pfc_rx_packets) - pfc_on2off = tonumber(pfc_on2off) + if occupancy_bytes and packets and pfc_rx_packets and pfc_on2off and queue_pause_status then + occupancy_bytes = tonumber(occupancy_bytes) + packets = tonumber(packets) + pfc_rx_packets = tonumber(pfc_rx_packets) + pfc_on2off = tonumber(pfc_on2off) - local packets_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last') - local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. 
'_last') - local pfc_on2off_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_on2off_key .. '_last') - local queue_pause_status_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_ATTR_PAUSE_STATUS_last') + local packets_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last') + local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') + local pfc_on2off_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_on2off_key .. '_last') + local queue_pause_status_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_ATTR_PAUSE_STATUS_last') - -- DEBUG CODE START. Uncomment to enable - local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') - -- DEBUG CODE END. + -- DEBUG CODE START. Uncomment to enable + local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') + -- DEBUG CODE END. - -- If this is not a first run, then we have last values available - if packets_last and pfc_rx_packets_last and pfc_on2off_last and queue_pause_status_last then - packets_last = tonumber(packets_last) - pfc_rx_packets_last = tonumber(pfc_rx_packets_last) - pfc_on2off_last = tonumber(pfc_on2off_last) + -- If this is not a first run, then we have last values available + if packets_last and pfc_rx_packets_last and pfc_on2off_last and queue_pause_status_last then + packets_last = tonumber(packets_last) + pfc_rx_packets_last = tonumber(pfc_rx_packets_last) + pfc_on2off_last = tonumber(pfc_on2off_last) - -- Check actual condition of queue being in PFC storm - if (pfc_rx_packets - pfc_rx_packets_last > 0 and pfc_on2off - pfc_on2off_last == 0 and queue_pause_status_last == 'true' and queue_pause_status == 'true') or - (debug_storm == "enabled") then - if time_left <= poll_time then - redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm"]') - is_deadlock = true - time_left = detection_time + -- Check actual condition of queue being in PFC storm + if (pfc_rx_packets - pfc_rx_packets_last > 0 and pfc_on2off - pfc_on2off_last == 0 and queue_pause_status_last == 'true' and queue_pause_status == 'true') or + (debug_storm == "enabled") then + if time_left <= poll_time then + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm"]') + is_deadlock = true + time_left = detection_time + else + time_left = time_left - poll_time + end else - time_left = time_left - poll_time - end - else - if pfc_wd_action == 'alert' and pfc_wd_status ~= 'operational' then - redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') + if pfc_wd_action == 'alert' and pfc_wd_status ~= 'operational' then + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') + end + time_left = detection_time end - time_left = detection_time end - end - -- Save values for next run - redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_ATTR_PAUSE_STATUS_last', queue_pause_status) - redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last', packets) - redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT', time_left) - redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) - redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_on2off_key .. '_last', pfc_on2off) + -- Save values for next run + redis.call('HSET', counters_table_name .. ':' .. 
KEYS[i], 'SAI_QUEUE_ATTR_PAUSE_STATUS_last', queue_pause_status) + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last', packets) + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT', time_left) + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_on2off_key .. '_last', pfc_on2off) + end end end end diff --git a/orchagent/pfc_detect_innovium.lua b/orchagent/pfc_detect_innovium.lua index cedd51baa3..8deedeaa4f 100644 --- a/orchagent/pfc_detect_innovium.lua +++ b/orchagent/pfc_detect_innovium.lua @@ -36,72 +36,77 @@ for i = n, 1, -1 do local queue_index = redis.call('HGET', 'COUNTERS_QUEUE_INDEX_MAP', KEYS[i]) local port_id = redis.call('HGET', 'COUNTERS_QUEUE_PORT_MAP', KEYS[i]) - local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' - local pfc_duration_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PAUSE_DURATION' - - -- Get all counters - local occupancy_bytes = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES') - local packets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS') - local pfc_rx_packets = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key) - local pfc_duration = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key) - - if occupancy_bytes and packets and pfc_rx_packets and pfc_duration then - occupancy_bytes = tonumber(occupancy_bytes) - packets = tonumber(packets) - pfc_rx_packets = tonumber(pfc_rx_packets) - pfc_duration = tonumber(pfc_duration) - - local packets_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last') - local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') - local pfc_duration_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') - -- DEBUG CODE START. Uncomment to enable - local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') - -- DEBUG CODE END. - - -- If this is not a first run, then we have last values available - if packets_last and pfc_rx_packets_last and pfc_duration_last then - packets_last = tonumber(packets_last) - pfc_rx_packets_last = tonumber(pfc_rx_packets_last) - pfc_duration_last = tonumber(pfc_duration_last) - - -- Check actual condition of queue being in PFC storm - -- if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) then - -- redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'K7_debug_1', 'YES') - - -- if (debug_storm == "enabled") then - -- redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'K7_debug_2', 'YES') - - -- if (occupancy_bytes == 0 and packets - packets_last == 0 and (pfc_duration - pfc_duration_last) > poll_time * 0.8) then - -- redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'K7_debug_3', 'YES') - - - if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) or - -- DEBUG CODE START. Uncomment to enable - (debug_storm == "enabled") or - -- DEBUG CODE END. - (occupancy_bytes == 0 and pfc_rx_packets - pfc_rx_packets_last > 0 and (pfc_duration - pfc_duration_last) > poll_time * 0.8) then - if time_left <= poll_time then - redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. 
'","storm"]') - is_deadlock = true - time_left = detection_time + -- If there is no entry in COUNTERS_QUEUE_INDEX_MAP or COUNTERS_QUEUE_PORT_MAP then + -- it means KEYS[i] queue is inserted into FLEX COUNTER DB but the corresponding + -- maps haven't been updated yet. + if queue_index and port_id then + local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' + local pfc_duration_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PAUSE_DURATION' + + -- Get all counters + local occupancy_bytes = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES') + local packets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS') + local pfc_rx_packets = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key) + local pfc_duration = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key) + + if occupancy_bytes and packets and pfc_rx_packets and pfc_duration then + occupancy_bytes = tonumber(occupancy_bytes) + packets = tonumber(packets) + pfc_rx_packets = tonumber(pfc_rx_packets) + pfc_duration = tonumber(pfc_duration) + + local packets_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last') + local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') + local pfc_duration_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') + -- DEBUG CODE START. Uncomment to enable + local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') + -- DEBUG CODE END. + + -- If this is not a first run, then we have last values available + if packets_last and pfc_rx_packets_last and pfc_duration_last then + packets_last = tonumber(packets_last) + pfc_rx_packets_last = tonumber(pfc_rx_packets_last) + pfc_duration_last = tonumber(pfc_duration_last) + + -- Check actual condition of queue being in PFC storm + -- if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) then + -- redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'K7_debug_1', 'YES') + + -- if (debug_storm == "enabled") then + -- redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'K7_debug_2', 'YES') + + -- if (occupancy_bytes == 0 and packets - packets_last == 0 and (pfc_duration - pfc_duration_last) > poll_time * 0.8) then + -- redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'K7_debug_3', 'YES') + + + if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) or + -- DEBUG CODE START. Uncomment to enable + (debug_storm == "enabled") or + -- DEBUG CODE END. + (occupancy_bytes == 0 and pfc_rx_packets - pfc_rx_packets_last > 0 and (pfc_duration - pfc_duration_last) > poll_time * 0.8) then + if time_left <= poll_time then + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm"]') + is_deadlock = true + time_left = detection_time + else + time_left = time_left - poll_time + end else - time_left = time_left - poll_time - end - else - if pfc_wd_action == 'alert' and pfc_wd_status ~= 'operational' then - redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') + if pfc_wd_action == 'alert' and pfc_wd_status ~= 'operational' then + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. 
'","restore"]') + end + time_left = detection_time end - time_left = detection_time end - end - -- Save values for next run - redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last', packets) - redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT', time_left) - if is_deadlock == false then - redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) - redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') - redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last', pfc_duration) + -- Save values for next run + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last', packets) + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT', time_left) + if is_deadlock == false then + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) + redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last', pfc_duration) + end end end end diff --git a/orchagent/pfc_detect_mellanox.lua b/orchagent/pfc_detect_mellanox.lua index 6df16241e9..e805ad9cff 100644 --- a/orchagent/pfc_detect_mellanox.lua +++ b/orchagent/pfc_detect_mellanox.lua @@ -36,64 +36,69 @@ for i = n, 1, -1 do local queue_index = redis.call('HGET', 'COUNTERS_QUEUE_INDEX_MAP', KEYS[i]) local port_id = redis.call('HGET', 'COUNTERS_QUEUE_PORT_MAP', KEYS[i]) - local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' - local pfc_duration_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PAUSE_DURATION_US' + -- If there is no entry in COUNTERS_QUEUE_INDEX_MAP or COUNTERS_QUEUE_PORT_MAP then + -- it means KEYS[i] queue is inserted into FLEX COUNTER DB but the corresponding + -- maps haven't been updated yet. + if queue_index and port_id then + local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' + local pfc_duration_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PAUSE_DURATION_US' - -- Get all counters - local occupancy_bytes = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES') - local packets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS') - local pfc_rx_packets = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key) - local pfc_duration = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key) + -- Get all counters + local occupancy_bytes = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES') + local packets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS') + local pfc_rx_packets = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key) + local pfc_duration = redis.call('HGET', counters_table_name .. ':' .. 
port_id, pfc_duration_key) - if occupancy_bytes and packets and pfc_rx_packets and pfc_duration then - occupancy_bytes = tonumber(occupancy_bytes) - packets = tonumber(packets) - pfc_rx_packets = tonumber(pfc_rx_packets) - pfc_duration = tonumber(pfc_duration) + if occupancy_bytes and packets and pfc_rx_packets and pfc_duration then + occupancy_bytes = tonumber(occupancy_bytes) + packets = tonumber(packets) + pfc_rx_packets = tonumber(pfc_rx_packets) + pfc_duration = tonumber(pfc_duration) - local packets_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last') - local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') - local pfc_duration_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') - -- DEBUG CODE START. Uncomment to enable - local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') - -- DEBUG CODE END. + local packets_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last') + local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') + local pfc_duration_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') + -- DEBUG CODE START. Uncomment to enable + local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') + -- DEBUG CODE END. - -- If this is not a first run, then we have last values available - if packets_last and pfc_rx_packets_last and pfc_duration_last then - packets_last = tonumber(packets_last) - pfc_rx_packets_last = tonumber(pfc_rx_packets_last) - pfc_duration_last = tonumber(pfc_duration_last) - local storm_condition = (pfc_duration - pfc_duration_last) > (poll_time * 0.8) + -- If this is not a first run, then we have last values available + if packets_last and pfc_rx_packets_last and pfc_duration_last then + packets_last = tonumber(packets_last) + pfc_rx_packets_last = tonumber(pfc_rx_packets_last) + pfc_duration_last = tonumber(pfc_duration_last) + local storm_condition = (pfc_duration - pfc_duration_last) > (poll_time * 0.8) - -- Check actual condition of queue being in PFC storm - if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) or - -- DEBUG CODE START. Uncomment to enable - (debug_storm == "enabled") or - -- DEBUG CODE END. - (occupancy_bytes == 0 and packets - packets_last == 0 and storm_condition) then - if time_left <= poll_time then - redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') - redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') - redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm"]') - is_deadlock = true - time_left = detection_time + -- Check actual condition of queue being in PFC storm + if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) or + -- DEBUG CODE START. Uncomment to enable + (debug_storm == "enabled") or + -- DEBUG CODE END. + (occupancy_bytes == 0 and packets - packets_last == 0 and storm_condition) then + if time_left <= poll_time then + redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') + redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. 
'","storm"]') + is_deadlock = true + time_left = detection_time + else + time_left = time_left - poll_time + end else - time_left = time_left - poll_time - end - else - if pfc_wd_action == 'alert' and pfc_wd_status ~= 'operational' then - redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') + if pfc_wd_action == 'alert' and pfc_wd_status ~= 'operational' then + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') + end + time_left = detection_time end - time_left = detection_time end - end - -- Save values for next run - redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last', packets) - redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT', time_left) - if is_deadlock == false then - redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) - redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last', pfc_duration) + -- Save values for next run + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last', packets) + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT', time_left) + if is_deadlock == false then + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last', pfc_duration) + end end end end diff --git a/orchagent/pfc_detect_nephos.lua b/orchagent/pfc_detect_nephos.lua index d152fc5f8c..648904e17a 100644 --- a/orchagent/pfc_detect_nephos.lua +++ b/orchagent/pfc_detect_nephos.lua @@ -35,65 +35,70 @@ for i = n, 1, -1 do local queue_index = redis.call('HGET', 'COUNTERS_QUEUE_INDEX_MAP', KEYS[i]) local port_id = redis.call('HGET', 'COUNTERS_QUEUE_PORT_MAP', KEYS[i]) - local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' - local pfc_duration_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PAUSE_DURATION' + -- If there is no entry in COUNTERS_QUEUE_INDEX_MAP or COUNTERS_QUEUE_PORT_MAP then + -- it means KEYS[i] queue is inserted into FLEX COUNTER DB but the corresponding + -- maps haven't been updated yet. + if queue_index and port_id then + local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' + local pfc_duration_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PAUSE_DURATION' - -- Get all counters - local occupancy_bytes = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES') - local packets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS') - local pfc_rx_packets = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key) - local pfc_duration = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key) + -- Get all counters + local occupancy_bytes = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES') + local packets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS') + local pfc_rx_packets = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key) + local pfc_duration = redis.call('HGET', counters_table_name .. ':' .. 
port_id, pfc_duration_key) - if occupancy_bytes and packets and pfc_rx_packets and pfc_duration then - occupancy_bytes = tonumber(occupancy_bytes) - packets = tonumber(packets) - pfc_rx_packets = tonumber(pfc_rx_packets) - pfc_duration = tonumber(pfc_duration) + if occupancy_bytes and packets and pfc_rx_packets and pfc_duration then + occupancy_bytes = tonumber(occupancy_bytes) + packets = tonumber(packets) + pfc_rx_packets = tonumber(pfc_rx_packets) + pfc_duration = tonumber(pfc_duration) - local packets_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last') - local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') - local pfc_duration_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') - -- DEBUG CODE START. Uncomment to enable - local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') - -- DEBUG CODE END. + local packets_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last') + local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') + local pfc_duration_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') + -- DEBUG CODE START. Uncomment to enable + local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') + -- DEBUG CODE END. - -- If this is not a first run, then we have last values available - if packets_last and pfc_rx_packets_last and pfc_duration_last then - packets_last = tonumber(packets_last) - pfc_rx_packets_last = tonumber(pfc_rx_packets_last) - pfc_duration_last = tonumber(pfc_duration_last) + -- If this is not a first run, then we have last values available + if packets_last and pfc_rx_packets_last and pfc_duration_last then + packets_last = tonumber(packets_last) + pfc_rx_packets_last = tonumber(pfc_rx_packets_last) + pfc_duration_last = tonumber(pfc_duration_last) - -- Check actual condition of queue being in PFC storm - if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) or - -- DEBUG CODE START. Uncomment to enable - (debug_storm == "enabled") or - -- DEBUG CODE END. - (occupancy_bytes == 0 and packets - packets_last == 0 and (pfc_duration - pfc_duration_last) > poll_time * 0.8) then - if time_left <= poll_time then - redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm"]') - is_deadlock = true - time_left = detection_time + -- Check actual condition of queue being in PFC storm + if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) or + -- DEBUG CODE START. Uncomment to enable + (debug_storm == "enabled") or + -- DEBUG CODE END. + (occupancy_bytes == 0 and packets - packets_last == 0 and (pfc_duration - pfc_duration_last) > poll_time * 0.8) then + if time_left <= poll_time then + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm"]') + is_deadlock = true + time_left = detection_time + else + time_left = time_left - poll_time + end else - time_left = time_left - poll_time - end - else - if pfc_wd_action == 'alert' and pfc_wd_status ~= 'operational' then - redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') + if pfc_wd_action == 'alert' and pfc_wd_status ~= 'operational' then + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. 
'","restore"]') + end + time_left = detection_time end - time_left = detection_time end - end - -- Save values for next run - redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last', packets) - redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT', time_left) - redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) - redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') - redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last', pfc_duration) + -- Save values for next run + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last', packets) + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT', time_left) + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) + redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last', pfc_duration) + end end end end end return rets - + diff --git a/orchagent/pfc_detect_vs.lua b/orchagent/pfc_detect_vs.lua new file mode 100644 index 0000000000..e805ad9cff --- /dev/null +++ b/orchagent/pfc_detect_vs.lua @@ -0,0 +1,108 @@ +-- KEYS - queue IDs +-- ARGV[1] - counters db index +-- ARGV[2] - counters table name +-- ARGV[3] - poll time interval (milliseconds) +-- return queue Ids that satisfy criteria + +local counters_db = ARGV[1] +local counters_table_name = ARGV[2] +local poll_time = tonumber(ARGV[3]) * 1000 + +local rets = {} + +redis.call('SELECT', counters_db) + +-- Iterate through each queue +local n = table.getn(KEYS) +for i = n, 1, -1 do + local counter_keys = redis.call('HKEYS', counters_table_name .. ':' .. KEYS[i]) + local counter_num = 0 + local old_counter_num = 0 + local is_deadlock = false + local pfc_wd_status = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_STATUS') + local pfc_wd_action = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_ACTION') + + local big_red_switch_mode = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'BIG_RED_SWITCH_MODE') + if not big_red_switch_mode and (pfc_wd_status == 'operational' or pfc_wd_action == 'alert') then + local detection_time = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME') + if detection_time then + detection_time = tonumber(detection_time) + local time_left = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT') + if not time_left then + time_left = detection_time + else + time_left = tonumber(time_left) + end + + local queue_index = redis.call('HGET', 'COUNTERS_QUEUE_INDEX_MAP', KEYS[i]) + local port_id = redis.call('HGET', 'COUNTERS_QUEUE_PORT_MAP', KEYS[i]) + -- If there is no entry in COUNTERS_QUEUE_INDEX_MAP or COUNTERS_QUEUE_PORT_MAP then + -- it means KEYS[i] queue is inserted into FLEX COUNTER DB but the corresponding + -- maps haven't been updated yet. + if queue_index and port_id then + local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' + local pfc_duration_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PAUSE_DURATION_US' + + -- Get all counters + local occupancy_bytes = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES') + local packets = redis.call('HGET', counters_table_name .. ':' .. 
KEYS[i], 'SAI_QUEUE_STAT_PACKETS') + local pfc_rx_packets = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key) + local pfc_duration = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key) + + if occupancy_bytes and packets and pfc_rx_packets and pfc_duration then + occupancy_bytes = tonumber(occupancy_bytes) + packets = tonumber(packets) + pfc_rx_packets = tonumber(pfc_rx_packets) + pfc_duration = tonumber(pfc_duration) + + local packets_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last') + local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') + local pfc_duration_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') + -- DEBUG CODE START. Uncomment to enable + local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') + -- DEBUG CODE END. + + -- If this is not a first run, then we have last values available + if packets_last and pfc_rx_packets_last and pfc_duration_last then + packets_last = tonumber(packets_last) + pfc_rx_packets_last = tonumber(pfc_rx_packets_last) + pfc_duration_last = tonumber(pfc_duration_last) + local storm_condition = (pfc_duration - pfc_duration_last) > (poll_time * 0.8) + + -- Check actual condition of queue being in PFC storm + if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) or + -- DEBUG CODE START. Uncomment to enable + (debug_storm == "enabled") or + -- DEBUG CODE END. + (occupancy_bytes == 0 and packets - packets_last == 0 and storm_condition) then + if time_left <= poll_time then + redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') + redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm"]') + is_deadlock = true + time_left = detection_time + else + time_left = time_left - poll_time + end + else + if pfc_wd_action == 'alert' and pfc_wd_status ~= 'operational' then + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') + end + time_left = detection_time + end + end + + -- Save values for next run + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last', packets) + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT', time_left) + if is_deadlock == false then + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last', pfc_duration) + end + end + end + end + end +end + +return rets diff --git a/orchagent/pfc_restore.lua b/orchagent/pfc_restore.lua index 7b137a40d3..4c27852687 100644 --- a/orchagent/pfc_restore.lua +++ b/orchagent/pfc_restore.lua @@ -32,36 +32,41 @@ for i = n, 1, -1 do local queue_index = redis.call('HGET', 'COUNTERS_QUEUE_INDEX_MAP', KEYS[i]) local port_id = redis.call('HGET', 'COUNTERS_QUEUE_PORT_MAP', KEYS[i]) - local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' + -- If there is no entry in COUNTERS_QUEUE_INDEX_MAP or COUNTERS_QUEUE_PORT_MAP then + -- it means KEYS[i] queue is inserted into FLEX COUNTER DB but the corresponding + -- maps haven't been updated yet. + if queue_index and port_id then + local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. 
'_RX_PKTS' - local pfc_rx_packets = tonumber(redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key)) - local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') - -- DEBUG CODE START. Uncomment to enable - local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') - -- DEBUG CODE END. - if pfc_rx_packets_last then - pfc_rx_packets_last = tonumber(pfc_rx_packets_last) + local pfc_rx_packets = tonumber(redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key)) + local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') + -- DEBUG CODE START. Uncomment to enable + local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') + -- DEBUG CODE END. + if pfc_rx_packets_last then + pfc_rx_packets_last = tonumber(pfc_rx_packets_last) - -- Check actual condition of queue being restored from PFC storm - if (pfc_rx_packets - pfc_rx_packets_last == 0) - -- DEBUG CODE START. Uncomment to enable - and (debug_storm ~= "enabled") - -- DEBUG CODE END. - then - if time_left <= poll_time then - redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') - time_left = restoration_time + -- Check actual condition of queue being restored from PFC storm + if (pfc_rx_packets - pfc_rx_packets_last == 0) + -- DEBUG CODE START. Uncomment to enable + and (debug_storm ~= "enabled") + -- DEBUG CODE END. + then + if time_left <= poll_time then + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') + time_left = restoration_time + else + time_left = time_left - poll_time + end else - time_left = time_left - poll_time + time_left = restoration_time end - else - time_left = restoration_time end - end - -- Save values for next run - redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_RESTORATION_TIME_LEFT', time_left) - redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) + -- Save values for next run + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_RESTORATION_TIME_LEFT', time_left) + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) + end end end diff --git a/orchagent/pfc_restore_cisco-8000.lua b/orchagent/pfc_restore_cisco-8000.lua index 686de0464b..172e67b960 100644 --- a/orchagent/pfc_restore_cisco-8000.lua +++ b/orchagent/pfc_restore_cisco-8000.lua @@ -44,7 +44,7 @@ for i = n, 1, -1 do and (debug_storm ~= "enabled") -- DEBUG CODE END. then - if time_left <= 0 then + if time_left <= poll_time then redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. 
'","restore"]') time_left = restoration_time else diff --git a/orchagent/pfcactionhandler.cpp b/orchagent/pfcactionhandler.cpp index e44521f849..6fb497812d 100644 --- a/orchagent/pfcactionhandler.cpp +++ b/orchagent/pfcactionhandler.cpp @@ -3,6 +3,7 @@ #include "logger.h" #include "sai_serialize.h" #include "portsorch.h" +#include "bufferorch.h" #include #include @@ -26,6 +27,7 @@ extern sai_object_id_t gSwitchId; extern PortsOrch *gPortsOrch; extern AclOrch * gAclOrch; +extern BufferOrch *gBufferOrch; extern sai_port_api_t *sai_port_api; extern sai_queue_api_t *sai_queue_api; extern sai_buffer_api_t *sai_buffer_api; @@ -221,7 +223,7 @@ void PfcWdActionHandler::updateWdCounters(const string& queueIdStr, const PfcWdQ PfcWdSaiDlrInitHandler::PfcWdSaiDlrInitHandler(sai_object_id_t port, sai_object_id_t queue, uint8_t queueId, shared_ptr countersTable): - PfcWdActionHandler(port, queue, queueId, countersTable) + PfcWdZeroBufferHandler(port, queue, queueId, countersTable) { SWSS_LOG_ENTER(); @@ -262,39 +264,6 @@ PfcWdSaiDlrInitHandler::~PfcWdSaiDlrInitHandler(void) } } -bool PfcWdSaiDlrInitHandler::getHwCounters(PfcWdHwStats& counters) -{ - SWSS_LOG_ENTER(); - - static const vector queueStatIds = - { - SAI_QUEUE_STAT_PACKETS, - SAI_QUEUE_STAT_DROPPED_PACKETS, - }; - - vector queueStats; - queueStats.resize(queueStatIds.size()); - - sai_status_t status = sai_queue_api->get_queue_stats( - getQueue(), - static_cast(queueStatIds.size()), - queueStatIds.data(), - queueStats.data()); - - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to fetch queue 0x%" PRIx64 " stats: %d", getQueue(), status); - return false; - } - - counters.txPkt = queueStats[0]; - counters.txDropPkt = queueStats[1]; - counters.rxPkt = 0; - counters.rxDropPkt = 0; - - return true; -} - PfcWdAclHandler::PfcWdAclHandler(sai_object_id_t port, sai_object_id_t queue, uint8_t queueId, shared_ptr
countersTable): PfcWdLossyHandler(port, queue, queueId, countersTable) @@ -472,6 +441,14 @@ PfcWdLossyHandler::PfcWdLossyHandler(sai_object_id_t port, sai_object_id_t queue { SWSS_LOG_ENTER(); + string platform = getenv("platform") ? getenv("platform") : ""; + if (platform == CISCO_8000_PLATFORM_SUBSTRING) + { + SWSS_LOG_DEBUG("Skipping in constructor PfcWdLossyHandler for platform %s on port 0x%" PRIx64, + platform.c_str(), port); + return; + } + uint8_t pfcMask = 0; if (!gPortsOrch->getPortPfc(port, &pfcMask)) @@ -491,6 +468,14 @@ PfcWdLossyHandler::~PfcWdLossyHandler(void) { SWSS_LOG_ENTER(); + string platform = getenv("platform") ? getenv("platform") : ""; + if (platform == CISCO_8000_PLATFORM_SUBSTRING) + { + SWSS_LOG_DEBUG("Skipping in destructor PfcWdLossyHandler for platform %s on port 0x%" PRIx64, + platform.c_str(), getPort()); + return; + } + uint8_t pfcMask = 0; if (!gPortsOrch->getPortPfc(getPort(), &pfcMask)) @@ -732,6 +717,25 @@ PfcWdZeroBufferHandler::ZeroBufferProfile &PfcWdZeroBufferHandler::ZeroBufferPro return instance; } +sai_object_id_t& PfcWdZeroBufferHandler::ZeroBufferProfile::getPool(bool ingress) +{ + // If there is a cached zero buffer pool, just use it + // else fetch zero buffer pool from buffer orch + // If there is one, use it and increase the reference number. + // otherwise, just return NULL OID + // PfcWdZeroBufferHandler will create it later and notify buffer orch later + auto &poolId = ingress ? m_zeroIngressBufferPool : m_zeroEgressBufferPool; + if (poolId == SAI_NULL_OBJECT_ID) + { + poolId = gBufferOrch->getZeroBufferPool(ingress); + if (poolId != SAI_NULL_OBJECT_ID) + { + gBufferOrch->lockZeroBufferPool(ingress); + } + } + return poolId; +} + sai_object_id_t PfcWdZeroBufferHandler::ZeroBufferProfile::getZeroBufferProfile(bool ingress) { SWSS_LOG_ENTER(); @@ -750,29 +754,39 @@ void PfcWdZeroBufferHandler::ZeroBufferProfile::createZeroBufferProfile(bool ing sai_attribute_t attr; vector attribs; + sai_status_t status; - // Create zero pool - attr.id = SAI_BUFFER_POOL_ATTR_SIZE; - attr.value.u64 = 0; - attribs.push_back(attr); + auto &poolId = getPool(ingress); - attr.id = SAI_BUFFER_POOL_ATTR_TYPE; - attr.value.u32 = ingress ? SAI_BUFFER_POOL_TYPE_INGRESS : SAI_BUFFER_POOL_TYPE_EGRESS; - attribs.push_back(attr); + if (SAI_NULL_OBJECT_ID == poolId) + { + // Create zero pool + attr.id = SAI_BUFFER_POOL_ATTR_SIZE; + attr.value.u64 = 0; + attribs.push_back(attr); - attr.id = SAI_BUFFER_POOL_ATTR_THRESHOLD_MODE; - attr.value.u32 = SAI_BUFFER_POOL_THRESHOLD_MODE_DYNAMIC; - attribs.push_back(attr); + attr.id = SAI_BUFFER_POOL_ATTR_TYPE; + attr.value.u32 = ingress ? 
SAI_BUFFER_POOL_TYPE_INGRESS : SAI_BUFFER_POOL_TYPE_EGRESS; + attribs.push_back(attr); - sai_status_t status = sai_buffer_api->create_buffer_pool( - &getPool(ingress), + attr.id = SAI_BUFFER_POOL_ATTR_THRESHOLD_MODE; + attr.value.u32 = SAI_BUFFER_POOL_THRESHOLD_MODE_STATIC; + attribs.push_back(attr); + + status = sai_buffer_api->create_buffer_pool( + &poolId, gSwitchId, static_cast(attribs.size()), attribs.data()); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to create dynamic zero buffer pool for PFC WD: %d", status); - return; + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create dynamic zero buffer pool for PFC WD: %d", status); + return; + } + + // Pass the ownership to BufferOrch + gBufferOrch->setZeroBufferPool(ingress, poolId); + gBufferOrch->lockZeroBufferPool(ingress); } // Create zero profile @@ -783,15 +797,15 @@ void PfcWdZeroBufferHandler::ZeroBufferProfile::createZeroBufferProfile(bool ing attribs.push_back(attr); attr.id = SAI_BUFFER_PROFILE_ATTR_THRESHOLD_MODE; - attr.value.u32 = SAI_BUFFER_PROFILE_THRESHOLD_MODE_DYNAMIC; + attr.value.u32 = SAI_BUFFER_PROFILE_THRESHOLD_MODE_STATIC; attribs.push_back(attr); attr.id = SAI_BUFFER_PROFILE_ATTR_BUFFER_SIZE; attr.value.u64 = 0; attribs.push_back(attr); - attr.id = SAI_BUFFER_PROFILE_ATTR_SHARED_DYNAMIC_TH; - attr.value.s8 = -8; // ALPHA_0 + attr.id = SAI_BUFFER_PROFILE_ATTR_SHARED_STATIC_TH; + attr.value.s8 = 0; attribs.push_back(attr); status = sai_buffer_api->create_buffer_profile( @@ -810,16 +824,19 @@ void PfcWdZeroBufferHandler::ZeroBufferProfile::destroyZeroBufferProfile(bool in { SWSS_LOG_ENTER(); - sai_status_t status = sai_buffer_api->remove_buffer_profile(getProfile(ingress)); - if (status != SAI_STATUS_SUCCESS) + if (getProfile(ingress) != SAI_NULL_OBJECT_ID) { - SWSS_LOG_ERROR("Failed to remove static zero buffer profile for PFC WD: %d", status); - return; + sai_status_t status = sai_buffer_api->remove_buffer_profile(getProfile(ingress)); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove static zero buffer profile for PFC WD: %d", status); + return; + } } - status = sai_buffer_api->remove_buffer_pool(getPool(ingress)); - if (status != SAI_STATUS_SUCCESS) + auto &pool = ingress ? m_zeroIngressBufferPool : m_zeroEgressBufferPool; + if (pool != SAI_NULL_OBJECT_ID) { - SWSS_LOG_ERROR("Failed to remove static zero buffer pool for PFC WD: %d", status); + gBufferOrch->unlockZeroBufferPool(ingress); } } diff --git a/orchagent/pfcactionhandler.h b/orchagent/pfcactionhandler.h index 23cabaee10..22908fbe08 100644 --- a/orchagent/pfcactionhandler.h +++ b/orchagent/pfcactionhandler.h @@ -148,10 +148,7 @@ class PfcWdZeroBufferHandler: public PfcWdLossyHandler return ingress ? m_zeroIngressBufferProfile : m_zeroEgressBufferProfile; } - sai_object_id_t& getPool(bool ingress) - { - return ingress ? m_zeroIngressBufferPool : m_zeroEgressBufferPool; - } + sai_object_id_t& getPool(bool ingress); sai_object_id_t m_zeroIngressBufferPool = SAI_NULL_OBJECT_ID; sai_object_id_t m_zeroEgressBufferPool = SAI_NULL_OBJECT_ID; @@ -165,13 +162,12 @@ class PfcWdZeroBufferHandler: public PfcWdLossyHandler // PFC queue that implements drop action by draining queue via SAI // attribute SAI_QUEUE_ATTR_PFC_DLR_INIT. -class PfcWdSaiDlrInitHandler: public PfcWdActionHandler +class PfcWdSaiDlrInitHandler: public PfcWdZeroBufferHandler { public: PfcWdSaiDlrInitHandler(sai_object_id_t port, sai_object_id_t queue, uint8_t queueId, shared_ptr
countersTable); virtual ~PfcWdSaiDlrInitHandler(void); - virtual bool getHwCounters(PfcWdHwStats& counters); }; #endif diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index ada1f4bb92..1434f98379 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -2172,7 +2172,12 @@ bool PortsOrch::createVlanHostIntf(Port& vl, string hostif_name) attrs.push_back(attr); attr.id = SAI_HOSTIF_ATTR_NAME; - strncpy(attr.value.chardata, hostif_name.c_str(), sizeof(attr.value.chardata)); + if (hostif_name.length() >= SAI_HOSTIF_NAME_SIZE) + { + SWSS_LOG_WARN("Host interface name %s is too long and will be truncated to %d bytes", hostif_name.c_str(), SAI_HOSTIF_NAME_SIZE - 1); + } + strncpy(attr.value.chardata, hostif_name.c_str(), SAI_HOSTIF_NAME_SIZE); + attr.value.chardata[SAI_HOSTIF_NAME_SIZE - 1] = '\0'; attrs.push_back(attr); sai_status_t status = sai_hostif_api->create_hostif(&vl.m_vlan_info.host_intf_id, gSwitchId, (uint32_t)attrs.size(), attrs.data()); @@ -2284,6 +2289,13 @@ sai_status_t PortsOrch::removePort(sai_object_id_t port_id) } /* else : port is in default state or not yet created */ + /* + * Remove port serdes (if exists) before removing port since this + * reference is dependency. + */ + + removePortSerdesAttribute(port_id); + sai_status_t status = sai_port_api->remove_port(port_id); if (status != SAI_STATUS_SUCCESS) { @@ -3839,6 +3851,17 @@ void PortsOrch::doLagMemberTask(Consumer &consumer) continue; } + if (!port.m_ingress_acl_tables_uset.empty() || !port.m_egress_acl_tables_uset.empty()) + { + SWSS_LOG_ERROR( + "Failed to add member %s to LAG %s: ingress/egress ACL configuration is present", + port.m_alias.c_str(), + lag.m_alias.c_str() + ); + it = consumer.m_toSync.erase(it); + continue; + } + if (!addLagMember(lag, port, (status == "enabled"))) { it++; @@ -4186,6 +4209,11 @@ bool PortsOrch::addHostIntfs(Port &port, string alias, sai_object_id_t &host_int attr.id = SAI_HOSTIF_ATTR_NAME; strncpy((char *)&attr.value.chardata, alias.c_str(), SAI_HOSTIF_NAME_SIZE); + if (alias.length() >= SAI_HOSTIF_NAME_SIZE) + { + SWSS_LOG_WARN("Host interface name %s is too long and will be truncated to %d bytes", alias.c_str(), SAI_HOSTIF_NAME_SIZE - 1); + } + attr.value.chardata[SAI_HOSTIF_NAME_SIZE - 1] = '\0'; attrs.push_back(attr); sai_status_t status = sai_hostif_api->create_hostif(&host_intfs_id, gSwitchId, (uint32_t)attrs.size(), attrs.data()); @@ -4917,7 +4945,7 @@ bool PortsOrch::addLag(string lag_alias, uint32_t spa_id, int32_t switch_id) auto lagport = m_portList.find(lag_alias); if (lagport != m_portList.end()) { - /* The deletion of bridgeport attached to the lag may still be + /* The deletion of bridgeport attached to the lag may still be * pending due to fdb entries still present on the lag. Wait * until the cleanup is done. */ @@ -5611,8 +5639,12 @@ void PortsOrch::doTask(NotificationConsumer &consumer) SWSS_LOG_NOTICE("%s oper speed is %d", port.m_alias.c_str(), speed); updateDbPortOperSpeed(port, speed); } + else + { + updateDbPortOperSpeed(port, 0); + } } - + /* update m_portList */ m_portList[port.m_alias] = port; } @@ -5672,9 +5704,9 @@ void PortsOrch::updateDbPortOperSpeed(Port &port, sai_uint32_t speed) SWSS_LOG_ENTER(); vector tuples; - FieldValueTuple tuple("speed", to_string(speed)); - tuples.push_back(tuple); - m_portTable->set(port.m_alias, tuples); + string speedStr = speed != 0 ? 
to_string(speed) : "N/A"; + tuples.emplace_back(std::make_pair("speed", speedStr)); + m_portStateTable.set(port.m_alias, tuples); // We don't set port.m_speed = speed here, because CONFIG_DB still hold the old // value. If we set it here, next time configure any attributes related port will @@ -5721,6 +5753,10 @@ void PortsOrch::refreshPortStatus() SWSS_LOG_INFO("%s oper speed is %d", port.m_alias.c_str(), speed); updateDbPortOperSpeed(port, speed); } + else + { + updateDbPortOperSpeed(port, 0); + } } } } diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index edd5db3443..d1a24cb5c9 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -25,6 +25,7 @@ extern sai_acl_api_t* sai_acl_api; extern SwitchOrch *gSwitchOrch; extern PortsOrch *gPortsOrch; +extern QosOrch *gQosOrch; extern sai_object_id_t gSwitchId; extern CrmOrch *gCrmOrch; @@ -150,6 +151,12 @@ task_process_status QosMapHandler::processWorkItem(Consumer& consumer) SWSS_LOG_ERROR("Object with name:%s not found.", qos_object_name.c_str()); return task_process_status::task_invalid_entry; } + if (gQosOrch->isObjectBeingReferenced(QosOrch::getTypeMap(), qos_map_type_name, qos_object_name)) + { + auto hint = gQosOrch->objectReferenceInfo(QosOrch::getTypeMap(), qos_map_type_name, qos_object_name); + SWSS_LOG_NOTICE("Can't remove object %s due to being referenced (%s)", qos_object_name.c_str(), hint.c_str()); + return task_process_status::task_need_retry; + } if (!removeQosItem(sai_object)) { SWSS_LOG_ERROR("Failed to remove dscp_to_tc map. db name:%s sai object:%" PRIx64, qos_object_name.c_str(), sai_object); @@ -243,6 +250,9 @@ void DscpToTcMapHandler::applyDscpToTcMapToSwitch(sai_attr_id_t attr_id, sai_obj return; } + if (map_id != gQosOrch->m_globalDscpToTcMap) + gQosOrch->m_globalDscpToTcMap = map_id; + SWSS_LOG_NOTICE("Applied DSCP_TO_TC QoS map to switch successfully"); } @@ -276,6 +286,41 @@ sai_object_id_t DscpToTcMapHandler::addQosItem(const vector &at return sai_object; } +bool DscpToTcMapHandler::removeQosItem(sai_object_id_t sai_object) +{ + SWSS_LOG_ENTER(); + + if (sai_object == gQosOrch->m_globalDscpToTcMap) + { + // The current global dscp to tc map is about to be removed. 
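+ // (m_globalDscpToTcMap caches the OID that applyDscpToTcMapToSwitch last bound to
+ // SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP, so we know whether the switch-level binding is affected.)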
+ // Find another one to set to the switch or NULL in case this is the last one + const auto &dscpToTcObjects = (*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME]); + bool found = false; + for (const auto &ref : dscpToTcObjects) + { + if (ref.second.m_saiObjectId == sai_object) + continue; + SWSS_LOG_NOTICE("Current global dscp_to_tc map is about to be removed, set it to %s %" PRIx64, ref.first.c_str(), ref.second.m_saiObjectId); + applyDscpToTcMapToSwitch(SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP, ref.second.m_saiObjectId); + found = true; + break; + } + if (!found) + { + applyDscpToTcMapToSwitch(SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP, SAI_NULL_OBJECT_ID); + } + } + + SWSS_LOG_DEBUG("Removing DscpToTcMap object:%" PRIx64, sai_object); + sai_status_t sai_status = sai_qos_map_api->remove_qos_map(sai_object); + if (SAI_STATUS_SUCCESS != sai_status) + { + SWSS_LOG_ERROR("Failed to remove DSCP_TO_TC map, status:%d", sai_status); + return false; + } + return true; +} + task_process_status QosOrch::handleDscpToTcTable(Consumer& consumer) { SWSS_LOG_ENTER(); @@ -840,7 +885,7 @@ bool DscpToFcMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple & { SWSS_LOG_ENTER(); - sai_uint8_t max_fc_val = NhgMapOrch::getMaxFcVal(); + sai_uint8_t max_num_fcs = NhgMapOrch::getMaxNumFcs(); sai_attribute_t list_attr; list_attr.id = SAI_QOS_MAP_ATTR_MAP_TO_VALUE_LIST; @@ -867,10 +912,11 @@ bool DscpToFcMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple & } list_attr.value.qosmap.list[ind].key.dscp = static_cast(value); + // FC value must be in range [0, max_num_fcs) value = stoi(fvValue(*i)); - if ((value < 0) || (value > max_fc_val)) + if ((value < 0) || (value >= max_num_fcs)) { - SWSS_LOG_ERROR("FC value %d is either negative, or bigger than max value %d", value, max_fc_val); + SWSS_LOG_ERROR("FC value %d is either negative, or bigger than max value %d", value, max_num_fcs - 1); delete[] list_attr.value.qosmap.list; return false; } @@ -933,7 +979,7 @@ bool ExpToFcMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &t { SWSS_LOG_ENTER(); - sai_uint8_t max_fc_val = NhgMapOrch::getMaxFcVal(); + sai_uint8_t max_num_fcs = NhgMapOrch::getMaxNumFcs(); sai_attribute_t list_attr; list_attr.id = SAI_QOS_MAP_ATTR_MAP_TO_VALUE_LIST; @@ -960,10 +1006,11 @@ bool ExpToFcMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &t } list_attr.value.qosmap.list[ind].key.mpls_exp = static_cast(value); + // FC value must be in range [0, max_num_fcs) value = stoi(fvValue(*i)); - if ((value < 0) || (value > max_fc_val)) + if ((value < 0) || (value >= max_num_fcs)) { - SWSS_LOG_ERROR("FC value %d is either negative, or bigger than max value %hu", value, max_fc_val); + SWSS_LOG_ERROR("FC value %d is either negative, or bigger than max value %hu", value, max_num_fcs - 1); delete[] list_attr.value.qosmap.list; return false; } @@ -1193,6 +1240,12 @@ task_process_status QosOrch::handleSchedulerTable(Consumer& consumer) SWSS_LOG_ERROR("Object with name:%s not found.", qos_object_name.c_str()); return task_process_status::task_invalid_entry; } + if (gQosOrch->isObjectBeingReferenced(QosOrch::getTypeMap(), qos_map_type_name, qos_object_name)) + { + auto hint = gQosOrch->objectReferenceInfo(QosOrch::getTypeMap(), qos_map_type_name, qos_object_name); + SWSS_LOG_NOTICE("Can't remove object %s due to being referenced (%s)", qos_object_name.c_str(), hint.c_str()); + return task_process_status::task_need_retry; + } sai_status = sai_scheduler_api->remove_scheduler(sai_object); if (SAI_STATUS_SUCCESS != 
sai_status) { @@ -1433,6 +1486,94 @@ task_process_status QosOrch::handleQueueTable(Consumer& consumer) SWSS_LOG_ERROR("Failed to parse range:%s", tokens[1].c_str()); return task_process_status::task_invalid_entry; } + + bool donotChangeScheduler = false; + bool donotChangeWredProfile = false; + sai_object_id_t sai_scheduler_profile; + sai_object_id_t sai_wred_profile; + + if (op == SET_COMMAND) + { + string scheduler_profile_name; + resolve_result = resolveFieldRefValue(m_qos_maps, scheduler_field_name, + qos_to_ref_table_map.at(scheduler_field_name), tuple, + sai_scheduler_profile, scheduler_profile_name); + if (ref_resolve_status::success != resolve_result) + { + if (resolve_result != ref_resolve_status::field_not_found) + { + if(ref_resolve_status::not_resolved == resolve_result) + { + SWSS_LOG_INFO("Missing or invalid scheduler reference"); + return task_process_status::task_need_retry; + } + SWSS_LOG_ERROR("Resolving scheduler reference failed"); + return task_process_status::task_failed; + } + + if (doesObjectExist(m_qos_maps, CFG_QUEUE_TABLE_NAME, key, scheduler_field_name, scheduler_profile_name)) + { + SWSS_LOG_NOTICE("QUEUE|%s %s was configured but is not any more. Remove it", key.c_str(), scheduler_field_name.c_str()); + removeMeFromObjsReferencedByMe(m_qos_maps, CFG_QUEUE_TABLE_NAME, key, scheduler_field_name, scheduler_profile_name); + sai_scheduler_profile = SAI_NULL_OBJECT_ID; + } + else + { + // Did not exist and do not exist. No action + donotChangeScheduler = true; + } + } + else + { + setObjectReference(m_qos_maps, CFG_QUEUE_TABLE_NAME, key, scheduler_field_name, scheduler_profile_name); + SWSS_LOG_INFO("QUEUE %s Field %s %s has been resolved to %" PRIx64 , key.c_str(), scheduler_field_name.c_str(), scheduler_profile_name.c_str(), sai_scheduler_profile); + } + + string wred_profile_name; + resolve_result = resolveFieldRefValue(m_qos_maps, wred_profile_field_name, + qos_to_ref_table_map.at(wred_profile_field_name), tuple, + sai_wred_profile, wred_profile_name); + if (ref_resolve_status::success != resolve_result) + { + if (resolve_result != ref_resolve_status::field_not_found) + { + if(ref_resolve_status::not_resolved == resolve_result) + { + SWSS_LOG_INFO("Missing or invalid wred profile reference"); + return task_process_status::task_need_retry; + } + SWSS_LOG_ERROR("Resolving wred profile reference failed"); + return task_process_status::task_failed; + } + + if (doesObjectExist(m_qos_maps, CFG_QUEUE_TABLE_NAME, key, wred_profile_field_name, wred_profile_name)) + { + SWSS_LOG_NOTICE("QUEUE|%s %s was configured but is not any more. 
Remove it", key.c_str(), wred_profile_field_name.c_str()); + removeMeFromObjsReferencedByMe(m_qos_maps, CFG_QUEUE_TABLE_NAME, key, wred_profile_field_name, wred_profile_name); + sai_wred_profile = SAI_NULL_OBJECT_ID; + } + else + { + donotChangeWredProfile = true; + } + } + else + { + setObjectReference(m_qos_maps, CFG_QUEUE_TABLE_NAME, key, wred_profile_field_name, wred_profile_name); + } + } + else if (op == DEL_COMMAND) + { + removeObject(QosOrch::getTypeMap(), CFG_QUEUE_TABLE_NAME, key); + sai_scheduler_profile = SAI_NULL_OBJECT_ID; + sai_wred_profile = SAI_NULL_OBJECT_ID; + } + else + { + SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); + return task_process_status::task_invalid_entry; + } + for (string port_name : port_names) { Port port; @@ -1447,27 +1588,11 @@ task_process_status QosOrch::handleQueueTable(Consumer& consumer) { queue_ind = ind; SWSS_LOG_DEBUG("processing queue:%zd", queue_ind); - sai_object_id_t sai_scheduler_profile; - string scheduler_profile_name; - resolve_result = resolveFieldRefValue(m_qos_maps, scheduler_field_name, - qos_to_ref_table_map.at(scheduler_field_name), tuple, - sai_scheduler_profile, scheduler_profile_name); - if (ref_resolve_status::success == resolve_result) + + if (!donotChangeScheduler) { - if (op == SET_COMMAND) - { - result = applySchedulerToQueueSchedulerGroup(port, queue_ind, sai_scheduler_profile); - } - else if (op == DEL_COMMAND) - { - // NOTE: The map is un-bound from the port. But the map itself still exists. - result = applySchedulerToQueueSchedulerGroup(port, queue_ind, SAI_NULL_OBJECT_ID); - } - else - { - SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); - return task_process_status::task_invalid_entry; - } + result = applySchedulerToQueueSchedulerGroup(port, queue_ind, sai_scheduler_profile); + if (!result) { SWSS_LOG_ERROR("Failed setting field:%s to port:%s, queue:%zd, line:%d", scheduler_field_name.c_str(), port.m_alias.c_str(), queue_ind, __LINE__); @@ -1475,38 +1600,11 @@ task_process_status QosOrch::handleQueueTable(Consumer& consumer) } SWSS_LOG_DEBUG("Applied scheduler to port:%s", port_name.c_str()); } - else if (resolve_result != ref_resolve_status::field_not_found) - { - if(ref_resolve_status::not_resolved == resolve_result) - { - SWSS_LOG_INFO("Missing or invalid scheduler reference"); - return task_process_status::task_need_retry; - } - SWSS_LOG_ERROR("Resolving scheduler reference failed"); - return task_process_status::task_failed; - } - sai_object_id_t sai_wred_profile; - string wred_profile_name; - resolve_result = resolveFieldRefValue(m_qos_maps, wred_profile_field_name, - qos_to_ref_table_map.at(wred_profile_field_name), tuple, - sai_wred_profile, wred_profile_name); - if (ref_resolve_status::success == resolve_result) + if (!donotChangeWredProfile) { - if (op == SET_COMMAND) - { - result = applyWredProfileToQueue(port, queue_ind, sai_wred_profile); - } - else if (op == DEL_COMMAND) - { - // NOTE: The map is un-bound from the port. But the map itself still exists. 
- result = applyWredProfileToQueue(port, queue_ind, SAI_NULL_OBJECT_ID); - } - else - { - SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); - return task_process_status::task_invalid_entry; - } + result = applyWredProfileToQueue(port, queue_ind, sai_wred_profile); + if (!result) { SWSS_LOG_ERROR("Failed setting field:%s to port:%s, queue:%zd, line:%d", wred_profile_field_name.c_str(), port.m_alias.c_str(), queue_ind, __LINE__); @@ -1514,31 +1612,6 @@ task_process_status QosOrch::handleQueueTable(Consumer& consumer) } SWSS_LOG_DEBUG("Applied wred profile to port:%s", port_name.c_str()); } - else if (resolve_result != ref_resolve_status::field_not_found) - { - if (ref_resolve_status::empty == resolve_result) - { - SWSS_LOG_INFO("Missing wred reference. Unbind wred profile from queue"); - // NOTE: The wred profile is un-bound from the port. But the wred profile itself still exists - // and stays untouched. - result = applyWredProfileToQueue(port, queue_ind, SAI_NULL_OBJECT_ID); - if (!result) - { - SWSS_LOG_ERROR("Failed unbinding field:%s from port:%s, queue:%zd, line:%d", wred_profile_field_name.c_str(), port.m_alias.c_str(), queue_ind, __LINE__); - return task_process_status::task_failed; - } - } - else if (ref_resolve_status::not_resolved == resolve_result) - { - SWSS_LOG_INFO("Invalid wred reference"); - return task_process_status::task_need_retry; - } - else - { - SWSS_LOG_ERROR("Resolving wred reference failed"); - return task_process_status::task_failed; - } - } } } SWSS_LOG_DEBUG("finished"); @@ -1624,6 +1697,60 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer) KeyOpFieldsValuesTuple tuple = consumer.m_toSync.begin()->second; string key = kfvKey(tuple); string op = kfvOp(tuple); + vector port_names = tokenize(key, list_item_delimiter); + + if (op == DEL_COMMAND) + { + /* Handle DEL command. Just set all the maps to oid:0x0 */ + for (string port_name : port_names) + { + Port port; + + /* Skip port which is not found */ + if (!gPortsOrch->getPort(port_name, port)) + { + SWSS_LOG_ERROR("Failed to apply QoS maps to port %s. 
Port is not found.", port_name.c_str()); + continue; + } + + for (auto &mapRef : qos_to_attr_map) + { + string referenced_obj; + if (!doesObjectExist(m_qos_maps, CFG_PORT_QOS_MAP_TABLE_NAME, key, mapRef.first, referenced_obj)) + { + continue; + } + + sai_attribute_t attr; + attr.id = mapRef.second; + attr.value.oid = SAI_NULL_OBJECT_ID; + + sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove %s on port %s, rv:%d", + mapRef.first.c_str(), port_name.c_str(), status); + task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); + if (handle_status != task_process_status::task_success) + { + return task_process_status::task_invalid_entry; + } + } + SWSS_LOG_INFO("Removed %s on port %s", mapRef.first.c_str(), port_name.c_str()); + } + + if (!gPortsOrch->setPortPfc(port.m_port_id, 0)) + { + SWSS_LOG_ERROR("Failed to disable PFC on port %s", port_name.c_str()); + } + + SWSS_LOG_INFO("Disabled PFC on port %s", port_name.c_str()); + } + + removeObject(m_qos_maps, CFG_PORT_QOS_MAP_TABLE_NAME, key); + + return task_process_status::task_success; + } sai_uint8_t pfc_enable = 0; map> update_list; @@ -1634,7 +1761,7 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer) { sai_object_id_t id; string object_name; - string map_type_name = fvField(*it), map_name = fvValue(*it); + string &map_type_name = fvField(*it), &map_name = fvValue(*it); ref_resolve_status status = resolveFieldRefValue(m_qos_maps, map_type_name, qos_to_ref_table_map.at(map_type_name), tuple, id, object_name); if (status != ref_resolve_status::success) @@ -1644,6 +1771,7 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer) } update_list[qos_to_attr_map[map_type_name]] = make_pair(map_name, id); + setObjectReference(m_qos_maps, CFG_PORT_QOS_MAP_TABLE_NAME, key, map_type_name, object_name); } if (fvField(*it) == pfc_enable_name) @@ -1658,7 +1786,23 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer) } } - vector port_names = tokenize(key, list_item_delimiter); + /* Remove any map that was configured but isn't there any longer. */ + for (auto &mapRef : qos_to_attr_map) + { + auto &sai_attribute = mapRef.second; + if (update_list.find(sai_attribute) == update_list.end()) + { + string referenced_obj; + if (!doesObjectExist(m_qos_maps, CFG_PORT_QOS_MAP_TABLE_NAME, key, mapRef.first, referenced_obj)) + { + continue; + } + SWSS_LOG_NOTICE("PORT_QOS_MAP|%s %s was configured but is not any more. 
Remove it", key.c_str(), mapRef.first.c_str()); + removeMeFromObjsReferencedByMe(m_qos_maps, CFG_PORT_QOS_MAP_TABLE_NAME, key, mapRef.first, referenced_obj); + update_list[mapRef.second] = make_pair("NULL", SAI_NULL_OBJECT_ID); + } + } + for (string port_name : port_names) { Port port; @@ -1692,7 +1836,7 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer) } sai_uint8_t old_pfc_enable = 0; - if (!gPortsOrch->getPortPfc(port.m_port_id, &old_pfc_enable)) + if (!gPortsOrch->getPortPfc(port.m_port_id, &old_pfc_enable)) { SWSS_LOG_ERROR("Failed to retrieve PFC bits on port %s", port_name.c_str()); } diff --git a/orchagent/qosorch.h b/orchagent/qosorch.h index cd265d59ec..613bc7437e 100644 --- a/orchagent/qosorch.h +++ b/orchagent/qosorch.h @@ -72,6 +72,7 @@ class DscpToTcMapHandler : public QosMapHandler public: bool convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attributes) override; sai_object_id_t addQosItem(const vector &attributes) override; + bool removeQosItem(sai_object_id_t sai_object); protected: void applyDscpToTcMapToSwitch(sai_attr_id_t attr_id, sai_object_id_t sai_dscp_to_tc_map); }; @@ -196,5 +197,11 @@ class QosOrch : public Orch }; std::unordered_map m_scheduler_group_port_info; + + // SAI OID of the global dscp to tc map + sai_object_id_t m_globalDscpToTcMap; + + friend QosMapHandler; + friend DscpToTcMapHandler; }; #endif /* SWSS_QOSORCH_H */ diff --git a/orchagent/routeorch.cpp b/orchagent/routeorch.cpp index 9a6f367d0a..8894521277 100644 --- a/orchagent/routeorch.cpp +++ b/orchagent/routeorch.cpp @@ -376,6 +376,13 @@ bool RouteOrch::validnexthopinNextHopGroup(const NextHopKey &nexthop, uint32_t& nhgm_attrs.push_back(nhgm_attr); } + if (m_switchOrch->checkOrderedEcmpEnable()) + { + nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID; + nhgm_attr.value.u32 = nhopgroup->second.nhopgroup_members[nexthop].seq_id; + nhgm_attrs.push_back(nhgm_attr); + } + status = sai_next_hop_group_api->create_next_hop_group_member(&nexthop_id, gSwitchId, (uint32_t)nhgm_attrs.size(), nhgm_attrs.data()); @@ -393,7 +400,7 @@ bool RouteOrch::validnexthopinNextHopGroup(const NextHopKey &nexthop, uint32_t& ++count; gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER); - nhopgroup->second.nhopgroup_members[nexthop] = nexthop_id; + nhopgroup->second.nhopgroup_members[nexthop].next_hop_id = nexthop_id; } if (!m_fgNhgOrch->validNextHopInNextHopGroup(nexthop)) @@ -421,7 +428,7 @@ bool RouteOrch::invalidnexthopinNextHopGroup(const NextHopKey &nexthop, uint32_t continue; } - nexthop_id = nhopgroup->second.nhopgroup_members[nexthop]; + nexthop_id = nhopgroup->second.nhopgroup_members[nexthop].next_hop_id; status = sai_next_hop_group_api->remove_next_hop_group_member(nexthop_id); if (status != SAI_STATUS_SUCCESS) @@ -790,6 +797,11 @@ void RouteOrch::doTask(Consumer& consumer) } } + sai_route_entry_t route_entry; + route_entry.vr_id = vrf_id; + route_entry.switch_id = gSwitchId; + copy(route_entry.destination, ip_prefix); + if (nhg.getSize() == 1 && nhg.hasIntfNextHop()) { if (alsv[0] == "unknown") @@ -833,6 +845,7 @@ void RouteOrch::doTask(Consumer& consumer) else if (m_syncdRoutes.find(vrf_id) == m_syncdRoutes.end() || m_syncdRoutes.at(vrf_id).find(ip_prefix) == m_syncdRoutes.at(vrf_id).end() || m_syncdRoutes.at(vrf_id).at(ip_prefix) != RouteNhg(nhg, ctx.nhg_index) || + gRouteBulker.bulk_entry_pending_removal(route_entry) || ctx.using_temp_nhg) { if (addRoute(ctx, nhg)) @@ -1235,7 +1248,7 @@ bool RouteOrch::addNextHopGroup(const 
NextHopGroupKey &nexthops) vector nhg_attrs; nhg_attr.id = SAI_NEXT_HOP_GROUP_ATTR_TYPE; - nhg_attr.value.s32 = SAI_NEXT_HOP_GROUP_TYPE_ECMP; + nhg_attr.value.s32 = m_switchOrch->checkOrderedEcmpEnable() ? SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_ORDERED_ECMP : SAI_NEXT_HOP_GROUP_TYPE_ECMP; nhg_attrs.push_back(nhg_attr); sai_object_id_t next_hop_group_id; @@ -1289,6 +1302,13 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops) nhgm_attrs.push_back(nhgm_attr); } + if (m_switchOrch->checkOrderedEcmpEnable()) + { + nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID; + nhgm_attr.value.u32 = ((uint32_t)i) + 1; // To make non-zero sequence id + nhgm_attrs.push_back(nhgm_attr); + } + gNextHopGroupMemberBulker.create_entry(&nhgm_ids[i], (uint32_t)nhgm_attrs.size(), nhgm_attrs.data()); @@ -1313,7 +1333,8 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops) if (nhopgroup_shared_set.find(nhid) != nhopgroup_shared_set.end()) { auto it = nhopgroup_shared_set[nhid].begin(); - next_hop_group_entry.nhopgroup_members[*it] = nhgm_id; + next_hop_group_entry.nhopgroup_members[*it].next_hop_id = nhgm_id; + next_hop_group_entry.nhopgroup_members[*it].seq_id = (uint32_t)i + 1; nhopgroup_shared_set[nhid].erase(it); if (nhopgroup_shared_set[nhid].empty()) { @@ -1322,7 +1343,8 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops) } else { - next_hop_group_entry.nhopgroup_members[nhopgroup_members_set.find(nhid)->second] = nhgm_id; + next_hop_group_entry.nhopgroup_members[nhopgroup_members_set.find(nhid)->second].next_hop_id = nhgm_id; + next_hop_group_entry.nhopgroup_members[nhopgroup_members_set.find(nhid)->second].seq_id = ((uint32_t)i) + 1; } } @@ -1367,12 +1389,12 @@ bool RouteOrch::removeNextHopGroup(const NextHopGroupKey &nexthops) if (m_neighOrch->isNextHopFlagSet(nhop->first, NHFLAGS_IFDOWN)) { SWSS_LOG_WARN("NHFLAGS_IFDOWN set for next hop group member %s with next_hop_id %" PRIx64, - nhop->first.to_string().c_str(), nhop->second); + nhop->first.to_string().c_str(), nhop->second.next_hop_id); nhop = nhgm.erase(nhop); continue; } - next_hop_ids.push_back(nhop->second); + next_hop_ids.push_back(nhop->second.next_hop_id); nhop = nhgm.erase(nhop); } @@ -1826,8 +1848,12 @@ bool RouteOrch::addRoute(RouteBulkContext& ctx, const NextHopGroupKey &nextHops) * in m_syncdRoutes, then we need to update the route with a new next hop * (group) id. The old next hop (group) is then not used and the reference * count will decrease by 1. + * + * In case the entry is already pending removal in the bulk, it would be removed + * from m_syncdRoutes during the bulk call. Therefore, such entries need to be + * re-created rather than set attribute. */ - if (it_route == m_syncdRoutes.at(vrf_id).end()) + if (it_route == m_syncdRoutes.at(vrf_id).end() || gRouteBulker.bulk_entry_pending_removal(route_entry)) { if (blackhole) { @@ -1870,6 +1896,25 @@ bool RouteOrch::addRoute(RouteBulkContext& ctx, const NextHopGroupKey &nextHops) } else { + if (!blackhole && vrf_id == gVirtualRouterId && ipPrefix.isDefaultRoute()) + { + // Always set packet action for default route to avoid conflict settings + // in case a SET follows a DEL on the default route in the same bulk. 
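+ // The problematic sequence within one bulk is: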
+ // - On DEL default route, the packet action will be set to DROP + // - On SET default route, as the default route has NOT been removed from m_syncdRoute + // it calls SAI set_route_attributes instead of crate_route + // However, packet action is called only when a route entry is created + // This leads to conflict settings: + // - packet action: DROP + // - next hop: a valid next hop id + // To avoid this, we always set packet action for default route. + route_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + route_attr.value.s32 = SAI_PACKET_ACTION_FORWARD; + + object_statuses.emplace_back(); + gRouteBulker.set_entry_attribute(&object_statuses.back(), &route_entry, &route_attr); + } + route_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; route_attr.value.oid = next_hop_id; diff --git a/orchagent/routeorch.h b/orchagent/routeorch.h index 3fc8f28686..3c0ea05e59 100644 --- a/orchagent/routeorch.h +++ b/orchagent/routeorch.h @@ -24,7 +24,13 @@ #define LOOPBACK_PREFIX "Loopback" -typedef std::map NextHopGroupMembers; +struct NextHopGroupMemberEntry +{ + sai_object_id_t next_hop_id; // next hop sai oid + uint32_t seq_id; // Sequence Id of nexthop in the group +}; + +typedef std::map NextHopGroupMembers; struct NhgBase; diff --git a/orchagent/switchorch.cpp b/orchagent/switchorch.cpp index 1251a956e3..48ecd1fd35 100644 --- a/orchagent/switchorch.cpp +++ b/orchagent/switchorch.cpp @@ -1,4 +1,5 @@ #include +#include #include #include "switchorch.h" @@ -26,6 +27,7 @@ const map switch_attribute_map = {"ecmp_hash_seed", SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_SEED}, {"lag_hash_seed", SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_SEED}, {"fdb_aging_time", SAI_SWITCH_ATTR_FDB_AGING_TIME}, + {"debug_shell_enable", SAI_SWITCH_ATTR_SWITCH_SHELL_ENABLE}, {"vxlan_port", SAI_SWITCH_ATTR_VXLAN_DEFAULT_PORT}, {"vxlan_router_mac", SAI_SWITCH_ATTR_VXLAN_DEFAULT_ROUTER_MAC} }; @@ -43,6 +45,9 @@ const map packet_action_map = {"trap", SAI_PACKET_ACTION_TRAP} }; + +const std::set switch_non_sai_attribute_set = {"ordered_ecmp"}; + SwitchOrch::SwitchOrch(DBConnector *db, vector& connectors, TableConnector switchTable): Orch(connectors), m_switchTable(switchTable.first, switchTable.second), @@ -223,7 +228,51 @@ void SwitchOrch::doCfgSensorsTableTask(Consumer &consumer) } } +void SwitchOrch::setSwitchNonSaiAttributes(swss::FieldValueTuple &val) +{ + auto attribute = fvField(val); + auto value = fvValue(val); + if (attribute == "ordered_ecmp") + { + vector fvVector; + if (value == "true") + { + const auto* meta = sai_metadata_get_attr_metadata(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, SAI_NEXT_HOP_GROUP_ATTR_TYPE); + if (meta && meta->isenum) + { + vector values_list(meta->enummetadata->valuescount); + sai_s32_list_t values; + values.count = static_cast(values_list.size()); + values.list = values_list.data(); + + auto status = sai_query_attribute_enum_values_capability(gSwitchId, + SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + SAI_NEXT_HOP_GROUP_ATTR_TYPE, + &values); + if (status == SAI_STATUS_SUCCESS) + { + for (size_t i = 0; i < values.count; i++) + { + if (values.list[i] == SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_ORDERED_ECMP) + { + m_orderedEcmpEnable = true; + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_ORDERED_ECMP_CAPABLE, "true"); + set_switch_capability(fvVector); + SWSS_LOG_NOTICE("Ordered ECMP/Nexthop-Group is configured"); + return; + } + } + } + } + } + m_orderedEcmpEnable = false; + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_ORDERED_ECMP_CAPABLE, "false"); + set_switch_capability(fvVector); + SWSS_LOG_NOTICE("Ordered ECMP/Nexthop-Group is not 
configured"); + return; + } +} sai_status_t SwitchOrch::setSwitchTunnelVxlanParams(swss::FieldValueTuple &val) { auto attribute = fvField(val); @@ -295,7 +344,12 @@ void SwitchOrch::doAppSwitchTableTask(Consumer &consumer) { auto attribute = fvField(i); - if (switch_attribute_map.find(attribute) == switch_attribute_map.end()) + if (switch_non_sai_attribute_set.find(attribute) != switch_non_sai_attribute_set.end()) + { + setSwitchNonSaiAttributes(i); + continue; + } + else if (switch_attribute_map.find(attribute) == switch_attribute_map.end()) { // Check additionally 'switch_tunnel_attribute_map' for Switch Tunnel if (switch_tunnel_attribute_map.find(attribute) == switch_tunnel_attribute_map.end()) @@ -344,6 +398,10 @@ void SwitchOrch::doAppSwitchTableTask(Consumer &consumer) attr.value.u32 = to_uint(value); break; + case SAI_SWITCH_ATTR_SWITCH_SHELL_ENABLE: + attr.value.booldata = to_uint(value); + break; + case SAI_SWITCH_ATTR_VXLAN_DEFAULT_PORT: attr.value.u16 = to_uint(value); break; diff --git a/orchagent/switchorch.h b/orchagent/switchorch.h index 8c3789f523..5b09a67640 100644 --- a/orchagent/switchorch.h +++ b/orchagent/switchorch.h @@ -10,6 +10,7 @@ #define SWITCH_CAPABILITY_TABLE_PORT_TPID_CAPABLE "PORT_TPID_CAPABLE" #define SWITCH_CAPABILITY_TABLE_LAG_TPID_CAPABLE "LAG_TPID_CAPABLE" +#define SWITCH_CAPABILITY_TABLE_ORDERED_ECMP_CAPABLE "ORDERED_ECMP_CAPABLE" struct WarmRestartCheck { @@ -37,6 +38,8 @@ class SwitchOrch : public Orch // Initialize the ACL groups bind to Switch void initAclGroupsBindToSwitch(); + bool checkOrderedEcmpEnable() { return m_orderedEcmpEnable; } + private: void doTask(Consumer &consumer); void doTask(swss::SelectableTimer &timer); @@ -45,6 +48,8 @@ class SwitchOrch : public Orch void initSensorsTable(); void querySwitchTpidCapability(); sai_status_t setSwitchTunnelVxlanParams(swss::FieldValueTuple &val); + void setSwitchNonSaiAttributes(swss::FieldValueTuple &val); + // Create the default ACL group for the given stage, bind point is // SAI_ACL_BIND_POINT_TYPE_SWITCH and group type is @@ -74,6 +79,7 @@ class SwitchOrch : public Orch bool m_sensorsMaxTempSupported = true; bool m_sensorsAvgTempSupported = true; bool m_vxlanSportUserModeEnabled = false; + bool m_orderedEcmpEnable = false; // Information contained in the request from // external program for orchagent pre-shutdown state check diff --git a/orchagent/vnetorch.cpp b/orchagent/vnetorch.cpp index dc5838d8a5..e3f2cbac6e 100644 --- a/orchagent/vnetorch.cpp +++ b/orchagent/vnetorch.cpp @@ -40,7 +40,7 @@ extern CrmOrch *gCrmOrch; extern RouteOrch *gRouteOrch; extern MacAddress gVxlanMacAddress; extern BfdOrch *gBfdOrch; - +extern SwitchOrch *gSwitchOrch; /* * VRF Modeling and VNetVrf class definitions */ @@ -396,7 +396,7 @@ bool VNetOrch::addOperation(const Request& request) sai_attribute_t attr; vector attrs; set peer_list = {}; - bool peer = false, create = false; + bool peer = false, create = false, advertise_prefix = false; uint32_t vni=0; string tunnel; string scope; @@ -427,6 +427,10 @@ bool VNetOrch::addOperation(const Request& request) { scope = request.getAttrString("scope"); } + else if (name == "advertise_prefix") + { + advertise_prefix = request.getAttrBool("advertise_prefix"); + } else { SWSS_LOG_INFO("Unknown attribute: %s", name.c_str()); @@ -453,7 +457,7 @@ bool VNetOrch::addOperation(const Request& request) if (it == std::end(vnet_table_)) { - VNetInfo vnet_info = { tunnel, vni, peer_list, scope }; + VNetInfo vnet_info = { tunnel, vni, peer_list, scope, advertise_prefix }; obj = 
createObject(vnet_name, vnet_info, attrs); create = true; @@ -645,6 +649,7 @@ VNetRouteOrch::VNetRouteOrch(DBConnector *db, vector &tableNames, VNetOr state_db_ = shared_ptr(new DBConnector("STATE_DB", 0)); state_vnet_rt_tunnel_table_ = unique_ptr
(new Table(state_db_.get(), STATE_VNET_RT_TUNNEL_TABLE_NAME)); + state_vnet_rt_adv_table_ = unique_ptr<Table>
(new Table(state_db_.get(), STATE_ADVERTISE_NETWORK_TABLE_NAME)); gBfdOrch->attach(this); } @@ -675,9 +680,12 @@ bool VNetRouteOrch::addNextHopGroup(const string& vnet, const NextHopGroupKey &n vector next_hop_ids; set next_hop_set = nexthops.getNextHops(); std::map nhopgroup_members_set; + std::map nh_seq_id_in_nhgrp; + uint32_t seq_id = 0; for (auto it : next_hop_set) { + nh_seq_id_in_nhgrp[it] = ++seq_id; if (nexthop_info_[vnet].find(it.ip_address) != nexthop_info_[vnet].end() && nexthop_info_[vnet][it.ip_address].bfd_state != SAI_BFD_SESSION_STATE_UP) { continue; @@ -691,7 +699,7 @@ bool VNetRouteOrch::addNextHopGroup(const string& vnet, const NextHopGroupKey &n vector nhg_attrs; nhg_attr.id = SAI_NEXT_HOP_GROUP_ATTR_TYPE; - nhg_attr.value.s32 = SAI_NEXT_HOP_GROUP_TYPE_ECMP; + nhg_attr.value.s32 = gSwitchOrch->checkOrderedEcmpEnable() ? SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_ORDERED_ECMP : SAI_NEXT_HOP_GROUP_TYPE_ECMP; nhg_attrs.push_back(nhg_attr); sai_object_id_t next_hop_group_id; @@ -728,6 +736,13 @@ bool VNetRouteOrch::addNextHopGroup(const string& vnet, const NextHopGroupKey &n nhgm_attr.value.oid = nhid; nhgm_attrs.push_back(nhgm_attr); + if (gSwitchOrch->checkOrderedEcmpEnable()) + { + nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID; + nhgm_attr.value.u32 = nh_seq_id_in_nhgrp[nhopgroup_members_set.find(nhid)->second]; + nhgm_attrs.push_back(nhgm_attr); + } + sai_object_id_t next_hop_group_member_id; status = sai_next_hop_group_api->create_next_hop_group_member(&next_hop_group_member_id, gSwitchId, @@ -860,7 +875,10 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP NextHopGroupInfo next_hop_group_entry; next_hop_group_entry.next_hop_group_id = vrf_obj->getTunnelNextHop(nexthop); next_hop_group_entry.ref_count = 0; - next_hop_group_entry.active_members[nexthop] = SAI_NULL_OBJECT_ID; + if (nexthop_info_[vnet].find(nexthop.ip_address) == nexthop_info_[vnet].end() || nexthop_info_[vnet][nexthop.ip_address].bfd_state == SAI_BFD_SESSION_STATE_UP) + { + next_hop_group_entry.active_members[nexthop] = SAI_NULL_OBJECT_ID; + } syncd_nexthop_groups_[vnet][nexthops] = next_hop_group_entry; } else @@ -931,7 +949,7 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP NextHopGroupKey nhg = it_route->second; if(--syncd_nexthop_groups_[vnet][nhg].ref_count == 0) { - if (nexthops.getSize() > 1) + if (nhg.getSize() > 1) { removeNextHopGroup(vnet, nhg, vrf_obj); } @@ -1563,12 +1581,39 @@ void VNetRouteOrch::postRouteState(const string& vnet, IpPrefix& ipPrefix, NextH fvVector.emplace_back("state", route_state); state_vnet_rt_tunnel_table_->set(state_db_key, fvVector); + + if (vnet_orch_->getAdvertisePrefix(vnet)) + { + if (route_state == "active") + { + addRouteAdvertisement(ipPrefix); + } + else + { + removeRouteAdvertisement(ipPrefix); + } + } } void VNetRouteOrch::removeRouteState(const string& vnet, IpPrefix& ipPrefix) { const string state_db_key = vnet + state_db_key_delimiter + ipPrefix.to_string(); state_vnet_rt_tunnel_table_->del(state_db_key); + removeRouteAdvertisement(ipPrefix); +} + +void VNetRouteOrch::addRouteAdvertisement(IpPrefix& ipPrefix) +{ + const string key = ipPrefix.to_string(); + vector fvs; + fvs.push_back(FieldValueTuple("", "")); + state_vnet_rt_adv_table_->set(key, fvs); +} + +void VNetRouteOrch::removeRouteAdvertisement(IpPrefix& ipPrefix) +{ + const string key = ipPrefix.to_string(); + state_vnet_rt_adv_table_->del(key); } void VNetRouteOrch::update(SubjectType type, void *cntx) @@ -1648,7 +1693,20 @@ void 
VNetRouteOrch::updateVnetTunnel(const BfdUpdate& update) NextHopGroupKey nexthops = nhg_info_pair.first; NextHopGroupInfo& nhg_info = nhg_info_pair.second; - if (!(nexthops.contains(endpoint))) + std::set next_hop_set = nexthops.getNextHops(); + uint32_t seq_id = 0; + uint32_t nh_seq_id = 0; + for (auto nh: next_hop_set) + { + seq_id++; + if (nh == endpoint) + { + nh_seq_id = seq_id; + break; + } + } + + if (!nh_seq_id) { continue; } @@ -1670,6 +1728,13 @@ void VNetRouteOrch::updateVnetTunnel(const BfdUpdate& update) nhgm_attr.value.oid = vrf_obj->getTunnelNextHop(endpoint); nhgm_attrs.push_back(nhgm_attr); + if (gSwitchOrch->checkOrderedEcmpEnable()) + { + nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID; + nhgm_attr.value.u32 = nh_seq_id; + nhgm_attrs.push_back(nhgm_attr); + } + sai_status_t status = sai_next_hop_group_api->create_next_hop_group_member(&next_hop_group_member_id, gSwitchId, (uint32_t)nhgm_attrs.size(), diff --git a/orchagent/vnetorch.h b/orchagent/vnetorch.h index 7e493c5f30..77c2785371 100644 --- a/orchagent/vnetorch.h +++ b/orchagent/vnetorch.h @@ -27,12 +27,13 @@ extern sai_object_id_t gVirtualRouterId; const request_description_t vnet_request_description = { { REQ_T_STRING }, { - { "src_mac", REQ_T_MAC_ADDRESS }, - { "vxlan_tunnel", REQ_T_STRING }, - { "vni", REQ_T_UINT }, - { "peer_list", REQ_T_SET }, - { "guid", REQ_T_STRING }, - { "scope", REQ_T_STRING }, + { "src_mac", REQ_T_MAC_ADDRESS }, + { "vxlan_tunnel", REQ_T_STRING }, + { "vni", REQ_T_UINT }, + { "peer_list", REQ_T_SET }, + { "guid", REQ_T_STRING }, + { "scope", REQ_T_STRING }, + { "advertise_prefix", REQ_T_BOOL}, }, { "vxlan_tunnel", "vni" } // mandatory attributes }; @@ -57,6 +58,7 @@ struct VNetInfo uint32_t vni; set peers; string scope; + bool advertise_prefix; }; typedef map vrid_list_t; @@ -83,7 +85,8 @@ class VNetObject tunnel_(vnetInfo.tunnel), peer_list_(vnetInfo.peers), vni_(vnetInfo.vni), - scope_(vnetInfo.scope) + scope_(vnetInfo.scope), + advertise_prefix_(vnetInfo.advertise_prefix) { } virtual bool updateObj(vector&) = 0; @@ -113,6 +116,11 @@ class VNetObject return scope_; } + bool getAdvertisePrefix() const + { + return advertise_prefix_; + } + virtual ~VNetObject() noexcept(false) {}; private: @@ -120,6 +128,7 @@ class VNetObject string tunnel_; uint32_t vni_; string scope_; + bool advertise_prefix_; }; struct nextHop @@ -223,6 +232,11 @@ class VNetOrch : public Orch2 return vnet_table_.at(name)->getTunnelName(); } + bool getAdvertisePrefix(const std::string& name) const + { + return vnet_table_.at(name)->getAdvertisePrefix(); + } + bool isVnetExecVrf() const { return (vnet_exec_ == VNET_EXEC::VNET_EXEC_VRF); @@ -338,6 +352,8 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer void delEndpointMonitor(const string& vnet, NextHopGroupKey& nexthops); void postRouteState(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops); void removeRouteState(const string& vnet, IpPrefix& ipPrefix); + void addRouteAdvertisement(IpPrefix& ipPrefix); + void removeRouteAdvertisement(IpPrefix& ipPrefix); void updateVnetTunnel(const BfdUpdate&); bool updateTunnelRoute(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op); @@ -362,6 +378,7 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer ProducerStateTable bfd_session_producer_; shared_ptr state_db_; unique_ptr
<Table> state_vnet_rt_tunnel_table_; + unique_ptr<Table>
state_vnet_rt_adv_table_; }; class VNetCfgRouteOrch : public Orch diff --git a/orchagent/vxlanorch.cpp b/orchagent/vxlanorch.cpp index fc6a505a1f..8fce069631 100644 --- a/orchagent/vxlanorch.cpp +++ b/orchagent/vxlanorch.cpp @@ -1542,28 +1542,12 @@ bool VxlanTunnelOrch::removeVxlanTunnelMap(string tunnelName, uint32_t vni) tunnel_obj->vlan_vrf_vni_count--; if (tunnel_obj->vlan_vrf_vni_count == 0) { - auto tunnel_term_id = vxlan_tunnel_table_[tunnelName].get()->getTunnelTermId(); - try - { - remove_tunnel_termination(tunnel_term_id); - } - catch(const std::runtime_error& error) - { - SWSS_LOG_ERROR("Error removing tunnel term entry. Tunnel: %s. Error: %s", tunnelName.c_str(), error.what()); - return false; - } - - auto tunnel_id = vxlan_tunnel_table_[tunnelName].get()->getTunnelId(); - try - { - removeTunnelFromFlexCounter(tunnel_id, tunnelName); - remove_tunnel(tunnel_id); - } - catch(const std::runtime_error& error) - { - SWSS_LOG_ERROR("Error removing tunnel entry. Tunnel: %s. Error: %s", tunnelName.c_str(), error.what()); - return false; - } + uint8_t mapper_list = 0; + + TUNNELMAP_SET_VLAN(mapper_list); + TUNNELMAP_SET_VRF(mapper_list); + + tunnel_obj->deleteTunnelHw(mapper_list, TUNNEL_MAP_USE_DEDICATED_ENCAP_DECAP); } SWSS_LOG_NOTICE("Vxlan map entry deleted for tunnel '%s' with vni '%d'", tunnelName.c_str(), vni); diff --git a/portsyncd/portsyncd.cpp b/portsyncd/portsyncd.cpp index c55c1685af..37e0c4232f 100644 --- a/portsyncd/portsyncd.cpp +++ b/portsyncd/portsyncd.cpp @@ -228,11 +228,6 @@ void handlePortConfigFromConfigDB(ProducerStateTable &p, DBConnector &cfgDb, boo void handlePortConfig(ProducerStateTable &p, map &port_cfg_map) { - string autoneg; - vector attrs; - vector autoneg_attrs; - vector force_attrs; - auto it = port_cfg_map.begin(); while (it != port_cfg_map.end()) { @@ -247,54 +242,7 @@ void handlePortConfig(ProducerStateTable &p, map /* No support for port delete yet */ if (op == SET_COMMAND) { - - for (auto i : values) - { - auto field = fvField(i); - if (field == "adv_speeds") - { - autoneg_attrs.push_back(i); - } - else if (field == "adv_interface_types") - { - autoneg_attrs.push_back(i); - } - else if (field == "speed") - { - force_attrs.push_back(i); - } - else if (field == "interface_type") - { - force_attrs.push_back(i); - } - else if (field == "autoneg") - { - autoneg = fvValue(i); - attrs.push_back(i); - } - else - { - attrs.push_back(i); - } - } - if (autoneg == "on") // autoneg is on, only put adv_speeds and adv_interface_types to APPL_DB - { - attrs.insert(attrs.end(), autoneg_attrs.begin(), autoneg_attrs.end()); - } - else if (autoneg == "off") // autoneg is off, only put speed and interface_type to APPL_DB - { - attrs.insert(attrs.end(), force_attrs.begin(), force_attrs.end()); - } - else // autoneg is not configured, put all attributes to APPL_DB - { - attrs.insert(attrs.end(), autoneg_attrs.begin(), autoneg_attrs.end()); - attrs.insert(attrs.end(), force_attrs.begin(), force_attrs.end()); - } - p.set(key, attrs); - attrs.clear(); - autoneg_attrs.clear(); - force_attrs.clear(); - autoneg.clear(); + p.set(key, values); } it = port_cfg_map.erase(it); diff --git a/tests/conftest.py b/tests/conftest.py index 0ec6626fe9..f1e8248a14 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -33,10 +33,8 @@ # a dynamic number of ports. GitHub Issue: Azure/sonic-swss#1384. NUM_PORTS = 32 -# FIXME: Voq asics will have 16 fabric ports created (defined in Azure/sonic-buildimage#6185). 
-# Right now, we set FABRIC_NUM_PORTS to 0, and change to 16 when PR#6185 merges. PR#6185 can't -# be merged before this PR. Otherwise it will cause swss voq test failures. -FABRIC_NUM_PORTS = 0 +# Voq asics will have 16 fabric ports created (defined in Azure/sonic-buildimage#7629). +FABRIC_NUM_PORTS = 16 def ensure_system(cmd): rc, output = subprocess.getstatusoutput(cmd) @@ -526,22 +524,12 @@ def _polling_function(): # Verify that all ports have been created asic_db = self.get_asic_db() - - # Verify that we have "at least" NUM_PORTS + FABRIC_NUM_PORTS, rather exact number. - # Right now, FABRIC_NUM_PORTS = 0. So it essentially waits for at least NUM_PORTS. - # This will allow us to merge Azure/sonic-buildimage#6185 that creates 16 fabric ports. - # When PR#6185 merges, FABRIC_NUM_PORTS should be 16, and so this verification (at least - # NUM_PORTS) still holds. - # Will update FABRIC_NUM_PORTS to 16, and revert back to wait exact NUM_PORTS + FABRIC_NUM_PORTS - # when PR#6185 merges. - wait_at_least_n_keys = True - - asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", num_ports + 1, wait_at_least_n_keys) # +1 CPU Port + asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", num_ports + 1) # +1 CPU Port # Verify that fabric ports are monitored in STATE_DB if metadata.get('switch_type', 'npu') in ['voq', 'fabric']: self.get_state_db() - self.state_db.wait_for_n_keys("FABRIC_PORT_TABLE", FABRIC_NUM_PORTS, wait_at_least_n_keys) + self.state_db.wait_for_n_keys("FABRIC_PORT_TABLE", FABRIC_NUM_PORTS) def net_cleanup(self) -> None: """Clean up network, remove extra links.""" @@ -1169,6 +1157,44 @@ def getCrmCounterValue(self, key, counter): if k[0] == counter: return int(k[1]) + def port_field_set(self, port, field, value): + cdb = swsscommon.DBConnector(4, self.redis_sock, 0) + tbl = swsscommon.Table(cdb, "PORT") + fvs = swsscommon.FieldValuePairs([(field, value)]) + tbl.set(port, fvs) + time.sleep(1) + + def port_admin_set(self, port, status): + self.port_field_set(port, "admin_status", status) + + def interface_ip_add(self, port, ip_address): + cdb = swsscommon.DBConnector(4, self.redis_sock, 0) + tbl = swsscommon.Table(cdb, "INTERFACE") + fvs = swsscommon.FieldValuePairs([("NULL", "NULL")]) + tbl.set(port, fvs) + tbl.set(port + "|" + ip_address, fvs) + time.sleep(1) + + def crm_poll_set(self, value): + cdb = swsscommon.DBConnector(4, self.redis_sock, 0) + tbl = swsscommon.Table(cdb, "CRM") + fvs = swsscommon.FieldValuePairs([("polling_interval", value)]) + tbl.set("Config", fvs) + time.sleep(1) + + def clear_fdb(self): + adb = swsscommon.DBConnector(0, self.redis_sock, 0) + opdata = ["ALL", "ALL"] + msg = json.dumps(opdata,separators=(',',':')) + adb.publish('FLUSHFDBREQUEST', msg) + + def warm_restart_swss(self, enable): + db = swsscommon.DBConnector(6, self.redis_sock, 0) + + tbl = swsscommon.Table(db, "WARM_RESTART_ENABLE_TABLE") + fvs = swsscommon.FieldValuePairs([("enable",enable)]) + tbl.set("swss", fvs) + # deps: acl, crm, fdb def setReadOnlyAttr(self, obj, attr, val): db = swsscommon.DBConnector(swsscommon.ASIC_DB, self.redis_sock, 0) diff --git a/tests/dvslib/dvs_acl.py b/tests/dvslib/dvs_acl.py index 9111de7a8e..266761c568 100644 --- a/tests/dvslib/dvs_acl.py +++ b/tests/dvslib/dvs_acl.py @@ -16,6 +16,7 @@ class DVSAcl: ADB_ACL_TABLE_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE" ADB_ACL_GROUP_TABLE_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE_GROUP" ADB_ACL_GROUP_MEMBER_TABLE_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE_GROUP_MEMBER" + ADB_ACL_COUNTER_TABLE_NAME = 
"ASIC_STATE:SAI_OBJECT_TYPE_ACL_COUNTER" ADB_ACL_STAGE_LOOKUP = { "ingress": "SAI_ACL_STAGE_INGRESS", @@ -140,6 +141,19 @@ def remove_acl_table_type(self, name: str) -> None: """ self.config_db.delete_entry(self.CDB_ACL_TABLE_TYPE_NAME, name) + def get_acl_counter_ids(self, expected: int) -> List[str]: + """Get all of the ACL counter IDs in ASIC DB. + + This method will wait for the expected number of counters to exist, or fail. + + Args: + expected: The number of counters that are expected to be present in ASIC DB. + + Returns: + The list of ACL counter IDs in ASIC DB. + """ + return self.asic_db.wait_for_n_keys(self.ADB_ACL_COUNTER_TABLE_NAME, expected) + def get_acl_table_ids(self, expected: int) -> List[str]: """Get all of the ACL table IDs in ASIC DB. @@ -530,6 +544,39 @@ def verify_mirror_acl_rule( self._check_acl_entry_mirror_action(fvs, session_oid, stage) self._check_acl_entry_counters_map(acl_rule_id) + def verify_acl_rule_generic( + self, + sai_qualifiers: Dict[str, str], + acl_table_id: str = None, + acl_rule_id: str = None + ) -> None: + """Verify that an ACL rule has the correct ASIC DB representation. + + Args: + sai_qualifiers: The expected set of SAI qualifiers to be found in ASIC DB. + acl_table_id: A specific OID to check in ASIC DB. If left empty, this method + assumes that only one table exists in ASIC DB. + acl_rule_id: A specific OID to check in ASIC DB. If left empty, this method + assumes that only one rule exists in ASIC DB. + """ + if not acl_table_id: + acl_table_id = self.get_acl_table_ids(1)[0] + + if not acl_rule_id: + acl_rule_id = self._get_acl_rule_id() + + entry = self.asic_db.wait_for_entry("ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY", acl_rule_id) + + for k, v in entry.items(): + if k == "SAI_ACL_ENTRY_ATTR_TABLE_ID": + assert v == acl_table_id + elif k == "SAI_ACL_ENTRY_ATTR_ADMIN_STATE": + assert v == "true" + elif k in sai_qualifiers: + assert sai_qualifiers[k](v) + else: + assert False, "Unknown SAI qualifier: key={}, value={}".format(k, v) + def verify_acl_rule_set( self, priorities: List[str], diff --git a/tests/dvslib/dvs_database.py b/tests/dvslib/dvs_database.py index f2657f7516..4256b5802d 100644 --- a/tests/dvslib/dvs_database.py +++ b/tests/dvslib/dvs_database.py @@ -34,6 +34,24 @@ def create_entry(self, table_name: str, key: str, entry: Dict[str, str]) -> None formatted_entry = swsscommon.FieldValuePairs(list(entry.items())) table.set(key, formatted_entry) + def set_entry(self, table_name: str, key: str, entry: Dict[str, str]) -> None: + """Set entry of an existing key in the specified table. + + Args: + table_name: The name of the table. + key: The key that needs to be updated. + entry: A set of key-value pairs to be updated. + """ + table = swsscommon.Table(self.db_connection, table_name) + (status, fv_pairs) = table.get(key) + + formatted_entry = swsscommon.FieldValuePairs(list(entry.items())) + table.set(key, formatted_entry) + + if status: + for f in [ k for k, v in dict(fv_pairs).items() if k not in entry.keys() ]: + table.hdel(key, f) + def update_entry(self, table_name: str, key: str, entry: Dict[str, str]) -> None: """Update entry of an existing key in the specified table. 
diff --git a/tests/dvslib/dvs_pbh.py b/tests/dvslib/dvs_pbh.py index 79a58681a9..df612638ea 100644 --- a/tests/dvslib/dvs_pbh.py +++ b/tests/dvslib/dvs_pbh.py @@ -10,6 +10,8 @@ class DVSPbh: CDB_PBH_HASH = "PBH_HASH" CDB_PBH_HASH_FIELD = "PBH_HASH_FIELD" + ADB_PBH_HASH = "ASIC_STATE:SAI_OBJECT_TYPE_HASH" + def __init__(self, asic_db, config_db): """Create a new DVS PBH Manager.""" self.asic_db = asic_db @@ -60,6 +62,27 @@ def create_pbh_rule( self.config_db.create_entry(self.CDB_PBH_RULE, "{}|{}".format(table_name, rule_name), attr_dict) + def update_pbh_rule( + self, + table_name: str, + rule_name: str, + priority: str, + qualifiers: Dict[str, str], + hash_name: str, + packet_action: str = "SET_ECMP_HASH", + flow_counter: str = "DISABLED" + ) -> None: + """Update PBH rule in Config DB.""" + attr_dict = { + "priority": priority, + "hash": hash_name, + "packet_action": packet_action, + "flow_counter": flow_counter, + **qualifiers + } + + self.config_db.set_entry(self.CDB_PBH_RULE, "{}|{}".format(table_name, rule_name), attr_dict) + def remove_pbh_rule( self, table_name: str, @@ -125,3 +148,10 @@ def verify_pbh_hash_field_count( ) -> None: """Verify that there are N hash field objects in ASIC DB.""" self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_FINE_GRAINED_HASH_FIELD", expected) + + def get_pbh_hash_ids( + self, + expected: int + ) -> List[str]: + """Get all of the PBH hash IDs in ASIC DB.""" + return self.asic_db.wait_for_n_keys(self.ADB_PBH_HASH, expected) diff --git a/tests/gcov_support.sh b/tests/gcov_support.sh index 4200e20813..d96ee1c250 100755 --- a/tests/gcov_support.sh +++ b/tests/gcov_support.sh @@ -146,7 +146,8 @@ lcov_merge_all() cp $1/lcov_cobertura.py $1/common_work/gcov/ python $1/common_work/gcov/lcov_cobertura.py total.info -o coverage.xml - sed -i "s#common_work/#$1/common_work/#" coverage.xml + sed -i "s#common_work/gcov/##" coverage.xml + sed -i "s#common_work.gcov.##" coverage.xml cd gcov_output/ if [ ! 
-d ${ALLMERGE_DIR} ]; then diff --git a/tests/mock_tests/Makefile.am b/tests/mock_tests/Makefile.am index 11cd00670e..15bc47bd70 100644 --- a/tests/mock_tests/Makefile.am +++ b/tests/mock_tests/Makefile.am @@ -23,6 +23,8 @@ LDADD_GTEST = -L/usr/src/gtest tests_SOURCES = aclorch_ut.cpp \ portsorch_ut.cpp \ + routeorch_ut.cpp \ + qosorch_ut.cpp \ saispy_ut.cpp \ consumer_ut.cpp \ ut_saihelper.cpp \ @@ -57,6 +59,7 @@ tests_SOURCES = aclorch_ut.cpp \ $(top_srcdir)/orchagent/mirrororch.cpp \ $(top_srcdir)/orchagent/fdborch.cpp \ $(top_srcdir)/orchagent/aclorch.cpp \ + $(top_srcdir)/orchagent/pbh/pbhcap.cpp \ $(top_srcdir)/orchagent/pbh/pbhcnt.cpp \ $(top_srcdir)/orchagent/pbh/pbhmgr.cpp \ $(top_srcdir)/orchagent/pbh/pbhrule.cpp \ @@ -86,7 +89,8 @@ tests_SOURCES = aclorch_ut.cpp \ $(top_srcdir)/orchagent/macsecorch.cpp \ $(top_srcdir)/orchagent/lagid.cpp \ $(top_srcdir)/orchagent/bfdorch.cpp \ - $(top_srcdir)/orchagent/srv6orch.cpp + $(top_srcdir)/orchagent/srv6orch.cpp \ + $(top_srcdir)/orchagent/nvgreorch.cpp tests_SOURCES += $(FLEX_CTR_DIR)/flex_counter_manager.cpp $(FLEX_CTR_DIR)/flex_counter_stat_manager.cpp $(FLEX_CTR_DIR)/flow_counter_handler.cpp tests_SOURCES += $(DEBUG_CTR_DIR)/debug_counter.cpp $(DEBUG_CTR_DIR)/drop_counter.cpp diff --git a/tests/mock_tests/aclorch_ut.cpp b/tests/mock_tests/aclorch_ut.cpp index 51399cb9b3..a381fed968 100644 --- a/tests/mock_tests/aclorch_ut.cpp +++ b/tests/mock_tests/aclorch_ut.cpp @@ -1724,4 +1724,28 @@ namespace aclorch_test ASSERT_TRUE(orch->m_aclOrch->removeAclRule(rule->getTableId(), rule->getId())); } + TEST_F(AclOrchTest, deleteNonExistingRule) + { + string tableId = "acl_table"; + string ruleId = "acl_rule"; + + auto orch = createAclOrch(); + + // add acl table + auto kvfAclTable = deque({{ + tableId, + SET_COMMAND, + { + { ACL_TABLE_DESCRIPTION, "L3 table" }, + { ACL_TABLE_TYPE, TABLE_TYPE_L3 }, + { ACL_TABLE_STAGE, STAGE_INGRESS }, + { ACL_TABLE_PORTS, "1,2" } + } + }}); + + orch->doAclTableTask(kvfAclTable); + + // try to delete non existing acl rule + ASSERT_TRUE(orch->m_aclOrch->removeAclRule(tableId, ruleId)); + } } // namespace nsAclOrchTest diff --git a/tests/mock_tests/bulker_ut.cpp b/tests/mock_tests/bulker_ut.cpp index a2cdaa07a3..6210cc0969 100644 --- a/tests/mock_tests/bulker_ut.cpp +++ b/tests/mock_tests/bulker_ut.cpp @@ -106,4 +106,40 @@ namespace bulker_test ASSERT_EQ(ia->first.id, SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION); ASSERT_EQ(ia->first.value.s32, SAI_PACKET_ACTION_FORWARD); } + + TEST_F(BulkerTest, BulkerPendindRemoval) + { + // Create bulker + EntityBulker gRouteBulker(sai_route_api, 1000); + deque object_statuses; + + // Check max bulk size + ASSERT_EQ(gRouteBulker.max_bulk_size, 1000); + + // Create a dummy route entry + sai_route_entry_t route_entry_remove; + route_entry_remove.destination.addr_family = SAI_IP_ADDR_FAMILY_IPV4; + route_entry_remove.destination.addr.ip4 = htonl(0x0a00000f); + route_entry_remove.destination.mask.ip4 = htonl(0xffffff00); + route_entry_remove.vr_id = 0x0; + route_entry_remove.switch_id = 0x0; + + // Put route entry into remove + object_statuses.emplace_back(); + gRouteBulker.remove_entry(&object_statuses.back(), &route_entry_remove); + + // Confirm route entry is pending removal + ASSERT_TRUE(gRouteBulker.bulk_entry_pending_removal(route_entry_remove)); + + // Create another dummy route entry that will not be removed + sai_route_entry_t route_entry_non_remove; + route_entry_non_remove.destination.addr_family = SAI_IP_ADDR_FAMILY_IPV4; + route_entry_non_remove.destination.addr.ip4 = 
htonl(0x0a00010f); + route_entry_non_remove.destination.mask.ip4 = htonl(0xffffff00); + route_entry_non_remove.vr_id = 0x0; + route_entry_non_remove.switch_id = 0x0; + + // Confirm route entry is not pending removal + ASSERT_FALSE(gRouteBulker.bulk_entry_pending_removal(route_entry_non_remove)); + } } diff --git a/tests/mock_tests/mock_orchagent_main.h b/tests/mock_tests/mock_orchagent_main.h index 321f3d15d2..3166f3d962 100644 --- a/tests/mock_tests/mock_orchagent_main.h +++ b/tests/mock_tests/mock_orchagent_main.h @@ -9,13 +9,19 @@ #include "neighorch.h" #include "fdborch.h" #include "mirrororch.h" +#define private public #include "bufferorch.h" +#undef private +#include "qosorch.h" #include "vrforch.h" #include "vnetorch.h" #include "vxlanorch.h" #include "policerorch.h" #include "fgnhgorch.h" #include "flexcounterorch.h" +#include "tunneldecaporch.h" +#include "muxorch.h" +#include "nhgorch.h" #include "directory.h" extern int gBatchSize; @@ -43,7 +49,10 @@ extern NeighOrch *gNeighOrch; extern FdbOrch *gFdbOrch; extern MirrorOrch *gMirrorOrch; extern BufferOrch *gBufferOrch; +extern QosOrch *gQosOrch; extern VRFOrch *gVrfOrch; +extern NhgOrch *gNhgOrch; +extern Srv6Orch *gSrv6Orch; extern Directory gDirectory; extern sai_acl_api_t *sai_acl_api; @@ -60,5 +69,10 @@ extern sai_tunnel_api_t *sai_tunnel_api; extern sai_next_hop_api_t *sai_next_hop_api; extern sai_hostif_api_t *sai_hostif_api; extern sai_buffer_api_t *sai_buffer_api; +extern sai_qos_map_api_t *sai_qos_map_api; +extern sai_scheduler_api_t *sai_scheduler_api; +extern sai_scheduler_group_api_t *sai_scheduler_group_api; +extern sai_wred_api_t *sai_wred_api; extern sai_queue_api_t *sai_queue_api; extern sai_udf_api_t* sai_udf_api; +extern sai_mpls_api_t* sai_mpls_api; diff --git a/tests/mock_tests/portsorch_ut.cpp b/tests/mock_tests/portsorch_ut.cpp index 853fdbfb69..28df6610fd 100644 --- a/tests/mock_tests/portsorch_ut.cpp +++ b/tests/mock_tests/portsorch_ut.cpp @@ -7,7 +7,9 @@ #include "mock_orchagent_main.h" #include "mock_table.h" #include "notifier.h" +#define private public #include "pfcactionhandler.h" +#undef private #include @@ -18,6 +20,105 @@ namespace portsorch_test using namespace std; + sai_queue_api_t ut_sai_queue_api; + sai_queue_api_t *pold_sai_queue_api; + sai_buffer_api_t ut_sai_buffer_api; + sai_buffer_api_t *pold_sai_buffer_api; + + string _ut_stub_queue_key; + sai_status_t _ut_stub_sai_get_queue_attribute( + _In_ sai_object_id_t queue_id, + _In_ uint32_t attr_count, + _Inout_ sai_attribute_t *attr_list) + { + if (attr_count == 1 && attr_list[0].id == SAI_QUEUE_ATTR_BUFFER_PROFILE_ID) + { + auto &typemapQueue = (*gBufferOrch->m_buffer_type_maps[APP_BUFFER_QUEUE_TABLE_NAME]); + auto &profileName = typemapQueue["Ethernet0:3-4"].m_objsReferencingByMe["profile"]; + auto profileNameVec = tokenize(profileName, ':'); + auto &typemapProfile = (*gBufferOrch->m_buffer_type_maps[APP_BUFFER_PROFILE_TABLE_NAME]); + attr_list[0].value.oid = typemapProfile[profileNameVec[1]].m_saiObjectId; + return SAI_STATUS_SUCCESS; + } + else + { + return pold_sai_queue_api->get_queue_attribute(queue_id, attr_count, attr_list); + } + } + + sai_status_t _ut_stub_sai_get_ingress_priority_group_attribute( + _In_ sai_object_id_t ingress_priority_group_id, + _In_ uint32_t attr_count, + _Inout_ sai_attribute_t *attr_list) + { + if (attr_count == 1 && attr_list[0].id == SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE) + { + auto &typemapPg = (*gBufferOrch->m_buffer_type_maps[APP_BUFFER_PG_TABLE_NAME]); + auto &profileName = 
typemapPg["Ethernet0:3-4"].m_objsReferencingByMe["profile"]; + auto profileNameVec = tokenize(profileName, ':'); + auto &typemapProfile = (*gBufferOrch->m_buffer_type_maps[APP_BUFFER_PROFILE_TABLE_NAME]); + attr_list[0].value.oid = typemapProfile[profileNameVec[1]].m_saiObjectId; + return SAI_STATUS_SUCCESS; + } + else + { + return pold_sai_buffer_api->get_ingress_priority_group_attribute(ingress_priority_group_id, attr_count, attr_list); + } + } + + int _sai_create_buffer_pool_count = 0; + sai_status_t _ut_stub_sai_create_buffer_pool( + _Out_ sai_object_id_t *buffer_pool_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list) + { + auto status = pold_sai_buffer_api->create_buffer_pool(buffer_pool_id, switch_id, attr_count, attr_list); + if (SAI_STATUS_SUCCESS == status) + _sai_create_buffer_pool_count++; + return status; + } + + int _sai_remove_buffer_pool_count = 0; + sai_status_t _ut_stub_sai_remove_buffer_pool( + _In_ sai_object_id_t buffer_pool_id) + { + auto status = pold_sai_buffer_api->remove_buffer_pool(buffer_pool_id); + if (SAI_STATUS_SUCCESS == status) + _sai_remove_buffer_pool_count++; + return status; + } + + void _hook_sai_buffer_and_queue_api() + { + ut_sai_buffer_api = *sai_buffer_api; + pold_sai_buffer_api = sai_buffer_api; + ut_sai_buffer_api.create_buffer_pool = _ut_stub_sai_create_buffer_pool; + ut_sai_buffer_api.remove_buffer_pool = _ut_stub_sai_remove_buffer_pool; + ut_sai_buffer_api.get_ingress_priority_group_attribute = _ut_stub_sai_get_ingress_priority_group_attribute; + sai_buffer_api = &ut_sai_buffer_api; + + ut_sai_queue_api = *sai_queue_api; + pold_sai_queue_api = sai_queue_api; + ut_sai_queue_api.get_queue_attribute = _ut_stub_sai_get_queue_attribute; + sai_queue_api = &ut_sai_queue_api; + } + + void _unhook_sai_buffer_and_queue_api() + { + sai_buffer_api = pold_sai_buffer_api; + sai_queue_api = pold_sai_queue_api; + } + + void clear_pfcwd_zero_buffer_handler() + { + auto &zeroProfile = PfcWdZeroBufferHandler::ZeroBufferProfile::getInstance(); + zeroProfile.m_zeroIngressBufferPool = SAI_NULL_OBJECT_ID; + zeroProfile.m_zeroEgressBufferPool = SAI_NULL_OBJECT_ID; + zeroProfile.m_zeroIngressBufferProfile = SAI_NULL_OBJECT_ID; + zeroProfile.m_zeroEgressBufferProfile = SAI_NULL_OBJECT_ID; + } + struct PortsOrchTest : public ::testing::Test { shared_ptr m_app_db; @@ -103,6 +204,12 @@ namespace portsorch_test { ::testing_db::reset(); + auto buffer_maps = BufferOrch::m_buffer_type_maps; + for (auto &i : buffer_maps) + { + i.second->clear(); + } + delete gNeighOrch; gNeighOrch = nullptr; delete gFdbOrch; @@ -355,10 +462,12 @@ namespace portsorch_test TEST_F(PortsOrchTest, PfcZeroBufferHandlerLocksPortPgAndQueue) { + _hook_sai_buffer_and_queue_api(); Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); Table pgTable = Table(m_app_db.get(), APP_BUFFER_PG_TABLE_NAME); Table profileTable = Table(m_app_db.get(), APP_BUFFER_PROFILE_TABLE_NAME); Table poolTable = Table(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME); + Table queueTable = Table(m_app_db.get(), APP_BUFFER_QUEUE_TABLE_NAME); // Get SAI default ports to populate DB auto ports = ut_helper::getInitialSaiPorts(); @@ -397,39 +506,71 @@ namespace portsorch_test Port port; gPortsOrch->getPort("Ethernet0", port); - auto countersTable = make_shared
(m_counters_db.get(), COUNTERS_TABLE); - auto dropHandler = make_unique(port.m_port_id, port.m_queue_ids[3], 3, countersTable); - // Create test buffer pool poolTable.set( - "test_pool", + "ingress_pool", { { "type", "ingress" }, { "mode", "dynamic" }, { "size", "4200000" }, }); + poolTable.set( + "egress_pool", + { + { "type", "egress" }, + { "mode", "dynamic" }, + { "size", "4200000" }, + }); // Create test buffer profile - profileTable.set("test_profile", { { "pool", "test_pool" }, + profileTable.set("test_profile", { { "pool", "ingress_pool" }, { "xon", "14832" }, { "xoff", "14832" }, { "size", "35000" }, { "dynamic_th", "0" } }); + profileTable.set("ingress_profile", { { "pool", "ingress_pool" }, + { "xon", "14832" }, + { "xoff", "14832" }, + { "size", "35000" }, + { "dynamic_th", "0" } }); + profileTable.set("egress_profile", { { "pool", "egress_pool" }, + { "size", "0" }, + { "dynamic_th", "0" } }); // Apply profile on PGs 3-4 all ports for (const auto &it : ports) { std::ostringstream oss; oss << it.first << ":3-4"; - pgTable.set(oss.str(), { { "profile", "test_profile" } }); + pgTable.set(oss.str(), { { "profile", "ingress_profile" } }); + queueTable.set(oss.str(), { {"profile", "egress_profile" } }); } gBufferOrch->addExistingData(&pgTable); gBufferOrch->addExistingData(&poolTable); gBufferOrch->addExistingData(&profileTable); + gBufferOrch->addExistingData(&queueTable); // process pool, profile and PGs static_cast(gBufferOrch)->doTask(); + auto countersTable = make_shared
(m_counters_db.get(), COUNTERS_TABLE); + auto current_create_buffer_pool_count = _sai_create_buffer_pool_count; + auto dropHandler = make_unique(port.m_port_id, port.m_queue_ids[3], 3, countersTable); + + current_create_buffer_pool_count += 2; + ASSERT_TRUE(current_create_buffer_pool_count == _sai_create_buffer_pool_count); + ASSERT_TRUE(PfcWdZeroBufferHandler::ZeroBufferProfile::getInstance().getPool(true) == gBufferOrch->m_ingressZeroBufferPool); + ASSERT_TRUE(PfcWdZeroBufferHandler::ZeroBufferProfile::getInstance().getPool(false) == gBufferOrch->m_egressZeroBufferPool); + ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 1); + ASSERT_TRUE(gBufferOrch->m_egressZeroPoolRefCount == 1); + + std::deque entries; + entries.push_back({"Ethernet0:3-4", "SET", {{ "profile", "test_profile"}}}); + auto pgConsumer = static_cast(gBufferOrch->getExecutor(APP_BUFFER_PG_TABLE_NAME)); + pgConsumer->addToSync(entries); + entries.clear(); + static_cast(gBufferOrch)->doTask(); + // Port should have been updated by BufferOrch->doTask gPortsOrch->getPort("Ethernet0", port); auto profile_id = (*BufferOrch::m_buffer_type_maps["BUFFER_PROFILE_TABLE"])[string("test_profile")].m_saiObjectId; @@ -437,11 +578,32 @@ namespace portsorch_test ASSERT_TRUE(port.m_priority_group_pending_profile[3] == profile_id); ASSERT_TRUE(port.m_priority_group_pending_profile[4] == SAI_NULL_OBJECT_ID); - auto pgConsumer = static_cast(gBufferOrch->getExecutor(APP_BUFFER_PG_TABLE_NAME)); + pgConsumer = static_cast(gBufferOrch->getExecutor(APP_BUFFER_PG_TABLE_NAME)); pgConsumer->dumpPendingTasks(ts); ASSERT_TRUE(ts.empty()); // PG is stored in m_priority_group_pending_profile ts.clear(); + // Create a zero buffer pool after PFC storm + entries.push_back({"ingress_zero_pool", "SET", {{ "type", "ingress" }, + { "mode", "static" }, + { "size", "0" }}}); + auto poolConsumer = static_cast(gBufferOrch->getExecutor(APP_BUFFER_POOL_TABLE_NAME)); + poolConsumer->addToSync(entries); + entries.clear(); + static_cast(gBufferOrch)->doTask(); + // Reference increased + ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 2); + // Didn't create buffer pool again + ASSERT_TRUE(_sai_create_buffer_pool_count == current_create_buffer_pool_count); + + entries.push_back({"ingress_zero_pool", "DEL", {}}); + poolConsumer->addToSync(entries); + entries.clear(); + auto current_remove_buffer_pool_count = _sai_remove_buffer_pool_count; + static_cast(gBufferOrch)->doTask(); + ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 1); + ASSERT_TRUE(_sai_remove_buffer_pool_count == current_remove_buffer_pool_count); + // release zero buffer drop handler dropHandler.reset(); @@ -459,6 +621,139 @@ namespace portsorch_test pgConsumer->dumpPendingTasks(ts); ASSERT_TRUE(ts.empty()); // PG should be processed now ts.clear(); + clear_pfcwd_zero_buffer_handler(); + _unhook_sai_buffer_and_queue_api(); + } + + TEST_F(PortsOrchTest, PfcZeroBufferHandlerLocksPortWithZeroPoolCreated) + { + _hook_sai_buffer_and_queue_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + Table pgTable = Table(m_app_db.get(), APP_BUFFER_PG_TABLE_NAME); + Table profileTable = Table(m_app_db.get(), APP_BUFFER_PROFILE_TABLE_NAME); + Table poolTable = Table(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME); + Table queueTable = Table(m_app_db.get(), APP_BUFFER_QUEUE_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate port table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + 
+ // Set PortConfigDone, PortInitDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + portTable.set("PortInitDone", { { "lanes", "0" } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + + static_cast(gPortsOrch)->doTask(); + + // Apply configuration + // ports + static_cast(gPortsOrch)->doTask(); + + ASSERT_TRUE(gPortsOrch->allPortsReady()); + + // No more tasks + vector ts; + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + ts.clear(); + + // Simulate storm drop handler started on Ethernet0 TC 3 + Port port; + gPortsOrch->getPort("Ethernet0", port); + + // Create test buffer pool + poolTable.set("ingress_pool", + { + { "type", "ingress" }, + { "mode", "dynamic" }, + { "size", "4200000" }, + }); + poolTable.set("egress_pool", + { + { "type", "egress" }, + { "mode", "dynamic" }, + { "size", "4200000" }, + }); + poolTable.set("ingress_zero_pool", + { + { "type", "ingress" }, + { "mode", "static" }, + { "size", "0" } + }); + auto poolConsumer = static_cast(gBufferOrch->getExecutor(APP_BUFFER_POOL_TABLE_NAME)); + + // Create test buffer profile + profileTable.set("ingress_profile", { { "pool", "ingress_pool" }, + { "xon", "14832" }, + { "xoff", "14832" }, + { "size", "35000" }, + { "dynamic_th", "0" } }); + profileTable.set("egress_profile", { { "pool", "egress_pool" }, + { "size", "0" }, + { "dynamic_th", "0" } }); + + // Apply profile on PGs 3-4 all ports + for (const auto &it : ports) + { + std::ostringstream oss; + oss << it.first << ":3-4"; + pgTable.set(oss.str(), { { "profile", "ingress_profile" } }); + queueTable.set(oss.str(), { {"profile", "egress_profile" } }); + } + + gBufferOrch->addExistingData(&poolTable); + gBufferOrch->addExistingData(&profileTable); + gBufferOrch->addExistingData(&pgTable); + gBufferOrch->addExistingData(&queueTable); + + auto current_create_buffer_pool_count = _sai_create_buffer_pool_count + 3; // call SAI API create_buffer_pool for each pool + ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 0); + ASSERT_TRUE(gBufferOrch->m_egressZeroPoolRefCount == 0); + ASSERT_TRUE(gBufferOrch->m_ingressZeroBufferPool == SAI_NULL_OBJECT_ID); + ASSERT_TRUE(gBufferOrch->m_egressZeroBufferPool == SAI_NULL_OBJECT_ID); + + // process pool, profile and PGs + static_cast(gBufferOrch)->doTask(); + + ASSERT_TRUE(current_create_buffer_pool_count == _sai_create_buffer_pool_count); + ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 1); + ASSERT_TRUE(gBufferOrch->m_egressZeroPoolRefCount == 0); + ASSERT_TRUE(gBufferOrch->m_ingressZeroBufferPool != SAI_NULL_OBJECT_ID); + ASSERT_TRUE(gBufferOrch->m_egressZeroBufferPool == SAI_NULL_OBJECT_ID); + + auto countersTable = make_shared
(m_counters_db.get(), COUNTERS_TABLE); + auto dropHandler = make_unique(port.m_port_id, port.m_queue_ids[3], 3, countersTable); + + current_create_buffer_pool_count++; // Increased for egress zero pool + ASSERT_TRUE(current_create_buffer_pool_count == _sai_create_buffer_pool_count); + ASSERT_TRUE(PfcWdZeroBufferHandler::ZeroBufferProfile::getInstance().getPool(true) == gBufferOrch->m_ingressZeroBufferPool); + ASSERT_TRUE(PfcWdZeroBufferHandler::ZeroBufferProfile::getInstance().getPool(false) == gBufferOrch->m_egressZeroBufferPool); + ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 2); + ASSERT_TRUE(gBufferOrch->m_egressZeroPoolRefCount == 1); + + std::deque entries; + entries.push_back({"ingress_zero_pool", "DEL", {}}); + poolConsumer->addToSync(entries); + entries.clear(); + auto current_remove_buffer_pool_count = _sai_remove_buffer_pool_count; + static_cast(gBufferOrch)->doTask(); + ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 1); + ASSERT_TRUE(_sai_remove_buffer_pool_count == current_remove_buffer_pool_count); + + // release zero buffer drop handler + dropHandler.reset(); + clear_pfcwd_zero_buffer_handler(); + _unhook_sai_buffer_and_queue_api(); } /* This test checks that a LAG member validation happens on orchagent level diff --git a/tests/mock_tests/qosorch_ut.cpp b/tests/mock_tests/qosorch_ut.cpp new file mode 100644 index 0000000000..a77d19b38b --- /dev/null +++ b/tests/mock_tests/qosorch_ut.cpp @@ -0,0 +1,789 @@ +#define private public // make Directory::m_values available to clean it. +#include "directory.h" +#undef private +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_table.h" + +extern string gMySwitchType; + + +namespace qosorch_test +{ + using namespace std; + + shared_ptr m_app_db; + shared_ptr m_config_db; + shared_ptr m_state_db; + shared_ptr m_chassis_app_db; + + int sai_remove_qos_map_count; + int sai_remove_wred_profile_count; + int sai_remove_scheduler_count; + sai_object_id_t switch_dscp_to_tc_map_id; + + sai_remove_scheduler_fn old_remove_scheduler; + sai_scheduler_api_t ut_sai_scheduler_api, *pold_sai_scheduler_api; + sai_remove_wred_fn old_remove_wred; + sai_wred_api_t ut_sai_wred_api, *pold_sai_wred_api; + sai_remove_qos_map_fn old_remove_qos_map; + sai_qos_map_api_t ut_sai_qos_map_api, *pold_sai_qos_map_api; + sai_set_switch_attribute_fn old_set_switch_attribute_fn; + sai_switch_api_t ut_sai_switch_api, *pold_sai_switch_api; + + sai_status_t _ut_stub_sai_set_switch_attribute(sai_object_id_t switch_id, const sai_attribute_t *attr) + { + auto rc = old_set_switch_attribute_fn(switch_id, attr); + if (rc == SAI_STATUS_SUCCESS && attr->id == SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP) + switch_dscp_to_tc_map_id = attr->value.oid; + return rc; + } + + sai_status_t _ut_stub_sai_remove_qos_map(sai_object_id_t qos_map_id) + { + auto rc = old_remove_qos_map(qos_map_id); + if (rc == SAI_STATUS_SUCCESS) + sai_remove_qos_map_count++; + return rc; + } + + sai_status_t _ut_stub_sai_remove_wred(sai_object_id_t wred_id) + { + auto rc = old_remove_wred(wred_id); + if (rc == SAI_STATUS_SUCCESS) + sai_remove_wred_profile_count++; + return rc; + } + + sai_status_t _ut_stub_sai_remove_scheduler(sai_object_id_t scheduler_id) + { + auto rc = old_remove_scheduler(scheduler_id); + if (rc == SAI_STATUS_SUCCESS) + sai_remove_scheduler_count++; + return rc; + } + + struct QosOrchTest : public ::testing::Test + { + QosOrchTest() + { + } + + void CheckDependency(const string &referencingTableName, 
const string &referencingObjectName, const string &field, const string &dependentTableName, const string &dependentObjectName="") + { + auto &qosTypeMaps = QosOrch::getTypeMap(); + auto &referencingTable = (*qosTypeMaps[referencingTableName]); + auto &dependentTable = (*qosTypeMaps[dependentTableName]); + + if (dependentObjectName.empty()) + { + ASSERT_TRUE(referencingTable[referencingObjectName].m_objsReferencingByMe[field].empty()); + ASSERT_EQ(dependentTable[dependentObjectName].m_objsDependingOnMe.count(referencingObjectName), 0); + } + else + { + ASSERT_EQ(referencingTable[referencingObjectName].m_objsReferencingByMe[field], dependentTableName + ":" + dependentObjectName); + ASSERT_EQ(dependentTable[dependentObjectName].m_objsDependingOnMe.count(referencingObjectName), 1); + } + } + + void RemoveItem(const string &table, const string &key) + { + std::deque entries; + entries.push_back({key, "DEL", {}}); + auto consumer = dynamic_cast(gQosOrch->getExecutor(table)); + consumer->addToSync(entries); + } + + template void ReplaceSaiRemoveApi(sai_api_t* &sai_api, + sai_api_t &ut_sai_api, + sai_api_t* &pold_sai_api, + sai_remove_func ut_remove, + sai_remove_func &sai_remove, + sai_remove_func &old_remove, + sai_remove_func &put_remove) + { + old_remove = sai_remove; + pold_sai_api = sai_api; + ut_sai_api = *pold_sai_api; + sai_api = &ut_sai_api; + put_remove = ut_remove; + } + + void SetUp() override + { + ASSERT_EQ(sai_route_api, nullptr); + map profile = { + { "SAI_VS_SWITCH_TYPE", "SAI_VS_SWITCH_TYPE_BCM56850" }, + { "KV_DEVICE_MAC_ADDRESS", "20:03:04:05:06:00" } + }; + + ut_helper::initSaiApi(profile); + + // Hack SAI APIs + ReplaceSaiRemoveApi(sai_qos_map_api, ut_sai_qos_map_api, pold_sai_qos_map_api, + _ut_stub_sai_remove_qos_map, sai_qos_map_api->remove_qos_map, + old_remove_qos_map, ut_sai_qos_map_api.remove_qos_map); + ReplaceSaiRemoveApi(sai_scheduler_api, ut_sai_scheduler_api, pold_sai_scheduler_api, + _ut_stub_sai_remove_scheduler, sai_scheduler_api->remove_scheduler, + old_remove_scheduler, ut_sai_scheduler_api.remove_scheduler); + ReplaceSaiRemoveApi(sai_wred_api, ut_sai_wred_api, pold_sai_wred_api, + _ut_stub_sai_remove_wred, sai_wred_api->remove_wred, + old_remove_wred, ut_sai_wred_api.remove_wred); + pold_sai_switch_api = sai_switch_api; + ut_sai_switch_api = *pold_sai_switch_api; + old_set_switch_attribute_fn = pold_sai_switch_api->set_switch_attribute; + sai_switch_api = &ut_sai_switch_api; + ut_sai_switch_api.set_switch_attribute = _ut_stub_sai_set_switch_attribute; + + // Init switch and create dependencies + m_app_db = make_shared("APPL_DB", 0); + m_config_db = make_shared("CONFIG_DB", 0); + m_state_db = make_shared("STATE_DB", 0); + if(gMySwitchType == "voq") + m_chassis_app_db = make_shared("CHASSIS_APP_DB", 0); + + sai_attribute_t attr; + + attr.id = SAI_SWITCH_ATTR_INIT_SWITCH; + attr.value.booldata = true; + + auto status = sai_switch_api->create_switch(&gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + // Get switch source MAC address + attr.id = SAI_SWITCH_ATTR_SRC_MAC_ADDRESS; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gMacAddress = attr.value.mac; + + // Get the default virtual router ID + attr.id = SAI_SWITCH_ATTR_DEFAULT_VIRTUAL_ROUTER_ID; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gVirtualRouterId = attr.value.oid; + + ASSERT_EQ(gCrmOrch, nullptr); + gCrmOrch = new CrmOrch(m_config_db.get(), 
CFG_CRM_TABLE_NAME); + + TableConnector stateDbSwitchTable(m_state_db.get(), "SWITCH_CAPABILITY"); + TableConnector conf_asic_sensors(m_config_db.get(), CFG_ASIC_SENSORS_TABLE_NAME); + TableConnector app_switch_table(m_app_db.get(), APP_SWITCH_TABLE_NAME); + + vector switch_tables = { + conf_asic_sensors, + app_switch_table + }; + + ASSERT_EQ(gSwitchOrch, nullptr); + gSwitchOrch = new SwitchOrch(m_app_db.get(), switch_tables, stateDbSwitchTable); + + // Create dependencies ... + + const int portsorch_base_pri = 40; + + vector ports_tables = { + { APP_PORT_TABLE_NAME, portsorch_base_pri + 5 }, + { APP_VLAN_TABLE_NAME, portsorch_base_pri + 2 }, + { APP_VLAN_MEMBER_TABLE_NAME, portsorch_base_pri }, + { APP_LAG_TABLE_NAME, portsorch_base_pri + 4 }, + { APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri } + }; + + vector flex_counter_tables = { + CFG_FLEX_COUNTER_TABLE_NAME + }; + auto* flexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); + gDirectory.set(flexCounterOrch); + + ASSERT_EQ(gPortsOrch, nullptr); + gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + + ASSERT_EQ(gVrfOrch, nullptr); + gVrfOrch = new VRFOrch(m_app_db.get(), APP_VRF_TABLE_NAME, m_state_db.get(), STATE_VRF_OBJECT_TABLE_NAME); + + ASSERT_EQ(gIntfsOrch, nullptr); + gIntfsOrch = new IntfsOrch(m_app_db.get(), APP_INTF_TABLE_NAME, gVrfOrch, m_chassis_app_db.get()); + + const int fdborch_pri = 20; + + vector app_fdb_tables = { + { APP_FDB_TABLE_NAME, FdbOrch::fdborch_pri}, + { APP_VXLAN_FDB_TABLE_NAME, FdbOrch::fdborch_pri}, + { APP_MCLAG_FDB_TABLE_NAME, fdborch_pri} + }; + + TableConnector stateDbFdb(m_state_db.get(), STATE_FDB_TABLE_NAME); + TableConnector stateMclagDbFdb(m_state_db.get(), STATE_MCLAG_REMOTE_FDB_TABLE_NAME); + ASSERT_EQ(gFdbOrch, nullptr); + gFdbOrch = new FdbOrch(m_app_db.get(), app_fdb_tables, stateDbFdb, stateMclagDbFdb, gPortsOrch); + + ASSERT_EQ(gNeighOrch, nullptr); + gNeighOrch = new NeighOrch(m_app_db.get(), APP_NEIGH_TABLE_NAME, gIntfsOrch, gFdbOrch, gPortsOrch, m_chassis_app_db.get()); + + vector qos_tables = { + CFG_TC_TO_QUEUE_MAP_TABLE_NAME, + CFG_SCHEDULER_TABLE_NAME, + CFG_DSCP_TO_TC_MAP_TABLE_NAME, + CFG_MPLS_TC_TO_TC_MAP_TABLE_NAME, + CFG_DOT1P_TO_TC_MAP_TABLE_NAME, + CFG_QUEUE_TABLE_NAME, + CFG_PORT_QOS_MAP_TABLE_NAME, + CFG_WRED_PROFILE_TABLE_NAME, + CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, + CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, + CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, + CFG_DSCP_TO_FC_MAP_TABLE_NAME, + CFG_EXP_TO_FC_MAP_TABLE_NAME + }; + gQosOrch = new QosOrch(m_config_db.get(), qos_tables); + + // Recreate buffer orch to read populated data + vector buffer_tables = { APP_BUFFER_POOL_TABLE_NAME, + APP_BUFFER_PROFILE_TABLE_NAME, + APP_BUFFER_QUEUE_TABLE_NAME, + APP_BUFFER_PG_TABLE_NAME, + APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, + APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME }; + + gBufferOrch = new BufferOrch(m_app_db.get(), m_config_db.get(), m_state_db.get(), buffer_tables); + + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate pot table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + + portTable.set("PortInitDone", { { "lanes", "0" } }); + 
gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + + Table tcToQueueMapTable = Table(m_config_db.get(), CFG_TC_TO_QUEUE_MAP_TABLE_NAME); + Table scheduleTable = Table(m_config_db.get(), CFG_SCHEDULER_TABLE_NAME); + Table dscpToTcMapTable = Table(m_config_db.get(), CFG_DSCP_TO_TC_MAP_TABLE_NAME); + Table dot1pToTcMapTable = Table(m_config_db.get(), CFG_DOT1P_TO_TC_MAP_TABLE_NAME); + Table queueTable = Table(m_config_db.get(), CFG_QUEUE_TABLE_NAME); + Table portQosMapTable = Table(m_config_db.get(), CFG_PORT_QOS_MAP_TABLE_NAME); + Table wredProfileTable = Table(m_config_db.get(), CFG_WRED_PROFILE_TABLE_NAME); + Table tcToPgMapTable = Table(m_config_db.get(), CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME); + Table pfcPriorityToPgMapTable = Table(m_config_db.get(), CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME); + Table pfcPriorityToQueueMapTable = Table(m_config_db.get(), CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME); + Table dscpToFcMapTable = Table(m_config_db.get(), CFG_DSCP_TO_FC_MAP_TABLE_NAME); + Table expToFcMapTable = Table(m_config_db.get(), CFG_EXP_TO_FC_MAP_TABLE_NAME); + + scheduleTable.set("scheduler.1", + { + {"type", "DWRR"}, + {"weight", "15"} + }); + + scheduleTable.set("scheduler.0", + { + {"type", "DWRR"}, + {"weight", "14"} + }); + + wredProfileTable.set("AZURE_LOSSLESS", + { + {"ecn", "ecn_all"}, + {"green_drop_probability", "5"}, + {"green_max_threshold", "2097152"}, + {"green_min_threshold", "1048576"}, + {"wred_green_enable", "true"}, + {"yellow_drop_probability", "5"}, + {"yellow_max_threshold", "2097152"}, + {"yellow_min_threshold", "1048576"}, + {"wred_yellow_enable", "true"}, + {"red_drop_probability", "5"}, + {"red_max_threshold", "2097152"}, + {"red_min_threshold", "1048576"}, + {"wred_red_enable", "true"} + }); + + tcToQueueMapTable.set("AZURE", + { + {"0", "0"}, + {"1", "1"} + }); + + dscpToTcMapTable.set("AZURE", + { + {"0", "0"}, + {"1", "1"} + }); + + tcToPgMapTable.set("AZURE", + { + {"0", "0"}, + {"1", "1"} + }); + + dot1pToTcMapTable.set("AZURE", + { + {"0", "0"}, + {"1", "1"} + }); + + pfcPriorityToPgMapTable.set("AZURE", + { + {"0", "0"}, + {"1", "1"} + }); + + pfcPriorityToQueueMapTable.set("AZURE", + { + {"0", "0"}, + {"1", "1"} + }); + + dot1pToTcMapTable.set("AZURE", + { + {"0", "0"}, + {"1", "1"} + }); + + gQosOrch->addExistingData(&tcToQueueMapTable); + gQosOrch->addExistingData(&dscpToTcMapTable); + gQosOrch->addExistingData(&tcToPgMapTable); + gQosOrch->addExistingData(&pfcPriorityToPgMapTable); + gQosOrch->addExistingData(&pfcPriorityToQueueMapTable); + gQosOrch->addExistingData(&scheduleTable); + gQosOrch->addExistingData(&wredProfileTable); + + static_cast(gQosOrch)->doTask(); + } + + void TearDown() override + { + auto qos_maps = QosOrch::getTypeMap(); + for (auto &i : qos_maps) + { + i.second->clear(); + } + + gDirectory.m_values.clear(); + + delete gCrmOrch; + gCrmOrch = nullptr; + + delete gSwitchOrch; + gSwitchOrch = nullptr; + + delete gVrfOrch; + gVrfOrch = nullptr; + + delete gIntfsOrch; + gIntfsOrch = nullptr; + + delete gNeighOrch; + gNeighOrch = nullptr; + + delete gFdbOrch; + gFdbOrch = nullptr; + + delete gPortsOrch; + gPortsOrch = nullptr; + + delete gQosOrch; + gQosOrch = nullptr; + + sai_qos_map_api = pold_sai_qos_map_api; + sai_scheduler_api = pold_sai_scheduler_api; + sai_wred_api = pold_sai_wred_api; + sai_switch_api = pold_sai_switch_api; + ut_helper::uninitSaiApi(); + } + }; + + TEST_F(QosOrchTest, QosOrchTestPortQosMapRemoveOneField) + { + Table portQosMapTable = Table(m_config_db.get(), 
CFG_PORT_QOS_MAP_TABLE_NAME); + + portQosMapTable.set("Ethernet0", + { + {"dscp_to_tc_map", "AZURE"}, + {"pfc_to_pg_map", "AZURE"}, + {"pfc_to_queue_map", "AZURE"}, + {"tc_to_pg_map", "AZURE"}, + {"tc_to_queue_map", "AZURE"}, + {"pfc_enable", "3,4"} + }); + gQosOrch->addExistingData(&portQosMapTable); + static_cast(gQosOrch)->doTask(); + + // Check whether the dependencies have been recorded + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "pfc_to_pg_map", CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "pfc_to_queue_map", CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "tc_to_pg_map", CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "tc_to_queue_map", CFG_TC_TO_QUEUE_MAP_TABLE_NAME, "AZURE"); + + // Try removing AZURE from DSCP_TO_TC_MAP while it is still referenced + RemoveItem(CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); + auto current_sai_remove_qos_map_count = sai_remove_qos_map_count; + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(current_sai_remove_qos_map_count, sai_remove_qos_map_count); + // Dependency is not cleared + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); + + // Remove dscp_to_tc_map from Ethernet0 via resetting the entry with field dscp_to_tc_map removed + std::deque entries; + entries.push_back({"Ethernet0", "SET", + { + {"pfc_to_pg_map", "AZURE"}, + {"pfc_to_queue_map", "AZURE"}, + {"tc_to_pg_map", "AZURE"}, + {"tc_to_queue_map", "AZURE"}, + {"pfc_enable", "3,4"} + }}); + auto consumer = dynamic_cast(gQosOrch->getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Drain PORT_QOS_MAP table + static_cast(gQosOrch)->doTask(); + // Drain DSCP_TO_TC_MAP table + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(current_sai_remove_qos_map_count + 1, sai_remove_qos_map_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME]).count("AZURE"), 0); + // Dependency of dscp_to_tc_map should be cleared + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME); + // Dependencies of other items are not touched + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "pfc_to_pg_map", CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "pfc_to_queue_map", CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "tc_to_pg_map", CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "tc_to_queue_map", CFG_TC_TO_QUEUE_MAP_TABLE_NAME, "AZURE"); + } + + TEST_F(QosOrchTest, QosOrchTestQueueRemoveWredProfile) + { + std::deque entries; + Table queueTable = Table(m_config_db.get(), CFG_QUEUE_TABLE_NAME); + + queueTable.set("Ethernet0|3", + { + {"scheduler", "scheduler.1"}, + {"wred_profile", "AZURE_LOSSLESS"} + }); + gQosOrch->addExistingData(&queueTable); + static_cast(gQosOrch)->doTask(); + + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.1"); + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS"); + + 
// Try removing scheduler from WRED_PROFILE table while it is still referenced + RemoveItem(CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS"); + auto current_sai_remove_wred_profile_count = sai_remove_wred_profile_count; + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(current_sai_remove_wred_profile_count, sai_remove_wred_profile_count); + // Make sure the dependency is untouched + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS"); + + // Remove wred_profile from Ethernet0 queue 3 + entries.push_back({"Ethernet0|3", "SET", + { + {"scheduler", "scheduler.1"} + }}); + auto consumer = dynamic_cast(gQosOrch->getExecutor(CFG_QUEUE_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Drain QUEUE table + static_cast(gQosOrch)->doTask(); + // Drain WRED_PROFILE table + static_cast(gQosOrch)->doTask(); + // Make sure the dependency is cleared + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME); + // And the sai remove API has been called + ASSERT_EQ(current_sai_remove_wred_profile_count + 1, sai_remove_wred_profile_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_WRED_PROFILE_TABLE_NAME]).count("AZURE_LOSSLESS"), 0); + // Other field should be untouched + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.1"); + } + + TEST_F(QosOrchTest, QosOrchTestQueueRemoveScheduler) + { + std::deque entries; + Table queueTable = Table(m_config_db.get(), CFG_QUEUE_TABLE_NAME); + + queueTable.set("Ethernet0|3", + { + {"scheduler", "scheduler.1"}, + {"wred_profile", "AZURE_LOSSLESS"} + }); + gQosOrch->addExistingData(&queueTable); + static_cast(gQosOrch)->doTask(); + + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.1"); + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS"); + + // Try removing scheduler from QUEUE table while it is still referenced + RemoveItem(CFG_SCHEDULER_TABLE_NAME, "scheduler.1"); + auto current_sai_remove_scheduler_count = sai_remove_scheduler_count; + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(current_sai_remove_scheduler_count, sai_remove_scheduler_count); + // Make sure the dependency is untouched + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.1"); + + // Remove scheduler from Ethernet0 queue 3 + entries.push_back({"Ethernet0|3", "SET", + { + {"wred_profile", "AZURE_LOSSLESS"} + }}); + auto consumer = dynamic_cast(gQosOrch->getExecutor(CFG_QUEUE_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Drain QUEUE table + static_cast(gQosOrch)->doTask(); + // Drain SCHEDULER table + static_cast(gQosOrch)->doTask(); + // Make sure the dependency is cleared + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME); + // And the sai remove API has been called + ASSERT_EQ(current_sai_remove_scheduler_count + 1, sai_remove_scheduler_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_SCHEDULER_TABLE_NAME]).count("scheduler.1"), 0); + // Other field should be untouched + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS"); + } + + TEST_F(QosOrchTest, QosOrchTestQueueReplaceFieldAndRemoveObject) + { + std::deque entries; + Table queueTable = Table(m_config_db.get(), CFG_QUEUE_TABLE_NAME); + auto queueConsumer = 
dynamic_cast(gQosOrch->getExecutor(CFG_QUEUE_TABLE_NAME)); + auto wredProfileConsumer = dynamic_cast(gQosOrch->getExecutor(CFG_WRED_PROFILE_TABLE_NAME)); + auto schedulerConsumer = dynamic_cast(gQosOrch->getExecutor(CFG_SCHEDULER_TABLE_NAME)); + + queueTable.set("Ethernet0|3", + { + {"scheduler", "scheduler.1"}, + {"wred_profile", "AZURE_LOSSLESS"} + }); + gQosOrch->addExistingData(&queueTable); + static_cast(gQosOrch)->doTask(); + + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.1"); + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS"); + + // Try replacing scheduler in QUEUE table: scheduler.1 => scheduler.0 + entries.push_back({"Ethernet0|3", "SET", + { + {"scheduler", "scheduler.0"}, + {"wred_profile", "AZURE_LOSSLESS"} + }}); + queueConsumer->addToSync(entries); + entries.clear(); + // Drain QUEUE table + static_cast(gQosOrch)->doTask(); + // Make sure the dependency is updated + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.0"); + // And the other field is not touched + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS"); + + RemoveItem(CFG_SCHEDULER_TABLE_NAME, "scheduler.1"); + auto current_sai_remove_scheduler_count = sai_remove_scheduler_count; + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(++current_sai_remove_scheduler_count, sai_remove_scheduler_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_SCHEDULER_TABLE_NAME]).count("scheduler.1"), 0); + + entries.push_back({"AZURE_LOSSLESS_1", "SET", + { + {"ecn", "ecn_all"}, + {"green_drop_probability", "5"}, + {"green_max_threshold", "2097152"}, + {"green_min_threshold", "1048576"}, + {"wred_green_enable", "true"}, + {"yellow_drop_probability", "5"}, + {"yellow_max_threshold", "2097152"}, + {"yellow_min_threshold", "1048576"}, + {"wred_yellow_enable", "true"}, + {"red_drop_probability", "5"}, + {"red_max_threshold", "2097152"}, + {"red_min_threshold", "1048576"}, + {"wred_red_enable", "true"} + }}); + wredProfileConsumer->addToSync(entries); + entries.clear(); + // Drain WRED_PROFILE table + static_cast(gQosOrch)->doTask(); + + // Replace wred_profile from Ethernet0 queue 3 + entries.push_back({"Ethernet0|3", "SET", + { + {"scheduler", "scheduler.0"}, + {"wred_profile", "AZURE_LOSSLESS_1"} + }}); + queueConsumer->addToSync(entries); + entries.clear(); + // Drain QUEUE table + static_cast(gQosOrch)->doTask(); + // Make sure the dependency is updated + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS_1"); + // And the other field is not touched + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.0"); + + RemoveItem(CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS"); + // Drain WRED_PROFILE table + auto current_sai_remove_wred_profile_count = sai_remove_wred_profile_count; + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(++current_sai_remove_wred_profile_count, sai_remove_wred_profile_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_WRED_PROFILE_TABLE_NAME]).count("AZURE_LOSSLESS"), 0); + + // Remove object + entries.push_back({"Ethernet0|3", "DEL", {}}); + queueConsumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + + // Make sure the dependency is updated + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME); + 
CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME); + + // Remove scheduler object + entries.push_back({"scheduler.0", "DEL", {}}); + schedulerConsumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(++current_sai_remove_scheduler_count, sai_remove_scheduler_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_SCHEDULER_TABLE_NAME]).count("scheduler.0"), 0); + + // Remove wred profile object + entries.push_back({"AZURE_LOSSLESS_1", "DEL", {}}); + wredProfileConsumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(++current_sai_remove_wred_profile_count, sai_remove_wred_profile_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_WRED_PROFILE_TABLE_NAME]).count("AZURE_LOSSLESS_1"), 0); + } + + TEST_F(QosOrchTest, QosOrchTestPortQosMapReplaceOneFieldAndRemoveObject) + { + std::deque entries; + Table portQosMapTable = Table(m_config_db.get(), CFG_PORT_QOS_MAP_TABLE_NAME); + + portQosMapTable.set("Ethernet0", + { + {"dscp_to_tc_map", "AZURE"}, + {"pfc_to_pg_map", "AZURE"}, + {"pfc_to_queue_map", "AZURE"}, + {"tc_to_pg_map", "AZURE"}, + {"tc_to_queue_map", "AZURE"}, + {"pfc_enable", "3,4"} + }); + + static_cast(gQosOrch)->doTask(); + + entries.push_back({"AZURE_1", "SET", + { + {"1", "0"}, + {"0", "1"} + }}); + + auto consumer = dynamic_cast(gQosOrch->getExecutor(CFG_DSCP_TO_TC_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Drain DSCP_TO_TC_MAP table + static_cast(gQosOrch)->doTask(); + + entries.push_back({"Ethernet0", "SET", + { + {"dscp_to_tc_map", "AZURE_1"}, + {"pfc_to_pg_map", "AZURE"}, + {"pfc_to_queue_map", "AZURE"}, + {"tc_to_pg_map", "AZURE"}, + {"tc_to_queue_map", "AZURE"}, + {"pfc_enable", "3,4"} + }}); + consumer = dynamic_cast(gQosOrch->getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Drain PORT_QOS_MAP table + static_cast(gQosOrch)->doTask(); + // Dependency is updated + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE_1"); + + // Try removing AZURE from DSCP_TO_TC_MAP + RemoveItem(CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); + auto current_sai_remove_qos_map_count = sai_remove_qos_map_count; + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(++current_sai_remove_qos_map_count, sai_remove_qos_map_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME]).count("AZURE"), 0); + // Global dscp to tc map should not be cleared + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE_1"].m_saiObjectId, switch_dscp_to_tc_map_id); + + // Make sure other dependencies are not touched + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "pfc_to_pg_map", CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "pfc_to_queue_map", CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "tc_to_pg_map", CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "tc_to_queue_map", CFG_TC_TO_QUEUE_MAP_TABLE_NAME, "AZURE"); + + // Remove port from PORT_QOS_MAP table + entries.push_back({"Ethernet0", "DEL", {}}); + consumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_PORT_QOS_MAP_TABLE_NAME]).count("Ethernet0"), 0); + + // Make sure the maps can be removed now. 
Checking anyone should suffice since all the maps are handled in the same way. + entries.push_back({"AZURE", "DEL", {}}); + consumer = dynamic_cast(gQosOrch->getExecutor(CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(++current_sai_remove_qos_map_count, sai_remove_qos_map_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME]).count("AZURE"), 0); + + entries.push_back({"AZURE_1", "DEL", {}}); + consumer = dynamic_cast(gQosOrch->getExecutor(CFG_DSCP_TO_TC_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(++current_sai_remove_qos_map_count, sai_remove_qos_map_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME]).count("AZURE_1"), 0); + // Global dscp to tc map should be cleared + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE_1"].m_saiObjectId, SAI_NULL_OBJECT_ID); + } + + TEST_F(QosOrchTest, QosOrchTestGlobalDscpToTcMap) + { + // Make sure dscp to tc map is correct + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE"].m_saiObjectId, switch_dscp_to_tc_map_id); + + // Create a new dscp to tc map + std::deque entries; + entries.push_back({"AZURE_1", "SET", + { + {"1", "0"}, + {"0", "1"} + }}); + + auto consumer = dynamic_cast(gQosOrch->getExecutor(CFG_DSCP_TO_TC_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Drain DSCP_TO_TC_MAP table + static_cast(gQosOrch)->doTask(); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE_1"].m_saiObjectId, switch_dscp_to_tc_map_id); + + entries.push_back({"AZURE_1", "DEL", {}}); + consumer->addToSync(entries); + entries.clear(); + // Drain DSCP_TO_TC_MAP table + static_cast(gQosOrch)->doTask(); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE"].m_saiObjectId, switch_dscp_to_tc_map_id); + } +} diff --git a/tests/mock_tests/routeorch_ut.cpp b/tests/mock_tests/routeorch_ut.cpp new file mode 100644 index 0000000000..84f92a088c --- /dev/null +++ b/tests/mock_tests/routeorch_ut.cpp @@ -0,0 +1,419 @@ +#define private public // make Directory::m_values available to clean it. 
+#include "directory.h" +#undef private +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_table.h" +#include "bulker.h" + +extern string gMySwitchType; + + +namespace routeorch_test +{ + using namespace std; + + shared_ptr m_app_db; + shared_ptr m_config_db; + shared_ptr m_state_db; + shared_ptr m_chassis_app_db; + + int create_route_count; + int set_route_count; + int remove_route_count; + int sai_fail_count; + + sai_route_api_t ut_sai_route_api; + sai_route_api_t *pold_sai_route_api; + + sai_bulk_create_route_entry_fn old_create_route_entries; + sai_bulk_remove_route_entry_fn old_remove_route_entries; + sai_bulk_set_route_entry_attribute_fn old_set_route_entries_attribute; + + sai_status_t _ut_stub_sai_bulk_create_route_entry( + _In_ uint32_t object_count, + _In_ const sai_route_entry_t *route_entry, + _In_ const uint32_t *attr_count, + _In_ const sai_attribute_t **attr_list, + _In_ sai_bulk_op_error_mode_t mode, + _Out_ sai_status_t *object_statuses) + { + create_route_count++; + return old_create_route_entries(object_count, route_entry, attr_count, attr_list, mode, object_statuses); + } + + sai_status_t _ut_stub_sai_bulk_remove_route_entry( + _In_ uint32_t object_count, + _In_ const sai_route_entry_t *route_entry, + _In_ sai_bulk_op_error_mode_t mode, + _Out_ sai_status_t *object_statuses) + { + remove_route_count++; + return old_remove_route_entries(object_count, route_entry, mode, object_statuses); + } + + sai_status_t _ut_stub_sai_bulk_set_route_entry_attribute( + _In_ uint32_t object_count, + _In_ const sai_route_entry_t *route_entry, + _In_ const sai_attribute_t *attr_list, + _In_ sai_bulk_op_error_mode_t mode, + _Out_ sai_status_t *object_statuses) + { + set_route_count++; + + // Make sure there is not conflict settings + bool drop = false; + bool valid_nexthop = false; + for (uint32_t i = 0; i < object_count; i++) + { + if (route_entry[i].destination.mask.ip4 == 0) + { + if (attr_list[i].id == SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION) + { + drop = (attr_list[i].value.s32 == SAI_PACKET_ACTION_DROP); + } + else if (attr_list[i].id == SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID) + { + valid_nexthop = (attr_list[i].value.oid != SAI_NULL_OBJECT_ID); + } + } + } + + // Drop and a valid nexthop can not be provided for the same prefix + if (drop && valid_nexthop) + sai_fail_count++; + + return old_set_route_entries_attribute(object_count, route_entry, attr_list, mode, object_statuses); + } + + struct RouteOrchTest : public ::testing::Test + { + RouteOrchTest() + { + } + + void SetUp() override + { + ASSERT_EQ(sai_route_api, nullptr); + map profile = { + { "SAI_VS_SWITCH_TYPE", "SAI_VS_SWITCH_TYPE_BCM56850" }, + { "KV_DEVICE_MAC_ADDRESS", "20:03:04:05:06:00" } + }; + + ut_helper::initSaiApi(profile); + + // Hack the route create function + old_create_route_entries = sai_route_api->create_route_entries; + old_remove_route_entries = sai_route_api->remove_route_entries; + old_set_route_entries_attribute = sai_route_api->set_route_entries_attribute; + + pold_sai_route_api = sai_route_api; + ut_sai_route_api = *sai_route_api; + sai_route_api = &ut_sai_route_api; + + sai_route_api->create_route_entries = _ut_stub_sai_bulk_create_route_entry; + sai_route_api->remove_route_entries = _ut_stub_sai_bulk_remove_route_entry; + sai_route_api->set_route_entries_attribute = _ut_stub_sai_bulk_set_route_entry_attribute; + + // Init switch and create dependencies + m_app_db = make_shared("APPL_DB", 0); + m_config_db = 
make_shared("CONFIG_DB", 0); + m_state_db = make_shared("STATE_DB", 0); + if(gMySwitchType == "voq") + m_chassis_app_db = make_shared("CHASSIS_APP_DB", 0); + + sai_attribute_t attr; + + attr.id = SAI_SWITCH_ATTR_INIT_SWITCH; + attr.value.booldata = true; + + auto status = sai_switch_api->create_switch(&gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + // Get switch source MAC address + attr.id = SAI_SWITCH_ATTR_SRC_MAC_ADDRESS; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gMacAddress = attr.value.mac; + + // Get the default virtual router ID + attr.id = SAI_SWITCH_ATTR_DEFAULT_VIRTUAL_ROUTER_ID; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gVirtualRouterId = attr.value.oid; + + ASSERT_EQ(gCrmOrch, nullptr); + gCrmOrch = new CrmOrch(m_config_db.get(), CFG_CRM_TABLE_NAME); + + TableConnector stateDbSwitchTable(m_state_db.get(), "SWITCH_CAPABILITY"); + TableConnector conf_asic_sensors(m_config_db.get(), CFG_ASIC_SENSORS_TABLE_NAME); + TableConnector app_switch_table(m_app_db.get(), APP_SWITCH_TABLE_NAME); + + vector switch_tables = { + conf_asic_sensors, + app_switch_table + }; + + ASSERT_EQ(gSwitchOrch, nullptr); + gSwitchOrch = new SwitchOrch(m_app_db.get(), switch_tables, stateDbSwitchTable); + + // Create dependencies ... + + const int portsorch_base_pri = 40; + + vector ports_tables = { + { APP_PORT_TABLE_NAME, portsorch_base_pri + 5 }, + { APP_VLAN_TABLE_NAME, portsorch_base_pri + 2 }, + { APP_VLAN_MEMBER_TABLE_NAME, portsorch_base_pri }, + { APP_LAG_TABLE_NAME, portsorch_base_pri + 4 }, + { APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri } + }; + + vector flex_counter_tables = { + CFG_FLEX_COUNTER_TABLE_NAME + }; + auto* flexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); + gDirectory.set(flexCounterOrch); + + ASSERT_EQ(gPortsOrch, nullptr); + gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + + ASSERT_EQ(gVrfOrch, nullptr); + gVrfOrch = new VRFOrch(m_app_db.get(), APP_VRF_TABLE_NAME, m_state_db.get(), STATE_VRF_OBJECT_TABLE_NAME); + + ASSERT_EQ(gIntfsOrch, nullptr); + gIntfsOrch = new IntfsOrch(m_app_db.get(), APP_INTF_TABLE_NAME, gVrfOrch, m_chassis_app_db.get()); + + const int fdborch_pri = 20; + + vector app_fdb_tables = { + { APP_FDB_TABLE_NAME, FdbOrch::fdborch_pri}, + { APP_VXLAN_FDB_TABLE_NAME, FdbOrch::fdborch_pri}, + { APP_MCLAG_FDB_TABLE_NAME, fdborch_pri} + }; + + TableConnector stateDbFdb(m_state_db.get(), STATE_FDB_TABLE_NAME); + TableConnector stateMclagDbFdb(m_state_db.get(), STATE_MCLAG_REMOTE_FDB_TABLE_NAME); + ASSERT_EQ(gFdbOrch, nullptr); + gFdbOrch = new FdbOrch(m_app_db.get(), app_fdb_tables, stateDbFdb, stateMclagDbFdb, gPortsOrch); + + ASSERT_EQ(gNeighOrch, nullptr); + gNeighOrch = new NeighOrch(m_app_db.get(), APP_NEIGH_TABLE_NAME, gIntfsOrch, gFdbOrch, gPortsOrch, m_chassis_app_db.get()); + + TunnelDecapOrch *tunnel_decap_orch = new TunnelDecapOrch(m_app_db.get(), APP_TUNNEL_DECAP_TABLE_NAME); + vector mux_tables = { + CFG_MUX_CABLE_TABLE_NAME, + CFG_PEER_SWITCH_TABLE_NAME + }; + MuxOrch *mux_orch = new MuxOrch(m_config_db.get(), mux_tables, tunnel_decap_orch, gNeighOrch, gFdbOrch); + gDirectory.set(mux_orch); + + ASSERT_EQ(gFgNhgOrch, nullptr); + const int fgnhgorch_pri = 15; + + vector fgnhg_tables = { + { CFG_FG_NHG, fgnhgorch_pri }, + { CFG_FG_NHG_PREFIX, fgnhgorch_pri }, + { CFG_FG_NHG_MEMBER, fgnhgorch_pri } + }; + 
gFgNhgOrch = new FgNhgOrch(m_config_db.get(), m_app_db.get(), m_state_db.get(), fgnhg_tables, gNeighOrch, gIntfsOrch, gVrfOrch); + + ASSERT_EQ(gSrv6Orch, nullptr); + vector srv6_tables = { + APP_SRV6_SID_LIST_TABLE_NAME, + APP_SRV6_MY_SID_TABLE_NAME + }; + gSrv6Orch = new Srv6Orch(m_app_db.get(), srv6_tables, gSwitchOrch, gVrfOrch, gNeighOrch); + + ASSERT_EQ(gRouteOrch, nullptr); + const int routeorch_pri = 5; + vector route_tables = { + { APP_ROUTE_TABLE_NAME, routeorch_pri }, + { APP_LABEL_ROUTE_TABLE_NAME, routeorch_pri } + }; + gRouteOrch = new RouteOrch(m_app_db.get(), route_tables, gSwitchOrch, gNeighOrch, gIntfsOrch, gVrfOrch, gFgNhgOrch, gSrv6Orch); + gNhgOrch = new NhgOrch(m_app_db.get(), APP_NEXTHOP_GROUP_TABLE_NAME); + + // Recreate buffer orch to read populated data + vector buffer_tables = { APP_BUFFER_POOL_TABLE_NAME, + APP_BUFFER_PROFILE_TABLE_NAME, + APP_BUFFER_QUEUE_TABLE_NAME, + APP_BUFFER_PG_TABLE_NAME, + APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, + APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME }; + + gBufferOrch = new BufferOrch(m_app_db.get(), m_config_db.get(), m_state_db.get(), buffer_tables); + + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate pot table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + + portTable.set("PortInitDone", { { "lanes", "0" } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + + Table intfTable = Table(m_app_db.get(), APP_INTF_TABLE_NAME); + intfTable.set("Ethernet0", { {"NULL", "NULL" }, + {"mac_addr", "00:00:00:00:00:00" }}); + intfTable.set("Ethernet0:10.0.0.1/24", { { "scope", "global" }, + { "family", "IPv4" }}); + gIntfsOrch->addExistingData(&intfTable); + static_cast(gIntfsOrch)->doTask(); + + Table neighborTable = Table(m_app_db.get(), APP_NEIGH_TABLE_NAME); + + map neighborIp2Mac = {{"10.0.0.2", "00:00:0a:00:00:02" }, + {"10.0.0.3", "00:00:0a:00:00:03" } }; + neighborTable.set("Ethernet0:10.0.0.2", { {"neigh", neighborIp2Mac["10.0.0.2"]}, + {"family", "IPv4" }}); + neighborTable.set("Ethernet0:10.0.0.3", { {"neigh", neighborIp2Mac["10.0.0.3"]}, + {"family", "IPv4" }}); + gNeighOrch->addExistingData(&neighborTable); + static_cast(gNeighOrch)->doTask(); + + Table routeTable = Table(m_app_db.get(), APP_ROUTE_TABLE_NAME); + routeTable.set("1.1.1.0/24", { {"ifname", "Ethernet0" }, + {"nexthop", "10.0.0.2" }}); + routeTable.set("0.0.0.0/0", { {"ifname", "Ethernet0" }, + {"nexthop", "10.0.0.2" }}); + gRouteOrch->addExistingData(&routeTable); + static_cast(gRouteOrch)->doTask(); + } + + void TearDown() override + { + gDirectory.m_values.clear(); + + delete gCrmOrch; + gCrmOrch = nullptr; + + delete gSwitchOrch; + gSwitchOrch = nullptr; + + delete gVrfOrch; + gVrfOrch = nullptr; + + delete gIntfsOrch; + gIntfsOrch = nullptr; + + delete gNeighOrch; + gNeighOrch = nullptr; + + delete gFdbOrch; + gFdbOrch = nullptr; + + delete gFgNhgOrch; + gFgNhgOrch = nullptr; + + delete gSrv6Orch; + gSrv6Orch = nullptr; + + delete gRouteOrch; + gRouteOrch = nullptr; + + delete gPortsOrch; + gPortsOrch = nullptr; + + sai_route_api = pold_sai_route_api; + ut_helper::uninitSaiApi(); + } + }; + + TEST_F(RouteOrchTest, RouteOrchTestDelSetSameNexthop) + { + std::deque entries; + + // Setting 
route with same next hop but after a DEL in the same bulk + entries.push_back({"1.1.1.0/24", "DEL", { {} }}); + entries.push_back({"1.1.1.0/24", "SET", { {"ifname", "Ethernet0"}, + {"nexthop", "10.0.0.2"}}}); + auto consumer = dynamic_cast(gRouteOrch->getExecutor(APP_ROUTE_TABLE_NAME)); + consumer->addToSync(entries); + auto current_create_count = create_route_count; + auto current_remove_count = remove_route_count; + auto current_set_count = set_route_count; + + static_cast(gRouteOrch)->doTask(); + // Make sure both create and set has been called + ASSERT_EQ(current_create_count + 1, create_route_count); + ASSERT_EQ(current_remove_count + 1, remove_route_count); + ASSERT_EQ(current_set_count, set_route_count); + + entries.clear(); + + // Make sure SAI API won't be called if setting it for second time with the same next hop + entries.push_back({"1.1.1.0/24", "SET", { {"ifname", "Ethernet0"}, + {"nexthop", "10.0.0.2"}}}); + consumer = dynamic_cast(gRouteOrch->getExecutor(APP_ROUTE_TABLE_NAME)); + consumer->addToSync(entries); + current_create_count = create_route_count; + current_remove_count = remove_route_count; + current_set_count = set_route_count; + + static_cast(gRouteOrch)->doTask(); + // Make sure both create and set has been called + ASSERT_EQ(current_create_count, create_route_count); + ASSERT_EQ(current_remove_count, remove_route_count); + ASSERT_EQ(current_set_count, set_route_count); + } + + TEST_F(RouteOrchTest, RouteOrchTestDelSetDiffNexthop) + { + std::deque entries; + entries.push_back({"1.1.1.0/24", "DEL", { {} }}); + entries.push_back({"1.1.1.0/24", "SET", { {"ifname", "Ethernet0"}, + {"nexthop", "10.0.0.3"}}}); + + auto consumer = dynamic_cast(gRouteOrch->getExecutor(APP_ROUTE_TABLE_NAME)); + consumer->addToSync(entries); + auto current_create_count = create_route_count; + auto current_remove_count = remove_route_count; + auto current_set_count = set_route_count; + + static_cast(gRouteOrch)->doTask(); + // Make sure both create and remove has been called + ASSERT_EQ(current_create_count + 1, create_route_count); + ASSERT_EQ(current_remove_count + 1, remove_route_count); + ASSERT_EQ(current_set_count, set_route_count); + } + + TEST_F(RouteOrchTest, RouteOrchTestDelSetDefaultRoute) + { + std::deque entries; + entries.push_back({"0.0.0.0/0", "DEL", { {} }}); + entries.push_back({"0.0.0.0/0", "SET", { {"ifname", "Ethernet0"}, + {"nexthop", "10.0.0.3"}}}); + + auto consumer = dynamic_cast(gRouteOrch->getExecutor(APP_ROUTE_TABLE_NAME)); + consumer->addToSync(entries); + auto current_create_count = create_route_count; + auto current_remove_count = remove_route_count; + auto current_set_count = set_route_count; + + static_cast(gRouteOrch)->doTask(); + // Make sure both create and set has been called + ASSERT_EQ(current_create_count, create_route_count); + ASSERT_EQ(current_remove_count, remove_route_count); + ASSERT_EQ(current_set_count + 1, set_route_count); + ASSERT_EQ(sai_fail_count, 0); + } +} diff --git a/tests/mock_tests/ut_saihelper.cpp b/tests/mock_tests/ut_saihelper.cpp index 34b76e7e5a..70eb96c99f 100644 --- a/tests/mock_tests/ut_saihelper.cpp +++ b/tests/mock_tests/ut_saihelper.cpp @@ -77,7 +77,12 @@ namespace ut_helper sai_api_query(SAI_API_ACL, (void **)&sai_acl_api); sai_api_query(SAI_API_HOSTIF, (void **)&sai_hostif_api); sai_api_query(SAI_API_BUFFER, (void **)&sai_buffer_api); + sai_api_query(SAI_API_QOS_MAP, (void **)&sai_qos_map_api); + sai_api_query(SAI_API_SCHEDULER_GROUP, (void **)&sai_scheduler_group_api); + sai_api_query(SAI_API_SCHEDULER, (void 
**)&sai_scheduler_api); + sai_api_query(SAI_API_WRED, (void **)&sai_wred_api); sai_api_query(SAI_API_QUEUE, (void **)&sai_queue_api); + sai_api_query(SAI_API_MPLS, (void**)&sai_mpls_api); return SAI_STATUS_SUCCESS; } diff --git a/tests/p4rt/test_l3.py b/tests/p4rt/test_l3.py index dbd6ae9781..4156576bc2 100644 --- a/tests/p4rt/test_l3.py +++ b/tests/p4rt/test_l3.py @@ -23,7 +23,7 @@ def _set_up(self, dvs): self._p4rt_route_obj.set_up_databases(dvs) self._p4rt_wcmp_group_obj.set_up_databases(dvs) self.response_consumer = swsscommon.NotificationConsumer( - self._p4rt_route_obj.appl_db, "APPL_DB_P4RT_TABLE_RESPONSE_CHANNEL") + self._p4rt_route_obj.appl_state_db, "APPL_DB_P4RT_TABLE_RESPONSE_CHANNEL") def _set_vrf(self, dvs): # Create VRF. @@ -1328,7 +1328,7 @@ def test_PruneNextHopOnWarmBoot(self, dvs, testlog): util.set_interface_status(dvs, if_name) # Execute the warm reboot. - dvs.runcmd("config warm_restart enable swss") + dvs.warm_restart_swss("true") dvs.stop_swss() dvs.start_swss() diff --git a/tests/p4rt/test_p4rt_acl.py b/tests/p4rt/test_p4rt_acl.py index 201a38f978..89015fc9d5 100644 --- a/tests/p4rt/test_p4rt_acl.py +++ b/tests/p4rt/test_p4rt_acl.py @@ -63,7 +63,7 @@ def _set_up(self, dvs): self._p4rt_udf_obj.set_up_databases(dvs) self.response_consumer = swsscommon.NotificationConsumer( - self._p4rt_acl_table_definition_obj.appl_db, "APPL_DB_P4RT_TABLE_RESPONSE_CHANNEL" + self._p4rt_acl_table_definition_obj.appl_state_db, "APPL_DB_P4RT_TABLE_RESPONSE_CHANNEL" ) @pytest.mark.skip(reason="p4orch is not enabled") diff --git a/tests/p4rt/test_p4rt_mirror.py b/tests/p4rt/test_p4rt_mirror.py index c625749293..bc218df147 100644 --- a/tests/p4rt/test_p4rt_mirror.py +++ b/tests/p4rt/test_p4rt_mirror.py @@ -42,7 +42,7 @@ def _set_up(self, dvs): self._p4rt_mirror_session_wrapper = P4RtMirrorSessionWrapper() self._p4rt_mirror_session_wrapper.set_up_databases(dvs) self._response_consumer = swsscommon.NotificationConsumer( - self._p4rt_mirror_session_wrapper.appl_db, "APPL_DB_P4RT_TABLE_RESPONSE_CHANNEL") + self._p4rt_mirror_session_wrapper.appl_state_db, "APPL_DB_P4RT_TABLE_RESPONSE_CHANNEL") def test_MirrorSessionAddModifyAndDelete(self, dvs, testlog): # Initialize database connectors diff --git a/tests/p4rt/util.py b/tests/p4rt/util.py index 831c7a5cbe..778a54960d 100644 --- a/tests/p4rt/util.py +++ b/tests/p4rt/util.py @@ -84,8 +84,8 @@ def get_port_oid_by_name(dvs, port_name): return port_oid def initialize_interface(dvs, port_name, ip): - dvs.runcmd("config interface startup {}".format(port_name)) - dvs.runcmd("config interface ip add {} {}".format(port_name, ip)) + dvs.port_admin_set(port_name, "up") + dvs.interface_ip_add(port_name, ip) def set_interface_status(dvs, if_name, status = "down", server = 0): dvs.servers[0].runcmd("ip link set {} dev {}".format(status, if_name)) == 0 diff --git a/tests/test_acl.py b/tests/test_acl.py index fb8aecb0ea..c246eefe53 100644 --- a/tests/test_acl.py +++ b/tests/test_acl.py @@ -553,11 +553,11 @@ def test_AclRuleRedirect(self, dvs, dvs_acl, l3_acl_table, setup_teardown_neighb class TestAclCrmUtilization: @pytest.fixture(scope="class", autouse=True) def configure_crm_polling_interval_for_test(self, dvs): - dvs.runcmd("crm config polling interval 1") + dvs.crm_poll_set("1") yield - dvs.runcmd("crm config polling interval 300") + dvs.crm_poll_set("300") def test_ValidateAclTableBindingCrmUtilization(self, dvs, dvs_acl): counter_db = dvs.get_counters_db() diff --git a/tests/test_acl_cli.py b/tests/test_acl_cli.py deleted file mode 100644 index 
02785314d2..0000000000 --- a/tests/test_acl_cli.py +++ /dev/null @@ -1,33 +0,0 @@ -class TestAclCli: - def test_AddTableMultipleTimes(self, dvs, dvs_acl): - dvs.runcmd("config acl add table TEST L3 -p Ethernet0") - - cdb = dvs.get_config_db() - cdb.wait_for_field_match( - "ACL_TABLE", - "TEST", - {"ports": "Ethernet0"} - ) - - # Verify that subsequent updates don't delete "ports" from config DB - dvs.runcmd("config acl add table TEST L3 -p Ethernet4") - cdb.wait_for_field_match( - "ACL_TABLE", - "TEST", - {"ports": "Ethernet4"} - ) - - # Verify that subsequent updates propagate to ASIC DB - L3_BIND_PORTS = ["Ethernet0", "Ethernet4", "Ethernet8", "Ethernet12"] - dvs.runcmd(f"config acl add table TEST L3 -p {','.join(L3_BIND_PORTS)}") - acl_table_id = dvs_acl.get_acl_table_ids(1)[0] - acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(L3_BIND_PORTS)) - - dvs_acl.verify_acl_table_group_members(acl_table_id, acl_table_group_ids, 1) - dvs_acl.verify_acl_table_port_binding(acl_table_id, L3_BIND_PORTS, 1) - - -# Add Dummy always-pass test at end as workaroud -# for issue when Flaky fail on final test it invokes module tear-down before retrying -def test_nonflaky_dummy(): - pass diff --git a/tests/test_acl_portchannel.py b/tests/test_acl_portchannel.py index 759850d1be..210c4f18d8 100644 --- a/tests/test_acl_portchannel.py +++ b/tests/test_acl_portchannel.py @@ -1,9 +1,87 @@ import time import pytest +import logging from swsscommon import swsscommon +logging.basicConfig(level=logging.INFO) +acllogger = logging.getLogger(__name__) + + +@pytest.fixture(autouse=True, scope="class") +def dvs_api(request, dvs_acl): + # Fixtures are created when first requested by a test, and are destroyed based on their scope + if request.cls is None: + yield + return + acllogger.info("Initialize DVS API: ACL") + request.cls.dvs_acl = dvs_acl + yield + acllogger.info("Deinitialize DVS API: ACL") + del request.cls.dvs_acl + + +@pytest.mark.usefixtures("dvs_lag_manager") +class TestAclInterfaceBinding: + @pytest.mark.parametrize("stage", ["ingress", "egress"]) + def test_AclTablePortChannelMemberBinding(self, testlog, stage): + """Verify that LAG member creation is prohibited when ACL binding is configured + + The test flow: + 1. Create ACL table and bind Ethernet124 + 2. Verify ACL table has been successfully added + 3. Create LAG + 4. Verify LAG has been successfully added + 5. Create LAG member Ethernet120 + 6. Verify LAG member has been successfully added + 7. Create LAG member Ethernet124 + 8. 
Verify LAG member hasn't been added because of active ACL binding + + Args: + testlog: test start/end log record injector + stage: ACL table stage (e.g., ingress/egress) + """ + try: + acllogger.info("Create ACL table: acl_table") + self.dvs_acl.create_acl_table( + table_name="acl_table", + table_type="L3", + ports=["Ethernet124"], + stage=stage + ) + self.dvs_acl.verify_acl_table_count(1) + + acllogger.info("Create LAG: PortChannel0001") + self.dvs_lag.create_port_channel("0001") + self.dvs_lag.get_and_verify_port_channel(1) + + acllogger.info("Create LAG member: Ethernet120") + self.dvs_lag.create_port_channel_member("0001", "Ethernet120") + self.dvs_lag.get_and_verify_port_channel_members(1) + + acllogger.info("Create LAG member: Ethernet124") + self.dvs_lag.create_port_channel_member("0001", "Ethernet124") + acllogger.info("Verify LAG member hasn't been created: Ethernet124") + self.dvs_lag.get_and_verify_port_channel_members(1) + finally: + acllogger.info("Remove LAG member: Ethernet124") + self.dvs_lag.remove_port_channel_member("0001", "Ethernet124") + self.dvs_lag.get_and_verify_port_channel_members(1) + + acllogger.info("Remove LAG member: Ethernet120") + self.dvs_lag.remove_port_channel_member("0001", "Ethernet120") + self.dvs_lag.get_and_verify_port_channel_members(0) + + acllogger.info("Remove LAG: PortChannel0001") + self.dvs_lag.remove_port_channel("0001") + self.dvs_lag.get_and_verify_port_channel(0) + + acllogger.info("Remove ACL table: acl_table") + self.dvs_acl.remove_acl_table("acl_table") + self.dvs_acl.verify_acl_table_count(0) + + class TestPortChannelAcl(object): def setup_db(self, dvs): self.pdb = swsscommon.DBConnector(0, dvs.redis_sock, 0) @@ -129,7 +207,7 @@ def check_asic_table_absent(self, dvs): # Second create ACL table def test_PortChannelAfterAcl(self, dvs): self.setup_db(dvs) - dvs.runcmd("crm config polling interval 1") + dvs.crm_poll_set("1") time.sleep(2) used_counter = dvs.getCrmCounterValue('ACL_STATS:INGRESS:LAG', 'crm_stats_acl_group_used') @@ -162,7 +240,7 @@ def test_PortChannelAfterAcl(self, dvs): new_new_used_counter = 0 assert new_used_counter - new_new_used_counter == 1 # slow down crm polling - dvs.runcmd("crm config polling interval 10000") + dvs.crm_poll_set("10000") # Frist create ACL table # Second create port channel diff --git a/tests/test_buffer_dynamic.py b/tests/test_buffer_dynamic.py index e44f2824f5..2b4367f00d 100644 --- a/tests/test_buffer_dynamic.py +++ b/tests/test_buffer_dynamic.py @@ -164,14 +164,14 @@ def test_changeSpeed(self, dvs, testlog): self.setup_db(dvs) # Startup interface - dvs.runcmd('config interface startup Ethernet0') + dvs.port_admin_set('Ethernet0', 'up') self.check_queues_after_port_startup(dvs) # Configure lossless PG 3-4 on interface self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'NULL'}) # Change speed to speed1 and verify whether the profile has been updated - dvs.runcmd("config interface speed Ethernet0 " + self.speedToTest1) + dvs.port_field_set("Ethernet0", "speed", self.speedToTest1) expectedProfile = self.make_lossless_profile_name(self.speedToTest1, self.originalCableLen) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile) @@ -185,7 +185,7 @@ def test_changeSpeed(self, dvs, testlog): self.app_db.wait_for_deleted_entry("BUFFER_PG_TABLE", "Ethernet0:3-4") # Change speed to speed2 and verify - dvs.runcmd("config interface speed Ethernet0 " + self.speedToTest2) + dvs.port_field_set("Ethernet0", "speed", self.speedToTest2) expectedProfile = 
self.make_lossless_profile_name(self.speedToTest2, self.originalCableLen) # Re-add another lossless PG @@ -197,7 +197,7 @@ def test_changeSpeed(self, dvs, testlog): self.app_db.wait_for_deleted_entry("BUFFER_PG_TABLE", "Ethernet0:6") # Remove the lossless PG 3-4 and revert speed - dvs.runcmd("config interface speed Ethernet0 " + self.originalSpeed) + dvs.port_field_set("Ethernet0", "speed", self.originalSpeed) self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'NULL'}) expectedProfile = self.make_lossless_profile_name(self.originalSpeed, self.originalCableLen) @@ -210,15 +210,16 @@ def test_changeSpeed(self, dvs, testlog): self.app_db.wait_for_deleted_entry("BUFFER_PG_TABLE", "Ethernet0:3-4") # Shutdown interface - dvs.runcmd('config interface shutdown Ethernet0') + dvs.port_admin_set('Ethernet0', 'down') self.cleanup_db(dvs) + @pytest.mark.skip(reason="Failing. Under investigation") def test_changeCableLen(self, dvs, testlog): self.setup_db(dvs) # Startup interface - dvs.runcmd('config interface startup Ethernet0') + dvs.port_admin_set('Ethernet0', 'up') # Configure lossless PG 3-4 on interface self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'NULL'}) @@ -262,7 +263,7 @@ def test_changeCableLen(self, dvs, testlog): self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') # Shutdown interface - dvs.runcmd('config interface shutdown Ethernet0') + dvs.port_admin_set('Ethernet0', 'down') self.cleanup_db(dvs) @@ -270,7 +271,7 @@ def test_MultipleLosslessPg(self, dvs, testlog): self.setup_db(dvs) # Startup interface - dvs.runcmd('config interface startup Ethernet0') + dvs.port_admin_set('Ethernet0', 'up') # Configure lossless PG 3-4 on interface self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'NULL'}) @@ -281,7 +282,7 @@ def test_MultipleLosslessPg(self, dvs, testlog): self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": expectedProfile}) # Change speed and check - dvs.runcmd("config interface speed Ethernet0 " + self.speedToTest1) + dvs.port_field_set("Ethernet0", "speed", self.speedToTest1) expectedProfile = self.make_lossless_profile_name(self.speedToTest1, self.originalCableLen) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile) self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) @@ -298,7 +299,7 @@ def test_MultipleLosslessPg(self, dvs, testlog): # Revert the speed and cable length and check self.change_cable_length(self.originalCableLen) - dvs.runcmd("config interface speed Ethernet0 " + self.originalSpeed) + dvs.port_field_set("Ethernet0", "speed", self.originalSpeed) self.app_db.wait_for_deleted_entry("BUFFER_PROFILE_TABLE", expectedProfile) self.asic_db.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BUFFER_PROFILE", self.newProfileInAsicDb) expectedProfile = self.make_lossless_profile_name(self.originalSpeed, self.originalCableLen) @@ -311,7 +312,7 @@ def test_MultipleLosslessPg(self, dvs, testlog): self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|6') # Shutdown interface - dvs.runcmd('config interface shutdown Ethernet0') + dvs.port_admin_set('Ethernet0', 'down') self.cleanup_db(dvs) @@ -319,7 +320,7 @@ def test_headroomOverride(self, dvs, testlog): self.setup_db(dvs) # Startup interface - dvs.runcmd('config interface startup Ethernet0') + dvs.port_admin_set('Ethernet0', 'up') # Configure static profile self.config_db.update_entry('BUFFER_PROFILE', 'test', @@ -396,7 +397,7 @@ def test_headroomOverride(self, dvs, testlog): 
self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') # Shutdown interface - dvs.runcmd('config interface shutdown Ethernet0') + dvs.port_admin_set('Ethernet0', 'down') self.cleanup_db(dvs) @@ -404,7 +405,7 @@ def test_mtuUpdate(self, dvs, testlog): self.setup_db(dvs) # Startup interface - dvs.runcmd('config interface startup Ethernet0') + dvs.port_admin_set('Ethernet0', 'up') test_mtu = '1500' default_mtu = '9100' @@ -412,7 +413,7 @@ def test_mtuUpdate(self, dvs, testlog): expectedProfileNormal = self.make_lossless_profile_name(self.originalSpeed, self.originalCableLen) # update the mtu on the interface - dvs.runcmd("config interface mtu Ethernet0 {}".format(test_mtu)) + dvs.port_field_set("Ethernet0", "mtu", test_mtu) # configure lossless PG 3-4 on interface self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'NULL'}) @@ -422,7 +423,7 @@ def test_mtuUpdate(self, dvs, testlog): self.check_new_profile_in_asic_db(dvs, expectedProfileMtu) self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfileMtu}) - dvs.runcmd("config interface mtu Ethernet0 {}".format(default_mtu)) + dvs.port_field_set("Ethernet0", "mtu", default_mtu) self.app_db.wait_for_deleted_entry("BUFFER_PROFILE_TABLE", expectedProfileMtu) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfileNormal) @@ -432,7 +433,7 @@ def test_mtuUpdate(self, dvs, testlog): self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') # Shutdown interface - dvs.runcmd('config interface shutdown Ethernet0') + dvs.port_admin_set('Ethernet0', 'down') self.cleanup_db(dvs) @@ -440,7 +441,7 @@ def test_nonDefaultAlpha(self, dvs, testlog): self.setup_db(dvs) # Startup interface - dvs.runcmd('config interface startup Ethernet0') + dvs.port_admin_set('Ethernet0', 'up') test_dynamic_th_1 = '1' expectedProfile_th1 = self.make_lossless_profile_name(self.originalSpeed, self.originalCableLen, dynamic_th = test_dynamic_th_1) @@ -476,7 +477,7 @@ def test_nonDefaultAlpha(self, dvs, testlog): self.config_db.delete_entry('BUFFER_PROFILE', 'non-default-dynamic') # Shutdown interface - dvs.runcmd('config interface shutdown Ethernet0') + dvs.port_admin_set('Ethernet0', 'down') self.cleanup_db(dvs) @@ -484,7 +485,7 @@ def test_sharedHeadroomPool(self, dvs, testlog): self.setup_db(dvs) # Startup interface - dvs.runcmd('config interface startup Ethernet0') + dvs.port_admin_set('Ethernet0', 'up') # configure lossless PG 3-4 on interface and start up the interface self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'NULL'}) @@ -573,10 +574,10 @@ def test_sharedHeadroomPool(self, dvs, testlog): # remove lossless PG 3-4 on interface self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') - dvs.runcmd('config interface shutdown Ethernet0') + dvs.port_admin_set('Ethernet0', 'down') # Shutdown interface - dvs.runcmd('config interface shutdown Ethernet0') + dvs.port_admin_set('Ethernet0', 'down') self.cleanup_db(dvs) @@ -594,7 +595,7 @@ def test_shutdownPort(self, dvs, testlog): lossless_queue_zero_reference = 'egress_lossless_zero_profile' # Startup interface - dvs.runcmd('config interface startup Ethernet0') + dvs.port_admin_set('Ethernet0', 'up') # Configure lossless PG 3-4 on interface self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'NULL'}) @@ -603,7 +604,7 @@ def test_shutdownPort(self, dvs, testlog): # Shutdown port and check whether zero profiles have been applied on queues and the PG 0 maximumQueues = int(self.bufferMaxParameter['max_queues']) - 1 - 
dvs.runcmd("config interface shutdown Ethernet0") + dvs.port_admin_set('Ethernet0', 'down') self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:0", {"profile": lossy_pg_zero_reference}) self.app_db.wait_for_field_match("BUFFER_QUEUE_TABLE", "Ethernet0:0-2", {"profile": lossy_queue_zero_reference}) self.app_db.wait_for_field_match("BUFFER_QUEUE_TABLE", "Ethernet0:3-4", {"profile": lossless_queue_zero_reference}) @@ -631,7 +632,7 @@ def test_shutdownPort(self, dvs, testlog): self.app_db.wait_for_deleted_entry("BUFFER_PG_TABLE", "Ethernet0:6") # Startup port and check whether all the PGs have been added - dvs.runcmd("config interface startup Ethernet0") + dvs.port_admin_set('Ethernet0', 'up') self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:0", {"profile": lossy_pg_reference_appl_db}) self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:1", {"profile": lossy_pg_reference_appl_db}) self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) @@ -644,7 +645,7 @@ def test_shutdownPort(self, dvs, testlog): self.app_db.wait_for_deleted_entry("BUFFER_QUEUE_TABLE", "Ethernet0:9-{}".format(maximumQueues)) # Shutdown the port again to verify flow to remove buffer objects from an admin down port - dvs.runcmd("config interface shutdown Ethernet0") + dvs.port_admin_set('Ethernet0', 'down') # First, check whether the objects have been correctly handled self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:0", {"profile": lossy_pg_zero_reference}) self.app_db.wait_for_field_match("BUFFER_QUEUE_TABLE", "Ethernet0:0-2", {"profile": lossy_queue_zero_reference}) @@ -670,7 +671,7 @@ def test_shutdownPort(self, dvs, testlog): self.app_db.wait_for_field_match("BUFFER_QUEUE_TABLE", "Ethernet0:7-{}".format(maximumQueues), {"profile": lossy_queue_zero_reference}) # Startup again - dvs.runcmd("config interface startup Ethernet0") + dvs.port_admin_set('Ethernet0', 'up') self.app_db.wait_for_field_match("BUFFER_QUEUE_TABLE", "Ethernet0:0-2", {"profile": lossy_queue_reference_appl_db}) self.app_db.wait_for_field_match("BUFFER_QUEUE_TABLE", "Ethernet0:3-4", {"profile": lossless_queue_reference_appl_db}) self.app_db.wait_for_field_match("BUFFER_QUEUE_TABLE", "Ethernet0:5-6", {"profile": lossy_queue_reference_appl_db}) @@ -682,7 +683,7 @@ def test_shutdownPort(self, dvs, testlog): self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') # Shutdown interface - dvs.runcmd("config interface shutdown Ethernet0") + dvs.port_admin_set('Ethernet0', 'down') self.cleanup_db(dvs) @@ -697,14 +698,14 @@ def test_autoNegPort(self, dvs, testlog): maximum_advertised_speed = '25000' # Startup interfaces - dvs.runcmd('config interface startup Ethernet0') + dvs.port_admin_set('Ethernet0', 'up') # Configure lossless PG 3-4 on the interface self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'NULL'}) # Enable port auto negotiation - dvs.runcmd('config interface autoneg Ethernet0 enabled') - dvs.runcmd('config interface advertised-speeds Ethernet0 {}'.format(advertised_speeds)) + dvs.port_field_set('Ethernet0','autoneg', 'on') + dvs.port_field_set('Ethernet0','adv_speeds', advertised_speeds) # Check the buffer profile. 
The maximum_advertised_speed should be used expectedProfile = self.make_lossless_profile_name(maximum_advertised_speed, self.originalCableLen) @@ -718,7 +719,7 @@ def test_autoNegPort(self, dvs, testlog): self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": expectedProfile}) # Disable port auto negotiation - dvs.runcmd('config interface autoneg Ethernet0 disabled') + dvs.port_field_set('Ethernet0','autoneg', 'off') # Check the buffer profile. The configured speed should be used expectedProfile = self.make_lossless_profile_name(self.originalSpeed, self.originalCableLen) @@ -732,7 +733,7 @@ def test_autoNegPort(self, dvs, testlog): self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|6') # Shutdown interface - dvs.runcmd('config interface shutdown Ethernet0') + dvs.port_admin_set('Ethernet0', 'down') self.cleanup_db(dvs) diff --git a/tests/test_buffer_traditional.py b/tests/test_buffer_traditional.py index 3defae0c80..e955390fde 100644 --- a/tests/test_buffer_traditional.py +++ b/tests/test_buffer_traditional.py @@ -91,7 +91,7 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): test_speed = "100000" test_cable_len = "0m" - dvs.runcmd("config interface startup {}".format(self.INTF)) + dvs.port_admin_set(self.INTF, "up") # Make sure the buffer PG has been created orig_lossless_profile = "pg_lossless_{}_{}_profile".format(orig_speed, cable_len_before_test) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", orig_lossless_profile) @@ -113,7 +113,7 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): self.change_cable_len(test_cable_len) # change intf speed to 'test_speed' - dvs.runcmd("config interface speed {} {}".format(self.INTF, test_speed)) + dvs.port_field_set(self.INTF, "speed", test_speed) test_lossless_profile = "pg_lossless_{}_{}_profile".format(test_speed, test_cable_len) # buffer profile should not get created self.app_db.wait_for_deleted_entry("BUFFER_PROFILE_TABLE", test_lossless_profile) @@ -129,7 +129,7 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): self.change_cable_len(cable_len_before_test) # change intf speed to 'test_speed' - dvs.runcmd("config interface speed {} {}".format(self.INTF, test_speed)) + dvs.port_field_set(self.INTF, "speed", test_speed) if profile_exp_cnt_diff != 0: # new profile will get created self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", new_lossless_profile) @@ -150,5 +150,5 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): if orig_cable_len: self.change_cable_len(orig_cable_len) if orig_speed: - dvs.runcmd("config interface speed {} {}".format(self.INTF, orig_speed)) - dvs.runcmd("config interface shutdown {}".format(self.INTF)) + dvs.port_field_set(self.INTF, "speed", orig_speed) + dvs.port_admin_set(self.INTF, "down") diff --git a/tests/test_copp.py b/tests/test_copp.py index 19faac954f..5885a489b5 100644 --- a/tests/test_copp.py +++ b/tests/test_copp.py @@ -151,17 +151,18 @@ "trap_action": "trap", "trap_priority": "5" } + copp_trap = { - "bgp,bgpv6": copp_group_queue4_group1, - "lacp": copp_group_queue4_group1, - "arp_req,arp_resp,neigh_discovery":copp_group_queue4_group2, - "lldp":copp_group_queue4_group3, - "dhcp,dhcpv6":copp_group_queue4_group3, - "udld":copp_group_queue4_group3, - "ip2me":copp_group_queue1_group1, - "src_nat_miss,dest_nat_miss": copp_group_queue1_group2, - "sample_packet": copp_group_queue2_group1, - "ttl_error": copp_group_default + "bgp": ["bgp;bgpv6", copp_group_queue4_group1], + "lacp": ["lacp", 
copp_group_queue4_group1, "always_enabled"], + "arp": ["arp_req;arp_resp;neigh_discovery", copp_group_queue4_group2, "always_enabled"], + "lldp": ["lldp", copp_group_queue4_group3], + "dhcp": ["dhcp;dhcpv6", copp_group_queue4_group3], + "udld": ["udld", copp_group_queue4_group3, "always_enabled"], + "ip2me": ["ip2me", copp_group_queue1_group1, "always_enabled"], + "nat": ["src_nat_miss;dest_nat_miss", copp_group_queue1_group2], + "sflow": ["sample_packet", copp_group_queue2_group1], + "ttl": ["ttl_error", copp_group_default] } disabled_traps = ["sample_packet"] @@ -201,7 +202,7 @@ def setup_copp(self, dvs): self.trap_ctbl = swsscommon.Table(self.cdb, "COPP_TRAP") self.trap_group_ctbl = swsscommon.Table(self.cdb, "COPP_GROUP") self.feature_tbl = swsscommon.Table(self.cdb, "FEATURE") - fvs = swsscommon.FieldValuePairs([("state", "disbled")]) + fvs = swsscommon.FieldValuePairs([("state", "disabled")]) self.feature_tbl.set("sflow", fvs) time.sleep(2) @@ -306,8 +307,12 @@ def test_defaults(self, dvs, testlog): self.setup_copp(dvs) trap_keys = self.trap_atbl.getKeys() for traps in copp_trap: - trap_ids = traps.split(",") - trap_group = copp_trap[traps] + trap_info = copp_trap[traps] + trap_ids = trap_info[0].split(";") + trap_group = trap_info[1] + always_enabled = False + if len(trap_info) > 2: + always_enabled = True for trap_id in trap_ids: trap_type = traps_to_trap_type[trap_id] trap_found = False @@ -325,6 +330,7 @@ def test_defaults(self, dvs, testlog): if trap_id not in disabled_traps: assert trap_found == True + def test_restricted_trap_sflow(self, dvs, testlog): self.setup_copp(dvs) fvs = swsscommon.FieldValuePairs([("state", "enabled")]) @@ -334,10 +340,14 @@ def test_restricted_trap_sflow(self, dvs, testlog): trap_keys = self.trap_atbl.getKeys() for traps in copp_trap: - trap_ids = traps.split(",") + trap_info = copp_trap[traps] + trap_ids = trap_info[0].split(";") + trap_group = trap_info[1] + always_enabled = False + if len(trap_info) > 2: + always_enabled = True if "sample_packet" not in trap_ids: continue - trap_group = copp_trap[traps] trap_found = False trap_type = traps_to_trap_type["sample_packet"] for key in trap_keys: @@ -363,10 +373,14 @@ def test_policer_set(self, dvs, testlog): trap_keys = self.trap_atbl.getKeys() for traps in copp_trap: - if copp_trap[traps] != copp_group_queue4_group2: + trap_info = copp_trap[traps] + trap_ids = trap_info[0].split(";") + trap_group = trap_info[1] + always_enabled = False + if len(trap_info) > 2: + always_enabled = True + if trap_group != copp_group_queue4_group2: continue - trap_ids = traps.split(",") - trap_group = copp_trap[traps] for trap_id in trap_ids: trap_type = traps_to_trap_type[trap_id] trap_found = False @@ -390,12 +404,19 @@ def test_trap_group_set(self, dvs, testlog): traps = "bgp,bgpv6" fvs = swsscommon.FieldValuePairs([("trap_group", "queue1_group1")]) self.trap_ctbl.set("bgp", fvs) - copp_trap[traps] = copp_group_queue1_group1 + + for c_trap in copp_trap: + trap_info = copp_trap[c_trap] + ids = trap_info[0].replace(';', ',') + if traps == ids: + break + + trap_info[1] = copp_group_queue1_group1 time.sleep(2) trap_keys = self.trap_atbl.getKeys() trap_ids = traps.split(",") - trap_group = copp_trap[traps] + trap_group = trap_info[1] for trap_id in trap_ids: trap_type = traps_to_trap_type[trap_id] trap_found = False @@ -423,8 +444,14 @@ def test_trap_ids_set(self, dvs, testlog): old_traps = "bgp,bgpv6" trap_keys = self.trap_atbl.getKeys() + for c_trap in copp_trap: + trap_info = copp_trap[c_trap] + ids = 
trap_info[0].replace(';', ',') + if old_traps == ids: + break + trap_ids = old_traps.split(",") - trap_group = copp_trap[old_traps] + trap_group = trap_info[1] for trap_id in trap_ids: trap_type = traps_to_trap_type[trap_id] trap_found = False @@ -451,7 +478,7 @@ def test_trap_ids_set(self, dvs, testlog): trap_keys = self.trap_atbl.getKeys() trap_ids = traps.split(",") - trap_group = copp_trap[traps] + trap_group = trap_info[1] for trap_id in trap_ids: trap_type = traps_to_trap_type[trap_id] trap_found = False @@ -478,10 +505,11 @@ def test_trap_action_set(self, dvs, testlog): trap_keys = self.trap_atbl.getKeys() for traps in copp_trap: - if copp_trap[traps] != copp_group_queue4_group1: + trap_info = copp_trap[traps] + if trap_info[1] != copp_group_queue4_group1: continue - trap_ids = traps.split(",") - trap_group = copp_trap[traps] + trap_ids = trap_info[0].split(";") + trap_group = trap_info[1] for trap_id in trap_ids: trap_type = traps_to_trap_type[trap_id] trap_found = False @@ -499,18 +527,21 @@ def test_trap_action_set(self, dvs, testlog): if trap_id not in disabled_traps: assert trap_found == True + def test_new_trap_add(self, dvs, testlog): self.setup_copp(dvs) global copp_trap traps = "eapol,isis,bfd_micro,bfdv6_micro,ldp" - fvs = swsscommon.FieldValuePairs([("trap_group", "queue1_group2"),("trap_ids", traps)]) + fvs = swsscommon.FieldValuePairs([("trap_group", "queue1_group2"),("trap_ids", traps),("always_enabled", "true")]) self.trap_ctbl.set(traps, fvs) - copp_trap[traps] = copp_group_queue1_group2 + + + copp_trap["eapol"] = [traps, copp_group_queue1_group2, "always_enabled"] time.sleep(2) trap_keys = self.trap_atbl.getKeys() trap_ids = traps.split(",") - trap_group = copp_trap[traps] + trap_group = copp_group_queue1_group2 for trap_id in trap_ids: trap_type = traps_to_trap_type[trap_id] trap_found = False @@ -534,13 +565,19 @@ def test_new_trap_del(self, dvs, testlog): traps = "eapol,isis,bfd_micro,bfdv6_micro,ldp" fvs = swsscommon.FieldValuePairs([("trap_group", "queue1_group2"),("trap_ids", traps)]) self.trap_ctbl.set(traps, fvs) - copp_trap[traps] = copp_group_queue1_group2 + for c_trap in copp_trap: + trap_info = copp_trap[c_trap] + ids = trap_info[0].replace(';', ',') + if traps == ids: + break + + trap_info[1] = copp_group_queue1_group2 time.sleep(2) self.trap_ctbl._del(traps) time.sleep(2) trap_ids = traps.split(",") - trap_group = copp_trap[traps] + trap_group = trap_info[1] trap_keys = self.trap_atbl.getKeys() for trap_id in trap_ids: trap_type = traps_to_trap_type[trap_id] @@ -568,14 +605,19 @@ def test_new_trap_group_add(self, dvs, testlog): fvs = swsscommon.FieldValuePairs(list_val) self.trap_group_ctbl.set("queue5_group1", fvs) traps = "igmp_v1_report" - t_fvs = swsscommon.FieldValuePairs([("trap_group", "queue5_group1"),("trap_ids", "igmp_v1_report")]) + t_fvs = swsscommon.FieldValuePairs([("trap_group", "queue5_group1"),("trap_ids", "igmp_v1_report"),("always_enabled", "true")]) self.trap_ctbl.set(traps, t_fvs) - copp_trap[traps] = copp_group_queue5_group1 + for c_trap in copp_trap: + trap_info = copp_trap[c_trap] + ids = trap_info[0].replace(';', ',') + if traps == ids: + break + trap_info[1] = copp_group_queue5_group1 time.sleep(2) trap_keys = self.trap_atbl.getKeys() trap_ids = traps.split(",") - trap_group = copp_trap[traps] + trap_group = trap_info[1] for trap_id in trap_ids: trap_type = traps_to_trap_type[trap_id] trap_found = False @@ -602,16 +644,21 @@ def test_new_trap_group_del(self, dvs, testlog): fvs = swsscommon.FieldValuePairs(list_val) 
self.trap_group_ctbl.set("queue5_group1", fvs) traps = "igmp_v1_report" - t_fvs = swsscommon.FieldValuePairs([("trap_group", "queue5_group1"),("trap_ids", "igmp_v1_report")]) + t_fvs = swsscommon.FieldValuePairs([("trap_group", "queue5_group1"),("trap_ids", "igmp_v1_report"),("always_enabled", "true")]) self.trap_ctbl.set(traps, t_fvs) - copp_trap[traps] = copp_group_queue5_group1 + for c_trap in copp_trap: + trap_info = copp_trap[c_trap] + ids = trap_info[0].replace(';', ',') + if traps == ids: + break + trap_info[1] = copp_group_queue5_group1 self.trap_group_ctbl._del("queue5_group1") time.sleep(2) trap_keys = self.trap_atbl.getKeys() trap_ids = traps.split(",") - trap_group = copp_trap[traps] + trap_group = trap_info[1] for trap_id in trap_ids: trap_type = traps_to_trap_type[trap_id] trap_found = False @@ -643,10 +690,11 @@ def test_override_trap_grp_cfg_del (self, dvs, testlog): trap_keys = self.trap_atbl.getKeys() for traps in copp_trap: - if copp_trap[traps] != copp_group_queue1_group1: + trap_info = copp_trap[traps] + if trap_info[1] != copp_group_queue1_group1: continue - trap_ids = traps.split(",") - trap_group = copp_trap[traps] + trap_ids = trap_info[0].split(";") + trap_group = trap_info[1] for trap_id in trap_ids: trap_type = traps_to_trap_type[trap_id] trap_found = False @@ -675,7 +723,7 @@ def test_override_trap_cfg_del(self, dvs, testlog): self.trap_ctbl._del("ip2me") time.sleep(2) trap_ids = traps.split(",") - trap_group = copp_trap["ip2me"] + trap_group = copp_trap["ip2me"][1] trap_keys = self.trap_atbl.getKeys() for trap_id in trap_ids: trap_type = traps_to_trap_type[trap_id] @@ -705,7 +753,7 @@ def test_empty_trap_cfg(self, dvs, testlog): time.sleep(2) trap_id = "ip2me" - trap_group = copp_trap["ip2me"] + trap_group = copp_trap["ip2me"][1] trap_keys = self.trap_atbl.getKeys() trap_type = traps_to_trap_type[trap_id] trap_found = False @@ -740,3 +788,56 @@ def test_empty_trap_cfg(self, dvs, testlog): self.validate_trap_group(key,trap_group) break assert trap_found == True + + + def test_disabled_feature_always_enabled_trap(self, dvs, testlog): + self.setup_copp(dvs) + fvs = swsscommon.FieldValuePairs([("trap_ids", "lldp"), ("trap_group", "queue4_group3"), ("always_enabled", "true")]) + self.trap_ctbl.set("lldp", fvs) + fvs = swsscommon.FieldValuePairs([("state", "disabled")]) + self.feature_tbl.set("lldp", fvs) + + time.sleep(2) + global copp_trap + + trap_keys = self.trap_atbl.getKeys() + for traps in copp_trap: + trap_info = copp_trap[traps] + trap_ids = trap_info[0].split(";") + trap_group = trap_info[1] + + if "lldp" not in trap_ids: + continue + + trap_found = False + trap_type = traps_to_trap_type["lldp"] + for key in trap_keys: + (status, fvs) = self.trap_atbl.get(key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_HOSTIF_TRAP_ATTR_TRAP_TYPE": + if fv[1] == trap_type: + trap_found = True + if trap_found: + self.validate_trap_group(key,trap_group) + break + assert trap_found == True + + # change always_enabled to be false and check the trap is not installed: + fvs = swsscommon.FieldValuePairs([("trap_ids", "lldp"), ("trap_group", "queue4_group3"), ("always_enabled", "false")]) + self.trap_ctbl.set("lldp", fvs) + time.sleep(2) + + table_found = True + for key in trap_keys: + (status, fvs) = self.trap_atbl.get(key) + if status == False: + table_found = False + + # teardown + fvs = swsscommon.FieldValuePairs([("trap_ids", "lldp"), ("trap_group", "queue4_group3")]) + self.trap_ctbl.set("lldp", fvs) + fvs = swsscommon.FieldValuePairs([("state", "enabled")]) 
+ self.feature_tbl.set("lldp", fvs) + + assert table_found == False diff --git a/tests/test_crm.py b/tests/test_crm.py index 200b15cf79..e899aff6d3 100644 --- a/tests/test_crm.py +++ b/tests/test_crm.py @@ -17,29 +17,16 @@ def getCrmCounterValue(dvs, key, counter): return 0 -def getCrmConfigValue(dvs, key, counter): - - config_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - crm_stats_table = swsscommon.Table(config_db, 'CRM') - - for k in crm_stats_table.get(key)[1]: - if k[0] == counter: - return int(k[1]) - -def getCrmConfigStr(dvs, key, counter): - - config_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - crm_stats_table = swsscommon.Table(config_db, 'CRM') - - for k in crm_stats_table.get(key)[1]: - if k[0] == counter: - return k[1] - return "" - def check_syslog(dvs, marker, err_log, expected_cnt): (exitcode, num) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep \"%s\" | wc -l" % (marker, err_log)]) assert num.strip() >= str(expected_cnt) +def crm_update(dvs, field, value): + cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(cfg_db, "CRM") + fvs = swsscommon.FieldValuePairs([(field, value)]) + tbl.set("Config", fvs) + time.sleep(1) class TestCrm(object): def test_CrmFdbEntry(self, dvs, testlog): @@ -48,7 +35,7 @@ def test_CrmFdbEntry(self, dvs, testlog): # configured, server 2 will send packet which can switch to learn another # mac and fail the test. dvs.servers[2].runcmd("sysctl -w net.ipv6.conf.eth0.disable_ipv6=1") - dvs.runcmd("crm config polling interval 1") + crm_update(dvs, "polling_interval", "1") dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_FDB_ENTRY', '1000') @@ -99,9 +86,9 @@ def test_CrmFdbEntry(self, dvs, testlog): assert new_avail_counter == avail_counter marker = dvs.add_log_marker() - dvs.runcmd("crm config polling interval 2") - dvs.runcmd("crm config thresholds fdb high 90") - dvs.runcmd("crm config thresholds fdb type free") + crm_update(dvs, "polling_interval", "2") + crm_update(dvs, "fdb_entry_high_threshold", "90") + crm_update(dvs, "fdb_entry_threshold_type", "free") time.sleep(2) check_syslog(dvs, marker, "FDB_ENTRY THRESHOLD_EXCEEDED for TH_FREE", 1) @@ -115,9 +102,9 @@ def test_CrmIpv4Route(self, dvs, testlog): fvs = swsscommon.FieldValuePairs([("NULL","NULL")]) intf_tbl.set("Ethernet0", fvs) intf_tbl.set("Ethernet0|10.0.0.0/31", fvs) - dvs.runcmd("config interface startup Ethernet0") + dvs.port_admin_set("Ethernet0", "up") - dvs.runcmd("crm config polling interval 1") + crm_update(dvs, "polling_interval", "1") dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_IPV4_ROUTE_ENTRY', '1000') @@ -162,9 +149,9 @@ def test_CrmIpv4Route(self, dvs, testlog): assert new_avail_counter == avail_counter marker = dvs.add_log_marker() - dvs.runcmd("crm config polling interval 2") - dvs.runcmd("crm config thresholds ipv4 route high 90") - dvs.runcmd("crm config thresholds ipv4 route type free") + crm_update(dvs, "polling_interval", "2") + crm_update(dvs, "ipv4_route_high_threshold", "90") + crm_update(dvs, "ipv4_route_threshold_type", "free") time.sleep(2) check_syslog(dvs, marker, "IPV4_ROUTE THRESHOLD_EXCEEDED for TH_FREE",1) @@ -182,12 +169,12 @@ def test_CrmIpv6Route(self, dvs, testlog): fvs = swsscommon.FieldValuePairs([("NULL","NULL")]) intf_tbl.set("Ethernet0", fvs) intf_tbl.set("Ethernet0|fc00::1/126", fvs) - dvs.runcmd("config interface startup Ethernet0") + dvs.port_admin_set("Ethernet0", 
"up") dvs.servers[0].runcmd("ifconfig eth0 inet6 add fc00::2/126") dvs.servers[0].runcmd("ip -6 route add default via fc00::1") - dvs.runcmd("crm config polling interval 1") + crm_update(dvs, "polling_interval", "1") dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_IPV6_ROUTE_ENTRY', '1000') @@ -232,9 +219,9 @@ def test_CrmIpv6Route(self, dvs, testlog): assert new_avail_counter == avail_counter marker = dvs.add_log_marker() - dvs.runcmd("crm config polling interval 2") - dvs.runcmd("crm config thresholds ipv6 route high 90") - dvs.runcmd("crm config thresholds ipv6 route type free") + crm_update(dvs, "polling_interval", "2") + crm_update(dvs, "ipv6_route_high_threshold", "90") + crm_update(dvs, "ipv6_route_threshold_type", "free") time.sleep(2) check_syslog(dvs, marker, "IPV6_ROUTE THRESHOLD_EXCEEDED for TH_FREE",1) @@ -248,9 +235,8 @@ def test_CrmIpv4Nexthop(self, dvs, testlog): fvs = swsscommon.FieldValuePairs([("NULL","NULL")]) intf_tbl.set("Ethernet0|10.0.0.0/31", fvs) intf_tbl.set("Ethernet0", fvs) - dvs.runcmd("config interface startup Ethernet0") - - dvs.runcmd("crm config polling interval 1") + dvs.port_admin_set("Ethernet0", "up") + crm_update(dvs, "polling_interval", "1") dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_IPV4_NEXTHOP_ENTRY', '1000') @@ -287,9 +273,9 @@ def test_CrmIpv4Nexthop(self, dvs, testlog): assert new_avail_counter == avail_counter marker = dvs.add_log_marker() - dvs.runcmd("crm config polling interval 2") - dvs.runcmd("crm config thresholds ipv4 nexthop high 90") - dvs.runcmd("crm config thresholds ipv4 nexthop type free") + crm_update(dvs, "polling_interval", "2") + crm_update(dvs, "ipv4_nexthop_high_threshold", "90") + crm_update(dvs, "ipv4_nexthop_threshold_type", "free") time.sleep(2) check_syslog(dvs, marker, "IPV4_NEXTHOP THRESHOLD_EXCEEDED for TH_FREE",1) @@ -307,9 +293,9 @@ def test_CrmIpv6Nexthop(self, dvs, testlog): fvs = swsscommon.FieldValuePairs([("NULL","NULL")]) intf_tbl.set("Ethernet0", fvs) intf_tbl.set("Ethernet0|fc00::1/126", fvs) - dvs.runcmd("config interface startup Ethernet0") + dvs.port_admin_set("Ethernet0", "up") - dvs.runcmd("crm config polling interval 1") + crm_update(dvs, "polling_interval", "1") dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_IPV6_NEXTHOP_ENTRY', '1000') @@ -346,9 +332,9 @@ def test_CrmIpv6Nexthop(self, dvs, testlog): assert new_avail_counter == avail_counter marker = dvs.add_log_marker() - dvs.runcmd("crm config polling interval 2") - dvs.runcmd("crm config thresholds ipv6 nexthop high 90") - dvs.runcmd("crm config thresholds ipv6 nexthop type free") + crm_update(dvs, "polling_interval", "2") + crm_update(dvs, "ipv6_nexthop_high_threshold", "90") + crm_update(dvs, "ipv6_nexthop_threshold_type", "free") time.sleep(2) check_syslog(dvs, marker, "IPV6_NEXTHOP THRESHOLD_EXCEEDED for TH_FREE",1) @@ -362,9 +348,9 @@ def test_CrmIpv4Neighbor(self, dvs, testlog): fvs = swsscommon.FieldValuePairs([("NULL","NULL")]) intf_tbl.set("Ethernet0", fvs) intf_tbl.set("Ethernet0|10.0.0.0/31", fvs) - dvs.runcmd("config interface startup Ethernet0") + dvs.port_admin_set("Ethernet0", "up") - dvs.runcmd("crm config polling interval 1") + crm_update(dvs, "polling_interval", "1") dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_IPV4_NEIGHBOR_ENTRY', '1000') @@ -401,9 +387,9 @@ def test_CrmIpv4Neighbor(self, dvs, testlog): assert new_avail_counter == avail_counter marker = dvs.add_log_marker() - dvs.runcmd("crm config polling interval 2") - 
dvs.runcmd("crm config thresholds ipv4 neighbor high 90") - dvs.runcmd("crm config thresholds ipv4 neighbor type free") + crm_update(dvs, "polling_interval", "2") + crm_update(dvs, "ipv4_neighbor_high_threshold", "90") + crm_update(dvs, "ipv4_neighbor_threshold_type", "free") time.sleep(2) check_syslog(dvs, marker, "IPV4_NEIGHBOR THRESHOLD_EXCEEDED for TH_FREE",1) @@ -421,9 +407,9 @@ def test_CrmIpv6Neighbor(self, dvs, testlog): fvs = swsscommon.FieldValuePairs([("NULL","NULL")]) intf_tbl.set("Ethernet0", fvs) intf_tbl.set("Ethernet0|fc00::1/126", fvs) - dvs.runcmd("config interface startup Ethernet0") + dvs.port_admin_set("Ethernet0", "up") - dvs.runcmd("crm config polling interval 1") + crm_update(dvs, "polling_interval", "1") dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_IPV6_NEIGHBOR_ENTRY', '1000') @@ -460,9 +446,9 @@ def test_CrmIpv6Neighbor(self, dvs, testlog): assert new_avail_counter == avail_counter marker = dvs.add_log_marker() - dvs.runcmd("crm config polling interval 2") - dvs.runcmd("crm config thresholds ipv6 neighbor high 90") - dvs.runcmd("crm config thresholds ipv6 neighbor type free") + crm_update(dvs, "polling_interval", "2") + crm_update(dvs, "ipv6_neighbor_high_threshold", "90") + crm_update(dvs, "ipv6_neighbor_threshold_type", "free") time.sleep(2) check_syslog(dvs, marker, "IPV6_NEIGHBOR THRESHOLD_EXCEEDED for TH_FREE",1) @@ -478,10 +464,10 @@ def test_CrmNexthopGroup(self, dvs, testlog): intf_tbl.set("Ethernet4", fvs) intf_tbl.set("Ethernet0|10.0.0.0/31", fvs) intf_tbl.set("Ethernet4|10.0.0.2/31", fvs) - dvs.runcmd("config interface startup Ethernet0") - dvs.runcmd("config interface startup Ethernet4") + dvs.port_admin_set("Ethernet0", "up") + dvs.port_admin_set("Ethernet4", "up") - dvs.runcmd("crm config polling interval 1") + crm_update(dvs, "polling_interval", "1") dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_NEXT_HOP_GROUP_ENTRY', '1000') @@ -528,9 +514,9 @@ def test_CrmNexthopGroup(self, dvs, testlog): assert new_avail_counter == avail_counter marker = dvs.add_log_marker() - dvs.runcmd("crm config polling interval 2") - dvs.runcmd("crm config thresholds nexthop group member high 90") - dvs.runcmd("crm config thresholds nexthop group object type free") + crm_update(dvs, "polling_interval", "2") + crm_update(dvs, "nexthop_group_high_threshold", "90") + crm_update(dvs, "nexthop_group_threshold_type", "free") time.sleep(2) check_syslog(dvs, marker, "NEXTHOP_GROUP THRESHOLD_EXCEEDED for TH_FREE",1) @@ -553,10 +539,10 @@ def test_CrmNexthopGroupMember(self, dvs, testlog): intf_tbl.set("Ethernet4", fvs) intf_tbl.set("Ethernet0|10.0.0.0/31", fvs) intf_tbl.set("Ethernet4|10.0.0.2/31", fvs) - dvs.runcmd("config interface startup Ethernet0") - dvs.runcmd("config interface startup Ethernet4") + dvs.port_admin_set("Ethernet0", "up") + dvs.port_admin_set("Ethernet4", "up") - dvs.runcmd("crm config polling interval 1") + crm_update(dvs, "polling_interval", "1") dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_NEXT_HOP_GROUP_MEMBER_ENTRY', '1000') @@ -603,9 +589,9 @@ def test_CrmNexthopGroupMember(self, dvs, testlog): assert new_avail_counter == avail_counter marker = dvs.add_log_marker() - dvs.runcmd("crm config polling interval 2") - dvs.runcmd("crm config thresholds nexthop group member high 90") - dvs.runcmd("crm config thresholds nexthop group member type free") + crm_update(dvs, "polling_interval", "2") + crm_update(dvs, "nexthop_group_member_high_threshold", "90") + crm_update(dvs, 
"nexthop_group_member_threshold_type", "free") time.sleep(2) check_syslog(dvs, marker, "NEXTHOP_GROUP_MEMBER THRESHOLD_EXCEEDED for TH_FREE",1) @@ -618,7 +604,7 @@ def test_CrmAcl(self, dvs, testlog): db = swsscommon.DBConnector(4, dvs.redis_sock, 0) adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) - dvs.runcmd("crm config polling interval 1") + crm_update(dvs, "polling_interval", "1") time.sleep(1) bind_ports = ["Ethernet0", "Ethernet4"] @@ -698,7 +684,7 @@ def test_CrmAclGroup(self, dvs, testlog): db = swsscommon.DBConnector(4, dvs.redis_sock, 0) adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) - dvs.runcmd("crm config polling interval 1") + crm_update(dvs, "polling_interval", "1") bind_ports = ["Ethernet0", "Ethernet4", "Ethernet8"] # create ACL table @@ -734,263 +720,6 @@ def test_CrmDnatEntry(self, dvs, testlog): assert used_counter == 0 assert avail_counter != 0 -# commented ipmc test case till vslib is updated -# def test_CrmIpmcEntry(self, dvs, testlog): -# -# # get counters -# used_counter = getCrmCounterValue(dvs, 'STATS', 'crm_stats_ipmc_entry_used') -# avail_counter = getCrmCounterValue(dvs, 'STATS', 'crm_stats_ipmc_entry_available') -# assert used_counter == 0 -# assert avail_counter != 0 - - def test_Configure(self, dvs, testlog): - - #polling interval - dvs.runcmd("crm config polling interval 10") - time.sleep(2) - polling_interval = getCrmConfigValue(dvs, 'Config', 'polling_interval') - assert polling_interval == 10 - - def test_Configure_ipv4_route(self, dvs, testlog): - - #ipv4 route low/high threshold/type - dvs.runcmd("crm config thresholds ipv4 route low 50") - dvs.runcmd("crm config thresholds ipv4 route high 90") - dvs.runcmd("crm config thresholds ipv4 route type percentage") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'ipv4_route_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'ipv4_route_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'ipv4_route_threshold_type') - assert threshold_type == 'percentage' - - def test_Configure_ipv6_route(self, dvs, testlog): - - #ipv6 route low/high threshold/type - dvs.runcmd("crm config thresholds ipv6 route low 50") - dvs.runcmd("crm config thresholds ipv6 route high 90") - dvs.runcmd("crm config thresholds ipv6 route type used") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'ipv6_route_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'ipv6_route_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'ipv6_route_threshold_type') - assert threshold_type == 'used' - - def test_Configure_ipv4_nexthop(self, dvs, testlog): - - #ipv4 nexthop low/high threshold/type - dvs.runcmd("crm config thresholds ipv4 nexthop low 50") - dvs.runcmd("crm config thresholds ipv4 nexthop high 90") - dvs.runcmd("crm config thresholds ipv4 nexthop type 'percentage'") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'ipv4_nexthop_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'ipv4_nexthop_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'ipv4_nexthop_threshold_type') - assert threshold_type == 'percentage' - - def test_Configure_ipv6_nexthop(self, dvs, testlog): - - #ipv6 nexthop low/high threshold/type - dvs.runcmd("crm config thresholds ipv6 nexthop low 50") - dvs.runcmd("crm config thresholds ipv6 
nexthop high 90") - dvs.runcmd("crm config thresholds ipv6 nexthop type free") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'ipv6_nexthop_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'ipv6_nexthop_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'ipv6_nexthop_threshold_type') - assert threshold_type == 'free' - - def test_Configure_ipv4_neighbor(self, dvs, testlog): - - #ipv4 neighbor low/high threshold/type - dvs.runcmd("crm config thresholds ipv4 neighbor low 50") - dvs.runcmd("crm config thresholds ipv4 neighbor high 90") - dvs.runcmd("crm config thresholds ipv4 neighbor type percentage") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'ipv4_neighbor_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'ipv4_neighbor_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'ipv4_neighbor_threshold_type') - assert threshold_type == 'percentage' - - def test_Configure_ipv6_neighbor(self, dvs, testlog): - - #ipv6 neighbor low/high threshold/type - dvs.runcmd("crm config thresholds ipv6 neighbor low 50") - dvs.runcmd("crm config thresholds ipv6 neighbor high 90") - dvs.runcmd("crm config thresholds ipv6 neighbor type used") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'ipv6_neighbor_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'ipv6_neighbor_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'ipv6_neighbor_threshold_type') - assert threshold_type == 'used' - - def test_Configure_group_member(self, dvs, testlog): - - #nexthop group member low/high threshold/type - dvs.runcmd("crm config thresholds nexthop group member low 50") - dvs.runcmd("crm config thresholds nexthop group member high 90") - dvs.runcmd("crm config thresholds nexthop group member type percentage") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'nexthop_group_member_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'nexthop_group_member_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'nexthop_group_member_threshold_type') - assert threshold_type == 'percentage' - - def test_Configure_group_object(self, dvs, testlog): - - #nexthop group object low/high threshold/type - dvs.runcmd("crm config thresholds nexthop group object low 50") - dvs.runcmd("crm config thresholds nexthop group object high 90") - dvs.runcmd("crm config thresholds nexthop group object type free") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'nexthop_group_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'nexthop_group_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'nexthop_group_threshold_type') - assert threshold_type == 'free' - - def test_Configure_acl_table(self, dvs, testlog): - - #thresholds acl table low/high threshold/type - dvs.runcmd("crm config thresholds acl table low 50") - dvs.runcmd("crm config thresholds acl table high 90") - dvs.runcmd("crm config thresholds acl table type percentage") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'acl_table_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 
'acl_table_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'acl_table_threshold_type') - assert threshold_type == 'percentage' - - def test_Configure_acl_group(self, dvs, testlog): - - #thresholds acl group low/high threshold/type - dvs.runcmd("crm config thresholds acl group low 50") - dvs.runcmd("crm config thresholds acl group high 90") - dvs.runcmd("crm config thresholds acl group type used") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'acl_group_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'acl_group_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'acl_group_threshold_type') - assert threshold_type == 'used' - - def test_Configure_acl_group_entry(self, dvs, testlog): - - #thresholds acl group entry low/high threshold/type - dvs.runcmd("crm config thresholds acl group entry low 50") - dvs.runcmd("crm config thresholds acl group entry high 90") - dvs.runcmd("crm config thresholds acl group entry type percentage") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'acl_entry_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'acl_entry_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'acl_entry_threshold_type') - assert threshold_type == 'percentage' - - def test_Configure_acl_group_counter(self, dvs, testlog): - - #thresholds acl group counter low/high threshold/type - dvs.runcmd("crm config thresholds acl group counter low 50") - dvs.runcmd("crm config thresholds acl group counter high 90") - dvs.runcmd("crm config thresholds acl group counter type free") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'acl_counter_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'acl_counter_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'acl_counter_threshold_type') - assert threshold_type == 'free' - - def test_Configure_fdb(self, dvs, testlog): - - #thresholds fdb low/high threshold/type - dvs.runcmd("crm config thresholds fdb low 50") - dvs.runcmd("crm config thresholds fdb high 90") - dvs.runcmd("crm config thresholds fdb type percentage") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'fdb_entry_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'fdb_entry_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'fdb_entry_threshold_type') - assert threshold_type == 'percentage' - - def test_Configure_snat(self, dvs, testlog): - - #thresholds snat low/high threshold/type - dvs.runcmd("crm config thresholds snat low 50") - dvs.runcmd("crm config thresholds snat high 90") - dvs.runcmd("crm config thresholds snat type percentage") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'snat_entry_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'snat_entry_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'snat_entry_threshold_type') - assert threshold_type == 'percentage' - - def test_Configure_dnat(self, dvs, testlog): - - #thresholds dnat low/high threshold/type - dvs.runcmd("crm config thresholds dnat low 50") - dvs.runcmd("crm config thresholds dnat high 90") - dvs.runcmd("crm 
config thresholds dnat type percentage") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'dnat_entry_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'dnat_entry_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'dnat_entry_threshold_type') - assert threshold_type == 'percentage' - - def test_Configure_ipmc(self, dvs, testlog): - - #thresholds ipmc low/high threshold/type - dvs.runcmd("crm config thresholds ipmc low 50") - dvs.runcmd("crm config thresholds ipmc high 90") - dvs.runcmd("crm config thresholds ipmc type percentage") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'ipmc_entry_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'ipmc_entry_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'ipmc_entry_threshold_type') - assert threshold_type == 'percentage' - # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying def test_nonflaky_dummy(): diff --git a/tests/test_evpn_fdb.py b/tests/test_evpn_fdb.py index 31d75535c7..3c9a217747 100644 --- a/tests/test_evpn_fdb.py +++ b/tests/test_evpn_fdb.py @@ -51,7 +51,7 @@ def test_evpnFdb(dvs, testlog): helper = VxlanEvpnHelper() dvs.setup_db() - dvs.runcmd("sonic-clear fdb all") + dvs.clear_fdb() time.sleep(2) #Find switch_id @@ -62,7 +62,6 @@ def test_evpnFdb(dvs, testlog): # create vlan print("Creating Vlan3") - #dvs.runcmd("config vlan add 3") dvs.create_vlan("3") time.sleep(2) @@ -79,7 +78,6 @@ def test_evpnFdb(dvs, testlog): vm_before = helper.how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER") print("Making Ethernet0 as a member of Vlan3") - #dvs.runcmd("config vlan member add 3 Ethernet0") dvs.create_vlan_member("3", "Ethernet0") time.sleep(2) diff --git a/tests/test_evpn_fdb_p2mp.py b/tests/test_evpn_fdb_p2mp.py index 7929bc862f..8c1cfbf1d6 100644 --- a/tests/test_evpn_fdb_p2mp.py +++ b/tests/test_evpn_fdb_p2mp.py @@ -54,7 +54,7 @@ def test_evpnFdbP2MP(dvs, testlog): helper = VxlanEvpnHelper() dvs.setup_db() - dvs.runcmd("sonic-clear fdb all") + dvs.clear_fdb() time.sleep(2) #Find switch_id diff --git a/tests/test_fdb.py b/tests/test_fdb.py index 9893a4e3b0..2f9067a599 100644 --- a/tests/test_fdb.py +++ b/tests/test_fdb.py @@ -31,9 +31,10 @@ class TestFdb(object): def test_FdbWarmRestartNotifications(self, dvs, testlog): dvs.setup_db() - dvs.runcmd("sonic-clear fdb all") + dvs.clear_fdb() - dvs.runcmd("crm config polling interval 1") + dvs.crm_poll_set("1") + dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_FDB_ENTRY', '1000') time.sleep(2) @@ -225,8 +226,7 @@ def test_FdbWarmRestartNotifications(self, dvs, testlog): assert ok, str(extra) # enable warm restart - (exitcode, result) = dvs.runcmd("config warm_restart enable swss") - assert exitcode == 0 + dvs.warm_restart_swss("true") # freeze orchagent for warm restart (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check") @@ -317,14 +317,14 @@ def test_FdbWarmRestartNotifications(self, dvs, testlog): finally: # disable warm restart - dvs.runcmd("config warm_restart disable swss") + dvs.warm_restart_swss("false") # slow down crm polling - dvs.runcmd("crm config polling interval 10000") + dvs.crm_poll_set("10000") def test_FdbAddedAfterMemberCreated(self, dvs, testlog): dvs.setup_db() - dvs.runcmd("sonic-clear fdb all") + 
dvs.clear_fdb() time.sleep(2) # create a FDB entry in Application DB @@ -377,7 +377,7 @@ def test_FdbAddedAfterMemberCreated(self, dvs, testlog): ("SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID", iface_2_bridge_port_id["Ethernet0"])]) assert ok, str(extra) - dvs.runcmd("sonic-clear fdb all") + dvs.clear_fdb() dvs.remove_vlan_member("2", "Ethernet0") dvs.remove_vlan("2") diff --git a/tests/test_fdb_update.py b/tests/test_fdb_update.py index 5daf27804e..128dc3773b 100644 --- a/tests/test_fdb_update.py +++ b/tests/test_fdb_update.py @@ -56,8 +56,7 @@ def get_mac_by_bridge_id(self, dvs, bridge_id): def test_FDBAddedAndUpdated(self, dvs, testlog): dvs.setup_db() - - dvs.runcmd("sonic-clear fdb all") + dvs.clear_fdb() time.sleep(2) # create a FDB entry in Application DB @@ -173,7 +172,7 @@ def test_FDBAddedAndUpdated(self, dvs, testlog): def test_FDBLearnedAndUpdated(self, dvs, testlog): dvs.setup_db() - dvs.runcmd("sonic-clear fdb all") + dvs.clear_fdb() # create vlan; create vlan member dvs.create_vlan("6") @@ -261,12 +260,12 @@ def test_FDBLearnedAndUpdated(self, dvs, testlog): dvs.remove_vlan("6") # clear fdb - dvs.runcmd("sonic-clear fdb all") + dvs.clear_fdb() def test_FDBLearnedAndFlushed(self, dvs, testlog): dvs.setup_db() - dvs.runcmd("sonic-clear fdb all") + dvs.clear_fdb() VLAN = "9" VLAN_NAME = "Vlan9" diff --git a/tests/test_fgnhg.py b/tests/test_fgnhg.py index 2fa8a9d890..645853e24c 100644 --- a/tests/test_fgnhg.py +++ b/tests/test_fgnhg.py @@ -216,7 +216,7 @@ def startup_link(dvs, db, port): db.wait_for_field_match("PORT_TABLE", "Ethernet%d" % (port * 4), {"oper_status": "up"}) def run_warm_reboot(dvs): - dvs.runcmd("config warm_restart enable swss") + dvs.warm_restart_swss("true") # Stop swss before modifing the configDB dvs.stop_swss() @@ -280,7 +280,7 @@ def create_interface_n_fg_ecmp_config(dvs, nh_range_start, nh_range_end, fg_nhg_ ip_pref_key = "Ethernet" + str(i*4) + "|10.0.0." + str(i*2) + "/31" create_entry(config_db, IF_TB, if_name_key, fvs_nul) create_entry(config_db, IF_TB, ip_pref_key, fvs_nul) - dvs.runcmd("config interface startup " + if_name_key) + dvs.port_admin_set(if_name_key, "up") shutdown_link(dvs, app_db, i) startup_link(dvs, app_db, i) bank = 1 @@ -300,7 +300,7 @@ def remove_interface_n_fg_ecmp_config(dvs, nh_range_start, nh_range_end, fg_nhg_ ip_pref_key = "Ethernet" + str(i*4) + "|10.0.0." + str(i*2) + "/31" remove_entry(config_db, IF_TB, if_name_key) remove_entry(config_db, IF_TB, ip_pref_key) - dvs.runcmd("config interface shutdown " + if_name_key) + dvs.port_admin_set(if_name_key, "down") shutdown_link(dvs, app_db, i) remove_entry(config_db, FG_NHG_MEMBER, "10.0.0." 
+ str(1 + i*2)) remove_entry(config_db, FG_NHG, fg_nhg_name) @@ -334,7 +334,7 @@ def fine_grained_ecmp_base_test(dvs, match_mode): create_entry(config_db, VLAN_MEMB_TB, vlan_name_key + "|" + if_name_key, fvs) create_entry(config_db, VLAN_IF_TB, vlan_name_key, fvs_nul) create_entry(config_db, VLAN_IF_TB, ip_pref_key, fvs_nul) - dvs.runcmd("config interface startup " + if_name_key) + dvs.port_admin_set(if_name_key, "up") dvs.servers[i].runcmd("ip link set down dev eth0") == 0 dvs.servers[i].runcmd("ip link set up dev eth0") == 0 bank = 0 @@ -619,7 +619,7 @@ def fine_grained_ecmp_base_test(dvs, match_mode): remove_entry(config_db, VLAN_IF_TB, vlan_name_key) remove_entry(config_db, VLAN_MEMB_TB, vlan_name_key + "|" + if_name_key) remove_entry(config_db, VLAN_TB, vlan_name_key) - dvs.runcmd("config interface shutdown " + if_name_key) + dvs.port_admin_set(if_name_key, "down") dvs.servers[i].runcmd("ip link set down dev eth0") == 0 remove_entry(config_db, "FG_NHG_MEMBER", "10.0.0." + str(1 + i*2)) @@ -770,7 +770,7 @@ def test_fgnhg_matchmode_nexthop_multi_route(self, dvs, testlog): ip_pref_key = "Ethernet" + str(i*4) + "|10.0.0." + str(i*2) + "/31" create_entry(config_db, IF_TB, if_name_key, fvs_nul) create_entry(config_db, IF_TB, ip_pref_key, fvs_nul) - dvs.runcmd("config interface startup " + if_name_key) + dvs.port_admin_set(if_name_key, "up") shutdown_link(dvs, app_db, i) startup_link(dvs, app_db, i) dvs.runcmd("arp -s 10.0.0." + str(1 + i*2) + " 00:00:00:00:00:" + str(1 + i*2)) diff --git a/tests/test_inband_intf_mgmt_vrf.py b/tests/test_inband_intf_mgmt_vrf.py index 05aa1f7389..4b1b8c86ed 100644 --- a/tests/test_inband_intf_mgmt_vrf.py +++ b/tests/test_inband_intf_mgmt_vrf.py @@ -14,7 +14,6 @@ def setup_db(self, dvs): def add_mgmt_vrf(self, dvs): initial_entries = set(self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER")) - #dvs.runcmd("config vrf add mgmt") dvs.runcmd("ip link add mgmt type vrf table 5000") dvs.runcmd("ifconfig mgmt up") time.sleep(2) diff --git a/tests/test_macsec.py b/tests/test_macsec.py index 0f945300e3..f74f31c008 100644 --- a/tests/test_macsec.py +++ b/tests/test_macsec.py @@ -699,6 +699,54 @@ def test_macsec_term_orch(self, dvs: conftest.DockerVirtualSwitch, testlog): 1) assert(not inspector.get_macsec_port(macsec_port)) + def test_macsec_attribute_change(self, dvs: conftest.DockerVirtualSwitch, testlog): + port_name = "Ethernet0" + local_mac_address = "00-15-5D-78-FF-C1" + peer_mac_address = "00-15-5D-78-FF-C2" + macsec_port_identifier = 1 + macsec_port = "macsec_eth1" + sak = "0" * 32 + auth_key = "0" * 32 + packet_number = 1 + ssci = 1 + salt = "0" * 24 + + wpa = WPASupplicantMock(dvs) + inspector = MACsecInspector(dvs) + + self.init_macsec( + wpa, + port_name, + local_mac_address, + macsec_port_identifier) + wpa.set_macsec_control(port_name, True) + wpa.config_macsec_port(port_name, {"enable_encrypt": False}) + wpa.config_macsec_port(port_name, {"cipher_suite": "GCM-AES-256"}) + self.establish_macsec( + wpa, + port_name, + local_mac_address, + peer_mac_address, + macsec_port_identifier, + 0, + sak, + packet_number, + auth_key, + ssci, + salt) + macsec_info = inspector.get_macsec_port(macsec_port) + assert("encrypt off" in macsec_info) + assert("GCM-AES-256" in macsec_info) + self.deinit_macsec( + wpa, + inspector, + port_name, + macsec_port, + local_mac_address, + peer_mac_address, + macsec_port_identifier, + 0) + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down diff --git 
a/tests/test_mclag_fdb.py b/tests/test_mclag_fdb.py index 5049859437..a4e5ff0f9d 100644 --- a/tests/test_mclag_fdb.py +++ b/tests/test_mclag_fdb.py @@ -76,7 +76,7 @@ def how_many_entries_exist(db, table): @pytest.mark.dev_sanity def test_mclagFdb_basic_config_add(dvs, testlog): dvs.setup_db() - dvs.runcmd("sonic-clear fdb all") + dvs.clear_fdb() time.sleep(2) vlan_before = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN") diff --git a/tests/test_mux.py b/tests/test_mux.py index e9eb027a9d..20ac1832c1 100644 --- a/tests/test_mux.py +++ b/tests/test_mux.py @@ -61,8 +61,8 @@ def create_vlan_interface(self, confdb, asicdb, dvs): confdb.create_entry("VLAN_INTERFACE", "Vlan1000|192.168.0.1/24", fvs) confdb.create_entry("VLAN_INTERFACE", "Vlan1000|fc02:1000::1/64", fvs) - dvs.runcmd("config interface startup Ethernet0") - dvs.runcmd("config interface startup Ethernet4") + dvs.port_admin_set("Ethernet0", "up") + dvs.port_admin_set("Ethernet4", "up") def create_mux_cable(self, confdb): diff --git a/tests/test_nat.py b/tests/test_nat.py index 3c4a5ddce3..9e87b5f54c 100644 --- a/tests/test_nat.py +++ b/tests/test_nat.py @@ -1,4 +1,5 @@ import time +import pytest from dvslib.dvs_common import wait_for_result @@ -175,6 +176,7 @@ def test_DelNaPtStaticEntry(self, dvs, testlog): #check the entry is not there in asic db self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NAT_ENTRY", 0) + @pytest.mark.skip(reason="Failing. Under investigation") def test_AddTwiceNatEntry(self, dvs, testlog): # initialize self.setup_db(dvs) diff --git a/tests/test_nhg.py b/tests/test_nhg.py index df071b4d17..94d581b47c 100644 --- a/tests/test_nhg.py +++ b/tests/test_nhg.py @@ -135,7 +135,7 @@ def config_intf(self, i): self.config_db.create_entry("INTERFACE", self.port_name(i), fvs) self.config_db.create_entry("INTERFACE", "{}|{}".format(self.port_name(i), self.port_ipprefix(i)), fvs) - self.dvs.runcmd("config interface startup " + self.port_name(i)) + self.dvs.port_admin_set(self.port_name(i), "up") self.dvs.runcmd("arp -s {} {}".format(self.peer_ip(i), self.port_mac(i))) assert self.dvs.servers[i].runcmd("ip link set down dev eth0") == 0 assert self.dvs.servers[i].runcmd("ip link set up dev eth0") == 0 @@ -154,11 +154,13 @@ def init_test(self, dvs, num_intfs): self.app_db = self.dvs.get_app_db() self.asic_db = self.dvs.get_asic_db() self.config_db = self.dvs.get_config_db() + self.state_db = self.dvs.get_state_db() self.nhg_ps = swsscommon.ProducerStateTable(self.app_db.db_connection, swsscommon.APP_NEXTHOP_GROUP_TABLE_NAME) self.rt_ps = swsscommon.ProducerStateTable(self.app_db.db_connection, swsscommon.APP_ROUTE_TABLE_NAME) self.lr_ps = swsscommon.ProducerStateTable(self.app_db.db_connection, swsscommon.APP_LABEL_ROUTE_TABLE_NAME) self.cbf_nhg_ps = swsscommon.ProducerStateTable(self.app_db.db_connection, swsscommon.APP_CLASS_BASED_NEXT_HOP_GROUP_TABLE_NAME) self.fc_to_nhg_ps = swsscommon.ProducerStateTable(self.app_db.db_connection, swsscommon.APP_FC_TO_NHG_INDEX_MAP_TABLE_NAME) + self.switch_ps = swsscommon.ProducerStateTable(self.app_db.db_connection, swsscommon.APP_SWITCH_TABLE_NAME) # Set switch FC capability to 63 self.dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_MAX_NUMBER_OF_FORWARDING_CLASSES', '63') @@ -182,6 +184,16 @@ def route_exists(self, rt_prefix): def nhg_map_exists(self, nhg_map_index): return self.get_nhg_map_id(nhg_map_index) is not None + def enable_ordered_ecmp(self): + switch_fvs = swsscommon.FieldValuePairs([('ordered_ecmp', 'true')]) + 
self.switch_ps.set('switch', switch_fvs) + self.state_db.wait_for_field_match("SWITCH_CAPABILITY", "switch", {"ORDERED_ECMP_CAPABLE": "true"}) + + def disble_ordered_ecmp(self): + switch_fvs = swsscommon.FieldValuePairs([('ordered_ecmp', 'false')]) + self.switch_ps.set('switch', switch_fvs) + self.state_db.wait_for_field_match("SWITCH_CAPABILITY", "switch", {"ORDERED_ECMP_CAPABLE": "false"}) + class TestNhgExhaustBase(TestNextHopGroupBase): MAX_ECMP_COUNT = 512 MAX_PORT_COUNT = 10 @@ -887,8 +899,13 @@ def test_cbf_nhg_exhaust(self, dvs, testlog): class TestNextHopGroup(TestNextHopGroupBase): - def test_route_nhg(self, dvs, dvs_route, testlog): + @pytest.mark.parametrize('ordered_ecmp', ['false', 'true']) + def test_route_nhg(self, ordered_ecmp, dvs, dvs_route, testlog): self.init_test(dvs, 3) + nhip_seqid_map = {"10.0.0.1" : "1", "10.0.0.3" : "2" , "10.0.0.5" : "3" } + + if ordered_ecmp == 'true': + self.enable_ordered_ecmp() rtprefix = "2.2.2.0/24" @@ -911,6 +928,11 @@ def test_route_nhg(self, dvs, dvs_route, testlog): assert bool(fvs) + if ordered_ecmp == 'true': + assert fvs["SAI_NEXT_HOP_GROUP_ATTR_TYPE"] == "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_ORDERED_ECMP" + else: + assert fvs["SAI_NEXT_HOP_GROUP_ATTR_TYPE"] == "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP" + keys = self.asic_db.get_keys(self.ASIC_NHGM_STR) assert len(keys) == 3 @@ -923,6 +945,13 @@ def test_route_nhg(self, dvs, dvs_route, testlog): # verify weight attributes not in asic db assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_WEIGHT") is None + if ordered_ecmp == "true": + nhid = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] + nh_fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nhid) + assert nhip_seqid_map[nh_fvs["SAI_NEXT_HOP_ATTR_IP"]] == fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID"] + else: + assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID") is None + # Remove route 2.2.2.0/24 self.rt_ps._del(rtprefix) @@ -930,8 +959,9 @@ def test_route_nhg(self, dvs, dvs_route, testlog): dvs_route.check_asicdb_deleted_route_entries([rtprefix]) # Negative test with nexthops with incomplete weight info - fvs = swsscommon.FieldValuePairs([("nexthop","10.0.0.1,10.0.0.3,10.0.0.5"), - ("ifname", "Ethernet0,Ethernet4,Ethernet8"), + # To validate Order ECMP change the nexthop order + fvs = swsscommon.FieldValuePairs([("nexthop","10.0.0.5,10.0.0.1,10.0.0.3"), + ("ifname", "Ethernet8,Ethernet0,Ethernet4"), ("weight", "10,30")]) self.rt_ps.set(rtprefix, fvs) @@ -939,25 +969,33 @@ def test_route_nhg(self, dvs, dvs_route, testlog): rtkeys = dvs_route.check_asicdb_route_entries([rtprefix]) # assert the route points to next hop group - fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY", rtkeys[0]) + fvs = self.asic_db.get_entry(self.ASIC_RT_STR, rtkeys[0]) nhgid = fvs["SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID"] - fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP", nhgid) + fvs = self.asic_db.get_entry(self.ASIC_NHG_STR, nhgid) assert bool(fvs) - keys = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER") + keys = self.asic_db.get_keys(self.ASIC_NHGM_STR) assert len(keys) == 3 for k in keys: - fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER", k) + fvs = self.asic_db.get_entry(self.ASIC_NHGM_STR, k) assert fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhgid # verify weight attributes not in asic db assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_WEIGHT") is None + + if ordered_ecmp == "true": + nhid = 
fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] + nh_fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nhid) + assert nhip_seqid_map[nh_fvs["SAI_NEXT_HOP_ATTR_IP"]] == fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID"] + else: + assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID") is None + # Remove route 2.2.2.0/24 self.rt_ps._del(rtprefix) @@ -974,20 +1012,20 @@ def test_route_nhg(self, dvs, dvs_route, testlog): rtkeys = dvs_route.check_asicdb_route_entries([rtprefix]) # assert the route points to next hop group - fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY", rtkeys[0]) + fvs = self.asic_db.get_entry(self.ASIC_RT_STR, rtkeys[0]) nhgid = fvs["SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID"] - fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP", nhgid) + fvs = self.asic_db.get_entry(self.ASIC_NHG_STR, nhgid) assert bool(fvs) - keys = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER") + keys = self.asic_db.get_keys(self.ASIC_NHGM_STR) assert len(keys) == 3 for k in keys: - fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER", k) + fvs = self.asic_db.get_entry(self.ASIC_NHGM_STR, k) assert fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhgid @@ -995,6 +1033,13 @@ def test_route_nhg(self, dvs, dvs_route, testlog): nhid = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] weight = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_WEIGHT"] + if ordered_ecmp == "true": + nhid = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] + nh_fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nhid) + assert nhip_seqid_map[nh_fvs["SAI_NEXT_HOP_ATTR_IP"]] == fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID"] + else: + assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID") is None + fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nhid) nhip = fvs["SAI_NEXT_HOP_ATTR_IP"].split('.') expected_weight = int(nhip[3]) * 10 @@ -1011,11 +1056,11 @@ def test_route_nhg(self, dvs, dvs_route, testlog): # wait for route to be programmed time.sleep(1) - keys = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP") + keys = self.asic_db.get_keys(self.ASIC_NHG_STR) assert len(keys) == 2 - keys = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER") + keys = self.asic_db.get_keys(self.ASIC_NHGM_STR) assert len(keys) == 6 @@ -1035,7 +1080,8 @@ def test_route_nhg(self, dvs, dvs_route, testlog): assert len(keys) == 2 - i # bring links up one-by-one - for i in [0, 1, 2]: + # Bring link up in random order to verify sequence id is as per order + for i, val in enumerate([2,1,0]): self.flap_intf(i, 'up') keys = self.asic_db.get_keys(self.ASIC_NHGM_STR) @@ -1045,13 +1091,23 @@ def test_route_nhg(self, dvs, dvs_route, testlog): for k in keys: fvs = self.asic_db.get_entry(self.ASIC_NHGM_STR, k) assert fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhgid - + if ordered_ecmp == "true": + nhid = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] + nh_fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nhid) + assert nhip_seqid_map[nh_fvs["SAI_NEXT_HOP_ATTR_IP"]] == fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID"] + else: + assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID") is None + # Remove route 2.2.2.0/24 self.rt_ps._del(rtprefix) # Wait for route 2.2.2.0/24 to be removed dvs_route.check_asicdb_deleted_route_entries([rtprefix]) + # Cleanup by disabling to get default behaviour + if ordered_ecmp == 'true': + 
self.disble_ordered_ecmp() + def test_label_route_nhg(self, dvs, testlog): self.init_test(dvs, 3) @@ -1875,6 +1931,9 @@ def create_cbf_invalid_nhg_map_test(): time.sleep(1) assert(not self.nhg_exists('cbfgroup3')) + # Cleanup + self.cbf_nhg_ps._del('cbfgroup3') + self.init_test(dvs, 4) mainline_cbf_nhg_test() @@ -1980,7 +2039,7 @@ def data_validation_test(): # Test validation errors nhg_maps = [ ('-1', '0'), # negative FC - ('64', '0'), # greater than max FC value + ('63', '0'), # greater than max FC value ('a', '0'), # non-integer FC ('0', '-1'), # negative NH index ('0', 'a'), # non-integer NH index @@ -2151,6 +2210,42 @@ def create_cbf_nhg_inexistent_map_test(): self.fc_to_nhg_ps._del(nhg_maps.pop()) self.asic_db.wait_for_n_keys(self.ASIC_NHG_MAP_STR, self.asic_nhg_maps_count) + # Test scenario: + # - Create a CBF NHG that has a member which is not yet synced. It shouldn't be synced. + # - Add the missing member and assert the CBF NHG is now synced. + def test_cbf_sync_before_member(self, dvs, testlog): + self.init_test(dvs, 2) + + # Create an FC to NH index selection map + nhg_map = [(str(i), '0' if i < 4 else '1') for i in range(8)] + fvs = swsscommon.FieldValuePairs(nhg_map) + self.fc_to_nhg_ps.set('cbfnhgmap1', fvs) + self.asic_db.wait_for_n_keys(self.ASIC_NHG_MAP_STR, self.asic_nhg_maps_count + 1) + + # Create a non-CBF NHG + fvs = swsscommon.FieldValuePairs([('nexthop', '10.0.0.1,10.0.0.3'), + ('ifname', 'Ethernet0,Ethernet4')]) + self.nhg_ps.set('group1', fvs) + self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.asic_nhgs_count + 1) + + # Create a CBF NHG with a member that doesn't currently exist. Nothing should happen + fvs = swsscommon.FieldValuePairs([('members', 'group1,group2'), + ('selection_map', 'cbfnhgmap1')]) + self.cbf_nhg_ps.set('cbfgroup1', fvs) + time.sleep(1) + assert(len(self.asic_db.get_keys(self.ASIC_NHG_STR)) == self.asic_nhgs_count + 1) + + # Create the missing non-CBF NHG. This and the CBF NHG should be created. 
+ fvs = swsscommon.FieldValuePairs([('nexthop', '10.0.0.1,10.0.0.3'), + ("ifname", "Ethernet0,Ethernet4")]) + self.nhg_ps.set("group2", fvs) + self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.asic_nhgs_count + 3) + + # Cleanup + self.nhg_ps._del('cbfgroup1') + self.nhg_ps._del('group1') + self.nhg_ps._del('group2') + self.nhg_ps._del('cbfnhgmap1') # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_nvgre_tunnel.py b/tests/test_nvgre_tunnel.py new file mode 100644 index 0000000000..90fe560141 --- /dev/null +++ b/tests/test_nvgre_tunnel.py @@ -0,0 +1,381 @@ +import time +import json +import random +import time +import pytest + + +from swsscommon import swsscommon +from pprint import pprint + + +NVGRE_TUNNEL = 'NVGRE_TUNNEL' +NVGRE_TUNNEL_MAP = 'NVGRE_TUNNEL_MAP' + + +SAI_OBJECT_TYPE_TUNNEL = 'ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL' +SAI_OBJECT_TYPE_TUNNEL_MAP = 'ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP' +SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY = 'ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY' + + +def create_entry(tbl, key, pairs): + fvs = swsscommon.FieldValuePairs(pairs) + tbl.set(key, fvs) + time.sleep(1) + + +def create_entry_tbl(db, table, separator, key, pairs): + tbl = swsscommon.Table(db, table) + create_entry(tbl, key, pairs) + + +def delete_entry_tbl(db, table, key): + tbl = swsscommon.Table(db, table) + tbl._del(key) + time.sleep(1) + + +def get_all_created_entries(db, table, existed_entries): + tbl = swsscommon.Table(db, table) + entries = set(tbl.getKeys()) + new_entries = list(entries - existed_entries) + assert len(new_entries) >= 0, "DB entries weren't created" + new_entries.sort() + return new_entries + + +def get_created_entries(db, table, existed_entries, count): + new_entries = get_all_created_entries(db, table, existed_entries) + assert len(new_entries) == count, "Wrong number of created entries." + return new_entries + + +def get_exist_entries(dvs, table): + db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(db, table) + return set(tbl.getKeys()) + + +def get_created_entry(db, table, existed_entries): + tbl = swsscommon.Table(db, table) + entries = set(tbl.getKeys()) + new_entries = list(entries - existed_entries) + assert len(new_entries) == 1, "Wrong number of created entries."
+ return new_entries[0] + + +def how_many_entries_exist(db, table): + tbl = swsscommon.Table(db, table) + return len(tbl.getKeys()) + + +def get_lo(dvs): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + tbl = swsscommon.Table(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE') + + entries = tbl.getKeys() + lo_id = None + for entry in entries: + status, fvs = tbl.get(entry) + assert status, "Got an error when getting a key" + for key, value in fvs: + if key == 'SAI_ROUTER_INTERFACE_ATTR_TYPE' and value == 'SAI_ROUTER_INTERFACE_TYPE_LOOPBACK': + lo_id = entry + break + else: + assert False, 'Loopback id was not found' + + return lo_id + + +def check_object(db, table, key, expected_attributes): + tbl = swsscommon.Table(db, table) + keys = tbl.getKeys() + assert key in keys, "The desired key is not present" + + status, fvs = tbl.get(key) + assert status, "Got an error when getting a key" + + assert len(fvs) == len(expected_attributes), "Unexpected number of attributes" + + attr_keys = {entry[0] for entry in fvs} + + for name, value in fvs: + assert expected_attributes[name] == value, "Wrong value %s for the attribute %s = %s" % \ + (value, name, expected_attributes[name]) + + +loopback_id = 0 + + +class NvgreTunnel(object): + tunnel_ids = set() + tunnel_map_ids = set() + tunnel_map_entry_ids = set() + tunnel_map_map = {} + tunnel = {} + + + def fetch_exist_entries(self, dvs): + self.tunnel_ids = get_exist_entries(dvs, SAI_OBJECT_TYPE_TUNNEL) + self.tunnel_map_ids = get_exist_entries(dvs, SAI_OBJECT_TYPE_TUNNEL_MAP) + self.tunnel_map_entry_ids = get_exist_entries(dvs, SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY) + + global loopback_id + if not loopback_id: + loopback_id = get_lo(dvs) + + + def create_nvgre_tunnel(self, dvs, tunnel_name, src_ip): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + create_entry_tbl(conf_db, NVGRE_TUNNEL, '|', tunnel_name, [ ('src_ip', src_ip) ]) + time.sleep(1) + + + def check_nvgre_tunnel(self, dvs, tunnel_name, src_ip): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + global loopback_id + + tunnel_id = get_created_entry(asic_db, SAI_OBJECT_TYPE_TUNNEL, self.tunnel_ids) + tunnel_map_ids = get_created_entries(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP, self.tunnel_map_ids, 4) + + assert how_many_entries_exist(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP) == (len(self.tunnel_map_ids) + 4), "The TUNNEL_MAP wasn't created" + assert how_many_entries_exist(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY) == len(self.tunnel_map_entry_ids), "The TUNNEL_MAP_ENTRY is created too early" + assert how_many_entries_exist(asic_db, SAI_OBJECT_TYPE_TUNNEL) == (len(self.tunnel_ids) + 1), "The TUNNEL wasn't created" + + check_object(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP, tunnel_map_ids[0], { 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VLAN_ID_TO_VSID' }) + check_object(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP, tunnel_map_ids[1], { 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_BRIDGE_IF_TO_VSID' }) + check_object(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP, tunnel_map_ids[2], { 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VSID_TO_VLAN_ID' }) + check_object(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP, tunnel_map_ids[3], { 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VSID_TO_BRIDGE_IF' }) + + check_object(asic_db, SAI_OBJECT_TYPE_TUNNEL, tunnel_id, + { + 'SAI_TUNNEL_ATTR_TYPE': 'SAI_TUNNEL_TYPE_NVGRE', + 'SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE': loopback_id, + 'SAI_TUNNEL_ATTR_DECAP_MAPPERS': f'2:{tunnel_map_ids[2]},{tunnel_map_ids[3]}', +
'SAI_TUNNEL_ATTR_ENCAP_MAPPERS': f'2:{tunnel_map_ids[0]},{tunnel_map_ids[1]}', + 'SAI_TUNNEL_ATTR_ENCAP_SRC_IP': src_ip + } + ) + + self.tunnel_map_ids.update(tunnel_map_ids) + self.tunnel_ids.add(tunnel_id) + self.tunnel_map_map[tunnel_name] = tunnel_map_ids + self.tunnel[tunnel_name] = tunnel_id + + + def check_invalid_nvgre_tunnel(self, dvs): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + assert how_many_entries_exist(asic_db, SAI_OBJECT_TYPE_TUNNEL) == len(self.tunnel_ids), "Invalid TUNNEL was created" + assert how_many_entries_exist(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP) == len(self.tunnel_map_ids), "Invalid TUNNEL_MAP was created" + + + def remove_nvgre_tunnel(self, dvs, tunnel_name): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + delete_entry_tbl(conf_db, NVGRE_TUNNEL, tunnel_name) + time.sleep(1) + + + def check_remove_nvgre_tunnel(self, dvs, tunnel_name): + self.fetch_exist_entries(dvs) + self.tunnel.pop(tunnel_name, None) + self.tunnel_map_map.pop(tunnel_name, None) + + + def create_nvgre_tunnel_map_entry(self, dvs, tunnel_name, tunnel_map_entry_name, vlan_id, vsid): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + create_entry_tbl( + conf_db, + NVGRE_TUNNEL_MAP, '|', f'{tunnel_name}|{tunnel_map_entry_name}', + [ + ('vsid', vsid), + ('vlan_id', f'Vlan{vlan_id}'), + ], + ) + time.sleep(1) + + + def check_nvgre_tunnel_map_entry(self, dvs, tunnel_name, vlan_id, vsid): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + if (self.tunnel_map_map.get(tunnel_name) is None): + tunnel_map_ids = get_created_entries(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP, self.tunnel_map_ids, 4) + else: + tunnel_map_ids = self.tunnel_map_map[tunnel_name] + + tunnel_map_entry_id = get_created_entries(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY, self.tunnel_map_entry_ids, 1) + + assert how_many_entries_exist(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY) == (len(self.tunnel_map_entry_ids) + 1), "The TUNNEL_MAP_ENTRY is created too early" + + check_object(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY, tunnel_map_entry_id[0], + { + 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE': 'SAI_TUNNEL_MAP_TYPE_VSID_TO_VLAN_ID', + 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP': tunnel_map_ids[2], + 'SAI_TUNNEL_MAP_ENTRY_ATTR_VSID_ID_KEY': vsid, + 'SAI_TUNNEL_MAP_ENTRY_ATTR_VLAN_ID_VALUE': vlan_id, + } + ) + + self.tunnel_map_entry_ids.update(tunnel_map_entry_id) + + + def check_invalid_nvgre_tunnel_map_entry(self, dvs): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + assert how_many_entries_exist(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY) == len(self.tunnel_map_entry_ids), "Invalid TUNNEL_MAP_ENTRY was created" + + + def remove_nvgre_tunnel_map_entry(self, dvs, tunnel_name, tunnel_map_entry_name): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + delete_entry_tbl(conf_db, NVGRE_TUNNEL_MAP, f'{tunnel_name}|{tunnel_map_entry_name}') + time.sleep(1) + + + def check_remove_nvgre_tunnel_map_entry(self, dvs): + self.fetch_exist_entries(dvs) + + +@pytest.mark.usefixtures('dvs_vlan_manager') +class TestNvgreTunnel(object): + + def get_nvgre_tunnel_obj(self): + return NvgreTunnel() + + + def test_nvgre_create_tunnel_map_entry(self, dvs, testlog): + try: + tunnel_name = 'tunnel_1' + tunnel_map_entry_name = 'entry_1' + src_ip = '10.0.0.1' + vlan_id = '500' + vsid = '850' + + nvgre_obj = self.get_nvgre_tunnel_obj() + nvgre_obj.fetch_exist_entries(dvs) + + 
self.dvs_vlan.create_vlan(vlan_id) + + nvgre_obj.create_nvgre_tunnel(dvs, tunnel_name, src_ip) + nvgre_obj.check_nvgre_tunnel(dvs, tunnel_name, src_ip) + + nvgre_obj.create_nvgre_tunnel_map_entry(dvs, tunnel_name, tunnel_map_entry_name, vlan_id, vsid) + nvgre_obj.check_nvgre_tunnel_map_entry(dvs, tunnel_name, vlan_id, vsid) + finally: + nvgre_obj.remove_nvgre_tunnel_map_entry(dvs, tunnel_name, tunnel_map_entry_name) + nvgre_obj.check_remove_nvgre_tunnel_map_entry(dvs) + + nvgre_obj.remove_nvgre_tunnel(dvs, tunnel_name) + nvgre_obj.check_remove_nvgre_tunnel(dvs, tunnel_name) + + self.dvs_vlan.remove_vlan(vlan_id) + + + def test_multiple_nvgre_tunnels_entries(self, dvs, testlog): + try: + tunnel_name_1 = 'tunnel_1' + tunnel_name_2 = 'tunnel_2' + tunnel_name_3 = 'tunnel_3' + entry_1 = 'entry_1' + entry_2 = 'entry_2' + entry_3 = 'entry_3' + entry_4 = 'entry_4' + + nvgre_obj = self.get_nvgre_tunnel_obj() + nvgre_obj.fetch_exist_entries(dvs) + + self.dvs_vlan.create_vlan('501') + self.dvs_vlan.create_vlan('502') + self.dvs_vlan.create_vlan('503') + self.dvs_vlan.create_vlan('504') + + nvgre_obj.create_nvgre_tunnel(dvs, tunnel_name_1, '10.0.0.1') + nvgre_obj.check_nvgre_tunnel(dvs, tunnel_name_1, '10.0.0.1') + + nvgre_obj.create_nvgre_tunnel_map_entry(dvs, tunnel_name_1, entry_1, '501', '801') + nvgre_obj.check_nvgre_tunnel_map_entry(dvs, tunnel_name_1, '501', '801') + + nvgre_obj.create_nvgre_tunnel(dvs, tunnel_name_2, '10.0.0.2') + nvgre_obj.check_nvgre_tunnel(dvs, tunnel_name_2, '10.0.0.2') + + nvgre_obj.create_nvgre_tunnel_map_entry(dvs, tunnel_name_2, entry_2, '502', '802') + nvgre_obj.check_nvgre_tunnel_map_entry(dvs, tunnel_name_2, '502', '802') + + nvgre_obj.create_nvgre_tunnel(dvs, tunnel_name_3, '10.0.0.3') + nvgre_obj.check_nvgre_tunnel(dvs, tunnel_name_3, '10.0.0.3') + + nvgre_obj.create_nvgre_tunnel_map_entry(dvs, tunnel_name_3, entry_3, '503', '803') + nvgre_obj.check_nvgre_tunnel_map_entry(dvs, tunnel_name_3, '503', '803') + + nvgre_obj.create_nvgre_tunnel_map_entry(dvs, tunnel_name_3, entry_4, '504', '804') + nvgre_obj.check_nvgre_tunnel_map_entry(dvs, tunnel_name_3, '504', '804') + finally: + nvgre_obj.remove_nvgre_tunnel_map_entry(dvs, tunnel_name_1, entry_1) + nvgre_obj.check_remove_nvgre_tunnel_map_entry(dvs) + + nvgre_obj.remove_nvgre_tunnel(dvs, tunnel_name_1) + nvgre_obj.check_remove_nvgre_tunnel(dvs, tunnel_name_1) + + nvgre_obj.remove_nvgre_tunnel_map_entry(dvs, tunnel_name_2, entry_2) + nvgre_obj.check_remove_nvgre_tunnel_map_entry(dvs) + + nvgre_obj.remove_nvgre_tunnel(dvs, tunnel_name_2) + nvgre_obj.check_remove_nvgre_tunnel(dvs, tunnel_name_2) + + nvgre_obj.remove_nvgre_tunnel_map_entry(dvs, tunnel_name_3, entry_3) + nvgre_obj.check_remove_nvgre_tunnel_map_entry(dvs) + + nvgre_obj.remove_nvgre_tunnel_map_entry(dvs, tunnel_name_3, entry_4) + nvgre_obj.check_remove_nvgre_tunnel_map_entry(dvs) + + nvgre_obj.remove_nvgre_tunnel(dvs, tunnel_name_3) + nvgre_obj.check_remove_nvgre_tunnel(dvs, tunnel_name_3) + + self.dvs_vlan.remove_vlan('501') + self.dvs_vlan.remove_vlan('502') + self.dvs_vlan.remove_vlan('503') + self.dvs_vlan.remove_vlan('504') + + + def test_invalid_nvgre_tunnel(self, dvs, testlog): + nvgre_obj = self.get_nvgre_tunnel_obj() + nvgre_obj.fetch_exist_entries(dvs) + + nvgre_obj.create_nvgre_tunnel(dvs, 'tunnel_1', '1111.1111.1111.1111') + nvgre_obj.check_invalid_nvgre_tunnel(dvs) + + + def test_invalid_nvgre_tunnel_map_entry(self, dvs, testlog): + try: + tunnel_name = 'tunnel_1' + tunnel_map_entry_name = 'entry_1' + src_ip = '10.0.0.1' + vlan_id = '500' + 
vsid = 'INVALID' + + nvgre_obj = self.get_nvgre_tunnel_obj() + nvgre_obj.fetch_exist_entries(dvs) + + self.dvs_vlan.create_vlan(vlan_id) + + nvgre_obj.create_nvgre_tunnel(dvs, tunnel_name, src_ip) + nvgre_obj.check_nvgre_tunnel(dvs, tunnel_name, src_ip) + + nvgre_obj.create_nvgre_tunnel_map_entry(dvs, tunnel_name, tunnel_map_entry_name, vlan_id, vsid) + nvgre_obj.check_invalid_nvgre_tunnel_map_entry(dvs) + finally: + nvgre_obj.remove_nvgre_tunnel(dvs, tunnel_name) + nvgre_obj.check_remove_nvgre_tunnel(dvs, tunnel_name) + + self.dvs_vlan.remove_vlan(vlan_id) + + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass diff --git a/tests/test_pbh.py b/tests/test_pbh.py index 328e8231bc..65155ba2e9 100644 --- a/tests/test_pbh.py +++ b/tests/test_pbh.py @@ -253,6 +253,111 @@ def test_PbhRuleCreationDeletion(self, testlog): self.dvs_pbh.verify_pbh_hash_field_count(0) +class TestPbhBasicEditFlows: + def test_PbhRuleUpdate(self, testlog): + try: + # PBH hash field + pbhlogger.info("Create PBH hash field: {}".format(PBH_HASH_FIELD_NAME)) + self.dvs_pbh.create_pbh_hash_field( + hash_field_name=PBH_HASH_FIELD_NAME, + hash_field=PBH_HASH_FIELD_HASH_FIELD, + sequence_id=PBH_HASH_FIELD_SEQUENCE_ID + ) + self.dvs_pbh.verify_pbh_hash_field_count(1) + + # PBH hash + pbhlogger.info("Create PBH hash: {}".format(PBH_HASH_NAME)) + self.dvs_pbh.create_pbh_hash( + hash_name=PBH_HASH_NAME, + hash_field_list=PBH_HASH_HASH_FIELD_LIST + ) + self.dvs_pbh.verify_pbh_hash_count(1) + + # PBH table + pbhlogger.info("Create PBH table: {}".format(PBH_TABLE_NAME)) + self.dvs_pbh.create_pbh_table( + table_name=PBH_TABLE_NAME, + interface_list=PBH_TABLE_INTERFACE_LIST, + description=PBH_TABLE_DESCRIPTION + ) + self.dvs_acl.verify_acl_table_count(1) + + # PBH rule + attr_dict = { + "ether_type": PBH_RULE_ETHER_TYPE, + "ip_protocol": PBH_RULE_IP_PROTOCOL, + "gre_key": PBH_RULE_GRE_KEY, + "inner_ether_type": PBH_RULE_INNER_ETHER_TYPE + } + + pbhlogger.info("Create PBH rule: {}".format(PBH_RULE_NAME)) + self.dvs_pbh.create_pbh_rule( + table_name=PBH_TABLE_NAME, + rule_name=PBH_RULE_NAME, + priority=PBH_RULE_PRIORITY, + qualifiers=attr_dict, + hash_name=PBH_RULE_HASH + ) + self.dvs_acl.verify_acl_rule_count(1) + + attr_dict = { + "ether_type": "0x86dd", + "ipv6_next_header": "0x2f", + "inner_ether_type": "0x0800" + } + + pbhlogger.info("Update PBH rule: {}".format(PBH_RULE_NAME)) + self.dvs_pbh.update_pbh_rule( + table_name=PBH_TABLE_NAME, + rule_name=PBH_RULE_NAME, + priority="100", + qualifiers=attr_dict, + hash_name=PBH_RULE_HASH, + packet_action="SET_LAG_HASH", + flow_counter="ENABLED" + ) + + hash_id = self.dvs_pbh.get_pbh_hash_ids(1)[0] + counter_id = self.dvs_acl.get_acl_counter_ids(1)[0] + + sai_attr_dict = { + "SAI_ACL_ENTRY_ATTR_PRIORITY": self.dvs_acl.get_simple_qualifier_comparator("100"), + "SAI_ACL_ENTRY_ATTR_FIELD_ETHER_TYPE": self.dvs_acl.get_simple_qualifier_comparator("34525&mask:0xffff"), + "SAI_ACL_ENTRY_ATTR_FIELD_IP_PROTOCOL": self.dvs_acl.get_simple_qualifier_comparator("disabled"), + "SAI_ACL_ENTRY_ATTR_FIELD_IPV6_NEXT_HEADER": self.dvs_acl.get_simple_qualifier_comparator("47&mask:0xff"), + "SAI_ACL_ENTRY_ATTR_FIELD_GRE_KEY": self.dvs_acl.get_simple_qualifier_comparator("disabled"), + "SAI_ACL_ENTRY_ATTR_FIELD_INNER_ETHER_TYPE": self.dvs_acl.get_simple_qualifier_comparator("2048&mask:0xffff"), + "SAI_ACL_ENTRY_ATTR_ACTION_SET_ECMP_HASH_ID": 
self.dvs_acl.get_simple_qualifier_comparator("disabled"), + "SAI_ACL_ENTRY_ATTR_ACTION_SET_LAG_HASH_ID": self.dvs_acl.get_simple_qualifier_comparator(hash_id), + "SAI_ACL_ENTRY_ATTR_ACTION_COUNTER": self.dvs_acl.get_simple_qualifier_comparator(counter_id) + } + + self.dvs_acl.verify_acl_rule_generic( + sai_qualifiers=sai_attr_dict + ) + + finally: + # PBH rule + pbhlogger.info("Remove PBH rule: {}".format(PBH_RULE_NAME)) + self.dvs_pbh.remove_pbh_rule(PBH_TABLE_NAME, PBH_RULE_NAME) + self.dvs_acl.verify_acl_rule_count(0) + + # PBH table + pbhlogger.info("Remove PBH table: {}".format(PBH_TABLE_NAME)) + self.dvs_pbh.remove_pbh_table(PBH_TABLE_NAME) + self.dvs_acl.verify_acl_table_count(0) + + # PBH hash + pbhlogger.info("Remove PBH hash: {}".format(PBH_HASH_NAME)) + self.dvs_pbh.remove_pbh_hash(PBH_HASH_NAME) + self.dvs_pbh.verify_pbh_hash_count(0) + + # PBH hash field + pbhlogger.info("Remove PBH hash field: {}".format(PBH_HASH_FIELD_NAME)) + self.dvs_pbh.remove_pbh_hash_field(PBH_HASH_FIELD_NAME) + self.dvs_pbh.verify_pbh_hash_field_count(0) + + @pytest.mark.usefixtures("dvs_lag_manager") class TestPbhExtendedFlows: class PbhRefCountHelper(object): diff --git a/tests/test_pfcwd.py b/tests/test_pfcwd.py index c569bc8a43..2707588580 100644 --- a/tests/test_pfcwd.py +++ b/tests/test_pfcwd.py @@ -77,6 +77,222 @@ def test_PfcWdAclCreationDeletion(self, dvs, dvs_acl, testlog): finally: dvs_acl.remove_acl_table(PFCWD_TABLE_NAME) + + +class TestPfcwdFunc(object): + @pytest.fixture + def setup_teardown_test(self, dvs): + self.get_db_handle(dvs) + + self.test_ports = ["Ethernet0"] + + self.setup_test(dvs) + self.get_port_oids() + self.get_queue_oids() + + yield + + self.teardown_test(dvs) + + def setup_test(self, dvs): + # get original cable len for test ports + fvs = self.config_db.get_entry("CABLE_LENGTH", "AZURE") + self.orig_cable_len = dict() + for port in self.test_ports: + self.orig_cable_len[port] = fvs[port] + # set cable len to non zero value. 
if port is down, default cable len is 0 + self.set_cable_len(port, "5m") + # startup port + dvs.port_admin_set(port, "up") + + # enable pfcwd + self.set_flex_counter_status("PFCWD", "enable") + # enable queue so that queue oids are generated + self.set_flex_counter_status("QUEUE", "enable") + + def teardown_test(self, dvs): + # disable pfcwd + self.set_flex_counter_status("PFCWD", "disable") + # disable queue + self.set_flex_counter_status("QUEUE", "disable") + + for port in self.test_ports: + if self.orig_cable_len: + self.set_cable_len(port, self.orig_cable_len[port]) + # shutdown port + dvs.port_admin_set(port, "down") + + def get_db_handle(self, dvs): + self.app_db = dvs.get_app_db() + self.asic_db = dvs.get_asic_db() + self.config_db = dvs.get_config_db() + self.counters_db = dvs.get_counters_db() + + def set_flex_counter_status(self, key, state): + fvs = {'FLEX_COUNTER_STATUS': state} + self.config_db.update_entry("FLEX_COUNTER_TABLE", key, fvs) + time.sleep(1) + + def get_queue_oids(self): + self.queue_oids = self.counters_db.get_entry("COUNTERS_QUEUE_NAME_MAP", "") + + def get_port_oids(self): + self.port_oids = self.counters_db.get_entry("COUNTERS_PORT_NAME_MAP", "") + + def _get_bitmask(self, queues): + mask = 0 + if queues is not None: + for queue in queues: + mask = mask | 1 << queue + + return str(mask) + + def set_ports_pfc(self, status='enable', pfc_queues=[3,4]): + for port in self.test_ports: + if 'enable' in status: + fvs = {'pfc_enable': ",".join([str(q) for q in pfc_queues])} + self.config_db.create_entry("PORT_QOS_MAP", port, fvs) + else: + self.config_db.delete_entry("PORT_QOS_MAP", port) + + def set_cable_len(self, port_name, cable_len): + fvs = {port_name: cable_len} + self.config_db.update_entry("CABLE_LEN", "AZURE", fvs) + + def start_pfcwd_on_ports(self, poll_interval="200", detection_time="200", restoration_time="200", action="drop"): + pfcwd_info = {"POLL_INTERVAL": poll_interval} + self.config_db.update_entry("PFC_WD", "GLOBAL", pfcwd_info) + + pfcwd_info = {"action": action, + "detection_time" : detection_time, + "restoration_time": restoration_time + } + for port in self.test_ports: + self.config_db.update_entry("PFC_WD", port, pfcwd_info) + + def stop_pfcwd_on_ports(self): + for port in self.test_ports: + self.config_db.delete_entry("PFC_WD", port) + + def verify_ports_pfc(self, queues=None): + mask = self._get_bitmask(queues) + fvs = {"SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL" : mask} + for port in self.test_ports: + self.asic_db.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", self.port_oids[port], fvs) + + def verify_pfcwd_state(self, queues, state="stormed"): + fvs = {"PFC_WD_STATUS": state} + for port in self.test_ports: + for queue in queues: + queue_name = port + ":" + str(queue) + self.counters_db.wait_for_field_match("COUNTERS", self.queue_oids[queue_name], fvs) + + def verify_pfcwd_counters(self, queues, restore="0"): + fvs = {"PFC_WD_QUEUE_STATS_DEADLOCK_DETECTED" : "1", + "PFC_WD_QUEUE_STATS_DEADLOCK_RESTORED" : restore + } + for port in self.test_ports: + for queue in queues: + queue_name = port + ":" + str(queue) + self.counters_db.wait_for_field_match("COUNTERS", self.queue_oids[queue_name], fvs) + + def reset_pfcwd_counters(self, queues): + fvs = {"PFC_WD_QUEUE_STATS_DEADLOCK_DETECTED" : "0", + "PFC_WD_QUEUE_STATS_DEADLOCK_RESTORED" : "0" + } + for port in self.test_ports: + for queue in queues: + queue_name = port + ":" + str(queue) + self.counters_db.update_entry("COUNTERS", self.queue_oids[queue_name], fvs) + + def set_storm_state(self, 
queues, state="enabled"): + fvs = {"DEBUG_STORM": state} + for port in self.test_ports: + for queue in queues: + queue_name = port + ":" + str(queue) + self.counters_db.update_entry("COUNTERS", self.queue_oids[queue_name], fvs) + + def test_pfcwd_single_queue(self, dvs, setup_teardown_test): + try: + # enable PFC on queues + test_queues = [3, 4] + self.set_ports_pfc(pfc_queues=test_queues) + + # verify in asic db + self.verify_ports_pfc(test_queues) + + # start pfcwd + self.start_pfcwd_on_ports() + + # start pfc storm + storm_queue = [3] + self.set_storm_state(storm_queue) + + # verify pfcwd is triggered + self.verify_pfcwd_state(storm_queue) + + # verify pfcwd counters + self.verify_pfcwd_counters(storm_queue) + + # verify if queue is disabled + self.verify_ports_pfc(queues=[4]) + + # stop storm + self.set_storm_state(storm_queue, state="disabled") + + # verify pfcwd state is restored + self.verify_pfcwd_state(storm_queue, state="operational") + + # verify pfcwd counters + self.verify_pfcwd_counters(storm_queue, restore="1") + + # verify if queue is enabled + self.verify_ports_pfc(test_queues) + + finally: + self.reset_pfcwd_counters(storm_queue) + self.stop_pfcwd_on_ports() + + def test_pfcwd_multi_queue(self, dvs, setup_teardown_test): + try: + # enable PFC on queues + test_queues = [3, 4] + self.set_ports_pfc(pfc_queues=test_queues) + + # verify in asic db + self.verify_ports_pfc(test_queues) + + # start pfcwd + self.start_pfcwd_on_ports() + + # start pfc storm + self.set_storm_state(test_queues) + + # verify pfcwd is triggered + self.verify_pfcwd_state(test_queues) + + # verify pfcwd counters + self.verify_pfcwd_counters(test_queues) + + # verify if queue is disabled. Expected mask is 0 + self.verify_ports_pfc() + + # stop storm + self.set_storm_state(test_queues, state="disabled") + + # verify pfcwd state is restored + self.verify_pfcwd_state(test_queues, state="operational") + + # verify pfcwd counters + self.verify_pfcwd_counters(test_queues, restore="1") + + # verify if queue is enabled + self.verify_ports_pfc(test_queues) + + finally: + self.reset_pfcwd_counters(test_queues) + self.stop_pfcwd_on_ports() + # # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_port.py b/tests/test_port.py index 4766c87deb..c63dae5c57 100644 --- a/tests/test_port.py +++ b/tests/test_port.py @@ -59,11 +59,11 @@ def test_PortMtu(self, dvs, testlog): assert fv[1] == "9100" def test_PortNotification(self, dvs, testlog): - dvs.runcmd("config interface startup Ethernet0") - dvs.runcmd("config interface ip add Ethernet0 10.0.0.0/31") + dvs.port_admin_set("Ethernet0", "up") + dvs.interface_ip_add("Ethernet0", "10.0.0.0/31") - dvs.runcmd("config interface startup Ethernet4") - dvs.runcmd("config interface ip add Ethernet4 10.0.0.2/31") + dvs.port_admin_set("Ethernet4", "up") + dvs.interface_ip_add("Ethernet4", "10.0.0.2/31") dvs.servers[0].runcmd("ip link set down dev eth0") == 0 @@ -126,11 +126,11 @@ def test_PortFecForce(self, dvs, testlog): adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) def test_PortFec(self, dvs, testlog): - dvs.runcmd("config interface startup Ethernet0") - dvs.runcmd("config interface ip add Ethernet0 10.0.0.0/31") + dvs.port_admin_set("Ethernet0", "up") + dvs.interface_ip_add("Ethernet0", "10.0.0.0/31") - dvs.runcmd("config interface startup Ethernet4") - dvs.runcmd("config interface ip add Ethernet4 10.0.0.2/31") + 
dvs.port_admin_set("Ethernet4", "up") + dvs.interface_ip_add("Ethernet4", "10.0.0.2/31") dvs.servers[0].runcmd("ip link set down dev eth0") == 0 diff --git a/tests/test_port_an.py b/tests/test_port_an.py index 93add09b9a..dc98f43d0e 100644 --- a/tests/test_port_an.py +++ b/tests/test_port_an.py @@ -254,9 +254,8 @@ def test_PortAutoNegWarm(self, dvs, testlog): cfvs = swsscommon.FieldValuePairs([("admin_status", "up")]) ctbl.set("Ethernet0", cfvs) - # enable warm restart - (exitcode, result) = dvs.runcmd("config warm_restart enable swss") - assert exitcode == 0 + + dvs.warm_restart_swss("true") # freeze orchagent for warm restart (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check") @@ -290,9 +289,9 @@ def test_PortAutoNegWarm(self, dvs, testlog): finally: # disable warm restart - dvs.runcmd("config warm_restart disable swss") + dvs.warm_restart_swss("disable") # slow down crm polling - dvs.runcmd("crm config polling interval 10000") + dvs.crm_poll_set("10000") # Add Dummy always-pass test at end as workaroud diff --git a/tests/test_port_dpb_vlan.py b/tests/test_port_dpb_vlan.py index df03a5ecf9..e6f89beb1a 100644 --- a/tests/test_port_dpb_vlan.py +++ b/tests/test_port_dpb_vlan.py @@ -52,6 +52,7 @@ def test_dependency(self, dvs): self.dvs_vlan.remove_vlan(vlan) self.dvs_vlan.get_and_verify_vlan_ids(0) + @pytest.mark.skip(reason="Failing. Under investigation") def test_one_port_one_vlan(self, dvs): dpb = DPB() vlan = "100" @@ -117,6 +118,7 @@ def test_one_port_one_vlan(self, dvs): self.dvs_vlan.remove_vlan(vlan) self.dvs_vlan.get_and_verify_vlan_ids(0) + @pytest.mark.skip(reason="Failing. Under investigation") def test_one_port_multiple_vlan(self, dvs): dpb = DPB() @@ -182,6 +184,7 @@ def test_one_port_multiple_vlan(self, dvs): self.dvs_vlan.remove_vlan("102") self.dvs_vlan.get_and_verify_vlan_ids(0) + @pytest.mark.skip(reason="Failing. 
Under investigation") def test_all_port_10_vlans(self, dvs): num_vlans = 10 start_vlan = 100 diff --git a/tests/test_qos_map.py b/tests/test_qos_map.py index 21a25742c9..301bd3c6d6 100644 --- a/tests/test_qos_map.py +++ b/tests/test_qos_map.py @@ -139,7 +139,7 @@ def test_dscp_to_fc(self, dvs): self.init_test(dvs) # Create a DSCP_TO_FC map - dscp_map = [(str(i), str(i)) for i in range(0, 64)] + dscp_map = [(str(i), str(i)) for i in range(0, 63)] self.dscp_ps.set("AZURE", swsscommon.FieldValuePairs(dscp_map)) self.asic_db.wait_for_n_keys(self.ASIC_QOS_MAP_STR, self.asic_qos_map_count + 1) @@ -153,7 +153,7 @@ def test_dscp_to_fc(self, dvs): assert(fvs.get("SAI_QOS_MAP_ATTR_TYPE") == "SAI_QOS_MAP_TYPE_DSCP_TO_FORWARDING_CLASS") # Modify the map - dscp_map = [(str(i), '0') for i in range(0, 64)] + dscp_map = [(str(i), '0') for i in range(0, 63)] self.dscp_ps.set("AZURE", swsscommon.FieldValuePairs(dscp_map)) time.sleep(1) @@ -174,7 +174,7 @@ def test_dscp_to_fc(self, dvs): ('-1', '0'), # negative DSCP ('64', '0'), # DSCP greater than max value ('0', '-1'), # negative FC - ('0', '64'), # FC greater than max value + ('0', '63'), # FC greater than max value ('a', '0'), # non-integer DSCP ('0', 'a'), # non-integet FC ] @@ -228,7 +228,7 @@ def test_exp_to_fc(self, dvs): ('-1', '0'), # negative EXP ('8', '0'), # EXP greater than max value ('0', '-1'), # negative FC - ('0', '64'), # FC greater than max value + ('0', '63'), # FC greater than max value ('a', '0'), # non-integer EXP ('0', 'a'), # non-integet FC ] @@ -258,7 +258,7 @@ def test_per_port_cbf_binding(self, dvs): self.init_test(dvs) # Create a DSCP_TO_FC map - dscp_map = [(str(i), str(i)) for i in range(0, 64)] + dscp_map = [(str(i), str(i)) for i in range(0, 63)] self.dscp_ps.set("AZURE", swsscommon.FieldValuePairs(dscp_map)) self.asic_db.wait_for_n_keys(self.ASIC_QOS_MAP_STR, self.asic_qos_map_count + 1) dscp_map_id = self.get_qos_id() diff --git a/tests/test_sflow.py b/tests/test_sflow.py index e3c95a6946..f6ab6a3c13 100644 --- a/tests/test_sflow.py +++ b/tests/test_sflow.py @@ -146,7 +146,6 @@ def test_SamplingRatePortCfgUpdate(self, dvs, testlog): ''' self.setup_sflow(dvs) appldb = dvs.get_app_db() - #dvs.runcmd("portconfig -p {} -s {}".format("Ethernet0", "25000")) self.cdb.update_entry("PORT", "Ethernet0", {'speed' : "25000"}) expected_fields = {"sample_rate": self.speed_rate_table["25000"]} appldb.wait_for_field_match("SFLOW_SESSION_TABLE", "Ethernet0", expected_fields) diff --git a/tests/test_virtual_chassis.py b/tests/test_virtual_chassis.py index a963a55f23..9f4d6ddedb 100644 --- a/tests/test_virtual_chassis.py +++ b/tests/test_virtual_chassis.py @@ -1,4 +1,3 @@ -import pytest from swsscommon import swsscommon from dvslib.dvs_database import DVSDatabase import ast @@ -136,7 +135,6 @@ def test_voq_switch(self, vct): spcfg = ast.literal_eval(value) assert spcfg['count'] == sp_count, "Number of systems ports configured is invalid" - @pytest.mark.skip(reason="This test is not stable enough") def test_chassis_app_db_sync(self, vct): """Test chassis app db syncing. @@ -213,7 +211,6 @@ def test_chassis_system_interface(self, vct): # Remote system ports's switch id should not match local switch id assert spcfginfo["attached_switch_id"] != lc_switch_id, "RIF system port with wrong switch_id" - @pytest.mark.skip(reason="This test is not stable enough") def test_chassis_system_neigh(self, vct): """Test neigh record create/delete and syncing to chassis app db. 
@@ -384,11 +381,6 @@ def test_chassis_system_neigh(self, vct): assert mac == test_neigh_mac, "Encap index of remote neigh mismatch with allocated encap index" # Check for other mandatory attributes - # For remote neighbor, encap index must be imposed. So impose_index must be "true" - impose_index = remote_neigh_entry.get("SAI_NEIGHBOR_ENTRY_ATTR_ENCAP_IMPOSE_INDEX") - assert impose_index != "", "Impose index attribute is not programmed for remote neigh in ASIC_DB" - assert impose_index == "true", "Impose index attribute is false for remote neigh" - # For remote neighbors, is_local must be "false" is_local = remote_neigh_entry.get("SAI_NEIGHBOR_ENTRY_ATTR_IS_LOCAL") assert is_local != "", "is_local attribute is not programmed for remote neigh in ASIC_DB" @@ -470,7 +462,6 @@ def test_chassis_system_neigh(self, vct): # Cleanup inband if configuration self.del_inbandif_port(vct, inband_port) - @pytest.mark.skip(reason="This test is not stable enough") def test_chassis_system_lag(self, vct): """Test PortChannel in VOQ based chassis systems. @@ -607,7 +598,6 @@ def test_chassis_system_lag(self, vct): break - @pytest.mark.skip(reason="This test is not stable enough") def test_chassis_system_lag_id_allocator_table_full(self, vct): """Test lag id allocator table full. @@ -685,7 +675,6 @@ def test_chassis_system_lag_id_allocator_table_full(self, vct): break - @pytest.mark.skip(reason="This test is not stable enough") def test_chassis_system_lag_id_allocator_del_id(self, vct): """Test lag id allocator's release id and re-use id processing. diff --git a/tests/test_vnet.py b/tests/test_vnet.py index a41f9ee39f..0f0e554092 100644 --- a/tests/test_vnet.py +++ b/tests/test_vnet.py @@ -314,7 +314,7 @@ def delete_phy_interface(dvs, ifname, ipaddr): time.sleep(2) -def create_vnet_entry(dvs, name, tunnel, vni, peer_list, scope=""): +def create_vnet_entry(dvs, name, tunnel, vni, peer_list, scope="", advertise_prefix=False): conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) @@ -327,6 +327,9 @@ def create_vnet_entry(dvs, name, tunnel, vni, peer_list, scope=""): if scope: attrs.append(('scope', scope)) + if advertise_prefix: + attrs.append(('advertise_prefix', 'true')) + # create the VXLAN tunnel Term entry in Config DB create_entry_tbl( conf_db, @@ -359,6 +362,9 @@ def create_vxlan_tunnel(dvs, name, src_ip): attrs, ) +def delete_vxlan_tunnel(dvs, name): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + delete_entry_tbl(conf_db, "VXLAN_TUNNEL", name) def create_vxlan_tunnel_map(dvs, tunnel_name, tunnel_map_entry_name, vlan, vni_id): conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) @@ -483,6 +489,23 @@ def check_remove_state_db_routes(dvs, vnet, prefix): assert vnet + '|' + prefix not in keys + +def check_routes_advertisement(dvs, prefix): + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(state_db, "ADVERTISE_NETWORK_TABLE") + keys = tbl.getKeys() + + assert prefix in keys + + +def check_remove_routes_advertisement(dvs, prefix): + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(state_db, "ADVERTISE_NETWORK_TABLE") + keys = tbl.getKeys() + + assert prefix not in keys + + loopback_id = 0 def_vr_id = 0 switch_mac = None @@ -503,16 +526,17 @@ class VnetVxlanVrfTunnel(object): ASIC_NEXT_HOP_GROUP_MEMBER = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER" ASIC_BFD_SESSION 
= "ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION" - tunnel_map_ids = set() - tunnel_map_entry_ids = set() - tunnel_ids = set() - tunnel_term_ids = set() - tunnel_map_map = {} - tunnel = {} - vnet_vr_ids = set() - vr_map = {} - nh_ids = {} - nhg_ids = {} + def __init__(self): + self.tunnel_map_ids = set() + self.tunnel_map_entry_ids = set() + self.tunnel_ids = set() + self.tunnel_term_ids = set() + self.tunnel_map_map = {} + self.tunnel = {} + self.vnet_vr_ids = set() + self.vr_map = {} + self.nh_ids = {} + self.nhg_ids = {} def fetch_exist_entries(self, dvs): self.vnet_vr_ids = get_exist_entries(dvs, self.ASIC_VRF_TABLE) @@ -600,6 +624,18 @@ def check_vxlan_tunnel(self, dvs, tunnel_name, src_ip): self.tunnel_map_map[tunnel_name] = tunnel_map_id self.tunnel[tunnel_name] = tunnel_id + def check_del_vxlan_tunnel(self, dvs): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + old_tunnel = get_deleted_entries(asic_db, self.ASIC_TUNNEL_TABLE, self.tunnel_ids, 1) + check_deleted_object(asic_db, self.ASIC_TUNNEL_TABLE, old_tunnel[0]) + self.tunnel_ids.remove(old_tunnel[0]) + + old_tunnel_maps = get_deleted_entries(asic_db, self.ASIC_TUNNEL_MAP, self.tunnel_map_ids, 4) + for old_tunnel_map in old_tunnel_maps: + check_deleted_object(asic_db, self.ASIC_TUNNEL_MAP, old_tunnel_map) + self.tunnel_map_ids.remove(old_tunnel_map) + def check_vxlan_tunnel_entry(self, dvs, tunnel_name, vnet_name, vni_id): asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) @@ -798,7 +834,7 @@ def serialize_endpoint_group(self, endpoints): endpoints.sort() return ",".join(endpoints) - def check_next_hop_group_member(self, dvs, nhg, expected_endpoint, expected_attrs): + def check_next_hop_group_member(self, dvs, nhg, ordered_ecmp, expected_endpoint, expected_attrs): expected_endpoint_str = self.serialize_endpoint_group(expected_endpoint) asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) tbl_nhgm = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP_GROUP_MEMBER) @@ -817,11 +853,17 @@ def check_next_hop_group_member(self, dvs, nhg, expected_endpoint, expected_attr endpoint = nh_fvs["SAI_NEXT_HOP_ATTR_IP"] endpoints.append(endpoint) assert endpoint in expected_attrs + if ordered_ecmp == "true": + assert fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID"] == expected_attrs[endpoint]['SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID'] + del expected_attrs[endpoint]['SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID'] + else: + assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID") is None + check_object(asic_db, self.ASIC_NEXT_HOP, nh_key, expected_attrs[endpoint]) assert self.serialize_endpoint_group(endpoints) == expected_endpoint_str - def check_vnet_ecmp_routes(self, dvs, name, endpoints, tunnel, mac=[], vni=[], route_ids=[], nhg=""): + def check_vnet_ecmp_routes(self, dvs, name, endpoints, tunnel, mac=[], vni=[], route_ids=[], nhg="", ordered_ecmp="false", nh_seq_id=None): asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) endpoint_str = name + "|" + self.serialize_endpoint_group(endpoints) @@ -839,6 +881,8 @@ def check_vnet_ecmp_routes(self, dvs, name, endpoints, tunnel, mac=[], vni=[], r expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_VNI': vni[idx]}) if mac and mac[idx]: expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_MAC': mac[idx]}) + if ordered_ecmp == "true" and nh_seq_id: + expected_attr.update({'SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID': nh_seq_id[idx]}) expected_attrs[endpoint] = 
expected_attr if nhg: @@ -853,12 +897,12 @@ def check_vnet_ecmp_routes(self, dvs, name, endpoints, tunnel, mac=[], vni=[], r # Check routes in ingress VRF expected_nhg_attr = { - "SAI_NEXT_HOP_GROUP_ATTR_TYPE": "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP", + "SAI_NEXT_HOP_GROUP_ATTR_TYPE": "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP" if ordered_ecmp == "false" else "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_ORDERED_ECMP", } check_object(asic_db, self.ASIC_NEXT_HOP_GROUP, new_nhg, expected_nhg_attr) # Check nexthop group member - self.check_next_hop_group_member(dvs, new_nhg, endpoints, expected_attrs) + self.check_next_hop_group_member(dvs, new_nhg, ordered_ecmp, endpoints, expected_attrs) if route_ids: new_route = route_ids @@ -901,6 +945,32 @@ class TestVnetOrch(object): def get_vnet_obj(self): return VnetVxlanVrfTunnel() + @pytest.fixture(params=["true", "false"]) + def ordered_ecmp(self, dvs, request): + + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + if request.param == "true": + create_entry_pst( + app_db, + "SWITCH_TABLE", ':', "switch", + [ + ('ordered_ecmp', 'true') + ], + ) + dvs.get_state_db().wait_for_field_match("SWITCH_CAPABILITY", "switch", {"ORDERED_ECMP_CAPABLE": "true"}) + + yield request.param + + if request.param == "true": + create_entry_pst( + app_db, + "SWITCH_TABLE", ':', "switch", + [ + ('ordered_ecmp', 'false') + ], + ) + dvs.get_state_db().wait_for_field_match("SWITCH_CAPABILITY", "switch", {"ORDERED_ECMP_CAPABLE": "false"}) + ''' Test 1 - Create Vlan Interface, Tunnel and Vnet ''' @@ -929,6 +999,8 @@ def test_vnet_orch_1(self, dvs, testlog): create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet_2000', '10.10.10.1') vnet_obj.check_vnet_routes(dvs, 'Vnet_2000', '10.10.10.1', tunnel_name) check_state_db_routes(dvs, 'Vnet_2000', "100.100.1.1/32", ['10.10.10.1']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.1.1/32") create_vnet_local_routes(dvs, "100.100.3.0/24", 'Vnet_2000', 'Vlan100') vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2000') @@ -950,6 +1022,8 @@ def test_vnet_orch_1(self, dvs, testlog): create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet_2001', '10.10.10.2', "00:12:34:56:78:9A") vnet_obj.check_vnet_routes(dvs, 'Vnet_2001', '10.10.10.2', tunnel_name, "00:12:34:56:78:9A") check_state_db_routes(dvs, 'Vnet_2001', "100.100.2.1/32", ['10.10.10.2']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.2.1/32") create_vnet_local_routes(dvs, "100.102.1.0/24", 'Vnet_2001', 'Ethernet4') vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2001') @@ -968,10 +1042,12 @@ def test_vnet_orch_1(self, dvs, testlog): delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet_2001') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2001') check_remove_state_db_routes(dvs, 'Vnet_2001', "100.100.2.1/32") + check_remove_routes_advertisement(dvs, "100.100.2.1/32") delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet_2000') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2000') check_remove_state_db_routes(dvs, 'Vnet_2000', "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") delete_phy_interface(dvs, "Ethernet4", "100.102.1.1/24") vnet_obj.check_del_router_interface(dvs, "Ethernet4") @@ -988,6 +1064,9 @@ def test_vnet_orch_1(self, dvs, testlog): delete_vnet_entry(dvs, 'Vnet_2000') vnet_obj.check_del_vnet_entry(dvs, 'Vnet_2000') + delete_vxlan_tunnel(dvs, tunnel_name) + vnet_obj.check_del_vxlan_tunnel(dvs) + ''' Test 2 - Two VNets, One HSMs per VNet ''' @@ -1013,21 
+1092,28 @@ def test_vnet_orch_2(self, dvs, testlog): create_vnet_routes(dvs, "1.1.1.10/32", 'Vnet_1', '100.1.1.10') vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '100.1.1.10', tunnel_name) check_state_db_routes(dvs, 'Vnet_1', "1.1.1.10/32", ['100.1.1.10']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "1.1.1.10/32") vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "1.1.1.11/32", 'Vnet_1', '100.1.1.10') vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '100.1.1.10', tunnel_name) check_state_db_routes(dvs, 'Vnet_1', "1.1.1.11/32", ['100.1.1.10']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "1.1.1.11/32") vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "1.1.1.12/32", 'Vnet_1', '200.200.1.200') vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '200.200.1.200', tunnel_name) check_state_db_routes(dvs, 'Vnet_1', "1.1.1.12/32", ['200.200.1.200']) + check_remove_routes_advertisement(dvs, "1.1.1.12/32") vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "1.1.1.14/32", 'Vnet_1', '200.200.1.201') vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '200.200.1.201', tunnel_name) check_state_db_routes(dvs, 'Vnet_1', "1.1.1.14/32", ['200.200.1.201']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "1.1.1.14/32") create_vnet_local_routes(dvs, "1.1.10.0/24", 'Vnet_1', 'Vlan1001') vnet_obj.check_vnet_local_routes(dvs, 'Vnet_1') @@ -1044,11 +1130,15 @@ def test_vnet_orch_2(self, dvs, testlog): create_vnet_routes(dvs, "2.2.2.10/32", 'Vnet_2', '100.1.1.20') vnet_obj.check_vnet_routes(dvs, 'Vnet_2', '100.1.1.20', tunnel_name) check_state_db_routes(dvs, 'Vnet_2', "2.2.2.10/32", ['100.1.1.20']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "2.2.2.10/32") vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "2.2.2.11/32", 'Vnet_2', '100.1.1.20') vnet_obj.check_vnet_routes(dvs, 'Vnet_2', '100.1.1.20', tunnel_name) check_state_db_routes(dvs, 'Vnet_2', "2.2.2.11/32", ['100.1.1.20']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "2.2.2.11/32") create_vnet_local_routes(dvs, "2.2.10.0/24", 'Vnet_2', 'Vlan1002') vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2') @@ -1064,26 +1154,32 @@ def test_vnet_orch_2(self, dvs, testlog): delete_vnet_routes(dvs, "2.2.2.11/32", 'Vnet_2') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2') check_remove_state_db_routes(dvs, 'Vnet_2', "2.2.2.11/32") + check_remove_routes_advertisement(dvs, "2.2.2.11/32") delete_vnet_routes(dvs, "2.2.2.10/32", 'Vnet_2') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2') check_remove_state_db_routes(dvs, 'Vnet_2', "2.2.2.10/32") + check_remove_routes_advertisement(dvs, "2.2.2.10/32") delete_vnet_routes(dvs, "1.1.1.14/32", 'Vnet_1') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_1') check_remove_state_db_routes(dvs, 'Vnet_1', "1.1.1.14/32") + check_remove_routes_advertisement(dvs, "1.1.1.14/32") delete_vnet_routes(dvs, "1.1.1.12/32", 'Vnet_1') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_1') check_remove_state_db_routes(dvs, 'Vnet_1', "1.1.1.12/32") + check_remove_routes_advertisement(dvs, "1.1.1.12/32") delete_vnet_routes(dvs, "1.1.1.11/32", 'Vnet_1') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_1') check_remove_state_db_routes(dvs, 'Vnet_1', "1.1.1.11/32") + check_remove_routes_advertisement(dvs, "1.1.1.11/32") delete_vnet_routes(dvs, "1.1.1.10/32", 'Vnet_1') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_1') 
check_remove_state_db_routes(dvs, 'Vnet_1', "1.1.1.10/32") + check_remove_routes_advertisement(dvs, "1.1.1.10/32") delete_vlan_interface(dvs, "Vlan1002", "2.2.10.1/24") vnet_obj.check_del_router_interface(dvs, "Vlan1002") @@ -1097,6 +1193,9 @@ def test_vnet_orch_2(self, dvs, testlog): delete_vnet_entry(dvs, 'Vnet_2') vnet_obj.check_del_vnet_entry(dvs, 'Vnet_2') + delete_vxlan_tunnel(dvs, tunnel_name) + vnet_obj.check_del_vxlan_tunnel(dvs) + ''' Test 3 - Two VNets, One HSMs per VNet, Peering ''' @@ -1131,11 +1230,15 @@ def test_vnet_orch_3(self, dvs, testlog): create_vnet_routes(dvs, "5.5.5.10/32", 'Vnet_10', '50.1.1.10') vnet_obj.check_vnet_routes(dvs, 'Vnet_10', '50.1.1.10', tunnel_name) check_state_db_routes(dvs, 'Vnet_10', "5.5.5.10/32", ['50.1.1.10']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "5.5.5.10/32") vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "8.8.8.10/32", 'Vnet_20', '80.1.1.20') vnet_obj.check_vnet_routes(dvs, 'Vnet_10', '80.1.1.20', tunnel_name) check_state_db_routes(dvs, 'Vnet_20', "8.8.8.10/32", ['80.1.1.20']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "8.8.8.10/32") create_vnet_local_routes(dvs, "5.5.10.0/24", 'Vnet_10', 'Vlan2001') vnet_obj.check_vnet_local_routes(dvs, 'Vnet_10') @@ -1154,10 +1257,12 @@ def test_vnet_orch_3(self, dvs, testlog): delete_vnet_routes(dvs, "5.5.5.10/32", 'Vnet_10') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_10') check_remove_state_db_routes(dvs, 'Vnet_10', "5.5.5.10/32") + check_remove_routes_advertisement(dvs, "5.5.5.10/32") delete_vnet_routes(dvs, "8.8.8.10/32", 'Vnet_20') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_20') check_remove_state_db_routes(dvs, 'Vnet_20', "8.8.8.10/32") + check_remove_routes_advertisement(dvs, "8.8.8.10/32") delete_vlan_interface(dvs, "Vlan2001", "5.5.10.1/24") vnet_obj.check_del_router_interface(dvs, "Vlan2001") @@ -1171,6 +1276,9 @@ def test_vnet_orch_3(self, dvs, testlog): delete_vnet_entry(dvs, 'Vnet_20') vnet_obj.check_del_vnet_entry(dvs, 'Vnet_20') + delete_vxlan_tunnel(dvs, tunnel_name) + vnet_obj.check_del_vxlan_tunnel(dvs) + ''' Test 4 - IPv6 Vxlan tunnel test ''' @@ -1198,10 +1306,14 @@ def test_vnet_orch_4(self, dvs, testlog): create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet3001', '2000:1000:2000:3000:4000:5000:6000:7000') vnet_obj.check_vnet_routes(dvs, 'Vnet3001', '2000:1000:2000:3000:4000:5000:6000:7000', tunnel_name) check_state_db_routes(dvs, 'Vnet3001', "100.100.1.1/32", ['2000:1000:2000:3000:4000:5000:6000:7000']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.1.1/32") create_vnet_routes(dvs, "100.100.1.2/32", 'Vnet3001', '2000:1000:2000:3000:4000:5000:6000:7000') vnet_obj.check_vnet_routes(dvs, 'Vnet3001', '2000:1000:2000:3000:4000:5000:6000:7000', tunnel_name) check_state_db_routes(dvs, 'Vnet3001', "100.100.1.2/32", ['2000:1000:2000:3000:4000:5000:6000:7000']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.1.2/32") create_vnet_local_routes(dvs, "100.100.3.0/24", 'Vnet3001', 'Vlan300') vnet_obj.check_vnet_local_routes(dvs, 'Vnet3001') @@ -1222,6 +1334,8 @@ def test_vnet_orch_4(self, dvs, testlog): create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet3002', 'fd:2::34', "00:12:34:56:78:9A") vnet_obj.check_vnet_routes(dvs, 'Vnet3002', 'fd:2::34', tunnel_name, "00:12:34:56:78:9A") check_state_db_routes(dvs, 'Vnet3002', "100.100.2.1/32", ['fd:2::34']) + # The default Vnet 
setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.2.1/32") create_vnet_local_routes(dvs, "100.102.1.0/24", 'Vnet3002', 'Ethernet60') vnet_obj.check_vnet_local_routes(dvs, 'Vnet3002') @@ -1240,20 +1354,26 @@ def test_vnet_orch_4(self, dvs, testlog): create_vnet_routes(dvs, "5.5.5.10/32", 'Vnet3003', 'fd:2::35') vnet_obj.check_vnet_routes(dvs, 'Vnet3004', 'fd:2::35', tunnel_name) check_state_db_routes(dvs, 'Vnet3003', "5.5.5.10/32", ['fd:2::35']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "5.5.5.10/32") create_vnet_routes(dvs, "8.8.8.10/32", 'Vnet3004', 'fd:2::36') vnet_obj.check_vnet_routes(dvs, 'Vnet3003', 'fd:2::36', tunnel_name) check_state_db_routes(dvs, 'Vnet3004', "8.8.8.10/32", ['fd:2::36']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "8.8.8.10/32") # Clean-up and verify remove flows delete_vnet_routes(dvs, "5.5.5.10/32", 'Vnet3003') vnet_obj.check_del_vnet_routes(dvs, 'Vnet3003') check_remove_state_db_routes(dvs, 'Vnet3003', "5.5.5.10/32") + check_remove_routes_advertisement(dvs, "5.5.5.10/32") delete_vnet_routes(dvs, "8.8.8.10/32", 'Vnet3004') vnet_obj.check_del_vnet_routes(dvs, 'Vnet3004') check_remove_state_db_routes(dvs, 'Vnet3004', "8.8.8.10/32") + check_remove_routes_advertisement(dvs, "8.8.8.10/32") delete_vnet_entry(dvs, 'Vnet3003') vnet_obj.check_del_vnet_entry(dvs, 'Vnet3003') @@ -1264,6 +1384,7 @@ def test_vnet_orch_4(self, dvs, testlog): delete_vnet_routes(dvs, "100.100.2.1/24", 'Vnet3002') vnet_obj.check_del_vnet_routes(dvs, 'Vnet3002') check_remove_state_db_routes(dvs, 'Vnet3002', "100.100.2.1/24") + check_remove_routes_advertisement(dvs, "100.100.2.1/24") delete_vnet_local_routes(dvs, "100.102.1.0/24", 'Vnet3002') vnet_obj.check_del_vnet_local_routes(dvs, 'Vnet3002') @@ -1283,10 +1404,12 @@ def test_vnet_orch_4(self, dvs, testlog): delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet3001') vnet_obj.check_del_vnet_routes(dvs, 'Vnet3001') check_remove_state_db_routes(dvs, 'Vnet3001', "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") delete_vnet_routes(dvs, "100.100.1.2/32", 'Vnet3001') vnet_obj.check_del_vnet_routes(dvs, 'Vnet3001') check_remove_state_db_routes(dvs, 'Vnet3001', "100.100.1.2/32") + check_remove_routes_advertisement(dvs, "100.100.1.2/32") delete_vlan_interface(dvs, "Vlan300", "100.100.3.1/24") vnet_obj.check_del_router_interface(dvs, "Vlan300") @@ -1297,6 +1420,9 @@ def test_vnet_orch_4(self, dvs, testlog): delete_vnet_entry(dvs, 'Vnet3001') vnet_obj.check_del_vnet_entry(dvs, 'Vnet3001') + delete_vxlan_tunnel(dvs, tunnel_name) + vnet_obj.check_del_vxlan_tunnel(dvs) + ''' Test 5 - Default VNet test ''' @@ -1335,202 +1461,231 @@ def test_vnet_vxlan_multi_map(self, dvs, testlog): ''' Test 7 - Test for vnet tunnel routes with ECMP nexthop group ''' - def test_vnet_orch_7(self, dvs, testlog): + def test_vnet_orch_7(self, dvs, ordered_ecmp, testlog): vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_7' + tunnel_name = 'tunnel_7' + ordered_ecmp + vnet_name = 'Vnet7' + ordered_ecmp vnet_obj.fetch_exist_entries(dvs) create_vxlan_tunnel(dvs, tunnel_name, '7.7.7.7') - create_vnet_entry(dvs, 'Vnet7', tunnel_name, '10007', "") + create_vnet_entry(dvs, vnet_name, tunnel_name, '10007', "") - vnet_obj.check_vnet_entry(dvs, 'Vnet7') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet7', '10007') + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 
vnet_name, '10007') vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '7.7.7.7') # Create an ECMP tunnel route vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet7', '7.0.0.1,7.0.0.2,7.0.0.3') - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet7', ['7.0.0.1', '7.0.0.2', '7.0.0.3'], tunnel_name) - check_state_db_routes(dvs, 'Vnet7', "100.100.1.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3']) + create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '7.0.0.3,7.0.0.2,7.0.0.1') + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['7.0.0.1', '7.0.0.2', '7.0.0.3'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Set the tunnel route to another nexthop group - set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet7', '7.0.0.1,7.0.0.2,7.0.0.3,7.0.0.4') - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet7', ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4'], tunnel_name, route_ids=route1) - check_state_db_routes(dvs, 'Vnet7', "100.100.1.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4']) + set_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '7.0.0.1,7.0.0.2,7.0.0.4,7.0.0.3') + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4'], tunnel_name, route_ids=route1, + ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Check the previous nexthop group is removed vnet_obj.fetch_exist_entries(dvs) assert nhg1_1 not in vnet_obj.nhgs # Create another tunnel route to the same set of endpoints - create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet7', '7.0.0.1,7.0.0.2,7.0.0.3,7.0.0.4') - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet7', ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4'], tunnel_name) - check_state_db_routes(dvs, 'Vnet7', "100.100.2.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4']) + create_vnet_routes(dvs, "100.100.2.1/32", vnet_name, '7.0.0.1,7.0.0.2,7.0.0.3,7.0.0.4') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4'], tunnel_name, + ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4']) + check_remove_routes_advertisement(dvs, "100.100.2.1/32") assert nhg2_1 == nhg1_2 # Remove one of the tunnel routes - delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet7') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet7', ["100.100.1.1/32"]) - check_remove_state_db_routes(dvs, 'Vnet7', "100.100.1.1/32") + delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Check the nexthop group still exists vnet_obj.fetch_exist_entries(dvs) assert nhg1_2 in vnet_obj.nhgs # Remove the other tunnel route - delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet7') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet7', ["100.100.2.1/32"]) - check_remove_state_db_routes(dvs, 'Vnet7', "100.100.2.1/32") + delete_vnet_routes(dvs, "100.100.2.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.2.1/32"]) + 
check_remove_state_db_routes(dvs, vnet_name, "100.100.2.1/32") + check_remove_routes_advertisement(dvs, "100.100.2.1/32") # Check the nexthop group is removed vnet_obj.fetch_exist_entries(dvs) assert nhg2_1 not in vnet_obj.nhgs - delete_vnet_entry(dvs, 'Vnet7') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet7') + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) ''' Test 8 - Test for ipv6 vnet tunnel routes with ECMP nexthop group ''' - def test_vnet_orch_8(self, dvs, testlog): + def test_vnet_orch_8(self, dvs, ordered_ecmp, testlog): + vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_8' + tunnel_name = 'tunnel_8' + ordered_ecmp + vnet_name = 'Vnet8' + ordered_ecmp + vnet_obj.fetch_exist_entries(dvs) create_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') - create_vnet_entry(dvs, 'Vnet8', tunnel_name, '10008', "") + create_vnet_entry(dvs, vnet_name, tunnel_name, '10008', "") - vnet_obj.check_vnet_entry(dvs, 'Vnet8') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet8', '10008') + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10008') vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') # Create an ECMP tunnel route vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet8', 'fd:8:1::1,fd:8:1::2,fd:8:1::3') - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet8', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3'], tunnel_name) - check_state_db_routes(dvs, 'Vnet8', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3']) + create_vnet_routes(dvs, "fd:8:10::32/128", vnet_name, 'fd:8:1::1,fd:8:1::3,fd:8:1::2') + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3']) + check_state_db_routes(dvs, vnet_name, "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3']) + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") # Set the tunnel route to another nexthop group - set_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet8', 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet8', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name, route_ids=route1) - check_state_db_routes(dvs, 'Vnet8', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) + set_vnet_routes(dvs, "fd:8:10::32/128", vnet_name, 'fd:8:1::2,fd:8:1::3,fd:8:1::1,fd:8:1::4') + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name, route_ids=route1, + ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) + check_state_db_routes(dvs, vnet_name, "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") # Check the previous nexthop group is removed vnet_obj.fetch_exist_entries(dvs) assert nhg1_1 not in vnet_obj.nhgs # Create another tunnel route to the same set of endpoints - create_vnet_routes(dvs, "fd:8:20::32/128", 'Vnet8', 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet8', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name) - check_state_db_routes(dvs, 'Vnet8', "fd:8:20::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) + create_vnet_routes(dvs, "fd:8:20::32/128", vnet_name, 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 
vnet_name, ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name, + ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) + check_state_db_routes(dvs, vnet_name, "fd:8:20::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) + check_remove_routes_advertisement(dvs, "fd:8:20::32/128") assert nhg2_1 == nhg1_2 # Create another tunnel route with ipv4 prefix to the same set of endpoints - create_vnet_routes(dvs, "8.0.0.0/24", 'Vnet8', 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') - route3, nhg3_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet8', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name) - check_state_db_routes(dvs, 'Vnet8', "8.0.0.0/24", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) + create_vnet_routes(dvs, "8.0.0.0/24", vnet_name, 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') + route3, nhg3_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name, + ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) + check_state_db_routes(dvs, vnet_name, "8.0.0.0/24", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) + check_remove_routes_advertisement(dvs, "8.0.0.0/24") assert nhg3_1 == nhg1_2 # Remove one of the tunnel routes - delete_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet8') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet8', ["fd:8:10::32/128"]) - check_remove_state_db_routes(dvs, 'Vnet8', "fd:8:10::32/128") + delete_vnet_routes(dvs, "fd:8:10::32/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:8:10::32/128"]) + check_remove_state_db_routes(dvs, vnet_name, "fd:8:10::32/128") + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") # Check the nexthop group still exists vnet_obj.fetch_exist_entries(dvs) assert nhg1_2 in vnet_obj.nhgs # Remove tunnel route 2 - delete_vnet_routes(dvs, "fd:8:20::32/128", 'Vnet8') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet8', ["fd:8:20::32/128"]) - check_remove_state_db_routes(dvs, 'Vnet8', "fd:8:20::32/128") + delete_vnet_routes(dvs, "fd:8:20::32/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:8:20::32/128"]) + check_remove_state_db_routes(dvs, vnet_name, "fd:8:20::32/128") + check_remove_routes_advertisement(dvs, "fd:8:20::32/128") # Remove tunnel route 3 - delete_vnet_routes(dvs, "8.0.0.0/24", 'Vnet8') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet8', ["8.0.0.0/24"]) - check_remove_state_db_routes(dvs, 'Vnet8', "8.0.0.0/24") + delete_vnet_routes(dvs, "8.0.0.0/24", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["8.0.0.0/24"]) + check_remove_state_db_routes(dvs, vnet_name, "8.0.0.0/24") + check_remove_routes_advertisement(dvs, "8.0.0.0/24") # Check the nexthop group is removed vnet_obj.fetch_exist_entries(dvs) assert nhg2_1 not in vnet_obj.nhgs - delete_vnet_entry(dvs, 'Vnet8') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet8') + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) ''' Test 9 - Test for vnet tunnel routes with ECMP nexthop group with endpoint health monitor ''' - def test_vnet_orch_9(self, dvs, testlog): + def test_vnet_orch_9(self, dvs, ordered_ecmp, testlog): vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_9' + tunnel_name = 'tunnel_9' + ordered_ecmp + vnet_name = 'Vnet9' + ordered_ecmp vnet_obj.fetch_exist_entries(dvs) create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') - create_vnet_entry(dvs, 'Vnet9', tunnel_name, '10009', "") + create_vnet_entry(dvs, vnet_name, tunnel_name, '10009', "") - vnet_obj.check_vnet_entry(dvs, 'Vnet9') - 
vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet9', '10009') + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10009') vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet9', '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') + create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') # default bfd status is down, route should not be programmed in this status - vnet_obj.check_del_vnet_routes(dvs, 'Vnet9', ["100.100.1.1/32"]) - check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", []) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Route should be properly configured when all bfd session states go up - update_bfd_session_state(dvs, '9.1.0.1', 'Up') update_bfd_session_state(dvs, '9.1.0.2', 'Up') update_bfd_session_state(dvs, '9.1.0.3', 'Up') + update_bfd_session_state(dvs, '9.1.0.1', 'Up') time.sleep(2) - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name) - check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Remove endpoint from group if it goes down update_bfd_session_state(dvs, '9.1.0.2', 'Down') time.sleep(2) - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) - check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", ['9.0.0.1', '9.0.0.3']) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '3']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.0.0.1', '9.0.0.3']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Create another tunnel route with endpoint group overlapped with route1 vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet9', '9.0.0.1,9.0.0.2,9.0.0.5', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.5') - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1'], tunnel_name) - check_state_db_routes(dvs, 'Vnet9', "100.100.2.1/32", ['9.0.0.1']) + create_vnet_routes(dvs, "100.100.2.1/32", vnet_name, '9.0.0.1,9.0.0.2,9.0.0.5', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.5') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1']) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['9.0.0.1']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Update BFD session state and verify route change update_bfd_session_state(dvs, '9.1.0.5', 'Up') time.sleep(2) - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1', '9.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1) - check_state_db_routes(dvs, 'Vnet9', "100.100.2.1/32", ['9.0.0.1', '9.0.0.5']) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.5'], 
tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '3']) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['9.0.0.1', '9.0.0.5']) + check_remove_routes_advertisement(dvs, "100.100.2.1/32") # Update BFD state and check route nexthop update_bfd_session_state(dvs, '9.1.0.3', 'Down') time.sleep(2) - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1'], tunnel_name, route_ids=route1, nhg=nhg1_1) - check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", ['9.0.0.1']) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1'], tunnel_name, route_ids=route1, nhg=nhg1_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.0.0.1']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Set the route1 to a new group - set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet9', '9.0.0.1,9.0.0.2,9.0.0.3,9.0.0.4', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4') + set_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '9.0.0.1,9.0.0.2,9.0.0.3,9.0.0.4', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4') update_bfd_session_state(dvs, '9.1.0.4', 'Up') time.sleep(2) - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1', '9.0.0.4'], tunnel_name, route_ids=route1) - check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", ['9.0.0.1', '9.0.0.4']) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.4'], tunnel_name, route_ids=route1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '4']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.0.0.1', '9.0.0.4']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Check the previous nexthop group is removed vnet_obj.fetch_exist_entries(dvs) @@ -1539,8 +1694,9 @@ def test_vnet_orch_9(self, dvs, testlog): # Set BFD session state for a down endpoint to up update_bfd_session_state(dvs, '9.1.0.2', 'Up') time.sleep(2) - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1', '9.0.0.2', '9.0.0.4'], tunnel_name, route_ids=route1, nhg=nhg1_2) - check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.4']) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.2', '9.0.0.4'], tunnel_name, route_ids=route1, nhg=nhg1_2, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '4']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.4']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Set all endpoint to down state update_bfd_session_state(dvs, '9.1.0.1', 'Down') @@ -1550,15 +1706,19 @@ def test_vnet_orch_9(self, dvs, testlog): time.sleep(2) # Confirm the tunnel route is updated in ASIC - vnet_obj.check_del_vnet_routes(dvs, 'Vnet9', ["100.100.1.1/32"]) - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1) - check_state_db_routes(dvs, 'Vnet9', "100.100.2.1/32", ['9.0.0.5']) - check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", []) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['3']) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['9.0.0.5']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + # The default Vnet setting does not advertise prefix + 
check_remove_routes_advertisement(dvs, "100.100.2.1/32") # Remove tunnel route2 - delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet9') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet9', ["100.100.2.1/32"]) - check_remove_state_db_routes(dvs, 'Vnet9', "100.100.2.1/32") + delete_vnet_routes(dvs, "100.100.2.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.2.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.2.1/32") + check_remove_routes_advertisement(dvs, "100.100.2.1/32") # Check the corresponding nexthop group is removed vnet_obj.fetch_exist_entries(dvs) @@ -1569,9 +1729,10 @@ def test_vnet_orch_9(self, dvs, testlog): check_bfd_session(dvs, ['9.1.0.1', '9.1.0.2', '9.1.0.3', '9.1.0.4']) # Remove tunnel route 1 - delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet9') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet9', ["100.100.1.1/32"]) - check_remove_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32") + delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Check the previous nexthop group is removed vnet_obj.fetch_exist_entries(dvs) @@ -1580,75 +1741,83 @@ def test_vnet_orch_9(self, dvs, testlog): # Confirm the BFD sessions are removed check_del_bfd_session(dvs, ['9.1.0.1', '9.1.0.2', '9.1.0.3', '9.1.0.4', '9.1.0.5']) - delete_vnet_entry(dvs, 'Vnet9') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet9') + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) ''' Test 10 - Test for ipv6 vnet tunnel routes with ECMP nexthop group with endpoint health monitor ''' - def test_vnet_orch_10(self, dvs, testlog): + def test_vnet_orch_10(self, dvs, ordered_ecmp, testlog): vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_10' + tunnel_name = 'tunnel_10' + ordered_ecmp + vnet_name = 'Vnet10' + ordered_ecmp vnet_obj.fetch_exist_entries(dvs) create_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32') - create_vnet_entry(dvs, 'Vnet10', tunnel_name, '10010', "") + create_vnet_entry(dvs, vnet_name, tunnel_name, '10010', "") - vnet_obj.check_vnet_entry(dvs, 'Vnet10') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet10', '10010') + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10010') vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32') vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "fd:10:10::1/128", 'Vnet10', 'fd:10:1::1,fd:10:1::2,fd:10:1::3', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3') + create_vnet_routes(dvs, "fd:10:10::1/128", vnet_name, 'fd:10:1::1,fd:10:1::2,fd:10:1::3', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3') # default bfd status is down, route should not be programmed in this status - vnet_obj.check_del_vnet_routes(dvs, 'Vnet10', ["fd:10:10::1/128"]) - check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", []) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:10::1/128"]) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", []) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") # Route should be properly configured when all bfd session states go up - update_bfd_session_state(dvs, 'fd:10:2::1', 'Up') update_bfd_session_state(dvs, 'fd:10:2::2', 'Up') update_bfd_session_state(dvs, 'fd:10:2::3', 'Up') + update_bfd_session_state(dvs, 'fd:10:2::1', 'Up') time.sleep(2) - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 
'fd:10:1::2', 'fd:10:1::3'], tunnel_name) - check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3']) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3']) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3']) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") # Remove endpoint from group if it goes down update_bfd_session_state(dvs, 'fd:10:2::2', 'Down') time.sleep(2) - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::3'], tunnel_name, route_ids=route1, nhg=nhg1_1) - check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::3']) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::3'], tunnel_name, route_ids=route1, nhg=nhg1_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '3']) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::3']) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") # Create another tunnel route with endpoint group overlapped with route1 vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "fd:10:20::1/128", 'Vnet10', 'fd:10:1::1,fd:10:1::2,fd:10:1::5', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::5') - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1'], tunnel_name) - check_state_db_routes(dvs, 'Vnet10', "fd:10:20::1/128", ['fd:10:1::1']) + create_vnet_routes(dvs, "fd:10:20::1/128", vnet_name, 'fd:10:1::1,fd:10:1::2,fd:10:1::5', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::5') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1']) + check_state_db_routes(dvs, vnet_name, "fd:10:20::1/128", ['fd:10:1::1']) + check_remove_routes_advertisement(dvs, "fd:10:20::1/128") # Update BFD session state and verify route change update_bfd_session_state(dvs, 'fd:10:2::5', 'Up') time.sleep(2) - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::5'], tunnel_name, route_ids=route2, nhg=nhg2_1) - check_state_db_routes(dvs, 'Vnet10', "fd:10:20::1/128", ['fd:10:1::1', 'fd:10:1::5']) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::5'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '3']) + check_state_db_routes(dvs, vnet_name, "fd:10:20::1/128", ['fd:10:1::1', 'fd:10:1::5']) + check_remove_routes_advertisement(dvs, "fd:10:20::1/128") # Update BFD state and check route nexthop update_bfd_session_state(dvs, 'fd:10:2::3', 'Down') update_bfd_session_state(dvs, 'fd:10:2::2', 'Up') time.sleep(2) - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::2'], tunnel_name, route_ids=route1, nhg=nhg1_1) - check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2']) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::2'], tunnel_name, route_ids=route1, nhg=nhg1_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2']) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2']) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") # Set the route to a new group - set_vnet_routes(dvs, "fd:10:10::1/128", 'Vnet10', 'fd:10:1::1,fd:10:1::2,fd:10:1::3,fd:10:1::4', 
ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3,fd:10:2::4') + set_vnet_routes(dvs, "fd:10:10::1/128", vnet_name, 'fd:10:1::1,fd:10:1::2,fd:10:1::3,fd:10:1::4', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3,fd:10:2::4') update_bfd_session_state(dvs, 'fd:10:2::4', 'Up') time.sleep(2) - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::4'], tunnel_name, route_ids=route1) - check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::4']) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::4'], tunnel_name, route_ids=route1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '4']) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::4']) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") # Check the previous nexthop group is removed vnet_obj.fetch_exist_entries(dvs) assert nhg1_1 not in vnet_obj.nhgs @@ -1656,8 +1825,10 @@ def test_vnet_orch_10(self, dvs, testlog): # Set BFD session state for a down endpoint to up update_bfd_session_state(dvs, 'fd:10:2::3', 'Up') time.sleep(2) - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3', 'fd:10:1::4'], tunnel_name, route_ids=route1, nhg=nhg1_2) - check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3', 'fd:10:1::4']) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3', 'fd:10:1::4'], tunnel_name, route_ids=route1, nhg=nhg1_2, + ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3', 'fd:10:1::4']) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") # Set all endpoint to down state update_bfd_session_state(dvs, 'fd:10:2::1', 'Down') @@ -1667,15 +1838,19 @@ def test_vnet_orch_10(self, dvs, testlog): time.sleep(2) # Confirm the tunnel route is updated in ASIC - vnet_obj.check_del_vnet_routes(dvs, 'Vnet10', ["fd:10:10::1/128"]) - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::5'], tunnel_name, route_ids=route2, nhg=nhg2_1) - check_state_db_routes(dvs, 'Vnet10', "fd:10:20::1/128", ['fd:10:1::5']) - check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", []) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:10::1/128"]) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::5'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['3']) + check_state_db_routes(dvs, vnet_name, "fd:10:20::1/128", ['fd:10:1::5']) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", []) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:10:20::1/128") # Remove tunnel route2 - delete_vnet_routes(dvs, "fd:10:20::1/128", 'Vnet10') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet10', ["fd:10:20::1/128"]) - check_remove_state_db_routes(dvs, 'Vnet10', "fd:10:20::1/128") + delete_vnet_routes(dvs, "fd:10:20::1/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:20::1/128"]) + check_remove_state_db_routes(dvs, vnet_name, "fd:10:20::1/128") + check_remove_routes_advertisement(dvs, "fd:10:20::1/128") # Check the corresponding nexthop group is removed vnet_obj.fetch_exist_entries(dvs) @@ -1690,9 +1865,10 @@ def 
test_vnet_orch_10(self, dvs, testlog): check_bfd_session(dvs, ['fd:10:2::1', 'fd:10:2::2', 'fd:10:2::3', 'fd:10:2::4']) # Remove tunnel route 1 - delete_vnet_routes(dvs, "fd:10:10::1/128", 'Vnet10') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet10', ["fd:10:10::1/128"]) - check_remove_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128") + delete_vnet_routes(dvs, "fd:10:10::1/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:10::1/128"]) + check_remove_state_db_routes(dvs, vnet_name, "fd:10:10::1/128") + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") # Confirm the BFD sessions are removed check_del_bfd_session(dvs, ['fd:10:2::1', 'fd:10:2::2', 'fd:10:2::3', 'fd:10:2::4', 'fd:10:2::5']) @@ -1701,76 +1877,90 @@ def test_vnet_orch_10(self, dvs, testlog): vnet_obj.fetch_exist_entries(dvs) assert nhg1_2 not in vnet_obj.nhgs - delete_vnet_entry(dvs, 'Vnet10') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet10') + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) ''' Test 11 - Test for vnet tunnel routes with both single endpoint and ECMP group with endpoint health monitor ''' - def test_vnet_orch_11(self, dvs, testlog): + def test_vnet_orch_11(self, dvs, ordered_ecmp, testlog): vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_11' + tunnel_name = 'tunnel_11' + ordered_ecmp + vnet_name = 'Vnet11' + ordered_ecmp vnet_obj.fetch_exist_entries(dvs) create_vxlan_tunnel(dvs, tunnel_name, '11.11.11.11') - create_vnet_entry(dvs, 'Vnet11', tunnel_name, '100011', "") + create_vnet_entry(dvs, vnet_name, tunnel_name, '100011', "") - vnet_obj.check_vnet_entry(dvs, 'Vnet11') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet11', '100011') + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '100011') vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '11.11.11.11') vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet11', '11.0.0.1', ep_monitor='11.1.0.1') + create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '11.0.0.1', ep_monitor='11.1.0.1') # default bfd status is down, route should not be programmed in this status - vnet_obj.check_del_vnet_routes(dvs, 'Vnet11', ["100.100.1.1/32"]) - check_state_db_routes(dvs, 'Vnet11', "100.100.1.1/32", []) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Route should be properly configured when bfd session state goes up update_bfd_session_state(dvs, '11.1.0.1', 'Up') time.sleep(2) - vnet_obj.check_vnet_routes(dvs, 'Vnet11', '11.0.0.1', tunnel_name) - check_state_db_routes(dvs, 'Vnet11', "100.100.1.1/32", ['11.0.0.1']) + vnet_obj.check_vnet_routes(dvs, vnet_name, '11.0.0.1', tunnel_name) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['11.0.0.1']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Create another tunnel route with endpoint group overlapped with route1 vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet11', '11.0.0.1,11.0.0.2', ep_monitor='11.1.0.1,11.1.0.2') - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet11', ['11.0.0.1'], tunnel_name) - check_state_db_routes(dvs, 'Vnet11', "100.100.2.1/32", ['11.0.0.1']) + create_vnet_routes(dvs, "100.100.2.1/32", vnet_name, '11.0.0.2,11.0.0.1', ep_monitor='11.1.0.2,11.1.0.1') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['11.0.0.1'], tunnel_name, 
ordered_ecmp=ordered_ecmp, nh_seq_id=['1']) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['11.0.0.1']) + check_remove_routes_advertisement(dvs, "100.100.2.1/32") # Create a third tunnel route with another endpoint vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.3.1/32", 'Vnet11', '11.0.0.2', ep_monitor='11.1.0.2') + create_vnet_routes(dvs, "100.100.3.1/32", vnet_name, '11.0.0.2', ep_monitor='11.1.0.2') # Update BFD session state and verify route change update_bfd_session_state(dvs, '11.1.0.2', 'Up') time.sleep(2) - vnet_obj.check_vnet_routes(dvs, 'Vnet11', '11.0.0.2', tunnel_name) - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet11', ['11.0.0.1', '11.0.0.2'], tunnel_name, route_ids=route2, nhg=nhg2_1) - check_state_db_routes(dvs, 'Vnet11', "100.100.3.1/32", ['11.0.0.2']) - check_state_db_routes(dvs, 'Vnet11', "100.100.2.1/32", ['11.0.0.1', '11.0.0.2']) + vnet_obj.check_vnet_routes(dvs, vnet_name, '11.0.0.2', tunnel_name) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['11.0.0.1', '11.0.0.2'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2']) + check_state_db_routes(dvs, vnet_name, "100.100.3.1/32", ['11.0.0.2']) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['11.0.0.1', '11.0.0.2']) + check_remove_routes_advertisement(dvs, "100.100.2.1/32") + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.3.1/32") + update_bfd_session_state(dvs, '11.1.0.1', 'Down') time.sleep(2) - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet11', ['11.0.0.2'], tunnel_name, route_ids=route2, nhg=nhg2_1) - vnet_obj.check_del_vnet_routes(dvs, 'Vnet11', ["100.100.1.1/32"]) - check_state_db_routes(dvs, 'Vnet11', "100.100.2.1/32", ['11.0.0.2']) - check_state_db_routes(dvs, 'Vnet11', "100.100.1.1/32", []) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['11.0.0.2'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['2']) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['11.0.0.2']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.2.1/32") + # Set the route1 to a new endpoint vnet_obj.fetch_exist_entries(dvs) - set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet11', '11.0.0.2', ep_monitor='11.1.0.2') - vnet_obj.check_vnet_routes(dvs, 'Vnet11', '11.0.0.2', tunnel_name) - check_state_db_routes(dvs, 'Vnet11', "100.100.3.1/32", ['11.0.0.2']) + set_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '11.0.0.2', ep_monitor='11.1.0.2') + vnet_obj.check_vnet_routes(dvs, vnet_name, '11.0.0.2', tunnel_name) + check_state_db_routes(dvs, vnet_name, "100.100.3.1/32", ['11.0.0.2']) + check_remove_routes_advertisement(dvs, "100.100.3.1/32") # Remove tunnel route2 - delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet11') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet11', ["100.100.2.1/32"]) - check_remove_state_db_routes(dvs, 'Vnet11', "100.100.2.1/32") + delete_vnet_routes(dvs, "100.100.2.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.2.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.2.1/32") + check_remove_routes_advertisement(dvs, "100.100.2.1/32") # Check the corresponding nexthop group is removed vnet_obj.fetch_exist_entries(dvs) 
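# Illustrative sketch (not part of the patch): the ordered-ECMP assertions added
# throughout these tests reduce to one expectation. With ordered_ecmp == "true"
# the group is created as SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_ORDERED_ECMP and every
# member carries SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID taken from the
# nh_seq_id list; with "false" the group stays DYNAMIC_UNORDERED_ECMP and the
# attribute must be absent. The helper name below is hypothetical and only
# models that expectation; it is not a helper used by the test module.

def expected_nhg_member_attrs(ordered_ecmp, seq_id=None):
    """Attributes a test would expect on one ASIC_DB next-hop-group member."""
    attrs = {}
    if ordered_ecmp == "true" and seq_id is not None:
        # Ordered ECMP: the member must carry its position in the endpoint list.
        attrs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID"] = seq_id
    # Unordered ECMP: no sequence-id attribute is expected at all.
    return attrs

# Example: the second endpoint of an ordered group carries sequence id '2'.
assert expected_nhg_member_attrs("true", "2") == {"SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID": "2"}
assert expected_nhg_member_attrs("false") == {}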
@@ -1781,21 +1971,151 @@ def test_vnet_orch_11(self, dvs, testlog): check_bfd_session(dvs, ['11.1.0.2']) # Remove tunnel route 1 - delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet11') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet11', ["100.100.1.1/32"]) - check_remove_state_db_routes(dvs, 'Vnet11', "100.100.1.1/32") + delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # Remove tunnel route 3 - delete_vnet_routes(dvs, "100.100.3.1/32", 'Vnet11') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet11', ["100.100.3.1/32"]) - check_remove_state_db_routes(dvs, 'Vnet11', "100.100.3.1/32") + delete_vnet_routes(dvs, "100.100.3.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.3.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.3.1/32") + check_remove_routes_advertisement(dvs, "100.100.3.1/32") # Confirm the BFD sessions are removed check_del_bfd_session(dvs, ['11.1.0.1', '11.1.0.2']) - delete_vnet_entry(dvs, 'Vnet11') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet11') + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) + + + ''' + Test 12 - Test for vnet tunnel routes with ECMP nexthop group with endpoint health monitor and route advertisement + ''' + def test_vnet_orch_12(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_12' + + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, '12.12.12.12') + create_vnet_entry(dvs, 'Vnet12', tunnel_name, '10012', "", advertise_prefix=True) + + vnet_obj.check_vnet_entry(dvs, 'Vnet12') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet12', '10012') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '12.12.12.12') + + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet12', '12.0.0.1,12.0.0.2,12.0.0.3', ep_monitor='12.1.0.1,12.1.0.2,12.1.0.3') + + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.1/32"]) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Route should be properly configured when all bfd session states go up + update_bfd_session_state(dvs, '12.1.0.1', 'Up') + update_bfd_session_state(dvs, '12.1.0.2', 'Up') + update_bfd_session_state(dvs, '12.1.0.3', 'Up') + time.sleep(2) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.2', '12.0.0.3'], tunnel_name) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.2', '12.0.0.3']) + check_routes_advertisement(dvs, "100.100.1.1/32") + + # Remove endpoint from group if it goes down + update_bfd_session_state(dvs, '12.1.0.2', 'Down') + time.sleep(2) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.3']) + check_routes_advertisement(dvs, "100.100.1.1/32") + + # Create another tunnel route with endpoint group overlapped with route1 + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet12', '12.0.0.1,12.0.0.2,12.0.0.5', ep_monitor='12.1.0.1,12.1.0.2,12.1.0.5') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1'], tunnel_name) + 
check_state_db_routes(dvs, 'Vnet12', "100.100.2.1/32", ['12.0.0.1']) + check_routes_advertisement(dvs, "100.100.1.1/32") + + # Update BFD session state and verify route change + update_bfd_session_state(dvs, '12.1.0.5', 'Up') + time.sleep(2) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1) + check_state_db_routes(dvs, 'Vnet12', "100.100.2.1/32", ['12.0.0.1', '12.0.0.5']) + check_routes_advertisement(dvs, "100.100.2.1/32") + + # Update BFD state and check route nexthop + update_bfd_session_state(dvs, '12.1.0.3', 'Down') + time.sleep(2) + + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1']) + check_routes_advertisement(dvs, "100.100.1.1/32") + + # Set the route1 to a new group + set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet12', '12.0.0.1,12.0.0.2,12.0.0.3,12.0.0.4', ep_monitor='12.1.0.1,12.1.0.2,12.1.0.3,12.1.0.4') + update_bfd_session_state(dvs, '12.1.0.4', 'Up') + time.sleep(2) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.4'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.4']) + check_routes_advertisement(dvs, "100.100.1.1/32") + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs + + # Set BFD session state for a down endpoint to up + update_bfd_session_state(dvs, '12.1.0.2', 'Up') + time.sleep(2) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.2', '12.0.0.4'], tunnel_name, route_ids=route1, nhg=nhg1_2) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.2', '12.0.0.4']) + check_routes_advertisement(dvs, "100.100.1.1/32") + + # Set all endpoint to down state + update_bfd_session_state(dvs, '12.1.0.1', 'Down') + update_bfd_session_state(dvs, '12.1.0.2', 'Down') + update_bfd_session_state(dvs, '12.1.0.3', 'Down') + update_bfd_session_state(dvs, '12.1.0.4', 'Down') + time.sleep(2) + + # Confirm the tunnel route is updated in ASIC + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.1/32"]) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1) + check_state_db_routes(dvs, 'Vnet12', "100.100.2.1/32", ['12.0.0.5']) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + check_routes_advertisement(dvs, "100.100.2.1/32") + + # Remove tunnel route2 + delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet12') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.2.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet12', "100.100.2.1/32") + check_remove_routes_advertisement(dvs, "100.100.2.1/32") + + # Check the corresponding nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg2_1 not in vnet_obj.nhgs + + # Check the BFD session specific to the endpoint group is removed while others exist + check_del_bfd_session(dvs, ['12.1.0.5']) + check_bfd_session(dvs, ['12.1.0.1', '12.1.0.2', '12.1.0.3', '12.1.0.4']) + + # Remove tunnel route 1 + delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet12') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Check the 
previous nexthop group is removed
+        vnet_obj.fetch_exist_entries(dvs)
+        assert nhg1_2 not in vnet_obj.nhgs
+
+        # Confirm the BFD sessions are removed
+        check_del_bfd_session(dvs, ['12.1.0.1', '12.1.0.2', '12.1.0.3', '12.1.0.4', '12.1.0.5'])
+        delete_vnet_entry(dvs, 'Vnet12')
+        vnet_obj.check_del_vnet_entry(dvs, 'Vnet12')

 # Add Dummy always-pass test at end as workaroud
diff --git a/tests/test_vxlan_tunnel.py b/tests/test_vxlan_tunnel.py
index 14fe28261f..d296fcc741 100644
--- a/tests/test_vxlan_tunnel.py
+++ b/tests/test_vxlan_tunnel.py
@@ -26,6 +26,18 @@ def create_entry_pst(db, table, separator, key, pairs):
     create_entry(tbl, key, pairs)

+def delete_entry_pst(db, table, key):
+    tbl = swsscommon.ProducerStateTable(db, table)
+    tbl._del(key)
+    time.sleep(1)
+
+
+def delete_entry_tbl(db, table, key):
+    tbl = swsscommon.Table(db, table)
+    tbl._del(key)
+    time.sleep(1)
+
+
 def how_many_entries_exist(db, table):
     tbl = swsscommon.Table(db, table)
     return len(tbl.getKeys())
@@ -324,6 +336,66 @@ def test_vxlan_term_orch(self, dvs, testlog):
         create_vxlan_tunnel_entry(dvs, 'tunnel_4', 'entry_2', tunnel_map_map, 'Vlan57', '857', tunnel_map_ids, tunnel_map_entry_ids, tunnel_ids, tunnel_term_ids)

+def apply_test_vnet_cfg(cfg_db):
+
+    # create VXLAN Tunnel
+    create_entry_tbl(
+        cfg_db,
+        "VXLAN_TUNNEL", '|', "tunnel1",
+        [
+            ("src_ip", "1.1.1.1")
+        ],
+    )
+
+    # create VNET
+    create_entry_tbl(
+        cfg_db,
+        "VNET", '|', "Vnet1",
+        [
+            ("vxlan_tunnel", "tunnel1"),
+            ("vni", "1")
+        ],
+    )
+
+    return
+
+
+@pytest.fixture
+def env_setup(dvs):
+    cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0)
+    app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0)
+
+    create_entry_pst(
+        app_db,
+        "SWITCH_TABLE", ':', "switch",
+        [
+            ("vxlan_router_mac", "00:01:02:03:04:05")
+        ],
+    )
+
+    apply_test_vnet_cfg(cfg_db)
+
+    yield
+
+    delete_entry_pst(app_db, "SWITCH_TABLE", "switch")
+    delete_entry_tbl(cfg_db, "VXLAN_TUNNEL", "tunnel1")
+    delete_entry_tbl(cfg_db, "VNET", "Vnet1")
+
+def test_vnet_cleanup_config_reload(dvs, env_setup):
+
+    # Restart vxlanmgrd Process
+    dvs.runcmd(["systemctl", "restart", "vxlanmgrd"])
+
+    # Reapply cfg to simulate cfg reload
+    cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0)
+    apply_test_vnet_cfg(cfg_db)
+
+    time.sleep(0.5)
+
+    # Check if the netdevices are created as expected
+    ret, stdout = dvs.runcmd(["ip", "link", "show"])
+    assert "Vxlan1" in stdout
+    assert "Brvxlan1" in stdout

 # Add Dummy always-pass test at end as workaroud
 # for issue when Flaky fail on final test it invokes module tear-down before retrying
diff --git a/tests/test_warm_reboot.py b/tests/test_warm_reboot.py
index 36028dfc69..cf525a64f3 100644
--- a/tests/test_warm_reboot.py
+++ b/tests/test_warm_reboot.py
@@ -237,6 +237,20 @@ def ping_new_ips(dvs):
             dvs.runcmd(['sh', '-c', "ping -c 1 -W 0 -q {}.0.0.{} > /dev/null 2>&1".format(i*4, j+NUM_NEIGH_PER_INTF+2)])
             dvs.runcmd(['sh', '-c', "ping6 -c 1 -W 0 -q {}00::{} > /dev/null 2>&1".format(i*4, j+NUM_NEIGH_PER_INTF+2)])

+def warm_restart_set(dvs, app, enable):
+    db = swsscommon.DBConnector(6, dvs.redis_sock, 0)
+    tbl = swsscommon.Table(db, "WARM_RESTART_ENABLE_TABLE")
+    fvs = swsscommon.FieldValuePairs([("enable",enable)])
+    tbl.set(app, fvs)
+    time.sleep(1)
+
+
+def warm_restart_timer_set(dvs, app, timer, val):
+    db = swsscommon.DBConnector(4, dvs.redis_sock, 0)
+    tbl = swsscommon.Table(db, "WARM_RESTART")
+    fvs = swsscommon.FieldValuePairs([(timer, val)])
+    tbl.set(app, fvs)
+    time.sleep(1)

class
TestWarmReboot(object): def test_PortSyncdWarmRestart(self, dvs, testlog): @@ -245,10 +259,10 @@ def test_PortSyncdWarmRestart(self, dvs, testlog): appl_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) - dvs.runcmd("config warm_restart enable swss") + dvs.warm_restart_swss("true") - dvs.runcmd("config interface startup Ethernet16") - dvs.runcmd("config interface startup Ethernet20") + dvs.port_admin_set("Ethernet16", "up") + dvs.port_admin_set("Ethernet20", "up") time.sleep(1) @@ -259,8 +273,8 @@ def test_PortSyncdWarmRestart(self, dvs, testlog): intf_tbl.set("Ethernet20|11.0.0.9/29", fvs) intf_tbl.set("Ethernet16", fvs) intf_tbl.set("Ethernet20", fvs) - dvs.runcmd("config interface startup Ethernet16") - dvs.runcmd("config interface startup Ethernet20") + dvs.port_admin_set("Ethernet16", "up") + dvs.port_admin_set("Ethernet20", "up") dvs.servers[4].runcmd("ip link set down dev eth0") == 0 dvs.servers[4].runcmd("ip link set up dev eth0") == 0 @@ -339,12 +353,12 @@ def test_VlanMgrdWarmRestart(self, dvs, testlog): dvs.runcmd("ifconfig Ethernet16 0") dvs.runcmd("ifconfig Ethernet20 0") - dvs.runcmd("config interface startup Ethernet16 ") - dvs.runcmd("config interface startup Ethernet20 ") + dvs.port_admin_set("Ethernet16", "up") + dvs.port_admin_set("Ethernet20", "up") time.sleep(1) - dvs.runcmd("config warm_restart enable swss") + dvs.warm_restart_swss("true") # create vlan create_entry_tbl( @@ -387,8 +401,6 @@ def test_VlanMgrdWarmRestart(self, dvs, testlog): intf_tbl.set("Vlan20|11.0.0.9/29", fvs) intf_tbl.set("Vlan16", fvs) intf_tbl.set("Vlan20", fvs) - dvs.runcmd("config interface startup Vlan16") - dvs.runcmd("config interface startup Vlan20") dvs.servers[4].runcmd("ifconfig eth0 11.0.0.2/29") dvs.servers[4].runcmd("ip route add default via 11.0.0.1") @@ -453,7 +465,7 @@ def test_IntfMgrdWarmRestartNoInterfaces(self, dvs, testlog): state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) restore_count = swss_get_RestoreCount(dvs, state_db) - dvs.runcmd("config warm_restart enable swss") + dvs.warm_restart_swss("true") dvs.runcmd("supervisorctl restart intfmgrd") reached_desired_state = False @@ -474,7 +486,7 @@ def test_swss_neighbor_syncup(self, dvs, testlog): conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) - dvs.runcmd("config warm_restart enable swss") + dvs.warm_restart_swss("true") # # Testcase1: @@ -503,8 +515,8 @@ def test_swss_neighbor_syncup(self, dvs, testlog): intf_tbl.set("{}".format(intfs[1]), fvs) intf_tbl.set("{}".format(intfs[0]), fvs) intf_tbl.set("{}".format(intfs[1]), fvs) - dvs.runcmd("config interface startup {}".format(intfs[0])) - dvs.runcmd("config interface startup {}".format(intfs[1])) + dvs.port_admin_set(intfs[0], "up") + dvs.port_admin_set(intfs[1], "up") ips = ["24.0.0.2", "24.0.0.3", "28.0.0.2", "28.0.0.3"] v6ips = ["2400::2", "2400::3", "2800::2", "2800::3"] @@ -748,7 +760,7 @@ def test_swss_neighbor_syncup(self, dvs, testlog): # setup timer in configDB timer_value = "15" - dvs.runcmd("config warm_restart neighsyncd_timer {}".format(timer_value)) + warm_restart_timer_set(dvs, "swss", "neighsyncd_timer", timer_value) # get restore_count restore_count = swss_get_RestoreCount(dvs, state_db) @@ -847,7 +859,7 @@ def test_OrchagentWarmRestartReadyCheck(self, dvs, testlog): time.sleep(1) - dvs.runcmd("config warm_restart enable swss") + 
+        dvs.warm_restart_swss("true")
 
         config_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0)
         intf_tbl = swsscommon.Table(config_db, "INTERFACE")
@@ -856,8 +868,8 @@ def test_OrchagentWarmRestartReadyCheck(self, dvs, testlog):
         intf_tbl.set("Ethernet4|10.0.0.2/31", fvs)
         intf_tbl.set("Ethernet0", fvs)
         intf_tbl.set("Ethernet4", fvs)
-        dvs.runcmd("config interface startup Ethernet0")
-        dvs.runcmd("config interface startup Ethernet4")
+        dvs.port_admin_set("Ethernet0", "up")
+        dvs.port_admin_set("Ethernet4", "up")
 
         dvs.servers[0].runcmd("ifconfig eth0 10.0.0.1/31")
         dvs.servers[0].runcmd("ip route add default via 10.0.0.0")
@@ -916,7 +928,7 @@ def test_swss_port_state_syncup(self, dvs, testlog):
         conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0)
         state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0)
 
-        dvs.runcmd("config warm_restart enable swss")
+        dvs.warm_restart_swss("true")
 
         tbl = swsscommon.Table(appl_db, swsscommon.APP_PORT_TABLE_NAME)
 
@@ -931,9 +943,9 @@ def test_swss_port_state_syncup(self, dvs, testlog):
         intf_tbl.set("Ethernet0", fvs)
         intf_tbl.set("Ethernet4", fvs)
         intf_tbl.set("Ethernet8", fvs)
-        dvs.runcmd("config interface startup Ethernet0")
-        dvs.runcmd("config interface startup Ethernet4")
-        dvs.runcmd("config interface startup Ethernet8")
+        dvs.port_admin_set("Ethernet0", "up")
+        dvs.port_admin_set("Ethernet4", "up")
+        dvs.port_admin_set("Ethernet8", "up")
 
         dvs.runcmd("arp -s 10.0.0.1 00:00:00:00:00:01")
         dvs.runcmd("arp -s 10.0.0.3 00:00:00:00:00:02")
@@ -1102,9 +1114,9 @@ def test_routing_WarmRestart(self, dvs, testlog):
         intf_tbl.set("{}".format(intfs[1]), fvs)
         intf_tbl.set("{}".format(intfs[2]), fvs)
         intf_tbl.set("{}".format(intfs[2]), fvs)
-        dvs.runcmd("config interface startup {}".format(intfs[0]))
-        dvs.runcmd("config interface startup {}".format(intfs[1]))
-        dvs.runcmd("config interface startup {}".format(intfs[2]))
+        dvs.port_admin_set(intfs[0], "up")
+        dvs.port_admin_set(intfs[1], "up")
+        dvs.port_admin_set(intfs[2], "up")
 
         time.sleep(1)
 
@@ -1199,8 +1211,8 @@ def test_routing_WarmRestart(self, dvs, testlog):
 
         # The following two instructions will be substituted by the commented ones
         # once the later ones are added to sonic-utilities repo.
-        dvs.runcmd("config warm_restart enable bgp")
-        dvs.runcmd("config warm_restart bgp_timer {}".format(restart_timer))
+        warm_restart_set(dvs, "bgp", "true")
+        warm_restart_timer_set(dvs, "bgp", "bgp_timer", str(restart_timer))
 
         time.sleep(1)
 
@@ -1711,7 +1723,7 @@ def test_routing_WarmRestart(self, dvs, testlog):
         del_entry_tbl(state_db, "BGP_STATE_TABLE", "IPv4|eoiu")
         del_entry_tbl(state_db, "BGP_STATE_TABLE", "IPv6|eoiu")
 
-        dvs.runcmd("config warm_restart bgp_timer {}".format(restart_timer))
+        warm_restart_timer_set(dvs, "bgp", "bgp_timer", str(restart_timer))
         # Restart zebra
         dvs.stop_zebra()
         dvs.start_zebra()
@@ -1854,7 +1866,7 @@ def test_system_warmreboot_neighbor_syncup(self, dvs, testlog):
         flush_neigh_entries(dvs)
         time.sleep(5)
 
-        dvs.runcmd("config warm_restart enable system")
+        warm_restart_set(dvs, "system", "true")
 
         # Test neighbors on NUM_INTF (e,g 8) interfaces
         # Ethernet32/36/.../60, with ip: 32.0.0.1/24... 60.0.0.1/24
@@ -1877,7 +1889,7 @@ def test_system_warmreboot_neighbor_syncup(self, dvs, testlog):
             intf_tbl.set("Ethernet{}|{}00::1/64".format(i*4, i*4), fvs)
             intf_tbl.set("Ethernet{}".format(i*4, i*4), fvs)
             intf_tbl.set("Ethernet{}".format(i*4, i*4), fvs)
-            dvs.runcmd("config interface startup Ethernet{}".format(i*4, i*4))
+            dvs.port_admin_set("Ethernet{}".format(i*4), "up")
             dvs.servers[i].runcmd("ip link set up dev eth0")
             dvs.servers[i].runcmd("ip addr flush dev eth0")
             #result = dvs.servers[i].runcmd_output("ifconfig eth0 | grep HWaddr | awk '{print $NF}'")
@@ -2103,7 +2115,7 @@ def test_system_warmreboot_neighbor_syncup(self, dvs, testlog):
         swss_app_check_RestoreCount_single(state_db, restore_count, "neighsyncd")
 
         # disable system warm restart
-        dvs.runcmd("config warm_restart disable system")
+        warm_restart_set(dvs, "system", "false")
 
         for i in range(8, 8+NUM_INTF):
             intf_tbl._del("Ethernet{}|{}.0.0.1/24".format(i*4, i*4))
@@ -2117,11 +2129,11 @@ def test_VrfMgrdWarmRestart(self, dvs, testlog):
         appl_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0)
         state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0)
 
-        dvs.runcmd("config warm_restart enable swss")
+        dvs.warm_restart_swss("true")
 
         # bring up interface
-        dvs.runcmd("config interface startup Ethernet0 ")
-        dvs.runcmd("config interface startup Ethernet4 ")
+        dvs.port_admin_set("Ethernet0", "up")
+        dvs.port_admin_set("Ethernet4", "up")
 
         # create vrf
         create_entry_tbl(conf_db, "VRF", "Vrf_1", [('empty', 'empty')])
@@ -2285,7 +2297,7 @@ def test_MirrorSessionWarmReboot(self, dvs):
         # Monitor port should not change b/c routes are ECMP
         state_db.wait_for_field_match("MIRROR_SESSION_TABLE", "test_session", {"monitor_port": "Ethernet12"})
 
-        dvs.runcmd("config warm_restart enable swss")
+        dvs.warm_restart_swss("true")
 
         dvs.stop_swss()
         dvs.start_swss()
@@ -2332,7 +2344,7 @@ def test_EverflowWarmReboot(self, dvs, dvs_acl):
         asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY", 1 + len(asic_db.default_acl_entries))
 
         # Execute the warm reboot
-        dvs.runcmd("config warm_restart enable swss")
+        dvs.warm_restart_swss("true")
         dvs.stop_swss()
         dvs.start_swss()
 
@@ -2365,6 +2377,35 @@ def test_EverflowWarmReboot(self, dvs, dvs_acl):
         dvs.start_swss()
         dvs.check_swss_ready()
 
+    def test_TunnelMgrdWarmRestart(self, dvs):
+        tunnel_name = "MuxTunnel0"
+        tunnel_table = "TUNNEL_DECAP_TABLE"
+        tunnel_params = {
+            "tunnel_type": "IPINIP",
+            "dst_ip": "10.1.0.32",
+            "dscp_mode": "uniform",
+            "ecn_mode": "standard",
+            "ttl_mode": "pipe"
+        }
+
+        pubsub = dvs.SubscribeAppDbObject(tunnel_table)
+
+        dvs.runcmd("config warm_restart enable swss")
+        config_db = dvs.get_config_db()
+        config_db.create_entry("TUNNEL", tunnel_name, tunnel_params)
+
+        app_db = dvs.get_app_db()
+        app_db.wait_for_matching_keys(tunnel_table, [tunnel_name])
+
+        nadd, ndel = dvs.CountSubscribedObjects(pubsub)
+        assert nadd == len(tunnel_params)
+        assert ndel == 1  # Expect 1 deletion as part of table creation
+
+        dvs.runcmd("supervisorctl restart tunnelmgrd")
+        dvs.check_services_ready()
+        nadd, ndel = dvs.CountSubscribedObjects(pubsub)
+        assert nadd == 0
+        assert ndel == 0
 
 # Add Dummy always-pass test at end as workaroud
 # for issue when Flaky fail on final test it invokes module tear-down before retrying
diff --git a/tests/test_watermark.py b/tests/test_watermark.py
index 6d7c993125..23efedcb42 100644
--- a/tests/test_watermark.py
+++ b/tests/test_watermark.py
@@ -172,6 +172,12 @@ def set_up(self, dvs):
             tbl.set('', [(q, "SAI_QUEUE_TYPE_ALL")])
             self.all_q.append(q)
 
+    def clear_watermark(self, dvs, data):
+        adb = swsscommon.DBConnector(0, dvs.redis_sock, 0)
+        msg = json.dumps(data, separators=(',',':'))
+        adb.publish('WATERMARK_CLEAR_REQUEST', msg)
+        time.sleep(1)
+
     def test_telemetry_period(self, dvs):
         self.setup_dbs(dvs)
         self.set_up(dvs)
@@ -191,7 +197,10 @@ def test_telemetry_period(self, dvs):
 
         self.populate_asic_all(dvs, "123")
 
-        dvs.runcmd("config watermark telemetry interval {}".format(5))
+        interval = {"interval": "5"}
+        self.config_db.create_entry("WATERMARK_TABLE",
+                                    "TELEMETRY_INTERVAL",
+                                    interval)
 
         time.sleep(self.DEFAULT_TELEMETRY_INTERVAL + 1)
         time.sleep(self.NEW_INTERVAL + 1)
@@ -257,10 +266,7 @@ def test_clear(self, dvs):
 
         # clear pg shared watermark, and verify that headroom watermark and persistent watermarks are not affected
-        exitcode, output = dvs.runcmd("sonic-clear priority-group watermark shared")
-        time.sleep(1)
-        assert exitcode == 0, "CLI failure: %s" % output
-        # make sure it cleared
+        self.clear_watermark(dvs, ["USER", "PG_SHARED"])
 
         self.verify_value(dvs, self.pgs, WmTables.user, SaiWmStats.pg_shared, "0")
 
         # make sure the rest is untouched
@@ -271,9 +277,7 @@ def test_clear(self, dvs):
 
         # clear queue unicast persistent watermark, and verify that multicast watermark and user watermarks are not affected
-        exitcode, output = dvs.runcmd("sonic-clear queue persistent-watermark unicast")
-        time.sleep(1)
-        assert exitcode == 0, "CLI failure: %s" % output
+        self.clear_watermark(dvs, ["PERSISTENT", "Q_SHARED_UNI"])
 
         # make sure it cleared
         self.verify_value(dvs, self.uc_q, WmTables.persistent, SaiWmStats.queue_shared, "0")
 
@@ -289,16 +293,14 @@ def test_clear(self, dvs):
 
         # clear queue all watermark, and verify that multicast and unicast watermarks are not affected
 
         # clear persistent all watermark
-        exitcode, output = dvs.runcmd("sonic-clear queue persistent-watermark all")
-        time.sleep(1)
-        assert exitcode == 0, "CLI failure: %s" % output
+        self.clear_watermark(dvs, ["PERSISTENT", "Q_SHARED_ALL"])
+
         # make sure it cleared
         self.verify_value(dvs, self.all_q, WmTables.persistent, SaiWmStats.queue_shared, "0")
 
         # clear user all watermark
-        exitcode, output = dvs.runcmd("sonic-clear queue watermark all")
-        time.sleep(1)
-        assert exitcode == 0, "CLI failure: %s" % output
+        self.clear_watermark(dvs, ["USER", "Q_SHARED_ALL"])
+
         # make sure it cleared
         self.verify_value(dvs, self.all_q, WmTables.user, SaiWmStats.queue_shared, "0")