From 2c5116e33dcf9c6bc37ae4691e99a4d268a7c7fd Mon Sep 17 00:00:00 2001 From: Nazarii Hnydyn Date: Thu, 8 Sep 2022 17:40:28 +0300 Subject: [PATCH] [202205][counters] Improve performance by polling only configured ports buffer queue/pg counters (#2432) * Filter unconfigured ports buffers queue/pg counters configurations on init commit 6f1199afc3458e526c26b5d7c498fdaf810fbaca Author: Shlomi Bitton Date: Sun Jan 2 16:55:58 2022 +0000 Filter unconfigured ports buffers queue/pg counters configurations on init. If no buffer configurations available, no counters will be created. Allow creating/removing counters on runtime if buffer PG/Queue is created or removed. New UT added to verify new flow. Signed-off-by: Shlomi Bitton --- lgtm.yml | 1 + orchagent/bufferorch.cpp | 32 ++- orchagent/flexcounterorch.cpp | 181 ++++++++++++++- orchagent/flexcounterorch.h | 32 +++ orchagent/p4orch/tests/fake_portorch.cpp | 36 ++- orchagent/portsorch.cpp | 272 ++++++++++++++++++----- orchagent/portsorch.h | 20 +- tests/mock_tests/portsorch_ut.cpp | 13 +- tests/mock_tests/routeorch_ut.cpp | 6 +- tests/test_buffer_traditional.py | 14 +- tests/test_flex_counters.py | 122 +++++----- tests/test_pg_drop_counter.py | 64 +----- tests/test_watermark.py | 29 +-- 13 files changed, 592 insertions(+), 230 deletions(-) diff --git a/lgtm.yml b/lgtm.yml index 59f2e812af..981fcd589b 100644 --- a/lgtm.yml +++ b/lgtm.yml @@ -29,6 +29,7 @@ extraction: - flex - graphviz - autoconf-archive + - uuid-dev after_prepare: - git clone https://github.com/Azure/sonic-buildimage; pushd sonic-buildimage/src/libnl3 - git clone https://github.com/thom311/libnl libnl3-3.5.0; pushd libnl3-3.5.0; git checkout tags/libnl3_5_0 diff --git a/orchagent/bufferorch.cpp b/orchagent/bufferorch.cpp index 36e4c58d4f..3519ba432f 100644 --- a/orchagent/bufferorch.cpp +++ b/orchagent/bufferorch.cpp @@ -1,5 +1,6 @@ #include "tokenize.h" #include "bufferorch.h" +#include "directory.h" #include "logger.h" #include "sai_serialize.h" #include 
"warm_restart.h" @@ -16,6 +17,7 @@ extern sai_switch_api_t *sai_switch_api; extern sai_buffer_api_t *sai_buffer_api; extern PortsOrch *gPortsOrch; +extern Directory gDirectory; extern sai_object_id_t gSwitchId; #define BUFFER_POOL_WATERMARK_FLEX_STAT_COUNTER_POLL_MSECS "60000" @@ -815,6 +817,20 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) return handle_status; } } + // create/remove a port queue counter for the queue buffer + else + { + auto flexCounterOrch = gDirectory.get(); + auto queues = tokens[1]; + if (op == SET_COMMAND && flexCounterOrch->getQueueCountersState()) + { + gPortsOrch->createPortBufferQueueCounters(port, queues); + } + else if (op == DEL_COMMAND && flexCounterOrch->getQueueCountersState()) + { + gPortsOrch->removePortBufferQueueCounters(port, queues); + } + } } /* when we apply buffer configuration we need to increase the ref counter of this port @@ -907,7 +923,7 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup if (op == SET_COMMAND) { ref_resolve_status resolve_result = resolveFieldRefValue(m_buffer_type_maps, buffer_profile_field_name, - buffer_to_ref_table_map.at(buffer_profile_field_name), tuple, + buffer_to_ref_table_map.at(buffer_profile_field_name), tuple, sai_buffer_profile, buffer_profile_name); if (ref_resolve_status::success != resolve_result) { @@ -980,6 +996,20 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup return handle_status; } } + // create or remove a port PG counter for the PG buffer + else + { + auto flexCounterOrch = gDirectory.get(); + auto pgs = tokens[1]; + if (op == SET_COMMAND && flexCounterOrch->getPgWatermarkCountersState()) + { + gPortsOrch->createPortBufferPgCounters(port, pgs); + } + else if (op == DEL_COMMAND && flexCounterOrch->getPgWatermarkCountersState()) + { + gPortsOrch->removePortBufferPgCounters(port, pgs); + } + } } } diff --git a/orchagent/flexcounterorch.cpp b/orchagent/flexcounterorch.cpp index 
ffaac6daaf..e30b9a0cb1 100644 --- a/orchagent/flexcounterorch.cpp +++ b/orchagent/flexcounterorch.cpp @@ -10,6 +10,7 @@ #include "debugcounterorch.h" #include "directory.h" #include "copporch.h" +#include #include "routeorch.h" #include "macsecorch.h" #include "flowcounterrouteorch.h" @@ -62,6 +63,8 @@ unordered_map flexCounterGroupMap = FlexCounterOrch::FlexCounterOrch(DBConnector *db, vector &tableNames): Orch(db, tableNames), m_flexCounterConfigTable(db, CFG_FLEX_COUNTER_TABLE_NAME), + m_bufferQueueConfigTable(db, CFG_BUFFER_QUEUE_TABLE_NAME), + m_bufferPgConfigTable(db, CFG_BUFFER_PG_TABLE_NAME), m_flexCounterDb(new DBConnector("FLEX_COUNTER_DB", 0)), m_flexCounterGroupTable(new ProducerTable(m_flexCounterDb.get(), FLEX_COUNTER_GROUP_TABLE)), m_gbflexCounterDb(new DBConnector("GB_FLEX_COUNTER_DB", 0)), @@ -157,11 +160,13 @@ void FlexCounterOrch::doTask(Consumer &consumer) } else if(key == QUEUE_KEY) { - gPortsOrch->generateQueueMap(); + gPortsOrch->generateQueueMap(getQueueConfigurations()); + m_queue_enabled = true; } else if(key == PG_WATERMARK_KEY) { - gPortsOrch->generatePriorityGroupMap(); + gPortsOrch->generatePriorityGroupMap(getPgConfigurations()); + m_pg_watermark_enabled = true; } } if(gIntfsOrch && (key == RIF_KEY) && (value == "enable")) @@ -245,6 +250,16 @@ bool FlexCounterOrch::getPortBufferDropCountersState() const return m_port_buffer_drop_counter_enabled; } +bool FlexCounterOrch::getPgWatermarkCountersState() const +{ + return m_pg_watermark_enabled; +} + +bool FlexCounterOrch::getQueueCountersState() const +{ + return m_queue_enabled; +} + bool FlexCounterOrch::bake() { /* @@ -286,3 +301,165 @@ bool FlexCounterOrch::bake() Consumer* consumer = dynamic_cast(getExecutor(CFG_FLEX_COUNTER_TABLE_NAME)); return consumer->addToSync(entries); } + +map FlexCounterOrch::getQueueConfigurations() +{ + SWSS_LOG_ENTER(); + + map queuesStateVector; + std::vector portQueueKeys; + m_bufferQueueConfigTable.getKeys(portQueueKeys); + + for (const auto& 
portQueueKey : portQueueKeys) + { + auto toks = tokenize(portQueueKey, '|'); + if (toks.size() != 2) + { + SWSS_LOG_ERROR("Invalid BUFFER_QUEUE key: [%s]", portQueueKey.c_str()); + continue; + } + + auto configPortNames = tokenize(toks[0], ','); + auto configPortQueues = toks[1]; + toks = tokenize(configPortQueues, '-'); + + for (const auto& configPortName : configPortNames) + { + uint32_t maxQueueNumber = gPortsOrch->getNumberOfPortSupportedQueueCounters(configPortName); + uint32_t maxQueueIndex = maxQueueNumber - 1; + uint32_t minQueueIndex = 0; + + if (!queuesStateVector.count(configPortName)) + { + FlexCounterQueueStates flexCounterQueueState(maxQueueNumber); + queuesStateVector.insert(make_pair(configPortName, flexCounterQueueState)); + } + + try { + auto startIndex = to_uint(toks[0], minQueueIndex, maxQueueIndex); + if (toks.size() > 1) + { + auto endIndex = to_uint(toks[1], minQueueIndex, maxQueueIndex); + queuesStateVector.at(configPortName).enableQueueCounters(startIndex, endIndex); + } + else + { + queuesStateVector.at(configPortName).enableQueueCounter(startIndex); + } + } catch (std::invalid_argument const& e) { + SWSS_LOG_ERROR("Invalid queue index [%s] for port [%s]", configPortQueues.c_str(), configPortName.c_str()); + continue; + } + } + } + + return queuesStateVector; +} + +map FlexCounterOrch::getPgConfigurations() +{ + SWSS_LOG_ENTER(); + + map pgsStateVector; + std::vector portPgKeys; + m_bufferPgConfigTable.getKeys(portPgKeys); + + for (const auto& portPgKey : portPgKeys) + { + auto toks = tokenize(portPgKey, '|'); + if (toks.size() != 2) + { + SWSS_LOG_ERROR("Invalid BUFFER_PG key: [%s]", portPgKey.c_str()); + continue; + } + + auto configPortNames = tokenize(toks[0], ','); + auto configPortPgs = toks[1]; + toks = tokenize(configPortPgs, '-'); + + for (const auto& configPortName : configPortNames) + { + uint32_t maxPgNumber = gPortsOrch->getNumberOfPortSupportedPgCounters(configPortName); + uint32_t maxPgIndex = maxPgNumber - 1; + uint32_t 
minPgIndex = 0; + + if (!pgsStateVector.count(configPortName)) + { + FlexCounterPgStates flexCounterPgState(maxPgNumber); + pgsStateVector.insert(make_pair(configPortName, flexCounterPgState)); + } + + try { + auto startIndex = to_uint(toks[0], minPgIndex, maxPgIndex); + if (toks.size() > 1) + { + auto endIndex = to_uint(toks[1], minPgIndex, maxPgIndex); + pgsStateVector.at(configPortName).enablePgCounters(startIndex, endIndex); + } + else + { + pgsStateVector.at(configPortName).enablePgCounter(startIndex); + } + } catch (std::invalid_argument const& e) { + SWSS_LOG_ERROR("Invalid pg index [%s] for port [%s]", configPortPgs.c_str(), configPortName.c_str()); + continue; + } + } + } + + return pgsStateVector; +} + +FlexCounterQueueStates::FlexCounterQueueStates(uint32_t maxQueueNumber) +{ + SWSS_LOG_ENTER(); + m_queueStates.resize(maxQueueNumber, false); +} + +bool FlexCounterQueueStates::isQueueCounterEnabled(uint32_t index) const +{ + SWSS_LOG_ENTER(); + return m_queueStates[index]; +} + +void FlexCounterQueueStates::enableQueueCounters(uint32_t startIndex, uint32_t endIndex) +{ + SWSS_LOG_ENTER(); + for (uint32_t queueIndex = startIndex; queueIndex <= endIndex; queueIndex++) + { + enableQueueCounter(queueIndex); + } +} + +void FlexCounterQueueStates::enableQueueCounter(uint32_t queueIndex) +{ + SWSS_LOG_ENTER(); + m_queueStates[queueIndex] = true; +} + +FlexCounterPgStates::FlexCounterPgStates(uint32_t maxPgNumber) +{ + SWSS_LOG_ENTER(); + m_pgStates.resize(maxPgNumber, false); +} + +bool FlexCounterPgStates::isPgCounterEnabled(uint32_t index) const +{ + SWSS_LOG_ENTER(); + return m_pgStates[index]; +} + +void FlexCounterPgStates::enablePgCounters(uint32_t startIndex, uint32_t endIndex) +{ + SWSS_LOG_ENTER(); + for (uint32_t pgIndex = startIndex; pgIndex <= endIndex; pgIndex++) + { + enablePgCounter(pgIndex); + } +} + +void FlexCounterPgStates::enablePgCounter(uint32_t pgIndex) +{ + SWSS_LOG_ENTER(); + m_pgStates[pgIndex] = true; +} diff --git 
a/orchagent/flexcounterorch.h b/orchagent/flexcounterorch.h index c00a435b68..076faf5e19 100644 --- a/orchagent/flexcounterorch.h +++ b/orchagent/flexcounterorch.h @@ -10,6 +10,30 @@ extern "C" { #include "sai.h" } +class FlexCounterQueueStates +{ +public: + FlexCounterQueueStates(uint32_t maxQueueNumber); + bool isQueueCounterEnabled(uint32_t index) const; + void enableQueueCounters(uint32_t startIndex, uint32_t endIndex); + void enableQueueCounter(uint32_t queueIndex); + +private: + std::vector m_queueStates{}; +}; + +class FlexCounterPgStates +{ +public: + FlexCounterPgStates(uint32_t maxPgNumber); + bool isPgCounterEnabled(uint32_t index) const; + void enablePgCounters(uint32_t startIndex, uint32_t endIndex); + void enablePgCounter(uint32_t pgIndex); + +private: + std::vector m_pgStates{}; +}; + class FlexCounterOrch: public Orch { public: @@ -18,6 +42,10 @@ class FlexCounterOrch: public Orch virtual ~FlexCounterOrch(void); bool getPortCountersState() const; bool getPortBufferDropCountersState() const; + bool getPgWatermarkCountersState() const; + bool getQueueCountersState() const; + std::map getQueueConfigurations(); + std::map getPgConfigurations(); bool getHostIfTrapCounterState() const {return m_hostif_trap_counter_enabled;} bool getRouteFlowCountersState() const {return m_route_flow_counter_enabled;} bool bake() override; @@ -29,9 +57,13 @@ class FlexCounterOrch: public Orch shared_ptr m_gbflexCounterGroupTable = nullptr; bool m_port_counter_enabled = false; bool m_port_buffer_drop_counter_enabled = false; + bool m_pg_watermark_enabled = false; + bool m_queue_enabled = false; bool m_hostif_trap_counter_enabled = false; bool m_route_flow_counter_enabled = false; Table m_flexCounterConfigTable; + Table m_bufferQueueConfigTable; + Table m_bufferPgConfigTable; }; #endif diff --git a/orchagent/p4orch/tests/fake_portorch.cpp b/orchagent/p4orch/tests/fake_portorch.cpp index a0aad1ae81..f86c39afe7 100644 --- a/orchagent/p4orch/tests/fake_portorch.cpp +++ 
b/orchagent/p4orch/tests/fake_portorch.cpp @@ -181,11 +181,35 @@ bool PortsOrch::setPortPfc(sai_object_id_t portId, uint8_t pfc_bitmask) return true; } -void PortsOrch::generateQueueMap() +void PortsOrch::generateQueueMap(std::map queuesStateVector) { } -void PortsOrch::generatePriorityGroupMap() +void PortsOrch::generateQueueMapPerPort(const Port& port, FlexCounterQueueStates& queuesState) +{ +} + +void PortsOrch::createPortBufferQueueCounters(const Port &port, string queues) +{ +} + +void PortsOrch::removePortBufferQueueCounters(const Port &port, string queues) +{ +} + +void PortsOrch::generatePriorityGroupMap(std::map pgsStateVector) +{ +} + +void PortsOrch::generatePriorityGroupMapPerPort(const Port& port, FlexCounterPgStates& pgsState) +{ +} + +void PortsOrch::createPortBufferPgCounters(const Port& port, string pgs) +{ +} + +void PortsOrch::removePortBufferPgCounters(const Port& port, string pgs) { } @@ -581,14 +605,6 @@ bool PortsOrch::getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uin return true; } -void PortsOrch::generateQueueMapPerPort(const Port &port) -{ -} - -void PortsOrch::generatePriorityGroupMapPerPort(const Port &port) -{ -} - task_process_status PortsOrch::setPortAutoNeg(sai_object_id_t id, int an) { return task_success; diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index 8ea9c3cf95..c5962f16c8 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -2732,18 +2732,6 @@ bool PortsOrch::initPort(const string &alias, const string &role, const int inde port_buffer_drop_stat_manager.setCounterIdList(p.m_port_id, CounterType::PORT, port_buffer_drop_stats); } - /* when a port is added and priority group map counter is enabled --> we need to add pg counter for it */ - if (m_isPriorityGroupMapGenerated) - { - generatePriorityGroupMapPerPort(p); - } - - /* when a port is added and queue map counter is enabled --> we need to add queue map counter for it */ - if (m_isQueueMapGenerated) - { - 
generateQueueMapPerPort(p); - } - PortUpdate update = { p, true }; notify(SUBJECT_TYPE_PORT_CHANGE, static_cast(&update)); @@ -2796,18 +2784,6 @@ void PortsOrch::deInitPort(string alias, sai_object_id_t port_id) port_buffer_drop_stat_manager.clearCounterIdList(p.m_port_id); } - /* remove pg port counters */ - if (m_isPriorityGroupMapGenerated) - { - removePriorityGroupMapPerPort(p); - } - - /* remove queue port counters */ - if (m_isQueueMapGenerated) - { - removeQueueMapPerPort(p); - } - /* remove port name map from counter table */ m_counterTable->hdel("", alias); @@ -5908,7 +5884,7 @@ bool PortsOrch::removeTunnel(Port tunnel) return true; } -void PortsOrch::generateQueueMap() +void PortsOrch::generateQueueMap(map queuesStateVector) { if (m_isQueueMapGenerated) { @@ -5919,53 +5895,87 @@ void PortsOrch::generateQueueMap() { if (it.second.m_type == Port::PHY) { - generateQueueMapPerPort(it.second); + if (!queuesStateVector.count(it.second.m_alias)) + { + auto maxQueueNumber = getNumberOfPortSupportedQueueCounters(it.second.m_alias); + FlexCounterQueueStates flexCounterQueueState(maxQueueNumber); + queuesStateVector.insert(make_pair(it.second.m_alias, flexCounterQueueState)); + } + generateQueueMapPerPort(it.second, queuesStateVector.at(it.second.m_alias)); } } m_isQueueMapGenerated = true; } -void PortsOrch::removeQueueMapPerPort(const Port& port) +void PortsOrch::generateQueueMapPerPort(const Port& port, FlexCounterQueueStates& queuesState) { - /* Remove the Queue map in the Counter DB */ + /* Create the Queue map in the Counter DB */ + /* Add stat counters to flex_counter */ + vector queueVector; + vector queuePortVector; + vector queueIndexVector; + vector queueTypeVector; for (size_t queueIndex = 0; queueIndex < port.m_queue_ids.size(); ++queueIndex) { std::ostringstream name; name << port.m_alias << ":" << queueIndex; - std::unordered_set counter_stats; const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]); - m_queueTable->hdel("",name.str()); 
- m_queuePortTable->hdel("",id); - string queueType; uint8_t queueRealIndex = 0; if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex)) { - m_queueTypeTable->hdel("",id); - m_queueIndexTable->hdel("",id); + if (!queuesState.isQueueCounterEnabled(queueRealIndex)) + { + continue; + } + queueTypeVector.emplace_back(id, queueType); + queueIndexVector.emplace_back(id, to_string(queueRealIndex)); } + queueVector.emplace_back(name.str(), id); + queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); + + // Install a flex counter for this queue to track stats + std::unordered_set counter_stats; for (const auto& it: queue_stat_ids) { counter_stats.emplace(sai_serialize_queue_stat(it)); } - queue_stat_manager.clearCounterIdList(port.m_queue_ids[queueIndex]); + queue_stat_manager.setCounterIdList(port.m_queue_ids[queueIndex], CounterType::QUEUE, counter_stats); - /* remove watermark queue counters */ + /* add watermark queue counters */ string key = getQueueWatermarkFlexCounterTableKey(id); - m_flexCounterTable->del(key); + string delimiter(""); + std::ostringstream counters_stream; + for (const auto& it: queueWatermarkStatIds) + { + counters_stream << delimiter << sai_serialize_queue_stat(it); + delimiter = comma; + } + + vector fieldValues; + fieldValues.emplace_back(QUEUE_COUNTER_ID_LIST, counters_stream.str()); + + m_flexCounterTable->set(key, fieldValues); } - CounterCheckOrch::getInstance().removePort(port); + m_queueTable->set("", queueVector); + m_queuePortTable->set("", queuePortVector); + m_queueIndexTable->set("", queueIndexVector); + m_queueTypeTable->set("", queueTypeVector); + + CounterCheckOrch::getInstance().addPort(port); } -void PortsOrch::generateQueueMapPerPort(const Port& port) +void PortsOrch::createPortBufferQueueCounters(const Port &port, string queues) { + SWSS_LOG_ENTER(); + /* Create the Queue map in the Counter DB */ /* Add stat counters to flex_counter */ vector queueVector; @@ -5973,16 +5983,21 @@ 
void PortsOrch::generateQueueMapPerPort(const Port& port) vector queueIndexVector; vector queueTypeVector; - for (size_t queueIndex = 0; queueIndex < port.m_queue_ids.size(); ++queueIndex) + auto toks = tokenize(queues, '-'); + auto startIndex = to_uint(toks[0]); + auto endIndex = startIndex; + if (toks.size() > 1) + { + endIndex = to_uint(toks[1]); + } + + for (auto queueIndex = startIndex; queueIndex <= endIndex; queueIndex++) { std::ostringstream name; name << port.m_alias << ":" << queueIndex; const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]); - queueVector.emplace_back(name.str(), id); - queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); - string queueType; uint8_t queueRealIndex = 0; if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex)) @@ -5991,6 +6006,9 @@ void PortsOrch::generateQueueMapPerPort(const Port& port) queueIndexVector.emplace_back(id, to_string(queueRealIndex)); } + queueVector.emplace_back(name.str(), id); + queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); + // Install a flex counter for this queue to track stats std::unordered_set counter_stats; for (const auto& it: queue_stat_ids) @@ -6024,7 +6042,50 @@ void PortsOrch::generateQueueMapPerPort(const Port& port) CounterCheckOrch::getInstance().addPort(port); } -void PortsOrch::generatePriorityGroupMap() +void PortsOrch::removePortBufferQueueCounters(const Port &port, string queues) +{ + SWSS_LOG_ENTER(); + + /* Remove the Queues maps in the Counter DB */ + /* Remove stat counters from flex_counter DB */ + auto toks = tokenize(queues, '-'); + auto startIndex = to_uint(toks[0]); + auto endIndex = startIndex; + if (toks.size() > 1) + { + endIndex = to_uint(toks[1]); + } + + for (auto queueIndex = startIndex; queueIndex <= endIndex; queueIndex++) + { + std::ostringstream name; + name << port.m_alias << ":" << queueIndex; + const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]); + + // 
Remove the queue counter from counters DB maps + m_queueTable->hdel("", name.str()); + m_queuePortTable->hdel("", id); + + string queueType; + uint8_t queueRealIndex = 0; + if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex)) + { + m_queueTypeTable->hdel("", id); + m_queueIndexTable->hdel("", id); + } + + // Remove the flex counter for this queue + queue_stat_manager.clearCounterIdList(port.m_queue_ids[queueIndex]); + + // Remove watermark queue counters + string key = getQueueWatermarkFlexCounterTableKey(id); + m_flexCounterTable->del(key); + } + + CounterCheckOrch::getInstance().removePort(port); +} + +void PortsOrch::generatePriorityGroupMap(map pgsStateVector) { if (m_isPriorityGroupMapGenerated) { @@ -6035,48 +6096,100 @@ void PortsOrch::generatePriorityGroupMap() { if (it.second.m_type == Port::PHY) { - generatePriorityGroupMapPerPort(it.second); + if (!pgsStateVector.count(it.second.m_alias)) + { + auto maxPgNumber = getNumberOfPortSupportedPgCounters(it.second.m_alias); + FlexCounterPgStates flexCounterPgState(maxPgNumber); + pgsStateVector.insert(make_pair(it.second.m_alias, flexCounterPgState)); + } + generatePriorityGroupMapPerPort(it.second, pgsStateVector.at(it.second.m_alias)); } } m_isPriorityGroupMapGenerated = true; } -void PortsOrch::removePriorityGroupMapPerPort(const Port& port) +void PortsOrch::generatePriorityGroupMapPerPort(const Port& port, FlexCounterPgStates& pgsState) { - /* Remove the PG map in the Counter DB */ + /* Create the PG map in the Counter DB */ + /* Add stat counters to flex_counter */ + vector pgVector; + vector pgPortVector; + vector pgIndexVector; for (size_t pgIndex = 0; pgIndex < port.m_priority_group_ids.size(); ++pgIndex) { + if (!pgsState.isPgCounterEnabled(static_cast(pgIndex))) + { + continue; + } std::ostringstream name; name << port.m_alias << ":" << pgIndex; const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]); + + pgVector.emplace_back(name.str(), id); + 
pgPortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); + pgIndexVector.emplace_back(id, to_string(pgIndex)); + string key = getPriorityGroupWatermarkFlexCounterTableKey(id); - m_pgTable->hdel("",name.str()); - m_pgPortTable->hdel("",id); - m_pgIndexTable->hdel("",id); + std::string delimiter = ""; + std::ostringstream counters_stream; + /* Add watermark counters to flex_counter */ + for (const auto& it: ingressPriorityGroupWatermarkStatIds) + { + counters_stream << delimiter << sai_serialize_ingress_priority_group_stat(it); + delimiter = comma; + } - m_flexCounterTable->del(key); + vector fieldValues; + fieldValues.emplace_back(PG_COUNTER_ID_LIST, counters_stream.str()); + m_flexCounterTable->set(key, fieldValues); + delimiter = ""; + std::ostringstream ingress_pg_drop_packets_counters_stream; key = getPriorityGroupDropPacketsFlexCounterTableKey(id); - /* remove dropped packets counters to flex_counter */ - m_flexCounterTable->del(key); + /* Add dropped packets counters to flex_counter */ + for (const auto& it: ingressPriorityGroupDropStatIds) + { + ingress_pg_drop_packets_counters_stream << delimiter << sai_serialize_ingress_priority_group_stat(it); + if (delimiter.empty()) + { + delimiter = comma; + } + } + fieldValues.clear(); + fieldValues.emplace_back(PG_COUNTER_ID_LIST, ingress_pg_drop_packets_counters_stream.str()); + m_flexCounterTable->set(key, fieldValues); } - CounterCheckOrch::getInstance().removePort(port); + m_pgTable->set("", pgVector); + m_pgPortTable->set("", pgPortVector); + m_pgIndexTable->set("", pgIndexVector); + + CounterCheckOrch::getInstance().addPort(port); } -void PortsOrch::generatePriorityGroupMapPerPort(const Port& port) +void PortsOrch::createPortBufferPgCounters(const Port& port, string pgs) { + SWSS_LOG_ENTER(); + /* Create the PG map in the Counter DB */ /* Add stat counters to flex_counter */ vector pgVector; vector pgPortVector; vector pgIndexVector; - for (size_t pgIndex = 0; pgIndex < 
port.m_priority_group_ids.size(); ++pgIndex) + auto toks = tokenize(pgs, '-'); + auto startIndex = to_uint(toks[0]); + auto endIndex = startIndex; + if (toks.size() > 1) + { + endIndex = to_uint(toks[1]); + } + + for (auto pgIndex = startIndex; pgIndex <= endIndex; pgIndex++) { std::ostringstream name; name << port.m_alias << ":" << pgIndex; @@ -6126,6 +6239,43 @@ void PortsOrch::generatePriorityGroupMapPerPort(const Port& port) CounterCheckOrch::getInstance().addPort(port); } +void PortsOrch::removePortBufferPgCounters(const Port& port, string pgs) +{ + SWSS_LOG_ENTER(); + + /* Remove the Pgs maps in the Counter DB */ + /* Remove stat counters from flex_counter DB */ + auto toks = tokenize(pgs, '-'); + auto startIndex = to_uint(toks[0]); + auto endIndex = startIndex; + if (toks.size() > 1) + { + endIndex = to_uint(toks[1]); + } + + for (auto pgIndex = startIndex; pgIndex <= endIndex; pgIndex++) + { + std::ostringstream name; + name << port.m_alias << ":" << pgIndex; + const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]); + + // Remove the pg counter from counters DB maps + m_pgTable->hdel("", name.str()); + m_pgPortTable->hdel("", id); + m_pgIndexTable->hdel("", id); + + // Remove dropped packets counters from flex_counter + string key = getPriorityGroupDropPacketsFlexCounterTableKey(id); + m_flexCounterTable->del(key); + + // Remove watermark counters from flex_counter + key = getPriorityGroupWatermarkFlexCounterTableKey(id); + m_flexCounterTable->del(key); + } + + CounterCheckOrch::getInstance().removePort(port); +} + void PortsOrch::generatePortCounterMap() { if (m_isPortCounterMapGenerated) @@ -6176,6 +6326,16 @@ void PortsOrch::generatePortBufferDropCounterMap() m_isPortBufferDropCounterMapGenerated = true; } +uint32_t PortsOrch::getNumberOfPortSupportedPgCounters(string port) +{ + return static_cast(m_portList[port].m_priority_group_ids.size()); +} + +uint32_t PortsOrch::getNumberOfPortSupportedQueueCounters(string port) +{ + return 
static_cast(m_portList[port].m_queue_ids.size()); +} + void PortsOrch::doTask(NotificationConsumer &consumer) { SWSS_LOG_ENTER(); diff --git a/orchagent/portsorch.h b/orchagent/portsorch.h index 28e576e906..1204c63e96 100755 --- a/orchagent/portsorch.h +++ b/orchagent/portsorch.h @@ -129,9 +129,17 @@ class PortsOrch : public Orch, public Subject bool setPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t pfc_bitmask); bool getPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t *pfc_bitmask); + + void generateQueueMap(map queuesStateVector); + uint32_t getNumberOfPortSupportedQueueCounters(string port); + void createPortBufferQueueCounters(const Port &port, string queues); + void removePortBufferQueueCounters(const Port &port, string queues); + + void generatePriorityGroupMap(map pgsStateVector); + uint32_t getNumberOfPortSupportedPgCounters(string port); + void createPortBufferPgCounters(const Port &port, string pgs); + void removePortBufferPgCounters(const Port& port, string pgs); - void generateQueueMap(); - void generatePriorityGroupMap(); void generatePortCounterMap(); void generatePortBufferDropCounterMap(); @@ -338,13 +346,9 @@ class PortsOrch : public Orch, public Subject bool getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uint8_t &index); bool m_isQueueMapGenerated = false; - void generateQueueMapPerPort(const Port& port); - void removeQueueMapPerPort(const Port& port); - + void generateQueueMapPerPort(const Port& port, FlexCounterQueueStates& queuesState); bool m_isPriorityGroupMapGenerated = false; - void generatePriorityGroupMapPerPort(const Port& port); - void removePriorityGroupMapPerPort(const Port& port); - + void generatePriorityGroupMapPerPort(const Port& port, FlexCounterPgStates& pgsState); bool m_isPortCounterMapGenerated = false; bool m_isPortBufferDropCounterMapGenerated = false; diff --git a/tests/mock_tests/portsorch_ut.cpp b/tests/mock_tests/portsorch_ut.cpp index 6425dca20f..012203c749 100644 --- 
a/tests/mock_tests/portsorch_ut.cpp +++ b/tests/mock_tests/portsorch_ut.cpp @@ -160,13 +160,14 @@ namespace portsorch_test ASSERT_EQ(gPortsOrch, nullptr); + gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + vector flex_counter_tables = { CFG_FLEX_COUNTER_TABLE_NAME }; auto* flexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); gDirectory.set(flexCounterOrch); - gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); vector buffer_tables = { APP_BUFFER_POOL_TABLE_NAME, APP_BUFFER_PROFILE_TABLE_NAME, APP_BUFFER_QUEUE_TABLE_NAME, @@ -892,7 +893,7 @@ namespace portsorch_test * updated to DB. */ TEST_F(PortsOrchTest, PortOperStatusIsUpAndOperSpeedIsZero) - { + { Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); // Get SAI default ports to populate DB @@ -917,7 +918,7 @@ namespace portsorch_test Port port; gPortsOrch->getPort("Ethernet0", port); ASSERT_TRUE(port.m_oper_status != SAI_PORT_OPER_STATUS_UP); - + // save original api since we will spy auto orig_port_api = sai_port_api; sai_port_api = new sai_port_api_t(); @@ -935,14 +936,14 @@ namespace portsorch_test // Return 0 for port operational speed attrs[0].value.u32 = 0; } - + return (sai_status_t)SAI_STATUS_SUCCESS; } ); auto exec = static_cast(gPortsOrch->getExecutor("PORT_STATUS_NOTIFICATIONS")); auto consumer = exec->getNotificationConsumer(); - + // mock a redis reply for notification, it notifies that Ehernet0 is going to up mockReply = (redisReply *)calloc(sizeof(redisReply), 1); mockReply->type = REDIS_REPLY_ARRAY; @@ -964,7 +965,7 @@ namespace portsorch_test // trigger the notification consumer->readData(); gPortsOrch->doTask(*consumer); - mockReply = nullptr; + mockReply = nullptr; gPortsOrch->getPort("Ethernet0", port); ASSERT_TRUE(port.m_oper_status == SAI_PORT_OPER_STATUS_UP); diff --git a/tests/mock_tests/routeorch_ut.cpp b/tests/mock_tests/routeorch_ut.cpp index 
66df4bfbcc..2c1c4b8535 100644 --- a/tests/mock_tests/routeorch_ut.cpp +++ b/tests/mock_tests/routeorch_ut.cpp @@ -176,15 +176,15 @@ namespace routeorch_test { APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri } }; + ASSERT_EQ(gPortsOrch, nullptr); + gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + vector flex_counter_tables = { CFG_FLEX_COUNTER_TABLE_NAME }; auto* flexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); gDirectory.set(flexCounterOrch); - ASSERT_EQ(gPortsOrch, nullptr); - gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); - static const vector route_pattern_tables = { CFG_FLOW_COUNTER_ROUTE_PATTERN_TABLE_NAME, }; diff --git a/tests/test_buffer_traditional.py b/tests/test_buffer_traditional.py index 31d1afbbd8..21371cb05a 100644 --- a/tests/test_buffer_traditional.py +++ b/tests/test_buffer_traditional.py @@ -77,12 +77,14 @@ def get_pg_name_map(self): @pytest.fixture def setup_teardown_test(self, dvs): - try: - self.setup_db(dvs) - self.set_port_qos_table(self.INTF, '3,4') - self.lossless_pg_combinations = ['3-4'] - finally: - self.teardown() + self.setup_db(dvs) + self.set_port_qos_table(self.INTF, '3,4') + self.lossless_pg_combinations = ['3-4'] + time.sleep(2) + + yield + + self.teardown() def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): orig_cable_len = None diff --git a/tests/test_flex_counters.py b/tests/test_flex_counters.py index 76a1a535f9..f5a0b146b2 100644 --- a/tests/test_flex_counters.py +++ b/tests/test_flex_counters.py @@ -7,8 +7,6 @@ ROUTE_TO_PATTERN_MAP = "COUNTERS_ROUTE_TO_PATTERN_MAP" NUMBER_OF_RETRIES = 10 CPU_PORT_OID = "0x0" -PORT = "Ethernet0" -PORT_MAP = "COUNTERS_PORT_NAME_MAP" counter_group_meta = { 'port_counter': { @@ -73,7 +71,6 @@ } } -@pytest.mark.usefixtures('dvs_port_manager') class TestFlexCounters(object): def setup_dbs(self, dvs): @@ -133,6 +130,18 @@ def 
wait_for_interval_set(self, group, interval): assert False, "Polling interval is not applied to FLEX_COUNTER_GROUP_TABLE for group {}, expect={}, actual={}".format(group, interval, interval_value) + def wait_for_buffer_pg_queue_counter(self, map, port, index, isSet): + for retry in range(NUMBER_OF_RETRIES): + counter_oid = self.counters_db.db_connection.hget(map, port + ':' + index) + if (isSet and counter_oid): + return counter_oid + elif (not isSet and not counter_oid): + return None + else: + time.sleep(1) + + assert False, "Counter not {} for port: {}, type: {}, index: {}".format("created" if isSet else "removed", port, map, index) + def verify_no_flex_counters_tables(self, counter_stat): counters_stat_keys = self.flex_db.get_keys("FLEX_COUNTER_TABLE:" + counter_stat) assert len(counters_stat_keys) == 0, "FLEX_COUNTER_TABLE:" + str(counter_stat) + " tables exist before enabling the flex counter group" @@ -692,64 +701,53 @@ def remove_ip_address(self, interface, ip): def set_admin_status(self, interface, status): self.config_db.update_entry("PORT", interface, {"admin_status": status}) - - def test_add_remove_ports(self, dvs): + + def test_create_remove_buffer_pg_counter(self, dvs): + """ + Test steps: + 1. Enable PG flex counters. + 2. Configure new buffer prioriy group for a port + 3. Verify counter is automatically created + 4. Remove the new buffer prioriy group for the port + 5. 
Verify counter is automatically removed + + Args: + dvs (object): virtual switch object + """ self.setup_dbs(dvs) - - # set flex counter - counter_key = counter_group_meta['queue_counter']['key'] - counter_stat = counter_group_meta['queue_counter']['group_name'] - counter_map = counter_group_meta['queue_counter']['name_map'] - self.set_flex_counter_group_status(counter_key, counter_map) + meta_data = counter_group_meta['pg_watermark_counter'] + + self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map']) + + self.config_db.update_entry('BUFFER_PG', 'Ethernet0|1', {'profile': 'ingress_lossy_profile'}) + counter_oid = self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '1', True) + self.wait_for_id_list(meta_data['group_name'], "Ethernet0", counter_oid) + + self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|1') + self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '1', False) + self.wait_for_id_list_remove(meta_data['group_name'], "Ethernet0", counter_oid) + + def test_create_remove_buffer_queue_counter(self, dvs): + """ + Test steps: + 1. Enable Queue flex counters. + 2. Configure new buffer queue for a port + 3. Verify counter is automatically created + 4. Remove the new buffer queue for the port + 5. 
Verify counter is automatically removed + + Args: + dvs (object): virtual switch object + """ + self.setup_dbs(dvs) + meta_data = counter_group_meta['queue_counter'] + + self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map']) + + self.config_db.update_entry('BUFFER_QUEUE', 'Ethernet0|7', {'profile': 'egress_lossless_profile'}) + counter_oid = self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '7', True) + self.wait_for_id_list(meta_data['group_name'], "Ethernet0", counter_oid) - # receive port info - fvs = self.config_db.get_entry("PORT", PORT) - assert len(fvs) > 0 - - # save all the oids of the pg drop counters - oid_list = [] - counters_queue_map = self.counters_db.get_entry("COUNTERS_QUEUE_NAME_MAP", "") - for key, oid in counters_queue_map.items(): - if PORT in key: - oid_list.append(oid) - fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", counter_stat + ":%s" % oid) - assert len(fields) == 1 - oid_list_len = len(oid_list) - - # get port oid - port_oid = self.counters_db.get_entry(PORT_MAP, "")[PORT] - - # remove port and verify that it was removed properly - self.dvs_port.remove_port(PORT) - dvs.get_asic_db().wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid) - - # verify counters were removed from flex counter table - for oid in oid_list: - fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", counter_stat + ":%s" % oid) - assert len(fields) == 0 - - # verify that port counter maps were removed from counters db - counters_queue_map = self.counters_db.get_entry("COUNTERS_QUEUE_NAME_MAP", "") - for key in counters_queue_map.keys(): - if PORT in key: - assert False - - # add port and wait until the port is added on asic db - num_of_keys_without_port = len(dvs.get_asic_db().get_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT")) - - self.config_db.create_entry("PORT", PORT, fvs) - - dvs.get_asic_db().wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", num_of_keys_without_port + 1) - 
dvs.get_counters_db().wait_for_fields("COUNTERS_QUEUE_NAME_MAP", "", ["%s:0"%(PORT)]) - - # verify queue counters were added - oid_list = [] - counters_queue_map = self.counters_db.get_entry("COUNTERS_QUEUE_NAME_MAP", "") - - for key, oid in counters_queue_map.items(): - if PORT in key: - oid_list.append(oid) - fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", counter_stat + ":%s" % oid) - assert len(fields) == 1 - # the number of the oids needs to be the same as the original number of oids (before removing a port and adding) - assert oid_list_len == len(oid_list) + self.config_db.delete_entry('BUFFER_QUEUE', 'Ethernet0|7') + self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '7', False) + self.wait_for_id_list_remove(meta_data['group_name'], "Ethernet0", counter_oid) diff --git a/tests/test_pg_drop_counter.py b/tests/test_pg_drop_counter.py index b3682881de..6d97af5f5c 100644 --- a/tests/test_pg_drop_counter.py +++ b/tests/test_pg_drop_counter.py @@ -2,16 +2,12 @@ import re import time import json -import pytest import redis from swsscommon import swsscommon pg_drop_attr = "SAI_INGRESS_PRIORITY_GROUP_STAT_DROPPED_PACKETS" -PORT = "Ethernet0" - -@pytest.mark.usefixtures('dvs_port_manager') class TestPGDropCounter(object): DEFAULT_POLL_INTERVAL = 10 pgs = {} @@ -61,14 +57,11 @@ def verify_value(self, dvs, obj_ids, entry_name, expected_value): assert found, "entry name %s not found" % (entry_name) def set_up_flex_counter(self): - pg_stats_entry = {"PG_COUNTER_ID_LIST": "{}".format(pg_drop_attr)} - for pg in self.pgs: - self.flex_db.create_entry("FLEX_COUNTER_TABLE", "PG_DROP_STAT_COUNTER:{}".format(pg), pg_stats_entry) - fc_status_enable = {"FLEX_COUNTER_STATUS": "enable"} - self.config_db.create_entry("FLEX_COUNTER_TABLE", "PG_DROP", fc_status_enable) self.config_db.create_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK", fc_status_enable) + # Wait for DB's to populate by orchagent + time.sleep(2) def clear_flex_counter(self): for pg in 
self.pgs: @@ -79,10 +72,12 @@ def clear_flex_counter(self): def test_pg_drop_counters(self, dvs): self.setup_dbs(dvs) - self.pgs = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP") - try: - self.set_up_flex_counter() + self.set_up_flex_counter() + # Get all configured counters OID's + self.pgs = self.counters_db.db_connection.hgetall("COUNTERS_PG_NAME_MAP").values() + assert self.pgs is not None and len(self.pgs) > 0 + try: self.populate_asic(dvs, "0") time.sleep(self.DEFAULT_POLL_INTERVAL) self.verify_value(dvs, self.pgs, pg_drop_attr, "0") @@ -97,48 +92,3 @@ def test_pg_drop_counters(self, dvs): finally: self.clear_flex_counter() - def test_pg_drop_counter_port_add_remove(self, dvs): - self.setup_dbs(dvs) - - try: - # configure pg drop flex counter - self.set_up_flex_counter() - - # receive port info - fvs = self.config_db.get_entry("PORT", PORT) - assert len(fvs) > 0 - - # save all the oids of the pg drop counters - oid_list = [] - for priority in range(0,7): - oid_list.append(dvs.get_counters_db().get_entry("COUNTERS_PG_NAME_MAP", "")["%s:%d"%(PORT, priority)]) - # verify that counters exists on flex counter - fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK_STAT_COUNTER:%s"%oid_list[-1]) - assert len(fields) == 1 - - # remove port - port_oid = self.counters_db.get_entry("COUNTERS_PORT_NAME_MAP", "")[PORT] - self.dvs_port.remove_port(PORT) - dvs.get_asic_db().wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid) - - # verify counters were removed from flex counter table - for oid in oid_list: - fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK_STAT_COUNTER:%s"%oid) - assert len(fields) == 0 - - # add port and wait until the port is added on asic db - num_of_keys_without_port = len(dvs.get_asic_db().get_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT")) - self.config_db.create_entry("PORT", PORT, fvs) - dvs.get_asic_db().wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", num_of_keys_without_port + 
1) - dvs.get_counters_db().wait_for_fields("COUNTERS_PG_NAME_MAP", "", ["%s:0"%(PORT)]) - - # verify counter was added - for priority in range(0,7): - oid = dvs.get_counters_db().get_entry("COUNTERS_PG_NAME_MAP", "")["%s:%d"%(PORT, priority)] - - # verify that counters exists on flex counter - fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK_STAT_COUNTER:%s"%oid) - assert len(fields) == 1 - - finally: - self.clear_flex_counter() diff --git a/tests/test_watermark.py b/tests/test_watermark.py index 23efedcb42..a8cee70aa1 100644 --- a/tests/test_watermark.py +++ b/tests/test_watermark.py @@ -104,22 +104,8 @@ def verify_value(self, dvs, obj_ids, table_name, watermark_name, expected_value) assert found, "no such watermark found" def set_up_flex_counter(self, dvs): - for q in self.qs: - self.flex_db.create_entry("FLEX_COUNTER_TABLE", - "QUEUE_WATERMARK_STAT_COUNTER:{}".format(q), - WmFCEntry.queue_stats_entry) - - for pg in self.pgs: - self.flex_db.create_entry("FLEX_COUNTER_TABLE", - "PG_WATERMARK_STAT_COUNTER:{}".format(pg), - WmFCEntry.pg_stats_entry) - - for buffer in self.buffers: - self.flex_db.create_entry("FLEX_COUNTER_TABLE", - "BUFFER_POOL_WATERMARK_STAT_COUNTER:{}".format(buffer), - WmFCEntry.buffer_stats_entry) - fc_status_enable = {"FLEX_COUNTER_STATUS": "enable"} + self.config_db.create_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK", fc_status_enable) @@ -130,7 +116,8 @@ def set_up_flex_counter(self, dvs): "BUFFER_POOL_WATERMARK", fc_status_enable) - self.populate_asic_all(dvs, "0") + # Wait for DB's to populate by orchagent + time.sleep(2) def clear_flex_counter(self, dvs): for q in self.qs: @@ -150,10 +137,14 @@ def clear_flex_counter(self, dvs): self.config_db.delete_entry("FLEX_COUNTER_TABLE", "BUFFER_POOL_WATERMARK") def set_up(self, dvs): - self.qs = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_QUEUE") - self.pgs = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP") + self.pgs = 
self.counters_db.db_connection.hgetall("COUNTERS_PG_NAME_MAP").values() + assert self.pgs is not None and len(self.pgs) > 0 + self.qs = self.counters_db.db_connection.hgetall("COUNTERS_QUEUE_NAME_MAP").values() + assert self.qs is not None and len(self.qs) > 0 self.buffers = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_BUFFER_POOL") + self.populate_asic_all(dvs, "0") + db = swsscommon.DBConnector(swsscommon.COUNTERS_DB, dvs.redis_sock, 0) tbl = swsscommon.Table(db, "COUNTERS_QUEUE_TYPE_MAP") @@ -180,9 +171,9 @@ def clear_watermark(self, dvs, data): def test_telemetry_period(self, dvs): self.setup_dbs(dvs) + self.set_up_flex_counter(dvs) self.set_up(dvs) try: - self.set_up_flex_counter(dvs) self.enable_unittests(dvs, "true") self.populate_asic_all(dvs, "100")