From dbdf31c10958414ee108850e8ee244c90d28b544 Mon Sep 17 00:00:00 2001
From: Vadym Hlushko <62022266+vadymhlushko-mlnx@users.noreply.github.com>
Date: Tue, 25 Oct 2022 18:19:50 +0300
Subject: [PATCH] [counters] Improve performance by polling only configured ports buffer queue/pg counters (#2473)

This reverts commit f0f1eb47c4ae5359666a026f8b6d4b4d2d1838aa.

- What I did
Currently, in SONiC, queue and PG counters are created by default for all ports, with the maximum possible number of counters. This feature changes that behavior to poll only the configured counters provided by the config DB BUFFER_PG and BUFFER_QUEUE tables. If no such tables are present in the DB, no counters are created for the ports. Unwanted queues/PGs returned by SAI API calls are filtered out and the creation of their queue/PG counters is skipped. Counters can also be created or removed at runtime when a buffer PG/Queue is configured or removed.

- Why I did it
Improve performance by filtering out unconfigured queue/PG counters at init.

- How I verified it
After enabling the counters, check that the configured counters are created in the Counters DB according to the configuration. Add/remove buffer PG/Queue configurations and observe that the corresponding counters are created/removed accordingly. A new UT was added to verify this flow.
---
orchagent/bufferorch.cpp | 32 +-
orchagent/flexcounterorch.cpp | 181 ++++++++-
orchagent/flexcounterorch.h | 32 ++
.../p4orch/tests/fake_flexcounterorch.cpp | 7 +-
orchagent/p4orch/tests/fake_portorch.cpp | 36 +-
orchagent/portsorch.cpp | 363 +++++++++++++-----
orchagent/portsorch.h | 20 +-
tests/mock_tests/portsorch_ut.cpp | 3 +-
tests/mock_tests/routeorch_ut.cpp | 6 +-
tests/test_buffer_traditional.py | 14 +-
tests/test_flex_counters.py | 122 +++---
tests/test_pg_drop_counter.py | 64 +--
tests/test_watermark.py | 29 +-
13 files changed, 641 insertions(+), 268 deletions(-)

diff --git a/orchagent/bufferorch.cpp b/orchagent/bufferorch.cpp index 36e4c58d4f5e..3519ba432fac 100644 --- a/orchagent/bufferorch.cpp +++ b/orchagent/bufferorch.cpp @@ -1,5 +1,6 @@ #include "tokenize.h" #include "bufferorch.h" +#include "directory.h" #include "logger.h" #include "sai_serialize.h" #include "warm_restart.h" @@ -16,6 +17,7 @@ extern sai_switch_api_t *sai_switch_api; extern sai_buffer_api_t *sai_buffer_api; extern PortsOrch *gPortsOrch; +extern Directory gDirectory; extern sai_object_id_t gSwitchId; #define BUFFER_POOL_WATERMARK_FLEX_STAT_COUNTER_POLL_MSECS "60000" @@ -815,6 +817,20 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) return handle_status; } } + // create/remove a port queue counter for the queue buffer + else + { + auto flexCounterOrch = gDirectory.get<FlexCounterOrch*>(); + auto queues = tokens[1]; + if (op == SET_COMMAND && flexCounterOrch->getQueueCountersState()) + { + gPortsOrch->createPortBufferQueueCounters(port, queues); + } + else if (op == DEL_COMMAND && flexCounterOrch->getQueueCountersState()) + { + gPortsOrch->removePortBufferQueueCounters(port, queues); + } + } } /* when we apply buffer configuration we need to increase the ref counter of this port @@ -907,7 +923,7 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup if (op == SET_COMMAND) { ref_resolve_status resolve_result = resolveFieldRefValue(m_buffer_type_maps, buffer_profile_field_name, - buffer_to_ref_table_map.at(buffer_profile_field_name), tuple, + buffer_to_ref_table_map.at(buffer_profile_field_name), tuple, sai_buffer_profile, buffer_profile_name); if (ref_resolve_status::success != resolve_result) { @@ -980,6 +996,20 @@
task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup return handle_status; } } + // create or remove a port PG counter for the PG buffer + else + { + auto flexCounterOrch = gDirectory.get(); + auto pgs = tokens[1]; + if (op == SET_COMMAND && flexCounterOrch->getPgWatermarkCountersState()) + { + gPortsOrch->createPortBufferPgCounters(port, pgs); + } + else if (op == DEL_COMMAND && flexCounterOrch->getPgWatermarkCountersState()) + { + gPortsOrch->removePortBufferPgCounters(port, pgs); + } + } } } diff --git a/orchagent/flexcounterorch.cpp b/orchagent/flexcounterorch.cpp index ffaac6daaf5e..e30b9a0cb1c3 100644 --- a/orchagent/flexcounterorch.cpp +++ b/orchagent/flexcounterorch.cpp @@ -10,6 +10,7 @@ #include "debugcounterorch.h" #include "directory.h" #include "copporch.h" +#include #include "routeorch.h" #include "macsecorch.h" #include "flowcounterrouteorch.h" @@ -62,6 +63,8 @@ unordered_map flexCounterGroupMap = FlexCounterOrch::FlexCounterOrch(DBConnector *db, vector &tableNames): Orch(db, tableNames), m_flexCounterConfigTable(db, CFG_FLEX_COUNTER_TABLE_NAME), + m_bufferQueueConfigTable(db, CFG_BUFFER_QUEUE_TABLE_NAME), + m_bufferPgConfigTable(db, CFG_BUFFER_PG_TABLE_NAME), m_flexCounterDb(new DBConnector("FLEX_COUNTER_DB", 0)), m_flexCounterGroupTable(new ProducerTable(m_flexCounterDb.get(), FLEX_COUNTER_GROUP_TABLE)), m_gbflexCounterDb(new DBConnector("GB_FLEX_COUNTER_DB", 0)), @@ -157,11 +160,13 @@ void FlexCounterOrch::doTask(Consumer &consumer) } else if(key == QUEUE_KEY) { - gPortsOrch->generateQueueMap(); + gPortsOrch->generateQueueMap(getQueueConfigurations()); + m_queue_enabled = true; } else if(key == PG_WATERMARK_KEY) { - gPortsOrch->generatePriorityGroupMap(); + gPortsOrch->generatePriorityGroupMap(getPgConfigurations()); + m_pg_watermark_enabled = true; } } if(gIntfsOrch && (key == RIF_KEY) && (value == "enable")) @@ -245,6 +250,16 @@ bool FlexCounterOrch::getPortBufferDropCountersState() const return m_port_buffer_drop_counter_enabled; } +bool FlexCounterOrch::getPgWatermarkCountersState() const +{ + return m_pg_watermark_enabled; +} + +bool FlexCounterOrch::getQueueCountersState() const +{ + return m_queue_enabled; +} + bool FlexCounterOrch::bake() { /* @@ -286,3 +301,165 @@ bool FlexCounterOrch::bake() Consumer* consumer = dynamic_cast(getExecutor(CFG_FLEX_COUNTER_TABLE_NAME)); return consumer->addToSync(entries); } + +map FlexCounterOrch::getQueueConfigurations() +{ + SWSS_LOG_ENTER(); + + map queuesStateVector; + std::vector portQueueKeys; + m_bufferQueueConfigTable.getKeys(portQueueKeys); + + for (const auto& portQueueKey : portQueueKeys) + { + auto toks = tokenize(portQueueKey, '|'); + if (toks.size() != 2) + { + SWSS_LOG_ERROR("Invalid BUFFER_QUEUE key: [%s]", portQueueKey.c_str()); + continue; + } + + auto configPortNames = tokenize(toks[0], ','); + auto configPortQueues = toks[1]; + toks = tokenize(configPortQueues, '-'); + + for (const auto& configPortName : configPortNames) + { + uint32_t maxQueueNumber = gPortsOrch->getNumberOfPortSupportedQueueCounters(configPortName); + uint32_t maxQueueIndex = maxQueueNumber - 1; + uint32_t minQueueIndex = 0; + + if (!queuesStateVector.count(configPortName)) + { + FlexCounterQueueStates flexCounterQueueState(maxQueueNumber); + queuesStateVector.insert(make_pair(configPortName, flexCounterQueueState)); + } + + try { + auto startIndex = to_uint(toks[0], minQueueIndex, maxQueueIndex); + if (toks.size() > 1) + { + auto endIndex = to_uint(toks[1], minQueueIndex, maxQueueIndex); + 
queuesStateVector.at(configPortName).enableQueueCounters(startIndex, endIndex); + } + else + { + queuesStateVector.at(configPortName).enableQueueCounter(startIndex); + } + } catch (std::invalid_argument const& e) { + SWSS_LOG_ERROR("Invalid queue index [%s] for port [%s]", configPortQueues.c_str(), configPortName.c_str()); + continue; + } + } + } + + return queuesStateVector; +} + +map FlexCounterOrch::getPgConfigurations() +{ + SWSS_LOG_ENTER(); + + map pgsStateVector; + std::vector portPgKeys; + m_bufferPgConfigTable.getKeys(portPgKeys); + + for (const auto& portPgKey : portPgKeys) + { + auto toks = tokenize(portPgKey, '|'); + if (toks.size() != 2) + { + SWSS_LOG_ERROR("Invalid BUFFER_PG key: [%s]", portPgKey.c_str()); + continue; + } + + auto configPortNames = tokenize(toks[0], ','); + auto configPortPgs = toks[1]; + toks = tokenize(configPortPgs, '-'); + + for (const auto& configPortName : configPortNames) + { + uint32_t maxPgNumber = gPortsOrch->getNumberOfPortSupportedPgCounters(configPortName); + uint32_t maxPgIndex = maxPgNumber - 1; + uint32_t minPgIndex = 0; + + if (!pgsStateVector.count(configPortName)) + { + FlexCounterPgStates flexCounterPgState(maxPgNumber); + pgsStateVector.insert(make_pair(configPortName, flexCounterPgState)); + } + + try { + auto startIndex = to_uint(toks[0], minPgIndex, maxPgIndex); + if (toks.size() > 1) + { + auto endIndex = to_uint(toks[1], minPgIndex, maxPgIndex); + pgsStateVector.at(configPortName).enablePgCounters(startIndex, endIndex); + } + else + { + pgsStateVector.at(configPortName).enablePgCounter(startIndex); + } + } catch (std::invalid_argument const& e) { + SWSS_LOG_ERROR("Invalid pg index [%s] for port [%s]", configPortPgs.c_str(), configPortName.c_str()); + continue; + } + } + } + + return pgsStateVector; +} + +FlexCounterQueueStates::FlexCounterQueueStates(uint32_t maxQueueNumber) +{ + SWSS_LOG_ENTER(); + m_queueStates.resize(maxQueueNumber, false); +} + +bool FlexCounterQueueStates::isQueueCounterEnabled(uint32_t index) const +{ + SWSS_LOG_ENTER(); + return m_queueStates[index]; +} + +void FlexCounterQueueStates::enableQueueCounters(uint32_t startIndex, uint32_t endIndex) +{ + SWSS_LOG_ENTER(); + for (uint32_t queueIndex = startIndex; queueIndex <= endIndex; queueIndex++) + { + enableQueueCounter(queueIndex); + } +} + +void FlexCounterQueueStates::enableQueueCounter(uint32_t queueIndex) +{ + SWSS_LOG_ENTER(); + m_queueStates[queueIndex] = true; +} + +FlexCounterPgStates::FlexCounterPgStates(uint32_t maxPgNumber) +{ + SWSS_LOG_ENTER(); + m_pgStates.resize(maxPgNumber, false); +} + +bool FlexCounterPgStates::isPgCounterEnabled(uint32_t index) const +{ + SWSS_LOG_ENTER(); + return m_pgStates[index]; +} + +void FlexCounterPgStates::enablePgCounters(uint32_t startIndex, uint32_t endIndex) +{ + SWSS_LOG_ENTER(); + for (uint32_t pgIndex = startIndex; pgIndex <= endIndex; pgIndex++) + { + enablePgCounter(pgIndex); + } +} + +void FlexCounterPgStates::enablePgCounter(uint32_t pgIndex) +{ + SWSS_LOG_ENTER(); + m_pgStates[pgIndex] = true; +} diff --git a/orchagent/flexcounterorch.h b/orchagent/flexcounterorch.h index 132bfa3b5e62..a6e7527a05c2 100644 --- a/orchagent/flexcounterorch.h +++ b/orchagent/flexcounterorch.h @@ -10,6 +10,30 @@ extern "C" { #include "sai.h" } +class FlexCounterQueueStates +{ +public: + FlexCounterQueueStates(uint32_t maxQueueNumber); + bool isQueueCounterEnabled(uint32_t index) const; + void enableQueueCounters(uint32_t startIndex, uint32_t endIndex); + void enableQueueCounter(uint32_t queueIndex); + +private: + std::vector 
m_queueStates{}; +}; + +class FlexCounterPgStates +{ +public: + FlexCounterPgStates(uint32_t maxPgNumber); + bool isPgCounterEnabled(uint32_t index) const; + void enablePgCounters(uint32_t startIndex, uint32_t endIndex); + void enablePgCounter(uint32_t pgIndex); + +private: + std::vector m_pgStates{}; +}; + class FlexCounterOrch: public Orch { public: @@ -18,6 +42,10 @@ class FlexCounterOrch: public Orch virtual ~FlexCounterOrch(void); bool getPortCountersState() const; bool getPortBufferDropCountersState() const; + bool getPgWatermarkCountersState() const; + bool getQueueCountersState() const; + std::map getQueueConfigurations(); + std::map getPgConfigurations(); bool getHostIfTrapCounterState() const {return m_hostif_trap_counter_enabled;} bool getRouteFlowCountersState() const {return m_route_flow_counter_enabled;} bool bake() override; @@ -29,9 +57,13 @@ class FlexCounterOrch: public Orch std::shared_ptr m_gbflexCounterGroupTable = nullptr; bool m_port_counter_enabled = false; bool m_port_buffer_drop_counter_enabled = false; + bool m_pg_watermark_enabled = false; + bool m_queue_enabled = false; bool m_hostif_trap_counter_enabled = false; bool m_route_flow_counter_enabled = false; Table m_flexCounterConfigTable; + Table m_bufferQueueConfigTable; + Table m_bufferPgConfigTable; }; #endif diff --git a/orchagent/p4orch/tests/fake_flexcounterorch.cpp b/orchagent/p4orch/tests/fake_flexcounterorch.cpp index a98795bd7776..e44fc555f612 100644 --- a/orchagent/p4orch/tests/fake_flexcounterorch.cpp +++ b/orchagent/p4orch/tests/fake_flexcounterorch.cpp @@ -1,8 +1,11 @@ #include "copporch.h" #include "flexcounterorch.h" -FlexCounterOrch::FlexCounterOrch(swss::DBConnector *db, std::vector &tableNames) - : Orch(db, tableNames), m_flexCounterConfigTable(db, CFG_FLEX_COUNTER_TABLE_NAME) +FlexCounterOrch::FlexCounterOrch(swss::DBConnector *db, std::vector &tableNames) : + Orch(db, tableNames), + m_flexCounterConfigTable(db, CFG_FLEX_COUNTER_TABLE_NAME), + m_bufferQueueConfigTable(db, CFG_BUFFER_QUEUE_TABLE_NAME), + m_bufferPgConfigTable(db, CFG_BUFFER_PG_TABLE_NAME) { } diff --git a/orchagent/p4orch/tests/fake_portorch.cpp b/orchagent/p4orch/tests/fake_portorch.cpp index f16ff2409aea..f9098f663918 100644 --- a/orchagent/p4orch/tests/fake_portorch.cpp +++ b/orchagent/p4orch/tests/fake_portorch.cpp @@ -181,11 +181,35 @@ bool PortsOrch::setPortPfc(sai_object_id_t portId, uint8_t pfc_bitmask) return true; } -void PortsOrch::generateQueueMap() +void PortsOrch::generateQueueMap(std::map queuesStateVector) { } -void PortsOrch::generatePriorityGroupMap() +void PortsOrch::generateQueueMapPerPort(const Port& port, FlexCounterQueueStates& queuesState, bool voq) +{ +} + +void PortsOrch::createPortBufferQueueCounters(const Port &port, string queues) +{ +} + +void PortsOrch::removePortBufferQueueCounters(const Port &port, string queues) +{ +} + +void PortsOrch::generatePriorityGroupMap(std::map pgsStateVector) +{ +} + +void PortsOrch::generatePriorityGroupMapPerPort(const Port& port, FlexCounterPgStates& pgsState) +{ +} + +void PortsOrch::createPortBufferPgCounters(const Port& port, string pgs) +{ +} + +void PortsOrch::removePortBufferPgCounters(const Port& port, string pgs) { } @@ -582,14 +606,6 @@ bool PortsOrch::getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uin return true; } -void PortsOrch::generateQueueMapPerPort(const Port &port, bool voq) -{ -} - -void PortsOrch::generatePriorityGroupMapPerPort(const Port &port) -{ -} - task_process_status PortsOrch::setPortAutoNeg(sai_object_id_t id, int an) { 
return task_success; diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index 62557abf2463..004a9af624ce 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -2797,18 +2797,6 @@ bool PortsOrch::initPort(const string &alias, const string &role, const int inde port_buffer_drop_stat_manager.setCounterIdList(p.m_port_id, CounterType::PORT, port_buffer_drop_stats); } - /* when a port is added and priority group map counter is enabled --> we need to add pg counter for it */ - if (m_isPriorityGroupMapGenerated) - { - generatePriorityGroupMapPerPort(p); - } - - /* when a port is added and queue map counter is enabled --> we need to add queue map counter for it */ - if (m_isQueueMapGenerated) - { - generateQueueMapPerPort(p, false); - } - PortUpdate update = { p, true }; notify(SUBJECT_TYPE_PORT_CHANGE, static_cast(&update)); @@ -2861,18 +2849,6 @@ void PortsOrch::deInitPort(string alias, sai_object_id_t port_id) port_buffer_drop_stat_manager.clearCounterIdList(p.m_port_id); } - /* remove pg port counters */ - if (m_isPriorityGroupMapGenerated) - { - removePriorityGroupMapPerPort(p); - } - - /* remove queue port counters */ - if (m_isQueueMapGenerated) - { - removeQueueMapPerPort(p); - } - /* remove port name map from counter table */ m_counterTable->hdel("", alias); @@ -6025,7 +6001,7 @@ bool PortsOrch::removeTunnel(Port tunnel) return true; } -void PortsOrch::generateQueueMap() +void PortsOrch::generateQueueMap(map queuesStateVector) { if (m_isQueueMapGenerated) { @@ -6036,61 +6012,35 @@ void PortsOrch::generateQueueMap() { if (it.second.m_type == Port::PHY) { - generateQueueMapPerPort(it.second, false); + if (!queuesStateVector.count(it.second.m_alias)) + { + auto maxQueueNumber = getNumberOfPortSupportedQueueCounters(it.second.m_alias); + FlexCounterQueueStates flexCounterQueueState(maxQueueNumber); + queuesStateVector.insert(make_pair(it.second.m_alias, flexCounterQueueState)); + } + generateQueueMapPerPort(it.second, queuesStateVector.at(it.second.m_alias), false); if (gMySwitchType == "voq") { - generateQueueMapPerPort(it.second, true); + generateQueueMapPerPort(it.second, queuesStateVector.at(it.second.m_alias), true); } } if (it.second.m_type == Port::SYSTEM) { - generateQueueMapPerPort(it.second, true); + if (!queuesStateVector.count(it.second.m_alias)) + { + auto maxQueueNumber = getNumberOfPortSupportedQueueCounters(it.second.m_alias); + FlexCounterQueueStates flexCounterQueueState(maxQueueNumber); + queuesStateVector.insert(make_pair(it.second.m_alias, flexCounterQueueState)); + } + generateQueueMapPerPort(it.second, queuesStateVector.at(it.second.m_alias), true); } } m_isQueueMapGenerated = true; } -void PortsOrch::removeQueueMapPerPort(const Port& port) -{ - /* Remove the Queue map in the Counter DB */ - - for (size_t queueIndex = 0; queueIndex < port.m_queue_ids.size(); ++queueIndex) - { - std::ostringstream name; - name << port.m_alias << ":" << queueIndex; - std::unordered_set counter_stats; - - const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]); - - m_queueTable->hdel("",name.str()); - m_queuePortTable->hdel("",id); - - string queueType; - uint8_t queueRealIndex = 0; - if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex)) - { - m_queueTypeTable->hdel("",id); - m_queueIndexTable->hdel("",id); - } - - for (const auto& it: queue_stat_ids) - { - counter_stats.emplace(sai_serialize_queue_stat(it)); - } - queue_stat_manager.clearCounterIdList(port.m_queue_ids[queueIndex]); - - /* remove watermark queue 
counters */ - string key = getQueueWatermarkFlexCounterTableKey(id); - - m_flexCounterTable->del(key); - } - - CounterCheckOrch::getInstance().removePort(port); -} - -void PortsOrch::generateQueueMapPerPort(const Port& port, bool voq) +void PortsOrch::generateQueueMapPerPort(const Port& port, FlexCounterQueueStates& queuesState, bool voq) { /* Create the Queue map in the Counter DB */ /* Add stat counters to flex_counter */ @@ -6099,6 +6049,7 @@ void PortsOrch::generateQueueMapPerPort(const Port& port, bool voq) vector queueIndexVector; vector queueTypeVector; std::vector queue_ids; + if (voq) { queue_ids = m_port_voq_ids[port.m_alias]; @@ -6111,35 +6062,40 @@ void PortsOrch::generateQueueMapPerPort(const Port& port, bool voq) for (size_t queueIndex = 0; queueIndex < queue_ids.size(); ++queueIndex) { std::ostringstream name; - if (voq) - { - name << port.m_system_port_info.alias << ":" << queueIndex; - } - else - { - name << port.m_alias << ":" << queueIndex; - } - - const auto id = sai_serialize_object_id(queue_ids[queueIndex]); - queueVector.emplace_back(name.str(), id); - if (voq) + if (voq) { - queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_system_port_oid)); + name << port.m_system_port_info.alias << ":" << queueIndex; } else { - queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); + name << port.m_alias << ":" << queueIndex; } + const auto id = sai_serialize_object_id(queue_ids[queueIndex]); + string queueType; uint8_t queueRealIndex = 0; if (getQueueTypeAndIndex(queue_ids[queueIndex], queueType, queueRealIndex)) { + if (!queuesState.isQueueCounterEnabled(queueRealIndex)) + { + continue; + } queueTypeVector.emplace_back(id, queueType); queueIndexVector.emplace_back(id, to_string(queueRealIndex)); } + queueVector.emplace_back(name.str(), id); + if (voq) + { + queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_system_port_oid)); + } + else + { + queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); + } + // Install a flex counter for this queue to track stats std::unordered_set counter_stats; for (const auto& it: queue_stat_ids) @@ -6148,9 +6104,10 @@ void PortsOrch::generateQueueMapPerPort(const Port& port, bool voq) } queue_stat_manager.setCounterIdList(queue_ids[queueIndex], CounterType::QUEUE, counter_stats); - if (voq) { - continue; - } + if (voq) + { + continue; + } /* add watermark queue counters */ string key = getQueueWatermarkFlexCounterTableKey(id); @@ -6184,7 +6141,120 @@ void PortsOrch::generateQueueMapPerPort(const Port& port, bool voq) CounterCheckOrch::getInstance().addPort(port); } -void PortsOrch::generatePriorityGroupMap() +void PortsOrch::createPortBufferQueueCounters(const Port &port, string queues) +{ + SWSS_LOG_ENTER(); + + /* Create the Queue map in the Counter DB */ + /* Add stat counters to flex_counter */ + vector queueVector; + vector queuePortVector; + vector queueIndexVector; + vector queueTypeVector; + + auto toks = tokenize(queues, '-'); + auto startIndex = to_uint(toks[0]); + auto endIndex = startIndex; + if (toks.size() > 1) + { + endIndex = to_uint(toks[1]); + } + + for (auto queueIndex = startIndex; queueIndex <= endIndex; queueIndex++) + { + std::ostringstream name; + name << port.m_alias << ":" << queueIndex; + + const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]); + + string queueType; + uint8_t queueRealIndex = 0; + if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex)) + { + queueTypeVector.emplace_back(id, queueType); + 
queueIndexVector.emplace_back(id, to_string(queueRealIndex)); + } + + queueVector.emplace_back(name.str(), id); + queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); + + // Install a flex counter for this queue to track stats + std::unordered_set counter_stats; + for (const auto& it: queue_stat_ids) + { + counter_stats.emplace(sai_serialize_queue_stat(it)); + } + queue_stat_manager.setCounterIdList(port.m_queue_ids[queueIndex], CounterType::QUEUE, counter_stats); + + /* add watermark queue counters */ + string key = getQueueWatermarkFlexCounterTableKey(id); + + string delimiter(""); + std::ostringstream counters_stream; + for (const auto& it: queueWatermarkStatIds) + { + counters_stream << delimiter << sai_serialize_queue_stat(it); + delimiter = comma; + } + + vector fieldValues; + fieldValues.emplace_back(QUEUE_COUNTER_ID_LIST, counters_stream.str()); + + m_flexCounterTable->set(key, fieldValues); + } + + m_queueTable->set("", queueVector); + m_queuePortTable->set("", queuePortVector); + m_queueIndexTable->set("", queueIndexVector); + m_queueTypeTable->set("", queueTypeVector); + + CounterCheckOrch::getInstance().addPort(port); +} + +void PortsOrch::removePortBufferQueueCounters(const Port &port, string queues) +{ + SWSS_LOG_ENTER(); + + /* Remove the Queues maps in the Counter DB */ + /* Remove stat counters from flex_counter DB */ + auto toks = tokenize(queues, '-'); + auto startIndex = to_uint(toks[0]); + auto endIndex = startIndex; + if (toks.size() > 1) + { + endIndex = to_uint(toks[1]); + } + + for (auto queueIndex = startIndex; queueIndex <= endIndex; queueIndex++) + { + std::ostringstream name; + name << port.m_alias << ":" << queueIndex; + const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]); + + // Remove the queue counter from counters DB maps + m_queueTable->hdel("", name.str()); + m_queuePortTable->hdel("", id); + + string queueType; + uint8_t queueRealIndex = 0; + if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex)) + { + m_queueTypeTable->hdel("", id); + m_queueIndexTable->hdel("", id); + } + + // Remove the flex counter for this queue + queue_stat_manager.clearCounterIdList(port.m_queue_ids[queueIndex]); + + // Remove watermark queue counters + string key = getQueueWatermarkFlexCounterTableKey(id); + m_flexCounterTable->del(key); + } + + CounterCheckOrch::getInstance().removePort(port); +} + +void PortsOrch::generatePriorityGroupMap(map pgsStateVector) { if (m_isPriorityGroupMapGenerated) { @@ -6195,48 +6265,100 @@ void PortsOrch::generatePriorityGroupMap() { if (it.second.m_type == Port::PHY) { - generatePriorityGroupMapPerPort(it.second); + if (!pgsStateVector.count(it.second.m_alias)) + { + auto maxPgNumber = getNumberOfPortSupportedPgCounters(it.second.m_alias); + FlexCounterPgStates flexCounterPgState(maxPgNumber); + pgsStateVector.insert(make_pair(it.second.m_alias, flexCounterPgState)); + } + generatePriorityGroupMapPerPort(it.second, pgsStateVector.at(it.second.m_alias)); } } m_isPriorityGroupMapGenerated = true; } -void PortsOrch::removePriorityGroupMapPerPort(const Port& port) +void PortsOrch::generatePriorityGroupMapPerPort(const Port& port, FlexCounterPgStates& pgsState) { - /* Remove the PG map in the Counter DB */ + /* Create the PG map in the Counter DB */ + /* Add stat counters to flex_counter */ + vector pgVector; + vector pgPortVector; + vector pgIndexVector; for (size_t pgIndex = 0; pgIndex < port.m_priority_group_ids.size(); ++pgIndex) { + if 
(!pgsState.isPgCounterEnabled(static_cast(pgIndex))) + { + continue; + } std::ostringstream name; name << port.m_alias << ":" << pgIndex; const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]); + + pgVector.emplace_back(name.str(), id); + pgPortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); + pgIndexVector.emplace_back(id, to_string(pgIndex)); + string key = getPriorityGroupWatermarkFlexCounterTableKey(id); - m_pgTable->hdel("",name.str()); - m_pgPortTable->hdel("",id); - m_pgIndexTable->hdel("",id); + std::string delimiter = ""; + std::ostringstream counters_stream; + /* Add watermark counters to flex_counter */ + for (const auto& it: ingressPriorityGroupWatermarkStatIds) + { + counters_stream << delimiter << sai_serialize_ingress_priority_group_stat(it); + delimiter = comma; + } - m_flexCounterTable->del(key); + vector fieldValues; + fieldValues.emplace_back(PG_COUNTER_ID_LIST, counters_stream.str()); + m_flexCounterTable->set(key, fieldValues); + delimiter = ""; + std::ostringstream ingress_pg_drop_packets_counters_stream; key = getPriorityGroupDropPacketsFlexCounterTableKey(id); - /* remove dropped packets counters to flex_counter */ - m_flexCounterTable->del(key); + /* Add dropped packets counters to flex_counter */ + for (const auto& it: ingressPriorityGroupDropStatIds) + { + ingress_pg_drop_packets_counters_stream << delimiter << sai_serialize_ingress_priority_group_stat(it); + if (delimiter.empty()) + { + delimiter = comma; + } + } + fieldValues.clear(); + fieldValues.emplace_back(PG_COUNTER_ID_LIST, ingress_pg_drop_packets_counters_stream.str()); + m_flexCounterTable->set(key, fieldValues); } - CounterCheckOrch::getInstance().removePort(port); + m_pgTable->set("", pgVector); + m_pgPortTable->set("", pgPortVector); + m_pgIndexTable->set("", pgIndexVector); + + CounterCheckOrch::getInstance().addPort(port); } -void PortsOrch::generatePriorityGroupMapPerPort(const Port& port) +void PortsOrch::createPortBufferPgCounters(const Port& port, string pgs) { + SWSS_LOG_ENTER(); + /* Create the PG map in the Counter DB */ /* Add stat counters to flex_counter */ vector pgVector; vector pgPortVector; vector pgIndexVector; - for (size_t pgIndex = 0; pgIndex < port.m_priority_group_ids.size(); ++pgIndex) + auto toks = tokenize(pgs, '-'); + auto startIndex = to_uint(toks[0]); + auto endIndex = startIndex; + if (toks.size() > 1) + { + endIndex = to_uint(toks[1]); + } + + for (auto pgIndex = startIndex; pgIndex <= endIndex; pgIndex++) { std::ostringstream name; name << port.m_alias << ":" << pgIndex; @@ -6286,6 +6408,43 @@ void PortsOrch::generatePriorityGroupMapPerPort(const Port& port) CounterCheckOrch::getInstance().addPort(port); } +void PortsOrch::removePortBufferPgCounters(const Port& port, string pgs) +{ + SWSS_LOG_ENTER(); + + /* Remove the Pgs maps in the Counter DB */ + /* Remove stat counters from flex_counter DB */ + auto toks = tokenize(pgs, '-'); + auto startIndex = to_uint(toks[0]); + auto endIndex = startIndex; + if (toks.size() > 1) + { + endIndex = to_uint(toks[1]); + } + + for (auto pgIndex = startIndex; pgIndex <= endIndex; pgIndex++) + { + std::ostringstream name; + name << port.m_alias << ":" << pgIndex; + const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]); + + // Remove the pg counter from counters DB maps + m_pgTable->hdel("", name.str()); + m_pgPortTable->hdel("", id); + m_pgIndexTable->hdel("", id); + + // Remove dropped packets counters from flex_counter + string key = 
getPriorityGroupDropPacketsFlexCounterTableKey(id); + m_flexCounterTable->del(key); + + // Remove watermark counters from flex_counter + key = getPriorityGroupWatermarkFlexCounterTableKey(id); + m_flexCounterTable->del(key); + } + + CounterCheckOrch::getInstance().removePort(port); +} + void PortsOrch::generatePortCounterMap() { if (m_isPortCounterMapGenerated) @@ -6336,6 +6495,16 @@ void PortsOrch::generatePortBufferDropCounterMap() m_isPortBufferDropCounterMapGenerated = true; } +uint32_t PortsOrch::getNumberOfPortSupportedPgCounters(string port) +{ + return static_cast(m_portList[port].m_priority_group_ids.size()); +} + +uint32_t PortsOrch::getNumberOfPortSupportedQueueCounters(string port) +{ + return static_cast(m_portList[port].m_queue_ids.size()); +} + void PortsOrch::doTask(NotificationConsumer &consumer) { SWSS_LOG_ENTER(); diff --git a/orchagent/portsorch.h b/orchagent/portsorch.h index 29769feaa986..add9663fb198 100755 --- a/orchagent/portsorch.h +++ b/orchagent/portsorch.h @@ -131,9 +131,17 @@ class PortsOrch : public Orch, public Subject bool setPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t pfc_bitmask); bool getPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t *pfc_bitmask); + + void generateQueueMap(map queuesStateVector); + uint32_t getNumberOfPortSupportedQueueCounters(string port); + void createPortBufferQueueCounters(const Port &port, string queues); + void removePortBufferQueueCounters(const Port &port, string queues); + + void generatePriorityGroupMap(map pgsStateVector); + uint32_t getNumberOfPortSupportedPgCounters(string port); + void createPortBufferPgCounters(const Port &port, string pgs); + void removePortBufferPgCounters(const Port& port, string pgs); - void generateQueueMap(); - void generatePriorityGroupMap(); void generatePortCounterMap(); void generatePortBufferDropCounterMap(); @@ -349,13 +357,9 @@ class PortsOrch : public Orch, public Subject bool getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uint8_t &index); bool m_isQueueMapGenerated = false; - void generateQueueMapPerPort(const Port& port, bool voq); - void removeQueueMapPerPort(const Port& port); - + void generateQueueMapPerPort(const Port& port, FlexCounterQueueStates& queuesState, bool voq); bool m_isPriorityGroupMapGenerated = false; - void generatePriorityGroupMapPerPort(const Port& port); - void removePriorityGroupMapPerPort(const Port& port); - + void generatePriorityGroupMapPerPort(const Port& port, FlexCounterPgStates& pgsState); bool m_isPortCounterMapGenerated = false; bool m_isPortBufferDropCounterMapGenerated = false; diff --git a/tests/mock_tests/portsorch_ut.cpp b/tests/mock_tests/portsorch_ut.cpp index 78982072e77e..836e862d55bd 100644 --- a/tests/mock_tests/portsorch_ut.cpp +++ b/tests/mock_tests/portsorch_ut.cpp @@ -160,13 +160,14 @@ namespace portsorch_test ASSERT_EQ(gPortsOrch, nullptr); + gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + vector flex_counter_tables = { CFG_FLEX_COUNTER_TABLE_NAME }; auto* flexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); gDirectory.set(flexCounterOrch); - gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); vector buffer_tables = { APP_BUFFER_POOL_TABLE_NAME, APP_BUFFER_PROFILE_TABLE_NAME, APP_BUFFER_QUEUE_TABLE_NAME, diff --git a/tests/mock_tests/routeorch_ut.cpp b/tests/mock_tests/routeorch_ut.cpp index 66df4bfbcc82..2c1c4b8535cf 100644 --- a/tests/mock_tests/routeorch_ut.cpp +++ 
b/tests/mock_tests/routeorch_ut.cpp @@ -176,15 +176,15 @@ namespace routeorch_test { APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri } }; + ASSERT_EQ(gPortsOrch, nullptr); + gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + vector flex_counter_tables = { CFG_FLEX_COUNTER_TABLE_NAME }; auto* flexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); gDirectory.set(flexCounterOrch); - ASSERT_EQ(gPortsOrch, nullptr); - gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); - static const vector route_pattern_tables = { CFG_FLOW_COUNTER_ROUTE_PATTERN_TABLE_NAME, }; diff --git a/tests/test_buffer_traditional.py b/tests/test_buffer_traditional.py index 31d1afbbd893..21371cb05aa4 100644 --- a/tests/test_buffer_traditional.py +++ b/tests/test_buffer_traditional.py @@ -77,12 +77,14 @@ def get_pg_name_map(self): @pytest.fixture def setup_teardown_test(self, dvs): - try: - self.setup_db(dvs) - self.set_port_qos_table(self.INTF, '3,4') - self.lossless_pg_combinations = ['3-4'] - finally: - self.teardown() + self.setup_db(dvs) + self.set_port_qos_table(self.INTF, '3,4') + self.lossless_pg_combinations = ['3-4'] + time.sleep(2) + + yield + + self.teardown() def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): orig_cable_len = None diff --git a/tests/test_flex_counters.py b/tests/test_flex_counters.py index 76a1a535f9a6..f5a0b146b231 100644 --- a/tests/test_flex_counters.py +++ b/tests/test_flex_counters.py @@ -7,8 +7,6 @@ ROUTE_TO_PATTERN_MAP = "COUNTERS_ROUTE_TO_PATTERN_MAP" NUMBER_OF_RETRIES = 10 CPU_PORT_OID = "0x0" -PORT = "Ethernet0" -PORT_MAP = "COUNTERS_PORT_NAME_MAP" counter_group_meta = { 'port_counter': { @@ -73,7 +71,6 @@ } } -@pytest.mark.usefixtures('dvs_port_manager') class TestFlexCounters(object): def setup_dbs(self, dvs): @@ -133,6 +130,18 @@ def wait_for_interval_set(self, group, interval): assert False, "Polling interval is not applied to FLEX_COUNTER_GROUP_TABLE for group {}, expect={}, actual={}".format(group, interval, interval_value) + def wait_for_buffer_pg_queue_counter(self, map, port, index, isSet): + for retry in range(NUMBER_OF_RETRIES): + counter_oid = self.counters_db.db_connection.hget(map, port + ':' + index) + if (isSet and counter_oid): + return counter_oid + elif (not isSet and not counter_oid): + return None + else: + time.sleep(1) + + assert False, "Counter not {} for port: {}, type: {}, index: {}".format("created" if isSet else "removed", port, map, index) + def verify_no_flex_counters_tables(self, counter_stat): counters_stat_keys = self.flex_db.get_keys("FLEX_COUNTER_TABLE:" + counter_stat) assert len(counters_stat_keys) == 0, "FLEX_COUNTER_TABLE:" + str(counter_stat) + " tables exist before enabling the flex counter group" @@ -692,64 +701,53 @@ def remove_ip_address(self, interface, ip): def set_admin_status(self, interface, status): self.config_db.update_entry("PORT", interface, {"admin_status": status}) - - def test_add_remove_ports(self, dvs): + + def test_create_remove_buffer_pg_counter(self, dvs): + """ + Test steps: + 1. Enable PG flex counters. + 2. Configure new buffer prioriy group for a port + 3. Verify counter is automatically created + 4. Remove the new buffer prioriy group for the port + 5. 
Verify counter is automatically removed + + Args: + dvs (object): virtual switch object + """ self.setup_dbs(dvs) - - # set flex counter - counter_key = counter_group_meta['queue_counter']['key'] - counter_stat = counter_group_meta['queue_counter']['group_name'] - counter_map = counter_group_meta['queue_counter']['name_map'] - self.set_flex_counter_group_status(counter_key, counter_map) + meta_data = counter_group_meta['pg_watermark_counter'] + + self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map']) + + self.config_db.update_entry('BUFFER_PG', 'Ethernet0|1', {'profile': 'ingress_lossy_profile'}) + counter_oid = self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '1', True) + self.wait_for_id_list(meta_data['group_name'], "Ethernet0", counter_oid) + + self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|1') + self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '1', False) + self.wait_for_id_list_remove(meta_data['group_name'], "Ethernet0", counter_oid) + + def test_create_remove_buffer_queue_counter(self, dvs): + """ + Test steps: + 1. Enable Queue flex counters. + 2. Configure new buffer queue for a port + 3. Verify counter is automatically created + 4. Remove the new buffer queue for the port + 5. Verify counter is automatically removed + + Args: + dvs (object): virtual switch object + """ + self.setup_dbs(dvs) + meta_data = counter_group_meta['queue_counter'] + + self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map']) + + self.config_db.update_entry('BUFFER_QUEUE', 'Ethernet0|7', {'profile': 'egress_lossless_profile'}) + counter_oid = self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '7', True) + self.wait_for_id_list(meta_data['group_name'], "Ethernet0", counter_oid) - # receive port info - fvs = self.config_db.get_entry("PORT", PORT) - assert len(fvs) > 0 - - # save all the oids of the pg drop counters - oid_list = [] - counters_queue_map = self.counters_db.get_entry("COUNTERS_QUEUE_NAME_MAP", "") - for key, oid in counters_queue_map.items(): - if PORT in key: - oid_list.append(oid) - fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", counter_stat + ":%s" % oid) - assert len(fields) == 1 - oid_list_len = len(oid_list) - - # get port oid - port_oid = self.counters_db.get_entry(PORT_MAP, "")[PORT] - - # remove port and verify that it was removed properly - self.dvs_port.remove_port(PORT) - dvs.get_asic_db().wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid) - - # verify counters were removed from flex counter table - for oid in oid_list: - fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", counter_stat + ":%s" % oid) - assert len(fields) == 0 - - # verify that port counter maps were removed from counters db - counters_queue_map = self.counters_db.get_entry("COUNTERS_QUEUE_NAME_MAP", "") - for key in counters_queue_map.keys(): - if PORT in key: - assert False - - # add port and wait until the port is added on asic db - num_of_keys_without_port = len(dvs.get_asic_db().get_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT")) - - self.config_db.create_entry("PORT", PORT, fvs) - - dvs.get_asic_db().wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", num_of_keys_without_port + 1) - dvs.get_counters_db().wait_for_fields("COUNTERS_QUEUE_NAME_MAP", "", ["%s:0"%(PORT)]) - - # verify queue counters were added - oid_list = [] - counters_queue_map = self.counters_db.get_entry("COUNTERS_QUEUE_NAME_MAP", "") - - for key, oid in counters_queue_map.items(): - if PORT in key: - 
oid_list.append(oid) - fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", counter_stat + ":%s" % oid) - assert len(fields) == 1 - # the number of the oids needs to be the same as the original number of oids (before removing a port and adding) - assert oid_list_len == len(oid_list) + self.config_db.delete_entry('BUFFER_QUEUE', 'Ethernet0|7') + self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '7', False) + self.wait_for_id_list_remove(meta_data['group_name'], "Ethernet0", counter_oid) diff --git a/tests/test_pg_drop_counter.py b/tests/test_pg_drop_counter.py index b3682881dec3..6d97af5f5c31 100644 --- a/tests/test_pg_drop_counter.py +++ b/tests/test_pg_drop_counter.py @@ -2,16 +2,12 @@ import re import time import json -import pytest import redis from swsscommon import swsscommon pg_drop_attr = "SAI_INGRESS_PRIORITY_GROUP_STAT_DROPPED_PACKETS" -PORT = "Ethernet0" - -@pytest.mark.usefixtures('dvs_port_manager') class TestPGDropCounter(object): DEFAULT_POLL_INTERVAL = 10 pgs = {} @@ -61,14 +57,11 @@ def verify_value(self, dvs, obj_ids, entry_name, expected_value): assert found, "entry name %s not found" % (entry_name) def set_up_flex_counter(self): - pg_stats_entry = {"PG_COUNTER_ID_LIST": "{}".format(pg_drop_attr)} - for pg in self.pgs: - self.flex_db.create_entry("FLEX_COUNTER_TABLE", "PG_DROP_STAT_COUNTER:{}".format(pg), pg_stats_entry) - fc_status_enable = {"FLEX_COUNTER_STATUS": "enable"} - self.config_db.create_entry("FLEX_COUNTER_TABLE", "PG_DROP", fc_status_enable) self.config_db.create_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK", fc_status_enable) + # Wait for DB's to populate by orchagent + time.sleep(2) def clear_flex_counter(self): for pg in self.pgs: @@ -79,10 +72,12 @@ def clear_flex_counter(self): def test_pg_drop_counters(self, dvs): self.setup_dbs(dvs) - self.pgs = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP") - try: - self.set_up_flex_counter() + self.set_up_flex_counter() + # Get all configured counters OID's + self.pgs = self.counters_db.db_connection.hgetall("COUNTERS_PG_NAME_MAP").values() + assert self.pgs is not None and len(self.pgs) > 0 + try: self.populate_asic(dvs, "0") time.sleep(self.DEFAULT_POLL_INTERVAL) self.verify_value(dvs, self.pgs, pg_drop_attr, "0") @@ -97,48 +92,3 @@ def test_pg_drop_counters(self, dvs): finally: self.clear_flex_counter() - def test_pg_drop_counter_port_add_remove(self, dvs): - self.setup_dbs(dvs) - - try: - # configure pg drop flex counter - self.set_up_flex_counter() - - # receive port info - fvs = self.config_db.get_entry("PORT", PORT) - assert len(fvs) > 0 - - # save all the oids of the pg drop counters - oid_list = [] - for priority in range(0,7): - oid_list.append(dvs.get_counters_db().get_entry("COUNTERS_PG_NAME_MAP", "")["%s:%d"%(PORT, priority)]) - # verify that counters exists on flex counter - fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK_STAT_COUNTER:%s"%oid_list[-1]) - assert len(fields) == 1 - - # remove port - port_oid = self.counters_db.get_entry("COUNTERS_PORT_NAME_MAP", "")[PORT] - self.dvs_port.remove_port(PORT) - dvs.get_asic_db().wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid) - - # verify counters were removed from flex counter table - for oid in oid_list: - fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK_STAT_COUNTER:%s"%oid) - assert len(fields) == 0 - - # add port and wait until the port is added on asic db - num_of_keys_without_port = 
len(dvs.get_asic_db().get_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT")) - self.config_db.create_entry("PORT", PORT, fvs) - dvs.get_asic_db().wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", num_of_keys_without_port + 1) - dvs.get_counters_db().wait_for_fields("COUNTERS_PG_NAME_MAP", "", ["%s:0"%(PORT)]) - - # verify counter was added - for priority in range(0,7): - oid = dvs.get_counters_db().get_entry("COUNTERS_PG_NAME_MAP", "")["%s:%d"%(PORT, priority)] - - # verify that counters exists on flex counter - fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK_STAT_COUNTER:%s"%oid) - assert len(fields) == 1 - - finally: - self.clear_flex_counter() diff --git a/tests/test_watermark.py b/tests/test_watermark.py index 23efedcb42df..a8cee70aa147 100644 --- a/tests/test_watermark.py +++ b/tests/test_watermark.py @@ -104,22 +104,8 @@ def verify_value(self, dvs, obj_ids, table_name, watermark_name, expected_value) assert found, "no such watermark found" def set_up_flex_counter(self, dvs): - for q in self.qs: - self.flex_db.create_entry("FLEX_COUNTER_TABLE", - "QUEUE_WATERMARK_STAT_COUNTER:{}".format(q), - WmFCEntry.queue_stats_entry) - - for pg in self.pgs: - self.flex_db.create_entry("FLEX_COUNTER_TABLE", - "PG_WATERMARK_STAT_COUNTER:{}".format(pg), - WmFCEntry.pg_stats_entry) - - for buffer in self.buffers: - self.flex_db.create_entry("FLEX_COUNTER_TABLE", - "BUFFER_POOL_WATERMARK_STAT_COUNTER:{}".format(buffer), - WmFCEntry.buffer_stats_entry) - fc_status_enable = {"FLEX_COUNTER_STATUS": "enable"} + self.config_db.create_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK", fc_status_enable) @@ -130,7 +116,8 @@ def set_up_flex_counter(self, dvs): "BUFFER_POOL_WATERMARK", fc_status_enable) - self.populate_asic_all(dvs, "0") + # Wait for DB's to populate by orchagent + time.sleep(2) def clear_flex_counter(self, dvs): for q in self.qs: @@ -150,10 +137,14 @@ def clear_flex_counter(self, dvs): self.config_db.delete_entry("FLEX_COUNTER_TABLE", "BUFFER_POOL_WATERMARK") def set_up(self, dvs): - self.qs = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_QUEUE") - self.pgs = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP") + self.pgs = self.counters_db.db_connection.hgetall("COUNTERS_PG_NAME_MAP").values() + assert self.pgs is not None and len(self.pgs) > 0 + self.qs = self.counters_db.db_connection.hgetall("COUNTERS_QUEUE_NAME_MAP").values() + assert self.qs is not None and len(self.pgs) > 0 self.buffers = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_BUFFER_POOL") + self.populate_asic_all(dvs, "0") + db = swsscommon.DBConnector(swsscommon.COUNTERS_DB, dvs.redis_sock, 0) tbl = swsscommon.Table(db, "COUNTERS_QUEUE_TYPE_MAP") @@ -180,9 +171,9 @@ def clear_watermark(self, dvs, data): def test_telemetry_period(self, dvs): self.setup_dbs(dvs) + self.set_up_flex_counter(dvs) self.set_up(dvs) try: - self.set_up_flex_counter(dvs) self.enable_unittests(dvs, "true") self.populate_asic_all(dvs, "100")
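
For reference, the configure-then-verify flow described in the PR text (add a BUFFER_QUEUE entry, let orchagent create only the matching queue counters) can also be exercised outside the unit tests with a short script along the following lines. This is a minimal sketch, not part of the patch: it assumes a reachable switch with the default redis instance, a sonic-swss-common build whose Python DBConnector accepts a database name, and that the port Ethernet0, the queue range 3-4, and the buffer profile egress_lossless_profile are only illustrative placeholders for an existing configuration.

from swsscommon import swsscommon
import time

# Connect to CONFIG_DB (where buffer and flex counter config lives) and
# COUNTERS_DB (where orchagent publishes the counter name maps).
config_db = swsscommon.DBConnector("CONFIG_DB", 0)
counters_db = swsscommon.DBConnector("COUNTERS_DB", 0)

# Enable the QUEUE flex counter group, mirroring what the tests do for
# PG_WATERMARK/QUEUE via FLEX_COUNTER_TABLE.
flex_counter = swsscommon.Table(config_db, "FLEX_COUNTER_TABLE")
flex_counter.set("QUEUE", swsscommon.FieldValuePairs([("FLEX_COUNTER_STATUS", "enable")]))

# Configure buffer queues 3-4 on Ethernet0; with this change, orchagent creates
# queue counters only for this range instead of for every supported queue.
buffer_queue = swsscommon.Table(config_db, "BUFFER_QUEUE")
buffer_queue.set("Ethernet0|3-4", swsscommon.FieldValuePairs([("profile", "egress_lossless_profile")]))

# Give orchagent a moment to process the updates.
time.sleep(2)

# Only Ethernet0:3 and Ethernet0:4 are expected to appear in the queue name map.
queue_map = swsscommon.Table(counters_db, "COUNTERS_QUEUE_NAME_MAP")
status, fvs = queue_map.get("")
for queue_name, counter_oid in fvs:
    print(queue_name, counter_oid)

Deleting the BUFFER_QUEUE entry afterwards should remove the corresponding map entries and flex counters again, which is what the new test_create_remove_buffer_queue_counter test verifies.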