From 87d30600bd5262400dce84dfa668e7533c5ec77d Mon Sep 17 00:00:00 2001 From: Stephen Sun Date: Fri, 5 May 2023 06:39:53 +0000 Subject: [PATCH 01/13] Fix issue in maximum headroom checking 1. Take the pending PG keys into consideration when calculating the maximum headroom 2. Pass the PG to the Lua plugin when refreshing PGs for a port Signed-off-by: Stephen Sun --- cfgmgr/buffer_check_headroom_mellanox.lua | 33 ++++++++++++++++++----- cfgmgr/buffermgrdyn.cpp | 2 +- 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/cfgmgr/buffer_check_headroom_mellanox.lua b/cfgmgr/buffer_check_headroom_mellanox.lua index 20b62d2938..6f0f0c3922 100644 --- a/cfgmgr/buffer_check_headroom_mellanox.lua +++ b/cfgmgr/buffer_check_headroom_mellanox.lua @@ -89,16 +89,37 @@ if new_pg ~= nil then end end --- Fetch all the PGs, accumulate the sizes --- Assume there is only one lossless profile configured among all PGs on each port -table.insert(debuginfo, 'debug:other overhead:' .. accumulative_size) +-- Fetch all the PGs in APPL_DB, and store them into a hash table local pg_keys = redis.call('KEYS', 'BUFFER_PG_TABLE:' .. port .. ':*') +local all_pgs = {} for i = 1, #pg_keys do local profile = redis.call('HGET', pg_keys[i], 'profile') + all_pgs[pg_keys[i]] = profile +end + +-- Fetch all the pending PGs, and store them into the hash table +-- Overwrite any existing entries +local pending_pg_keys = redis.call('KEYS', '_BUFFER_PG_TABLE:' .. port .. ':*') +for i = 1, #pending_pg_keys do + local profile = redis.call('HGET', pending_pg_keys[i], 'profile') + -- Remove the leading underscore when storing it into the hash table + all_pgs[string.sub(pending_pg_keys[i], 2, -1)] = profile + table.insert(debuginfo, 'debug:pending entry: ' .. pending_pg_keys[i] .. ':' .. profile) +end + +-- Handle all the PGs, accumulate the sizes +-- Assume there is only one lossless profile configured among all PGs on each port +table.insert(debuginfo, 'debug:other overhead:' .. accumulative_size) +-- local pg_keys = redis.call('KEYS', 'BUFFER_PG_TABLE:' .. port .. ':*') +for pg_key, profile in pairs(all_pgs) do local current_profile_size - if profile ~= 'ingress_lossy_profile' and (no_input_pg or new_pg ~= pg_keys[i]) then + if profile ~= 'ingress_lossy_profile' and (no_input_pg or new_pg ~= pg_key) then if profile ~= input_profile_name and not no_input_pg then local referenced_profile = redis.call('HGETALL', 'BUFFER_PROFILE_TABLE:' .. profile) + if referenced_profile == nil then + referenced_profile = redis.call('HGETALL', '_BUFFER_PROFILE_TABLE:' .. profile) + table.insert(debuginfo, 'debug:pending profile: ' .. profile) + end for j = 1, #referenced_profile, 2 do if referenced_profile[j] == 'size' then current_profile_size = tonumber(referenced_profile[j+1]) @@ -108,8 +129,8 @@ for i = 1, #pg_keys do current_profile_size = input_profile_size profile = input_profile_name end - accumulative_size = accumulative_size + current_profile_size * get_number_of_pgs(pg_keys[i]) - table.insert(debuginfo, 'debug:' .. pg_keys[i] .. ':' .. profile .. ':' .. current_profile_size .. ':' .. get_number_of_pgs(pg_keys[i]) .. ':accu:' .. accumulative_size) + accumulative_size = accumulative_size + current_profile_size * get_number_of_pgs(pg_key) + table.insert(debuginfo, 'debug:' .. pg_key .. ':' .. profile .. ':' .. current_profile_size .. ':' .. get_number_of_pgs(pg_key) .. ':accu:' .. 
accumulative_size) end end diff --git a/cfgmgr/buffermgrdyn.cpp b/cfgmgr/buffermgrdyn.cpp index 1d476deb4d..b1495f3491 100644 --- a/cfgmgr/buffermgrdyn.cpp +++ b/cfgmgr/buffermgrdyn.cpp @@ -1497,7 +1497,7 @@ task_process_status BufferMgrDynamic::refreshPgsForPort(const string &port, cons // Calculate whether accumulative headroom size exceeds the maximum value // Abort if it does - if (!isHeadroomResourceValid(port, m_bufferProfileLookup[newProfile], exactly_matched_key)) + if (!isHeadroomResourceValid(port, m_bufferProfileLookup[newProfile], key)) { SWSS_LOG_ERROR("Update speed (%s) and cable length (%s) for port %s failed, accumulative headroom size exceeds the limit", speed.c_str(), cable_length.c_str(), port.c_str()); From a88a8bc99b6de4ac446e44cf02f17eb9942f13b5 Mon Sep 17 00:00:00 2001 From: Stephen Sun Date: Sat, 6 May 2023 14:52:19 +0000 Subject: [PATCH 02/13] Check maximum headroom for lossy PG Signed-off-by: Stephen Sun --- cfgmgr/buffer_check_headroom_mellanox.lua | 26 ++++++++++++----------- cfgmgr/buffermgrdyn.cpp | 11 +++++++--- 2 files changed, 22 insertions(+), 15 deletions(-) diff --git a/cfgmgr/buffer_check_headroom_mellanox.lua b/cfgmgr/buffer_check_headroom_mellanox.lua index 6f0f0c3922..dac9594436 100644 --- a/cfgmgr/buffer_check_headroom_mellanox.lua +++ b/cfgmgr/buffer_check_headroom_mellanox.lua @@ -5,7 +5,7 @@ local port = KEYS[1] local input_profile_name = ARGV[1] -local input_profile_size = ARGV[2] +local input_profile_size = tonumber(ARGV[2]) local new_pg = ARGV[3] local function is_port_with_8lanes(lanes) @@ -60,7 +60,8 @@ if is_port_with_8lanes(lanes) then pipeline_latency = pipeline_latency * 2 - 1 egress_mirror_size = egress_mirror_size * 2 end -accumulative_size = accumulative_size + 2 * pipeline_latency * 1024 + egress_mirror_size +local lossy_pg_size = pipeline_latency * 1024 +accumulative_size = accumulative_size + lossy_pg_size + egress_mirror_size -- Fetch all keys in BUFFER_PG according to the port redis.call('SELECT', appl_db) @@ -110,31 +111,32 @@ end -- Handle all the PGs, accumulate the sizes -- Assume there is only one lossless profile configured among all PGs on each port table.insert(debuginfo, 'debug:other overhead:' .. accumulative_size) --- local pg_keys = redis.call('KEYS', 'BUFFER_PG_TABLE:' .. port .. ':*') for pg_key, profile in pairs(all_pgs) do local current_profile_size - if profile ~= 'ingress_lossy_profile' and (no_input_pg or new_pg ~= pg_key) then + if no_input_pg or new_pg ~= pg_key then if profile ~= input_profile_name and not no_input_pg then - local referenced_profile = redis.call('HGETALL', 'BUFFER_PROFILE_TABLE:' .. profile) - if referenced_profile == nil then - referenced_profile = redis.call('HGETALL', '_BUFFER_PROFILE_TABLE:' .. profile) + local referenced_profile_size = redis.call('HGET', 'BUFFER_PROFILE_TABLE:' .. profile, 'size') + if not referenced_profile_size then + referenced_profile_size = redis.call('HGET', '_BUFFER_PROFILE_TABLE:' .. profile, 'size') table.insert(debuginfo, 'debug:pending profile: ' .. profile) end - for j = 1, #referenced_profile, 2 do - if referenced_profile[j] == 'size' then - current_profile_size = tonumber(referenced_profile[j+1]) - end - end + current_profile_size = tonumber(referenced_profile_size) else current_profile_size = input_profile_size profile = input_profile_name end + if current_profile_size == 0 then + current_profile_size = lossy_pg_size + end accumulative_size = accumulative_size + current_profile_size * get_number_of_pgs(pg_key) table.insert(debuginfo, 'debug:' .. pg_key .. 
':' .. profile .. ':' .. current_profile_size .. ':' .. get_number_of_pgs(pg_key) .. ':accu:' .. accumulative_size) end end if not no_input_pg then + if input_profile_size == 0 then + input_profile_size = lossy_pg_size + end accumulative_size = accumulative_size + input_profile_size * get_number_of_pgs(new_pg) table.insert(debuginfo, 'debug:' .. new_pg .. '*:' .. input_profile_name .. ':' .. input_profile_size .. ':' .. get_number_of_pgs(new_pg) .. ':accu:' .. accumulative_size) end diff --git a/cfgmgr/buffermgrdyn.cpp b/cfgmgr/buffermgrdyn.cpp index b1495f3491..6c9a1e831e 100644 --- a/cfgmgr/buffermgrdyn.cpp +++ b/cfgmgr/buffermgrdyn.cpp @@ -1051,10 +1051,10 @@ bool BufferMgrDynamic::isHeadroomResourceValid(const string &port, const buffer_ // profile: the profile referenced by the new_pg (if provided) or all PGs // new_pg: which pg is newly added? - if (!profile.lossless) + if (!profile.lossless && new_pg.empty()) { - SWSS_LOG_INFO("No need to check headroom for lossy PG port %s profile %s size %s pg %s", - port.c_str(), profile.name.c_str(), profile.size.c_str(), new_pg.c_str()); + SWSS_LOG_INFO("No need to check headroom for lossy PG port %s profile %s size %s without a PG specified", + port.c_str(), profile.name.c_str(), profile.size.c_str()); return true; } @@ -3005,6 +3005,11 @@ task_process_status BufferMgrDynamic::handleSingleBufferPgEntry(const string &ke bufferPg.dynamic_calculated = profileRef.dynamic_calculated; bufferPg.configured_profile_name = profileName; bufferPg.lossless = profileRef.lossless; + if (!profileRef.lossless && !isHeadroomResourceValid(port, profileRef, key)) + { + SWSS_LOG_ERROR("Unable to configure lossy PG %s, accumulative headroom size exceeds the limit", key.c_str()); + return task_process_status::task_failed; + } } bufferPg.static_configured = true; bufferPg.configured_profile_name = profileName; From f1b589ea9438dc3cfea7c132135c9710b8973f70 Mon Sep 17 00:00:00 2001 From: Stephen Sun Date: Mon, 8 May 2023 10:15:01 +0300 Subject: [PATCH 03/13] Mock test to cover the maximum headroom exceeding checking enhancement Signed-off-by: Stephen Sun --- tests/test_buffer_dynamic.py | 52 ++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/tests/test_buffer_dynamic.py b/tests/test_buffer_dynamic.py index 0b4177b64c..8d863dfe55 100644 --- a/tests/test_buffer_dynamic.py +++ b/tests/test_buffer_dynamic.py @@ -779,4 +779,56 @@ def test_bufferPortMaxParameter(self, dvs, testlog): fvs = self.state_db.wait_for_entry("BUFFER_MAX_PARAM_TABLE", "Ethernet0") assert int(fvs["max_queues"]) and int(fvs["max_priority_groups"]) + _, oa_pid = dvs.runcmd("pgrep orchagent") + + try: + fvs["max_headroom_size"] = "122880" + self.state_db.update_entry("BUFFER_MAX_PARAM_TABLE", "Ethernet0", fvs) + + # Startup interface + dvs.port_admin_set('Ethernet0', 'up') + # Wait for the lossy profile to be handled + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:0", {"profile": "ingress_lossy_profile"}) + + # Stop orchagent to simulate the scenario that the system is during initialization + dvs.runcmd("kill -s SIGSTOP {}".format(oa_pid)) + + # Create a lossless profile + self.config_db.update_entry('BUFFER_PROFILE', 'test', + {'xon': '19456', + 'xoff': '10240', + 'size': '29696', + 'dynamic_th': '0', + 'pool': 'ingress_lossless_pool'}) + + self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'test'}) + # Should not be added due to the maximum headroom exceeded + self.config_db.update_entry('BUFFER_PG', 'Ethernet0|1', {'profile': 
'ingress_lossy_profile'}) + # Should not be added due to the maximum headroom exceeded + self.config_db.update_entry('BUFFER_PG', 'Ethernet0|6', {'profile': 'test'}) + + dvs.runcmd("kill -s SIGCONT {}".format(oa_pid)) + time.sleep(1) + + # Check whether BUFFER_PG_TABLE is updated as expected + keys = self.app_db.get_keys('BUFFER_PG_TABLE') + + assert 'Ethernet0:3-4' in keys + assert 'Ethernet0:0' in keys + assert 'Ethernet0:1' not in keys + assert 'Ethernet0:6' not in keys + finally: + dvs.runcmd("kill -s SIGCONT {}".format(oa_pid)) + + self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') + self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|1') + self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|6') + self.config_db.delete_entry('BUFFER_PROFILE', 'test') + + fvs.pop("max_headroom_size") + self.state_db.delete_entry("BUFFER_MAX_PARAM_TABLE", "Ethernet0") + self.state_db.update_entry("BUFFER_MAX_PARAM_TABLE", "Ethernet0", fvs) + + dvs.port_admin_set('Ethernet0', 'down') + self.cleanup_db(dvs) From c726bbe48addb57d2f3777679f8acd5b3ec43a4b Mon Sep 17 00:00:00 2001 From: Stephen Sun Date: Wed, 10 May 2023 11:58:14 +0300 Subject: [PATCH 04/13] Stablize the test using wait_for_field_match which can wait for more seconds Signed-off-by: Stephen Sun --- tests/test_buffer_dynamic.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_buffer_dynamic.py b/tests/test_buffer_dynamic.py index 8d863dfe55..e7ddf72e98 100644 --- a/tests/test_buffer_dynamic.py +++ b/tests/test_buffer_dynamic.py @@ -807,14 +807,14 @@ def test_bufferPortMaxParameter(self, dvs, testlog): # Should not be added due to the maximum headroom exceeded self.config_db.update_entry('BUFFER_PG', 'Ethernet0|6', {'profile': 'test'}) + # Resume orchagent dvs.runcmd("kill -s SIGCONT {}".format(oa_pid)) - time.sleep(1) # Check whether BUFFER_PG_TABLE is updated as expected + self.app_db.wait_for_field_match("_BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "test"}) + keys = self.app_db.get_keys('BUFFER_PG_TABLE') - assert 'Ethernet0:3-4' in keys - assert 'Ethernet0:0' in keys assert 'Ethernet0:1' not in keys assert 'Ethernet0:6' not in keys finally: From 8a053c85c4cab5fca3f62131f1990248d360c890 Mon Sep 17 00:00:00 2001 From: Stephen Sun Date: Thu, 11 May 2023 03:50:07 +0300 Subject: [PATCH 05/13] Fix typo Signed-off-by: Stephen Sun --- tests/test_buffer_dynamic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_buffer_dynamic.py b/tests/test_buffer_dynamic.py index e7ddf72e98..9f14e04cec 100644 --- a/tests/test_buffer_dynamic.py +++ b/tests/test_buffer_dynamic.py @@ -811,7 +811,7 @@ def test_bufferPortMaxParameter(self, dvs, testlog): dvs.runcmd("kill -s SIGCONT {}".format(oa_pid)) # Check whether BUFFER_PG_TABLE is updated as expected - self.app_db.wait_for_field_match("_BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "test"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "test"}) keys = self.app_db.get_keys('BUFFER_PG_TABLE') From 4feca8985a9a5a42cf7372ed929f474886459d07 Mon Sep 17 00:00:00 2001 From: Stephen Sun Date: Thu, 11 May 2023 08:05:59 +0300 Subject: [PATCH 06/13] Add debug info Signed-off-by: Stephen Sun --- tests/test_buffer_dynamic.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tests/test_buffer_dynamic.py b/tests/test_buffer_dynamic.py index 9f14e04cec..caa5c8ee8d 100644 --- a/tests/test_buffer_dynamic.py +++ b/tests/test_buffer_dynamic.py @@ -775,6 +775,12 @@ def test_removeBufferPool(self, dvs, testlog): def 
test_bufferPortMaxParameter(self, dvs, testlog): self.setup_db(dvs) + # Update log level so that we can analyze the log in case the test failed + logfvs = self.config_db.wait_for_entry("LOGGER", "buffermgrd") + old_log_level = logfvs.get("LOGLEVEL") + logfvs["LOGLEVEL"] = "INFO" + self.config_db.update_entry("LOGGER", "buffermgrd", logfvs) + # Check whether port's maximum parameter has been exposed to STATE_DB fvs = self.state_db.wait_for_entry("BUFFER_MAX_PARAM_TABLE", "Ethernet0") assert int(fvs["max_queues"]) and int(fvs["max_priority_groups"]) @@ -829,6 +835,10 @@ def test_bufferPortMaxParameter(self, dvs, testlog): self.state_db.delete_entry("BUFFER_MAX_PARAM_TABLE", "Ethernet0") self.state_db.update_entry("BUFFER_MAX_PARAM_TABLE", "Ethernet0", fvs) + if old_log_level: + logfvs["LOGLEVEL"] = old_log_level + self.config_db.update_entry("LOGGER", "buffermgrd", logfvs) + dvs.port_admin_set('Ethernet0', 'down') self.cleanup_db(dvs) From c12a9e5c66222eecec63ff06e488a73e703995ee Mon Sep 17 00:00:00 2001 From: Stephen Sun Date: Thu, 11 May 2023 10:42:45 +0300 Subject: [PATCH 07/13] Detailed log Signed-off-by: Stephen Sun --- tests/test_buffer_dynamic.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tests/test_buffer_dynamic.py b/tests/test_buffer_dynamic.py index caa5c8ee8d..2d8ed9d311 100644 --- a/tests/test_buffer_dynamic.py +++ b/tests/test_buffer_dynamic.py @@ -788,15 +788,18 @@ def test_bufferPortMaxParameter(self, dvs, testlog): _, oa_pid = dvs.runcmd("pgrep orchagent") try: + dvs.runcmd(f"logger -t pytest === setting max_headroom_size ===") fvs["max_headroom_size"] = "122880" self.state_db.update_entry("BUFFER_MAX_PARAM_TABLE", "Ethernet0", fvs) # Startup interface + dvs.runcmd(f"logger -t pytest === starting up interface ===") dvs.port_admin_set('Ethernet0', 'up') # Wait for the lossy profile to be handled self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:0", {"profile": "ingress_lossy_profile"}) # Stop orchagent to simulate the scenario that the system is during initialization + dvs.runcmd(f"logger -t pytest === stopping orchagent ===") dvs.runcmd("kill -s SIGSTOP {}".format(oa_pid)) # Create a lossless profile @@ -807,18 +810,23 @@ def test_bufferPortMaxParameter(self, dvs, testlog): 'dynamic_th': '0', 'pool': 'ingress_lossless_pool'}) + dvs.runcmd(f"logger -t pytest === configuring Ethernet0|3-4 ===") self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'test'}) # Should not be added due to the maximum headroom exceeded + dvs.runcmd(f"logger -t pytest === configuring Ethernet0|1 fail ===") self.config_db.update_entry('BUFFER_PG', 'Ethernet0|1', {'profile': 'ingress_lossy_profile'}) # Should not be added due to the maximum headroom exceeded + dvs.runcmd(f"logger -t pytest === configuring Ethernet0|6 fail ===") self.config_db.update_entry('BUFFER_PG', 'Ethernet0|6', {'profile': 'test'}) # Resume orchagent + dvs.runcmd(f"logger -t pytest === resuming orchagent ===") dvs.runcmd("kill -s SIGCONT {}".format(oa_pid)) # Check whether BUFFER_PG_TABLE is updated as expected + dvs.runcmd(f"logger -t pytest === checking Ethernet0:3-4 ===") self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "test"}) - + dvs.runcmd(f"logger -t pytest === checking failing keys ===") keys = self.app_db.get_keys('BUFFER_PG_TABLE') assert 'Ethernet0:1' not in keys From c7779a783b5c82c066ba2a829a92a5f1cb9bc4a2 Mon Sep 17 00:00:00 2001 From: Stephen Sun Date: Thu, 11 May 2023 12:04:37 +0300 Subject: [PATCH 08/13] Guarantee the 
order Signed-off-by: Stephen Sun --- tests/test_buffer_dynamic.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/test_buffer_dynamic.py b/tests/test_buffer_dynamic.py index 2d8ed9d311..4a978652f5 100644 --- a/tests/test_buffer_dynamic.py +++ b/tests/test_buffer_dynamic.py @@ -812,6 +812,10 @@ def test_bufferPortMaxParameter(self, dvs, testlog): dvs.runcmd(f"logger -t pytest === configuring Ethernet0|3-4 ===") self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'test'}) + + # Make sure the entry has been handled by buffermgrd and is pending on orchagent's queue + self.app_db.wait_for_field_match("_BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "test"}) + # Should not be added due to the maximum headroom exceeded dvs.runcmd(f"logger -t pytest === configuring Ethernet0|1 fail ===") self.config_db.update_entry('BUFFER_PG', 'Ethernet0|1', {'profile': 'ingress_lossy_profile'}) From 6fcd0b238fa90ce5aa9b74b53c185c2291ac9f58 Mon Sep 17 00:00:00 2001 From: Stephen Sun Date: Fri, 12 May 2023 00:39:54 +0000 Subject: [PATCH 09/13] Fix issue: wrong profile is take if no new_pg is provided Signed-off-by: Stephen Sun --- cfgmgr/buffer_check_headroom_mellanox.lua | 48 ++++++++--------------- 1 file changed, 17 insertions(+), 31 deletions(-) diff --git a/cfgmgr/buffer_check_headroom_mellanox.lua b/cfgmgr/buffer_check_headroom_mellanox.lua index dac9594436..a5b5a30fad 100644 --- a/cfgmgr/buffer_check_headroom_mellanox.lua +++ b/cfgmgr/buffer_check_headroom_mellanox.lua @@ -82,14 +82,6 @@ local function get_number_of_pgs(keyname) return size end -local no_input_pg = true -if new_pg ~= nil then - if get_number_of_pgs(new_pg) ~= 0 then - no_input_pg = false - new_pg = 'BUFFER_PG_TABLE:' .. new_pg - end -end - -- Fetch all the PGs in APPL_DB, and store them into a hash table local pg_keys = redis.call('KEYS', 'BUFFER_PG_TABLE:' .. port .. ':*') local all_pgs = {} @@ -108,37 +100,31 @@ for i = 1, #pending_pg_keys do table.insert(debuginfo, 'debug:pending entry: ' .. pending_pg_keys[i] .. ':' .. profile) end +if new_pg ~= nil and get_number_of_pgs(new_pg) ~= 0 then + all_pgs['BUFFER_PG_TABLE:' .. new_pg_pg] = input_profile_name +end + -- Handle all the PGs, accumulate the sizes -- Assume there is only one lossless profile configured among all PGs on each port table.insert(debuginfo, 'debug:other overhead:' .. accumulative_size) for pg_key, profile in pairs(all_pgs) do local current_profile_size - if no_input_pg or new_pg ~= pg_key then - if profile ~= input_profile_name and not no_input_pg then - local referenced_profile_size = redis.call('HGET', 'BUFFER_PROFILE_TABLE:' .. profile, 'size') - if not referenced_profile_size then - referenced_profile_size = redis.call('HGET', '_BUFFER_PROFILE_TABLE:' .. profile, 'size') - table.insert(debuginfo, 'debug:pending profile: ' .. profile) - end - current_profile_size = tonumber(referenced_profile_size) - else - current_profile_size = input_profile_size - profile = input_profile_name + if profile ~= input_profile_name then + local referenced_profile_size = redis.call('HGET', 'BUFFER_PROFILE_TABLE:' .. profile, 'size') + if not referenced_profile_size then + referenced_profile_size = redis.call('HGET', '_BUFFER_PROFILE_TABLE:' .. profile, 'size') + table.insert(debuginfo, 'debug:pending profile: ' .. profile) end - if current_profile_size == 0 then - current_profile_size = lossy_pg_size - end - accumulative_size = accumulative_size + current_profile_size * get_number_of_pgs(pg_key) - table.insert(debuginfo, 'debug:' .. pg_key .. ':' .. 
profile .. ':' .. current_profile_size .. ':' .. get_number_of_pgs(pg_key) .. ':accu:' .. accumulative_size) + current_profile_size = tonumber(referenced_profile_size) + else + current_profile_size = input_profile_size + profile = input_profile_name end -end - -if not no_input_pg then - if input_profile_size == 0 then - input_profile_size = lossy_pg_size + if current_profile_size == 0 then + current_profile_size = lossy_pg_size end - accumulative_size = accumulative_size + input_profile_size * get_number_of_pgs(new_pg) - table.insert(debuginfo, 'debug:' .. new_pg .. '*:' .. input_profile_name .. ':' .. input_profile_size .. ':' .. get_number_of_pgs(new_pg) .. ':accu:' .. accumulative_size) + accumulative_size = accumulative_size + current_profile_size * get_number_of_pgs(pg_key) + table.insert(debuginfo, 'debug:' .. pg_key .. ':' .. profile .. ':' .. current_profile_size .. ':' .. get_number_of_pgs(pg_key) .. ':accu:' .. accumulative_size) end if max_headroom_size > accumulative_size then From 4f6236f797481fb5153eb715a3acfd959e364aea Mon Sep 17 00:00:00 2001 From: Stephen Sun Date: Fri, 12 May 2023 01:08:16 +0000 Subject: [PATCH 10/13] Fix typo Signed-off-by: Stephen Sun --- cfgmgr/buffer_check_headroom_mellanox.lua | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cfgmgr/buffer_check_headroom_mellanox.lua b/cfgmgr/buffer_check_headroom_mellanox.lua index a5b5a30fad..daf1b217d5 100644 --- a/cfgmgr/buffer_check_headroom_mellanox.lua +++ b/cfgmgr/buffer_check_headroom_mellanox.lua @@ -101,7 +101,7 @@ for i = 1, #pending_pg_keys do end if new_pg ~= nil and get_number_of_pgs(new_pg) ~= 0 then - all_pgs['BUFFER_PG_TABLE:' .. new_pg_pg] = input_profile_name + all_pgs['BUFFER_PG_TABLE:' .. new_pg] = input_profile_name end -- Handle all the PGs, accumulate the sizes From 6bad0d3073c2b8b2f8ebf311bbe80cd6f2e12790 Mon Sep 17 00:00:00 2001 From: Stephen Sun Date: Fri, 12 May 2023 04:30:43 +0300 Subject: [PATCH 11/13] Add test case to cover the latest commit Signed-off-by: Stephen Sun --- tests/test_buffer_dynamic.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/tests/test_buffer_dynamic.py b/tests/test_buffer_dynamic.py index 4a978652f5..6a965c9f0c 100644 --- a/tests/test_buffer_dynamic.py +++ b/tests/test_buffer_dynamic.py @@ -803,12 +803,12 @@ def test_bufferPortMaxParameter(self, dvs, testlog): dvs.runcmd("kill -s SIGSTOP {}".format(oa_pid)) # Create a lossless profile - self.config_db.update_entry('BUFFER_PROFILE', 'test', - {'xon': '19456', - 'xoff': '10240', - 'size': '29696', - 'dynamic_th': '0', - 'pool': 'ingress_lossless_pool'}) + profile_fvs = {'xon': '19456', + 'xoff': '10240', + 'size': '29696', + 'dynamic_th': '0', + 'pool': 'ingress_lossless_pool'} + self.config_db.update_entry('BUFFER_PROFILE', 'test', profile_fvs) dvs.runcmd(f"logger -t pytest === configuring Ethernet0|3-4 ===") self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'test'}) @@ -835,6 +835,12 @@ def test_bufferPortMaxParameter(self, dvs, testlog): assert 'Ethernet0:1' not in keys assert 'Ethernet0:6' not in keys + + # Update the profile + profile_fvs['size'] = '28672' + profile_fvs['xoff'] = '9216' + self.config_db.update_entry('BUFFER_PROFILE', 'test', profile_fvs) + self.app_db.wait_for_field_match('BUFFER_PROFILE_TABLE', 'test', profile_fvs) finally: dvs.runcmd("kill -s SIGCONT {}".format(oa_pid)) From c36bd2a45922cc4795f46f926fcdb144eac3da11 Mon Sep 17 00:00:00 2001 From: Stephen Sun Date: Fri, 12 May 2023 01:39:43 +0000 Subject: [PATCH 12/13] 
Revert "Detailed log" This reverts commit c12a9e5c66222eecec63ff06e488a73e703995ee. --- tests/test_buffer_dynamic.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/tests/test_buffer_dynamic.py b/tests/test_buffer_dynamic.py index 6a965c9f0c..02a06569bd 100644 --- a/tests/test_buffer_dynamic.py +++ b/tests/test_buffer_dynamic.py @@ -788,18 +788,15 @@ def test_bufferPortMaxParameter(self, dvs, testlog): _, oa_pid = dvs.runcmd("pgrep orchagent") try: - dvs.runcmd(f"logger -t pytest === setting max_headroom_size ===") fvs["max_headroom_size"] = "122880" self.state_db.update_entry("BUFFER_MAX_PARAM_TABLE", "Ethernet0", fvs) # Startup interface - dvs.runcmd(f"logger -t pytest === starting up interface ===") dvs.port_admin_set('Ethernet0', 'up') # Wait for the lossy profile to be handled self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:0", {"profile": "ingress_lossy_profile"}) # Stop orchagent to simulate the scenario that the system is during initialization - dvs.runcmd(f"logger -t pytest === stopping orchagent ===") dvs.runcmd("kill -s SIGSTOP {}".format(oa_pid)) # Create a lossless profile @@ -810,27 +807,22 @@ def test_bufferPortMaxParameter(self, dvs, testlog): 'pool': 'ingress_lossless_pool'} self.config_db.update_entry('BUFFER_PROFILE', 'test', profile_fvs) - dvs.runcmd(f"logger -t pytest === configuring Ethernet0|3-4 ===") self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'test'}) # Make sure the entry has been handled by buffermgrd and is pending on orchagent's queue self.app_db.wait_for_field_match("_BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "test"}) # Should not be added due to the maximum headroom exceeded - dvs.runcmd(f"logger -t pytest === configuring Ethernet0|1 fail ===") self.config_db.update_entry('BUFFER_PG', 'Ethernet0|1', {'profile': 'ingress_lossy_profile'}) # Should not be added due to the maximum headroom exceeded - dvs.runcmd(f"logger -t pytest === configuring Ethernet0|6 fail ===") self.config_db.update_entry('BUFFER_PG', 'Ethernet0|6', {'profile': 'test'}) # Resume orchagent - dvs.runcmd(f"logger -t pytest === resuming orchagent ===") dvs.runcmd("kill -s SIGCONT {}".format(oa_pid)) # Check whether BUFFER_PG_TABLE is updated as expected - dvs.runcmd(f"logger -t pytest === checking Ethernet0:3-4 ===") self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "test"}) - dvs.runcmd(f"logger -t pytest === checking failing keys ===") + keys = self.app_db.get_keys('BUFFER_PG_TABLE') assert 'Ethernet0:1' not in keys From 0c63232f9cfb9ae2a038d83a57f384c29c42bfc6 Mon Sep 17 00:00:00 2001 From: Stephen Sun Date: Thu, 18 May 2023 00:39:14 +0000 Subject: [PATCH 13/13] Remove redundant code Signed-off-by: Stephen Sun --- cfgmgr/buffer_check_headroom_mellanox.lua | 1 - 1 file changed, 1 deletion(-) diff --git a/cfgmgr/buffer_check_headroom_mellanox.lua b/cfgmgr/buffer_check_headroom_mellanox.lua index daf1b217d5..6ae5b883ba 100644 --- a/cfgmgr/buffer_check_headroom_mellanox.lua +++ b/cfgmgr/buffer_check_headroom_mellanox.lua @@ -118,7 +118,6 @@ for pg_key, profile in pairs(all_pgs) do current_profile_size = tonumber(referenced_profile_size) else current_profile_size = input_profile_size - profile = input_profile_name end if current_profile_size == 0 then current_profile_size = lossy_pg_size