From 5aa67e4aacc4b6fc9f18fab687384b39e47afbda Mon Sep 17 00:00:00 2001
From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com>
Date: Wed, 27 Jul 2022 12:12:30 +0300
Subject: [PATCH] [counters] Keep counters cache in a single directory (#2232)

- What I did

To fix Azure/sonic-buildimage#9817, cache all counters in the same place.
Created a UserCache helper class to access the cache directory.

- How I did it

Implemented the UserCache class and changed all stats commands to use the
new class. Adapted the fast-reboot script.

- How to verify it

Run on the switch and verify that the counters stats and clear commands work
correctly. After config reload or cold/fast reboot the counters cache is
removed.

Signed-off-by: Stepan Blyschak
---
 config/main.py             |  5 ----
 config/plugins/pbh.py      | 10 ++++---
 scripts/aclshow            | 17 ++++++------
 scripts/dropstat           | 15 ++---------
 scripts/fast-reboot        | 21 +++++----------
 scripts/flow_counters_stat |  8 +++---
 scripts/intfstat           | 55 +++++++++-----------------------
 scripts/pfcstat            | 33 +++++++----------------
 scripts/pg-drop            |  4 +--
 scripts/portstat           | 41 +++++-----------------------
 scripts/queuestat          | 29 +++++---------------
 scripts/tunnelstat         | 47 +++++++-------------------------
 show/plugins/pbh.py        | 11 +++++---
 tests/aclshow_test.py      |  4 +--
 tests/config_test.py       | 15 ++++-------
 tests/pbh_test.py          |  6 ++---
 tests/pfcstat_test.py      | 17 +++--------
 tests/pgdropstat_test.py   | 20 +++-----------
 tests/portstat_test.py     |  6 ++---
 utilities_common/cli.py    | 39 +++++++++++++++++++++++++++
 20 files changed, 140 insertions(+), 263 deletions(-)

diff --git a/config/main.py b/config/main.py
index ac3e84e55c..1b23beaa53 100644
--- a/config/main.py
+++ b/config/main.py
@@ -1517,11 +1517,6 @@ def reload(db, filename, yes, load_sysinfo, no_service_restart, disable_arp_cach
     if multi_asic.is_multi_asic() and file_format == 'config_db':
         num_cfg_file += num_asic
 
-    # Remove cached PG drop counters data
-    dropstat_dir_prefix = '/tmp/dropstat'
-    command = "rm -rf {}-*".format(dropstat_dir_prefix)
-    clicommon.run_command(command, display_cmd=True)
-
     # If the user give the filename[s], extract the file names.
     if filename is not None:
         cfg_files = filename.split(',')
diff --git a/config/plugins/pbh.py b/config/plugins/pbh.py
index b6726aa154..ce9187a36d 100644
--- a/config/plugins/pbh.py
+++ b/config/plugins/pbh.py
@@ -6,13 +6,14 @@ CLI Auto-generation tool HLD - https://github.com/Azure/SONiC/pull/78
 """
 
+import os
 import click
 import json
 import ipaddress
 import re
 import utilities_common.cli as clicommon
 
-from show.plugins.pbh import deserialize_pbh_counters
+from show.plugins.pbh import deserialize_pbh_counters, PBH_COUNTERS_CACHE_FILENAME
 
 
 GRE_KEY_RE = r"^(0x){1}[a-fA-F0-9]{1,8}/(0x){1}[a-fA-F0-9]{1,8}$"
@@ -79,8 +80,6 @@
 PBH_UPDATE = "UPDATE"
 PBH_REMOVE = "REMOVE"
 
-PBH_COUNTERS_LOCATION = "/tmp/.pbh_counters.txt"
-
 #
 # DB interface --------------------------------------------------------------------------------------------------------
 #
@@ -467,11 +466,14 @@ def serialize_pbh_counters(obj):
         obj: counters dict.
     """
 
+    cache = clicommon.UserCache('pbh')
+    counters_cache_file = os.path.join(cache.get_directory(), PBH_COUNTERS_CACHE_FILENAME)
+
     def remap_keys(obj):
         return [{'key': k, 'value': v} for k, v in obj.items()]
 
     try:
-        with open(PBH_COUNTERS_LOCATION, 'w') as f:
+        with open(counters_cache_file, 'w') as f:
             json.dump(remap_keys(obj), f)
     except IOError as err:
         pass
diff --git a/scripts/aclshow b/scripts/aclshow
index db0cc40ddf..db922a6cce 100755
--- a/scripts/aclshow
+++ b/scripts/aclshow
@@ -20,15 +20,13 @@ optional arguments:
 import argparse
 import json
 import os
-from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector
 import sys
 
+from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector
+from utilities_common.cli import UserCache
+
 from tabulate import tabulate
 
-### temp file to save counter positions when doing clear counter action.
-### if we could have a SAI command to clear counters will be better, so no need to maintain
-### counters in temp loaction for clear conter action
-COUNTER_POSITION = '/tmp/.counters_acl.p'
 
 COUNTERS = "COUNTERS"
 ACL_COUNTER_RULE_MAP = "ACL_COUNTER_RULE_MAP"
@@ -38,6 +36,9 @@ ACL_HEADER = ["RULE NAME", "TABLE NAME", "PRIO", "PACKETS COUNT", "BYTES COUNT"]
 COUNTER_PACKETS_ATTR = "SAI_ACL_COUNTER_ATTR_PACKETS"
 COUNTER_BYTES_ATTR = "SAI_ACL_COUNTER_ATTR_BYTES"
 
+USER_CACHE = UserCache()
+COUNTERS_CACHE_DIR = USER_CACHE.get_directory()
+COUNTERS_CACHE = os.path.join(COUNTERS_CACHE_DIR, 'aclstat')
 
 class AclStat(object):
     """
@@ -78,9 +79,9 @@ class AclStat(object):
                 res[e['key'][0], e['key'][1]] = e['value']
             return res
 
-        if os.path.isfile(COUNTER_POSITION):
+        if os.path.isfile(COUNTERS_CACHE):
             try:
-                with open(COUNTER_POSITION) as fp:
+                with open(COUNTERS_CACHE) as fp:
                     self.saved_acl_counters = remap_keys(json.load(fp))
             except Exception:
                 pass
@@ -207,7 +208,7 @@ class AclStat(object):
         def remap_keys(dict):
             return [{'key': k, 'value': v} for k, v in dict.items()]
 
-        with open(COUNTER_POSITION, 'w') as fp:
+        with open(COUNTERS_CACHE, 'w') as fp:
             json.dump(remap_keys(self.acl_counters), fp)
 
 def main():
diff --git a/scripts/dropstat b/scripts/dropstat
index 6766d2a2c1..f98fc29197 100755
--- a/scripts/dropstat
+++ b/scripts/dropstat
@@ -35,6 +35,7 @@ except KeyError:
     pass
 
 from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector
+from utilities_common.cli import UserCache
 
 
 # COUNTERS_DB Tables
@@ -80,8 +81,7 @@ std_switch_description_header = ['DEVICE']
 
 
 def get_dropstat_dir():
-    dropstat_dir_prefix = '/tmp/dropstat'
-    return "{}-{}/".format(dropstat_dir_prefix, os.getuid())
+    return UserCache().get_directory()
 
 
 class DropStat(object):
@@ -411,18 +411,7 @@ Examples:
     group = args.group
     counter_type = args.type
 
-    dropstat_dir = get_dropstat_dir()
-
-    # Create the directory to hold clear results
-    if not os.path.exists(dropstat_dir):
-        try:
-            os.makedirs(dropstat_dir)
-        except IOError as e:
-            print(e)
-            sys.exit(e.errno)
-
     dcstat = DropStat()
-
     if command == 'clear':
         dcstat.clear_drop_counts()
     elif command == 'show':
diff --git a/scripts/fast-reboot b/scripts/fast-reboot
index 2942c8ba23..cd3e8237c3 100755
--- a/scripts/fast-reboot
+++ b/scripts/fast-reboot
@@ -462,21 +462,14 @@ function unload_kernel()
 }
 
 function save_counters_folder() {
-    debug "Saving counters folder before warmboot..."
+    if [[ "$REBOOT_TYPE" = "warm-reboot" ]]; then
+        debug "Saving counters folder before warmboot..."
 
-    counters_folder="/host/counters"
-    if [[ ! -d $counters_folder ]]; then
-        mkdir $counters_folder
-    fi
-    if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" ]]; then
-        modules=("portstat-0" "dropstat" "pfcstat-0" "queuestat-0" "intfstat-0")
-        for module in ${modules[@]}
-        do
-            statfile="/tmp/$module"
-            if [[ -d $statfile ]]; then
-                cp -rf $statfile $counters_folder
-            fi
-        done
+        counters_folder="/host/counters"
+        if [[ ! -d $counters_folder ]]; then
+            mkdir $counters_folder
+        fi
+        cp -rf /tmp/cache $counters_folder
     fi
 }
 
diff --git a/scripts/flow_counters_stat b/scripts/flow_counters_stat
index 61c754e333..ac5ef94beb 100755
--- a/scripts/flow_counters_stat
+++ b/scripts/flow_counters_stat
@@ -27,6 +27,7 @@ import utilities_common.multi_asic as multi_asic_util
 from flow_counter_util.route import build_route_pattern, extract_route_pattern, exit_if_route_flow_counter_not_support, DEFAULT_VRF, COUNTERS_ROUTE_TO_PATTERN_MAP
 from utilities_common import constants
 from utilities_common.netstat import format_number_with_comma, table_as_json, ns_diff, format_prate
+from utilities_common.cli import UserCache
 
 # Flow counter meta data, new type of flow counters can extend this dictinary to reuse existing logic
 flow_counter_meta = {
@@ -57,9 +58,10 @@ class FlowCounterStats(object):
         meta_data = flow_counter_meta[args.type]
         self.name_map = meta_data['name_map']
         self.headers = meta_data['headers']
-        self.data_file = os.path.join('/tmp/{}-stats-{}'.format(args.type, os.getuid()))
-        if self.args.delete and os.path.exists(self.data_file):
-            os.remove(self.data_file)
+        self.cache = UserCache()
+        self.data_file = os.path.join(self.cache.get_directory(), "flow-counter-stats")
+        if self.args.delete:
+            self.cache.remove()
         self.data = {}
 
     def show(self):
diff --git a/scripts/intfstat b/scripts/intfstat
index 1d5da781b6..30cfbf084d 100755
--- a/scripts/intfstat
+++ b/scripts/intfstat
@@ -28,6 +28,7 @@ from collections import namedtuple, OrderedDict
 from natsort import natsorted
 from tabulate import tabulate
 from utilities_common.netstat import ns_diff, table_as_json, STATUS_NA, format_brate, format_prate
+from utilities_common.cli import UserCache
 from swsscommon.swsscommon import SonicV2Connector
 
 nstat_fields = (
@@ -274,63 +275,34 @@ def main():
     delete_saved_stats = args.delete
     delete_all_stats = args.delete_all
     use_json = args.json
-    tag_name = args.tag if args.tag else ""
-    uid = str(os.getuid())
+    tag_name = args.tag
     wait_time_in_seconds = args.period
     interface_name = args.interface if args.interface else ""
 
-    # fancy filename with dashes: uid-tag / uid etc
-    filename_components = [uid, tag_name]
+    cnstat_file = "intfstat"
 
-    cnstat_file = "-".join(filter(None, filename_components))
+    cache = UserCache(tag=tag_name)
 
-    cnstat_dir = "/tmp/intfstat-" + uid
+    cache_general = UserCache()
+    cnstat_dir = cache.get_directory()
+    cnstat_general_dir = cache_general.get_directory()
+
+    cnstat_fqn_general_file = cnstat_general_dir + "/" + cnstat_file
     cnstat_fqn_file = cnstat_dir + "/" + cnstat_file
 
     if delete_all_stats:
-        # There is nothing to delete
-        if not os.path.isdir(cnstat_dir):
-            sys.exit(0)
-
-        for file in os.listdir(cnstat_dir):
-            os.remove(cnstat_dir + "/" + file)
-
-        try:
-            os.rmdir(cnstat_dir)
-            sys.exit(0)
-        except IOError as e:
-            print(e.errno, e)
-            sys.exit(e)
+        cache.remove_all()
 
     if delete_saved_stats:
-        try:
-            os.remove(cnstat_fqn_file)
-        except IOError as e:
-            if e.errno != ENOENT:
-                print(e.errno, e)
-                sys.exit(1)
-        finally:
-            if os.listdir(cnstat_dir) == []:
-                os.rmdir(cnstat_dir)
-            sys.exit(0)
+        cache.remove()
 
     intfstat = Intfstat()
     cnstat_dict, ratestat_dict = intfstat.get_cnstat(rif=interface_name)
 
-    # At this point, either we'll create a file or open an existing one.
-    if not os.path.exists(cnstat_dir):
-        try:
-            os.makedirs(cnstat_dir)
-        except IOError as e:
-            print(e.errno, e)
-            sys.exit(1)
-
     if save_fresh_stats:
         try:
             # Add the information also to the general file - i.e. without the tag name
-            if tag_name != '' and tag_name in cnstat_fqn_file.split('/')[-1]:
-                gen_index = cnstat_fqn_file.rfind('/')
-                cnstat_fqn_general_file = cnstat_fqn_file[:gen_index] + cnstat_fqn_file[gen_index:].split('-')[0]
+            if tag_name is not None:
                 if os.path.isfile(cnstat_fqn_general_file):
                     try:
                         general_data = pickle.load(open(cnstat_fqn_general_file, 'rb'))
@@ -354,9 +326,6 @@ def main():
         sys.exit(0)
 
     if wait_time_in_seconds == 0:
-        gen_index = cnstat_fqn_file.rfind('/')
-        cnstat_fqn_general_file = cnstat_fqn_file[:gen_index] + cnstat_fqn_file[gen_index:].split('-')[0]
-
         if os.path.isfile(cnstat_fqn_file) or (os.path.isfile(cnstat_fqn_general_file)):
             try:
                 cnstat_cached_dict = {}
diff --git a/scripts/pfcstat b/scripts/pfcstat
index 6d11361527..fb7e6018b6 100755
--- a/scripts/pfcstat
+++ b/scripts/pfcstat
@@ -18,9 +18,6 @@
 from natsort import natsorted
 from tabulate import tabulate
 from sonic_py_common.multi_asic import get_external_ports
-from utilities_common.netstat import ns_diff, STATUS_NA, format_number_with_comma
-from utilities_common import multi_asic as multi_asic_util
-from utilities_common import constants
 
 # mock the redis for unit test purposes #
 try:
@@ -37,6 +34,12 @@ try:
 except KeyError:
     pass
 
+from utilities_common.netstat import ns_diff, STATUS_NA, format_number_with_comma
+from utilities_common import multi_asic as multi_asic_util
+from utilities_common import constants
+from utilities_common.cli import UserCache
+
+
 PStats = namedtuple("PStats", "pfc0, pfc1, pfc2, pfc3, pfc4, pfc5, pfc6, pfc7")
 
 header_Rx = ['Port Rx', 'PFC0', 'PFC1', 'PFC2', 'PFC3', 'PFC4', 'PFC5', 'PFC6', 'PFC7']
@@ -224,10 +227,10 @@ Examples:
     save_fresh_stats = args.clear
     delete_all_stats = args.delete
 
-    uid = str(os.getuid())
-    cnstat_file = uid
+    cache = UserCache()
+    cnstat_file = 'pfcstat'
 
-    cnstat_dir = os.path.join(os.sep, "tmp", "pfcstat-{}".format(uid))
+    cnstat_dir = cache.get_directory()
     cnstat_fqn_file_rx = os.path.join(cnstat_dir, "{}rx".format(cnstat_file))
     cnstat_fqn_file_tx = os.path.join(cnstat_dir, "{}tx".format(cnstat_file))
 
@@ -239,15 +242,7 @@ Examples:
     pfcstat = Pfcstat(args.namespace, args.show)
 
     if delete_all_stats:
-        for file in os.listdir(cnstat_dir):
-            os.remove(os.path.join(cnstat_dir, file))
-
-        try:
-            os.rmdir(cnstat_dir)
-            sys.exit(0)
-        except IOError as e:
-            print(e.errno, e)
-            sys.exit(e)
+        cache.remove()
 
     """
         Get the counters of pfc rx counter
     """
     cnstat_dict_rx = deepcopy(pfcstat.get_cnstat(True))
 
     """
         Get the counters of pfc tx counter
     """
     cnstat_dict_tx = deepcopy(pfcstat.get_cnstat(False))
 
-    # At this point, either we'll create a file or open an existing one.
-    if not os.path.exists(cnstat_dir):
-        try:
-            os.makedirs(cnstat_dir)
-        except IOError as e:
-            print(e.errno, e)
-            sys.exit(1)
-
     if save_fresh_stats:
         try:
             pickle.dump(cnstat_dict_rx, open(cnstat_fqn_file_rx, 'wb'))
diff --git a/scripts/pg-drop b/scripts/pg-drop
index b437e53bba..fee95124bd 100755
--- a/scripts/pg-drop
+++ b/scripts/pg-drop
@@ -26,6 +26,7 @@ try:
 except KeyError:
     pass
 
+from utilities_common.cli import UserCache
 from swsscommon.swsscommon import ConfigDBConnector, SonicV2Connector
 
 STATUS_NA = 'N/A'
@@ -38,8 +39,7 @@ COUNTERS_PG_PORT_MAP = "COUNTERS_PG_PORT_MAP"
 COUNTERS_PG_INDEX_MAP = "COUNTERS_PG_INDEX_MAP"
 
 def get_dropstat_dir():
-    dropstat_dir_prefix = '/tmp/dropstat'
-    return "{}-{}/".format(dropstat_dir_prefix, os.getuid())
+    return UserCache().get_directory()
 
 
 class PgDropStat(object):
diff --git a/scripts/portstat b/scripts/portstat
index abc1bc67aa..45490d29ef 100755
--- a/scripts/portstat
+++ b/scripts/portstat
@@ -39,6 +39,8 @@ from utilities_common.intf_filter import parse_interface_in_filter
 import utilities_common.multi_asic as multi_asic_util
 from utilities_common.netstat import ns_diff, table_as_json, format_brate, format_prate, format_util, format_number_with_comma
 
+from utilities_common.cli import UserCache
+
 """
 The order and count of statistics mentioned below needs to be in sync with the values in portstat script
 So, any fields added/deleted in here should be reflected in portstat script also
@@ -569,7 +571,6 @@ Examples:
     use_json = args.json
     raw_stats = args.raw
     tag_name = args.tag
-    uid = str(os.getuid())
     wait_time_in_seconds = args.period
     print_all = args.all
     intf_fs = args.interface
@@ -577,36 +578,17 @@ Examples:
     display_option = args.show
     detail = args.detail
 
-    if tag_name is not None:
-        cnstat_file = uid + "-" + tag_name
-    else:
-        cnstat_file = uid
+    cache = UserCache(tag=tag_name)
 
-    cnstat_dir = "/tmp/portstat-" + uid
+    cnstat_file = "portstat"
+    cnstat_dir = cache.get_directory()
     cnstat_fqn_file = cnstat_dir + "/" + cnstat_file
 
     if delete_all_stats:
-        for file in os.listdir(cnstat_dir):
-            os.remove(cnstat_dir + "/" + file)
-
-        try:
-            os.rmdir(cnstat_dir)
-            sys.exit(0)
-        except IOError as e:
-            print(e.errno, e)
-            sys.exit(e)
+        cache.remove_all()
 
     if delete_saved_stats:
-        try:
-            os.remove(cnstat_fqn_file)
-        except IOError as e:
-            if e.errno != ENOENT:
-                print(e.errno, e)
-                sys.exit(1)
-        finally:
-            if os.listdir(cnstat_dir) == []:
-                os.rmdir(cnstat_dir)
-            sys.exit(0)
+        cache.remove()
 
     intf_list = parse_interface_in_filter(intf_fs)
 
@@ -624,15 +606,6 @@ Examples:
         portstat.cnstat_print(cnstat_dict, ratestat_dict, intf_list, use_json, print_all, errors_only, rates_only)
         sys.exit(0)
 
-    # At this point, either we'll create a file or open an existing one.
-    if not os.path.exists(cnstat_dir):
-        try:
-            os.makedirs(cnstat_dir)
-        except IOError as e:
-            print(e.errno, e)
-            sys.exit(1)
-
-
     if save_fresh_stats:
         try:
             pickle.dump(cnstat_dict, open(cnstat_fqn_file, 'wb'))
diff --git a/scripts/queuestat b/scripts/queuestat
index 1455494701..bb6539bbb8 100755
--- a/scripts/queuestat
+++ b/scripts/queuestat
@@ -29,6 +29,7 @@ except KeyError:
     pass
 
 from swsscommon.swsscommon import SonicV2Connector
+from utilities_common.cli import UserCache
 
 QueueStats = namedtuple("QueueStats", "queueindex, queuetype, totalpacket, totalbytes, droppacket, dropbytes")
 header = ['Port', 'TxQ', 'Counter/pkts', 'Counter/bytes', 'Drop/pkts', 'Drop/bytes']
@@ -313,13 +314,6 @@ class Queuestat(object):
             print(json_dump(json_output))
 
     def save_fresh_stats(self):
-        if not os.path.exists(cnstat_dir):
-            try:
-                os.makedirs(cnstat_dir)
-            except IOError as e:
-                print(e.errno, e)
-                sys.exit(1)
-
         # Get stat for each port and save
         for port in natsorted(self.counter_port_name_map):
             cnstat_dict = self.get_cnstat(self.port_queues_map[port])
@@ -354,28 +348,19 @@ Examples:
 
     args = parser.parse_args()
 
     save_fresh_stats = args.clear
-    delete_all_stats = args.delete
+    delete_stats = args.delete
     voq = args.voq
     json_opt = args.json_opt
     port_to_show_stats = args.port
 
-    uid = str(os.getuid())
-    cnstat_file = uid
-
-    cnstat_dir = "/tmp/queuestat-" + uid
-    cnstat_fqn_file = cnstat_dir + "/" + cnstat_file
+    cache = UserCache()
 
-    if delete_all_stats:
-        for file in os.listdir(cnstat_dir):
-            os.remove(cnstat_dir + "/" + file)
+    cnstat_dir = cache.get_directory()
+    cnstat_fqn_file = os.path.join(cnstat_dir, 'queuestat')
 
-        try:
-            os.rmdir(cnstat_dir)
-            sys.exit(0)
-        except IOError as e:
-            print(e.errno, e)
-            sys.exit(e)
+    if delete_stats:
+        cache.remove()
 
     queuestat = Queuestat( voq )
 
diff --git a/scripts/tunnelstat b/scripts/tunnelstat
index 00aab5d832..8b045ec684 100755
--- a/scripts/tunnelstat
+++ b/scripts/tunnelstat
@@ -29,6 +29,7 @@ from collections import namedtuple, OrderedDict
 from natsort import natsorted
 from tabulate import tabulate
 from utilities_common.netstat import ns_diff, table_as_json, STATUS_NA, format_prate
+from utilities_common.cli import UserCache
 
 from swsscommon.swsscommon import SonicV2Connector
 
@@ -112,12 +113,12 @@ class Tunnelstat(object):
 
         if counter_tunnel_type_map is None:
             print("No %s in the DB!" % COUNTERS_TUNNEL_TYPE_MAP)
-            sys.exit(1) 
+            sys.exit(1)
 
         if tun_type and tun_type not in counter_types:
             print("Unknown tunnel type %s" % tun_type)
             sys.exit(1)
-        
+
         if tunnel and not tunnel in counter_tunnel_name_map:
             print("Interface %s missing from %s! Make sure it exists" % (tunnel, COUNTERS_TUNNEL_NAME_MAP))
             sys.exit(2)
@@ -250,56 +251,26 @@ def main():
     delete_all_stats = args.delete_all
     use_json = args.json
     tag_name = args.tag if args.tag else ""
-    uid = str(os.getuid())
     wait_time_in_seconds = args.period
     tunnel_name = args.tunnel if args.tunnel else ""
     tunnel_type = args.type if args.type else ""
 
-    # fancy filename with dashes: uid-tag-tunnel / uid-tunnel / uid-tag etc
-    filename_components = [uid, tag_name]
-    cnstat_file = "-".join(filter(None, filename_components))
+    cache = UserCache(tag=tag_name)
+
+    cnstat_file = "tunnelstat"
 
-    cnstat_dir = "/tmp/tunnelstat-" + uid
+    cnstat_dir = cache.get_directory()
     cnstat_fqn_file = cnstat_dir + "/" + cnstat_file
 
     if delete_all_stats:
-        # There is nothing to delete
-        if not os.path.isdir(cnstat_dir):
-            sys.exit(0)
-
-        for file in os.listdir(cnstat_dir):
-            os.remove(cnstat_dir + "/" + file)
-
-        try:
-            os.rmdir(cnstat_dir)
-            sys.exit(0)
-        except IOError as e:
-            print(e.errno, e)
-            sys.exit(e)
+        cache.remove_all()
 
     if delete_saved_stats:
-        try:
-            os.remove(cnstat_fqn_file)
-        except IOError as e:
-            if e.errno != ENOENT:
-                print(e.errno, e)
-                sys.exit(1)
-        finally:
-            if os.listdir(cnstat_dir) == []:
-                os.rmdir(cnstat_dir)
-            sys.exit(0)
+        cache.remove()
 
     tunnelstat = Tunnelstat()
     cnstat_dict,ratestat_dict = tunnelstat.get_cnstat(tunnel=tunnel_name, tun_type=tunnel_type)
 
-    # At this point, either we'll create a file or open an existing one.
-    if not os.path.exists(cnstat_dir):
-        try:
-            os.makedirs(cnstat_dir)
-        except IOError as e:
-            print(e.errno, e)
-            sys.exit(1)
-
     if save_fresh_stats:
         try:
             pickle.dump(cnstat_dict, open(cnstat_fqn_file, 'wb'))
diff --git a/show/plugins/pbh.py b/show/plugins/pbh.py
index e50f6507a5..407c596163 100644
--- a/show/plugins/pbh.py
+++ b/show/plugins/pbh.py
@@ -14,14 +14,14 @@ import utilities_common.cli as clicommon
 
 from swsscommon.swsscommon import SonicV2Connector
 
-PBH_COUNTERS_LOCATION = '/tmp/.pbh_counters.txt'
-
 COUNTER_PACKETS_ATTR = "SAI_ACL_COUNTER_ATTR_PACKETS"
 COUNTER_BYTES_ATTR = "SAI_ACL_COUNTER_ATTR_BYTES"
 
 COUNTERS = "COUNTERS"
 ACL_COUNTER_RULE_MAP = "ACL_COUNTER_RULE_MAP"
 
+PBH_COUNTERS_CACHE_FILENAME = "pbh-counters"
+
 pbh_hash_field_tbl_name = 'PBH_HASH_FIELD'
 pbh_hash_tbl_name = 'PBH_HASH'
 pbh_table_tbl_name = 'PBH_TABLE'
@@ -428,15 +428,18 @@ def deserialize_pbh_counters():
         obj: counters dict.
     """
 
+    cache = clicommon.UserCache('pbh')
+    counters_cache_file = os.path.join(cache.get_directory(), PBH_COUNTERS_CACHE_FILENAME)
+
     def remap_keys(obj):
         res = {}
         for e in obj:
             res[e['key'][0], e['key'][1]] = e['value']
         return res
 
-    if os.path.isfile(PBH_COUNTERS_LOCATION):
+    if os.path.isfile(counters_cache_file):
         try:
-            with open(PBH_COUNTERS_LOCATION, 'r') as f:
+            with open(counters_cache_file, 'r') as f:
                 return remap_keys(json.load(f))
         except Exception as err:
             pass
diff --git a/tests/aclshow_test.py b/tests/aclshow_test.py
index d0a92174f4..90fe46f683 100644
--- a/tests/aclshow_test.py
+++ b/tests/aclshow_test.py
@@ -192,8 +192,8 @@ def nullify_counters(self):
         This method is used to empty dumped counters
         if exist in /tmp/.counters_acl.p (by default).
         """
-        if os.path.isfile(aclshow.COUNTER_POSITION):
-            with open(aclshow.COUNTER_POSITION, 'w') as fp:
+        if os.path.isfile(aclshow.COUNTERS_CACHE):
+            with open(aclshow.COUNTERS_CACHE, 'w') as fp:
                 json.dump([], fp)
 
     def runTest(self):
diff --git a/tests/config_test.py b/tests/config_test.py
index e9dbae4194..98ff5a0c83 100644
--- a/tests/config_test.py
+++ b/tests/config_test.py
@@ -80,7 +80,6 @@
 """
 
 RELOAD_CONFIG_DB_OUTPUT = """\
-Running command: rm -rf /tmp/dropstat-*
 Stopping SONiC target ...
 Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json --write-to-db
 Restarting SONiC target ...
@@ -88,7 +87,6 @@
 """
 
 RELOAD_YANG_CFG_OUTPUT = """\
-Running command: rm -rf /tmp/dropstat-*
 Stopping SONiC target ...
 Running command: /usr/local/bin/sonic-cfggen -Y /tmp/config.json --write-to-db
 Restarting SONiC target ...
@@ -96,7 +94,6 @@
 """
 
 RELOAD_MASIC_CONFIG_DB_OUTPUT = """\
-Running command: rm -rf /tmp/dropstat-*
 Stopping SONiC target ...
 Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json --write-to-db
 Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json -n asic0 --write-to-db
@@ -106,11 +103,9 @@
 """
 
 reload_config_with_sys_info_command_output="""\
-Running command: rm -rf /tmp/dropstat-*
 Running command: /usr/local/bin/sonic-cfggen -H -k Seastone-DX010-25-50 --write-to-db"""
 
 reload_config_with_disabled_service_output="""\
-Running command: rm -rf /tmp/dropstat-*
 Stopping SONiC target ...
 Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json --write-to-db
 Restarting SONiC target ...
@@ -235,7 +230,7 @@ def test_config_reload(self, get_cmd_module, setup_single_broadcom_asic):
 
         assert result.exit_code == 0
 
-        assert "\n".join([l.rstrip() for l in result.output.split('\n')][:2]) == reload_config_with_sys_info_command_output
+        assert "\n".join([l.rstrip() for l in result.output.split('\n')][:1]) == reload_config_with_sys_info_command_output
 
     def test_config_reload_untriggered_timer(self, get_cmd_module, setup_single_broadcom_asic):
         with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect_untriggered_timer)) as mock_run_command:
@@ -293,9 +288,9 @@ def test_load_minigraph(self, get_cmd_module, setup_single_broadcom_asic):
             traceback.print_tb(result.exc_info[2])
         assert result.exit_code == 0
         assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == load_minigraph_command_output
-        # Verify "systemctl reset-failed" is called for services under sonic.target 
+        # Verify "systemctl reset-failed" is called for services under sonic.target
         mock_run_command.assert_any_call('systemctl reset-failed swss')
-        # Verify "systemctl reset-failed" is called for services under sonic-delayed.target 
+        # Verify "systemctl reset-failed" is called for services under sonic-delayed.target
         mock_run_command.assert_any_call('systemctl reset-failed snmp')
         assert mock_run_command.call_count == 11
 
@@ -526,7 +521,7 @@ def test_reload_config_masic(self, get_cmd_module, setup_multi_broadcom_masic):
         runner = CliRunner()
         # 3 config files: 1 for host and 2 for asic
         cfg_files = "{},{},{}".format(
-            self.dummy_cfg_file, 
+            self.dummy_cfg_file,
             self.dummy_cfg_file,
             self.dummy_cfg_file)
         result = runner.invoke(
@@ -565,7 +560,7 @@ def teardown_class(cls):
         os.remove(cls.dummy_cfg_file)
         print("TEARDOWN")
 
-    
+
 class TestConfigCbf(object):
     @classmethod
     def setup_class(cls):
diff --git a/tests/pbh_test.py b/tests/pbh_test.py
index 1972747782..7dddfea9ca 100644
--- a/tests/pbh_test.py
+++ b/tests/pbh_test.py
@@ -10,6 +10,7 @@ from .pbh_input import assert_show_output
 
 from utilities_common.db import Db
+from utilities_common.cli import UserCache
 from click.testing import CliRunner
 
 from .mock_tables import dbconnector
 from .mock_tables import mock_single_asic
@@ -876,10 +877,7 @@ def test_show_pbh_rule(self):
 
 
     def remove_pbh_counters_file(self):
-        SAVED_PBH_COUNTERS_FILE = '/tmp/.pbh_counters.txt'
-        if os.path.isfile(SAVED_PBH_COUNTERS_FILE):
-            os.remove(SAVED_PBH_COUNTERS_FILE)
-
+        UserCache('pbh').remove_all()
 
     def test_show_pbh_statistics_on_empty_config(self):
         dbconnector.dedicated_dbs['CONFIG_DB'] = None
diff --git a/tests/pfcstat_test.py b/tests/pfcstat_test.py
index 955db3c23b..75f7ea6f59 100644
--- a/tests/pfcstat_test.py
+++ b/tests/pfcstat_test.py
@@ -8,6 +8,7 @@
 import show.main as show
 from .utils import get_result_and_return_code
+from utilities_common.cli import UserCache
 
 test_path = os.path.dirname(os.path.abspath(__file__))
 modules_path = os.path.dirname(test_path)
 
@@ -130,9 +131,8 @@
 
 
 def del_cached_stats():
-    uid = str(os.getuid())
-    cnstat_dir = os.path.join(os.sep, "tmp", "pfcstat-{}".format(uid))
-    shutil.rmtree(cnstat_dir, ignore_errors=True, onerror=None)
+    cache = UserCache("pfcstat")
+    cache.remove_all()
 
 
 def pfc_clear(expected_output):
@@ -143,17 +143,6 @@ def pfc_clear(expected_output):
         'pfcstat -c'
     )
 
-    # verify that files are created with stats
-    uid = str(os.getuid())
-    cnstat_dir = os.path.join(os.sep, "tmp", "pfcstat-{}".format(uid))
-    cnstat_fqn_file_rx = "{}rx".format(uid)
-    cnstat_fqn_file_tx = "{}tx".format(uid)
-    file_list = [cnstat_fqn_file_tx, cnstat_fqn_file_rx]
-    file_list.sort()
-    files = os.listdir(cnstat_dir)
-    files.sort()
-    assert files == file_list
-
     return_code, result = get_result_and_return_code(
         'pfcstat -s all'
     )
diff --git a/tests/pgdropstat_test.py b/tests/pgdropstat_test.py
index 3aea0f2959..a46a05b25b 100644
--- a/tests/pgdropstat_test.py
+++ b/tests/pgdropstat_test.py
@@ -9,6 +9,8 @@ from click.testing import CliRunner
 from shutil import copyfile
 
+from utilities_common.cli import UserCache
+
 test_path = os.path.dirname(os.path.abspath(__file__))
 modules_path = os.path.dirname(test_path)
 scripts_path = os.path.join(modules_path, "scripts")
 
@@ -88,25 +90,9 @@ def executor(self, clear_before_show):
         assert result.exit_code == 0
         assert result.output == show_output
 
-    def test_show_pg_drop_config_reload(self):
-        runner = CliRunner()
-        self.test_show_pg_drop_clear()
-
-        # simulate 'config reload' to provoke counters recalculation (remove backup from /tmp folder)
-        result = runner.invoke(config.config.commands["reload"], [ "--no_service_restart", "-y"])
-
-        print(result.exit_code)
-        print(result.output)
-
-        assert result.exit_code == 0
-
-        self.test_show_pg_drop_show()
-
     @classmethod
     def teardown_class(cls):
         os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1])
         os.environ['UTILITIES_UNIT_TESTING'] = "0"
-        dropstat_dir_prefix = '/tmp/dropstat'
-        dir_path = "{}-{}/".format(dropstat_dir_prefix, os.getuid())
-        os.system("rm -rf {}".format(dir_path))
+        UserCache('pg-drop').remove_all()
         print("TEARDOWN")
diff --git a/tests/portstat_test.py b/tests/portstat_test.py
index b8dd055733..6429c4863a 100644
--- a/tests/portstat_test.py
+++ b/tests/portstat_test.py
@@ -6,6 +6,7 @@ import clear.main as clear
 import show.main as show
 from .utils import get_result_and_return_code
+from utilities_common.cli import UserCache
 
 root_path = os.path.dirname(os.path.abspath(__file__))
 modules_path = os.path.dirname(root_path)
 
@@ -191,9 +192,8 @@
 
 def remove_tmp_cnstat_file():
     # remove the tmp portstat
-    uid = str(os.getuid())
-    cnstat_dir = os.path.join(os.sep, "tmp", "portstat-{}".format(uid))
-    shutil.rmtree(cnstat_dir, ignore_errors=True, onerror=None)
+    cache = UserCache("portstat")
+    cache.remove_all()
 
 
 def verify_after_clear(output, expected_out):
diff --git a/utilities_common/cli.py b/utilities_common/cli.py
index d6d8a111bf..6aaedcb209 100644
--- a/utilities_common/cli.py
+++ b/utilities_common/cli.py
@@ -4,6 +4,7 @@
 import re
 import subprocess
 import sys
+import shutil
 
 import click
 import json
@@ -663,3 +664,41 @@ def query_yes_no(question, default="yes"):
         else:
             sys.stdout.write("Please respond with 'yes' or 'no' "
                              "(or 'y' or 'n').\n")
+
+
+class UserCache:
+    """ General purpose cache directory created per user """
+
+    CACHE_DIR = "/tmp/cache/"
+
+    def __init__(self, app_name=None, tag=None):
+        """ Initialize UserCache and create a cache directory if it does not exist.
+
+        Args:
+            tag (str): Tag the user cache. Different tags correspond to different cache directories even for the same user.
+        """
+        self.uid = os.getuid()
+        self.app_name = os.path.basename(sys.argv[0]) if app_name is None else app_name
+        self.cache_directory_suffix = str(self.uid) if tag is None else f"{self.uid}-{tag}"
+        self.cache_directory_app = os.path.join(self.CACHE_DIR, self.app_name)
+
+        prev_umask = os.umask(0)
+        try:
+            os.makedirs(self.cache_directory_app, exist_ok=True)
+        finally:
+            os.umask(prev_umask)
+
+        self.cache_directory = os.path.join(self.cache_directory_app, self.cache_directory_suffix)
+        os.makedirs(self.cache_directory, exist_ok=True)
+
+    def get_directory(self):
+        """ Return the cache directory path """
+        return self.cache_directory
+
+    def remove(self):
+        """ Remove the content of the cache directory """
+        shutil.rmtree(self.cache_directory)
+
+    def remove_all(self):
+        """ Remove the content of the cache for all users """
+        shutil.rmtree(self.cache_directory_app)
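
Usage note (illustrative only, not part of the patch): a minimal sketch of how a stats script is expected to use the UserCache helper added in utilities_common/cli.py, assuming the hypothetical app name "examplestat" and a pickle snapshot like the one portstat and pfcstat keep above.

# Illustrative sketch -- the "examplestat" name and snapshot file are hypothetical.
import os
import pickle

from utilities_common.cli import UserCache

cache = UserCache(app_name="examplestat")   # creates /tmp/cache/examplestat/<uid>
cnstat_file = os.path.join(cache.get_directory(), "examplestat")

counters = {"Ethernet0": 100}               # placeholder counter snapshot

# "clear" saves the current counters into the per-user cache directory
with open(cnstat_file, "wb") as f:
    pickle.dump(counters, f)

# "show" loads the snapshot, if present, and diffs it against fresh counters
if os.path.isfile(cnstat_file):
    with open(cnstat_file, "rb") as f:
        saved = pickle.load(f)

cache.remove()      # drop this user's cache for the tool
cache.remove_all()  # drop the cache for every user of the tool

Because every tool now caches under /tmp/cache/<tool>/, config reload and the reboot scripts can clear or preserve all counters snapshots by handling that single directory.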