diff --git a/scripts/dropstat b/scripts/dropstat index 4e9f5bb4d0..f98fc29197 100755 --- a/scripts/dropstat +++ b/scripts/dropstat @@ -11,7 +11,7 @@ # - Refactor calls to COUNTERS_DB to reduce redundancy # - Cache DB queries to reduce # of expensive queries -import json +import _pickle as pickle import argparse import os import socket @@ -117,10 +117,10 @@ class DropStat(object): """ try: - json.dump(self.get_counts_table(self.gather_counters(std_port_rx_counters + std_port_tx_counters, DEBUG_COUNTER_PORT_STAT_MAP), COUNTERS_PORT_NAME_MAP), - open(self.port_drop_stats_file, 'w+')) - json.dump(self.get_counts(self.gather_counters([], DEBUG_COUNTER_SWITCH_STAT_MAP), self.get_switch_id()), - open(self.switch_drop_stats_file, 'w+')) + pickle.dump(self.get_counts_table(self.gather_counters(std_port_rx_counters + std_port_tx_counters, DEBUG_COUNTER_PORT_STAT_MAP), COUNTERS_PORT_NAME_MAP), + open(self.port_drop_stats_file, 'wb+')) + pickle.dump(self.get_counts(self.gather_counters([], DEBUG_COUNTER_SWITCH_STAT_MAP), self.get_switch_id()), + open(self.switch_drop_stats_file, 'wb+')) except IOError as e: print(e) sys.exit(e.errno) @@ -135,7 +135,7 @@ class DropStat(object): # Grab the latest clear checkpoint, if it exists if os.path.isfile(self.port_drop_stats_file): - port_drop_ckpt = json.load(open(self.port_drop_stats_file, 'r')) + port_drop_ckpt = pickle.load(open(self.port_drop_stats_file, 'rb')) counters = self.gather_counters(std_port_rx_counters + std_port_tx_counters, DEBUG_COUNTER_PORT_STAT_MAP, group, counter_type) headers = std_port_description_header + self.gather_headers(counters, DEBUG_COUNTER_PORT_STAT_MAP) @@ -162,7 +162,7 @@ class DropStat(object): # Grab the latest clear checkpoint, if it exists if os.path.isfile(self.switch_drop_stats_file): - switch_drop_ckpt = json.load(open(self.switch_drop_stats_file, 'r')) + switch_drop_ckpt = pickle.load(open(self.switch_drop_stats_file, 'rb')) counters = self.gather_counters([], DEBUG_COUNTER_SWITCH_STAT_MAP, group, counter_type) headers = std_switch_description_header + self.gather_headers(counters, DEBUG_COUNTER_SWITCH_STAT_MAP) diff --git a/scripts/flow_counters_stat b/scripts/flow_counters_stat index 49b97e335b..ac5ef94beb 100755 --- a/scripts/flow_counters_stat +++ b/scripts/flow_counters_stat @@ -2,7 +2,7 @@ import argparse import os -import json +import _pickle as pickle import sys from natsort import natsorted @@ -185,8 +185,8 @@ class FlowCounterStats(object): if os.path.exists(self.data_file): os.remove(self.data_file) - with open(self.data_file, 'w') as f: - json.dump(data, f) + with open(self.data_file, 'wb') as f: + pickle.dump(data, f) except IOError as e: print('Failed to save statistic - {}'.format(repr(e))) @@ -200,8 +200,8 @@ class FlowCounterStats(object): return None try: - with open(self.data_file, 'r') as f: - data = json.load(f) + with open(self.data_file, 'rb') as f: + data = pickle.load(f) except IOError as e: print('Failed to load statistic - {}'.format(repr(e))) return None diff --git a/scripts/intfstat b/scripts/intfstat index b4a770adeb..30cfbf084d 100755 --- a/scripts/intfstat +++ b/scripts/intfstat @@ -6,7 +6,7 @@ # ##################################################################### -import json +import _pickle as pickle import argparse import datetime import sys @@ -28,7 +28,7 @@ from collections import namedtuple, OrderedDict from natsort import natsorted from tabulate import tabulate from utilities_common.netstat import ns_diff, table_as_json, STATUS_NA, format_brate, format_prate -from 
utilities_common.cli import json_serial, UserCache +from utilities_common.cli import UserCache from swsscommon.swsscommon import SonicV2Connector nstat_fields = ( @@ -96,7 +96,7 @@ class Intfstat(object): counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, counter_name) if counter_data: fields[pos] = str(counter_data) - cntr = NStats._make(fields)._asdict() + cntr = NStats._make(fields) return cntr def get_rates(table_id): @@ -153,14 +153,14 @@ class Intfstat(object): rates = ratestat_dict.get(key, RateStats._make([STATUS_NA] * len(rates_key_list))) table.append((key, - data['rx_p_ok'], + data.rx_p_ok, format_brate(rates.rx_bps), format_prate(rates.rx_pps), - data['rx_p_err'], - data['tx_p_ok'], + data.rx_p_err, + data.tx_p_ok, format_brate(rates.tx_bps), format_prate(rates.tx_pps), - data['tx_p_err'])) + data.tx_p_err)) if use_json: print(table_as_json(table, header)) @@ -186,24 +186,24 @@ class Intfstat(object): if old_cntr is not None: table.append((key, - ns_diff(cntr['rx_p_ok'], old_cntr['rx_p_ok']), + ns_diff(cntr.rx_p_ok, old_cntr.rx_p_ok), format_brate(rates.rx_bps), format_prate(rates.rx_pps), - ns_diff(cntr['rx_p_err'], old_cntr['rx_p_err']), - ns_diff(cntr['tx_p_ok'], old_cntr['tx_p_ok']), + ns_diff(cntr.rx_p_err, old_cntr.rx_p_err), + ns_diff(cntr.tx_p_ok, old_cntr.tx_p_ok), format_brate(rates.tx_bps), format_prate(rates.tx_pps), - ns_diff(cntr['tx_p_err'], old_cntr['tx_p_err']))) + ns_diff(cntr.tx_p_err, old_cntr.tx_p_err))) else: table.append((key, - cntr['rx_p_ok'], + cntr.rx_p_ok, format_brate(rates.rx_bps), format_prate(rates.rx_pps), - cntr['rx_p_err'], - cntr['tx_p_ok'], + cntr.rx_p_err, + cntr.tx_p_ok, format_brate(rates.tx_bps), format_prate(rates.tx_pps), - cntr['tx_p_err'])) + cntr.tx_p_err)) if use_json: print(table_as_json(table, header)) @@ -229,17 +229,17 @@ class Intfstat(object): if cnstat_old_dict and cnstat_old_dict.get(rif): old_cntr = cnstat_old_dict.get(rif) - body = body % (ns_diff(cntr['rx_p_ok'], old_cntr['rx_p_ok']), - ns_diff(cntr['rx_b_ok'], old_cntr['rx_b_ok']), - ns_diff(cntr['rx_p_err'], old_cntr['rx_p_err']), - ns_diff(cntr['rx_b_err'], old_cntr['rx_b_err']), - ns_diff(cntr['tx_p_ok'], old_cntr['tx_p_ok']), - ns_diff(cntr['tx_b_ok'], old_cntr['tx_b_ok']), - ns_diff(cntr['tx_p_err'], old_cntr['tx_p_err']), - ns_diff(cntr['tx_b_err'], old_cntr['tx_b_err'])) + body = body % (ns_diff(cntr.rx_p_ok, old_cntr.rx_p_ok), + ns_diff(cntr.rx_b_ok, old_cntr.rx_b_ok), + ns_diff(cntr.rx_p_err, old_cntr.rx_p_err), + ns_diff(cntr.rx_b_err, old_cntr.rx_b_err), + ns_diff(cntr.tx_p_ok, old_cntr.tx_p_ok), + ns_diff(cntr.tx_b_ok, old_cntr.tx_b_ok), + ns_diff(cntr.tx_p_err, old_cntr.tx_p_err), + ns_diff(cntr.tx_b_err, old_cntr.tx_b_err)) else: - body = body % (cntr['rx_p_ok'], cntr['rx_b_ok'], cntr['rx_p_err'],cntr['rx_b_err'], - cntr['tx_p_ok'], cntr['tx_b_ok'], cntr['tx_p_err'], cntr['tx_b_err']) + body = body % (cntr.rx_p_ok, cntr.rx_b_ok, cntr.rx_p_err,cntr.rx_b_err, + cntr.tx_p_ok, cntr.tx_b_ok, cntr.tx_p_err, cntr.tx_b_err) print(header) print(body) @@ -305,20 +305,20 @@ def main(): if tag_name is not None: if os.path.isfile(cnstat_fqn_general_file): try: - general_data = json.load(open(cnstat_fqn_general_file, 'r')) + general_data = pickle.load(open(cnstat_fqn_general_file, 'rb')) for key, val in cnstat_dict.items(): general_data[key] = val - json.dump(general_data, open(cnstat_fqn_general_file, 'w')) + pickle.dump(general_data, open(cnstat_fqn_general_file, 'wb')) except IOError as e: sys.exit(e.errno) # Add the information also to tag specific file 
if os.path.isfile(cnstat_fqn_file): - data = json.load(open(cnstat_fqn_file, 'r')) + data = pickle.load(open(cnstat_fqn_file, 'rb')) for key, val in cnstat_dict.items(): data[key] = val - json.dump(data, open(cnstat_fqn_file, 'w')) + pickle.dump(data, open(cnstat_fqn_file, 'wb')) else: - json.dump(cnstat_dict, open(cnstat_fqn_file, 'w'), default=json_serial) + pickle.dump(cnstat_dict, open(cnstat_fqn_file, 'wb')) except IOError as e: sys.exit(e.errno) else: @@ -330,9 +330,9 @@ def main(): try: cnstat_cached_dict = {} if os.path.isfile(cnstat_fqn_file): - cnstat_cached_dict = json.load(open(cnstat_fqn_file, 'r')) + cnstat_cached_dict = pickle.load(open(cnstat_fqn_file, 'rb')) else: - cnstat_cached_dict = json.load(open(cnstat_fqn_general_file, 'r')) + cnstat_cached_dict = pickle.load(open(cnstat_fqn_general_file, 'rb')) print("Last cached time was " + str(cnstat_cached_dict.get('time'))) if interface_name: diff --git a/scripts/pfcstat b/scripts/pfcstat index 094c6e9380..fb7e6018b6 100755 --- a/scripts/pfcstat +++ b/scripts/pfcstat @@ -6,7 +6,7 @@ # ##################################################################### -import json +import _pickle as pickle import argparse import datetime import os.path @@ -37,7 +37,7 @@ except KeyError: from utilities_common.netstat import ns_diff, STATUS_NA, format_number_with_comma from utilities_common import multi_asic as multi_asic_util from utilities_common import constants -from utilities_common.cli import json_serial, UserCache +from utilities_common.cli import UserCache PStats = namedtuple("PStats", "pfc0, pfc1, pfc2, pfc3, pfc4, pfc5, pfc6, pfc7") @@ -101,7 +101,7 @@ class Pfcstat(object): fields[pos] = STATUS_NA else: fields[pos] = str(int(counter_data)) - cntr = PStats._make(fields)._asdict() + cntr = PStats._make(fields) return cntr # Get the info from database @@ -144,14 +144,14 @@ class Pfcstat(object): if key == 'time': continue table.append((key, - format_number_with_comma(data['pfc0']), - format_number_with_comma(data['pfc1']), - format_number_with_comma(data['pfc2']), - format_number_with_comma(data['pfc3']), - format_number_with_comma(data['pfc4']), - format_number_with_comma(data['pfc5']), - format_number_with_comma(data['pfc6']), - format_number_with_comma(data['pfc7']))) + format_number_with_comma(data.pfc0), + format_number_with_comma(data.pfc1), + format_number_with_comma(data.pfc2), + format_number_with_comma(data.pfc3), + format_number_with_comma(data.pfc4), + format_number_with_comma(data.pfc5), + format_number_with_comma(data.pfc6), + format_number_with_comma(data.pfc7))) if rx: print(tabulate(table, header_Rx, tablefmt='simple', stralign='right')) @@ -173,24 +173,24 @@ class Pfcstat(object): if old_cntr is not None: table.append((key, - ns_diff(cntr['pfc0'], old_cntr['pfc0']), - ns_diff(cntr['pfc1'], old_cntr['pfc1']), - ns_diff(cntr['pfc2'], old_cntr['pfc2']), - ns_diff(cntr['pfc3'], old_cntr['pfc3']), - ns_diff(cntr['pfc4'], old_cntr['pfc4']), - ns_diff(cntr['pfc5'], old_cntr['pfc5']), - ns_diff(cntr['pfc6'], old_cntr['pfc6']), - ns_diff(cntr['pfc7'], old_cntr['pfc7']))) + ns_diff(cntr.pfc0, old_cntr.pfc0), + ns_diff(cntr.pfc1, old_cntr.pfc1), + ns_diff(cntr.pfc2, old_cntr.pfc2), + ns_diff(cntr.pfc3, old_cntr.pfc3), + ns_diff(cntr.pfc4, old_cntr.pfc4), + ns_diff(cntr.pfc5, old_cntr.pfc5), + ns_diff(cntr.pfc6, old_cntr.pfc6), + ns_diff(cntr.pfc7, old_cntr.pfc7))) else: table.append((key, - format_number_with_comma(cntr['pfc0']), - format_number_with_comma(cntr['pfc1']), - format_number_with_comma(cntr['pfc2']), - 
format_number_with_comma(cntr['pfc3']), - format_number_with_comma(cntr['pfc4']), - format_number_with_comma(cntr['pfc5']), - format_number_with_comma(cntr['pfc6']), - format_number_with_comma(cntr['pfc7']))) + format_number_with_comma(cntr.pfc0), + format_number_with_comma(cntr.pfc1), + format_number_with_comma(cntr.pfc2), + format_number_with_comma(cntr.pfc3), + format_number_with_comma(cntr.pfc4), + format_number_with_comma(cntr.pfc5), + format_number_with_comma(cntr.pfc6), + format_number_with_comma(cntr.pfc7))) if rx: print(tabulate(table, header_Rx, tablefmt='simple', stralign='right')) @@ -256,8 +256,8 @@ Examples: if save_fresh_stats: try: - json.dump(cnstat_dict_rx, open(cnstat_fqn_file_rx, 'w'), default=json_serial) - json.dump(cnstat_dict_tx, open(cnstat_fqn_file_tx, 'w'), default=json_serial) + pickle.dump(cnstat_dict_rx, open(cnstat_fqn_file_rx, 'wb')) + pickle.dump(cnstat_dict_tx, open(cnstat_fqn_file_tx, 'wb')) except IOError as e: print(e.errno, e) sys.exit(e.errno) @@ -271,7 +271,7 @@ Examples: """ if os.path.isfile(cnstat_fqn_file_rx): try: - cnstat_cached_dict = json.load(open(cnstat_fqn_file_rx, 'r')) + cnstat_cached_dict = pickle.load(open(cnstat_fqn_file_rx, 'rb')) print("Last cached time was " + str(cnstat_cached_dict.get('time'))) pfcstat.cnstat_diff_print(cnstat_dict_rx, cnstat_cached_dict, True) except IOError as e: @@ -286,7 +286,7 @@ Examples: """ if os.path.isfile(cnstat_fqn_file_tx): try: - cnstat_cached_dict = json.load(open(cnstat_fqn_file_tx, 'r')) + cnstat_cached_dict = pickle.load(open(cnstat_fqn_file_tx, 'rb')) print("Last cached time was " + str(cnstat_cached_dict.get('time'))) pfcstat.cnstat_diff_print(cnstat_dict_tx, cnstat_cached_dict, False) except IOError as e: diff --git a/scripts/pg-drop b/scripts/pg-drop index 7741593081..40b4e863d3 100755 --- a/scripts/pg-drop +++ b/scripts/pg-drop @@ -5,7 +5,7 @@ # pg-drop is a tool for show/clear ingress pg dropped packet stats. # ##################################################################### -import json +import _pickle as pickle import argparse import os import sys @@ -144,7 +144,7 @@ class PgDropStat(object): port_drop_ckpt = {} # Grab the latest clear checkpoint, if it exists if os.path.isfile(self.port_drop_stats_file): - port_drop_ckpt = json.load(open(self.port_drop_stats_file, 'r')) + port_drop_ckpt = pickle.load(open(self.port_drop_stats_file, 'rb')) # Header list contains the port name followed by the PGs. 
Fields is used to populate the pg values fields = ["0"]* (len(self.header_list) - 1) @@ -216,10 +216,10 @@ class PgDropStat(object): counter_pg_drop_array = [ "SAI_INGRESS_PRIORITY_GROUP_STAT_DROPPED_PACKETS"] try: - json.dump(self.get_counts_table( + pickle.dump(self.get_counts_table( counter_pg_drop_array, COUNTERS_PG_NAME_MAP), - open(self.port_drop_stats_file, 'w+')) + open(self.port_drop_stats_file, 'wb+')) except IOError as e: print(e) sys.exit(e.errno) diff --git a/scripts/portstat b/scripts/portstat index 09ad88b08d..27696729e9 100755 --- a/scripts/portstat +++ b/scripts/portstat @@ -6,7 +6,7 @@ # ##################################################################### -import json +import _pickle as pickle import argparse import datetime import os.path @@ -40,7 +40,7 @@ from utilities_common.intf_filter import parse_interface_in_filter import utilities_common.multi_asic as multi_asic_util from utilities_common.netstat import ns_diff, table_as_json, format_brate, format_prate, format_util, format_number_with_comma -from utilities_common.cli import json_serial, UserCache +from utilities_common.cli import UserCache """ The order and count of statistics mentioned below needs to be in sync with the values in portstat script @@ -181,7 +181,7 @@ class Portstat(object): elif fields[pos] != STATUS_NA: fields[pos] = str(int(fields[pos]) + int(fvs[counter_name])) - cntr = NStats._make(fields)._asdict() + cntr = NStats._make(fields) return cntr def get_rates(table_id): @@ -278,61 +278,61 @@ class Portstat(object): if print_all: header = header_all table.append((key, self.get_port_state(key), - format_number_with_comma(data['rx_ok']), + format_number_with_comma(data.rx_ok), format_brate(rates.rx_bps), format_prate(rates.rx_pps), format_util(rates.rx_bps, port_speed), - format_number_with_comma(data['rx_err']), - format_number_with_comma(data['rx_drop']), - format_number_with_comma(data['rx_ovr']), - format_number_with_comma(data['tx_ok']), + format_number_with_comma(data.rx_err), + format_number_with_comma(data.rx_drop), + format_number_with_comma(data.rx_ovr), + format_number_with_comma(data.tx_ok), format_brate(rates.tx_bps), format_prate(rates.tx_pps), format_util(rates.tx_bps, port_speed), - format_number_with_comma(data['tx_err']), - format_number_with_comma(data['tx_drop']), - format_number_with_comma(data['tx_ovr']))) + format_number_with_comma(data.tx_err), + format_number_with_comma(data.tx_drop), + format_number_with_comma(data.tx_ovr))) elif errors_only: header = header_errors_only table.append((key, self.get_port_state(key), - format_number_with_comma(data['rx_err']), - format_number_with_comma(data['rx_drop']), - format_number_with_comma(data['rx_ovr']), - format_number_with_comma(data['tx_err']), - format_number_with_comma(data['tx_drop']), - format_number_with_comma(data['tx_ovr']))) + format_number_with_comma(data.rx_err), + format_number_with_comma(data.rx_drop), + format_number_with_comma(data.rx_ovr), + format_number_with_comma(data.tx_err), + format_number_with_comma(data.tx_drop), + format_number_with_comma(data.tx_ovr))) elif fec_stats_only: header = header_fec_only table.append((key, self.get_port_state(key), - format_number_with_comma(data['fec_corr']), - format_number_with_comma(data['fec_uncorr']), - format_number_with_comma(data['fec_symbol_err']))) + format_number_with_comma(data.fec_corr), + format_number_with_comma(data.fec_uncorr), + format_number_with_comma(data.fec_symbol_err))) elif rates_only: header = header_rates_only table.append((key, 
self.get_port_state(key), - format_number_with_comma(data['rx_ok']), + format_number_with_comma(data.rx_ok), format_brate(rates.rx_bps), format_prate(rates.rx_pps), format_util(rates.rx_bps, port_speed), - format_number_with_comma(data['tx_ok']), + format_number_with_comma(data.tx_ok), format_brate(rates.tx_bps), format_prate(rates.tx_pps), format_util(rates.tx_bps, port_speed))) else: header = header_std table.append((key, self.get_port_state(key), - format_number_with_comma(data['rx_ok']), + format_number_with_comma(data.rx_ok), format_brate(rates.rx_bps), format_util(rates.rx_bps, port_speed), - format_number_with_comma(data['rx_err']), - format_number_with_comma(data['rx_drop']), - format_number_with_comma(data['rx_ovr']), - format_number_with_comma(data['tx_ok']), + format_number_with_comma(data.rx_err), + format_number_with_comma(data.rx_drop), + format_number_with_comma(data.rx_ovr), + format_number_with_comma(data.tx_ok), format_brate(rates.tx_bps), format_util(rates.tx_bps, port_speed), - format_number_with_comma(data['tx_err']), - format_number_with_comma(data['tx_drop']), - format_number_with_comma(data['tx_ovr']))) + format_number_with_comma(data.tx_err), + format_number_with_comma(data.tx_drop), + format_number_with_comma(data.tx_ovr))) if use_json: print(table_as_json(table, header)) @@ -353,51 +353,51 @@ class Portstat(object): if key in cnstat_old_dict: old_cntr = cnstat_old_dict.get(key) else: - old_cntr = NStats._make([0] * BUCKET_NUM)._asdict() + old_cntr = NStats._make([0] * BUCKET_NUM) if intf_list and key not in intf_list: continue - print("Packets Received 64 Octets..................... {}".format(ns_diff(cntr['rx_64'], old_cntr['rx_64']))) - print("Packets Received 65-127 Octets................. {}".format(ns_diff(cntr['rx_65_127'], old_cntr['rx_65_127']))) - print("Packets Received 128-255 Octets................ {}".format(ns_diff(cntr['rx_128_255'], old_cntr['rx_128_255']))) - print("Packets Received 256-511 Octets................ {}".format(ns_diff(cntr['rx_256_511'], old_cntr['rx_256_511']))) - print("Packets Received 512-1023 Octets............... {}".format(ns_diff(cntr['rx_512_1023'], old_cntr['rx_512_1023']))) - print("Packets Received 1024-1518 Octets.............. {}".format(ns_diff(cntr['rx_1024_1518'], old_cntr['rx_1024_1518']))) - print("Packets Received 1519-2047 Octets.............. {}".format(ns_diff(cntr['rx_1519_2047'], old_cntr['rx_1519_2047']))) - print("Packets Received 2048-4095 Octets.............. {}".format(ns_diff(cntr['rx_2048_4095'], old_cntr['rx_2048_4095']))) - print("Packets Received 4096-9216 Octets.............. {}".format(ns_diff(cntr['rx_4096_9216'], old_cntr['rx_4096_9216']))) - print("Packets Received 9217-16383 Octets............. {}".format(ns_diff(cntr['rx_9217_16383'], old_cntr['rx_9217_16383']))) + print("Packets Received 64 Octets..................... {}".format(ns_diff(cntr.rx_64, old_cntr.rx_64))) + print("Packets Received 65-127 Octets................. {}".format(ns_diff(cntr.rx_65_127, old_cntr.rx_65_127))) + print("Packets Received 128-255 Octets................ {}".format(ns_diff(cntr.rx_128_255, old_cntr.rx_128_255))) + print("Packets Received 256-511 Octets................ {}".format(ns_diff(cntr.rx_256_511, old_cntr.rx_256_511))) + print("Packets Received 512-1023 Octets............... {}".format(ns_diff(cntr.rx_512_1023, old_cntr.rx_512_1023))) + print("Packets Received 1024-1518 Octets.............. 
{}".format(ns_diff(cntr.rx_1024_1518, old_cntr.rx_1024_1518))) + print("Packets Received 1519-2047 Octets.............. {}".format(ns_diff(cntr.rx_1519_2047, old_cntr.rx_1519_2047))) + print("Packets Received 2048-4095 Octets.............. {}".format(ns_diff(cntr.rx_2048_4095, old_cntr.rx_2048_4095))) + print("Packets Received 4096-9216 Octets.............. {}".format(ns_diff(cntr.rx_4096_9216, old_cntr.rx_4096_9216))) + print("Packets Received 9217-16383 Octets............. {}".format(ns_diff(cntr.rx_9217_16383, old_cntr.rx_9217_16383))) print("") - print("Total Packets Received Without Errors.......... {}".format(ns_diff(cntr['rx_all'], old_cntr['rx_all']))) - print("Unicast Packets Received....................... {}".format(ns_diff(cntr['rx_uca'], old_cntr['rx_uca']))) - print("Multicast Packets Received..................... {}".format(ns_diff(cntr['rx_mca'], old_cntr['rx_mca']))) - print("Broadcast Packets Received..................... {}".format(ns_diff(cntr['rx_bca'], old_cntr['rx_bca']))) + print("Total Packets Received Without Errors.......... {}".format(ns_diff(cntr.rx_all, old_cntr.rx_all))) + print("Unicast Packets Received....................... {}".format(ns_diff(cntr.rx_uca, old_cntr.rx_uca))) + print("Multicast Packets Received..................... {}".format(ns_diff(cntr.rx_mca, old_cntr.rx_mca))) + print("Broadcast Packets Received..................... {}".format(ns_diff(cntr.rx_bca, old_cntr.rx_bca))) print("") - print("Jabbers Received............................... {}".format(ns_diff(cntr['rx_jbr'], old_cntr['rx_jbr']))) - print("Fragments Received............................. {}".format(ns_diff(cntr['rx_frag'], old_cntr['rx_frag']))) - print("Undersize Received............................. {}".format(ns_diff(cntr['rx_usize'], old_cntr['rx_usize']))) - print("Overruns Received.............................. {}".format(ns_diff(cntr['rx_ovrrun'], old_cntr['rx_ovrrun']))) + print("Jabbers Received............................... {}".format(ns_diff(cntr.rx_jbr, old_cntr.rx_jbr))) + print("Fragments Received............................. {}".format(ns_diff(cntr.rx_frag, old_cntr.rx_frag))) + print("Undersize Received............................. {}".format(ns_diff(cntr.rx_usize, old_cntr.rx_usize))) + print("Overruns Received.............................. {}".format(ns_diff(cntr.rx_ovrrun, old_cntr.rx_ovrrun))) print("") - print("Packets Transmitted 64 Octets.................. {}".format(ns_diff(cntr['tx_64'], old_cntr['tx_64']))) - print("Packets Transmitted 65-127 Octets.............. {}".format(ns_diff(cntr['tx_65_127'], old_cntr['tx_65_127']))) - print("Packets Transmitted 128-255 Octets............. {}".format(ns_diff(cntr['tx_128_255'], old_cntr['tx_128_255']))) - print("Packets Transmitted 256-511 Octets............. {}".format(ns_diff(cntr['tx_256_511'], old_cntr['tx_256_511']))) - print("Packets Transmitted 512-1023 Octets............ {}".format(ns_diff(cntr['tx_512_1023'], old_cntr['tx_512_1023']))) - print("Packets Transmitted 1024-1518 Octets........... {}".format(ns_diff(cntr['tx_1024_1518'], old_cntr['tx_1024_1518']))) - print("Packets Transmitted 1519-2047 Octets........... {}".format(ns_diff(cntr['tx_1519_2047'], old_cntr['tx_1519_2047']))) - print("Packets Transmitted 2048-4095 Octets........... {}".format(ns_diff(cntr['tx_2048_4095'], old_cntr['tx_2048_4095']))) - print("Packets Transmitted 4096-9216 Octets........... {}".format(ns_diff(cntr['tx_4096_9216'], old_cntr['tx_4096_9216']))) - print("Packets Transmitted 9217-16383 Octets.......... 
{}".format(ns_diff(cntr['tx_9217_16383'], old_cntr['tx_9217_16383']))) + print("Packets Transmitted 64 Octets.................. {}".format(ns_diff(cntr.tx_64, old_cntr.tx_64))) + print("Packets Transmitted 65-127 Octets.............. {}".format(ns_diff(cntr.tx_65_127, old_cntr.tx_65_127))) + print("Packets Transmitted 128-255 Octets............. {}".format(ns_diff(cntr.tx_128_255, old_cntr.tx_128_255))) + print("Packets Transmitted 256-511 Octets............. {}".format(ns_diff(cntr.tx_256_511, old_cntr.tx_256_511))) + print("Packets Transmitted 512-1023 Octets............ {}".format(ns_diff(cntr.tx_512_1023, old_cntr.tx_512_1023))) + print("Packets Transmitted 1024-1518 Octets........... {}".format(ns_diff(cntr.tx_1024_1518, old_cntr.tx_1024_1518))) + print("Packets Transmitted 1519-2047 Octets........... {}".format(ns_diff(cntr.tx_1519_2047, old_cntr.tx_1519_2047))) + print("Packets Transmitted 2048-4095 Octets........... {}".format(ns_diff(cntr.tx_2048_4095, old_cntr.tx_2048_4095))) + print("Packets Transmitted 4096-9216 Octets........... {}".format(ns_diff(cntr.tx_4096_9216, old_cntr.tx_4096_9216))) + print("Packets Transmitted 9217-16383 Octets.......... {}".format(ns_diff(cntr.tx_9217_16383, old_cntr.tx_9217_16383))) print("") - print("Total Packets Transmitted Successfully......... {}".format(ns_diff(cntr['tx_all'], old_cntr['tx_all']))) - print("Unicast Packets Transmitted.................... {}".format(ns_diff(cntr['tx_uca'], old_cntr['tx_uca']))) - print("Multicast Packets Transmitted.................. {}".format(ns_diff(cntr['tx_mca'], old_cntr['tx_mca']))) - print("Broadcast Packets Transmitted.................. {}".format(ns_diff(cntr['tx_bca'], old_cntr['tx_bca']))) + print("Total Packets Transmitted Successfully......... {}".format(ns_diff(cntr.tx_all, old_cntr.tx_all))) + print("Unicast Packets Transmitted.................... {}".format(ns_diff(cntr.tx_uca, old_cntr.tx_uca))) + print("Multicast Packets Transmitted.................. {}".format(ns_diff(cntr.tx_mca, old_cntr.tx_mca))) + print("Broadcast Packets Transmitted.................. {}".format(ns_diff(cntr.tx_bca, old_cntr.tx_bca))) print("Time Since Counters Last Cleared............... 
" + str(cnstat_old_dict.get('time'))) @@ -434,88 +434,88 @@ class Portstat(object): header = header_all if old_cntr is not None: table.append((key, self.get_port_state(key), - ns_diff(cntr['rx_ok'], old_cntr['rx_ok']), + ns_diff(cntr.rx_ok, old_cntr.rx_ok), format_brate(rates.rx_bps), format_prate(rates.rx_pps), format_util(rates.rx_bps, port_speed), - ns_diff(cntr['rx_err'], old_cntr['rx_err']), - ns_diff(cntr['rx_drop'], old_cntr['rx_drop']), - ns_diff(cntr['rx_ovr'], old_cntr['rx_ovr']), - ns_diff(cntr['tx_ok'], old_cntr['tx_ok']), + ns_diff(cntr.rx_err, old_cntr.rx_err), + ns_diff(cntr.rx_drop, old_cntr.rx_drop), + ns_diff(cntr.rx_ovr, old_cntr.rx_ovr), + ns_diff(cntr.tx_ok, old_cntr.tx_ok), format_brate(rates.tx_bps), format_prate(rates.tx_pps), format_util(rates.tx_bps, port_speed), - ns_diff(cntr['tx_err'], old_cntr['tx_err']), - ns_diff(cntr['tx_drop'], old_cntr['tx_drop']), - ns_diff(cntr['tx_ovr'], old_cntr['tx_ovr']))) + ns_diff(cntr.tx_err, old_cntr.tx_err), + ns_diff(cntr.tx_drop, old_cntr.tx_drop), + ns_diff(cntr.tx_ovr, old_cntr.tx_ovr))) else: table.append((key, self.get_port_state(key), - format_number_with_comma(cntr['rx_ok']), + format_number_with_comma(cntr.rx_ok), format_brate(rates.rx_bps), format_prate(rates.rx_pps), format_util(rates.rx_bps, port_speed), - format_number_with_comma(cntr['rx_err']), - format_number_with_comma(cntr['rx_drop']), - format_number_with_comma(cntr['rx_ovr']), - format_number_with_comma(cntr['tx_ok']), + format_number_with_comma(cntr.rx_err), + format_number_with_comma(cntr.rx_drop), + format_number_with_comma(cntr.rx_ovr), + format_number_with_comma(cntr.tx_ok), format_brate(rates.tx_bps), format_prate(rates.tx_pps), format_util(rates.tx_bps, port_speed), - format_number_with_comma(cntr['tx_err']), - format_number_with_comma(cntr['tx_drop']), - format_number_with_comma(cntr['tx_ovr']))) + format_number_with_comma(cntr.tx_err), + format_number_with_comma(cntr.tx_drop), + format_number_with_comma(cntr.tx_ovr))) elif errors_only: header = header_errors_only if old_cntr is not None: table.append((key, self.get_port_state(key), - ns_diff(cntr['rx_err'], old_cntr['rx_err']), - ns_diff(cntr['rx_drop'], old_cntr['rx_drop']), - ns_diff(cntr['rx_ovr'], old_cntr['rx_ovr']), - ns_diff(cntr['tx_err'], old_cntr['tx_err']), - ns_diff(cntr['tx_drop'], old_cntr['tx_drop']), - ns_diff(cntr['tx_ovr'], old_cntr['tx_ovr']))) + ns_diff(cntr.rx_err, old_cntr.rx_err), + ns_diff(cntr.rx_drop, old_cntr.rx_drop), + ns_diff(cntr.rx_ovr, old_cntr.rx_ovr), + ns_diff(cntr.tx_err, old_cntr.tx_err), + ns_diff(cntr.tx_drop, old_cntr.tx_drop), + ns_diff(cntr.tx_ovr, old_cntr.tx_ovr))) else: table.append((key, self.get_port_state(key), - format_number_with_comma(cntr['rx_err']), - format_number_with_comma(cntr['rx_drop']), - format_number_with_comma(cntr['rx_ovr']), - format_number_with_comma(cntr['tx_err']), - format_number_with_comma(cntr['tx_drop']), - format_number_with_comma(cntr['tx_ovr']))) + format_number_with_comma(cntr.rx_err), + format_number_with_comma(cntr.rx_drop), + format_number_with_comma(cntr.rx_ovr), + format_number_with_comma(cntr.tx_err), + format_number_with_comma(cntr.tx_drop), + format_number_with_comma(cntr.tx_ovr))) elif fec_stats_only: header = header_fec_only if old_cntr is not None: table.append((key, self.get_port_state(key), - ns_diff(cntr['fec_corr'], old_cntr['fec_corr']), - ns_diff(cntr['fec_uncorr'], old_cntr['fec_uncorr']), - ns_diff(cntr['fec_symbol_err'], old_cntr['fec_symbol_err']))) + ns_diff(cntr.fec_corr, old_cntr.fec_corr), + 
ns_diff(cntr.fec_uncorr, old_cntr.fec_uncorr), + ns_diff(cntr.fec_symbol_err, old_cntr.fec_symbol_err))) else: table.append((key, self.get_port_state(key), - format_number_with_comma(cntr['fec_corr']), - format_number_with_comma(cntr['fec_uncorr']), - format_number_with_comma(cntr['fec_symbol_err']))) + format_number_with_comma(cntr.fec_corr), + format_number_with_comma(cntr.fec_uncorr), + format_number_with_comma(cntr.fec_symbol_err))) elif rates_only: header = header_rates_only if old_cntr is not None: table.append((key, self.get_port_state(key), - ns_diff(cntr['rx_ok'], old_cntr['rx_ok']), + ns_diff(cntr.rx_ok, old_cntr.rx_ok), format_brate(rates.rx_bps), format_prate(rates.rx_pps), format_util(rates.rx_bps, port_speed), - ns_diff(cntr['tx_ok'], old_cntr['tx_ok']), + ns_diff(cntr.tx_ok, old_cntr.tx_ok), format_brate(rates.tx_bps), format_prate(rates.tx_pps), format_util(rates.tx_bps, port_speed))) else: table.append((key, self.get_port_state(key), - format_number_with_comma(cntr['rx_ok']), + format_number_with_comma(cntr.rx_ok), format_brate(rates.rx_bps), format_prate(rates.rx_pps), format_util(rates.rx_bps, port_speed), - format_number_with_comma(cntr['tx_ok']), + format_number_with_comma(cntr.tx_ok), format_brate(rates.tx_bps), format_prate(rates.tx_pps), format_util(rates.tx_bps, port_speed))) @@ -524,33 +524,33 @@ class Portstat(object): if old_cntr is not None: table.append((key, self.get_port_state(key), - ns_diff(cntr['rx_ok'], old_cntr['rx_ok']), + ns_diff(cntr.rx_ok, old_cntr.rx_ok), format_brate(rates.rx_bps), format_util(rates.rx_bps, port_speed), - ns_diff(cntr['rx_err'], old_cntr['rx_err']), - ns_diff(cntr['rx_drop'], old_cntr['rx_drop']), - ns_diff(cntr['rx_ovr'], old_cntr['rx_ovr']), - ns_diff(cntr['tx_ok'], old_cntr['tx_ok']), + ns_diff(cntr.rx_err, old_cntr.rx_err), + ns_diff(cntr.rx_drop, old_cntr.rx_drop), + ns_diff(cntr.rx_ovr, old_cntr.rx_ovr), + ns_diff(cntr.tx_ok, old_cntr.tx_ok), format_brate(rates.tx_bps), format_util(rates.tx_bps, port_speed), - ns_diff(cntr['tx_err'], old_cntr['tx_err']), - ns_diff(cntr['tx_drop'], old_cntr['tx_drop']), - ns_diff(cntr['tx_ovr'], old_cntr['tx_ovr']))) + ns_diff(cntr.tx_err, old_cntr.tx_err), + ns_diff(cntr.tx_drop, old_cntr.tx_drop), + ns_diff(cntr.tx_ovr, old_cntr.tx_ovr))) else: table.append((key, self.get_port_state(key), - format_number_with_comma(cntr['rx_ok']), + format_number_with_comma(cntr.rx_ok), format_brate(rates.rx_bps), format_util(rates.rx_bps, port_speed), - format_number_with_comma(cntr['rx_err']), - format_number_with_comma(cntr['rx_drop']), - format_number_with_comma(cntr['rx_ovr']), - format_number_with_comma(cntr['tx_ok']), + format_number_with_comma(cntr.rx_err), + format_number_with_comma(cntr.rx_drop), + format_number_with_comma(cntr.rx_ovr), + format_number_with_comma(cntr.tx_ok), format_brate(rates.tx_bps), format_util(rates.tx_bps, port_speed), - format_number_with_comma(cntr['tx_err']), - format_number_with_comma(cntr['tx_drop']), - format_number_with_comma(cntr['tx_ovr']))) + format_number_with_comma(cntr.tx_err), + format_number_with_comma(cntr.tx_drop), + format_number_with_comma(cntr.tx_ovr))) if use_json: print(table_as_json(table, header)) @@ -641,7 +641,7 @@ Examples: if save_fresh_stats: try: - json.dump(cnstat_dict, open(cnstat_fqn_file, 'w'), default=json_serial) + pickle.dump(cnstat_dict, open(cnstat_fqn_file, 'wb')) except IOError as e: sys.exit(e.errno) else: @@ -652,7 +652,7 @@ Examples: cnstat_cached_dict = OrderedDict() if os.path.isfile(cnstat_fqn_file): try: - cnstat_cached_dict = 
json.load(open(cnstat_fqn_file, 'r')) + cnstat_cached_dict = pickle.load(open(cnstat_fqn_file, 'rb')) if not detail: print("Last cached time was " + str(cnstat_cached_dict.get('time'))) portstat.cnstat_diff_print(cnstat_dict, cnstat_cached_dict, ratestat_dict, intf_list, use_json, print_all, errors_only, fec_stats_only, rates_only, detail) diff --git a/scripts/queuestat b/scripts/queuestat index d82e7e4a6a..96a24b51a3 100755 --- a/scripts/queuestat +++ b/scripts/queuestat @@ -6,7 +6,7 @@ # ##################################################################### -import json +import _pickle as pickle import argparse import datetime import os.path @@ -33,7 +33,7 @@ except KeyError: pass from swsscommon.swsscommon import SonicV2Connector -from utilities_common.cli import json_serial, UserCache +from utilities_common.cli import UserCache from utilities_common import constants import utilities_common.multi_asic as multi_asic_util @@ -186,7 +186,7 @@ class Queuestat(object): fields[pos] = STATUS_NA elif fields[pos] != STATUS_NA: fields[pos] = str(int(counter_data)) - cntr = QueueStats._make(fields)._asdict() + cntr = QueueStats._make(fields) return cntr # Build a dictionary of the stats @@ -211,9 +211,9 @@ class Queuestat(object): if json_opt: json_output[port][key] = data continue - table.append((port, data['queuetype'] + str(data['queueindex']), - data['totalpacket'], data['totalbytes'], - data['droppacket'], data['dropbytes'])) + table.append((port, data.queuetype + str(data.queueindex), + data.totalpacket, data.totalbytes, + data.droppacket, data.dropbytes)) if json_opt: json_output[port].update(build_json(port, table)) @@ -241,15 +241,15 @@ class Queuestat(object): old_cntr = cnstat_old_dict.get(key) if old_cntr is not None: - table.append((port, cntr['queuetype'] + str(cntr['queueindex']), - ns_diff(cntr['totalpacket'], old_cntr['totalpacket']), - ns_diff(cntr['totalbytes'], old_cntr['totalbytes']), - ns_diff(cntr['droppacket'], old_cntr['droppacket']), - ns_diff(cntr['dropbytes'], old_cntr['dropbytes']))) + table.append((port, cntr.queuetype + str(cntr.queueindex), + ns_diff(cntr.totalpacket, old_cntr.totalpacket), + ns_diff(cntr.totalbytes, old_cntr.totalbytes), + ns_diff(cntr.droppacket, old_cntr.droppacket), + ns_diff(cntr.dropbytes, old_cntr.dropbytes))) else: - table.append((port, cntr['queuetype'] + str(cntr['queueindex']), - cntr['totalpacket'], cntr['totalbytes'], - cntr['droppacket'], cntr['dropbytes'])) + table.append((port, cntr.queuetype + str(cntr.queueindex), + cntr.totalpacket, cntr.totalbytes, + cntr.droppacket, cntr.dropbytes)) if json_opt: json_output[port].update(build_json(port, table)) @@ -273,7 +273,7 @@ class Queuestat(object): cnstat_fqn_file_name = cnstat_fqn_file + port if os.path.isfile(cnstat_fqn_file_name): try: - cnstat_cached_dict = json.load(open(cnstat_fqn_file_name, 'r')) + cnstat_cached_dict = pickle.load(open(cnstat_fqn_file_name, 'rb')) if json_opt: json_output[port].update({"cached_time":cnstat_cached_dict.get('time')}) json_output.update(self.cnstat_diff_print(port, cnstat_dict, cnstat_cached_dict, json_opt)) @@ -307,7 +307,7 @@ class Queuestat(object): json_output[port] = {} if os.path.isfile(cnstat_fqn_file_name): try: - cnstat_cached_dict = json.load(open(cnstat_fqn_file_name, 'r')) + cnstat_cached_dict = pickle.load(open(cnstat_fqn_file_name, 'rb')) if json_opt: json_output[port].update({"cached_time":cnstat_cached_dict.get('time')}) json_output.update(self.cnstat_diff_print(port, cnstat_dict, cnstat_cached_dict, json_opt)) @@ -330,7 +330,7 @@ class 
Queuestat(object): for port in natsorted(self.counter_port_name_map): cnstat_dict = self.get_cnstat(self.port_queues_map[port]) try: - json.dump(cnstat_dict, open(cnstat_fqn_file + port, 'w'), default=json_serial) + pickle.dump(cnstat_dict, open(cnstat_fqn_file + port, 'wb')) except IOError as e: print(e.errno, e) sys.exit(e.errno) diff --git a/scripts/tunnelstat b/scripts/tunnelstat index 3d7423e86b..8b045ec684 100755 --- a/scripts/tunnelstat +++ b/scripts/tunnelstat @@ -6,7 +6,7 @@ # ##################################################################### -import json +import _pickle as pickle import argparse import datetime import sys @@ -29,7 +29,7 @@ from collections import namedtuple, OrderedDict from natsort import natsorted from tabulate import tabulate from utilities_common.netstat import ns_diff, table_as_json, STATUS_NA, format_prate -from utilities_common.cli import json_serial, UserCache +from utilities_common.cli import UserCache from swsscommon.swsscommon import SonicV2Connector @@ -80,7 +80,7 @@ class Tunnelstat(object): counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, counter_name) if counter_data: fields[pos] = str(counter_data) - cntr = NStats._make(fields)._asdict() + cntr = NStats._make(fields) return cntr def get_rates(table_id): @@ -149,8 +149,8 @@ class Tunnelstat(object): continue rates = ratestat_dict.get(key, RateStats._make([STATUS_NA] * len(rates_key_list))) - table.append((key, data['rx_p_ok'], data['rx_b_ok'], format_prate(rates.rx_pps), - data['tx_p_ok'], data['tx_b_ok'], format_prate(rates.tx_pps))) + table.append((key, data.rx_p_ok, data.rx_b_ok, format_prate(rates.rx_pps), + data.tx_p_ok, data.tx_b_ok, format_prate(rates.tx_pps))) if use_json: print(table_as_json(table, header)) @@ -175,19 +175,19 @@ class Tunnelstat(object): rates = ratestat_dict.get(key, RateStats._make([STATUS_NA] * len(rates_key_list))) if old_cntr is not None: table.append((key, - ns_diff(cntr['rx_p_ok'], old_cntr['rx_p_ok']), - ns_diff(cntr['rx_b_ok'], old_cntr['rx_b_ok']), + ns_diff(cntr.rx_p_ok, old_cntr.rx_p_ok), + ns_diff(cntr.rx_b_ok, old_cntr.rx_b_ok), format_prate(rates.rx_pps), - ns_diff(cntr['tx_p_ok'], old_cntr['tx_p_ok']), - ns_diff(cntr['tx_b_ok'], old_cntr['tx_b_ok']), + ns_diff(cntr.tx_p_ok, old_cntr.tx_p_ok), + ns_diff(cntr.tx_b_ok, old_cntr.tx_b_ok), format_prate(rates.tx_pps))) else: table.append((key, - cntr['rx_p_ok'], - cntr['rx_b_ok'], + cntr.rx_p_ok, + cntr.rx_b_ok, format_prate(rates.rx_pps), - cntr['tx_p_ok'], - cntr['tx_b_ok'], + cntr.tx_p_ok, + cntr.tx_b_ok, format_prate(rates.tx_pps))) if use_json: print(table_as_json(table, header)) @@ -210,12 +210,12 @@ class Tunnelstat(object): if cnstat_old_dict: old_cntr = cnstat_old_dict.get(tunnel) if old_cntr: - body = body % (ns_diff(cntr['rx_p_ok'], old_cntr['rx_p_ok']), - ns_diff(cntr['rx_b_ok'], old_cntr['rx_b_ok']), - ns_diff(cntr['tx_p_ok'], old_cntr['tx_p_ok']), - ns_diff(cntr['tx_b_ok'], old_cntr['tx_b_ok'])) + body = body % (ns_diff(cntr.rx_p_ok, old_cntr.rx_p_ok), + ns_diff(cntr.rx_b_ok, old_cntr.rx_b_ok), + ns_diff(cntr.tx_p_ok, old_cntr.tx_p_ok), + ns_diff(cntr.tx_b_ok, old_cntr.tx_b_ok)) else: - body = body % (cntr['rx_p_ok'], cntr['rx_b_ok'], cntr['tx_p_ok'], cntr['tx_b_ok']) + body = body % (cntr.rx_p_ok, cntr.rx_b_ok, cntr.tx_p_ok, cntr.tx_b_ok) print(header) print(body) @@ -273,7 +273,7 @@ def main(): if save_fresh_stats: try: - json.dump(cnstat_dict, open(cnstat_fqn_file, 'w'), default=json_serial) + pickle.dump(cnstat_dict, open(cnstat_fqn_file, 'wb')) except IOError as e: 
sys.exit(e.errno) else: @@ -283,7 +283,7 @@ def main(): if wait_time_in_seconds == 0: if os.path.isfile(cnstat_fqn_file): try: - cnstat_cached_dict = json.load(open(cnstat_fqn_file, 'r')) + cnstat_cached_dict = pickle.load(open(cnstat_fqn_file, 'rb')) print("Last cached time was " + str(cnstat_cached_dict.get('time'))) if tunnel_name: tunnelstat.cnstat_single_tunnel(tunnel_name, cnstat_dict, cnstat_cached_dict)
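
The recurring change across these scripts swaps `json.dump`/`json.load` for `pickle.dump`/`pickle.load` and opens the checkpoint files in binary mode (`'wb+'`/`'rb'`). A minimal sketch of that caching pattern is below; the cache path and the `NStats` fields are illustrative stand-ins, not the scripts' actual definitions. Note that the diff imports `_pickle` (the C accelerator) directly; plain `import pickle` behaves the same on CPython 3. Because pickle serializes `datetime` and namedtuple values natively, the `default=json_serial` hook and the `json_serial` import can be dropped, as the diff does.

```python
# Minimal sketch of the pickle-based counter cache used above.
# CACHE_FILE and the NStats fields are hypothetical examples,
# not the exact definitions from portstat/intfstat.
import _pickle as pickle   # C accelerator; "import pickle" is equivalent on CPython 3
import datetime
import os
from collections import namedtuple, OrderedDict

NStats = namedtuple("NStats", "rx_ok, rx_err, tx_ok, tx_err")
CACHE_FILE = "/tmp/example-portstat-cache"   # hypothetical path

def save_cache(cnstat_dict):
    # Binary mode is required for pickle; datetime and namedtuple
    # values round-trip without a custom serializer.
    with open(CACHE_FILE, 'wb') as f:
        pickle.dump(cnstat_dict, f)

def load_cache():
    if not os.path.isfile(CACHE_FILE):
        return OrderedDict()
    with open(CACHE_FILE, 'rb') as f:
        return pickle.load(f)

if __name__ == "__main__":
    cnstat = OrderedDict()
    cnstat['time'] = datetime.datetime.now()
    cnstat['Ethernet0'] = NStats._make(["100", "0", "200", "1"])
    save_cache(cnstat)
    cached = load_cache()
    print("Last cached time was " + str(cached.get('time')))
    print(cached['Ethernet0'].rx_ok)   # attribute access, matching the new code
```

This pattern assumes the cache directory is writable only by the same trusted tooling, since `pickle.load` is not safe on untrusted input; it also assumes a fresh cache, as a leftover JSON-format checkpoint from an older version would fail to unpickle.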
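
The other repeated change keeps `NStats._make(fields)` / `PStats._make(fields)` / `QueueStats._make(fields)` as namedtuples instead of converting them with `._asdict()`, so consumers read `cntr.rx_ok` rather than `cntr['rx_ok']`. A small before/after sketch follows; `ns_diff` here is a stand-in with the behavior implied by its use in the diff (difference of two counter values rendered as a string), not the actual `utilities_common.netstat` implementation.

```python
# Sketch of the _asdict() -> attribute-access change.
from collections import namedtuple

NStats = namedtuple("NStats", "rx_ok, rx_err, tx_ok, tx_err")

def ns_diff(new, old):
    # Stand-in for utilities_common.netstat.ns_diff (assumed behavior).
    return "{:,}".format(int(new) - int(old))

fields = ["1500", "2", "900", "0"]

# Old style: convert to an OrderedDict and index by key.
old_style = NStats._make(fields)._asdict()
assert old_style['rx_ok'] == "1500"

# New style: keep the namedtuple and use attribute access.
cntr = NStats._make(fields)
old_cntr = NStats._make(["1000", "1", "800", "0"])
row = (ns_diff(cntr.rx_ok, old_cntr.rx_ok),
       ns_diff(cntr.rx_err, old_cntr.rx_err),
       ns_diff(cntr.tx_ok, old_cntr.tx_ok),
       ns_diff(cntr.tx_err, old_cntr.tx_err))
print(row)  # ('500', '1', '100', '0')
```

Dropping `._asdict()` avoids building a throwaway dict per port, PG, or queue, and it is what lets the pickled checkpoints hold the namedtuples directly. Since dict-style indexing like `cntr['rx_ok']` raises `TypeError` on a namedtuple, every consumer of the cached stats is updated to attribute access in the same diff.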