From c05845df1567c6d14312bb36f35c72c7ea6a93ee Mon Sep 17 00:00:00 2001 From: Junchao-Mellanox <57339448+Junchao-Mellanox@users.noreply.github.com> Date: Thu, 25 Nov 2021 08:32:20 +0800 Subject: [PATCH] Add trap flow counter support (#1868) Add flowcnt commands * counterpoll flowcnt-trap enable/disable * counterpoll flowcnt-trap interval * show flowcnt-trap stats --- clear/main.py | 8 + config/main.py | 5 +- counterpoll/main.py | 40 +++- scripts/flow_counters_stat | 283 +++++++++++++++++++++++ setup.py | 1 + show/flow_counters.py | 22 ++ show/main.py | 2 + tests/counterpoll_input/config_db.json | 3 + tests/counterpoll_test.py | 38 ++- tests/flow_counter_stats_test.py | 209 +++++++++++++++++ tests/mock_tables/asic0/counters_db.json | 10 + tests/mock_tables/asic1/counters_db.json | 10 + tests/mock_tables/config_db.json | 4 + tests/mock_tables/counters_db.json | 10 + 14 files changed, 641 insertions(+), 4 deletions(-) create mode 100755 scripts/flow_counters_stat create mode 100644 show/flow_counters.py create mode 100644 tests/flow_counter_stats_test.py diff --git a/clear/main.py b/clear/main.py index c436f6c1d817..3ba0a1d7358c 100755 --- a/clear/main.py +++ b/clear/main.py @@ -482,6 +482,14 @@ def statistics(db): def remap_keys(dict): return [{'key': k, 'value': v} for k, v in dict.items()] +# ("sonic-clear flowcnt-trap") +@cli.command() +def flowcnt_trap(): + """ Clear trap flow counters """ + command = "flow_counters_stat -c -t trap" + run_command(command) + + # Load plugins and register them helper = util_base.UtilHelper() helper.load_and_register_plugins(plugins, cli) diff --git a/config/main.py b/config/main.py index 9d804b47d597..f684116286b8 100644 --- a/config/main.py +++ b/config/main.py @@ -5948,7 +5948,7 @@ def rate(): @rate.command() @click.argument('interval', metavar='', type=click.IntRange(min=1, max=1000), required=True) -@click.argument('rates_type', type=click.Choice(['all', 'port', 'rif']), default='all') +@click.argument('rates_type', type=click.Choice(['all', 'port', 'rif', 'flowcnt-trap']), default='all') def smoothing_interval(interval, rates_type): """Set rates smoothing interval """ counters_db = swsssdk.SonicV2Connector() @@ -5962,6 +5962,9 @@ def smoothing_interval(interval, rates_type): if rates_type in ['rif', 'all']: counters_db.set('COUNTERS_DB', 'RATES:RIF', 'RIF_SMOOTH_INTERVAL', interval) counters_db.set('COUNTERS_DB', 'RATES:RIF', 'RIF_ALPHA', alpha) + if rates_type in ['flowcnt-trap', 'all']: + counters_db.set('COUNTERS_DB', 'RATES:TRAP', 'TRAP_SMOOTH_INTERVAL', interval) + counters_db.set('COUNTERS_DB', 'RATES:TRAP', 'TRAP_ALPHA', alpha) # Load plugins and register them diff --git a/counterpoll/main.py b/counterpoll/main.py index e61d0e96ccef..e5894e16767f 100644 --- a/counterpoll/main.py +++ b/counterpoll/main.py @@ -54,12 +54,12 @@ def disable(): # Port counter commands @cli.group() def port(): - """ Queue counter commands """ + """ Port counter commands """ @port.command() @click.argument('poll_interval', type=click.IntRange(100, 30000)) def interval(poll_interval): - """ Set queue counter query interval """ + """ Set port counter query interval """ configdb = ConfigDBConnector() configdb.connect() port_info = {} @@ -314,6 +314,39 @@ def disable(): tunnel_info['FLEX_COUNTER_STATUS'] = DISABLE configdb.mod_entry("FLEX_COUNTER_TABLE", "TUNNEL", tunnel_info) +# Trap flow counter commands +@cli.group() +@click.pass_context +def flowcnt_trap(ctx): + """ Trap flow counter commands """ + ctx.obj = ConfigDBConnector() + ctx.obj.connect() + 
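+# Note (illustrative): the subcommands below write the FLEX_COUNTER_TABLE|FLOW_CNT_TRAP
+# entry in CONFIG_DB. For example, after "counterpoll flowcnt-trap enable" followed by
+# "counterpoll flowcnt-trap interval 10000" the entry is expected to contain
+# {'FLEX_COUNTER_STATUS': 'enable', 'POLL_INTERVAL': '10000'}, matching the mock entry
+# added to tests/mock_tables/config_db.json later in this patch.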
+@flowcnt_trap.command() +@click.argument('poll_interval', type=click.IntRange(1000, 30000)) +@click.pass_context +def interval(ctx, poll_interval): + """ Set trap flow counter query interval """ + fc_info = {} + fc_info['POLL_INTERVAL'] = poll_interval + ctx.obj.mod_entry("FLEX_COUNTER_TABLE", "FLOW_CNT_TRAP", fc_info) + +@flowcnt_trap.command() +@click.pass_context +def enable(ctx): + """ Enable trap flow counter query """ + fc_info = {} + fc_info['FLEX_COUNTER_STATUS'] = 'enable' + ctx.obj.mod_entry("FLEX_COUNTER_TABLE", "FLOW_CNT_TRAP", fc_info) + +@flowcnt_trap.command() +@click.pass_context +def disable(ctx): + """ Disable trap flow counter query """ + fc_info = {} + fc_info['FLEX_COUNTER_STATUS'] = 'disable' + ctx.obj.mod_entry("FLEX_COUNTER_TABLE", "FLOW_CNT_TRAP", fc_info) + @cli.command() def show(): """ Show the counter configuration """ @@ -329,6 +362,7 @@ def show(): buffer_pool_wm_info = configdb.get_entry('FLEX_COUNTER_TABLE', BUFFER_POOL_WATERMARK) acl_info = configdb.get_entry('FLEX_COUNTER_TABLE', ACL) tunnel_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'TUNNEL') + trap_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'FLOW_CNT_TRAP') header = ("Type", "Interval (in ms)", "Status") data = [] @@ -352,6 +386,8 @@ def show(): data.append([ACL, pg_drop_info.get("POLL_INTERVAL", DEFLT_10_SEC), acl_info.get("FLEX_COUNTER_STATUS", DISABLE)]) if tunnel_info: data.append(["TUNNEL_STAT", rif_info.get("POLL_INTERVAL", DEFLT_10_SEC), rif_info.get("FLEX_COUNTER_STATUS", DISABLE)]) + if trap_info: + data.append(["FLOW_CNT_TRAP_STAT", trap_info.get("POLL_INTERVAL", DEFLT_10_SEC), trap_info.get("FLEX_COUNTER_STATUS", DISABLE)]) click.echo(tabulate(data, headers=header, tablefmt="simple", missingval="")) diff --git a/scripts/flow_counters_stat b/scripts/flow_counters_stat new file mode 100755 index 000000000000..0c2ec80575d9 --- /dev/null +++ b/scripts/flow_counters_stat @@ -0,0 +1,283 @@ +#!/usr/bin/env python3 + +import argparse +import os +import _pickle as pickle +import sys + +from natsort import natsorted +from tabulate import tabulate + +# mock the redis for unit test purposes # +try: + if os.environ["UTILITIES_UNIT_TESTING"] == "2": + modules_path = os.path.join(os.path.dirname(__file__), "..") + tests_path = os.path.join(modules_path, "tests") + sys.path.insert(0, modules_path) + sys.path.insert(0, tests_path) + import mock_tables.dbconnector + if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic": + import mock_tables.mock_multi_asic + mock_tables.dbconnector.load_namespace_config() + +except KeyError: + pass + +import utilities_common.multi_asic as multi_asic_util +from utilities_common.netstat import format_number_with_comma, table_as_json, ns_diff, format_prate + +# Flow counter meta data, new type of flow counters can extend this dictinary to reuse existing logic +flow_counter_meta = { + 'trap': { + 'headers': ['Trap Name', 'Packets', 'Bytes', 'PPS'], + 'name_map': 'COUNTERS_TRAP_NAME_MAP', + } +} +flow_counters_fields = ['SAI_COUNTER_STAT_PACKETS', 'SAI_COUNTER_STAT_BYTES'] + +# Only do diff for 'Packets' and 'Bytes' +diff_column_positions = set([0, 1]) + +FLOW_COUNTER_TABLE_PREFIX = "COUNTERS:" +RATES_TABLE_PREFIX = 'RATES:' +PPS_FIELD = 'RX_PPS' +STATUS_NA = 'N/A' + + +class FlowCounterStats(object): + def __init__(self, args): + self.db = None + self.multi_asic = multi_asic_util.MultiAsic(namespace_option=args.namespace) + self.args = args + meta_data = flow_counter_meta[args.type] + self.name_map = meta_data['name_map'] + self.headers = meta_data['headers'] 
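+        # name_map/headers come from flow_counter_meta: for the 'trap' type this selects
+        # COUNTERS_TRAP_NAME_MAP in COUNTERS_DB and the four output columns defined above.
+        # The per-user cache file below stores the snapshot taken by the clear operation.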
+ self.data_file = os.path.join('/tmp/{}-stats-{}'.format(args.type, os.getuid())) + if self.args.delete and os.path.exists(self.data_file): + os.remove(self.data_file) + self.data = {} + + def show(self): + """Show flow counter statistic + """ + self._collect_and_diff() + headers, table = self._prepare_show_data() + self._print_data(headers, table) + + def _collect_and_diff(self): + """Collect statistic from db and diff from old data if any + """ + self._collect() + old_data = self._load() + need_update_cache = self._diff(old_data, self.data) + if need_update_cache: + self._save(old_data) + + def _adjust_headers(self, headers): + """Adjust table headers based on platforms + + Args: + headers (list): Original headers + + Returns: + headers (list): Headers with 'ASIC ID' column if it is a multi ASIC platform + """ + return ['ASIC ID'] + headers if self.multi_asic.is_multi_asic else headers + + def _prepare_show_data(self): + """Prepare headers and table data for output + + Returns: + headers (list): Table headers + table (list): Table data + """ + table = [] + headers = self._adjust_headers(self.headers) + + for ns, stats in natsorted(self.data.items()): + if self.args.namespace is not None and self.args.namespace != ns: + continue + for name, values in natsorted(stats.items()): + if self.multi_asic.is_multi_asic: + row = [ns] + else: + row = [] + row.extend([name, format_number_with_comma(values[0]), format_number_with_comma(values[1]), format_prate(values[2])]) + table.append(row) + + return headers, table + + def _print_data(self, headers, table): + """Print statistic data based on output format + + Args: + headers (list): Table headers + table (list): Table data + """ + if self.args.json: + print(table_as_json(table, headers)) + else: + print(tabulate(table, headers, tablefmt='simple', stralign='right')) + + def clear(self): + """Clear flow counter statistic. This function does not clear data from ASIC. Instead, it saves flow counter statistic to a file. When user + issue show command after clear, it does a diff between new data and saved data. + """ + self._collect() + self._save(self.data) + print('Flow Counters were successfully cleared') + + @multi_asic_util.run_on_multi_asic + def _collect(self): + """Collect flow counter statistic from DB. This function is called on a multi ASIC context. + """ + self.data.update(self._get_stats_from_db()) + + def _get_stats_from_db(self): + """Get flow counter statistic from DB. + + Returns: + dict: A dictionary. 
E.g: {: {: [, , , ]}} + """ + ns = self.multi_asic.current_namespace + name_map = self.db.get_all(self.db.COUNTERS_DB, self.name_map) + data = {ns: {}} + if not name_map: + return data + + for name, counter_oid in name_map.items(): + values = self._get_stats_value(counter_oid) + + full_table_id = RATES_TABLE_PREFIX + counter_oid + counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, PPS_FIELD) + values.append(STATUS_NA if counter_data is None else counter_data) + values.append(counter_oid) + data[ns][name] = values + return data + + def _get_stats_value(self, counter_oid): + """Get statistic value from COUNTERS_DB COUNTERS table + + Args: + counter_oid (string): OID of a generic counter + + Returns: + values (list): A list of statistics value + """ + values = [] + full_table_id = FLOW_COUNTER_TABLE_PREFIX + counter_oid + for field in flow_counters_fields: + counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, field) + values.append(STATUS_NA if counter_data is None else counter_data) + return values + + def _save(self, data): + """Save flow counter statistic to a file + """ + try: + if os.path.exists(self.data_file): + os.remove(self.data_file) + + with open(self.data_file, 'wb') as f: + pickle.dump(data, f) + except IOError as e: + print('Failed to save statistic - {}'.format(repr(e))) + + def _load(self): + """Load flow counter statistic from a file + + Returns: + dict: A dictionary. E.g: {: {: [, , , ]}} + """ + if not os.path.exists(self.data_file): + return None + + try: + with open(self.data_file, 'rb') as f: + data = pickle.load(f) + except IOError as e: + print('Failed to load statistic - {}'.format(repr(e))) + return None + + return data + + def _diff(self, old_data, new_data): + """Do a diff between new data and old data. + + Args: + old_data (dict): E.g: {: {: [, , , ]}} + new_data (dict): E.g: {: {: [, , , ]}} + + Returns: + bool: True if cache need to be updated + """ + if not old_data: + return False + + need_update_cache = False + for ns, stats in new_data.items(): + if ns not in old_data: + continue + old_stats = old_data[ns] + for name, values in stats.items(): + if name not in old_stats: + continue + + old_values = old_stats[name] + if values[-1] != old_values[-1]: + # Counter OID not equal means the trap was removed and added again. Removing a trap would cause + # the stats value restart from 0. To avoid get minus value here, it should not do diff in case + # counter OID is changed. 
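+                    # In that case the cached baseline is reset to zero and the new OID is
+                    # recorded, so the next diff starts from the fresh counter instead of
+                    # producing a negative value.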
+ old_values[-1] = values[-1] + for i in diff_column_positions: + old_values[i] = 0 + values[i] = ns_diff(values[i], old_values[i]) + need_update_cache = True + continue + + has_negative_diff = False + for i in diff_column_positions: + # If any diff has negative value, set all counter values to 0 and update cache + if values[i] < old_values[i]: + has_negative_diff = True + break + + if has_negative_diff: + for i in diff_column_positions: + old_values[i] = 0 + values[i] = ns_diff(values[i], old_values[i]) + need_update_cache = True + continue + + for i in diff_column_positions: + values[i] = ns_diff(values[i], old_values[i]) + + return need_update_cache + + +def main(): + parser = argparse.ArgumentParser(description='Display the flow counters', + formatter_class=argparse.RawTextHelpFormatter, + epilog=""" +Examples: + flow_counters_stat -c -t trap + flow_counters_stat -t trap + flow_counters_stat -d -t trap +""") + parser.add_argument('-c', '--clear', action='store_true', help='Copy & clear stats') + parser.add_argument('-d', '--delete', action='store_true', help='Delete saved stats') + parser.add_argument('-j', '--json', action='store_true', help='Display in JSON format') + parser.add_argument('-n','--namespace', default=None, help='Display flow counters for specific namespace') + parser.add_argument('-t', '--type', required=True, choices=['trap'],help='Flow counters type') + + args = parser.parse_args() + + stats = FlowCounterStats(args) + if args.clear: + stats.clear() + else: + stats.show() + + +if __name__ == '__main__': + main() diff --git a/setup.py b/setup.py index c725846b5426..37a4769734c4 100644 --- a/setup.py +++ b/setup.py @@ -101,6 +101,7 @@ 'scripts/fast-reboot-dump.py', 'scripts/fdbclear', 'scripts/fdbshow', + 'scripts/flow_counters_stat', 'scripts/gearboxutil', 'scripts/generate_dump', 'scripts/generate_shutdown_order.py', diff --git a/show/flow_counters.py b/show/flow_counters.py new file mode 100644 index 000000000000..9870c8308052 --- /dev/null +++ b/show/flow_counters.py @@ -0,0 +1,22 @@ +import click +import utilities_common.cli as clicommon +import utilities_common.multi_asic as multi_asic_util + +# +# 'flowcnt-trap' group ### +# + +@click.group(cls=clicommon.AliasedGroup) +def flowcnt_trap(): + """Show trap flow counter related information""" + pass + +@flowcnt_trap.command() +@click.option('--verbose', is_flag=True, help="Enable verbose output") +@click.option('--namespace', '-n', 'namespace', default=None, type=click.Choice(multi_asic_util.multi_asic_ns_choices()), show_default=True, help='Namespace name or all') +def stats(verbose, namespace): + """Show trap flow counter statistic""" + cmd = "flow_counters_stat -t trap" + if namespace is not None: + cmd += " -n {}".format(namespace) + clicommon.run_command(cmd, display_cmd=verbose) diff --git a/show/main.py b/show/main.py index 4842f3f952a4..b68f0f6fa896 100755 --- a/show/main.py +++ b/show/main.py @@ -40,6 +40,7 @@ from . import dropcounters from . import feature from . import fgnhg +from . import flow_counters from . import gearbox from . import interfaces from . 
import kdump @@ -178,6 +179,7 @@ def cli(ctx): cli.add_command(dropcounters.dropcounters) cli.add_command(feature.feature) cli.add_command(fgnhg.fgnhg) +cli.add_command(flow_counters.flowcnt_trap) cli.add_command(kdump.kdump) cli.add_command(interfaces.interfaces) cli.add_command(kdump.kdump) diff --git a/tests/counterpoll_input/config_db.json b/tests/counterpoll_input/config_db.json index c22bf22088c6..61ceb071c2e5 100644 --- a/tests/counterpoll_input/config_db.json +++ b/tests/counterpoll_input/config_db.json @@ -784,6 +784,9 @@ }, "PORT": { "FLEX_COUNTER_STATUS": "enable" + }, + "FLOW_CNT_TRAP": { + "FLEX_COUNTER_STATUS": "enable" } }, "PORT": { diff --git a/tests/counterpoll_test.py b/tests/counterpoll_test.py index f1a4cdaaa152..71d7e914aa12 100644 --- a/tests/counterpoll_test.py +++ b/tests/counterpoll_test.py @@ -3,7 +3,6 @@ import os import pytest import sys -import time from click.testing import CliRunner from shutil import copyfile from utilities_common.db import Db @@ -27,6 +26,7 @@ PG_WATERMARK_STAT 10000 enable PG_DROP_STAT 10000 enable ACL 10000 enable +FLOW_CNT_TRAP_STAT 10000 enable """ class TestCounterpoll(object): @@ -143,6 +143,42 @@ def test_update_acl_interval(self): table = db.cfgdb.get_table("FLEX_COUNTER_TABLE") assert test_interval == table["ACL"]["POLL_INTERVAL"] + @pytest.mark.parametrize("status", ["disable", "enable"]) + def test_update_trap_counter_status(self, status): + runner = CliRunner() + db = Db() + + result = runner.invoke(counterpoll.cli.commands["flowcnt-trap"].commands[status], [], obj=db.cfgdb) + print(result.exit_code, result.output) + assert result.exit_code == 0 + + table = db.cfgdb.get_table('FLEX_COUNTER_TABLE') + assert status == table["FLOW_CNT_TRAP"]["FLEX_COUNTER_STATUS"] + + def test_update_trap_counter_interval(self): + runner = CliRunner() + db = Db() + test_interval = "20000" + + result = runner.invoke(counterpoll.cli.commands["flowcnt-trap"].commands["interval"], [test_interval], obj=db.cfgdb) + print(result.exit_code, result.output) + assert result.exit_code == 0 + + table = db.cfgdb.get_table('FLEX_COUNTER_TABLE') + assert test_interval == table["FLOW_CNT_TRAP"]["POLL_INTERVAL"] + + test_interval = "500" + result = runner.invoke(counterpoll.cli.commands["flowcnt-trap"].commands["interval"], [test_interval], obj=db.cfgdb) + expected = "Invalid value for \"POLL_INTERVAL\": 500 is not in the valid range of 1000 to 30000." + assert result.exit_code == 2 + assert expected in result.output + + test_interval = "40000" + result = runner.invoke(counterpoll.cli.commands["flowcnt-trap"].commands["interval"], [test_interval], obj=db.cfgdb) + expected = "Invalid value for \"POLL_INTERVAL\": 40000 is not in the valid range of 1000 to 30000." 
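+        # click.IntRange(1000, 30000) on the interval argument rejects out-of-range
+        # values, and click exits with code 2 on such usage errors.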
+ assert result.exit_code == 2 + assert expected in result.output + @classmethod def teardown_class(cls): print("TEARDOWN") diff --git a/tests/flow_counter_stats_test.py b/tests/flow_counter_stats_test.py new file mode 100644 index 000000000000..61c938a06350 --- /dev/null +++ b/tests/flow_counter_stats_test.py @@ -0,0 +1,209 @@ +import importlib +import os +import sys + +from click.testing import CliRunner +from unittest import mock + +import show.main as show +import clear.main as clear + +from .utils import get_result_and_return_code +from utilities_common.general import load_module_from_source + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, scripts_path) + +flow_counters_stat_path = os.path.join(scripts_path, 'flow_counters_stat') +flow_counters_stat = load_module_from_source('flow_counters_stat', flow_counters_stat_path) + +expect_show_output = """\ + Trap Name Packets Bytes PPS +----------- --------- ------- ------- + dhcp 100 2,000 50.25/s +""" + +expect_show_output_json = """\ +{ + "dhcp": { + "Bytes": "2,000", + "PPS": "50.25/s", + "Packets": "100" + } +} +""" + +expect_show_output_after_clear = """\ + Trap Name Packets Bytes PPS +----------- --------- ------- ------- + dhcp 0 0 50.25/s +""" + +expect_show_output_multi_asic = """\ + ASIC ID Trap Name Packets Bytes PPS +--------- ----------- --------- ------- ------- + asic0 dhcp 100 2,000 50.25/s + asic1 dhcp 200 3,000 45.25/s +""" + +expect_show_output_json_multi_asic = """\ +{ + "asic0": { + "Bytes": "2,000", + "PPS": "50.25/s", + "Packets": "100", + "Trap Name": "dhcp" + }, + "asic1": { + "Bytes": "3,000", + "PPS": "45.25/s", + "Packets": "200", + "Trap Name": "dhcp" + } +} +""" + +expect_show_output_multi_asic_after_clear = """\ + ASIC ID Trap Name Packets Bytes PPS +--------- ----------- --------- ------- ------- + asic0 dhcp 0 0 50.25/s + asic1 dhcp 0 0 45.25/s +""" + + +def delete_cache(): + cmd = 'flow_counters_stat -t trap -d' + get_result_and_return_code(cmd) + + +class TestTrapStat: + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "2" + delete_cache() + + def test_show(self): + runner = CliRunner() + result = runner.invoke( + show.cli.commands["flowcnt-trap"].commands["stats"], + [] + ) + print(result.output) + + assert result.exit_code == 0 + assert result.output == expect_show_output + + def test_show_json(self): + cmd = 'flow_counters_stat -t trap -j' + return_code, result = get_result_and_return_code(cmd) + assert return_code == 0 + assert result == expect_show_output_json + + def test_clear(self): + runner = CliRunner() + result = runner.invoke( + clear.cli.commands["flowcnt-trap"], + [] + ) + print(result.output) + + assert result.exit_code == 0 + + result = runner.invoke( + show.cli.commands["flowcnt-trap"].commands["stats"], + [] + ) + print(result.output) + + assert result.exit_code == 0 + assert result.output == expect_show_output_after_clear + + def test_diff(self): + args = mock.MagicMock() + args.type = 'trap' + args.delete = False + args.namespace = None + args.json = False + stats = flow_counters_stat.FlowCounterStats(args) + stats._collect = mock.MagicMock() + old_data = { + '': { + 'bgp': [100, 200, 50.0, 1], + 'bgpv6': [100, 200, 50.0, 2], + 'lldp': [100, 200, 50.0, 3], + } + } + stats._save(old_data) + stats.data = { + '': { + 'bgp': [100, 200, 50.0, 4], + 'bgpv6': [100, 100, 50.0, 
2], + 'lldp': [200, 300, 50.0, 3], + } + } + + stats._collect_and_diff() + cached_data = stats._load() + assert cached_data['']['bgp'] == [0, 0, 50.0, 4] + assert cached_data['']['bgpv6'] == [0, 0, 50.0, 2] + assert cached_data['']['lldp'] == [100, 200, 50.0, 3] + + +class TestTrapStatsMultiAsic: + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + delete_cache() + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ["PATH"] = os.pathsep.join( + os.environ["PATH"].split(os.pathsep)[:-1] + ) + os.environ["UTILITIES_UNIT_TESTING"] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + delete_cache() + from .mock_tables import mock_single_asic + importlib.reload(mock_single_asic) + + def test_show(self): + runner = CliRunner() + result = runner.invoke( + show.cli.commands["flowcnt-trap"].commands["stats"], + [] + ) + print(result.output) + + assert result.exit_code == 0 + assert result.output == expect_show_output_multi_asic + + def test_show_json(self): + cmd = 'flow_counters_stat -t trap -j' + return_code, result = get_result_and_return_code(cmd) + assert return_code == 0 + assert result == expect_show_output_json_multi_asic + + def test_clear(self): + runner = CliRunner() + result = runner.invoke( + clear.cli.commands["flowcnt-trap"], + [] + ) + print(result.output) + + result = runner.invoke( + show.cli.commands["flowcnt-trap"].commands["stats"], + [] + ) + print(result.output) + + assert result.exit_code == 0 + assert result.output == expect_show_output_multi_asic_after_clear diff --git a/tests/mock_tables/asic0/counters_db.json b/tests/mock_tables/asic0/counters_db.json index 05b956ffa61b..9b1688c74370 100644 --- a/tests/mock_tables/asic0/counters_db.json +++ b/tests/mock_tables/asic0/counters_db.json @@ -1805,5 +1805,15 @@ "crm_stats_acl_entry_used":"0", "crm_stats_acl_counter_available":"1280", "crm_stats_acl_entry_available":"1024" + }, + "COUNTERS_TRAP_NAME_MAP":{ + "dhcp": "oid:0x1500000000034e" + }, + "COUNTERS:oid:0x1500000000034e":{ + "SAI_COUNTER_STAT_PACKETS": 100, + "SAI_COUNTER_STAT_BYTES": 2000 + }, + "RATES:oid:0x1500000000034e":{ + "RX_PPS": 50.25 } } diff --git a/tests/mock_tables/asic1/counters_db.json b/tests/mock_tables/asic1/counters_db.json index 26e2ac033ffe..720b0f099f3a 100644 --- a/tests/mock_tables/asic1/counters_db.json +++ b/tests/mock_tables/asic1/counters_db.json @@ -1000,5 +1000,15 @@ "crm_stats_acl_entry_used":"0", "crm_stats_acl_counter_available":"1280", "crm_stats_acl_entry_available":"1024" + }, + "COUNTERS_TRAP_NAME_MAP":{ + "dhcp": "oid:0x1500000000034e" + }, + "COUNTERS:oid:0x1500000000034e":{ + "SAI_COUNTER_STAT_PACKETS": 200, + "SAI_COUNTER_STAT_BYTES": 3000 + }, + "RATES:oid:0x1500000000034e":{ + "RX_PPS": 45.25 } } diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index ad76a492dcea..02c336ee3b85 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -1557,6 +1557,10 @@ "POLL_INTERVAL": "10000", "FLEX_COUNTER_STATUS": "enable" }, + "FLEX_COUNTER_TABLE|FLOW_CNT_TRAP": { + "POLL_INTERVAL": "10000", + "FLEX_COUNTER_STATUS": "enable" + }, "PFC_WD|Ethernet0": { "action": "drop", "detection_time": "600", diff --git a/tests/mock_tables/counters_db.json b/tests/mock_tables/counters_db.json index 603a8c7f8e86..186d07b6461d 100644 --- a/tests/mock_tables/counters_db.json +++ 
b/tests/mock_tables/counters_db.json @@ -2030,5 +2030,15 @@ "crm_stats_acl_entry_used":"0", "crm_stats_acl_counter_available":"1280", "crm_stats_acl_entry_available":"1024" + }, + "COUNTERS_TRAP_NAME_MAP":{ + "dhcp": "oid:0x1500000000034e" + }, + "COUNTERS:oid:0x1500000000034e":{ + "SAI_COUNTER_STAT_PACKETS": 100, + "SAI_COUNTER_STAT_BYTES": 2000 + }, + "RATES:oid:0x1500000000034e":{ + "RX_PPS": 50.25 } }
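
For reference, the read path that scripts/flow_counters_stat follows can be reproduced in a few lines against COUNTERS_DB. The sketch below is illustrative only and is not part of the patch; it assumes a host where swsssdk is available and uses the same key layout as the mock tables above (COUNTERS_TRAP_NAME_MAP, COUNTERS:<oid>, RATES:<oid>).

#!/usr/bin/env python3
# Illustrative sketch (not part of this patch): dump trap flow counters the same
# way flow_counters_stat reads them, assuming swsssdk and a populated COUNTERS_DB.
import swsssdk

db = swsssdk.SonicV2Connector()
db.connect(db.COUNTERS_DB)

# Trap name -> generic counter OID, as populated by the flex counter orchestration.
name_map = db.get_all(db.COUNTERS_DB, 'COUNTERS_TRAP_NAME_MAP') or {}
for trap_name, counter_oid in name_map.items():
    packets = db.get(db.COUNTERS_DB, 'COUNTERS:' + counter_oid, 'SAI_COUNTER_STAT_PACKETS')
    byte_cnt = db.get(db.COUNTERS_DB, 'COUNTERS:' + counter_oid, 'SAI_COUNTER_STAT_BYTES')
    pps = db.get(db.COUNTERS_DB, 'RATES:' + counter_oid, 'RX_PPS')
    print(trap_name, packets or 'N/A', byte_cnt or 'N/A', pps or 'N/A')

Note that "sonic-clear flowcnt-trap" does not reset anything in the ASIC: it snapshots these values to a per-user file under /tmp (see data_file in FlowCounterStats), and subsequent "show flowcnt-trap stats" output is the difference against that snapshot, with the counter OID cached alongside the values so traps that were removed and re-added are detected.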