From 6fd06755ab3762888cb26512c5aebccbcf3cbcf9 Mon Sep 17 00:00:00 2001 From: Vadym Hlushko <62022266+vadymhlushko-mlnx@users.noreply.github.com> Date: Wed, 25 Aug 2021 21:38:41 +0300 Subject: [PATCH] [PBH] CLI for Policy Based Hashing (#1701) - What I did Created click CLI plugins for the PBH feature - How I did it The CLI plugins were auto-generated (using sonic-cli-gen) for the show and config CLI groups, then manually edited to meet the PBH CLI requirements according to the PBH HLD - How to verify it Added the unit tests (UT) --- clear/main.py | 30 +- config/plugins/pbh.py | 851 ++++++++++++++++++++ doc/Command-Reference.md | 300 ++++++++ show/plugins/pbh.py | 469 +++++++++++ tests/pbh_input/assert_show_output.py | 92 +++ tests/pbh_input/counters_db.json | 10 + tests/pbh_input/counters_db_updated.json | 10 + tests/pbh_input/full_pbh_config.json | 81 ++ tests/pbh_input/hash.json | 40 + tests/pbh_input/hash_fields.json | 34 + tests/pbh_input/rule.json | 50 ++ tests/pbh_input/table.json | 17 + tests/pbh_test.py | 939 +++++++++++++++++++++++ 13 files changed, 2921 insertions(+), 2 deletions(-) create mode 100644 config/plugins/pbh.py create mode 100644 show/plugins/pbh.py create mode 100644 tests/pbh_input/assert_show_output.py create mode 100644 tests/pbh_input/counters_db.json create mode 100644 tests/pbh_input/counters_db_updated.json create mode 100644 tests/pbh_input/full_pbh_config.json create mode 100644 tests/pbh_input/hash.json create mode 100644 tests/pbh_input/hash_fields.json create mode 100644 tests/pbh_input/rule.json create mode 100644 tests/pbh_input/table.json create mode 100644 tests/pbh_test.py diff --git a/clear/main.py b/clear/main.py index 4302ae00aab1..8f93597b68da 100755 --- a/clear/main.py +++ b/clear/main.py @@ -2,11 +2,12 @@ import os import subprocess import sys - import click +import utilities_common.cli as clicommon +import json from utilities_common import util_base - +from show.plugins.pbh import read_pbh_counters from . import plugins @@ -449,6 +450,31 @@ def translations(): cmd = "natclear -t" run_command(cmd) +# 'pbh' group ("clear pbh ...") +@cli.group(cls=AliasedGroup) +def pbh(): + """ Clear the PBH info """ + pass + +# 'statistics' subcommand ("clear pbh statistics") +@pbh.command() +@clicommon.pass_db +def statistics(db): + """ Clear PBH counters + clear counters -- write current counters to file in /tmp + """ + + pbh_rules = db.cfgdb.get_table("PBH_RULE") + pbh_counters = read_pbh_counters(pbh_rules) + + try: + with open('/tmp/.pbh_counters.txt', 'w') as fp: + json.dump(remap_keys(pbh_counters), fp) + except IOError as err: + pass + +def remap_keys(dict): + return [{'key': k, 'value': v} for k, v in dict.items()] # Load plugins and register them helper = util_base.UtilHelper() diff --git a/config/plugins/pbh.py b/config/plugins/pbh.py new file mode 100644 index 000000000000..e5e5f0fdde81 --- /dev/null +++ b/config/plugins/pbh.py @@ -0,0 +1,851 @@ +""" +This CLI plugin was auto-generated by using 'sonic-cli-gen' utility, BUT +it was manually modified to meet the PBH HLD requirements.
+ +PBH HLD - https://github.com/Azure/SONiC/pull/773 +CLI Auto-generation tool HLD - https://github.com/Azure/SONiC/pull/78 +""" + +import click +import ipaddress +import re +import utilities_common.cli as clicommon + +hash_field_types = [ + 'INNER_IP_PROTOCOL', + 'INNER_L4_DST_PORT', + 'INNER_L4_SRC_PORT', + 'INNER_DST_IPV4', + 'INNER_SRC_IPV4', + 'INNER_DST_IPV6', + 'INNER_SRC_IPV6' +] +packet_action_types = ['SET_ECMP_HASH', 'SET_LAG_HASH'] +flow_counter_state = ['DISABLED', 'ENABLED'] + +gre_key_re = r"^(0x){1}[a-fA-F0-9]{1,8}/(0x){1}[a-fA-F0-9]{1,8}$" +ip_protocol_re = r"^(0x){1}[a-fA-F0-9]{1,2}$" +ipv6_next_header_re = ip_protocol_re +l4_dst_port_re = r"^(0x){1}[a-fA-F0-9]{1,4}$" +inner_ether_type_re = l4_dst_port_re +ether_type_re = l4_dst_port_re + +pbh_hash_field_tbl_name = 'PBH_HASH_FIELD' +pbh_hash_tbl_name = 'PBH_HASH' +pbh_table_tbl_name = 'PBH_TABLE' + + +def exit_with_error(*args, **kwargs): + """ Print a message and abort CLI. """ + + click.secho(*args, **kwargs) + raise click.Abort() + + +def add_entry(db, table, key, data): + """ Add new entry in table """ + + cfg = db.get_config() + cfg.setdefault(table, {}) + if key in cfg[table]: + raise Exception("{} already exists".format(key)) + + cfg[table][key] = data + + db.set_entry(table, key, data) + + +def update_entry(db, table, key, data, create_if_not_exists=False): + """ Update entry in table and validate configuration. + If attribute value in data is None, the attribute is deleted. + """ + + cfg = db.get_config() + cfg.setdefault(table, {}) + + if create_if_not_exists: + cfg[table].setdefault(key, {}) + + if key not in cfg[table]: + raise Exception("{} does not exist".format(key)) + + for attr, value in data.items(): + if value is None and attr in cfg[table][key]: + cfg[table][key].pop(attr) + else: + cfg[table][key][attr] = value + + db.set_entry(table, key, cfg[table][key]) + + +def del_entry(db, table, key): + """ Delete entry in table """ + + cfg = db.get_config() + cfg.setdefault(table, {}) + if key not in cfg[table]: + raise Exception("{} does not exist".format(key)) + + cfg[table].pop(key) + + db.set_entry(table, key, None) + + +def ip_address_validator(ctx, param, value): + """ Check if the given ip address is valid + + Args: + ctx: click context, + param: click parameter context, + value: value of parameter + + Returns: + str: ip address + """ + + if value is not None: + try: + ip = ipaddress.ip_address(value) + except Exception as e: + exit_with_error("Error: invalid value '{}' for '{}' option\n{}".format(value, param.name, e), fg="red") + + return str(ip) + + +def re_match(value, param_name, regexp): + """ Regexp validation of given parameter + + Args: + value: value to validate, + param_name: parameter name, + regexp: regular expression + + Return: + str: validated value + """ + + if re.match(regexp, str(value)) is None: + exit_with_error("Error: invalid value '{}' for '{}' option".format(str(value), param_name), fg="red") + + return value + + +def pbh_re_match_validator(ctx, param, value): + """ Check if PBH rule options are valid + + Args: + ctx: click context, + param: click parameter context, + value: value of parameter + + Returns: + str: validated parameter + """ + + if value is not None: + if param.name == 'gre_key': + return re_match(value, param.name, gre_key_re) + elif param.name == 'ip_protocol': + return re_match(value, param.name, ip_protocol_re) + elif param.name == 'ipv6_next_header': + return re_match(value, param.name, ipv6_next_header_re) + elif param.name == 'l4_dst_port': + return 
re_match(value, param.name, l4_dst_port_re) + elif param.name == 'inner_ether_type': + return re_match(value, param.name, inner_ether_type_re) + elif param.name == 'ether_type': + return re_match(value, param.name, ether_type_re) + + +def is_exist_in_db(db, obj_list, conf_db_key): + """ Check if provided CLI option already exist in Config DB, + i.g in case of --hash-field-list option it will check + if 'hash-field' was previously added by + 'config pbh hash-field ...' CLI command + + Args: + db: reference to Config DB, + obj_list: value of 'click' option + conf_db_key: key to search in Config DB + """ + + if obj_list is None: + return True + + table = db.cfgdb.get_table(conf_db_key) + correct_list = list(table.keys()) + + splited_list = obj_list.split(',') + + for elem in splited_list: + if elem not in correct_list: + return False + + return True + + +def ip_mask_hash_field_correspondence_validator(ip_mask, hash_field): + """ Check if the --ip-mask option are correspond to + the --hash-field option + + Args: + ip_mask: ip address or None, + hash_field: hash field value, which was configured before + """ + + hf_v4 = ['INNER_DST_IPV4', 'INNER_SRC_IPV4'] + hf_v6 = ['INNER_DST_IPV6', 'INNER_SRC_IPV6'] + hf_v4_and_v6 = hf_v4 + hf_v6 + hf_no_ip = ['INNER_IP_PROTOCOL', 'INNER_L4_DST_PORT', 'INNER_L4_SRC_PORT'] + + if (hash_field in hf_no_ip) and (ip_mask): + exit_with_error("Error: the value of '--hash-field'='{}' is NOT compatible with the value of '--ip-mask'='{}'".format(hash_field, ip_mask), fg='red') + + if (hash_field in hf_v4_and_v6) and (ip_mask is None): + exit_with_error("Error: the value of '--hash-field'='{}' is NOT compatible with the value of '--ip-mask'='{}'".format(hash_field, ip_mask), fg='red') + + if (ip_mask is not None): + ip_addr_version = ipaddress.ip_address(ip_mask).version + + if (hash_field in hf_v4) and (ip_addr_version != 4): + exit_with_error("Error: the value of '--hash-field'='{}' is NOT compatible with the value of '--ip-mask'='{}'".format(hash_field, ip_mask), fg='red') + + if (hash_field in hf_v6) and (ip_addr_version != 6): + exit_with_error("Error: the value of '--hash-field'='{}' is NOT compatible with the value of '--ip-mask'='{}'".format(hash_field, ip_mask), fg='red') + + +def ip_mask_hash_field_update_validator(db, hash_field_name, ip_mask, hash_field): + """ Function to validate --ip-mask and --hash-field + correspondence, during update flow + + Args: + db: reference to CONFIG DB, + hash_field_name: name of the hash-field, + ip_mask: ip address, + hash_field: native hash field value + """ + + if (ip_mask is None) and (hash_field is None): + return + + table = db.cfgdb.get_table(pbh_hash_field_tbl_name) + hash_field_obj = table[hash_field_name] + + if (ip_mask is None) and (hash_field is not None): + + try: + ip_mask = hash_field_obj['ip_mask'] + except Exception as e: + ip_mask = None + + ip_mask_hash_field_correspondence_validator(ip_mask, hash_field) + + if (ip_mask is not None) and (hash_field is None): + + hash_field = hash_field_obj['hash_field'] + + ip_mask_hash_field_correspondence_validator(ip_mask, hash_field) + + +def interfaces_list_validator(db, interface_list, is_update): + if is_update and (interface_list is None): + return + + is_error = False + interfaces_splited = interface_list.split(',') + + for intf in interfaces_splited: + if intf.startswith('Ethernet'): + if not clicommon.is_valid_port(db.cfgdb, intf): + is_error = True + break + elif intf.startswith('PortChannel'): + if not clicommon.is_valid_portchannel(db.cfgdb, intf): + is_error 
= True + break + else: + is_error = True + break + + if is_error: + exit_with_error("Error: invalid value '{}', for '--interface-list' option".format(interface_list), fg="red") + + +@click.group( + name='pbh', + cls=clicommon.AliasedGroup +) +def PBH(): + """ Configure PBH (Policy based hashing) feature """ + + pass + + +@PBH.group( + name="hash-field", + cls=clicommon.AliasedGroup +) +def PBH_HASH_FIELD(): + """ Configure PBH hash field """ + + pass + + +@PBH_HASH_FIELD.command(name="add") +@click.argument( + "hash-field-name", + nargs=1, + required=True, +) +@click.option( + "--hash-field", + help="Configures native hash field for this hash field", + required=True, + type=click.Choice(hash_field_types) +) +@click.option( + "--ip-mask", + help="""Configures IPv4/IPv6 address mask for this hash field, required when the value of --hash-field is - INNER_DST_IPV4 or INNER_SRC_IPV4 or INNER_SRC_IPV6 or INNER_SRC_IPV6""", + callback=ip_address_validator, +) +@click.option( + "--sequence-id", + help="Configures in which order the fields are hashed and defines which fields should be associative", + required=True, + type=click.INT, +) +@clicommon.pass_db +def PBH_HASH_FIELD_add(db, hash_field_name, hash_field, ip_mask, sequence_id): + """ Add object to PBH_HASH_FIELD table """ + + ip_mask_hash_field_correspondence_validator(ip_mask, hash_field) + + table = pbh_hash_field_tbl_name + key = hash_field_name + data = {} + if hash_field is not None: + data["hash_field"] = hash_field + if ip_mask is not None: + data["ip_mask"] = ip_mask + if sequence_id is not None: + data["sequence_id"] = sequence_id + + try: + add_entry(db.cfgdb, table, key, data) + except Exception as err: + exit_with_error("Error: {}".format(err), fg="red") + + +@PBH_HASH_FIELD.command(name="update") +@click.argument( + "hash-field-name", + nargs=1, + required=True, +) +@click.option( + "--hash-field", + help="Configures native hash field for this hash field", + type=click.Choice(hash_field_types) +) +@click.option( + "--ip-mask", + help="""Configures IPv4/IPv6 address mask for this hash field, required when the value of --hash-field is - INNER_DST_IPV4 or INNER_SRC_IPV4 or INNER_SRC_IPV6 or INNER_SRC_IPV6 """, + callback=ip_address_validator, +) +@click.option( + "--sequence-id", + help="Configures in which order the fields are hashed and defines which fields should be associative", + type=click.INT, +) +@clicommon.pass_db +def PBH_HASH_FIELD_update(db, hash_field_name, hash_field, ip_mask, sequence_id): + """ Update object in PBH_HASH_FIELD table """ + + ip_mask_hash_field_update_validator(db, hash_field_name, ip_mask, hash_field) + + table = pbh_hash_field_tbl_name + key = hash_field_name + data = {} + if hash_field is not None: + data["hash_field"] = hash_field + if ip_mask is not None: + data["ip_mask"] = ip_mask + if sequence_id is not None: + data["sequence_id"] = sequence_id + + try: + update_entry(db.cfgdb, table, key, data) + except Exception as err: + exit_with_error("Error: {}".format(err), fg="red") + + +@PBH_HASH_FIELD.command(name="delete") +@click.argument( + "hash-field-name", + nargs=1, + required=True, +) +@clicommon.pass_db +def PBH_HASH_FIELD_delete(db, hash_field_name): + """ Delete object from PBH_HASH_FIELD table """ + + table = pbh_hash_field_tbl_name + key = hash_field_name + try: + del_entry(db.cfgdb, table, key) + except Exception as err: + exit_with_error("Error: {}".format(err), fg="red") + + +@PBH.group( + name="hash", + cls=clicommon.AliasedGroup +) +def PBH_HASH(): + """ Configure PBH hash """ + + 
pass + + +@PBH_HASH.command(name="add") +@click.argument( + "hash-name", + nargs=1, + required=True, +) +@click.option( + "--hash-field-list", + help="The list of hash fields to apply with this hash", + required=True, +) +@clicommon.pass_db +def PBH_HASH_add(db, hash_name, hash_field_list): + """ Add object to PBH_HASH table """ + + if not is_exist_in_db(db, hash_field_list, pbh_hash_field_tbl_name): + exit_with_error("Error: invalid value '{}' for '--hash-field-list' option".format(hash_field_list), fg="red") + + table = pbh_hash_tbl_name + key = hash_name + data = {} + if hash_field_list is not None: + data["hash_field_list"] = hash_field_list.split(",") + + try: + add_entry(db.cfgdb, table, key, data) + except Exception as err: + exit_with_error("Error: {}".format(err), fg="red") + + +@PBH_HASH.command(name="update") +@click.argument( + "hash-name", + nargs=1, + required=True, +) +@click.option( + "--hash-field-list", + help="The list of hash fields to apply with this hash", +) +@clicommon.pass_db +def PBH_HASH_update(db, hash_name, hash_field_list): + """ Update object in PBH_HASH table """ + + if not is_exist_in_db(db, hash_field_list, pbh_hash_field_tbl_name): + exit_with_error("Error: invalid value '{}' for '--hash-field-list' option".format(hash_field_list), fg="red") + + table = pbh_hash_tbl_name + key = hash_name + data = {} + if hash_field_list is not None: + data["hash_field_list"] = hash_field_list.split(",") + + try: + update_entry(db.cfgdb, table, key, data) + except Exception as err: + exit_with_error("Error: {}".format(err), fg="red") + + +@PBH_HASH.command(name="delete") +@click.argument( + "hash-name", + nargs=1, + required=True, +) +@clicommon.pass_db +def PBH_HASH_delete(db, hash_name): + """ Delete object from PBH_HASH table """ + + table = pbh_hash_tbl_name + key = hash_name + try: + del_entry(db.cfgdb, table, key) + except Exception as err: + exit_with_error("Error: {}".format(err), fg="red") + + +@PBH.group( + name="rule", + cls=clicommon.AliasedGroup +) +def PBH_RULE(): + """ Configure PBH rule """ + + pass + + +@PBH_RULE.command(name="add") +@click.argument( + "table-name", + nargs=1, + required=True, +) +@click.argument( + "rule-name", + nargs=1, + required=True, +) +@click.option( + "--priority", + help="Configures priority for this rule", + required=True, + type=click.INT, +) +@click.option( + "--gre-key", + help="Configures packet match: GRE key (value/mask)", + callback=pbh_re_match_validator, +) +@click.option( + "--ether-type", + help="Configures packet match for this rule: EtherType (IANA Ethertypes)", + callback=pbh_re_match_validator, +) +@click.option( + "--ip-protocol", + help="Configures packet match for this rule: IP protocol (IANA Protocol Numbers)", + callback=pbh_re_match_validator, +) +@click.option( + "--ipv6-next-header", + help="Configures packet match for this rule: IPv6 Next header (IANA Protocol Numbers)", + callback=pbh_re_match_validator, +) +@click.option( + "--l4-dst-port", + help="Configures packet match for this rule: L4 destination port", + callback=pbh_re_match_validator, +) +@click.option( + "--inner-ether-type", + help="Configures packet match for this rule: inner EtherType (IANA Ethertypes)", + callback=pbh_re_match_validator, +) +@click.option( + "--hash", + required=True, + help="The hash to apply with this rule", +) +@click.option( + "--packet-action", + help="Configures packet action for this rule", + type=click.Choice(packet_action_types) +) +@click.option( + "--flow-counter", + help="Enables/Disables packet/byte counter", 
+ type=click.Choice(flow_counter_state) +) +@clicommon.pass_db +def PBH_RULE_add( + db, + table_name, + rule_name, + priority, + gre_key, + ether_type, + ip_protocol, + ipv6_next_header, + l4_dst_port, + inner_ether_type, + hash, + packet_action, + flow_counter +): + """ Add object to PBH_RULE table """ + + if not is_exist_in_db(db, table_name, pbh_table_tbl_name): + exit_with_error("Error: invalid value '{}' for 'table-name' argument".format(table_name), fg="red") + if not is_exist_in_db(db, hash, pbh_hash_tbl_name): + exit_with_error("Error: invalid value '{}' for '--hash' option".format(hash), fg="red") + + table = "PBH_RULE" + key = table_name, rule_name + data = {} + if priority is not None: + data["priority"] = priority + if gre_key is not None: + data["gre_key"] = gre_key + if ether_type is not None: + data["ether_type"] = ether_type + if ip_protocol is not None: + data["ip_protocol"] = ip_protocol + if ipv6_next_header is not None: + data["ipv6_next_header"] = ipv6_next_header + if l4_dst_port is not None: + data["l4_dst_port"] = l4_dst_port + if inner_ether_type is not None: + data["inner_ether_type"] = inner_ether_type + if hash is not None: + data["hash"] = hash + if packet_action is not None: + data["packet_action"] = packet_action + if flow_counter is not None: + data["flow_counter"] = flow_counter + + try: + add_entry(db.cfgdb, table, key, data) + except Exception as err: + exit_with_error("Error: {}".format(err), fg="red") + + +@PBH_RULE.command(name="update") +@click.argument( + "table-name", + nargs=1, + required=True, +) +@click.argument( + "rule-name", + nargs=1, + required=True, +) +@click.option( + "--priority", + help="Configures priority for this rule", + type=click.INT, +) +@click.option( + "--gre-key", + help="Configures packet match: GRE key (value/mask)", + callback=pbh_re_match_validator, +) +@click.option( + "--ether-type", + help="Configures packet match for this rule: EtherType (IANA Ethertypes)", + callback=pbh_re_match_validator, +) +@click.option( + "--ip-protocol", + help="Configures packet match for this rule: IP protocol (IANA Protocol Numbers)", + callback=pbh_re_match_validator, +) +@click.option( + "--ipv6-next-header", + help="Configures packet match for this rule: IPv6 Next header (IANA Protocol Numbers)", + callback=pbh_re_match_validator, +) +@click.option( + "--l4-dst-port", + help="Configures packet match for this rule: L4 destination port", + callback=pbh_re_match_validator, +) +@click.option( + "--inner-ether-type", + help="Configures packet match for this rule: inner EtherType (IANA Ethertypes)", + callback=pbh_re_match_validator, +) +@click.option( + "--hash", + help="The hash to apply with this rule", +) +@click.option( + "--packet-action", + help="Configures packet action for this rule", + type=click.Choice(packet_action_types) +) +@click.option( + "--flow-counter", + help="Enables/Disables packet/byte counter", + type=click.Choice(flow_counter_state) +) +@clicommon.pass_db +def PBH_RULE_update( + db, + table_name, + rule_name, + priority, + gre_key, + ether_type, + ip_protocol, + ipv6_next_header, + l4_dst_port, + inner_ether_type, + hash, + packet_action, + flow_counter +): + """ Update object in PBH_RULE table """ + + if not is_exist_in_db(db, table_name, pbh_table_tbl_name): + exit_with_error("Error: invalid value '{}' for 'table-name' argument".format(table_name), fg="red") + if not is_exist_in_db(db, hash, pbh_hash_tbl_name): + exit_with_error("Error: invalid value '{}' for '--hash' option".format(hash), fg="red") + + table = 
"PBH_RULE" + key = table_name, rule_name + data = {} + if priority is not None: + data["priority"] = priority + if gre_key is not None: + data["gre_key"] = gre_key + if ether_type is not None: + data["ether_type"] = ether_type + if ip_protocol is not None: + data["ip_protocol"] = ip_protocol + if ipv6_next_header is not None: + data["ipv6_next_header"] = ipv6_next_header + if l4_dst_port is not None: + data["l4_dst_port"] = l4_dst_port + if inner_ether_type is not None: + data["inner_ether_type"] = inner_ether_type + if hash is not None: + data["hash"] = hash + if packet_action is not None: + data["packet_action"] = packet_action + if flow_counter is not None: + data["flow_counter"] = flow_counter + + try: + update_entry(db.cfgdb, table, key, data) + except Exception as err: + exit_with_error("Error: {}".format(err), fg="red") + + +@PBH_RULE.command(name="delete") +@click.argument( + "table-name", + nargs=1, + required=True, +) +@click.argument( + "rule-name", + nargs=1, + required=True, +) +@clicommon.pass_db +def PBH_RULE_delete(db, table_name, rule_name): + """ Delete object from PBH_RULE table """ + + table = "PBH_RULE" + key = table_name, rule_name + try: + del_entry(db.cfgdb, table, key) + except Exception as err: + exit_with_error("Error: {}".format(err), fg="red") + + +@PBH.group( + name="table", + cls=clicommon.AliasedGroup +) +def PBH_TABLE(): + """ Configure PBH table""" + + pass + + +@PBH_TABLE.command(name="add") +@click.argument( + "table-name", + nargs=1, + required=True, +) +@click.option( + "--description", + help="The description of this table", + required=True, +) +@click.option( + "--interface-list", + help="Interfaces to which this table is applied", + required=True, +) +@clicommon.pass_db +def PBH_TABLE_add(db, table_name, description, interface_list): + """ Add object to PBH_TABLE table """ + + interfaces_list_validator(db, interface_list, is_update=False) + + table = "PBH_TABLE" + key = table_name + data = {} + if description is not None: + data["description"] = description + if interface_list is not None: + data["interface_list"] = interface_list.split(",") + + try: + add_entry(db.cfgdb, table, key, data) + except Exception as err: + exit_with_error("Error: {}".format(err), fg="red") + + +@PBH_TABLE.command(name="update") +@click.argument( + "table-name", + nargs=1, + required=True, +) +@click.option( + "--description", + help="The description of this table", +) +@click.option( + "--interface-list", + help="Interfaces to which this table is applied", +) +@clicommon.pass_db +def PBH_TABLE_update(db, table_name, description, interface_list): + """ Update object in PBH_TABLE table """ + + interfaces_list_validator(db, interface_list, is_update=True) + + table = "PBH_TABLE" + key = table_name + data = {} + if description is not None: + data["description"] = description + if interface_list is not None: + data["interface_list"] = interface_list.split(",") + + try: + update_entry(db.cfgdb, table, key, data) + except Exception as err: + exit_with_error("Error: {}".format(err), fg="red") + + +@PBH_TABLE.command(name="delete") +@click.argument( + "table-name", + nargs=1, + required=True, +) +@clicommon.pass_db +def PBH_TABLE_delete(db, table_name): + """ Delete object from PBH_TABLE table """ + + table = "PBH_TABLE" + key = table_name + try: + del_entry(db.cfgdb, table, key) + except Exception as err: + exit_with_error("Error: {}".format(err), fg="red") + + +def register(cli): + cli_node = PBH + if cli_node.name in cli.commands: + raise Exception("{} already exists in 
CLI".format(cli_node.name)) + cli.add_command(PBH) + diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 26b23ac31c91..160ae8b7b4bd 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -103,6 +103,9 @@ * [NTP](#ntp) * [NTP show commands](#ntp-show-commands) * [NTP config commands](#ntp-config-commands) +* [PBH](#pbh) + * [PBH show commands](#pbh-show-commands) + * [PBH config commands](#pbh-config-commands) * [PFC Watchdog Commands](#pfc-watchdog-commands) * [Platform Component Firmware](#platform-component-firmware) * [Platform Component Firmware show commands](#platform-component-firmware-show-commands) @@ -6531,6 +6534,303 @@ This command adds or deletes a member port to/from the already created portchann Go Back To [Beginning of the document](#) or [Beginning of this section](#portchannels) +## PBH + +This section explains the various show commands and configuration commands available for users. + +### PBH show commands + +This subsection explains how to display PBH configuration and statistics. + +**show pbh table** + +This command displays PBH table configuration. + +- Usage: +```bash +show pbh table +``` + +- Example: +```bash +admin@sonic:~$ show pbh table +NAME INTERFACE DESCRIPTION +--------- --------------- --------------- +pbh_table Ethernet0 NVGRE and VxLAN + Ethernet4 + PortChannel0001 + PortChannel0002 +``` + +**show pbh rule** + +This command displays PBH rule configuration. + +- Usage: +```bash +show pbh rule +``` + +- Example: +```bash +admin@sonic:~$ show pbh rule +TABLE RULE PRIORITY MATCH HASH ACTION COUNTER +--------- ------ ---------- ------------------------------------ ------------- ------------- --------- +pbh_table nvgre 2 ether_type: 0x0800 inner_v6_hash SET_ECMP_HASH DISABLED + ip_protocol: 0x2f + gre_key: 0x2500/0xffffff00 + inner_ether_type: 0x86dd +pbh_table vxlan 1 ether_type: 0x0800 inner_v4_hash SET_LAG_HASH ENABLED + ip_protocol: 0x11 + l4_dst_port: 0x12b5 + inner_ether_type: 0x0800 +``` + +**show pbh hash** + +This command displays PBH hash configuration. + +- Usage: +```bash +show pbh hash +``` + +- Example: +```bash +admin@sonic:~$ show pbh hash +NAME HASH FIELD +------------- ----------------- +inner_v4_hash inner_ip_proto + inner_l4_dst_port + inner_l4_src_port + inner_dst_ipv4 + inner_src_ipv4 +inner_v6_hash inner_ip_proto + inner_l4_dst_port + inner_l4_src_port + inner_dst_ipv6 + inner_src_ipv6 +``` + +**show pbh hash-field** + +This command displays PBH hash field configuration. + +- Usage: +```bash +show pbh hash-field +``` + +- Example: +```bash +admin@sonic:~$ show pbh hash-field +NAME FIELD MASK SEQUENCE SYMMETRIC +----------------- ----------------- --------- ---------- ----------- +inner_ip_proto INNER_IP_PROTOCOL N/A 1 No +inner_l4_dst_port INNER_L4_DST_PORT N/A 2 Yes +inner_l4_src_port INNER_L4_SRC_PORT N/A 2 Yes +inner_dst_ipv4 INNER_DST_IPV4 255.0.0.0 3 Yes +inner_src_ipv4 INNER_SRC_IPV4 0.0.0.255 3 Yes +inner_dst_ipv6 INNER_DST_IPV6 ffff:: 4 Yes +inner_src_ipv6 INNER_SRC_IPV6 ::ffff 4 Yes +``` + +- Note: + - _SYMMETRIC_ is an artificial column and is only used to indicate fields symmetry + +**show pbh statistics** + +This command displays PBH statistics. 
+ +- Usage: +```bash +show pbh statistics +``` + +- Example: +```bash +admin@sonic:~$ show pbh statistics +TABLE RULE RX PACKETS COUNT RX BYTES COUNT +--------- ------ ------------------ ---------------- +pbh_table nvgre 0 0 +pbh_table vxlan 0 0 +``` + +- Note: + - _RX PACKETS COUNT_ and _RX BYTES COUNT_ can be cleared by user: + ```bash + admin@sonic:~$ sonic-clear pbh statistics + ``` + +### PBH config commands + +This subsection explains how to configure PBH. + +**config pbh table** + +This command is used to manage PBH table objects. +It supports add/update/remove operations. + +- Usage: +```bash +config pbh table add --interface-list --description +config pbh table update [ --interface-list ] [ --description ] +config pbh table delete +``` + +- Parameters: + - _table_name_: the name of the PBH table + - _interface_list_: interfaces to which PBH table is applied + - _description_: the description of the PBH table + +- Examples: +```bash +config pbh table add 'pbh_table' \ +--interface-list 'Ethernet0,Ethernet4,PortChannel0001,PortChannel0002' \ +--description 'NVGRE and VxLAN' +config pbh table update 'pbh_table' \ +--interface-list 'Ethernet0' +config pbh table delete 'pbh_table' +``` + +**config pbh rule** + +This command is used to manage PBH rule objects. +It supports add/update/remove operations. + +- Usage: +```bash +config pbh rule add --priority \ +[ --gre-key ] [ --ether-type ] [ --ip-protocol ] \ +[ --ipv6-next-header ] [ --l4-dst-port ] [ --inner-ether-type ] \ +--hash [ --packet-action ] [ --flow-counter ] +config pbh rule update [ --priority ] \ +[ --gre-key ] [ --ether-type ] [ --ip-protocol ] \ +[ --ipv6-next-header ] [ --l4-dst-port ] [ --inner-ether-type ] \ +[ --hash ] [ --packet-action ] [ --flow-counter ] +config pbh rule delete +``` + +- Parameters: + - _table_name_: the name of the PBH table + - _rule_name_: the name of the PBH rule + - _priority_: the priority of the PBH rule + - _gre_key_: packet match for the PBH rule: GRE key (value/mask) + - _ether_type_: packet match for the PBH rule: EtherType (IANA Ethertypes) + - _ip_protocol_: packet match for the PBH rule: IP protocol (IANA Protocol Numbers) + - _ipv6_next_header_: packet match for the PBH rule: IPv6 Next header (IANA Protocol Numbers) + - _l4_dst_port_: packet match for the PBH rule: L4 destination port + - _inner_ether_type_: packet match for the PBH rule: inner EtherType (IANA Ethertypes) + - _hash_: _hash_ object to apply with the PBH rule + - _packet_action_: packet action for the PBH rule + + Valid values: + - SET_ECMP_HASH + - SET_LAG_HASH + + Default: + - SET_ECMP_HASH + + - _flow_counter_: packet/byte counter for the PBH rule + + Valid values: + - DISABLED + - ENABLED + + Default: + - DISABLED + +- Examples: +```bash +config pbh rule add 'pbh_table' 'nvgre' \ +--priority '2' \ +--ether-type '0x0800' \ +--ip-protocol '0x2f' \ +--gre-key '0x2500/0xffffff00' \ +--inner-ether-type '0x86dd' \ +--hash 'inner_v6_hash' \ +--packet-action 'SET_ECMP_HASH' \ +--flow-counter 'DISABLED' +config pbh rule update 'pbh_table' 'nvgre' \ +--flow-counter 'ENABLED' +config pbh rule delete 'pbh_table' 'nvgre' +``` + +**config pbh hash** + +This command is used to manage PBH hash objects. +It supports add/update/remove operations. 
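+Each name in `--hash-field-list` must refer to a hash field object previously created with `config pbh hash-field add`; unknown names are rejected. A simplified sketch of that membership check, based on `is_exist_in_db()` in `config/plugins/pbh.py` (for illustration only):
+
+```python
+# Illustration only: every comma-separated name passed via '--hash-field-list'
+# must already be a key of the PBH_HASH_FIELD table in CONFIG_DB.
+def hash_field_list_is_valid(cfgdb, hash_field_list):
+    if hash_field_list is None:          # option not given -> nothing to validate
+        return True
+    existing = set(cfgdb.get_table('PBH_HASH_FIELD').keys())
+    return all(name in existing for name in hash_field_list.split(','))
+```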
+ +- Usage: +```bash +config pbh hash add --hash-field-list +config pbh hash update [ --hash-field-list ] +config pbh hash delete +``` + +- Parameters: + - _hash_name_: the name of the PBH hash + - _hash_field_list_: list of _hash-field_ objects to apply with the PBH hash + +- Examples: +```bash +config pbh hash add 'inner_v6_hash' \ +--hash-field-list 'inner_ip_proto,inner_l4_dst_port,inner_l4_src_port,inner_dst_ipv6,inner_src_ipv6' +config pbh hash update 'inner_v6_hash' \ +--hash-field-list 'inner_ip_proto' +config pbh hash delete 'inner_v6_hash' +``` + +**config pbh hash-field** + +This command is used to manage PBH hash field objects. +It supports add/update/remove operations. + +- Usage: +```bash +config pbh hash-field add \ +--hash-field [ --ip-mask ] --sequence-id +config pbh hash-field update \ +[ --hash-field ] [ --ip-mask ] [ --sequence-id ] +config pbh hash-field delete +``` + +- Parameters: + - _hash_field_name_: the name of the PBH hash field + - _hash_field_: native hash field for the PBH hash field + + Valid values: + - INNER_IP_PROTOCOL + - INNER_L4_DST_PORT + - INNER_L4_SRC_PORT + - INNER_DST_IPV4 + - INNER_SRC_IPV4 + - INNER_DST_IPV6 + - INNER_SRC_IPV6 + + - _ip_mask_: IPv4/IPv6 address mask for the PBH hash field + + Valid only: _hash_field_ is: + - INNER_DST_IPV4 + - INNER_SRC_IPV4 + - INNER_DST_IPV6 + - INNER_SRC_IPV6 + + - _sequence_id_: the order in which fields are hashed + +- Examples: +```bash +config pbh hash-field add 'inner_dst_ipv6' \ +--hash-field 'INNER_DST_IPV6' \ +--ip-mask 'ffff::' \ +--sequence-id '4' +config pbh hash-field update 'inner_dst_ipv6' \ +--ip-mask 'ffff:ffff::' +config pbh hash-field delete 'inner_dst_ipv6' +``` + +Go Back To [Beginning of the document](#) or [Beginning of this section](#pbh) + ## QoS ### QoS Show commands diff --git a/show/plugins/pbh.py b/show/plugins/pbh.py new file mode 100644 index 000000000000..d7cd929e02ac --- /dev/null +++ b/show/plugins/pbh.py @@ -0,0 +1,469 @@ +""" +This CLI plugin was auto-generated by using 'sonic-cli-gen' utility, BUT +it was manually modified to meet the PBH HLD requirements. + +PBH HLD - https://github.com/Azure/SONiC/pull/773 +CLI Auto-generation tool HLD - https://github.com/Azure/SONiC/pull/78 +""" + +import os +import click +import tabulate +import natsort +import json +import utilities_common.cli as clicommon +from swsscommon.swsscommon import SonicV2Connector + +PBH_COUNTERS_LOCATION = '/tmp/.pbh_counters.txt' + +pbh_hash_field_tbl_name = 'PBH_HASH_FIELD' +pbh_hash_tbl_name = 'PBH_HASH' +pbh_table_tbl_name = 'PBH_TABLE' +pbh_rule_tbl_name = 'PBH_RULE' + + +def format_attr_value(entry, attr): + """ Helper that formats attribute to be presented in the table output. + + Args: + entry (Dict[str, str]): CONFIG DB entry configuration. + attr (Dict): Attribute metadata. + + Returns: + str: fomatted attribute value. + """ + + if attr["is-leaf-list"]: + return "\n".join(entry.get(attr["name"], [])) + + return entry.get(attr["name"], "N/A") + + +def format_group_value(entry, attrs): + """ Helper that formats grouped attribute to be presented in the table output. + + Args: + entry (Dict[str, str]): CONFIG DB entry configuration. + attrs (List[Dict]): Attributes metadata that belongs to the same group. + + Returns: + str: fomatted group attributes. 
+ """ + + data = [] + for attr in attrs: + if entry.get(attr["name"]): + data.append((attr["name"] + ":", format_attr_value(entry, attr))) + + return tabulate.tabulate(data, tablefmt="plain", numalign="left") + + +@click.group( + name='pbh', + cls=clicommon.AliasedGroup +) +def PBH(): + """ Show PBH (Policy based hashing) feature configuration """ + + pass + + +@PBH.group( + name="hash-field", + cls=clicommon.AliasedGroup, + invoke_without_command=True +) +@clicommon.pass_db +def PBH_HASH_FIELD(db): + """ Show the PBH hash field configuration """ + + header = [ + "NAME", + "FIELD", + "MASK", + "SEQUENCE", + "SYMMETRIC", + ] + + body = [] + + table = db.cfgdb.get_table(pbh_hash_field_tbl_name) + for key in natsort.natsorted(table): + + entry = table[key] + + if not isinstance(key, tuple): + key = (key,) + + row = [*key] + [ + format_attr_value( + entry, + { + 'name': 'hash_field', + 'description': 'Configures native hash field for this hash field', + 'is-leaf-list': False, + 'is-mandatory': True, + 'group': '' + } + ), + format_attr_value( + entry, + { + 'name': 'ip_mask', + 'description': 'Configures IPv4/IPv6 address mask for this hash field', + 'is-leaf-list': False, + 'is-mandatory': True, + 'group': '' + } + ), + format_attr_value( + entry, + { + 'name': 'sequence_id', + 'description': 'Configures in which order the fields are hashed and defines which fields should be associative', + 'is-leaf-list': False, + 'is-mandatory': True, + 'group': '' + } + ), + ] + + body.append(row) + + # sorted by 'sequence_id' + body_sorted = sorted(body, key=lambda e: int(e[3])) + inject_symmetric_field(body_sorted) + click.echo(tabulate.tabulate(body_sorted, header, numalign="left")) + + +@PBH.group( + name="hash", + cls=clicommon.AliasedGroup, + invoke_without_command=True +) +@clicommon.pass_db +def PBH_HASH(db): + """ Show the PBH hash configuration """ + + header = [ + "NAME", + "HASH FIELD", + ] + + body = [] + + table = db.cfgdb.get_table(pbh_hash_tbl_name) + for key in natsort.natsorted(table): + entry = table[key] + if not isinstance(key, tuple): + key = (key,) + + row = [*key] + [ + format_attr_value( + entry, + { + 'name': 'hash_field_list', + 'description': 'The list of hash fields to apply with this hash', + 'is-leaf-list': True, + 'is-mandatory': False, + 'group': '' + } + ), + ] + + body.append(row) + + click.echo(tabulate.tabulate(body, header, numalign="left")) + + +@PBH.group( + name="rule", + cls=clicommon.AliasedGroup, + invoke_without_command=True +) +@clicommon.pass_db +def PBH_RULE(db): + """ Show the PBH rules configuration """ + + header = [ + "TABLE", + "RULE", + "PRIORITY", + "MATCH", + "HASH", + "ACTION", + "COUNTER", + ] + + body = [] + + table = db.cfgdb.get_table(pbh_rule_tbl_name) + for key in natsort.natsorted(table): + entry = table[key] + if not isinstance(key, tuple): + key = (key,) + + row = [*key] + [ + format_attr_value( + entry, + { + 'name': 'priority', + 'description': 'Configures priority for this rule', + 'is-leaf-list': False, + 'is-mandatory': True, + 'group': '' + } + ), + format_group_value( + entry, + [ + { + 'name': 'gre_key', + 'description': 'Configures packet match for this rule: GRE key (value/mask)', + 'is-leaf-list':False, + 'is-mandatory': False, + 'group': 'Match' + }, + { + 'name': 'ether_type', + 'description': 'Configures packet match for this rule: EtherType (IANA Ethertypes)', + 'is-leaf-list': False, + 'is-mandatory': False, + 'group': 'Match' + }, + { + 'name': 'ip_protocol', + 'description': 'Configures packet match for this rule: IP 
protocol (value/mask)', + 'is-leaf-list': False, + 'is-mandatory': False, + 'group': 'Match' + }, + { + 'name': 'ipv6_next_header', + 'description': 'Configures packet match for this rule: IPv6 Next header (value/mask)', + 'is-leaf-list': False, + 'is-mandatory': False, + 'group': 'Match' + }, + { + 'name': 'l4_dst_port', + 'description': 'Configures packet match for this rule: L4 destination port (value/mask)', + 'is-leaf-list': False, + 'is-mandatory': False, + 'group': 'Match' + }, + { + 'name': 'inner_ether_type', + 'description': 'Configures packet match for this rule: inner EtherType (value/mask)', + 'is-leaf-list': False, + 'is-mandatory': False, + 'group': 'Match' + }, + ] + ), + format_attr_value( + entry, + { + 'name': 'hash', + 'description':'The hash to apply with this rule', + 'is-leaf-list': False, + 'is-mandatory': True, + 'group': ''} + ), + format_attr_value( + entry, + { + 'name': 'packet_action', + 'description': 'Configures packet action for this rule', + 'is-leaf-list': False, + 'is-mandatory': False, + 'group': '' + } + ), + format_attr_value( + entry, + { + 'name': 'flow_counter', + 'description': 'Configures packet action for this rule', + 'is-leaf-list': False, + 'is-mandatory': False, + 'group': '' + } + ), + ] + + body.append(row) + + # sorted by 'Priority' + body_sorted = sorted(body, key=lambda e: int(e[2]), reverse=True) + click.echo(tabulate.tabulate(body_sorted, header, numalign="left")) + + +@PBH.group( + name="table", + cls=clicommon.AliasedGroup, + invoke_without_command=True +) +@clicommon.pass_db +def PBH_TABLE(db): + """ Show the PBH table configuration """ + + header = [ + "NAME", + "INTERFACE", + "DESCRIPTION", + ] + + body = [] + + table = db.cfgdb.get_table(pbh_table_tbl_name) + for key in natsort.natsorted(table): + entry = table[key] + if not isinstance(key, tuple): + key = (key,) + + row = [*key] + [ + format_attr_value( + entry, + { + 'name': 'interface_list', + 'description': 'Interfaces to which this table is applied', + 'is-leaf-list': True, + 'is-mandatory': False, + 'group': '' + } + ), + format_attr_value( + entry, + { + 'name': 'description', + 'description': 'The description of this table', + 'is-leaf-list': False, + 'is-mandatory': True, + 'group': '' + } + ), + ] + + body.append(row) + + click.echo(tabulate.tabulate(body, header, numalign="left")) + + +@PBH.group( + name="statistics", + cls=clicommon.AliasedGroup, + invoke_without_command=True +) +@clicommon.pass_db +def PBH_STATISTICS(db): + """ Show the PBH counters """ + + header = [ + "TABLE", + "RULE", + "RX PACKETS COUNT", + "RX BYTES COUNT", + ] + + body = [] + + pbh_rules = db.cfgdb.get_table(pbh_rule_tbl_name) + pbh_counters = read_pbh_counters(pbh_rules) + saved_pbh_counters = read_saved_pbh_counters() + + for key in pbh_rules: + if pbh_rules[key]['flow_counter'] == 'ENABLED': + row = [ + key[0], + key[1], + get_counter_value(pbh_counters, saved_pbh_counters, key, 'packets'), + get_counter_value(pbh_counters, saved_pbh_counters, key, 'bytes'), + ] + body.append(row) + + click.echo(tabulate.tabulate(body, header, numalign="left")) + + +def get_counter_value(pbh_counters, saved_pbh_counters, key, type): + if not pbh_counters[key]: + return '0' + + if key in saved_pbh_counters: + new_value = int(pbh_counters[key][type]) - int(saved_pbh_counters[key][type]) + if new_value >= 0: + return str(new_value) + + return str(pbh_counters[key][type]) + + +def remap_keys(obj_list): + res = {} + for e in obj_list: + res[e['key'][0], e['key'][1]] = e['value'] + return res + + +def 
read_saved_pbh_counters(): + if os.path.isfile(PBH_COUNTERS_LOCATION): + try: + with open(PBH_COUNTERS_LOCATION) as fp: + return remap_keys(json.load(fp)) + except Exception: + return {} + + return {} + + +def read_pbh_counters(pbh_rules) -> dict: + pbh_counters = {} + + db_connector = SonicV2Connector(use_unix_socket_path=False) + db_connector.connect(db_connector.COUNTERS_DB) + + for table, rule in natsort.natsorted(pbh_rules): + counter_props = lowercase_keys(db_connector.get_all(db_connector.COUNTERS_DB, "COUNTERS:%s:%s" % (table, rule))) + if counter_props: + pbh_counters[table, rule] = counter_props + + return pbh_counters + + +def inject_symmetric_field(obj_list): + """ The 'Symmetric' parameter will have 'Yes' value + if there are 2 'pbh hash fields' with identical 'sequence_id' value + + Args: + obj_list: a row of pbh hash fields that will be + displayed to the user + """ + + sequence_id = 3 + counter = 0 + + for i in range(0, len(obj_list)): + for j in range(0, len(obj_list)): + if i == j: + continue + + if obj_list[i][sequence_id] == obj_list[j][sequence_id]: + counter += 1 + + if counter >= 1: + obj_list[i].append('Yes') + else: + obj_list[i].append('No') + + counter = 0 + + +def lowercase_keys(dictionary): + return dict((k.lower(), v) for k, v in dictionary.items()) if dictionary else None + + +def register(cli): + cli_node = PBH + if cli_node.name in cli.commands: + raise Exception(f"{cli_node.name} already exists in CLI") + cli.add_command(PBH) + diff --git a/tests/pbh_input/assert_show_output.py b/tests/pbh_input/assert_show_output.py new file mode 100644 index 000000000000..5b67403a174b --- /dev/null +++ b/tests/pbh_input/assert_show_output.py @@ -0,0 +1,92 @@ +""" +Module holding the correct values for show CLI command outputs for the pbh_test.py +""" + +show_pbh_hash_fields="""\ +NAME FIELD MASK SEQUENCE SYMMETRIC +----------------- ----------------- --------- ---------- ----------- +inner_ip_proto INNER_IP_PROTOCOL N/A 1 No +inner_l4_dst_port INNER_L4_DST_PORT N/A 2 Yes +inner_l4_src_port INNER_L4_SRC_PORT N/A 2 Yes +inner_dst_ipv4 INNER_DST_IPV4 255.0.0.0 3 Yes +inner_src_ipv4 INNER_SRC_IPV4 0.0.0.255 3 Yes +inner_dst_ipv6 INNER_DST_IPV6 ffff:: 4 Yes +inner_src_ipv6 INNER_SRC_IPV6 ::ffff 4 Yes +""" + + +show_pbh_hash="""\ +NAME HASH FIELD +------------- ----------------- +inner_v4_hash inner_ip_proto + inner_l4_dst_port + inner_l4_src_port + inner_dst_ipv4 + inner_src_ipv4 +inner_v6_hash inner_ip_proto + inner_l4_dst_port + inner_l4_src_port + inner_dst_ipv6 + inner_src_ipv6 +""" + + +show_pbh_table="""\ +NAME INTERFACE DESCRIPTION +---------- --------------- --------------- +pbh_table1 Ethernet0 NVGRE + Ethernet4 +pbh_table2 PortChannel0001 VxLAN + PortChannel0002 +pbh_table3 Ethernet0 NVGRE and VxLAN + Ethernet4 + PortChannel0001 + PortChannel0002 +""" + + +show_pbh_rule="""\ +TABLE RULE PRIORITY MATCH HASH ACTION COUNTER +---------- ------ ---------- ------------------------------------ ------------- ------------- --------- +pbh_table2 vxlan 2 ip_protocol: 0x11 inner_v4_hash SET_LAG_HASH ENABLED + l4_dst_port: 0x12b5 + inner_ether_type: 0x0800 +pbh_table1 nvgre 1 gre_key: 0x2500/0xffffff00 inner_v6_hash SET_ECMP_HASH ENABLED + inner_ether_type: 0x86dd +""" + + +show_pbh_statistics_empty="""\ +TABLE RULE RX PACKETS COUNT RX BYTES COUNT +------- ------ ------------------ ---------------- +""" + + +show_pbh_statistics_zero="""\ +TABLE RULE RX PACKETS COUNT RX BYTES COUNT +---------- ------ ------------------ ---------------- +pbh_table1 nvgre 0 0 +pbh_table2 vxlan 0 0 
+""" + + +show_pbh_statistics="""\ +TABLE RULE RX PACKETS COUNT RX BYTES COUNT +---------- ------ ------------------ ---------------- +pbh_table1 nvgre 100 200 +pbh_table2 vxlan 300 400 +""" + +show_pbh_statistics_updated="""\ +TABLE RULE RX PACKETS COUNT RX BYTES COUNT +---------- ------ ------------------ ---------------- +pbh_table1 nvgre 400 400 +pbh_table2 vxlan 400 400 +""" + +show_pbh_statistics_after_disabling_rule="""\ +TABLE RULE RX PACKETS COUNT RX BYTES COUNT +---------- ------ ------------------ ---------------- +pbh_table1 nvgre 0 0 +""" + diff --git a/tests/pbh_input/counters_db.json b/tests/pbh_input/counters_db.json new file mode 100644 index 000000000000..1f764f32db76 --- /dev/null +++ b/tests/pbh_input/counters_db.json @@ -0,0 +1,10 @@ +{ + "COUNTERS:pbh_table1:nvgre": { + "Packets": "100", + "Bytes": "200" + }, + "COUNTERS:pbh_table2:vxlan": { + "Packets": "300", + "Bytes": "400" + } +} diff --git a/tests/pbh_input/counters_db_updated.json b/tests/pbh_input/counters_db_updated.json new file mode 100644 index 000000000000..c1771ba3ffdc --- /dev/null +++ b/tests/pbh_input/counters_db_updated.json @@ -0,0 +1,10 @@ +{ + "COUNTERS:pbh_table1:nvgre": { + "Packets": "500", + "Bytes": "600" + }, + "COUNTERS:pbh_table2:vxlan": { + "Packets": "700", + "Bytes": "800" + } +} diff --git a/tests/pbh_input/full_pbh_config.json b/tests/pbh_input/full_pbh_config.json new file mode 100644 index 000000000000..0052ad185451 --- /dev/null +++ b/tests/pbh_input/full_pbh_config.json @@ -0,0 +1,81 @@ +{ + "PBH_HASH_FIELD|inner_dst_ipv4": { + "hash_field": "INNER_DST_IPV4", + "ip_mask": "255.0.0.0", + "sequence_id": "3" + }, + "PBH_HASH_FIELD|inner_dst_ipv6": { + "hash_field": "INNER_DST_IPV6", + "ip_mask": "ffff::", + "sequence_id": "4" + }, + "PBH_HASH_FIELD|inner_ip_proto": { + "hash_field": "INNER_IP_PROTOCOL", + "sequence_id": "1" + }, + "PBH_HASH_FIELD|inner_l4_dst_port": { + "hash_field": "INNER_L4_DST_PORT", + "sequence_id": "2" + }, + "PBH_HASH_FIELD|inner_l4_src_port": { + "hash_field": "INNER_L4_SRC_PORT", + "sequence_id": "2" + }, + "PBH_HASH_FIELD|inner_src_ipv4": { + "hash_field": "INNER_SRC_IPV4", + "ip_mask": "0.0.0.255", + "sequence_id": "3" + }, + "PBH_HASH_FIELD|inner_src_ipv6": { + "hash_field": "INNER_SRC_IPV6", + "ip_mask": "::ffff", + "sequence_id": "4" + }, + "PBH_HASH|inner_v4_hash": { + "hash_field_list@": "inner_ip_proto,inner_l4_dst_port,inner_l4_src_port,inner_dst_ipv4,inner_src_ipv4" + }, + "PBH_HASH|inner_v6_hash": { + "hash_field_list@": "inner_ip_proto,inner_l4_dst_port,inner_l4_src_port,inner_dst_ipv6,inner_src_ipv6" + }, + "PORT|Ethernet0": { + "NULL": "NULL" + }, + "PORT|Ethernet4": { + "NULL": "NULL" + }, + "PORTCHANNEL|PortChannel0001": { + "NULL": "NULL" + }, + "PORTCHANNEL|PortChannel0002": { + "NULL": "NULL" + }, + "PBH_TABLE|pbh_table1": { + "description": "NVGRE", + "interface_list@": "Ethernet0,Ethernet4" + }, + "PBH_TABLE|pbh_table2": { + "description": "VxLAN", + "interface_list@": "PortChannel0001,PortChannel0002" + }, + "PBH_TABLE|pbh_table3": { + "description": "NVGRE and VxLAN", + "interface_list@": "Ethernet0,Ethernet4,PortChannel0001,PortChannel0002" + }, + "PBH_RULE|pbh_table1|nvgre": { + "priority": "1", + "gre_key": "0x2500/0xffffff00", + "inner_ether_type": "0x86dd", + "hash": "inner_v6_hash", + "packet_action": "SET_ECMP_HASH", + "flow_counter": "ENABLED" + }, + "PBH_RULE|pbh_table2|vxlan": { + "priority": "2", + "ip_protocol": "0x11", + "inner_ether_type": "0x0800", + "l4_dst_port": "0x12b5", + "hash": "inner_v4_hash", + "packet_action": 
"SET_LAG_HASH", + "flow_counter": "ENABLED" + } +} diff --git a/tests/pbh_input/hash.json b/tests/pbh_input/hash.json new file mode 100644 index 000000000000..d22096dab050 --- /dev/null +++ b/tests/pbh_input/hash.json @@ -0,0 +1,40 @@ +{ + "PBH_HASH_FIELD|inner_dst_ipv4": { + "hash_field": "INNER_DST_IPV4", + "ip_mask": "255.0.0.0", + "sequence_id": "3" + }, + "PBH_HASH_FIELD|inner_dst_ipv6": { + "hash_field": "INNER_DST_IPV6", + "ip_mask": "ffff::", + "sequence_id": "4" + }, + "PBH_HASH_FIELD|inner_ip_proto": { + "hash_field": "INNER_IP_PROTOCOL", + "sequence_id": "1" + }, + "PBH_HASH_FIELD|inner_l4_dst_port": { + "hash_field": "INNER_L4_DST_PORT", + "sequence_id": "2" + }, + "PBH_HASH_FIELD|inner_l4_src_port": { + "hash_field": "INNER_L4_SRC_PORT", + "sequence_id": "2" + }, + "PBH_HASH_FIELD|inner_src_ipv4": { + "hash_field": "INNER_SRC_IPV4", + "ip_mask": "0.0.0.255", + "sequence_id": "3" + }, + "PBH_HASH_FIELD|inner_src_ipv6": { + "hash_field": "INNER_SRC_IPV6", + "ip_mask": "::ffff", + "sequence_id": "4" + }, + "PBH_HASH|inner_v4_hash": { + "hash_field_list@": "inner_ip_proto,inner_l4_dst_port,inner_l4_src_port,inner_dst_ipv4,inner_src_ipv4" + }, + "PBH_HASH|inner_v6_hash": { + "hash_field_list@": "inner_ip_proto,inner_l4_dst_port,inner_l4_src_port,inner_dst_ipv6,inner_src_ipv6" + } +} diff --git a/tests/pbh_input/hash_fields.json b/tests/pbh_input/hash_fields.json new file mode 100644 index 000000000000..63e46fb86d38 --- /dev/null +++ b/tests/pbh_input/hash_fields.json @@ -0,0 +1,34 @@ +{ + "PBH_HASH_FIELD|inner_dst_ipv4": { + "hash_field": "INNER_DST_IPV4", + "ip_mask": "255.0.0.0", + "sequence_id": "3" + }, + "PBH_HASH_FIELD|inner_dst_ipv6": { + "hash_field": "INNER_DST_IPV6", + "ip_mask": "ffff::", + "sequence_id": "4" + }, + "PBH_HASH_FIELD|inner_ip_proto": { + "hash_field": "INNER_IP_PROTOCOL", + "sequence_id": "1" + }, + "PBH_HASH_FIELD|inner_l4_dst_port": { + "hash_field": "INNER_L4_DST_PORT", + "sequence_id": "2" + }, + "PBH_HASH_FIELD|inner_l4_src_port": { + "hash_field": "INNER_L4_SRC_PORT", + "sequence_id": "2" + }, + "PBH_HASH_FIELD|inner_src_ipv4": { + "hash_field": "INNER_SRC_IPV4", + "ip_mask": "0.0.0.255", + "sequence_id": "3" + }, + "PBH_HASH_FIELD|inner_src_ipv6": { + "hash_field": "INNER_SRC_IPV6", + "ip_mask": "::ffff", + "sequence_id": "4" + } +} diff --git a/tests/pbh_input/rule.json b/tests/pbh_input/rule.json new file mode 100644 index 000000000000..50ecaddd4e8f --- /dev/null +++ b/tests/pbh_input/rule.json @@ -0,0 +1,50 @@ +{ + "PORT|Ethernet0": { + "NULL": "NULL" + }, + "PORT|Ethernet4": { + "NULL": "NULL" + }, + "PBH_HASH_FIELD|inner_dst_ipv4": { + "hash_field": "INNER_DST_IPV4", + "ip_mask": "255.0.0.0", + "sequence_id": "3" + }, + "PBH_HASH_FIELD|inner_dst_ipv6": { + "hash_field": "INNER_DST_IPV6", + "ip_mask": "ffff::", + "sequence_id": "4" + }, + "PBH_HASH_FIELD|inner_ip_proto": { + "hash_field": "INNER_IP_PROTOCOL", + "sequence_id": "1" + }, + "PBH_HASH_FIELD|inner_l4_dst_port": { + "hash_field": "INNER_L4_DST_PORT", + "sequence_id": "2" + }, + "PBH_HASH_FIELD|inner_l4_src_port": { + "hash_field": "INNER_L4_SRC_PORT", + "sequence_id": "2" + }, + "PBH_HASH_FIELD|inner_src_ipv4": { + "hash_field": "INNER_SRC_IPV4", + "ip_mask": "0.0.0.255", + "sequence_id": "3" + }, + "PBH_HASH_FIELD|inner_src_ipv6": { + "hash_field": "INNER_SRC_IPV6", + "ip_mask": "::ffff", + "sequence_id": "4" + }, + "PBH_HASH|inner_v4_hash": { + "hash_field_list@": "inner_ip_proto,inner_l4_dst_port,inner_l4_src_port,inner_dst_ipv4,inner_src_ipv4" + }, + "PBH_HASH|inner_v6_hash": { + 
"hash_field_list@": "inner_ip_proto,inner_l4_dst_port,inner_l4_src_port,inner_dst_ipv6,inner_src_ipv6" + }, + "PBH_TABLE|pbh_table1": { + "description": "NVGRE", + "interface_list@": "Ethernet0,Ethernet4" + } +} diff --git a/tests/pbh_input/table.json b/tests/pbh_input/table.json new file mode 100644 index 000000000000..ff043da987bf --- /dev/null +++ b/tests/pbh_input/table.json @@ -0,0 +1,17 @@ +{ + "PORT|Ethernet0": { + "NULL": "NULL" + }, + "PORT|Ethernet4": { + "NULL": "NULL" + }, + "PORT|Ethernet8": { + "NULL": "NULL" + }, + "PORTCHANNEL|PortChannel0001": { + "NULL": "NULL" + }, + "PORTCHANNEL|PortChannel0002": { + "NULL": "NULL" + } +} diff --git a/tests/pbh_test.py b/tests/pbh_test.py new file mode 100644 index 000000000000..bc4c74db730b --- /dev/null +++ b/tests/pbh_test.py @@ -0,0 +1,939 @@ +#!/usr/bin/env python + +import pytest +import os +import logging +import show.main as show +import config.main as config +import clear.main as clear +import importlib + +from .pbh_input import assert_show_output +from utilities_common.db import Db +from click.testing import CliRunner +from .mock_tables import dbconnector +from .mock_tables import mock_single_asic + +logger = logging.getLogger(__name__) +test_path = os.path.dirname(os.path.abspath(__file__)) +mock_db_path = os.path.join(test_path, "pbh_input") + +SUCCESS = 0 +ERROR = 1 +ERROR2 = 2 + +INVALID_VALUE = 'INVALID' + + +class TestPBH: + @classmethod + def setup_class(cls): + logger.info("SETUP") + os.environ['UTILITIES_UNIT_TESTING'] = "1" + + @classmethod + def teardown_class(cls): + logger.info("TEARDOWN") + os.environ['UTILITIES_UNIT_TESTING'] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + dbconnector.dedicated_dbs['CONFIG_DB'] = None + dbconnector.dedicated_dbs['COUNTERS_DB'] = None + + + ########## CONFIG PBH HASH-FIELD ########## + + + def test_config_pbh_hash_field_add_delete_no_ip_mask(self): + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["hash-field"]. + commands["add"], ["inner_ip_proto", "--hash-field", + "INNER_IP_PROTOCOL", "--sequence-id", "1"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + result = runner.invoke( + config.config.commands["pbh"].commands["hash-field"]. + commands["delete"], ["inner_ip_proto"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + + @pytest.mark.parametrize("hash_field_name,hash_field,ip_mask", [ + ("inner_dst_ipv6", "INNER_DST_IPV6", "ffff::"), + ("inner_dst_ipv4", "INNER_DST_IPV4", "255.0.0.0") + ]) + def test_config_pbh_hash_field_add_ip_mask( + self, + hash_field_name, + hash_field, + ip_mask, + ): + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["hash-field"]. 
+ commands["add"], [hash_field_name, "--hash-field", + hash_field, "--ip-mask", ip_mask, + "--sequence-id", "3"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + assert result.exit_code == SUCCESS + + + @pytest.mark.parametrize("hash_field_name,hash_field,ip_mask", [ + ("inner_ip_protocol", "INNER_IP_PROTOCOL", "255.0.0.0"), + ("inner_src_ipv6", "INNER_SRC_IPV6", "255.0.0.0"), + ("inner_src_ipv4", "INNER_SRC_IPV4", "ffff::") + ]) + def test_config_pbh_hash_field_add_mismatch_hash_field_ip_mask( + self, + hash_field_name, + hash_field, + ip_mask, + ): + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["hash-field"]. + commands["add"], [hash_field_name, "--hash-field", + hash_field, "--ip-mask", ip_mask, + "--sequence-id", "1"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == ERROR + + + def test_config_pbh_hash_field_add_invalid_ip(self): + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["hash-field"]. + commands["add"], ["inner_src_ipv4", "--hash-field", + "INNER_SRC_IPV4", "--ip-mask", INVALID_VALUE, + "--sequence-id", "2"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == ERROR + + + @pytest.mark.parametrize("hash_field_name,hash_field", [ + ("inner_src_ipv6", "INNER_SRC_IPV6"), + ("inner_src_ipv4", "INNER_SRC_IPV4") + ]) + def test_config_pbh_hash_field_add_none_ip_mask( + self, + hash_field_name, + hash_field, + ): + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["hash-field"]. + commands["add"], [hash_field_name, "--hash-field", + hash_field, "--sequence-id", "2"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == ERROR + + + @pytest.mark.parametrize("hash_field_name,hash_field,updated_hash_field,sequence_id", [ + ("inner_ip_proto", "INNER_IP_PROTOCOL", "INNER_L4_DST_PORT", "1"), + ("inner_l4_src_port", "INNER_L4_SRC_PORT", "INNER_L4_DST_PORT", "2") + ]) + def test_config_pbh_hash_field_update_hash_field_sequence_id_no_ip( + self, + hash_field_name, + hash_field, + updated_hash_field, + sequence_id + ): + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["hash-field"]. + commands["add"],[hash_field_name, "--hash-field", + hash_field, "--sequence-id", "1"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + result = runner.invoke( + config.config.commands["pbh"].commands["hash-field"]. + commands["update"],[hash_field_name, "--hash-field", + updated_hash_field, "--sequence-id", sequence_id], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + + @pytest.mark.parametrize("hash_field_name,hash_field,updated_hash_field,ip_mask,updated_ip_mask", [ + ("inner_dst_ipv4", "INNER_DST_IPV4", "INNER_SRC_IPV4", "255.0.0.0", "0.0.0.255"), + ("inner_dst_ipv6", "INNER_DST_IPV6", "INNER_SRC_IPV6", "ffff::", "::ffff"), + ]) + def test_config_pbh_hash_field_update_hash_field_ip_mask( + self, + hash_field_name, + hash_field, + updated_hash_field, + ip_mask, + updated_ip_mask + ): + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["hash-field"]. 
+ commands["add"], [hash_field_name, "--hash-field", + hash_field, "--ip-mask", ip_mask, + "--sequence-id", "1"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + result = runner.invoke( + config.config.commands["pbh"].commands["hash-field"]. + commands["update"], [hash_field_name, "--hash-field", + updated_hash_field, "--ip-mask", updated_ip_mask], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + + def test_config_pbh_hash_field_update_invalid_hash_field(self): + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["hash-field"]. + commands["add"], ["inner_ip_proto", "--hash-field", + "INNER_IP_PROTOCOL", "--sequence-id", "1"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + result = runner.invoke( + config.config.commands["pbh"].commands["hash-field"]. + commands["update"], ["inner_ip_proto", "--hash-field", + "INNER_DST_IPV4", "--sequence-id", "2"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == ERROR + + + def test_config_pbh_hash_field_update_invalid_ipv4_mask(self): + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["hash-field"]. + commands["add"],["inner_ip_proto", "--hash-field", + "INNER_IP_PROTOCOL", "--sequence-id", "1"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + result = runner.invoke( + config.config.commands["pbh"].commands["hash-field"]. + commands["update"], ["inner_ip_proto", "--ip-mask", + "0.0.0.255"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == ERROR + + + @pytest.mark.parametrize("hash_field_name,hash_field,ip_mask,updated_ip_mask", [ + ("inner_dst_ipv6", "INNER_DST_IPV6", "ffff::", "255.0.0.0"), + ("inner_dst_ipv4", "INNER_DST_IPV4", "255.0.0.0", "ffff::") + ]) + def test_config_pbh_hash_field_update_invalid_ip_mask( + self, + hash_field_name, + hash_field, + ip_mask, + updated_ip_mask + ): + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["hash-field"]. + commands["add"], [hash_field_name, "--hash-field", + hash_field, "--ip-mask", ip_mask, "--sequence-id", + "3"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + result = runner.invoke( + config.config.commands["pbh"].commands["hash-field"]. + commands["update"], [hash_field_name, "--ip-mask", + updated_ip_mask], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == ERROR + + + ########## CONFIG PBH HASH ########## + + + def test_config_pbh_hash_add_delete_ipv4(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'hash_fields') + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["hash"]. + commands["add"], ["inner_v4_hash", "--hash-field-list", + "inner_ip_proto,inner_l4_dst_port,inner_l4_src_port,inner_dst_ipv4,inner_dst_ipv4"], + obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + result = runner.invoke( + config.config.commands["pbh"].commands["hash"]. 
+ commands["delete"],["inner_v4_hash"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + + def test_config_pbh_hash_add_update_ipv6(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'hash_fields') + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["hash"]. + commands["add"], ["inner_v6_hash", "--hash-field-list", + "inner_ip_proto,inner_l4_dst_port,inner_l4_src_port,inner_dst_ipv6,inner_dst_ipv6"], + obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + result = runner.invoke( + config.config.commands["pbh"].commands["hash"]. + commands["update"], ["inner_v6_hash", "--hash-field-list", + "inner_l4_dst_port,inner_l4_src_port,inner_dst_ipv6,inner_dst_ipv6"], + obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + @pytest.mark.parametrize("hash_name,hash_field_list,exit_code", [ + ("inner_v6_hash", INVALID_VALUE, ERROR), + ("inner_v6_hash", "", ERROR), + ("inner_v6_hash", None, ERROR2) + ]) + def test_config_pbh_hash_add_invalid_hash_field_list( + self, + hash_name, + hash_field_list, + exit_code + ): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'hash_fields') + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["hash"]. + commands["add"], [hash_name, "--hash-field-list", + hash_field_list], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == exit_code + + + ########## CONFIG PBH TABLE ########## + + + def test_config_pbh_table_add_delete_ports(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'table') + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["table"]. + commands["add"],["pbh_table1", "--interface-list", + "Ethernet0,Ethernet4", "--description", "NVGRE"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + result = runner.invoke( + config.config.commands["pbh"].commands["table"]. + commands["delete"], ["pbh_table1"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + + def test_config_pbh_table_add_update_portchannels(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'table') + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["table"]. + commands["add"], ["pbh_table2", "--interface-list", + "PortChannel0001", "--description", "VxLAN"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + result = runner.invoke( + config.config.commands["pbh"].commands["table"]. + commands["update"],["pbh_table2", "--interface-list", + "PortChannel0002", "--description", "VxLAN TEST"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + result = runner.invoke( + config.config.commands["pbh"].commands["table"]. 
+ commands["update"],["pbh_table2", "--interface-list", + "PortChannel0001"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + result = runner.invoke( + config.config.commands["pbh"].commands["table"]. + commands["update"], ["pbh_table2", "--description", + "TEST"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + + def test_config_pbh_table_add_port_and_portchannel(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'table') + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["table"]. + commands["add"], ["pbh_table3", "--interface-list", + "PortChannel0002,Ethernet8", "--description", + "VxLAN adn NVGRE"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + + def test_config_pbh_table_add_invalid_port(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'table') + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["table"]. + commands["add"], ["pbh_table3", "--interface-list", + INVALID_VALUE, "--description", "VxLAN adn NVGRE"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == ERROR + + + def test_config_pbh_table_add_update_invalid_interface(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'table') + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["table"]. + commands["add"], ["pbh_table2", "--interface-list", + "PortChannel0001", "--description", "VxLAN"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + result = runner.invoke( + config.config.commands["pbh"].commands["table"]. + commands["update"], ["pbh_table2", "--interface-list", + INVALID_VALUE], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == ERROR + + + ########## CONFIG PBH RULE ########## + + + def test_config_pbh_rule_add_delete_nvgre(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'rule') + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["rule"]. + commands["add"],["pbh_table1", "nvgre", "--priority", + "1", "--gre-key", "0x2500/0xffffff00", "--inner-ether-type", + "0x86dd", "--hash", "inner_v6_hash", "--packet-action", + "SET_ECMP_HASH", "--flow-counter", "DISABLED"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + result = runner.invoke( + config.config.commands["pbh"].commands["rule"]. + commands["delete"], ["pbh_table1", "nvgre"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + + def test_config_pbh_rule_add_update_vxlan(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'rule') + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["rule"]. 
+ commands["add"], ["pbh_table1", "vxlan ", + "--priority", "2", "--ip-protocol", "0x11", + "--inner-ether-type", "0x0800","--l4-dst-port", + "0x12b5", "--hash", "inner_v4_hash", "--packet-action", + "SET_LAG_HASH", "--flow-counter", "ENABLED"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + result = runner.invoke( + config.config.commands["pbh"].commands["rule"]. + commands["update"], ["pbh_table1", "vxlan ", + "--priority", "3", "--inner-ether-type", "0x086d", + "--packet-action", "SET_LAG_HASH", "--flow-counter", + "DISABLED"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + + def test_config_pbh_rule_update_invalid(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'rule') + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["rule"]. + commands["add"], ["pbh_table1", "vxlan ", "--priority", + "2", "--ip-protocol", "0x11", "--inner-ether-type", + "0x0800", "--l4-dst-port", "0x12b5", "--hash", + "inner_v6_hash", "--packet-action", "SET_ECMP_HASH", + "--flow-counter", "ENABLED"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + result = runner.invoke( + config.config.commands["pbh"].commands["rule"]. + commands["update"], ["pbh_table1", "vxlan ", + "--flow-counter", INVALID_VALUE], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == ERROR2 + + + def test_config_pbh_rule_add_invalid_ip_protocol(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'rule') + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["rule"]. + commands["add"], ["pbh_table1", "vxlan ", "--priority", + "2", "--ip-protocol", INVALID_VALUE, "--inner-ether-type", + "0x0800", "--l4-dst-port", "0x12b5", "--hash", "inner_v6_hash", + "--packet-action", "SET_ECMP_HASH", "--flow-counter", + "ENABLED"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == ERROR + + + def test_config_pbh_rule_add_invalid_inner_ether_type(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'rule') + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["rule"]. + commands["add"], ["pbh_table1", "vxlan ", "--priority", + "2", "--ip-protocol", "0x11", "--inner-ether-type", + INVALID_VALUE, "--l4-dst-port", "0x12b5", "--hash", + "inner_v6_hash", "--packet-action", "SET_ECMP_HASH", + "--flow-counter", "ENABLED"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == ERROR + + + def test_config_pbh_rule_add_invalid_hash(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'rule') + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["rule"]. 
+ commands["add"], ["pbh_table1", "vxlan ", "--priority", + "2", "--ip-protocol", "0x11", "--inner-ether-type", "0x0800", + "--l4-dst-port", "0x12b5", "--hash", INVALID_VALUE, + "--packet-action", "SET_ECMP_HASH", "--flow-counter", + "ENABLED"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == ERROR + + + def test_config_pbh_rule_add_invalid_packet_action(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'rule') + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["rule"]. + commands["add"], ["pbh_table1", "vxlan ", "--priority", + "2", "--ip-protocol", "0x11", "--inner-ether-type", + "0x0800", "--l4-dst-port", "0x12b5", "--hash", + "inner_v6_hash", "--packet-action", INVALID_VALUE, + "--flow-counter", "ENABLED"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == ERROR2 + + + def test_config_pbh_rule_add_invalid_flow_counter(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'rule') + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["rule"]. + commands["add"], ["pbh_table1", "vxlan ", "--priority", + "2", "--ip-protocol", "0x11", "--inner-ether-type", + "0x0800", "--l4-dst-port", "0x12b5", "--hash", + "inner_v6_hash", "--packet-action", "SET_ECMP_HASH", + "--flow-counter", INVALID_VALUE], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == ERROR2 + + ########## SHOW PBH HASH-FIELD ########## + + def test_show_pbh_hash_field(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'full_pbh_config') + db = Db() + runner = CliRunner() + + result = runner.invoke( + show.cli.commands["pbh"]. + commands["hash-field"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + assert result.output == assert_show_output.show_pbh_hash_fields + + + ########## SHOW PBH HASH ########## + + + def test_show_pbh_hash(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'full_pbh_config') + db = Db() + runner = CliRunner() + + result = runner.invoke( + show.cli.commands["pbh"]. + commands["hash"], [], obj=db + ) + + logger.debug("\n" + result.stdout) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + assert result.output == assert_show_output.show_pbh_hash + + + ########## SHOW PBH TABLE ########## + + + def test_show_pbh_table(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'full_pbh_config') + db = Db() + runner = CliRunner() + + result = runner.invoke( + show.cli.commands["pbh"]. + commands["table"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + assert result.output == assert_show_output.show_pbh_table + + + ########## SHOW PBH RULE ########## + + + def test_show_pbh_rule(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'full_pbh_config') + db = Db() + runner = CliRunner() + + result = runner.invoke( + show.cli.commands["pbh"]. 
+ commands["rule"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + assert result.output == assert_show_output.show_pbh_rule + + + ########## SHOW PBH STATISTICS ########## + + def test_show_pbh_statistics_on_empty_config(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = None + dbconnector.dedicated_dbs['COUNTERS_DB'] = None + + SAVED_PBH_COUNTERS_FILE = '/tmp/.pbh_counters.txt' + if os.path.isfile(SAVED_PBH_COUNTERS_FILE): + os.remove(SAVED_PBH_COUNTERS_FILE) + + db = Db() + runner = CliRunner() + + result = runner.invoke( + show.cli.commands["pbh"]. + commands["statistics"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + assert result.output == assert_show_output.show_pbh_statistics_empty + + + def test_show_pbh_statistics(self): + dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, 'counters_db') + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'full_pbh_config') + + SAVED_PBH_COUNTERS_FILE = '/tmp/.pbh_counters.txt' + if os.path.isfile(SAVED_PBH_COUNTERS_FILE): + os.remove(SAVED_PBH_COUNTERS_FILE) + + db = Db() + runner = CliRunner() + + result = runner.invoke( + show.cli.commands["pbh"]. + commands["statistics"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + assert result.output == assert_show_output.show_pbh_statistics + + + def test_show_pbh_statistics_after_clear(self): + dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, 'counters_db') + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'full_pbh_config') + + SAVED_PBH_COUNTERS_FILE = '/tmp/.pbh_counters.txt' + if os.path.isfile(SAVED_PBH_COUNTERS_FILE): + os.remove(SAVED_PBH_COUNTERS_FILE) + + db = Db() + runner = CliRunner() + + result = runner.invoke( + clear.cli.commands["pbh"]. + commands["statistics"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + result = runner.invoke( + show.cli.commands["pbh"]. + commands["statistics"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + assert result.output == assert_show_output.show_pbh_statistics_zero + + + def test_show_pbh_statistics_after_clear_and_counters_updated(self): + dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, 'counters_db') + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'full_pbh_config') + + SAVED_PBH_COUNTERS_FILE = '/tmp/.pbh_counters.txt' + if os.path.isfile(SAVED_PBH_COUNTERS_FILE): + os.remove(SAVED_PBH_COUNTERS_FILE) + + db = Db() + runner = CliRunner() + + result = runner.invoke( + clear.cli.commands["pbh"]. + commands["statistics"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, 'counters_db_updated') + + result = runner.invoke( + show.cli.commands["pbh"]. 
+ commands["statistics"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + assert result.output == assert_show_output.show_pbh_statistics_updated + + + def test_show_pbh_statistics_after_disabling_rule(self): + dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, 'counters_db') + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'full_pbh_config') + + SAVED_PBH_COUNTERS_FILE = '/tmp/.pbh_counters.txt' + if os.path.isfile(SAVED_PBH_COUNTERS_FILE): + os.remove(SAVED_PBH_COUNTERS_FILE) + + db = Db() + runner = CliRunner() + + result = runner.invoke( + clear.cli.commands["pbh"]. + commands["statistics"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + result = runner.invoke( + config.config.commands["pbh"]. + commands["rule"].commands["update"], + ["pbh_table2", "vxlan", "--flow-counter", + "DISABLED"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + result = runner.invoke( + show.cli.commands["pbh"]. + commands["statistics"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + assert result.output == assert_show_output.show_pbh_statistics_after_disabling_rule +
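
Note (illustrative only, not part of the patch): the statistics tests above exercise a
snapshot/delta scheme -- "clear pbh statistics" saves the current per-rule counters to
/tmp/.pbh_counters.txt, and a later "show pbh statistics" reports the live COUNTERS_DB
values minus that snapshot, which is why the output is all zeros right after a clear and
shows only the increase once counters_db_updated is loaded. A minimal, self-contained
sketch of that scheme follows; the helper names and the "table|rule" key format are
assumptions for illustration, not the plugin's actual API.

import json
import os

SNAPSHOT_FILE = '/tmp/.pbh_counters.txt'   # same path the tests remove before each case

def save_snapshot(counters):
    # Emulates the effect of "clear pbh statistics": remember the current readings.
    with open(SNAPSHOT_FILE, 'w') as fp:
        json.dump(counters, fp)

def load_snapshot():
    # No snapshot file means counters have never been cleared.
    if not os.path.isfile(SNAPSHOT_FILE):
        return {}
    with open(SNAPSHOT_FILE) as fp:
        return json.load(fp)

def counters_delta(live):
    # What the user is expected to see: live readings minus the values saved at clear time.
    snapshot = load_snapshot()
    delta = {}
    for rule, value in live.items():
        base = snapshot.get(rule, {'packets': 0, 'bytes': 0})
        delta[rule] = {
            'packets': int(value['packets']) - int(base['packets']),
            'bytes': int(value['bytes']) - int(base['bytes']),
        }
    return delta

if __name__ == '__main__':
    live = {'pbh_table1|nvgre': {'packets': '100', 'bytes': '2000'}}
    save_snapshot(live)
    print(counters_delta(live))     # all zeros right after a clear
    live = {'pbh_table1|nvgre': {'packets': '300', 'bytes': '8000'}}
    print(counters_delta(live))     # only the increase since the clear is reported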