From a3d13453e8b13d0e7ceb862efd14ccfd3aadf030 Mon Sep 17 00:00:00 2001 From: saksarav-nokia Date: Thu, 9 Jun 2022 16:57:24 -0400 Subject: [PATCH 01/34] [gendump] Add Support to dump BCM-DNX commands (#1813) In generate_dump script, the function collect_broadcom tries to dump BCM XGS commands even in DNX switches and an error message is printed, hence generate_dump command fails. Modified the collect_broadcom function to check for BCM family and dump the DNX commands if the family is DNX. Signed-off-by: Sakthivadivu Saravanaraj sakthivadivu.saravanaraj@nokia.com What I did Modified the generate_dump script to dump the corresponding Broadcom diag shell commands based on Broadcom asic family. How I did it Modified the collect_broadcom function in generate_dump to get the Broadcom asic family by extracting the "version" command output as per Broadcom's suggestion and dump the Broadcom DNX commands if the Broadcom asic is DNX family. If it is not DNX family, dump the existing XGS commands. How to verify it Ran sonic cli command "sudo generate_dump " in DNX platform and XGS platform and verified that the tgz files are created with all the information dumped by generate_dump script. 
Previous command output (if the output of a command-line utility has changed) New command output (if the output of a command-line utility has changed) --- scripts/generate_dump | 90 ++++++++++++++++++++++++++++++++----------- 1 file changed, 68 insertions(+), 22 deletions(-) diff --git a/scripts/generate_dump b/scripts/generate_dump index 9223314955..b5dd472e05 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -985,28 +985,74 @@ collect_broadcom() { save_bcmcmd_all_ns "-t5 version" "broadcom.version" save_bcmcmd_all_ns "-t5 soc" "broadcom.soc" save_bcmcmd_all_ns "-t5 ps" "broadcom.ps" - save_bcmcmd_all_ns "\"l3 nat_ingress show\"" "broadcom.nat.ingress" - save_bcmcmd_all_ns "\"l3 nat_egress show\"" "broadcom.nat.egress" - save_bcmcmd_all_ns "\"ipmc table show\"" "broadcom.ipmc" - save_bcmcmd_all_ns "\"multicast show\"" "broadcom.multicast" - save_bcmcmd_all_ns "\"conf show\"" "conf.summary" - save_bcmcmd_all_ns "\"fp show\"" "fp.summary" - save_bcmcmd_all_ns "\"pvlan show\"" "pvlan.summary" - save_bcmcmd_all_ns "\"l2 show\"" "l2.summary" - save_bcmcmd_all_ns "\"l3 intf show\"" "l3.intf.summary" - save_bcmcmd_all_ns "\"l3 defip show\"" "l3.defip.summary" - save_bcmcmd_all_ns "\"l3 l3table show\"" "l3.l3table.summary" - save_bcmcmd_all_ns "\"l3 egress show\"" "l3.egress.summary" - save_bcmcmd_all_ns "\"l3 ecmp egress show\"" "l3.ecmp.egress.summary" - save_bcmcmd_all_ns "\"l3 multipath show\"" "l3.multipath.summary" - save_bcmcmd_all_ns "\"l3 ip6host show\"" "l3.ip6host.summary" - save_bcmcmd_all_ns "\"l3 ip6route show\"" "l3.ip6route.summary" - save_bcmcmd_all_ns "\"mc show\"" "multicast.summary" - save_bcmcmd_all_ns "\"cstat *\"" "cstat.summary" - save_bcmcmd_all_ns "\"mirror show\"" "mirror.summary" - save_bcmcmd_all_ns "\"mirror dest show\"" "mirror.dest.summary" - save_bcmcmd_all_ns "\"port *\"" "port.summary" - save_bcmcmd_all_ns "\"d chg my_station_tcam\"" "mystation.tcam.summary" + if [ -e /usr/share/sonic/device/${platform}/platform_asic ]; 
then + bcm_family=`cat /usr/share/sonic/device/${platform}/platform_asic` + else + echo "'/usr/share/sonic/device/${platform}/platform_asic' does not exist" > /tmp/error + save_file /tmp/error sai false + return + fi + + if [ "$bcm_family" == "broadcom-dnx" ]; then + save_bcmcmd_all_ns "\"l2 show\"" "l2.summary" + save_bcmcmd_all_ns "\"field group list\"" "fpgroup.list.summary" + total_fp_groups=34 + for (( fp_grp=0; fp_grp<$total_fp_groups; fp_grp++ )) + do + save_bcmcmd_all_ns "\"field group info group=$fp_grp\"" "fpgroup$fp_grp.info.summary" + done + save_bcmcmd_all_ns "\"dbal table dump table=IPV4_UNICAST_PRIVATE_LPM_FORWARD\"" "l3.ipv4.lpm.summary" + save_bcmcmd_all_ns "\"dbal table dump table=IPV6_UNICAST_PRIVATE_LPM_FORWARD\"" "l3.ipv6.lpm.summary" + save_bcmcmd_all_ns "\"dbal table dump table=IPV4_UNICAST_PRIVATE_HOST\"" "l3.ipv4.host.summary" + save_bcmcmd_all_ns "\"dbal table dump table=IPV6_UNICAST_PRIVATE_HOST\"" "l3.ipv6.host.summary" + save_bcmcmd_all_ns "\"dbal table dump table=SUPER_FEC_1ST_HIERARCHY\"" "l3.egress.fec.summary" + save_bcmcmd_all_ns "\"dbal table dump table=ECMP_TABLE\"" "ecmp.table.summary" + save_bcmcmd_all_ns "\"dbal table dump table=ECMP_GROUP_PROFILE_TABLE\"" "ecmp.group.summary" + save_bcmcmd_all_ns "\"dbal table dump table=ING_VSI_INFO_DB\"" "ing.vsi.summary" + save_bcmcmd_all_ns "\"dbal table dump table=L3_MY_MAC_DA_PREFIXES\"" "l3.mymac.summary" + save_bcmcmd_all_ns "\"dbal table dump table=INGRESS_VLAN_MEMBERSHIP\"" "ing.vlan.summary" + save_bcmcmd_all_ns "\"dbal table dump table=LOCAL_SBC_IN_LIF_MATCH_INFO_SW\"" "sbc.inlif.summary" + save_bcmcmd_all_ns "\"dbal table dump table=SNIF_COMMAND_TABLE\"" "snif.command.summary" + save_bcmcmd_all_ns "\"port mgmt dump full\"" "port.mgmt.summary" + save_bcmcmd_all_ns "\"tm lag\"" "tm.lag.summary" + save_bcmcmd_all_ns "\"pp info fec\"" "pp.fec.summary" + save_bcmcmd_all_ns "\"nif sts\"" "nif.sts.summary" + save_bcmcmd_all_ns "\"port pm info\"" "port.pm.summary" + save_bcmcmd_all_ns 
"\"conf show\"" "conf.show.summary" + save_bcmcmd_all_ns "\"show counters\"" "show.counters.summary" + save_bcmcmd_all_ns "\"diag counter g\"" "diag.counter.summary" + save_bcmcmd_all_ns "\"tm ing q map\"" "tm.ingress.qmap.summary" + save_bcmcmd_all_ns "\"tm ing vsq resources\"" "tm.ing.vsq.res.summary" + for group in {a..f} + do + save_bcmcmd_all_ns "\"tm ing vsq non g=$group\"" "tm.ing.vsq.non.group-$group.summary" + done + save_bcmcmd_all_ns "\"fabric connectivity\"" "fabric.connect.summary" + save_bcmcmd_all_ns "\"port status\"" "port.status.summary" + else + save_bcmcmd_all_ns "\"l3 nat_ingress show\"" "broadcom.nat.ingress" + save_bcmcmd_all_ns "\"l3 nat_egress show\"" "broadcom.nat.egress" + save_bcmcmd_all_ns "\"ipmc table show\"" "broadcom.ipmc" + save_bcmcmd_all_ns "\"multicast show\"" "broadcom.multicast" + save_bcmcmd_all_ns "\"conf show\"" "conf.summary" + save_bcmcmd_all_ns "\"fp show\"" "fp.summary" + save_bcmcmd_all_ns "\"pvlan show\"" "pvlan.summary" + save_bcmcmd_all_ns "\"l2 show\"" "l2.summary" + save_bcmcmd_all_ns "\"l3 intf show\"" "l3.intf.summary" + save_bcmcmd_all_ns "\"l3 defip show\"" "l3.defip.summary" + save_bcmcmd_all_ns "\"l3 l3table show\"" "l3.l3table.summary" + save_bcmcmd_all_ns "\"l3 egress show\"" "l3.egress.summary" + save_bcmcmd_all_ns "\"l3 ecmp egress show\"" "l3.ecmp.egress.summary" + save_bcmcmd_all_ns "\"l3 multipath show\"" "l3.multipath.summary" + save_bcmcmd_all_ns "\"l3 ip6host show\"" "l3.ip6host.summary" + save_bcmcmd_all_ns "\"l3 ip6route show\"" "l3.ip6route.summary" + save_bcmcmd_all_ns "\"mc show\"" "multicast.summary" + save_bcmcmd_all_ns "\"cstat *\"" "cstat.summary" + save_bcmcmd_all_ns "\"mirror show\"" "mirror.summary" + save_bcmcmd_all_ns "\"mirror dest show\"" "mirror.dest.summary" + save_bcmcmd_all_ns "\"port *\"" "port.summary" + save_bcmcmd_all_ns "\"d chg my_station_tcam\"" "mystation.tcam.summary" + fi copy_from_masic_docker "syncd" "/var/log/diagrun.log" "/var/log/diagrun.log" copy_from_masic_docker 
"syncd" "/var/log/bcm_diag_post" "/var/log/bcm_diag_post" From a50eca0e8bb3b5aabe5c8ef27f0b4553580a89b1 Mon Sep 17 00:00:00 2001 From: jingwenxie Date: Tue, 14 Jun 2022 18:23:53 -0700 Subject: [PATCH 02/34] [generic-config-updater] Add NTP validator (#2212) What I did When GCU config NTP_SERVER table, its change is not actually taken info effect. Because NTP service is not restarted. So I add NTP service validator and restart service when NTP_SERVER is changed. How I did it When NTP_SERVER table being configured through GCU, the ntp service will restart. How to verify it Run GCU E2E test for NTP_SERVER table. --- generic_config_updater/generic_config_updater.conf.json | 6 ++++++ generic_config_updater/services_validator.py | 3 +++ 2 files changed, 9 insertions(+) diff --git a/generic_config_updater/generic_config_updater.conf.json b/generic_config_updater/generic_config_updater.conf.json index 417da035cd..907b5a6863 100644 --- a/generic_config_updater/generic_config_updater.conf.json +++ b/generic_config_updater/generic_config_updater.conf.json @@ -45,6 +45,9 @@ }, "ACL_RULE": { "services_to_validate": [ "caclmgrd-service" ] + }, + "NTP_SERVER": { + "services_to_validate": [ "ntp-service" ] } }, "services": { @@ -65,6 +68,9 @@ }, "caclmgrd-service": { "validate_commands": [ "generic_config_updater.services_validator.caclmgrd_validator" ] + }, + "ntp-service": { + "validate_commands": [ "generic_config_updater.services_validator.ntp_validator" ] } } } diff --git a/generic_config_updater/services_validator.py b/generic_config_updater/services_validator.py index b059677c59..44a9e095eb 100644 --- a/generic_config_updater/services_validator.py +++ b/generic_config_updater/services_validator.py @@ -98,3 +98,6 @@ def caclmgrd_validator(old_config, upd_config, keys): # No update to ACL_RULE. 
return True + +def ntp_validator(old_config, upd_config, keys): + return _service_restart("ntp-config") From c0dffbae549f02ff3fef59f96e8264b7849b0670 Mon Sep 17 00:00:00 2001 From: vdahiya12 <67608553+vdahiya12@users.noreply.github.com> Date: Wed, 15 Jun 2022 13:46:59 -0700 Subject: [PATCH 03/34] [config][muxcable] fix minor config DB logic issue (#2210) Signed-off-by: vaibhav-dahiya vdahiya@microsoft.com For active-standby type cable there will not be any soc_ipv4 entry in the "MUX_CABLE" table, hence when updating the config DB entry with update for sudo config mux mode we should just check the cable_type and update the table entry if no soc_ipv4 the value written in none in that case What I did How I did it How to verify it Unit-tests cover the change Signed-off-by: vaibhav-dahiya --- config/muxcable.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/muxcable.py b/config/muxcable.py index 29375feb7a..fec0f16b3b 100644 --- a/config/muxcable.py +++ b/config/muxcable.py @@ -254,7 +254,7 @@ def lookup_statedb_and_update_configdb(db, per_npu_statedb, config_db, port, sta if str(state_cfg_val) == str(configdb_state): port_status_dict[port_name] = 'OK' else: - if cable_type is not None and soc_ipv4_value is not None: + if cable_type is not None or soc_ipv4_value is not None: config_db.set_entry("MUX_CABLE", port, {"state": state_cfg_val, "server_ipv4": ipv4_value, "server_ipv6": ipv6_value, From fc5633fa15bf146f7c97408f8fc1d2a5cea2d10a Mon Sep 17 00:00:00 2001 From: jingwenxie Date: Wed, 15 Jun 2022 22:11:30 -0700 Subject: [PATCH 04/34] increase coverage to 80% (#2214) What I did Change diff coverage threshold to 80% --- azure-pipelines.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 202bba1d5b..6cbc9d4316 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -20,7 +20,7 @@ stages: - job: displayName: "Python3" variables: - DIFF_COVER_CHECK_THRESHOLD: 50 + 
DIFF_COVER_CHECK_THRESHOLD: 80 DIFF_COVER_ENABLE: 'true' pool: vmImage: ubuntu-20.04 From 05c79ef0842deb93e05e3947c4179cb6e0c486e6 Mon Sep 17 00:00:00 2001 From: Dror Prital <76714716+dprital@users.noreply.github.com> Date: Thu, 16 Jun 2022 11:38:54 +0300 Subject: [PATCH 05/34] Fix header for the output table following 'show ipv6 interface' command (#2219) - What I did Fix the header for "show ipv6 interface" output that display "IPv4" instead of "IPv6" This PR fix the following issue: Azure/sonic-buildimage#11124 - How I did it Check if the table is for IPv4 or IPv6 and present the header accordingly. In addition, adjust the UT - How to verify it Run "show ip interface" CLi command and verify that the output header present "IPv4" Run "show ipv6 interface" CLi command and verify that the output header present "IPv6" --- scripts/ipintutil | 8 ++++++-- tests/show_ip_int_test.py | 6 +++--- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/scripts/ipintutil b/scripts/ipintutil index 5eb1f66524..c61c622a87 100755 --- a/scripts/ipintutil +++ b/scripts/ipintutil @@ -193,9 +193,13 @@ def get_ip_intfs_in_namespace(af, namespace, display): return ip_intfs -def display_ip_intfs(ip_intfs): +def display_ip_intfs(ip_intfs,address_family): header = ['Interface', 'Master', 'IPv4 address/mask', 'Admin/Oper', 'BGP Neighbor', 'Neighbor IP'] + + if address_family == 'ipv6': + header[2] = 'IPv6 address/mask' + data = [] for ip_intf, v in natsorted(ip_intfs.items()): ip_address = v['ipaddr'][0][1] @@ -265,7 +269,7 @@ def main(): load_db_config() ip_intfs = get_ip_intfs(af, namespace, display) - display_ip_intfs(ip_intfs) + display_ip_intfs(ip_intfs,args.address_family) sys.exit(0) diff --git a/tests/show_ip_int_test.py b/tests/show_ip_int_test.py index 31350d3ea5..d2abdbbf5d 100644 --- a/tests/show_ip_int_test.py +++ b/tests/show_ip_int_test.py @@ -19,7 +19,7 @@ Vlan100 40.1.1.1/24 error/down N/A N/A""" show_ipv6_intf_with_multiple_ips = """\ -Interface Master IPv4 
address/mask Admin/Oper BGP Neighbor Neighbor IP +Interface Master IPv6 address/mask Admin/Oper BGP Neighbor Neighbor IP --------------- -------- -------------------------------------------- ------------ -------------- ------------- Ethernet0 2100::1/64 error/down N/A N/A aa00::1/64 N/A N/A @@ -36,7 +36,7 @@ PortChannel0001 20.1.1.1/24 error/down T2-Peer 20.1.1.5""" show_multi_asic_ipv6_intf = """\ -Interface Master IPv4 address/mask Admin/Oper BGP Neighbor Neighbor IP +Interface Master IPv6 address/mask Admin/Oper BGP Neighbor Neighbor IP --------------- -------- -------------------------------------- ------------ -------------- ------------- Loopback0 fe80::60a5:9dff:fef4:1696%Loopback0/64 error/down N/A N/A PortChannel0001 aa00::1/64 error/down N/A N/A @@ -54,7 +54,7 @@ veth@eth2 193.1.1.1/24 error/down N/A N/A""" show_multi_asic_ipv6_intf_all = """\ -Interface Master IPv4 address/mask Admin/Oper BGP Neighbor Neighbor IP +Interface Master IPv6 address/mask Admin/Oper BGP Neighbor Neighbor IP --------------- -------- -------------------------------------- ------------ -------------- ------------- Loopback0 fe80::60a5:9dff:fef4:1696%Loopback0/64 error/down N/A N/A PortChannel0001 aa00::1/64 error/down N/A N/A From 9f2607da27c218d82161468f06b76099c3500b76 Mon Sep 17 00:00:00 2001 From: Sudharsan Dhamal Gopalarathnam Date: Sun, 19 Jun 2022 04:31:07 -0700 Subject: [PATCH 06/34] [config reload] Fixing config reload when timer based services are disabled (#2200) - What I did Fixed config reload when timer based delayed services are disabled. When they are disabled, the property property=LastTriggerUSecMonotonic returns "0". This will cause config reload to fail even though all enabled services are up. - How I did it Fixed the delayed services logic to check if the services are enabled before getting the property LastTriggerUSecMonotonic . Additionally fixed the return codes when config reload fails due to system checks - How to verify it Added UT to verify it. 
Modified sonic-mgmt tests to verify it additionally. Signed-off-by: Sudharsan Dhamal Gopalarathnam --- config/main.py | 22 +++++----- tests/config_test.py | 98 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 109 insertions(+), 11 deletions(-) diff --git a/config/main.py b/config/main.py index b1bf599a83..16aea6b610 100644 --- a/config/main.py +++ b/config/main.py @@ -812,7 +812,7 @@ def _get_sonic_services(): return (unit.strip() for unit in out.splitlines()) -def _get_delayed_sonic_services(): +def _get_delayed_sonic_units(get_timers=False): rc1 = clicommon.run_command("systemctl list-dependencies --plain sonic-delayed.target | sed '1d'", return_cmd=True) rc2 = clicommon.run_command("systemctl is-enabled {}".format(rc1.replace("\n", " ")), return_cmd=True) timer = [line.strip() for line in rc1.splitlines()] @@ -820,12 +820,15 @@ def _get_delayed_sonic_services(): services = [] for unit in timer: if state[timer.index(unit)] == "enabled": - services.append(re.sub('\.timer$', '', unit, 1)) + if not get_timers: + services.append(re.sub('\.timer$', '', unit, 1)) + else: + services.append(unit) return services def _reset_failed_services(): - for service in itertools.chain(_get_sonic_services(), _get_delayed_sonic_services()): + for service in itertools.chain(_get_sonic_services(), _get_delayed_sonic_units()): clicommon.run_command("systemctl reset-failed {}".format(service)) @@ -844,12 +847,8 @@ def _restart_services(): click.echo("Reloading Monit configuration ...") clicommon.run_command("sudo monit reload") -def _get_delay_timers(): - out = clicommon.run_command("systemctl list-dependencies sonic-delayed.target --plain |sed '1d'", return_cmd=True) - return [timer.strip() for timer in out.splitlines()] - def _delay_timers_elapsed(): - for timer in _get_delay_timers(): + for timer in _get_delayed_sonic_units(get_timers=True): out = clicommon.run_command("systemctl show {} --property=LastTriggerUSecMonotonic --value".format(timer), return_cmd=True) if 
out.strip() == "0": return False @@ -1447,18 +1446,19 @@ def reload(db, filename, yes, load_sysinfo, no_service_restart, disable_arp_cach """Clear current configuration and import a previous saved config DB dump file. : Names of configuration file(s) to load, separated by comma with no spaces in between """ + CONFIG_RELOAD_NOT_READY = 1 if not force and not no_service_restart: if _is_system_starting(): click.echo("System is not up. Retry later or use -f to avoid system checks") - return + sys.exit(CONFIG_RELOAD_NOT_READY) if not _delay_timers_elapsed(): click.echo("Relevant services are not up. Retry later or use -f to avoid system checks") - return + sys.exit(CONFIG_RELOAD_NOT_READY) if not _swss_ready(): click.echo("SwSS container is not ready. Retry later or use -f to avoid system checks") - return + sys.exit(CONFIG_RELOAD_NOT_READY) if filename is None: message = 'Clear current config and reload config in {} format from the default config file(s) ?'.format(file_format) diff --git a/tests/config_test.py b/tests/config_test.py index 5263b8e54d..87b66f7e61 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -73,6 +73,18 @@ Running command: rm -rf /tmp/dropstat-* Running command: /usr/local/bin/sonic-cfggen -H -k Seastone-DX010-25-50 --write-to-db""" +reload_config_with_disabled_service_output="""\ +Running command: rm -rf /tmp/dropstat-* +Stopping SONiC target ... +Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json --write-to-db +Restarting SONiC target ... +Reloading Monit configuration ... +""" + +reload_config_with_untriggered_timer_output="""\ +Relevant services are not up. 
Retry later or use -f to avoid system checks +""" + def mock_run_command_side_effect(*args, **kwargs): command = args[0] @@ -89,6 +101,44 @@ def mock_run_command_side_effect(*args, **kwargs): else: return '' +def mock_run_command_side_effect_disabled_timer(*args, **kwargs): + command = args[0] + + if kwargs.get('display_cmd'): + click.echo(click.style("Running command: ", fg='cyan') + click.style(command, fg='green')) + + if kwargs.get('return_cmd'): + if command == "systemctl list-dependencies --plain sonic-delayed.target | sed '1d'": + return 'snmp.timer' + elif command == "systemctl list-dependencies --plain sonic.target | sed '1d'": + return 'swss' + elif command == "systemctl is-enabled snmp.timer": + return 'masked' + elif command == "systemctl show swss.service --property ActiveState --value": + return 'active' + elif command == "systemctl show swss.service --property ActiveEnterTimestampMonotonic --value": + return '0' + else: + return '' + +def mock_run_command_side_effect_untriggered_timer(*args, **kwargs): + command = args[0] + + if kwargs.get('display_cmd'): + click.echo(click.style("Running command: ", fg='cyan') + click.style(command, fg='green')) + + if kwargs.get('return_cmd'): + if command == "systemctl list-dependencies --plain sonic-delayed.target | sed '1d'": + return 'snmp.timer' + elif command == "systemctl list-dependencies --plain sonic.target | sed '1d'": + return 'swss' + elif command == "systemctl is-enabled snmp.timer": + return 'enabled' + elif command == "systemctl show snmp.timer --property=LastTriggerUSecMonotonic --value": + return '0' + else: + return '' + def mock_run_command_side_effect_gnmi(*args, **kwargs): command = args[0] @@ -111,6 +161,8 @@ def mock_run_command_side_effect_gnmi(*args, **kwargs): class TestConfigReload(object): + dummy_cfg_file = os.path.join(os.sep, "tmp", "config.json") + @classmethod def setup_class(cls): os.environ['UTILITIES_UNIT_TESTING'] = "1" @@ -121,6 +173,7 @@ def setup_class(cls): import 
config.main importlib.reload(config.main) + open(cls.dummy_cfg_file, 'w').close() def test_config_reload(self, get_cmd_module, setup_single_broadcom_asic): with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: @@ -148,6 +201,32 @@ def test_config_reload(self, get_cmd_module, setup_single_broadcom_asic): assert "\n".join([l.rstrip() for l in result.output.split('\n')][:2]) == reload_config_with_sys_info_command_output + def test_config_reload_untriggered_timer(self, get_cmd_module, setup_single_broadcom_asic): + with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect_untriggered_timer)) as mock_run_command: + (config, show) = get_cmd_module + + jsonfile_config = os.path.join(mock_db_path, "config_db.json") + jsonfile_init_cfg = os.path.join(mock_db_path, "init_cfg.json") + + # create object + config.INIT_CFG_FILE = jsonfile_init_cfg + config.DEFAULT_CONFIG_DB_FILE = jsonfile_config + + db = Db() + runner = CliRunner() + obj = {'config_db': db.cfgdb} + + # simulate 'config reload' to provoke load_sys_info option + result = runner.invoke(config.config.commands["reload"], ["-l", "-y"], obj=obj) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + + assert result.exit_code == 1 + + assert "\n".join([l.rstrip() for l in result.output.split('\n')][:2]) == reload_config_with_untriggered_timer_output + @classmethod def teardown_class(cls): print("TEARDOWN") @@ -312,6 +391,25 @@ def test_reload_config(self, get_cmd_module, setup_single_broadcom_asic): assert "\n".join([l.rstrip() for l in result.output.split('\n')]) \ == RELOAD_CONFIG_DB_OUTPUT + def test_config_reload_disabled_service(self, get_cmd_module, setup_single_broadcom_asic): + with mock.patch( + "utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect_disabled_timer) + ) as mock_run_command: + (config, show) = 
get_cmd_module + + runner = CliRunner() + result = runner.invoke(config.config.commands["reload"], [self.dummy_cfg_file, "-y"]) + + print(result.exit_code) + print(result.output) + print(reload_config_with_disabled_service_output) + traceback.print_tb(result.exc_info[2]) + + assert result.exit_code == 0 + + assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == reload_config_with_disabled_service_output + def test_reload_config_masic(self, get_cmd_module, setup_multi_broadcom_masic): with mock.patch( "utilities_common.cli.run_command", From 248ddd5b9185f59479330d7d0db143ef1b3d8ea2 Mon Sep 17 00:00:00 2001 From: Nathan Cohen <66022536+nathcohe@users.noreply.github.com> Date: Mon, 20 Jun 2022 04:27:14 -0700 Subject: [PATCH 07/34] Gives cisco-8000 more flexibility to easily add subcommnads under show platform (#2213) What I did Gave cisco-8000 the ability to add sub-commands under show platform <> in our downstream repo. Currently every time we want to add/remove/update a cli, we must raise a PR upstream. How I did it I have the cisco-8000.py module import a list of click commands that are written in a module that is located in our platform code. How to verify it Run show platform -h to see all commands. We will be able to see show platform inventory. This is only available on cisco devices. --- show/plugins/cisco-8000.py | 31 ++++++++++--------------------- 1 file changed, 10 insertions(+), 21 deletions(-) diff --git a/show/plugins/cisco-8000.py b/show/plugins/cisco-8000.py index c3e5e0327b..2113336bdb 100644 --- a/show/plugins/cisco-8000.py +++ b/show/plugins/cisco-8000.py @@ -1,36 +1,25 @@ #!/usr/bin/env python ######################################################### -# Copyright 2021 Cisco Systems, Inc. +# Copyright 2021-2022 Cisco Systems, Inc. # All rights reserved. 
# # CLI Extensions for show command ######################################################### try: - import click - import yaml - from show import platform from sonic_py_common import device_info import utilities_common.cli as clicommon except ImportError as e: - raise ImportError("%s - required module not found" % str(e)) + raise ImportError("%s - required module not found".format(str(e))) -PLATFORM_PY = '/opt/cisco/bin/platform.py' - -@click.command() -def inventory(): - """Show Platform Inventory""" - args = [ PLATFORM_PY, 'inventoryshow' ] - clicommon.run_command(args) +try: + from sonic_platform.cli import PLATFORM_CLIS +except ImportError: + PLATFORM_CLIS = [] -@click.command() -def idprom(): - """Show Platform Idprom Inventory""" - args = [ PLATFORM_PY, 'idprom' ] - clicommon.run_command(args) def register(cli): - version_info = device_info.get_sonic_version_info() - if (version_info and version_info.get('asic_type') == 'cisco-8000'): - cli.commands['platform'].add_command(inventory) - cli.commands['platform'].add_command(idprom) + version_info = device_info.get_sonic_version_info() + if version_info and version_info.get("asic_type") == "cisco-8000": + for c in PLATFORM_CLIS: + cli.commands["platform"].add_command(c) From 6dbb4bdaa77c3d02cfa13667a3de58b19d3c57f8 Mon Sep 17 00:00:00 2001 From: Sambath Kumar Balasubramanian <63021927+skbarista@users.noreply.github.com> Date: Mon, 20 Jun 2022 21:20:00 -0700 Subject: [PATCH 08/34] Add an option in queue stat to display voq counters. (#1827) What I did Added a new option in show queue counters command to display voq statistics How I did it Enhanced queue stat to add an option to display voq statistics. When voq option is given look at COUNTERS_SYSTEM_PORT_NAME_MAP and COUNTERS_VOQ_NAME_MAP to derive the queue information to lookup. Added a modified header for voq statistics. How to verify it Added a unit test for the new voq option. 
Manually verified it on a voq chassis --- scripts/queuestat | 33 ++- show/main.py | 6 +- tests/mock_tables/counters_db.json | 256 +++++++++++++++++++++++- tests/queue_counter_test.py | 309 +++++++++++++++++++++++++++++ 4 files changed, 594 insertions(+), 10 deletions(-) diff --git a/scripts/queuestat b/scripts/queuestat index b7f4f1d382..1455494701 100755 --- a/scripts/queuestat +++ b/scripts/queuestat @@ -32,6 +32,7 @@ from swsscommon.swsscommon import SonicV2Connector QueueStats = namedtuple("QueueStats", "queueindex, queuetype, totalpacket, totalbytes, droppacket, dropbytes") header = ['Port', 'TxQ', 'Counter/pkts', 'Counter/bytes', 'Drop/pkts', 'Drop/bytes'] +voq_header = ['Port', 'Voq', 'Counter/pkts', 'Counter/bytes', 'Drop/pkts', 'Drop/bytes'] counter_bucket_dict = { 'SAI_QUEUE_STAT_PACKETS': 2, @@ -46,13 +47,17 @@ from utilities_common.netstat import ns_diff, STATUS_NA QUEUE_TYPE_MC = 'MC' QUEUE_TYPE_UC = 'UC' QUEUE_TYPE_ALL = 'ALL' +QUEUE_TYPE_VOQ = 'VOQ' SAI_QUEUE_TYPE_MULTICAST = "SAI_QUEUE_TYPE_MULTICAST" SAI_QUEUE_TYPE_UNICAST = "SAI_QUEUE_TYPE_UNICAST" +SAI_QUEUE_TYPE_UNICAST_VOQ = "SAI_QUEUE_TYPE_UNICAST_VOQ" SAI_QUEUE_TYPE_ALL = "SAI_QUEUE_TYPE_ALL" COUNTER_TABLE_PREFIX = "COUNTERS:" COUNTERS_PORT_NAME_MAP = "COUNTERS_PORT_NAME_MAP" +COUNTERS_SYSTEM_PORT_NAME_MAP = "COUNTERS_SYSTEM_PORT_NAME_MAP" COUNTERS_QUEUE_NAME_MAP = "COUNTERS_QUEUE_NAME_MAP" +COUNTERS_VOQ_NAME_MAP= "COUNTERS_VOQ_NAME_MAP" COUNTERS_QUEUE_TYPE_MAP = "COUNTERS_QUEUE_TYPE_MAP" COUNTERS_QUEUE_INDEX_MAP = "COUNTERS_QUEUE_INDEX_MAP" COUNTERS_QUEUE_PORT_MAP = "COUNTERS_QUEUE_PORT_MAP" @@ -79,9 +84,10 @@ def build_json(port, cnstat): class Queuestat(object): - def __init__(self): + def __init__(self, voq=False): self.db = SonicV2Connector(use_unix_socket_path=False) self.db.connect(self.db.COUNTERS_DB) + self.voq = voq def get_queue_port(table_id): port_table_id = self.db.get(self.db.COUNTERS_DB, COUNTERS_QUEUE_PORT_MAP, table_id) @@ -92,7 +98,11 @@ class Queuestat(object): return 
port_table_id # Get all ports - self.counter_port_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP) + if voq: + self.counter_port_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_SYSTEM_PORT_NAME_MAP) + else: + self.counter_port_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP) + if self.counter_port_name_map is None: print("COUNTERS_PORT_NAME_MAP is empty!") sys.exit(1) @@ -104,8 +114,13 @@ class Queuestat(object): self.port_queues_map[port] = {} self.port_name_map[self.counter_port_name_map[port]] = port + counter_queue_name_map = None # Get Queues for each port - counter_queue_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_QUEUE_NAME_MAP) + if voq: + counter_queue_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_VOQ_NAME_MAP) + else: + counter_queue_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_QUEUE_NAME_MAP) + if counter_queue_name_map is None: print("COUNTERS_QUEUE_NAME_MAP is empty!") sys.exit(1) @@ -139,6 +154,8 @@ class Queuestat(object): return QUEUE_TYPE_MC elif queue_type == SAI_QUEUE_TYPE_UNICAST: return QUEUE_TYPE_UC + elif queue_type == SAI_QUEUE_TYPE_UNICAST_VOQ: + return QUEUE_TYPE_VOQ elif queue_type == SAI_QUEUE_TYPE_ALL: return QUEUE_TYPE_ALL else: @@ -189,7 +206,8 @@ class Queuestat(object): json_output[port].update(build_json(port, table)) return json_output else: - print(tabulate(table, header, tablefmt='simple', stralign='right')) + hdr = voq_header if self.voq else header + print(tabulate(table, hdr, tablefmt='simple', stralign='right')) print() def cnstat_diff_print(self, port, cnstat_new_dict, cnstat_old_dict, json_opt): @@ -224,7 +242,8 @@ class Queuestat(object): json_output[port].update(build_json(port, table)) return json_output else: - print(tabulate(table, header, tablefmt='simple', stralign='right')) + hdr = voq_header if self.voq else header + print(tabulate(table, hdr, tablefmt='simple', stralign='right')) print() def get_print_all_stat(self, 
json_opt): @@ -331,10 +350,12 @@ Examples: parser.add_argument('-d', '--delete', action='store_true', help='Delete saved stats') parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') parser.add_argument('-j', '--json_opt', action='store_true', help='Print in JSON format') + parser.add_argument('-V', '--voq', action='store_true', help='display voq stats') args = parser.parse_args() save_fresh_stats = args.clear delete_all_stats = args.delete + voq = args.voq json_opt = args.json_opt port_to_show_stats = args.port @@ -356,7 +377,7 @@ Examples: print(e.errno, e) sys.exit(e) - queuestat = Queuestat() + queuestat = Queuestat( voq ) if save_fresh_stats: queuestat.save_fresh_stats() diff --git a/show/main.py b/show/main.py index 84f6cee981..a0615529d4 100755 --- a/show/main.py +++ b/show/main.py @@ -668,7 +668,8 @@ def queue(): @click.argument('interfacename', required=False) @click.option('--verbose', is_flag=True, help="Enable verbose output") @click.option('--json', is_flag=True, help="JSON output") -def counters(interfacename, verbose, json): +@click.option('--voq', is_flag=True, help="VOQ counters") +def counters(interfacename, verbose, json, voq): """Show queue counters""" cmd = "queuestat" @@ -683,6 +684,9 @@ def counters(interfacename, verbose, json): if json: cmd += " -j" + if voq: + cmd += " -V" + run_command(cmd, display_cmd=verbose) # diff --git a/tests/mock_tables/counters_db.json b/tests/mock_tables/counters_db.json index b79c839288..e12e3347fd 100644 --- a/tests/mock_tables/counters_db.json +++ b/tests/mock_tables/counters_db.json @@ -398,6 +398,150 @@ "SAI_QUEUE_STAT_PACKETS": "20", "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "81" }, + "COUNTERS:oid:0x15000000000657": { + "SAI_QUEUE_STAT_BYTES": "30", + "SAI_QUEUE_STAT_DROPPED_BYTES": "74", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "56", + "SAI_QUEUE_STAT_PACKETS": "68" + }, + "COUNTERS:oid:0x15000000000658": { + "SAI_QUEUE_STAT_BYTES": "43", + "SAI_QUEUE_STAT_DROPPED_BYTES": "1", + 
"SAI_QUEUE_STAT_DROPPED_PACKETS": "39", + "SAI_QUEUE_STAT_PACKETS": "60" + }, + "COUNTERS:oid:0x15000000000659": { + "SAI_QUEUE_STAT_BYTES": "7", + "SAI_QUEUE_STAT_DROPPED_BYTES": "21", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "39", + "SAI_QUEUE_STAT_PACKETS": "82" + }, + "COUNTERS:oid:0x1500000000065a": { + "SAI_QUEUE_STAT_BYTES": "59", + "SAI_QUEUE_STAT_DROPPED_BYTES": "94", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "12", + "SAI_QUEUE_STAT_PACKETS": "11" + }, + "COUNTERS:oid:0x1500000000065b": { + "SAI_QUEUE_STAT_BYTES": "62", + "SAI_QUEUE_STAT_DROPPED_BYTES": "40", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "35", + "SAI_QUEUE_STAT_PACKETS": "36" + }, + "COUNTERS:oid:0x1500000000065c": { + "SAI_QUEUE_STAT_BYTES": "91", + "SAI_QUEUE_STAT_DROPPED_BYTES": "88", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "2", + "SAI_QUEUE_STAT_PACKETS": "49" + }, + "COUNTERS:oid:0x1500000000065d": { + "SAI_QUEUE_STAT_BYTES": "17", + "SAI_QUEUE_STAT_DROPPED_BYTES": "74", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "94", + "SAI_QUEUE_STAT_PACKETS": "33" + }, + "COUNTERS:oid:0x1500000000065e": { + "SAI_QUEUE_STAT_BYTES": "71", + "SAI_QUEUE_STAT_DROPPED_BYTES": "33", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "95", + "SAI_QUEUE_STAT_PACKETS": "40" + }, + "COUNTERS:oid:0x15000000000667": { + "SAI_QUEUE_STAT_BYTES": "8", + "SAI_QUEUE_STAT_DROPPED_BYTES": "78", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "93", + "SAI_QUEUE_STAT_PACKETS": "54" + }, + "COUNTERS:oid:0x15000000000668": { + "SAI_QUEUE_STAT_BYTES": "96", + "SAI_QUEUE_STAT_DROPPED_BYTES": "9", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "74", + "SAI_QUEUE_STAT_PACKETS": "83" + }, + "COUNTERS:oid:0x15000000000669": { + "SAI_QUEUE_STAT_BYTES": "60", + "SAI_QUEUE_STAT_DROPPED_BYTES": "31", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "61", + "SAI_QUEUE_STAT_PACKETS": "15" + }, + "COUNTERS:oid:0x1500000000066a": { + "SAI_QUEUE_STAT_BYTES": "52", + "SAI_QUEUE_STAT_DROPPED_BYTES": "94", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "82", + "SAI_QUEUE_STAT_PACKETS": "45" + }, + "COUNTERS:oid:0x1500000000066b": 
{ + "SAI_QUEUE_STAT_BYTES": "88", + "SAI_QUEUE_STAT_DROPPED_BYTES": "52", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "89", + "SAI_QUEUE_STAT_PACKETS": "55" + }, + "COUNTERS:oid:0x1500000000066c": { + "SAI_QUEUE_STAT_BYTES": "70", + "SAI_QUEUE_STAT_DROPPED_BYTES": "79", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "95", + "SAI_QUEUE_STAT_PACKETS": "14" + }, + "COUNTERS:oid:0x1500000000066d": { + "SAI_QUEUE_STAT_BYTES": "60", + "SAI_QUEUE_STAT_DROPPED_BYTES": "81", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "66", + "SAI_QUEUE_STAT_PACKETS": "68" + }, + "COUNTERS:oid:0x1500000000066e": { + "SAI_QUEUE_STAT_BYTES": "4", + "SAI_QUEUE_STAT_DROPPED_BYTES": "76", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "48", + "SAI_QUEUE_STAT_PACKETS": "63" + }, + "COUNTERS:oid:0x15000000000677": { + "SAI_QUEUE_STAT_BYTES": "73", + "SAI_QUEUE_STAT_DROPPED_BYTES": "74", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "77", + "SAI_QUEUE_STAT_PACKETS": "41" + }, + "COUNTERS:oid:0x15000000000678": { + "SAI_QUEUE_STAT_BYTES": "21", + "SAI_QUEUE_STAT_DROPPED_BYTES": "54", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "56", + "SAI_QUEUE_STAT_PACKETS": "60" + }, + "COUNTERS:oid:0x15000000000679": { + "SAI_QUEUE_STAT_BYTES": "31", + "SAI_QUEUE_STAT_DROPPED_BYTES": "39", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "12", + "SAI_QUEUE_STAT_PACKETS": "57" + }, + "COUNTERS:oid:0x1500000000067a": { + "SAI_QUEUE_STAT_BYTES": "96", + "SAI_QUEUE_STAT_DROPPED_BYTES": "98", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "70", + "SAI_QUEUE_STAT_PACKETS": "41" + }, + "COUNTERS:oid:0x1500000000067b": { + "SAI_QUEUE_STAT_BYTES": "49", + "SAI_QUEUE_STAT_DROPPED_BYTES": "36", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "63", + "SAI_QUEUE_STAT_PACKETS": "18" + }, + "COUNTERS:oid:0x1500000000067c": { + "SAI_QUEUE_STAT_BYTES": "90", + "SAI_QUEUE_STAT_DROPPED_BYTES": "15", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "3", + "SAI_QUEUE_STAT_PACKETS": "99" + }, + "COUNTERS:oid:0x1500000000067d": { + "SAI_QUEUE_STAT_BYTES": "84", + "SAI_QUEUE_STAT_DROPPED_BYTES": "94", + "SAI_QUEUE_STAT_DROPPED_PACKETS": 
"82", + "SAI_QUEUE_STAT_PACKETS": "8" + }, + "COUNTERS:oid:0x1500000000067e": { + "SAI_QUEUE_STAT_BYTES": "15", + "SAI_QUEUE_STAT_DROPPED_BYTES": "92", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "75", + "SAI_QUEUE_STAT_PACKETS": "83" + }, "COUNTERS:oid:0x60000000005a3": { "SAI_ROUTER_INTERFACE_STAT_IN_ERROR_OCTETS": "0", "SAI_ROUTER_INTERFACE_STAT_IN_ERROR_PACKETS": "0", @@ -904,7 +1048,40 @@ "COUNTERS:oid:0x1a0000000003a6" : { "SAI_INGRESS_PRIORITY_GROUP_STAT_DROPPED_PACKETS" : "107" }, + + "COUNTERS_SYSTEM_PORT_NAME_MAP": { + "Ethernet0": "oid:0x1000000000042", + "Ethernet4": "oid:0x1000000000043", + "Ethernet8": "oid:0x1000000000044" + }, + "COUNTERS_VOQ_NAME_MAP": { + "Ethernet0:0": "oid:0x15000000000657", + "Ethernet0:1": "oid:0x15000000000658", + "Ethernet0:2": "oid:0x15000000000659", + "Ethernet0:3": "oid:0x1500000000065a", + "Ethernet0:4": "oid:0x1500000000065b", + "Ethernet0:5": "oid:0x1500000000065c", + "Ethernet0:6": "oid:0x1500000000065d", + "Ethernet0:7": "oid:0x1500000000065e", + "Ethernet4:0": "oid:0x15000000000667", + "Ethernet4:1": "oid:0x15000000000668", + "Ethernet4:2": "oid:0x15000000000669", + "Ethernet4:3": "oid:0x1500000000066a", + "Ethernet4:4": "oid:0x1500000000066b", + "Ethernet4:5": "oid:0x1500000000066c", + "Ethernet4:6": "oid:0x1500000000066d", + "Ethernet4:7": "oid:0x1500000000066e", + "Ethernet8:0": "oid:0x15000000000677", + "Ethernet8:1": "oid:0x15000000000678", + "Ethernet8:2": "oid:0x15000000000679", + "Ethernet8:3": "oid:0x1500000000067a", + "Ethernet8:4": "oid:0x1500000000067b", + "Ethernet8:5": "oid:0x1500000000067c", + "Ethernet8:6": "oid:0x1500000000067d", + "Ethernet8:7": "oid:0x1500000000067e" + }, + "COUNTERS_PORT_NAME_MAP": { "Ethernet0": "oid:0x1000000000012", "Ethernet4": "oid:0x1000000000013", @@ -1155,8 +1332,33 @@ "oid:0x150000000003c3": "oid:0x1000000000014", "oid:0x150000000003c4": "oid:0x1000000000014", "oid:0x150000000003c5": "oid:0x1000000000014", - "oid:0x150000000003c6": "oid:0x1000000000014" + "oid:0x150000000003c6": 
"oid:0x1000000000014", + "oid:0x15000000000657": "oid:0x1000000000042", + "oid:0x15000000000658": "oid:0x1000000000042", + "oid:0x15000000000659": "oid:0x1000000000042", + "oid:0x1500000000065a": "oid:0x1000000000042", + "oid:0x1500000000065b": "oid:0x1000000000042", + "oid:0x1500000000065c": "oid:0x1000000000042", + "oid:0x1500000000065d": "oid:0x1000000000042", + "oid:0x1500000000065e": "oid:0x1000000000042", + "oid:0x15000000000667": "oid:0x1000000000043", + "oid:0x15000000000668": "oid:0x1000000000043", + "oid:0x15000000000669": "oid:0x1000000000043", + "oid:0x1500000000066a": "oid:0x1000000000043", + "oid:0x1500000000066b": "oid:0x1000000000043", + "oid:0x1500000000066c": "oid:0x1000000000043", + "oid:0x1500000000066d": "oid:0x1000000000043", + "oid:0x1500000000066e": "oid:0x1000000000043", + "oid:0x15000000000677": "oid:0x1000000000044", + "oid:0x15000000000678": "oid:0x1000000000044", + "oid:0x15000000000679": "oid:0x1000000000044", + "oid:0x1500000000067a": "oid:0x1000000000044", + "oid:0x1500000000067b": "oid:0x1000000000044", + "oid:0x1500000000067c": "oid:0x1000000000044", + "oid:0x1500000000067d": "oid:0x1000000000044", + "oid:0x1500000000067e": "oid:0x1000000000044" }, + "COUNTERS_PG_INDEX_MAP": { "oid:0x1a00000000034f": "0", "oid:0x1a000000000350": "1", @@ -1273,7 +1475,31 @@ "oid:0x150000000003c3": "26", "oid:0x150000000003c4": "27", "oid:0x150000000003c5": "28", - "oid:0x150000000003c6": "29" + "oid:0x150000000003c6": "29", + "oid:0x15000000000657": "0", + "oid:0x15000000000658": "1", + "oid:0x15000000000659": "2", + "oid:0x1500000000065a": "3", + "oid:0x1500000000065b": "4", + "oid:0x1500000000065c": "5", + "oid:0x1500000000065d": "6", + "oid:0x1500000000065e": "7", + "oid:0x15000000000667": "0", + "oid:0x15000000000668": "1", + "oid:0x15000000000669": "2", + "oid:0x1500000000066a": "3", + "oid:0x1500000000066b": "4", + "oid:0x1500000000066c": "5", + "oid:0x1500000000066d": "6", + "oid:0x1500000000066e": "7", + "oid:0x15000000000677": "0", + 
"oid:0x15000000000678": "1", + "oid:0x15000000000679": "2", + "oid:0x1500000000067a": "3", + "oid:0x1500000000067b": "4", + "oid:0x1500000000067c": "5", + "oid:0x1500000000067d": "6", + "oid:0x1500000000067e": "7" }, "COUNTERS_QUEUE_TYPE_MAP": { "oid:0x15000000000357": "SAI_QUEUE_TYPE_UNICAST", @@ -1365,7 +1591,31 @@ "oid:0x150000000003c3": "SAI_QUEUE_TYPE_ALL", "oid:0x150000000003c4": "SAI_QUEUE_TYPE_ALL", "oid:0x150000000003c5": "SAI_QUEUE_TYPE_ALL", - "oid:0x150000000003c6": "SAI_QUEUE_TYPE_ALL" + "oid:0x150000000003c6": "SAI_QUEUE_TYPE_ALL", + "oid:0x15000000000657": "SAI_QUEUE_TYPE_UNICAST_VOQ", + "oid:0x15000000000658": "SAI_QUEUE_TYPE_UNICAST_VOQ", + "oid:0x15000000000659": "SAI_QUEUE_TYPE_UNICAST_VOQ", + "oid:0x1500000000065a": "SAI_QUEUE_TYPE_UNICAST_VOQ", + "oid:0x1500000000065b": "SAI_QUEUE_TYPE_UNICAST_VOQ", + "oid:0x1500000000065c": "SAI_QUEUE_TYPE_UNICAST_VOQ", + "oid:0x1500000000065d": "SAI_QUEUE_TYPE_UNICAST_VOQ", + "oid:0x1500000000065e": "SAI_QUEUE_TYPE_UNICAST_VOQ", + "oid:0x15000000000667": "SAI_QUEUE_TYPE_UNICAST_VOQ", + "oid:0x15000000000668": "SAI_QUEUE_TYPE_UNICAST_VOQ", + "oid:0x15000000000669": "SAI_QUEUE_TYPE_UNICAST_VOQ", + "oid:0x1500000000066a": "SAI_QUEUE_TYPE_UNICAST_VOQ", + "oid:0x1500000000066b": "SAI_QUEUE_TYPE_UNICAST_VOQ", + "oid:0x1500000000066c": "SAI_QUEUE_TYPE_UNICAST_VOQ", + "oid:0x1500000000066d": "SAI_QUEUE_TYPE_UNICAST_VOQ", + "oid:0x1500000000066e": "SAI_QUEUE_TYPE_UNICAST_VOQ", + "oid:0x15000000000677": "SAI_QUEUE_TYPE_UNICAST_VOQ", + "oid:0x15000000000678": "SAI_QUEUE_TYPE_UNICAST_VOQ", + "oid:0x15000000000679": "SAI_QUEUE_TYPE_UNICAST_VOQ", + "oid:0x1500000000067a": "SAI_QUEUE_TYPE_UNICAST_VOQ", + "oid:0x1500000000067b": "SAI_QUEUE_TYPE_UNICAST_VOQ", + "oid:0x1500000000067c": "SAI_QUEUE_TYPE_UNICAST_VOQ", + "oid:0x1500000000067d": "SAI_QUEUE_TYPE_UNICAST_VOQ", + "oid:0x1500000000067e": "SAI_QUEUE_TYPE_UNICAST_VOQ" }, "COUNTERS_DEBUG_NAME_PORT_STAT_MAP": { "DEBUG_0": "SAI_PORT_STAT_IN_DROP_REASON_RANGE_BASE", diff 
--git a/tests/queue_counter_test.py b/tests/queue_counter_test.py index 66dfc828ed..b7b3637126 100644 --- a/tests/queue_counter_test.py +++ b/tests/queue_counter_test.py @@ -895,6 +895,263 @@ } }""" +show_queue_voq_counters = """\ + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes +--------- ----- -------------- --------------- ----------- ------------ +Ethernet0 VOQ0 68 30 56 74 +Ethernet0 VOQ1 60 43 39 1 +Ethernet0 VOQ2 82 7 39 21 +Ethernet0 VOQ3 11 59 12 94 +Ethernet0 VOQ4 36 62 35 40 +Ethernet0 VOQ5 49 91 2 88 +Ethernet0 VOQ6 33 17 94 74 +Ethernet0 VOQ7 40 71 95 33 + + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes +--------- ----- -------------- --------------- ----------- ------------ +Ethernet4 VOQ0 54 8 93 78 +Ethernet4 VOQ1 83 96 74 9 +Ethernet4 VOQ2 15 60 61 31 +Ethernet4 VOQ3 45 52 82 94 +Ethernet4 VOQ4 55 88 89 52 +Ethernet4 VOQ5 14 70 95 79 +Ethernet4 VOQ6 68 60 66 81 +Ethernet4 VOQ7 63 4 48 76 + + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes +--------- ----- -------------- --------------- ----------- ------------ +Ethernet8 VOQ0 41 73 77 74 +Ethernet8 VOQ1 60 21 56 54 +Ethernet8 VOQ2 57 31 12 39 +Ethernet8 VOQ3 41 96 70 98 +Ethernet8 VOQ4 18 49 63 36 +Ethernet8 VOQ5 99 90 3 15 +Ethernet8 VOQ6 8 84 82 94 +Ethernet8 VOQ7 83 15 75 92 + +""" + +show_queue_port_voq_counters = """\ + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes +--------- ----- -------------- --------------- ----------- ------------ +Ethernet0 VOQ0 68 30 56 74 +Ethernet0 VOQ1 60 43 39 1 +Ethernet0 VOQ2 82 7 39 21 +Ethernet0 VOQ3 11 59 12 94 +Ethernet0 VOQ4 36 62 35 40 +Ethernet0 VOQ5 49 91 2 88 +Ethernet0 VOQ6 33 17 94 74 +Ethernet0 VOQ7 40 71 95 33 + +""" + +show_queue_voq_counters_json = """\ +{ + "Ethernet0": { + "VOQ0": { + "dropbytes": "74", + "droppacket": "56", + "totalbytes": "30", + "totalpacket": "68" + }, + "VOQ1": { + "dropbytes": "1", + "droppacket": "39", + "totalbytes": "43", + "totalpacket": "60" + }, + "VOQ2": { + "dropbytes": "21", 
+ "droppacket": "39", + "totalbytes": "7", + "totalpacket": "82" + }, + "VOQ3": { + "dropbytes": "94", + "droppacket": "12", + "totalbytes": "59", + "totalpacket": "11" + }, + "VOQ4": { + "dropbytes": "40", + "droppacket": "35", + "totalbytes": "62", + "totalpacket": "36" + }, + "VOQ5": { + "dropbytes": "88", + "droppacket": "2", + "totalbytes": "91", + "totalpacket": "49" + }, + "VOQ6": { + "dropbytes": "74", + "droppacket": "94", + "totalbytes": "17", + "totalpacket": "33" + }, + "VOQ7": { + "dropbytes": "33", + "droppacket": "95", + "totalbytes": "71", + "totalpacket": "40" + } + }, + "Ethernet4": { + "VOQ0": { + "dropbytes": "78", + "droppacket": "93", + "totalbytes": "8", + "totalpacket": "54" + }, + "VOQ1": { + "dropbytes": "9", + "droppacket": "74", + "totalbytes": "96", + "totalpacket": "83" + }, + "VOQ2": { + "dropbytes": "31", + "droppacket": "61", + "totalbytes": "60", + "totalpacket": "15" + }, + "VOQ3": { + "dropbytes": "94", + "droppacket": "82", + "totalbytes": "52", + "totalpacket": "45" + }, + "VOQ4": { + "dropbytes": "52", + "droppacket": "89", + "totalbytes": "88", + "totalpacket": "55" + }, + "VOQ5": { + "dropbytes": "79", + "droppacket": "95", + "totalbytes": "70", + "totalpacket": "14" + }, + "VOQ6": { + "dropbytes": "81", + "droppacket": "66", + "totalbytes": "60", + "totalpacket": "68" + }, + "VOQ7": { + "dropbytes": "76", + "droppacket": "48", + "totalbytes": "4", + "totalpacket": "63" + } + }, + "Ethernet8": { + "VOQ0": { + "dropbytes": "74", + "droppacket": "77", + "totalbytes": "73", + "totalpacket": "41" + }, + "VOQ1": { + "dropbytes": "54", + "droppacket": "56", + "totalbytes": "21", + "totalpacket": "60" + }, + "VOQ2": { + "dropbytes": "39", + "droppacket": "12", + "totalbytes": "31", + "totalpacket": "57" + }, + "VOQ3": { + "dropbytes": "98", + "droppacket": "70", + "totalbytes": "96", + "totalpacket": "41" + }, + "VOQ4": { + "dropbytes": "36", + "droppacket": "63", + "totalbytes": "49", + "totalpacket": "18" + }, + "VOQ5": { + 
"dropbytes": "15", + "droppacket": "3", + "totalbytes": "90", + "totalpacket": "99" + }, + "VOQ6": { + "dropbytes": "94", + "droppacket": "82", + "totalbytes": "84", + "totalpacket": "8" + }, + "VOQ7": { + "dropbytes": "92", + "droppacket": "75", + "totalbytes": "15", + "totalpacket": "83" + } + } +}""" + +show_queue_port_voq_counters_json = """\ +{ + "Ethernet0": { + "VOQ0": { + "dropbytes": "74", + "droppacket": "56", + "totalbytes": "30", + "totalpacket": "68" + }, + "VOQ1": { + "dropbytes": "1", + "droppacket": "39", + "totalbytes": "43", + "totalpacket": "60" + }, + "VOQ2": { + "dropbytes": "21", + "droppacket": "39", + "totalbytes": "7", + "totalpacket": "82" + }, + "VOQ3": { + "dropbytes": "94", + "droppacket": "12", + "totalbytes": "59", + "totalpacket": "11" + }, + "VOQ4": { + "dropbytes": "40", + "droppacket": "35", + "totalbytes": "62", + "totalpacket": "36" + }, + "VOQ5": { + "dropbytes": "88", + "droppacket": "2", + "totalbytes": "91", + "totalpacket": "49" + }, + "VOQ6": { + "dropbytes": "74", + "droppacket": "94", + "totalbytes": "17", + "totalpacket": "33" + }, + "VOQ7": { + "dropbytes": "33", + "droppacket": "95", + "totalbytes": "71", + "totalpacket": "40" + } + } +}""" class TestQueue(object): @classmethod @@ -953,6 +1210,58 @@ def test_queue_counters_port_json(self): del v["time"] assert json_dump(json_output) == show_queue_counters_port_json + def test_queue_voq_counters(self): + runner = CliRunner() + result = runner.invoke( + show.cli.commands["queue"].commands["counters"], + ["--voq"] + ) + print(result.output) + assert result.exit_code == 0 + assert result.output == show_queue_voq_counters + + def test_queue_port_voq_counters(self): + runner = CliRunner() + result = runner.invoke( + show.cli.commands["queue"].commands["counters"], + ["Ethernet0 --voq"] + ) + print(result.output) + assert result.exit_code == 0 + assert result.output == show_queue_port_voq_counters + + def test_queue_voq_counters_json(self): + runner = CliRunner() + result = 
runner.invoke( + show.cli.commands["queue"].commands["counters"], + ["--voq", "--json"] + ) + assert result.exit_code == 0 + print(result.output) + json_output = json.loads(result.output) + + # remove "time" from the output + for _, v in json_output.items(): + del v["time"] + print(json_dump(json_output)) + print(show_queue_voq_counters_json) + assert json_dump(json_output) == show_queue_voq_counters_json + + def test_queue_voq_counters_port_json(self): + runner = CliRunner() + result = runner.invoke( + show.cli.commands["queue"].commands["counters"], + ["Ethernet0", "--voq", "--json"] + ) + assert result.exit_code == 0 + print(result.output) + json_output = json.loads(result.output) + + # remove "time" from the output + for _, v in json_output.items(): + del v["time"] + assert json_dump(json_output) == show_queue_port_voq_counters_json + @classmethod def teardown_class(cls): os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) From cc775abc56c65d2596654b3aa1738263b243cb28 Mon Sep 17 00:00:00 2001 From: Vivek R Date: Wed, 22 Jun 2022 18:30:18 -0700 Subject: [PATCH 09/34] [generate dump] Move the Core/Log collection to the End of process Execution and removed default timeout (#2209) Recently an issue is seen during the test_max_limit[core] sonic-mgmt test (Test for auto-techsupport). This was a diff taken from two techsupport process runs and in the first case the core file to save was too large (almost 1G) and thus took upto 10 sec and all the commands that followed which append to the tar file have shown increased execution time. Finally, after 30 mins the execution timed out. Besides, it's better to collect the logs in the end, since we could collect more info and also core files are mostly static and it shouldn't matter much even if we collect them late. Thus moved the core/log collection to the end. 
But there is a catch regarding the above change, For eg: system is in a unstable state and most of the individual commands start to timeout, the techsupport dump eventually times out at 30m (because of the global timeout), then the dump is pretty useless, since it might not have any useful information at all Thus, i've removed the default global timeout, Clients can/should knowingly provide a value using -g option if the execution time has to be capped. A global timeout of 60 mins is used for Global timeout for Auto-techsupport invocation. Fix related to since argument was overwritten by the latest commit related to auto-techsupport on master and thus the reason for issues like this Auto-Techsupport collect logs beyond since value sonic-buildimage#11208, Made changes to fix the issue in here Signed-off-by: Vivek Reddy Karri --- scripts/generate_dump | 9 ++++---- show/main.py | 7 ++++-- tests/coredump_gen_handler_test.py | 24 +++++++++++++++++++-- tests/techsupport_test.py | 14 ++++++------ utilities_common/auto_techsupport_helper.py | 4 ++-- 5 files changed, 40 insertions(+), 18 deletions(-) diff --git a/scripts/generate_dump b/scripts/generate_dump index b5dd472e05..018d884330 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -1287,11 +1287,6 @@ main() { end_t=$(date +%s%3N) echo "[ Capture Proc State ] : $(($end_t-$start_t)) msec" >> $TECHSUPPORT_TIME_INFO - # Save logs and cores early - save_log_files - save_crash_files - save_warmboot_files - # Save all the processes within each docker save_cmd "show services" services.summary @@ -1426,6 +1421,10 @@ main() { end_t=$(date +%s%3N) echo "[ TAR /etc Files ] : $(($end_t-$start_t)) msec" >> $TECHSUPPORT_TIME_INFO + save_log_files + save_crash_files + save_warmboot_files + finalize } diff --git a/show/main.py b/show/main.py index a0615529d4..fd7325677a 100755 --- a/show/main.py +++ b/show/main.py @@ -1266,7 +1266,7 @@ def users(verbose): @cli.command() @click.option('--since', required=False, 
help="Collect logs and core files since given date") -@click.option('-g', '--global-timeout', default=30, type=int, help="Global timeout in minutes. Default 30 mins") +@click.option('-g', '--global-timeout', required=False, type=int, help="Global timeout in minutes. WARN: Dump might be incomplete if enforced") @click.option('-c', '--cmd-timeout', default=5, type=int, help="Individual command timeout in minutes. Default 5 mins") @click.option('--verbose', is_flag=True, help="Enable verbose output") @click.option('--allow-process-stop', is_flag=True, help="Dump additional data which may require system interruption") @@ -1275,7 +1275,10 @@ def users(verbose): @click.option('--redirect-stderr', '-r', is_flag=True, help="Redirect an intermediate errors to STDERR") def techsupport(since, global_timeout, cmd_timeout, verbose, allow_process_stop, silent, debug_dump, redirect_stderr): """Gather information for troubleshooting""" - cmd = "sudo timeout --kill-after={}s -s SIGTERM --foreground {}m".format(COMMAND_TIMEOUT, global_timeout) + cmd = "sudo" + + if global_timeout: + cmd += " timeout --kill-after={}s -s SIGTERM --foreground {}m".format(COMMAND_TIMEOUT, global_timeout) if allow_process_stop: cmd += " -a" diff --git a/tests/coredump_gen_handler_test.py b/tests/coredump_gen_handler_test.py index 74d965fe9e..f704311afd 100644 --- a/tests/coredump_gen_handler_test.py +++ b/tests/coredump_gen_handler_test.py @@ -21,6 +21,8 @@ /tmp/saisdkdump """ +TS_DEFAULT_CMD = "show techsupport --silent --global-timeout 60 --since 2 days ago" + def signal_handler(signum, frame): raise Exception("Timed out!") @@ -270,7 +272,7 @@ def test_since_argument(self): def mock_cmd(cmd, env): ts_dump = "/var/dump/sonic_dump_random3.tar.gz" cmd_str = " ".join(cmd) - if "--since '4 days ago'" in cmd_str: + if "--since 4 days ago" in cmd_str: patcher.fs.create_file(ts_dump) return 0, AUTO_TS_STDOUT + ts_dump, "" elif "date --date=4 days ago" in cmd_str: @@ -336,7 +338,7 @@ def 
test_invalid_since_argument(self): def mock_cmd(cmd, env): ts_dump = "/var/dump/sonic_dump_random3.tar.gz" cmd_str = " ".join(cmd) - if "--since '2 days ago'" in cmd_str: + if "--since 2 days ago" in cmd_str: patcher.fs.create_file(ts_dump) print(AUTO_TS_STDOUT + ts_dump) return 0, AUTO_TS_STDOUT + ts_dump, "" @@ -429,3 +431,21 @@ def mock_cmd(cmd, env): finally: signal.alarm(0) + def test_auto_ts_options(self): + """ + Scenario: Check if the techsupport is called as expected + """ + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, state="enabled", since_cfg="2 days ago") + set_feature_table_cfg(redis_mock, state="enabled") + with Patcher() as patcher: + def mock_cmd(cmd, env): + cmd_str = " ".join(cmd) + if "show techsupport" in cmd_str and cmd_str != TS_DEFAULT_CMD: + assert False, "Expected TS_CMD: {}, Recieved: {}".format(TS_DEFAULT_CMD, cmd_str) + return 0, AUTO_TS_STDOUT, "" + ts_helper.subprocess_exec = mock_cmd + patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz", "swss", redis_mock) + cls.handle_core_dump_creation_event() diff --git a/tests/techsupport_test.py b/tests/techsupport_test.py index 64bc133627..8effa89887 100644 --- a/tests/techsupport_test.py +++ b/tests/techsupport_test.py @@ -3,18 +3,18 @@ from unittest.mock import patch, Mock from click.testing import CliRunner -EXPECTED_BASE_COMMAND = 'sudo timeout --kill-after=300s -s SIGTERM --foreground ' +EXPECTED_BASE_COMMAND = 'sudo ' @patch("show.main.run_command") @pytest.mark.parametrize( "cli_arguments,expected", [ - ([], '30m generate_dump -v -t 5'), - (['--since', '2 days ago'], "30m generate_dump -v -s '2 days ago' -t 5"), - (['-g', '50'], '50m generate_dump -v -t 5'), - (['--allow-process-stop'], '30m -a generate_dump -v -t 5'), - (['--silent'], '30m generate_dump -t 5'), - (['--debug-dump', '--redirect-stderr'], '30m generate_dump -v -d -t 5 -r'), + ([], 'generate_dump -v -t 5'), + 
(['--since', '2 days ago'], "generate_dump -v -s '2 days ago' -t 5"), + (['-g', '50'], 'timeout --kill-after=300s -s SIGTERM --foreground 50m generate_dump -v -t 5'), + (['--allow-process-stop'], '-a generate_dump -v -t 5'), + (['--silent'], 'generate_dump -t 5'), + (['--debug-dump', '--redirect-stderr'], 'generate_dump -v -d -t 5 -r'), ] ) def test_techsupport(run_command, cli_arguments, expected): diff --git a/utilities_common/auto_techsupport_helper.py b/utilities_common/auto_techsupport_helper.py index b56693ac39..ee4f9d0c10 100644 --- a/utilities_common/auto_techsupport_helper.py +++ b/utilities_common/auto_techsupport_helper.py @@ -68,6 +68,7 @@ TIME_BUF = 20 SINCE_DEFAULT = "2 days ago" +TS_GLOBAL_TIMEOUT = "60" # Explicity Pass this to the subprocess invoking techsupport ENV_VAR = os.environ @@ -229,8 +230,7 @@ def parse_ts_dump_name(ts_stdout): def invoke_ts_cmd(db, num_retry=0): """Invoke techsupport generation command""" since_cfg = get_since_arg(db) - since_cfg = "'" + since_cfg + "'" - cmd_opts = ["show", "techsupport", "--silent", "--since", since_cfg] + cmd_opts = ["show", "techsupport", "--silent", "--global-timeout", TS_GLOBAL_TIMEOUT, "--since", since_cfg] cmd = " ".join(cmd_opts) rc, stdout, stderr = subprocess_exec(cmd_opts, env=ENV_VAR) new_dump = "" From 2f6a547d9a53db77b5d70f71bd76d984402f0682 Mon Sep 17 00:00:00 2001 From: Yevhen Fastiuk Date: Fri, 24 Jun 2022 01:31:55 +0300 Subject: [PATCH 10/34] Image-installer: Fix duplication of image prefix (#2172) * SONiC prefix shown twice when working branch contains "image-" in its name. #### Why I did it SONiC prefix is shown twice when the working branch contains "image-" in its name. Screenshot 2022-05-18 at 14 08 12 #### How I did it Fixed sonic-installer's one bootloader wrapper to show correct version. 
Screenshot 2022-05-18 at 14 08 33 #### How to verify it Build image (the branch is important): ``` cd sonic-buildimage git checkout -b dev-image-test make configure PLATFORM=mellanox && make target/sonic-mellanox.bin ``` Run image on the switch and execute: ``` sudo sonic-installer list ``` #### Previous command output (if the output of a command-line utility has changed) Screenshot 2022-05-18 at 14 08 12 #### New command output (if the output of a command-line utility has changed) Screenshot 2022-05-18 at 14 08 33 --- sonic_installer/bootloader/aboot.py | 9 ++-- sonic_installer/bootloader/bootloader.py | 2 +- sonic_installer/bootloader/grub.py | 2 +- sonic_installer/bootloader/onie.py | 5 +- sonic_installer/bootloader/uboot.py | 2 +- sonic_installer/main.py | 4 +- tests/installer_bootloader_aboot_test.py | 52 +++++++++++++++++++ tests/installer_bootloader_bootloader_test.py | 16 ++++++ tests/installer_bootloader_grub_test.py | 26 ++++++++++ tests/installer_bootloader_onie_test.py | 17 ++++++ tests/installer_bootloader_uboot_test.py | 29 +++++++++++ 11 files changed, 154 insertions(+), 10 deletions(-) create mode 100644 tests/installer_bootloader_aboot_test.py create mode 100644 tests/installer_bootloader_bootloader_test.py create mode 100644 tests/installer_bootloader_grub_test.py create mode 100644 tests/installer_bootloader_onie_test.py create mode 100644 tests/installer_bootloader_uboot_test.py diff --git a/sonic_installer/bootloader/aboot.py b/sonic_installer/bootloader/aboot.py index ab4c0ff38c..ea1a95c2fe 100644 --- a/sonic_installer/bootloader/aboot.py +++ b/sonic_installer/bootloader/aboot.py @@ -92,7 +92,7 @@ def _boot_config_set(self, **kwargs): self._boot_config_write(config, path=path) def _swi_image_path(self, image): - image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX) + image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX, 1) if is_secureboot(): return 'flash:%s/sonic.swi' % image_dir return 'flash:%s/.sonic-boot.swi' % image_dir @@ 
-100,19 +100,20 @@ def _swi_image_path(self, image): def get_current_image(self): with open('/proc/cmdline') as f: current = re.search(r"loop=/*(\S+)/", f.read()).group(1) - return current.replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX) + return current.replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX, 1) def get_installed_images(self): images = [] for filename in os.listdir(HOST_PATH): if filename.startswith(IMAGE_DIR_PREFIX): - images.append(filename.replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX)) + images.append(filename.replace(IMAGE_DIR_PREFIX, + IMAGE_PREFIX, 1)) return images def get_next_image(self): config = self._boot_config_read() match = re.search(r"flash:/*(\S+)/", config['SWI']) - return match.group(1).replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX) + return match.group(1).replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX, 1) def set_default_image(self, image): image_path = self._swi_image_path(image) diff --git a/sonic_installer/bootloader/bootloader.py b/sonic_installer/bootloader/bootloader.py index aaeddeba2f..a4fcdded4b 100644 --- a/sonic_installer/bootloader/bootloader.py +++ b/sonic_installer/bootloader/bootloader.py @@ -76,7 +76,7 @@ def detect(cls): def get_image_path(cls, image): """returns the image path""" prefix = path.join(HOST_PATH, IMAGE_DIR_PREFIX) - return image.replace(IMAGE_PREFIX, prefix) + return image.replace(IMAGE_PREFIX, prefix, 1) @contextmanager def get_rootfs_path(self, image_path): diff --git a/sonic_installer/bootloader/grub.py b/sonic_installer/bootloader/grub.py index 11ee3de1f4..85563dabe6 100644 --- a/sonic_installer/bootloader/grub.py +++ b/sonic_installer/bootloader/grub.py @@ -77,7 +77,7 @@ def remove_image(self, image): config.close() click.echo('Done') - image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX) + image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX, 1) click.echo('Removing image root filesystem...') subprocess.call(['rm','-rf', HOST_PATH + '/' + image_dir]) click.echo('Done') diff --git a/sonic_installer/bootloader/onie.py 
b/sonic_installer/bootloader/onie.py index be17ba5619..f5412fc860 100644 --- a/sonic_installer/bootloader/onie.py +++ b/sonic_installer/bootloader/onie.py @@ -21,7 +21,10 @@ def get_current_image(self): cmdline = open('/proc/cmdline', 'r') current = re.search(r"loop=(\S+)/fs.squashfs", cmdline.read()).group(1) cmdline.close() - return current.replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX) + # Replace only the first occurrence, since we are using branch name as + # tagging for version, IMAGE_PREFIX in the name of the branch may be + # replaced as well. + return current.replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX, 1) def get_binary_image_version(self, image_path): """returns the version of the image""" diff --git a/sonic_installer/bootloader/uboot.py b/sonic_installer/bootloader/uboot.py index bc4b98daeb..16bc3bcc59 100644 --- a/sonic_installer/bootloader/uboot.py +++ b/sonic_installer/bootloader/uboot.py @@ -73,7 +73,7 @@ def remove_image(self, image): elif image in images[1]: run_command('/usr/bin/fw_setenv boot_next "run sonic_image_1"') run_command('/usr/bin/fw_setenv sonic_version_2 "NONE"') - image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX) + image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX, 1) click.echo('Removing image root filesystem...') subprocess.call(['rm','-rf', HOST_PATH + '/' + image_dir]) click.echo('Done') diff --git a/sonic_installer/main.py b/sonic_installer/main.py index 6902c7edce..3d7c4b9ecc 100644 --- a/sonic_installer/main.py +++ b/sonic_installer/main.py @@ -272,7 +272,7 @@ def update_sonic_environment(bootloader, binary_image_version): SONIC_ENV_TEMPLATE_FILE = os.path.join("usr", "share", "sonic", "templates", "sonic-environment.j2") SONIC_VERSION_YML_FILE = os.path.join("etc", "sonic", "sonic_version.yml") - sonic_version = re.sub(IMAGE_PREFIX, '', binary_image_version) + sonic_version = re.sub(IMAGE_PREFIX, '', binary_image_version, 1) new_image_dir = bootloader.get_image_path(binary_image_version) new_image_mount = 
os.path.join('/', "tmp", "image-{0}-fs".format(sonic_version)) env_dir = os.path.join(new_image_dir, "sonic-config") @@ -327,7 +327,7 @@ def migrate_sonic_packages(bootloader, binary_image_version): tmp_dir = "tmp" packages_file = "packages.json" packages_path = os.path.join(PACKAGE_MANAGER_DIR, packages_file) - sonic_version = re.sub(IMAGE_PREFIX, '', binary_image_version) + sonic_version = re.sub(IMAGE_PREFIX, '', binary_image_version, 1) new_image_dir = bootloader.get_image_path(binary_image_version) new_image_upper_dir = os.path.join(new_image_dir, UPPERDIR_NAME) new_image_work_dir = os.path.join(new_image_dir, WORKDIR_NAME) diff --git a/tests/installer_bootloader_aboot_test.py b/tests/installer_bootloader_aboot_test.py new file mode 100644 index 0000000000..b00d6ffef6 --- /dev/null +++ b/tests/installer_bootloader_aboot_test.py @@ -0,0 +1,52 @@ +from unittest.mock import Mock, patch + +# Import test module +import sonic_installer.bootloader.aboot as aboot + +# Constants +image_dir = f'{aboot.IMAGE_DIR_PREFIX}expeliarmus-{aboot.IMAGE_DIR_PREFIX}abcde' +exp_image = f'{aboot.IMAGE_PREFIX}expeliarmus-{aboot.IMAGE_DIR_PREFIX}abcde' +image_dirs = [image_dir] + + +@patch('sonic_installer.bootloader.aboot.is_secureboot', + Mock(return_value=False)) +def test_swi_image_path(): + # Constants + image_id = f'{aboot.IMAGE_PREFIX}expeliarmus-{aboot.IMAGE_PREFIX}abcde' + exp_image_path = f'flash:{aboot.IMAGE_DIR_PREFIX}expeliarmus-'\ + f'{aboot.IMAGE_PREFIX}abcde/.sonic-boot.swi' + + bootloader = aboot.AbootBootloader() + + # Verify converted swi image path + image_path = bootloader._swi_image_path(image_id) + assert image_path == exp_image_path + + +@patch("sonic_installer.bootloader.aboot.re.search") +def test_get_current_image(re_search_patch): + bootloader = aboot.AbootBootloader() + + # Test convertion image dir to image name + re_search_patch().group = Mock(return_value=image_dir) + assert bootloader.get_current_image() == exp_image + + 
+@patch('sonic_installer.bootloader.aboot.os.listdir', + Mock(return_value=image_dirs)) +def test_get_installed_images(): + bootloader = aboot.AbootBootloader() + + # Test convertion image dir to image name + assert bootloader.get_installed_images() == [exp_image] + + +@patch("sonic_installer.bootloader.aboot.re.search") +def test_get_next_image(re_search_patch): + bootloader = aboot.AbootBootloader() + bootloader._boot_config_read = Mock(return_value={'SWI': None}) + + # Test convertion image dir to image name + re_search_patch().group = Mock(return_value=image_dir) + assert bootloader.get_next_image() == exp_image diff --git a/tests/installer_bootloader_bootloader_test.py b/tests/installer_bootloader_bootloader_test.py new file mode 100644 index 0000000000..e4f4a78d53 --- /dev/null +++ b/tests/installer_bootloader_bootloader_test.py @@ -0,0 +1,16 @@ +import os + +# Import test module +import sonic_installer.bootloader.bootloader as bl + + +def test_get_image_path(): + # Constants + image = f'{bl.IMAGE_PREFIX}expeliarmus-{bl.IMAGE_PREFIX}abcde' + path_prefix = os.path.join(bl.HOST_PATH, bl.IMAGE_DIR_PREFIX) + exp_image_path = f'{path_prefix}expeliarmus-{bl.IMAGE_PREFIX}abcde' + + bootloader = bl.Bootloader() + + # Test replacement image id with image path + assert bootloader.get_image_path(image) == exp_image_path diff --git a/tests/installer_bootloader_grub_test.py b/tests/installer_bootloader_grub_test.py new file mode 100644 index 0000000000..faaa8d75fc --- /dev/null +++ b/tests/installer_bootloader_grub_test.py @@ -0,0 +1,26 @@ +import os +from unittest.mock import Mock, patch + +# Import test module +import sonic_installer.bootloader.grub as grub + + +@patch("sonic_installer.bootloader.grub.subprocess.call", Mock()) +@patch("sonic_installer.bootloader.grub.open") +@patch("sonic_installer.bootloader.grub.run_command") +@patch("sonic_installer.bootloader.grub.re.search") +def test_remove_image(open_patch, run_command_patch, re_search_patch): + # Constants + 
image_path_prefix = os.path.join(grub.HOST_PATH, grub.IMAGE_DIR_PREFIX) + exp_image_path = f'{image_path_prefix}expeliarmus-{grub.IMAGE_PREFIX}abcde' + image = f'{grub.IMAGE_PREFIX}expeliarmus-{grub.IMAGE_PREFIX}abcde' + + bootloader = grub.GrubBootloader() + + # Verify rm command was executed with image path + bootloader.remove_image(image) + args_list = grub.subprocess.call.call_args_list + assert len(args_list) > 0 + + args, _ = args_list[0] + assert exp_image_path in args[0] diff --git a/tests/installer_bootloader_onie_test.py b/tests/installer_bootloader_onie_test.py new file mode 100644 index 0000000000..f5c2d96f7d --- /dev/null +++ b/tests/installer_bootloader_onie_test.py @@ -0,0 +1,17 @@ +from unittest.mock import Mock, patch + +# Import test module +import sonic_installer.bootloader.onie as onie + + +@patch("sonic_installer.bootloader.onie.re.search") +def test_get_current_image(re_search): + # Constants + image = f'{onie.IMAGE_DIR_PREFIX}expeliarmus-{onie.IMAGE_DIR_PREFIX}abcde' + exp_image = f'{onie.IMAGE_PREFIX}expeliarmus-{onie.IMAGE_DIR_PREFIX}abcde' + + bootloader = onie.OnieInstallerBootloader() + + # Test image dir conversion + onie.re.search().group = Mock(return_value=image) + assert bootloader.get_current_image() == exp_image diff --git a/tests/installer_bootloader_uboot_test.py b/tests/installer_bootloader_uboot_test.py new file mode 100644 index 0000000000..069b398dca --- /dev/null +++ b/tests/installer_bootloader_uboot_test.py @@ -0,0 +1,29 @@ +import os +from unittest.mock import Mock, patch + +# Import test module +import sonic_installer.bootloader.uboot as uboot + + +@patch("sonic_installer.bootloader.uboot.subprocess.call", Mock()) +@patch("sonic_installer.bootloader.uboot.run_command") +def test_remove_image(run_command_patch): + # Constants + image_path_prefix = os.path.join(uboot.HOST_PATH, uboot.IMAGE_DIR_PREFIX) + exp_image_path = f'{image_path_prefix}expeliarmus-{uboot.IMAGE_PREFIX}abcde' + + intstalled_images = [ + 
f'{uboot.IMAGE_PREFIX}expeliarmus-{uboot.IMAGE_PREFIX}abcde', + f'{uboot.IMAGE_PREFIX}expeliarmus-abcde', + ] + + bootloader = uboot.UbootBootloader() + bootloader.get_installed_images = Mock(return_value=intstalled_images) + + # Verify rm command was executed with image path + bootloader.remove_image(intstalled_images[0]) + args_list = uboot.subprocess.call.call_args_list + assert len(args_list) > 0 + + args, _ = args_list[0] + assert exp_image_path in args[0] From f64d2807381bc4faf0b8d66c73b3a14b277d0408 Mon Sep 17 00:00:00 2001 From: Kebo Liu Date: Fri, 24 Jun 2022 13:25:14 +0800 Subject: [PATCH 11/34] [sfpshow/sfputil] Enhance sfpshow and sfputil to behavior correctly on RJ45 ports (#2111) * enhance show interface transceiver eeprom logic with RJ45 port support Signed-off-by: Kebo Liu * enhance sfputil to support RJ45 port, exclude error status * fix sfputil issue on RJ45 port Signed-off-by: Kebo Liu * [RJ45] change the way to judge port type and add more UT test case Signed-off-by: Kebo Liu * [sfputil] simplity the logic for RJ45 support Signed-off-by: Kebo Liu * Support sfputil show present Signed-off-by: Stephen Sun * Support rj45 in sfpshow Signed-off-by: Stephen Sun * Add test case for sfputil with RJ45 supported Signed-off-by: Stephen Sun * Add mock data for RJ45 ports into STATE_DB Signed-off-by: Stephen Sun * Add test for sfputil show for RJ45 ports Signed-off-by: Stephen Sun * remove debug code in sfputil test case Signed-off-by: Kebo Liu * remove unnecessary argument for format() Signed-off-by: Kebo Liu * Revert the logic to fetch presence status from error status for RJ45 port (#17) * Revert the logic to fetch presence status from error status Signed-off-by: Stephen Sun * Unit test Signed-off-by: Stephen Sun * Fix error Signed-off-by: Stephen Sun * Add test cases to cover lpmode and error status Signed-off-by: Stephen Sun * add comments to describe the usage of functions to judge the port type Signed-off-by: Kebo Liu * add more testcase for sfputil 
Signed-off-by: Kebo Liu * fix typo in testcase name Signed-off-by: Kebo Liu Co-authored-by: Stephen Sun Co-authored-by: Stephen Sun <5379172+stephenxs@users.noreply.github.com> --- scripts/sfpshow | 23 ++-- sfputil/main.py | 113 ++++++++++++++---- tests/mock_tables/state_db.json | 12 ++ tests/sfp_test.py | 31 +++++ tests/sfputil_test.py | 204 +++++++++++++++++++++++++++++++- 5 files changed, 349 insertions(+), 34 deletions(-) diff --git a/scripts/sfpshow b/scripts/sfpshow index 9e06333277..3d71408202 100755 --- a/scripts/sfpshow +++ b/scripts/sfpshow @@ -215,6 +215,8 @@ QSFP_DD_DOM_VALUE_UNIT_MAP = { 'voltage': 'Volts' } +RJ45_PORT_TYPE = 'RJ45' + def display_invalid_intf_eeprom(intf_name): output = intf_name + ': SFP EEPROM Not detected\n' @@ -392,15 +394,18 @@ class SFPShow(object): output = '' sfp_info_dict = state_db.get_all(state_db.STATE_DB, 'TRANSCEIVER_INFO|{}'.format(interface_name)) - output = 'SFP EEPROM detected\n' - sfp_info_output = self.convert_sfp_info_to_output_string(sfp_info_dict) - output += sfp_info_output - - if dump_dom: - sfp_type = sfp_info_dict['type'] - dom_info_dict = state_db.get_all(state_db.STATE_DB, 'TRANSCEIVER_DOM_SENSOR|{}'.format(interface_name)) - dom_output = self.convert_dom_to_output_string(sfp_type, dom_info_dict) - output += dom_output + if sfp_info_dict['type'] == RJ45_PORT_TYPE: + output = 'SFP EEPROM is not applicable for RJ45 port\n' + else: + output = 'SFP EEPROM detected\n' + sfp_info_output = self.convert_sfp_info_to_output_string(sfp_info_dict) + output += sfp_info_output + + if dump_dom: + sfp_type = sfp_info_dict['type'] + dom_info_dict = state_db.get_all(state_db.STATE_DB, 'TRANSCEIVER_DOM_SENSOR|{}'.format(interface_name)) + dom_output = self.convert_dom_to_output_string(sfp_type, dom_info_dict) + output += dom_output return output diff --git a/sfputil/main.py b/sfputil/main.py index 68b91a9998..d567f39a0d 100644 --- a/sfputil/main.py +++ b/sfputil/main.py @@ -267,6 +267,7 @@ 'voltage': 'Volts' } +RJ45_PORT_TYPE 
= 'RJ45' # Global platform-specific Chassis class instance platform_chassis = None @@ -289,6 +290,34 @@ def is_sfp_present(port_name): return bool(presence) + +# Below defined two flavors of functions to determin whether a port is a RJ45 port. +# They serve different types of SFP utilities. One type of SFP utility consume the +# info stored in the STATE_DB, these utilities shall call 'is_rj45_port_from_db' +# to judge the port type. Another type of utilities will call the platform API +# directly to access SFP, for them shall use 'is_rj45_port_from_api'. +def is_rj45_port_from_db(port_name, db): + intf_type = db.get(db.STATE_DB, 'TRANSCEIVER_INFO|{}'.format(port_name), 'type') + return intf_type == RJ45_PORT_TYPE + + +def is_rj45_port_from_api(port_name): + physical_port = logical_port_to_physical_port_index(port_name) + sfp = platform_chassis.get_sfp(physical_port) + + try: + port_type = sfp.get_transceiver_info()['type'] + except NotImplementedError: + click.echo("Not able to judge the port type due to get_transceiver_info not implemented!", err=True) + sys.exit(ERROR_NOT_IMPLEMENTED) + + return port_type == RJ45_PORT_TYPE + + +def skip_if_port_is_rj45(port_name): + if is_rj45_port_from_api(port_name): + click.echo("This functionality is not applicable for RJ45 port {}.".format(port_name)) + sys.exit(EXIT_FAIL) # ========================== Methods for formatting output ========================== # Convert dict values to cli output string @@ -630,6 +659,11 @@ def eeprom(port, dump_dom, namespace): for physical_port in physical_port_list: port_name = get_physical_port_name(logical_port_name, i, ganged) + if is_rj45_port_from_api(port_name): + output += "{}: SFP EEPROM is not applicable for RJ45 port\n".format(port_name) + output += '\n' + continue + try: presence = platform_chassis.get_sfp(physical_port).get_presence() except NotImplementedError: @@ -783,7 +817,10 @@ def fetch_error_status_from_platform_api(port): physical_port_list = 
logical_port_name_to_physical_port_list(logical_port_name) port_name = get_physical_port_name(logical_port_name, 1, False) - output.append([port_name, output_dict.get(physical_port_list[0])]) + if is_rj45_port_from_api(logical_port_name): + output.append([port_name, "N/A"]) + else: + output.append([port_name, output_dict.get(physical_port_list[0])]) return output @@ -806,15 +843,18 @@ def fetch_error_status_from_state_db(port, state_db): sorted_ports = natsort.natsorted(status) output = [] for port in sorted_ports: - statestring = status[port].get('status') - description = status[port].get('error') - if statestring == '1': - description = 'OK' - elif statestring == '0': - description = 'Unplugged' - elif description == 'N/A': - log.log_error("Inconsistent state found for port {}: state is {} but error description is N/A".format(port, statestring)) - description = 'Unknown state: {}'.format(statestring) + if is_rj45_port_from_db(port, state_db): + description = "N/A" + else: + statestring = status[port].get('status') + description = status[port].get('error') + if statestring == '1': + description = 'OK' + elif statestring == '0': + description = 'Unplugged' + elif description == 'N/A': + log.log_error("Inconsistent state found for port {}: state is {} but error description is N/A".format(port, statestring)) + description = 'Unknown state: {}'.format(statestring) output.append([port, description]) @@ -879,24 +919,27 @@ def lpmode(port): click.echo("Error: No physical ports found for logical port '{}'".format(logical_port_name)) return - if len(physical_port_list) > 1: - ganged = True + if is_rj45_port_from_api(logical_port_name): + output_table.append([logical_port_name, "N/A"]) + else: + if len(physical_port_list) > 1: + ganged = True - for physical_port in physical_port_list: - port_name = get_physical_port_name(logical_port_name, i, ganged) + for physical_port in physical_port_list: + port_name = get_physical_port_name(logical_port_name, i, ganged) - try: - lpmode 
= platform_chassis.get_sfp(physical_port).get_lpmode() - except NotImplementedError: - click.echo("This functionality is currently not implemented for this platform") - sys.exit(ERROR_NOT_IMPLEMENTED) + try: + lpmode = platform_chassis.get_sfp(physical_port).get_lpmode() + except NotImplementedError: + click.echo("This functionality is currently not implemented for this platform") + sys.exit(ERROR_NOT_IMPLEMENTED) - if lpmode: - output_table.append([port_name, "On"]) - else: - output_table.append([port_name, "Off"]) + if lpmode: + output_table.append([port_name, "On"]) + else: + output_table.append([port_name, "Off"]) - i += 1 + i += 1 click.echo(tabulate(output_table, table_header, tablefmt='simple')) @@ -919,6 +962,10 @@ def fwversion(port_name): physical_port = logical_port_to_physical_port_index(port_name) sfp = platform_chassis.get_sfp(physical_port) + if is_rj45_port_from_api(port_name): + click.echo("Show firmware version is not applicable for RJ45 port {}.".format(port_name)) + sys.exit(EXIT_FAIL) + try: presence = sfp.get_presence() except NotImplementedError: @@ -954,6 +1001,10 @@ def set_lpmode(logical_port, enable): click.echo("Error: No physical ports found for logical port '{}'".format(logical_port)) return + if is_rj45_port_from_api(logical_port): + click.echo("{} low-power mode is not applicable for RJ45 port {}.".format("Enabling" if enable else "Disabling", logical_port)) + sys.exit(EXIT_FAIL) + if len(physical_port_list) > 1: ganged = True @@ -1010,6 +1061,10 @@ def reset(port_name): click.echo("Error: No physical ports found for logical port '{}'".format(port_name)) return + if is_rj45_port_from_api(port_name): + click.echo("Reset is not applicable for RJ45 port {}.".format(port_name)) + sys.exit(EXIT_FAIL) + if len(physical_port_list) > 1: ganged = True @@ -1175,6 +1230,8 @@ def run(port_name, mode): click.echo("{}: SFP EEPROM not detected\n".format(port_name)) sys.exit(EXIT_FAIL) + skip_if_port_is_rj45(port_name) + status = 
run_firmware(port_name, int(mode)) if status != 1: click.echo('Failed to run firmware in mode={}! CDB status: {}'.format(mode, status)) @@ -1192,6 +1249,8 @@ def commit(port_name): click.echo("{}: SFP EEPROM not detected\n".format(port_name)) sys.exit(EXIT_FAIL) + skip_if_port_is_rj45(port_name) + status = commit_firmware(port_name) if status != 1: click.echo('Failed to commit firmware! CDB status: {}'.format(status)) @@ -1212,6 +1271,8 @@ def upgrade(port_name, filepath): click.echo("{}: SFP EEPROM not detected\n".format(port_name)) sys.exit(EXIT_FAIL) + skip_if_port_is_rj45(port_name) + show_firmware_version(physical_port) status = download_firmware(port_name, filepath) @@ -1246,6 +1307,8 @@ def download(port_name, filepath): click.echo("{}: SFP EEPROM not detected\n".format(port_name)) sys.exit(EXIT_FAIL) + skip_if_port_is_rj45(port_name) + start = time.time() status = download_firmware(port_name, filepath) if status == 1: @@ -1266,6 +1329,8 @@ def unlock(port_name, password): physical_port = logical_port_to_physical_port_index(port_name) sfp = platform_chassis.get_sfp(physical_port) + skip_if_port_is_rj45(port_name) + if not is_sfp_present(port_name): click.echo("{}: SFP EEPROM not detected\n".format(port_name)) sys.exit(EXIT_FAIL) diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json index 8a323717af..06eba551c1 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -221,6 +221,18 @@ "status": "255", "error": "N/A" }, + "TRANSCEIVER_STATUS|Ethernet16": { + "status": "0", + "error": "N/A" + }, + "TRANSCEIVER_STATUS|Ethernet28": { + "status": "0", + "error": "N/A" + }, + "TRANSCEIVER_STATUS|Ethernet36": { + "status": "255", + "error": "Unknown" + }, "CHASSIS_INFO|chassis 1": { "psu_num": "2" }, diff --git a/tests/sfp_test.py b/tests/sfp_test.py index 3cbd9ecda8..a69872ab76 100644 --- a/tests/sfp_test.py +++ b/tests/sfp_test.py @@ -344,6 +344,30 @@ def test_sfp_presence(self): expected = """Port Presence 
----------- ----------- Ethernet200 Not present +""" + assert result.exit_code == 0 + assert result.output == expected + + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["presence"], ["Ethernet16"]) + expected = """Port Presence +---------- ---------- +Ethernet16 Present +""" + assert result.exit_code == 0 + assert result.output == expected + + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["presence"], ["Ethernet28"]) + expected = """Port Presence +---------- ---------- +Ethernet28 Present +""" + assert result.exit_code == 0 + assert result.output == expected + + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["presence"], ["Ethernet36"]) + expected = """Port Presence +---------- ---------- +Ethernet36 Present """ assert result.exit_code == 0 assert result.output == expected @@ -377,6 +401,13 @@ def test_qsfp_dd_eeprom(self): assert result.exit_code == 0 assert "result.output == test_qsfp_dd_eeprom_output" + def test_rj45_eeprom(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["eeprom"], ["Ethernet36"]) + result_lines = result.output.strip('\n') + expected = "Ethernet36: SFP EEPROM is not applicable for RJ45 port" + assert result_lines == expected + @classmethod def teardown_class(cls): print("TEARDOWN") diff --git a/tests/sfputil_test.py b/tests/sfputil_test.py index b8b26696e2..a4d568d20e 100644 --- a/tests/sfputil_test.py +++ b/tests/sfputil_test.py @@ -16,6 +16,7 @@ sys.modules['sonic_platform'] = mock.MagicMock() import sfputil.main as sfputil +EXIT_FAIL = -1 class TestSfputil(object): def test_format_dict_value_to_string(self): @@ -272,7 +273,10 @@ def test_error_status_from_db(self): expected_output = [['Ethernet0', 'Blocking Error|High temperature'], ['Ethernet4', 'OK'], ['Ethernet8', 'Unplugged'], - ['Ethernet12', 'Unknown state: 255']] + ['Ethernet12', 'Unknown 
state: 255'], + ['Ethernet16', 'N/A'], + ['Ethernet28', 'N/A'], + ['Ethernet36', 'N/A']] output = sfputil.fetch_error_status_from_state_db(None, db.db) assert output == expected_output @@ -280,6 +284,47 @@ def test_error_status_from_db(self): output = sfputil.fetch_error_status_from_state_db('Ethernet0', db.db) assert output == expected_output_ethernet0 + expected_output_ethernet16 = expected_output[4:5] + output = sfputil.fetch_error_status_from_state_db('Ethernet16', db.db) + assert output == expected_output_ethernet16 + + @patch('sfputil.main.is_rj45_port_from_db', MagicMock(return_value=True)) + def test_error_status_from_db_RJ45(self): + db = Db() + expected_output = [['Ethernet0', 'N/A'], + ['Ethernet4', 'N/A'], + ['Ethernet8', 'N/A'], + ['Ethernet12', 'N/A'], + ['Ethernet16', 'N/A'], + ['Ethernet28', 'N/A'], + ['Ethernet36', 'N/A']] + output = sfputil.fetch_error_status_from_state_db(None, db.db) + assert output == expected_output + + expected_output_ethernet0 = expected_output[:1] + output = sfputil.fetch_error_status_from_state_db('Ethernet0', db.db) + assert output == expected_output_ethernet0 + + expected_output_ethernet16 = expected_output[4:5] + output = sfputil.fetch_error_status_from_state_db('Ethernet16', db.db) + assert output == expected_output_ethernet16 + + @patch('sfputil.main.logical_port_name_to_physical_port_list', MagicMock(return_value=[1])) + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + @patch('sfputil.main.is_rj45_port_from_api', MagicMock(return_value=False)) + @patch('subprocess.check_output', MagicMock(return_value="['0:OK']")) + def test_fetch_error_status_from_platform_api(self): + output = sfputil.fetch_error_status_from_platform_api('Ethernet0') + assert output == [['Ethernet0', None]] + + @patch('sfputil.main.logical_port_name_to_physical_port_list', MagicMock(return_value=[1])) + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + 
@patch('subprocess.check_output', MagicMock(return_value="['0:OK']")) + @patch('sfputil.main.is_rj45_port_from_api', MagicMock(return_value=True)) + def test_fetch_error_status_from_platform_api_RJ45(self): + output = sfputil.fetch_error_status_from_platform_api('Ethernet0') + assert output == [['Ethernet0', 'N/A']] + @patch('sfputil.main.platform_chassis') @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) def test_show_firmware_version(self, mock_chassis): @@ -293,6 +338,116 @@ def test_show_firmware_version(self, mock_chassis): result = runner.invoke(sfputil.cli.commands['show'].commands['fwversion'], ["Ethernet0"]) assert result.exit_code == 0 + @patch('sfputil.main.platform_chassis') + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + @patch('sfputil.main.logical_port_name_to_physical_port_list', MagicMock(return_value=[1])) + @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=1))) + def test_show_presence(self, mock_chassis): + mock_sfp = MagicMock() + mock_api = MagicMock() + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + mock_sfp.get_presence.return_value = True + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['show'].commands['presence'], ["-p", "Ethernet16"]) + assert result.exit_code == 0 + expected_output = """Port Presence +---------- ---------- +Ethernet16 Present +""" + assert result.output == expected_output + + result = runner.invoke(sfputil.cli.commands['show'].commands['presence'], ["-p", "Ethernet28"]) + assert result.exit_code == 0 + expected_output = """Port Presence +---------- ---------- +Ethernet28 Present +""" + assert result.output == expected_output + + result = runner.invoke(sfputil.cli.commands['show'].commands['presence'], ["-p", "Ethernet36"]) + assert result.exit_code == 0 + expected_output = """Port Presence +---------- ---------- +Ethernet36 Present +""" + assert result.output == 
expected_output + + @patch('sfputil.main.platform_chassis') + @patch('sfputil.main.logical_port_name_to_physical_port_list', MagicMock(return_value=[1])) + @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=1))) + def test_show_lpmode(self, mock_chassis): + mock_sfp = MagicMock() + mock_api = MagicMock() + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + mock_sfp.get_lpmode.return_value = True + mock_chassis.get_sfp = MagicMock(return_value=mock_sfp) + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['show'].commands['lpmode'], ["-p", "Ethernet0"]) + assert result.exit_code == 0 + expected_output = """Port Low-power Mode +--------- ---------------- +Ethernet0 On +""" + assert result.output == expected_output + + mock_sfp.get_lpmode.return_value = False + result = runner.invoke(sfputil.cli.commands['show'].commands['lpmode'], ["-p", "Ethernet0"]) + assert result.exit_code == 0 + expected_output = """Port Low-power Mode +--------- ---------------- +Ethernet0 Off +""" + assert result.output == expected_output + + mock_sfp.get_lpmode.return_value = False + mock_sfp.get_transceiver_info = MagicMock(return_value={'type': sfputil.RJ45_PORT_TYPE}) + result = runner.invoke(sfputil.cli.commands['show'].commands['lpmode'], ["-p", "Ethernet0"]) + assert result.exit_code == 0 + expected_output = """Port Low-power Mode +--------- ---------------- +Ethernet0 N/A +""" + assert result.output == expected_output + + @patch('sfputil.main.platform_chassis') + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + @patch('sfputil.main.logical_port_name_to_physical_port_list', MagicMock(return_value=[1])) + @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=1))) + @patch('sfputil.main.is_rj45_port_from_api', MagicMock(return_value=True)) + def test_show_eeprom_RJ45(self, mock_chassis): + mock_sfp = MagicMock() + mock_api = MagicMock() + 
mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['show'].commands['eeprom'], ["-p", "Ethernet16", "-d"]) + assert result.exit_code == 0 + expected_output = "Ethernet16: SFP EEPROM is not applicable for RJ45 port\n\n\n" + assert result.output == expected_output + + @patch('sfputil.main.is_rj45_port_from_api', MagicMock(return_value=True)) + @patch('sys.exit', MagicMock(return_value=EXIT_FAIL)) + def test_skip_if_port_is_rj45(self): + result = sfputil.skip_if_port_is_rj45('Ethernet0') + assert result == None + + @patch('sfputil.main.logical_port_name_to_physical_port_list', MagicMock(return_value=1)) + @patch('sfputil.main.is_rj45_port_from_api', MagicMock(return_value=True)) + @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=1))) + def test_lpmode_set(self): + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['lpmode'].commands['on'], ["Ethernet0"]) + assert result.output == 'Enabling low-power mode is not applicable for RJ45 port Ethernet0.\n' + assert result.exit_code == EXIT_FAIL + + @patch('sfputil.main.logical_port_name_to_physical_port_list', MagicMock(return_value=1)) + @patch('sfputil.main.is_rj45_port_from_api', MagicMock(return_value=True)) + @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=1))) + def test_reset_RJ45(self): + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['reset'], ["Ethernet0"]) + assert result.output == 'Reset is not applicable for RJ45 port Ethernet0.\n' + assert result.exit_code == EXIT_FAIL + @patch('sfputil.main.platform_chassis') @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) def test_unlock_firmware(self, mock_chassis): @@ -306,6 +461,19 @@ def test_unlock_firmware(self, mock_chassis): result = runner.invoke(sfputil.cli.commands['firmware'].commands['unlock'], ["Ethernet0"]) assert result.exit_code == 0 
+ @patch('sfputil.main.platform_chassis') + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + @patch('sfputil.main.is_rj45_port_from_api', MagicMock(return_value=True)) + def test_show_fwversion_Rj45(self, mock_chassis): + mock_sfp = MagicMock() + mock_api = MagicMock() + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + mock_sfp.get_presence.return_value = True + mock_chassis.get_sfp = MagicMock(return_value=mock_sfp) + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['show'].commands['fwversion'], ["Ethernet0"]) + assert result.output == 'Show firmware version is not applicable for RJ45 port Ethernet0.\n' + assert result.exit_code == EXIT_FAIL @patch('sfputil.main.platform_chassis') @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) @@ -330,3 +498,37 @@ def test_commit_firmwre(self, mock_chassis): mock_api.cdb_commit_firmware.return_value = 1 status = sfputil.commit_firmware("Ethernet0") assert status == 1 + + @patch('sfputil.main.is_sfp_present', MagicMock(return_value=True)) + @patch('sfputil.main.is_rj45_port_from_api', MagicMock(return_value=True)) + def test_firmware_run_RJ45(self): + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['firmware'].commands['run'], ["--mode", "0", "Ethernet0"]) + assert result.output == 'This functionality is not applicable for RJ45 port Ethernet0.\n' + assert result.exit_code == EXIT_FAIL + + @patch('sfputil.main.is_sfp_present', MagicMock(return_value=True)) + @patch('sfputil.main.is_rj45_port_from_api', MagicMock(return_value=True)) + def test_firmware_commit_RJ45(self): + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['firmware'].commands['commit'], ["Ethernet0"]) + assert result.output == 'This functionality is not applicable for RJ45 port Ethernet0.\n' + assert result.exit_code == EXIT_FAIL + + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + 
@patch('sfputil.main.is_rj45_port_from_api', MagicMock(return_value=True)) + @patch('sfputil.main.is_sfp_present', MagicMock(return_value=1)) + def test_firmware_upgrade_RJ45(self): + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['firmware'].commands['upgrade'], ["Ethernet0", "a.b"]) + assert result.output == 'This functionality is not applicable for RJ45 port Ethernet0.\n' + assert result.exit_code == EXIT_FAIL + + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + @patch('sfputil.main.is_rj45_port_from_api', MagicMock(return_value=True)) + @patch('sfputil.main.is_sfp_present', MagicMock(return_value=1)) + def test_firmware_download_RJ45(self): + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['firmware'].commands['download'], ["Ethernet0", "a.b"]) + assert result.output == 'This functionality is not applicable for RJ45 port Ethernet0.\n' + assert result.exit_code == EXIT_FAIL From c3620fcde29b38f2ec220ceb667f5704a202e062 Mon Sep 17 00:00:00 2001 From: Mohamed Ghoneim Date: Tue, 28 Jun 2022 13:56:09 -0700 Subject: [PATCH 12/34] [GCU] Moving UniqueLanes from only validating moves, to be a supplemental YANG validator (#2234) #### What I did - Added a new supplemental YANG validator to validate UniqueLanes in `validate_config_db_config` - Removed UniqueLanesMoveValidator as the lanes validation will be taken care of by FullConfigMoveValidator which uses `validate_config_db_config` The benefit of this is at the beginning of `apply-patch` we make a call to `validate_config_db_config` to check if the given patch is valid or not ([code](https://github.com/Azure/sonic-utilities/blob/e6e4f8ceb9a59fb7b3767a65ffc4f017d0807832/generic_config_updater/patch_sorter.py#L1522)). Now we will fail early, instead of going for the move generation and not being able to generate a moves. #### How I did it Check code. #### How to verify it Added unit-tests. 
#### Previous command output (if the output of a command-line utility has changed) #### New command output (if the output of a command-line utility has changed) --- generic_config_updater/gu_common.py | 43 +++++++++- generic_config_updater/patch_sorter.py | 25 ------ .../generic_config_updater/gu_common_test.py | 78 +++++++++++++++++++ .../patch_sorter_test.py | 44 ----------- 4 files changed, 119 insertions(+), 71 deletions(-) diff --git a/generic_config_updater/gu_common.py b/generic_config_updater/gu_common.py index fb334c17db..1397396b75 100644 --- a/generic_config_updater/gu_common.py +++ b/generic_config_updater/gu_common.py @@ -113,6 +113,10 @@ def validate_sonic_yang_config(self, sonic_yang_as_json): def validate_config_db_config(self, config_db_as_json): sy = self.create_sonic_yang_with_loaded_models() + # TODO: Move these validators to YANG models + supplemental_yang_validators = [self.validate_bgp_peer_group, + self.validate_lanes] + try: tmp_config_db_as_json = copy.deepcopy(config_db_as_json) @@ -120,11 +124,46 @@ def validate_config_db_config(self, config_db_as_json): sy.validate_data_tree() - # TODO: modularize custom validations better or move directly to sonic-yang module - return self.validate_bgp_peer_group(config_db_as_json) + for supplemental_yang_validator in supplemental_yang_validators: + success, error = supplemental_yang_validator(config_db_as_json) + if not success: + return success, error except sonic_yang.SonicYangException as ex: return False, ex + return True, None + + def validate_lanes(self, config_db): + if "PORT" not in config_db: + return True, None + + ports = config_db["PORT"] + + # Validate each lane separately, make sure it is not empty, and is a number + port_to_lanes_map = {} + for port in ports: + attrs = ports[port] + if "lanes" in attrs: + lanes_str = attrs["lanes"] + lanes_with_whitespaces = lanes_str.split(",") + lanes = [lane.strip() for lane in lanes_with_whitespaces] + for lane in lanes: + if not lane: + return 
False, f"PORT '{port}' has an empty lane" + if not lane.isdigit(): + return False, f"PORT '{port}' has an invalid lane '{lane}'" + port_to_lanes_map[port] = lanes + + # Validate lanes are unique + existing = {} + for port in port_to_lanes_map: + lanes = port_to_lanes_map[port] + for lane in lanes: + if lane in existing: + return False, f"'{lane}' lane is used multiple times in PORT: {set([port, existing[lane]])}" + existing[lane] = port + return True, None + def validate_bgp_peer_group(self, config_db): if "BGP_PEER_RANGE" not in config_db: return True, None diff --git a/generic_config_updater/patch_sorter.py b/generic_config_updater/patch_sorter.py index f23b347bde..83ed4a88cb 100644 --- a/generic_config_updater/patch_sorter.py +++ b/generic_config_updater/patch_sorter.py @@ -565,30 +565,6 @@ def validate(self, move, diff): is_valid, error = self.config_wrapper.validate_config_db_config(simulated_config) return is_valid -# TODO: Add this validation to YANG models instead -class UniqueLanesMoveValidator: - """ - A class to validate lanes and any port are unique between all ports. - """ - def validate(self, move, diff): - simulated_config = move.apply(diff.current_config) - - if "PORT" not in simulated_config: - return True - - ports = simulated_config["PORT"] - existing = set() - for port in ports: - attrs = ports[port] - if "lanes" in attrs: - lanes_str = attrs["lanes"] - lanes = lanes_str.split(", ") - for lane in lanes: - if lane in existing: - return False - existing.add(lane) - return True - class CreateOnlyMoveValidator: """ A class to validate create-only fields are only created, but never modified/updated. 
In other words: @@ -1507,7 +1483,6 @@ def create(self, algorithm=Algorithm.DFS): move_validators = [DeleteWholeConfigMoveValidator(), FullConfigMoveValidator(self.config_wrapper), NoDependencyMoveValidator(self.path_addressing, self.config_wrapper), - UniqueLanesMoveValidator(), CreateOnlyMoveValidator(self.path_addressing), RequiredValueMoveValidator(self.path_addressing), NoEmptyTableMoveValidator(self.path_addressing)] diff --git a/tests/generic_config_updater/gu_common_test.py b/tests/generic_config_updater/gu_common_test.py index 8902df649a..6ba4923664 100644 --- a/tests/generic_config_updater/gu_common_test.py +++ b/tests/generic_config_updater/gu_common_test.py @@ -260,6 +260,84 @@ def check_validate_bgp_peer_group(self, ip_range, other_ip_range=[], duplicated_ self.assertFalse(actual) self.assertTrue(duplicated_ip in error) + def test_validate_lanes__no_port_table__success(self): + config = {"ACL_TABLE": {}} + self.validate_lanes(config) + + def test_validate_lanes__empty_port_table__success(self): + config = {"PORT": {}} + self.validate_lanes(config) + + def test_validate_lanes__empty_lane__failure(self): + config = {"PORT": {"Ethernet0": {"lanes": "", "speed":"10000"}}} + self.validate_lanes(config, 'has an empty lane') + + def test_validate_lanes__whitespace_lane__failure(self): + config = {"PORT": {"Ethernet0": {"lanes": " ", "speed":"10000"}}} + self.validate_lanes(config, 'has an empty lane') + + def test_validate_lanes__non_digits_lane__failure(self): + config = {"PORT": {"Ethernet0": {"lanes": "10g", "speed":"10000"}}} + self.validate_lanes(config, "has an invalid lane '10g'") + + def test_validate_lanes__space_between_digits_lane__failure(self): + config = {"PORT": {"Ethernet0": {"lanes": " 1 0 ", "speed":"10000"}}} + self.validate_lanes(config, "has an invalid lane '1 0'") + + def test_validate_lanes__single_valid_lane__success(self): + config = {"PORT": {"Ethernet0": {"lanes": "66", "speed":"10000"}}} + self.validate_lanes(config) + + def 
test_validate_lanes__different_valid_lanes_single_port__success(self): + config = {"PORT": {"Ethernet0": {"lanes": "66, 67, 68", "speed":"10000"}}} + self.validate_lanes(config) + + def test_validate_lanes__different_valid_and_invalid_empty_lanes_single_port__failure(self): + config = {"PORT": {"Ethernet0": {"lanes": "66, , 68", "speed":"10000"}}} + self.validate_lanes(config, 'has an empty lane') + + def test_validate_lanes__different_valid_and_invalid_non_digit_lanes_single_port__failure(self): + config = {"PORT": {"Ethernet0": {"lanes": "66, 67, 10g", "speed":"10000"}}} + self.validate_lanes(config, "has an invalid lane '10g'") + + def test_validate_lanes__different_valid_lanes_multi_ports__success(self): + config = {"PORT": { + "Ethernet0": {"lanes": " 64 , 65 \t", "speed":"10000"}, + "Ethernet1": {"lanes": " 66 , 67 \r\t\n, 68 ", "speed":"10000"}, + }} + self.validate_lanes(config) + + def test_validate_lanes__same_valid_lanes_single_port__failure(self): + config = {"PORT": {"Ethernet0": {"lanes": "65 \r\t\n, 65", "speed":"10000"}}} + self.validate_lanes(config, '65') + + def test_validate_lanes__same_valid_lanes_multi_ports__failure(self): + config = {"PORT": { + "Ethernet0": {"lanes": "64, 65, 67", "speed":"10000"}, + "Ethernet1": {"lanes": "66, 67, 68", "speed":"10000"}, + }} + self.validate_lanes(config, '67') + + def test_validate_lanes__same_valid_lanes_multi_ports_no_spaces__failure(self): + config = {"PORT": { + "Ethernet0": {"lanes": "64,65,67", "speed":"10000"}, + "Ethernet1": {"lanes": "66,67,68", "speed":"10000"}, + }} + self.validate_lanes(config, '67') + + def validate_lanes(self, config_db, expected_error=None): + # Arrange + config_wrapper = gu_common.ConfigWrapper() + expected = expected_error is None # if expected_error is None, then the input is valid + + # Act + actual, error = config_wrapper.validate_lanes(config_db) + + # Assert + self.assertEqual(expected, actual) + if expected_error: + self.assertTrue(expected_error in error) + def 
test_crop_tables_without_yang__returns_cropped_config_db_as_json(self): # Arrange config_wrapper = gu_common.ConfigWrapper() diff --git a/tests/generic_config_updater/patch_sorter_test.py b/tests/generic_config_updater/patch_sorter_test.py index ce4e1a3a13..68a6b09a54 100644 --- a/tests/generic_config_updater/patch_sorter_test.py +++ b/tests/generic_config_updater/patch_sorter_test.py @@ -868,49 +868,6 @@ def verify(self, operation_type, path, expected): # Assert self.assertEqual(expected, actual) -class TestUniqueLanesMoveValidator(unittest.TestCase): - def setUp(self): - self.validator = ps.UniqueLanesMoveValidator() - - def test_validate__no_port_table__success(self): - config = {"ACL_TABLE": {}} - self.validate_target_config(config) - - def test_validate__empty_port_table__success(self): - config = {"PORT": {}} - self.validate_target_config(config) - - def test_validate__single_lane__success(self): - config = {"PORT": {"Ethernet0": {"lanes": "66", "speed":"10000"}}} - self.validate_target_config(config) - - def test_validate__different_lanes_single_port___success(self): - config = {"PORT": {"Ethernet0": {"lanes": "66, 67, 68", "speed":"10000"}}} - self.validate_target_config(config) - - def test_validate__different_lanes_multi_ports___success(self): - config = {"PORT": { - "Ethernet0": {"lanes": "64, 65", "speed":"10000"}, - "Ethernet1": {"lanes": "66, 67, 68", "speed":"10000"}, - }} - self.validate_target_config(config) - - def test_validate__same_lanes_single_port___success(self): - config = {"PORT": {"Ethernet0": {"lanes": "65, 65", "speed":"10000"}}} - self.validate_target_config(config, False) - - def validate_target_config(self, target_config, expected=True): - # Arrange - current_config = {} - diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REPLACE, [], []) - - # Act - actual = self.validator.validate(move, diff) - - # Assert - self.assertEqual(expected, actual) - class 
TestFullConfigMoveValidator(unittest.TestCase): def setUp(self): self.any_current_config = Mock() @@ -3038,7 +2995,6 @@ def verify(self, algo, algo_class): expected_validator = [ps.DeleteWholeConfigMoveValidator, ps.FullConfigMoveValidator, ps.NoDependencyMoveValidator, - ps.UniqueLanesMoveValidator, ps.CreateOnlyMoveValidator, ps.RequiredValueMoveValidator, ps.NoEmptyTableMoveValidator] From 7d9faf348666196a3e8537f8263bc7b7328391bd Mon Sep 17 00:00:00 2001 From: gregshpit Date: Wed, 29 Jun 2022 21:33:36 +0300 Subject: [PATCH 13/34] Added support for Sonic cross-compilation build. (#2233) Signed-off-by: marvell Co-authored-by: marvell --- utilities_common/auto_techsupport_helper.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/utilities_common/auto_techsupport_helper.py b/utilities_common/auto_techsupport_helper.py index ee4f9d0c10..9bbfe9a87d 100644 --- a/utilities_common/auto_techsupport_helper.py +++ b/utilities_common/auto_techsupport_helper.py @@ -72,8 +72,10 @@ # Explicity Pass this to the subprocess invoking techsupport ENV_VAR = os.environ -PATH_PREV = ENV_VAR["PATH"] if "PATH" in ENV_VAR else "" -ENV_VAR["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:" + PATH_PREV +if ('CROSS_BUILD_ENVIRON' not in ENV_VAR) or (ENV_VAR['CROSS_BUILD_ENVIRON'] != 'y'): + # Add native system directories to PATH variable only if it is not cross-compilation build + PATH_PREV = ENV_VAR["PATH"] if "PATH" in ENV_VAR else "" + ENV_VAR["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:" + PATH_PREV # Techsupport Exit Codes EXT_LOCKFAIL = 2 From 8dee36cb826eb07948c04083ed4e3b074fc27c57 Mon Sep 17 00:00:00 2001 From: Junhua Zhai Date: Thu, 30 Jun 2022 08:57:08 +0000 Subject: [PATCH 14/34] [portstat] Update portstat to use CounterTable API (#2207) What I did To support gearbox port counter in CLI, following Azure/sonic-swss#2218 and Azure/sonic-swss-common#622. 
How I did it Use swsscommon CounterTable API and PortCounter type, covering different underlying switch architecture, asic-only, asic+gearbox. --- scripts/portstat | 14 ++++++++------ tests/mock_tables/dbconnector.py | 20 ++++++++++++++++++++ 2 files changed, 28 insertions(+), 6 deletions(-) diff --git a/scripts/portstat b/scripts/portstat index 24c085e9c1..abc1bc67aa 100755 --- a/scripts/portstat +++ b/scripts/portstat @@ -33,6 +33,7 @@ try: except KeyError: pass +from swsscommon.swsscommon import CounterTable, PortCounter from utilities_common import constants from utilities_common.intf_filter import parse_interface_in_filter import utilities_common.multi_asic as multi_asic_util @@ -157,20 +158,20 @@ class Portstat(object): """ Get the counters info from database. """ - def get_counters(table_id): + def get_counters(port): """ Get the counters from specific table. """ fields = ["0"]*BUCKET_NUM + _, fvs = counter_table.get(PortCounter(), port) + fvs = dict(fvs) for pos, cntr_list in counter_bucket_dict.items(): for counter_name in cntr_list: - full_table_id = COUNTER_TABLE_PREFIX + table_id - counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, counter_name) - if counter_data is None: + if counter_name not in fvs: fields[pos] = STATUS_NA elif fields[pos] != STATUS_NA: - fields[pos] = str(int(fields[pos]) + int(counter_data)) + fields[pos] = str(int(fields[pos]) + int(fvs[counter_name])) cntr = NStats._make(fields) return cntr @@ -196,13 +197,14 @@ class Portstat(object): cnstat_dict = OrderedDict() cnstat_dict['time'] = datetime.datetime.now() ratestat_dict = OrderedDict() + counter_table = CounterTable(self.db.get_redis_client(self.db.COUNTERS_DB)) if counter_port_name_map is None: return cnstat_dict, ratestat_dict for port in natsorted(counter_port_name_map): port_name = port.split(":")[0] if self.multi_asic.skip_display(constants.PORT_OBJ, port_name): continue - cnstat_dict[port] = get_counters(counter_port_name_map[port]) + cnstat_dict[port] = 
get_counters(port) ratestat_dict[port] = get_rates(counter_port_name_map[port]) return cnstat_dict, ratestat_dict diff --git a/tests/mock_tables/dbconnector.py b/tests/mock_tables/dbconnector.py index 80d74cafd7..f6db0d9794 100644 --- a/tests/mock_tables/dbconnector.py +++ b/tests/mock_tables/dbconnector.py @@ -182,6 +182,24 @@ def keys(self, pattern='*'): return [key for key in self.redis if regex.match(key)] +class PortCounter: + pass + + +class CounterTable: + def __init__(self, db): + self.db = db + + def get(self, counter, name): + if isinstance(counter, PortCounter): + name_map = "COUNTERS_PORT_NAME_MAP" + else: + return False, () + + key = self.db.hget(name_map, name) + return True, tuple(self.db.get("COUNTERS:" + key).items()) + + swsssdk.interface.DBInterface._subscribe_keyspace_notification = _subscribe_keyspace_notification mockredis.MockRedis.config_set = config_set redis.StrictRedis = SwssSyncClient @@ -189,3 +207,5 @@ def keys(self, pattern='*'): swsscommon.SonicV2Connector = SonicV2Connector swsscommon.ConfigDBConnector = ConfigDBConnector swsscommon.ConfigDBPipeConnector = ConfigDBPipeConnector +swsscommon.CounterTable = CounterTable +swsscommon.PortCounter = PortCounter From 3274b0ee0bcc3b161d6a64db0cc12476dfb71fa0 Mon Sep 17 00:00:00 2001 From: Vadym Yashchenko Date: Thu, 30 Jun 2022 19:59:19 +0300 Subject: [PATCH 15/34] Added bf_drivers.log to zipped dump after execution of "show techsupport" (#2164) What I did I add all bf_drivers*.log files into log folder of zipped filed created after perform the "show techsupport" How I did it I gather all bf_drivers*.log files and make copy from syncd container into /var/log folder How to verify it I verified it on device with performing of "show techsupport" --- scripts/generate_dump | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/scripts/generate_dump b/scripts/generate_dump index 018d884330..62a5a75f17 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ 
-1058,6 +1058,31 @@ collect_broadcom() { copy_from_masic_docker "syncd" "/var/log/bcm_diag_post" "/var/log/bcm_diag_post" } +############################################################################### +# Collect Barefoot specific information +# Globals: +# None +# Arguments: +# None +# Returns: +# None +############################################################################### +collect_barefoot() { + local bf_logs="/tmp/bf_logs" + $( rm -rf ${bf_logs} ) + $( mkdir ${bf_logs} ) + unset array + array=( $(docker exec -ti syncd ls -1 | grep bf_drivers.log) ) + for y in "${array[@]}"; do + itstr=`echo ${y} | tr -d "\r\n"` + copy_from_masic_docker "syncd" "/${itstr}" "${bf_logs}" + done + + for file in $(find /tmp/bf_logs -type f); do + save_file "${file}" log true true + done +} + ############################################################################### # Save log file # Globals: @@ -1371,6 +1396,10 @@ main() { save_saidump + if [ "$asic" = "barefoot" ]; then + collect_barefoot + fi + if [[ "$asic" = "mellanox" ]]; then collect_mellanox fi From b5d6659ab3ef6ff38248dea6ff4a0132fe17cd3a Mon Sep 17 00:00:00 2001 From: Jing Kan <672454911@qq.com> Date: Fri, 1 Jul 2022 17:13:39 +0800 Subject: [PATCH 16/34] [config/load_mgmt_config] Support load IPv6 mgmt IP (#2206) * [config/load_mgmt_config] Support load IPv6 mgmt IP Signed-off-by: Jing Kan jika@microsoft.com --- config/main.py | 29 ++++++--- doc/Command-Reference.md | 2 +- tests/config_test.py | 130 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 150 insertions(+), 11 deletions(-) diff --git a/config/main.py b/config/main.py index 16aea6b610..47791cf69d 100644 --- a/config/main.py +++ b/config/main.py @@ -1625,16 +1625,25 @@ def load_mgmt_config(filename): config_data = parse_device_desc_xml(filename) hostname = config_data['DEVICE_METADATA']['localhost']['hostname'] _change_hostname(hostname) - mgmt_conf = netaddr.IPNetwork(list(config_data['MGMT_INTERFACE'].keys())[0][1]) - gw_addr = 
list(config_data['MGMT_INTERFACE'].values())[0]['gwaddr'] - command = "ifconfig eth0 {} netmask {}".format(str(mgmt_conf.ip), str(mgmt_conf.netmask)) - clicommon.run_command(command, display_cmd=True) - command = "ip route add default via {} dev eth0 table default".format(gw_addr) - clicommon.run_command(command, display_cmd=True, ignore_error=True) - command = "ip rule add from {} table default".format(str(mgmt_conf.ip)) - clicommon.run_command(command, display_cmd=True, ignore_error=True) - command = "[ -f /var/run/dhclient.eth0.pid ] && kill `cat /var/run/dhclient.eth0.pid` && rm -f /var/run/dhclient.eth0.pid" - clicommon.run_command(command, display_cmd=True, ignore_error=True) + for key in list(config_data['MGMT_INTERFACE'].keys()): + # key: (eth0, ipprefix) + # value: { gwaddr: ip } + mgmt_conf = netaddr.IPNetwork(key[1]) + gw_addr = config_data['MGMT_INTERFACE'][key]['gwaddr'] + if mgmt_conf.version == 4: + command = "ifconfig eth0 {} netmask {}".format(str(mgmt_conf.ip), str(mgmt_conf.netmask)) + clicommon.run_command(command, display_cmd=True) + else: + command = "ifconfig eth0 add {}".format(str(mgmt_conf)) + # Ignore error for IPv6 configuration command due to it not allows config the same IP twice + clicommon.run_command(command, display_cmd=True, ignore_error=True) + command = "ip{} route add default via {} dev eth0 table default".format(" -6" if mgmt_conf.version == 6 else "", gw_addr) + clicommon.run_command(command, display_cmd=True, ignore_error=True) + command = "ip{} rule add from {} table default".format(" -6" if mgmt_conf.version == 6 else "", str(mgmt_conf.ip)) + clicommon.run_command(command, display_cmd=True, ignore_error=True) + if len(config_data['MGMT_INTERFACE'].keys()) > 0: + command = "[ -f /var/run/dhclient.eth0.pid ] && kill `cat /var/run/dhclient.eth0.pid` && rm -f /var/run/dhclient.eth0.pid" + clicommon.run_command(command, display_cmd=True, ignore_error=True) click.echo("Please note loaded setting will be lost after system reboot. 
To preserve setting, run `config save`.") @config.command("load_minigraph") diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index ebba414a53..4b78da135b 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -5192,7 +5192,7 @@ When user specifies the optional argument "-f" or "--force", this command ignore This command is used to reconfigure hostname and mgmt interface based on device description file. This command either uses the optional file specified as arguement or looks for the file "/etc/sonic/device_desc.xml". -If the file does not exist or if the file does not have valid fields for "hostname" and "ManagementAddress", it fails. +If the file does not exist or if the file does not have valid fields for "hostname" and "ManagementAddress" (or "ManagementAddressV6"), it fails. When user specifies the optional argument "-y" or "--yes", this command forces the loading without prompting the user for confirmation. If the argument is not specified, it prompts the user to confirm whether user really wants to load this configuration file. diff --git a/tests/config_test.py b/tests/config_test.py index 87b66f7e61..c7d7512234 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -6,6 +6,7 @@ import jsonpatch import sys import unittest +import ipaddress from unittest import mock import click @@ -42,6 +43,41 @@ Please note setting loaded from minigraph will be lost after system reboot. To preserve setting, run `config save`. 
""" +load_mgmt_config_command_ipv4_only_output="""\ +Running command: /usr/local/bin/sonic-cfggen -M device_desc.xml --write-to-db +parse dummy device_desc.xml +change hostname to dummy +Running command: ifconfig eth0 10.0.0.100 netmask 255.255.255.0 +Running command: ip route add default via 10.0.0.1 dev eth0 table default +Running command: ip rule add from 10.0.0.100 table default +Running command: [ -f /var/run/dhclient.eth0.pid ] && kill `cat /var/run/dhclient.eth0.pid` && rm -f /var/run/dhclient.eth0.pid +Please note loaded setting will be lost after system reboot. To preserve setting, run `config save`. +""" + +load_mgmt_config_command_ipv6_only_output="""\ +Running command: /usr/local/bin/sonic-cfggen -M device_desc.xml --write-to-db +parse dummy device_desc.xml +change hostname to dummy +Running command: ifconfig eth0 add fc00:1::32/64 +Running command: ip -6 route add default via fc00:1::1 dev eth0 table default +Running command: ip -6 rule add from fc00:1::32 table default +Running command: [ -f /var/run/dhclient.eth0.pid ] && kill `cat /var/run/dhclient.eth0.pid` && rm -f /var/run/dhclient.eth0.pid +Please note loaded setting will be lost after system reboot. To preserve setting, run `config save`. 
+""" + +load_mgmt_config_command_ipv4_ipv6_output="""\ +Running command: /usr/local/bin/sonic-cfggen -M device_desc.xml --write-to-db +parse dummy device_desc.xml +change hostname to dummy +Running command: ifconfig eth0 10.0.0.100 netmask 255.255.255.0 +Running command: ip route add default via 10.0.0.1 dev eth0 table default +Running command: ip rule add from 10.0.0.100 table default +Running command: ifconfig eth0 add fc00:1::32/64 +Running command: ip -6 route add default via fc00:1::1 dev eth0 table default +Running command: ip -6 rule add from fc00:1::32 table default +Running command: [ -f /var/run/dhclient.eth0.pid ] && kill `cat /var/run/dhclient.eth0.pid` && rm -f /var/run/dhclient.eth0.pid +Please note loaded setting will be lost after system reboot. To preserve setting, run `config save`. +""" RELOAD_CONFIG_DB_OUTPUT = """\ Running command: rm -rf /tmp/dropstat-* @@ -1356,3 +1392,97 @@ def validate_list_checkpoints_optional_parameter(self, param_args, expected_call self.assertTrue(expected_output in result.output) mock_generic_updater.list_checkpoints.assert_called_once() mock_generic_updater.list_checkpoints.assert_has_calls([expected_call]) + + +class TestConfigLoadMgmtConfig(object): + @classmethod + def setup_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "1" + print("SETUP") + + from .mock_tables import mock_single_asic + importlib.reload(mock_single_asic) + + import config.main + importlib.reload(config.main) + + def test_config_load_mgmt_config_ipv4_only(self, get_cmd_module, setup_single_broadcom_asic): + device_desc_result = { + 'DEVICE_METADATA': { + 'localhost': { + 'hostname': 'dummy' + } + }, + 'MGMT_INTERFACE': { + ('eth0', '10.0.0.100/24') : { + 'gwaddr': ipaddress.ip_address(u'10.0.0.1') + } + } + } + self.check_output(get_cmd_module, device_desc_result, load_mgmt_config_command_ipv4_only_output, 5) + + def test_config_load_mgmt_config_ipv6_only(self, get_cmd_module, setup_single_broadcom_asic): + device_desc_result = { + 
'DEVICE_METADATA': { + 'localhost': { + 'hostname': 'dummy' + } + }, + 'MGMT_INTERFACE': { + ('eth0', 'FC00:1::32/64') : { + 'gwaddr': ipaddress.ip_address(u'fc00:1::1') + } + } + } + self.check_output(get_cmd_module, device_desc_result, load_mgmt_config_command_ipv6_only_output, 5) + + def test_config_load_mgmt_config_ipv4_ipv6(self, get_cmd_module, setup_single_broadcom_asic): + device_desc_result = { + 'DEVICE_METADATA': { + 'localhost': { + 'hostname': 'dummy' + } + }, + 'MGMT_INTERFACE': { + ('eth0', '10.0.0.100/24') : { + 'gwaddr': ipaddress.ip_address(u'10.0.0.1') + }, + ('eth0', 'FC00:1::32/64') : { + 'gwaddr': ipaddress.ip_address(u'fc00:1::1') + } + } + } + self.check_output(get_cmd_module, device_desc_result, load_mgmt_config_command_ipv4_ipv6_output, 8) + + def check_output(self, get_cmd_module, parse_device_desc_xml_result, expected_output, expected_command_call_count): + def parse_device_desc_xml_side_effect(filename): + print("parse dummy device_desc.xml") + return parse_device_desc_xml_result + def change_hostname_side_effect(hostname): + print("change hostname to {}".format(hostname)) + with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: + with mock.patch('config.main.parse_device_desc_xml', mock.MagicMock(side_effect=parse_device_desc_xml_side_effect)): + with mock.patch('config.main._change_hostname', mock.MagicMock(side_effect=change_hostname_side_effect)): + (config, show) = get_cmd_module + runner = CliRunner() + with runner.isolated_filesystem(): + with open('device_desc.xml', 'w') as f: + f.write('dummy') + result = runner.invoke(config.config.commands["load_mgmt_config"], ["-y", "device_desc.xml"]) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 + assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == expected_output + assert mock_run_command.call_count == 
expected_command_call_count + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ['UTILITIES_UNIT_TESTING'] = "0" + + # change back to single asic config + from .mock_tables import dbconnector + from .mock_tables import mock_single_asic + importlib.reload(mock_single_asic) + dbconnector.load_namespace_config() From 7070794d1378802a8d452f8f2c4b1c66335f18cc Mon Sep 17 00:00:00 2001 From: Hua Liu <58683130+liuh-80@users.noreply.github.com> Date: Mon, 4 Jul 2022 08:51:39 +0800 Subject: [PATCH 17/34] Fix DBConfig not initialize issue in pfcwd (#2238) #### What I did Fix pfcwd connect DB with exception issue: https://github.com/Azure/sonic-buildimage/issues/11269 pfcwd implicit depends on InterfaceAliasConverter() to initialize DB config, however following PR change InterfaceAliasConverter() behavior to lazy initialize, then pfcwd failed when try connect to DB without initialize DB config: https://github.com/Azure/sonic-utilities/pull/2183 #### How I did it Load DB config in pfcwd. #### How to verify it Pass all UT. 
--- pfcwd/main.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pfcwd/main.py b/pfcwd/main.py index 4d3e73fb75..76fa31b4fb 100644 --- a/pfcwd/main.py +++ b/pfcwd/main.py @@ -9,6 +9,7 @@ from tabulate import tabulate from utilities_common import multi_asic as multi_asic_util from utilities_common import constants +from utilities_common.general import load_db_config from sonic_py_common import logger SYSLOG_IDENTIFIER = "config" @@ -62,7 +63,7 @@ @click.group() def cli(): """ SONiC PFC Watchdog """ - + load_db_config() def get_all_queues(db, namespace=None, display=constants.DISPLAY_ALL): queue_names = db.get_all(db.COUNTERS_DB, 'COUNTERS_QUEUE_NAME_MAP') From c7389bd5df69bec6037addadebd0b56df448ce29 Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Tue, 5 Jul 2022 13:41:09 -0700 Subject: [PATCH 18/34] show linkmgrd status in `show mux status` (#2254) What I did Replace status in show mux status with APP DB MUX_CABLE_TABLE:, which is written by linkmgrd and indicates linkmgrd's state transition. This change is required for active-active dualtor setup. In active-active setup, we care more about whether state transition happens on ToR side. In tests, we want to see if linkmgrd makes immediate reaction on link state changes. If we continue to use STATE DB entries, when gRPC is not available, we will get unknow in show mux status, which is not informatic and not the actual value we want to check. sign-off: Jing Zhang zhangjing@microsoft.com How I did it Use APP DB entries instead of STATE DB. Move STATE DB to column SERVER_STATUS. How to verify it Unit tests with mock DB values. Tests on dual testbeds. 
--- show/muxcable.py | 53 +++++++++++++++++++++++----------- tests/mock_tables/appl_db.json | 18 ++++++++++++ tests/muxcable_test.py | 48 ++++++++++++++++++------------ 3 files changed, 84 insertions(+), 35 deletions(-) diff --git a/show/muxcable.py b/show/muxcable.py index 6d01727868..e99a3332d8 100644 --- a/show/muxcable.py +++ b/show/muxcable.py @@ -401,13 +401,15 @@ def get_switch_name(config_db): sys.exit(STATUS_FAIL) -def create_json_dump_per_port_status(db, port_status_dict, muxcable_info_dict, muxcable_health_dict, muxcable_metrics_dict, asic_index, port): +def create_json_dump_per_port_status(db, port_status_dict, muxcable_info_dict, muxcable_grpc_dict, muxcable_health_dict, muxcable_metrics_dict, asic_index, port): res_dict = {} status_value = get_value_for_key_in_dict(muxcable_info_dict[asic_index], port, "state", "MUX_CABLE_TABLE") port_name = platform_sfputil_helper.get_interface_alias(port, db) port_status_dict["MUX_CABLE"][port_name] = {} port_status_dict["MUX_CABLE"][port_name]["STATUS"] = status_value + gRPC_value = get_value_for_key_in_dict(muxcable_grpc_dict[asic_index], port, "state", "MUX_CABLE_TABLE") + port_status_dict["MUX_CABLE"][port_name]["SERVER_STATUS"] = gRPC_value health_value = get_value_for_key_in_dict(muxcable_health_dict[asic_index], port, "state", "MUX_LINKMGR_TABLE") port_status_dict["MUX_CABLE"][port_name]["HEALTH"] = health_value res_dict = get_hwmode_mux_direction_port(db, port) @@ -428,7 +430,7 @@ def create_json_dump_per_port_status(db, port_status_dict, muxcable_info_dict, m last_switch_end_time = muxcable_metrics_dict[asic_index].get("linkmgrd_switch_active_end") port_status_dict["MUX_CABLE"][port_name]["LAST_SWITCHOVER_TIME"] = last_switch_end_time -def create_table_dump_per_port_status(db, print_data, muxcable_info_dict, muxcable_health_dict, muxcable_metrics_dict, asic_index, port): +def create_table_dump_per_port_status(db, print_data, muxcable_info_dict, muxcable_grpc_dict, muxcable_health_dict, 
muxcable_metrics_dict, asic_index, port): print_port_data = [] res_dict = {} @@ -436,6 +438,7 @@ def create_table_dump_per_port_status(db, print_data, muxcable_info_dict, muxcab res_dict = get_hwmode_mux_direction_port(db, port) status_value = get_value_for_key_in_dict(muxcable_info_dict[asic_index], port, "state", "MUX_CABLE_TABLE") #status_value = get_value_for_key_in_tbl(y_cable_asic_table, port, "status") + gRPC_value = get_value_for_key_in_dict(muxcable_grpc_dict[asic_index], port, "state", "MUX_CABLE_TABLE") health_value = get_value_for_key_in_dict(muxcable_health_dict[asic_index], port, "state", "MUX_LINKMGR_TABLE") last_switch_end_time = "" @@ -447,6 +450,7 @@ def create_table_dump_per_port_status(db, print_data, muxcable_info_dict, muxcab port_name = platform_sfputil_helper.get_interface_alias(port, db) print_port_data.append(port_name) print_port_data.append(status_value) + print_port_data.append(gRPC_value) print_port_data.append(health_value) if res_dict[2] == "False": hwstatus = "absent" @@ -510,10 +514,13 @@ def status(db, port, json_output): port = platform_sfputil_helper.get_interface_name(port, db) port_table_keys = {} + appl_db_muxcable_tbl_keys = {} port_health_table_keys = {} port_metrics_table_keys = {} per_npu_statedb = {} + per_npu_appl_db = {} muxcable_info_dict = {} + muxcable_grpc_dict = {} muxcable_health_dict = {} muxcable_metrics_dict = {} @@ -525,6 +532,11 @@ def status(db, port, json_output): per_npu_statedb[asic_id] = SonicV2Connector(use_unix_socket_path=False, namespace=namespace) per_npu_statedb[asic_id].connect(per_npu_statedb[asic_id].STATE_DB) + per_npu_appl_db[asic_id] = swsscommon.SonicV2Connector(use_unix_socket_path=False, namespace=namespace) + per_npu_appl_db[asic_id].connect(per_npu_appl_db[asic_id].APPL_DB) + + appl_db_muxcable_tbl_keys[asic_id] = per_npu_appl_db[asic_id].keys( + per_npu_appl_db[asic_id].APPL_DB, 'MUX_CABLE_TABLE:*') port_table_keys[asic_id] = per_npu_statedb[asic_id].keys( 
per_npu_statedb[asic_id].STATE_DB, 'MUX_CABLE_TABLE|*') port_health_table_keys[asic_id] = per_npu_statedb[asic_id].keys( @@ -546,17 +558,20 @@ def status(db, port, json_output): click.echo("Got invalid asic index for port {}, cant retreive mux status".format(port_name)) sys.exit(STATUS_FAIL) - muxcable_info_dict[asic_index] = per_npu_statedb[asic_index].get_all( + muxcable_info_dict[asic_index] = per_npu_appl_db[asic_id].get_all( + per_npu_appl_db[asic_id].APPL_DB, 'MUX_CABLE_TABLE:{}'.format(port)) + muxcable_grpc_dict[asic_index] = per_npu_statedb[asic_index].get_all( per_npu_statedb[asic_index].STATE_DB, 'MUX_CABLE_TABLE|{}'.format(port)) muxcable_health_dict[asic_index] = per_npu_statedb[asic_index].get_all( per_npu_statedb[asic_index].STATE_DB, 'MUX_LINKMGR_TABLE|{}'.format(port)) muxcable_metrics_dict[asic_index] = per_npu_statedb[asic_index].get_all( per_npu_statedb[asic_index].STATE_DB, 'MUX_METRICS_TABLE|{}'.format(port)) + if muxcable_info_dict[asic_index] is not None: - logical_key = "MUX_CABLE_TABLE|{}".format(port) + logical_key = "MUX_CABLE_TABLE:{}".format(port) logical_health_key = "MUX_LINKMGR_TABLE|{}".format(port) logical_metrics_key = "MUX_METRICS_TABLE|{}".format(port) - if logical_key in port_table_keys[asic_index] and logical_health_key in port_health_table_keys[asic_index]: + if logical_key in appl_db_muxcable_tbl_keys[asic_index] and logical_health_key in port_health_table_keys[asic_index]: if logical_metrics_key not in port_metrics_table_keys[asic_index]: muxcable_metrics_dict[asic_index] = {} @@ -565,7 +580,7 @@ def status(db, port, json_output): port_status_dict = {} port_status_dict["MUX_CABLE"] = {} - create_json_dump_per_port_status(db, port_status_dict, muxcable_info_dict, + create_json_dump_per_port_status(db, port_status_dict, muxcable_info_dict, muxcable_grpc_dict, muxcable_health_dict, muxcable_metrics_dict, asic_index, port) click.echo("{}".format(json.dumps(port_status_dict, indent=4))) @@ -573,10 +588,10 @@ def status(db, 
port, json_output): else: print_data = [] - create_table_dump_per_port_status(db, print_data, muxcable_info_dict, + create_table_dump_per_port_status(db, print_data, muxcable_info_dict, muxcable_grpc_dict, muxcable_health_dict, muxcable_metrics_dict, asic_index, port) - headers = ['PORT', 'STATUS', 'HEALTH', 'HWSTATUS', 'LAST_SWITCHOVER_TIME'] + headers = ['PORT', 'STATUS', 'SERVER_STATUS', 'HEALTH', 'HWSTATUS', 'LAST_SWITCHOVER_TIME'] click.echo(tabulate(print_data, headers=headers)) sys.exit(STATUS_SUCCESSFUL) @@ -595,9 +610,11 @@ def status(db, port, json_output): port_status_dict["MUX_CABLE"] = {} for namespace in namespaces: asic_id = multi_asic.get_asic_index_from_namespace(namespace) - for key in natsorted(port_table_keys[asic_id]): - port = key.split("|")[1] - muxcable_info_dict[asic_id] = per_npu_statedb[asic_id].get_all( + for key in natsorted(appl_db_muxcable_tbl_keys[asic_id]): + port = key.split(":")[1] + muxcable_info_dict[asic_id] = per_npu_appl_db[asic_id].get_all( + per_npu_appl_db[asic_id].APPL_DB, 'MUX_CABLE_TABLE:{}'.format(port)) + muxcable_grpc_dict[asic_id] = per_npu_statedb[asic_id].get_all( per_npu_statedb[asic_id].STATE_DB, 'MUX_CABLE_TABLE|{}'.format(port)) muxcable_health_dict[asic_id] = per_npu_statedb[asic_id].get_all( per_npu_statedb[asic_id].STATE_DB, 'MUX_LINKMGR_TABLE|{}'.format(port)) @@ -605,7 +622,7 @@ def status(db, port, json_output): per_npu_statedb[asic_id].STATE_DB, 'MUX_METRICS_TABLE|{}'.format(port)) if not muxcable_metrics_dict[asic_id]: muxcable_metrics_dict[asic_id] = {} - create_json_dump_per_port_status(db, port_status_dict, muxcable_info_dict, + create_json_dump_per_port_status(db, port_status_dict, muxcable_info_dict, muxcable_grpc_dict, muxcable_health_dict, muxcable_metrics_dict, asic_id, port) click.echo("{}".format(json.dumps(port_status_dict, indent=4))) @@ -613,20 +630,22 @@ def status(db, port, json_output): print_data = [] for namespace in namespaces: asic_id = 
multi_asic.get_asic_index_from_namespace(namespace) - for key in natsorted(port_table_keys[asic_id]): - port = key.split("|")[1] + for key in natsorted(appl_db_muxcable_tbl_keys[asic_id]): + port = key.split(":")[1] + muxcable_info_dict[asic_id] = per_npu_appl_db[asic_id].get_all( + per_npu_appl_db[asic_id].APPL_DB, 'MUX_CABLE_TABLE:{}'.format(port)) muxcable_health_dict[asic_id] = per_npu_statedb[asic_id].get_all( per_npu_statedb[asic_id].STATE_DB, 'MUX_LINKMGR_TABLE|{}'.format(port)) - muxcable_info_dict[asic_id] = per_npu_statedb[asic_id].get_all( + muxcable_grpc_dict[asic_id] = per_npu_statedb[asic_id].get_all( per_npu_statedb[asic_id].STATE_DB, 'MUX_CABLE_TABLE|{}'.format(port)) muxcable_metrics_dict[asic_id] = per_npu_statedb[asic_id].get_all( per_npu_statedb[asic_id].STATE_DB, 'MUX_METRICS_TABLE|{}'.format(port)) if not muxcable_metrics_dict[asic_id]: muxcable_metrics_dict[asic_id] = {} - create_table_dump_per_port_status(db, print_data, muxcable_info_dict, + create_table_dump_per_port_status(db, print_data, muxcable_info_dict, muxcable_grpc_dict, muxcable_health_dict, muxcable_metrics_dict, asic_id, port) - headers = ['PORT', 'STATUS', 'HEALTH', 'HWSTATUS','LAST_SWITCHOVER_TIME'] + headers = ['PORT', 'STATUS', 'SERVER_STATUS', 'HEALTH', 'HWSTATUS', 'LAST_SWITCHOVER_TIME'] click.echo(tabulate(print_data, headers=headers)) sys.exit(STATUS_SUCCESSFUL) diff --git a/tests/mock_tables/appl_db.json b/tests/mock_tables/appl_db.json index e3cacf284b..6e0e333372 100644 --- a/tests/mock_tables/appl_db.json +++ b/tests/mock_tables/appl_db.json @@ -263,5 +263,23 @@ }, "VXLAN_REMOTE_VNI_TABLE:Vlan200:25.25.25.27": { "vni": "200" + }, + "MUX_CABLE_TABLE:Ethernet32": { + "state": "active" + }, + "MUX_CABLE_TABLE:Ethernet0": { + "state": "active" + }, + "MUX_CABLE_TABLE:Ethernet4": { + "state": "standby" + }, + "MUX_CABLE_TABLE:Ethernet8": { + "state": "standby" + }, + "MUX_CABLE_TABLE:Ethernet16": { + "state": "standby" + }, + "MUX_CABLE_TABLE:Ethernet12": { + "state": 
"active" } } diff --git a/tests/muxcable_test.py b/tests/muxcable_test.py index d79606e3dc..fab77e055e 100644 --- a/tests/muxcable_test.py +++ b/tests/muxcable_test.py @@ -25,25 +25,25 @@ tabular_data_status_output_expected = """\ -PORT STATUS HEALTH HWSTATUS LAST_SWITCHOVER_TIME ----------- -------- --------- ------------ --------------------------- -Ethernet0 active healthy inconsistent 2021-May-13 10:01:15.696728 -Ethernet4 standby healthy consistent -Ethernet8 standby unhealthy consistent -Ethernet12 unknown unhealthy inconsistent -Ethernet16 standby healthy consistent -Ethernet32 active healthy inconsistent +PORT STATUS SERVER_STATUS HEALTH HWSTATUS LAST_SWITCHOVER_TIME +---------- -------- --------------- --------- ------------ --------------------------- +Ethernet0 active active healthy inconsistent 2021-May-13 10:01:15.696728 +Ethernet4 standby standby healthy consistent +Ethernet8 standby standby unhealthy consistent +Ethernet12 active unknown unhealthy inconsistent +Ethernet16 standby standby healthy consistent +Ethernet32 active active healthy inconsistent """ tabular_data_status_output_expected_alias = """\ -PORT STATUS HEALTH HWSTATUS LAST_SWITCHOVER_TIME ------- -------- --------- ------------ --------------------------- -etp1 active healthy inconsistent 2021-May-13 10:01:15.696728 -etp2 standby healthy consistent -etp3 standby unhealthy consistent -etp4 unknown unhealthy inconsistent -etp5 standby healthy consistent -etp9 active healthy inconsistent +PORT STATUS SERVER_STATUS HEALTH HWSTATUS LAST_SWITCHOVER_TIME +------ -------- --------------- --------- ------------ --------------------------- +etp1 active active healthy inconsistent 2021-May-13 10:01:15.696728 +etp2 standby standby healthy consistent +etp3 standby standby unhealthy consistent +etp4 active unknown unhealthy inconsistent +etp5 standby standby healthy consistent +etp9 active active healthy inconsistent """ @@ -52,36 +52,42 @@ "MUX_CABLE": { "Ethernet0": { "STATUS": "active", + 
"SERVER_STATUS": "active", "HEALTH": "healthy", "HWSTATUS": "inconsistent", "LAST_SWITCHOVER_TIME": "2021-May-13 10:01:15.696728" }, "Ethernet4": { "STATUS": "standby", + "SERVER_STATUS": "standby", "HEALTH": "healthy", "HWSTATUS": "consistent", "LAST_SWITCHOVER_TIME": "" }, "Ethernet8": { "STATUS": "standby", + "SERVER_STATUS": "standby", "HEALTH": "unhealthy", "HWSTATUS": "consistent", "LAST_SWITCHOVER_TIME": "" }, "Ethernet12": { - "STATUS": "unknown", + "STATUS": "active", + "SERVER_STATUS": "unknown", "HEALTH": "unhealthy", "HWSTATUS": "inconsistent", "LAST_SWITCHOVER_TIME": "" }, "Ethernet16": { "STATUS": "standby", + "SERVER_STATUS": "standby", "HEALTH": "healthy", "HWSTATUS": "consistent", "LAST_SWITCHOVER_TIME": "" }, "Ethernet32": { "STATUS": "active", + "SERVER_STATUS": "active", "HEALTH": "healthy", "HWSTATUS": "inconsistent", "LAST_SWITCHOVER_TIME": "" @@ -95,36 +101,42 @@ "MUX_CABLE": { "etp1": { "STATUS": "active", + "SERVER_STATUS": "active", "HEALTH": "healthy", "HWSTATUS": "inconsistent", "LAST_SWITCHOVER_TIME": "2021-May-13 10:01:15.696728" }, "etp2": { "STATUS": "standby", + "SERVER_STATUS": "standby", "HEALTH": "healthy", "HWSTATUS": "consistent", "LAST_SWITCHOVER_TIME": "" }, "etp3": { "STATUS": "standby", + "SERVER_STATUS": "standby", "HEALTH": "unhealthy", "HWSTATUS": "consistent", "LAST_SWITCHOVER_TIME": "" }, "etp4": { - "STATUS": "unknown", + "STATUS": "active", + "SERVER_STATUS": "unknown", "HEALTH": "unhealthy", "HWSTATUS": "inconsistent", "LAST_SWITCHOVER_TIME": "" }, "etp5": { "STATUS": "standby", + "SERVER_STATUS": "standby", "HEALTH": "healthy", "HWSTATUS": "consistent", "LAST_SWITCHOVER_TIME": "" }, "etp9": { "STATUS": "active", + "SERVER_STATUS": "active", "HEALTH": "healthy", "HWSTATUS": "inconsistent", "LAST_SWITCHOVER_TIME": "" From 1518ca92df1e794222bf45100246c8ef956d7af6 Mon Sep 17 00:00:00 2001 From: Neetha John Date: Wed, 6 Jul 2022 14:56:26 -0700 Subject: [PATCH 19/34] Update load minigraph to load backend acl (#2236) 
Signed-off-by: Neetha John What I did Load backend acl template as part of the load minigraph if the device type is a 'BackEndToRRouter' and the device is a storage device How to verify it Added unit tests to verify if the backend acl load commands are applied --- config/main.py | 43 +++++++++++++++++++++++++++++++++++++++++-- tests/config_test.py | 43 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 84 insertions(+), 2 deletions(-) diff --git a/config/main.py b/config/main.py index 47791cf69d..368e73ab79 100644 --- a/config/main.py +++ b/config/main.py @@ -1129,6 +1129,41 @@ def validate_gre_type(ctx, _, value): except ValueError: raise click.UsageError("{} is not a valid GRE type".format(value)) +def _is_storage_device(cfg_db): + """ + Check if the device is a storage device or not + """ + device_metadata = cfg_db.get_entry("DEVICE_METADATA", "localhost") + return device_metadata.get("storage_device", "Unknown") == "true" + +def _is_acl_table_present(cfg_db, acl_table_name): + """ + Check if acl table exists + """ + return acl_table_name in cfg_db.get_keys("ACL_TABLE") + +def load_backend_acl(cfg_db, device_type): + """ + Load acl on backend storage device + """ + + BACKEND_ACL_TEMPLATE_FILE = os.path.join('/', "usr", "share", "sonic", "templates", "backend_acl.j2") + BACKEND_ACL_FILE = os.path.join('/', "etc", "sonic", "backend_acl.json") + + if device_type and device_type == "BackEndToRRouter" and _is_storage_device(cfg_db) and _is_acl_table_present(cfg_db, "DATAACL"): + if os.path.isfile(BACKEND_ACL_TEMPLATE_FILE): + clicommon.run_command( + "{} -d -t {},{}".format( + SONIC_CFGGEN_PATH, + BACKEND_ACL_TEMPLATE_FILE, + BACKEND_ACL_FILE + ), + display_cmd=True + ) + if os.path.isfile(BACKEND_ACL_FILE): + clicommon.run_command("acl-loader update incremental {}".format(BACKEND_ACL_FILE), display_cmd=True) + + # This is our main entrypoint - the main 'config' command @click.group(cls=clicommon.AbbreviationGroup, context_settings=CONTEXT_SETTINGS) 
@click.pass_context @@ -1693,6 +1728,12 @@ def load_minigraph(db, no_service_restart): if os.path.isfile('/etc/sonic/acl.json'): clicommon.run_command("acl-loader update full /etc/sonic/acl.json", display_cmd=True) + # get the device type + device_type = _get_device_type() + + # Load backend acl + load_backend_acl(db.cfgdb, device_type) + # Load port_config.json try: load_port_config(db.cfgdb, '/etc/sonic/port_config.json') @@ -1702,8 +1743,6 @@ def load_minigraph(db, no_service_restart): # generate QoS and Buffer configs clicommon.run_command("config qos reload --no-dynamic-buffer", display_cmd=True) - # get the device type - device_type = _get_device_type() if device_type != 'MgmtToRRouter' and device_type != 'MgmtTsToR' and device_type != 'BmcMgmtToRRouter' and device_type != 'EPMS': clicommon.run_command("pfcwd start_default", display_cmd=True) diff --git a/tests/config_test.py b/tests/config_test.py index c7d7512234..ca06900817 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -357,6 +357,49 @@ def test_load_minigraph_with_port_config(self, get_cmd_module, setup_single_broa port_config = [{"PORT": {"Ethernet0": {"admin_status": "up"}}}] self.check_port_config(db, config, port_config, "config interface startup Ethernet0") + def test_load_backend_acl(self, get_cmd_module, setup_single_broadcom_asic): + db = Db() + db.cfgdb.set_entry("DEVICE_METADATA", "localhost", {"storage_device": "true"}) + self.check_backend_acl(get_cmd_module, db, device_type='BackEndToRRouter', condition=True) + + def test_load_backend_acl_not_storage(self, get_cmd_module, setup_single_broadcom_asic): + db = Db() + self.check_backend_acl(get_cmd_module, db, device_type='BackEndToRRouter', condition=False) + + def test_load_backend_acl_storage_leaf(self, get_cmd_module, setup_single_broadcom_asic): + db = Db() + db.cfgdb.set_entry("DEVICE_METADATA", "localhost", {"storage_device": "true"}) + self.check_backend_acl(get_cmd_module, db, device_type='BackEndLeafRouter', 
condition=False) + + def test_load_backend_acl_storage_no_dataacl(self, get_cmd_module, setup_single_broadcom_asic): + db = Db() + db.cfgdb.set_entry("DEVICE_METADATA", "localhost", {"storage_device": "true"}) + db.cfgdb.set_entry("ACL_TABLE", "DATAACL", None) + self.check_backend_acl(get_cmd_module, db, device_type='BackEndToRRouter', condition=False) + + def check_backend_acl(self, get_cmd_module, db, device_type='BackEndToRRouter', condition=True): + def is_file_side_effect(filename): + return True if 'backend_acl' in filename else False + with mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)): + with mock.patch('config.main._get_device_type', mock.MagicMock(return_value=device_type)): + with mock.patch( + "utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: + (config, show) = get_cmd_module + runner = CliRunner() + result = runner.invoke(config.config.commands["load_minigraph"], ["-y"], obj=db) + print(result.exit_code) + expected_output = ['Running command: acl-loader update incremental /etc/sonic/backend_acl.json', + 'Running command: /usr/local/bin/sonic-cfggen -d -t /usr/share/sonic/templates/backend_acl.j2,/etc/sonic/backend_acl.json' + ] + print(result.output) + assert result.exit_code == 0 + output = result.output.split('\n') + if condition: + assert set(expected_output).issubset(set(output)) + else: + assert not(set(expected_output).issubset(set(output))) + def check_port_config(self, db, config, port_config, expected_output): def read_json_file_side_effect(filename): return port_config From 3600639c16079f17089efd655c818022987032bb Mon Sep 17 00:00:00 2001 From: Lior Avramov <73036155+liorghub@users.noreply.github.com> Date: Thu, 7 Jul 2022 09:31:57 +0300 Subject: [PATCH 20/34] Add support for IP interface loopback action (#2192) - What I did Add support for IP interface loopback action. 
- How I did it Add new commands and handilig, Update the commad reference guide to incldue the new show command - How to verify it New CLI unittests were added. Previous command output (if the output of a command-line utility has changed) None New command output (if the output of a command-line utility has changed) root@sonic:~# config interface ip loopback-action Ethernet0 drop root@sonic:~# show ip interfaces loopback-action Interface Action ------------ ---------- Ethernet232 drop Vlan100 forward --- config/main.py | 29 ++++++- doc/Command-Reference.md | 43 ++++++++++ show/main.py | 48 +++++++++-- tests/loopback_action_test.py | 139 +++++++++++++++++++++++++++++++ tests/mock_tables/config_db.json | 10 ++- utilities_common/cli.py | 1 + 6 files changed, 259 insertions(+), 11 deletions(-) create mode 100644 tests/loopback_action_test.py diff --git a/config/main.py b/config/main.py index 368e73ab79..8a4c595562 100644 --- a/config/main.py +++ b/config/main.py @@ -4208,7 +4208,7 @@ def fec(ctx, interface_name, interface_fec, verbose): @interface.group(cls=clicommon.AbbreviationGroup) @click.pass_context def ip(ctx): - """Add or remove IP address""" + """Set IP interface attributes""" pass # @@ -4339,6 +4339,32 @@ def remove(ctx, interface_name, ip_addr): command = "ip neigh flush dev {} {}".format(interface_name, str(ip_address)) clicommon.run_command(command) +# +# 'loopback-action' subcommand +# + +@ip.command() +@click.argument('interface_name', metavar='', required=True) +@click.argument('action', metavar='', required=True) +@click.pass_context +def loopback_action(ctx, interface_name, action): + """Set IP interface loopback action""" + config_db = ctx.obj['config_db'] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail('Interface {} is invalid'.format(interface_name)) + + if not clicommon.is_interface_in_config_db(config_db, interface_name): + 
ctx.fail('Interface {} is not an IP interface'.format(interface_name)) + + allowed_actions = ['drop', 'forward'] + if action not in allowed_actions: + ctx.fail('Invalid action') + + table_name = get_interface_table_name(interface_name) + config_db.mod_entry(table_name, interface_name, {"loopback_action": action}) # # buffer commands and utilities @@ -4860,7 +4886,6 @@ def unbind(ctx, interface_name): remove_router_interface_ip_address(config_db, interface_name, ipaddress) config_db.set_entry(table_name, interface_name, None) - # # 'ipv6' subgroup ('config interface ipv6 ...') # diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 4b78da135b..2c873235e1 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -3657,6 +3657,25 @@ This command is used to display the configured MPLS state for the list of config Ethernet4 enable ``` +**show interfaces loopback-action** + +This command displays the configured loopback action + +- Usage: + ``` + show ip interfaces loopback-action + ``` + +- Example: + ``` + root@sonic:~# show ip interfaces loopback-action + Interface Action + ------------ ---------- + Ethernet232 drop + Vlan100 forward + ``` + + **show interfaces tpid** This command displays the key fields of the interfaces such as Operational Status, Administrative Status, Alias and TPID. @@ -3803,6 +3822,7 @@ This sub-section explains the following list of configuration on the interfaces. 
9) advertised-types - to set interface advertised types 10) type - to set interface type 11) mpls - To add or remove MPLS operation for the interface +12) loopback-action - to set action for packet that ingress and gets routed on the same IP interface From 201904 release onwards, the “config interface” command syntax is changed and the format is as follows: @@ -4336,6 +4356,29 @@ MPLS operation for either physical, portchannel, or VLAN interface can be config admin@sonic:~$ sudo config interface mpls remove Ethernet4 ``` +**config interface ip loopback-action (Versions >= 202205)** + +This command is used for setting the action being taken on packets that ingress and get routed on the same IP interface. +Loopback action can be set on IP interface from type physical, portchannel, VLAN interface and VLAN subinterface. +Loopback action can be drop or forward. + +- Usage: + ``` + config interface ip loopback-action --help + Usage: config interface ip loopback-action [OPTIONS] + + Set IP interface loopback action + + Options: + -?, -h, --help Show this message and exit. + ``` + +- Example: + ``` + admin@sonic:~$ config interface ip loopback-action Ethernet0 drop + admin@sonic:~$ config interface ip loopback-action Ethernet0 forward + + ``` Go Back To [Beginning of the document](#) or [Beginning of this section](#interfaces) ## Interface Naming Mode diff --git a/show/main.py b/show/main.py index fd7325677a..96fc23d728 100755 --- a/show/main.py +++ b/show/main.py @@ -933,15 +933,49 @@ def ip(): # Addresses from all scopes are included. Interfaces with no addresses are # excluded. 
# -@ip.command() + +@ip.group(invoke_without_command=True) @multi_asic_util.multi_asic_click_options -def interfaces(namespace, display): - cmd = "sudo ipintutil -a ipv4" - if namespace is not None: - cmd += " -n {}".format(namespace) +@click.pass_context +def interfaces(ctx, namespace, display): + if ctx.invoked_subcommand is None: + cmd = "sudo ipintutil -a ipv4" + if namespace is not None: + cmd += " -n {}".format(namespace) - cmd += " -d {}".format(display) - clicommon.run_command(cmd) + cmd += " -d {}".format(display) + clicommon.run_command(cmd) + +# +# 'show ip interfaces loopback-action' command +# + +@interfaces.command() +def loopback_action(): + """show ip interfaces loopback-action""" + config_db = ConfigDBConnector() + config_db.connect() + header = ['Interface', 'Action'] + body = [] + + if_tbl = config_db.get_table('INTERFACE') + vlan_if_tbl = config_db.get_table('VLAN_INTERFACE') + po_if_tbl = config_db.get_table('PORTCHANNEL_INTERFACE') + sub_if_tbl = config_db.get_table('VLAN_SUB_INTERFACE') + + all_tables = {} + for tbl in [if_tbl, vlan_if_tbl, po_if_tbl, sub_if_tbl]: + all_tables.update(tbl) + + if all_tables: + ifs_action = [] + ifs = list(all_tables.keys()) + for iface in ifs: + if 'loopback_action' in all_tables[iface]: + action = all_tables[iface]['loopback_action'] + ifs_action.append([iface, action]) + body = natsorted(ifs_action) + click.echo(tabulate(body, header)) # # 'route' subcommand ("show ip route") diff --git a/tests/loopback_action_test.py b/tests/loopback_action_test.py new file mode 100644 index 0000000000..58942b0c4b --- /dev/null +++ b/tests/loopback_action_test.py @@ -0,0 +1,139 @@ +import os +from click.testing import CliRunner +import config.main as config +import show.main as show +from utilities_common.db import Db + +show_ip_interfaces_loopback_action_output="""\ +Interface Action +--------------- -------- +Eth32.10 drop +Ethernet0 forward +PortChannel0001 drop +Vlan3000 forward +""" + +class TestLoopbackAction(object): 
+ @classmethod + def setup_class(cls): + print("\nSETUP") + os.environ['UTILITIES_UNIT_TESTING'] = "1" + + def test_config_loopback_action_on_physical_interface(self): + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + action = 'drop' + iface = 'Ethernet0' + + result = runner.invoke(config.config.commands['interface'].commands["ip"].commands['loopback-action'], [iface, action], obj=obj) + + table = db.cfgdb.get_table('INTERFACE') + assert(table[iface]['loopback_action'] == action) + + print(result.exit_code, result.output) + assert result.exit_code == 0 + + def test_config_loopback_action_on_physical_interface_alias(self): + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + action = 'forward' + iface = 'Ethernet0' + iface_alias = 'etp1' + + os.environ['SONIC_CLI_IFACE_MODE'] = "alias" + result = runner.invoke(config.config.commands['interface'].commands["ip"].commands['loopback-action'], [iface_alias, action], obj=obj) + os.environ['SONIC_CLI_IFACE_MODE'] = "default" + + table = db.cfgdb.get_table('INTERFACE') + assert(table[iface]['loopback_action'] == action) + + print(result.exit_code, result.output) + assert result.exit_code == 0 + + def test_config_loopback_action_on_port_channel_interface(self): + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + action = 'forward' + iface = 'PortChannel0002' + + result = runner.invoke(config.config.commands['interface'].commands["ip"].commands['loopback-action'], [iface, action], obj=obj) + + table = db.cfgdb.get_table('PORTCHANNEL_INTERFACE') + assert(table[iface]['loopback_action'] == action) + + print(result.exit_code, result.output) + assert result.exit_code == 0 + + def test_config_loopback_action_on_vlan_interface(self): + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + action = 'drop' + iface = 'Vlan1000' + + result = runner.invoke(config.config.commands['interface'].commands["ip"].commands['loopback-action'], [iface, action], obj=obj) + + table = 
db.cfgdb.get_table('VLAN_INTERFACE') + assert(table[iface]['loopback_action'] == action) + + print(result.exit_code, result.output) + assert result.exit_code == 0 + + def test_config_loopback_action_on_subinterface(self): + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + action = 'forward' + iface = 'Ethernet0.10' + + result = runner.invoke(config.config.commands['interface'].commands["ip"].commands['loopback-action'], [iface, action], obj=obj) + + table = db.cfgdb.get_table('VLAN_SUB_INTERFACE') + assert(table[iface]['loopback_action'] == action) + + print(result.exit_code, result.output) + assert result.exit_code == 0 + + def test_show_ip_interfaces_loopback_action(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["ip"].commands["interfaces"].commands["loopback-action"], []) + + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert result.output == show_ip_interfaces_loopback_action_output + + def test_config_loopback_action_on_non_ip_interface(self): + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + action = 'forward' + iface = 'Ethernet0.11' + ERROR_MSG = "Error: Interface {} is not an IP interface".format(iface) + + result = runner.invoke(config.config.commands['interface'].commands["ip"].commands['loopback-action'], [iface, action], obj=obj) + + print(result.exit_code, result.output) + assert result.exit_code != 0 + assert ERROR_MSG in result.output + + def test_config_loopback_action_invalid_action(self): + runner = CliRunner() + db = Db() + obj = {'config_db':db.cfgdb} + action = 'xforwardx' + iface = 'Ethernet0' + ERROR_MSG = "Error: Invalid action" + + result = runner.invoke(config.config.commands['interface'].commands["ip"].commands['loopback-action'], [iface, action], obj=obj) + + print(result.exit_code, result.output) + assert result.exit_code != 0 + assert ERROR_MSG in result.output + + @classmethod + def teardown_class(cls): + print("\nTEARDOWN") + 
os.environ['UTILITIES_UNIT_TESTING'] = "0" diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index 3314fef9e9..060115d8a9 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -378,6 +378,7 @@ }, "VLAN_SUB_INTERFACE|Eth32.10": { "admin_status": "up", + "loopback_action": "drop", "vlan": "100" }, "VLAN_SUB_INTERFACE|Eth32.10|32.10.11.12/24": { @@ -562,6 +563,9 @@ "VLAN_INTERFACE|Vlan2000": { "proxy_arp": "enabled" }, + "VLAN_INTERFACE|Vlan3000": { + "loopback_action": "forward" + }, "VLAN_INTERFACE|Vlan1000|192.168.0.1/21": { "NULL": "NULL" }, @@ -646,7 +650,8 @@ "NULL": "NULL" }, "PORTCHANNEL_INTERFACE|PortChannel0001": { - "ipv6_use_link_local_only": "disable" + "ipv6_use_link_local_only": "disable", + "loopback_action": "drop" }, "PORTCHANNEL_INTERFACE|PortChannel0002": { "NULL": "NULL" @@ -682,7 +687,8 @@ "NULL": "NULL" }, "INTERFACE|Ethernet0": { - "ipv6_use_link_local_only": "disable" + "ipv6_use_link_local_only": "disable", + "loopback_action": "forward" }, "INTERFACE|Ethernet0|14.14.0.1/24": { "NULL": "NULL" diff --git a/utilities_common/cli.py b/utilities_common/cli.py index 771bb9121e..d6d8a111bf 100644 --- a/utilities_common/cli.py +++ b/utilities_common/cli.py @@ -594,6 +594,7 @@ def is_interface_in_config_db(config_db, interface_name): if (not interface_name in config_db.get_keys('VLAN_INTERFACE') and not interface_name in config_db.get_keys('INTERFACE') and not interface_name in config_db.get_keys('PORTCHANNEL_INTERFACE') and + not interface_name in config_db.get_keys('VLAN_SUB_INTERFACE') and not interface_name == 'null'): return False From df54138b40afa276c40eefd1f5522ec766e551c6 Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Thu, 7 Jul 2022 10:38:57 -0700 Subject: [PATCH 21/34] [show] add new CLI to show tunnel route objects (#2255) What I did Add new CLI support to show tunnel route objects in ASIC DB. 
sign-off: Jing Zhang zhangjing@microsoft.com How I did it Check if tunnel route object exists for server_ipv4, server_ipv6, soc_ipv4. If existing, print it out. If not specifying port name, print all tunnel route objects. How to verify it Added unit tests. Tested on dual testbed. --- show/muxcable.py | 137 ++++++++++++++++++++++++++++++++- tests/mock_tables/appl_db.json | 6 ++ tests/muxcable_test.py | 96 +++++++++++++++++++++++ 3 files changed, 237 insertions(+), 2 deletions(-) diff --git a/show/muxcable.py b/show/muxcable.py index e99a3332d8..e639890ce9 100644 --- a/show/muxcable.py +++ b/show/muxcable.py @@ -503,6 +503,42 @@ def create_json_dump_per_port_config(db, port_status_dict, per_npu_configdb, asi if soc_ipv4_value is not None: port_status_dict["MUX_CABLE"]["PORTS"][port_name]["SERVER"]["soc_ipv4"] = soc_ipv4_value +def get_tunnel_route_per_port(db, port_tunnel_route, per_npu_configdb, per_npu_appl_db, asic_id, port): + + mux_cfg_dict = per_npu_configdb[asic_id].get_all( + per_npu_configdb[asic_id].CONFIG_DB, 'MUX_CABLE|{}'.format(port)) + dest_names = ["server_ipv4", "server_ipv6", "soc_ipv4"] + + for name in dest_names: + dest_address = mux_cfg_dict.get(name, None) + + if dest_address is not None: + route_keys = per_npu_appl_db[asic_id].keys( + per_npu_appl_db[asic_id].APPL_DB, 'TUNNEL_ROUTE_TABLE:*{}'.format(dest_address)) + + if route_keys is not None and len(route_keys): + + port_tunnel_route["TUNNEL_ROUTE"][port] = port_tunnel_route["TUNNEL_ROUTE"].get(port, {}) + port_tunnel_route["TUNNEL_ROUTE"][port][name] = {} + port_tunnel_route["TUNNEL_ROUTE"][port][name]['DEST'] = dest_address + +def create_json_dump_per_port_tunnel_route(db, port_tunnel_route, per_npu_configdb, per_npu_appl_db, asic_id, port): + + get_tunnel_route_per_port(db, port_tunnel_route, per_npu_configdb, per_npu_appl_db, asic_id, port) + +def create_table_dump_per_port_tunnel_route(db, print_data, per_npu_configdb, per_npu_appl_db, asic_id, port): + + port_tunnel_route = {} + 
port_tunnel_route["TUNNEL_ROUTE"] = {} + get_tunnel_route_per_port(db, port_tunnel_route, per_npu_configdb, per_npu_appl_db, asic_id, port) + + for port, route in port_tunnel_route["TUNNEL_ROUTE"].items(): + for dest_name, values in route.items(): + print_line = [] + print_line.append(port) + print_line.append(dest_name) + print_line.append(values['DEST']) + print_data.append(print_line) @muxcable.command() @click.argument('port', required=False, default=None) @@ -558,8 +594,8 @@ def status(db, port, json_output): click.echo("Got invalid asic index for port {}, cant retreive mux status".format(port_name)) sys.exit(STATUS_FAIL) - muxcable_info_dict[asic_index] = per_npu_appl_db[asic_id].get_all( - per_npu_appl_db[asic_id].APPL_DB, 'MUX_CABLE_TABLE:{}'.format(port)) + muxcable_info_dict[asic_index] = per_npu_appl_db[asic_index].get_all( + per_npu_appl_db[asic_index].APPL_DB, 'MUX_CABLE_TABLE:{}'.format(port)) muxcable_grpc_dict[asic_index] = per_npu_statedb[asic_index].get_all( per_npu_statedb[asic_index].STATE_DB, 'MUX_CABLE_TABLE|{}'.format(port)) muxcable_health_dict[asic_index] = per_npu_statedb[asic_index].get_all( @@ -1751,3 +1787,100 @@ def packetloss(db, port, json_output): click.echo(tabulate(print_count, headers=count_headers)) click.echo(tabulate(print_event, headers=event_headers)) + +@muxcable.command() +@click.argument('port', metavar='', required=False, default=None) +@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, help="display the output in json format") +@clicommon.pass_db +def tunnel_route(db, port, json_output): + """show muxcable tunnel-route """ + + port = platform_sfputil_helper.get_interface_name(port, db) + + per_npu_appl_db = {} + per_npu_configdb = {} + mux_tbl_keys = {} + + namespaces = multi_asic.get_front_end_namespaces() + for namespace in namespaces: + asic_id = multi_asic.get_asic_index_from_namespace(namespace) + + per_npu_appl_db[asic_id] = swsscommon.SonicV2Connector(use_unix_socket_path=False, 
namespace=namespace) + per_npu_appl_db[asic_id].connect(per_npu_appl_db[asic_id].APPL_DB) + + per_npu_configdb[asic_id] = swsscommon.SonicV2Connector(use_unix_socket_path=False, namespace=namespace) + per_npu_configdb[asic_id].connect(per_npu_configdb[asic_id].CONFIG_DB) + + mux_tbl_keys[asic_id] = per_npu_configdb[asic_id].keys( + per_npu_configdb[asic_id].CONFIG_DB, "MUX_CABLE|*") + + if port is not None: + + logical_port_list = platform_sfputil_helper.get_logical_list() + + if port not in logical_port_list: + port_name = platform_sfputil_helper.get_interface_alias(port, db) + click.echo(("ERR: Not a valid logical port for dualtor firmware {}".format(port_name))) + sys.exit(CONFIG_FAIL) + + asic_index = None + if platform_sfputil is not None: + asic_index = platform_sfputil_helper.get_asic_id_for_logical_port(port) + if asic_index is None: + # TODO this import is only for unit test purposes, and should be removed once sonic_platform_base + # is fully mocked + import sonic_platform_base.sonic_sfp.sfputilhelper + asic_index = sonic_platform_base.sonic_sfp.sfputilhelper.SfpUtilHelper().get_asic_id_for_logical_port(port) + if asic_index is None: + port_name = platform_sfputil_helper.get_interface_alias(port, db) + click.echo("Got invalid asic index for port {}, cant retreive tunnel route info".format(port_name)) + sys.exit(STATUS_FAIL) + + if mux_tbl_keys[asic_index] is not None and "MUX_CABLE|{}".format(port) in mux_tbl_keys[asic_index]: + if json_output: + port_tunnel_route = {} + port_tunnel_route["TUNNEL_ROUTE"] = {} + + create_json_dump_per_port_tunnel_route(db, port_tunnel_route, per_npu_configdb, per_npu_appl_db, asic_index, port) + + click.echo("{}".format(json.dumps(port_tunnel_route, indent=4))) + + else: + print_data = [] + + create_table_dump_per_port_tunnel_route(db, print_data, per_npu_configdb, per_npu_appl_db, asic_index, port) + + headers = ['PORT', 'DEST_TYPE', 'DEST_ADDRESS'] + + click.echo(tabulate(print_data, headers=headers)) + else: + 
click.echo("this is not a valid port present on dualToR".format(port)) + sys.exit(STATUS_FAIL) + + else: + if json_output: + port_tunnel_route = {} + port_tunnel_route["TUNNEL_ROUTE"] = {} + for namespace in namespaces: + asic_id = multi_asic.get_asic_index_from_namespace(namespace) + for key in natsorted(mux_tbl_keys[asic_id]): + port = key.split("|")[1] + + create_json_dump_per_port_tunnel_route(db, port_tunnel_route, per_npu_configdb, per_npu_appl_db, asic_id, port) + + click.echo("{}".format(json.dumps(port_tunnel_route, indent=4))) + else: + print_data = [] + + for namespace in namespaces: + asic_id = multi_asic.get_asic_index_from_namespace(namespace) + for key in natsorted(mux_tbl_keys[asic_id]): + port = key.split("|")[1] + + create_table_dump_per_port_tunnel_route(db, print_data, per_npu_configdb, per_npu_appl_db, asic_id, port) + + headers = ['PORT', 'DEST_TYPE', 'DEST_ADDRESS'] + + click.echo(tabulate(print_data, headers=headers)) + + sys.exit(STATUS_SUCCESSFUL) diff --git a/tests/mock_tables/appl_db.json b/tests/mock_tables/appl_db.json index 6e0e333372..29e161c9ca 100644 --- a/tests/mock_tables/appl_db.json +++ b/tests/mock_tables/appl_db.json @@ -281,5 +281,11 @@ }, "MUX_CABLE_TABLE:Ethernet12": { "state": "active" + }, + "TUNNEL_ROUTE_TABLE:10.2.1.1": { + "alias": "Vlan1000" + }, + "TUNNEL_ROUTE_TABLE:10.3.1.1": { + "alias": "Vlan1000" } } diff --git a/tests/muxcable_test.py b/tests/muxcable_test.py index fab77e055e..2b4d221171 100644 --- a/tests/muxcable_test.py +++ b/tests/muxcable_test.py @@ -477,6 +477,48 @@ } """ +show_muxcable_tunnel_route_expected_output_json="""\ +{ + "TUNNEL_ROUTE": { + "Ethernet0": { + "server_ipv4": { + "DEST": "10.2.1.1" + } + }, + "Ethernet4": { + "server_ipv4": { + "DEST": "10.3.1.1" + } + } + } +} +""" + +show_muxcable_tunnel_route_expected_output="""\ +PORT DEST_TYPE DEST_ADDRESS +--------- ----------- -------------- +Ethernet0 server_ipv4 10.2.1.1 +Ethernet4 server_ipv4 10.3.1.1 +""" + 
+show_muxcable_tunnel_route_expected_output_port_json="""\ +{ + "TUNNEL_ROUTE": { + "Ethernet0": { + "server_ipv4": { + "DEST": "10.2.1.1" + } + } + } +} +""" + +show_muxcable_tunnel_route_expected_port_output="""\ +PORT DEST_TYPE DEST_ADDRESS +--------- ----------- -------------- +Ethernet0 server_ipv4 10.2.1.1 +""" + class TestMuxcable(object): @classmethod def setup_class(cls): @@ -2113,6 +2155,60 @@ def test_show_muxcable_packetloss_port_json(self): assert result.exit_code == 0 assert result.output == show_muxcable_packetloss_expected_output_json + @mock.patch('utilities_common.platform_sfputil_helper.get_logical_list', mock.MagicMock(return_value=["Ethernet0", "Ethernet12"])) + @mock.patch('utilities_common.platform_sfputil_helper.get_asic_id_for_logical_port', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.platform_sfputil', mock.MagicMock(return_value={0: ["Ethernet12", "Ethernet0"]})) + @mock.patch('utilities_common.platform_sfputil_helper.logical_port_name_to_physical_port_list', mock.MagicMock(return_value=[0])) + def test_show_muxcable_tunnel_route(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(show.cli.commands["muxcable"].commands["tunnel-route"], obj=db) + + assert result.exit_code == 0 + assert result.output == show_muxcable_tunnel_route_expected_output + + @mock.patch('utilities_common.platform_sfputil_helper.get_logical_list', mock.MagicMock(return_value=["Ethernet0", "Ethernet12"])) + @mock.patch('utilities_common.platform_sfputil_helper.get_asic_id_for_logical_port', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.platform_sfputil', mock.MagicMock(return_value={0: ["Ethernet12", "Ethernet0"]})) + @mock.patch('utilities_common.platform_sfputil_helper.logical_port_name_to_physical_port_list', mock.MagicMock(return_value=[0])) + def test_show_muxcable_tunnel_route_json(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(show.cli.commands["muxcable"].commands["tunnel-route"], + 
["--json"], obj=db) + + assert result.exit_code == 0 + assert result.output == show_muxcable_tunnel_route_expected_output_json + + @mock.patch('utilities_common.platform_sfputil_helper.get_logical_list', mock.MagicMock(return_value=["Ethernet0", "Ethernet12"])) + @mock.patch('utilities_common.platform_sfputil_helper.get_asic_id_for_logical_port', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.platform_sfputil', mock.MagicMock(return_value={0: ["Ethernet12", "Ethernet0"]})) + @mock.patch('utilities_common.platform_sfputil_helper.logical_port_name_to_physical_port_list', mock.MagicMock(return_value=[0])) + def test_show_muxcable_tunnel_route_port(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(show.cli.commands["muxcable"].commands["tunnel-route"], + ["Ethernet0"], obj=db) + + assert result.exit_code == 0 + assert result.output == show_muxcable_tunnel_route_expected_port_output + + @mock.patch('utilities_common.platform_sfputil_helper.get_logical_list', mock.MagicMock(return_value=["Ethernet0", "Ethernet12"])) + @mock.patch('utilities_common.platform_sfputil_helper.get_asic_id_for_logical_port', mock.MagicMock(return_value=0)) + @mock.patch('show.muxcable.platform_sfputil', mock.MagicMock(return_value={0: ["Ethernet12", "Ethernet0"]})) + @mock.patch('utilities_common.platform_sfputil_helper.logical_port_name_to_physical_port_list', mock.MagicMock(return_value=[0])) + def test_show_muxcable_tunnel_route_json_port(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(show.cli.commands["muxcable"].commands["tunnel-route"], + ["Ethernet0", "--json"], obj=db) + assert result.exit_code == 0 + assert result.output == show_muxcable_tunnel_route_expected_output_port_json + @classmethod def teardown_class(cls): os.environ['UTILITIES_UNIT_TESTING'] = "0" From 04dbdf6f2c12a8d8f5d09ff466ccd6ea9dfd5c20 Mon Sep 17 00:00:00 2001 From: xumia <59720581+xumia@users.noreply.github.com> Date: Fri, 8 Jul 2022 16:29:09 +0800 Subject: [PATCH 
22/34] Support to enable fips for the command sonic_installer (#2154) What I did Support to enable fips for the command sonic_installer See sonic-net/SONiC#997 How I did it sonic-installer set-fips [--enable-fips|--disable-fips] sonic-installer get-fips --- sonic_installer/bootloader/aboot.py | 16 ++++++ sonic_installer/bootloader/bootloader.py | 8 +++ sonic_installer/bootloader/grub.py | 39 ++++++++++++++ sonic_installer/bootloader/uboot.py | 16 ++++++ sonic_installer/main.py | 32 ++++++++++++ tests/installer_bootloader_aboot_test.py | 23 +++++++++ tests/installer_bootloader_grub_test.py | 29 +++++++++++ .../host/grub/grub.cfg | 51 +++++++++++++++++++ tests/installer_bootloader_uboot_test.py | 36 +++++++++++++ tests/test_sonic_installer.py | 30 +++++++++++ 10 files changed, 280 insertions(+) create mode 100644 tests/installer_bootloader_input/host/grub/grub.cfg diff --git a/sonic_installer/bootloader/aboot.py b/sonic_installer/bootloader/aboot.py index ea1a95c2fe..ff8be7896b 100644 --- a/sonic_installer/bootloader/aboot.py +++ b/sonic_installer/bootloader/aboot.py @@ -147,6 +147,11 @@ def _get_image_cmdline(self, image): with open(os.path.join(image_path, KERNEL_CMDLINE_NAME)) as f: return f.read() + def _set_image_cmdline(self, image, cmdline): + image_path = self.get_image_path(image) + with open(os.path.join(image_path, KERNEL_CMDLINE_NAME), 'w') as f: + return f.write(cmdline) + def supports_package_migration(self, image): if is_secureboot(): # NOTE: unsafe until migration can guarantee migration safety @@ -205,6 +210,17 @@ def verify_next_image(self): image_path = os.path.join(self.get_image_path(image), DEFAULT_SWI_IMAGE) return self._verify_secureboot_image(image_path) + def set_fips(self, image, enable): + fips = "1" if enable else "0" + cmdline = self._get_image_cmdline(image) + cmdline = re.sub(r' sonic_fips=[^\s]', '', cmdline) + " sonic_fips=" + fips + self._set_image_cmdline(image, cmdline) + click.echo('Done') + + def get_fips(self, image): + cmdline 
= self._get_image_cmdline(image) + return 'sonic_fips=1' in cmdline + def _verify_secureboot_image(self, image_path): if is_secureboot(): cert = self.getCert(image_path) diff --git a/sonic_installer/bootloader/bootloader.py b/sonic_installer/bootloader/bootloader.py index a4fcdded4b..91dcaf4665 100644 --- a/sonic_installer/bootloader/bootloader.py +++ b/sonic_installer/bootloader/bootloader.py @@ -57,6 +57,14 @@ def verify_secureboot_image(self, image_path): """verify that the image is secure running image""" raise NotImplementedError + def set_fips(self, image, enable): + """set fips""" + raise NotImplementedError + + def get_fips(self, image): + """returns true if fips set""" + raise NotImplementedError + def verify_next_image(self): """verify the next image for reboot""" image = self.get_next_image() diff --git a/sonic_installer/bootloader/grub.py b/sonic_installer/bootloader/grub.py index 85563dabe6..7ab5c6c0bc 100644 --- a/sonic_installer/bootloader/grub.py +++ b/sonic_installer/bootloader/grub.py @@ -85,6 +85,45 @@ def remove_image(self, image): run_command('grub-set-default --boot-directory=' + HOST_PATH + ' 0') click.echo('Image removed') + def get_linux_cmdline(self, image): + cmdline = None + config = open(HOST_PATH + '/grub/grub.cfg', 'r') + old_config = config.read() + menuentry = re.search("menuentry '" + image + "[^}]*}", old_config).group() + config.close() + for line in menuentry.split('\n'): + line = line.strip() + if line.startswith('linux '): + cmdline = line[6:].strip() + break + return cmdline + + def set_linux_cmdline(self, image, cmdline): + config = open(HOST_PATH + '/grub/grub.cfg', 'r') + old_config = config.read() + old_menuentry = re.search("menuentry '" + image + "[^}]*}", old_config).group() + config.close() + new_menuentry = old_menuentry + for line in old_menuentry.split('\n'): + line = line.strip() + if line.startswith('linux '): + new_menuentry = old_menuentry.replace(line, "linux " + cmdline) + break + config = open(HOST_PATH + 
'/grub/grub.cfg', 'w') + config.write(old_config.replace(old_menuentry, new_menuentry)) + config.close() + + def set_fips(self, image, enable): + fips = "1" if enable else "0" + cmdline = self.get_linux_cmdline(image) + cmdline = re.sub(r' sonic_fips=[^\s]', '', cmdline) + " sonic_fips=" + fips + self.set_linux_cmdline(image, cmdline) + click.echo('Done') + + def get_fips(self, image): + cmdline = self.get_linux_cmdline(image) + return 'sonic_fips=1' in cmdline + def platform_in_platforms_asic(self, platform, image_path): """ For those images that don't have devices list builtin, 'tar' will have non-zero returncode. diff --git a/sonic_installer/bootloader/uboot.py b/sonic_installer/bootloader/uboot.py index 16bc3bcc59..f51085a03e 100644 --- a/sonic_installer/bootloader/uboot.py +++ b/sonic_installer/bootloader/uboot.py @@ -5,6 +5,7 @@ import platform import subprocess import os +import re import click @@ -81,6 +82,21 @@ def remove_image(self, image): def verify_image_platform(self, image_path): return os.path.isfile(image_path) + def set_fips(self, image, enable): + fips = "1" if enable else "0" + proc = subprocess.Popen("/usr/bin/fw_printenv linuxargs", shell=True, text=True, stdout=subprocess.PIPE) + (out, _) = proc.communicate() + cmdline = out.strip() + cmdline = re.sub('^linuxargs=', '', cmdline) + cmdline = re.sub(r' sonic_fips=[^\s]', '', cmdline) + " sonic_fips=" + fips + run_command('/usr/bin/fw_setenv linuxargs ' + cmdline) + click.echo('Done') + + def get_fips(self, image): + proc = subprocess.Popen("/usr/bin/fw_printenv linuxargs", shell=True, text=True, stdout=subprocess.PIPE) + (out, _) = proc.communicate() + return 'sonic_fips=1' in out + @classmethod def detect(cls): arch = platform.machine() diff --git a/sonic_installer/main.py b/sonic_installer/main.py index 3d7c4b9ecc..0056bb77e5 100644 --- a/sonic_installer/main.py +++ b/sonic_installer/main.py @@ -637,6 +637,38 @@ def set_next_boot(image): sys.exit(1) bootloader.set_next_image(image) +# Set 
fips for image +@sonic_installer.command('set-fips') +@click.argument('image', required=False) +@click.option('--enable-fips/--disable-fips', is_flag=True, default=True, + help="Enable or disable FIPS, the default value is to enable FIPS") +def set_fips(image, enable_fips): + """ Set fips for the image """ + bootloader = get_bootloader() + if not image: + image = bootloader.get_next_image() + if image not in bootloader.get_installed_images(): + echo_and_log('Error: Image does not exist', LOG_ERR) + sys.exit(1) + bootloader.set_fips(image, enable=enable_fips) + click.echo('Set FIPS for the image successfully') + +# Get fips for image +@sonic_installer.command('get-fips') +@click.argument('image', required=False) +def get_fips(image): + """ Get the fips enabled or disabled status for the image """ + bootloader = get_bootloader() + if not image: + image = bootloader.get_next_image() + if image not in bootloader.get_installed_images(): + echo_and_log('Error: Image does not exist', LOG_ERR) + sys.exit(1) + enable = bootloader.get_fips(image) + if enable: + click.echo("FIPS is enabled") + else: + click.echo("FIPS is disabled") # Uninstall image @sonic_installer.command('remove') diff --git a/tests/installer_bootloader_aboot_test.py b/tests/installer_bootloader_aboot_test.py index b00d6ffef6..56eee4872e 100644 --- a/tests/installer_bootloader_aboot_test.py +++ b/tests/installer_bootloader_aboot_test.py @@ -2,6 +2,8 @@ # Import test module import sonic_installer.bootloader.aboot as aboot +import tempfile +import shutil # Constants image_dir = f'{aboot.IMAGE_DIR_PREFIX}expeliarmus-{aboot.IMAGE_DIR_PREFIX}abcde' @@ -50,3 +52,24 @@ def test_get_next_image(re_search_patch): # Test convertion image dir to image name re_search_patch().group = Mock(return_value=image_dir) assert bootloader.get_next_image() == exp_image + +def test_set_fips_aboot(): + image = 'test-image' + dirpath = tempfile.mkdtemp() + bootloader = aboot.AbootBootloader() + bootloader.get_image_path = 
Mock(return_value=dirpath) + + # The the default setting + bootloader._set_image_cmdline(image, 'test=1') + assert not bootloader.get_fips(image) + + # Test fips enabled + bootloader.set_fips(image, True) + assert bootloader.get_fips(image) + + # Test fips disabled + bootloader.set_fips(image, False) + assert not bootloader.get_fips(image) + + # Cleanup + shutil.rmtree(dirpath) diff --git a/tests/installer_bootloader_grub_test.py b/tests/installer_bootloader_grub_test.py index faaa8d75fc..ff35e13b37 100644 --- a/tests/installer_bootloader_grub_test.py +++ b/tests/installer_bootloader_grub_test.py @@ -1,4 +1,5 @@ import os +import shutil from unittest.mock import Mock, patch # Import test module @@ -24,3 +25,31 @@ def test_remove_image(open_patch, run_command_patch, re_search_patch): args, _ = args_list[0] assert exp_image_path in args[0] + +@patch("sonic_installer.bootloader.grub.HOST_PATH", os.path.join(os.path.dirname(os.path.abspath(__file__)), 'installer_bootloader_input/_tmp_host')) +def test_set_fips_grub(): + # Prepare the grub.cfg in the _tmp_host folder + current_path = os.path.dirname(os.path.abspath(__file__)) + grub_config = os.path.join(current_path, 'installer_bootloader_input/host/grub/grub.cfg') + tmp_host_path = os.path.join(current_path, 'installer_bootloader_input/_tmp_host') + tmp_grub_path = os.path.join(tmp_host_path, 'grub') + tmp_grub_config = os.path.join(tmp_grub_path, 'grub.cfg') + os.makedirs(tmp_grub_path, exist_ok=True) + shutil.copy(grub_config, tmp_grub_path) + + image = 'SONiC-OS-internal-202205.57377412-84a9a7f11b' + bootloader = grub.GrubBootloader() + + # The the default setting + assert not bootloader.get_fips(image) + + # Test fips enabled + bootloader.set_fips(image, True) + assert bootloader.get_fips(image) + + # Test fips disabled + bootloader.set_fips(image, False) + assert not bootloader.get_fips(image) + + # Cleanup the _tmp_host folder + shutil.rmtree(tmp_host_path) diff --git 
a/tests/installer_bootloader_input/host/grub/grub.cfg b/tests/installer_bootloader_input/host/grub/grub.cfg new file mode 100644 index 0000000000..5b44bae12b --- /dev/null +++ b/tests/installer_bootloader_input/host/grub/grub.cfg @@ -0,0 +1,51 @@ +serial --port=0x3f8 --speed=9600 --word=8 --parity=no --stop=1 +terminal_input console serial +terminal_output console serial + +set timeout=5 + +if [ -s $prefix/grubenv ]; then + load_env +fi +if [ "${saved_entry}" ]; then + set default="${saved_entry}" +fi +if [ "${next_entry}" ]; then + set default="${next_entry}" + unset next_entry + save_env next_entry +fi +if [ "${onie_entry}" ]; then + set next_entry="${default}" + set default="${onie_entry}" + unset onie_entry + save_env onie_entry next_entry +fi + +menuentry 'SONiC-OS-internal-202205.57377412-84a9a7f11b' { + search --no-floppy --label --set=root SONiC-OS + echo 'Loading SONiC-OS OS kernel ...' + insmod gzio + if [ x = xxen ]; then insmod xzio; insmod lzopio; fi + insmod part_msdos + insmod ext2 + linux /image-internal-202205.57377412-84a9a7f11b/boot/vmlinuz-5.10.0-12-2-amd64 root=UUID=df89970c-bf6d-40cf-80fc-a977c89054dd rw console=tty0 console=ttyS0,9600n8 quiet intel_idle.max_cstate=0 net.ifnames=0 biosdevname=0 loop=image-internal-202205.57377412-84a9a7f11b/fs.squashfs loopfstype=squashfs systemd.unified_cgroup_hierarchy=0 apparmor=1 security=apparmor varlog_size=4096 usbcore.autosuspend=-1 acpi_enforce_resources=lax acpi=noirq + echo 'Loading SONiC-OS OS initial ramdisk ...' + initrd /image-internal-202205.57377412-84a9a7f11b/boot/initrd.img-5.10.0-12-2-amd64 +} +menuentry 'SONiC-OS-master-11298.116581-1a4f95389' { + search --no-floppy --label --set=root SONiC-OS + echo 'Loading SONiC-OS OS kernel ...' 
+ insmod gzio + if [ x = xxen ]; then insmod xzio; insmod lzopio; fi + insmod part_msdos + insmod ext2 + linux /image-master-11298.116581-1a4f95389/boot/vmlinuz-5.10.0-12-2-amd64 root=UUID=df89970c-bf6d-40cf-80fc-a977c89054dd rw console=tty0 console=ttyS0,9600n8 quiet intel_idle.max_cstate=0 sonic_fips=1 net.ifnames=0 biosdevname=0 loop=image-master-11298.116581-1a4f95389/fs.squashfs loopfstype=squashfs systemd.unified_cgroup_hierarchy=0 apparmor=1 security=apparmor varlog_size=4096 usbcore.autosuspend=-1 acpi_enforce_resources=lax acpi=noirq + echo 'Loading SONiC-OS OS initial ramdisk ...' + initrd /image-master-11298.116581-1a4f95389/boot/initrd.img-5.10.0-12-2-amd64 +} +menuentry ONIE { + search --no-floppy --label --set=root ONIE-BOOT + echo 'Loading ONIE ...' + chainloader +1 +} diff --git a/tests/installer_bootloader_uboot_test.py b/tests/installer_bootloader_uboot_test.py index 069b398dca..b0fc7c61a7 100644 --- a/tests/installer_bootloader_uboot_test.py +++ b/tests/installer_bootloader_uboot_test.py @@ -4,6 +4,13 @@ # Import test module import sonic_installer.bootloader.uboot as uboot +class MockProc(): + commandline = "linuxargs=" + def communicate(): + return commandline, None + +def mock_run_command(cmd): + MockProc.commandline = cmd @patch("sonic_installer.bootloader.uboot.subprocess.call", Mock()) @patch("sonic_installer.bootloader.uboot.run_command") @@ -27,3 +34,32 @@ def test_remove_image(run_command_patch): args, _ = args_list[0] assert exp_image_path in args[0] + +@patch("sonic_installer.bootloader.uboot.subprocess.Popen") +@patch("sonic_installer.bootloader.uboot.run_command") +def test_set_fips_uboot(run_command_patch, popen_patch): + class MockProc(): + commandline = "linuxargs" + def communicate(self): + return MockProc.commandline, None + + def mock_run_command(cmd): + # Remove leading string "/usr/bin/fw_setenv linuxargs " -- the 29 characters + MockProc.commandline = 'linuxargs=' + cmd[29:] + + run_command_patch.side_effect = 
mock_run_command + popen_patch.return_value = MockProc() + + image = 'test-image' + bootloader = uboot.UbootBootloader() + + # The the default setting + assert not bootloader.get_fips(image) + + # Test fips enabled + bootloader.set_fips(image, True) + assert bootloader.get_fips(image) + + # Test fips disabled + bootloader.set_fips(image, False) + assert not bootloader.get_fips(image) diff --git a/tests/test_sonic_installer.py b/tests/test_sonic_installer.py index 2fa306c213..c004bba9dc 100644 --- a/tests/test_sonic_installer.py +++ b/tests/test_sonic_installer.py @@ -81,3 +81,33 @@ def rootfs_path_mock(path): call(["rm", "-rf", mounted_image_folder], raise_exception=False), ] assert run_command_or_raise.call_args_list == expected_call_list + +@patch("sonic_installer.main.get_bootloader") +def test_set_fips(get_bootloader): + """ This test covers the execution of "sonic-installer set-fips/get-fips" command. """ + + image = "image_1" + next_image = "image_2" + + # Setup bootloader mock + mock_bootloader = Mock() + mock_bootloader.get_next_image = Mock(return_value=next_image) + mock_bootloader.get_installed_images = Mock(return_value=[image, next_image]) + mock_bootloader.set_fips = Mock() + mock_bootloader.get_fips = Mock(return_value=False) + get_bootloader.return_value=mock_bootloader + + runner = CliRunner() + + # Test set-fips command options: --enable-fips/--disable-fips + result = runner.invoke(sonic_installer.commands["set-fips"], [next_image, '--enable-fips']) + assert 'Set FIPS' in result.output + result = runner.invoke(sonic_installer.commands["set-fips"], ['--disable-fips']) + assert 'Set FIPS' in result.output + + # Test command get-fips options + result = runner.invoke(sonic_installer.commands["get-fips"]) + assert "FIPS is disabled" in result.output + mock_bootloader.get_fips = Mock(return_value=True) + result = runner.invoke(sonic_installer.commands["get-fips"], [next_image]) + assert "FIPS is enabled" in result.output From 
28926b0f80ae5fb4228d5ba2743ec146f69c7963 Mon Sep 17 00:00:00 2001 From: tjchadaga <85581939+tjchadaga@users.noreply.github.com> Date: Fri, 8 Jul 2022 16:54:38 -0700 Subject: [PATCH 23/34] Add 'traffic_shift_away' option to config load_minigraph (#2240) --- config/main.py | 10 +++++++++- doc/Command-Reference.md | 4 +++- tests/config_test.py | 28 ++++++++++++++++++++++++++++ 3 files changed, 40 insertions(+), 2 deletions(-) diff --git a/config/main.py b/config/main.py index 8a4c595562..f95c64c2b8 100644 --- a/config/main.py +++ b/config/main.py @@ -1685,8 +1685,9 @@ def load_mgmt_config(filename): @click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, expose_value=False, prompt='Reload config from minigraph?') @click.option('-n', '--no_service_restart', default=False, is_flag=True, help='Do not restart docker services') +@click.option('-t', '--traffic_shift_away', default=False, is_flag=True, help='Keep device in maintenance with TSA') @clicommon.pass_db -def load_minigraph(db, no_service_restart): +def load_minigraph(db, no_service_restart, traffic_shift_away): """Reconfigure based on minigraph.""" log.log_info("'load_minigraph' executing...") @@ -1756,6 +1757,13 @@ def load_minigraph(db, no_service_restart): cfggen_namespace_option = " -n {}".format(namespace) clicommon.run_command(db_migrator + ' -o set_version' + cfggen_namespace_option) + # Keep device isolated with TSA + if traffic_shift_away: + clicommon.run_command("TSA", display_cmd=True) + if os.path.isfile(DEFAULT_GOLDEN_CONFIG_DB_FILE): + log.log_warning("Golden configuration may override System Maintenance state. Please execute TSC to check the current System mode") + click.secho("[WARNING] Golden configuration may override Traffic-shift-away state. 
Please execute TSC to check the current System mode") + # Load golden_config_db.json if os.path.isfile(DEFAULT_GOLDEN_CONFIG_DB_FILE): override_config_by(DEFAULT_GOLDEN_CONFIG_DB_FILE) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 2c873235e1..012834b017 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -5149,9 +5149,11 @@ When user specifies the optional argument "-n" or "--no-service-restart", this c running on the device. One use case for this option is during boot time when config-setup service loads minigraph configuration and there is no services running on the device. +When user specifies the optional argument "-t" or "--traffic-shift-away", this command executes TSA command at the end to ensure the device remains in maintenance after loading minigraph. + - Usage: ``` - config load_minigraph [-y|--yes] [-n|--no-service-restart] + config load_minigraph [-y|--yes] [-n|--no-service-restart] [-t|--traffic-shift-away] ``` - Example: diff --git a/tests/config_test.py b/tests/config_test.py index ca06900817..18fa251e9b 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -435,6 +435,34 @@ def is_file_side_effect(filename): assert result.exit_code == 0 assert expected_output in result.output + def test_load_minigraph_with_traffic_shift_away(self, get_cmd_module): + with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: + (config, show) = get_cmd_module + runner = CliRunner() + result = runner.invoke(config.config.commands["load_minigraph"], ["-ty"]) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 + assert "TSA" in result.output + + def test_load_minigraph_with_traffic_shift_away_with_golden_config(self, get_cmd_module): + with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: + def 
is_file_side_effect(filename): + return True if 'golden_config' in filename else False + with mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)): + (config, show) = get_cmd_module + db = Db() + golden_config = {} + runner = CliRunner() + result = runner.invoke(config.config.commands["load_minigraph"], ["-ty"]) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 + assert "TSA" in result.output + assert "[WARNING] Golden configuration may override Traffic-shift-away state" in result.output + @classmethod def teardown_class(cls): os.environ['UTILITIES_UNIT_TESTING'] = "0" From b41da8f2506a19bfa8b4a45b13808dc4ac984eb5 Mon Sep 17 00:00:00 2001 From: isabelmsft <67024108+isabelmsft@users.noreply.github.com> Date: Tue, 12 Jul 2022 10:47:07 -0700 Subject: [PATCH 24/34] Fix README to reflect sonic-utilities being built in Bullseye Fix README to reflect sonic-utilities being built in Bullseye --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 1c8a33fc7e..3ba6ce26f5 100644 --- a/README.md +++ b/README.md @@ -40,9 +40,9 @@ A convenient alternative is to let the SONiC build system configure a build envi make configure PLATFORM=generic ``` -2. Build the sonic-utilities Python wheel package inside the Buster slave container, and tell the build system to keep the container alive when finished +2. Build the sonic-utilities Python wheel package inside the Bullseye slave container, and tell the build system to keep the container alive when finished ``` - make NOJESSIE=1 NOSTRETCH=1 KEEP_SLAVE_ON=yes target/python-wheels/sonic_utilities-1.2-py3-none-any.whl + make NOSTRETCH=1 NOBUSTER=1 KEEP_SLAVE_ON=yes target/python-wheels/bullseye/sonic_utilities-1.2-py3-none-any.whl ``` 3. When the build finishes, your prompt will change to indicate you are inside the slave container. 
Change into the `src/sonic-utilities/` directory From d6b886989cc1f2104613c2cb21d406f9ab5127dc Mon Sep 17 00:00:00 2001 From: Vivek Date: Wed, 13 Jul 2022 06:22:18 -0700 Subject: [PATCH 25/34] [Auto-Techsupport] Fix the coredump_gen_handler Exception when the History table is empty (#2265) coredump_gen_handler script is throwing exception currently when the history table is empty. Fix this issue and add a UT Signed-off-by: Vivek Reddy Karri --- tests/coredump_gen_handler_test.py | 20 ++++++++++++++++++++ utilities_common/auto_techsupport_helper.py | 2 +- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/tests/coredump_gen_handler_test.py b/tests/coredump_gen_handler_test.py index f704311afd..e726dbbbed 100644 --- a/tests/coredump_gen_handler_test.py +++ b/tests/coredump_gen_handler_test.py @@ -449,3 +449,23 @@ def mock_cmd(cmd, env): patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz", "swss", redis_mock) cls.handle_core_dump_creation_event() + + def test_auto_ts_empty_state_db(self): + """ + Scenario: Check if the techsupport is called as expected even when the history table in empty + and container cooloff is non-zero + """ + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, state="enabled", since_cfg="2 days ago") + set_feature_table_cfg(redis_mock, state="enabled", rate_limit_interval="300") + with Patcher() as patcher: + def mock_cmd(cmd, env): + cmd_str = " ".join(cmd) + if "show techsupport" in cmd_str and cmd_str != TS_DEFAULT_CMD: + assert False, "Expected TS_CMD: {}, Recieved: {}".format(TS_DEFAULT_CMD, cmd_str) + return 0, AUTO_TS_STDOUT, "" + ts_helper.subprocess_exec = mock_cmd + patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz", "swss", redis_mock) + cls.handle_core_dump_creation_event() diff --git a/utilities_common/auto_techsupport_helper.py 
b/utilities_common/auto_techsupport_helper.py index 9bbfe9a87d..30c99ce0c9 100644 --- a/utilities_common/auto_techsupport_helper.py +++ b/utilities_common/auto_techsupport_helper.py @@ -259,7 +259,7 @@ def get_ts_map(db): ts_map = {} ts_keys = db.keys(STATE_DB, TS_MAP+"*") if not ts_keys: - return + return ts_map for ts_key in ts_keys: data = db.get_all(STATE_DB, ts_key) if not data: From 1dacb7f2a577c0cc535c5c7ba572c52bd0bd4ad3 Mon Sep 17 00:00:00 2001 From: Hua Liu <58683130+liuh-80@users.noreply.github.com> Date: Thu, 14 Jul 2022 07:57:16 +0800 Subject: [PATCH 26/34] Replace pyswsssdk with swsscommon (#2251) #### What I did Replace sonic-py-swsssdk with sonic-swss-common. #### How I did it Update all sonic-py-swsssdk to use sonic-swss-common or sonic-py-common. #### How to verify it Pass all UT and E2E test. #### Previous command output (if the output of a command-line utility has changed) #### New command output (if the output of a command-line utility has changed) --- README.md | 2 +- config/config_mgmt.py | 2 +- config/main.py | 2 +- scripts/fdbshow | 2 +- scripts/nbrshow | 2 +- setup.py | 1 - tests/config_test.py | 28 ++++++++++++++++++++++++++++ tests/db_migrator_test.py | 2 +- tests/null_route_helper_test.py | 2 +- tests/show_snmp_test.py | 1 - tests/tpid_test.py | 1 - 11 files changed, 35 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index 3ba6ce26f5..46b7af71f1 100644 --- a/README.md +++ b/README.md @@ -26,11 +26,11 @@ Currently, this list of dependencies is as follows: - libyang-cpp_1.0.73_amd64.deb - python3-yang_1.0.73_amd64.deb - redis_dump_load-1.1-py3-none-any.whl -- swsssdk-2.0.1-py3-none-any.whl - sonic_py_common-1.0-py3-none-any.whl - sonic_config_engine-1.0-py3-none-any.whl - sonic_yang_mgmt-1.0-py3-none-any.whl - sonic_yang_models-1.0-py3-none-any.whl +- python-swsscommon_1.0.0_amd64.deb A convenient alternative is to let the SONiC build system configure a build enviroment for you. 
This can be done by cloning the [sonic-buildimage](https://github.com/Azure/sonic-buildimage) repo, building the sonic-utilities package inside the Debian Buster slave container, and staying inside the container once the build finishes. During the build process, the SONiC build system will build and install all the necessary dependencies inside the container. After following the instructions to clone and initialize the sonic-buildimage repo, this can be done as follows: diff --git a/config/config_mgmt.py b/config/config_mgmt.py index ba3f7b0441..a10393c72c 100644 --- a/config/config_mgmt.py +++ b/config/config_mgmt.py @@ -15,7 +15,7 @@ import sonic_yang from jsondiff import diff -from swsssdk import port_util +from sonic_py_common import port_util from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector from utilities_common.general import load_module_from_source diff --git a/config/main.py b/config/main.py index f95c64c2b8..d11e63ea89 100644 --- a/config/main.py +++ b/config/main.py @@ -6611,7 +6611,7 @@ def rate(): @click.argument('rates_type', type=click.Choice(['all', 'port', 'rif', 'flowcnt-trap']), default='all') def smoothing_interval(interval, rates_type): """Set rates smoothing interval """ - counters_db = swsssdk.SonicV2Connector() + counters_db = SonicV2Connector() counters_db.connect('COUNTERS_DB') alpha = 2.0/(interval + 1) diff --git a/scripts/fdbshow b/scripts/fdbshow index 475a5dee7d..3348fefad9 100755 --- a/scripts/fdbshow +++ b/scripts/fdbshow @@ -56,7 +56,7 @@ try: # pragma: no cover except KeyError: # pragma: no cover pass -from swsssdk import port_util +from sonic_py_common import port_util from swsscommon.swsscommon import SonicV2Connector from tabulate import tabulate diff --git a/scripts/nbrshow b/scripts/nbrshow index b59acf0287..2a2d85354a 100644 --- a/scripts/nbrshow +++ b/scripts/nbrshow @@ -33,7 +33,7 @@ import subprocess import re from natsort import natsorted -from swsssdk import port_util +from sonic_py_common import 
port_util from swsscommon.swsscommon import SonicV2Connector from tabulate import tabulate diff --git a/setup.py b/setup.py index 8cb4a7515f..3f5e1b6633 100644 --- a/setup.py +++ b/setup.py @@ -211,7 +211,6 @@ 'sonic-platform-common', 'sonic-py-common', 'sonic-yang-mgmt', - 'swsssdk>=2.0.1', 'tabulate==0.8.2', 'toposort==1.6', 'www-authenticate==0.9.2', diff --git a/tests/config_test.py b/tests/config_test.py index 18fa251e9b..e9dbae4194 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -1557,3 +1557,31 @@ def teardown_class(cls): from .mock_tables import mock_single_asic importlib.reload(mock_single_asic) dbconnector.load_namespace_config() + +class TestConfigRate(object): + @classmethod + def setup_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "1" + print("SETUP") + + import config.main + importlib.reload(config.main) + + def test_config_rate(self, get_cmd_module, setup_single_broadcom_asic): + with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: + (config, show) = get_cmd_module + + runner = CliRunner() + result = runner.invoke(config.config.commands["rate"], ["smoothing-interval", "500"]) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + + assert result.exit_code == 0 + assert result.output == "" + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ['UTILITIES_UNIT_TESTING'] = "0" diff --git a/tests/db_migrator_test.py b/tests/db_migrator_test.py index 96e139b934..7e0d505775 100644 --- a/tests/db_migrator_test.py +++ b/tests/db_migrator_test.py @@ -4,7 +4,7 @@ from deepdiff import DeepDiff -from swsssdk import SonicV2Connector +from swsscommon.swsscommon import SonicV2Connector from sonic_py_common import device_info from .mock_tables import dbconnector diff --git a/tests/null_route_helper_test.py b/tests/null_route_helper_test.py index f07a981aa3..f82b0d3675 100644 --- 
a/tests/null_route_helper_test.py +++ b/tests/null_route_helper_test.py @@ -3,7 +3,7 @@ import imp from click.testing import CliRunner -from swsssdk import ConfigDBConnector +from swsscommon.swsscommon import ConfigDBConnector null_route_helper = imp.load_source('null_route_helper', os.path.join(os.path.dirname(__file__), '..', 'scripts','null_route_helper')) null_route_helper.ConfigDBConnector = ConfigDBConnector diff --git a/tests/show_snmp_test.py b/tests/show_snmp_test.py index 753e20c418..38a49882ff 100644 --- a/tests/show_snmp_test.py +++ b/tests/show_snmp_test.py @@ -3,7 +3,6 @@ import click from click.testing import CliRunner import pytest -import swsssdk import traceback test_path = os.path.dirname(os.path.abspath(__file__)) diff --git a/tests/tpid_test.py b/tests/tpid_test.py index b4c56ff084..02726f5180 100644 --- a/tests/tpid_test.py +++ b/tests/tpid_test.py @@ -3,7 +3,6 @@ import click from click.testing import CliRunner import pytest -import swsssdk import traceback test_path = os.path.dirname(os.path.abspath(__file__)) From 8e5d4789642e6a8954725639b39deef9dbbc7304 Mon Sep 17 00:00:00 2001 From: Nazarii Hnydyn Date: Mon, 18 Jul 2022 07:23:03 +0300 Subject: [PATCH 27/34] [ssip]: Add CLI (#2191) - What I did Implemented CLI for Syslog Source IP How I did it N/A - How to verify it N/A Previous command output (if the output of a command-line utility has changed) - N/A New command output (if the output of a command-line utility has changed) root@sonic:/home/admin# show syslog SERVER IP SOURCE IP PORT VRF ----------- ----------- ------ -------- 2.2.2.2 1.1.1.1 514 default 3.3.3.3 1.1.1.1 514 mgmt 2222::2222 1111::1111 514 Vrf-Data Signed-off-by: Nazarii Hnydyn --- config/main.py | 62 +--- config/syslog.py | 449 +++++++++++++++++++++++ doc/Command-Reference.md | 56 ++- show/main.py | 49 ++- show/syslog.py | 53 +++ tests/syslog_input/assert_show_output.py | 17 + tests/syslog_input/config_mock.py | 70 ++++ tests/syslog_input/syslog_cdb.json | 20 + 
tests/syslog_input/vrf_cdb.json | 8 + tests/syslog_test.py | 244 ++++++++++++ 10 files changed, 937 insertions(+), 91 deletions(-) create mode 100644 config/syslog.py create mode 100644 show/syslog.py create mode 100644 tests/syslog_input/assert_show_output.py create mode 100644 tests/syslog_input/config_mock.py create mode 100644 tests/syslog_input/syslog_cdb.json create mode 100644 tests/syslog_input/vrf_cdb.json create mode 100644 tests/syslog_test.py diff --git a/config/main.py b/config/main.py index d11e63ea89..e26eac133c 100644 --- a/config/main.py +++ b/config/main.py @@ -47,6 +47,7 @@ from . import plugins from .config_mgmt import ConfigMgmtDPB from . import mclag +from . import syslog # mock masic APIs for unit test try: @@ -1213,6 +1214,9 @@ def config(ctx): config.add_command(mclag.mclag_member) config.add_command(mclag.mclag_unique_ip) +# syslog module +config.add_command(syslog.syslog) + @config.command() @click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, expose_value=False, prompt='Existing files will be overwritten, continue?') @@ -5056,6 +5060,12 @@ def del_vrf(ctx, vrf_name): ctx.fail("'vrf_name' is not start with Vrf, mgmt or management!") if len(vrf_name) > 15: ctx.fail("'vrf_name' is too long!") + syslog_table = config_db.get_table("SYSLOG_SERVER") + syslog_vrf_dev = "mgmt" if vrf_name == "management" else vrf_name + for syslog_entry, syslog_data in syslog_table.items(): + syslog_vrf = syslog_data.get("vrf") + if syslog_vrf == syslog_vrf_dev: + ctx.fail("Failed to remove VRF device: {} is in use by SYSLOG_SERVER|{}".format(syslog_vrf, syslog_entry)) if (vrf_name == 'mgmt' or vrf_name == 'management'): vrf_delete_management_vrf(config_db) else: @@ -6082,58 +6092,6 @@ def enable(enable): command = "ztp enable" clicommon.run_command(command, display_cmd=True) -# -# 'syslog' group ('config syslog ...') -# -@config.group(cls=clicommon.AbbreviationGroup, name='syslog') -@click.pass_context -def syslog_group(ctx): - """Syslog server 
configuration tasks""" - config_db = ConfigDBConnector() - config_db.connect() - ctx.obj = {'db': config_db} - -@syslog_group.command('add') -@click.argument('syslog_ip_address', metavar='', required=True) -@click.pass_context -def add_syslog_server(ctx, syslog_ip_address): - """ Add syslog server IP """ - if not clicommon.is_ipaddress(syslog_ip_address): - ctx.fail('Invalid ip address') - db = ctx.obj['db'] - syslog_servers = db.get_table("SYSLOG_SERVER") - if syslog_ip_address in syslog_servers: - click.echo("Syslog server {} is already configured".format(syslog_ip_address)) - return - else: - db.set_entry('SYSLOG_SERVER', syslog_ip_address, {'NULL': 'NULL'}) - click.echo("Syslog server {} added to configuration".format(syslog_ip_address)) - try: - click.echo("Restarting rsyslog-config service...") - clicommon.run_command("systemctl restart rsyslog-config", display_cmd=False) - except SystemExit as e: - ctx.fail("Restart service rsyslog-config failed with error {}".format(e)) - -@syslog_group.command('del') -@click.argument('syslog_ip_address', metavar='', required=True) -@click.pass_context -def del_syslog_server(ctx, syslog_ip_address): - """ Delete syslog server IP """ - if not clicommon.is_ipaddress(syslog_ip_address): - ctx.fail('Invalid IP address') - db = ctx.obj['db'] - syslog_servers = db.get_table("SYSLOG_SERVER") - if syslog_ip_address in syslog_servers: - db.set_entry('SYSLOG_SERVER', '{}'.format(syslog_ip_address), None) - click.echo("Syslog server {} removed from configuration".format(syslog_ip_address)) - else: - ctx.fail("Syslog server {} is not configured.".format(syslog_ip_address)) - try: - click.echo("Restarting rsyslog-config service...") - clicommon.run_command("systemctl restart rsyslog-config", display_cmd=False) - except SystemExit as e: - ctx.fail("Restart service rsyslog-config failed with error {}".format(e)) - # # 'ntp' group ('config ntp ...') # diff --git a/config/syslog.py b/config/syslog.py new file mode 100644 index 
0000000000..be16a12d38 --- /dev/null +++ b/config/syslog.py @@ -0,0 +1,449 @@ +import click + +import json +import ipaddress +import subprocess + +import utilities_common.cli as clicommon +from sonic_py_common import logger + + +SYSLOG_TABLE_CDB = "SYSLOG_SERVER" + +SYSLOG_SOURCE = "source" +SYSLOG_PORT = "port" +SYSLOG_VRF = "vrf" + +VRF_TABLE_CDB = "VRF" +MGMT_VRF_TABLE_CDB = "MGMT_VRF_CONFIG" + +MGMT_VRF_GLOBAL = "vrf_global" +MGMT_VRF_GLOBAL_ENABLED = "mgmtVrfEnabled" + + +log = logger.Logger() +log.set_min_log_priority_info() + +# +# Syslog helpers ------------------------------------------------------------------------------------------------------ +# + +def exec_cmd(cmd): + """ Execute shell command """ + return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) + + +def get_vrf_list(): + """ Get Linux VRF device list """ + vrf_list = [] + vrf_data = json.loads(exec_cmd('ip --json vrf show')) + for vrf_entry in vrf_data: + vrf_name = vrf_entry.get('name', None) + if vrf_name is not None: + vrf_list.append(vrf_name) + return vrf_list + + +def get_vrf_member_dict(): + """ Get Linux VRF device to member dict """ + vrf_member_dict = {} + vrf_list = get_vrf_list() + for vrf_name in vrf_list: + vrf_member_dict[vrf_name] = [] + vrf_member_data = json.loads(exec_cmd('ip --json link show vrf {}'.format(vrf_name))) + for vrf_member_entry in vrf_member_data: + vrf_member_name = vrf_member_entry.get('ifname', None) + if vrf_member_name is not None: + vrf_member_dict[vrf_name].append(vrf_member_name) + return vrf_member_dict + + +def get_ip_addr_dict(): + """ Get Linux interface to IPv4/IPv6 address list dict """ + ip_addr_dict = {} + ip_addr_data = json.loads(exec_cmd('ip --json address show')) + for ip_addr_entry in ip_addr_data: + link_name = ip_addr_entry.get('ifname', None) + if link_name is not None: + ip_addr_dict[link_name] = [] + ip_data = ip_addr_entry.get('addr_info', None) + if ip_data is not None: + for ip_entry in ip_data: + ip_addr = 
ip_entry.get('local', None) + if ip_addr is not None: + ip_addr_dict[link_name].append(ip_addr) + return ip_addr_dict + + +def get_param(ctx, name): + """ Get click parameter """ + for param in ctx.command.params: + if param.name == name: + return param + return None + + +def get_param_hint(ctx, name): + """ Get click parameter description """ + return get_param(ctx, name).get_error_hint(ctx) + +# +# Syslog DB interface ------------------------------------------------------------------------------------------------- +# + +def add_entry(db, table, key, data): + """ Add new entry in table """ + cfg = db.get_config() + cfg.setdefault(table, {}) + + if key in cfg[table]: + raise click.ClickException("{}{}{} already exists in Config DB".format( + table, db.TABLE_NAME_SEPARATOR, db.serialize_key(key) + ) + ) + + db.set_entry(table, key, data) + + +def del_entry(db, table, key): + """ Delete entry in table """ + cfg = db.get_config() + cfg.setdefault(table, {}) + + if key not in cfg[table]: + raise click.ClickException("{}{}{} doesn't exist in Config DB".format( + table, db.TABLE_NAME_SEPARATOR, db.serialize_key(key) + ) + ) + + db.set_entry(table, key, None) + + +def is_exist_in_db(db, table, key): + """ + Check if provided hash already exists in Config DB + + Args: + db: reference to Config DB + table: table to search in Config DB + key: key to search in Config DB + + Returns: + bool: The return value. True for success, False otherwise + """ + if (not table) or (not key): + return False + + if key not in db.get_keys(table): + return False + + return True + + +def is_mgmt_vrf_enabled(db): + """ + Check if management VRF is enabled in Config DB + + Args: + db: reference to Config DB + + Returns: + bool: The return value. 
True for success, False otherwise + """ + entry = db.get_entry(MGMT_VRF_TABLE_CDB, MGMT_VRF_GLOBAL) + if not entry: + return False + + value = entry.get(MGMT_VRF_GLOBAL_ENABLED, None) + if not value: + return False + + return value.title() == 'True' + +# +# Syslog validators --------------------------------------------------------------------------------------------------- +# + +def server_validator(ctx, db, ip_addr, is_exist=True): + """ + Check if server IP address exists in Config DB + + Args: + ctx: click context + db: reference to Config DB + ip_addr: server IP address + is_exist: entry existence flag. True for presence assert, False otherwise + """ + if is_exist: + if not is_exist_in_db(db, str(SYSLOG_TABLE_CDB), str(ip_addr)): + raise click.UsageError("Invalid value for {}: {} is not a valid syslog server".format( + get_param_hint(ctx, "server_ip_address"), ip_addr), ctx + ) + else: + if is_exist_in_db(db, str(SYSLOG_TABLE_CDB), str(ip_addr)): + raise click.UsageError("Invalid value for {}: {} is a valid syslog server".format( + get_param_hint(ctx, "server_ip_address"), ip_addr), ctx + ) + + +def ip_addr_validator(ctx, param, value): + """ + Check if IP address option is valid + + Args: + ctx: click context + param: click parameter context + value: value of parameter + + Returns: + str: validated parameter + """ + if value is None: + return None + + try: + ip = ipaddress.ip_address(value) + except Exception as e: + raise click.UsageError("Invalid value for {}: {}".format( + param.get_error_hint(ctx), e), ctx + ) + + return str(ip) + + +def source_validator(ctx, server, source): + """ + Check if source option is valid + + Args: + ctx: click context + server: server IP address + source: source IP address + """ + source_ip = ipaddress.ip_address(source) + if source_ip.is_loopback or source_ip.is_multicast or source_ip.is_link_local: + raise click.UsageError("Invalid value for {}: {} is a loopback/multicast/link-local IP address".format( + get_param_hint(ctx, 
"source"), source), ctx + ) + + server_ip = ipaddress.ip_address(server) + if server_ip.version != source_ip.version: + raise click.UsageError("Invalid value for {} / {}: {} / {} IP address family mismatch".format( + get_param_hint(ctx, "server_ip_address"), get_param_hint(ctx, "source"), server, source), ctx + ) + + +def vrf_validator(ctx, db, value): + """ + Check if VRF device option is valid + + Args: + ctx: click context + db: reference to Config DB + value: value of parameter + + Returns: + str: validated parameter + """ + if value is None: + return None + + vrf_list = ["default"] + if is_mgmt_vrf_enabled(db): + vrf_list.append("mgmt") + vrf_list.extend(db.get_keys(VRF_TABLE_CDB)) + + return click.Choice(vrf_list).convert(value, get_param(ctx, "vrf"), ctx) + + +def source_to_vrf_validator(ctx, source, vrf): + """ + Check if source IP address and VRF device are compliant to Linux configuration + + I. VRF/Source: unset/unset + + Linux kernel decides which source IP to use within the default VRF + + II. VRF/Source: unset/set + + Check if source IP is configured on any default VRF member: + yes - set source IP, no - generate error + + III. VRF/Source: set/unset + + Check VRF type: + 1. Default + 2. MGMT + 3. DATA + + Default VRF: + 1. Skip VRF configuration + + MGMT VRF: + 1. Check if MGMT VRF is enabled: + yes - set VRF, no - generate error + + DATA VRF: + 1. Check if VRF is a member of SONiC VRF table: + yes - set VRF, no - generate error + + IV. VRF/Source: set/set + + Check VRF type: + 1. Default + 2. MGMT + 3. DATA + + Default VRF: + 1. Check if source IP is configured on any DEFAULT VRF member: + yes - set source IP, no - generate error + 2. Skip VRF configuration + + MGMT VRF: + 1. Check if MGMT VRF is enabled: + yes - set VRF, no - generate error + 2. Check if source IP is configured on any MGMT VRF member: + yes - set source IP, no - generate error + + DATA VRF: + 1. Check if VRF is a member of SONiC VRF table: + yes - set VRF, no - generate error + 2. 
Check if source IP is configured on any DATA VRF member: + yes - set source IP, no - generate error + + Args: + ctx: click context + source: source IP address + vrf: VRF device + """ + def to_ip_addr_list(ip_addr_dict): + return list(set([ip_addr for _, ip_addr_list in ip_addr_dict.items() for ip_addr in ip_addr_list])) + + if (source is None) and (vrf is None): + return + + try: + vrf_list = get_vrf_list() + vm_dict = get_vrf_member_dict() + ip_dict = get_ip_addr_dict() + except Exception as e: + raise click.ClickException(str(e)) + + if vrf is not None and vrf != "default": # Non default VRF device + if vrf not in vrf_list: + raise click.UsageError("Invalid value for {}: {} VRF doesn't exist in Linux".format( + get_param_hint(ctx, "vrf"), vrf), ctx + ) + if source is not None: + filter_out = vm_dict[vrf] + ip_vrf_dict = dict(filter(lambda value: value[0] in filter_out, ip_dict.items())) + if source not in to_ip_addr_list(ip_vrf_dict): + raise click.UsageError("Invalid value for {}: {} IP doesn't exist in Linux {} VRF".format( + get_param_hint(ctx, "source"), source, vrf), ctx + ) + else: # Default VRF device + if source is not None: + filter_out = vrf_list + filter_out.extend([vm for _, vm_list in vm_dict.items() for vm in vm_list]) + ip_vrf_dict = dict(filter(lambda value: value[0] not in filter_out, ip_dict.items())) + if source not in to_ip_addr_list(ip_vrf_dict): + raise click.UsageError("Invalid value for {}: {} IP doesn't exist in Linux default VRF".format( + get_param_hint(ctx, "source"), source), ctx + ) + +# +# Syslog CLI ---------------------------------------------------------------------------------------------------------- +# + +@click.group( + name="syslog", + cls=clicommon.AliasedGroup +) +def syslog(): + """ Configure syslog server """ + pass + + +@syslog.command("add") +@click.argument( + "server_ip_address", + nargs=1, + required=True, + callback=ip_addr_validator +) +@click.option( + "-s", "--source", + help="Configures syslog source IP 
address", + callback=ip_addr_validator +) +@click.option( + "-p", "--port", + help="Configures syslog server UDP port", + type=click.IntRange(min=0, max=65535, clamp=False) +) +@click.option( + "-r", "--vrf", + help="Configures syslog VRF device" +) +@clicommon.pass_db +def add(db, server_ip_address, source, port, vrf): + """ Add object to SYSLOG_SERVER table """ + ctx = click.get_current_context() + + server_validator(ctx, db.cfgdb, server_ip_address, False) + + table = str(SYSLOG_TABLE_CDB) + key = str(server_ip_address) + data = {} + + if source is not None: + source_validator(ctx, server_ip_address, source) + data[SYSLOG_SOURCE] = source + if port is not None: + data[SYSLOG_PORT] = port + if vrf is not None: + vrf_validator(ctx, db.cfgdb, vrf) + data[SYSLOG_VRF] = vrf + + source_to_vrf_validator(ctx, source, vrf) + + try: + add_entry(db.cfgdb, table, key, data) + clicommon.run_command("systemctl reset-failed rsyslog-config rsyslog", display_cmd=True) + clicommon.run_command("systemctl restart rsyslog-config", display_cmd=True) + log.log_notice("Added remote syslog logging: server={},source={},port={},vrf={}".format( + server_ip_address, + data.get(SYSLOG_SOURCE, "N/A"), + data.get(SYSLOG_PORT, "N/A"), + data.get(SYSLOG_VRF, "N/A") + )) + except Exception as e: + log.log_error("Failed to add remote syslog logging: {}".format(str(e))) + ctx.fail(str(e)) + + +@syslog.command("del") +@click.argument( + "server_ip_address", + nargs=1, + required=True, + callback=ip_addr_validator +) +@clicommon.pass_db +def delete(db, server_ip_address): + """ Delete object from SYSLOG_SERVER table """ + ctx = click.get_current_context() + + server_validator(ctx, db.cfgdb, server_ip_address) + + table = str(SYSLOG_TABLE_CDB) + key = str(server_ip_address) + + try: + del_entry(db.cfgdb, table, key) + clicommon.run_command("systemctl reset-failed rsyslog-config rsyslog", display_cmd=True) + clicommon.run_command("systemctl restart rsyslog-config", display_cmd=True) + 
log.log_notice("Removed remote syslog logging: server={}".format(server_ip_address)) + except Exception as e: + log.log_error("Failed to remove remote syslog logging: {}".format(str(e))) + ctx.fail(str(e)) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 012834b017..670049fe56 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -148,6 +148,7 @@ * [Subinterfaces Show Commands](#subinterfaces-show-commands) * [Subinterfaces Config Commands](#subinterfaces-config-commands) * [Syslog](#syslog) + * [Syslog show commands](#syslog-show-commands) * [Syslog config commands](#syslog-config-commands) * [System State](#system-state) * [Processes](#processes) @@ -8590,40 +8591,71 @@ Go Back To [Beginning of the document](#) or [Beginning of this section](#static ## Syslog +### Syslog Show Commands + +This subsection explains how to display configured syslog servers. + +**show syslog** + +This command displays configured syslog servers. + +- Usage: + ``` + show syslog + ``` + +- Example: + ``` + admin@sonic:~$ show syslog + SERVER IP SOURCE IP PORT VRF + ----------- ----------- ------ ------- + 2.2.2.2 1.1.1.1 514 default + ``` + ### Syslog Config Commands -This sub-section of commands is used to add or remove the configured syslog servers. +This subsection explains how to configure syslog servers. **config syslog add** -This command is used to add a SYSLOG server to the syslog server list. Note that more that one syslog server can be added in the device. +This command is used to add a syslog server to the syslog server list. +Note that more than one syslog server can be added in the device.
- Usage: ``` - config syslog add + config syslog add ``` +- Parameters: + - _server_address_: syslog server IP address + - _source_: syslog source IP address + - _port_: syslog server UDP port + - _vrf_: syslog VRF device + - Example: ``` - admin@sonic:~$ sudo config syslog add 1.1.1.1 - Syslog server 1.1.1.1 added to configuration - Restarting rsyslog-config service... + admin@sonic:~$ sudo config syslog add 2.2.2.2 --source 1.1.1.1 --port 514 --vrf default + Running command: systemctl reset-failed rsyslog-config + Running command: systemctl restart rsyslog-config ``` -**config syslog delete** +**config syslog del** -This command is used to delete the syslog server configured. +This command is used to delete the configured syslog server. - Usage: ``` - config syslog del + config syslog del ``` +- Parameters: + - _server_address_: syslog server IP address + - Example: ``` - admin@sonic:~$ sudo config syslog del 1.1.1.1 - Syslog server 1.1.1.1 removed from configuration - Restarting rsyslog-config service... + admin@sonic:~$ sudo config syslog del 2.2.2.2 + Running command: systemctl reset-failed rsyslog-config + Running command: systemctl restart rsyslog-config ``` Go Back To [Beginning of the document](#) or [Beginning of this section](#syslog) diff --git a/show/main.py b/show/main.py index 96fc23d728..9c07d92080 100755 --- a/show/main.py +++ b/show/main.py @@ -60,6 +60,7 @@ from . import system_health from . import warm_restart from . import plugins +from . 
import syslog # Global Variables PLATFORM_JSON = 'platform.json' @@ -282,6 +283,9 @@ def cli(ctx): cli.add_command(system_health.system_health) cli.add_command(warm_restart.warm_restart) +# syslog module +cli.add_command(syslog.syslog) + # Add greabox commands only if GEARBOX is configured if is_gearbox_configured(): cli.add_command(gearbox.gearbox) @@ -1585,34 +1589,25 @@ def show_run_snmp(db, ctx): @runningconfiguration.command() @click.option('--verbose', is_flag=True, help="Enable verbose output") def syslog(verbose): - """Show Syslog running configuration - To match below cases(port is optional): - *.* @IPv4:port - *.* @@IPv4:port - *.* @[IPv4]:port - *.* @@[IPv4]:port - *.* @[IPv6]:port - *.* @@[IPv6]:port - """ - syslog_servers = [] - syslog_dict = {} - re_ipv4_1 = re.compile(r'^\*\.\* @{1,2}(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})(:\d+)?') - re_ipv4_2 = re.compile(r'^\*\.\* @{1,2}\[(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\](:\d+)?') - re_ipv6 = re.compile(r'^\*\.\* @{1,2}\[([0-9a-fA-F:.]+)\](:\d+)?') - with open("/etc/rsyslog.conf") as syslog_file: - data = syslog_file.readlines() + """Show Syslog running configuration""" + + header = ["Syslog Servers"] + body = [] + + re_syslog = re.compile(r'^\*\.\* action\(.*target=\"{1}(.+?)\"{1}.*\)') + + try: + with open("/etc/rsyslog.conf") as syslog_file: + data = syslog_file.readlines() + except Exception as e: + raise click.ClickException(str(e)) + for line in data: - if re_ipv4_1.match(line): - server = re_ipv4_1.match(line).group(1) - elif re_ipv4_2.match(line): - server = re_ipv4_2.match(line).group(1) - elif re_ipv6.match(line): - server = re_ipv6.match(line).group(1) - else: - continue - syslog_servers.append("[{}]".format(server)) - syslog_dict['Syslog Servers'] = syslog_servers - print(tabulate(syslog_dict, headers=list(syslog_dict.keys()), tablefmt="simple", stralign='left', missingval="")) + re_match = re_syslog.match(line) + if re_match: + body.append(["[{}]".format(re_match.group(1))]) + + 
click.echo(tabulate(body, header, tablefmt="simple", stralign="left", missingval="")) # diff --git a/show/syslog.py b/show/syslog.py new file mode 100644 index 0000000000..ed112e4c2d --- /dev/null +++ b/show/syslog.py @@ -0,0 +1,53 @@ +import click + +import tabulate +from natsort import natsorted + +import utilities_common.cli as clicommon + + +SYSLOG_TABLE = "SYSLOG_SERVER" + +SYSLOG_SOURCE = "source" +SYSLOG_PORT = "port" +SYSLOG_VRF = "vrf" + +# +# Syslog helpers ------------------------------------------------------------------------------------------------------ +# + +def format(header, body): + return tabulate.tabulate(body, header, tablefmt="simple", numalign="left", stralign="left") + +# +# Syslog CLI ---------------------------------------------------------------------------------------------------------- +# + +@click.group( + name='syslog', + cls=clicommon.AliasedGroup, + invoke_without_command=True +) +@clicommon.pass_db +def syslog(db): + """ Show syslog server configuration """ + + header = [ + "SERVER IP", + "SOURCE IP", + "PORT", + "VRF", + ] + body = [] + + table = db.cfgdb.get_table(SYSLOG_TABLE) + for key in natsorted(table): + entry = table[key] + row = [key] + [ + entry.get(SYSLOG_SOURCE, "N/A"), + entry.get(SYSLOG_PORT, "N/A"), + entry.get(SYSLOG_VRF, "N/A"), + ] + body.append(row) + + click.echo(format(header, body)) diff --git a/tests/syslog_input/assert_show_output.py b/tests/syslog_input/assert_show_output.py new file mode 100644 index 0000000000..64924d0e69 --- /dev/null +++ b/tests/syslog_input/assert_show_output.py @@ -0,0 +1,17 @@ +""" +Module holding the correct values for show CLI command outputs for the syslog_test.py +""" + +show_syslog_empty="""\ +SERVER IP SOURCE IP PORT VRF +----------- ----------- ------ ----- +""" + + +show_syslog="""\ +SERVER IP SOURCE IP PORT VRF +----------- ----------- ------ -------- +2.2.2.2 1.1.1.1 514 default +3.3.3.3 1.1.1.1 514 mgmt +2222::2222 1111::1111 514 Vrf-Data +""" diff --git 
a/tests/syslog_input/config_mock.py b/tests/syslog_input/config_mock.py new file mode 100644 index 0000000000..d595b891df --- /dev/null +++ b/tests/syslog_input/config_mock.py @@ -0,0 +1,70 @@ +""" +Module holding IP/VRF mock data for config CLI command of the syslog_test.py +""" + +VRF_LIST = ''' +[ + { + "name": "mgmt" + }, + { + "name": "Vrf-Data" + } +] +''' + +VRF_MGMT_MEMBERS = ''' +[ + { + "ifname": "eth0" + } +] +''' + +VRF_DATA_MEMBERS = ''' +[ + { + "ifname": "Ethernet0" + } +] +''' + +IP_ADDR_LIST = ''' +[ + { + "ifname": "Ethernet0", + "addr_info": [ + { + "local": "1111::1111" + } + ] + }, + { + "ifname": "Loopback0", + "addr_info": [ + { + "local": "1.1.1.1" + } + ] + }, + { + "ifname": "eth0", + "addr_info": [ + { + "local": "3.3.3.3" + } + ] + } +] +''' + +def exec_cmd_mock(cmd): + if cmd == 'ip --json vrf show': + return VRF_LIST + elif cmd == 'ip --json link show vrf mgmt': + return VRF_MGMT_MEMBERS + elif cmd == 'ip --json link show vrf Vrf-Data': + return VRF_DATA_MEMBERS + elif cmd == 'ip --json address show': + return IP_ADDR_LIST + raise Exception("{}: unknown command: {}".format(__name__, cmd)) diff --git a/tests/syslog_input/syslog_cdb.json b/tests/syslog_input/syslog_cdb.json new file mode 100644 index 0000000000..5ed3d26b4e --- /dev/null +++ b/tests/syslog_input/syslog_cdb.json @@ -0,0 +1,20 @@ +{ + "VRF|Vrf-Data": { + "NULL": "NULL" + }, + "SYSLOG_SERVER|2.2.2.2": { + "source": "1.1.1.1", + "port": "514", + "vrf": "default" + }, + "SYSLOG_SERVER|3.3.3.3": { + "source": "1.1.1.1", + "port": "514", + "vrf": "mgmt" + }, + "SYSLOG_SERVER|2222::2222": { + "source": "1111::1111", + "port": "514", + "vrf": "Vrf-Data" + } +} diff --git a/tests/syslog_input/vrf_cdb.json b/tests/syslog_input/vrf_cdb.json new file mode 100644 index 0000000000..8d57adb664 --- /dev/null +++ b/tests/syslog_input/vrf_cdb.json @@ -0,0 +1,8 @@ +{ + "MGMT_VRF_CONFIG|vrf_global": { + "mgmtVrfEnabled": "true" + }, + "VRF|Vrf-Data": { + "NULL": "NULL" + } +} diff --git 
a/tests/syslog_test.py b/tests/syslog_test.py new file mode 100644 index 0000000000..768f6219f0 --- /dev/null +++ b/tests/syslog_test.py @@ -0,0 +1,244 @@ +#!/usr/bin/env python + +import pytest + +import os +import mock +import logging + +import show.main as show +import config.main as config + +from click.testing import CliRunner +from utilities_common.db import Db + +from .mock_tables import dbconnector +from .syslog_input import config_mock +from .syslog_input import assert_show_output + + +ERROR_PATTERN_INVALID_IP = "does not appear to be an IPv4 or IPv6 address" +ERROR_PATTERN_PROHIBITED_IP = "is a loopback/multicast/link-local IP address" +ERROR_PATTERN_IP_FAMILY_MISMATCH = "IP address family mismatch" + +ERROR_PATTERN_INVALID_PORT = "is not a valid integer" +ERROR_PATTERN_INVALID_PORT_RANGE = "is not in the valid range of 0 to 65535" + +ERROR_PATTERN_INVALID_VRF = "invalid choice" +ERROR_PATTERN_NONEXISTENT_VRF = "VRF doesn't exist in Linux" + +SUCCESS = 0 +ERROR2 = 2 + + +test_path = os.path.dirname(os.path.abspath(__file__)) +mock_db_path = os.path.join(test_path, "syslog_input") +logger = logging.getLogger(__name__) + + +class TestSyslog: + @classmethod + def setup_class(cls): + logger.info("SETUP") + os.environ["UTILITIES_UNIT_TESTING"] = "1" + + @classmethod + def teardown_class(cls): + logger.info("TEARDOWN") + os.environ["UTILITIES_UNIT_TESTING"] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + dbconnector.dedicated_dbs["CONFIG_DB"] = None + + ########## CONFIG SYSLOG ########## + + @mock.patch("utilities_common.cli.run_command", mock.MagicMock(return_value=None)) + @pytest.mark.parametrize("server_ip", ["2.2.2.2", "2222::2222"]) + def test_config_syslog_basic(self, server_ip): + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["syslog"].commands["add"], [server_ip], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + result = 
runner.invoke( + config.config.commands["syslog"].commands["del"], [server_ip], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + @mock.patch("utilities_common.cli.run_command", mock.MagicMock(return_value=None)) + @mock.patch("config.syslog.exec_cmd", mock.MagicMock(side_effect=config_mock.exec_cmd_mock)) + @pytest.mark.parametrize("server_ip,source_ip,port,vrf", [ + ("2.2.2.2", "1.1.1.1", "514", "default"), + ("4.4.4.4", "3.3.3.3", "514", "mgmt"), + ("2222::2222", "1111::1111", "514", "Vrf-Data") + ]) + def test_config_syslog_extended(self, server_ip, source_ip, port, vrf): + dbconnector.dedicated_dbs["CONFIG_DB"] = os.path.join(mock_db_path, "vrf_cdb") + + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["syslog"].commands["add"], + [server_ip, "--source", source_ip, "--port", port, "--vrf", vrf], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + result = runner.invoke( + config.config.commands["syslog"].commands["del"], [server_ip], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + @pytest.mark.parametrize("server_ip,source_ip", [ + ("2.2.2.2", "1.1.1.1111"), + ("4.4.4.4444", "3.3.3.3") + ]) + def test_config_syslog_invalid_ip(self, server_ip, source_ip): + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["syslog"].commands["add"], + [server_ip, "--source", source_ip], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert ERROR_PATTERN_INVALID_IP in result.output + assert result.exit_code == ERROR2 + + @pytest.mark.parametrize("source_ip", ["127.0.0.1", "224.0.0.1"]) + def test_config_syslog_prohibited_sip(self, source_ip): + db = Db() + runner = CliRunner() + + result = runner.invoke( + 
config.config.commands["syslog"].commands["add"], + ["2.2.2.2", "--source", source_ip], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert ERROR_PATTERN_PROHIBITED_IP in result.output + assert result.exit_code == ERROR2 + + def test_config_syslog_ip_family_mismatch(self): + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["syslog"].commands["add"], + ["2.2.2.2", "--source", "1111::1111"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert ERROR_PATTERN_IP_FAMILY_MISMATCH in result.output + assert result.exit_code == ERROR2 + + def test_config_syslog_invalid_port(self): + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["syslog"].commands["add"], + ["2.2.2.2", "--port", "514p"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert ERROR_PATTERN_INVALID_PORT in result.output + assert result.exit_code == ERROR2 + + @pytest.mark.parametrize("port", ["-1", "65536"]) + def test_config_syslog_invalid_port_range(self, port): + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["syslog"].commands["add"], + ["2.2.2.2", "--port", port], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert ERROR_PATTERN_INVALID_PORT_RANGE in result.output + assert result.exit_code == ERROR2 + + def test_config_syslog_invalid_vrf(self): + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["syslog"].commands["add"], + ["2.2.2.2", "--vrf", "default1"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert ERROR_PATTERN_INVALID_VRF in result.output + assert result.exit_code == ERROR2 + + @pytest.mark.parametrize("vrf", ["mgmt", "Vrf-Data"]) + @mock.patch("config.syslog.get_vrf_list", mock.MagicMock(return_value=[])) + 
@mock.patch("config.syslog.get_vrf_member_dict", mock.MagicMock(return_value={})) + @mock.patch("config.syslog.get_ip_addr_dict", mock.MagicMock(return_value={})) + def test_config_syslog_nonexistent_vrf(self, vrf): + dbconnector.dedicated_dbs["CONFIG_DB"] = os.path.join(mock_db_path, "vrf_cdb") + + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["syslog"].commands["add"], + ["2.2.2.2", "--vrf", vrf], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert ERROR_PATTERN_NONEXISTENT_VRF in result.output + assert result.exit_code == ERROR2 + + ########## SHOW SYSLOG ########## + + def test_show_syslog_empty(self): + db = Db() + runner = CliRunner() + + result = runner.invoke( + show.cli.commands["syslog"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + assert result.output == assert_show_output.show_syslog_empty + + def test_show_syslog(self): + dbconnector.dedicated_dbs["CONFIG_DB"] = os.path.join(mock_db_path, "syslog_cdb") + + db = Db() + runner = CliRunner() + + result = runner.invoke( + show.cli.commands["syslog"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + assert result.output == assert_show_output.show_syslog From ea11b22f596fb90c2d85cc27c0f73b50039a42ad Mon Sep 17 00:00:00 2001 From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com> Date: Mon, 18 Jul 2022 07:25:31 +0300 Subject: [PATCH 28/34] [sonic-bootchart] add sonic-bootchart (#2195) - What I did Implemented sonic-net/SONiC#1001 - How I did it Added a new sonic-bootchart script and added UT for it - How to verify it Run on the switch. 
Depends on Azure/sonic-buildimage#11047 Signed-off-by: Stepan Blyschak --- scripts/sonic-bootchart | 139 ++++++++++++++++++++++++++++++++++ setup.py | 1 + tests/sonic_bootchart_test.py | 124 ++++++++++++++++++++++++++++++ 3 files changed, 264 insertions(+) create mode 100755 scripts/sonic-bootchart create mode 100755 tests/sonic_bootchart_test.py diff --git a/scripts/sonic-bootchart b/scripts/sonic-bootchart new file mode 100755 index 0000000000..86e993d395 --- /dev/null +++ b/scripts/sonic-bootchart @@ -0,0 +1,139 @@ +#!/usr/bin/env python3 + +import click +import sys +import configparser +import functools +import os +import glob +from tabulate import tabulate +import utilities_common.cli as clicommon + +SYSTEMD_BOOTCHART = "/lib/systemd/systemd-bootchart" +BOOTCHART_CONF = "/etc/systemd/bootchart.conf" +BOOTCHART_DEFAULT_OUTPUT_DIR = "/run/log/" +BOOTCHART_DEFAULT_OUTPUT_GLOB = os.path.join(BOOTCHART_DEFAULT_OUTPUT_DIR, "bootchart-*.svg") + +class BootChartConfigParser(configparser.ConfigParser): + """ Custom bootchart config parser. Changes the way ConfigParser passes options """ + + def optionxform(self, option): + """ Pass options as is, without modifications """ + return option + + +def exit_cli(*args, **kwargs): + """ Print a message and exit with rc 1. """ + click.secho(*args, **kwargs) + sys.exit(1) + + +def root_privileges_required(func): + """ Decorates a function, so that the function is invoked + only if the user is root. """ + @functools.wraps(func) + def wrapped_function(*args, **kwargs): + """ Wrapper around func. """ + if os.geteuid() != 0: + exit_cli("Root privileges required for this operation", fg="red") + return func(*args, **kwargs) + + wrapped_function.__doc__ += "\n\n NOTE: This command requires elevated (root) privileges to run." 
+ return wrapped_function + + +def check_bootchart_installed(): + """ Fails imidiatelly if bootchart is not installed """ + if not os.path.exists(SYSTEMD_BOOTCHART): + exit_cli("systemd-bootchart is not installed", fg="red") + + +def get_enabled_status(): + """ Get systemd-bootchart status """ + return clicommon.run_command("systemctl is-enabled systemd-bootchart", return_cmd=True) + +def get_active_status(): + """ Get systemd-bootchart status """ + return clicommon.run_command("systemctl is-active systemd-bootchart", return_cmd=True) + +def get_output_files(): + bootchart_output_files = [] + for bootchart_output_file in glob.glob(BOOTCHART_DEFAULT_OUTPUT_GLOB): + bootchart_output_files.append(bootchart_output_file) + return "\n".join(bootchart_output_files) + + +@click.group() +def cli(): + """ Main CLI group """ + check_bootchart_installed() + + +@cli.command() +@root_privileges_required +def enable(): + """ Enable bootchart """ + clicommon.run_command("systemctl enable systemd-bootchart", display_cmd=True) + + +@cli.command() +@root_privileges_required +def disable(): + """ Disable bootchart """ + clicommon.run_command("systemctl disable systemd-bootchart", display_cmd=True) + + +@cli.command() +@click.option('--time', type=click.IntRange(min=1), required=True) +@click.option('--frequency', type=click.IntRange(min=1), required=True) +@root_privileges_required +def config(time, frequency): + """ Configure bootchart """ + samples = time * frequency + + config = { + 'Samples': str(samples), + 'Frequency': str(frequency), + } + bootchart_config = BootChartConfigParser() + bootchart_config.read(BOOTCHART_CONF) + bootchart_config['Bootchart'].update(config) + with open(BOOTCHART_CONF, 'w') as config_file: + bootchart_config.write(config_file, space_around_delimiters=False) + + +@cli.command() +def show(): + """ Display bootchart configuration """ + bootchart_config = BootChartConfigParser() + bootchart_config.read(BOOTCHART_CONF) + + try: + samples = 
int(bootchart_config["Bootchart"]["Samples"]) + frequency = int(bootchart_config["Bootchart"]["Frequency"]) + except KeyError as key: + raise click.ClickException(f"Failed to parse bootchart config: {key} not found") + except ValueError as err: + raise click.ClickException(f"Failed to parse bootchart config: {err}") + + try: + time = samples // frequency + except ZeroDivisionError: + raise click.ClickException(f"Invalid frequency value: {frequency}") + + field_values = { + "Status": get_enabled_status(), + "Operational Status": get_active_status(), + "Frequency": frequency, + "Time (sec)": time, + "Output": get_output_files(), + } + + click.echo(tabulate([field_values.values()], field_values.keys())) + + +def main(): + cli() + +if __name__ == "__main__": + main() diff --git a/setup.py b/setup.py index 3f5e1b6633..7f617905da 100644 --- a/setup.py +++ b/setup.py @@ -143,6 +143,7 @@ 'scripts/watermarkstat', 'scripts/watermarkcfg', 'scripts/sonic-kdump-config', + 'scripts/sonic-bootchart', 'scripts/centralize_database', 'scripts/null_route_helper', 'scripts/coredump_gen_handler.py', diff --git a/tests/sonic_bootchart_test.py b/tests/sonic_bootchart_test.py new file mode 100755 index 0000000000..f9ecdab1dc --- /dev/null +++ b/tests/sonic_bootchart_test.py @@ -0,0 +1,124 @@ +import os +import subprocess +import pytest +from click.testing import CliRunner +from unittest.mock import patch, Mock +import utilities_common +import imp + +sonic_bootchart = imp.load_source('sonic-bootchart', 'scripts/sonic-bootchart') + +BOOTCHART_OUTPUT_FILES = [ + os.path.join(sonic_bootchart.BOOTCHART_DEFAULT_OUTPUT_DIR, "bootchart-20220504-1040.svg"), + os.path.join(sonic_bootchart.BOOTCHART_DEFAULT_OUTPUT_DIR, "bootchart-20220504-1045.svg"), +] + +@pytest.fixture(autouse=True) +def setup(fs): + # create required file for bootchart installation check + fs.create_file(sonic_bootchart.SYSTEMD_BOOTCHART) + fs.create_file(sonic_bootchart.BOOTCHART_CONF) + for bootchart_output_file in 
BOOTCHART_OUTPUT_FILES: + fs.create_file(bootchart_output_file) + + with open(sonic_bootchart.BOOTCHART_CONF, 'w') as config_file: + config_file.write(""" + [Bootchart] + Samples=500 + Frequency=25 + """) + + # pass the root user check + with patch("os.geteuid") as mock: + mock.return_value = 0 + yield + + +@patch("utilities_common.cli.run_command") +class TestSonicBootchart: + def test_enable(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(sonic_bootchart.cli.commands['enable'], []) + assert not result.exit_code + mock_run_command.assert_called_with("systemctl enable systemd-bootchart", display_cmd=True) + + def test_disable(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(sonic_bootchart.cli.commands['disable'], []) + assert not result.exit_code + mock_run_command.assert_called_with("systemctl disable systemd-bootchart", display_cmd=True) + + def test_config_show(self, mock_run_command): + def run_command_side_effect(command, **kwargs): + if "is-enabled" in command: + return "enabled" + elif "is-active" in command: + return "active" + else: + raise Exception("unknown command") + + mock_run_command.side_effect = run_command_side_effect + + runner = CliRunner() + result = runner.invoke(sonic_bootchart.cli.commands['show'], []) + assert not result.exit_code + assert result.output == \ + "Status Operational Status Frequency Time (sec) Output\n" \ + "-------- -------------------- ----------- ------------ ------------------------------------\n" \ + "enabled active 25 20 /run/log/bootchart-20220504-1040.svg\n" \ + " /run/log/bootchart-20220504-1045.svg\n" + + result = runner.invoke(sonic_bootchart.cli.commands["config"], ["--time", "2", "--frequency", "50"]) + assert not result.exit_code + + result = runner.invoke(sonic_bootchart.cli.commands['show'], []) + assert not result.exit_code + assert result.output == \ + "Status Operational Status Frequency Time (sec) Output\n" \ + "-------- -------------------- ----------- 
------------ ------------------------------------\n" \ + "enabled active 50 2 /run/log/bootchart-20220504-1040.svg\n" \ + " /run/log/bootchart-20220504-1045.svg\n" + + # Input validation tests + + result = runner.invoke(sonic_bootchart.cli.commands["config"], ["--time", "0", "--frequency", "50"]) + assert result.exit_code + + result = runner.invoke(sonic_bootchart.cli.commands["config"], ["--time", "2", "--frequency", "-5"]) + assert result.exit_code + + def test_invalid_config_show(self, mock_run_command): + with open(sonic_bootchart.BOOTCHART_CONF, 'w') as config_file: + config_file.write(""" + [Bootchart] + Samples=100 + """) + + runner = CliRunner() + result = runner.invoke(sonic_bootchart.cli.commands['show'], []) + assert result.exit_code + assert result.output == "Error: Failed to parse bootchart config: 'Frequency' not found\n" + + with open(sonic_bootchart.BOOTCHART_CONF, 'w') as config_file: + config_file.write(""" + [Bootchart] + Samples=abc + Frequency=def + """) + + runner = CliRunner() + result = runner.invoke(sonic_bootchart.cli.commands['show'], []) + assert result.exit_code + assert result.output == "Error: Failed to parse bootchart config: invalid literal for int() with base 10: 'abc'\n" + + with open(sonic_bootchart.BOOTCHART_CONF, 'w') as config_file: + config_file.write(""" + [Bootchart] + Samples=100 + Frequency=0 + """) + + runner = CliRunner() + result = runner.invoke(sonic_bootchart.cli.commands['show'], []) + assert result.exit_code + assert result.output == "Error: Invalid frequency value: 0\n" From e49b1e8e40a4df5664d36b24b0e0cb9a223fa639 Mon Sep 17 00:00:00 2001 From: Volodymyr Samotiy Date: Mon, 18 Jul 2022 19:22:18 +0300 Subject: [PATCH 29/34] [vnet_route_check] Align DB data parse logic with format used by swsscommon API (#2268) * swsscommon API was changed in order to return data from DB as a tuple instead of dictionary. * In some places vnet_route_check still was expecting data from DB in old format - as a dictionary. 
* But now it is a tuple, so as a result vnet_route_check was failing with "KeyError" exeption. * These changes fixed all the places in vnet_route_check script that used invalid data format. Signed-off-by: Volodymyr Samotiy --- scripts/vnet_route_check.py | 13 ++++--------- tests/vnet_route_check_test.py | 2 +- 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/scripts/vnet_route_check.py b/scripts/vnet_route_check.py index b2c798dfb0..db50503cd9 100755 --- a/scripts/vnet_route_check.py +++ b/scripts/vnet_route_check.py @@ -93,7 +93,7 @@ def get_vnet_intfs(): vnet_intfs = {} for intf_key in intfs_keys: - intf_attrs = intfs_table.get(intf_key)[1] + intf_attrs = dict(intfs_table.get(intf_key)[1]) if 'vnet_name' in intf_attrs: vnet_name = intf_attrs['vnet_name'] @@ -110,14 +110,9 @@ def get_all_rifs_oids(): Format: { : } ''' db = swsscommon.DBConnector('COUNTERS_DB', 0) - rif_table = swsscommon.Table(db, 'COUNTERS_RIF_NAME_MAP') - rif_keys = rif_table.getKeys() - - rif_name_oid_map = {} - for rif_name in rif_keys: - rif_name_oid_map[rif_name] = rif_table.get(rif_name)[1] + rif_name_oid_map = dict(rif_table.get('')[1]) return rif_name_oid_map @@ -156,8 +151,8 @@ def get_vrf_entries(): db_keys = rif_table.getKeys() for db_key in db_keys: - if 'SAI_OBJECT_TYPE_ROUTER_INTERFACE' in db_key: - rif_attrs = rif_table.get(db_key)[1] + if (db_key == f'SAI_OBJECT_TYPE_ROUTER_INTERFACE:{vnet_rifs_oids[vnet_rif_name]}'): + rif_attrs = dict(rif_table.get(db_key)[1]) rif_vrf_map[vnet_rif_name] = rif_attrs['SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID'] return rif_vrf_map diff --git a/tests/vnet_route_check_test.py b/tests/vnet_route_check_test.py index 09f35761a4..c06ea10ea3 100644 --- a/tests/vnet_route_check_test.py +++ b/tests/vnet_route_check_test.py @@ -254,7 +254,7 @@ def getKeys(self): return list(self.data.keys()) def get(self, key): - ret = copy.deepcopy(self.data.get(key, {})) + ret = copy.deepcopy(self.data.get(key, self.data)) return (True, ret) From 
308e25f82ba029d8a3b176982cc7cc41451e16b7 Mon Sep 17 00:00:00 2001 From: Prince George <45705344+prgeor@users.noreply.github.com> Date: Tue, 19 Jul 2022 12:22:07 -0700 Subject: [PATCH 30/34] portconfig option to configure Tx power and laser frequency of ZR transceiver module (#2197) * New CLI option for configuring Xcvr frequency and Tx power * portconfig changes to configure xcvr's frequency and laser power * Added unit tests * Added check for tx_power and freq * Address review comment * Address review comment --- config/main.py | 65 +++++++++++++++++++++++++++++++++++++-- scripts/portconfig | 32 ++++++++++++++++++- tests/config_xcvr_test.py | 49 +++++++++++++++++++++++++++++ 3 files changed, 143 insertions(+), 3 deletions(-) create mode 100644 tests/config_xcvr_test.py diff --git a/config/main.py b/config/main.py index e26eac133c..56aeddc673 100644 --- a/config/main.py +++ b/config/main.py @@ -668,7 +668,7 @@ def is_storm_control_supported(storm_type, namespace): supported = state_db.get(state_db.STATE_DB, entry_name,"supported") return supported -#API to configure the PORT_STORM_CONTROL table +#API to configure the PORT_STORM_CONTROL table def storm_control_set_entry(port_name, kbps, storm_type, namespace): if storm_control_interface_validate(port_name) is False: @@ -693,7 +693,7 @@ def storm_control_set_entry(port_name, kbps, storm_type, namespace): return True -#API to remove an entry from PORT_STORM_CONTROL table +#API to remove an entry from PORT_STORM_CONTROL table def storm_control_delete_entry(port_name, storm_type): if storm_control_interface_validate(port_name) is False: @@ -4719,6 +4719,67 @@ def transceiver(ctx): """SFP transceiver configuration""" pass +# +# 'frequency' subcommand ('config interface transceiver frequency ...') +# +@transceiver.command() +@click.pass_context +@click.argument('interface_name', metavar='', required=True) +@click.argument('frequency', metavar='', required=True, type=int) +def frequency(ctx, interface_name, frequency): + 
"""Set transciever (only for 400G-ZR) frequency""" + # Get the config_db connector + config_db = ctx.obj['config_db'] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + if interface_name_is_valid(config_db, interface_name) is False: + ctx.fail("Interface name is invalid. Please enter a valid interface name!!") + + log.log_info("{} Setting transceiver frequency {} GHz".format(interface_name, frequency)) + + if ctx.obj['namespace'] is DEFAULT_NAMESPACE: + command = "portconfig -p {} -F {}".format(interface_name, frequency) + else: + command = "portconfig -p {} -F {} -n {}".format(interface_name, frequency, ctx.obj['namespace']) + + clicommon.run_command(command) + + +# +# 'tx_power' subcommand ('config interface transceiver tx_power ...') +# For negative float use:- +# config interface transceiver tx_power Ethernet0 -- -27.4" +# +@transceiver.command('tx_power') +@click.pass_context +@click.argument('interface_name', metavar='', required=True) +@click.argument('tx-power', metavar='', required=True, type=float) +def tx_power(ctx, interface_name, tx_power): + """Set transciever (only for 400G-ZR) Tx laser power""" + # Get the config_db connector + config_db = ctx.obj['config_db'] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + if interface_name_is_valid(config_db, interface_name) is False: + ctx.fail("Interface name is invalid. 
Please enter a valid interface name!!") + + log.log_info("{} Setting transceiver power {} dBm".format(interface_name, tx_power)) + + if ctx.obj['namespace'] is DEFAULT_NAMESPACE: + command = "portconfig -p {} -P {}".format(interface_name, tx_power) + else: + command = "portconfig -p {} -P {} -n {}".format(interface_name, tx_power, ctx.obj['namespace']) + + clicommon.run_command(command) + # # 'lpmode' subcommand ('config interface transceiver lpmode ...') # diff --git a/scripts/portconfig b/scripts/portconfig index dcb98cb403..63bb463868 100755 --- a/scripts/portconfig +++ b/scripts/portconfig @@ -22,9 +22,12 @@ optional arguments: -t --interface-type port interface type -T --adv-interface-types port advertised interface types -lt --link-training port link training mode + -P --tx-power 400G ZR modulet target Tx output power (dBm) + -F --laser-freq 400G ZR module 75GHz grid frequency (GHz) """ import os import sys +import decimal import argparse # mock the redis for unit test purposes # @@ -51,6 +54,8 @@ PORT_ADV_SPEEDS_CONFIG_FIELD_NAME = "adv_speeds" PORT_INTERFACE_TYPE_CONFIG_FIELD_NAME = "interface_type" PORT_ADV_INTERFACE_TYPES_CONFIG_FIELD_NAME = "adv_interface_types" PORT_LINK_TRAINING_CONFIG_FIELD_NAME = "link_training" +PORT_XCVR_LASER_FREQ_FIELD_NAME = "laser_freq" +PORT_XCVR_TX_POWER_FIELD_NAME = "tx_power" PORT_CHANNEL_TABLE_NAME = "PORTCHANNEL" PORT_CHANNEL_MBR_TABLE_NAME = "PORTCHANNEL_MEMBER" TPID_CONFIG_FIELD_NAME = "tpid" @@ -152,6 +157,14 @@ class portconfig(object): mode = 'on' if mode == 'enabled' else 'off' self.db.mod_entry(PORT_TABLE_NAME, port, {PORT_AUTONEG_CONFIG_FIELD_NAME: mode}) + def set_tx_power(self, port, tx_power): + print("Setting target Tx output power to %s dBm on port %s" % (tx_power, port)) + self.db.mod_entry(PORT_TABLE_NAME, port, {PORT_XCVR_TX_POWER_FIELD_NAME: tx_power}) + + def set_laser_freq(self, port, laser_freq): + print("Setting laser frequency to %s GHz on port %s" % (laser_freq, port)) + 
self.db.mod_entry(PORT_TABLE_NAME, port, {PORT_XCVR_LASER_FREQ_FIELD_NAME: laser_freq}) + def set_adv_speeds(self, port, adv_speeds): if self.verbose: print("Setting adv_speeds %s on port %s" % (adv_speeds, port)) @@ -280,6 +293,10 @@ def main(): help = 'port advertised interface types', default=None) parser.add_argument('-lt', '--link-training', type = str, required = False, help = 'port link training mode', default=None) + parser.add_argument('-P', '--tx-power', type=float, required=False, + help='Tx output power(dBm)', default=None) + parser.add_argument('-F', '--laser-freq', type=int, required=False, + help='Laser frequency(GHz)', default=None) args = parser.parse_args() # Load database config files @@ -288,7 +305,9 @@ def main(): port = portconfig(args.verbose, args.port, args.namespace) if args.list: port.list_params(args.port) - elif args.speed or args.fec or args.mtu or args.link_training or args.autoneg or args.adv_speeds or args.interface_type or args.adv_interface_types or args.tpid: + elif args.speed or args.fec or args.mtu or args.link_training or args.autoneg or args.adv_speeds or \ + args.interface_type or args.adv_interface_types or args.tpid or \ + args.tx_power or args.laser_freq: if args.speed: port.set_speed(args.port, args.speed) if args.fec: @@ -307,6 +326,17 @@ def main(): port.set_adv_interface_types(args.port, args.adv_interface_types) if args.tpid: port.set_tpid(args.port, args.tpid) + if args.tx_power: + d = decimal.Decimal(str(args.tx_power)) + if d.as_tuple().exponent < -1: + print("Error: tx power must be with single decimal place") + sys.exit(1) + port.set_tx_power(args.port, args.tx_power) + if args.laser_freq: + if args.laser_freq <= 0: + print("Error: Frequency must be > 0") + sys.exit(1) + port.set_laser_freq(args.port, args.laser_freq) else: parser.print_help() sys.exit(1) diff --git a/tests/config_xcvr_test.py b/tests/config_xcvr_test.py new file mode 100644 index 0000000000..1ecc452f52 --- /dev/null +++ 
b/tests/config_xcvr_test.py @@ -0,0 +1,49 @@ +import click +import config.main as config +import operator +import os +import pytest +import sys + +from click.testing import CliRunner +from utilities_common.db import Db + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, modules_path) + + +@pytest.fixture(scope='module') +def ctx(scope='module'): + db = Db() + obj = {'config_db':db.cfgdb, 'namespace': ''} + yield obj + + +class TestConfigXcvr(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "1" + + def test_config_laser_frequency(self, ctx): + #self.basic_check("link-training", ["Ethernet0", "on"], ctx) + result = self.basic_check("frequency", ["Ethernet0", "191300"], ctx) + assert "Setting laser frequency" in result.output + result = self.basic_check("frequency", ["Ethernet0", "--", "-1"], ctx, op=operator.ne) + assert "Error: Frequency must be > 0" in result.output + + def test_config_tx_power(self, ctx): + result = self.basic_check("tx_power", ["Ethernet0", "11.3"], ctx) + assert "Setting target Tx output power" in result.output + result = self.basic_check("tx_power", ["Ethernet0", "11.34"], ctx, op=operator.ne) + assert "Error: tx power must be with single decimal place" in result.output + + def basic_check(self, command_name, para_list, ctx, op=operator.eq, expect_result=0): + runner = CliRunner() + result = runner.invoke(config.config.commands["interface"].commands["transceiver"].commands[command_name], para_list, obj = ctx) + print(result.output) + assert op(result.exit_code, expect_result) + return result From 65346ce8239ce3d1600c2d342296be381163ac97 Mon Sep 17 00:00:00 2001 From: "microsoft-github-policy-service[bot]" <77245923+microsoft-github-policy-service[bot]@users.noreply.github.com> Date: Wed, 20 Jul 2022 10:36:27 -0700 
Subject: [PATCH 31/34] Microsoft mandatory file (#2177) Please accept this contribution adding the standard Microsoft SECURITY.MD :lock: file to help the community understand the security policy and how to safely report security issues. GitHub uses the presence of this file to light-up security reminders and a link to the file. This pull request commits the latest official SECURITY.MD file from https://github.com/microsoft/repo-templates/blob/main/shared/SECURITY.md. Microsoft teams can [learn more about this effort and share feedback](https://docs.opensource.microsoft.com/releasing/maintain/templates/) within the open source guidance available internally. --- SECURITY.md | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 SECURITY.md diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000000..869fdfe2b2 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,41 @@ + + +## Security + +Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). + +If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). 
+ +If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) + * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). 
+ + From 9e3ba828b041706f206953982905c08db6ee674a Mon Sep 17 00:00:00 2001 From: Vaibhav Hemant Dixit Date: Wed, 20 Jul 2022 15:58:28 -0700 Subject: [PATCH 32/34] Change db_migrator major version on master branch from version 2 to 3 (#2272) To add room for db_migrator changes in 202012 branch, changed the major version on master/202205 branches from "2" to "3" This change is needed: 1. Since the existing master/202205 db versions cannot be backported to 202012 as they are not relevant to 202012. Current latest DB version for 202012 is 2_0_0, to add newer versions 2_0_1 cannot be added as it is already used for master/202205 branch. 2. Since master/202205 branches should incremented major version that was reserved for 202012. Since 202205 used same major version as 202012, there is no place for migrator changes to go into 202012 branch. Changed major version on migrator script. Updated test files. Signed-off-by: Vaibhav Hemant Dixit --- scripts/db_migrator.py | 65 +++++++++---------- scripts/mellanox_buffer_migrator.py | 4 +- ...json => acs-msn2700-t0-version_3_0_0.json} | 0 ...json => acs-msn2700-t0-version_3_0_3.json} | 0 ...json => acs-msn2700-t1-version_3_0_0.json} | 0 ...json => acs-msn2700-t1-version_3_0_3.json} | 0 ...json => acs-msn3700-t0-version_3_0_0.json} | 0 ...json => acs-msn3700-t0-version_3_0_3.json} | 0 ...json => acs-msn3700-t1-version_3_0_0.json} | 0 ...json => acs-msn3700-t1-version_3_0_3.json} | 0 ...json => acs-msn3800-t0-version_3_0_0.json} | 0 ...json => acs-msn3800-t0-version_3_0_3.json} | 0 ...json => acs-msn3800-t1-version_3_0_0.json} | 0 ...json => acs-msn3800-t1-version_3_0_3.json} | 0 ...json => acs-msn4700-t0-version_3_0_0.json} | 0 ...json => acs-msn4700-t0-version_3_0_3.json} | 0 ...json => acs-msn4700-t1-version_3_0_0.json} | 0 ...json => acs-msn4700-t1-version_3_0_3.json} | 0 ...0-c28d8-single-pool-t0-version_3_0_0.json} | 0 ...0-c28d8-single-pool-t0-version_3_0_3.json} | 0 ...0-c28d8-single-pool-t1-version_3_0_0.json} | 0 
...0-c28d8-single-pool-t1-version_3_0_3.json} | 0 ...llanox-sn2700-c28d8-t0-version_3_0_0.json} | 0 ...llanox-sn2700-c28d8-t0-version_3_0_3.json} | 0 ...llanox-sn2700-c28d8-t1-version_3_0_0.json} | 0 ...llanox-sn2700-c28d8-t1-version_3_0_3.json} | 0 ...anox-sn2700-d40c8s8-t0-version_3_0_0.json} | 0 ...anox-sn2700-d40c8s8-t0-version_3_0_3.json} | 0 ...anox-sn2700-d40c8s8-t1-version_3_0_0.json} | 0 ...anox-sn2700-d40c8s8-t1-version_3_0_3.json} | 0 ...0-d48c8-single-pool-t0-version_3_0_0.json} | 0 ...0-d48c8-single-pool-t0-version_3_0_3.json} | 0 ...0-d48c8-single-pool-t1-version_3_0_0.json} | 0 ...0-d48c8-single-pool-t1-version_3_0_3.json} | 0 ...llanox-sn2700-d48c8-t0-version_3_0_0.json} | 0 ...llanox-sn2700-d48c8-t0-version_3_0_3.json} | 0 ...llanox-sn2700-d48c8-t1-version_3_0_0.json} | 0 ...llanox-sn2700-d48c8-t1-version_3_0_3.json} | 0 ...-sn2700-single-pool-t0-version_3_0_0.json} | 0 ...-sn2700-single-pool-t0-version_3_0_3.json} | 0 ...-sn2700-single-pool-t1-version_3_0_0.json} | 0 ...-sn2700-single-pool-t1-version_3_0_3.json} | 0 ... => mellanox-sn2700-t0-version_3_0_0.json} | 0 ... => mellanox-sn2700-t0-version_3_0_3.json} | 0 ... => mellanox-sn2700-t1-version_3_0_0.json} | 0 ... 
=> mellanox-sn2700-t1-version_3_0_3.json} | 0 ...mellanox-sn3800-c64-t0-version_3_0_0.json} | 0 ...mellanox-sn3800-c64-t0-version_3_0_3.json} | 0 ...mellanox-sn3800-c64-t1-version_3_0_0.json} | 0 ...mellanox-sn3800-c64-t1-version_3_0_3.json} | 0 ...lanox-sn3800-d112c8-t0-version_3_0_0.json} | 0 ...lanox-sn3800-d112c8-t0-version_3_0_3.json} | 0 ...lanox-sn3800-d112c8-t1-version_3_0_0.json} | 0 ...lanox-sn3800-d112c8-t1-version_3_0_3.json} | 0 ...lanox-sn3800-d24c52-t0-version_3_0_0.json} | 0 ...lanox-sn3800-d24c52-t0-version_3_0_3.json} | 0 ...lanox-sn3800-d24c52-t1-version_3_0_0.json} | 0 ...lanox-sn3800-d24c52-t1-version_3_0_3.json} | 0 ...lanox-sn3800-d28c50-t0-version_3_0_0.json} | 0 ...lanox-sn3800-d28c50-t0-version_3_0_3.json} | 0 ...lanox-sn3800-d28c50-t1-version_3_0_0.json} | 0 ...lanox-sn3800-d28c50-t1-version_3_0_3.json} | 0 ...b_field_value_reference_format_3_0_1.json} | 0 ...b_field_value_reference_format_3_0_3.json} | 0 ...json => acs-msn2700-t0-version_3_0_0.json} | 2 +- ...json => acs-msn2700-t0-version_3_0_3.json} | 2 +- ...json => acs-msn2700-t1-version_3_0_0.json} | 2 +- ...json => acs-msn2700-t1-version_3_0_3.json} | 2 +- ...json => acs-msn3700-t0-version_3_0_0.json} | 2 +- ...json => acs-msn3700-t0-version_3_0_3.json} | 2 +- ...json => acs-msn3700-t1-version_3_0_0.json} | 2 +- ...json => acs-msn3700-t1-version_3_0_3.json} | 2 +- ...json => acs-msn3800-t0-version_3_0_0.json} | 2 +- ...json => acs-msn3800-t0-version_3_0_3.json} | 2 +- ...json => acs-msn3800-t1-version_3_0_0.json} | 2 +- ...json => acs-msn3800-t1-version_3_0_3.json} | 2 +- ...json => acs-msn4700-t0-version_3_0_0.json} | 2 +- ...json => acs-msn4700-t0-version_3_0_3.json} | 2 +- ...json => acs-msn4700-t1-version_3_0_0.json} | 2 +- ...json => acs-msn4700-t1-version_3_0_3.json} | 2 +- .../config_db/empty-config-expected.json | 2 +- ...0-c28d8-single-pool-t0-version_3_0_0.json} | 2 +- ...0-c28d8-single-pool-t0-version_3_0_3.json} | 2 +- ...0-c28d8-single-pool-t1-version_3_0_0.json} | 2 
+- ...0-c28d8-single-pool-t1-version_3_0_3.json} | 2 +- ...llanox-sn2700-c28d8-t0-version_3_0_0.json} | 2 +- ...llanox-sn2700-c28d8-t0-version_3_0_3.json} | 2 +- ...llanox-sn2700-c28d8-t1-version_3_0_0.json} | 2 +- ...llanox-sn2700-c28d8-t1-version_3_0_3.json} | 2 +- ...anox-sn2700-d40c8s8-t0-version_3_0_0.json} | 2 +- ...anox-sn2700-d40c8s8-t0-version_3_0_3.json} | 2 +- ...anox-sn2700-d40c8s8-t1-version_3_0_0.json} | 2 +- ...anox-sn2700-d40c8s8-t1-version_3_0_3.json} | 2 +- ...0-d48c8-single-pool-t0-version_3_0_0.json} | 2 +- ...0-d48c8-single-pool-t0-version_3_0_3.json} | 2 +- ...0-d48c8-single-pool-t1-version_3_0_0.json} | 2 +- ...0-d48c8-single-pool-t1-version_3_0_3.json} | 2 +- ...llanox-sn2700-d48c8-t0-version_3_0_0.json} | 2 +- ...llanox-sn2700-d48c8-t0-version_3_0_3.json} | 2 +- ...llanox-sn2700-d48c8-t1-version_3_0_0.json} | 2 +- ...llanox-sn2700-d48c8-t1-version_3_0_3.json} | 2 +- ...-sn2700-single-pool-t0-version_3_0_0.json} | 2 +- ...-sn2700-single-pool-t0-version_3_0_3.json} | 2 +- ...-sn2700-single-pool-t1-version_3_0_0.json} | 2 +- ...-sn2700-single-pool-t1-version_3_0_3.json} | 2 +- ... => mellanox-sn2700-t0-version_3_0_0.json} | 2 +- ... => mellanox-sn2700-t0-version_3_0_3.json} | 2 +- ... => mellanox-sn2700-t1-version_3_0_0.json} | 2 +- ... 
=> mellanox-sn2700-t1-version_3_0_3.json} | 2 +- ...mellanox-sn3800-c64-t0-version_3_0_0.json} | 2 +- ...mellanox-sn3800-c64-t0-version_3_0_3.json} | 2 +- ...mellanox-sn3800-c64-t1-version_3_0_0.json} | 2 +- ...mellanox-sn3800-c64-t1-version_3_0_3.json} | 2 +- ...lanox-sn3800-d112c8-t0-version_3_0_0.json} | 2 +- ...lanox-sn3800-d112c8-t0-version_3_0_3.json} | 2 +- ...lanox-sn3800-d112c8-t1-version_3_0_0.json} | 2 +- ...lanox-sn3800-d112c8-t1-version_3_0_3.json} | 2 +- ...lanox-sn3800-d24c52-t0-version_3_0_0.json} | 2 +- ...lanox-sn3800-d24c52-t0-version_3_0_3.json} | 2 +- ...lanox-sn3800-d24c52-t1-version_3_0_0.json} | 2 +- ...lanox-sn3800-d24c52-t1-version_3_0_3.json} | 2 +- ...lanox-sn3800-d28c50-t0-version_3_0_0.json} | 2 +- ...lanox-sn3800-d28c50-t0-version_3_0_3.json} | 2 +- ...lanox-sn3800-d28c50-t1-version_3_0_0.json} | 2 +- ...lanox-sn3800-d28c50-t1-version_3_0_3.json} | 2 +- .../non-default-config-expected.json | 2 +- ...fault-lossless-profile-in-pg-expected.json | 2 +- ...-default-lossy-profile-in-pg-expected.json | 2 +- .../config_db/non-default-pg-expected.json | 2 +- .../config_db/non-default-xoff-expected.json | 2 +- .../config_db/port-an-expected.json | 2 +- .../config_db/port-an-input.json | 2 +- .../config_db/portchannel-expected.json | 2 +- .../config_db/portchannel-input.json | 2 +- .../config_db/qos_map_table_expected.json | 2 +- .../config_db/qos_map_table_input.json | 2 +- ...b_field_value_reference_format_3_0_1.json} | 2 +- ...b_field_value_reference_format_3_0_3.json} | 2 +- ...-buffer-dynamic-double-pools-expected.json | 2 +- ...ing-buffer-dynamic-double-pools-input.json | 2 +- ...g-buffer-dynamic-single-pool-expected.json | 2 +- ...ming-buffer-dynamic-single-pool-input.json | 2 +- ...fer-traditional-double-pools-expected.json | 2 +- ...buffer-traditional-double-pools-input.json | 2 +- ...ffer-traditional-single-pool-expected.json | 2 +- ...-buffer-traditional-single-pool-input.json | 2 +- ...reclaiming-buffer-warmreboot-expected.json | 2 
+- tests/db_migrator_test.py | 17 ++--- 148 files changed, 126 insertions(+), 126 deletions(-) rename tests/db_migrator_input/appl_db/{acs-msn2700-t0-version_2_0_0.json => acs-msn2700-t0-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{acs-msn2700-t0-version_2_0_3.json => acs-msn2700-t0-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{acs-msn2700-t1-version_2_0_0.json => acs-msn2700-t1-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{acs-msn2700-t1-version_2_0_3.json => acs-msn2700-t1-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{acs-msn3700-t0-version_2_0_0.json => acs-msn3700-t0-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{acs-msn3700-t0-version_2_0_3.json => acs-msn3700-t0-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{acs-msn3700-t1-version_2_0_0.json => acs-msn3700-t1-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{acs-msn3700-t1-version_2_0_3.json => acs-msn3700-t1-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{acs-msn3800-t0-version_2_0_0.json => acs-msn3800-t0-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{acs-msn3800-t0-version_2_0_3.json => acs-msn3800-t0-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{acs-msn3800-t1-version_2_0_0.json => acs-msn3800-t1-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{acs-msn3800-t1-version_2_0_3.json => acs-msn3800-t1-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{acs-msn4700-t0-version_2_0_0.json => acs-msn4700-t0-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{acs-msn4700-t0-version_2_0_3.json => acs-msn4700-t0-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{acs-msn4700-t1-version_2_0_0.json => acs-msn4700-t1-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{acs-msn4700-t1-version_2_0_3.json => acs-msn4700-t1-version_3_0_3.json} (100%) 
rename tests/db_migrator_input/appl_db/{mellanox-sn2700-c28d8-single-pool-t0-version_2_0_0.json => mellanox-sn2700-c28d8-single-pool-t0-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn2700-c28d8-single-pool-t0-version_2_0_3.json => mellanox-sn2700-c28d8-single-pool-t0-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn2700-c28d8-single-pool-t1-version_2_0_0.json => mellanox-sn2700-c28d8-single-pool-t1-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn2700-c28d8-single-pool-t1-version_2_0_3.json => mellanox-sn2700-c28d8-single-pool-t1-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn2700-c28d8-t0-version_2_0_0.json => mellanox-sn2700-c28d8-t0-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn2700-c28d8-t0-version_2_0_3.json => mellanox-sn2700-c28d8-t0-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn2700-c28d8-t1-version_2_0_0.json => mellanox-sn2700-c28d8-t1-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn2700-c28d8-t1-version_2_0_3.json => mellanox-sn2700-c28d8-t1-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn2700-d40c8s8-t0-version_2_0_0.json => mellanox-sn2700-d40c8s8-t0-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn2700-d40c8s8-t0-version_2_0_3.json => mellanox-sn2700-d40c8s8-t0-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn2700-d40c8s8-t1-version_2_0_0.json => mellanox-sn2700-d40c8s8-t1-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn2700-d40c8s8-t1-version_2_0_3.json => mellanox-sn2700-d40c8s8-t1-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn2700-d48c8-single-pool-t0-version_2_0_0.json => mellanox-sn2700-d48c8-single-pool-t0-version_3_0_0.json} (100%) rename 
tests/db_migrator_input/appl_db/{mellanox-sn2700-d48c8-single-pool-t0-version_2_0_3.json => mellanox-sn2700-d48c8-single-pool-t0-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn2700-d48c8-single-pool-t1-version_2_0_0.json => mellanox-sn2700-d48c8-single-pool-t1-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn2700-d48c8-single-pool-t1-version_2_0_3.json => mellanox-sn2700-d48c8-single-pool-t1-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn2700-d48c8-t0-version_2_0_0.json => mellanox-sn2700-d48c8-t0-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn2700-d48c8-t0-version_2_0_3.json => mellanox-sn2700-d48c8-t0-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn2700-d48c8-t1-version_2_0_0.json => mellanox-sn2700-d48c8-t1-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn2700-d48c8-t1-version_2_0_3.json => mellanox-sn2700-d48c8-t1-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn2700-single-pool-t0-version_2_0_0.json => mellanox-sn2700-single-pool-t0-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn2700-single-pool-t0-version_2_0_3.json => mellanox-sn2700-single-pool-t0-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn2700-single-pool-t1-version_2_0_0.json => mellanox-sn2700-single-pool-t1-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn2700-single-pool-t1-version_2_0_3.json => mellanox-sn2700-single-pool-t1-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn2700-t0-version_2_0_0.json => mellanox-sn2700-t0-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn2700-t0-version_2_0_3.json => mellanox-sn2700-t0-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn2700-t1-version_2_0_0.json => 
mellanox-sn2700-t1-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn2700-t1-version_2_0_3.json => mellanox-sn2700-t1-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn3800-c64-t0-version_2_0_0.json => mellanox-sn3800-c64-t0-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn3800-c64-t0-version_2_0_3.json => mellanox-sn3800-c64-t0-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn3800-c64-t1-version_2_0_0.json => mellanox-sn3800-c64-t1-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn3800-c64-t1-version_2_0_3.json => mellanox-sn3800-c64-t1-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn3800-d112c8-t0-version_2_0_0.json => mellanox-sn3800-d112c8-t0-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn3800-d112c8-t0-version_2_0_3.json => mellanox-sn3800-d112c8-t0-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn3800-d112c8-t1-version_2_0_0.json => mellanox-sn3800-d112c8-t1-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn3800-d112c8-t1-version_2_0_3.json => mellanox-sn3800-d112c8-t1-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn3800-d24c52-t0-version_2_0_0.json => mellanox-sn3800-d24c52-t0-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn3800-d24c52-t0-version_2_0_3.json => mellanox-sn3800-d24c52-t0-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn3800-d24c52-t1-version_2_0_0.json => mellanox-sn3800-d24c52-t1-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn3800-d24c52-t1-version_2_0_3.json => mellanox-sn3800-d24c52-t1-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn3800-d28c50-t0-version_2_0_0.json => mellanox-sn3800-d28c50-t0-version_3_0_0.json} (100%) rename 
tests/db_migrator_input/appl_db/{mellanox-sn3800-d28c50-t0-version_2_0_3.json => mellanox-sn3800-d28c50-t0-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn3800-d28c50-t1-version_2_0_0.json => mellanox-sn3800-d28c50-t1-version_3_0_0.json} (100%) rename tests/db_migrator_input/appl_db/{mellanox-sn3800-d28c50-t1-version_2_0_3.json => mellanox-sn3800-d28c50-t1-version_3_0_3.json} (100%) rename tests/db_migrator_input/appl_db/{qos_tables_db_field_value_reference_format_2_0_1.json => qos_tables_db_field_value_reference_format_3_0_1.json} (100%) rename tests/db_migrator_input/appl_db/{qos_tables_db_field_value_reference_format_2_0_3.json => qos_tables_db_field_value_reference_format_3_0_3.json} (100%) rename tests/db_migrator_input/config_db/{acs-msn2700-t0-version_2_0_0.json => acs-msn2700-t0-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{acs-msn2700-t0-version_2_0_3.json => acs-msn2700-t0-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{acs-msn2700-t1-version_2_0_0.json => acs-msn2700-t1-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{acs-msn2700-t1-version_2_0_3.json => acs-msn2700-t1-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{acs-msn3700-t0-version_2_0_0.json => acs-msn3700-t0-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{acs-msn3700-t0-version_2_0_3.json => acs-msn3700-t0-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{acs-msn3700-t1-version_2_0_0.json => acs-msn3700-t1-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{acs-msn3700-t1-version_2_0_3.json => acs-msn3700-t1-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{acs-msn3800-t0-version_2_0_0.json => acs-msn3800-t0-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{acs-msn3800-t0-version_2_0_3.json => acs-msn3800-t0-version_3_0_3.json} (99%) rename 
tests/db_migrator_input/config_db/{acs-msn3800-t1-version_2_0_0.json => acs-msn3800-t1-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{acs-msn3800-t1-version_2_0_3.json => acs-msn3800-t1-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{acs-msn4700-t0-version_2_0_0.json => acs-msn4700-t0-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{acs-msn4700-t0-version_2_0_3.json => acs-msn4700-t0-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{acs-msn4700-t1-version_2_0_0.json => acs-msn4700-t1-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{acs-msn4700-t1-version_2_0_3.json => acs-msn4700-t1-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn2700-c28d8-single-pool-t0-version_2_0_0.json => mellanox-sn2700-c28d8-single-pool-t0-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn2700-c28d8-single-pool-t0-version_2_0_3.json => mellanox-sn2700-c28d8-single-pool-t0-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn2700-c28d8-single-pool-t1-version_2_0_0.json => mellanox-sn2700-c28d8-single-pool-t1-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn2700-c28d8-single-pool-t1-version_2_0_3.json => mellanox-sn2700-c28d8-single-pool-t1-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn2700-c28d8-t0-version_2_0_0.json => mellanox-sn2700-c28d8-t0-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn2700-c28d8-t0-version_2_0_3.json => mellanox-sn2700-c28d8-t0-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn2700-c28d8-t1-version_2_0_0.json => mellanox-sn2700-c28d8-t1-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn2700-c28d8-t1-version_2_0_3.json => mellanox-sn2700-c28d8-t1-version_3_0_3.json} (99%) rename 
tests/db_migrator_input/config_db/{mellanox-sn2700-d40c8s8-t0-version_2_0_0.json => mellanox-sn2700-d40c8s8-t0-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn2700-d40c8s8-t0-version_2_0_3.json => mellanox-sn2700-d40c8s8-t0-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn2700-d40c8s8-t1-version_2_0_0.json => mellanox-sn2700-d40c8s8-t1-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn2700-d40c8s8-t1-version_2_0_3.json => mellanox-sn2700-d40c8s8-t1-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn2700-d48c8-single-pool-t0-version_2_0_0.json => mellanox-sn2700-d48c8-single-pool-t0-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn2700-d48c8-single-pool-t0-version_2_0_3.json => mellanox-sn2700-d48c8-single-pool-t0-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn2700-d48c8-single-pool-t1-version_2_0_0.json => mellanox-sn2700-d48c8-single-pool-t1-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn2700-d48c8-single-pool-t1-version_2_0_3.json => mellanox-sn2700-d48c8-single-pool-t1-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn2700-d48c8-t0-version_2_0_0.json => mellanox-sn2700-d48c8-t0-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn2700-d48c8-t0-version_2_0_3.json => mellanox-sn2700-d48c8-t0-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn2700-d48c8-t1-version_2_0_0.json => mellanox-sn2700-d48c8-t1-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn2700-d48c8-t1-version_2_0_3.json => mellanox-sn2700-d48c8-t1-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn2700-single-pool-t0-version_2_0_0.json => mellanox-sn2700-single-pool-t0-version_3_0_0.json} (99%) rename 
tests/db_migrator_input/config_db/{mellanox-sn2700-single-pool-t0-version_2_0_3.json => mellanox-sn2700-single-pool-t0-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn2700-single-pool-t1-version_2_0_0.json => mellanox-sn2700-single-pool-t1-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn2700-single-pool-t1-version_2_0_3.json => mellanox-sn2700-single-pool-t1-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn2700-t0-version_2_0_0.json => mellanox-sn2700-t0-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn2700-t0-version_2_0_3.json => mellanox-sn2700-t0-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn2700-t1-version_2_0_0.json => mellanox-sn2700-t1-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn2700-t1-version_2_0_3.json => mellanox-sn2700-t1-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn3800-c64-t0-version_2_0_0.json => mellanox-sn3800-c64-t0-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn3800-c64-t0-version_2_0_3.json => mellanox-sn3800-c64-t0-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn3800-c64-t1-version_2_0_0.json => mellanox-sn3800-c64-t1-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn3800-c64-t1-version_2_0_3.json => mellanox-sn3800-c64-t1-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn3800-d112c8-t0-version_2_0_0.json => mellanox-sn3800-d112c8-t0-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn3800-d112c8-t0-version_2_0_3.json => mellanox-sn3800-d112c8-t0-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn3800-d112c8-t1-version_2_0_0.json => mellanox-sn3800-d112c8-t1-version_3_0_0.json} (99%) rename 
tests/db_migrator_input/config_db/{mellanox-sn3800-d112c8-t1-version_2_0_3.json => mellanox-sn3800-d112c8-t1-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn3800-d24c52-t0-version_2_0_0.json => mellanox-sn3800-d24c52-t0-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn3800-d24c52-t0-version_2_0_3.json => mellanox-sn3800-d24c52-t0-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn3800-d24c52-t1-version_2_0_0.json => mellanox-sn3800-d24c52-t1-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn3800-d24c52-t1-version_2_0_3.json => mellanox-sn3800-d24c52-t1-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn3800-d28c50-t0-version_2_0_0.json => mellanox-sn3800-d28c50-t0-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn3800-d28c50-t0-version_2_0_3.json => mellanox-sn3800-d28c50-t0-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn3800-d28c50-t1-version_2_0_0.json => mellanox-sn3800-d28c50-t1-version_3_0_0.json} (99%) rename tests/db_migrator_input/config_db/{mellanox-sn3800-d28c50-t1-version_2_0_3.json => mellanox-sn3800-d28c50-t1-version_3_0_3.json} (99%) rename tests/db_migrator_input/config_db/{qos_tables_db_field_value_reference_format_2_0_1.json => qos_tables_db_field_value_reference_format_3_0_1.json} (99%) rename tests/db_migrator_input/config_db/{qos_tables_db_field_value_reference_format_2_0_3.json => qos_tables_db_field_value_reference_format_3_0_3.json} (99%) diff --git a/scripts/db_migrator.py b/scripts/db_migrator.py index e41bef1334..ae7437389a 100755 --- a/scripts/db_migrator.py +++ b/scripts/db_migrator.py @@ -44,7 +44,7 @@ def __init__(self, namespace, socket=None): none-zero values. build: sequentially increase within a minor version domain. 
""" - self.CURRENT_VERSION = 'version_2_0_5' + self.CURRENT_VERSION = 'version_3_0_5' self.TABLE_NAME = 'VERSIONS' self.TABLE_KEY = 'DATABASE' @@ -600,13 +600,13 @@ def version_1_0_6(self): abandon_method = self.mellanox_buffer_migrator.mlnx_abandon_pending_buffer_configuration append_method = self.mellanox_buffer_migrator.mlnx_append_item_on_pending_configuration_list - if self.mellanox_buffer_migrator.mlnx_migrate_buffer_pool_size('version_1_0_6', 'version_2_0_0') \ - and self.mellanox_buffer_migrator.mlnx_migrate_buffer_profile('version_1_0_6', 'version_2_0_0') \ + if self.mellanox_buffer_migrator.mlnx_migrate_buffer_pool_size('version_1_0_6', 'version_3_0_0') \ + and self.mellanox_buffer_migrator.mlnx_migrate_buffer_profile('version_1_0_6', 'version_3_0_0') \ and (not self.mellanox_buffer_migrator.mlnx_is_buffer_model_dynamic() or \ self.migrate_config_db_buffer_tables_for_dynamic_calculation(speed_list, cable_len_list, '0', abandon_method, append_method)) \ and self.mellanox_buffer_migrator.mlnx_flush_new_buffer_configuration() \ and self.prepare_dynamic_buffer_for_warm_reboot(buffer_pools, buffer_profiles, buffer_pgs): - self.set_version('version_2_0_0') + self.set_version('version_3_0_0') else: self.prepare_dynamic_buffer_for_warm_reboot() @@ -615,24 +615,24 @@ def version_1_0_6(self): self.configDB.set_entry('DEVICE_METADATA', 'localhost', metadata) log.log_notice('Setting buffer_model to traditional') - self.set_version('version_2_0_0') + self.set_version('version_3_0_0') - return 'version_2_0_0' + return 'version_3_0_0' - def version_2_0_0(self): + def version_3_0_0(self): """ - Version 2_0_0. + Version 3_0_0. """ - log.log_info('Handling version_2_0_0') + log.log_info('Handling version_3_0_0') self.migrate_config_db_port_table_for_auto_neg() - self.set_version('version_2_0_1') - return 'version_2_0_1' + self.set_version('version_3_0_1') + return 'version_3_0_1' - def version_2_0_1(self): + def version_3_0_1(self): """ - Version 2_0_1. + Version 3_0_1. 
""" - log.log_info('Handling version_2_0_1') + log.log_info('Handling version_3_0_1') warmreboot_state = self.stateDB.get(self.stateDB.STATE_DB, 'WARM_RESTART_ENABLE_TABLE|system', 'enable') if warmreboot_state != 'true': @@ -640,34 +640,34 @@ def version_2_0_1(self): for name, data in portchannel_table.items(): data['lacp_key'] = 'auto' self.configDB.set_entry('PORTCHANNEL', name, data) - self.set_version('version_2_0_2') - return 'version_2_0_2' + self.set_version('version_3_0_2') + return 'version_3_0_2' - def version_2_0_2(self): + def version_3_0_2(self): """ - Version 2_0_2. + Version 3_0_2. """ - log.log_info('Handling version_2_0_2') + log.log_info('Handling version_3_0_2') self.migrate_qos_fieldval_reference_format() - self.set_version('version_2_0_3') - return 'version_2_0_3' + self.set_version('version_3_0_3') + return 'version_3_0_3' - def version_2_0_3(self): + def version_3_0_3(self): """ - Version 2_0_3 + Version 3_0_3 """ - log.log_info('Handling version_2_0_3') + log.log_info('Handling version_3_0_3') if self.asic_type == "mellanox": self.mellanox_buffer_migrator.mlnx_reclaiming_unused_buffer() - self.set_version('version_2_0_4') - return 'version_2_0_4' + self.set_version('version_3_0_4') + return 'version_3_0_4' - def version_2_0_4(self): + def version_3_0_4(self): """ - Version 2_0_4 + Version 3_0_4 """ - log.log_info('Handling version_2_0_4') + log.log_info('Handling version_3_0_4') # Migrate "pfc_enable" to "pfc_enable" and "pfcwd_sw_enable" # 1. pfc_enable means enable pfc on certain queues # 2. pfcwd_sw_enable means enable PFC software watchdog on certain queues @@ -677,14 +677,13 @@ def version_2_0_4(self): if 'pfc_enable' in v: v['pfcwd_sw_enable'] = v['pfc_enable'] self.configDB.set_entry('PORT_QOS_MAP', k, v) + return 'version_3_0_5' - return 'version_2_0_5' - - def version_2_0_5(self): + def version_3_0_5(self): """ Current latest version. Nothing to do here. 
""" - log.log_info('Handling version_2_0_5') + log.log_info('Handling version_3_0_5') return None def get_version(self): diff --git a/scripts/mellanox_buffer_migrator.py b/scripts/mellanox_buffer_migrator.py index 6706969be1..bc5c7cab16 100755 --- a/scripts/mellanox_buffer_migrator.py +++ b/scripts/mellanox_buffer_migrator.py @@ -480,8 +480,8 @@ def __init__(self, configDB, appDB, stateDB): "spc2_3800-d24c52_t1_pool_shp", "spc2_3800-d28c50_t1_pool_shp"], } }, - "version_2_0_0": { - # Version 2.0.0 is introduced for dynamic buffer calculation + "version_3_0_0": { + # Version 3.0.0 is introduced for dynamic buffer calculation # "pool_mapped_from_old_version": { "spc1_t0_pool": "spc1_pool", diff --git a/tests/db_migrator_input/appl_db/acs-msn2700-t0-version_2_0_0.json b/tests/db_migrator_input/appl_db/acs-msn2700-t0-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/acs-msn2700-t0-version_2_0_0.json rename to tests/db_migrator_input/appl_db/acs-msn2700-t0-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/acs-msn2700-t0-version_2_0_3.json b/tests/db_migrator_input/appl_db/acs-msn2700-t0-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/acs-msn2700-t0-version_2_0_3.json rename to tests/db_migrator_input/appl_db/acs-msn2700-t0-version_3_0_3.json diff --git a/tests/db_migrator_input/appl_db/acs-msn2700-t1-version_2_0_0.json b/tests/db_migrator_input/appl_db/acs-msn2700-t1-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/acs-msn2700-t1-version_2_0_0.json rename to tests/db_migrator_input/appl_db/acs-msn2700-t1-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/acs-msn2700-t1-version_2_0_3.json b/tests/db_migrator_input/appl_db/acs-msn2700-t1-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/acs-msn2700-t1-version_2_0_3.json rename to tests/db_migrator_input/appl_db/acs-msn2700-t1-version_3_0_3.json diff --git 
a/tests/db_migrator_input/appl_db/acs-msn3700-t0-version_2_0_0.json b/tests/db_migrator_input/appl_db/acs-msn3700-t0-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/acs-msn3700-t0-version_2_0_0.json rename to tests/db_migrator_input/appl_db/acs-msn3700-t0-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/acs-msn3700-t0-version_2_0_3.json b/tests/db_migrator_input/appl_db/acs-msn3700-t0-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/acs-msn3700-t0-version_2_0_3.json rename to tests/db_migrator_input/appl_db/acs-msn3700-t0-version_3_0_3.json diff --git a/tests/db_migrator_input/appl_db/acs-msn3700-t1-version_2_0_0.json b/tests/db_migrator_input/appl_db/acs-msn3700-t1-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/acs-msn3700-t1-version_2_0_0.json rename to tests/db_migrator_input/appl_db/acs-msn3700-t1-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/acs-msn3700-t1-version_2_0_3.json b/tests/db_migrator_input/appl_db/acs-msn3700-t1-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/acs-msn3700-t1-version_2_0_3.json rename to tests/db_migrator_input/appl_db/acs-msn3700-t1-version_3_0_3.json diff --git a/tests/db_migrator_input/appl_db/acs-msn3800-t0-version_2_0_0.json b/tests/db_migrator_input/appl_db/acs-msn3800-t0-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/acs-msn3800-t0-version_2_0_0.json rename to tests/db_migrator_input/appl_db/acs-msn3800-t0-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/acs-msn3800-t0-version_2_0_3.json b/tests/db_migrator_input/appl_db/acs-msn3800-t0-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/acs-msn3800-t0-version_2_0_3.json rename to tests/db_migrator_input/appl_db/acs-msn3800-t0-version_3_0_3.json diff --git 
a/tests/db_migrator_input/appl_db/acs-msn3800-t1-version_2_0_0.json b/tests/db_migrator_input/appl_db/acs-msn3800-t1-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/acs-msn3800-t1-version_2_0_0.json rename to tests/db_migrator_input/appl_db/acs-msn3800-t1-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/acs-msn3800-t1-version_2_0_3.json b/tests/db_migrator_input/appl_db/acs-msn3800-t1-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/acs-msn3800-t1-version_2_0_3.json rename to tests/db_migrator_input/appl_db/acs-msn3800-t1-version_3_0_3.json diff --git a/tests/db_migrator_input/appl_db/acs-msn4700-t0-version_2_0_0.json b/tests/db_migrator_input/appl_db/acs-msn4700-t0-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/acs-msn4700-t0-version_2_0_0.json rename to tests/db_migrator_input/appl_db/acs-msn4700-t0-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/acs-msn4700-t0-version_2_0_3.json b/tests/db_migrator_input/appl_db/acs-msn4700-t0-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/acs-msn4700-t0-version_2_0_3.json rename to tests/db_migrator_input/appl_db/acs-msn4700-t0-version_3_0_3.json diff --git a/tests/db_migrator_input/appl_db/acs-msn4700-t1-version_2_0_0.json b/tests/db_migrator_input/appl_db/acs-msn4700-t1-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/acs-msn4700-t1-version_2_0_0.json rename to tests/db_migrator_input/appl_db/acs-msn4700-t1-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/acs-msn4700-t1-version_2_0_3.json b/tests/db_migrator_input/appl_db/acs-msn4700-t1-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/acs-msn4700-t1-version_2_0_3.json rename to tests/db_migrator_input/appl_db/acs-msn4700-t1-version_3_0_3.json diff --git 
a/tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-single-pool-t0-version_2_0_0.json b/tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-single-pool-t0-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-single-pool-t0-version_2_0_0.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-single-pool-t0-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-single-pool-t0-version_2_0_3.json b/tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-single-pool-t0-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-single-pool-t0-version_2_0_3.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-single-pool-t0-version_3_0_3.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-single-pool-t1-version_2_0_0.json b/tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-single-pool-t1-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-single-pool-t1-version_2_0_0.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-single-pool-t1-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-single-pool-t1-version_2_0_3.json b/tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-single-pool-t1-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-single-pool-t1-version_2_0_3.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-single-pool-t1-version_3_0_3.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-t0-version_2_0_0.json b/tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-t0-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-t0-version_2_0_0.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-t0-version_3_0_0.json 
diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-t0-version_2_0_3.json b/tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-t0-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-t0-version_2_0_3.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-t0-version_3_0_3.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-t1-version_2_0_0.json b/tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-t1-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-t1-version_2_0_0.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-t1-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-t1-version_2_0_3.json b/tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-t1-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-t1-version_2_0_3.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-c28d8-t1-version_3_0_3.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-d40c8s8-t0-version_2_0_0.json b/tests/db_migrator_input/appl_db/mellanox-sn2700-d40c8s8-t0-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-d40c8s8-t0-version_2_0_0.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-d40c8s8-t0-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-d40c8s8-t0-version_2_0_3.json b/tests/db_migrator_input/appl_db/mellanox-sn2700-d40c8s8-t0-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-d40c8s8-t0-version_2_0_3.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-d40c8s8-t0-version_3_0_3.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-d40c8s8-t1-version_2_0_0.json 
b/tests/db_migrator_input/appl_db/mellanox-sn2700-d40c8s8-t1-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-d40c8s8-t1-version_2_0_0.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-d40c8s8-t1-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-d40c8s8-t1-version_2_0_3.json b/tests/db_migrator_input/appl_db/mellanox-sn2700-d40c8s8-t1-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-d40c8s8-t1-version_2_0_3.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-d40c8s8-t1-version_3_0_3.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-single-pool-t0-version_2_0_0.json b/tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-single-pool-t0-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-single-pool-t0-version_2_0_0.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-single-pool-t0-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-single-pool-t0-version_2_0_3.json b/tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-single-pool-t0-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-single-pool-t0-version_2_0_3.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-single-pool-t0-version_3_0_3.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-single-pool-t1-version_2_0_0.json b/tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-single-pool-t1-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-single-pool-t1-version_2_0_0.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-single-pool-t1-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-single-pool-t1-version_2_0_3.json 
b/tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-single-pool-t1-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-single-pool-t1-version_2_0_3.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-single-pool-t1-version_3_0_3.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-t0-version_2_0_0.json b/tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-t0-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-t0-version_2_0_0.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-t0-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-t0-version_2_0_3.json b/tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-t0-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-t0-version_2_0_3.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-t0-version_3_0_3.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-t1-version_2_0_0.json b/tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-t1-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-t1-version_2_0_0.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-t1-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-t1-version_2_0_3.json b/tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-t1-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-t1-version_2_0_3.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-d48c8-t1-version_3_0_3.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-single-pool-t0-version_2_0_0.json b/tests/db_migrator_input/appl_db/mellanox-sn2700-single-pool-t0-version_3_0_0.json similarity index 100% rename from 
tests/db_migrator_input/appl_db/mellanox-sn2700-single-pool-t0-version_2_0_0.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-single-pool-t0-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-single-pool-t0-version_2_0_3.json b/tests/db_migrator_input/appl_db/mellanox-sn2700-single-pool-t0-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-single-pool-t0-version_2_0_3.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-single-pool-t0-version_3_0_3.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-single-pool-t1-version_2_0_0.json b/tests/db_migrator_input/appl_db/mellanox-sn2700-single-pool-t1-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-single-pool-t1-version_2_0_0.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-single-pool-t1-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-single-pool-t1-version_2_0_3.json b/tests/db_migrator_input/appl_db/mellanox-sn2700-single-pool-t1-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-single-pool-t1-version_2_0_3.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-single-pool-t1-version_3_0_3.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-t0-version_2_0_0.json b/tests/db_migrator_input/appl_db/mellanox-sn2700-t0-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-t0-version_2_0_0.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-t0-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-t0-version_2_0_3.json b/tests/db_migrator_input/appl_db/mellanox-sn2700-t0-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-t0-version_2_0_3.json rename to 
tests/db_migrator_input/appl_db/mellanox-sn2700-t0-version_3_0_3.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-t1-version_2_0_0.json b/tests/db_migrator_input/appl_db/mellanox-sn2700-t1-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-t1-version_2_0_0.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-t1-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn2700-t1-version_2_0_3.json b/tests/db_migrator_input/appl_db/mellanox-sn2700-t1-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn2700-t1-version_2_0_3.json rename to tests/db_migrator_input/appl_db/mellanox-sn2700-t1-version_3_0_3.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn3800-c64-t0-version_2_0_0.json b/tests/db_migrator_input/appl_db/mellanox-sn3800-c64-t0-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn3800-c64-t0-version_2_0_0.json rename to tests/db_migrator_input/appl_db/mellanox-sn3800-c64-t0-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn3800-c64-t0-version_2_0_3.json b/tests/db_migrator_input/appl_db/mellanox-sn3800-c64-t0-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn3800-c64-t0-version_2_0_3.json rename to tests/db_migrator_input/appl_db/mellanox-sn3800-c64-t0-version_3_0_3.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn3800-c64-t1-version_2_0_0.json b/tests/db_migrator_input/appl_db/mellanox-sn3800-c64-t1-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn3800-c64-t1-version_2_0_0.json rename to tests/db_migrator_input/appl_db/mellanox-sn3800-c64-t1-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn3800-c64-t1-version_2_0_3.json b/tests/db_migrator_input/appl_db/mellanox-sn3800-c64-t1-version_3_0_3.json similarity index 100% 
rename from tests/db_migrator_input/appl_db/mellanox-sn3800-c64-t1-version_2_0_3.json rename to tests/db_migrator_input/appl_db/mellanox-sn3800-c64-t1-version_3_0_3.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn3800-d112c8-t0-version_2_0_0.json b/tests/db_migrator_input/appl_db/mellanox-sn3800-d112c8-t0-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn3800-d112c8-t0-version_2_0_0.json rename to tests/db_migrator_input/appl_db/mellanox-sn3800-d112c8-t0-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn3800-d112c8-t0-version_2_0_3.json b/tests/db_migrator_input/appl_db/mellanox-sn3800-d112c8-t0-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn3800-d112c8-t0-version_2_0_3.json rename to tests/db_migrator_input/appl_db/mellanox-sn3800-d112c8-t0-version_3_0_3.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn3800-d112c8-t1-version_2_0_0.json b/tests/db_migrator_input/appl_db/mellanox-sn3800-d112c8-t1-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn3800-d112c8-t1-version_2_0_0.json rename to tests/db_migrator_input/appl_db/mellanox-sn3800-d112c8-t1-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn3800-d112c8-t1-version_2_0_3.json b/tests/db_migrator_input/appl_db/mellanox-sn3800-d112c8-t1-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn3800-d112c8-t1-version_2_0_3.json rename to tests/db_migrator_input/appl_db/mellanox-sn3800-d112c8-t1-version_3_0_3.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn3800-d24c52-t0-version_2_0_0.json b/tests/db_migrator_input/appl_db/mellanox-sn3800-d24c52-t0-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn3800-d24c52-t0-version_2_0_0.json rename to 
tests/db_migrator_input/appl_db/mellanox-sn3800-d24c52-t0-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn3800-d24c52-t0-version_2_0_3.json b/tests/db_migrator_input/appl_db/mellanox-sn3800-d24c52-t0-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn3800-d24c52-t0-version_2_0_3.json rename to tests/db_migrator_input/appl_db/mellanox-sn3800-d24c52-t0-version_3_0_3.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn3800-d24c52-t1-version_2_0_0.json b/tests/db_migrator_input/appl_db/mellanox-sn3800-d24c52-t1-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn3800-d24c52-t1-version_2_0_0.json rename to tests/db_migrator_input/appl_db/mellanox-sn3800-d24c52-t1-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn3800-d24c52-t1-version_2_0_3.json b/tests/db_migrator_input/appl_db/mellanox-sn3800-d24c52-t1-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn3800-d24c52-t1-version_2_0_3.json rename to tests/db_migrator_input/appl_db/mellanox-sn3800-d24c52-t1-version_3_0_3.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn3800-d28c50-t0-version_2_0_0.json b/tests/db_migrator_input/appl_db/mellanox-sn3800-d28c50-t0-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn3800-d28c50-t0-version_2_0_0.json rename to tests/db_migrator_input/appl_db/mellanox-sn3800-d28c50-t0-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn3800-d28c50-t0-version_2_0_3.json b/tests/db_migrator_input/appl_db/mellanox-sn3800-d28c50-t0-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn3800-d28c50-t0-version_2_0_3.json rename to tests/db_migrator_input/appl_db/mellanox-sn3800-d28c50-t0-version_3_0_3.json diff --git 
a/tests/db_migrator_input/appl_db/mellanox-sn3800-d28c50-t1-version_2_0_0.json b/tests/db_migrator_input/appl_db/mellanox-sn3800-d28c50-t1-version_3_0_0.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn3800-d28c50-t1-version_2_0_0.json rename to tests/db_migrator_input/appl_db/mellanox-sn3800-d28c50-t1-version_3_0_0.json diff --git a/tests/db_migrator_input/appl_db/mellanox-sn3800-d28c50-t1-version_2_0_3.json b/tests/db_migrator_input/appl_db/mellanox-sn3800-d28c50-t1-version_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/mellanox-sn3800-d28c50-t1-version_2_0_3.json rename to tests/db_migrator_input/appl_db/mellanox-sn3800-d28c50-t1-version_3_0_3.json diff --git a/tests/db_migrator_input/appl_db/qos_tables_db_field_value_reference_format_2_0_1.json b/tests/db_migrator_input/appl_db/qos_tables_db_field_value_reference_format_3_0_1.json similarity index 100% rename from tests/db_migrator_input/appl_db/qos_tables_db_field_value_reference_format_2_0_1.json rename to tests/db_migrator_input/appl_db/qos_tables_db_field_value_reference_format_3_0_1.json diff --git a/tests/db_migrator_input/appl_db/qos_tables_db_field_value_reference_format_2_0_3.json b/tests/db_migrator_input/appl_db/qos_tables_db_field_value_reference_format_3_0_3.json similarity index 100% rename from tests/db_migrator_input/appl_db/qos_tables_db_field_value_reference_format_2_0_3.json rename to tests/db_migrator_input/appl_db/qos_tables_db_field_value_reference_format_3_0_3.json diff --git a/tests/db_migrator_input/config_db/acs-msn2700-t0-version_2_0_0.json b/tests/db_migrator_input/config_db/acs-msn2700-t0-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/acs-msn2700-t0-version_2_0_0.json rename to tests/db_migrator_input/config_db/acs-msn2700-t0-version_3_0_0.json index 76ed34269e..aab45921ab 100644 --- a/tests/db_migrator_input/config_db/acs-msn2700-t0-version_2_0_0.json +++ 
b/tests/db_migrator_input/config_db/acs-msn2700-t0-version_3_0_0.json @@ -1014,6 +1014,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/acs-msn2700-t0-version_2_0_3.json b/tests/db_migrator_input/config_db/acs-msn2700-t0-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/acs-msn2700-t0-version_2_0_3.json rename to tests/db_migrator_input/config_db/acs-msn2700-t0-version_3_0_3.json index 3f70cfa87e..d740449d80 100644 --- a/tests/db_migrator_input/config_db/acs-msn2700-t0-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/acs-msn2700-t0-version_3_0_3.json @@ -1014,6 +1014,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/acs-msn2700-t1-version_2_0_0.json b/tests/db_migrator_input/config_db/acs-msn2700-t1-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/acs-msn2700-t1-version_2_0_0.json rename to tests/db_migrator_input/config_db/acs-msn2700-t1-version_3_0_0.json index be1f36d71c..c3a9fb204e 100644 --- a/tests/db_migrator_input/config_db/acs-msn2700-t1-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/acs-msn2700-t1-version_3_0_0.json @@ -1086,6 +1086,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/acs-msn2700-t1-version_2_0_3.json b/tests/db_migrator_input/config_db/acs-msn2700-t1-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/acs-msn2700-t1-version_2_0_3.json rename to tests/db_migrator_input/config_db/acs-msn2700-t1-version_3_0_3.json index 44246edd6b..d77a409598 100644 --- 
a/tests/db_migrator_input/config_db/acs-msn2700-t1-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/acs-msn2700-t1-version_3_0_3.json @@ -1086,6 +1086,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/acs-msn3700-t0-version_2_0_0.json b/tests/db_migrator_input/config_db/acs-msn3700-t0-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/acs-msn3700-t0-version_2_0_0.json rename to tests/db_migrator_input/config_db/acs-msn3700-t0-version_3_0_0.json index c79217b1c5..6bc2ee545b 100644 --- a/tests/db_migrator_input/config_db/acs-msn3700-t0-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/acs-msn3700-t0-version_3_0_0.json @@ -1900,6 +1900,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/acs-msn3700-t0-version_2_0_3.json b/tests/db_migrator_input/config_db/acs-msn3700-t0-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/acs-msn3700-t0-version_2_0_3.json rename to tests/db_migrator_input/config_db/acs-msn3700-t0-version_3_0_3.json index 598b9b16c7..ea78053bb0 100644 --- a/tests/db_migrator_input/config_db/acs-msn3700-t0-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/acs-msn3700-t0-version_3_0_3.json @@ -1900,6 +1900,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/acs-msn3700-t1-version_2_0_0.json b/tests/db_migrator_input/config_db/acs-msn3700-t1-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/acs-msn3700-t1-version_2_0_0.json rename to tests/db_migrator_input/config_db/acs-msn3700-t1-version_3_0_0.json index 
a756fde63d..919888d8c3 100644 --- a/tests/db_migrator_input/config_db/acs-msn3700-t1-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/acs-msn3700-t1-version_3_0_0.json @@ -1948,6 +1948,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/acs-msn3700-t1-version_2_0_3.json b/tests/db_migrator_input/config_db/acs-msn3700-t1-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/acs-msn3700-t1-version_2_0_3.json rename to tests/db_migrator_input/config_db/acs-msn3700-t1-version_3_0_3.json index 684b52910e..62d00c4e4d 100644 --- a/tests/db_migrator_input/config_db/acs-msn3700-t1-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/acs-msn3700-t1-version_3_0_3.json @@ -1948,6 +1948,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/acs-msn3800-t0-version_2_0_0.json b/tests/db_migrator_input/config_db/acs-msn3800-t0-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/acs-msn3800-t0-version_2_0_0.json rename to tests/db_migrator_input/config_db/acs-msn3800-t0-version_3_0_0.json index d367b5dca2..71a0c751e7 100644 --- a/tests/db_migrator_input/config_db/acs-msn3800-t0-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/acs-msn3800-t0-version_3_0_0.json @@ -2034,6 +2034,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/acs-msn3800-t0-version_2_0_3.json b/tests/db_migrator_input/config_db/acs-msn3800-t0-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/acs-msn3800-t0-version_2_0_3.json rename to 
tests/db_migrator_input/config_db/acs-msn3800-t0-version_3_0_3.json index add7cf0371..8d8451165e 100644 --- a/tests/db_migrator_input/config_db/acs-msn3800-t0-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/acs-msn3800-t0-version_3_0_3.json @@ -2034,6 +2034,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/acs-msn3800-t1-version_2_0_0.json b/tests/db_migrator_input/config_db/acs-msn3800-t1-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/acs-msn3800-t1-version_2_0_0.json rename to tests/db_migrator_input/config_db/acs-msn3800-t1-version_3_0_0.json index 9a14f0659c..97bdf8f23d 100644 --- a/tests/db_migrator_input/config_db/acs-msn3800-t1-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/acs-msn3800-t1-version_3_0_0.json @@ -2082,6 +2082,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/acs-msn3800-t1-version_2_0_3.json b/tests/db_migrator_input/config_db/acs-msn3800-t1-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/acs-msn3800-t1-version_2_0_3.json rename to tests/db_migrator_input/config_db/acs-msn3800-t1-version_3_0_3.json index b930cc4632..70b8b316c7 100644 --- a/tests/db_migrator_input/config_db/acs-msn3800-t1-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/acs-msn3800-t1-version_3_0_3.json @@ -2082,6 +2082,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/acs-msn4700-t0-version_2_0_0.json b/tests/db_migrator_input/config_db/acs-msn4700-t0-version_3_0_0.json similarity index 99% rename from 
tests/db_migrator_input/config_db/acs-msn4700-t0-version_2_0_0.json rename to tests/db_migrator_input/config_db/acs-msn4700-t0-version_3_0_0.json index d0c5b06344..a3085167ae 100644 --- a/tests/db_migrator_input/config_db/acs-msn4700-t0-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/acs-msn4700-t0-version_3_0_0.json @@ -1978,6 +1978,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/acs-msn4700-t0-version_2_0_3.json b/tests/db_migrator_input/config_db/acs-msn4700-t0-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/acs-msn4700-t0-version_2_0_3.json rename to tests/db_migrator_input/config_db/acs-msn4700-t0-version_3_0_3.json index fdcd78b5bb..dc40c8fe71 100644 --- a/tests/db_migrator_input/config_db/acs-msn4700-t0-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/acs-msn4700-t0-version_3_0_3.json @@ -1978,6 +1978,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/acs-msn4700-t1-version_2_0_0.json b/tests/db_migrator_input/config_db/acs-msn4700-t1-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/acs-msn4700-t1-version_2_0_0.json rename to tests/db_migrator_input/config_db/acs-msn4700-t1-version_3_0_0.json index 05dc4f5558..8698a6bcca 100644 --- a/tests/db_migrator_input/config_db/acs-msn4700-t1-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/acs-msn4700-t1-version_3_0_0.json @@ -2044,6 +2044,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/acs-msn4700-t1-version_2_0_3.json b/tests/db_migrator_input/config_db/acs-msn4700-t1-version_3_0_3.json similarity 
index 99% rename from tests/db_migrator_input/config_db/acs-msn4700-t1-version_2_0_3.json rename to tests/db_migrator_input/config_db/acs-msn4700-t1-version_3_0_3.json index 9338443212..aec14ade88 100644 --- a/tests/db_migrator_input/config_db/acs-msn4700-t1-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/acs-msn4700-t1-version_3_0_3.json @@ -2044,6 +2044,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/empty-config-expected.json b/tests/db_migrator_input/config_db/empty-config-expected.json index 8111516340..b837c97a08 100644 --- a/tests/db_migrator_input/config_db/empty-config-expected.json +++ b/tests/db_migrator_input/config_db/empty-config-expected.json @@ -1,5 +1,5 @@ { "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-single-pool-t0-version_2_0_0.json b/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-single-pool-t0-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-single-pool-t0-version_2_0_0.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-single-pool-t0-version_3_0_0.json index af7b580c12..043d77414d 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-single-pool-t0-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-single-pool-t0-version_3_0_0.json @@ -1055,6 +1055,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-single-pool-t0-version_2_0_3.json b/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-single-pool-t0-version_3_0_3.json similarity index 99% rename from 
tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-single-pool-t0-version_2_0_3.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-single-pool-t0-version_3_0_3.json index 22850ddc1f..d74f741124 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-single-pool-t0-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-single-pool-t0-version_3_0_3.json @@ -1055,6 +1055,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-single-pool-t1-version_2_0_0.json b/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-single-pool-t1-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-single-pool-t1-version_2_0_0.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-single-pool-t1-version_3_0_0.json index 2b680a34e9..d53fc65467 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-single-pool-t1-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-single-pool-t1-version_3_0_0.json @@ -1148,6 +1148,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-single-pool-t1-version_2_0_3.json b/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-single-pool-t1-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-single-pool-t1-version_2_0_3.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-single-pool-t1-version_3_0_3.json index 077e8e847f..3b6c1d9774 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-single-pool-t1-version_2_0_3.json +++ 
b/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-single-pool-t1-version_3_0_3.json @@ -1148,6 +1148,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-t0-version_2_0_0.json b/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-t0-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-t0-version_2_0_0.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-t0-version_3_0_0.json index fd55102445..747e332769 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-t0-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-t0-version_3_0_0.json @@ -1060,6 +1060,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-t0-version_2_0_3.json b/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-t0-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-t0-version_2_0_3.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-t0-version_3_0_3.json index 5144e7fbc6..842ef4f2ab 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-t0-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-t0-version_3_0_3.json @@ -1060,6 +1060,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-t1-version_2_0_0.json b/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-t1-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-t1-version_2_0_0.json rename to 
tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-t1-version_3_0_0.json index 38428be263..67ff5f7e27 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-t1-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-t1-version_3_0_0.json @@ -1153,6 +1153,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-t1-version_2_0_3.json b/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-t1-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-t1-version_2_0_3.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-t1-version_3_0_3.json index 32c0814a7a..64294841da 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-t1-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/mellanox-sn2700-c28d8-t1-version_3_0_3.json @@ -1153,6 +1153,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-d40c8s8-t0-version_2_0_0.json b/tests/db_migrator_input/config_db/mellanox-sn2700-d40c8s8-t0-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn2700-d40c8s8-t0-version_2_0_0.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-d40c8s8-t0-version_3_0_0.json index 2513d40083..4974c1801e 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-d40c8s8-t0-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/mellanox-sn2700-d40c8s8-t0-version_3_0_0.json @@ -1055,6 +1055,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-d40c8s8-t0-version_2_0_3.json 
b/tests/db_migrator_input/config_db/mellanox-sn2700-d40c8s8-t0-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn2700-d40c8s8-t0-version_2_0_3.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-d40c8s8-t0-version_3_0_3.json index 842f8622dd..72e6ba3fd9 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-d40c8s8-t0-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/mellanox-sn2700-d40c8s8-t0-version_3_0_3.json @@ -1055,6 +1055,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-d40c8s8-t1-version_2_0_0.json b/tests/db_migrator_input/config_db/mellanox-sn2700-d40c8s8-t1-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn2700-d40c8s8-t1-version_2_0_0.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-d40c8s8-t1-version_3_0_0.json index cf2d4ecdd8..b0a79247e8 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-d40c8s8-t1-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/mellanox-sn2700-d40c8s8-t1-version_3_0_0.json @@ -1148,6 +1148,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-d40c8s8-t1-version_2_0_3.json b/tests/db_migrator_input/config_db/mellanox-sn2700-d40c8s8-t1-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn2700-d40c8s8-t1-version_2_0_3.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-d40c8s8-t1-version_3_0_3.json index da9416a429..fb617e0bca 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-d40c8s8-t1-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/mellanox-sn2700-d40c8s8-t1-version_3_0_3.json @@ -1148,6 +1148,6 @@ 
"speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-single-pool-t0-version_2_0_0.json b/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-single-pool-t0-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-single-pool-t0-version_2_0_0.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-single-pool-t0-version_3_0_0.json index 8fc4128142..8b3d46d032 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-single-pool-t0-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-single-pool-t0-version_3_0_0.json @@ -1055,6 +1055,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-single-pool-t0-version_2_0_3.json b/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-single-pool-t0-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-single-pool-t0-version_2_0_3.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-single-pool-t0-version_3_0_3.json index 62ad538df3..0ae92bac07 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-single-pool-t0-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-single-pool-t0-version_3_0_3.json @@ -1055,6 +1055,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-single-pool-t1-version_2_0_0.json b/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-single-pool-t1-version_3_0_0.json similarity index 99% rename from 
tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-single-pool-t1-version_2_0_0.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-single-pool-t1-version_3_0_0.json index 171cbe43d0..a5f8b14f9e 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-single-pool-t1-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-single-pool-t1-version_3_0_0.json @@ -1148,6 +1148,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-single-pool-t1-version_2_0_3.json b/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-single-pool-t1-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-single-pool-t1-version_2_0_3.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-single-pool-t1-version_3_0_3.json index b6464b117f..f861817758 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-single-pool-t1-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-single-pool-t1-version_3_0_3.json @@ -1148,6 +1148,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-t0-version_2_0_0.json b/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-t0-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-t0-version_2_0_0.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-t0-version_3_0_0.json index 1119126485..0d8f258c08 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-t0-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-t0-version_3_0_0.json @@ -1060,6 +1060,6 @@ "speed": "50000" }, 
"VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-t0-version_2_0_3.json b/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-t0-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-t0-version_2_0_3.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-t0-version_3_0_3.json index 3de072f4b7..abaa38c2c4 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-t0-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-t0-version_3_0_3.json @@ -1060,6 +1060,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-t1-version_2_0_0.json b/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-t1-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-t1-version_2_0_0.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-t1-version_3_0_0.json index 436e5a4803..17a2cc0bc6 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-t1-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-t1-version_3_0_0.json @@ -1153,6 +1153,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-t1-version_2_0_3.json b/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-t1-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-t1-version_2_0_3.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-t1-version_3_0_3.json index 9571eb9c96..af7744f190 100644 --- 
a/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-t1-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/mellanox-sn2700-d48c8-t1-version_3_0_3.json @@ -1153,6 +1153,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-single-pool-t0-version_2_0_0.json b/tests/db_migrator_input/config_db/mellanox-sn2700-single-pool-t0-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn2700-single-pool-t0-version_2_0_0.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-single-pool-t0-version_3_0_0.json index 093ea2c7db..89d5fda174 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-single-pool-t0-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/mellanox-sn2700-single-pool-t0-version_3_0_0.json @@ -1055,6 +1055,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-single-pool-t0-version_2_0_3.json b/tests/db_migrator_input/config_db/mellanox-sn2700-single-pool-t0-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn2700-single-pool-t0-version_2_0_3.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-single-pool-t0-version_3_0_3.json index e9a3ce0336..96afbca2cb 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-single-pool-t0-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/mellanox-sn2700-single-pool-t0-version_3_0_3.json @@ -1055,6 +1055,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-single-pool-t1-version_2_0_0.json 
b/tests/db_migrator_input/config_db/mellanox-sn2700-single-pool-t1-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn2700-single-pool-t1-version_2_0_0.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-single-pool-t1-version_3_0_0.json index 0e07eb8f5b..323afdd76d 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-single-pool-t1-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/mellanox-sn2700-single-pool-t1-version_3_0_0.json @@ -1148,6 +1148,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-single-pool-t1-version_2_0_3.json b/tests/db_migrator_input/config_db/mellanox-sn2700-single-pool-t1-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn2700-single-pool-t1-version_2_0_3.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-single-pool-t1-version_3_0_3.json index d150a9667e..1ca4b8ae65 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-single-pool-t1-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/mellanox-sn2700-single-pool-t1-version_3_0_3.json @@ -1148,6 +1148,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-t0-version_2_0_0.json b/tests/db_migrator_input/config_db/mellanox-sn2700-t0-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn2700-t0-version_2_0_0.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-t0-version_3_0_0.json index 1061135496..8ffe560550 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-t0-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/mellanox-sn2700-t0-version_3_0_0.json @@ -1060,6 +1060,6 @@ 
"speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-t0-version_2_0_3.json b/tests/db_migrator_input/config_db/mellanox-sn2700-t0-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn2700-t0-version_2_0_3.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-t0-version_3_0_3.json index 8422c1e651..b29b89a6f6 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-t0-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/mellanox-sn2700-t0-version_3_0_3.json @@ -1060,6 +1060,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-t1-version_2_0_0.json b/tests/db_migrator_input/config_db/mellanox-sn2700-t1-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn2700-t1-version_2_0_0.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-t1-version_3_0_0.json index 7d9192a523..84c0f26e19 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-t1-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/mellanox-sn2700-t1-version_3_0_0.json @@ -1153,6 +1153,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn2700-t1-version_2_0_3.json b/tests/db_migrator_input/config_db/mellanox-sn2700-t1-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn2700-t1-version_2_0_3.json rename to tests/db_migrator_input/config_db/mellanox-sn2700-t1-version_3_0_3.json index 3012654e7b..cebc316720 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn2700-t1-version_2_0_3.json +++ 
b/tests/db_migrator_input/config_db/mellanox-sn2700-t1-version_3_0_3.json @@ -1153,6 +1153,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn3800-c64-t0-version_2_0_0.json b/tests/db_migrator_input/config_db/mellanox-sn3800-c64-t0-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn3800-c64-t0-version_2_0_0.json rename to tests/db_migrator_input/config_db/mellanox-sn3800-c64-t0-version_3_0_0.json index b69b2cf019..122376743c 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn3800-c64-t0-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/mellanox-sn3800-c64-t0-version_3_0_0.json @@ -1764,6 +1764,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn3800-c64-t0-version_2_0_3.json b/tests/db_migrator_input/config_db/mellanox-sn3800-c64-t0-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn3800-c64-t0-version_2_0_3.json rename to tests/db_migrator_input/config_db/mellanox-sn3800-c64-t0-version_3_0_3.json index a27361ef0f..b799c3897a 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn3800-c64-t0-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/mellanox-sn3800-c64-t0-version_3_0_3.json @@ -1764,6 +1764,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn3800-c64-t1-version_2_0_0.json b/tests/db_migrator_input/config_db/mellanox-sn3800-c64-t1-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn3800-c64-t1-version_2_0_0.json rename to 
tests/db_migrator_input/config_db/mellanox-sn3800-c64-t1-version_3_0_0.json index 318bed85ab..ffc516678f 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn3800-c64-t1-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/mellanox-sn3800-c64-t1-version_3_0_0.json @@ -1886,6 +1886,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn3800-c64-t1-version_2_0_3.json b/tests/db_migrator_input/config_db/mellanox-sn3800-c64-t1-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn3800-c64-t1-version_2_0_3.json rename to tests/db_migrator_input/config_db/mellanox-sn3800-c64-t1-version_3_0_3.json index 901bc3fe61..bae0caedf1 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn3800-c64-t1-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/mellanox-sn3800-c64-t1-version_3_0_3.json @@ -1886,6 +1886,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn3800-d112c8-t0-version_2_0_0.json b/tests/db_migrator_input/config_db/mellanox-sn3800-d112c8-t0-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn3800-d112c8-t0-version_2_0_0.json rename to tests/db_migrator_input/config_db/mellanox-sn3800-d112c8-t0-version_3_0_0.json index fa15acac8d..c6b81cd314 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn3800-d112c8-t0-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/mellanox-sn3800-d112c8-t0-version_3_0_0.json @@ -2044,6 +2044,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn3800-d112c8-t0-version_2_0_3.json 
b/tests/db_migrator_input/config_db/mellanox-sn3800-d112c8-t0-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn3800-d112c8-t0-version_2_0_3.json rename to tests/db_migrator_input/config_db/mellanox-sn3800-d112c8-t0-version_3_0_3.json index 9cddd16235..e44770b371 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn3800-d112c8-t0-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/mellanox-sn3800-d112c8-t0-version_3_0_3.json @@ -2044,6 +2044,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn3800-d112c8-t1-version_2_0_0.json b/tests/db_migrator_input/config_db/mellanox-sn3800-d112c8-t1-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn3800-d112c8-t1-version_2_0_0.json rename to tests/db_migrator_input/config_db/mellanox-sn3800-d112c8-t1-version_3_0_0.json index 9093aa176b..36877e0653 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn3800-d112c8-t1-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/mellanox-sn3800-d112c8-t1-version_3_0_0.json @@ -2158,6 +2158,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn3800-d112c8-t1-version_2_0_3.json b/tests/db_migrator_input/config_db/mellanox-sn3800-d112c8-t1-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn3800-d112c8-t1-version_2_0_3.json rename to tests/db_migrator_input/config_db/mellanox-sn3800-d112c8-t1-version_3_0_3.json index 6169646c08..1db7804072 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn3800-d112c8-t1-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/mellanox-sn3800-d112c8-t1-version_3_0_3.json @@ -2158,6 +2158,6 @@ 
"admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn3800-d24c52-t0-version_2_0_0.json b/tests/db_migrator_input/config_db/mellanox-sn3800-d24c52-t0-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn3800-d24c52-t0-version_2_0_0.json rename to tests/db_migrator_input/config_db/mellanox-sn3800-d24c52-t0-version_3_0_0.json index a4477de9ad..bcbb6cb3fc 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn3800-d24c52-t0-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/mellanox-sn3800-d24c52-t0-version_3_0_0.json @@ -1764,6 +1764,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn3800-d24c52-t0-version_2_0_3.json b/tests/db_migrator_input/config_db/mellanox-sn3800-d24c52-t0-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn3800-d24c52-t0-version_2_0_3.json rename to tests/db_migrator_input/config_db/mellanox-sn3800-d24c52-t0-version_3_0_3.json index 9358cc568e..ee0e7e0065 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn3800-d24c52-t0-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/mellanox-sn3800-d24c52-t0-version_3_0_3.json @@ -1764,6 +1764,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn3800-d24c52-t1-version_2_0_0.json b/tests/db_migrator_input/config_db/mellanox-sn3800-d24c52-t1-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn3800-d24c52-t1-version_2_0_0.json rename to tests/db_migrator_input/config_db/mellanox-sn3800-d24c52-t1-version_3_0_0.json index 
4772573a66..408fe08bbc 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn3800-d24c52-t1-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/mellanox-sn3800-d24c52-t1-version_3_0_0.json @@ -1886,6 +1886,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn3800-d24c52-t1-version_2_0_3.json b/tests/db_migrator_input/config_db/mellanox-sn3800-d24c52-t1-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn3800-d24c52-t1-version_2_0_3.json rename to tests/db_migrator_input/config_db/mellanox-sn3800-d24c52-t1-version_3_0_3.json index 0ae91b3c53..96367faeaf 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn3800-d24c52-t1-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/mellanox-sn3800-d24c52-t1-version_3_0_3.json @@ -1886,6 +1886,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn3800-d28c50-t0-version_2_0_0.json b/tests/db_migrator_input/config_db/mellanox-sn3800-d28c50-t0-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn3800-d28c50-t0-version_2_0_0.json rename to tests/db_migrator_input/config_db/mellanox-sn3800-d28c50-t0-version_3_0_0.json index 941a6fdb75..87d27836b5 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn3800-d28c50-t0-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/mellanox-sn3800-d28c50-t0-version_3_0_0.json @@ -1764,6 +1764,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn3800-d28c50-t0-version_2_0_3.json 
b/tests/db_migrator_input/config_db/mellanox-sn3800-d28c50-t0-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn3800-d28c50-t0-version_2_0_3.json rename to tests/db_migrator_input/config_db/mellanox-sn3800-d28c50-t0-version_3_0_3.json index 362f763df1..a267b49c01 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn3800-d28c50-t0-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/mellanox-sn3800-d28c50-t0-version_3_0_3.json @@ -1764,6 +1764,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn3800-d28c50-t1-version_2_0_0.json b/tests/db_migrator_input/config_db/mellanox-sn3800-d28c50-t1-version_3_0_0.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn3800-d28c50-t1-version_2_0_0.json rename to tests/db_migrator_input/config_db/mellanox-sn3800-d28c50-t1-version_3_0_0.json index 429324cb11..a22fe139e3 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn3800-d28c50-t1-version_2_0_0.json +++ b/tests/db_migrator_input/config_db/mellanox-sn3800-d28c50-t1-version_3_0_0.json @@ -1886,6 +1886,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/mellanox-sn3800-d28c50-t1-version_2_0_3.json b/tests/db_migrator_input/config_db/mellanox-sn3800-d28c50-t1-version_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/mellanox-sn3800-d28c50-t1-version_2_0_3.json rename to tests/db_migrator_input/config_db/mellanox-sn3800-d28c50-t1-version_3_0_3.json index 0a97a493ec..c5e62e2795 100644 --- a/tests/db_migrator_input/config_db/mellanox-sn3800-d28c50-t1-version_2_0_3.json +++ b/tests/db_migrator_input/config_db/mellanox-sn3800-d28c50-t1-version_3_0_3.json @@ -1886,6 +1886,6 @@ 
"admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/non-default-config-expected.json b/tests/db_migrator_input/config_db/non-default-config-expected.json index dab1997e24..55cca5cb48 100644 --- a/tests/db_migrator_input/config_db/non-default-config-expected.json +++ b/tests/db_migrator_input/config_db/non-default-config-expected.json @@ -1115,6 +1115,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/non-default-lossless-profile-in-pg-expected.json b/tests/db_migrator_input/config_db/non-default-lossless-profile-in-pg-expected.json index b0e6400785..ed18c0a859 100644 --- a/tests/db_migrator_input/config_db/non-default-lossless-profile-in-pg-expected.json +++ b/tests/db_migrator_input/config_db/non-default-lossless-profile-in-pg-expected.json @@ -2044,6 +2044,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/non-default-lossy-profile-in-pg-expected.json b/tests/db_migrator_input/config_db/non-default-lossy-profile-in-pg-expected.json index f42708ea49..6017b6a356 100644 --- a/tests/db_migrator_input/config_db/non-default-lossy-profile-in-pg-expected.json +++ b/tests/db_migrator_input/config_db/non-default-lossy-profile-in-pg-expected.json @@ -2042,6 +2042,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/non-default-pg-expected.json b/tests/db_migrator_input/config_db/non-default-pg-expected.json index efa881e34d..314ac69318 100644 --- a/tests/db_migrator_input/config_db/non-default-pg-expected.json +++ 
b/tests/db_migrator_input/config_db/non-default-pg-expected.json @@ -2040,6 +2040,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/non-default-xoff-expected.json b/tests/db_migrator_input/config_db/non-default-xoff-expected.json index 0d762a5baa..ee68ba5745 100644 --- a/tests/db_migrator_input/config_db/non-default-xoff-expected.json +++ b/tests/db_migrator_input/config_db/non-default-xoff-expected.json @@ -2049,6 +2049,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/port-an-expected.json b/tests/db_migrator_input/config_db/port-an-expected.json index 1f3a461206..1ef2cf4916 100644 --- a/tests/db_migrator_input/config_db/port-an-expected.json +++ b/tests/db_migrator_input/config_db/port-an-expected.json @@ -35,6 +35,6 @@ "fec": "none" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_1" + "VERSION": "version_3_0_1" } } diff --git a/tests/db_migrator_input/config_db/port-an-input.json b/tests/db_migrator_input/config_db/port-an-input.json index 373c9ae989..6cda388135 100644 --- a/tests/db_migrator_input/config_db/port-an-input.json +++ b/tests/db_migrator_input/config_db/port-an-input.json @@ -34,6 +34,6 @@ "fec": "none" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_3_0_0" } } \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/portchannel-expected.json b/tests/db_migrator_input/config_db/portchannel-expected.json index acc61d1316..2644e5f4e9 100644 --- a/tests/db_migrator_input/config_db/portchannel-expected.json +++ b/tests/db_migrator_input/config_db/portchannel-expected.json @@ -33,7 +33,7 @@ "lacp_key": "auto" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_2" + "VERSION": "version_3_0_2" } } diff --git 
a/tests/db_migrator_input/config_db/portchannel-input.json b/tests/db_migrator_input/config_db/portchannel-input.json index 5a890755a0..753a88601d 100644 --- a/tests/db_migrator_input/config_db/portchannel-input.json +++ b/tests/db_migrator_input/config_db/portchannel-input.json @@ -28,6 +28,6 @@ "mtu": "9100" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_1" + "VERSION": "version_3_0_1" } } diff --git a/tests/db_migrator_input/config_db/qos_map_table_expected.json b/tests/db_migrator_input/config_db/qos_map_table_expected.json index 946f7f02d9..7f1a6fd1f2 100644 --- a/tests/db_migrator_input/config_db/qos_map_table_expected.json +++ b/tests/db_migrator_input/config_db/qos_map_table_expected.json @@ -1,6 +1,6 @@ { "VERSIONS|DATABASE": { - "VERSION": "version_2_0_5" + "VERSION": "version_3_0_5" }, "PORT_QOS_MAP": { "Ethernet0": { diff --git a/tests/db_migrator_input/config_db/qos_map_table_input.json b/tests/db_migrator_input/config_db/qos_map_table_input.json index c7dbce9658..f8db38a031 100644 --- a/tests/db_migrator_input/config_db/qos_map_table_input.json +++ b/tests/db_migrator_input/config_db/qos_map_table_input.json @@ -1,6 +1,6 @@ { "VERSIONS|DATABASE": { - "VERSION": "version_2_0_4" + "VERSION": "version_3_0_4" }, "PORT_QOS_MAP": { "Ethernet0": { diff --git a/tests/db_migrator_input/config_db/qos_tables_db_field_value_reference_format_2_0_1.json b/tests/db_migrator_input/config_db/qos_tables_db_field_value_reference_format_3_0_1.json similarity index 99% rename from tests/db_migrator_input/config_db/qos_tables_db_field_value_reference_format_2_0_1.json rename to tests/db_migrator_input/config_db/qos_tables_db_field_value_reference_format_3_0_1.json index 4b14ad7568..7d7e237a75 100644 --- a/tests/db_migrator_input/config_db/qos_tables_db_field_value_reference_format_2_0_1.json +++ b/tests/db_migrator_input/config_db/qos_tables_db_field_value_reference_format_3_0_1.json @@ -3682,7 +3682,7 @@ } }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_1" + 
"VERSION": "version_3_0_1" }, "WRED_PROFILE": { "AZURE_LOSSLESS": { diff --git a/tests/db_migrator_input/config_db/qos_tables_db_field_value_reference_format_2_0_3.json b/tests/db_migrator_input/config_db/qos_tables_db_field_value_reference_format_3_0_3.json similarity index 99% rename from tests/db_migrator_input/config_db/qos_tables_db_field_value_reference_format_2_0_3.json rename to tests/db_migrator_input/config_db/qos_tables_db_field_value_reference_format_3_0_3.json index 9d22794dc2..492528989b 100644 --- a/tests/db_migrator_input/config_db/qos_tables_db_field_value_reference_format_2_0_3.json +++ b/tests/db_migrator_input/config_db/qos_tables_db_field_value_reference_format_3_0_3.json @@ -3692,7 +3692,7 @@ } }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" }, "WRED_PROFILE": { "AZURE_LOSSLESS": { diff --git a/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-double-pools-expected.json b/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-double-pools-expected.json index 03b21b7d4c..88d81e5cff 100644 --- a/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-double-pools-expected.json +++ b/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-double-pools-expected.json @@ -284,6 +284,6 @@ "description": "Servers6:eth0" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } diff --git a/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-double-pools-input.json b/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-double-pools-input.json index 7d059812f5..691572ea3e 100644 --- a/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-double-pools-input.json +++ b/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-double-pools-input.json @@ -200,6 +200,6 @@ "description": "Servers6:eth0" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_2" + "VERSION": "version_3_0_2" } } diff --git 
a/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-single-pool-expected.json b/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-single-pool-expected.json index 52aeb07c93..fdd4e35807 100644 --- a/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-single-pool-expected.json +++ b/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-single-pool-expected.json @@ -280,6 +280,6 @@ "description": "Servers6:eth0" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } diff --git a/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-single-pool-input.json b/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-single-pool-input.json index 3ec46e9c63..c43d7e7f3c 100644 --- a/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-single-pool-input.json +++ b/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-single-pool-input.json @@ -196,6 +196,6 @@ "description": "Servers6:eth0" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_2" + "VERSION": "version_3_0_2" } } diff --git a/tests/db_migrator_input/config_db/reclaiming-buffer-traditional-double-pools-expected.json b/tests/db_migrator_input/config_db/reclaiming-buffer-traditional-double-pools-expected.json index 7b2b0d658d..97eed056b3 100644 --- a/tests/db_migrator_input/config_db/reclaiming-buffer-traditional-double-pools-expected.json +++ b/tests/db_migrator_input/config_db/reclaiming-buffer-traditional-double-pools-expected.json @@ -343,6 +343,6 @@ "description": "Servers6:eth0" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } diff --git a/tests/db_migrator_input/config_db/reclaiming-buffer-traditional-double-pools-input.json b/tests/db_migrator_input/config_db/reclaiming-buffer-traditional-double-pools-input.json index a7832fe14d..bb3ded3b33 100644 --- a/tests/db_migrator_input/config_db/reclaiming-buffer-traditional-double-pools-input.json +++ 
b/tests/db_migrator_input/config_db/reclaiming-buffer-traditional-double-pools-input.json @@ -235,6 +235,6 @@ "description": "Servers6:eth0" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_2" + "VERSION": "version_3_0_2" } } diff --git a/tests/db_migrator_input/config_db/reclaiming-buffer-traditional-single-pool-expected.json b/tests/db_migrator_input/config_db/reclaiming-buffer-traditional-single-pool-expected.json index 557072bd86..8cbe35ffb0 100644 --- a/tests/db_migrator_input/config_db/reclaiming-buffer-traditional-single-pool-expected.json +++ b/tests/db_migrator_input/config_db/reclaiming-buffer-traditional-single-pool-expected.json @@ -338,6 +338,6 @@ "description": "Servers6:eth0" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_3" + "VERSION": "version_3_0_3" } } diff --git a/tests/db_migrator_input/config_db/reclaiming-buffer-traditional-single-pool-input.json b/tests/db_migrator_input/config_db/reclaiming-buffer-traditional-single-pool-input.json index 85f50fe3a3..1b6cc7138f 100644 --- a/tests/db_migrator_input/config_db/reclaiming-buffer-traditional-single-pool-input.json +++ b/tests/db_migrator_input/config_db/reclaiming-buffer-traditional-single-pool-input.json @@ -235,6 +235,6 @@ "description": "Servers6:eth0" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_2" + "VERSION": "version_3_0_2" } } diff --git a/tests/db_migrator_input/config_db/reclaiming-buffer-warmreboot-expected.json b/tests/db_migrator_input/config_db/reclaiming-buffer-warmreboot-expected.json index 09d6fc8c77..5037644faf 100644 --- a/tests/db_migrator_input/config_db/reclaiming-buffer-warmreboot-expected.json +++ b/tests/db_migrator_input/config_db/reclaiming-buffer-warmreboot-expected.json @@ -2043,6 +2043,6 @@ "admin_status": "up" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_4" + "VERSION": "version_3_0_4" } } diff --git a/tests/db_migrator_test.py b/tests/db_migrator_test.py index 7e0d505775..b688aa2de5 100644 --- a/tests/db_migrator_test.py +++ 
b/tests/db_migrator_test.py @@ -43,9 +43,9 @@ def setup_class(cls): cls.config_db_tables_to_verify = ['BUFFER_POOL', 'BUFFER_PROFILE', 'BUFFER_PG', 'DEFAULT_LOSSLESS_BUFFER_PARAMETER', 'LOSSLESS_TRAFFIC_PATTERN', 'VERSIONS', 'DEVICE_METADATA'] cls.appl_db_tables_to_verify = ['BUFFER_POOL_TABLE:*', 'BUFFER_PROFILE_TABLE:*', 'BUFFER_PG_TABLE:*', 'BUFFER_QUEUE:*', 'BUFFER_PORT_INGRESS_PROFILE_LIST:*', 'BUFFER_PORT_EGRESS_PROFILE_LIST:*'] cls.warm_reboot_from_version = 'version_1_0_6' - cls.warm_reboot_to_version = 'version_2_0_3' + cls.warm_reboot_to_version = 'version_3_0_3' - cls.version_list = ['version_1_0_1', 'version_1_0_2', 'version_1_0_3', 'version_1_0_4', 'version_1_0_5', 'version_1_0_6', 'version_2_0_0', 'version_2_0_3'] + cls.version_list = ['version_1_0_1', 'version_1_0_2', 'version_1_0_3', 'version_1_0_4', 'version_1_0_5', 'version_1_0_6', 'version_3_0_0', 'version_3_0_3'] os.environ['UTILITIES_UNIT_TESTING'] = "2" @classmethod @@ -211,7 +211,7 @@ def test_mellanox_buffer_reclaiming(self, buffer_model, ingress_pools): dbmgtr = db_migrator.DBMigrator(None) dbmgtr.migrate() expected_db = self.mock_dedicated_config_db(db_after_migrate) - advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_2_0_3') + advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_3_0_3') tables_to_verify = self.config_db_tables_to_verify tables_to_verify.extend(['BUFFER_QUEUE', 'BUFFER_PORT_INGRESS_PROFILE_LIST', 'BUFFER_PORT_EGRESS_PROFILE_LIST']) self.check_config_db(dbmgtr.configDB, expected_db.cfgdb, tables_to_verify) @@ -241,7 +241,7 @@ def test_port_autoneg_migrator(self): dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'port-an-expected') expected_db = Db() - advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_2_0_1') + advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_3_0_1') assert dbmgtr.configDB.get_table('PORT') == 
expected_db.cfgdb.get_table('PORT') assert dbmgtr.configDB.get_table('VERSIONS') == expected_db.cfgdb.get_table('VERSIONS') @@ -289,7 +289,7 @@ def test_lacp_key_migrator(self): dbmgtr.migrate() dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'portchannel-expected') expected_db = Db() - advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_2_0_2') + advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_3_0_2') assert dbmgtr.configDB.get_table('PORTCHANNEL') == expected_db.cfgdb.get_table('PORTCHANNEL') assert dbmgtr.configDB.get_table('VERSIONS') == expected_db.cfgdb.get_table('VERSIONS') @@ -335,8 +335,8 @@ def check_appl_db(self, result, expected): assert expected.get_all(expected.APPL_DB, key) == result.get_all(result.APPL_DB, key) def test_qos_buffer_migrator_for_cold_reboot(self): - db_before_migrate = 'qos_tables_db_field_value_reference_format_2_0_1' - db_after_migrate = 'qos_tables_db_field_value_reference_format_2_0_3' + db_before_migrate = 'qos_tables_db_field_value_reference_format_3_0_1' + db_after_migrate = 'qos_tables_db_field_value_reference_format_3_0_3' db = self.mock_dedicated_config_db(db_before_migrate) _ = self.mock_dedicated_appl_db(db_before_migrate) import db_migrator @@ -344,7 +344,8 @@ def test_qos_buffer_migrator_for_cold_reboot(self): dbmgtr.migrate() expected_db = self.mock_dedicated_config_db(db_after_migrate) expected_appl_db = self.mock_dedicated_appl_db(db_after_migrate) - advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_2_0_3') + advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_3_0_3') + self.check_config_db(dbmgtr.configDB, expected_db.cfgdb) self.check_appl_db(dbmgtr.appDB, expected_appl_db) self.clear_dedicated_mock_dbs() From e1866e364b8c1b4596fb831b741c7bd8cbf69059 Mon Sep 17 00:00:00 2001 From: "Marty Y. 
Lok" <76118573+mlok-nokia@users.noreply.github.com> Date: Wed, 20 Jul 2022 19:12:05 -0400 Subject: [PATCH 33/34] [MultiAsic] sudo reboot command doesn't gracefully stop Asic syncd# on multiasic platform (#2258) What I did Function stop_sonic_service() in /usr/local/bin/reboot script doesn't handle stopping the Asic syncd# on multiasic platform. Instead, it only stops the syncd on non-multiasic platform. When issue command "sudo reboot", the below message will be shown. admin@sonic:~$ sudo reboot Error: No such container: syncd Fixes Azure/sonic-buildimage#11377 How I did it Add code the stop_sonic_services() to check and get NUM_ASIC. If it is multiasic, looping all asics and call the syncd_request_shutdown for each asic. How to verify it Issue the "sudo reboot" on the multiasic platform, the error message "Error: No such container: syncd" should not be shown. --- scripts/reboot | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/scripts/reboot b/scripts/reboot index fa7315b3b9..21be3427ed 100755 --- a/scripts/reboot +++ b/scripts/reboot @@ -72,8 +72,21 @@ function stop_sonic_services() fi if [[ x"$ASIC_TYPE" != x"mellanox" ]]; then - debug "Stopping syncd process..." - docker exec -i syncd /usr/bin/syncd_request_shutdown --cold > /dev/null + ASIC_CONF=${DEVPATH}/$PLATFORM/asic.conf + if [ -f "$ASIC_CONF" ]; then + source $ASIC_CONF + fi + if [[ ($NUM_ASIC -gt 1) ]]; then + asic_num=0 + while [[ ($asic_num -lt $NUM_ASIC) ]]; do + debug "Stopping syncd$asic_num process..." + docker exec -i syncd$asic_num /usr/bin/syncd_request_shutdown --cold > /dev/null + ((asic_num = asic_num + 1)) + done + else + debug "Stopping syncd process..." 
+ docker exec -i syncd /usr/bin/syncd_request_shutdown --cold > /dev/null + fi sleep 3 fi stop_pmon_service From 27667cf46a8cb642b9bdb7c800cac7cd35176629 Mon Sep 17 00:00:00 2001 From: bingwang-ms <66248323+bingwang-ms@users.noreply.github.com> Date: Thu, 21 Jul 2022 16:27:30 +0800 Subject: [PATCH 34/34] Fix test for pfcwd_sw_enable in db_migrator_test (#2253) --- .../config_db/qos_map_table_expected.json | 56 +++++++++---------- .../config_db/qos_map_table_input.json | 53 +++++++++--------- 2 files changed, 52 insertions(+), 57 deletions(-) diff --git a/tests/db_migrator_input/config_db/qos_map_table_expected.json b/tests/db_migrator_input/config_db/qos_map_table_expected.json index 7f1a6fd1f2..e75740f02c 100644 --- a/tests/db_migrator_input/config_db/qos_map_table_expected.json +++ b/tests/db_migrator_input/config_db/qos_map_table_expected.json @@ -2,35 +2,33 @@ "VERSIONS|DATABASE": { "VERSION": "version_3_0_5" }, - "PORT_QOS_MAP": { - "Ethernet0": { - "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]", - "pfc_enable": "3,4", - "pfcwd_sw_enable": "3,4", - "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", - "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", - "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]" - }, - "Ethernet100": { - "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]", - "pfc_enable": "3,4", - "pfcwd_sw_enable": "3,4", - "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", - "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", - "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]" - }, - "Ethernet92": { - "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]", - "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", - "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", - "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]" - }, - "Ethernet96": { - "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]", - "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", - "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", - "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]" - } + "PORT_QOS_MAP|Ethernet0": { + 
"dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]", + "pfc_enable": "3,4", + "pfcwd_sw_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]" + }, + "PORT_QOS_MAP|Ethernet100": { + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]", + "pfc_enable": "3,4", + "pfcwd_sw_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]" + }, + "PORT_QOS_MAP|Ethernet92": { + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]" + }, + "PORT_QOS_MAP|Ethernet96": { + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]" } } diff --git a/tests/db_migrator_input/config_db/qos_map_table_input.json b/tests/db_migrator_input/config_db/qos_map_table_input.json index f8db38a031..4bb237588a 100644 --- a/tests/db_migrator_input/config_db/qos_map_table_input.json +++ b/tests/db_migrator_input/config_db/qos_map_table_input.json @@ -2,33 +2,30 @@ "VERSIONS|DATABASE": { "VERSION": "version_3_0_4" }, - "PORT_QOS_MAP": { - "Ethernet0": { - "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]", - "pfc_enable": "3,4", - "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", - "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", - "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]" - }, - "Ethernet100": { - "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]", - "pfc_enable": "3,4", - "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", - "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", - "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]" - }, - "Ethernet92": { - "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]", - 
"pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", - "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", - "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]" - }, - "Ethernet96": { - "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]", - "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", - "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", - "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]" - } + "PORT_QOS_MAP|Ethernet0": { + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]" + }, + "PORT_QOS_MAP|Ethernet100": { + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]" + }, + "PORT_QOS_MAP|Ethernet92": { + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]" + }, + "PORT_QOS_MAP|Ethernet96": { + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]" } } -