From e3e8f092bd957220507c121a08b639fa070e08e5 Mon Sep 17 00:00:00 2001 From: vdahiya12 <67608553+vdahiya12@users.noreply.github.com> Date: Mon, 23 Aug 2021 14:06:50 -0700 Subject: [PATCH] [Y-Cable][Broadcom] Broadcom implementation of YCable class which inherits from YCableBase required for Y-Cable API's in sonic-platform-daemons (#208) This PR adds support for the YCable class required for platform-daemons to use the YCable APIs for Broadcom. Description Basically a vendor-specific implementation of the abstract YCableBase class. A detailed design discussion can be found at https://github.com/Azure/SONiC/pull/757/files Motivation and Context Required for transitioning to vendor-agnostic APIs to be called by xcvrd, so that all types of cables can be supported. How Has This Been Tested? Ran the changes on an Arista7050cx3 switch, making the changes inside the container. Signed-off-by: vaibhav-dahiya --- setup.py | 1 + sonic_y_cable/broadcom/__init__.py | 0 sonic_y_cable/broadcom/y_cable_broadcom.py | 6203 ++++++++++++++++++++ sonic_y_cable/y_cable_vendor_mapping.py | 14 + 4 files changed, 6218 insertions(+) create mode 100644 sonic_y_cable/broadcom/__init__.py create mode 100644 sonic_y_cable/broadcom/y_cable_broadcom.py diff --git a/setup.py b/setup.py index de33724fc..451246384 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,7 @@ 'sonic_thermal', 'sonic_y_cable', 'sonic_y_cable.credo', + 'sonic_y_cable.broadcom', ], # NOTE: Install also depends on sonic-config-engine for portconfig.py # This dependency should be eliminated by moving portconfig.py diff --git a/sonic_y_cable/broadcom/__init__.py b/sonic_y_cable/broadcom/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/sonic_y_cable/broadcom/y_cable_broadcom.py b/sonic_y_cable/broadcom/y_cable_broadcom.py new file mode 100644 index 000000000..be800400b --- /dev/null +++ b/sonic_y_cable/broadcom/y_cable_broadcom.py @@ -0,0 +1,6203 @@ +# +# bcm_cable_api.py +# +# property +# $Copyright: (c) 2021 Broadcom. +# Broadcom Proprietary and Confidential. All rights reserved. +# +# definitions for implementing Y cable access and configuration +# APIs for Y cable functionality in SONiC +# + +from sonic_y_cable.y_cable_base import YCableBase + +try: + import time + import struct + import array + import math + import os + import threading + from ctypes import c_int16 + from datetime import datetime + from contextlib import contextmanager + + #from chassis import chassis + import sonic_platform.platform + #from sonic_py_common import logger +except ImportError as e: + print("{}".format(e)) + # When building python3 xcvrd, it tries to do a basic check which will import this file. However, + # not all platforms support the python3 API yet, so it could cause an issue when importing + # sonic_platform.platform. We skip the ImportError here. This is safe because: + # 1. If any python package is not available, there will be an exception when it is used + # 2. Vendors know their platform API version; they are responsible for using the correct python + # version when importing this file. 
+ #pass + +# strut definitions used in fw related functions +class cable_image_version_s(object): + + def __init__(self): + self.image_version_minor = 0 + self.image_version_major = 0 + +class cable_image_info_s(object): + + def __init__(self): + self.image_fw_version = cable_image_version_s() + self.image_api_version = cable_image_version_s() + self.image_crc32 = 0 + self.image_ptr = array.array('H', []) + self.image_size = 0 + +class cable_bank_info_s(object): + + def __init__(self): + self.image_fw_version = cable_image_version_s() + self.image_api_version = cable_image_version_s() + self.image_crc32 = 0 + +class cable_status_info_s(): + + def __init__(self): + self.current_bank = 0 + self.next_bank = 0 + self.bank1_info = cable_bank_info_s() + self.bank2_info = cable_bank_info_s() + +class cable_upgrade_info_s(): + + def __init__(self): + self.image_info = cable_image_info_s() + self.status_info = cable_status_info_s() + self.destination = 0 + self.bank = 0 + + +class valid_port_option_table_s: + + def __init__(self, speed, fec_tor, fec_nic, anlt_tor, anlt_nic, mode): + self.speed = speed + self.fec_tor = fec_tor + self.fec_nic = fec_nic + self.anlt_tor = anlt_tor + self.anlt_nic = anlt_nic + self.mode = mode + + +ENABLE_DBG_PRINT = False + +def enable_debug_print(flag): + global ENABLE_DBG_PRINT + ENABLE_DBG_PRINT = flag + +def debug_print(log_msg): + if ENABLE_DBG_PRINT: + curr_timestamp = datetime.utcnow() + cur_tstr = curr_timestamp.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3] + print("({}) {} : {}".format(threading.currentThread().getName(), cur_tstr, log_msg)) + return None + +# +# Lock for port access for thread safe +# +class PortLock(object): + def __init__(self, port_nbr): + self.port_nbr = port_nbr + self.lock = threading.RLock() + + #def __del__(self): + # print("PortLock {} destroyed".format(self.port_nbr)) + + def __enter__(self): + self.lock.acquire() + debug_print("(with) acquired lock for port {}".format(self.port_nbr)) + + def __exit__(self, exc_type, exc_value, traceback): + self.lock.release() + debug_print("(with) released lock for port {}".format(self.port_nbr)) + + @contextmanager + def acquire_timeout(self, timeout): + result = self.lock.acquire(timeout=timeout) + debug_print("(with timeout) acquired lock for port {}".format(self.port_nbr)) + yield result + if result: + self.lock.release() + debug_print("(with timeout) released lock for port {}".format(self.port_nbr)) + + def get_port_nbr(self): + return self.port_nbr + + def acquire(self): + self.lock.acquire() + debug_print("explicitly acquired lock for port {}".format(self.port_nbr)) + + def release(self): + self.lock.release() + debug_print("explicitly released lock for port {}".format(self.port_nbr)) + +# +# BCM Y Cable implementation derived from y_cable_base +# +class YCable(YCableBase): + + + # definitions of the modes to be run for loopback mode + # on the port/cable + LOOPBACK_MODE_NEAR_END = 1 + + # definitions of PRBS run modes + PRBS_DIRECTION_BOTH = 0 + PRBS_DIRECTION_GENERATOR = 1 + PRBS_DIRECTION_CHECKER = 2 + + BCM_API_VERSION = "1.0" + CONSOLE_PRINT = False + + # Log levels + LOG_INFO = 1 + LOG_WARN = 2 + LOG_DEBUG = 3 + LOG_ERROR = 4 + + CABLE_MODE_100G_FEC = 0 + CABLE_MODE_100G_PCS = 1 + CABLE_MODE_50G_FEC = 2 + CABLE_MODE_50G_PCS = 3 + + PORT_SPEED_50 = 0 + PORT_SPEED_100 = 1 + + FEC_MODE_NONE = 0 + FEC_MODE_RS = 1 + PORT_FEC_FC = 2 + + ANLT_DISABLED = 1 + ANLT_ENABLED = 2 + ANLT_DONT_CARE = 3 + #ANLT_DONT_CARE = True + + CABLE_MODE_50G = 50000 + CABLE_MODE_100G = 100000 + + PORT_LOCK_TIMEOUT = 30 
# in seconds + + + # Register absolute addresses + QSFP28_LP_3_TX_RX_LOSS = 0x00000003 + QSFP28_LP_5_TX_RX_CDR_LOL = 0x00000005 + QSFP28_LOS_LOL_SEC = 0x0000004A + QSFP28_LINK_DOWN = 0x0000004B + QSFP28_LINK_FAULT = 0x0000004C + QSFP28_MESC_FAULT = 0x0000004D + QSFP28_LP_22_TEMP_MSB = 0x00000016 + QSFP_SQL_STATUS = 0x0000004E + QSFP_LP_31_VENDOR = 0x0000004F # CHANGED to 79 -- Not used. Check! + QSFP_LINK_FAULT_MASK = 0x00007F61 + QSFP_MESC_MASK = 0x00007F62 + QSFP28_LP_100_TX_RX_LOS_MASK = 0x00000064 + QSFP28_LP_102_TX_RX_CDR_LOL_MASK = 0x00000066 + QSFP28_LOS_LOL_SEC_MASK = 0x00007F63 + QSFP28_UP_DOWN_MASK = 0x00007F64 + QSFP28_UP0_148_VENDOR_NAME_0 = 0x00000094 + QSFP28_UP0_168_PN_1 = 0x000000a8 + QSFP28_UP0_224_SPECIFIC_1_RSV = 0x000000e0 + QSFP_BRCM_CABLE_CMD = 0x00000013 + QSFP_BRCM_CABLE_CTRL_CMD_STS = 0x00000014 + QSFP_VEN_FE_130_BRCM_DATA_LENGHT_LSB = 0x00007f82 + QSFP28_VENFD_129_DIE_TEMP_MSB = 0x00007f01 + QSFP28_VENFD_130_DIE_VOLTAGE_LSB = 0x00007f02 + QSFP28_VENFD_184_NIC_TORB_TORA_RESET = 0x00007f38 + QSFP28_VENFD_216_LINK_STATUS = 0x00007f58 + QSFP28_RESET_SELF_OFFSET = 0x0000005D + + # temperature and voltage register offsets + QSFP28_VENFD_128_DIE_TEMP_LSB = 0x00007f00 + QSFP28_VENFD_131_DIE_VOLTAGE_MSB = 0x00007f03 + QSFP28_VENFD_134_TORA_TEMP_MSB = 0x00007f06 + QSFP28_VENFD_135_TORA_TEMP_LSB = 0x00007f07 + QSFP28_VENFD_138_TORB_TEMP_MSB = 0x00007f0a + QSFP28_VENFD_139_TORB_TEMP_LSB = 0x00007f0b + QSFP28_VENFD_142_NIC_TEMP_MSB = 0x00007f0e + QSFP28_VENFD_143_NIC_TEMP_LSB = 0x00007f0f + + QSFP28_LP_QSFP28_LP_2_STATUS_CR = 0x00000002 + + + # User defined + CMD_REQ_PARAM_START_OFFSET = 0x7F87 + CMD_RSP_PARAM_START_OFFSET = 0x7FB8 + + MAX_REQ_PARAM_LEN = 0x30 + MAX_RSP_PARAM_LEN = 0x77 + + # command IDs + CABLE_CMD_ID_PRBS_SET = 0x01 + CABLE_CMD_ID_PRBS_CHK = 0x03 + CABLE_CMD_ID_SET_LOOPBACK = 0x04 + CABLE_CMD_ID_GET_LOOPBACK = 0x05 + CABLE_CMD_ID_SET_TXFIR = 0x06 + CABLE_CMD_ID_GET_TXFIR = 0x07 + CABLE_CMD_ID_SET_ANLT = 0x08 + CABLE_CMD_ID_GET_ANLT = 0x09 + CABLE_CMD_ID_GET_ANLT_RESTART = 0x0A + CABLE_CMD_ID_GET_ANLT_GET_STATUS = 0x0B + CABLE_CMD_ID_SET_POLARITY = 0x0C + CABLE_CMD_ID_GET_POLARITY = 0x0D + CABLE_CMD_ID_SET_MODE = 0x0E + CABLE_CMD_ID_GET_MODE = 0x0F + CABLE_CMD_ID_GET_SQUELCH = 0x10 + CABLE_CMD_ID_SET_SQUELCH = 0x11 + CABLE_CMD_ID_GET_HMUX_CONFIG = 0x12 + CABLE_CMD_ID_SET_HMUX_CONFIG = 0x13 + CABLE_CMD_ID_GET_HMUX_CONTEXT = 0x14 + CABLE_CMD_ID_SET_HMUX_CONTEXT = 0x15 + CABLE_CMD_ID_GET_HMUX_STATS = 0x16 + CABLE_CMD_ID_READ_REG = 0x17 + CABLE_CMD_ID_WRITE_REG = 0x18 + CABLE_CMD_ID_ENABLE_PHY_CHIP = 0x19 + CABLE_CMD_ID_DISABLE_PHY_CHIP = 0x1A + CABLE_CMD_ID_DUMP_PAGE = 0x1B + CABLE_CMD_ID_GET_EYE_MARGIN = 0x1F + CABLE_CMD_ID_GET_SNR = 0x20 + CABLE_CMD_ID_SET_HMUX_CONTEXT_PRI= 0x21 + CABLE_CMD_ID_SET_HMUX_CONTEXT_SEC= 0x22 + + # Download commands + FW_CMD_START = 1 + FW_CMD_TRANSFER = 2 + FW_CMD_COMPLETE = 3 + FW_CMD_SWAP = 4 + FW_CMD_ABORT = 5 + FW_CMD_INFO = 6 + FW_CMD_RESET = 7 + + FW_UP_SUCCESS = 1 + FW_UP_IN_PROGRESS = 2 + + # destination values + TOR_MCU = 0x01 + TOR_MCU_SELF = 0x01 + NIC_MCU = 0x02 + MUX_CHIP = 0x03 + TOR_MCU_PEER = 0x04 + + # FW image address + MCU_FW_IMG_INFO_ADDR = 0x3E7F0 + MCU_FW_IMG_SIZE = 0x3E800 + MUX_FW_IMG_INFO_ADDR = 0x3FFE0 + MUX_FW_IMG_SIZE = 0x40000 + FW_IMG_INFO_SIZE = 12 + FW_UP_PACKET_SIZE = 128 + + QSFP_BRCM_FW_UPGRADE_DATA_PAGE_1 = 0x81 + QSFP_BRCM_FW_UPGRADE_DATA_PAGE_2 = 0x82 + QSFP_BRCM_FW_UPGRADE_DATA_START = 0x80 + QSFP_BRCM_DIAGNOSTIC_PAGE = 0x04 + QSFP_BRCM_DIAGNOSTIC_STATUS = 0x81 + + 
QSFP_BRCM_FW_UPGRADE_PACKET_SIZE = 0x92 + QSFP_BRCM_FW_UPGRADE_CURRENT_BANK = 0x80 + + QSFP_BRCM_FW_UPGRADE_CTRL_CMD = 0x80 + QSFP_BRCM_FW_UPGRADE_CMD_STS = 0x81 + QSFP_BRCM_FW_UPGRADE_CTRL_STS = 0x81 + + QSFP_BRCM_FW_UPGRADE_PAGE = 0x80 + QSFP_BRCM_FW_UPGRADE_HEADER_0_7 = 0x82 + QSFP_BRCM_FW_UPGRADE_HEADER_24_31 = 0x85 + + # muxchip return codes + RR_ERROR = -1 #-255 # Error Category + RR_ERROR_SYSTEM_UNAVAILABLE = -1 #-250 # System Unavailable Error + RR_SUCCESS = 0 # Success + + # PRBS polynomials + CABLE_PRBS7 = 0 # PRBS poly 7 + CABLE_PRBS9 = 1 # PRBS poly 9 + CABLE_PRBS11 = 2 # PRBS poly 11 + CABLE_PRBS15 = 3 # PRBS poly 15 + CABLE_PRBS23 = 4 # PRBS poly 23 + CABLE_PRBS31 = 5 # PRBS poly 31 + CABLE_PRBS58 = 6 # PRBS poly 58 + CABLE_PRBS49 = 7 # PRBS poly 49 + CABLE_PRBS13 = 8 # PRBS poly 13 + + # Loopback modes + CABLE_NIC_GLOOPBACK_MODE = 0 #Global NIC loopback mode, line/NIC side deep or G-Loop + CABLE_NIC_RLOOPBACK_MODE = 1 #Remote NIC loopback mode, line/NIC side R-LOOP + CABLE_TOR_GLOOPBACK_MODE = 2 #Global TOR loopback mode, TOR side deep or G-Loop + CABLE_TOR_RLOOPBACK_MODE = 3 #Remote TOR loopback mode, side R-LOOP + + # core ip's + CORE_IP_ALL = 0 #Core IP ALL + CORE_IP_LW = 2 #Core IP Line Wrapper + CORE_IP_CLIENT = 3 #Core IP SerDes + CORE_IP_NIC = 1 + CORE_IP_TOR = 2 + CORE_IP_CENTRAL = 3 + + # Error codes returned from y_cable functions + ERROR_PLATFORM_NOT_LOADED = -1 #-1 + ERROR_CMD_STS_CHECK_FAILED = -1 #-2 + ERROR_WRITE_EEPROM_FAILED = -1 #-3 + ERROR_CMD_PROCESSING_FAILED = -1 #-4 + ERROR_MCU_NOT_RELEASED = -1 #-5 + ERROR_MCU_BUSY = -1 #-6 + ERROR_INVALID_PRBS_MODE = -1 #-8 + ERROR_INVALID_TARGET = -1 #-9 + ERROR_INVALID_DIRECTION = -1 #-10 + ERROR_INVALID_POLARITY = -1 #-11 + ERROR_CMD_EXEC_FAILED = -1 #-12 + ERROR_PORT_LOCK_TIMEOUT = -1 #-13 + ERROR_INVALID_INPUT = -1 #-14 + + ERROR_FW_GET_STATUS_FAILED = -1 #-15 + ERROR_NO_MATCHING_FW = -1 #-16 + ERROR_RESET_FAILED = -1 #-17 + ERROR_TOGGLE_FAILED = -1 #-18 + ERROR_FW_ACTIVATE_FAILURE = -1 #-19 + ERROR_FW_ROLLBACK_FAILURE = -1 #-20 + + WARNING_FW_ALREADY_ACTIVE = -1 #-50 + WARNING_FW_ALREADY_ROLLEDBACK = -1 #-51 + + EEPROM_READ_DATA_INVALID = -1 #-100 + EEPROM_ERROR = -1 #-101 + API_FAIL = -1 #-102 + + ERROR_RW_NIC_FAILED = -1 #-30 #Unable to communicate with NIC MCU + ERROR_RW_TOR_FAILED = -1 #-31 #Unable to communicate with TOR MCU + ERROR_GET_VERSION_FAILED = -1 #-32 #Unable to get firmware version from MCU + ERROR_FLASH_SIZE_INVALID = -1 #-33 #Firmware image size is greater than flash bank size + ERROR_FLASH_ERASE_FAILED = -1 #-34 #Flash erase failed + ERROR_FLASH_WRITE_FAILED = -1 #-35 #Flash write failed + ERROR_FLASH_READ_FAILED = -1 #-36 #Flash read failed + ERROR_CRC32_FAILED = -1 #-37 #Flash CRC validation failed + ERROR_CMD_TIMEOUT = -1 #-38 #No response after command sent + ERROR_SYSTEM_BUSY = -1 #-39 #System is busy + + + def __init__(self, port, logger1): + + self.port = port + self.platform_chassis = None + self.sfp = None + self.lock = PortLock(port) + self.logger = logger1 + self.download_firmware_status = self.FIRMWARE_DOWNLOAD_STATUS_NOT_INITIATED_OR_FINISHED + super(YCable, self).__init__(port, logger1) + try: + #self.platform_chassis = chassis() + self.platform_chassis = sonic_platform.platform.Platform().get_chassis() + self.sfp = self.platform_chassis.get_sfp(self.port) + + logger1.log_info("chassis loaded {}".format(self.platform_chassis)) + except Exception as e: + logger1.log_warning("Failed to load chassis due to {}".format(repr(e))) + 
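+    # Example usage (illustrative sketch only, assuming a SONiC host where
+    # sonic_platform and sonic_py_common are importable; the port number 1 and
+    # the logger identifier below are hypothetical placeholders, not part of
+    # this module):
+    #
+    #   from sonic_py_common import logger
+    #   from sonic_y_cable.broadcom.y_cable_broadcom import YCable
+    #
+    #   helper_logger = logger.Logger("y_cable_example")  # hypothetical identifier
+    #   y_cable = YCable(1, helper_logger)                 # port 1 is a placeholder
+    #   vendor = y_cable.get_vendor()                      # vendor name string
+    #   part_number = y_cable.get_part_number()            # part number string
+    #   read_side = y_cable.get_read_side()                # TARGET_TOR_A / TARGET_TOR_B / TARGET_NIC
+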
+############################################################################################# +### Broadcom internal/debug/utility functions ### +############################################################################################# + + def enable_all_log(self, enable): + if enable: + self.CONSOLE_PRINT = True + if self.logger is not None: + self.logger.set_min_log_priority(9) + print("Logging enabled...") + else: + self.CONSOLE_PRINT = False + if self.logger is not None: + self.logger.set_min_log_priority(7) + print("Logging disabled...") + + def __get_pid_str(self): + pid_str = "[{},{}] Port-{} : ".format(os.getpid(), threading.currentThread().getName(), self.port) + return pid_str + + def log_timestamp(self, last_timestamp, log_msg): + curr_timestamp = datetime.utcnow() + cur_tstr = curr_timestamp.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3] + difftime = curr_timestamp - last_timestamp + + tstr = "({}s {}ms)".format(difftime.seconds, difftime.microseconds//1000) + ret_str = cur_tstr + tstr + self.log(self.LOG_DEBUG, "{} : {}".format(ret_str, log_msg)) + + return curr_timestamp + + + def log(self, level, msg, also_print_to_console=False): + + msg = self.__get_pid_str() + msg + also_print_to_console = True if self.CONSOLE_PRINT else also_print_to_console + + if level == self.LOG_INFO: + self.logger.log_info(msg) + elif level == self.LOG_WARN: + self.logger.log_warning(msg) + elif level == self.LOG_DEBUG: + self.logger.log_debug(msg) + elif level == self.LOG_ERROR: + self.logger.log_error(msg) + + if self.CONSOLE_PRINT or also_print_to_console: + curr_timestamp = datetime.utcnow() + cur_tstr = curr_timestamp.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3] + msg = cur_tstr + " " + msg + print(msg) + + def __util_convert_to_phyinfo_details(self, target, lane_map): + """ + + This util API is internally used to simplify the calculation of core_ip, lane_mask + + """ + + if target == self.TARGET_NIC or target == self.EYE_PRBS_LOOPBACK_TARGET_NIC: + core_ip = self.CORE_IP_NIC + else: + core_ip = self.CORE_IP_TOR + + read_side = self.get_read_side() + is_torA = False + + if read_side == 1: + is_torA = True + + if (target == self.EYE_PRBS_LOOPBACK_TARGET_LOCAL): + target = read_side + + # if check is on TOR-B, make is_torA False + if (target == self.TARGET_TOR_B or target == self.EYE_PRBS_LOOPBACK_TARGET_TOR_B) and read_side == self.TARGET_TOR_A: + is_torA = False + # if check is on TOR-A and read side is TOR-B, make is_torA False + elif (target == self.TARGET_TOR_A or target == self.EYE_PRBS_LOOPBACK_TARGET_TOR_A) and read_side == self.TARGET_TOR_B: + is_torA = True + + #lane_mask = lane_map + + if core_ip == self.CORE_IP_NIC: + lane_mask = lane_map + else: + if is_torA is False: + lane_mask = ((lane_map << 4) & 0xF0) | ((lane_map >> 4) & 0x0F) + else: + lane_mask = lane_map + + if core_ip == self.CORE_IP_TOR: + core_ip = self.CORE_IP_CLIENT + elif core_ip == self.CORE_IP_NIC: + core_ip = self.CORE_IP_LW + else: + core_ip = self.CORE_IP_ALL + + return core_ip, lane_mask + + def __util_convert_to_loopback_phyinfo(self, target, lane_map, lb_mode): + """ + + This util API is internally used to simplify the calculation of core_ip, lane_mask + + """ + + if target == self.TARGET_NIC or target == self.EYE_PRBS_LOOPBACK_TARGET_NIC: + core_ip = self.CORE_IP_NIC + else: + core_ip = self.CORE_IP_TOR + + read_side = self.get_read_side() + is_torA = False + + if read_side == self.TARGET_TOR_A: + is_torA = True + + if (target == self.EYE_PRBS_LOOPBACK_TARGET_LOCAL): + target = read_side + + + # if target is TOR-B, but 
read_side is TOR-A, make is_torA False + if (target == self.TARGET_TOR_B or target == self.EYE_PRBS_LOOPBACK_TARGET_TOR_B) and read_side == self.TARGET_TOR_A: + is_torA = False + # if target is TOR-A but read_side is TOR-B, make is_torA True + elif (target == self.TARGET_TOR_A or target == self.EYE_PRBS_LOOPBACK_TARGET_TOR_A) and read_side == self.TARGET_TOR_B: + is_torA = True + + #lane_mask = lane_map + + if core_ip == self.CORE_IP_NIC: + lane_mask = lane_map + if lb_mode == self.LOOPBACK_MODE_FAR_END: + mode = self.CABLE_NIC_GLOOPBACK_MODE + elif lb_mode == self.LOOPBACK_MODE_NEAR_END: + mode = self.CABLE_NIC_RLOOPBACK_MODE + else: + self.log(self.LOG_ERROR, "Incorrect mode value") + + else: + if is_torA is False: + lane_mask = ((lane_map << 4) & 0xF0) | ((lane_map >> 4) & 0x0F) + if lb_mode == self.LOOPBACK_MODE_FAR_END: + mode = self.CABLE_TOR_GLOOPBACK_MODE + elif lb_mode == self.LOOPBACK_MODE_NEAR_END: + mode = self.CABLE_TOR_RLOOPBACK_MODE + else: + self.log(self.LOG_ERROR, "Incorrect mode value") + else: + lane_mask = lane_map + if lb_mode == self.LOOPBACK_MODE_FAR_END: + mode = self.CABLE_TOR_GLOOPBACK_MODE + elif lb_mode == self.LOOPBACK_MODE_NEAR_END: + mode = self.CABLE_TOR_RLOOPBACK_MODE + else: + self.log(self.LOG_ERROR, "Incorrect mode value") + + + if core_ip == self.CORE_IP_TOR: + core_ip = self.CORE_IP_CLIENT + elif core_ip == self.CORE_IP_NIC: + core_ip = self.CORE_IP_LW + else: + core_ip = self.CORE_IP_ALL + + return core_ip, lane_mask, mode + + def __cable_cmd_execute(self, command_id, cmd_hdr, cmd_req_body): + """ + Internal function, sends command request to MCU and returns the response from MCU + + Args: + command_id: + Command ID + cmd_hdr + command header containing details of the command + cmd_req_body + command request payload, to be sent to MCU + + Returns: + an integer, 0 if transaction is successful + , -N for failure + byte array, cmd_rsp_body containing command response + """ + + start_ts = datetime.utcnow() + ts = datetime.utcnow() + curr_offset = None + cmd_rsp_body = None + ret_val = 0 + + if self.platform_chassis is not None: + + debug_print("Trying for the lock") + with self.lock.acquire_timeout(self.PORT_LOCK_TIMEOUT) as result: + if result: + ts = self.log_timestamp(ts, "lock acquired") + # read cable command and status offsets + curr_offset = self.QSFP_BRCM_CABLE_CMD + result = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset, 2) + if result is None: + self.log(self.LOG_ERROR, "read eeprom failed") + return self.EEPROM_ERROR, None + + cmd_req = result[0] + cmd_sts = result[1] + ts = self.log_timestamp(ts, "read cmd/sts done") + + # if command request and status both are 1, + # write 0 to cmd req and + # wait for status to go 0 + if ((cmd_req & 0x01) == 1) and ((cmd_sts & 0x01) == 1): + cmd_req = 0 + curr_offset = self.QSFP_BRCM_CABLE_CMD + buffer1 = bytearray([cmd_req]) + result = self.platform_chassis.get_sfp(self.port).write_eeprom(curr_offset, 1, buffer1) + if result is False: + return self.ERROR_WR_EEPROM_FAILED, None + + # poll command status for 100ms + start = time.monotonic_ns() + ms_elapsed = 0 + while (ms_elapsed < 100): + sta = 0 + curr_offset = self.QSFP_BRCM_CABLE_CTRL_CMD_STS + result = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset, 1) + sta = result[0] + + if (sta & 0x01) == 0x0: + break + ms_elapsed = (time.monotonic_ns()//1000000) - (start//1000000) + else: + self.log(self.LOG_ERROR, "CMD_REQ/STS both are stuck at 1") + return self.ERROR_CMD_STS_CHECK_FAILED, None + ts = self.log_timestamp(ts, 
"resetting cmd to 0 done (error logic)") + + # check if any command is currently being executed + if ((cmd_req & 0x01) == 0) and ((cmd_sts & 0x01) == 0): + # + # Combine the write of the cable command header + # - write the request parameter len + # - write the response parameter len + # - write the BH lane mask (Client) + # - write the LW lane mask (Line) + # - write the core ip value + # + + # skip sending cmd_hdr for SET_HMUX_CONTEXT_PRI and SET_HMUX_CONTEXT_SEC + if (command_id != self.CABLE_CMD_ID_SET_HMUX_CONTEXT_PRI and \ + command_id != self.CABLE_CMD_ID_SET_HMUX_CONTEXT_SEC): + curr_offset = self.QSFP_VEN_FE_130_BRCM_DATA_LENGHT_LSB + result = self.platform_chassis.get_sfp(self.port).write_eeprom(curr_offset, 5, cmd_hdr) + if result is False: + self.log(self.LOG_ERROR, "write_eeprom() failed") + return self.ERROR_WRITE_EEPROM_FAILED, None + ts = self.log_timestamp(ts, "writing of cmd_hdr 5 bytes done") + + # write request data + wr_len = cmd_hdr[0] + if wr_len > 0: + curr_offset = self.CMD_REQ_PARAM_START_OFFSET + result = self.platform_chassis.get_sfp(self.port).write_eeprom(curr_offset, wr_len, cmd_req_body) + if result is False: + return self.ERROR_WR_EEPROM_FAILED, None + ts = self.log_timestamp(ts, "write request data done - bytes {}".format(wr_len)) + + # write the command request byte now + cmd_req = 1 + cmd_req = (cmd_req | (command_id << 1)) + curr_offset = self.QSFP_BRCM_CABLE_CMD + buffer1 = bytearray([cmd_req]) + result = self.platform_chassis.get_sfp(self.port).write_eeprom(curr_offset, 1, buffer1) + if result is False: + return self.ERROR_WR_EEPROM_FAILED, None + rd = False + ts = self.log_timestamp(ts, "write command request to 1 done") + + error = 0 + start = time.monotonic_ns() + ms_elapsed = 0 + while (ms_elapsed < 500): + sta = 0 + curr_offset = self.QSFP_BRCM_CABLE_CTRL_CMD_STS + result = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset, 1) + sta = result[0] + + if (sta & 0x7F) == 0x11: + rd = True + break + + if (sta & 0x7F) == 0x31: + #rd = True + error = 1 + self.log(self.LOG_ERROR, "ERROR: NIC command failed") + break + + ms_elapsed = (time.monotonic_ns()//1000000) - (start//1000000) + else: + self.log(self.LOG_ERROR, "CMD_STS never read as 0x11 or 0x31") + ret_val = self.ERROR_CMD_PROCESSING_FAILED + ts = self.log_timestamp(ts, "polling for status done") + + # read response data + if rd is True: + rd_len = cmd_hdr[1] + if rd_len > 0: + curr_offset = self.CMD_RSP_PARAM_START_OFFSET + cmd_rsp_body = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset, rd_len) + ts = self.log_timestamp(ts, "read cmd response bytes {} done".format(rd_len)) + + # set the command request to idle state + cmd_req = 0 + curr_offset = self.QSFP_BRCM_CABLE_CMD + buffer1 = bytearray([cmd_req]) + result = self.platform_chassis.get_sfp(self.port).write_eeprom(curr_offset, 1, buffer1) + if result is False: + self.log(self.LOG_ERROR, "write eeprom failed for CMD_req") + return self.ERROR_WRITE_EEPROM_FAILED, None + ts = self.log_timestamp(ts, "write command request to 0 done") + + # wait for MCU response to be pulled down + start = time.monotonic_ns() + ms_elapsed = 0 + while (ms_elapsed < 2000): + sta = 0 + curr_offset = self.QSFP_BRCM_CABLE_CTRL_CMD_STS + result = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset, 1) + sta = result[0] + + if (sta & 0x01) == 0x0: + break + ms_elapsed = (time.monotonic_ns()//1000000) - (start//1000000) + else: + ret_val = self.ERROR_MCU_NOT_RELEASED + self.log_timestamp(ts, "poll for MCU response to be puled 
down - done") + + if error: + return -1, None + + else: + ret_val = self.ERROR_MCU_BUSY + else: + self.log(self.LOG_ERROR, "Port lock timed-out!") + return self.ERROR_PORT_LOCK_TIMEOUT, None + + else: + self.log(self.LOG_ERROR, "platform_chassis is not loaded, failed to check if link is Active on TOR B side") + return self.ERROR_PLATFORM_NOT_LOADED, None + + self.log_timestamp(start_ts, "__cable_cmd_execute() completed") + return ret_val, cmd_rsp_body + + def __validate_read_data(self, result, size, message): + ''' + This API specifically used to validate the register read value + ''' + + if result is not None: + if isinstance(result, bytearray): + if len(result) != size: + LOG_MESSAGE_TEMPLATE = "Error: for checking mux_cable {}, eeprom read returned a size {} not equal to {} for port {}" + self.log(self.LOG_ERROR, LOG_MESSAGE_TEMPLATE.format(message, len(result), size, self.port)) + return self.EEPROM_READ_DATA_INVALID + else: + LOG_MESSAGE_TEMPLATE = "Error: for checking mux_cable {}, eeprom read returned an instance value of type {} which is not a bytearray for port {}" + self.log(self.LOG_ERROR, LOG_MESSAGE_TEMPLATE.format(message, type(result), self.port)) + return self.EEPROM_READ_DATA_INVALID + else: + LOG_MESSAGE_TEMPLATE = "Error: for checking mux_cable {}, eeprom read returned a None value for port {} which is not expected" + self.log(self.LOG_ERROR, LOG_MESSAGE_TEMPLATE.format(message, self.port)) + return self.EEPROM_READ_DATA_INVALID + + +############################################################################## +# +### Public APIs +# +############################################################################## + def get_api_version(self): + """ + Returns Broadcom y_cable api version + """ + return self.BCM_API_VERSION + + def get_part_number(self): + """ + This API specifically returns the part number of the Y cable for a specfic port. + + Args: + None + + Returns: + a string, with part number + """ + + if self.sfp is not None: + curr_offset = self.QSFP28_UP0_168_PN_1 + part_result = self.sfp.read_eeprom(curr_offset, 15) + else: + self.log(self.LOG_ERROR, "platform_chassis is not loaded, failed to get vendor name and pn_number") + return self.ERROR_PLATFORM_NOT_LOADED + + if self.__validate_read_data(part_result, 15, "get part_number") == self.EEPROM_READ_DATA_INVALID: + return self.EEPROM_ERROR + + part_number = str(part_result.decode()) + self.log(self.LOG_DEBUG, "Part number = {}".format(part_number)) + + return part_number + + def get_vendor(self): + """ + This API returns the vendor name of the Y cable for a specfic port. + The port on which this API is called for can be referred using self.port. + + Args: + None + Returns: + a string, with vendor name + """ + + + if self.platform_chassis is not None: + curr_offset = self.QSFP28_UP0_148_VENDOR_NAME_0 + vendor_result = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset, 15) + else: + self.log(self.LOG_ERROR, "platform_chassis is not loaded, failed to get vendor name ") + return self.ERROR_PLATFORM_NOT_LOADED + + if self.__validate_read_data(vendor_result, 15, "Vendor_name") == -1: + return self.EEPROM_ERROR + + vendor_name = str(vendor_result.decode()) + self.log(self.LOG_DEBUG, "vendor name = {}".format(vendor_name)) + + return vendor_name + + def get_read_side(self): + """ + This API checks which side of the Y cable the reads are actually getting performed + from, either TOR A or TOR B or NIC and returns the value. + The port on which this API is called for can be referred using self.port. 
+ + Args: + None + + Returns: + One of the following predefined constants: + TARGET_TOR_A, if reading the Y cable from TOR A side. + TARGET_TOR_B, if reading the Y cable from TOR B side. + TARGET_NIC, if reading the Y cable from NIC side. + TARGET_UNKNOWN, if reading the Y cable API fails. + """ + + start_ts = datetime.utcnow() + ts = self.log_timestamp(start_ts," get_read_side() start") + + if self.platform_chassis is not None: + curr_offset = self.QSFP28_UP0_224_SPECIFIC_1_RSV + result = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset, 1) + else: + self.log(self.LOG_ERROR, "platform_chassis is not loaded, failed to check read side") + return self.ERROR_PLATFORM_NOT_LOADED + + if self.__validate_read_data(result, 1, "read side") == self.EEPROM_READ_DATA_INVALID: + return self.EEPROM_ERROR + read_side = struct.unpack(" NIC, + TARGET_TOR_A -> TORA, + TARGET_TOR_B -> TORB + Returns: + a boolean, True if the link is active + , False if the link is not active + """ + + if self.platform_chassis is not None: + curr_offset = self.QSFP28_VENFD_216_LINK_STATUS + result = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset, 1) + else: + self.log(self.LOG_ERROR, "platform_chassis is not loaded, failed to check link is active for TOR A side") + return self.ERROR_PLATFORM_NOT_LOADED + + if self.__validate_read_data(result, 1, "link is active for TOR A side") == self.EEPROM_READ_DATA_INVALID: + return self.EEPROM_ERROR + + regval_read = struct.unpack(" manual switch count + SWITCH_COUNT_AUTO -> automatic switch count + clear_on_read: + a boolean, True if the count has to be reset after read to zero + , False if the count is not to be reset after read + + Returns: + an integer, the number of times the Y-cable has been switched + """ + count_value = None + count_list = self.util_get_switch_count(clear_on_read) + if count_list is None: + self.log(self.LOG_ERROR, "Get switch count is failed ") + return None + else: + to_tora_from_tora_manual_cnt = count_list[0] + to_torb_from_tora_manual_cnt = count_list[1] + to_tora_from_torb_manual_cnt = count_list[2] + to_torb_from_torb_manual_cnt = count_list[3] + to_tora_as_cnt = count_list[4] + to_torb_as_cnt = count_list[5] + + if (switch_count_type == self.SWITCH_COUNT_MANUAL): + count_value = to_tora_from_tora_manual_cnt + to_torb_from_tora_manual_cnt + \ + to_tora_from_torb_manual_cnt + to_torb_from_torb_manual_cnt + self.log(self.LOG_INFO, "Total manual count is : {}".format(count_value)) + elif (switch_count_type == self.SWITCH_COUNT_AUTO): + count_value = to_tora_as_cnt + to_torb_as_cnt + self.log(self.LOG_INFO, "Total auto count is : {}".format(count_value)) + + return count_value + + + def get_switch_count_tor_a(self, clear_on_read=False): + """ + This API returns the switch count to change the Active TOR which has + been done manually by the user initiated from ToR A + This is essentially all the successful switches initiated from ToR A. Toggles which were + initiated to toggle from ToR A and did not succed do not count. + The port on which this API is called for can be referred using self.port. 
+ + Args: + clear_on_read: + a boolean, True if the count has to be reset after read to zero + , False if the count is not to be reset after read + + Returns: + an integer, the number of times the Y-cable has been switched from ToR A + """ + + count_value = None + count_list = self.util_get_switch_count(clear_on_read) + if count_list is None: + self.log(self.LOG_ERROR, "Get switch count is failed ") + else: + to_tora_from_tora_manual_cnt = count_list[0] + to_torb_from_tora_manual_cnt = count_list[1] + count_value = to_tora_from_tora_manual_cnt + to_torb_from_tora_manual_cnt + + return count_value + + + def get_switch_count_tor_b(self, clear_on_read=False): + """ + This API returns the switch count to change the Active TOR which has + been done manually by the user initiated from ToR B + This is essentially all the successful switches initiated from ToR B. Toggles which were + initiated to toggle from ToR B and did not succed do not count. + The port on which this API is called for can be referred using self.port. + + Args: + clear_on_read: + a boolean, True if the count has to be reset after read to zero + , False if the count is not to be reset after read + + Returns: + an integer, the number of times the Y-cable has been switched from ToR B + """ + + count_value = None + count_list = self.util_get_switch_count(clear_on_read) + if count_list is None: + self.log(self.LOG_ERROR, "Get switch count is failed ") + else: + to_tora_from_torb_manual_cnt = count_list[2] + to_torb_from_torb_manual_cnt = count_list[3] + + count_value = to_tora_from_torb_manual_cnt + to_torb_from_torb_manual_cnt + + return count_value + + + def get_switch_count_target(self, switch_count_type, target, clear_on_read=False): + """ + This API returns the total number of times the Active TOR has + been done manually/automaticlly toggled towards a target. + For example, TARGET_TOR_A as target would imply + how many times the mux has been toggled towards TOR A. + The port on which this API is called for can be referred using self.port. 
+ + Args: + switch_count_type: + One of the following predefined constants, for getting the count type: + SWITCH_COUNT_MANUAL -> manual switch count + SWITCH_COUNT_AUTO -> automatic switch count + target: + One of the following predefined constants, the actual target to check the link on: + TARGET_TOR_A -> TORA, + TARGET_TOR_B -> TORB + clear_on_read: + a boolean, True if the count has to be reset after read to zero + , False if the count is not to be reset after read + Returns: + an integer, the number of times manually the Y-cable has been switched + """ + count_value = None + if target != self.TARGET_TOR_A and target != self.TARGET_TOR_B: + self.log(self.LOG_ERROR, "Invalid target") + return None + + count_list = self.util_get_switch_count(clear_on_read) + + if count_list is None: + self.log(self.LOG_ERROR, "Get switch count is failed ") + else: + to_tora_from_tora_manual_cnt = count_list[0] + to_torb_from_tora_manual_cnt = count_list[1] + to_tora_from_torb_manual_cnt = count_list[2] + to_torb_from_torb_manual_cnt = count_list[3] + to_tora_as_cnt = count_list[4] + to_torb_as_cnt = count_list[5] + + if target == self.TARGET_TOR_A: + if (switch_count_type == self.SWITCH_COUNT_MANUAL): + count_value = to_tora_from_tora_manual_cnt + to_tora_from_torb_manual_cnt + elif (switch_count_type == self.SWITCH_COUNT_AUTO): + count_value = to_tora_as_cnt + else: + count_value = None + + if target == self.TARGET_TOR_B: + if (switch_count_type == self.SWITCH_COUNT_MANUAL): + count_value = to_torb_from_tora_manual_cnt + to_torb_from_torb_manual_cnt + elif (switch_count_type == self.SWITCH_COUNT_AUTO): + count_value = to_torb_as_cnt + else: + count_value = None + + return count_value + + + def __util_read_eeprom(self, curr_offset, rd_len, message): + """ + This API is internally used for read and validate + """ + result = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset, rd_len) + + if self.__validate_read_data(result, rd_len, message) == self.EEPROM_READ_DATA_INVALID: + return self.EEPROM_ERROR + + return result + + def __handle_error_abort(self, upgrade_info, error): + """ + Internal API used to abort in case of error in FW related functions + """ + self.log(self.LOG_ERROR, "ERROR : {}, Sending abort".format(error)) + self.__cable_fw_mcu_abort(upgrade_info) + time.sleep(0.001) + + def __handle_error(self, error): + """ + Internal API to handle error in FW related APIs + """ + dat = bytearray(30) + status = 0 + + self.log(self.LOG_ERROR, "ERROR : {} FAILED".format(error)) + + if self.platform_chassis is not None: + + with self.lock.acquire_timeout(self.PORT_LOCK_TIMEOUT) as result: + if result: + # set the command request to idle state + dat[0] = 0x00 + curr_offset = (self.QSFP_BRCM_FW_UPGRADE_PAGE * 128) + self.QSFP_BRCM_FW_UPGRADE_CTRL_CMD + result = self.platform_chassis.get_sfp(self.port).write_eeprom(curr_offset, 1, dat) + if result is False: + return self.ERROR_WRITE_EEPROM_FAILED + + # wait for mcu response to be pulled down + for _ in range(30): + curr_offset = (self.QSFP_BRCM_FW_UPGRADE_PAGE * 128) + self.QSFP_BRCM_FW_UPGRADE_CMD_STS + status = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset, 1) + if status is None: + self.log(self.LOG_ERROR, "__handle_error read eeprom failed") + return self.EEPROM_ERROR + + if (status[0] & 0x01) == 0: + return + time.sleep(0.001) + else: + self.log(self.LOG_ERROR, "Port lock timed-out!") + return self.ERROR_PORT_LOCK_TIMEOUT + + else: + self.log(self.LOG_ERROR, "platform_chassis is not loaded, failed to handle_error") + return 
self.ERROR_PLATFORM_NOT_LOADED + + + def __cable_fw_mcu_abort(self, upgrade_info): + """ + Internal API used to abort the execution of FW related function in case of error + """ + ret_val = self.RR_ERROR + dat = bytearray(30) + status = 0 + req_status = False + + # SEE which MCU it is: Assuming constant pages have been set for each MCU + curr_offset = (0 * 128) + 0xE0 + result = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset, 1) + if result is None: + self.log(self.LOG_ERROR, "__cable_fw_mcu_abort read eeprom failed") + return self.EEPROM_ERROR + dat[0] = result[0] + + if dat[0] == 0x02: + self.log(self.LOG_INFO, "Current side: TOR B") + elif dat[0] == 0x01: + self.log(self.LOG_INFO, "Current side TOR A") + elif dat[0] == 0x04: + self.log(self.LOG_INFO, "Current side NIC") + else: + self.log(self.LOG_ERROR, "Current side UNKNOWN") + + # Make sure TOR to NIC MCU communication is alive + self.log(self.LOG_DEBUG, "Make sure TOR to NIC MCU communication is alive ") + if (upgrade_info.destination == self.NIC_MCU) and ((dat[0] == 0x02) or (dat[0] == 0x01)): + # Since we are running from TOR side, make sure no flush is on going + for _ in range(3000): + curr_offset = (self.QSFP_BRCM_DIAGNOSTIC_PAGE * 128) + self.QSFP_BRCM_DIAGNOSTIC_STATUS + status = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset, 1) + if status is None: + self.log(self.LOG_ERROR, "__cable_fw_mcu_abort read eeprom failed") + return self.EEPROM_ERROR + + if status[0] == 0: + break + time.sleep(0.001) + if status[0]: + self.log(self.LOG_ERROR, "Unable to communicate with NIC MCU") + return self.ERROR_RW_NIC_FAILED + + with self.lock.acquire_timeout(self.PORT_LOCK_TIMEOUT) as result: + if result: + # Make sure to clear command first else can have unforseen consequences + curr_offset = (self.QSFP_BRCM_FW_UPGRADE_PAGE*128) + dat[0] = 0x00 + self.platform_chassis.get_sfp(self.port).write_eeprom(curr_offset + self.QSFP_BRCM_FW_UPGRADE_CTRL_CMD, 1, dat) + + # Send destination + dat[0] = upgrade_info.destination + self.platform_chassis.get_sfp(self.port).write_eeprom(curr_offset + self.QSFP_BRCM_FW_UPGRADE_HEADER_24_31, 1, dat) + + # Send Abort request + dat[0] = (self.FW_CMD_ABORT << 1) | 1 + self.platform_chassis.get_sfp(self.port).write_eeprom(curr_offset + self.QSFP_BRCM_FW_UPGRADE_CTRL_CMD, 1, dat) + + # Check response status + for _ in range(100): + status = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset + self.QSFP_BRCM_FW_UPGRADE_CMD_STS, 1) + + if (status[0] & 0x01) == 0: + req_status = True + ret_val = self.RR_SUCCESS + + # Set the command request to idle state + dat[0] = 0x00 + self.platform_chassis.get_sfp(self.port).write_eeprom(curr_offset + self.QSFP_BRCM_FW_UPGRADE_CTRL_CMD, 1, dat) + break + time.sleep(0.001) + else: + self.log(self.LOG_ERROR, "Port lock timed-out!") + return self.ERROR_PORT_LOCK_TIMEOUT + + if not req_status: + self.log(self.LOG_ERROR, "Abort timeout. No response from MCU") + self.__handle_error(17) + #return ret_val + return self.ERROR_CMD_TIMEOUT + + return ret_val + + def cable_fw_get_status(self, upgrade_info): + """ + This function used internally to get the status information of existing firmware. + The status information has the following details, + 1. current bank + 2. bank1 firmware image version minor + 3. bank1 firmware image version major + 4. bank1 API image version minor + 5. bank1 API image version major + 6. bank1 image crc32 + 7. bank2 firmware image version minor + 8. bank2 firmware image version major + 9. 
bank2 API image version minor + 10. bank2 API image version major + 11. bank2 image crc32 + + upgrade_info: + an object of type cable_upgrade_type_s, must have upgrade_info.destination + set to MUX_CHIP or NIC_MCU or TOR_MCU + + Returns: + RR_SUCCESS : Success + RR_ERROR : Failed + ERROR_RW_NIC_FAILED : Unable to communicate with nic MCU + EEPROM_READ_DATA_INVALID : Read invalid data + RR_ERROR_SYSTEM_UNAVAILABLE : System unavaiable + ERROR_PLATFORM_NOT_LOADED : Platform not loaded + ERROR_PORT_LOCK_TIMEOUT : Port lock timeout + + """ + ret_val = self.RR_ERROR + dat = bytearray(30) + status = 0 + info_stat = 0 + req_status = False + + if self.platform_chassis is not None: + # SEE which MCU it is: Assuming constant pages have been set for each MCU + curr_offset = (0*128) + 0xE0 + result = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset, 1) + if result is None: + self.log(self.LOG_ERROR, "cable_fw_get_status read eeprom failed") + return self.EEPROM_ERROR + + dat[0] = result[0] + + if dat[0] == 0x02: + self.log(self.LOG_INFO, "Current side: TOR B") + elif dat[0] == 0x01: + self.log(self.LOG_INFO, "Current side TOR A") + elif dat[0] == 0x04: + self.log(self.LOG_INFO, "Current side NIC") + else: + self.log(self.LOG_INFO, "Current side UNKNOWN") + + # Make sure TOR to NIC MCU communication is alive + self.log(self.LOG_DEBUG, "cable_fw_get_status : .................................................... ") + self.log(self.LOG_DEBUG, "Make sure TOR to NIC MCU communication is alive ") + if (upgrade_info.destination == self.NIC_MCU) and ((dat[0] == 0x02) or (dat[0] == 0x01)): + + for _ in range(3000): + curr_offset = (self.QSFP_BRCM_DIAGNOSTIC_PAGE * 128) + self.QSFP_BRCM_DIAGNOSTIC_STATUS + status = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset, 1) + if status[0] == 0: + break + time.sleep(0.001) + + if status[0]: + self.log(self.LOG_ERROR, "Unable to communicate with NIC MCU") + #return self.RR_ERROR + return self.ERROR_RW_NIC_FAILED + + # read cable command and status offsets + self.log(self.LOG_DEBUG, "read cable command and status offsets ") + result = self.__util_read_eeprom(((self.QSFP_BRCM_FW_UPGRADE_PAGE * 128) + self.QSFP_BRCM_FW_UPGRADE_CTRL_CMD), 2, "cable_fw_get_status") + if result != self.EEPROM_READ_DATA_INVALID: + dat[0] = result[0] + dat[1] = result[1] + + if ((dat[0] & 0x01) != 0) or ((dat[1] & 0x01) != 0): + self.log(self.LOG_DEBUG, "MCU not in the right state. 
Sending abort") + self.__cable_fw_mcu_abort(upgrade_info) + time.sleep(0.001) + result = self.__util_read_eeprom(((self.QSFP_BRCM_FW_UPGRADE_PAGE * 128) + self.QSFP_BRCM_FW_UPGRADE_CTRL_CMD), 2, "cable_fw_upgrade") + dat[0] = result[0] + dat[1] = result[1] + + with self.lock.acquire_timeout(self.PORT_LOCK_TIMEOUT) as result: + if result: + # check if any command is currently being executed + self.log(self.LOG_DEBUG, "check if any command is currently being executed ") + if ((dat[0] & 0x01) == 0) and ((dat[1] & 0x01) == 0): + # Send destination + self.log(self.LOG_DEBUG, "send destination ") + dat[0] = upgrade_info.destination + current_offset = (self.QSFP_BRCM_FW_UPGRADE_PAGE*128) + self.QSFP_BRCM_FW_UPGRADE_HEADER_24_31 + result = self.platform_chassis.get_sfp(self.port).write_eeprom(current_offset, 1, dat) + if result is False: + return self.ERROR_WRITE_EEPROM_FAILED + + # Send command status request + self.log(self.LOG_DEBUG, "send command status request ") + dat[0] = (self.FW_CMD_INFO << 1) | 1 + current_offset = (self.QSFP_BRCM_FW_UPGRADE_PAGE*128) + self.QSFP_BRCM_FW_UPGRADE_CTRL_CMD + result = self.platform_chassis.get_sfp(self.port).write_eeprom(current_offset, 1, dat) + if result is False: + return self.ERROR_WRITE_EEPROM_FAILED + + #Delay reading status as this can block during swap + #time.sleep(0.2) + req_status = False + for _ in range(0, 100): + status = self.__util_read_eeprom(((self.QSFP_BRCM_FW_UPGRADE_PAGE * 128) + self.QSFP_BRCM_FW_UPGRADE_CMD_STS), 1, "cable_fw_get_status") + if status[0] & 0x01: + if ((status[0] & 0xFC) == (self.FW_UP_SUCCESS << 2)) or ((status[0] & 0xFC) == (self.FW_UP_IN_PROGRESS << 2)): + + # SUCCESS, read the status info + dat = self.__util_read_eeprom(((self.QSFP_BRCM_FW_UPGRADE_DATA_PAGE_1 * 128) + self.QSFP_BRCM_FW_UPGRADE_CURRENT_BANK), 26, "cable_fw_get_status") + + # Current bank + upgrade_info.status_info.current_bank = dat[0] + upgrade_info.status_info.next_bank = dat[25] + + # Bank 1 minor fw version + upgrade_info.status_info.bank1_info.image_fw_version.image_version_minor = (dat[2] << 8) | dat[1] + + # Bank 1 major fw version + upgrade_info.status_info.bank1_info.image_fw_version.image_version_major = (dat[4] << 8) | dat[3] + + # Bank 1 minor API version + upgrade_info.status_info.bank1_info.image_api_version.image_version_minor = (dat[6] << 8) | dat[5] + + # Bank 1 major API version + upgrade_info.status_info.bank1_info.image_api_version.image_version_major = (dat[8] << 8) | dat[7] + + # Bank 1 CRC32 + upgrade_info.status_info.bank1_info.image_crc32 = (dat[12] << 24) | (dat[11] << 16) | (dat[10] << 8) | dat[9] + # Bank 2 minor fw version + upgrade_info.status_info.bank2_info.image_fw_version.image_version_minor = (dat[14] << 8) | dat[13] + + # Bank 2 major fw version + upgrade_info.status_info.bank2_info.image_fw_version.image_version_major = (dat[16] << 8) | dat[15] + + # Bank 2 minor API version + upgrade_info.status_info.bank2_info.image_api_version.image_version_minor = (dat[18] << 8) | dat[17] + + # Bank 2 major API version + upgrade_info.status_info.bank2_info.image_api_version.image_version_major = (dat[20] << 8) | dat[19] + + # Bank2 CRC32 + upgrade_info.status_info.bank2_info.image_crc32 = (dat[24] << 24) | (dat[23] << 16) | (dat[22] << 8) | dat[21] + + req_status = True + + if (status[0] & 0xFC) == (self.FW_UP_IN_PROGRESS << 2): + info_stat = 1 + + break + else: + self.__handle_error_abort(upgrade_info, 1) + return ret_val + time.sleep(0.01) + + if req_status: + req_status = False + # set the command request to idle state + 
self.log(self.LOG_DEBUG, "set the command request to idle state ") + dat[0] = 0x00 + curr_offset = (self.QSFP_BRCM_FW_UPGRADE_PAGE*128) + self.QSFP_BRCM_FW_UPGRADE_CTRL_CMD + result = self.platform_chassis.get_sfp(self.port).write_eeprom(curr_offset, 1, dat) + if result is False: + return self.ERROR_WRITE_EEPROM_FAILED + + # Delay reading status as this can block during swap + #time.sleep(0.3) + + # wait for mcu response to be pulled down + self.log(self.LOG_DEBUG, "wait for mcu response to be pulled down ") + for _ in range(100): + status = self.platform_chassis.get_sfp(self.port).read_eeprom(((self.QSFP_BRCM_FW_UPGRADE_PAGE*128) + self.QSFP_BRCM_FW_UPGRADE_CMD_STS), 1) + if (status[0] & 0x01) == 0: + req_status = True + ret_val = self.RR_SUCCESS + break + + time.sleep(0.001) + + if not req_status: + # Timeout, how to handle? + self.log(self.LOG_DEBUG, "timeout handle error abort ") + self.__handle_error_abort(upgrade_info, 2) + #return ret_val + return self.ERROR_CMD_TIMEOUT + else: + # Error + self.log(self.LOG_DEBUG, "Error handle error abort ") + self.__handle_error_abort(upgrade_info, 17) + return ret_val + else: + self.log(self.LOG_ERROR, "MCU not in the right state") + + if info_stat: + ret_val = self.RR_ERROR_SYSTEM_UNAVAILABLE + return ret_val + + else: + self.log(self.LOG_ERROR, "Port lock timed-out!") + return self.ERROR_PORT_LOCK_TIMEOUT + + else: + self.log(self.LOG_ERROR, "platform_chassis is not loaded, failed to read " + "fw_get_status") + return self.ERROR_PLATFORM_NOT_LOADED + + return ret_val + + def cable_fw_bank_toggle(self, upgrade_info): + ''' + This API is internally used by activate firmware + ''' + ctrl_result = bytearray(30) + ret_val = self.RR_ERROR + status = 0 + info_stat = 0 + req_status = False + + if self.platform_chassis is not None: + # SEE which MCU it is: Assuming constant pages have been set for each MCU + curr_offset = (0 * 128) + 0xE0 + result = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset, 1) + if result is None: + self.log(self.LOG_ERROR, "cable_fw_get_status read eeprom failed") + return self.EEPROM_ERROR + + ctrl_result[0] = result[0] + + if ctrl_result[0] == 0x02: + self.log(self.LOG_INFO, "Current side: TOR B") + elif ctrl_result[0] == 0x01: + self.log(self.LOG_INFO, "Current side TOR A") + elif ctrl_result[0] == 0x04: + self.log(self.LOG_INFO, "Current side NIC") + else: + self.log(self.LOG_INFO, "Current side UNKNOWN") + + # Make sure TOR to NIC MCU communication is alive + self.log(self.LOG_DEBUG, "Make sure TOR to NIC MCU communication is alive ") + if (upgrade_info.destination == self.NIC_MCU) and ((ctrl_result[0] == 0x02) or (ctrl_result[0] == 0x01)): + # Since we are running from TOR side, make sure no flush is on going + for _ in range(3000): + curr_offset = (self.QSFP_BRCM_DIAGNOSTIC_PAGE * 128) + self.QSFP_BRCM_DIAGNOSTIC_STATUS + status = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset, 1) + if status[0] == 0: + break + time.sleep(0.001) + + if status[0]: + self.log(self.LOG_ERROR, "Unable to communicate with NIC MCU") + #return self.RR_ERROR + return self.ERROR_RW_NIC_FAILED + + with self.lock.acquire_timeout(self.PORT_LOCK_TIMEOUT) as result: + if result: + # read cable command and status offsets + curr_offset = ((self.QSFP_BRCM_FW_UPGRADE_PAGE*128) + self.QSFP_BRCM_FW_UPGRADE_CTRL_CMD) + ctrl_result = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset, 2) + + if ((ctrl_result[0] & 0x01) != 0) or ((ctrl_result[1] & 0x01) != 0): + self.log(self.LOG_ERROR, "MCU not in the right 
state. Sending abort") + ret_val = self.__cable_fw_mcu_abort(upgrade_info) + if ret_val != self.RR_SUCCESS: + return ret_val + time.sleep(0.001) + ctrl_result = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset, 2) + + + if ((ctrl_result[0] & 0x01) == 0) and ((ctrl_result[1] & 0x01) == 0): + + # send destinationn + ctrl_result[0] = upgrade_info.destination + curr_offset = ((self.QSFP_BRCM_FW_UPGRADE_PAGE*128) + self.QSFP_BRCM_FW_UPGRADE_HEADER_24_31) + self.platform_chassis.get_sfp(self.port).write_eeprom(curr_offset, 1, ctrl_result) + + # send swap request + ctrl_result[0] = (self.FW_CMD_SWAP << 1) | 1 + curr_offset = ((self.QSFP_BRCM_FW_UPGRADE_PAGE*128) + self.QSFP_BRCM_FW_UPGRADE_CTRL_CMD) + self.platform_chassis.get_sfp(self.port).write_eeprom(curr_offset, 1, ctrl_result) + + # Delay reading status as this can block during swap. + time.sleep(0.3) + + # check response status + for _ in range(0, 100): + curr_offset = ((self.QSFP_BRCM_FW_UPGRADE_PAGE*128) + self.QSFP_BRCM_FW_UPGRADE_CTRL_STS) + status = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset, 1) + if status[0] & 0x01: + if ((status[0] & 0xFC) == (self.FW_UP_SUCCESS << 2)) or ((status[0] & 0xFC) == (self.FW_UP_IN_PROGRESS << 2)): + if (status[0] & 0xFC) == (self.FW_UP_IN_PROGRESS << 2): + info_stat = 1 + req_status = True + break + else: + # ERROR? + self.__handle_error_abort(upgrade_info, 1) + return ret_val + time.sleep(0.001) + if req_status: + req_status = False + ctrl_result[0] = 0x00 + curr_offset = ((self.QSFP_BRCM_FW_UPGRADE_PAGE*128) + self.QSFP_BRCM_FW_UPGRADE_CTRL_CMD) + self.platform_chassis.get_sfp(self.port).write_eeprom(curr_offset, 1, ctrl_result) + # Delay reading status as this can block during swap. + time.sleep(0.1) + + for _ in range(0, 100): + curr_offset = ((self.QSFP_BRCM_FW_UPGRADE_PAGE*128) + self.QSFP_BRCM_FW_UPGRADE_CMD_STS) + status = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset, 1) + if (status[0] & 0x01) == 0: + req_status = True + ret_val = self.RR_SUCCESS + break + time.sleep(0.001) + + if not req_status: + # Timeout, how to handle? 
+ self.__handle_error_abort(upgrade_info, 2) + #return ret_val + return self.ERROR_CMD_TIMEOUT + else: + # ERROR + self.__handle_error_abort(upgrade_info, 17) + return ret_val + + else: + self.log(self.LOG_ERROR, "MCU not in the right state") + else: + self.log(self.LOG_ERROR, "Port lock timed-out!") + return self.ERROR_PORT_LOCK_TIMEOUT + + else: + self.log(self.LOG_ERROR, "platform_chassis is not loaded, failed to read " + "firmware_version") + return self.ERROR_PLATFORM_NOT_LOADED + + if info_stat: + ret_val = self.RR_ERROR_SYSTEM_UNAVAILABLE + + return ret_val + + def __cable_fw_upgrade(self, upgrade_info): + """ + This function used internally to upgrade the firmware of TOR, NIC and MUX + physical_port: + an Integer, the actual physical port connected to a Y cable + + upgrade_info: + an object of type cable_upgrade_info_s, The destination, versions + and image_buffer must be set + + Returns + RR_SUCCESS Success + RR_ERROR Failures + ERROR_RW_NIC_FAILED Unable to comminicate NIC MCU + ERROR_INVALID_TARGET Wrong destination + ERROR_CMD_TIMEOUT Command Time out + """ + ret_val = self.RR_ERROR + dat = bytearray(128) + status = 0 + req_status = False + i = 0 + tmp_cnt = 0 + tmp_print = 0 + start_tstamp = datetime.utcnow() + + QSFP_PAGE_OFFSET = self.QSFP_BRCM_FW_UPGRADE_PAGE * 128 + + if self.platform_chassis is None: + self.log(self.LOG_ERROR, "platform_chassis is not loaded, failed to read - fw_get_status") + return self.ERROR_PLATFORM_NOT_LOADED + if upgrade_info.image_info.image_size == 0: + return self.RR_ERROR + + # SEE which MCU it is: Assuming constant pages have been set for each MCU + curr_offset = (0*128) + 0xE0 + result = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset, 1) + if result is None: + self.log(self.LOG_ERROR, "__cable_fw_upgrade read eeprom failed") + return self.EEPROM_ERROR + dat[0] = result[0] + + if dat[0] == 0x02: + self.log(self.LOG_INFO, "Current side: TOR B") + elif dat[0] == 0x01: + self.log(self.LOG_INFO, "Current side TOR A") + elif dat[0] == 0x04: + self.log(self.LOG_INFO, "Current side NIC") + else: + self.log(self.LOG_INFO, "Current side UNKNOWN") + + # Make sure TPR to NIC MCU communication is alive + self.log(self.LOG_DEBUG, "In cable_fw_upgrade().........................................") + ts = datetime.utcnow() + self.log(self.LOG_DEBUG, "Make sure TOR to NIC MCU communication is alive") + #if (upgrade_info.destination == NIC_MCU): + if (upgrade_info.destination == self.NIC_MCU) and ( (dat[0] == 0x02) or (dat[0] == 0x01) ): + + for i in range(3000): + curr_offset = (self.QSFP_BRCM_DIAGNOSTIC_PAGE * 128) + self.QSFP_BRCM_DIAGNOSTIC_STATUS + status = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset, 1) + if status[0] == 0: + break + time.sleep(0.001) + + if status[0]: + self.log(self.LOG_ERROR, "Unable to communicate with NIC MCU") + #return self.RR_ERROR + return self.ERROR_RW_NIC_FAILED + ts = self.log_timestamp(ts,"TOR to NIC MCU communication is alive") + + + with self.lock.acquire_timeout(self.PORT_LOCK_TIMEOUT) as result: + if result: + # read cable command and status offsets + self.log(self.LOG_DEBUG, "read cable command and status offsets") + result = self.__util_read_eeprom((QSFP_PAGE_OFFSET + self.QSFP_BRCM_FW_UPGRADE_CTRL_CMD), 2, "cable_fw_upgrade") + if result != self.EEPROM_READ_DATA_INVALID: + dat[0] = result[0] + dat[1] = result[1] + if ((dat[0] & 0x01) != 0) or ((dat[1] & 0x01) != 0): + self.log(self.LOG_DEBUG, "MCU not in the right state. 
Sending abort") + ret_val = self.__cable_fw_mcu_abort(upgrade_info) + if ret_val != self.RR_SUCCESS: + self.log(self.LOG_ERROR, "MCU abort failed") + return ret_val + + time.sleep(0.001) + result = self.__util_read_eeprom((QSFP_PAGE_OFFSET + self.QSFP_BRCM_FW_UPGRADE_CTRL_CMD), 2, "cable_fw_upgrade") + dat[0] = result[0] + dat[1] = result[1] + + # check if any command is currently being executed + self.log(self.LOG_DEBUG, "check if any command is currently being executed") + if ((dat[0] & 0x01) == 0) and ((dat[1] & 0x01) == 0): + # Start command - Debug Prints + destination = None + + if upgrade_info.destination == self.TOR_MCU_SELF: + destination = "TOR MCU SELF" + elif upgrade_info.destination == self.TOR_MCU_PEER: + destination = "TOR MCU PEER" + elif upgrade_info.destination == self.NIC_MCU: + destination = "NIC MCU" + elif upgrade_info.destination == self.MUX_CHIP: + destination = "MUX CHIP" + else: + self.log(self.LOG_ERROR, "Wrong destination") + return self.ERROR_INVALID_TARGET + + self.log(self.LOG_DEBUG, "Starting firmware upgrade to {}".format(destination)) + + self.log(self.LOG_DEBUG, "FW Version minor: {}".format(hex(upgrade_info.image_info.image_fw_version.image_version_minor))) + self.log(self.LOG_DEBUG, "FW Version major: {}".format(hex(upgrade_info.image_info.image_fw_version.image_version_major))) + if upgrade_info.destination == self.TOR_MCU_SELF or upgrade_info.destination == self.TOR_MCU_PEER: + self.log(self.LOG_DEBUG, "API version minor: {}".format(hex(upgrade_info.image_info.image_api_version.image_version_minor))) + self.log(self.LOG_DEBUG, "API version major: {}".format(hex(upgrade_info.image_info.image_api_version.image_version_major))) + self.log(self.LOG_DEBUG, "CRC32 : {}".format(hex(upgrade_info.image_info.image_crc32))) + self.log(self.LOG_DEBUG, "Image size : {}".format(hex(upgrade_info.image_info.image_size))) + + # Send image header + self.log(self.LOG_DEBUG, "send image header") + dat[0] = upgrade_info.image_info.image_size & 0xff + dat[1] = (upgrade_info.image_info.image_size >> 8) & 0xFF + dat[2] = (upgrade_info.image_info.image_size >> 16) & 0xFF + dat[3] = upgrade_info.destination + dat[4] = upgrade_info.image_info.image_fw_version.image_version_minor & 0xFF + dat[5] = (upgrade_info.image_info.image_fw_version.image_version_minor >> 8) & 0xFF + dat[6] = upgrade_info.image_info.image_fw_version.image_version_major & 0xFF + dat[7] = (upgrade_info.image_info.image_fw_version.image_version_major >> 8) & 0xFF + dat[8] = upgrade_info.image_info.image_api_version.image_version_minor & 0xFF + dat[9] = (upgrade_info.image_info.image_api_version.image_version_minor >> 8) & 0xFF + dat[10] = upgrade_info.image_info.image_api_version.image_version_major & 0xFF + dat[11] = (upgrade_info.image_info.image_api_version.image_version_major >> 8) & 0xFF + dat[12] = upgrade_info.image_info.image_crc32 & 0xFF + dat[13] = (upgrade_info.image_info.image_crc32 >> 8) & 0xFF + dat[14] = (upgrade_info.image_info.image_crc32 >> 16) & 0xFF + dat[15] = (upgrade_info.image_info.image_crc32 >> 24) & 0xFF + dat[16] = self.FW_UP_PACKET_SIZE + dat[17] = self.QSFP_BRCM_FW_UPGRADE_DATA_PAGE_1 + + self.platform_chassis.get_sfp(self.port).write_eeprom(QSFP_PAGE_OFFSET + self.QSFP_BRCM_FW_UPGRADE_HEADER_0_7, 18, dat) + + ts = self.log_timestamp(ts,"Image header sent") + + # Send request firmware upgrad to START + self.log(self.LOG_INFO, "START ERASING") + + dat[0] = (self.FW_CMD_START << 1) | 1 + self.platform_chassis.get_sfp(self.port).write_eeprom(QSFP_PAGE_OFFSET + 
self.QSFP_BRCM_FW_UPGRADE_CTRL_CMD, 1, dat) + + time.sleep(3.5) + + ts = self.log_timestamp(ts,"Erase command sent") + + # Check response status + self.log(self.LOG_DEBUG, "check MCU ready status") + for i in range(3000): + status = self.__util_read_eeprom((QSFP_PAGE_OFFSET + self.QSFP_BRCM_FW_UPGRADE_CMD_STS), 1, "cable_fw_upgrade") + if status[0] & 0x01: + if (status[0] & 0xFC) == (self.FW_UP_SUCCESS << 2): + self.log(self.LOG_DEBUG, "MCU is Ready") + req_status = True + break + else: + if (status[0] & 0xFC) == (self.FW_UP_IN_PROGRESS << 2): + ret_val = self.RR_ERROR_SYSTEM_UNAVAILABLE + self.__handle_error_abort(upgrade_info, 1) + return ret_val + + time.sleep(0.001) + + ts = self.log_timestamp(ts,"MCU ready check done") + + # if MCU is ready + if req_status: + req_status = False + # set the command request to idle state + self.log(self.LOG_DEBUG, "set the command request to idle state ") + dat[0] = 0x00 + self.platform_chassis.get_sfp(self.port).write_eeprom(QSFP_PAGE_OFFSET + self.QSFP_BRCM_FW_UPGRADE_CTRL_CMD, 1, dat) + + # wait for mcu response to be pulled down + self.log(self.LOG_DEBUG, "wait for mcu response to be pulled down ") + for i in range(100): + status = self.__util_read_eeprom((QSFP_PAGE_OFFSET + self. QSFP_BRCM_FW_UPGRADE_CMD_STS), 1, "cable_fw_upgrade") + if (status[0] & 0x01) == 0: + req_status = True + break + + time.sleep(0.001) + + if not req_status: + self.log(self.LOG_ERROR, "MCU state - not pulled down ") + self.__handle_error_abort(upgrade_info, 2) + return ret_val + + ts = self.log_timestamp(ts, "MCU response pulled down") + else: + self.log(self.LOG_ERROR, "ERROR MCU is not ready ") + self.__handle_error_abort(upgrade_info, 17) + return ret_val + + # if MCU response pulled down + if req_status: + # TRANSFER command + self.log(self.LOG_INFO, "FW image transfer start ") + tmp_image_ptr = upgrade_info.image_info.image_ptr + count = 0 + page_loc = self.QSFP_BRCM_FW_UPGRADE_DATA_PAGE_1 + + # MCU is now ready for firmware upgrade, Start the loop to transfre the data + self.log(self.LOG_DEBUG, "MCU is now ready for firmware upgrade, Start the loop to transfer the data") + dat[0] = self.FW_UP_PACKET_SIZE + dat[1] = page_loc + self.platform_chassis.get_sfp(self.port).write_eeprom(QSFP_PAGE_OFFSET + self.QSFP_BRCM_FW_UPGRADE_PACKET_SIZE, 2, dat) + self.log(self.LOG_DEBUG, "fw_up_packet_size : {}".format(dat[0]),"page_loc : {}".format(dat[1])) + + for i in range(0, self.FW_UP_PACKET_SIZE, 4): + dat[0 + i] = tmp_image_ptr[count] & 0xFF + dat[1 + i] = (tmp_image_ptr[count] >> 8) & 0xFF + dat[2 + i] = (tmp_image_ptr[count] >> 16) & 0xFF + dat[3 + i] = (tmp_image_ptr[count] >> 24) & 0xFF + count += 1 + + self.platform_chassis.get_sfp(self.port).write_eeprom((page_loc*128) + self.QSFP_BRCM_FW_UPGRADE_DATA_START, self.FW_UP_PACKET_SIZE, dat) + ts = self.log_timestamp(ts, "First packet written") + + self.log(self.LOG_DEBUG, "TRANSFERING remaining packets..") + + dat[0] = (self.FW_CMD_TRANSFER << 1) | 1 + self.platform_chassis.get_sfp(self.port).write_eeprom(QSFP_PAGE_OFFSET + self.QSFP_BRCM_FW_UPGRADE_CTRL_CMD, 1, dat) + + # prepare and send remaining packets + for _ in range(self.FW_UP_PACKET_SIZE, upgrade_info.image_info.image_size, self.FW_UP_PACKET_SIZE): + req_status = False + + # Toggle data page_loc + page_loc = self.QSFP_BRCM_FW_UPGRADE_DATA_PAGE_1 if(page_loc == self.QSFP_BRCM_FW_UPGRADE_DATA_PAGE_2) else self.QSFP_BRCM_FW_UPGRADE_DATA_PAGE_2 + + # prepare packet data + for i in range(0, self.FW_UP_PACKET_SIZE, 4): + dat[0 + i] = tmp_image_ptr[count] & 0xFF + dat[1 + 
i] = (tmp_image_ptr[count] >> 8) & 0xFF + dat[2 + i] = (tmp_image_ptr[count] >> 16) & 0xFF + dat[3 + i] = (tmp_image_ptr[count] >> 24) & 0xFF + count += 1 + + # write packet + self.platform_chassis.get_sfp(self.port).write_eeprom((page_loc*128) + self.QSFP_BRCM_FW_UPGRADE_DATA_START, self.FW_UP_PACKET_SIZE, dat) + + # Check response status for previous packet + for i in range(500): + status = self.__util_read_eeprom((QSFP_PAGE_OFFSET + self.QSFP_BRCM_FW_UPGRADE_CMD_STS), 1, "cable_fw_upgrade") + if status[0] & 0x01: + if(status[0] & 0xFC) == (self.FW_UP_SUCCESS << 2): + req_status = True + tmp_cnt += self.FW_UP_PACKET_SIZE + if tmp_cnt >= (upgrade_info.image_info.image_size / 100): + tmp_cnt = 0 + tmp_print += 1 + #_logger.log_info(" {}%".format(tmp_print),CONSOLE_PRINT) + print(" {}%".format(tmp_print)) + break + else: + #ERROR + self.log(self.LOG_ERROR, "ERROR: TRANSFER error {}".format((status[0] & 0xFC) >> 2)) + self.__handle_error_abort(upgrade_info, 3) + return ret_val + + time.sleep(0.001) + + # if previous packet sent successfully + if req_status: + req_status = False + + # Set the command request to idle state + dat[0] = 0x00 + self.platform_chassis.get_sfp(self.port).write_eeprom(QSFP_PAGE_OFFSET + self.QSFP_BRCM_FW_UPGRADE_CTRL_CMD, 1, dat) + + # wait for mcu response to be pulled down + for i in range(100): + status = self.__util_read_eeprom((QSFP_PAGE_OFFSET + self.QSFP_BRCM_FW_UPGRADE_CMD_STS), 1, "cable_fw_upgrade") + + if(status[0] & 0x01) == 0: + # Previous packet is OK + # Set MCU write the next packet + dat[0] = self.FW_UP_PACKET_SIZE + dat[1] = page_loc + self.platform_chassis.get_sfp(self.port).write_eeprom(QSFP_PAGE_OFFSET + self.QSFP_BRCM_FW_UPGRADE_PACKET_SIZE, 2, dat) + + dat[0] = (self.FW_CMD_TRANSFER << 1) | 1 + self.platform_chassis.get_sfp(self.port).write_eeprom(QSFP_PAGE_OFFSET + self.QSFP_BRCM_FW_UPGRADE_CTRL_CMD, 1, dat) + req_status = True + break + time.sleep(0.001) + + if not req_status: + self.log(self.LOG_ERROR, "cable_fw_upgrade : handle error abort1") + self.__handle_error_abort(upgrade_info, 4) + return ret_val + + else: + if not req_status: + self.log(self.LOG_ERROR, "ERROR: TRANSFER timed out") + + self.__handle_error_abort(upgrade_info, 5) + return ret_val + + ts = self.log_timestamp(ts,"All packets written... Check response for last page") + # Check response status for last page + for i in range(100): + status = self.__util_read_eeprom((QSFP_PAGE_OFFSET + self.QSFP_BRCM_FW_UPGRADE_CMD_STS), 1, "cable_fw_upgrade") + + if status[0] & 0x01: + if (status[0] & 0xFC) == (self.FW_UP_SUCCESS << 2): + req_status = True + self.log(self.LOG_DEBUG, " 100% ") + break + else: + # ERROR + self.log(self.LOG_ERROR, "ERROR: TRANSFER error{}".format((status[0] & 0xFC) >> 2)) + self.__handle_error_abort(upgrade_info, 3) + return ret_val + + time.sleep(0.001) + + ts = self.log_timestamp(ts,"Check response for last page done") + + if req_status: + req_status = False + + # Set the command request to idle state + dat[0] = 0x00 + self.platform_chassis.get_sfp(self.port).write_eeprom(QSFP_PAGE_OFFSET + self.QSFP_BRCM_FW_UPGRADE_CTRL_CMD, 1, dat) + + # Wait for mcu response to be pulled down + for i in range(100): + status = self.__util_read_eeprom((QSFP_PAGE_OFFSET + self. QSFP_BRCM_FW_UPGRADE_CMD_STS), 1, "cable_fw_upgrade") + if (status[0] & 0x01) == 0: + # Last packet is OK + req_status = True + break + time.sleep(0.001) + if not req_status: + # Timeout, how to handle? 
+ self.__handle_error_abort(upgrade_info, 4) + #return ret_val + return self.ERROR_CMD_TIMEOUT + + ts = self.log_timestamp(ts,"Wait for mcu response to be pulled down2") + else: + if not req_status: + self.log(self.LOG_ERROR, "ERROR: TRANSFER timed out") + self.__handle_error_abort( upgrade_info, 5) + return ret_val + + # COMPLETE command + # Send firmware upgrade complete + req_status = False + self.log(self.LOG_INFO, "TRANSFER COMPLETE") + ts = self.log_timestamp(ts,"TRANSFER complete") + + dat[0] = (self.FW_CMD_COMPLETE << 1) | 1 + self.platform_chassis.get_sfp(self.port).write_eeprom(QSFP_PAGE_OFFSET + self.QSFP_BRCM_FW_UPGRADE_CTRL_CMD, 1, dat) + + # Check response status + for i in range(100): + status = self.__util_read_eeprom((QSFP_PAGE_OFFSET + self.QSFP_BRCM_FW_UPGRADE_CMD_STS), 1, "cable_fw_upgrade") + + # Check response status + if status[0] & 0x01: + if (status[0] & 0xFC) == (self.FW_UP_SUCCESS << 2): + # MCU SUCCEEDED + req_status = True + break + else: + # ERROR + self.__handle_error_abort(upgrade_info, 6) + return ret_val + time.sleep(0.001) + + ts = self.log_timestamp(ts,"MCU check response state good") + if req_status: + req_status = False + # Set the command request to idle state + dat[0] = 0x00 + self.platform_chassis.get_sfp(self.port).write_eeprom(QSFP_PAGE_OFFSET + self.QSFP_BRCM_FW_UPGRADE_CTRL_CMD, 1, dat) + + # wait for mcu response to be pulled down + for i in range(100): + status = self.__util_read_eeprom((QSFP_PAGE_OFFSET + self.QSFP_BRCM_FW_UPGRADE_CMD_STS), 1, "cable_fw_upgrade") + if (status[0] & 0x01) == 0: + # MCU is Ready + req_status = True + ret_val = self.RR_SUCCESS + break + time.sleep(0.001) + + if not req_status: + # Timeout + self.log(self.LOG_ERROR, "Timed out - MCU pull down polling") + self.__handle_error_abort(upgrade_info, 7) + #return ret_val + return self.ERROR_CMD_TIMEOUT + + self.log_timestamp(ts,"wait for mcu response to be pulled down3") + else: + # ERROR + self.log(self.LOG_ERROR, "ERROR") + self.__handle_error_abort(upgrade_info, 8) + return ret_val + else: + self.log(self.LOG_WARN, "MCU not in the right state") + else: + self.log(self.LOG_ERROR, "Port lock timed-out!") + return self.ERROR_PORT_LOCK_TIMEOUT + + self.log_timestamp(start_tstamp, "FW upgrade complete") + + return ret_val + + + + def get_firmware_version(self, target): + """ + This routine should return the active, inactive and next (committed) + firmware running on the target. Each of the version values in this context + could be a string with a major and minor number and a build value. + The port on which this API is called for can be referred using self.port. 
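For reference, the 18-byte image header that `__cable_fw_upgrade()` writes at `QSFP_BRCM_FW_UPGRADE_HEADER_0_7` packs the 24-bit image size, the destination MCU, the firmware and API versions, the CRC32, the packet size and the starting data page, all little-endian. The sketch below just mirrors those `dat[]` byte assignments; the helper name and standalone form are illustrative, not part of the driver.

```python
# Illustrative sketch of the header layout written by __cable_fw_upgrade();
# field order/widths follow the dat[] assignments above, helper name is mine.
def build_fw_upgrade_header(image_size, destination, fw_minor, fw_major,
                            api_minor, api_major, crc32, packet_size, data_page):
    hdr = bytearray(18)
    hdr[0:3] = image_size.to_bytes(3, "little")    # 24-bit image size
    hdr[3] = destination                           # TOR_MCU_SELF/PEER, NIC_MCU or MUX_CHIP
    hdr[4:6] = fw_minor.to_bytes(2, "little")      # firmware version, minor then major
    hdr[6:8] = fw_major.to_bytes(2, "little")
    hdr[8:10] = api_minor.to_bytes(2, "little")    # API version, minor then major
    hdr[10:12] = api_major.to_bytes(2, "little")
    hdr[12:16] = crc32.to_bytes(4, "little")       # CRC32 of the image
    hdr[16] = packet_size                          # FW_UP_PACKET_SIZE
    hdr[17] = data_page                            # QSFP_BRCM_FW_UPGRADE_DATA_PAGE_1
    return hdr
```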
+ + Args: + target: + One of the following predefined constants, the actual target to get the firmware version on: + TARGET_NIC -> NIC, + TARGET_TOR_A -> TORA, + TARGET_TOR_B -> TORB + Returns: + a Dictionary: + with version_active, version_inactive and version_next keys + and their corresponding values + """ + + dat = [1000] + dat1 = [1000] + i = 0 + result = {} + upgrade_info = cable_upgrade_info_s() + + if (target != self.TARGET_TOR_A) and (target != self.TARGET_TOR_B) and (target != self.TARGET_NIC): + return self.RR_ERROR + + if self.platform_chassis is not None: + + read_side = self.get_read_side() + if (read_side == self.TARGET_UNKNOWN): + self.log(self.LOG_ERROR, "ERROR: get_read_side Failed!") + return None + + if (target == self.TARGET_TOR_A): + target = self.TOR_MCU_SELF if (read_side == 1) else self.TOR_MCU_PEER + elif (target == self.TARGET_TOR_B): + target = self.TOR_MCU_SELF if (read_side == 2) else self.TOR_MCU_PEER + else: + target = self.NIC_MCU + + upgrade_info.destination = target + + self.log(self.LOG_DEBUG, "read_side {} target {}".format(read_side, target)) + + ret_val = self.cable_fw_get_status(upgrade_info) + if ret_val != self.RR_ERROR: + if upgrade_info.status_info.current_bank == 1: + # Active version + dat.append(format(upgrade_info.status_info.bank1_info.image_fw_version.image_version_major, 'X') + "." + format(upgrade_info.status_info.bank1_info.image_fw_version.image_version_minor, 'X')) + # Inactive version + dat.append(format(upgrade_info.status_info.bank2_info.image_fw_version.image_version_major, 'X') + "." + format(upgrade_info.status_info.bank2_info.image_fw_version.image_version_minor, 'X')) + else: + dat.append(format(upgrade_info.status_info.bank2_info.image_fw_version.image_version_major, 'X') + "." + format(upgrade_info.status_info.bank2_info.image_fw_version.image_version_minor, 'X')) + dat.append(format(upgrade_info.status_info.bank1_info.image_fw_version.image_version_major, 'X') + "." + format(upgrade_info.status_info.bank1_info.image_fw_version.image_version_minor, 'X')) + + if upgrade_info.status_info.next_bank == 1: + dat.append(format(upgrade_info.status_info.bank1_info.image_fw_version.image_version_major, 'X') + "." + format(upgrade_info.status_info.bank1_info.image_fw_version.image_version_minor, 'X')) + else: + dat.append(format(upgrade_info.status_info.bank2_info.image_fw_version.image_version_major, 'X') + "." + format(upgrade_info.status_info.bank2_info.image_fw_version.image_version_minor, 'X')) + + else: + self.log(self.LOG_ERROR, "Error getting version for {}".format("TOR MCU SELF" if target == self.TOR_MCU_SELF else "TOR MCU PEER" if target == self.TOR_MCU_PEER else "NIC MCU")) + #return self.RR_ERROR + return self.ERROR_GET_VERSION_FAILED + + if target == self.NIC_MCU: + upgrade_info.destination = self.MUX_CHIP + ret_val = self.cable_fw_get_status(upgrade_info) + if ret_val != self.RR_ERROR: + if upgrade_info.status_info.current_bank == 1: + # Active version + # Active version + dat1.append('.' + format(upgrade_info.status_info.bank1_info.image_fw_version.image_version_major, 'X') + "." + format(upgrade_info.status_info.bank1_info.image_fw_version.image_version_minor, 'X')) + dat1.append('.' + format(upgrade_info.status_info.bank2_info.image_fw_version.image_version_major, 'X') + "." + format(upgrade_info.status_info.bank2_info.image_fw_version.image_version_minor, 'X')) + # Inactive version + else: + dat1.append('.' + format(upgrade_info.status_info.bank2_info.image_fw_version.image_version_major, 'X') + "." 
+ format(upgrade_info.status_info.bank2_info.image_fw_version.image_version_minor, 'X')) + dat1.append('.' + format(upgrade_info.status_info.bank1_info.image_fw_version.image_version_major, 'X') + "." + format(upgrade_info.status_info.bank1_info.image_fw_version.image_version_minor, 'X')) + if upgrade_info.status_info.next_bank == 1: + # Active version + dat1.append('.' + format(upgrade_info.status_info.bank1_info.image_fw_version.image_version_major, 'X') + "." + format(upgrade_info.status_info.bank1_info.image_fw_version.image_version_minor, 'X')) + else: + dat1.append('.' + format(upgrade_info.status_info.bank2_info.image_fw_version.image_version_major, 'X') + "." + format(upgrade_info.status_info.bank2_info.image_fw_version.image_version_minor, 'X')) + else: + self.log(self.LOG_ERROR, "Error getting version for MUX CHIP") + #return self.RR_ERROR + return self.ERROR_GET_VERSION_FAILED + + for i in range(0, 4): + dat[i] = dat[i] + dat1[i] + + if target == self.TOR_MCU_SELF: + result["version_active"] = dat[1] + result["version_inactive"] = dat[2] + result["version_next"] = dat[3] + elif target == self.TOR_MCU_PEER: + result["version_active"] = dat[1] + result["version_inactive"] = dat[2] + result["version_next"] = dat[3] + elif target == self.NIC_MCU: + result["version_active"] = dat[1] + result["version_inactive"] = dat[2] + result["version_next"] = dat[3] + + return result + + def get_local_temperature(self): + """ + This API returns local ToR temperature of the physical port for which this API is called. + The port on which this API is called for can be referred using self.port. + + Args: + None + + Returns: + an Integer, the temperature of the local MCU + """ + + if self.platform_chassis is not None: + curr_offset = self.QSFP28_VENFD_129_DIE_TEMP_MSB + result = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset, 1) + if result is None: + self.log(self.LOG_ERROR, "get local temperature read eeprom failed") + return self.EEPROM_ERROR + else: + temperature = result[0] + else: + self.log(self.LOG_ERROR, "platform_chassis is not loaded, failed to check read side") + temperature = None + + return temperature + + def get_local_voltage(self): + """ + This API returns local ToR voltage of the physical port for which this API is called. + The port on which this API is called for can be referred using self.port. + + Args: + None + + Returns: + a float, the voltage of the local MCU + """ + + return None + + + def get_nic_voltage(self): + """ + This API returns nic voltage of the physical port for which this API is called. + The port on which this API is called for can be referred using self.port. + Args: + Returns: + a float, the voltage of the NIC MCU + """ + + return None + + def get_nic_temperature(self): + """ + This API returns nic temperature of the physical port for which this API is called. + The port on which this API is called for can be referred using self.port. + Args: + Returns: + an Integer, the temperature of the NIC MCU + """ + + return None + + + def get_eye_heights(self, target): + """ + This API returns the EYE height value for a specfic port. + The target could be local side, TOR_A, TOR_B, NIC etc. + The port on which this API is called for can be referred using self.port. 
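The dual-bank bookkeeping in `get_firmware_version()` reduces to a small mapping: the currently running bank supplies `version_active`, the other bank `version_inactive`, and `next_bank` selects `version_next`. A minimal sketch of that mapping (helper name and plain version strings are illustrative):

```python
# Minimal sketch of the bank-to-version mapping used by get_firmware_version().
def versions_from_banks(current_bank, next_bank, bank1_ver, bank2_ver):
    active, inactive = (bank1_ver, bank2_ver) if current_bank == 1 else (bank2_ver, bank1_ver)
    version_next = bank1_ver if next_bank == 1 else bank2_ver
    return {"version_active": active,
            "version_inactive": inactive,
            "version_next": version_next}

# e.g. versions_from_banks(1, 2, "1.A", "1.B")
# -> {'version_active': '1.A', 'version_inactive': '1.B', 'version_next': '1.B'}
```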
+ Args: + target: + One of the following predefined constants, the target on which to get the eye: + EYE_PRBS_LOOPBACK_TARGET_LOCAL -> local side, + EYE_PRBS_LOOPBACK_TARGET_TOR_A -> TOR A + EYE_PRBS_LOOPBACK_TARGET_TOR_B -> TOR B + EYE_PRBS_LOOPBACK_TARGET_NIC -> NIC + Returns: + a list, with EYE values of lane 0 lane 1 lane 2 lane 3 with corresponding index + """ + if target == self.EYE_PRBS_LOOPBACK_TARGET_NIC or target == self.TARGET_NIC: + self.log(self.LOG_WARN, "Get eye heights not supported for NIC target ") + return None + + core_ip, lane_mask = self.__util_convert_to_phyinfo_details(target, 0X0F) + print(lane_mask) + cmd_hdr = bytearray(10) + cmd_req_body = bytearray(self.MAX_REQ_PARAM_LEN) + + cmd_hdr[0] = 0 + cmd_hdr[1] = 0x40 + cmd_hdr[2] = lane_mask if (core_ip == self.CORE_IP_CLIENT) else 0 + cmd_hdr[3] = 0 + #cmd_hdr[3] = lane_mask if (core_ip == self.CORE_IP_LW) else 0 + cmd_hdr[4] = self.CORE_IP_CLIENT + + ret_val, cmd_rsp_body = self.__cable_cmd_execute(self.CABLE_CMD_ID_GET_EYE_MARGIN, cmd_hdr, cmd_req_body) + + if ret_val == 0: + eye_heights = [[] for i in range(4)] + values = [] + lrud_list = [] + + ind = 0 + for i in range(0, 32): + byte_list = [] + for j in range(0, 2): + byte_list.append(cmd_rsp_body[ind + j]) + byte_array = bytearray(byte_list) + values.append(struct.unpack("h", byte_array)[0]) + ind += 2 + + if lane_mask == 0x0F: + j = 0 + l = 0 + for i in range(0, 4): + for k in range(0, 4): + eye_heights[j].append(values[l]) + l += 1 + j += 1 + for i in range(0, 4): + #lrud_val = (format(eye_heights[i][0]) + " " + format(eye_heights[i][1]) + " " + format(eye_heights[i][2]) + " " + format(eye_heights[i][3])) + lrud_val = eye_heights[i][2] + eye_heights[i][3] + lrud_list.append(lrud_val) + + if lane_mask == 0xF0: + j = 0 + l = 16 + for i in range(0, 4): + for k in range(0, 4): + eye_heights[j].append(values[l]) + l += 1 + j += 1 + k = 0 + for i in range(4, 8): + #lrud_val = (format(eye_heights[k][0]) + " " + format(eye_heights[k][1]) + " " + format(eye_heights[k][2]) + " " + format(eye_heights[k][3])) + lrud_val = eye_heights[k][2] + eye_heights[k][3] + lrud_list.append(lrud_val) + + k += 1 + + return lrud_list + else: + return None + + + + def get_ber_info(self, target): + """ + This API returns the BER (Bit error rate) value for a specfic port. + The target could be local side, TOR_A, TOR_B, NIC etc. + The port on which this API is called for can be referred using self.port. + Args: + target: + One of the following predefined constants, the target on which to get the BER: + EYE_PRBS_LOOPBACK_TARGET_LOCAL -> local side, + EYE_PRBS_LOOPBACK_TARGET_TOR_A -> TOR A + EYE_PRBS_LOOPBACK_TARGET_TOR_B -> TOR B + EYE_PRBS_LOOPBACK_TARGET_NIC -> NIC + Returns: + a list, with BER values of lane 0 lane 1 lane 2 lane 3 with corresponding index + """ + mode_value = 0xff + lane = 0x0f + ber_result = [] + ret_val, lock_sts, err_cnt_list = self.cable_check_prbs(target, mode_value, lane) + if ret_val != 0: + return False + + time.sleep(1) + + ret_val, lock_sts, err_cnt_list = self.cable_check_prbs(target, mode_value, lane) + if ret_val != 0: + return False + + for i in range(0, 8): + prbs_error_per_lane = err_cnt_list[i] + self.log(self.LOG_DEBUG, "prbs_error_per_lane : {}".format(hex(prbs_error_per_lane))) + ber_result.append(prbs_error_per_lane/(25.78125*(math.pow(10, 9)))) + + return ber_result + + + + + def get_target_cursor_values(self, lane, target): + """ + This API returns the cursor equalization parameters for a target(NIC, TOR_A, TOR_B). 
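The eye-margin payload decoded in `get_eye_heights()` above is 32 signed 16-bit words, four per lane in left/right/up/down order; the per-lane value the API reports is the up plus down margin. A sketch of that parsing under the same 64-byte response and lane-mask convention as the code above (helper name is mine):

```python
# Sketch of the eye-margin parsing in get_eye_heights(); byte order matches the
# struct.unpack('h', ...) calls above, lane grouping assumed L/R/U/D per lane.
import struct

def parse_eye_heights(cmd_rsp_body, lane_mask=0x0F):
    values = struct.unpack("32h", bytes(cmd_rsp_body[:64]))
    base = 0 if lane_mask == 0x0F else 16          # lanes 4..7 start at word 16
    heights = []
    for lane in range(4):
        _left, _right, up, down = values[base + 4 * lane: base + 4 * lane + 4]
        heights.append(up + down)                  # vertical eye opening per lane
    return heights
```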
+ This includes pre one, pre two , main, post one, post two , post three cursor values + If any of the value is not available please return None for that filter + The port on which this API is called for can be referred using self.port. + Args: + lane: + an Integer, the lane on which to collect the cursor values + 1 -> lane 1, + 2 -> lane 2 + 3 -> lane 3 + 4 -> lane 4 + target: + One of the following predefined constants, the actual target to get the cursor values on: + TARGET_NIC -> NIC, + TARGET_TOR_A -> TORA, + TARGET_TOR_B -> TORB + Returns: + a list, with pre one, pre two , main, post one, post two , post three cursor values in the order + """ + + # validate lane number + if lane < 1 or lane > 4: + self.log(self.LOG_ERROR, "Invalid lane = {} valid lane is 1 to 4".format(lane)) + return self.ERROR_INVALID_INPUT, None + + lane -= 1 # internally lane starts from 0 + lane_mask = 1 << lane + ret_val = self.__util_convert_to_phyinfo_details(target, lane_mask) + + core_ip = ret_val[0] + lane_mask = ret_val[1] + self.log(self.LOG_DEBUG, "lane_mask = {} core_ip {} target {}".format(hex(lane_mask), core_ip, target)) + + cmd_hdr = bytearray(5) + cmd_req_body = bytearray(self.MAX_REQ_PARAM_LEN) + + cmd_hdr[0] = 0 + cmd_hdr[1] = 14 + cmd_hdr[2] = lane_mask if (core_ip == self.CORE_IP_CLIENT) else 0 + cmd_hdr[3] = lane_mask if (core_ip == self.CORE_IP_LW) else 0 + cmd_hdr[4] = core_ip + + ret_val, cmd_rsp_body = self.__cable_cmd_execute(self.CABLE_CMD_ID_GET_TXFIR, cmd_hdr, cmd_req_body) + + if ret_val == 0: + txfir = [] + ind = 0 + for _ in range(0, 7): + byte_list = [] + for j in range(0, 2): + byte_list.append(cmd_rsp_body[ind + j]) + byte_array = bytearray(byte_list) + txfir.append(struct.unpack("h", byte_array)[0]) + ind += 2 + + self.log(self.LOG_DEBUG, "lane {} : pre1 = {}".format(lane,txfir[0])) + self.log(self.LOG_DEBUG, "lane {} : pre2 = {}".format(lane,txfir[1])) + self.log(self.LOG_DEBUG, "lane {} : main = {}".format(lane,txfir[2])) + self.log(self.LOG_DEBUG, "lane {} : post1 = {}".format(lane,txfir[3])) + self.log(self.LOG_DEBUG, "lane {} : post2 = {}".format(lane,txfir[4])) + self.log(self.LOG_DEBUG, "lane {} : post3 = {}".format(lane,txfir[5])) + self.log(self.LOG_DEBUG, "lane {} : taps = {}".format(lane,txfir[6])) + return txfir + + return None + + def set_target_cursor_values(self, lane, cursor_values, target): + """ + This API sets the cursor equalization parameters for a target(NIC, TOR_A, TOR_B). + This includes pre one, pre two , main, post one, post two etc. cursor values + The port on which this API is called for can be referred using self.port. + Args: + lane: + an Integer, the lane on which to collect the cursor values + 1 -> lane 1, + 2 -> lane 2 + 3 -> lane 3 + 4 -> lane 4 + cursor_values: + a list, with pre one, pre two , main, post one, post two cursor, post three etc. 
values in the order + target: + One of the following predefined constants, the actual target to get the cursor values on: + TARGET_NIC -> NIC, + TARGET_TOR_A -> TORA, + Returns: + a boolean, True if cursor values setting is successful + , False if cursor values setting is not successful + """ + + if lane < 1 or lane > 4: + self.log(self.LOG_ERROR, "Invalid lane = {} valid lane is 1 to 4".format(lane)) + return self.ERROR_INVALID_INPUT, None + + lane -= 1 # internally lane starts from 0 + lane_mask = 1 << lane + ret_val = self.__util_convert_to_phyinfo_details(target, lane_mask) + core_ip = ret_val[0] + lane_mask = ret_val[1] + self.log(self.LOG_DEBUG, "lane_mask = {} core_ip {} target {}".format(hex(lane_mask), core_ip, target)) + cmd_hdr = bytearray(5) + #cmd_req_body = bytearray(self.MAX_REQ_PARAM_LEN) + cmd_req_body1 = bytearray() + cmd_hdr[0] = 14 + cmd_hdr[1] = 40 + cmd_hdr[2] = lane_mask if (core_ip == self.CORE_IP_CLIENT) else 0 + cmd_hdr[3] = lane_mask if (core_ip == self.CORE_IP_LW) else 0 + cmd_hdr[4] = core_ip + + for i in range(len(cursor_values)): + cmd_req_body1 += struct.pack(" NIC, + TARGET_TOR_A -> TORA, + TARGET_TOR_B -> TORB + Returns: + a boolean, True if the cable is target reset + , False if the cable target is not reset + """ + + status = bytearray(self.MAX_REQ_PARAM_LEN) + + if target == self.TARGET_TOR_A: + status[0] = 0x1 + elif target == self.TARGET_TOR_B: + status[0] = 0x2 + elif target == self.TARGET_NIC: + status[0] = 0x4 + else: + self.log(self.LOG_ERROR, "Invalid target") + return False + + # if read side is matching target, invoke reset_self() + read_side = self.get_read_side() + if read_side == target: + return self.reset_self() + + if self.platform_chassis is not None: + + debug_print("Trying for the lock") + with self.lock.acquire_timeout(self.PORT_LOCK_TIMEOUT) as result: + if result: + + curr_offset = self.QSFP28_VENFD_184_NIC_TORB_TORA_RESET + result = self.platform_chassis.get_sfp(self.port).write_eeprom(curr_offset, 1, status) + if result is False: + self.log(self.LOG_ERROR, "write to QSFP28_VENFD_184_NIC_TORB_TORA_RESET failed.") + return False + + time.sleep(3) + + self.log(self.LOG_DEBUG, "reset value to write. rval: {} ".format(status[0])) + # for next one second, keep checking the register to see if it becomes 0 + for _ in range(30): + rval = 0 + result = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset, 1) + rval = result[0] + + if (rval & status[0]) == 0x00: + ret_code = True + break + + time.sleep(0.1) #100ms + else: + self.log(self.LOG_ERROR, "TORB_TORA_RESET never become zero. rval: {} ".format(rval)) + ret_code = False + else: + self.log(self.LOG_ERROR, "Port lock timed-out!") + return self.ERROR_PORT_LOCK_TIMEOUT + + else: + self.log(self.LOG_ERROR, "platform_chassis is not loaded, failed to check read side") + ret_code = False + + return ret_code + + + def create_port(self, speed, fec_mode_tor=FEC_MODE_NONE, fec_mode_nic=FEC_MODE_NONE, anlt_tor=False, anlt_nic=False): + """ + This API sets the mode of the cable/port for corresponding lane/fec etc. configuration as specified. + The speed specifies which mode is supposed to be set 50G, 100G etc + the anlt specifies if auto-negotiation + link training (AN/LT) has to be enabled + Note that in case create_port is called multiple times, the most recent api call will take the precedence + on either of TOR side. + The port on which this API is called for can be referred using self.port. + Args: + speed: + an Integer, the value for the link speed to be configured (in megabytes). 
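The target-reset path above follows a simple write-then-poll handshake: set the per-target reset bit in `QSFP28_VENFD_184_NIC_TORB_TORA_RESET`, wait, then poll until the MCU clears it. A condensed sketch of that pattern (standalone helper, names are mine; `sfp` stands for the object returned by `platform_chassis.get_sfp(port)`):

```python
import time

# Condensed sketch of the reset handshake used above; not part of the driver.
def reset_and_wait(sfp, reset_offset, reset_bit, attempts=30, delay=0.1):
    if sfp.write_eeprom(reset_offset, 1, bytearray([reset_bit])) is False:
        return False
    time.sleep(3)                        # give the selected target time to reset
    for _ in range(attempts):
        val = sfp.read_eeprom(reset_offset, 1)[0]
        if (val & reset_bit) == 0:       # MCU clears the bit once reset completes
            return True
        time.sleep(delay)
    return False
```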
+ examples: + 50000 -> 50G + 100000 -> 100G + fec_mode_tor: + One of the following predefined constants, the actual fec mode for the tor A to be configured: + FEC_MODE_NONE, + FEC_MODE_RS, + FEC_MODE_FC + fec_mode_nic: + One of the following predefined constants, the actual fec mode for the nic to be configured: + FEC_MODE_NONE, + FEC_MODE_RS, + FEC_MODE_FC + anlt_tor: + a boolean, True if auto-negotiation + link training (AN/LT) is to be enabled on tor A + , False if auto-negotiation + link training (AN/LT) is not to be enabled on tor A + anlt_nic: + a boolean, True if auto-negotiation + link training (AN/LT) is to be enabled on nic + , False if auto-negotiation + link training (AN/LT) is not to be enabled on nic + Returns: + a boolean, True if the port is configured + , False if the port is not configured + """ + port_option_table = [] + ret_code = True + + port_option_table.append( valid_port_option_table_s(self.PORT_SPEED_50, self.FEC_MODE_NONE, self.FEC_MODE_NONE, self.ANLT_DONT_CARE, self.ANLT_DONT_CARE, self.CABLE_MODE_50G_PCS) ) + + port_option_table.append( valid_port_option_table_s(self.PORT_SPEED_50, self.FEC_MODE_RS, self.FEC_MODE_RS, self.ANLT_DONT_CARE, self.ANLT_DONT_CARE, self.CABLE_MODE_50G_FEC) ) + + port_option_table.append( valid_port_option_table_s(self.PORT_SPEED_100, self.FEC_MODE_NONE, self.FEC_MODE_NONE, self.ANLT_DONT_CARE, self.ANLT_DONT_CARE, self.CABLE_MODE_100G_PCS) ) + + port_option_table.append( valid_port_option_table_s(self.PORT_SPEED_100, self.FEC_MODE_RS, self.FEC_MODE_RS, self.ANLT_DONT_CARE, self.ANLT_DONT_CARE, self.CABLE_MODE_100G_FEC) ) + + matched_entry = valid_port_option_table_s + for i in range(len(port_option_table)): + if ((speed == port_option_table[i].speed) and (fec_mode_tor == port_option_table[i].fec_tor) and \ + (fec_mode_nic == port_option_table[i].fec_nic) and \ + ((0x2 if anlt_tor == True else 0x1) & port_option_table[i].anlt_tor) and \ + ((0x2 if anlt_nic == True else 0x1) & port_option_table[i].anlt_nic)): + matched_entry.speed = port_option_table[i].speed + matched_entry.fec_tor = port_option_table[i].fec_tor + matched_entry.fec_nic = port_option_table[i].fec_nic + matched_entry.anlt_tor = port_option_table[i].anlt_tor + matched_entry.anlt_nic = port_option_table[i].anlt_nic + matched_entry.mode = port_option_table[i].mode + break + else: + if i == (len(port_option_table) - 1): + self.log(self.LOG_ERROR,"Not supported input parameter") + return False + + # Disable ANLT irrespective, For 100G need to disable AN, mcu wouldn't do it + # Disable AN on the NIC side + if self.set_anlt(0, self.TARGET_NIC) == False: + self.log(self.LOG_ERROR, "disable AN/LT on TARGET_NIC failed") + ret_code = False + # Disable AN on the TORA side + if self.set_anlt(0, self.TARGET_TOR_A) == False: + self.log(self.LOG_ERROR, "disable AN/LT on TARGET_TOR-A failed") + ret_code = False + # Disable AN on the TORB side + if self.set_anlt(0, self.TARGET_TOR_B) == False: + self.log(self.LOG_ERROR, "disable AN/LT on TARGET_TOR-A failed") + ret_code = False + + # configure mode + if self.cable_set_mode(matched_entry.mode) == False: + self.log(self.LOG_ERROR, "set mode failed") + ret_code = False + + # configure AN/LT + if anlt_nic: + if self.set_anlt(1, self.TARGET_NIC) == False: + self.log(self.LOG_ERROR, "Enable AN/LT on TARGET_NIC failed") + ret_code = False + else: + if self.set_anlt(0, self.TARGET_NIC) == False: + self.log(self.LOG_ERROR, "Disable AN/LT on TARGET_NIC failed") + ret_code = False + + if anlt_tor: + if self.set_anlt(1, self.TARGET_TOR_A) == False: 
+ self.log(self.LOG_ERROR, "Enable AN/LT on TARGET_TORA failed") + ret_code = False + + if self.set_anlt(1, self.TARGET_TOR_B) == False: + self.log(self.LOG_ERROR, "Enable AN/LT on TARGET_TORB failed") + ret_code == False + + return ret_code + + + def get_speed(self): + """ + This API gets the mode of the cable for corresponding lane configuration. + The port on which this API is called for can be referred using self.port. + + Args: + None + Returns: + speed: + an Integer, the value for the link speed is configured (in megabytes). + examples: + 50000 -> 50G + 100000 -> 100G + """ + mode = self.cable_get_mode() + if mode == self.CABLE_MODE_100G_FEC or mode == self.CABLE_MODE_100G_PCS: + return self.CABLE_MODE_100G + elif mode == self.CABLE_MODE_50G_FEC or mode == self.CABLE_MODE_50G_PCS: + return self.CABLE_MODE_50G + else: + self.log(self.LOG_ERROR,"No mode configured") + return None + + + def set_fec_mode(self, fec_mode, target=None): + """ + This API gets the fec mode of the cable for which it is set to. + The port on which this API is called for can be referred using self.port. + Args: + fec_mode: + One of the following predefined constants, the actual fec mode for the port to be configured: + FEC_MODE_NONE, + FEC_MODE_RS, + FEC_MODE_FC + target: + One of the following predefined constants, the actual target to set the fec mode on: + TARGET_NIC -> NIC, + TARGET_TOR_A -> TORA, + TARGET_TOR_B -> TORB + Returns: + a boolean, True if the fec mode is configured + , False if the fec mode is not configured + """ + + entry_to_match = valid_port_option_table_s + + mode_select = self.cable_get_mode() + entry_to_match.mode = mode_select + curr_fec_mode = self.FEC_MODE_NONE if (mode_select == self.CABLE_MODE_50G_PCS or mode_select == self.CABLE_MODE_100G_PCS) else self.FEC_MODE_RS + + if fec_mode == curr_fec_mode: + self.log(self.LOG_INFO,"Current mode already configured to {}".format(" PCS " if fec_mode == self.FEC_MODE_NONE else "FEC")) + return True + + if fec_mode == self.FEC_MODE_NONE: + entry_to_match.fec_nic = self.FEC_MODE_NONE + entry_to_match.fec_tor = self.FEC_MODE_NONE + else: + entry_to_match.fec_nic = self.FEC_MODE_RS + entry_to_match.fec_tor = self.FEC_MODE_RS + + entry_to_match.speed = self.PORT_SPEED_50 if (mode_select == self.CABLE_MODE_50G_PCS or mode_select == self.CABLE_MODE_50G_FEC) else self.PORT_SPEED_100 + + anlt_enable = self.get_anlt(self.TARGET_NIC) + entry_to_match.anlt_nic = 1 if anlt_enable == True else 0 + + anlt_enable = self.get_anlt(self.TARGET_TOR_A) + entry_to_match.anlt_tor = 1 if anlt_enable == True else 0 + + anlt_enable = self.get_anlt(self.TARGET_TOR_B) + entry_to_match.anlt_tor = 1 if anlt_enable == True else 0 + + ret_code = self.create_port(entry_to_match.speed, entry_to_match.fec_tor, entry_to_match.fec_nic, (1 if entry_to_match.anlt_tor == True else 0), (1 if entry_to_match.anlt_nic==True else 0)) + + if ret_code == True: + self.log(self.LOG_INFO, "Set {} fec mode success".format(fec_mode)) + return True + else: + self.log(self.LOG_ERROR, "Set fec mode failed") + return False + + + def get_fec_mode(self, target=None): + """ + This API gets the fec mode of the cable which it is set to. + The port on which this API is called for can be referred using self.port. 
+ Args: + target: + One of the following predefined constants, the actual target to fec mode on: + TARGET_NIC -> NIC, + TARGET_TOR_A -> TORA, + TARGET_TOR_B -> TORB + Returns: + fec_mode: + One of the following predefined constants, the actual fec mode for the port to be configured: + FEC_MODE_NONE, + FEC_MODE_RS, + FEC_MODE_FC + """ + mode_select = self.cable_get_mode() + return self.FEC_MODE_NONE if (mode_select == self.CABLE_MODE_50G_PCS or mode_select == self.CABLE_MODE_100G_PCS) else self.FEC_MODE_RS + + def set_anlt(self, enable, target): + """ + This API enables/disables the cable auto-negotiation + link training (AN/LT). + The port on which this API is called for can be referred using self.port. + Args: + enable: + a boolean, True if auto-negotiation + link training (AN/LT) is to be enabled + , False if auto-negotiation + link training (AN/LT) is not to be enabled + target: + One of the following predefined constants, the actual target to get the stats on: + TARGET_NIC -> NIC, + TARGET_TOR_A -> TORA, + TARGET_TOR_B -> TORB + Returns: + a boolean, True if the auto-negotiation + link training (AN/LT) enable/disable specified is configured + , False if the auto-negotiation + link training (AN/LT) enable/disable specified is not configured + """ + # supported only for NIC LW + #if target != self.TARGET_NIC: + # self.log(self.LOG_ERROR, "ANLT not supported for CLIENT") + # return -1 + + values = self.__util_convert_to_phyinfo_details(target, 0x0F) + core_ip = values[0] + lane_mask = values[1] + self.log(self.LOG_DEBUG, "core_ip {} lane_mask {}".format(core_ip, hex(lane_mask))) + + + cmd_hdr = bytearray(5) + cmd_req_body = bytearray(self.MAX_REQ_PARAM_LEN) + + cmd_hdr[0] = 0x1 + cmd_hdr[1] = 0 + cmd_hdr[2] = lane_mask if (core_ip == self.CORE_IP_CLIENT) else 0 + cmd_hdr[3] = lane_mask if (core_ip == self.CORE_IP_LW) else 0 + cmd_hdr[4] = core_ip + + cmd_req_body[0] = enable + + ret_val, cmd_rsp_body = self.__cable_cmd_execute(self.CABLE_CMD_ID_SET_ANLT, cmd_hdr, cmd_req_body) + if ret_val == 0 and cmd_rsp_body is None: + self.log(self.LOG_INFO, "{} AN/LT Successful".format("Enable" if (enable) else "Disable")) + return True + else: + self.log(self.LOG_ERROR, "Enable/Disable AN LT mode is failed") + return False + + + def get_anlt(self, target): + """ + This API gets the mode of the cable for corresponding lane configuration. + The port on which this API is called for can be referred using self.port. 
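`set_anlt()`/`get_anlt()` above, like most commands in this file, share the same 5-byte header passed to `__cable_cmd_execute()`. Judging from the call sites, byte 0 carries the request-body length, byte 1 the expected response length, bytes 2 and 3 the lane mask for the client-side or line-side core respectively, and byte 4 the core IP selector. A sketch of that construction (helper name is mine; the field meanings are inferred from the call sites, not from a spec):

```python
# Sketch of the 5-byte command header used with __cable_cmd_execute(); field
# meanings inferred from the call sites in this file, helper name is mine.
def build_cmd_hdr(req_len, rsp_len, lane_mask, core_ip, core_ip_client, core_ip_lw):
    hdr = bytearray(5)
    hdr[0] = req_len                                          # request body length
    hdr[1] = rsp_len                                          # expected response length
    hdr[2] = lane_mask if core_ip == core_ip_client else 0    # client-side lanes
    hdr[3] = lane_mask if core_ip == core_ip_lw else 0        # line-side lanes
    hdr[4] = core_ip                                          # core IP selector
    return hdr
```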
+ Args: + target: + One of the following predefined constants, the actual target to get the anlt on: + TARGET_NIC -> NIC, + TARGET_TOR_A -> TORA, + TARGET_TOR_B -> TORB + Returns: + a boolean, True if auto-negotiation + link training (AN/LT) is enabled + , False if auto-negotiation + link training (AN/LT) is not enabled + """ + + values = self.__util_convert_to_phyinfo_details(target, 0x0F) + core_ip = values[0] + lane_mask = values[1] + self.log(self.LOG_DEBUG, "core_ip {} lane_mask {} target {}".format(core_ip, hex(lane_mask), target)) + + cmd_hdr = bytearray(5) + cmd_req_body = bytearray(self.MAX_REQ_PARAM_LEN) + + cmd_hdr[0] = 0 + cmd_hdr[1] = 1 + cmd_hdr[2] = lane_mask if (core_ip == self.CORE_IP_CLIENT) else 0 + cmd_hdr[3] = lane_mask if (core_ip == self.CORE_IP_LW) else 0 + cmd_hdr[4] = core_ip + + ret_val, cmd_rsp_body = self.__cable_cmd_execute(self.CABLE_CMD_ID_GET_ANLT, cmd_hdr, cmd_req_body) + + if self.__validate_read_data(cmd_rsp_body, 1, "cable anlt get") == self.EEPROM_READ_DATA_INVALID: + return self.EEPROM_ERROR + + if ret_val == 0: + if cmd_rsp_body[0] & 1: + self.log(self.LOG_INFO, "AN/LT mode enabled") + return True + else: + self.log(self.LOG_INFO, "AN/LT mode disabled") + return False + else: + self.log(self.LOG_ERROR, "Get AN LT mode is failed") + return False + + + + + def get_event_log(self, clear_on_read=False): + """ + This API returns the event log of the cable + The port on which this API is called for can be referred using self.port. + Args: + clear_on_read: + a boolean, True if the log has to be cleared after read + , False if the log is not to be cleared after read + Returns: + list: + a list of strings which correspond to the event logs of the cable + """ + + return None + + def get_pcs_stats(self, target): + """ + This API returns the pcs statistics of the cable + The port on which this API is called for can be referred using self.port. + Args: + target: + One of the following predefined constants, the actual target to get the stats on: + TARGET_NIC -> NIC, + TARGET_TOR_A -> TORA, + TARGET_TOR_B -> TORB + Returns: + a dictionary: + a detailed format agreed upon by vendors + """ + + return None + + def get_fec_stats(self, target): + """ + This API returns the fec statistics of the cable + The port on which this API is called for can be referred using self.port. + Args: + target: + One of the following predefined constants, the actual target to get the stats on: + TARGET_NIC -> NIC, + TARGET_TOR_A -> TORA, + TARGET_TOR_B -> TORB + Returns: + a dictionary: + a detailed format agreed upon by vendors + """ + + return None + + + def set_autoswitch_hysteresis_timer(self, time1): + """ + This API sets the hysteresis timer of the cable. This is basically the time in auto-switch mode + which the mux has to wait after toggling it once, before again toggling the mux to a different ToR + The port on which this API is called for can be referred using self.port. 
+ Args: + time: + an Integer, the time value for hysteresis to be set in milliseconds + Returns: + a boolean, True if the time is configured + , False if the time is not configured + """ + + cmd_hdr = bytearray(5) + cmd_req_body = bytearray(self.MAX_REQ_PARAM_LEN) + + cmd_hdr[0] = 4 + cmd_hdr[1] = 0 + cmd_hdr[2] = 0 + cmd_hdr[3] = 0 + cmd_hdr[4] = self.CORE_IP_CENTRAL + + cmd_req_body[0] = ((0x01 | (time1 << 1)) | 1<< 7 ) + ret_val, cmd_rsp_body = self.__cable_cmd_execute(self.CABLE_CMD_ID_SET_HMUX_CONFIG, cmd_hdr, cmd_req_body) + if ret_val == 0 and cmd_rsp_body is None: + return True + else: + return False + + + def get_autoswitch_hysteresis_timer(self): + """ + This API gets the hysteresis timer of the cable. This is basically the time in auto-switch mode + which the mux has to wait after toggling it once, before again toggling the mux to a different ToR + The port on which this API is called for can be referred using self.port. + + Args: + None + Returns: + time: + an Integer, the time value for hysteresis is configured in milliseconds + """ + cmd_hdr = bytearray(5) + cmd_req_body = bytearray(self.MAX_REQ_PARAM_LEN) + + cmd_hdr[0] = 0 + cmd_hdr[1] = 4 + cmd_hdr[2] = 0 + cmd_hdr[3] = 0 + cmd_hdr[4] = self.CORE_IP_CENTRAL + ret_val, cmd_rsp_body = self.__cable_cmd_execute(self.CABLE_CMD_ID_GET_HMUX_CONFIG, cmd_hdr, cmd_req_body) + if ret_val == 0: + timer = (cmd_rsp_body[0] >> 1) + self.log(self.LOG_DEBUG, "Timer = {}".format(timer)) + + return timer + else: + self.log(self.LOG_ERROR, "Failed to get Timer") + return False + + + def restart_anlt(self, target): + """ + This API restarts auto-negotiation + link training (AN/LT) mode + The port on which this API is called for can be referred using self.port. + Args: + target: + One of the following predefined constants, the actual target to restart anlt on: + TARGET_NIC -> NIC, + TARGET_TOR_A -> TORA, + TARGET_TOR_B -> TORB + Returns: + a boolean, True if restart is successful + """ + + values = self.__util_convert_to_phyinfo_details(target, 0x0F) + core_ip = values[0] + lane_mask = values[1] + self.log(self.LOG_DEBUG, "core_ip {} lane_mask {}".format(core_ip, hex(lane_mask))) + + cmd_hdr = bytearray(5) + cmd_req_body = bytearray(self.MAX_REQ_PARAM_LEN) + + cmd_hdr[0] = 0 + cmd_hdr[1] = 0 + cmd_hdr[2] = lane_mask if (core_ip == self.CORE_IP_CLIENT) else 0 + cmd_hdr[3] = lane_mask if (core_ip == self.CORE_IP_LW) else 0 + cmd_hdr[4] = core_ip + + ret_val, cmd_rsp_body = self.__cable_cmd_execute(self.CABLE_CMD_ID_GET_ANLT_RESTART, cmd_hdr, cmd_req_body) + + if ret_val == 0 and cmd_rsp_body is None: + self.log(self.LOG_INFO, "AN LT Restart successful") + return True + else: + self.log(self.LOG_ERROR, "AN LT Restart failed") + return False + + + + + def get_anlt_stats(self, target): + """ + This API returns auto-negotiation + link training (AN/LT) mode statistics + The port on which this API is called for can be referred using self.port. 
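Taken together, `get_anlt()`, `set_anlt()` and `restart_anlt()` give the usual enable-and-renegotiate flow. A short usage sketch, assuming `cable` is an already-initialized `YCable` instance:

```python
# Usage sketch only; assumes `cable` is an initialized YCable instance.
if not cable.get_anlt(cable.TARGET_TOR_A):
    if cable.set_anlt(1, cable.TARGET_TOR_A):       # enable AN/LT on ToR A
        cable.restart_anlt(cable.TARGET_TOR_A)      # kick off a fresh negotiation
```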
+ Args: + target: + One of the following predefined constants, the actual target to get anlt stats on: + TARGET_NIC -> NIC, + TARGET_TOR_A -> TORA, + TARGET_TOR_B -> TORB + Returns: + a dictionary: + a detailed format agreed upon by vendors + """ + + values = self.__util_convert_to_phyinfo_details(target, 0x0F) + core_ip = values[0] + lane_mask = values[1] + + cmd_hdr = bytearray(5) + cmd_req_body = bytearray(self.MAX_REQ_PARAM_LEN) + + cmd_hdr[0] = 0 + cmd_hdr[1] = 9 + cmd_hdr[2] = lane_mask if (core_ip == self.CORE_IP_CLIENT) else 0 + cmd_hdr[3] = lane_mask if (core_ip == self.CORE_IP_LW) else 0 + cmd_hdr[4] = core_ip + + ret_val, cmd_rsp_body = self.__cable_cmd_execute(self.CABLE_CMD_ID_GET_ANLT_GET_STATUS, cmd_hdr, cmd_req_body) + + if self.__validate_read_data(cmd_rsp_body, 9, "anlt get status") == self.EEPROM_READ_DATA_INVALID: + return self.EEPROM_ERROR, None + + ret_array = [0 for _ in range(5)] + if ret_val == 0 and len(cmd_rsp_body) == 9: + ret_array[0] = an_state = cmd_rsp_body[0] + ret_array[1] = lp_ability1 = struct.unpack('H', cmd_rsp_body[1:3])[0] + ret_array[2] = lp_ability2 = struct.unpack('H', cmd_rsp_body[3:5])[0] + ret_array[3] = lp_ability3 = struct.unpack('H', cmd_rsp_body[5:7])[0] + ret_array[4] = lp_fec_ability = struct.unpack('H', cmd_rsp_body[7:9])[0] + + self.log(self.LOG_DEBUG, "Get AN LT AN State = {}".format(an_state)) + self.log(self.LOG_DEBUG, "Get AN LT LP ability1 = {}".format(lp_ability1)) + self.log(self.LOG_DEBUG, "Get AN LT LP ability2 = {}".format(lp_ability2)) + self.log(self.LOG_DEBUG, "Get AN LT LP ability3 = {}".format(lp_ability3)) + self.log(self.LOG_DEBUG, "Get AN LT LP FEC ability = {}".format(lp_fec_ability)) + + else: + self.log(self.LOG_ERROR, "Get AN LT status is failed") + return self.EEPROM_ERROR + + return ret_val ,ret_array + + + + +############################################################################################# +### Debug Functionality ### +############################################################################################# + + def set_debug_mode(self, enable): + """ + This API enables/disables a debug mode that the port is now + going to be run on. If enabled, this means that PRBS/Loopback etc. type diagnostic mode + is now going to be run on the port and hence normal traffic will be disabled + on it if enabled and vice-versa if disabled. + enable is typically to be used at the software level to inform the software + that debug APIs will be called afterwords. + disable will disable any previously enabled debug functionality inside the cable + so that traffic can pass through. Also it'll inform the software to come out of the debug mode. + The port on which this API is called for can be referred using self.port. + Args: + enable: + a boolean, True if the debug mode needs to be enabled + , False if the debug mode needs to be disabled + Returns: + a boolean, True if the enable is successful + , False if the enable failed + """ + + return None + + def get_debug_mode(self): + """ + This API checks if a debug mode is currently being run on the port + for which this API is called for. + This means that PRBS/Loopback etc. type diagnostic mode + if any are being run on the port this should return True else False. + The port on which this API is called for can be referred using self.port. 
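The 9-byte status payload parsed in `get_anlt_stats()` above is one byte of AN state followed by four 16-bit link-partner ability words. A compact version of that unpacking, using the same byte order as the `struct.unpack('H', ...)` slices above (helper name is mine):

```python
import struct

# Compact sketch of the 9-byte AN/LT status payload parsed in get_anlt_stats().
def parse_anlt_status(rsp):
    an_state = rsp[0]
    lp1, lp2, lp3, lp_fec = struct.unpack("4H", bytes(rsp[1:9]))
    return [an_state, lp1, lp2, lp3, lp_fec]
```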
+ Args: + Returns: + a boolean, True if debug mode enabled + , False if debug mode not enabled + """ + + return None + + + def enable_prbs_mode(self, target, mode_value, lane_mask, direction=PRBS_DIRECTION_BOTH): + """ + This API configures and enables the PRBS mode/type depending upon the mode_value the user provides. + The mode_value configures the PRBS Type for generation and BER sensing on a per side basis. + Target is an integer for selecting which end of the Y cable we want to run PRBS on. + LaneMap specifies the lane configuration to run the PRBS on. + Note that this is a diagnostic mode command and must not run during normal traffic/switch operation + The port on which this API is called for can be referred using self.port. + Args: + target: + One of the following predefined constants, the target on which to enable the PRBS: + EYE_PRBS_LOOPBACK_TARGET_LOCAL -> local side, + EYE_PRBS_LOOPBACK_TARGET_TOR_A -> TOR A + EYE_PRBS_LOOPBACK_TARGET_TOR_B -> TOR B + EYE_PRBS_LOOPBACK_TARGET_NIC -> NIC + mode_value: + an Integer, the mode/type for configuring the PRBS mode. + lane_mask: + an Integer, representing the lane_mask to be run PRBS on + 0bit for lane 0, 1bit for lane1 and so on. + for example 3 -> 0b'0011 , means running on lane0 and lane1 + direction: + One of the following predefined constants, the direction to run the PRBS: + PRBS_DIRECTION_BOTH + PRBS_DIRECTION_GENERATOR + PRBS_DIRECTION_CHECKER + Returns: + a boolean, True if the enable is successful + , False if the enable failed + """ + + core_ip, lane_mask = self.__util_convert_to_phyinfo_details(target, lane_mask) + + if mode_value == 0: + prbs_type = self.CABLE_PRBS7 + elif mode_value == 1: + prbs_type = self.CABLE_PRBS9 + elif mode_value == 2: + prbs_type = self.CABLE_PRBS11 + elif mode_value == 3: + prbs_type = self.CABLE_PRBS15 + elif mode_value == 4: + prbs_type = self.CABLE_PRBS23 + elif mode_value == 5: + prbs_type = self.CABLE_PRBS31 + elif mode_value == 6: + prbs_type = self.CABLE_PRBS58 + elif mode_value == 7: + prbs_type = self.CABLE_PRBS49 + elif mode_value == 8: + prbs_type = self.CABLE_PRBS13 + else: + self.log(self.LOG_ERROR, "Error: for checking mux_cable enable PRBS mode, the mode_value is wrong") + return self.ERROR_INVALID_PRBS_MODE + + cmd_hdr = bytearray(5) + cmd_req_body = bytearray(self.MAX_REQ_PARAM_LEN) + + cmd_hdr[0] = 0x2 + cmd_hdr[1] = 0 + cmd_hdr[2] = lane_mask if (core_ip == self.CORE_IP_CLIENT) else 0 + cmd_hdr[3] = lane_mask if (core_ip == self.CORE_IP_LW) else 0 + cmd_hdr[4] = core_ip + + cmd_req_body[0] = 1 #enable + cmd_req_body[1] = prbs_type + + ret_val, cmd_rsp_body = self.__cable_cmd_execute(self.CABLE_CMD_ID_PRBS_SET, cmd_hdr, cmd_req_body) + + if ret_val == 0 and cmd_rsp_body is None: + self.log(self.LOG_INFO, "Enable PRBS mode is successful") + return True + else: + self.log(self.LOG_ERROR, "Enable PRBS mode is failed") + return False + + + def disable_prbs_mode(self, target, direction=PRBS_DIRECTION_BOTH): + """ + This API disables the PRBS mode on the physical port. + The port on which this API is called for can be referred using self.port. 
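`enable_prbs_mode()` above maps its `mode_value` argument onto one of the `CABLE_PRBS*` patterns before issuing the command. For quick reference, that mapping restated as a table (the `CABLE_PRBS*` names are class constants; the dict itself is just a readable restatement):

```python
# mode_value -> PRBS pattern selected by enable_prbs_mode().
PRBS_PATTERN_BY_MODE = {
    0: "CABLE_PRBS7",  1: "CABLE_PRBS9",  2: "CABLE_PRBS11",
    3: "CABLE_PRBS15", 4: "CABLE_PRBS23", 5: "CABLE_PRBS31",
    6: "CABLE_PRBS58", 7: "CABLE_PRBS49", 8: "CABLE_PRBS13",
}
```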
+ + Args: + target: + One of the following predefined constants, the target on which to disable the PRBS: + EYE_PRBS_LOOPBACK_TARGET_LOCAL -> local side, + EYE_PRBS_LOOPBACK_TARGET_TOR_A -> TOR A + EYE_PRBS_LOOPBACK_TARGET_TOR_B -> TOR B + EYE_PRBS_LOOPBACK_TARGET_NIC -> NIC + direction: + One of the following predefined constants, the direction to run the PRBS: + PRBS_DIRECTION_BOTH + PRBS_DIRECTION_GENERATOR + PRBS_DIRECTION_CHECKER + Returns: + a boolean, True if the disable is successful + , False if the disable failed + """ + + core_ip, lane_mask = self.__util_convert_to_phyinfo_details(target, 0xF) + + cmd_hdr = bytearray(5) + cmd_req_body = bytearray(self.MAX_REQ_PARAM_LEN) + + cmd_hdr[0] = 0x2 + cmd_hdr[1] = 0 + cmd_hdr[2] = lane_mask if (core_ip == self.CORE_IP_CLIENT) else 0 + cmd_hdr[3] = lane_mask if (core_ip == self.CORE_IP_LW) else 0 + cmd_hdr[4] = core_ip + + cmd_req_body[0] = 0 #disable + cmd_req_body[1] = 0 + + ret_val, cmd_rsp_body = self.__cable_cmd_execute(self.CABLE_CMD_ID_PRBS_SET, cmd_hdr, cmd_req_body) + + if ret_val == 0 and cmd_rsp_body is None: + self.log(self.LOG_INFO, "Disable PRBS mode is successful") + return True + else: + self.log(self.LOG_ERROR, "Disable PRBS mode is failed") + return False + + + def enable_loopback_mode(self, target, lane_mask, mode=LOOPBACK_MODE_NEAR_END): + """ + This API configures and enables the Loopback mode on the port user provides. + Target is an integer for selecting which end of the Y cable we want to run loopback on. + LaneMap specifies the lane configuration to run the loopback on. + Note that this is a diagnostic mode command and must not run during normal traffic/switch operation + The port on which this API is called for can be referred using self.port. + Args: + target: + One of the following predefined constants, the target on which to enable the loopback: + EYE_PRBS_LOOPBACK_TARGET_LOCAL -> local side, + EYE_PRBS_LOOPBACK_TARGET_TOR_A -> TOR A + EYE_PRBS_LOOPBACK_TARGET_TOR_B -> TOR B + EYE_PRBS_LOOPBACK_TARGET_NIC -> NIC + mode_value: + One of the following predefined constants, the mode to be run for loopback: + LOOPBACK_MODE_NEAR_END + LOOPBACK_MODE_FAR_END + lane_mask: + an Integer, representing the lane_mask to be run loopback on + 0bit for lane 0, 1bit for lane1 and so on. + for example 3 -> 0b'0011 , means running on lane0 and lane1 + Returns: + a boolean, True if the enable is successful + , False if the enable failed + """ + + ret_val = self.__util_convert_to_loopback_phyinfo(target, lane_mask, mode) + core_ip = ret_val[0] + lane_mask = ret_val[1] + mode = ret_val[2] + + cmd_hdr = bytearray(5) + cmd_req_body = bytearray(self.MAX_REQ_PARAM_LEN) + + cmd_hdr[0] = 0x2 + cmd_hdr[1] = 0 + cmd_hdr[2] = lane_mask if (core_ip == self.CORE_IP_CLIENT) else 0 + cmd_hdr[3] = lane_mask if (core_ip == self.CORE_IP_LW) else 0 + cmd_hdr[4] = core_ip + + cmd_req_body[0] = mode + cmd_req_body[1] = 1 #enable + + ret_val, cmd_rsp_body = self.__cable_cmd_execute(self.CABLE_CMD_ID_SET_LOOPBACK, cmd_hdr, cmd_req_body) + + if ret_val == 0 and cmd_rsp_body is None: + self.log(self.LOG_INFO, "Enable looback mode is successful") + return True + else: + self.log(self.LOG_ERROR, "Enable loopback mode is failed") + return False + + + def disable_loopback_mode(self, target): + """ + This API disables the Loopback mode on the port user provides. + Target is an integer for selecting which end of the Y cable we want to run loopback on. + The port on which this API is called for can be referred using self.port. 
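A typical BER measurement with these APIs is enable PRBS, dwell, read counters, disable. A short usage sketch (function name is mine; assumes an initialized `YCable` instance and a port already placed in debug mode):

```python
import time

# Usage sketch only: run a PRBS31 BER check on all four lanes of a target.
def run_prbs_ber_check(cable, target, mode_value=5, lane_mask=0x0F, dwell=2):
    if cable.enable_prbs_mode(target, mode_value, lane_mask) is not True:
        return None
    time.sleep(dwell)                       # let the checker accumulate errors
    ber = cable.get_ber_info(target)        # per-lane BER list
    cable.disable_prbs_mode(target)
    return ber
```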
+ Args: + target: + One of the following predefined constants, the target on which to disable the loopback: + EYE_PRBS_LOOPBACK_TARGET_LOCAL -> local side, + EYE_PRBS_LOOPBACK_TARGET_TOR_A -> TOR A + EYE_PRBS_LOOPBACK_TARGET_TOR_B -> TOR B + EYE_PRBS_LOOPBACK_TARGET_NIC -> NIC + Returns: + a boolean, True if the disable is successful + , False if the disable failed + """ + + ret_val = self.__util_convert_to_loopback_phyinfo(target, 0xF, self.LOOPBACK_MODE_NEAR_END) + core_ip = ret_val[0] + lane_mask = ret_val[1] + mode = ret_val[2] + + cmd_hdr = bytearray(5) + cmd_req_body = bytearray(self.MAX_REQ_PARAM_LEN) + + cmd_hdr[0] = 0x2 + cmd_hdr[1] = 0 + cmd_hdr[2] = lane_mask if (core_ip == self.CORE_IP_CLIENT) else 0 + cmd_hdr[3] = lane_mask if (core_ip == self.CORE_IP_LW) else 0 + cmd_hdr[4] = core_ip + + # disable LOOPBACK_MODE_NEAR_END first + cmd_req_body[0] = mode + cmd_req_body[1] = 0 #disable + + ret_val, cmd_rsp_body = self.__cable_cmd_execute(self.CABLE_CMD_ID_SET_LOOPBACK, cmd_hdr, cmd_req_body) + + if ret_val != 0: + self.log(self.LOG_ERROR, "Disable loopback mode is failed") + return False + + # disable LOOPBACK_MODE_FAR_END next + ret_val = self.__util_convert_to_loopback_phyinfo(target, 0xF, self.LOOPBACK_MODE_FAR_END) + mode = ret_val[2] + + cmd_req_body[0] = mode + cmd_req_body[1] = 0 #disable + + ret_val, cmd_rsp_body = self.__cable_cmd_execute(self.CABLE_CMD_ID_SET_LOOPBACK, cmd_hdr, cmd_req_body) + + if ret_val == 0 and cmd_rsp_body is None: + self.log(self.LOG_INFO, "Disable looback mode is successful") + return True + else: + self.log(self.LOG_ERROR, "Disable loopback mode is failed") + return False + + def get_loopback_mode(self, target): + """ + This API returns the Loopback mode on the port which it has been configured to + Target is an integer for selecting which end of the Y cable we want to run loopback on. + The port on which this API is called for can be referred using self.port. 
+ Args: + target: + One of the following predefined constants, the target on which to disable the loopback: + EYE_PRBS_LOOPBACK_TARGET_LOCAL -> local side, + EYE_PRBS_LOOPBACK_TARGET_TOR_A -> TOR A + EYE_PRBS_LOOPBACK_TARGET_TOR_B -> TOR B + EYE_PRBS_LOOPBACK_TARGET_NIC -> NIC + Returns: + mode_value: + One of the following predefined constants, the mode to be run for loopback: + LOOPBACK_MODE_NEAR_END + LOOPBACK_MODE_FAR_END + """ + ret_val = self.__util_convert_to_loopback_phyinfo(target, 0xF, self.LOOPBACK_MODE_FAR_END) + core_ip = ret_val[0] + lane_mask = ret_val[1] + mode = ret_val[2] + + cmd_hdr = bytearray(5) + cmd_req_body = bytearray(self.MAX_REQ_PARAM_LEN) + + cmd_hdr[0] = 0x1 + cmd_hdr[1] = 0x1 + cmd_hdr[2] = lane_mask if (core_ip == self.CORE_IP_CLIENT) else 0 + cmd_hdr[3] = lane_mask if (core_ip == self.CORE_IP_LW) else 0 + cmd_hdr[4] = core_ip + + cmd_req_body[0] = mode + + ret_val, cmd_rsp_body = self.__cable_cmd_execute(self.CABLE_CMD_ID_GET_LOOPBACK, cmd_hdr, cmd_req_body) + if self.__validate_read_data(cmd_rsp_body, 1, "get loopback") == self.EEPROM_READ_DATA_INVALID: + return self.EEPROM_ERROR + if ret_val == 0: + if cmd_rsp_body[0] == 1: + self.log(self.LOG_INFO, "The Far-End loopback mode is set ON") + return self.LOOPBACK_MODE_FAR_END + else: + self.log(self.LOG_INFO, "Error getting the loopback mode ON/OFF") + return self.LOOPBACK_MODE_NONE + + # check NEAR_END loopback + ret_val = self.__util_convert_to_loopback_phyinfo(target, 0xF, self.LOOPBACK_MODE_NEAR_END) + mode = ret_val[2] + + cmd_req_body[0] = mode + + ret_val, cmd_rsp_body = self.__cable_cmd_execute(self.CABLE_CMD_ID_GET_LOOPBACK, cmd_hdr, cmd_req_body) + if self.__validate_read_data(cmd_rsp_body, 1, "get loopback") == self.EEPROM_READ_DATA_INVALID: + return self.EEPROM_ERROR + + if ret_val == 0: + if cmd_rsp_body[0] == 1: + self.log(self.LOG_INFO, "The Near-End loopback mode is set ON") + return self.LOOPBACK_MODE_NEAR_END + else: + self.log(self.LOG_INFO, "Error getting the loopback mode ON/OFF") + return self.LOOPBACK_MODE_NONE + + return self.LOOPBACK_MODE_NONE + + + def debug_dump_registers(self): + """ + This API should dump all registers with meaningful values + for the cable to be diagnosed for proper functioning. 
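For completeness, the loopback counterpart of the PRBS flow: enable near-end loopback, confirm it with `get_loopback_mode()`, then disable. Usage sketch only, assuming `cable` is an initialized `YCable` instance and the port has been put in debug mode first:

```python
# Usage sketch only; not part of the driver.
target = cable.EYE_PRBS_LOOPBACK_TARGET_LOCAL
if cable.enable_loopback_mode(target, 0x0F):             # near-end loopback by default
    if cable.get_loopback_mode(target) == cable.LOOPBACK_MODE_NEAR_END:
        print("near-end loopback active on all four lanes")
    cable.disable_loopback_mode(target)
```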
+ This means that for all the fields on relevant vendor-specific pages + this API should dump the appropriate fields with parsed values + which would help debug the Y-Cable + + Args: + None + Returns: + a Dictionary: + with all the relevant key-value pairs for all the meaningful fields + which would help diagnose the cable for proper functioning + """ + + print("\nPHY CHIP DEBUG info dump") + ret_code, reg_val = self.rd_reg_ex(0x5200C820, 0x0) + if ret_code == -1: + self.log(self.LOG_ERROR, "debug dump register read eeprom failed") + return self.EEPROM_ERROR + + print("active port status = {}".format(hex(reg_val))) + + ret_code, reg_val = self.rd_reg_ex(0x5200C894, 0x0) + print("standby port status = {}".format(hex(reg_val))) + + ret_code = self.wr_reg_ex(0x5200C81C, 0xFFFF, 0x0) + if ret_code is False: + print("ERROR: Writing to 0x5200C81C Failed!") + + ret_code = self.wr_reg_ex(0x5200C81C, 0x0, 0x0) + if ret_code is False: + print("ERROR: Writing to 0x5200C81C Failed!") + + ret_code, reg_val = self.rd_reg_ex(0x5200C8B4, 0x0) + print("GP_REG_45_int register = {} (cmd_ret: {})".format(hex(reg_val), ret_code)) + + print("CW=>LW IPC registers:") + for i in range(0,4): + reg_addr = 0x5200CC20 + i * 4 + ret_code, reg_val = self.rd_reg_ex(reg_addr, 0x0) + print("Lane {} = {} (cmd_ret: {})".format(i, hex(reg_val), ret_code)) + + print("LW=>CW IPC registers") + for i in range(0,4): + reg_addr = 0x5200CC40 + i * 4 + ret_code, reg_val = self.rd_reg_ex(reg_addr, 0x0) + print("Lane {} = {} (cmd_ret: {})".format(i, hex(reg_val), ret_code)) + + print("CW=>BH IPC registers") + for i in range(0,8): + reg_addr = 0x5200CC60 + i * 4 + ret_code, reg_val = self.rd_reg_ex(reg_addr, 0x0) + print("{} Lane {} = {} (cmd_ret: {})".format( "TORB" if i> 3 else "TORA", i, hex(reg_val), ret_code)) + + print("BH=>CW IPC registers") + for i in range(0,8): + reg_addr = 0x5200CCA0 + i * 4 + ret_code, reg_val = self.rd_reg_ex(reg_addr, 0x0) + print("{} Lane {} = {} (cmd_ret: {})".format( "TORB" if i> 3 else "TORA", i, hex(reg_val), ret_code)) + + mode = self.cable_get_mode() + print("pcs receive irq status registers") + print("lanes 0 to 3") + for i in range(0,3): + reg_addr = 0x52007E80 + i * 4 + ret_code, reg_val = self.rd_reg_ex(reg_addr, 0x0) + if ret_code == -1: + print("ERROR: rd_reg_ex {} Failed!".format(hex(reg_addr))) + + ret_code = self.wr_reg_ex(reg_addr, reg_val, 0x0) + if ret_code is False: + print("ERROR: wr_reg_ex to {} Failed!".format(hex(reg_addr))) + if(i==0): + print("{} {} = {}".format( "DESK_ALIGN_LOSS:", hex(reg_addr), hex(reg_val))) + elif i==1: + print("{} {} = {}".format( "DSKW0:", hex(reg_addr), hex(reg_val))) + elif i==2: + print("{} {} = {}".format( "DSKW1:", hex(reg_addr), hex(reg_val))) + + for i in range(0,4): + reg_addr = 0x52007E8C + i * 4 + ret_code, reg_val = self.rd_reg_ex(reg_addr, 0x0) + if ret_code == -1: + print("ERROR: rd_reg_ex {} Failed!".format(hex(reg_addr))) + reg_val = reg_val & 0x7FFF#dont clear bit 15 + ret_code = self.wr_reg_ex(reg_addr, reg_val, 0x0) + if ret_code is False: + print("ERROR: wr_reg_ex to {} Failed!".format(hex(reg_addr))) + print("Lane {} {} = {}".format( i, hex(reg_addr), hex(reg_val))) + if(mode == 0 or mode == 2):#for fec modes + print("FEC irq status") + for i in range(0,4): + reg_addr = 0x52007ED0 + i * 4 + ret_code, reg_val = self.rd_reg_ex(reg_addr, 0x0) + if ret_code == -1: + print("ERROR: rd_reg_ex {} Failed!".format(hex(reg_addr))) + ret_code = self.wr_reg_ex(reg_addr, reg_val, 0x0) + if ret_code is False: + print("ERROR: wr_reg_ex to {} 
Failed!".format(hex(reg_addr))) + if(i==0): + print("{} {} = {}".format( "DEC_AM_LOCK_UNLOCK:", hex(reg_addr), hex(reg_val))) + elif i==1: + print("{} {} = {}".format( "DEC_DGBOX:", hex(reg_addr), hex(reg_val))) + elif i==2: + print("{} {} = {}".format( "DEC_IGBOX:", hex(reg_addr), hex(reg_val))) + elif i==3: + print("{} {} = {}".format( "XDEC_ERR:", hex(reg_addr), hex(reg_val))) + for i in range(0,2): + reg_addr = 0x52007E60 + i * 4 + ret_code, reg_val = self.rd_reg_ex(reg_addr, 0x0) + if ret_code == -1: + print("ERROR: rd_reg_ex {} Failed!".format(hex(reg_addr))) + ret_code = self.wr_reg_ex(reg_addr, reg_val, 0x0) + if ret_code is False: + print("ERROR: wr_reg_ex to {} Failed!".format(hex(reg_addr))) + if(i==0): + print("{} {} = {}".format( "ENC_GBOX:", hex(reg_addr), hex(reg_val))) + elif i==1: + print("{} {} = {}".format( "ENC_PFIFO:", hex(reg_addr), hex(reg_val))) + print("lanes 4 to 7") + for i in range(0,3): + reg_addr = 0x52017E80 + i * 4 + ret_code, reg_val = self.rd_reg_ex(reg_addr, 0x0) + if ret_code == -1: + print("ERROR: rd_reg_ex {} Failed!".format(hex(reg_addr))) + ret_code = self.wr_reg_ex(reg_addr, reg_val, 0x0) + if ret_code is False: + print("ERROR: wr_reg_ex to {} Failed!".format(hex(reg_addr))) + if(i==0): + print("{} {} = {}".format( "DESK_ALIGN_LOSS:", hex(reg_addr), hex(reg_val))) + elif i==1: + print("{} {} = {}".format( "DSKW0:", hex(reg_addr), hex(reg_val))) + elif i==2: + print("{} {} = {}".format( "DSKW1:", hex(reg_addr), hex(reg_val))) + + for i in range(0,4): + reg_addr = 0x52017E8C + i * 4 + ret_code, reg_val = self.rd_reg_ex(reg_addr, 0x0) + if ret_code == -1: + print("ERROR: rd_reg_ex {} Failed!".format(hex(reg_addr))) + reg_val = reg_val & 0x7FFF#dont clear bit 15 + ret_code = self.wr_reg_ex(reg_addr, reg_val, 0x0) + if ret_code is False: + print("ERROR: wr_reg_ex to {} Failed!".format(hex(reg_addr))) + print("Lane {} {} = {}".format( i, hex(reg_addr), hex(reg_val))) + if(mode == 0 or mode == 2):#for fec modes + print("FEC irq status") + for i in range(0,4): + reg_addr = 0x52017ED0 + i * 4 + ret_code, reg_val = self.rd_reg_ex(reg_addr, 0x0) + if ret_code == -1: + print("ERROR: rd_reg_ex {} Failed!".format(hex(reg_addr))) + ret_code = self.wr_reg_ex(reg_addr, reg_val, 0x0) + if ret_code is False: + print("ERROR: wr_reg_ex to {} Failed!".format(hex(reg_addr))) + if(i==0): + print("{} {} = {}".format( "DEC_AM_LOCK_UNLOCK:", hex(reg_addr), hex(reg_val))) + elif i==1: + print("{} {} = {}".format( "DEC_DGBOX:", hex(reg_addr), hex(reg_val))) + elif i==2: + print("{} {} = {}".format( "DEC_IGBOX:", hex(reg_addr), hex(reg_val))) + elif i==3: + print("{} {} = {}".format( "XDEC_ERR:", hex(reg_addr), hex(reg_val))) + print("\n") + + return True + +############################################################################## +# +# Broadcom internal/debug functions +# +############################################################################## + + def rd_reg_ex(self, reg_addr, lane_map): + """ + This API specifically used to read the register values + + Args: + reg_addr: + an hexadecomal,the register address which we intrested to read + lane_map: + register belong to lane_map to be read + + Returns: + an integer, on sucess returns the register values + unsigned integer, register value + + """ + cmd_hdr = bytearray(5) + cmd_req_body = bytearray(self.MAX_REQ_PARAM_LEN) + + cmd_hdr[0] = 5 + cmd_hdr[1] = 4 + cmd_hdr[2] = 0 + cmd_hdr[3] = 0 + cmd_hdr[4] = 0 + + if lane_map == 0 or lane_map is None: + cmd_req_body[0] = 0 + else: + # if lane_mask is for 4..7 lanes, then 
set port_id to 1. Else, port_id to 0 + if (lane_map & 0xF0): + cmd_req_body[0] = 1 + else: + cmd_req_body[0] = 0 + + cmd_req_body[1] = (reg_addr & 0xFF) + cmd_req_body[2] = ((reg_addr >> 8) & 0xFF) + cmd_req_body[3] = ((reg_addr >>16) & 0xFF) + cmd_req_body[4] = ((reg_addr >>24) & 0xFF) + + ret_val, cmd_rsp_body = self.__cable_cmd_execute(self.CABLE_CMD_ID_READ_REG, cmd_hdr, cmd_req_body) + if ret_val == 0: + out = struct.unpack('I', cmd_rsp_body)[0] + else: + out = None + + return ret_val, out + + def wr_reg_ex(self, reg_addr, reg_value, lane_map): + """ + This API specifically used to write the register values + + Args: + reg_addr: + an hexadecomal,the register address where we want to write value + reg_value: + an hexadecomal,the register value which we want to write + lane_map: + Write register to be performed to given lane_map block + + Returns: + a Boolean, true if the write register succeeded and false if it did not succeed. + + """ + + cmd_hdr = bytearray(5) + cmd_req_body = bytearray(self.MAX_REQ_PARAM_LEN) + + cmd_hdr[0] = 9 + cmd_hdr[1] = 0 + cmd_hdr[2] = 0 + cmd_hdr[3] = 0 + cmd_hdr[4] = 0 + + if lane_map == 0 or lane_map is None: + cmd_req_body[0] = 0 + else: + # if lane_mask is for 4..7 lanes, then set port_id to 1. Else, port_id to 0 + if (lane_map & 0xF0): + cmd_req_body[0] = 1 + else: + cmd_req_body[0] = 0 + + cmd_req_body[1] = (reg_addr & 0xFF) + cmd_req_body[2] = ((reg_addr >> 8) & 0xFF) + cmd_req_body[3] = ((reg_addr >>16) & 0xFF) + cmd_req_body[4] = ((reg_addr >>24) & 0xFF) + cmd_req_body[5] = (reg_value & 0xFF) + cmd_req_body[6] = ((reg_value >> 8) & 0xFF) + cmd_req_body[7] = ((reg_value >>16) & 0xFF) + cmd_req_body[8] = ((reg_value >>24) & 0xFF) + + ret_val, cmd_rsp_body = self.__cable_cmd_execute(self.CABLE_CMD_ID_WRITE_REG, cmd_hdr, cmd_req_body) + + if cmd_rsp_body is not None: + self.log(self.LOG_ERROR, "ERROR: response not expected") + + return ret_val + + def util_print_ctx_debug(self): + """ + Utility api to print context debug info + """ + ret_code, cnt_val = self.rd_reg_ex(0x5200CCE0, 0x0) + if ret_code != 0: + return self.EEPROM_ERROR + ret_code, tmp_start_ppm = self.rd_reg_ex(0x5200CCE4, 0x0) + if ret_code != 0: + return self.EEPROM_ERROR + ret_code, tmp_stop_ppm = self.rd_reg_ex(0x5200CCE8, 0x0) + if ret_code != 0: + return self.EEPROM_ERROR + ret_code, tmp_bh_ppm = self.rd_reg_ex(0x5200CCEC, 0x0) + if ret_code != 0: + return self.EEPROM_ERROR + + start_ppm = c_int16(tmp_start_ppm).value + stop_ppm = c_int16(tmp_stop_ppm).value + bh_ppm = c_int16(tmp_bh_ppm).value + #print("cnt_val {} start_ppm {} stop_ppm {} bh_ppm {}".format(cnt_val, start_ppm, stop_ppm, bh_ppm)) + + if (start_ppm & 0x1000): + start_ppm = start_ppm | 0xFFFF0000 + + if (bh_ppm & 0x1000): + bh_ppm = bh_ppm | 0xFFFF0000 + + if (stop_ppm & 0x1000): + stop_ppm = stop_ppm | 0xFFFF0000 + + start_ppm = (start_ppm * 10)/105 + stop_ppm = (stop_ppm * 10)/105 + bh_ppm = (bh_ppm * 10)/105 + + ret_code, switch_time = self.rd_reg_ex(0x5200C7D4, 0x0) + if ret_code != 0: + return self.EEPROM_ERROR + + print("cnt_val = {}".format(cnt_val)) + print("start_ppm = {}".format(start_ppm)) + print("stop_ppm = {}".format(stop_ppm)) + print("bh_ppm = {}".format(bh_ppm)) + print("switch_time = {}".format(switch_time)) + + return ret_code + + def __qsfp_is_valid_page(self, page): + + if ((page == 5 or page == 6 or page == 7 or page == 8 or page == 9 or page == 10 or page == 11 or page == 12) or \ + (page == 0 or page == 1 or page == 2 or page == 4 or page == 3 or page == 0x80 or page == 0x81 or \ + page == 0x82 or 
page == 0xB1 or page == 0xFF or page == 0xFE or page == 0xFD)):
+            return True
+
+        return False
+
+    def cable_print_qsfp_page(self, interface, page_no):
+        """
+        This API prints the QSFP registers for a given interface/side and page number
+
+        Args:
+            interface:
+                0 - TORA
+                1 - TORB
+                2 - NIC
+            page_no:
+                an Integer, indicates the page number
+
+        Returns:
+            a bool, True on success
+                    False on failure
+        """
+
+        if self.__qsfp_is_valid_page(page_no) == False:
+            self.log(self.LOG_ERROR, "Error: invalid page no {}".format(hex(page_no)))
+            return False
+
+        cmd_hdr = bytearray(5)
+        cmd_req_body = bytearray(self.MAX_REQ_PARAM_LEN)
+
+        cmd_hdr[0] = 4
+        cmd_hdr[1] = 16
+        cmd_hdr[2] = 0
+        cmd_hdr[3] = 0
+        cmd_hdr[4] = 0
+
+        cmd_req_body[0] = interface
+        cmd_req_body[1] = page_no
+
+        if (page_no):
+            itr = 8
+        else:
+            itr = 16
+
+        for i in range(0, itr):
+            if (page_no):
+                start_off = 0x80 + i*16
+            else:
+                start_off = i*16
+
+            cmd_req_body[2] = start_off
+            cmd_req_body[3] = 16
+            ret_val, cmd_rsp_body = self.__cable_cmd_execute(self.CABLE_CMD_ID_DUMP_PAGE, cmd_hdr, cmd_req_body)
+
+            if ret_val == 0:
+                print('0x{0:0{1}x}'.format((i*16), 2), end = " ")
+                for j in range(0, 16):
+                    print('0x{0:0{1}x}'.format(cmd_rsp_body[j], 2), end = " ")
+                print("\n")
+
+            else:
+                self.log(self.LOG_ERROR, "QSFP_DUMP_PAGE failed! interface {} page {}".format(interface, page_no))
+                return False
+
+        return True
+
+
+
+    def cable_set_mode(self, cable_mode):
+        """
+        This API specifically sets the cable mode on the port the user provides.
+
+        Args:
+            cable_mode:
+                an Integer, specifies the cable_mode
+                CABLE_MODE_100G_FEC -> 0
+                CABLE_MODE_100G_PCS -> 1
+                CABLE_MODE_50G_FEC -> 2
+                CABLE_MODE_50G_PCS -> 3
+        Returns:
+            a boolean, true if the cable mode is set,
+            false if setting the cable mode failed
+        """
+        cmd_hdr = bytearray(5)
+        cmd_req_body = bytearray(self.MAX_REQ_PARAM_LEN)
+
+        cmd_hdr[0] = 1
+        cmd_hdr[1] = 0
+        cmd_hdr[2] = 0
+        cmd_hdr[3] = 0
+        cmd_hdr[4] = 0
+
+        if cable_mode == 0:
+            mode = "CABLE_MODE_100G_FEC"
+        elif cable_mode == 1:
+            mode = "CABLE_MODE_100G_PCS"
+        elif cable_mode == 2:
+            mode = "CABLE_MODE_50G_FEC"
+        elif cable_mode == 3:
+            mode = "CABLE_MODE_50G_PCS"
+        else:
+            self.log(self.LOG_ERROR, "CABLE MODE input is wrong")
+            return False
+
+
+        cmd_req_body[0] = cable_mode
+
+        ret_val, cmd_rsp_body = self.__cable_cmd_execute(self.CABLE_CMD_ID_SET_MODE, cmd_hdr, cmd_req_body)
+        if ret_val == -1:
+            self.log(self.LOG_ERROR, "set mode read eeprom failed")
+            return self.EEPROM_ERROR
+
+        if ret_val == 0:
+            self.log(self.LOG_INFO, "CABLE MODE SET {} SUCCESSFUL".format(mode))
+            return True
+        else:
+            if cmd_rsp_body is not None:
+                self.log(self.LOG_ERROR, "ERROR: Response unexpected")
+
+            self.log(self.LOG_WARN, "CABLE MODE SET {} NOT SUCCESSFUL".format(mode))
+            return False
+
+
+
+
+    def cable_get_mode(self):
+        """
+        This API specifically gets the currently configured cable mode of the port.
+        An illustrative usage sketch is given below.
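+        Illustrative usage sketch (the ``cable`` handle below is assumed to be
+        an already-constructed YCable instance; it is shown for illustration
+        only and is not part of this API):
+
+            if cable.cable_set_mode(YCable.CABLE_MODE_100G_FEC) is True:
+                mode = cable.cable_get_mode()
+                # 0 - CABLE_MODE_100G_FEC, 1 - CABLE_MODE_100G_PCS,
+                # 2 - CABLE_MODE_50G_FEC,  3 - CABLE_MODE_50G_PCS
+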
+ + Args: + None + + Returns: + integer , specifies one of the cable_mode (0 - CABLE_MODE_100G_FEC, + 1 - CABLE_MODE_100G_PCS, 2 - CABLE_MODE_50G_FEC, 3 - CABLE_MODE_50G_PCS) + -1 if api fails + """ + cmd_hdr = bytearray(5) + cmd_req_body = bytearray(self.MAX_REQ_PARAM_LEN) + + cmd_hdr[0] = 0 + cmd_hdr[1] = 1 + cmd_hdr[2] = 0 + cmd_hdr[3] = 0 + cmd_hdr[4] = 0 + + ret_val, cmd_rsp_body = self.__cable_cmd_execute(self.CABLE_CMD_ID_GET_MODE, cmd_hdr, cmd_req_body) + + if self.__validate_read_data(cmd_rsp_body, 1, "get cable mode") == self.EEPROM_READ_DATA_INVALID: + return self.EEPROM_ERROR + + if ret_val == 0: + regval_read = struct.unpack(" local side, + 1 - PRBS_TARGET_TOR1 -> TOR 1 + 2 - PRBS_TARGET_TOR2 -> TOR 2 + 3 - PRBS_TARGET_NIC -> NIC + mode_value: + an Integer, the mode/type for configuring the PRBS mode. + 0x00 = PRBS 9, 0x01 = PRBS 15, 0x02 = PRBS 23, 0x03 = PRBS 31 + lane_map: + an Integer, representing the lane_map to be run PRBS on + 0bit for lane 0, 1bit for lane1 and so on. + for example 3 -> 0b'0011 , means running on lane0 and lane1 + Returns: + a boolean, true if the PRBS lock is successful + , false if the PRBS lock is failed + + lock_status, lock status - each bit represents a lane PRBS lock stats + + list, error count list contains error count for each lane + """ + + core_ip, lane_mask = self.__util_convert_to_phyinfo_details(target, lane_mask) + if mode_value == 0: + prbs_type = self.CABLE_PRBS7 + elif mode_value == 1: + prbs_type = self.CABLE_PRBS9 + elif mode_value == 2: + prbs_type = self.CABLE_PRBS11 + elif mode_value == 3: + prbs_type = self.CABLE_PRBS15 + elif mode_value == 4: + prbs_type = self.CABLE_PRBS23 + elif mode_value == 5: + prbs_type = self.CABLE_PRBS31 + elif mode_value == 6: + prbs_type = self.CABLE_PRBS58 + elif mode_value == 7: + prbs_type = self.CABLE_PRBS49 + elif mode_value == 8: + prbs_type = self.CABLE_PRBS13 + elif mode_value == 0xff: + prbs_type = 0xff + else: + self.log(self.LOG_ERROR, "Error: for checking mux_cable check PRBS mode, the mode_value is wrong") + return self.ERROR_INVALID_PRBS_MODE + + self.log(self.LOG_DEBUG, "Check PRBS for core_ip {} lane_mask {} prbs_type {}".format(core_ip, hex(lane_mask), prbs_type)) + + cmd_hdr = bytearray(5) + cmd_req_body = bytearray(self.MAX_REQ_PARAM_LEN) + + cmd_hdr[0] = 0x2 + cmd_hdr[1] = 0x21 + cmd_hdr[2] = lane_mask if (core_ip == self.CORE_IP_CLIENT) else 0 + cmd_hdr[3] = lane_mask if (core_ip == self.CORE_IP_LW) else 0 + cmd_hdr[4] = core_ip + + cmd_req_body[0] = 1 #enable + cmd_req_body[1] = prbs_type + + ret_val, cmd_rsp_body = self.__cable_cmd_execute(self.CABLE_CMD_ID_PRBS_CHK, cmd_hdr, cmd_req_body) + + if self.__validate_read_data(cmd_rsp_body, 0x21, "PRBS Check") == self.EEPROM_READ_DATA_INVALID: + return self.EEPROM_ERROR, None, None + + lock_sts = int(cmd_rsp_body[0]) + self.log(self.LOG_DEBUG, "ret_val {} lock_status {}".format(ret_val, hex(lock_sts))) + err_cnt_list = [] + + if ret_val == 0: + if cmd_rsp_body is not None: + for i in range(0, 8): + if lane_mask & (1 << i): + if lock_sts & (1 << i): + self.log(self.LOG_DEBUG, "Lane {} PRBS_LOCKED".format(i)) + else: + self.log(self.LOG_DEBUG, "Lane {} PRBS_NOT_LOCKED".format(i)) + + err_cnt = struct.unpack_from('I', cmd_rsp_body, 1 + i*4)[0] + err_cnt_list.append(err_cnt) + self.log(self.LOG_DEBUG, "Error count[{}] : {} ".format(i, hex(err_cnt_list[i]))) + else: + self.log(self.LOG_WARN, "The check PRBS returned none") + return self.EEPROM_ERROR, None, None + else: + self.log(self.LOG_ERROR, "Check PRBS mode is failed") + return 
self.EEPROM_ERROR, None, None + + return ret_val, lock_sts, err_cnt_list + + def cable_get_intr_status(self): + """ + This API spcifically gets the Lane interupt status,Chip interupt status + and Port interupt status on torA, torB and NIC. + + bits Name Description + 3-0 TORA torA cdr loss of lock + 7-4 NIC nic cdr loss of lock + + 3-0 TORB torB cdr loss of lock + 7-4 TORB torB loss + + 3-0 TORA torA loss + 7-4 NIC nic loss + + 0-2 phy watchdog,fw ser,fw ded status respectively + 7-3 reserved + + 0 active tor to nic fault + 1 nic to torA link fault + 2 nic to torB link fault + 7-3 reserved + + 0 torA to nic pcs fec_link + 1 torB to nic pcs fec link + 7-2 reserved + + Args: + + physical_port: + an Integer, the actual physical port connected to a Y cable + Returns: + a bytearray, with Nic and torA loss of lock intr status,torB cdr loss of lock + and torB loss intr status,torA loss and Nic loss intr status, + CHIP interupt status ,PORT interupt status1, + PORT interupt status2. + + """ + + intr_status = bytearray(6) + curr_offset = [self.QSFP28_LP_5_TX_RX_CDR_LOL, + self.QSFP28_LOS_LOL_SEC, + self.QSFP28_LP_3_TX_RX_LOSS, + self.QSFP28_MESC_FAULT, + self.QSFP28_LINK_FAULT, + self.QSFP28_LINK_DOWN] + + for ind in range(0, len(curr_offset)): + result = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset[ind], 1) + if result is None: + self.log(self.LOG_ERROR,"intr_status read_eeprom failed") + return self.EEPROM_ERROR + else: + self.log(self.LOG_DEBUG, "intr_result[{}] value : {}".format(ind, hex(result[0]))) + + if curr_offset[ind] == self.QSFP28_LP_5_TX_RX_CDR_LOL: + self.log(self.LOG_DEBUG, "LANE Interupt status") + status = struct.unpack(" is documented below + + Byte offset bits Name Description + < > 0 squelch 0x01 enable squelch + 0x00 enable un-squelch + 0 direction 0x02 direction Egress(Tx) + 0x00 direction Ingress(Rx) + Args: + physical_port: + an Integer, the actual physical port connected to a Y cable + target: + an Integer, the actual target to get the cursor values on + TARGET_NIC -> NIC, + TARGET_TOR1-> TOR1, + TARGET_TOR2 -> TOR2 + lane_map: + an Integer, representing the lane_map to be set squelch on + 0bit for lane 0, 1bit for lane1 and so on. 
+ for example 3 -> 0b'0011 , means running on lane0 and lane1 + enable: + an Integer,specifies SQUELCH or UNSQUELCH + SQUELCh -> 1 + UNSQUELCH -> 0 + direction: + an Integer, specifies INGRESS, EGRESS or BOTH + INGRESS -> 0 + EGRESS -> 1 + Returns: + a Boolean, True on sucess + False on api fail + """ + + + ret_val = self.__util_convert_to_phyinfo_details(target, lane_map) + core_ip = ret_val[0] + lane_mask = ret_val[1] + + cmd_hdr = bytearray(5) + cmd_req_body = bytearray(self.MAX_REQ_PARAM_LEN) + + cmd_hdr[0] = 4 + cmd_hdr[1] = 0 + cmd_hdr[2] = lane_mask if (core_ip == self.CORE_IP_CLIENT) else 0 + cmd_hdr[3] = lane_mask if (core_ip == self.CORE_IP_LW) else 0 + cmd_hdr[4] = core_ip + + cmd_req_body[0] = enable + cmd_req_body[1] = direction + + ret_val, cmd_rsp_body = self.__cable_cmd_execute(self.CABLE_CMD_ID_SET_SQUELCH, cmd_hdr, cmd_req_body) + if ret_val == 0 and cmd_rsp_body is None: + return True + else: + return False + + def cable_get_squelch(self): + + """ + This API specifically returns the Rx squelch and Tx squelch status on TOR and NIC + + Args: + physical_port: + an Integer, the actual physical port connected to a Y cable + Returns: + an integer, 0 and cmd_rsp_body contains squelch status on success + -1 on api fail + """ + + if self.platform_chassis is not None: + curr_offset = self.QSFP_SQL_STATUS + result = self.platform_chassis.get_sfp(self.port).read_eeprom(curr_offset, 1) + if result is None: + self.log(self.LOG_ERROR,"get_squelch read eeprom failed") + return self.EEPROM_ERROR + else: + self.log(self.LOG_ERROR, "platform_chassis is not loaded") + return self.ERROR_PLATFORM_NOT_LOADED + + + cmd_hdr = bytearray(5) + cmd_req_body = bytearray(self.MAX_REQ_PARAM_LEN) + + cmd_hdr[0] = 0 + cmd_hdr[1] = 4 + cmd_hdr[2] = 0 + cmd_hdr[3] = 0 + cmd_hdr[4] = self.CORE_IP_ALL + + ret_val, cmd_rsp_body = self.__cable_cmd_execute(self.CABLE_CMD_ID_GET_SQUELCH, cmd_hdr, cmd_req_body) + + if ret_val == 0: + lane = 0 + for _ in range(0, 8): + if cmd_rsp_body[0] & (1<