HPE 3PAR adds controller, disk, and port interfaces #579
First changed file: the HPE 3PAR handler that adds the new list_controllers, list_disks and list_ports methods.
@@ -11,12 +11,15 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

import six
from oslo_log import log
from oslo_utils import units

from delfin import exception
from delfin.common import constants
from delfin.drivers.hpe.hpe_3par import consts

LOG = log.getLogger(__name__)

@@ -211,3 +214,198 @@ def list_volumes(self, context):
                      (six.text_type(e))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_controllers(self, storage_id):
        controllers = self.ssh_handler.get_controllers()
        controller_list = []
        if controllers:
            node_cpu_map = self.ssh_handler.get_controllers_cpu()
            node_version_map = self.ssh_handler.get_controllers_version()
            for controller in controllers:
                node_id = controller.get('node_id')
                memory_size = int(controller.get('node_control_mem',
                                                 '0')) * units.Mi + int(
                    controller.get('node_data_mem', '0')) * units.Mi
                cpu_info = ''
                if node_cpu_map:
                    cpu_info_map = node_cpu_map.get(node_id)
                    cpu_info_keys = list(cpu_info_map.keys())
                    for cpu_key in cpu_info_keys:
                        if cpu_info:
                            cpu_info = '%s%s' % (cpu_info, ',')
                        cpu_info = '%s%s * %s MHz' % (
                            cpu_info, cpu_info_map.get(cpu_key), cpu_key)
                soft_version = None
                if node_version_map:
                    soft_version = node_version_map.get(node_id, '')
                controller_model = {
                    'name': controller.get('node_name'),
                    'storage_id': storage_id,
                    'native_controller_id': node_id,
                    'status': consts.CONTROLLER_STATUS_MAP.get(
                        controller.get('node_state', '').upper(),
                        constants.ControllerStatus.OFFLINE),
                    'location': None,
                    'soft_version': soft_version,
                    'cpu_info': cpu_info,
                    'memory_size': str(memory_size)
                }
                controller_list.append(controller_model)
        return controller_list

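For a concrete sense of what the loop above produces, here is a small worked example of the memory_size and cpu_info assembly. The node values and the shape of node_cpu_map (CPU speed in MHz mapped to CPU count) are hypothetical, inferred from how the handler uses them:

```python
from oslo_utils import units

# Hypothetical values for one controller node (not taken from a real array).
controller = {'node_id': '0', 'node_name': '3PAR-node0',
              'node_control_mem': '4096', 'node_data_mem': '8192'}
node_cpu_map = {'0': {'2494': 4}}  # assumed shape: {node_id: {speed_mhz: count}}

# Control memory plus data memory, both reported in MiB, converted to bytes.
memory_size = (int(controller.get('node_control_mem', '0')) * units.Mi
               + int(controller.get('node_data_mem', '0')) * units.Mi)

# Comma-separated "<count> * <speed> MHz" entries, one per distinct CPU speed.
cpu_info = ''
for cpu_key, count in node_cpu_map['0'].items():
    if cpu_info:
        cpu_info = '%s%s' % (cpu_info, ',')
    cpu_info = '%s%s * %s MHz' % (cpu_info, count, cpu_key)

print(memory_size)  # 12884901888 (12 GiB expressed in bytes)
print(cpu_info)     # 4 * 2494 MHz
```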
    def list_disks(self, storage_id):
        disks = self.ssh_handler.get_disks()
        disk_list = []
        if disks:
            disks_inventory_map = self.ssh_handler.get_disks_inventory()
            for disk in disks:
                disk_id = disk.get('id')
                status = consts.DISK_STATUS_MAP.get(
                    disk.get('state', '').upper(),
                    constants.DiskStatus.ABNORMAL)
                capacity = int(float(disk.get("total", 0)) * units.Mi)
                serial_number = None
                manufacturer = None
                model = None
                firmware = None
                if disks_inventory_map:
                    inventory_map = disks_inventory_map.get(disk_id)
                    if inventory_map:
                        serial_number = inventory_map.get('disk_serial')
                        manufacturer = inventory_map.get('disk_mfr')
                        model = inventory_map.get('disk_model')
                        firmware = inventory_map.get('disk_fw_rev')
                speed = None
                if disk.get('rpm'):
                    speed = int(disk.get('rpm')) * units.k
                disk_model = {
                    'name': disk.get('cagepos'),
                    'storage_id': storage_id,
                    'native_disk_id': disk_id,
                    'serial_number': serial_number,
                    'manufacturer': manufacturer,
                    'model': model,
                    'firmware': firmware,
                    'speed': speed,
                    'capacity': capacity,
                    'status': status,
                    'physical_type': consts.DISK_PHYSICAL_TYPE_MAP.get(
                        disk.get('type').upper(),
                        constants.DiskPhysicalType.UNKNOWN),
                    'logical_type': None,
                    'health_score': None,
                    'native_disk_group_id': None,
                    'location': disk.get('cagepos')
                }
                disk_list.append(disk_model)
        return disk_list

    def list_ports(self, storage_id):
        ports = self.ssh_handler.get_ports()
        port_list = []
        if ports:
            ports_inventory_map = self.ssh_handler.get_ports_inventory()
            ports_config_map = self.ssh_handler.get_ports_config()
            ports_iscsi_map = self.ssh_handler.get_ports_iscsi()
            ports_rcip_map = self.ssh_handler.get_ports_rcip()
            ports_connected_map = self.ssh_handler.get_ports_connected()
            ports_fcoe_map = self.ssh_handler.get_ports_fcoe()
            port_fs_map = self.ssh_handler.get_ports_fs()
            for port in ports:
                port_id = port.get('n:s:p')
                port_type = ''
                if ports_inventory_map:
                    port_type = ports_inventory_map.get(port_id, '')
                max_speed = ''
                if ports_config_map:
                    max_speed = ports_config_map.get(port_id, '')
                ip_addr = None
                ip_mask = None
                ipv4 = None
                ipv4_mask = None
                ipv6 = None
                ipv6_mask = None
                rate = ''
                if ports_connected_map:
                    rate = ports_connected_map.get(port_id, '')
                if not ip_addr and ports_iscsi_map:

Review comment on this line:

Reviewer: "not ip_addr" can be placed outside, as it is needed in multiple cases.

Author: Each map extracts different values, and once a value is found in one of the maps there is no need to extract values from the other maps. If "not ip_addr" were added to the outermost layer, every map would end up being looked up once regardless. (A small sketch of this early-exit lookup pattern follows the method below.)

                    iscsi_map = ports_iscsi_map.get(port_id)
                    if iscsi_map:
                        ip_addr = iscsi_map.get('ipaddr')
                        ip_mask = iscsi_map.get('netmask/prefixlen')
                        rate = iscsi_map.get('rate')
                if not ip_addr and ports_rcip_map:
                    rcip_map = ports_rcip_map.get(port_id)
                    if rcip_map:
                        ip_addr = rcip_map.get('ipaddr')
                        ip_mask = rcip_map.get('netmask')
                        rate = rcip_map.get('rate')
                if not ip_addr and port_fs_map:
                    fs_map = port_fs_map.get(port_id)
                    if fs_map:
                        ip_addr = fs_map.get('ipaddr')
                        ip_mask = fs_map.get('netmask')
                        rate = fs_map.get('rate')
                if not rate and ports_fcoe_map:
                    fcoe_map = ports_fcoe_map.get(port_id)
                    if fcoe_map:
                        rate = fcoe_map.get('rate')
                if ip_addr and ip_addr != '-':
                    pattern = re.compile(consts.IPV4_PATTERN)
                    search_obj = pattern.search(ip_addr)
                    if search_obj:
                        ipv4 = ip_addr
                        ipv4_mask = ip_mask
                    else:
                        ipv6 = ip_addr
                        ipv6_mask = ip_mask
                wwn = None
                mac = None
                if port_type.upper() == 'ETH':
                    mac = port.get('port_wwn/hw_addr')
                else:
                    wwn = port.get('port_wwn/hw_addr')
                port_model = {
                    'name': port_id,
                    'storage_id': storage_id,
                    'native_port_id': port_id,
                    'location': port_id,
                    'connection_status':
                        consts.PORT_CONNECTION_STATUS_MAP.get(
                            port.get('state', '').upper(),
                            constants.PortConnectionStatus.UNKNOWN),
                    'health_status': constants.PortHealthStatus.NORMAL,
                    'type': consts.PORT_TYPE_MAP.get(
                        port_type.upper(), constants.PortType.OTHER),
                    'logical_type': None,
                    'speed': self.parse_speed(rate),
                    'max_speed': self.parse_speed(max_speed),
                    'native_parent_id': None,
                    'wwn': wwn,
                    'mac_address': mac,
                    'ipv4': ipv4,
                    'ipv4_mask': ipv4_mask,
                    'ipv6': ipv6,
                    'ipv6_mask': ipv6_mask,
                }
                port_list.append(port_model)
        return port_list

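To make the point from the review thread above concrete: because "not ip_addr" guards each source map individually, the first map that yields an address short-circuits the remaining lookups. A minimal sketch of that early-exit pattern, rewritten as a loop for brevity (the map names and sample data are made up):

```python
def first_ip(port_id, source_maps):
    """Return (ipaddr, netmask) from the first source map that knows the
    port; later maps are never consulted once an address is found."""
    for source_map in source_maps:
        entry = source_map.get(port_id) if source_map else None
        if entry and entry.get('ipaddr'):
            return entry.get('ipaddr'), entry.get('netmask')
    return None, None


# Hypothetical data: only the rcip map knows port 0:3:1.
iscsi_map = {}
rcip_map = {'0:3:1': {'ipaddr': '192.168.1.10', 'netmask': '255.255.255.0'}}
fs_map = {'0:3:1': {'ipaddr': '10.0.0.1'}}  # never consulted for this port

print(first_ip('0:3:1', [iscsi_map, rcip_map, fs_map]))
# ('192.168.1.10', '255.255.255.0')
```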
    def parse_speed(self, speed_value):
        speed = 0
        try:
            if speed_value == '' or speed_value == 'n/a':
                return None
            speeds = re.findall("\\d+", speed_value)
            if speeds:
                speed = int(speeds[0])
            if 'Gbps' in speed_value:
                speed = speed * units.G
            elif 'Mbps' in speed_value:
                speed = speed * units.M
            elif 'Kbps' in speed_value:
                speed = speed * units.k
        except Exception as err:
            err_msg = "analyse speed error: %s" % (six.text_type(err))
            LOG.error(err_msg)
        return speed
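A quick sketch of what parse_speed returns for a few representative strings. The sample inputs are hypothetical, and `handler` stands for an instance of the handler class above:

```python
from oslo_utils import units

# Expected results, assuming the parse_speed implementation shown above:
#   handler.parse_speed('8Gbps')   -> 8 * units.G  = 8000000000
#   handler.parse_speed('100Mbps') -> 100 * units.M = 100000000
#   handler.parse_speed('n/a')     -> None
#   handler.parse_speed('')        -> None
#   handler.parse_speed('4')       -> 4   (digits but no recognised unit suffix)

# oslo_utils uses decimal multipliers for k/M/G:
assert units.k == 1000 and units.M == 1000 ** 2 and units.G == 1000 ** 3
```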
Second changed file: the HPE 3PAR consts module imported above as delfin.drivers.hpe.hpe_3par.consts, which adds the CLI parsing patterns and status maps.
@@ -15,6 +15,8 @@
# under the License.

# CPG's status
from delfin.common import constants

STATUS_POOL_NORMAL = 1  # CPG STATUS Normal operation
STATUS_POOL_DEGRADED = 2  # CPG STATUS Degraded state
STATUS_POOL_FAILED = 3  # CPG STATUS Abnormal operation

@@ -624,3 +626,77 @@
    '0x09f0002': 'File Persona CPG grow limit warning',
    '0x0a50001': 'File Access Auditing Alerts'
}
NODE_PATTERN = "^\\s*Node\\s+[-]*Name[-]*\\s+[-]*State[-]*\\s+Master\\s+"
CPU_PATTERN = "^\\s*Node\\s+CPU\\s+[-]*Manufacturer[-]*\\s+[-]*Serial[-]*" \
              "\\s+CPUSpeed"
DISK_PATTERN = "^\\s*Id\\s+[-]*CagePos[-]*\\s+[-]*Type[-]*\\s+RPM\\s+State\\s+"
DISK_I_PATTERN = "^\\s*Id\\s+[-]*CagePos[-]*\\s+[-]*State[-]*\\s+" \
                 "[-]*Node_WWN[-]*\\s+[-]*MFR[-]*\\s+[-]*Model[-]*\\s+" \
                 "[-]*Serial[-]*\\s+[-]*FW_Rev[-]*"
PORT_PATTERN = "^\\s*N:S:P\\s+[-]*Mode[-]*\\s+[-]*State[-]*\\s+[-]*" \
               "Node_WWN[-]*\\s+[-]*Port_WWN/HW_Addr[-]*\\s+"
PORT_I_PATTERN = "^\\s*N:S:P\\s+Brand\\s+Model\\s+Rev\\s+Firmware\\s+" \
                 "Serial\\s+HWType"
PORT_PER_PATTERN = "^\\s*N:S:P\\s+Connmode\\s+ConnType\\s+CfgRate\\s+MaxRate"
PORT_C_PATTERN = "^\\s*N:S:P\\s+Mode\\s+Device\\s+Pos\\s+Config\\s+" \
                 "Topology\\s+Rate"
PORT_ISCSI_PATTERN = "^\\s*N:S:P\\s+State\\s+IPAddr\\s+Netmask/PrefixLen\\s+" \
                     "Gateway"
PORT_RCIP_PATTERN = "^\\s*N:S:P\\s+State\\s+[-]*HwAddr[-]*\\s+IPAddr\\s+" \
                    "Netmask\\s+Gateway\\s+MTU\\s+Rate"
PORT_FCOE_PATTERN = "^\\s*N:S:P\\s+State\\s+"
PORT_FS_PATTERN = "^\\s*N:S:P\\s+State\\s+"
FPG_PATTERN = "^\\s*FPG\\s+[-]*Mountpath[-]*\\s+[-]*Size[-]*\\s+[-]*" \
              "Available[-]*\\s+[-]*ActiveStates"
CPG_PATTERN = "^\\s*Id\\s+[-]*Name[-]*\\s+Warn"
VOLUME_PATTERN = "^\\s*Id\\s+Name\\s+Prov\\s+Compr\\s+Dedup"
FSTORE_PATTERN = "^\\s*Fstore\\s+VFS\\s+FPG\\s+State\\s+Mode"
FSHARE_PATTERN = "^\\s*ShareName\\s+Protocol\\s+VFS\\s+FileStore\\s+" \
                 "ShareDir\\s+State"
VFS_PATTERN = "^\\s*VFS\\s+FPG\\s+IPAddr\\s+State"
IPV4_PATTERN = "^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$"
CONTROLLER_STATUS_MAP = {
    'OK': constants.ControllerStatus.NORMAL,
    'NORMAL': constants.ControllerStatus.NORMAL,
    'DEGRADED': constants.ControllerStatus.OFFLINE,
    'FAILED': constants.ControllerStatus.OFFLINE
}
DISK_PHYSICAL_TYPE_MAP = {
    'FC': constants.DiskPhysicalType.FC,
    'SSD': constants.DiskPhysicalType.SSD,
    'NL': constants.DiskPhysicalType.UNKNOWN
}
DISK_STATUS_MAP = {
    'NORMAL': constants.DiskStatus.NORMAL,
    'DEGRADED': constants.DiskStatus.ABNORMAL,
    'FAILED': constants.DiskStatus.ABNORMAL,
    'NEW': constants.DiskStatus.ABNORMAL
}

Review comment on DISK_STATUS_MAP:

Reviewer: Query: should it not be NORMAL?

Author: Disk state does not have a 'query' property; it only contains NORMAL, DEGRADED, FAILED and NEW.

Reviewer: I meant it as a query, i.e. a doubt :), sorry for not writing that clearly.

Author: Only 'NORMAL' maps to constants.DiskStatus.NORMAL; the others are classified as abnormal. (See the lookup sketch after these maps.)

PORT_CONNECTION_STATUS_MAP = {
    'CONFIG_WAIT': constants.PortConnectionStatus.DISCONNECTED,
    'ALPA_WAIT': constants.PortConnectionStatus.DISCONNECTED,
    'LOGIN_WAIT': constants.PortConnectionStatus.DISCONNECTED,
    'READY': constants.PortConnectionStatus.CONNECTED,
    'LOSS_SYNC': constants.PortConnectionStatus.DISCONNECTED,
    'ERROR_STATE': constants.PortConnectionStatus.DISCONNECTED,
    'XXX': constants.PortConnectionStatus.DISCONNECTED,
    'NONPARTICIPATE': constants.PortConnectionStatus.DISCONNECTED,
    'COREDUMP': constants.PortConnectionStatus.DISCONNECTED,
    'OFFLINE': constants.PortConnectionStatus.DISCONNECTED,
    'FWDEAD': constants.PortConnectionStatus.DISCONNECTED,
    'IDLE_FOR_RESET': constants.PortConnectionStatus.DISCONNECTED,
    'DHCP_IN_PROGRESS': constants.PortConnectionStatus.DISCONNECTED,
    'PENDING_RESET': constants.PortConnectionStatus.DISCONNECTED
}
PORT_TYPE_MAP = {
    'FC': constants.PortType.FC,
    'ISCSI': constants.PortType.ISCSI,
    'ETH': constants.PortType.ETH,
    'CNA': constants.PortType.CNA,
    'SAS': constants.PortType.SAS,
    'COMBO': constants.PortType.COMBO,
    'NVMe': constants.PortType.OTHER,
    'UNKNOWN': constants.PortType.OTHER,
    'RCIP': constants.PortType.RCIP,
    'RCFC': constants.PortType.OTHER
}
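As a small illustration of the classification agreed in the DISK_STATUS_MAP thread above: only 'NORMAL' resolves to a normal disk status, while every other reported state, and anything unrecognised, falls back to ABNORMAL through the .get() default used in list_disks. This sketch assumes the delfin constants and the consts module as changed in this diff:

```python
from delfin.common import constants
from delfin.drivers.hpe.hpe_3par import consts

for state in ('normal', 'degraded', 'failed', 'new', 'unexpected'):
    status = consts.DISK_STATUS_MAP.get(state.upper(),
                                        constants.DiskStatus.ABNORMAL)
    print(state, '->', status)
# Only 'normal' resolves to DiskStatus.NORMAL; the rest resolve to ABNORMAL.
```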
Review comment:

Reviewer: Can we set the coverage threshold drop to 0%?

Author: The current coverage is 71.25%, against the required 70%. However, after this code change the coverage reported by the CI check drops by 0.01%, so the coverage check cannot pass. Configuring this parameter avoids the problem: a threshold of 5% means the check passes as long as coverage stays above 70% and the drop is less than 5%. (A simplified model of this rule is sketched below.)
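A rough model of the threshold rule described in the reply above, using the figures quoted there (71.25% base coverage, a 0.01% drop). This is a simplified sketch of the behaviour being configured, not the coverage tool's actual implementation:

```python
def coverage_check_passes(new_coverage, base_coverage,
                          target=70.0, threshold=0.0):
    """Pass when coverage stays above the project target and any drop
    relative to the base branch is within the allowed threshold."""
    meets_target = new_coverage >= target
    drop_within_threshold = (base_coverage - new_coverage) <= threshold
    return meets_target and drop_within_threshold


base = 71.25        # coverage on the base branch (%)
new = base - 0.01   # coverage after this patch (%)

print(coverage_check_passes(new, base))                 # False: any drop fails
print(coverage_check_passes(new, base, threshold=5.0))  # True: 0.01% < 5%
```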