diff --git a/delfin/api/v1/port_groups.py b/delfin/api/v1/port_groups.py
index 052e6580f..da8119bf8 100644
--- a/delfin/api/v1/port_groups.py
+++ b/delfin/api/v1/port_groups.py
@@ -42,6 +42,22 @@ def show(self, req, id):
         port_groups = db.port_groups_get_all(
             ctxt, marker, limit, sort_keys, sort_dirs, query_params, offset)
+
+        # Get Port Group to Port relation from DB
+        for port_group in port_groups:
+            params = {
+                "native_port_group_id":
+                    port_group['native_port_group_id']
+            }
+            ports = db.port_grp_port_rels_get_all(
+                ctxt, filters=params)
+
+            native_port_id_list = []
+            for port in ports:
+                native_port_id_list.append(port['native_port_id'])
+
+            port_group['ports'] = native_port_id_list
+
         return port_group_view.build_port_groups(port_groups)
diff --git a/delfin/api/v1/storage_host_groups.py b/delfin/api/v1/storage_host_groups.py
index a15669022..65171a6ae 100644
--- a/delfin/api/v1/storage_host_groups.py
+++ b/delfin/api/v1/storage_host_groups.py
@@ -42,6 +42,23 @@ def show(self, req, id):
         storage_host_groups = db.storage_host_groups_get_all(
             ctxt, marker, limit, sort_keys, sort_dirs, query_params, offset)
+
+        # Get Storage Host Group to Host relation from DB
+        for host_group in storage_host_groups:
+            params = {
+                "native_storage_host_group_id":
+                    host_group['native_storage_host_group_id']
+            }
+            hosts = db.storage_host_grp_host_rels_get_all(
+                ctxt, filters=params)
+
+            native_storage_host_id_list = []
+            for host in hosts:
+                native_storage_host_id_list.append(
+                    host['native_storage_host_id'])
+
+            host_group['storage_hosts'] = native_storage_host_id_list
+
         return storage_host_group_view\
             .build_storage_host_groups(storage_host_groups)
diff --git a/delfin/api/v1/volume_groups.py b/delfin/api/v1/volume_groups.py
index c6a6cf35c..d97f328dd 100644
--- a/delfin/api/v1/volume_groups.py
+++ b/delfin/api/v1/volume_groups.py
@@ -42,6 +42,22 @@ def show(self, req, id):
         volume_groups = db.volume_groups_get_all(
             ctxt, marker, limit, sort_keys, sort_dirs, query_params, offset)
+
+        # Get Volume Group to Volume relation from DB
+        for volume_group in volume_groups:
+            params = {
+                "native_volume_group_id":
+                    volume_group['native_volume_group_id']
+            }
+            volumes = db.vol_grp_vol_rels_get_all(
+                ctxt, filters=params)
+
+            native_volume_id_list = []
+            for volume in volumes:
+                native_volume_id_list.append(volume['native_volume_id'])
+
+            volume_group['volumes'] = native_volume_id_list
+
         return volume_group_view.build_volume_groups(volume_groups)
diff --git a/delfin/db/sqlalchemy/api.py b/delfin/db/sqlalchemy/api.py
index 755fa65c0..224b1adb3 100644
--- a/delfin/db/sqlalchemy/api.py
+++ b/delfin/db/sqlalchemy/api.py
@@ -2822,7 +2822,7 @@ def vol_grp_vol_rels_get_all(context, marker=None, limit=None,
     with session.begin():
         # Generate the query
         query = _generate_paginate_query(context, session, models.
-                                         VolGrpVolRelation,
+                                         VolGrpVolRel,
                                          marker, limit, sort_keys, sort_dirs,
                                          filters, offset)
         # No volume grp volume relation would match, return empty list
diff --git a/delfin/drivers/fake_storage/__init__.py b/delfin/drivers/fake_storage/__init__.py
index 1a8fe60b0..7bbef40d9 100644
--- a/delfin/drivers/fake_storage/__init__.py
+++ b/delfin/drivers/fake_storage/__init__.py
@@ -76,12 +76,13 @@
 
-# Min and max are currently set to 1 to make sure at least one relation can be
-# built in fake driver for host mapping elements
-MIN_STORAGE_HOST_INITIATORS, MAX_STORAGE_HOST_INITIATORS = 1, 1
-MIN_STORAGE_HOSTS, MAX_STORAGE_HOSTS = 1, 1
-MIN_STORAGE_HOST_GROUPS, MAX_STORAGE_HOST_GROUPS = 1, 1
-MIN_VOLUME_GROUPS, MAX_VOLUME_GROUPS = 1, 1
-MIN_PORT_GROUPS, MAX_PORT_GROUPS = 1, 1
-MIN_MASKING_VIEWS, MAX_MASKING_VIEWS = 1, 1
+# Min is kept at 1 to make sure at least one relation can be built in the
+# fake driver for host mapping elements
+MIN_STORAGE_HOST_INITIATORS, MAX_STORAGE_HOST_INITIATORS = 1, 3
+MIN_STORAGE_HOSTS, MAX_STORAGE_HOSTS = 1, 5
+MIN_STORAGE_HOST_GROUPS, MAX_STORAGE_HOST_GROUPS = 1, 5
+MIN_VOLUME_GROUPS, MAX_VOLUME_GROUPS = 1, 5
+MIN_PORT_GROUPS, MAX_PORT_GROUPS = 1, 5
+MAX_GROUP_RESOURCES_SIZE = 5
+MIN_MASKING_VIEWS, MAX_MASKING_VIEWS = 1, 5
 
 
 def get_range_val(range_str, t):
@@ -124,6 +125,10 @@ def __init__(self, **kwargs):
         MIN_VOLUME, MAX_VOLUME = get_range_val(
             CONF.fake_driver.fake_volume_range, int)
         PAGE_LIMIT = int(CONF.fake_driver.fake_page_query_limit)
+        self.rd_volumes_count = random.randint(MIN_VOLUME, MAX_VOLUME)
+        self.rd_ports_count = random.randint(MIN_PORTS, MAX_PORTS)
+        self.rd_storage_hosts_count = random.randint(MIN_STORAGE_HOSTS,
+                                                     MAX_STORAGE_HOSTS)
 
     def _get_random_capacity(self):
         total = random.randint(1000, 2000)
@@ -190,7 +195,7 @@ def list_storage_pools(self, ctx):
 
     def list_volumes(self, ctx):
         # Get a random number as the volume count.
-        rd_volumes_count = random.randint(MIN_VOLUME, MAX_VOLUME)
+        rd_volumes_count = self.rd_volumes_count
         LOG.info("###########fake_volumes number for %s: %d" % (
             self.storage_id, rd_volumes_count))
         loops = math.ceil(rd_volumes_count / PAGE_LIMIT)
@@ -228,7 +233,7 @@ def list_controllers(self, ctx):
         return ctrl_list
 
     def list_ports(self, ctx):
-        rd_ports_count = random.randint(MIN_PORTS, MAX_PORTS)
+        rd_ports_count = self.rd_ports_count
         LOG.info("###########fake_ports for %s: %d" % (self.storage_id,
                                                        rd_ports_count))
         port_list = []
@@ -871,8 +876,7 @@ def list_storage_host_initiators(self, ctx):
         return storage_host_initiators_list
 
     def list_storage_hosts(self, ctx):
-        rd_storage_hosts_count = random.randint(MIN_STORAGE_HOSTS,
-                                                MAX_STORAGE_HOSTS)
+        rd_storage_hosts_count = self.rd_storage_hosts_count
         LOG.info("###########fake_storage_hosts for %s: %d"
                  % (self.storage_id, rd_storage_hosts_count))
         storage_host_list = []
@@ -890,33 +894,72 @@ def list_storage_hosts(self, ctx):
         return storage_host_list
 
     def list_storage_host_groups(self, ctx):
-        rd_storage_host_groups_count = random.randint(MIN_STORAGE_HOST_GROUPS,
-                                                       MAX_STORAGE_HOST_GROUPS)
+        rd_storage_host_groups_count = random.randint(
+            MIN_STORAGE_HOST_GROUPS, MAX_STORAGE_HOST_GROUPS)
         LOG.info("###########fake_storage_host_groups for %s: %d"
                  % (self.storage_id, rd_storage_host_groups_count))
         storage_host_grp_list = []
         for idx in range(rd_storage_host_groups_count):
+            # Create hosts in the host group
+            host_name_list = []
+            storage_hosts_count = self.rd_storage_hosts_count - 1
+            if storage_hosts_count > 0:
+                for i in range(MAX_GROUP_RESOURCES_SIZE):
+                    host_name = "storage_host_" + str(
+                        random.randint(0, storage_hosts_count))
+                    if host_name not in host_name_list:
+                        host_name_list.append(host_name)
+
+            # Create comma separated list
+            storage_hosts = None
+            for host in host_name_list:
+                if storage_hosts:
+                    storage_hosts = storage_hosts + "," + host
+                else:
+                    storage_hosts = host
+
             f = {
                 "name": "storage_host_group_" + str(idx),
                 "description": "storage_host_group_" + str(idx),
                 "storage_id": self.storage_id,
                 "native_storage_host_group_id":
                     "storage_host_group_" + str(idx),
+                "storage_hosts": storage_hosts
             }
             storage_host_grp_list.append(f)
         return storage_host_grp_list
 
     def list_port_groups(self, ctx):
-        rd_port_groups_count = random.randint(MIN_PORT_GROUPS, MAX_PORT_GROUPS)
+        rd_port_groups_count = random.randint(MIN_PORT_GROUPS,
+                                              MAX_PORT_GROUPS)
         LOG.info("###########fake_port_groups for %s: %d"
                  % (self.storage_id, rd_port_groups_count))
         port_grp_list = []
         for idx in range(rd_port_groups_count):
+            # Create ports in the port group
+            port_name_list = []
+            ports_count = self.rd_ports_count - 1
+            if ports_count > 0:
+                for i in range(MAX_GROUP_RESOURCES_SIZE):
+                    port_name = "port_" + str(
+                        random.randint(0, ports_count))
+                    if port_name not in port_name_list:
+                        port_name_list.append(port_name)
+
+            # Create comma separated list
+            ports = None
+            for port in port_name_list:
+                if ports:
+                    ports = ports + "," + port
+                else:
+                    ports = port
+
             f = {
                 "name": "port_group_" + str(idx),
                 "description": "port_group_" + str(idx),
                 "storage_id": self.storage_id,
                 "native_port_group_id": "port_group_" + str(idx),
+                "ports": ports
             }
             port_grp_list.append(f)
@@ -929,11 +972,30 @@ def list_volume_groups(self, ctx):
                  % (self.storage_id, rd_volume_groups_count))
         volume_grp_list = []
         for idx in range(rd_volume_groups_count):
+            # Create volumes in the volume group
+            volume_name_list = []
+            volumes_count = self.rd_volumes_count - 1
+            if volumes_count > 0:
+                for i in range(MAX_GROUP_RESOURCES_SIZE):
+                    volume_name = "volume_" + str(
+                        random.randint(0, volumes_count))
+                    if volume_name not in volume_name_list:
+                        volume_name_list.append(volume_name)
+
+            # Create comma separated list
+            volumes = None
+            for volume in volume_name_list:
+                if volumes:
+                    volumes = volumes + "," + volume
+                else:
+                    volumes = volume
+
             f = {
                 "name": "volume_group_" + str(idx),
                 "description": "volume_group_" + str(idx),
                 "storage_id": self.storage_id,
                 "native_volume_group_id": "volume_group_" + str(idx),
+                "volumes": volumes
             }
             volume_grp_list.append(f)
         return volume_grp_list
diff --git a/delfin/task_manager/tasks/resources.py b/delfin/task_manager/tasks/resources.py
index 1da8961da..5803fcefe 100644
--- a/delfin/task_manager/tasks/resources.py
+++ b/delfin/task_manager/tasks/resources.py
@@ -19,6 +19,9 @@
 
 from delfin import coordination
 from delfin import db
+from delfin.db.sqlalchemy.models import StorageHostGrpHostRel
+from delfin.db.sqlalchemy.models import VolGrpVolRel
+from delfin.db.sqlalchemy.models import PortGrpPortRel
 from delfin import exception
 from delfin.common import constants
 from delfin.drivers import api as driverapi
@@ -80,6 +83,77 @@ def _check_deleted(func, *args, **kwargs):
     return _check_deleted
+
+
+def _build_storage_host_group_relations(ctx, storage_id,
+                                        storage_host_groups):
+    """ Builds storage host group to host relations."""
+    db.storage_host_grp_host_rels_delete_by_storage(ctx,
+                                                    storage_id)
+    storage_host_grp_relation_list = []
+    for storage_host_group in storage_host_groups:
+        storage_hosts = storage_host_group.pop('storage_hosts', None)
+        if not storage_hosts:
+            continue
+        storage_hosts = storage_hosts.split(',')
+
+        for storage_host in storage_hosts:
+            storage_host_group_relation = {
+                StorageHostGrpHostRel.storage_id.name: storage_id,
+                StorageHostGrpHostRel.native_storage_host_group_id.name:
+                    storage_host_group['native_storage_host_group_id'],
+                StorageHostGrpHostRel.native_storage_host_id.name:
+                    storage_host
+            }
+            storage_host_grp_relation_list \
+                .append(storage_host_group_relation)
+
+    db.storage_host_grp_host_rels_create(
+        ctx, storage_host_grp_relation_list)
+
+
+def _build_volume_group_relations(ctx, storage_id, volume_groups):
+    """ Builds volume group to volume relations."""
+    db.vol_grp_vol_rels_delete_by_storage(ctx, storage_id)
+    volume_group_relation_list = []
+    for volume_group in volume_groups:
+        volumes = volume_group.pop('volumes', None)
+        if not volumes:
+            continue
+        volumes = volumes.split(',')
+
+        for volume in volumes:
+            volume_group_relation = {
+                VolGrpVolRel.storage_id.name: storage_id,
+                VolGrpVolRel.native_volume_group_id.name:
+                    volume_group['native_volume_group_id'],
+                VolGrpVolRel.native_volume_id.name: volume}
+            volume_group_relation_list.append(volume_group_relation)
+
+    db.vol_grp_vol_rels_create(ctx, volume_group_relation_list)
+
+
+def _build_port_group_relations(ctx, storage_id, port_groups):
+    """ Builds port group to port relations."""
+    db.port_grp_port_rels_delete_by_storage(ctx, storage_id)
+
+    port_group_relation_list = []
+    for port_group in port_groups:
+        ports = port_group.pop('ports', None)
+        if not ports:
+            continue
+        ports = ports.split(',')
+
+        for port in ports:
+            port_group_relation = {
+                PortGrpPortRel.storage_id.name: storage_id,
+                PortGrpPortRel.native_port_group_id.name:
+                    port_group['native_port_group_id'],
+                PortGrpPortRel.native_port_id.name: port
+            }
+            port_group_relation_list.append(port_group_relation)
+
+    db.port_grp_port_rels_create(ctx, port_group_relation_list)
+
+
 class StorageResourceTask(object):
 
     def __init__(self, context, storage_id):
@@ -741,6 +815,12 @@ def sync(self):
             # Build relation between host grp and host to be handled here.
             storage_host_groups = self.driver_api \
                 .list_storage_host_groups(self.context, self.storage_id)
+            if storage_host_groups:
+                _build_storage_host_group_relations(
+                    self.context, self.storage_id, storage_host_groups)
+                LOG.info('Building host group relations successful for '
+                         'storage id:{0}'.format(self.storage_id))
+
             db_storage_host_groups = db.storage_host_groups_get_all(
                 self.context, filters={"storage_id": self.storage_id})
 
@@ -777,6 +857,8 @@ def sync(self):
     def remove(self):
         LOG.info('Remove storage host groups for storage id:{0}'
                  .format(self.storage_id))
+        db.storage_host_grp_host_rels_delete_by_storage(self.context,
+                                                        self.storage_id)
         db.storage_host_groups_delete_by_storage(self.context,
                                                  self.storage_id)
 
@@ -797,6 +879,12 @@ def sync(self):
             # Build relation between port grp and port to be handled here.
             port_groups = self.driver_api \
                 .list_port_groups(self.context, self.storage_id)
+            if port_groups:
+                _build_port_group_relations(
+                    self.context, self.storage_id, port_groups)
+                LOG.info('Building port group relations successful for '
+                         'storage id:{0}'.format(self.storage_id))
+
             db_port_groups = db.port_groups_get_all(
                 self.context, filters={"storage_id": self.storage_id})
 
@@ -831,6 +919,8 @@ def sync(self):
     def remove(self):
         LOG.info('Remove port groups for storage id:{0}'
                  .format(self.storage_id))
+        db.port_grp_port_rels_delete_by_storage(self.context,
+                                                self.storage_id)
         db.port_groups_delete_by_storage(self.context, self.storage_id)
 
@@ -851,6 +941,12 @@ def sync(self):
             # Build relation between volume grp and volume to be handled here.
             volume_groups = self.driver_api \
                 .list_volume_groups(self.context, self.storage_id)
+            if volume_groups:
+                _build_volume_group_relations(
+                    self.context, self.storage_id, volume_groups)
+                LOG.info('Building volume group relations successful for '
+                         'storage id:{0}'.format(self.storage_id))
+
             db_volume_groups = db.volume_groups_get_all(
                 self.context, filters={"storage_id": self.storage_id})
 
@@ -885,6 +981,7 @@ def sync(self):
     def remove(self):
         LOG.info('Remove volume groups for storage id:{0}'
                  .format(self.storage_id))
+        db.vol_grp_vol_rels_delete_by_storage(self.context, self.storage_id)
         db.volume_groups_delete_by_storage(self.context, self.storage_id)
 
diff --git a/delfin/tests/unit/task_manager/test_resources.py b/delfin/tests/unit/task_manager/test_resources.py
index b51056fba..dd39bc9ef 100644
--- a/delfin/tests/unit/task_manager/test_resources.py
+++ b/delfin/tests/unit/task_manager/test_resources.py
@@ -924,3 +924,109 @@ def test_remove(self, mock_volume_groups_del):
             context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
         volume_group_obj.remove()
         self.assertTrue(mock_volume_groups_del.called)
+
+
+class TestPortGroupTask(test.TestCase):
+    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')
+    @mock.patch('delfin.drivers.api.API.list_port_groups')
+    @mock.patch('delfin.db.port_groups_get_all')
+    @mock.patch('delfin.db.port_groups_delete')
+    @mock.patch('delfin.db.port_groups_update')
+    @mock.patch('delfin.db.port_groups_create')
+    def test_sync_successful(self, mock_port_group_create,
+                             mock_port_group_update,
+                             mock_port_group_del,
+                             mock_port_groups_get_all,
+                             mock_list_port_groups, get_lock):
+        ctxt = context.get_admin_context()
+        port_group_obj = resources.PortGroupTask(
+            ctxt, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
+        port_group_obj.sync()
+        self.assertTrue(mock_list_port_groups.called)
+        self.assertTrue(mock_port_groups_get_all.called)
+        self.assertTrue(get_lock.called)
+
+        # Collect the port groups from fake_storage
+        fake_storage_obj = fake_storage.FakeStorageDriver()
+
+        # Add the port groups to DB
+        mock_list_port_groups.return_value \
+            = fake_storage_obj.list_port_groups(context)
+        mock_port_groups_get_all.return_value = list()
+        port_group_obj.sync()
+        self.assertTrue(mock_port_group_create.called)
+
+        # Update the port groups in DB
+        mock_list_port_groups.return_value \
+            = port_groups_list
+        mock_port_groups_get_all.return_value \
+            = port_groups_list
+        port_group_obj.sync()
+        self.assertTrue(mock_port_group_update.called)
+
+        # Delete the port groups from DB
+        mock_list_port_groups.return_value = list()
+        mock_port_groups_get_all.return_value \
+            = port_groups_list
+        port_group_obj.sync()
+        self.assertTrue(mock_port_group_del.called)
+
+    @mock.patch('delfin.db.port_groups_delete_by_storage')
+    def test_remove(self, mock_port_groups_del):
+        port_group_obj = resources.PortGroupTask(
+            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
+        port_group_obj.remove()
+        self.assertTrue(mock_port_groups_del.called)
+
+
+class TestMaskingViewTask(test.TestCase):
+    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')
+    @mock.patch('delfin.drivers.api.API.list_masking_views')
+    @mock.patch('delfin.db.masking_views_get_all')
+    @mock.patch('delfin.db.masking_views_delete')
+    @mock.patch('delfin.db.masking_views_update')
+    @mock.patch('delfin.db.masking_views_create')
+    def test_sync_successful(self, mock_masking_view_create,
+                             mock_masking_view_update,
+                             mock_masking_view_del,
+                             mock_masking_views_get_all,
+                             mock_list_masking_views, get_lock):
+        cntxt = context.get_admin_context()
+        masking_view_obj = resources.MaskingViewTask(
+            cntxt, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
+        masking_view_obj.sync()
+        self.assertTrue(mock_list_masking_views.called)
+        self.assertTrue(mock_masking_views_get_all.called)
+        self.assertTrue(get_lock.called)
+
+        # Collect the masking views from fake_storage
+        fake_storage_obj = fake_storage.FakeStorageDriver()
+
+        # Add the masking views to DB
+        mock_list_masking_views.return_value \
+            = fake_storage_obj.list_masking_views(context)
+        mock_masking_views_get_all.return_value = list()
+        masking_view_obj.sync()
+        self.assertTrue(mock_masking_view_create.called)
+
+        # Update the masking views in DB
+        mock_list_masking_views.return_value \
+            = masking_views_list
+        mock_masking_views_get_all.return_value \
+            = masking_views_list
+        masking_view_obj.sync()
+        self.assertTrue(mock_masking_view_update.called)
+
+        # Delete the masking views from DB
+        mock_list_masking_views.return_value = list()
+        mock_masking_views_get_all.return_value \
+            = masking_views_list
+        masking_view_obj.sync()
+        self.assertTrue(mock_masking_view_del.called)
+
+    @mock.patch('delfin.db.masking_views_delete_by_storage')
+    def test_remove(self, mock_masking_views_del):
+        masking_view_obj = resources.MaskingViewTask(
+            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
+        masking_view_obj.remove()
+        self.assertTrue(mock_masking_views_del.called)
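
Below is a minimal, self-contained sketch (not part of the patch above) of how the new
_build_port_group_relations helper could be exercised on its own. It assumes the
delfin.db relation functions shown in the patch are mocked out and that the
PortGrpPortRel column names resolve to the same keys the helper builds
('storage_id', 'native_port_group_id', 'native_port_id'); the storage id and
test class name are made up for illustration.

# A plain unittest sketch; the mocked delfin.db functions and the expected
# PortGrpPortRel column names are assumptions based on the patch above.
import unittest
from unittest import mock

from delfin import context
from delfin.task_manager.tasks import resources


class TestBuildPortGroupRelations(unittest.TestCase):
    @mock.patch('delfin.db.port_grp_port_rels_create')
    @mock.patch('delfin.db.port_grp_port_rels_delete_by_storage')
    def test_relations_built_from_csv(self, mock_delete, mock_create):
        ctxt = context.get_admin_context()
        port_groups = [{
            'native_port_group_id': 'port_group_0',
            'ports': 'port_0,port_1',
        }]

        resources._build_port_group_relations(
            ctxt, 'fake_storage_id', port_groups)

        # Stale relations for this storage are purged first.
        mock_delete.assert_called_once_with(ctxt, 'fake_storage_id')

        # One relation row is built per port in the comma separated list.
        rows = mock_create.call_args[0][1]
        self.assertEqual(2, len(rows))
        self.assertEqual('port_0', rows[0]['native_port_id'])

        # The 'ports' key is popped before the group itself is persisted.
        self.assertNotIn('ports', port_groups[0])


if __name__ == '__main__':
    unittest.main()

The pop of the comma separated 'ports' string before the group dict is stored means the
relation table is the only place group membership is kept, which is why the API layer in
this patch reads the relations back from the DB when building the port group view.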