diff --git a/src/tests/ftest/container/rf_enforcement.py b/src/tests/ftest/container/rf_enforcement.py
index aafb24bd841..c96f0db1af7 100644
--- a/src/tests/ftest/container/rf_enforcement.py
+++ b/src/tests/ftest/container/rf_enforcement.py
@@ -1,5 +1,5 @@
 """
-  (C) Copyright 2019-2023 Intel Corporation.
+  (C) Copyright 2019-2024 Intel Corporation.
 
   SPDX-License-Identifier: BSD-2-Clause-Patent
 """
@@ -13,11 +13,6 @@ class ContRfEnforce(ContRedundancyFactor):
     :avocado: recursive
     """
 
-    def __init__(self, *args, **kwargs):
-        """Initialize a Rebuild Container RF with ObjClass Write object."""
-        super().__init__(*args, **kwargs)
-        self.daos_cmd = None
-
     def test_container_redundancy_factor_oclass_enforcement(self):
         """Jira ID: DAOS-6267:
         Verify that a container can be created with and enforces
@@ -40,7 +35,7 @@ def test_container_redundancy_factor_oclass_enforcement(self):
 
         :avocado: tags=all,full_regression
        :avocado: tags=vm
-        :avocado: tags=container,container_rf,cont_rf_oclass_enforcement
+        :avocado: tags=container
         :avocado: tags=ContRfEnforce,test_container_redundancy_factor_oclass_enforcement
         """
         self.execute_cont_rf_test(mode="cont_rf_enforcement")
diff --git a/src/tests/ftest/control/ms_resilience.py b/src/tests/ftest/control/ms_resilience.py
index 45c03e909c4..8c646a40dbc 100644
--- a/src/tests/ftest/control/ms_resilience.py
+++ b/src/tests/ftest/control/ms_resilience.py
@@ -73,13 +73,13 @@ def create_pool(self):
         self.log.info("*** creating pool")
         self.pool.create()
-        self.log.info("Pool UUID %s on server group: %s", self.pool.uuid, self.server_group)
+        self.log.info("%s on server group: %s", str(self.pool), self.server_group)
 
         # Verify that the pool persisted.
         while not self.find_pool(self.pool.uuid):
             # Occasionally the pool may not be found
             # immediately after creation if the read
             # is serviced by a non-leader replica.
-            self.log.info("Pool %s not found yet.", self.pool.uuid)
+            self.log.info("%s not found yet.", str(self.pool))
             time.sleep(1)
         self.log.info("Found pool in system.")
 
diff --git a/src/tests/ftest/io/macsio_test.py b/src/tests/ftest/io/macsio_test.py
index a891df713a6..348017968d6 100644
--- a/src/tests/ftest/io/macsio_test.py
+++ b/src/tests/ftest/io/macsio_test.py
@@ -18,13 +18,12 @@ class MacsioTest(TestWithServers):
     :avocado: recursive
     """
 
-    def get_macsio_command(self, pool_uuid, pool_svcl, cont_uuid):
+    def _get_macsio_command(self, pool, cont):
         """Get the MacsioCommand object.
 
         Args:
-            pool_uuid (str): pool uuid
-            pool_svcl (str): pool service replica
-            cont_uuid (str, optional): container uuid. Defaults to None.
+            pool (TestPool): pool object
+            cont (TestContainer): container object
 
         Returns:
             MacsioCommand: object defining the macsio command
@@ -39,9 +38,9 @@ def get_macsio_command(self, pool_uuid, pool_svcl, cont_uuid):
 
         # Update the MACSio pool and container info before gathering manager
         # environment information to ensure they are included.
-        macsio.daos_pool = pool_uuid
-        macsio.daos_svcl = pool_svcl
-        macsio.daos_cont = cont_uuid
+        macsio.daos_pool = pool.identifier
+        macsio.daos_svcl = list_to_str(pool.svc_ranks)
+        macsio.daos_cont = cont.identifier
 
         return macsio
 
@@ -115,7 +114,7 @@ def test_macsio(self):
 
         # Run macsio
         self.log_step("Running MACSio")
-        macsio = self.get_macsio_command(pool.uuid, list_to_str(pool.svc_ranks), container.uuid)
+        macsio = self._get_macsio_command(pool, container)
         result = self.run_macsio(macsio, self.hostlist_clients, processes)
         if not macsio.check_results(result, self.hostlist_clients):
             self.fail("MACSio failed")
@@ -159,7 +158,7 @@ def test_macsio_daos_vol(self):
 
         # Run macsio
         self.log_step("Running MACSio with DAOS VOL connector")
-        macsio = self.get_macsio_command(pool.uuid, list_to_str(pool.svc_ranks), container.uuid)
+        macsio = self._get_macsio_command(pool, container)
         result = self.run_macsio(
             macsio, self.hostlist_clients, processes, plugin_path,
             working_dir=dfuse.mount_dir.value)
diff --git a/src/tests/ftest/io/parallel_io.py b/src/tests/ftest/io/parallel_io.py
index 7be497dd28f..28257f13a27 100644
--- a/src/tests/ftest/io/parallel_io.py
+++ b/src/tests/ftest/io/parallel_io.py
@@ -170,8 +170,7 @@ def test_parallelio(self):
         try:
             self.fio_cmd.update_directory(os.path.join(dfuse.mount_dir.value, container_to_destroy))
             self.execute_fio()
-            self.fail(
-                "Fio was able to access destroyed container: {}".format(self.container[0].uuid))
+            self.fail(f"Fio was able to access destroyed container: {self.container[0]}")
         except CommandFailure:
             self.log.info("fio failed as expected")
         # check dfuse is still running after attempting to access deleted container
diff --git a/src/tests/ftest/nvme/pool_capacity.py b/src/tests/ftest/nvme/pool_capacity.py
index 57e9fb7c6a7..dea4d5bedca 100644
--- a/src/tests/ftest/nvme/pool_capacity.py
+++ b/src/tests/ftest/nvme/pool_capacity.py
@@ -186,8 +186,7 @@ def run_test(self, num_pool=1):
 
         # Destroy the last num_pool pools created
         offset = loop_count * num_pool
         for index in range(offset, offset + num_pool):
-            display_string = "Pool {} space at the End".format(
-                self.pool[index].uuid)
+            display_string = "{} space at the End".format(str(self.pool[index]))
             self.pool[index].display_pool_daos_space(display_string)
             self.pool[index].destroy()
diff --git a/src/tests/ftest/pool/destroy.py b/src/tests/ftest/pool/destroy.py
index 2bcd5f6cea7..e5dcb6cc521 100644
--- a/src/tests/ftest/pool/destroy.py
+++ b/src/tests/ftest/pool/destroy.py
@@ -98,7 +98,6 @@ def validate_pool_creation(self, hosts, scm_mount):
         self.log.info("Create a pool")
         self.add_pool(create=False)
         self.pool.create()
-        self.log.info("Pool UUID is %s", self.pool.uuid)
 
         # Check that the pool was created.
         self.assertTrue(
@@ -473,7 +472,7 @@ def test_destroy_connected(self):
 
         self.log.info("Check if files still exist")
         self.assertTrue(
             self.pool.check_files(hostlist_servers, scm_mount),
-            "Pool UUID {} should not be removed when connected".format(self.pool.uuid))
+            "{} should not be removed when connected".format(str(self.pool)))
         self.assertTrue(
             exception_detected, "No exception when deleting a connected pool")
@@ -563,7 +562,7 @@ def test_destroy_with_containers(self):
 
         self.log.info("Check if files still exist")
         self.assertTrue(
             self.pool.check_files(hostlist_servers, scm_mount),
-            "Pool UUID {} should not be removed when containers exist".format(self.pool.uuid))
+            "{} should not be removed when containers exist".format(str(self.pool)))
         self.assertTrue(
             exception_detected, "No exception when deleting a pool with containers")
diff --git a/src/tests/ftest/pool/query_attribute.py b/src/tests/ftest/pool/query_attribute.py
index a9b0f5ecf6c..bed2c4984f8 100644
--- a/src/tests/ftest/pool/query_attribute.py
+++ b/src/tests/ftest/pool/query_attribute.py
@@ -1,5 +1,5 @@
 """
-  (C) Copyright 2020-2023 Intel Corporation.
+  (C) Copyright 2020-2024 Intel Corporation.
 
   SPDX-License-Identifier: BSD-2-Clause-Patent
 """
@@ -52,7 +52,7 @@ def test_query_attr(self):
 
         # Call daos pool query, obtain pool UUID and SCM size, and compare
         # against those used when creating the pool.
-        query_result = daos_cmd.pool_query(pool=self.pool.uuid)
+        query_result = daos_cmd.pool_query(pool=self.pool.identifier)
         actual_uuid = query_result["response"]["uuid"]
         actual_size = query_result["response"]["tier_stats"][0]["total"]
         actual_size_roundup = int(actual_size / 100000) * 100000
@@ -82,11 +82,11 @@ def test_query_attr(self):
             sample_attrs.append(sample_attr)
             sample_vals.append(sample_val)
             daos_cmd.pool_set_attr(
-                pool=self.pool.uuid, attr=sample_attr, value=sample_val)
+                pool=self.pool.identifier, attr=sample_attr, value=sample_val)
             expected_attrs.append(sample_attr)
 
         # List the attribute names and compare against those set.
-        attrs = daos_cmd.pool_list_attrs(pool=self.pool.uuid)
+        attrs = daos_cmd.pool_list_attrs(pool=self.pool.identifier)
         for attr in attrs["response"]:
             actual_attrs.append(attr)
 
@@ -102,7 +102,7 @@ def test_query_attr(self):
         # Get each attribute's value and compare against those set.
         for idx in range(5):
             output = daos_cmd.pool_get_attr(
-                pool=self.pool.uuid, attr=sample_attrs[idx])
+                pool=self.pool.identifier, attr=sample_attrs[idx])
             actual_val = base64.b64decode(output["response"]["value"]).decode()
             if sample_vals[idx] != actual_val:
                 msg = "Unexpected attribute value! " +\
diff --git a/src/tests/ftest/rebuild/cascading_failures.py b/src/tests/ftest/rebuild/cascading_failures.py
index 448280b9227..234105df6d2 100644
--- a/src/tests/ftest/rebuild/cascading_failures.py
+++ b/src/tests/ftest/rebuild/cascading_failures.py
@@ -3,7 +3,6 @@
 
   SPDX-License-Identifier: BSD-2-Clause-Patent
 """
-from daos_utils import DaosCommand
 from rebuild_test_base import RebuildTestBase
 
 
@@ -17,7 +16,6 @@ def __init__(self, *args, **kwargs):
         """Initialize a CascadingFailures object."""
         super().__init__(*args, **kwargs)
         self.mode = None
-        self.daos_cmd = None
 
     def create_test_container(self):
         """Create a container and write objects."""
@@ -66,13 +64,11 @@ def start_rebuild(self):
 
     def execute_during_rebuild(self):
         """Execute test steps during rebuild."""
-        self.daos_cmd = DaosCommand(self.bin)
         if self.mode == "cascading":
             # Exclude the second rank from the pool during rebuild
             self.server_managers[0].stop_ranks([self.inputs.rank.value[1]], self.d_log, force=True)
 
-        self.daos_cmd.container_set_prop(
-            pool=self.pool.uuid, cont=self.container.uuid, prop="status", value="healthy")
+        self.container.set_prop(prop="status", value="healthy")
 
         # Populate the container with additional data during rebuild
         self.container.write_objects(obj_class=self.inputs.object_class.value)
diff --git a/src/tests/ftest/rebuild/no_cap.py b/src/tests/ftest/rebuild/no_cap.py
index b262619d545..1725a6af471 100644
--- a/src/tests/ftest/rebuild/no_cap.py
+++ b/src/tests/ftest/rebuild/no_cap.py
@@ -117,8 +117,7 @@ def test_rebuild_no_capacity(self):
         # query the pool before rebuild
         self.log.info("....Pool query after filling, written_pload=%s", written_pload)
         self.pool.set_query_data()
-        self.log.info(
-            "..Pool %s query data: %s\n", self.pool.uuid, self.pool.query_data)
+        self.log.info("..%s query data: %s\n", str(self.pool), self.pool.query_data)
 
         # Start rebuild
         rank = 1
diff --git a/src/tests/ftest/rebuild/pool_destroy_race.py b/src/tests/ftest/rebuild/pool_destroy_race.py
index 68b31d7d42c..a66714f7f98 100644
--- a/src/tests/ftest/rebuild/pool_destroy_race.py
+++ b/src/tests/ftest/rebuild/pool_destroy_race.py
@@ -79,9 +79,9 @@ def test_pool_destroy_with_io(self):
         pool.wait_for_rebuild_to_start(interval=1)
 
         rebuild_state = pool.get_rebuild_state(True)
-        self.log.info("Pool %s rebuild status:%s", pool.uuid, rebuild_state)
+        self.log.info("%s rebuild status:%s", str(pool), rebuild_state)
 
-        self.log_step(f'Destroy pool {pool.uuid} while rebuild is {rebuild_state}')
+        self.log_step(f'Destroy {str(pool)} while rebuild is {rebuild_state}')
         pool.destroy()
 
         # Disable cleanup for all containers under the destroyed pool
diff --git a/src/tests/ftest/rebuild/read_array.py b/src/tests/ftest/rebuild/read_array.py
index 6b72bf45bbd..b2c0f33d2af 100644
--- a/src/tests/ftest/rebuild/read_array.py
+++ b/src/tests/ftest/rebuild/read_array.py
@@ -3,7 +3,6 @@
 
   SPDX-License-Identifier: BSD-2-Clause-Patent
 """
-from daos_utils import DaosCommand
 from general_utils import DaosTestError
 from rebuild_test_base import RebuildTestBase
 
@@ -14,16 +13,9 @@ class RbldReadArrayTest(RebuildTestBase):
     :avocado: recursive
     """
 
-    def __init__(self, *args, **kwargs):
-        """Initialize a RbldReadArrayTest object."""
-        super().__init__(*args, **kwargs)
-        self.daos_cmd = None
-
     def execute_during_rebuild(self):
         """Read the objects during rebuild."""
-        self.daos_cmd = DaosCommand(self.bin)
-        self.daos_cmd.container_set_prop(
-            pool=self.pool.uuid, cont=self.container.uuid, prop="status", value="healthy")
+        self.container.set_prop(prop="status", value="healthy")
 
         message = "Reading the array objects during rebuild"
         self.log.info(message)
diff --git a/src/tests/ftest/rebuild/with_io.py b/src/tests/ftest/rebuild/with_io.py
index 7e7a1e623d4..eaf60ad7ac3 100644
--- a/src/tests/ftest/rebuild/with_io.py
+++ b/src/tests/ftest/rebuild/with_io.py
@@ -4,7 +4,6 @@
   SPDX-License-Identifier: BSD-2-Clause-Patent
 """
 from apricot import TestWithServers
-from daos_utils import DaosCommand
 
 
 class RbldWithIO(TestWithServers):
@@ -62,7 +61,7 @@ def test_rebuild_with_io(self):
         # Write data to the container for 30 seconds
         self.log.info(
             "Wrote %s bytes to container %s",
-            self.container.execute_io(30, rank, obj_class), self.container.uuid)
+            self.container.execute_io(30, rank, obj_class), str(self.container))
 
         # Determine how many objects will need to be rebuilt
         self.container.get_target_rank_lists(" prior to rebuild")
@@ -73,14 +72,12 @@ def test_rebuild_with_io(self):
         # Wait for recovery to start
         self.pool.wait_for_rebuild_to_start()
 
-        daos_cmd = DaosCommand(self.bin)
-        daos_cmd.container_set_prop(
-            pool=self.pool.uuid, cont=self.container.uuid, prop="status", value="healthy")
+        self.container.set_prop(prop="status", value="healthy")
 
         # Write data to the container for another 30 seconds
         self.log.info(
             "Wrote an additional %s bytes to container %s",
-            self.container.execute_io(30), self.container.uuid)
+            self.container.execute_io(30), str(self.container))
 
         # Wait for recovery to complete
         self.pool.wait_for_rebuild_to_end()
diff --git a/src/tests/ftest/scrubber/aggregation.py b/src/tests/ftest/scrubber/aggregation.py
index e578fbb55f8..66147c4cd15 100644
--- a/src/tests/ftest/scrubber/aggregation.py
+++ b/src/tests/ftest/scrubber/aggregation.py
@@ -50,7 +50,7 @@ def test_target_eviction_during_aggregation(self):
         self.ior_cmd.namespace = "/run/ior_large_block_size/*"
         self.processes = self.params.get("np", self.ior_cmd.namespace, self.processes)
         self.ior_cmd.get_params(self)
-        self.run_ior_and_check_scruber_status(pool=self.pool, cont=self.container)
+        self.run_ior_and_check_scrubber_status(pool=self.pool, cont=self.container)
         # Enable the aggregation on the pool.
         self.pool.set_property("reclaim", "time")
         # We want both aggregation and scrubber tasks
diff --git a/src/tests/ftest/scrubber/basic.py b/src/tests/ftest/scrubber/basic.py
index 1ba3338f272..13ec352c360 100644
--- a/src/tests/ftest/scrubber/basic.py
+++ b/src/tests/ftest/scrubber/basic.py
@@ -34,7 +34,7 @@ def run_scrubber_basic(self, pool_prop=None, cont_prop=None):
         for test in transfer_block_size:
             self.ior_cmd.transfer_size.update(test[0])
             self.ior_cmd.block_size.update(test[1])
-            status = self.run_ior_and_check_scruber_status(pool=self.pool, cont=self.container)
+            status = self.run_ior_and_check_scrubber_status(pool=self.pool, cont=self.container)
             if status is False:
                 self.log.info("-------Test Failed-------")
                 self.log.info("---No metrics value change----")
diff --git a/src/tests/ftest/scrubber/csum_fault.py b/src/tests/ftest/scrubber/csum_fault.py
index 4003f6a13f1..2107ff0e1c4 100644
--- a/src/tests/ftest/scrubber/csum_fault.py
+++ b/src/tests/ftest/scrubber/csum_fault.py
@@ -43,7 +43,7 @@ def test_scrubber_csum_fault(self):
         for test in transfer_block_size:
             self.ior_cmd.transfer_size.update(test[0])
             self.ior_cmd.block_size.update(test[1])
-            self.run_ior_and_check_scruber_status(pool=self.pool, cont=self.container)
+            self.run_ior_and_check_scrubber_status(pool=self.pool, cont=self.container)
         start_time = 0
         finish_time = 0
         poll_status = False
diff --git a/src/tests/ftest/scrubber/frequency.py b/src/tests/ftest/scrubber/frequency.py
index 9931a3123ee..5e95babbbf9 100644
--- a/src/tests/ftest/scrubber/frequency.py
+++ b/src/tests/ftest/scrubber/frequency.py
@@ -36,7 +36,7 @@ def test_objects_scrubbed_properly(self):
         cont_prop = self.params.get("properties", '/run/container/*')
         self.create_pool_cont_with_scrubber(pool_prop=pool_prop, cont_prop=cont_prop)
         # Run IOR and gather the total scrubbed metrics information.
-        self.run_ior_and_check_scruber_status(pool=self.pool, cont=self.container)
+        self.run_ior_and_check_scrubber_status(pool=self.pool, cont=self.container)
         # Wait for 5 minutes to get first scrubber bytes scrubbed metrics.
         # NOTE: This value could change depending on the IOR data (objects created)
         self.log.info("Sleeping for 5 minutes pool property set to scrub:timed")
diff --git a/src/tests/ftest/scrubber/rebuild.py b/src/tests/ftest/scrubber/rebuild.py
index e2e079f15ad..0feaa228dcc 100644
--- a/src/tests/ftest/scrubber/rebuild.py
+++ b/src/tests/ftest/scrubber/rebuild.py
@@ -33,7 +33,7 @@ def test_target_eviction_during_rebuild(self):
         self.create_pool_cont_with_scrubber(pool_prop=pool_prop, cont_prop=cont_prop)
         self.pool.query()
         initial_metrics = self.scrubber.get_scrub_corrupt_metrics()
-        self.run_ior_and_check_scruber_status(pool=self.pool, cont=self.container)
+        self.run_ior_and_check_scrubber_status(pool=self.pool, cont=self.container)
         # Exclude Rank 5 to start the rebuild operation.
         self.pool.exclude("5")
         # Wait for a minute for the scrubber to take action and evict target
diff --git a/src/tests/ftest/scrubber/snapshot.py b/src/tests/ftest/scrubber/snapshot.py
index ace7e147cae..cac713b7a28 100644
--- a/src/tests/ftest/scrubber/snapshot.py
+++ b/src/tests/ftest/scrubber/snapshot.py
@@ -33,7 +33,7 @@ def test_target_eviction_during_snapshot(self):
         self.create_pool_cont_with_scrubber(pool_prop=pool_prop, cont_prop=cont_prop)
         self.pool.query()
         initial_metrics = self.scrubber.get_scrub_corrupt_metrics()
-        self.run_ior_and_check_scruber_status(pool=self.pool, cont=self.container)
+        self.run_ior_and_check_scrubber_status(pool=self.pool, cont=self.container)
         # Wait for a minute for the scrubber to take action and evict target
         # after corruption threshold reached.
         # Take a snap-shot after 15 seconds while the csum faults are injected.
diff --git a/src/tests/ftest/scrubber/target_auto_eviction.py b/src/tests/ftest/scrubber/target_auto_eviction.py
index b7a04bee2df..d602b0405c3 100644
--- a/src/tests/ftest/scrubber/target_auto_eviction.py
+++ b/src/tests/ftest/scrubber/target_auto_eviction.py
@@ -31,10 +31,10 @@ def test_scrubber_ssd_auto_eviction(self):
         initial_metrics = {}
         final_metrics = {}
         self.create_pool_cont_with_scrubber(pool_prop=pool_prop, cont_prop=cont_prop)
-        self.dmg_cmd.pool_query(self.pool.identifier)
+        self.pool.query()
         initial_metrics = self.scrubber.get_scrub_corrupt_metrics()
         t_start = journalctl_time()
-        self.run_ior_and_check_scruber_status(pool=self.pool, cont=self.container)
+        self.run_ior_and_check_scrubber_status(pool=self.pool, cont=self.container)
         # Wait for a minute for the scrubber to take action and evict target
         # after corruption threshold reached.
         self.log.info("Sleeping for 60 seconds")
@@ -57,7 +57,7 @@ def test_scrubber_ssd_auto_eviction(self):
             self.log.info("Data corrupted occurrence %s", occurrence)
         else:
             self.fail("Test Failed: RAS data corrupted messages missing on system logs")
-        self.dmg_cmd.pool_query(self.pool.identifier)
+        self.pool.query()
         final_metrics = self.scrubber.get_scrub_corrupt_metrics()
         status = self.verify_scrubber_metrics_value(initial_metrics, final_metrics)
         if status is False:
diff --git a/src/tests/ftest/server/metadata.py b/src/tests/ftest/server/metadata.py
index a7492d04eb6..89ae6af6236 100644
--- a/src/tests/ftest/server/metadata.py
+++ b/src/tests/ftest/server/metadata.py
@@ -99,7 +99,7 @@ def create_pool(self, svc_ops_enabled=True):
             params = {}
             params['properties'] = "svc_ops_enabled:0"
         self.add_pool(**params)
-        self.log.info("Created pool %s: svc ranks:", self.pool.uuid)
+        self.log.info("Created %s: svc ranks:", str(self.pool))
         for index, rank in enumerate(self.pool.svc_ranks):
             self.log.info("[%d]: %d", index, rank)
 
diff --git a/src/tests/ftest/util/data_mover_test_base.py b/src/tests/ftest/util/data_mover_test_base.py
index db272febc94..8fbce2c8f36 100644
--- a/src/tests/ftest/util/data_mover_test_base.py
+++ b/src/tests/ftest/util/data_mover_test_base.py
@@ -329,8 +329,7 @@ def dataset_gen(self, cont, num_objs, num_dkeys, num_akeys_single,
             list: a list of DaosObj created.
 
         """
-        self.log.info("Creating dataset in %s/%s",
-                      str(cont.pool.uuid), str(cont.uuid))
+        self.log.info("Creating dataset in %s/%s", str(cont.pool), str(cont))
 
         cont.open()
 
@@ -400,8 +399,7 @@ def dataset_verify(self, obj_list, cont, num_objs, num_dkeys,
             akey_extents (list): varying number of akey extents to iterate.
 
         """
-        self.log.info("Verifying dataset in %s/%s",
-                      str(cont.pool.uuid), str(cont.uuid))
+        self.log.info("Verifying dataset in %s/%s", str(cont.pool), str(cont))
 
         cont.open()
 
diff --git a/src/tests/ftest/util/osa_utils.py b/src/tests/ftest/util/osa_utils.py
index a4a13abbab5..410b6ce46a2 100644
--- a/src/tests/ftest/util/osa_utils.py
+++ b/src/tests/ftest/util/osa_utils.py
@@ -392,7 +392,7 @@ def ior_thread(self, pool, oclass, test, flags, single_cont_read=True, fail_on_w
             except CommandFailure as err_msg:
                 self.out_queue.put(err_msg)
                 self.assert_on_exception()
-        job_manager.job.dfs_cont.update(self.container.uuid)
+        job_manager.job.dfs_cont.update(self.container.identifier)
         self.ior_cmd.transfer_size.update(test[2])
         self.ior_cmd.block_size.update(test[3])
         self.ior_cmd.flags.update(flags)
@@ -433,7 +433,7 @@ def run_mdtest_thread(self, oclass="RP_2G1"):
         if create_container == 1:
             self.container.create()
         job_manager = self.get_mdtest_job_manager_command(self.manager)
-        job_manager.job.dfs_cont.update(self.container.uuid)
+        job_manager.job.dfs_cont.update(self.container.identifier)
         # Add a thread for these IOR arguments
         process = threading.Thread(target=self.execute_mdtest)
         # Launch the MDtest thread
diff --git a/src/tests/ftest/util/rebuild_test_base.py b/src/tests/ftest/util/rebuild_test_base.py
index c3df3c1efd0..5d083028ad3 100644
--- a/src/tests/ftest/util/rebuild_test_base.py
+++ b/src/tests/ftest/util/rebuild_test_base.py
@@ -5,7 +5,6 @@
 """
 from apricot import TestWithServers
 from command_utils_base import BasicParameter, ObjectWithParameters
-from daos_utils import DaosCommand
 
 
 class RebuildTestParams(ObjectWithParameters):
@@ -33,7 +32,6 @@ def __init__(self, *args, **kwargs):
         self.server_count = 0
         self.info_checks = None
         self.rebuild_checks = None
-        self.daos_cmd = None
 
     def setUp(self):
         """Set up each test case."""
@@ -160,7 +158,6 @@ def execute_rebuild_test(self, create_container=True):
         """
         # Get the test params
         self.setup_test_pool()
-        self.daos_cmd = DaosCommand(self.bin)
         if create_container:
             self.setup_test_container()
 
@@ -183,11 +180,7 @@ def execute_rebuild_test(self, create_container=True):
         self.pool.wait_for_rebuild_to_end(1)
 
         # clear container status for the RF issue
-        self.daos_cmd.container_set_prop(
-            pool=self.pool.uuid,
-            cont=self.container.uuid,
-            prop="status",
-            value="healthy")
+        self.container.set_prop(prop="status", value="healthy")
 
         # Refresh local pool and container
         self.pool.check_pool_info()
diff --git a/src/tests/ftest/util/scrubber_test_base.py b/src/tests/ftest/util/scrubber_test_base.py
index f2096edf58d..f3a828e366d 100644
--- a/src/tests/ftest/util/scrubber_test_base.py
+++ b/src/tests/ftest/util/scrubber_test_base.py
@@ -1,5 +1,5 @@
 """
-(C) Copyright 2021-2023 Intel Corporation.
+(C) Copyright 2021-2024 Intel Corporation.
 
 SPDX-License-Identifier: BSD-2-Clause-Patent
 """
@@ -23,14 +23,9 @@ def __init__(self, *args, **kwargs):
     def setUp(self):
         """Set up each test case."""
         super().setUp()
-        self.scrubber = ScrubberUtils(self.get_dmg_command(),
-                                      self.server_managers[0].hosts)
-        self.daos_cmd = self.get_daos_command()
-        self.dmg_cmd = self.get_dmg_command()
+        self.scrubber = ScrubberUtils(self.get_dmg_command(), self.server_managers[0].hosts)
         self.pool = None
         self.container = None
-        self.initial_metrics = {}
-        self.final_metrics = {}
 
     def verify_scrubber_metrics_value(self, initial_metrics, final_metrics):
         """Compare the initial metrics value to final value after IO data.
@@ -73,47 +68,40 @@ def create_pool_cont_with_scrubber(self, pool_prop=None, cont_prop=None):
         # Testing scenario : Create a pool and container without properties
         # and update them at runtime.
         if pool_prop is None:
-            self.add_pool(create=False, connect=False)
-            self.pool.properties.value = pool_prop
-            self.pool.create()
-            self.pool.connect()
+            # Create without properties and set at runtime below
+            self.add_pool(properties=None)
         else:
+            # Create with properties
             self.add_pool()
-        self.add_container(pool=self.pool, create=False)
         if pool_prop is None:
             pool_prop = "scrub:timed,scrub_freq:1"
-        if cont_prop is None:
-            cont_prop = "cksum:crc16"
         for prop_val in pool_prop.split(","):
             if prop_val is not None:
                 value = prop_val.split(":")
                 self.pool.set_property(value[0], value[1])
-        self.container.properties.value = cont_prop
-        self.container.create()
-        values = "Pool : {} Container: {}".format(self.pool, self.container)
-        self.log.info(values)
+        if cont_prop is None:
+            cont_prop = "cksum:crc16"
+        self.add_container(pool=self.pool, properties=cont_prop)
 
-    def run_ior_and_check_scruber_status(self, pool, cont, fail_on_warning=True):
+    def run_ior_and_check_scrubber_status(self, pool, cont):
         """Run IOR and get scrubber metrics
 
         Args:
             pool (object): Pool object
             cont (object): Container object within the pool.
-            fail_on_warning (bool, optional): [description]. Defaults to True.
 
         Returns:
-            status(bool) : True (Scrubber working), False(Scrubber not working)
+            bool: True (Scrubber working), False(Scrubber not working)
         """
-        status = False
-        self.initial_metrics = self.scrubber.get_csum_total_metrics()
+        initial_metrics = self.scrubber.get_csum_total_metrics()
         self.pool = pool
         self.container = cont
         # Print the pool properties
-        result = self.dmg_cmd.pool_get_prop(self.pool.uuid, "scrub")
+        result = self.pool.get_prop("scrub")
         self.log.info("Pool Properties")
         self.log.info("===============")
         self.log.info(result)
-        result = self.daos_cmd.container_get_prop(self.pool.uuid, self.container.uuid)
+        result = self.container.get_prop()
         self.log.info("Container Properties")
         self.log.info("===============")
         self.log.info(result)
@@ -122,12 +110,11 @@ def run_ior_and_check_scruber_status(self, pool, cont, fail_on_warning=True):
         process = threading.Thread(target=self.run_ior_with_pool,
                                    kwargs={"create_pool": True,
                                            "create_cont": False,
-                                           "fail_on_warning": fail_on_warning})
+                                           "fail_on_warning": True})
         # Launch the IOR thread
         process.start()
         # Wait for the thread to finish
         process.join()
-        self.final_metrics = self.scrubber.get_csum_total_metrics()
+        final_metrics = self.scrubber.get_csum_total_metrics()
         # Just make sure scrubber is working here.
-        status = self.verify_scrubber_metrics_value(self.initial_metrics, self.final_metrics)
-        return status
+        return self.verify_scrubber_metrics_value(initial_metrics, final_metrics)
diff --git a/src/tests/ftest/util/test_utils_container.py b/src/tests/ftest/util/test_utils_container.py
index b7cb9588e9f..98729bba95e 100644
--- a/src/tests/ftest/util/test_utils_container.py
+++ b/src/tests/ftest/util/test_utils_container.py
@@ -192,7 +192,7 @@ def write_record(self, container, akey, dkey, data, rank=None,
                 "Error writing {}data (dkey={}, akey={}, data={}) to "
                 "container {}: {}".format(
                     "array " if isinstance(data, list) else "", dkey, akey,
-                    data, container.uuid, error)) from error
+                    data, str(container), error)) from error
 
     def write_object(self, container, record_qty, akey_size, dkey_size,
                      data_size, rank=None, obj_class=None, data_array_size=0):
@@ -272,7 +272,7 @@ def read_record(self, container, akey, dkey, data_size, data_array_size=0,
                 "Error reading {}data (dkey={}, akey={}, size={}) from "
                 "container {}: {}".format(
                     "array " if data_array_size > 0 else "", dkey, akey,
-                    data_size, container.uuid, error)) from error
+                    data_size, str(container), error)) from error
 
         return [data[:-1] for data in read_data] \
             if data_array_size > 0 else read_data.value
diff --git a/src/tests/ftest/util/vol_test_base.py b/src/tests/ftest/util/vol_test_base.py
index ad9f7b2ca51..6e54e791169 100644
--- a/src/tests/ftest/util/vol_test_base.py
+++ b/src/tests/ftest/util/vol_test_base.py
@@ -48,8 +48,8 @@ def run_test(self, job_manager, plugin_path, test_repo):
                              check_results=["FAILED", "stderr"])
 
         env = EnvironmentVariables()
-        env["DAOS_POOL"] = "{}".format(pool.uuid)
-        env["DAOS_CONT"] = "{}".format(container.uuid)
+        env["DAOS_POOL"] = pool.identifier
+        env["DAOS_CONT"] = container.identifier
        env["HDF5_VOL_CONNECTOR"] = "daos"
         env["HDF5_PLUGIN_PATH"] = "{}".format(plugin_path)
         job_manager.assign_hosts(self.hostlist_clients)
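
The common thread in this change set: tests stop formatting raw pool.uuid / container.uuid strings and stop driving daos/dmg through shared DaosCommand / dmg_cmd handles, and instead pass the TestPool / TestContainer wrappers themselves, leaning on their identifier property, their str() form for logging, and wrapper methods such as container.set_prop(). A minimal, self-contained sketch of that idea: the Pool class below is a hypothetical stand-in for the ftest TestPool wrapper (src/tests/ftest/util/test_utils_pool.py), not the real class, and the "tank" label and zero UUID are illustrative only.

class Pool:
    """Hypothetical stand-in for the ftest TestPool wrapper (illustration only)."""

    def __init__(self, label, uuid):
        self.label = label
        self.uuid = uuid

    @property
    def identifier(self):
        # Prefer the human-readable label when one exists; fall back to the
        # UUID so either form can be handed to a daos/dmg command wrapper.
        return self.label or self.uuid

    def __str__(self):
        # One place decides how a pool appears in logs, so call sites shrink
        # from log.info("Pool UUID %s ...", pool.uuid)
        # to   log.info("%s ...", str(pool))
        return "Pool {}".format(self.identifier)


pool = Pool(label="tank", uuid="00000000-0000-0000-0000-000000000000")
print("{} not found yet.".format(pool))   # -> Pool tank not found yet.
print(pool.identifier)                    # -> tank (UUID if the pool is unlabeled)

Centralizing the rendering this way is what lets the hunks above replace "Pool UUID {}" format strings with plain "{}" placeholders without losing any information in the logs.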