Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

DAOS-12859 test: use pool and container labels (pass 3) #13210

Merged
merged 25 commits into master from dbohning/daos-12859
Sep 16, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
1cbeb53
DAOS-12859 test: use pool and container labels (pass 3)
daltonbohning Oct 9, 2023
37e6833
fixes
daltonbohning Nov 27, 2023
e256bbb
fixes
daltonbohning Nov 27, 2023
abd877e
Merge remote-tracking branch 'origin/master' into dbohning/daos-12859…
daltonbohning Jan 17, 2024
a7d5441
Merge remote-tracking branch 'origin/master' into dbohning/daos-12859…
daltonbohning Jan 22, 2024
7a5232b
Merge remote-tracking branch 'origin/master' into dbohning/daos-12859…
daltonbohning Jan 29, 2024
9ed7bac
fixes after merge
daltonbohning Jan 29, 2024
451a8bc
Merge remote-tracking branch 'origin/master' into dbohning/daos-12859…
daltonbohning Feb 2, 2024
e3f1ff7
Merge remote-tracking branch 'origin/master' into dbohning/daos-12859…
daltonbohning Feb 5, 2024
b3ab309
Merge remote-tracking branch 'origin/master' into dbohning/daos-12859…
daltonbohning Feb 6, 2024
4b0254a
Merge remote-tracking branch 'origin/master' into dbohning/daos-12859…
daltonbohning Feb 22, 2024
d842b50
fix bad merge
daltonbohning Feb 22, 2024
479dab0
Merge remote-tracking branch 'origin/master' into dbohning/daos-12859…
daltonbohning Mar 1, 2024
394a6e8
update test-tag
daltonbohning Mar 4, 2024
e922295
fix test
daltonbohning Mar 5, 2024
be09665
Merge remote-tracking branch 'origin/master' into dbohning/daos-12859…
daltonbohning Mar 19, 2024
2f9ec5c
Merge remote-tracking branch 'origin/master' into dbohning/daos-12859…
daltonbohning Apr 1, 2024
f004eda
Merge remote-tracking branch 'origin/master' into dbohning/daos-12859…
daltonbohning Apr 3, 2024
74fb2fc
Merge remote-tracking branch 'origin/master' into dbohning/daos-12859…
daltonbohning Apr 12, 2024
d26f932
Merge remote-tracking branch 'origin/master' into dbohning/daos-12859…
daltonbohning Apr 26, 2024
6deaf05
Merge remote-tracking branch 'origin/master' into dbohning/daos-12859…
daltonbohning May 16, 2024
b2532cf
Merge remote-tracking branch 'origin/master' into dbohning/daos-12859…
daltonbohning Jul 22, 2024
00dca2a
Merge remote-tracking branch 'origin/master' into dbohning/daos-12859…
daltonbohning Aug 9, 2024
4ccd890
Merge remote-tracking branch 'origin/master' into dbohning/daos-12859…
daltonbohning Sep 10, 2024
f0e1d01
fix macsio_test
daltonbohning Sep 10, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 2 additions & 7 deletions src/tests/ftest/container/rf_enforcement.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
"""
(C) Copyright 2019-2023 Intel Corporation.
(C) Copyright 2019-2024 Intel Corporation.

SPDX-License-Identifier: BSD-2-Clause-Patent
"""
Expand All @@ -13,11 +13,6 @@ class ContRfEnforce(ContRedundancyFactor):
:avocado: recursive
"""

def __init__(self, *args, **kwargs):
"""Initialize a Rebuild Container RF with ObjClass Write object."""
super().__init__(*args, **kwargs)
self.daos_cmd = None

def test_container_redundancy_factor_oclass_enforcement(self):
"""Jira ID:
DAOS-6267: Verify that a container can be created with and enforces
Expand All @@ -40,7 +35,7 @@ def test_container_redundancy_factor_oclass_enforcement(self):

:avocado: tags=all,full_regression
:avocado: tags=vm
:avocado: tags=container,container_rf,cont_rf_oclass_enforcement
:avocado: tags=container
:avocado: tags=ContRfEnforce,test_container_redundancy_factor_oclass_enforcement
"""
self.execute_cont_rf_test(mode="cont_rf_enforcement")
4 changes: 2 additions & 2 deletions src/tests/ftest/control/ms_resilience.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,13 +73,13 @@ def create_pool(self):
self.log.info("*** creating pool")
self.pool.create()

self.log.info("Pool UUID %s on server group: %s", self.pool.uuid, self.server_group)
self.log.info("%s on server group: %s", str(self.pool), self.server_group)
# Verify that the pool persisted.
while not self.find_pool(self.pool.uuid):
# Occasionally the pool may not be found
# immediately after creation if the read
# is serviced by a non-leader replica.
self.log.info("Pool %s not found yet.", self.pool.uuid)
self.log.info("%s not found yet.", str(self.pool))
time.sleep(1)
self.log.info("Found pool in system.")

Expand Down
17 changes: 8 additions & 9 deletions src/tests/ftest/io/macsio_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,13 +18,12 @@ class MacsioTest(TestWithServers):
:avocado: recursive
"""

def get_macsio_command(self, pool_uuid, pool_svcl, cont_uuid):
def _get_macsio_command(self, pool, cont):
"""Get the MacsioCommand object.

Args:
pool_uuid (str): pool uuid
pool_svcl (str): pool service replica
cont_uuid (str, optional): container uuid. Defaults to None.
pool (TestPool): pool object
cont (TestContainer): container object

Returns:
MacsioCommand: object defining the macsio command
Expand All @@ -39,9 +38,9 @@ def get_macsio_command(self, pool_uuid, pool_svcl, cont_uuid):

# Update the MACSio pool and container info before gathering manager
# environment information to ensure they are included.
macsio.daos_pool = pool_uuid
macsio.daos_svcl = pool_svcl
macsio.daos_cont = cont_uuid
macsio.daos_pool = pool.identifier
macsio.daos_svcl = list_to_str(pool.svc_ranks)
macsio.daos_cont = cont.identifier

return macsio

Expand Down Expand Up @@ -115,7 +114,7 @@ def test_macsio(self):

# Run macsio
self.log_step("Running MACSio")
macsio = self.get_macsio_command(pool.uuid, list_to_str(pool.svc_ranks), container.uuid)
macsio = self._get_macsio_command(pool, container)
result = self.run_macsio(macsio, self.hostlist_clients, processes)
if not macsio.check_results(result, self.hostlist_clients):
self.fail("MACSio failed")
Expand Down Expand Up @@ -159,7 +158,7 @@ def test_macsio_daos_vol(self):

# Run macsio
self.log_step("Running MACSio with DAOS VOL connector")
macsio = self.get_macsio_command(pool.uuid, list_to_str(pool.svc_ranks), container.uuid)
macsio = self._get_macsio_command(pool, container)
result = self.run_macsio(
macsio, self.hostlist_clients, processes, plugin_path,
working_dir=dfuse.mount_dir.value)
Expand Down
3 changes: 1 addition & 2 deletions src/tests/ftest/io/parallel_io.py
Original file line number Diff line number Diff line change
Expand Up @@ -170,8 +170,7 @@ def test_parallelio(self):
try:
self.fio_cmd.update_directory(os.path.join(dfuse.mount_dir.value, container_to_destroy))
self.execute_fio()
self.fail(
"Fio was able to access destroyed container: {}".format(self.container[0].uuid))
self.fail(f"Fio was able to access destroyed container: {self.container[0]}")
except CommandFailure:
self.log.info("fio failed as expected")
# check dfuse is still running after attempting to access deleted container
Expand Down
3 changes: 1 addition & 2 deletions src/tests/ftest/nvme/pool_capacity.py
Original file line number Diff line number Diff line change
Expand Up @@ -186,8 +186,7 @@ def run_test(self, num_pool=1):
# Destroy the last num_pool pools created
offset = loop_count * num_pool
for index in range(offset, offset + num_pool):
display_string = "Pool {} space at the End".format(
self.pool[index].uuid)
display_string = "{} space at the End".format(str(self.pool[index]))
self.pool[index].display_pool_daos_space(display_string)
self.pool[index].destroy()

Expand Down
5 changes: 2 additions & 3 deletions src/tests/ftest/pool/destroy.py
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,6 @@ def validate_pool_creation(self, hosts, scm_mount):
self.log.info("Create a pool")
self.add_pool(create=False)
self.pool.create()
self.log.info("Pool UUID is %s", self.pool.uuid)

# Check that the pool was created.
self.assertTrue(
Expand Down Expand Up @@ -473,7 +472,7 @@ def test_destroy_connected(self):
self.log.info("Check if files still exist")
self.assertTrue(
self.pool.check_files(hostlist_servers, scm_mount),
"Pool UUID {} should not be removed when connected".format(self.pool.uuid))
"{} should not be removed when connected".format(str(self.pool)))

self.assertTrue(
exception_detected, "No exception when deleting a connected pool")
Expand Down Expand Up @@ -563,7 +562,7 @@ def test_destroy_with_containers(self):
self.log.info("Check if files still exist")
self.assertTrue(
self.pool.check_files(hostlist_servers, scm_mount),
"Pool UUID {} should not be removed when containers exist".format(self.pool.uuid))
"{} should not be removed when containers exist".format(str(self.pool)))

self.assertTrue(
exception_detected, "No exception when deleting a pool with containers")
Expand Down
10 changes: 5 additions & 5 deletions src/tests/ftest/pool/query_attribute.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
"""
(C) Copyright 2020-2023 Intel Corporation.
(C) Copyright 2020-2024 Intel Corporation.

SPDX-License-Identifier: BSD-2-Clause-Patent
"""
Expand Down Expand Up @@ -52,7 +52,7 @@ def test_query_attr(self):

# Call daos pool query, obtain pool UUID and SCM size, and compare
# against those used when creating the pool.
query_result = daos_cmd.pool_query(pool=self.pool.uuid)
query_result = daos_cmd.pool_query(pool=self.pool.identifier)
actual_uuid = query_result["response"]["uuid"]
actual_size = query_result["response"]["tier_stats"][0]["total"]
actual_size_roundup = int(actual_size / 100000) * 100000
Expand Down Expand Up @@ -82,11 +82,11 @@ def test_query_attr(self):
sample_attrs.append(sample_attr)
sample_vals.append(sample_val)
daos_cmd.pool_set_attr(
pool=self.pool.uuid, attr=sample_attr, value=sample_val)
pool=self.pool.identifier, attr=sample_attr, value=sample_val)
expected_attrs.append(sample_attr)

# List the attribute names and compare against those set.
attrs = daos_cmd.pool_list_attrs(pool=self.pool.uuid)
attrs = daos_cmd.pool_list_attrs(pool=self.pool.identifier)
for attr in attrs["response"]:
actual_attrs.append(attr)

Expand All @@ -102,7 +102,7 @@ def test_query_attr(self):
# Get each attribute's value and compare against those set.
for idx in range(5):
output = daos_cmd.pool_get_attr(
pool=self.pool.uuid, attr=sample_attrs[idx])
pool=self.pool.identifier, attr=sample_attrs[idx])
actual_val = base64.b64decode(output["response"]["value"]).decode()
if sample_vals[idx] != actual_val:
msg = "Unexpected attribute value! " +\
Expand Down
6 changes: 1 addition & 5 deletions src/tests/ftest/rebuild/cascading_failures.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@

SPDX-License-Identifier: BSD-2-Clause-Patent
"""
from daos_utils import DaosCommand
from rebuild_test_base import RebuildTestBase


Expand All @@ -17,7 +16,6 @@ def __init__(self, *args, **kwargs):
"""Initialize a CascadingFailures object."""
super().__init__(*args, **kwargs)
self.mode = None
self.daos_cmd = None

def create_test_container(self):
"""Create a container and write objects."""
Expand Down Expand Up @@ -66,13 +64,11 @@ def start_rebuild(self):

def execute_during_rebuild(self):
"""Execute test steps during rebuild."""
self.daos_cmd = DaosCommand(self.bin)
if self.mode == "cascading":
# Exclude the second rank from the pool during rebuild
self.server_managers[0].stop_ranks([self.inputs.rank.value[1]], self.d_log, force=True)

self.daos_cmd.container_set_prop(
pool=self.pool.uuid, cont=self.container.uuid, prop="status", value="healthy")
self.container.set_prop(prop="status", value="healthy")
# Populate the container with additional data during rebuild
self.container.write_objects(obj_class=self.inputs.object_class.value)

Expand Down
3 changes: 1 addition & 2 deletions src/tests/ftest/rebuild/no_cap.py
Original file line number Diff line number Diff line change
Expand Up @@ -117,8 +117,7 @@ def test_rebuild_no_capacity(self):
# query the pool before rebuild
self.log.info("....Pool query after filling, written_pload=%s", written_pload)
self.pool.set_query_data()
self.log.info(
"..Pool %s query data: %s\n", self.pool.uuid, self.pool.query_data)
self.log.info("..%s query data: %s\n", str(self.pool), self.pool.query_data)

# Start rebuild
rank = 1
Expand Down
4 changes: 2 additions & 2 deletions src/tests/ftest/rebuild/pool_destroy_race.py
Original file line number Diff line number Diff line change
Expand Up @@ -79,9 +79,9 @@ def test_pool_destroy_with_io(self):
pool.wait_for_rebuild_to_start(interval=1)

rebuild_state = pool.get_rebuild_state(True)
self.log.info("Pool %s rebuild status:%s", pool.uuid, rebuild_state)
self.log.info("%s rebuild status:%s", str(pool), rebuild_state)

self.log_step(f'Destroy pool {pool.uuid} while rebuild is {rebuild_state}')
self.log_step(f'Destroy {str(pool)} while rebuild is {rebuild_state}')
pool.destroy()

# Disable cleanup for all containers under the destroyed pool
Expand Down
10 changes: 1 addition & 9 deletions src/tests/ftest/rebuild/read_array.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@

SPDX-License-Identifier: BSD-2-Clause-Patent
"""
from daos_utils import DaosCommand
from general_utils import DaosTestError
from rebuild_test_base import RebuildTestBase

Expand All @@ -14,16 +13,9 @@ class RbldReadArrayTest(RebuildTestBase):
:avocado: recursive
"""

def __init__(self, *args, **kwargs):
"""Initialize a RbldReadArrayTest object."""
super().__init__(*args, **kwargs)
self.daos_cmd = None

def execute_during_rebuild(self):
"""Read the objects during rebuild."""
self.daos_cmd = DaosCommand(self.bin)
self.daos_cmd.container_set_prop(
pool=self.pool.uuid, cont=self.container.uuid, prop="status", value="healthy")
self.container.set_prop(prop="status", value="healthy")

message = "Reading the array objects during rebuild"
self.log.info(message)
Expand Down
9 changes: 3 additions & 6 deletions src/tests/ftest/rebuild/with_io.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
from apricot import TestWithServers
from daos_utils import DaosCommand


class RbldWithIO(TestWithServers):
Expand Down Expand Up @@ -62,7 +61,7 @@ def test_rebuild_with_io(self):
# Write data to the container for 30 seconds
self.log.info(
"Wrote %s bytes to container %s",
self.container.execute_io(30, rank, obj_class), self.container.uuid)
self.container.execute_io(30, rank, obj_class), str(self.container))

# Determine how many objects will need to be rebuilt
self.container.get_target_rank_lists(" prior to rebuild")
Expand All @@ -73,14 +72,12 @@ def test_rebuild_with_io(self):
# Wait for recovery to start
self.pool.wait_for_rebuild_to_start()

daos_cmd = DaosCommand(self.bin)
daos_cmd.container_set_prop(
pool=self.pool.uuid, cont=self.container.uuid, prop="status", value="healthy")
self.container.set_prop(prop="status", value="healthy")

# Write data to the container for another 30 seconds
self.log.info(
"Wrote an additional %s bytes to container %s",
self.container.execute_io(30), self.container.uuid)
self.container.execute_io(30), str(self.container))

# Wait for recovery to complete
self.pool.wait_for_rebuild_to_end()
Expand Down
2 changes: 1 addition & 1 deletion src/tests/ftest/scrubber/aggregation.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ def test_target_eviction_during_aggregation(self):
self.ior_cmd.namespace = "/run/ior_large_block_size/*"
self.processes = self.params.get("np", self.ior_cmd.namespace, self.processes)
self.ior_cmd.get_params(self)
self.run_ior_and_check_scruber_status(pool=self.pool, cont=self.container)
self.run_ior_and_check_scrubber_status(pool=self.pool, cont=self.container)
# Enable the aggregation on the pool.
self.pool.set_property("reclaim", "time")
# We want both aggregation and scrubber tasks
Expand Down
2 changes: 1 addition & 1 deletion src/tests/ftest/scrubber/basic.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ def run_scrubber_basic(self, pool_prop=None, cont_prop=None):
for test in transfer_block_size:
self.ior_cmd.transfer_size.update(test[0])
self.ior_cmd.block_size.update(test[1])
status = self.run_ior_and_check_scruber_status(pool=self.pool, cont=self.container)
status = self.run_ior_and_check_scrubber_status(pool=self.pool, cont=self.container)
if status is False:
self.log.info("-------Test Failed-------")
self.log.info("---No metrics value change----")
Expand Down
2 changes: 1 addition & 1 deletion src/tests/ftest/scrubber/csum_fault.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ def test_scrubber_csum_fault(self):
for test in transfer_block_size:
self.ior_cmd.transfer_size.update(test[0])
self.ior_cmd.block_size.update(test[1])
self.run_ior_and_check_scruber_status(pool=self.pool, cont=self.container)
self.run_ior_and_check_scrubber_status(pool=self.pool, cont=self.container)
start_time = 0
finish_time = 0
poll_status = False
Expand Down
2 changes: 1 addition & 1 deletion src/tests/ftest/scrubber/frequency.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ def test_objects_scrubbed_properly(self):
cont_prop = self.params.get("properties", '/run/container/*')
self.create_pool_cont_with_scrubber(pool_prop=pool_prop, cont_prop=cont_prop)
# Run IOR and gather the total scrubbed metrics information.
self.run_ior_and_check_scruber_status(pool=self.pool, cont=self.container)
self.run_ior_and_check_scrubber_status(pool=self.pool, cont=self.container)
# Wait for 5 minutes to get first scrubber bytes scrubbed metrics.
# NOTE: This value could change depending on the IOR data (objects created)
self.log.info("Sleeping for 5 minutes pool property set to scrub:timed")
Expand Down
2 changes: 1 addition & 1 deletion src/tests/ftest/scrubber/rebuild.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ def test_target_eviction_during_rebuild(self):
self.create_pool_cont_with_scrubber(pool_prop=pool_prop, cont_prop=cont_prop)
self.pool.query()
initial_metrics = self.scrubber.get_scrub_corrupt_metrics()
self.run_ior_and_check_scruber_status(pool=self.pool, cont=self.container)
self.run_ior_and_check_scrubber_status(pool=self.pool, cont=self.container)
# Exclude Rank 5 to start the rebuild operation.
self.pool.exclude("5")
# Wait for a minute for the scrubber to take action and evict target
Expand Down
2 changes: 1 addition & 1 deletion src/tests/ftest/scrubber/snapshot.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ def test_target_eviction_during_snapshot(self):
self.create_pool_cont_with_scrubber(pool_prop=pool_prop, cont_prop=cont_prop)
self.pool.query()
initial_metrics = self.scrubber.get_scrub_corrupt_metrics()
self.run_ior_and_check_scruber_status(pool=self.pool, cont=self.container)
self.run_ior_and_check_scrubber_status(pool=self.pool, cont=self.container)
# Wait for a minute for the scrubber to take action and evict target
# after corruption threshold reached.
# Take a snap-shot after 15 seconds while the csum faults are injected.
Expand Down
6 changes: 3 additions & 3 deletions src/tests/ftest/scrubber/target_auto_eviction.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,10 +31,10 @@ def test_scrubber_ssd_auto_eviction(self):
initial_metrics = {}
final_metrics = {}
self.create_pool_cont_with_scrubber(pool_prop=pool_prop, cont_prop=cont_prop)
self.dmg_cmd.pool_query(self.pool.identifier)
self.pool.query()
initial_metrics = self.scrubber.get_scrub_corrupt_metrics()
t_start = journalctl_time()
self.run_ior_and_check_scruber_status(pool=self.pool, cont=self.container)
self.run_ior_and_check_scrubber_status(pool=self.pool, cont=self.container)
# Wait for a minute for the scrubber to take action and evict target
# after corruption threshold reached.
self.log.info("Sleeping for 60 seconds")
Expand All @@ -57,7 +57,7 @@ def test_scrubber_ssd_auto_eviction(self):
self.log.info("Data corrupted occurrence %s", occurrence)
else:
self.fail("Test Failed: RAS data corrupted messages missing on system logs")
self.dmg_cmd.pool_query(self.pool.identifier)
self.pool.query()
final_metrics = self.scrubber.get_scrub_corrupt_metrics()
status = self.verify_scrubber_metrics_value(initial_metrics, final_metrics)
if status is False:
Expand Down
2 changes: 1 addition & 1 deletion src/tests/ftest/server/metadata.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ def create_pool(self, svc_ops_enabled=True):
params = {}
params['properties'] = "svc_ops_enabled:0"
self.add_pool(**params)
self.log.info("Created pool %s: svc ranks:", self.pool.uuid)
self.log.info("Created %s: svc ranks:", str(self.pool))
for index, rank in enumerate(self.pool.svc_ranks):
self.log.info("[%d]: %d", index, rank)

Expand Down
6 changes: 2 additions & 4 deletions src/tests/ftest/util/data_mover_test_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -344,8 +344,7 @@ def dataset_gen(self, cont, num_objs, num_dkeys, num_akeys_single,
list: a list of DaosObj created.

"""
self.log.info("Creating dataset in %s/%s",
str(cont.pool.uuid), str(cont.uuid))
self.log.info("Creating dataset in %s/%s", str(cont.pool), str(cont))

cont.open()

Expand Down Expand Up @@ -415,8 +414,7 @@ def dataset_verify(self, obj_list, cont, num_objs, num_dkeys,
akey_extents (list): varying number of akey extents to iterate.

"""
self.log.info("Verifying dataset in %s/%s",
str(cont.pool.uuid), str(cont.uuid))
self.log.info("Verifying dataset in %s/%s", str(cont.pool), str(cont))

cont.open()

Expand Down
Loading
Loading