Skip to content

Commit

Permalink
DAOS-16845 test: add dynamic pool wait_for_aggregation
Browse files Browse the repository at this point in the history
Add TestPool.wait_for_aggregation to dynamically wait for pool
aggregation to complete.
Update tests to use the new function.

Test-tag: test_enospace_time_with_fg
Skip-unit-tests: true
Skip-fault-injection-test: true

Required-githooks: true

Signed-off-by: Dalton Bohning <[email protected]>
  • Loading branch information
daltonbohning committed Dec 3, 2024
1 parent 9a4bec3 commit df36132
Show file tree
Hide file tree
Showing 2 changed files with 49 additions and 6 deletions.
20 changes: 14 additions & 6 deletions src/tests/ftest/nvme/enospace.py
Original file line number Diff line number Diff line change
Expand Up @@ -464,22 +464,30 @@ def test_enospace_time_with_fg(self):
"""
self.log.info(self.pool.pool_percentage_used())

# Enabled Time mode for Aggregation.
self.log_step("Enable pool aggregation")
self.pool.set_property("reclaim", "time")

self.log_step("Get initial pool free space")
initial_space = self.pool.get_pool_daos_space()
initial_free_scm = initial_space["s_free"][0]
initial_free_nvme = initial_space["s_free"][1]

# Repeat the test in loop.
for _loop in range(10):
self.log.info("-------enospc_time_fg Loop--------- %d", _loop)
self.log_step(f"Run IOR to fill the pool - enospace_time_with_fg loop {_loop}")
self.log.info(self.pool.pool_percentage_used())
# Run IOR to fill the pool.
log_file = f"-loop_{_loop}".join(os.path.splitext(self.client_log))
self.run_enospace_with_bg_job(log_file)
# Delete all the containers
self.log_step(f"Delete all containers - enospace_time_with_fg loop {_loop}")
self.delete_all_containers()
# Delete container will take some time to release the space
time.sleep(60)
self.log_step(f"Wait for aggregation to complete - enospace_time_with_fg loop {_loop}")
agg_did_complete = self.pool.wait_for_aggregation(
initial_free_scm, initial_free_nvme, retries=4, interval=30)
if not agg_did_complete:
self.fail("Pool space not reclaimed after deleting all containers")

# Run last IO
self.log_step("Run one more sanity IOR to fill 1%")
self.start_ior_load(storage='SCM', operation="Auto_Write", percent=1)

@skipForTicket("DAOS-8896")
Expand Down
35 changes: 35 additions & 0 deletions src/tests/ftest/util/test_utils_pool.py
Original file line number Diff line number Diff line change
Expand Up @@ -1475,6 +1475,41 @@ def wait_pool_suspect_ranks(self, expected, interval=1, timeout=30):

self.log.info("Wait for suspect ranks complete: suspect ranks %s", expected)

def wait_for_aggregation(self, expected_free_scm=None, expected_free_nvme=None,
retries=4, interval=30):
"""Wait for aggregation to finish.
Args:
expected_free_scm (int, optional): expected scm space to compare to. Defaults to None.
Must supply at least one of expected_free_scm or expected_free_nvme.
expected_free_nvme (int, optional): expected nvme space to compare to. Defaults to None.
Must supply at least one of expected_free_scm or expected_free_nvme.
retries (int, optional): number of times to retry. Default is 4.
interval (int, optional): seconds to wait before retrying. Default is 60.
Returns:
bool: whether aggregation completed within the time limit
Raises:
ValueError: if neither expected_free_scm nor expected_free_nvme are given
"""
if expected_free_scm is None and expected_free_nvme is None:
raise ValueError("expected_free_scm or expected_free_nvme is required")
for _ in range(retries):
current_space = self.get_pool_daos_space()
current_free_scm = current_space["s_free"][0]
current_free_nvme = current_space["s_free"][1]
self.log.info("expected_free_scm = %s", expected_free_scm)
self.log.info("expected_free_nvme = %s", expected_free_nvme)
self.log.info("current_free_scm = %s", current_free_scm)
self.log.info("current_free_nvme = %s", current_free_nvme)
if current_free_scm == expected_free_scm and current_free_nvme == expected_free_nvme:
return True
sleep(interval)

return False

def verify_uuid_directory(self, host, scm_mount):
"""Check if pool folder exist on server.
Expand Down

0 comments on commit df36132

Please sign in to comment.