Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix rebalance logs #983

Merged
merged 1 commit into from
Dec 15, 2024
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 5 additions & 5 deletions control/rebalance.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ def auto_rebalance_task(self ):
try:
rc = self.gw_srv.execute_grpc_function(self.rebalance_logic, None, "context")
if rc == 1:
self.logger.info(f"Nothing found for rebalance, break at {i} iteration")
self.logger.debug(f"Nothing found for rebalance, break at {i} iteration")
break
except Exception:
self.logger.exception(f"Exception in auto rebalance")
Expand Down Expand Up @@ -85,7 +85,7 @@ def find_min_loaded_group_in_subsys(self, nqn, grp_list)->int:
# and rebalance results will be accurate. Monitor in nvme-gw show response publishes the index of ANA group that is currently responsible for rebalance
def rebalance_logic(self, request, context)->int:
worker_ana_group = self.ceph_utils.get_rebalance_ana_group()
self.logger.info(f"Called rebalance logic: current rebalancing ana group {worker_ana_group}")
self.logger.debug(f"Called rebalance logic: current rebalancing ana group {worker_ana_group}")
ongoing_scale_down_rebalance = False
grps_list = self.ceph_utils.get_number_created_gateways(self.gw_srv.gateway_pool, self.gw_srv.gateway_group)
if not self.ceph_utils.is_rebalance_supported():
Expand All @@ -101,15 +101,15 @@ def rebalance_logic(self, request, context)->int:
for ana_grp in self.gw_srv.ana_grp_state:
if self.gw_srv.ana_grp_state[ana_grp] == pb2.ana_state.OPTIMIZED :
if ana_grp not in grps_list:
self.logger.info(f"Found optimized ana group {ana_grp} that handles to group of deleted GW."
self.logger.info(f"Found optimized ana group {ana_grp} that handles the group of deleted GW."
f"Number NS in group {self.gw_srv.ana_grp_ns_load[ana_grp]} - Start NS rebalance")
if self.gw_srv.ana_grp_ns_load[ana_grp] >= self.rebalance_max_ns_to_change_lb_grp:
num = self.rebalance_max_ns_to_change_lb_grp
else:
num = self.gw_srv.ana_grp_ns_load[ana_grp]
if num > 0 :
min_ana_grp, chosen_nqn = self.find_min_loaded_group(grps_list)
self.logger.info(f"Found destination ana group {min_ana_grp}, subsystem {chosen_nqn}")
self.logger.info(f"Start rebalance (scale down) destination ana group {min_ana_grp}, subsystem {chosen_nqn}")
self.ns_rebalance(context, ana_grp, min_ana_grp, 1, "0")#scale down rebalance
return 0
else :
Expand All @@ -130,7 +130,7 @@ def rebalance_logic(self, request, context)->int:
(self.gw_srv.ana_grp_subs_load[min_ana_grp][nqn] + 1) <= target_subs_per_ana
or (self.gw_srv.ana_grp_subs_load[min_ana_grp][nqn] + 1) == (self.gw_srv.ana_grp_subs_load[ana_grp][nqn] - 1)
):
self.logger.debug(f"Start rebalance in subsystem {nqn}, dest ana {min_ana_grp}, dest ana load per subs {min_load}")
self.logger.info(f"Start rebalance (regular) in subsystem {nqn}, dest ana {min_ana_grp}, dest ana load per subs {min_load}")
self.ns_rebalance(context, ana_grp, min_ana_grp, 1, nqn) #regular rebalance
return 0
else:
Expand Down
Loading