
Update KubeRay Autoscaler to use NumOfHosts for min/max workers (#48212)
Signed-off-by: Ryan O'Leary <[email protected]>
ryanaoleary authored Feb 5, 2025
1 parent 233296c commit e3680f7
Showing 2 changed files with 6 additions and 5 deletions.
7 changes: 4 additions & 3 deletions python/ray/autoscaler/_private/kuberay/autoscaling_config.py
@@ -192,9 +192,10 @@ def _node_type_from_group_spec(
         # The head node type has no workers because the head is not a worker.
         min_workers = max_workers = 0
     else:
-        # `minReplicas` and `maxReplicas` are required fields for each workerGroupSpec
-        min_workers = group_spec["minReplicas"]
-        max_workers = group_spec["maxReplicas"]
+        # `minReplicas` and `maxReplicas` are required fields for each workerGroupSpec.
+        # numOfHosts specifies the number of workers per replica in KubeRay v1.1+.
+        min_workers = group_spec["minReplicas"] * group_spec.get("numOfHosts", 1)
+        max_workers = group_spec["maxReplicas"] * group_spec.get("numOfHosts", 1)

     resources = _get_ray_resources_from_group_spec(group_spec, is_head)

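For illustration, here is a minimal, self-contained sketch of the behavior introduced above: the replica bounds from a workerGroupSpec are now scaled by numOfHosts (hosts per replica), falling back to 1 when the field is absent. The group_spec dict below is hypothetical and not taken from this repository; only the minReplicas/maxReplicas/numOfHosts keys matter for the calculation.

# Sketch of the new scaling logic; the spec values below are hypothetical.
group_spec = {
    "groupName": "tpu-group",
    "minReplicas": 0,
    "maxReplicas": 4,
    "numOfHosts": 2,  # e.g. a multi-host TPU slice with 2 Ray worker hosts per replica
}

# Mirrors the changed lines: multiply replica bounds by hosts per replica,
# defaulting to 1 for specs that omit numOfHosts (older CRs).
min_workers = group_spec["minReplicas"] * group_spec.get("numOfHosts", 1)
max_workers = group_spec["maxReplicas"] * group_spec.get("numOfHosts", 1)

print(min_workers, max_workers)  # 0 8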
4 changes: 2 additions & 2 deletions python/ray/tests/kuberay/test_autoscaling_config.py
@@ -109,7 +109,7 @@ def _get_basic_autoscaling_config() -> dict:
         # Same as "small-group" with a TPU resource entry added
         # and modified max_workers and node_config.
         "tpu-group": {
-            "max_workers": 4,
+            "max_workers": 8,
             "min_workers": 0,
             "node_config": {},
             "resources": {
@@ -131,7 +131,7 @@ def _get_basic_autoscaling_config() -> dict:
         "head_start_ray_commands": [],
         "idle_timeout_minutes": 1.0,
         "initialization_commands": [],
-        "max_workers": 504,
+        "max_workers": 508,
         "setup_commands": [],
         "upscaling_speed": 1000,
         "worker_setup_commands": [],
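The updated expectations follow from the new formula, assuming the test's RayCluster CR gives the tpu-group worker group maxReplicas: 4 and numOfHosts: 2 (values inferred from the deltas, not shown in this diff): the group's max_workers becomes 4 * 2 = 8, and the cluster-wide max_workers in the expected config grows by the same 4 workers, from 504 to 508. A quick check of that arithmetic:

# Hedged arithmetic behind the new expected values (assumes maxReplicas=4 and
# numOfHosts=2 for the test's tpu-group; those values are inferred, not shown above).
old_group_max = 4          # previously maxReplicas only
new_group_max = 4 * 2      # now maxReplicas * numOfHosts
assert new_group_max == 8
assert 504 + (new_group_max - old_group_max) == 508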
