From e37545b2accb63d0a694eca822aa8c479d5a29a0 Mon Sep 17 00:00:00 2001
From: Ben Barnard
Date: Sun, 22 Jul 2018 22:46:52 +0200
Subject: [PATCH] Don't shrink the executor count to a non-zero number

Fixes #14.

When using dynamic allocation, a smaller executor count than what is
currently running might be requested. Since we don't want to kill
existing allocations (they have shuffle services, as well as executors
that are potentially still running tasks), we won't reduce the executor
task group's count unless we are asked to reduce it to 0.
---
 .../scheduler/cluster/nomad/SparkNomadJobController.scala | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/resource-managers/nomad/src/main/scala/org/apache/spark/scheduler/cluster/nomad/SparkNomadJobController.scala b/resource-managers/nomad/src/main/scala/org/apache/spark/scheduler/cluster/nomad/SparkNomadJobController.scala
index 196d810387558..77c421e7fc15a 100644
--- a/resource-managers/nomad/src/main/scala/org/apache/spark/scheduler/cluster/nomad/SparkNomadJobController.scala
+++ b/resource-managers/nomad/src/main/scala/org/apache/spark/scheduler/cluster/nomad/SparkNomadJobController.scala
@@ -68,8 +68,10 @@ private[spark] class SparkNomadJobController(jobManipulator: NomadJobManipulator
 
   def setExecutorCount(count: Int): Unit = {
     jobManipulator.updateJob(startIfNotYetRunning = count > 0) { job =>
-      SparkNomadJob.find(job, ExecutorTaskGroup).get
-        .setCount(count)
+      val executorGroup = SparkNomadJob.find(job, ExecutorTaskGroup).get
+      if (count > 0 && executorGroup.getCount < count) {
+        executorGroup.setCount(count)
+      }
     }
   }
 
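
Note (not part of the patch): the guard above only ever grows the executor
task group's count. The sketch below illustrates that behavior in isolation;
ExecutorCountSketch and FakeExecutorTaskGroup are hypothetical stand-ins for
illustration only, not the real NomadJobManipulator/SparkNomadJob API.

    // Illustrative sketch of the patched guard, under the assumptions above.
    object ExecutorCountSketch {
      // Hypothetical stand-in with the getCount/setCount shape used by the patch.
      final class FakeExecutorTaskGroup(private var count: Int) {
        def getCount: Int = count
        def setCount(n: Int): Unit = { count = n }
      }

      // Mirrors the patched logic: only grow the count; never shrink it to a
      // non-zero number, so running executors (and their shuffle services)
      // are not killed by a smaller dynamic-allocation request.
      def setExecutorCount(group: FakeExecutorTaskGroup, requested: Int): Unit = {
        if (requested > 0 && group.getCount < requested) {
          group.setCount(requested)
        }
      }

      def main(args: Array[String]): Unit = {
        val group = new FakeExecutorTaskGroup(4)
        setExecutorCount(group, 2)
        println(group.getCount) // 4: a smaller non-zero request is ignored
        setExecutorCount(group, 6)
        println(group.getCount) // 6: a larger request grows the count
      }
    }

In the real code, a request for 0 executors is reflected through the
startIfNotYetRunning flag passed to updateJob rather than through setCount.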