diff --git a/pkg/controllers/job/helpers/helpers.go b/pkg/controllers/job/helpers/helpers.go
index bac6a8f900..3b6f0d77fe 100644
--- a/pkg/controllers/job/helpers/helpers.go
+++ b/pkg/controllers/job/helpers/helpers.go
@@ -120,8 +120,8 @@ func GetJobKeyByReq(req *apis.Request) string {
 	return fmt.Sprintf("%s/%s", req.Namespace, req.JobName)
 }
 
-// GetTasklndexUnderJob return index of the task in the job.
-func GetTasklndexUnderJob(taskName string, job *batch.Job) int {
+// GetTaskIndexUnderJob returns the index of the task in the job.
+func GetTaskIndexUnderJob(taskName string, job *batch.Job) int {
 	for index, task := range job.Spec.Tasks {
 		if task.Name == taskName {
 			return index
diff --git a/pkg/controllers/job/helpers/helpers_test.go b/pkg/controllers/job/helpers/helpers_test.go
index 84240c5cbe..146b71dafe 100644
--- a/pkg/controllers/job/helpers/helpers_test.go
+++ b/pkg/controllers/job/helpers/helpers_test.go
@@ -186,7 +186,7 @@ func TestGetTasklndexUnderJobFunc(t *testing.T) {
 
 	for _, testCase := range testCases {
 		t.Run(testCase.Name, func(t *testing.T) {
-			index := GetTasklndexUnderJob(testCase.TaskName, testCase.Job)
+			index := GetTaskIndexUnderJob(testCase.TaskName, testCase.Job)
 			if index != testCase.Expect {
 				t.Errorf("GetTasklndexUnderJobFunc(%s) = %d, expect %d", testCase.TaskName, index, testCase.Expect)
 			}
diff --git a/pkg/controllers/job/job_controller_actions.go b/pkg/controllers/job/job_controller_actions.go
index 1dabef5003..2964ec98b0 100644
--- a/pkg/controllers/job/job_controller_actions.go
+++ b/pkg/controllers/job/job_controller_actions.go
@@ -375,7 +375,7 @@ func (cc *jobcontroller) syncJob(jobInfo *apis.JobInfo, updateStatus state.Updat
 			continue
 		}
 		go func(taskName string, podToCreateEachTask []*v1.Pod) {
-			taskIndex := jobhelpers.GetTasklndexUnderJob(taskName, job)
+			taskIndex := jobhelpers.GetTaskIndexUnderJob(taskName, job)
 			if job.Spec.Tasks[taskIndex].DependsOn != nil {
 				if !cc.waitDependsOnTaskMeetCondition(taskName, taskIndex, podToCreateEachTask, job) {
 					klog.V(3).Infof("Job %s/%s depends on task not ready", job.Name, job.Namespace)
@@ -512,7 +512,7 @@ func (cc *jobcontroller) waitDependsOnTaskMeetCondition(taskName string, taskInd
 
 func (cc *jobcontroller) isDependsOnPodsReady(task string, job *batch.Job) bool {
 	dependsOnPods := jobhelpers.GetPodsNameUnderTask(task, job)
-	dependsOnTaskIndex := jobhelpers.GetTasklndexUnderJob(task, job)
+	dependsOnTaskIndex := jobhelpers.GetTaskIndexUnderJob(task, job)
 	runningPodCount := 0
 	for _, podName := range dependsOnPods {
 		pod, err := cc.podLister.Pods(job.Namespace).Get(podName)
diff --git a/pkg/controllers/job/plugins/distributed-framework/mpi/mpi.go b/pkg/controllers/job/plugins/distributed-framework/mpi/mpi.go
index 685e029131..1bcda596a1 100644
--- a/pkg/controllers/job/plugins/distributed-framework/mpi/mpi.go
+++ b/pkg/controllers/job/plugins/distributed-framework/mpi/mpi.go
@@ -80,7 +80,7 @@ func (mp *Plugin) OnPodCreate(pod *v1.Pod, job *batch.Job) error {
 	workerHosts := ""
 	env := v1.EnvVar{}
 	if helpers.GetTaskKey(pod) == mp.masterName {
-		workerHosts = mp.generateTaskHosts(job.Spec.Tasks[helpers.GetTasklndexUnderJob(mp.workerName, job)], job.Name)
+		workerHosts = mp.generateTaskHosts(job.Spec.Tasks[helpers.GetTaskIndexUnderJob(mp.workerName, job)], job.Name)
 		env = v1.EnvVar{
 			Name:  MPIHost,
 			Value: workerHosts,
diff --git a/pkg/controllers/job/plugins/distributed-framework/pytorch/pytorch.go b/pkg/controllers/job/plugins/distributed-framework/pytorch/pytorch.go
index 83168685dc..07d1556a6a 100644
--- a/pkg/controllers/job/plugins/distributed-framework/pytorch/pytorch.go
+++ b/pkg/controllers/job/plugins/distributed-framework/pytorch/pytorch.go
@@ -64,7 +64,7 @@ func (pp *pytorchPlugin) Name() string {
 
 func (pp *pytorchPlugin) OnPodCreate(pod *v1.Pod, job *batch.Job) error {
 	taskType := helpers.GetTaskKey(pod)
-	masterIndex := helpers.GetTasklndexUnderJob(pp.masterName, job)
+	masterIndex := helpers.GetTaskIndexUnderJob(pp.masterName, job)
 	if masterIndex == -1 {
 		klog.Errorf("job %v doesn't have task %v", job.Name, pp.masterName)
 		return nil
diff --git a/pkg/scheduler/actions/preempt/preempt.go b/pkg/scheduler/actions/preempt/preempt.go
index f3be6aa64b..e8dba79695 100644
--- a/pkg/scheduler/actions/preempt/preempt.go
+++ b/pkg/scheduler/actions/preempt/preempt.go
@@ -67,7 +67,7 @@ func (pmpt *Action) Execute(ssn *framework.Session) {
 			queues[queue.UID] = queue
 		}
 
-		// check job if starting for more resources.
+		// check if the job is starving for more resources.
 		if ssn.JobStarving(job) {
 			if _, found := preemptorsMap[job.Queue]; !found {
 				preemptorsMap[job.Queue] = util.NewPriorityQueue(ssn.JobOrderFn)
@@ -273,7 +273,7 @@ func preempt(
 
 	for !victimsQueue.Empty() {
 		// If reclaimed enough resources, break loop to avoid Sub panic.
-		// If preemptor's queue is overused, it means preemptor can not be allcated. So no need care about the node idle resourace
+		// If the preemptor's queue is overused, the preemptor cannot be allocated, so there is no need to care about the node's idle resources.
 		if !ssn.Overused(currentQueue) && preemptor.InitResreq.LessEqual(node.FutureIdle(), api.Zero) {
 			break
 		}
@@ -292,7 +292,7 @@ func preempt(
 		klog.V(3).Infof("Preempted <%v> for Task <%s/%s> requested <%v>.",
 			preempted, preemptor.Namespace, preemptor.Name, preemptor.InitResreq)
 
-		// If preemptor's queue is overused, it means preemptor can not be allcated. So no need care about the node idle resourace
+		// If the preemptor's queue is overused, the preemptor cannot be allocated, so there is no need to care about the node's idle resources.
 		if !ssn.Overused(currentQueue) && preemptor.InitResreq.LessEqual(node.FutureIdle(), api.Zero) {
 			if err := stmt.Pipeline(preemptor, node.Name); err != nil {
 				klog.Errorf("Failed to pipeline Task <%s/%s> on Node <%s>",
diff --git a/pkg/scheduler/api/job_info.go b/pkg/scheduler/api/job_info.go
index ec9e5fe606..6800c67ab2 100644
--- a/pkg/scheduler/api/job_info.go
+++ b/pkg/scheduler/api/job_info.go
@@ -36,7 +36,7 @@ import (
 	volumescheduling "volcano.sh/volcano/pkg/scheduler/capabilities/volumebinding"
 )
 
-// DisruptionBudget define job min pod available and max pod unvailable value
+// DisruptionBudget defines the job's min available and max unavailable pod values
 type DisruptionBudget struct {
 	MinAvailable  string
 	MaxUnavilable string
@@ -323,7 +323,7 @@ type JobInfo struct {
 	Preemptable bool
 
 	// RevocableZone support set volcano.sh/revocable-zone annotaion or label for pod/podgroup
-	// we only support empty value or * value for this version and we will support specify revocable zone name for futrue release
+	// we only support empty value or * value for this version, and will support specifying a revocable zone name in a future release
 	// empty value means workload can not use revocable node
 	// * value means workload can use all the revocable node for during node active revocable time.
 	RevocableZone string
@@ -666,7 +666,7 @@ func (ji *JobInfo) TaskSchedulingReason(tid TaskID) (reason string, msg string)
 		return PodReasonUnschedulable, msg
 	case Pending:
 		if fe := ji.NodesFitErrors[tid]; fe != nil {
-			// Pod is not schedulable
+			// Pod is unschedulable
 			return PodReasonUnschedulable, fe.Error()
 		}
 		// Pod is not scheduled yet, keep UNSCHEDULABLE as the reason to support cluster autoscaler
@@ -703,7 +703,7 @@ func (ji *JobInfo) WaitingTaskNum() int32 {
 
 // CheckTaskValid returns whether each task of job is valid.
 func (ji *JobInfo) CheckTaskValid() bool {
-	// if job minAvailable is less than sumof(task minAvailable), skip this check
+	// if job minAvailable is less than the sum of task minAvailable values, skip this check
 	if ji.MinAvailable < ji.TaskMinAvailableTotal {
 		return true
 	}
diff --git a/pkg/scheduler/api/node_info.go b/pkg/scheduler/api/node_info.go
index 49f7d4dbf4..5ac538761c 100644
--- a/pkg/scheduler/api/node_info.go
+++ b/pkg/scheduler/api/node_info.go
@@ -493,7 +493,7 @@ func (ni *NodeInfo) addResource(pod *v1.Pod) {
 	ni.Others[vgpu.DeviceName].(Devices).AddResource(pod)
 }
 
-// subResource is used to substract sharable devices
+// subResource is used to subtract sharable devices
 func (ni *NodeInfo) subResource(pod *v1.Pod) {
 	ni.Others[GPUSharingDevice].(Devices).SubResource(pod)
 	ni.Others[vgpu.DeviceName].(Devices).SubResource(pod)
diff --git a/pkg/scheduler/conf/scheduler_conf.go b/pkg/scheduler/conf/scheduler_conf.go
index ec34fb9df2..148ffb76ac 100644
--- a/pkg/scheduler/conf/scheduler_conf.go
+++ b/pkg/scheduler/conf/scheduler_conf.go
@@ -49,7 +49,7 @@ type PluginOption struct {
 	Name string `yaml:"name"`
 	// EnabledJobOrder defines whether jobOrderFn is enabled
 	EnabledJobOrder *bool `yaml:"enableJobOrder"`
-	// EnabledHierachy defines whether hierarchical sharing is enabled
+	// EnabledHierarchy defines whether hierarchical sharing is enabled
 	EnabledHierarchy *bool `yaml:"enableHierarchy"`
 	// EnabledJobReady defines whether jobReadyFn is enabled
 	EnabledJobReady *bool `yaml:"enableJobReady"`
diff --git a/pkg/scheduler/plugins/gang/gang.go b/pkg/scheduler/plugins/gang/gang.go
index 5ceb3ace40..fb4f13a447 100644
--- a/pkg/scheduler/plugins/gang/gang.go
+++ b/pkg/scheduler/plugins/gang/gang.go
@@ -155,7 +155,7 @@ func (gp *gangPlugin) OnSessionOpen(ssn *framework.Session) {
 	jobStarvingFn := func(obj interface{}) bool {
 		ji := obj.(*api.JobInfo)
 		occupied := ji.WaitingTaskNum() + ji.ReadyTaskNum()
-		// In the preemption scenario, the taskMinAvailble configuration is not concerned, only the jobMinAvailble is concerned
+		// In the preemption scenario, the taskMinAvailable configuration is not considered; only jobMinAvailable is
		return occupied < ji.MinAvailable
 	}
 	ssn.AddJobStarvingFns(gp.Name(), jobStarvingFn)
diff --git a/pkg/scheduler/plugins/resourcequota/resourcequota.go b/pkg/scheduler/plugins/resourcequota/resourcequota.go
index bf01cebbb5..5fa7ff6113 100644
--- a/pkg/scheduler/plugins/resourcequota/resourcequota.go
+++ b/pkg/scheduler/plugins/resourcequota/resourcequota.go
@@ -7,7 +7,7 @@ import (
 	quotav1 "k8s.io/apiserver/pkg/quota/v1"
 	"k8s.io/klog/v2"
 
-	scheduling "volcano.sh/apis/pkg/apis/scheduling/v1beta1"
+	scheduling "volcano.sh/apis/pkg/apis/scheduling"
 	"volcano.sh/volcano/pkg/scheduler/api"
 	"volcano.sh/volcano/pkg/scheduler/framework"
 	"volcano.sh/volcano/pkg/scheduler/plugins/util"
diff --git a/pkg/webhooks/admission/jobs/plugins/mpi/mpi.go b/pkg/webhooks/admission/jobs/plugins/mpi/mpi.go
index 1c6ada9414..6dd8f74db0 100644
--- a/pkg/webhooks/admission/jobs/plugins/mpi/mpi.go
+++ b/pkg/webhooks/admission/jobs/plugins/mpi/mpi.go
@@ -26,7 +26,7 @@ import (
 
 func AddDependsOn(job *v1alpha1.Job) {
 	mp := controllerMpi.NewInstance(job.Spec.Plugins[controllerMpi.MPIPluginName])
-	masterIndex := helpers.GetTasklndexUnderJob(mp.GetMasterName(), job)
+	masterIndex := helpers.GetTaskIndexUnderJob(mp.GetMasterName(), job)
	if masterIndex == -1 {
 		klog.Errorln("Failed to find master task")
 		return
diff --git a/pkg/webhooks/admission/jobs/validate/admit_job.go b/pkg/webhooks/admission/jobs/validate/admit_job.go
index 5d9bf5ad51..06ae119b1d 100644
--- a/pkg/webhooks/admission/jobs/validate/admit_job.go
+++ b/pkg/webhooks/admission/jobs/validate/admit_job.go
@@ -144,8 +144,8 @@ func validateJobCreate(job *v1alpha1.Job, reviewResponse *admissionv1.AdmissionR
 
 	if _, ok := job.Spec.Plugins[controllerMpi.MPIPluginName]; ok {
 		mp := controllerMpi.NewInstance(job.Spec.Plugins[controllerMpi.MPIPluginName])
-		masterIndex := helpers.GetTasklndexUnderJob(mp.GetMasterName(), job)
-		workerIndex := helpers.GetTasklndexUnderJob(mp.GetWorkerName(), job)
+		masterIndex := helpers.GetTaskIndexUnderJob(mp.GetMasterName(), job)
+		workerIndex := helpers.GetTaskIndexUnderJob(mp.GetWorkerName(), job)
 		if masterIndex == -1 {
 			reviewResponse.Allowed = false
 			return "The specified mpi master task was not found"
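
For reference, here is a minimal, self-contained sketch of the renamed helper and the -1 guard its callers rely on. The Job and TaskSpec types below are trimmed-down stand-ins introduced only for this illustration; the real types are batch.Job and batch.TaskSpec from volcano.sh/apis/pkg/apis/batch/v1alpha1.

package main

import "fmt"

// TaskSpec and Job are simplified stand-ins for the volcano.sh/apis
// batch types, carrying only the fields the helper touches.
type TaskSpec struct {
	Name string
}

type Job struct {
	Spec struct {
		Tasks []TaskSpec
	}
}

// GetTaskIndexUnderJob returns the index of the named task within the
// job spec, or -1 when no task matches. Callers must treat -1 as
// "not found" before using it to index job.Spec.Tasks.
func GetTaskIndexUnderJob(taskName string, job *Job) int {
	for index, task := range job.Spec.Tasks {
		if task.Name == taskName {
			return index
		}
	}
	return -1
}

func main() {
	job := &Job{}
	job.Spec.Tasks = []TaskSpec{{Name: "master"}, {Name: "worker"}}

	// Mirrors the guard used in pytorch.go and the MPI plugins above:
	// bail out instead of indexing with -1.
	if idx := GetTaskIndexUnderJob("ps", job); idx == -1 {
		fmt.Println("task not found") // prints: task not found
	}
	fmt.Println(GetTaskIndexUnderJob("worker", job)) // prints: 1
}

As the pytorch.go, webhook mpi.go, and admit_job.go hunks show, each caller checks for the -1 sentinel before indexing; the mpi.go OnPodCreate hunk indexes job.Spec.Tasks directly, which presumably relies on the admission webhook in admit_job.go having already verified that the worker task exists.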