Merge pull request #3069 from lowang-bh/docs
correct spelling errors in comments and unify the package to apis/scheduling
volcano-sh-bot authored Sep 15, 2023
2 parents 82d4b85 + 1a26266 commit 9a96046
Showing 13 changed files with 21 additions and 21 deletions.
4 changes: 2 additions & 2 deletions pkg/controllers/job/helpers/helpers.go
@@ -120,8 +120,8 @@ func GetJobKeyByReq(req *apis.Request) string {
     return fmt.Sprintf("%s/%s", req.Namespace, req.JobName)
 }
 
-// GetTasklndexUnderJob return index of the task in the job.
-func GetTasklndexUnderJob(taskName string, job *batch.Job) int {
+// GetTaskIndexUnderJob return index of the task in the job.
+func GetTaskIndexUnderJob(taskName string, job *batch.Job) int {
     for index, task := range job.Spec.Tasks {
         if task.Name == taskName {
             return index
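
The hunk ends before the function's fallthrough. For readers tracking the rename, a minimal sketch of the whole helper follows; the -1 return for a missing task is an assumption inferred from the masterIndex == -1 checks elsewhere in this PR, not from the visible diff:

// GetTaskIndexUnderJob returns the index of the named task in
// job.Spec.Tasks, or -1 when no task matches (assumed fallthrough).
func GetTaskIndexUnderJob(taskName string, job *batch.Job) int {
    for index, task := range job.Spec.Tasks {
        if task.Name == taskName {
            return index
        }
    }
    return -1
}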
2 changes: 1 addition & 1 deletion pkg/controllers/job/helpers/helpers_test.go
@@ -186,7 +186,7 @@ func TestGetTasklndexUnderJobFunc(t *testing.T) {
 
     for _, testCase := range testCases {
         t.Run(testCase.Name, func(t *testing.T) {
-            index := GetTasklndexUnderJob(testCase.TaskName, testCase.Job)
+            index := GetTaskIndexUnderJob(testCase.TaskName, testCase.Job)
             if index != testCase.Expect {
                 t.Errorf("GetTasklndexUnderJobFunc(%s) = %d, expect %d", testCase.TaskName, index, testCase.Expect)
             }
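
The testCases slice itself lies outside the visible hunk; judging from the fields the loop references (Name, TaskName, Job, Expect), each entry plausibly has the shape sketched below. The field names come from the loop body; the concrete values are illustrative only:

testCases := []struct {
    Name     string
    TaskName string
    Job      *batch.Job
    Expect   int
}{
    // hypothetical cases: a task that exists and one that does not
    {Name: "first task", TaskName: "master", Job: &batch.Job{Spec: batch.JobSpec{Tasks: []batch.TaskSpec{{Name: "master"}, {Name: "worker"}}}}, Expect: 0},
    {Name: "missing task", TaskName: "ps", Job: &batch.Job{}, Expect: -1},
}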
4 changes: 2 additions & 2 deletions pkg/controllers/job/job_controller_actions.go
@@ -375,7 +375,7 @@ func (cc *jobcontroller) syncJob(jobInfo *apis.JobInfo, updateStatus state.Updat
             continue
         }
         go func(taskName string, podToCreateEachTask []*v1.Pod) {
-            taskIndex := jobhelpers.GetTasklndexUnderJob(taskName, job)
+            taskIndex := jobhelpers.GetTaskIndexUnderJob(taskName, job)
             if job.Spec.Tasks[taskIndex].DependsOn != nil {
                 if !cc.waitDependsOnTaskMeetCondition(taskName, taskIndex, podToCreateEachTask, job) {
                     klog.V(3).Infof("Job %s/%s depends on task not ready", job.Name, job.Namespace)
@@ -512,7 +512,7 @@ func (cc *jobcontroller) waitDependsOnTaskMeetCondition(taskName string, taskInd
 
 func (cc *jobcontroller) isDependsOnPodsReady(task string, job *batch.Job) bool {
     dependsOnPods := jobhelpers.GetPodsNameUnderTask(task, job)
-    dependsOnTaskIndex := jobhelpers.GetTasklndexUnderJob(task, job)
+    dependsOnTaskIndex := jobhelpers.GetTaskIndexUnderJob(task, job)
     runningPodCount := 0
     for _, podName := range dependsOnPods {
         pod, err := cc.podLister.Pods(job.Namespace).Get(podName)
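
The taskIndex above is used to read the task's DependsOn field. For orientation, a hypothetical Job spec in which one task gates another might look like the sketch below (assuming the v1alpha1 DependsOn shape; the task names and replica counts are illustrative):

job := &batch.Job{
    Spec: batch.JobSpec{
        Tasks: []batch.TaskSpec{
            {Name: "worker", Replicas: 2},
            // pods of "master" are created only after "worker" meets its condition
            {Name: "master", Replicas: 1, DependsOn: &batch.DependsOn{Name: []string{"worker"}}},
        },
    },
}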
@@ -80,7 +80,7 @@ func (mp *Plugin) OnPodCreate(pod *v1.Pod, job *batch.Job) error {
     workerHosts := ""
     env := v1.EnvVar{}
     if helpers.GetTaskKey(pod) == mp.masterName {
-        workerHosts = mp.generateTaskHosts(job.Spec.Tasks[helpers.GetTasklndexUnderJob(mp.workerName, job)], job.Name)
+        workerHosts = mp.generateTaskHosts(job.Spec.Tasks[helpers.GetTaskIndexUnderJob(mp.workerName, job)], job.Name)
         env = v1.EnvVar{
             Name:  MPIHost,
             Value: workerHosts,
@@ -64,7 +64,7 @@ func (pp *pytorchPlugin) Name() string {
 
 func (pp *pytorchPlugin) OnPodCreate(pod *v1.Pod, job *batch.Job) error {
     taskType := helpers.GetTaskKey(pod)
-    masterIndex := helpers.GetTasklndexUnderJob(pp.masterName, job)
+    masterIndex := helpers.GetTaskIndexUnderJob(pp.masterName, job)
     if masterIndex == -1 {
         klog.Errorf("job %v doesn't have task %v", job.Name, pp.masterName)
         return nil
6 changes: 3 additions & 3 deletions pkg/scheduler/actions/preempt/preempt.go
@@ -67,7 +67,7 @@ func (pmpt *Action) Execute(ssn *framework.Session) {
             queues[queue.UID] = queue
         }
 
-        // check job if starting for more resources.
+        // check job if starving for more resources.
         if ssn.JobStarving(job) {
             if _, found := preemptorsMap[job.Queue]; !found {
                 preemptorsMap[job.Queue] = util.NewPriorityQueue(ssn.JobOrderFn)
@@ -271,7 +271,7 @@ func preempt(
 
     for !victimsQueue.Empty() {
         // If reclaimed enough resources, break loop to avoid Sub panic.
-        // If preemptor's queue is overused, it means preemptor can not be allcated. So no need care about the node idle resourace
+        // If preemptor's queue is overused, it means preemptor can not be allocated. So no need care about the node idle resource
         if !ssn.Overused(currentQueue) && preemptor.InitResreq.LessEqual(node.FutureIdle(), api.Zero) {
             break
         }
@@ -290,7 +290,7 @@
         klog.V(3).Infof("Preempted <%v> for Task <%s/%s> requested <%v>.",
             preempted, preemptor.Namespace, preemptor.Name, preemptor.InitResreq)
 
-        // If preemptor's queue is overused, it means preemptor can not be allcated. So no need care about the node idle resourace
+        // If preemptor's queue is overused, it means preemptor can not be allocated. So no need care about the node idle resource
         if !ssn.Overused(currentQueue) && preemptor.InitResreq.LessEqual(node.FutureIdle(), api.Zero) {
             if err := stmt.Pipeline(preemptor, node.Name); err != nil {
                 klog.Errorf("Failed to pipeline Task <%s/%s> on Node <%s>",
8 changes: 4 additions & 4 deletions pkg/scheduler/api/job_info.go
@@ -37,7 +37,7 @@ import (
     volumescheduling "volcano.sh/volcano/pkg/scheduler/capabilities/volumebinding"
 )
 
-// DisruptionBudget define job min pod available and max pod unvailable value
+// DisruptionBudget define job min pod available and max pod unavailable value
 type DisruptionBudget struct {
     MinAvailable  string
     MaxUnavilable string
@@ -329,7 +329,7 @@ type JobInfo struct {
     Preemptable bool
 
     // RevocableZone support set volcano.sh/revocable-zone annotaion or label for pod/podgroup
-    // we only support empty value or * value for this version and we will support specify revocable zone name for futrue release
+    // we only support empty value or * value for this version and we will support specify revocable zone name for future release
     // empty value means workload can not use revocable node
     // * value means workload can use all the revocable node for during node active revocable time.
     RevocableZone string
@@ -672,7 +672,7 @@ func (ji *JobInfo) TaskSchedulingReason(tid TaskID) (reason string, msg string)
         return PodReasonUnschedulable, msg
     case Pending:
         if fe := ji.NodesFitErrors[tid]; fe != nil {
-            // Pod is not schedulable
+            // Pod is unschedulable
             return PodReasonUnschedulable, fe.Error()
         }
         // Pod is not scheduled yet, keep UNSCHEDULABLE as the reason to support cluster autoscaler
@@ -709,7 +709,7 @@
 
 // CheckTaskValid returns whether each task of job is valid.
 func (ji *JobInfo) CheckTaskValid() bool {
-    // if job minAvailable is less than sumof(task minAvailable), skip this check
+    // if job minAvailable is less than sum of(task minAvailable), skip this check
     if ji.MinAvailable < ji.TaskMinAvailableTotal {
         return true
     }
2 changes: 1 addition & 1 deletion pkg/scheduler/api/node_info.go
@@ -493,7 +493,7 @@ func (ni *NodeInfo) addResource(pod *v1.Pod) {
     ni.Others[vgpu.DeviceName].(Devices).AddResource(pod)
 }
 
-// subResource is used to substract sharable devices
+// subResource is used to subtract sharable devices
 func (ni *NodeInfo) subResource(pod *v1.Pod) {
     ni.Others[GPUSharingDevice].(Devices).SubResource(pod)
     ni.Others[vgpu.DeviceName].(Devices).SubResource(pod)
2 changes: 1 addition & 1 deletion pkg/scheduler/conf/scheduler_conf.go
@@ -49,7 +49,7 @@ type PluginOption struct {
     Name string `yaml:"name"`
     // EnabledJobOrder defines whether jobOrderFn is enabled
    EnabledJobOrder *bool `yaml:"enableJobOrder"`
-    // EnabledHierachy defines whether hierarchical sharing is enabled
+    // EnabledHierarchy defines whether hierarchical sharing is enabled
     EnabledHierarchy *bool `yaml:"enableHierarchy"`
     // EnabledJobReady defines whether jobReadyFn is enabled
     EnabledJobReady *bool `yaml:"enableJobReady"`
2 changes: 1 addition & 1 deletion pkg/scheduler/plugins/gang/gang.go
@@ -155,7 +155,7 @@ func (gp *gangPlugin) OnSessionOpen(ssn *framework.Session) {
     jobStarvingFn := func(obj interface{}) bool {
         ji := obj.(*api.JobInfo)
         occupied := ji.WaitingTaskNum() + ji.ReadyTaskNum()
-        // In the preemption scenario, the taskMinAvailble configuration is not concerned, only the jobMinAvailble is concerned
+        // In the preemption scenario, the taskMinAvailable configuration is not concerned, only the jobMinAvailable is concerned
         return occupied < ji.MinAvailable
     }
     ssn.AddJobStarvingFns(gp.Name(), jobStarvingFn)
2 changes: 1 addition & 1 deletion pkg/scheduler/plugins/resourcequota/resourcequota.go
@@ -7,7 +7,7 @@ import (
     quotav1 "k8s.io/apiserver/pkg/quota/v1"
     "k8s.io/klog/v2"
 
-    scheduling "volcano.sh/apis/pkg/apis/scheduling/v1beta1"
+    scheduling "volcano.sh/apis/pkg/apis/scheduling"
     "volcano.sh/volcano/pkg/scheduler/api"
     "volcano.sh/volcano/pkg/scheduler/framework"
     "volcano.sh/volcano/pkg/scheduler/plugins/util"
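
Only the import path changes here; the scheduling alias is kept, so references in this file now resolve against the internal API group instead of the versioned v1beta1 package. As a hedged illustration (the identifiers this file actually uses are outside the hunk, and the change assumes both packages declare matching names), a call site like this would be unaffected by the swap:

// hypothetical usage: PodGroupPending is declared in both the internal
// apis/scheduling package and in v1beta1, so the alias swap is source-compatible
if job.PodGroup.Status.Phase == scheduling.PodGroupPending {
    klog.V(4).Infof("job %s/%s waiting on quota", job.Namespace, job.Name)
}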
2 changes: 1 addition & 1 deletion pkg/webhooks/admission/jobs/plugins/mpi/mpi.go
@@ -26,7 +26,7 @@ import (
 
 func AddDependsOn(job *v1alpha1.Job) {
     mp := controllerMpi.NewInstance(job.Spec.Plugins[controllerMpi.MPIPluginName])
-    masterIndex := helpers.GetTasklndexUnderJob(mp.GetMasterName(), job)
+    masterIndex := helpers.GetTaskIndexUnderJob(mp.GetMasterName(), job)
     if masterIndex == -1 {
         klog.Errorln("Failed to find master task")
         return
4 changes: 2 additions & 2 deletions pkg/webhooks/admission/jobs/validate/admit_job.go
@@ -144,8 +144,8 @@ func validateJobCreate(job *v1alpha1.Job, reviewResponse *admissionv1.AdmissionR
 
     if _, ok := job.Spec.Plugins[controllerMpi.MPIPluginName]; ok {
         mp := controllerMpi.NewInstance(job.Spec.Plugins[controllerMpi.MPIPluginName])
-        masterIndex := helpers.GetTasklndexUnderJob(mp.GetMasterName(), job)
-        workerIndex := helpers.GetTasklndexUnderJob(mp.GetWorkerName(), job)
+        masterIndex := helpers.GetTaskIndexUnderJob(mp.GetMasterName(), job)
+        workerIndex := helpers.GetTaskIndexUnderJob(mp.GetWorkerName(), job)
         if masterIndex == -1 {
             reviewResponse.Allowed = false
             return "The specified mpi master task was not found"
