diff --git a/definition/pipelines.go b/definition/pipelines.go
index 60c9ca8..d8bfa08 100644
--- a/definition/pipelines.go
+++ b/definition/pipelines.go
@@ -69,10 +69,12 @@ type PipelineDef struct {
 	// Env sets/overrides environment variables for all tasks (takes precedence over process environment)
 	Env map[string]string `yaml:"env"`
 
+	// Tasks is a map of task names to task definitions
 	Tasks map[string]TaskDef `yaml:"tasks"`
 
-	// Script to be executed if this pipeline fails, e.g. for notifications.
-	// In this script, you have the following variables set:
+	// Task to be executed if this pipeline fails, e.g. for notifications.
+	//
+	// In this task, you have the following variables set:
 	// - failedTaskName: Name of the failed task (key from pipelines.yml)
 	// - failedTaskExitCode: Exit code of the failed task
 	// - failedTaskError: Error message of the failed task
diff --git a/prunner.go b/prunner.go
index d1eb480..41a8404 100644
--- a/prunner.go
+++ b/prunner.go
@@ -421,7 +421,7 @@ func (r *PipelineRunner) HandleTaskChange(t *task.Task) {
 	}
 	updateJobTaskStateFromTask(jt, t)
 
-	// if the task has errored, and we want to fail-fast (ContinueRunningTasksAfterFailure is set to FALSE),
+	// If the task has errored, and we want to fail-fast (ContinueRunningTasksAfterFailure is false),
 	// then we directly abort all other tasks of the job.
 	// NOTE: this is NOT the context.Canceled case from above (if a job is explicitly aborted), but only
 	// if one task failed, and we want to kill the other tasks.
@@ -496,7 +496,7 @@ func (r *PipelineRunner) runOnErrorScript(t *task.Task, j *PipelineJob, onErrorT
 			WithField("pipeline", j.Pipeline).
 			WithField("failedTaskName", t.Name).
 			WithError(err).
-			Debug("Could not create stdoutReader for failed task")
+			Warn("Could not create stdout reader for failed task")
 	} else {
 		defer func(rc io.ReadCloser) {
 			_ = rc.Close()
@@ -509,7 +509,7 @@ func (r *PipelineRunner) runOnErrorScript(t *task.Task, j *PipelineJob, onErrorT
 				WithField("pipeline", j.Pipeline).
 				WithField("failedTaskName", t.Name).
 				WithError(err).
-				Debug("Could not read stdout of failed task")
+				Warn("Could not read stdout of failed task")
 		}
 	}
 
@@ -552,7 +552,7 @@ func (r *PipelineRunner) runOnErrorScript(t *task.Task, j *PipelineJob, onErrorT
 	onErrorJobTask := jobTask{
 		TaskDef: definition.TaskDef{
 			Script: onErrorTaskDef.Script,
-			// AllowFailure needs to be FALSE; otherwise lastError below won't be filled (so errors will not appear in the log)
+			// AllowFailure needs to be false, otherwise lastError below won't be filled (so errors will not appear in the log)
 			AllowFailure: false,
 			Env: onErrorTaskDef.Env,
 		},
@@ -962,6 +962,7 @@ func (r *PipelineRunner) Shutdown(ctx context.Context) error {
 		// Wait for all running jobs to have called JobCompleted
 		r.wg.Wait()
 
+		// TODO This is not safe to do outside of the requestPersist loop, since we might have a save in progress. So we need to wait until the save loop is finished before calling SaveToStore.
 		// Do a final save to include the state of recently completed jobs
 		r.SaveToStore()
 	}()
diff --git a/taskctl/runner.go b/taskctl/runner.go
index 4bf0480..dec2fd4 100644
--- a/taskctl/runner.go
+++ b/taskctl/runner.go
@@ -166,8 +166,8 @@ func (r *TaskRunner) Run(t *task.Task) error {
 		// but this lead to a huge memory leak because the full job output was retained
 		// in memory forever.
 		// This enabled features of taskctl like {{ .Tasks.TASKNAME.Output }} and {{.Output}},
-		// but we never promised these features. Thus it is fine to not log to stdout and stderr
-		// into a Buffer, but directly to a file.
+		// but we never promised these features. Thus, it is fine to not log stdout and stderr
+		// into a Buffer, but directly to the output store.
 		stdoutWriter []io.Writer
 		stderrWriter []io.Writer
 	)
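
The TODO added in Shutdown describes a potential race between the final SaveToStore and a save that the persist loop may still have in flight. The following is a minimal, illustrative Go sketch of one way to sequence this: close the request channel, wait for the loop to drain, then do the final save. The runner struct and the requestPersist/persistDone names are assumptions for illustration, not prunner's actual API.

```go
package main

import "sync"

// Illustrative runner; field and method names are assumptions, not prunner's API.
type runner struct {
	requestPersist chan struct{} // requests handled by the persist loop
	persistDone    chan struct{} // closed once the persist loop has exited
	wg             sync.WaitGroup
}

// persistLoop serializes all saves; it exits when requestPersist is closed.
func (r *runner) persistLoop() {
	for range r.requestPersist {
		r.saveToStore()
	}
	close(r.persistDone)
}

func (r *runner) saveToStore() {
	// ... write the current job state to the store ...
}

// shutdown mirrors the order in the diff: wait for running jobs, then make
// sure no save is in flight before doing the final save.
func (r *runner) shutdown() {
	r.wg.Wait()             // wait for all running jobs to have completed
	close(r.requestPersist) // stop accepting persist requests
	<-r.persistDone         // wait until any in-progress save has finished
	r.saveToStore()         // final save cannot race with the loop anymore
}

func main() {
	r := &runner{
		requestPersist: make(chan struct{}, 1),
		persistDone:    make(chan struct{}),
	}
	go r.persistLoop()
	r.shutdown()
}
```

In this sketch, closing the request channel doubles as the shutdown signal for the persist loop, so the final save needs no extra locking; all saves still happen sequentially.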