From 3615c015db3a2e3ce51fb4e815ff164495ad3f19 Mon Sep 17 00:00:00 2001 From: Yaroslav Halchenko Date: Tue, 19 Sep 2023 21:04:51 -0400 Subject: [PATCH] [DATALAD RUNCMD] fix ambiguous typos manually === Do not change lines below === { "chain": [], "cmd": "codespell -i3 -C4 -w", "exit": 0, "extra_inputs": [], "inputs": [], "outputs": [], "pwd": "." } ^^^ Do not change lines above ^^^ --- CHANGELOG/CHANGELOG-v0.18.2.md | 4 ++-- boilerplate/flyte/end2end/run-tests.py | 2 +- datacatalog/boilerplate/flyte/end2end/run-tests.py | 2 +- flyteadmin/boilerplate/flyte/end2end/run-tests.py | 2 +- flyteadmin/pkg/manager/impl/metrics_manager.go | 6 +++--- flyteadmin/pkg/repositories/transformers/execution.go | 2 +- flyteadmin/pkg/repositories/transformers/task_execution.go | 2 +- flyteadmin/scheduler/doc.go | 2 +- flytecopilot/boilerplate/flyte/end2end/run-tests.py | 2 +- flytepropeller/boilerplate/flyte/end2end/run-tests.py | 2 +- flytepropeller/pkg/webhook/aws_secret_manager.go | 2 +- rfc/core language/1461-cache-serialize-api.md | 2 +- rsts/concepts/architecture.rst | 2 +- 13 files changed, 16 insertions(+), 16 deletions(-) diff --git a/CHANGELOG/CHANGELOG-v0.18.2.md b/CHANGELOG/CHANGELOG-v0.18.2.md index ec3f1213eb7..8130474aafe 100644 --- a/CHANGELOG/CHANGELOG-v0.18.2.md +++ b/CHANGELOG/CHANGELOG-v0.18.2.md @@ -43,7 +43,7 @@ See the flytekit [0.25.0 release notes](https://github.com/flyteorg/flytekit/rel ... minio: ... - countour: + contour: ... ``` @@ -56,7 +56,7 @@ See the flytekit [0.25.0 release notes](https://github.com/flyteorg/flytekit/rel ... minio: ... - countour: + contour: ... ``` * Alternatively, if you do not have any dependency on external flyte dependencies, you can keep your ``myvalues.yaml`` and switch to using ``flyte-core`` helm chart directly with no changes. 
diff --git a/boilerplate/flyte/end2end/run-tests.py b/boilerplate/flyte/end2end/run-tests.py index eb2b28d8d36..5365da006e7 100644 --- a/boilerplate/flyte/end2end/run-tests.py +++ b/boilerplate/flyte/end2end/run-tests.py @@ -127,7 +127,7 @@ def schedule_workflow_groups( terminate_workflow_on_failure: bool, ) -> Dict[str, bool]: """ - Schedule workflows executions for all workflow gropus and return True if all executions succeed, otherwise + Schedule workflows executions for all workflow groups and return True if all executions succeed, otherwise return False. """ executions_by_wfgroup = {} diff --git a/datacatalog/boilerplate/flyte/end2end/run-tests.py b/datacatalog/boilerplate/flyte/end2end/run-tests.py index 66c678fd45a..6da7ae98589 100644 --- a/datacatalog/boilerplate/flyte/end2end/run-tests.py +++ b/datacatalog/boilerplate/flyte/end2end/run-tests.py @@ -126,7 +126,7 @@ def schedule_workflow_groups( terminate_workflow_on_failure: bool, ) -> Dict[str, bool]: """ - Schedule workflows executions for all workflow gropus and return True if all executions succeed, otherwise + Schedule workflows executions for all workflow groups and return True if all executions succeed, otherwise return False. """ executions_by_wfgroup = {} diff --git a/flyteadmin/boilerplate/flyte/end2end/run-tests.py b/flyteadmin/boilerplate/flyte/end2end/run-tests.py index 66c678fd45a..6da7ae98589 100644 --- a/flyteadmin/boilerplate/flyte/end2end/run-tests.py +++ b/flyteadmin/boilerplate/flyte/end2end/run-tests.py @@ -126,7 +126,7 @@ def schedule_workflow_groups( terminate_workflow_on_failure: bool, ) -> Dict[str, bool]: """ - Schedule workflows executions for all workflow gropus and return True if all executions succeed, otherwise + Schedule workflows executions for all workflow groups and return True if all executions succeed, otherwise return False. 
""" executions_by_wfgroup = {} diff --git a/flyteadmin/pkg/manager/impl/metrics_manager.go b/flyteadmin/pkg/manager/impl/metrics_manager.go index a6d010b1e21..27f36840697 100644 --- a/flyteadmin/pkg/manager/impl/metrics_manager.go +++ b/flyteadmin/pkg/manager/impl/metrics_manager.go @@ -209,7 +209,7 @@ func (m *MetricsManager) parseBranchNodeExecution(ctx context.Context, *spans = append(*spans, nodeExecutionSpan) - // backened overhead + // backend overhead if !nodeExecution.Closure.UpdatedAt.AsTime().Before(branchNodeExecution.Closure.UpdatedAt.AsTime()) { *spans = append(*spans, createOperationSpan(branchNodeExecution.Closure.UpdatedAt, nodeExecution.Closure.UpdatedAt, nodeTeardown)) @@ -271,7 +271,7 @@ func (m *MetricsManager) parseDynamicNodeExecution(ctx context.Context, nodeExec return err } - // backened overhead + // backend overhead latestUpstreamNode := m.getLatestUpstreamNodeExecution(v1alpha1.EndNodeID, nodeExecutionData.DynamicWorkflow.CompiledWorkflow.Primary.Connections.Upstream, nodeExecutions) if latestUpstreamNode != nil && !nodeExecution.Closure.UpdatedAt.AsTime().Before(latestUpstreamNode.Closure.UpdatedAt.AsTime()) { @@ -540,7 +540,7 @@ func (m *MetricsManager) parseSubworkflowNodeExecution(ctx context.Context, return err } - // backened overhead + // backend overhead latestUpstreamNode := m.getLatestUpstreamNodeExecution(v1alpha1.EndNodeID, workflow.Closure.CompiledWorkflow.Primary.Connections.Upstream, nodeExecutions) if latestUpstreamNode != nil && !nodeExecution.Closure.UpdatedAt.AsTime().Before(latestUpstreamNode.Closure.UpdatedAt.AsTime()) { diff --git a/flyteadmin/pkg/repositories/transformers/execution.go b/flyteadmin/pkg/repositories/transformers/execution.go index abd77413e00..71f90a65460 100644 --- a/flyteadmin/pkg/repositories/transformers/execution.go +++ b/flyteadmin/pkg/repositories/transformers/execution.go @@ -385,7 +385,7 @@ func FromExecutionModel(ctx context.Context, executionModel models.Execution, op }, nil } -// 
PopulateDefaultStateChangeDetails used to populate execution state change details for older executions which donot +// PopulateDefaultStateChangeDetails used to populate execution state change details for older executions which do not // have these details captured. Hence we construct a default state change details from existing data model. func PopulateDefaultStateChangeDetails(executionModel models.Execution) (*admin.ExecutionStateChangeDetails, error) { var err error diff --git a/flyteadmin/pkg/repositories/transformers/task_execution.go b/flyteadmin/pkg/repositories/transformers/task_execution.go index edfc32b1990..8e0eef6dd2d 100644 --- a/flyteadmin/pkg/repositories/transformers/task_execution.go +++ b/flyteadmin/pkg/repositories/transformers/task_execution.go @@ -280,7 +280,7 @@ func mergeCustom(existing, latest *_struct.Struct) (*_struct.Struct, error) { return &response, nil } -// mergeExternalResource combines the lastest ExternalResourceInfo proto with an existing instance +// mergeExternalResource combines the latest ExternalResourceInfo proto with an existing instance // by updating fields and merging logs. func mergeExternalResource(existing, latest *event.ExternalResourceInfo) *event.ExternalResourceInfo { if existing == nil { diff --git a/flyteadmin/scheduler/doc.go b/flyteadmin/scheduler/doc.go index 9caf26d74bc..ed00b33f6d3 100644 --- a/flyteadmin/scheduler/doc.go +++ b/flyteadmin/scheduler/doc.go @@ -4,7 +4,7 @@ // 1] Schedule management // This component is part of the pkg/async/schedule/flytescheduler package // Role of this component is to create / activate / deactivate schedules -// The above actions are exposed through launchplan activation/deactivation api's and donot have separate controls. +// The above actions are exposed through launchplan activation/deactivation api's and do not have separate controls. 
// Whenever a launchplan with a schedule is activated, a new schedule entry is created in the datastore // On deactivation the created scheduled and launchplan is deactivated through a flag // Atmost one launchplan is active at any moment across its various versions and same semantics apply for the diff --git a/flytecopilot/boilerplate/flyte/end2end/run-tests.py b/flytecopilot/boilerplate/flyte/end2end/run-tests.py index 15b35e1d93b..c795591caa9 100644 --- a/flytecopilot/boilerplate/flyte/end2end/run-tests.py +++ b/flytecopilot/boilerplate/flyte/end2end/run-tests.py @@ -126,7 +126,7 @@ def schedule_workflow_groups( terminate_workflow_on_failure: bool, ) -> Dict[str, bool]: """ - Schedule workflows executions for all workflow gropus and return True if all executions succeed, otherwise + Schedule workflows executions for all workflow groups and return True if all executions succeed, otherwise return False. """ executions_by_wfgroup = {} diff --git a/flytepropeller/boilerplate/flyte/end2end/run-tests.py b/flytepropeller/boilerplate/flyte/end2end/run-tests.py index 66c678fd45a..6da7ae98589 100644 --- a/flytepropeller/boilerplate/flyte/end2end/run-tests.py +++ b/flytepropeller/boilerplate/flyte/end2end/run-tests.py @@ -126,7 +126,7 @@ def schedule_workflow_groups( terminate_workflow_on_failure: bool, ) -> Dict[str, bool]: """ - Schedule workflows executions for all workflow gropus and return True if all executions succeed, otherwise + Schedule workflows executions for all workflow groups and return True if all executions succeed, otherwise return False. 
""" executions_by_wfgroup = {} diff --git a/flytepropeller/pkg/webhook/aws_secret_manager.go b/flytepropeller/pkg/webhook/aws_secret_manager.go index 9f4e60a74c5..2014c3a8d80 100644 --- a/flytepropeller/pkg/webhook/aws_secret_manager.go +++ b/flytepropeller/pkg/webhook/aws_secret_manager.go @@ -31,7 +31,7 @@ const ( ) var ( - // AWSSecretMountPathPrefix defins the default mount path for secrets + // AWSSecretMountPathPrefix defines the default mount path for secrets AWSSecretMountPathPrefix = []string{string(os.PathSeparator), "etc", "flyte", "secrets"} ) diff --git a/rfc/core language/1461-cache-serialize-api.md b/rfc/core language/1461-cache-serialize-api.md index 6788d994967..03a8bfa9681 100644 --- a/rfc/core language/1461-cache-serialize-api.md +++ b/rfc/core language/1461-cache-serialize-api.md @@ -51,7 +51,7 @@ Reservation requests will include a requested heartbeat-interval-seconds configu ### Datacatalog Managed Reservations The `datacatalog` service will be responsible for managing cache reservations. This will entail the addition of a new ReservationManager and ReservationRepo (with gorm implementation) per the project standards. Additionally it requires a new table in the db where reservations are uniquely defined based on DatasetID and an artifact tag. -All database operations are performed with write consistency, where records are only inserted or updated on restrictive conditions. This eliminates the possibility for race conditions. Where two executions attempt to acquire a cache reservation simultaneously, only one can succeeed. +All database operations are performed with write consistency, where records are only inserted or updated on restrictive conditions. This eliminates the possibility for race conditions. Where two executions attempt to acquire a cache reservation simultaneously, only one can succeed. 
Additionally, the `datacatalog` configuration file defines max-heartbeat-interval-seconds and heartbeat-grace-period-multiplier to define the maximum heartbeat interval of reservation extensions and set the reservation expiration (computed as heartbeat-interval-seconds * heartbeat-grace-period-multiplier). diff --git a/rsts/concepts/architecture.rst b/rsts/concepts/architecture.rst index 45bbaee5f61..062523146c7 100644 --- a/rsts/concepts/architecture.rst +++ b/rsts/concepts/architecture.rst @@ -91,7 +91,7 @@ Complex task types require workloads to be distributed across hundreds of pods. The type-specific task logic is separated into isolated code modules known as **plugins**. Each task type has an associated plugin that is responsible for handling tasks of its type. -For each task in a workflow, FlytePropeller activates the appropriate plugin based on the task type in order to fullfill the task. +For each task in a workflow, FlytePropeller activates the appropriate plugin based on the task type in order to fulfill the task. The Flyte team has pre-built plugins for Hive, Spark, AWS Batch, and :ref:`more `. To support new use-cases, developers can create their own plugins and bundle them in their FlytePropeller deployment.